Skip to content

Commit 7a50cf3

Browse files
CANN: format code using .clang-format (ggml-org#15863)
This commit applies .clang-format rules to all source files under the ggml-cann directory to ensure consistent coding style and readability. The .clang-format option `SortIncludes: false` has been set to disable automatic reordering of include directives. No functional changes are introduced. Co-authored-by: hipudding <[email protected]>
1 parent 6f5d924 commit 7a50cf3

File tree

6 files changed

+2082
-2351
lines changed

6 files changed

+2082
-2351
lines changed

ggml/src/ggml-cann/acl_tensor.cpp

file mode changed: 100755 → 100644
Lines changed: 46 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -51,28 +51,31 @@ aclDataType ggml_cann_type_mapping(ggml_type type) {
5151
return ACL_DT_UNDEFINED;
5252
}
5353

54-
aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne,
55-
size_t* nb, int64_t dims, aclFormat format,
56-
size_t offset) {
54+
aclTensor * ggml_cann_create_tensor(const ggml_tensor * tensor,
55+
int64_t * ne,
56+
size_t * nb,
57+
int64_t dims,
58+
aclFormat format,
59+
size_t offset) {
5760
// If tensor is bcasted, Up to GGML_MAX_DIMS additional dimensions will be
5861
// added.
5962
int64_t acl_ne[GGML_MAX_DIMS * 2], acl_stride[GGML_MAX_DIMS * 2];
6063

6164
if (ne == nullptr) {
6265
for (int i = 0; i < GGML_MAX_DIMS; i++) {
63-
acl_ne[i] = tensor->ne[i];
66+
acl_ne[i] = tensor->ne[i];
6467
// The step size of acl is in elements.
6568
acl_stride[i] = tensor->nb[i] / ggml_element_size(tensor);
6669
}
6770
} else {
6871
// With bcast
6972
for (int i = 0; i < dims; i++) {
70-
acl_ne[i] = ne[i];
73+
acl_ne[i] = ne[i];
7174
acl_stride[i] = nb[i] / ggml_element_size(tensor);
7275
}
7376
}
7477

75-
int64_t final_dims = (dims == 0 ? GGML_MAX_DIMS : dims);
78+
int64_t final_dims = (dims == 0 ? GGML_MAX_DIMS : dims);
7679
int64_t acl_storage_len = 1;
7780
for (int i = 0; i < final_dims; i++) {
7881
acl_storage_len += (acl_ne[i] - 1) * acl_stride[i];
@@ -84,15 +87,13 @@ aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne,
8487
std::reverse(acl_ne, acl_ne + final_dims);
8588
std::reverse(acl_stride, acl_stride + final_dims);
8689

87-
aclTensor* acl_tensor = aclCreateTensor(
88-
acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride,
89-
elem_offset, format, &acl_storage_len, 1,
90-
tensor->data);
90+
aclTensor * acl_tensor = aclCreateTensor(acl_ne, final_dims, ggml_cann_type_mapping(tensor->type), acl_stride,
91+
elem_offset, format, &acl_storage_len, 1, tensor->data);
9192

9293
return acl_tensor;
9394
}
9495

95-
bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) {
96+
bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1) {
9697
for (int i = 0; i < GGML_MAX_DIMS; i++) {
9798
if (t1->ne[i] != t0->ne[i] && t1->ne[i] != 1) {
9899
return true;
@@ -101,15 +102,16 @@ bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1) {
101102
return false;
102103
}
103104

104-
int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0,
105-
const ggml_tensor* src1,
106-
int64_t* bcast_src0_ne,
107-
int64_t* bcast_src1_ne, size_t* bcast_src0_nb,
108-
size_t* bcast_src1_nb) {
105+
int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0,
106+
const ggml_tensor * src1,
107+
int64_t * bcast_src0_ne,
108+
int64_t * bcast_src1_ne,
109+
size_t * bcast_src0_nb,
110+
size_t * bcast_src1_nb) {
109111
GGML_ASSERT(ggml_can_repeat(src1, src0));
110112
int bcast_dim_cnt = 0;
111113
for (int i = 0; i < GGML_MAX_DIMS; i++) {
112-
int64_t nr = src0->ne[i] / src1->ne[i];
114+
int64_t nr = src0->ne[i] / src1->ne[i];
113115
bcast_src0_ne[bcast_dim_cnt] = src0->ne[i] / nr;
114116
bcast_src1_ne[bcast_dim_cnt] = src1->ne[i];
115117
bcast_src0_nb[bcast_dim_cnt] = src0->nb[i];
@@ -119,21 +121,26 @@ int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0,
119121
// Need to add an extra dim.
120122
bcast_src0_ne[bcast_dim_cnt] = nr;
121123
bcast_src1_ne[bcast_dim_cnt] = 1;
122-
bcast_src0_nb[bcast_dim_cnt] = bcast_src0_nb[bcast_dim_cnt - 1] *
123-
bcast_src0_ne[bcast_dim_cnt - 1];
124-
bcast_src1_nb[bcast_dim_cnt] = bcast_src1_nb[bcast_dim_cnt - 1] *
125-
bcast_src1_ne[bcast_dim_cnt - 1];
124+
bcast_src0_nb[bcast_dim_cnt] = bcast_src0_nb[bcast_dim_cnt - 1] * bcast_src0_ne[bcast_dim_cnt - 1];
125+
bcast_src1_nb[bcast_dim_cnt] = bcast_src1_nb[bcast_dim_cnt - 1] * bcast_src1_ne[bcast_dim_cnt - 1];
126126
bcast_dim_cnt++;
127127
}
128128
}
129129
return bcast_dim_cnt;
130130
}
131131

132-
int64_t ggml_cann_get_mulmat_bcast_shape(
133-
const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
134-
const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
135-
int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
136-
size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb) {
132+
int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne,
133+
const int64_t * weight_ne,
134+
const int64_t * dst_ne,
135+
const size_t * input_nb,
136+
const size_t * weight_nb,
137+
const size_t * dst_nb,
138+
int64_t * bcast_input_ne,
139+
int64_t * bcast_weight_ne,
140+
int64_t * bcast_dst_ne,
141+
size_t * bcast_input_nb,
142+
size_t * bcast_weight_nb,
143+
size_t * bcast_dst_nb) {
137144
// input and dst shoule in same shape, except first two dims.
138145
GGML_ASSERT(input_ne[2] == dst_ne[2]);
139146
GGML_ASSERT(input_ne[3] == dst_ne[3]);
@@ -148,34 +155,30 @@ int64_t ggml_cann_get_mulmat_bcast_shape(
148155
// Do not use bcast in the first two dimensions because we only support
149156
// the bcast batch dimension. Just copy them.
150157
if (i < 2 || nr == 1) {
151-
bcast_input_ne[bcast_dim_cnt] = input_ne[i];
158+
bcast_input_ne[bcast_dim_cnt] = input_ne[i];
152159
bcast_weight_ne[bcast_dim_cnt] = weight_ne[i];
153-
bcast_dst_ne[bcast_dim_cnt] = dst_ne[i];
160+
bcast_dst_ne[bcast_dim_cnt] = dst_ne[i];
154161

155-
bcast_input_nb[bcast_dim_cnt] = input_nb[i];
162+
bcast_input_nb[bcast_dim_cnt] = input_nb[i];
156163
bcast_weight_nb[bcast_dim_cnt] = weight_nb[i];
157-
bcast_dst_nb[bcast_dim_cnt] = dst_nb[i];
164+
bcast_dst_nb[bcast_dim_cnt] = dst_nb[i];
158165
bcast_dim_cnt++;
159166
} else {
160167
// Need to add an extra dim.
161-
bcast_input_ne[bcast_dim_cnt] = nr;
162-
bcast_dst_ne[bcast_dim_cnt] = nr;
168+
bcast_input_ne[bcast_dim_cnt] = nr;
169+
bcast_dst_ne[bcast_dim_cnt] = nr;
163170
bcast_weight_ne[bcast_dim_cnt] = 1;
164-
bcast_input_nb[bcast_dim_cnt] = input_nb[i];
165-
bcast_dst_nb[bcast_dim_cnt] = dst_nb[i];
171+
bcast_input_nb[bcast_dim_cnt] = input_nb[i];
172+
bcast_dst_nb[bcast_dim_cnt] = dst_nb[i];
166173
bcast_weight_nb[bcast_dim_cnt] = weight_nb[i];
167174
bcast_dim_cnt++;
168175

169-
bcast_input_ne[bcast_dim_cnt] = input_ne[i] / nr;
170-
bcast_dst_ne[bcast_dim_cnt] = dst_ne[i] / nr;
176+
bcast_input_ne[bcast_dim_cnt] = input_ne[i] / nr;
177+
bcast_dst_ne[bcast_dim_cnt] = dst_ne[i] / nr;
171178
bcast_weight_ne[bcast_dim_cnt] = weight_ne[i];
172-
bcast_input_nb[bcast_dim_cnt] = bcast_input_nb[bcast_dim_cnt - 1] *
173-
bcast_input_ne[bcast_dim_cnt - 1];
174-
bcast_dst_nb[bcast_dim_cnt] = bcast_dst_nb[bcast_dim_cnt - 1] *
175-
bcast_dst_ne[bcast_dim_cnt - 1];
176-
bcast_weight_nb[bcast_dim_cnt] =
177-
bcast_weight_nb[bcast_dim_cnt - 1] *
178-
bcast_weight_ne[bcast_dim_cnt - 1];
179+
bcast_input_nb[bcast_dim_cnt] = bcast_input_nb[bcast_dim_cnt - 1] * bcast_input_ne[bcast_dim_cnt - 1];
180+
bcast_dst_nb[bcast_dim_cnt] = bcast_dst_nb[bcast_dim_cnt - 1] * bcast_dst_ne[bcast_dim_cnt - 1];
181+
bcast_weight_nb[bcast_dim_cnt] = bcast_weight_nb[bcast_dim_cnt - 1] * bcast_weight_ne[bcast_dim_cnt - 1];
179182
bcast_dim_cnt++;
180183
}
181184
}

ggml/src/ggml-cann/acl_tensor.h

file mode changed: 100755 → 100644
Lines changed: 54 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -62,10 +62,12 @@ aclDataType ggml_cann_type_mapping(ggml_type type);
6262
* @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
6363
* @return Pointer to the created ACL tensor.
6464
*/
65-
aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = nullptr,
66-
size_t* nb = nullptr, int64_t dims = 0,
67-
aclFormat format = ACL_FORMAT_ND,
68-
size_t offset = 0);
65+
aclTensor * ggml_cann_create_tensor(const ggml_tensor * tensor,
66+
int64_t * ne = nullptr,
67+
size_t * nb = nullptr,
68+
int64_t dims = 0,
69+
aclFormat format = ACL_FORMAT_ND,
70+
size_t offset = 0);
6971

7072
/**
7173
* @brief Template for creating an ACL tensor from provided parameters. typename TYPE
@@ -87,12 +89,15 @@ aclTensor* ggml_cann_create_tensor(const ggml_tensor* tensor, int64_t* ne = null
8789
* @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
8890
* @return Pointer to the created ACL tensor.
8991
*/
90-
template<typename TYPE>
91-
aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
92-
TYPE type_size, int64_t* ne, TYPE* nb,
93-
int64_t dims,
94-
aclFormat format = ACL_FORMAT_ND,
95-
size_t offset = 0) {
92+
template <typename TYPE>
93+
aclTensor * ggml_cann_create_tensor(void * data_ptr,
94+
aclDataType dtype,
95+
TYPE type_size,
96+
int64_t * ne,
97+
TYPE * nb,
98+
int64_t dims,
99+
aclFormat format = ACL_FORMAT_ND,
100+
size_t offset = 0) {
96101
int64_t tmp_ne[GGML_MAX_DIMS * 2];
97102
int64_t tmp_stride[GGML_MAX_DIMS * 2];
98103

@@ -109,9 +114,8 @@ aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
109114
std::reverse(tmp_ne, tmp_ne + dims);
110115
std::reverse(tmp_stride, tmp_stride + dims);
111116

112-
aclTensor* acl_tensor =
113-
aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size,
114-
format, &acl_storage_len, 1, data_ptr);
117+
aclTensor * acl_tensor =
118+
aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, format, &acl_storage_len, 1, data_ptr);
115119

116120
return acl_tensor;
117121
}
@@ -132,7 +136,7 @@ aclTensor* ggml_cann_create_tensor(void* data_ptr, aclDataType dtype,
132136
* to 1. If such a dimension is found, broadcasting is required to align t1
133137
* with t0 for element-wise operations.
134138
*/
135-
bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
139+
bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1);
136140

137141
/**
138142
* @brief Computes broadcast shapes and strides for two ggml_tensors.
@@ -187,19 +191,21 @@ bool ggml_cann_need_bcast(const ggml_tensor* t0, const ggml_tensor* t1);
187191
* dim1 in a inserted dim, should add nb for dim1,
188192
* and all other nb moves to next in order.
189193
*/
190-
int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* src1,
191-
int64_t* bcast_ne_src0, int64_t* bcast_ne_src1,
192-
size_t* bcast_nb_src0, size_t* bcast_nb_src1);
194+
int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0,
195+
const ggml_tensor * src1,
196+
int64_t * bcast_ne_src0,
197+
int64_t * bcast_ne_src1,
198+
size_t * bcast_nb_src0,
199+
size_t * bcast_nb_src1);
193200

194201
// Bcast macro to avoid duplicate code.
195-
#define BCAST_SHAPE(src0, src1) \
196-
int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \
197-
int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \
198-
size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \
199-
size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \
200-
int64_t bcast_dims = ggml_cann_get_bcast_shape( \
201-
src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, bcast_##src0##_nb, \
202-
bcast_##src1##_nb);
202+
#define BCAST_SHAPE(src0, src1) \
203+
int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2]; \
204+
int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2]; \
205+
size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2]; \
206+
size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2]; \
207+
int64_t bcast_dims = ggml_cann_get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, \
208+
bcast_##src0##_nb, bcast_##src1##_nb);
203209

204210
#define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
205211

@@ -233,26 +239,31 @@ int64_t ggml_cann_get_bcast_shape(const ggml_tensor* src0, const ggml_tensor* sr
233239
* before cast dim.
234240
* @sa ggml_cann_get_bcast_shape
235241
*/
236-
int64_t ggml_cann_get_mulmat_bcast_shape(
237-
const int64_t* input_ne, const int64_t* weight_ne, const int64_t* dst_ne,
238-
const size_t* input_nb, const size_t* weight_nb, const size_t* dst_nb,
239-
int64_t* bcast_input_ne, int64_t* bcast_weight_ne, int64_t* bcast_dst_ne,
240-
size_t* bcast_input_nb, size_t* bcast_weight_nb, size_t* bcast_dst_nb);
242+
int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne,
243+
const int64_t * weight_ne,
244+
const int64_t * dst_ne,
245+
const size_t * input_nb,
246+
const size_t * weight_nb,
247+
const size_t * dst_nb,
248+
int64_t * bcast_input_ne,
249+
int64_t * bcast_weight_ne,
250+
int64_t * bcast_dst_ne,
251+
size_t * bcast_input_nb,
252+
size_t * bcast_weight_nb,
253+
size_t * bcast_dst_nb);
241254

242255
// Bcast macro to avoid duplicate code.
243-
#define BCAST_MUL_MAT_SHAPE(input, weight, dst) \
244-
int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \
245-
int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \
246-
int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \
247-
size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \
248-
size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \
249-
size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \
250-
int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \
251-
input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, \
252-
bcast_##input##_ne, bcast_##weight##_ne, bcast_##dst##_ne, \
253-
bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);
256+
#define BCAST_MUL_MAT_SHAPE(input, weight, dst) \
257+
int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2]; \
258+
int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2]; \
259+
int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2]; \
260+
size_t bcast_##input##_nb[GGML_MAX_DIMS * 2]; \
261+
size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2]; \
262+
size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2]; \
263+
int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape( \
264+
input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, bcast_##input##_ne, bcast_##weight##_ne, \
265+
bcast_##dst##_ne, bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);
254266

255-
#define BCAST_MUL_MAT_PARAM(tensor) \
256-
bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
267+
#define BCAST_MUL_MAT_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
257268

258269
#endif // CANN_ACL_TENSOR_H

0 commit comments

Comments (0)