/*
 * Copyright (C) Cvitek Co., Ltd. 2019-2020. All rights reserved.
 */
// NOTE: the angle-bracket contents (system includes and template arguments)
// were stripped from the extracted source; the headers and the <float>,
// <int64_t>, <int> arguments below are reconstructed from how the values are
// used and are assumptions, not the verbatim original.
#include <cmath>
#include <cstring>
#include <memory>
#include <vector>
#include "SoftmaxOp.h"

namespace cvi {

void SoftmaxOp::interpretFp32(
    std::vector<std::shared_ptr<std::vector<float>>> &operand_tensors,
    std::vector<std::vector<int64_t>> &operand_shapes,
    std::shared_ptr<std::vector<float>> &result_tensor,
    std::vector<int64_t> &result_shape) {
  (void)result_shape;
  auto axis = param.get<int>("axis");
  auto &shape = operand_shapes[0];
  axis = axis % shape.size();

  // n: product of dims before `axis`; inner_dim: product of dims after it.
  int32_t n = 1, inner_dim = 1;
  for (int i = 0; i < axis; ++i) {
    n *= shape[i];
  }
  for (size_t i = axis + 1; i < shape.size(); ++i) {
    inner_dim *= shape[i];
  }
  int32_t c = shape[axis];
  int32_t dim = c * inner_dim;

  float *max = new float[inner_dim];
  float *sum = new float[inner_dim];
  float *p = operand_tensors[0]->data();
  float *q = result_tensor->data();

  for (int i = 0; i < n; ++i) {
    // initialize the running max from the current outer slice (not from the
    // start of the tensor) so the max subtraction stays numerically safe
    memcpy(max, p + i * dim, inner_dim * sizeof(float));
    memset(sum, 0, inner_dim * sizeof(float));
    // find max value across channel
    int c_offset = i * dim;
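    // NOTE: the extracted source is truncated at this point. The loops below
    // are a hedged sketch of the remaining standard softmax steps implied by
    // the setup above (per-position max across the channel, exp(x - max) with
    // per-position sums, then normalization); they are an assumption, not the
    // verbatim original.
    for (int j = 0; j < dim; ++j) {
      int idx = c_offset + j;
      if (p[idx] > max[j % inner_dim]) {
        max[j % inner_dim] = p[idx];
      }
    }
    // exponentiate with the max subtracted for numerical stability and
    // accumulate the per-position sums
    for (int j = 0; j < dim; ++j) {
      int idx = c_offset + j;
      q[idx] = std::exp(p[idx] - max[j % inner_dim]);
      sum[j % inner_dim] += q[idx];
    }
    // normalize so each slice along `axis` sums to 1
    for (int j = 0; j < dim; ++j) {
      int idx = c_offset + j;
      q[idx] /= sum[j % inner_dim];
    }
  }

  delete[] max;
  delete[] sum;
}

} // namespace cvi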