mirror of
https://github.com/saymrwulf/pytorch.git
synced 2026-05-15 21:00:47 +00:00
Summary: A continuation of https://github.com/pytorch/pytorch/pull/10504 for GPU, torch, etc. builds. I was testing with ``` FULL_CAFFE2=1 python setup.py build_deps | tee ~/log.txt cat ~/log.txt | egrep 'undefined refer' | sort | less ``` I'll rebase on master when Yangqing's changes in 10504 land, but putting up for some testing. cc mingzhe09088 anderspapitto ezyang Pull Request resolved: https://github.com/pytorch/pytorch/pull/10507 Reviewed By: Yangqing Differential Revision: D9359606 Pulled By: orionr fbshipit-source-id: c2a3683b3ea5839689f5d2661da0bc9055a54cd2
29 lines
812 B
C++
29 lines
812 B
C++
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_

#include <tuple>
#include <vector>

#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"

namespace caffe2 {
namespace elementwise_ops_utils {

// Shared shape/axis helpers for Caffe2 elementwise operators.
// Declarations only — definitions live in the corresponding .cc file
// (presumably elementwise_ops_utils.cc; exported via CAFFE2_API for use
// from other build targets).

// Computes the size decomposition used by legacy (axis-based) broadcasting
// of B against A. Returns a (pre, n, post) triple of element counts.
// NOTE(review): exact semantics of the triple are defined by the
// implementation, which is not visible here — confirm against the .cc file.
CAFFE2_API std::tuple<size_t, size_t, size_t>
ComputeLegacyBroadcastSizes(const Tensor& A, const Tensor& B, int axis);

// Given the dims of two inputs A and B, returns the dims of the
// broadcasted output of a binary elementwise op (numpy-style broadcasting,
// judging by the name — TODO confirm against the implementation).
CAFFE2_API std::vector<int> ComputeBinaryBroadcastForwardDims(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims);

// For the backward pass of a broadcasted binary op: fills *A_axes and
// *B_axes with the axes along which each input's gradient must be reduced
// to undo the forward broadcast. Both output pointers must be non-null.
CAFFE2_API void ComputeBinaryBroadcastBackwardAxes(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims,
    std::vector<int>* A_axes,
    std::vector<int>* B_axes);

} // namespace elementwise_ops_utils
} // namespace caffe2

#endif // CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_