Skip to content

Commit 1f633c0

Browse files
Fix int8 C++ path that did not work, by introducing a new jit namespace (#922)
* Fix the int8 C++ path that did not work by introducing a new jit namespace
* Remove some code
* Fix code format
1 parent cd0f103 commit 1f633c0

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

64 files changed

+575
-480
lines changed

intel_extension_for_pytorch/csrc/initialization.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ void init_jit_fusion_pass() {
1515
// jit fusion pass
1616
torch::jit::registerPrePass([](std::shared_ptr<torch::jit::Graph>& g) {
1717
if (AutoOptConfig::singleton().get_jit_fuse()) {
18-
torch::jit::FusionPass(g);
18+
torch_ipex::jit::FusionPass(g);
1919
}
2020
});
2121
}

intel_extension_for_pytorch/csrc/jit/codegen/LlgaTensorImpl.cpp

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,18 @@
33

44
#include <c10/core/CPUAllocator.h>
55

6-
namespace at {
6+
namespace torch_ipex {
7+
namespace jit {
8+
namespace fuser {
9+
namespace onednn {
710

811
LlgaTensorImpl::LlgaTensorImpl(
9-
Storage&& storage,
12+
c10::Storage&& storage,
1013
const caffe2::TypeMeta& data_type,
1114
const LlgaTensorDesc& desc)
1215
: TensorImpl(
1316
std::move(storage),
14-
c10::DispatchKeySet(DispatchKey::MkldnnCPU),
17+
c10::DispatchKeySet(c10::DispatchKey::MkldnnCPU),
1518
data_type),
1619
desc_(desc) {
1720
set_sizes_and_strides(desc.sizes(), desc.strides());
@@ -22,7 +25,9 @@ bool LlgaTensorImpl::has_storage() const {
2225
return true;
2326
}
2427

25-
Tensor empty_llga(const LlgaTensorDesc& desc, const TensorOptions& options) {
28+
at::Tensor empty_llga(
29+
const LlgaTensorDesc& desc,
30+
const at::TensorOptions& options) {
2631
auto sizes = desc.sizes();
2732
auto nbytes = desc.storage_size();
2833

@@ -38,36 +43,36 @@ Tensor empty_llga(const LlgaTensorDesc& desc, const TensorOptions& options) {
3843
std::move(storage_impl), options.dtype(), desc);
3944
}
4045

41-
const LlgaTensorDesc& get_llga_desc(const Tensor& tensor) {
46+
const LlgaTensorDesc& get_llga_desc(const at::Tensor& tensor) {
4247
TORCH_INTERNAL_ASSERT(
4348
tensor.is_mkldnn(), "get_llga_desc expects Mkldnn tensor input");
4449
return static_cast<LlgaTensorImpl*>(tensor.unsafeGetTensorImpl())->desc();
4550
}
4651

47-
dnnl::graph::tensor llga_from_aten_tensor(const Tensor& tensor) {
52+
dnnl::graph::tensor llga_from_aten_tensor(const at::Tensor& tensor) {
4853
return {
4954
get_llga_desc(tensor).logical_tensor(),
50-
torch::jit::fuser::onednn::Engine::getEngine(),
55+
Engine::getEngine(),
5156
tensor.data_ptr()};
5257
}
5358

54-
Tensor LlgaTensorImpl::llga_to_aten_tensor(LlgaTensorImpl* llgaImpl) {
59+
at::Tensor LlgaTensorImpl::llga_to_aten_tensor(LlgaTensorImpl* llgaImpl) {
5560
auto aten_tensor = at::detail::make_tensor<TensorImpl>(
5661
std::move(llgaImpl->storage_),
57-
c10::DispatchKeySet(DispatchKey::CPU),
62+
c10::DispatchKeySet(c10::DispatchKey::CPU),
5863
llgaImpl->data_type_);
5964
auto impl = aten_tensor.unsafeGetTensorImpl();
6065
impl->set_storage_offset(llgaImpl->storage_offset_);
6166
impl->set_sizes_and_strides(llgaImpl->sizes(), llgaImpl->strides());
6267
return aten_tensor;
6368
}
6469

65-
Tensor LlgaTensorImpl::llga_to_aten_tensor(
70+
at::Tensor LlgaTensorImpl::llga_to_aten_tensor(
6671
LlgaTensorImpl* llgaImpl,
67-
QuantizerPtr quantizer) {
68-
auto aten_tensor = at::detail::make_tensor<QTensorImpl>(
72+
at::QuantizerPtr quantizer) {
73+
auto aten_tensor = at::detail::make_tensor<at::QTensorImpl>(
6974
std::move(llgaImpl->storage_),
70-
c10::DispatchKeySet(DispatchKey::QuantizedCPU),
75+
c10::DispatchKeySet(c10::DispatchKey::QuantizedCPU),
7176
llgaImpl->data_type_,
7277
quantizer);
7378
auto impl = aten_tensor.unsafeGetTensorImpl();
@@ -127,5 +132,7 @@ at::ScalarType LlgaTensorDesc::aten_scalar_type() const {
127132
TORCH_CHECK(false, "Invalid data type ", static_cast<size_t>(dtype_));
128133
}
129134
}
130-
131-
} // namespace at
135+
} // namespace onednn
136+
} // namespace fuser
137+
} // namespace jit
138+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/LlgaTensorImpl.h

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,10 @@
66
#include <oneapi/dnnl/dnnl_graph.hpp>
77
#include <torch/csrc/jit/ir/ir.h>
88

9-
namespace at {
9+
namespace torch_ipex {
10+
namespace jit {
11+
namespace fuser {
12+
namespace onednn {
1013

1114
struct LlgaTensorDesc {
1215
using desc = dnnl::graph::logical_tensor;
@@ -46,8 +49,8 @@ struct LlgaTensorDesc {
4649
{},
4750
desc::data_type::f32,
4851
get_property_type(v)) {
49-
if (v->type()->isSubtypeOf(TensorType::get())) {
50-
auto tt = v->type()->cast<TensorType>();
52+
if (v->type()->isSubtypeOf(torch::jit::TensorType::get())) {
53+
auto tt = v->type()->cast<torch::jit::TensorType>();
5154

5255
if (tt->scalarType())
5356
dtype_ = getLlgaDataType(tt->scalarType().value());
@@ -108,7 +111,7 @@ struct LlgaTensorDesc {
108111
return ret;
109112
}
110113

111-
LlgaTensorDesc set_quantizer(QuantizerPtr new_quantizer) {
114+
LlgaTensorDesc set_quantizer(at::QuantizerPtr new_quantizer) {
112115
auto ret = *this;
113116
ret.quantizer_ = new_quantizer;
114117
return ret;
@@ -118,13 +121,13 @@ struct LlgaTensorDesc {
118121
return LlgaTensorDesc(t).set_quantizer(quantizer_);
119122
}
120123

121-
QuantizerPtr get_quantizer() {
124+
at::QuantizerPtr get_quantizer() {
122125
return quantizer_;
123126
}
124127

125128
desc::property_type get_property_type(const torch::jit::Value* v) {
126129
switch (v->node()->kind()) {
127-
case prim::Constant:
130+
case torch::jit::prim::Constant:
128131
return desc::property_type::constant;
129132
default:
130133
return desc::property_type::variable;
@@ -200,7 +203,7 @@ struct LlgaTensorDesc {
200203
desc::property_type property_type_;
201204
desc::layout_type layout_type_;
202205
size_t layout_id_;
203-
QuantizerPtr quantizer_;
206+
at::QuantizerPtr quantizer_;
204207
};
205208

206209
// Initially, oneDNN Graph also used to have blocked layout for tensors between
@@ -212,7 +215,7 @@ struct LlgaTensorDesc {
212215
// otherwise expecting.
213216
struct TORCH_API LlgaTensorImpl : public c10::TensorImpl {
214217
LlgaTensorImpl(
215-
Storage&& storage,
218+
c10::Storage&& storage,
216219
const caffe2::TypeMeta& data_type,
217220
const LlgaTensorDesc& desc);
218221

@@ -223,17 +226,22 @@ struct TORCH_API LlgaTensorImpl : public c10::TensorImpl {
223226
// Override a bunch of methods inherited from TensorImpl to return error
224227
// messages.
225228
bool has_storage() const override;
226-
static Tensor llga_to_aten_tensor(LlgaTensorImpl* llgaImpl);
227-
static Tensor llga_to_aten_tensor(
229+
static at::Tensor llga_to_aten_tensor(LlgaTensorImpl* llgaImpl);
230+
static at::Tensor llga_to_aten_tensor(
228231
LlgaTensorImpl* llgaImpl,
229-
QuantizerPtr quantizer);
232+
at::QuantizerPtr quantizer);
230233

231234
private:
232235
LlgaTensorDesc desc_;
233236
};
234237

235-
Tensor empty_llga(const LlgaTensorDesc& desc, const TensorOptions& options);
238+
at::Tensor empty_llga(
239+
const LlgaTensorDesc& desc,
240+
const at::TensorOptions& options);
236241

237-
dnnl::graph::tensor llga_from_aten_tensor(const Tensor& tensor);
242+
dnnl::graph::tensor llga_from_aten_tensor(const at::Tensor& tensor);
238243

239-
} // namespace at
244+
} // namespace onednn
245+
} // namespace fuser
246+
} // namespace jit
247+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/defer_size_check.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,13 @@
22

33
#include "utils.h"
44

5-
namespace torch {
5+
namespace torch_ipex {
66
namespace jit {
77
namespace fuser {
88
namespace onednn {
99

10+
using namespace torch::jit;
11+
1012
class SizeCheckMover {
1113
private:
1214
Block* block_;
@@ -91,4 +93,4 @@ void DeferSizeCheck(std::shared_ptr<Graph>& graph) {
9193
} // namespace onednn
9294
} // namespace fuser
9395
} // namespace jit
94-
} // namespace torch
96+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/defer_size_check.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,14 @@
22

33
#include <torch/csrc/jit/ir/ir.h>
44

5-
namespace torch {
5+
namespace torch_ipex {
66
namespace jit {
77
namespace fuser {
88
namespace onednn {
99

10-
void DeferSizeCheck(std::shared_ptr<Graph>& graph);
10+
void DeferSizeCheck(std::shared_ptr<torch::jit::Graph>& graph);
1111

1212
} // namespace onednn
1313
} // namespace fuser
1414
} // namespace jit
15-
} // namespace torch
15+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/fusion_group_name.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#include "fusion_group_name.h"
22

3-
namespace torch {
3+
namespace torch_ipex {
44
namespace jit {
55
namespace fuser {
66
namespace onednn {
@@ -18,4 +18,4 @@ const std::string& LlgaGuardName() {
1818
} // namespace onednn
1919
} // namespace fuser
2020
} // namespace jit
21-
} // namespace torch
21+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/fusion_group_name.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
#include <string>
44

5-
namespace torch {
5+
namespace torch_ipex {
66
namespace jit {
77
namespace fuser {
88
namespace onednn {
@@ -18,4 +18,4 @@ extern const std::string& LlgaGuardName();
1818
} // namespace onednn
1919
} // namespace fuser
2020
} // namespace jit
21-
} // namespace torch
21+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/graph_fuser.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,15 @@
66
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
77
#include "graph_helper.h"
88

9-
namespace torch {
9+
namespace torch_ipex {
1010
namespace jit {
1111
namespace fuser {
1212
namespace onednn {
1313

1414
namespace {
1515

16+
using namespace torch::jit;
17+
1618
struct WorkBlock : public std::pair<Node*, Node*> {
1719
using pair::pair;
1820

@@ -203,4 +205,4 @@ void CreateLlgaSubgraphs(std::shared_ptr<Graph>& graph) {
203205
} // namespace onednn
204206
} // namespace fuser
205207
} // namespace jit
206-
} // namespace torch
208+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/graph_fuser.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,14 @@
22

33
#include <torch/csrc/jit/ir/ir.h>
44

5-
namespace torch {
5+
namespace torch_ipex {
66
namespace jit {
77
namespace fuser {
88
namespace onednn {
99

10-
void CreateLlgaSubgraphs(std::shared_ptr<Graph>& graph);
10+
void CreateLlgaSubgraphs(std::shared_ptr<torch::jit::Graph>& graph);
1111

1212
} // namespace onednn
1313
} // namespace fuser
1414
} // namespace jit
15-
} // namespace torch
15+
} // namespace torch_ipex

intel_extension_for_pytorch/csrc/jit/codegen/onednn/graph_helper.cpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,12 @@
88
#include <torch/csrc/jit/jit_log.h>
99
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
1010

11-
namespace torch {
11+
namespace torch_ipex {
1212
namespace jit {
1313
namespace fuser {
1414
namespace onednn {
1515

16+
using namespace torch::jit;
1617
using opkind = dnnl::graph::op::kind;
1718

1819
void fixConvOptionalBias(Node* node) {
@@ -435,7 +436,7 @@ bool LlgaGraphHelper::isSupported(Node* node) const {
435436
return createOperator(node).kind() != opkind::Wildcard;
436437
}
437438

438-
DeviceType inferDeviceFromValue(Value* v) {
439+
at::DeviceType inferDeviceFromValue(Value* v) {
439440
auto tt = v->type()->cast<TensorType>();
440441
if (!tt)
441442
return at::kCPU;
@@ -445,7 +446,7 @@ DeviceType inferDeviceFromValue(Value* v) {
445446
return device->type();
446447
}
447448

448-
DeviceType inferDevice(const std::shared_ptr<Graph>& graph) {
449+
at::DeviceType inferDevice(const std::shared_ptr<Graph>& graph) {
449450
auto dt = inferDeviceFromValue(graph->inputs()[0]);
450451
TORCH_CHECK(
451452
std::all_of(
@@ -456,9 +457,9 @@ DeviceType inferDevice(const std::shared_ptr<Graph>& graph) {
456457
return dt;
457458
}
458459

459-
dnnl::graph::engine::kind getLlgaEngineKind(DeviceType type) {
460+
dnnl::graph::engine::kind getLlgaEngineKind(at::DeviceType type) {
460461
switch (type) {
461-
case DeviceType::CPU:
462+
case at::DeviceType::CPU:
462463
return dnnl::graph::engine::kind::cpu;
463464
default:
464465
TORCH_CHECK(false, "Not support device type ", type);
@@ -730,4 +731,4 @@ void LlgaNodeWrapper::initOutputLayouts() {
730731
} // namespace onednn
731732
} // namespace fuser
732733
} // namespace jit
733-
} // namespace torch
734+
} // namespace torch_ipex

0 commit comments

Comments
 (0)