from .utils import int_shape

class ABackend:
    name = None
    def shape(self, x): raise NotImplementedError
    def contiguous(self, x): raise NotImplementedError
    def view(self, x, shape): raise NotImplementedError
    def transpose(self, x, dims): raise NotImplementedError
    def expand(self, x, mul_shape): raise NotImplementedError
    def stack(self, xlist, axis): raise NotImplementedError
    def concat(self, xlist, axis): raise NotImplementedError
    def einsum(self, eqn, args): raise NotImplementedError
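
# The concrete backends below adapt one tensor library each to the ABackend
# interface; every method is a thin wrapper over the library's native op.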

class Numpy(ABackend):
    name = 'numpy'
    def __init__(self):
        import numpy
        self.np = numpy
    def shape(self, x): return x.shape
    def contiguous(self, x): return self.np.ascontiguousarray(x)
    def view(self, x, shape): return x.reshape(shape)
    def transpose(self, x, dims): return x.transpose(dims)
    def expand(self, x, mul_shape): return self.np.tile(x, mul_shape)
    def stack(self, xlist, axis): return self.np.stack(xlist, axis=axis)
    def concat(self, xlist, axis): return self.np.concatenate(xlist, axis=axis)
    def einsum(self, eqn, args): return self.np.einsum(eqn, *args)

class PyTorch(ABackend):
    name = 'pytorch'
    def __init__(self):
        import torch
        self.torch = torch
    def shape(self, x): return x.size()
    def contiguous(self, x): return x.contiguous()
    def view(self, x, shape): return x.view(shape)
    def transpose(self, x, dims): return x.permute(dims)
    def expand(self, x, mul_shape): return x.expand(mul_shape)
    def stack(self, xlist, axis): return self.torch.stack(xlist, dim=axis)
    def concat(self, xlist, axis): return self.torch.cat(xlist, dim=axis)
    def einsum(self, eqn, args): return self.torch.einsum(eqn, *args)

def get_tf_shape(tf, tensor):
    """Returns the shape of a tensor, preferring static dimensions.
    (inspired by get_shape_list in the BERT code)

    Args:
        tf: the tensorflow module.
        tensor: A tf.Tensor object to find the shape of.

    Returns:
        A tuple of dimensions of the shape of tensor. All static dimensions
        are returned as python integers; dynamic dimensions are returned as
        scalar tf.Tensors.
    """
    shape = list(tensor.shape.as_list())
    non_static_indexes = [i for i, dim in enumerate(shape) if dim is None]
    if not non_static_indexes:
        return tuple(shape)
    dyn_shape = tf.shape(tensor)
    for index in non_static_indexes:
        shape[index] = dyn_shape[index]
    return tuple(shape)
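
# A sketch of the intended behaviour (assumes graph mode and a tensor with
# one dynamic axis):
#   x = tf.compat.v1.placeholder(tf.float32, [None, 128])
#   get_tf_shape(tf, x)   # -> (<scalar tf.Tensor>, 128)
# Static axes come back as plain ints, dynamic axes as scalar tf.Tensors.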

class TF(ABackend):
    name = 'tensorflow'
    def __init__(self):
        import tensorflow
        self.tf = tensorflow
    def shape(self, x):
        if self.tf.executing_eagerly():
            return int_shape(x.shape)
        else:
            return get_tf_shape(self.tf, x)
    def contiguous(self, x): return x  # tf tensors need no explicit contiguity
    def view(self, x, shape): return self.tf.reshape(x, shape)
    def transpose(self, x, dims): return self.tf.transpose(x, dims)
    def expand(self, x, mul_shape): return self.tf.tile(x, mul_shape)
    def stack(self, xlist, axis): return self.tf.stack(xlist, axis=axis)
    def concat(self, xlist, axis): return self.tf.concat(xlist, axis=axis)
    def einsum(self, eqn, args): return self.tf.einsum(eqn, *args)

# one backend instance per backend class, created lazily on first use
becache = {}

def from_cache(C):
    s = str(C)
    if s not in becache:
        becache[s] = C()
    return becache[s]

def get_str_type(x):
    # use the string form of the type to sniff the tensor library without
    # importing it; for a tuple/list, sniff the first element
    if isinstance(x, (tuple, list)):
        if len(x) == 0: return ''
        x = x[0]
    return str(type(x))

def get_tensor_lib(x):
    t = get_str_type(x)
    if 'numpy.' in t: ret = Numpy
    elif 'torch.' in t: ret = PyTorch
    elif 'tensorflow.' in t: ret = TF
    else: ret = None
    return ret

def is_tensor(x):
    t = get_str_type(x)
    if 'numpy.' in t: ret = ('numpy.ndarray' in t)
    elif 'torch.' in t: ret = ('torch.Tensor' in t)
    elif 'tensorflow.' in t: ret = ('ops.Tensor' in t)
    else: ret = False
    return ret
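
# Type-string sniffing in action (a sketch; the exact strings assume stock
# numpy / torch builds):
#   get_str_type(numpy.zeros(3))   # -> "<class 'numpy.ndarray'>"
#   is_tensor(numpy.zeros(3))      # -> True
#   is_tensor([1, 2, 3])           # -> False (plain python list)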

def get_backend_for_tensor(x):
    '''
    Get the (cached) backend instance matching the library of tensor x.
    '''
    tlib = get_tensor_lib(x)
    if tlib is None:
        raise NotImplementedError(f'Unsupported tensor type {type(x)}. Contributions welcome.')
    return from_cache(tlib)

def get_backend_by_name(b):
    if isinstance(b, (Numpy, TF, PyTorch)): return b
    assert isinstance(b, str)
    bemap = {
        'numpy': Numpy,
        'np': Numpy,
        'torch': PyTorch,
        'pytorch': PyTorch,
        'tensorflow': TF,
        'tf': TF,
    }
    if b not in bemap:
        raise NotImplementedError(f'Unsupported backend {b}. Contributions welcome.')
    return from_cache(bemap[b])
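
if __name__ == '__main__':
    # Minimal smoke test -- a sketch that assumes numpy is installed (the
    # lightest of the three supported backends). Run it as
    # `python -m <package>.backend` so the relative import at the top resolves.
    import numpy as np
    x = np.zeros((2, 3, 4))
    be = get_backend_for_tensor(x)
    assert be.name == 'numpy'
    assert be.shape(x) == (2, 3, 4)
    assert be.shape(be.transpose(x, (1, 0, 2))) == (3, 2, 4)
    assert be is get_backend_by_name('np')  # from_cache returns the singleton
    print('backend smoke test passed:', be.name)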