Skip to content

Commit f28e1dc

Browse files
authored
Bump requirements (Version 1.4.0)
* Bump requirements
* Bump OpenCV version
* Update jpeg samples
1 parent 54ce195 commit f28e1dc

11 files changed

Lines changed: 68 additions & 33 deletions

requirements-hw.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
.
2-
tensorflow>=1.13.1
2+
tensorflow>=1.13.1,<1.14
33
matplotlib>=3.0.2
4-
seaborn>=0.9.0
4+
seaborn>=0.9.0

setup.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
setup(
1414
name="trdg",
15-
version="1.3.2",
15+
version="1.4.0",
1616
description="TextRecognitionDataGenerator: A synthetic data generator for text recognition",
1717
long_description=long_description,
1818
long_description_content_type="text/markdown",
@@ -26,7 +26,7 @@
2626
# 3 - Alpha
2727
# 4 - Beta
2828
# 5 - Production/Stable
29-
"Development Status :: 3 - Alpha",
29+
"Development Status :: 4 - Beta",
3030
"Intended Audience :: Developers",
3131
"License :: OSI Approved :: MIT License",
3232
"Programming Language :: Python :: 2",
@@ -41,10 +41,10 @@
4141
packages=find_packages(exclude=["contrib", "docs", "tests"]),
4242
include_package_data=True,
4343
install_requires=[
44-
"pillow==5.1.0",
45-
"numpy>=1.15.1,<1.17",
44+
"pillow==7.0.0",
45+
"numpy>=1.16.4,<1.17",
4646
"requests>=2.20.0",
47-
"opencv-python>=4.0.0.21",
47+
"opencv-python>=4.2.0.32",
4848
"tqdm>=4.23.0",
4949
"beautifulsoup4>=4.6.0"
5050
],
0 Bytes
Loading
-1 Bytes
Loading
2 Bytes
Loading

trdg/background_generator.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,11 @@ def picture(height, width):
6363
pictures = os.listdir(os.path.join(script_path, "pictures"))
6464

6565
if len(pictures) > 0:
66-
pic = Image.open(os.path.join(script_path, "pictures", pictures[rnd.randint(0, len(pictures) - 1)]))
66+
pic = Image.open(
67+
os.path.join(
68+
script_path, "pictures", pictures[rnd.randint(0, len(pictures) - 1)]
69+
)
70+
)
6771

6872
if pic.size[0] < width:
6973
pic = pic.resize(

trdg/data_generator.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,9 @@ def generate(
162162
background_img = background_generator.picture(
163163
background_height, background_width
164164
)
165-
background_mask = Image.new("RGB", (background_width, background_height), (0, 0, 0))
165+
background_mask = Image.new(
166+
"RGB", (background_width, background_height), (0, 0, 0)
167+
)
166168

167169
#############################
168170
# Place text with alignment #

trdg/distorsion_generator.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def _apply_func_distorsion(image, mask, vertical, horizontal, max_offset, func):
9393
).convert("RGBA"),
9494
Image.fromarray(
9595
np.uint8(new_mask_arr_copy if horizontal and vertical else new_mask_arr)
96-
).convert("RGB")
96+
).convert("RGB"),
9797
)
9898

9999

@@ -139,5 +139,10 @@ def random(image, mask, vertical=False, horizontal=False):
139139
max_offset = int(image.height ** 0.4)
140140

141141
return _apply_func_distorsion(
142-
image, mask, vertical, horizontal, max_offset, (lambda x: rnd.randint(0, max_offset))
142+
image,
143+
mask,
144+
vertical,
145+
horizontal,
146+
max_offset,
147+
(lambda x: rnd.randint(0, max_offset)),
143148
)

trdg/generators/from_strings.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,4 +94,4 @@ def next(self):
9494
self.output_mask,
9595
),
9696
self.strings[(self.generated_count - 1) % len(self.strings)],
97-
)
97+
)

trdg/handwritten_text_generator.py

Lines changed: 37 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -9,22 +9,35 @@
99
import seaborn
1010
from PIL import Image, ImageColor
1111
from collections import namedtuple
12+
import warnings
13+
14+
warnings.filterwarnings("ignore")
15+
1216

1317
def download_model_weights():
1418
from pathlib import Path
15-
import urllib.request
19+
import urllib.request
20+
1621
cwd = os.path.dirname(os.path.abspath(__file__))
17-
for k in ['model-29.data-00000-of-00001','model-29.index','model-29.meta','translation.pkl']:
18-
download_dir = Path(cwd)/'handwritten_model/'
19-
download_dir.mkdir(exist_ok=True,parents=True)
20-
if (download_dir/f'{k}').exists(): continue
21-
print(f'file {k} not found, downloading from git repo..')
22+
for k in [
23+
"model-29.data-00000-of-00001",
24+
"model-29.index",
25+
"model-29.meta",
26+
"translation.pkl",
27+
]:
28+
download_dir = Path(cwd) / "handwritten_model/"
29+
download_dir.mkdir(exist_ok=True, parents=True)
30+
if (download_dir / f"{k}").exists():
31+
continue
32+
print(f"file {k} not found, downloading from git repo..")
2233
urllib.request.urlretrieve(
23-
f'https://raw.github.com/Belval/TextRecognitionDataGenerator/master/trdg/handwritten_model/{k}',
24-
download_dir/f'{k}')
25-
print(f'file {k} saved to disk')
34+
f"https://raw.github.com/Belval/TextRecognitionDataGenerator/master/trdg/handwritten_model/{k}",
35+
download_dir / f"{k}",
36+
)
37+
print(f"file {k} saved to disk")
2638
return cwd
2739

40+
2841
def _sample(e, mu1, mu2, std1, std2, rho):
2942
cov = np.array([[std1 * std1, std1 * std2 * rho], [std1 * std2 * rho, std2 * std2]])
3043
mean = np.array([mu1, mu2])
@@ -71,7 +84,9 @@ def _sample_text(sess, args_text, translation):
7184
"finish",
7285
"zero_states",
7386
]
74-
vs = namedtuple("Params", fields)(*[tf.compat.v1.get_collection(name)[0] for name in fields])
87+
vs = namedtuple("Params", fields)(
88+
*[tf.compat.v1.get_collection(name)[0] for name in fields]
89+
)
7590

7691
text = np.array([translation.get(c, 0) for c in args_text])
7792
sequence = np.eye(len(translation), dtype=np.float32)[text]
@@ -163,14 +178,20 @@ def _join_images(images):
163178

164179
def generate(text, text_color):
165180
cd = download_model_weights()
166-
with open(os.path.join(cd, os.path.join("handwritten_model", "translation.pkl")), "rb") as file:
181+
with open(
182+
os.path.join(cd, os.path.join("handwritten_model", "translation.pkl")), "rb"
183+
) as file:
167184
translation = pickle.load(file)
168185

169186
config = tf.compat.v1.ConfigProto(device_count={"GPU": 0})
170187
tf.compat.v1.reset_default_graph()
171188
with tf.compat.v1.Session(config=config) as sess:
172-
saver = tf.compat.v1.train.import_meta_graph(os.path.join(cd,"handwritten_model/model-29.meta"))
173-
saver.restore(sess,os.path.join(cd,os.path.join("handwritten_model/model-29")))
189+
saver = tf.compat.v1.train.import_meta_graph(
190+
os.path.join(cd, "handwritten_model/model-29.meta")
191+
)
192+
saver.restore(
193+
sess, os.path.join(cd, os.path.join("handwritten_model/model-29"))
194+
)
174195
images = []
175196
colors = [ImageColor.getrgb(c) for c in text_color.split(",")]
176197
c1, c2 = colors[0], colors[-1]
@@ -203,13 +224,11 @@ def generate(text, text_color):
203224

204225
canvas = plt.get_current_fig_manager().canvas
205226
canvas.draw()
206-
227+
207228
s, (width, height) = canvas.print_to_buffer()
208-
image = Image.frombytes(
209-
"RGBA", (width, height), s
210-
)
229+
image = Image.frombytes("RGBA", (width, height), s)
211230
mask = Image.new("RGB", (width, height), (0, 0, 0))
212-
231+
213232
images.append(_crop_white_borders(image))
214233

215234
plt.close()

0 commit comments

Comments (0)