-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest.py
More file actions
99 lines (74 loc) · 3.04 KB
/
test.py
File metadata and controls
99 lines (74 loc) · 3.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
"""Real-time CycleGAN webcam demo: runs generator_g on each captured frame."""
# Standard library
import os
import time

# Third-party
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from IPython.display import clear_output
from tensorflow_examples.models.pix2pix import pix2pix

# Open the default webcam after the import block (it was interleaved with the
# imports, which hid the side effect and violated import-grouping convention).
cap = cv2.VideoCapture(0)

# Pipeline/config constants. BUFFER_SIZE and BATCH_SIZE are unused by the
# inference loop below — kept for parity with the training script.
AUTOTUNE = tf.data.AUTOTUNE
BUFFER_SIZE = 1000
BATCH_SIZE = 1
IMG_WIDTH = 256
IMG_HEIGHT = 256
def normalize(image):
    """Rescale a uint8 image tensor to float32 in [-1, 1], the generators' input range."""
    as_float = tf.cast(image, tf.float32)
    return as_float / 127.5 - 1
# RGB output from the generators.
OUTPUT_CHANNELS = 3
# CycleGAN pieces: two U-Net generators (g: X->Y, f: Y->X) and two PatchGAN
# discriminators, all with instance normalization. Only generator_g is used
# by the webcam loop below; the rest exist so the checkpoint can be restored.
generator_g = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
generator_f = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
discriminator_x = pix2pix.discriminator(norm_type='instancenorm', target=False)
discriminator_y = pix2pix.discriminator(norm_type='instancenorm', target=False)
# Cycle-consistency loss weight (unused at inference; kept from training code).
LAMBDA = 10
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# Optimizers must be registered on the checkpoint so restore() matches the
# object graph the training script saved, even though inference never steps them.
generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(generator_g=generator_g,
generator_f=generator_f,
discriminator_x=discriminator_x,
discriminator_y=discriminator_y,
generator_g_optimizer=generator_g_optimizer,
generator_f_optimizer=generator_f_optimizer,
discriminator_x_optimizer=discriminator_x_optimizer,
discriminator_y_optimizer=discriminator_y_optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
# (With no checkpoint the generators run with random weights — output is noise.)
if ckpt_manager.latest_checkpoint:
  ckpt.restore(ckpt_manager.latest_checkpoint)
  print ('Latest checkpoint restored!!')
def generate_images(model, test_input):
    """Run `model` on `test_input` and return the first output image in [0, 1].

    Args:
        model: callable mapping a batched input to a batched output
            (here, a pix2pix U-Net generator with tanh output in [-1, 1]).
        test_input: batched model input, shape (1, H, W, C) for the
            generators in this script.

    Returns:
        The first image of the output batch, rescaled from [-1, 1] to [0, 1].
    """
    # NOTE: the original created an empty plt.figure() per call (leaking a
    # figure every webcam frame) and carried commented-out plotting code;
    # both removed — this function only computes the displayable image.
    prediction = model(test_input)
    return prediction[0] * 0.5 + 0.5
# Target frame size expected by the generator (loop-invariant, hoisted).
dim = (256, 256)
try:
    while True:
        # Capture frame-by-frame.
        ret, frame = cap.read()
        if not ret:
            # Camera read failed (device missing/disconnected): stop cleanly
            # instead of crashing inside cv2.resize on a None frame.
            break
        resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        # OpenCV captures BGR; the generator expects RGB with a batch axis,
        # normalized to [-1, 1].
        rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB).reshape(1, 256, 256, 3)
        gen = generate_images(generator_g, normalize(rgb))
        gen = gen.numpy().reshape(256, 256, 3)
        # Back to BGR for display; imshow accepts float images in [0, 1].
        gen = cv2.cvtColor(gen, cv2.COLOR_RGB2BGR)
        cv2.imshow('frame', gen)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Always release the capture and close windows, even if the loop raised.
    cap.release()
    cv2.destroyAllWindows()