Commit d0fafb6

Merge pull request #62 from Azure-Samples/CustomVisionUpdateAMD64
Custom Vision Update For AMD64
2 parents 37abcd2 + 5272b8f commit d0fafb6

File tree (4 files changed: +305 -3 lines changed)

modules/ImageClassifierService/amd64.Dockerfile
modules/ImageClassifierService/app/app-amd64.py
modules/ImageClassifierService/app/predict-amd64.py
modules/ImageClassifierService/module.json

modules/ImageClassifierService/amd64.Dockerfile

Lines changed: 5 additions & 2 deletions
@@ -1,9 +1,12 @@
-FROM python:3.7-slim
+FROM python:3.7-slim
 
 RUN pip install -U pip
 RUN pip install numpy==1.17.3 tensorflow==2.0.0 flask pillow
 
-COPY app /app
+RUN mkdir app
+COPY ./app/app-amd64.py ./app/app.py
+COPY ./app/predict-amd64.py ./app/predict.py
+COPY ./app/labels.txt ./app/model.pb ./app/
 
 # Expose the port
 EXPOSE 80
modules/ImageClassifierService/app/app-amd64.py

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
import json
import os
import io

# Imports for the REST API
from flask import Flask, request, jsonify

# Imports for image processing
from PIL import Image

# Imports for prediction
from predict import initialize, predict_image, predict_url

app = Flask(__name__)

# 4MB Max image size limit
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024

# Default route just shows simple text
@app.route('/')
def index():
    return 'CustomVision.ai model host harness'

# Like the CustomVision.ai Prediction service, the /image route handles either:
# - an octet-stream image file
# - multipart/form-data with the file in the imageData parameter
@app.route('/image', methods=['POST'])
@app.route('/<project>/image', methods=['POST'])
@app.route('/<project>/image/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image/nostore', methods=['POST'])
def predict_image_handler(project=None, publishedName=None):
    try:
        imageData = None
        if ('imageData' in request.files):
            imageData = request.files['imageData']
        elif ('imageData' in request.form):
            imageData = request.form['imageData']
        else:
            imageData = io.BytesIO(request.get_data())

        img = Image.open(imageData)
        results = predict_image(img)
        return jsonify(results)
    except Exception as e:
        print('EXCEPTION:', str(e))
        return 'Error processing image', 500


# Like the CustomVision.ai Prediction service, the /url route handles URLs
# in the body of the request of the form:
# { 'url': '<http url>' }
@app.route('/url', methods=['POST'])
@app.route('/<project>/url', methods=['POST'])
@app.route('/<project>/url/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url/nostore', methods=['POST'])
def predict_url_handler(project=None, publishedName=None):
    try:
        image_url = json.loads(request.get_data().decode('utf-8'))['url']
        results = predict_url(image_url)
        return jsonify(results)
    except Exception as e:
        print('EXCEPTION:', str(e))
        return 'Error processing image', 500

if __name__ == '__main__':
    # Load and initialize the model
    initialize()

    # Run the server
    app.run(host='0.0.0.0', port=80)
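For a quick smoke test of these routes, here is a minimal sketch using the requests library, assuming the module is reachable on localhost port 80 and that dog.jpg is a hypothetical sample image under the 4 MB limit:

import requests

# Send a raw octet-stream image to the /image route.
with open('dog.jpg', 'rb') as f:
    resp = requests.post('http://localhost:80/image', data=f.read())
print(resp.json())

# Send an image URL to the /url route as a JSON body with a lowercase 'url' key,
# matching what predict_url_handler parses.
resp = requests.post('http://localhost:80/url',
                     json={'url': 'https://example.com/dog.jpg'})
print(resp.json())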
modules/ImageClassifierService/app/predict-amd64.py

Lines changed: 221 additions & 0 deletions
@@ -0,0 +1,221 @@
from urllib.request import urlopen
from datetime import datetime

import tensorflow as tf

from PIL import Image
import numpy as np
import sys

filename = 'model.pb'
labels_filename = 'labels.txt'

network_input_size = 0

output_layer = 'loss:0'
input_node = 'Placeholder:0'

graph_def = tf.compat.v1.GraphDef()
labels = []

def initialize():
    print('Loading model...', end='')
    with open(filename, 'rb') as f:
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

    # Retrieve 'network_input_size' from the shape of 'input_node'
    with tf.compat.v1.Session() as sess:
        input_tensor_shape = sess.graph.get_tensor_by_name(input_node).shape.as_list()

    assert len(input_tensor_shape) == 4
    assert input_tensor_shape[1] == input_tensor_shape[2]

    global network_input_size
    network_input_size = input_tensor_shape[1]

    print('Success!')
    print('Loading labels...', end='')
    with open(labels_filename, 'rt') as lf:
        global labels
        labels = [l.strip() for l in lf.readlines()]
    print(len(labels), 'found. Success!')

def log_msg(msg):
    print("{}: {}".format(datetime.now(), msg))

def extract_bilinear_pixel(img, x, y, ratio, xOrigin, yOrigin):
    xDelta = (x + 0.5) * ratio - 0.5
    x0 = int(xDelta)
    xDelta -= x0
    x0 += xOrigin
    if x0 < 0:
        x0 = 0
        x1 = 0
        xDelta = 0.0
    elif x0 >= img.shape[1]-1:
        x0 = img.shape[1]-1
        x1 = img.shape[1]-1
        xDelta = 0.0
    else:
        x1 = x0 + 1

    yDelta = (y + 0.5) * ratio - 0.5
    y0 = int(yDelta)
    yDelta -= y0
    y0 += yOrigin
    if y0 < 0:
        y0 = 0
        y1 = 0
        yDelta = 0.0
    elif y0 >= img.shape[0]-1:
        y0 = img.shape[0]-1
        y1 = img.shape[0]-1
        yDelta = 0.0
    else:
        y1 = y0 + 1

    # Get pixels in four corners
    bl = img[y0, x0]
    br = img[y0, x1]
    tl = img[y1, x0]
    tr = img[y1, x1]
    # Calculate interpolation
    b = xDelta * br + (1. - xDelta) * bl
    t = xDelta * tr + (1. - xDelta) * tl
    pixel = yDelta * t + (1. - yDelta) * b
    return pixel

def extract_and_resize(img, targetSize):
    determinant = img.shape[1] * targetSize[0] - img.shape[0] * targetSize[1]
    if determinant < 0:
        ratio = float(img.shape[1]) / float(targetSize[1])
        xOrigin = 0
        yOrigin = int(0.5 * (img.shape[0] - ratio * targetSize[0]))
    elif determinant > 0:
        ratio = float(img.shape[0]) / float(targetSize[0])
        xOrigin = int(0.5 * (img.shape[1] - ratio * targetSize[1]))
        yOrigin = 0
    else:
        ratio = float(img.shape[0]) / float(targetSize[0])
        xOrigin = 0
        yOrigin = 0
    resize_image = np.empty((targetSize[0], targetSize[1], img.shape[2]), dtype=np.float32)
    for y in range(targetSize[0]):
        for x in range(targetSize[1]):
            resize_image[y, x] = extract_bilinear_pixel(img, x, y, ratio, xOrigin, yOrigin)
    return resize_image

def extract_and_resize_to_256_square(image):
    h, w = image.shape[:2]
    log_msg("crop_center: " + str(w) + "x" + str(h) + " and resize to " + str(256) + "x" + str(256))
    return extract_and_resize(image, (256, 256))

def crop_center(img, cropx, cropy):
    h, w = img.shape[:2]
    startx = max(0, w//2-(cropx//2))
    starty = max(0, h//2-(cropy//2))
    log_msg("crop_center: " + str(w) + "x" + str(h) + " to " + str(cropx) + "x" + str(cropy))
    return img[starty:starty+cropy, startx:startx+cropx]

def resize_down_to_1600_max_dim(image):
    w, h = image.size
    if h < 1600 and w < 1600:
        return image

    new_size = (1600 * w // h, 1600) if (h > w) else (1600, 1600 * h // w)
    log_msg("resize: " + str(w) + "x" + str(h) + " to " + str(new_size[0]) + "x" + str(new_size[1]))
    if max(new_size) / max(image.size) >= 0.5:
        method = Image.BILINEAR
    else:
        method = Image.BICUBIC
    return image.resize(new_size, method)

def predict_url(imageUrl):
    log_msg("Predicting from url: " + imageUrl)
    with urlopen(imageUrl) as testImage:
        image = Image.open(testImage)
        return predict_image(image)

def convert_to_nparray(image):
    # RGB -> BGR
    log_msg("Convert to numpy array")
    image = np.array(image)
    return image[:, :, (2, 1, 0)]

def update_orientation(image):
    exif_orientation_tag = 0x0112
    if hasattr(image, '_getexif'):
        exif = image._getexif()
        if exif is not None and exif_orientation_tag in exif:
            orientation = exif.get(exif_orientation_tag, 1)
            log_msg('Image has EXIF Orientation: ' + str(orientation))
            # orientation is 1-based; shift to zero-based and flip/transpose based on 0-based values
            orientation -= 1
            if orientation >= 4:
                image = image.transpose(Image.TRANSPOSE)
            if orientation == 2 or orientation == 3 or orientation == 6 or orientation == 7:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            if orientation == 1 or orientation == 2 or orientation == 5 or orientation == 6:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
    return image

def predict_image(image):
    log_msg('Predicting image')
    try:
        if image.mode != "RGB":
            log_msg("Converting to RGB")
            image = image.convert("RGB")

        w, h = image.size
        log_msg("Image size: " + str(w) + "x" + str(h))

        # Update orientation based on EXIF tags
        image = update_orientation(image)

        # If the image has either w or h greater than 1600 we resize it down, respecting
        # aspect ratio, such that the largest dimension is 1600
        image = resize_down_to_1600_max_dim(image)

        # Convert image to numpy array
        image = convert_to_nparray(image)

        # Crop the center square and resize that square down to 256x256
        resized_image = extract_and_resize_to_256_square(image)

        # Crop the center for the specified network_input_size
        cropped_image = crop_center(resized_image, network_input_size, network_input_size)

        tf.compat.v1.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf.compat.v1.Session() as sess:
            prob_tensor = sess.graph.get_tensor_by_name(output_layer)
            predictions, = sess.run(prob_tensor, {input_node: [cropped_image]})

        result = []
        for p, label in zip(predictions, labels):
            truncated_probability = np.float64(round(p, 8))
            if truncated_probability > 1e-8:
                result.append({
                    'tagName': label,
                    'probability': truncated_probability,
                    'tagId': '',
                    'boundingBox': None })

        response = {
            'id': '',
            'project': '',
            'iteration': '',
            'created': datetime.utcnow().isoformat(),
            'predictions': result
        }

        log_msg("Results: " + str(response))
        return response

    except Exception as e:
        log_msg(str(e))
        return 'Error: Could not preprocess image for prediction. ' + str(e)
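As a rough sketch of calling the prediction module directly, outside the Flask harness, assuming it is importable as predict (as it is after the Dockerfile renames it), with model.pb and labels.txt in the working directory and sample.jpg as a hypothetical test image:

from PIL import Image

from predict import initialize, predict_image

# Load the frozen graph and the label list once.
initialize()

# Run a single prediction; the response mirrors the Custom Vision prediction schema.
with Image.open('sample.jpg') as img:
    response = predict_image(img)

for prediction in response['predictions']:
    print(prediction['tagName'], prediction['probability'])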

modules/ImageClassifierService/module.json

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
   "image": {
     "repository": "$CONTAINER_REGISTRY_ADDRESS/imageclassifierservice",
     "tag": {
-      "version": "0.2.6",
+      "version": "0.2.16",
       "platforms": {
         "amd64": "./amd64.Dockerfile",
         "arm32v7": "./arm32v7.Dockerfile"
