- adapt the models to use base64-encoded images
authorEric Zavesky <ezavesky@research.att.com>
Tue, 17 Oct 2017 22:11:06 +0000 (17:11 -0500)
committerEric Zavesky <ezavesky@research.att.com>
Tue, 17 Oct 2017 22:11:06 +0000 (17:11 -0500)
face_privacy_filter/transform_detect.py
face_privacy_filter/transform_region.py
testing/app.py
testing/swagger.yaml
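
The commit replaces raw binary image columns with base64 text in every transform and in the test harness. A minimal standalone sketch of the round trip introduced here (not part of the commit; the function names are illustrative):

import base64
import cv2
import numpy as np

def encode_image(path_image):
    # read raw bytes and base64-encode them into a text-safe str, as the transforms now store them
    with open(path_image, 'rb') as fh:
        return base64.b64encode(fh.read()).decode()

def decode_image(b64_data):
    # reverse the encoding: str -> base64 bytes -> raw bytes -> uint8 buffer -> decoded BGR image
    if isinstance(b64_data, str):
        b64_data = b64_data.encode()
    file_bytes = np.asarray(bytearray(base64.b64decode(b64_data)), dtype=np.uint8)
    return cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)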

diff --git a/face_privacy_filter/transform_detect.py b/face_privacy_filter/transform_detect.py
index b700787..d4b732e 100644
@@ -8,6 +8,7 @@ import os
 import pandas as pd
 import numpy as np
 from sklearn.base import BaseEstimator, ClassifierMixin
+import base64
 
 class FaceDetectTransform(BaseEstimator, ClassifierMixin):
     '''
@@ -21,7 +22,7 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
     COL_REGION_IDX = 'region'
     COL_IMAGE_IDX = 'image'
     COL_IMAGE_MIME = 'mime_type'
-    COL_IMAGE_DATA = 'binary_stream'
+    COL_IMAGE_DATA = 'base64_data'
     VAL_REGION_IMAGE_ID = -1
 
     def __init__(self, cascade_path=None, include_image=True):
@@ -37,6 +38,9 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
         # munge stream and mimetype into input sample
         if path_image and os.path.exists(path_image):
             bin_stream = open(path_image, 'rb').read()
+        bin_stream = base64.b64encode(bin_stream)
+        if isinstance(bin_stream, bytes):
+            bin_stream = bin_stream.decode()
         return pd.DataFrame([['image/jpeg', bin_stream]], columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
 
     @staticmethod
@@ -105,7 +109,11 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
 
         dfReturn = None
         for image_idx in range(len(X)):
-            file_bytes = np.asarray(bytearray(X[FaceDetectTransform.COL_IMAGE_DATA][image_idx]), dtype=np.uint8)
+            image_byte = X[FaceDetectTransform.COL_IMAGE_DATA][image_idx]
+            if isinstance(image_byte, str):
+                image_byte = image_byte.encode()
+            image_byte = bytearray(base64.b64decode(image_byte))
+            file_bytes = np.asarray(image_byte, dtype=np.uint8)
             img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
             # img = cv2.imread(image_set[1])
             faces = self.detect_faces(img)
@@ -146,17 +154,4 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
         #    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
         return faces
 
-    #############################################
-    ## helper for i/o
-    @staticmethod
-    def read_byte_arrays(bytearray_string):
-        """Method to recover bytes from pandas read/cast function:
-            inputDf = pd.read_csv(config['input'], converters:{FaceDetectTransform.COL_IMAGE_DATA:FaceDetectTransform.read_byte_arrays})
-           https://stackoverflow.com/a/43024993
-        """
-        from ast import literal_eval
-        if type(bytearray_string)==str and bytearray_string.startswith("b'"):
-            return bytearray(literal_eval(bytearray_string))
-        return bytearray_string
-
 # FaceDetectTransform.__module__ = '__main__'
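
With base64 text in COL_IMAGE_DATA, the read_byte_arrays converter removed above is no longer needed: a plain pandas CSV round trip preserves the column as str. A hedged sketch, assuming df is an input frame built as above and the file name is illustrative:

import pandas as pd
from face_privacy_filter.transform_detect import FaceDetectTransform

df.to_csv('sample.csv', index=False)   # 'df' is an assumed single-row input frame with the base64 column
df2 = pd.read_csv('sample.csv')        # no converters= needed now that the column is base64 text
assert isinstance(df2[FaceDetectTransform.COL_IMAGE_DATA][0], str)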
diff --git a/face_privacy_filter/transform_region.py b/face_privacy_filter/transform_region.py
index 533efbd..1ff7f26 100644
@@ -8,6 +8,7 @@ import os
 import pandas as pd
 import numpy as np
 from sklearn.base import BaseEstimator, ClassifierMixin
+import base64
 
 # NOTE: If this class were built in another model (e.g. another vendor, class, etc), we would need to
 #       *exactly match* the i/o for the upstream (detection) and downstream (this processing)
@@ -27,6 +28,9 @@ class RegionTransform(BaseEstimator, ClassifierMixin):
     @staticmethod
     def generate_out_df(media_type="", bin_stream=b""):
         # munge stream and mimetype into input sample
+        bin_stream = base64.b64encode(bin_stream)
+        if isinstance(bin_stream, bytes):
+            bin_stream = bin_stream.decode()
         return pd.DataFrame([[media_type, bin_stream]], columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
 
     @staticmethod
@@ -112,7 +116,11 @@ class RegionTransform(BaseEstimator, ClassifierMixin):
             if not len(image_row[FaceDetectTransform.COL_IMAGE_DATA]):  # must have valid image data
                 print("Error: RegionTransform expected image data, but found empty binary string {:}".format(nameG))
                 continue
-            file_bytes = np.asarray(bytearray(FaceDetectTransform.read_byte_arrays(image_row[FaceDetectTransform.COL_IMAGE_DATA][0])), dtype=np.uint8)
+            image_byte = image_row[FaceDetectTransform.COL_IMAGE_DATA][0]
+            if isinstance(image_byte, str):
+                image_byte = image_byte.encode()
+            image_byte = bytearray(base64.b64decode(image_byte))
+            file_bytes = np.asarray(image_byte, dtype=np.uint8)
             local_image['data'] = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
             local_image['image'] = nameG
             local_image['mime'] = image_row[FaceDetectTransform.COL_IMAGE_MIME]
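
Downstream consumers now receive base64 text as well. A hedged sketch of recovering the filtered image from the output frame (df_out is a hypothetical result of RegionTransform; the destination path is illustrative):

import base64
from face_privacy_filter.transform_detect import FaceDetectTransform

b64_data = df_out[FaceDetectTransform.COL_IMAGE_DATA][0]   # single-row output assumed, as built by generate_out_df
with open('output.jpg', 'wb') as fh:                       # illustrative destination path
    fh.write(base64.b64decode(b64_data))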
diff --git a/testing/app.py b/testing/app.py
index 2054c51..9eb3fd8 100755
@@ -14,13 +14,16 @@ import requests
 
 from cognita_client.wrap.load import load_model
 from face_privacy_filter.transform_detect import FaceDetectTransform
+import base64
 
 def generate_image_df(path_image="", bin_stream=b""):
     # munge stream and mimetype into input sample
     if path_image and os.path.exists(path_image):
         bin_stream = open(path_image, 'rb').read()
-    return pd.DataFrame([['image/jpeg', bin_stream]],
-                        columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
+    bin_stream = base64.b64encode(bin_stream)
+    if isinstance(bin_stream, bytes):
+        bin_stream = bin_stream.decode()
+    return pd.DataFrame([['image/jpeg', bin_stream]], columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
 
 def transform(mime_type, image_binary):
     app = current_app
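
The test harness mirrors the same encoding. A hedged usage sketch of the updated generate_image_df helper (the input path is illustrative, and the snippet assumes it runs where the helper is defined, i.e. testing/app.py):

from face_privacy_filter.transform_detect import FaceDetectTransform   # import shown above

df_in = generate_image_df(path_image='testing/example.jpg')   # illustrative image path
print(df_in[FaceDetectTransform.COL_IMAGE_MIME][0])            # 'image/jpeg'
print(df_in[FaceDetectTransform.COL_IMAGE_DATA][0][:16])       # leading base64 characters, a str rather than raw bytes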
diff --git a/testing/swagger.yaml b/testing/swagger.yaml
index 548a80a..4cccfcd 100644
@@ -32,4 +32,5 @@ parameters:
     description: Binary image blob
     in: formData
     type: file
+    format: base64
     required: true
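
On the wire, the image blob is now base64 text rather than raw binary. A hedged client sketch against this definition; the endpoint URL and form field name are assumptions for illustration only, not taken from the commit:

import base64
import requests

with open('face.jpg', 'rb') as fh:                               # illustrative input image
    payload = base64.b64encode(fh.read()).decode()
resp = requests.post('http://localhost:8884/transform',          # hypothetical host and route
                     files={'image': ('face.b64', payload)})     # formData file field (name assumed)
print(resp.status_code)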