- first pass at app endpoint simulation
author: Eric Zavesky <ezavesky@research.att.com>
Tue, 17 Oct 2017 14:25:28 +0000 (09:25 -0500)
committer: Eric Zavesky <ezavesky@research.att.com>
Tue, 17 Oct 2017 14:25:28 +0000 (09:25 -0500)
README.md
face_privacy_filter/_version.py
face_privacy_filter/transform_detect.py
face_privacy_filter/transform_region.py
setup.py
testing/app.py
testing/swagger.yaml

index f2a16c0..f7f1e1a 100644 (file)
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ composed together for operation.
 ```
 * Dump the `pixelate` model to disk.
 ```
-./bin/run_local.sh -d model_pix -f pixelate -i image.jpg -p predict.csv
+./bin/run_local.sh -d model_pix -f pixelate
 ```
 * Evaluate the `detect` model from disk and a previously produced detect object
 ```
index f6fe3c9..67d38d0 100644 (file)
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
-__version__ = "0.1.0"
+__version__ = "0.1.1"
 MODEL_NAME = 'face_privacy_filter'
index 656db86..b700787 100644 (file)
@@ -158,3 +158,5 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
         if type(bytearray_string)==str and bytearray_string.startswith("b'"):
             return bytearray(literal_eval(bytearray_string))
         return bytearray_string
+
+# FaceDetectTransform.__module__ = '__main__'
index 6f92bf6..533efbd 100644 (file)
@@ -112,7 +112,7 @@ class RegionTransform(BaseEstimator, ClassifierMixin):
             if not len(image_row[FaceDetectTransform.COL_IMAGE_DATA]):  # must have valid image data
                 print("Error: RegionTransform expected image data, but found empty binary string {:}".format(nameG))
                 continue
-            file_bytes = np.asarray(image_row[FaceDetectTransform.COL_IMAGE_DATA][0], dtype=np.uint8)
+            file_bytes = np.asarray(bytearray(FaceDetectTransform.read_byte_arrays(image_row[FaceDetectTransform.COL_IMAGE_DATA][0])), dtype=np.uint8)
             local_image['data'] = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
             local_image['image'] = nameG
             local_image['mime'] = image_row[FaceDetectTransform.COL_IMAGE_MIME]
@@ -145,3 +145,4 @@ class RegionTransform(BaseEstimator, ClassifierMixin):
                 img[x:max_x, y:max_y] = fill_color
         return img
 
+# RegionTransform.__module__ = '__main__'
index 340a0ab..c157c3b 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@ setup(
     install_requires=['cognita_client',
                       'numpy',
                       'sklearn',
-                      'opencv-python'
+                      'opencv-python',
                       globals_dict['MODEL_NAME']],
     tests_require=['pytest',
                    'pexpect'],
index 6c59982..2054c51 100755 (executable)
@@ -5,6 +5,7 @@ import logging
 import argparse
 import json
 import time
+import os
 
 from flask import Flask, request, current_app, make_response
 
@@ -14,49 +15,45 @@ import requests
 from cognita_client.wrap.load import load_model
 from face_privacy_filter.transform_detect import FaceDetectTransform
 
+def generate_image_df(path_image="", bin_stream=b""):
+    # munge stream and mimetype into input sample
+    if path_image and os.path.exists(path_image):
+        bin_stream = open(path_image, 'rb').read()
+    return pd.DataFrame([['image/jpeg', bin_stream]],
+                        columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
 
-#def invoke_method(model_method):
-def transform(mime_type, image_binary, rich_output=False):
+def transform(mime_type, image_binary):
     app = current_app
     time_start = time.clock()
     image_read = image_binary.stream.read()
-    X = FaceDetectTransform.generate_in_df(bin_stream=image_read)
+    X = generate_image_df(bin_stream=image_read)
+    print(X)
 
-    pred = app.model.transform.from_native(X).as_native()
+    if app.model_detect is not None:
+        dfPred = app.model_detect.transform.from_native(X).as_native()
+    if app.model_proc is not None:
+        dfRegions = dfPred
+        dfPred = app.model_proc.transform.from_native(dfRegions).as_native()
     time_stop = time.clock()
 
-    if rich_output:
-        # NOTE: This response is specially formatted for the webdemo included with this package.
-        #       Alternate forms of a response are viable for any other desired application.
-        retObj = {
-            'regions': pred.to_dict(orient="records"),
-            'clientfile': 'undefined',
-            'info': 'Processed',
-            'processingtime': (time_stop - time_start),
-            'serverfilename': '/dev/null',
-            'status': 'Succeeded'
-        }
-
-        # dump to pretty JSON
-        retStr = json.dumps({'results':retObj}, indent=4)
-    else:
-        retStr = json.dumps(pred.to_dict(orient='records'), indent=4)
+    retStr = json.dumps(dfPred.to_dict(orient='records'), indent=4)
 
     # formulate response
     resp = make_response((retStr, 200, { } ))
     # allow 'localhost' from 'file' or other;
     # NOTE: DO NOT USE IN PRODUCTION!!!
     resp.headers['Access-Control-Allow-Origin'] = '*'
-    print(type(pred))
-    print(retStr[:min(200,len(retStr))])
+    print(type(dfPred))
+    print(retStr[:min(200, len(retStr))])
     #print(pred)
     return resp
 
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("--port", type=int, default=8884, help='port to launch the simple web server')
-    parser.add_argument("--modeldir", type=str, default='../model', help='model directory to load dumped artifact')
+    parser.add_argument('-p', "--port", type=int, default=8884, help='port to launch the simple web server')
+    parser.add_argument('-d', "--modeldir_detect", type=str, default='../model_detect', help='model directory for detection')
+    parser.add_argument('-a', "--modeldir_analyze", type=str, default='../model_pix', help='model directory for analysis/processing')
     pargs = parser.parse_args()
 
     print("Configuring local application... {:}".format(__name__))
@@ -66,14 +63,22 @@ if __name__ == '__main__':
     # example usage:
     #     curl -F image_binary=@test.jpg -F mime_type="image/jpeg" "http://localhost:8885/transform"
 
-    print("Loading model... {:}".format(pargs.modeldir))
-    app.app.model = load_model(pargs.modeldir)  # refers to ./model dir in pwd. generated by helper script also in this dir
-    # # dynamically add handlers depending on model capabilities
-    # for method_name, method in model.methods.items():
-    #     url = "/{}".format(method_name)
-    #     print("Adding route {}".format(url))
-    #     handler = partial(invoke_method, model_method=method)
-    #     app.add_url_rule(url, method_name, handler, methods=['POST'])
+    app.app.model_detect = None
+    if pargs.modeldir_detect:
+        if not os.path.exists(pargs.modeldir_detect):
+            print("Failed loading of detect model '{:}' even though it was specified...".format(pargs.modeldir_detect))
+        else:
+            print("Loading detect model... {:}".format(pargs.modeldir_detect))
+            app.app.model_detect = load_model(pargs.modeldir_detect)
+
+    app.app.model_proc = None
+    if pargs.modeldir_analyze:
+        if not os.path.exists(pargs.modeldir_analyze):
+            print("Failed loading of processing model '{:}' even though it was specified...".format(
+                pargs.modeldir_analyze))
+        else:
+            print("Loading processing model... {:}".format(pargs.modeldir_analyze))
+            app.app.model_proc = load_model(pargs.modeldir_analyze)
 
     # run our standalone gevent server
     app.run(port=pargs.port) #, server='gevent')
index be01601..548a80a 100644 (file)
@@ -14,7 +14,6 @@ paths:
       parameters:
         - $ref: '#/parameters/mime_type'
         - $ref: '#/parameters/image_binary'
-        - $ref: '#/parameters/rich_output'
       responses:
         200:
           description: Image processed
@@ -34,11 +33,3 @@ parameters:
     in: formData
     type: file
     required: true
-  rich_output:
-    name: rich_output
-    description: Rich or regular output
-    in: formData
-    type: boolean
-    required: false
-    default: true
-