import argparse
import json
import time
+import os
+import pandas as pd
from flask import Flask, request, current_app, make_response
from cognita_client.wrap.load import load_model
from face_privacy_filter.transform_detect import FaceDetectTransform
+def generate_image_df(path_image="", bin_stream=b""):
+    # munge the stream and mimetype into an input sample (one-row DataFrame)
+    if path_image and os.path.exists(path_image):
+        with open(path_image, 'rb') as f:
+            bin_stream = f.read()
+    return pd.DataFrame([['image/jpeg', bin_stream]],
+                        columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
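+# example usage (either argument works):
+#   generate_image_df(path_image='test.jpg')
+#   generate_image_df(bin_stream=open('test.jpg', 'rb').read())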
-#def invoke_method(model_method):
-def transform(mime_type, image_binary, rich_output=False):
+def transform(mime_type, image_binary):
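+    # handler: read the uploaded image, run the face-detection model, optionally chain its
+    # regions through the processing model (e.g. pixelation), and return predictions as JSON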
    app = current_app
    time_start = time.clock()
    image_read = image_binary.stream.read()
-    X = FaceDetectTransform.generate_in_df(bin_stream=image_read)
+    X = generate_image_df(bin_stream=image_read)
+    print(X)
-    pred = app.model.transform.from_native(X).as_native()
+    dfPred = X   # fall back to the raw input frame so dfPred is always defined
+    if app.model_detect is not None:
+        dfPred = app.model_detect.transform.from_native(X).as_native()
+    if app.model_proc is not None:
+        dfRegions = dfPred
+        dfPred = app.model_proc.transform.from_native(dfRegions).as_native()
    time_stop = time.clock()
-    if rich_output:
-        # NOTE: This response is specially formatted for the webdemo included with this package.
-        #       Alternate forms of a response are viable for any other desired application.
-        retObj = {
-            'regions': pred.to_dict(orient="records"),
-            'clientfile': 'undefined',
-            'info': 'Processed',
-            'processingtime': (time_stop - time_start),
-            'serverfilename': '/dev/null',
-            'status': 'Succeeded'
-        }
-
-        # dump to pretty JSON
-        retStr = json.dumps({'results': retObj}, indent=4)
-    else:
-        retStr = json.dumps(pred.to_dict(orient='records'), indent=4)
+    retStr = json.dumps(dfPred.to_dict(orient='records'), indent=4)
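+    # (each JSON record corresponds to one row of the prediction DataFrame)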
    # formulate response
    resp = make_response((retStr, 200, { } ))
    # allow 'localhost' from 'file' or other;
    # NOTE: DO NOT USE IN PRODUCTION!!!
    resp.headers['Access-Control-Allow-Origin'] = '*'
-    print(type(pred))
-    print(retStr[:min(200,len(retStr))])
+    print(type(dfPred))
+    print(retStr[:min(200, len(retStr))])
    #print(pred)
    return resp
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
-    parser.add_argument("--port", type=int, default=8884, help='port to launch the simple web server')
-    parser.add_argument("--modeldir", type=str, default='../model', help='model directory to load dumped artifact')
+    parser.add_argument('-p', "--port", type=int, default=8884, help='port to launch the simple web server')
+    parser.add_argument('-d', "--modeldir_detect", type=str, default='../model_detect', help='model directory for detection')
+    parser.add_argument('-a', "--modeldir_analyze", type=str, default='../model_pix', help='model directory for processing (e.g. pixelation)')
    pargs = parser.parse_args()
    print("Configuring local application... {:}".format(__name__))
    # example usage:
    #   curl -F image_binary=@test.jpg -F mime_type="image/jpeg" "http://localhost:8885/transform"
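+    # example startup (script name is assumed here; substitute this file's actual name):
+    #   python filter_image_server.py -p 8884 -d ../model_detect -a ../model_pix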
-    print("Loading model... {:}".format(pargs.modeldir))
-    app.app.model = load_model(pargs.modeldir)  # refers to ./model dir in pwd, generated by helper script also in this dir
-    # # dynamically add handlers depending on model capabilities
-    # for method_name, method in model.methods.items():
-    #     url = "/{}".format(method_name)
-    #     print("Adding route {}".format(url))
-    #     handler = partial(invoke_method, model_method=method)
-    #     app.add_url_rule(url, method_name, handler, methods=['POST'])
+    app.app.model_detect = None
+    if pargs.modeldir_detect:
+        if not os.path.exists(pargs.modeldir_detect):
+            print("Detect model directory '{:}' was specified but does not exist; skipping load...".format(pargs.modeldir_detect))
+        else:
+            print("Loading detect model... {:}".format(pargs.modeldir_detect))
+            app.app.model_detect = load_model(pargs.modeldir_detect)
+
+    app.app.model_proc = None
+    if pargs.modeldir_analyze:
+        if not os.path.exists(pargs.modeldir_analyze):
+            print("Processing model directory '{:}' was specified but does not exist; skipping load...".format(pargs.modeldir_analyze))
+        else:
+            print("Loading processing model... {:}".format(pargs.modeldir_analyze))
+            app.app.model_proc = load_model(pargs.modeldir_analyze)
    # run our standalone gevent server
    app.run(port=pargs.port) #, server='gevent')