def model_create_pipeline(transformer):
    """Wrap *transformer* as an Acumos model plus its runtime requirements.

    Derives columnar NamedTuple input/output types from the transformer's
    `_type_in` / `_type_out` properties, each of which returns a pair
    `({column_name: python_type, ...}, type_name)`.

    Returns a `(Model, Requirements)` tuple ready for an Acumos session push.
    """
    from acumos.session import Requirements
    from acumos.modeling import Model, List, create_namedtuple
    import sklearn
    import cv2
    from os import path

    # derive the input type from the transformer, e.g. {'test': int, 'tag': str}
    type_list, type_name = transformer._type_in
    input_type = [(k, List[type_list[k]]) for k in type_list]  # one List field per column
    type_in = create_namedtuple(type_name, input_type)

    # derive the output type from the transformer the same way
    type_list, type_name = transformer._type_out
    output_type = [(k, List[type_list[k]]) for k in type_list]
    type_out = create_namedtuple(type_name, output_type)

    def predict_class(value: type_in) -> type_out:
        '''Returns an array of float predictions'''
        # rebuild a DataFrame from the columnar NamedTuple input
        df = pd.DataFrame(np.column_stack(value), columns=value._fields)
        tags_df = transformer.predict(df)
        # flatten the result frame back into the columnar output type
        return type_out(*(col for col in tags_df.values.T))

    # compute path of this package to add it as a dependency
    package_path = path.dirname(path.realpath(__file__))
    return Model(transform=predict_class), Requirements(packages=[package_path], reqs=[pd, np, sklearn],
                                                        req_map={cv2: 'opencv-python'})
def main(config={}):
inputDf = FaceDetectTransform.generate_in_df(config['input'])
else:
inputDf = pd.read_csv(config['input'], converters={FaceDetectTransform.COL_IMAGE_DATA: FaceDetectTransform.read_byte_arrays})
- dfPred = model.transform.from_native(inputDf).as_wrapped()
- dfPred = dfPred[0]
+
+ type_in = model.transform._input_type
+ transform_in = type_in(*tuple(col for col in inputDf.values.T))
+ transform_out = model.transform.from_wrapped(transform_in).as_wrapped()
+ dfPred = pd.DataFrame(np.column_stack(transform_out), columns=transform_out._fields)
+
+ if not config['csv_input']:
+ dfPred = FaceDetectTransform.suppress_image(dfPred)
+ print("ALMOST DONE")
+ print(dfPred)
if config['predict_path']:
print("Writing prediction to file '{:}'...".format(config['predict_path']))
dfPred.to_csv(config['predict_path'], sep=",", index=False)
else:
FaceDetectTransform.generate_out_image(dfPred, config['predict_path'])
- if not config['csv_input']:
- dfPred = FaceDetectTransform.suppress_image(dfPred)
if dfPred is not None:
print("Predictions:\n{:}".format(dfPred))
@staticmethod
def suppress_image(df):
    """Blank out the raw image payload columns of *df* in place and return it.

    Clears the mime-type and binary-data columns (presumably so downstream
    consumers don't carry the large image blobs — TODO confirm with callers).
    """
    blank_cols = [FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA]
    # NOTE(review): this now blanks the image columns on EVERY row (set to None),
    # not just the rows where region index == image id as the older code did.
    df[blank_cols] = None
    return df
@property
def _type_in(self):
    """Custom input type for this processing transformer.

    Returns a ``({column_name: python_type}, type_name)`` pair consumed by
    ``model_create_pipeline`` to build the flattened NamedTuple input type.
    """
    # base input for detect is the image itself (mime type + raw bytes)
    return {FaceDetectTransform.COL_IMAGE_MIME: str, FaceDetectTransform.COL_IMAGE_DATA: bytes}, "FaceImage"
@property
def _type_out(self):
    """Custom output type for this processing transformer.

    Returns a ``({column_name: python_type}, type_name)`` pair; the column
    types are inferred from a sample output dict.
    """
    output_dict = FaceDetectTransform.generate_out_dict()
    # infer each column's type from the sample value in the generated dict
    return {k: type(output_dict[k]) for k in output_dict}, "DetectionFrames"
def score(self, X, y=None):
    """Scoring hook kept for sklearn estimator compatibility; always zero."""
    return 0
image_byte = X[FaceDetectTransform.COL_IMAGE_DATA][image_idx]
if type(image_byte) == str:
image_byte = image_byte.encode()
- image_byte = bytearray(base64.b64decode(image_byte))
+ image_byte = base64.b64decode(image_byte)
+ image_byte = bytearray(image_byte)
file_bytes = np.asarray(image_byte, dtype=np.uint8)
img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# img = cv2.imread(image_set[1])
face_rect[2], face_rect[3], image=image_idx)]),
ignore_index=True)
if dfReturn is None: # create an NP container for all image samples + features
- dfReturn = df.reindex_axis(self.output_names_, axis=1)
+ dfReturn = df # df.reindex_axis(self.output_names_, axis=1)
else:
dfReturn = dfReturn.append(df, ignore_index=True)
# print("IMAGE {:} found {:} total rows".format(image_idx, len(df)))
return FaceDetectTransform.generate_out_dict(idx=idx, x=x, y=y, h=h, w=w, image=image, bin_stream=bin_stream, media=media)
@property
def _type_in(self):
    """Custom input type for this processing transformer.

    Returns a ``({column_name: python_type}, type_name)`` pair; the column
    types are inferred from a sample input dict.
    """
    input_dict = RegionTransform.generate_in_dict()
    # infer each column's type from the sample value in the generated dict
    return {k: type(input_dict[k]) for k in input_dict}, "DetectionFrames"
@property
def _type_out(self):
    """Custom output type for this processing transformer.

    Returns a ``({column_name: python_type}, type_name)`` pair consumed by
    ``model_create_pipeline`` to build the flattened NamedTuple output type.
    """
    # output of the region transform is again an image (mime type + raw bytes)
    return {FaceDetectTransform.COL_IMAGE_MIME: str, FaceDetectTransform.COL_IMAGE_DATA: bytes}, "TransformedImage"
def score(self, X, y=None):
    """Scoring hook kept for sklearn estimator compatibility; always zero."""
    return 0
import base64

import numpy as np
import pandas as pd
from flask import current_app, make_response

from acumos.wrapped import load_model
from face_privacy_filter.transform_detect import FaceDetectTransform
# munge stream and mimetype into input sample
if path_image and os.path.exists(path_image):
bin_stream = open(path_image, 'rb').read()
- bin_stream = base64.b64encode(bin_stream)
- if type(bin_stream) == bytes:
- bin_stream = bin_stream.decode()
- return pd.DataFrame([['image/jpeg', bin_stream]], columns=[FaceDetectTransform.COL_IMAGE_MIME, FaceDetectTransform.COL_IMAGE_DATA])
+ # bin_stream = base64.b64encode(bin_stream)
+ # if type(bin_stream) == bytes:
+ # bin_stream = bin_stream.decode()
+ return pd.DataFrame([['image/jpeg', bin_stream]], columns=["mime_type", "image_binary"])
def transform(mime_type, image_binary):
    """Flask handler: run the uploaded image through detect + region models.

    Reads the raw image bytes from the ``image_binary`` form file, feeds them
    through ``app.model_detect`` and then ``app.model_proc`` (each optional),
    and returns the resulting rows as a JSON response.
    """
    app = current_app
    # NOTE(review): time.clock() was removed in Python 3.8 — confirm target
    # runtime or switch to time.perf_counter()
    time_start = time.clock()
    image_read = image_binary.stream.read()
    X = generate_image_df(bin_stream=image_read)
    pred_out = None
    if app.model_detect is not None:  # first translate to input type
        type_in = app.model_detect.transform._input_type
        detect_in = type_in(*tuple(col for col in X.values.T))
        pred_out = app.model_detect.transform.from_wrapped(detect_in)
    if app.model_proc is not None and pred_out is not None:  # then transform to output type
        pred_out = app.model_proc.transform.from_msg(pred_out.as_msg())
    time_stop = time.clock() - time_start
    pred = None
    if pred_out is not None:
        # rebuild a DataFrame from the columnar NamedTuple prediction
        pred = pd.DataFrame(np.column_stack(pred_out), columns=pred_out._fields)
    # guard the no-model path: emit an empty JSON list instead of crashing
    retStr = json.dumps(pred.to_dict(orient='records') if pred is not None else [], indent=4)
    # formulate response
    resp = make_response((retStr, 200, {}))
    # allow 'localhost' from 'file' or other;
    # NOTE: DO NOT USE IN PRODUCTION!!!
    resp.headers['Access-Control-Allow-Origin'] = '*'
    print(retStr[:min(200, len(retStr))])
    # print(pred)
    return resp
swagger: '2.0'
info:
title: Face Privacy Filter Example
- version: "0.1"
+ version: "0.2"
consumes:
- application/json
produces:
summary: Post an image for processing
parameters:
- $ref: '#/parameters/mime_type'
- $ref: '#/parameters/image_binary'
responses:
200:
description: Image processed
required: true
default: 'image/jpeg'
# pattern: "^[a-zA-Z0-9-]+$"
  image_binary:
    name: image_binary
description: Binary image blob
in: formData
type: file