i/o for single image, js update (CORS+generalize)
author Eric Z <ezavesky@research.att.com>
Mon, 4 Jun 2018 05:34:53 +0000 (00:34 -0500)
committer Eric Z <ezavesky@research.att.com>
Mon, 4 Jun 2018 05:34:53 +0000 (00:34 -0500)
- modify docs for environment variable definition/examples (see the sketch below)
- allow image predict independent of model dump
- modify i/o for single image instead of array of images
- fork JS code into generic (demo-framework.js) and specific (image-classes.js)
- update generic JS+HTML for cross-domain functionality (e.g. http://htmlpreview.github.io/)
- update generic JS to correctly get/set url parameter
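
A minimal sketch (not part of this change) of the new environment-variable defaults, using the example URLs from the argparse help text in filter_image.py below; when these are exported, the -a/-A flags can be omitted:

    # Hedged sketch: ACUMOS_PUSH / ACUMOS_AUTH feed the new os.getenv() argparse
    # defaults, so a wrapper can set them once instead of passing -a/-A each run.
    import os

    os.environ["ACUMOS_PUSH"] = "http://localhost:8887/v2/models"  # push endpoint (example from the help text)
    os.environ["ACUMOS_AUTH"] = "http://localhost:8887/v2/auth"    # auth endpoint (example from the help text)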

Issue: ACUMOS-942

Signed-off-by: Eric Z <ezavesky@research.att.com>
Change-Id: If2f8870301269d5c5735787d3e76f3e23cd82d1

15 files changed:
.gitignore
docs/face-privacy-filter.md
docs/release-notes.md
docs/tutorials/lesson3.md
face_privacy_filter/_version.py
face_privacy_filter/filter_image.py
face_privacy_filter/transform_detect.py
face_privacy_filter/transform_region.py
web_demo/demo-framework.js [new file with mode: 0644]
web_demo/face-privacy.css
web_demo/face-privacy.html
web_demo/face-privacy.js
web_demo/jquery-ajax-native.js [new file with mode: 0755]
web_demo/model.detect.proto
web_demo/model.pixelate.proto

index 1857fad..837e962 100644 (file)
@@ -101,6 +101,7 @@ ENV/
 
 # Spyder project settings
 .spyderproject
+.pytest_cache
 
 # Rope project settings
 .ropeproject
index 38354ea..cdf439d 100644 (file)
@@ -45,7 +45,8 @@ the original image and mime type are also embedded with the special
 region code `-1` within the output.
 
 A web demo is included with the source code, available via the
-[Acumos Gerrit repository](https://gerrit.acumos.org/r/gitweb?p=face-privacy-filter.git;a=summary).
+[Acumos Gerrit repository](https://gerrit.acumos.org/r/gitweb?p=face-privacy-filter.git;a=summary)
+or the mirrored [Acumos Github repository](https://github.com/acumos/face-privacy-filter).
 It utilizes a protobuf javascript library and processes
 input images to detect all faces within an image.
 
index bf71121..74450f2 100644 (file)
 
 # Face Privacy Filter Release Notes
 ## 0.3
+### 0.3.1
+* Update model to use single image as input type
+* Update javascript demo to run with better CORS behavior (github htmlpreview)
+* Additional documentation for environment variables
+* Simplify operation for active prediction to use created model (no save+load required)
+
 ### 0.3.0
 * Documentation (lesson1) updated with model runner examples.  Deprecation notice
  in using explicit proto- and swagger-based servers.
index 360e7df..53975bd 100644 (file)
@@ -32,6 +32,21 @@ CORS or other cross-domain objections to dropping the file `face-privacy.html`
 into the browser and accessing a locally hosted server API, as configured
 in [the previous tutorial](lesson2.md).
 
+### Open-source hosted run
+Utilizing the generous [htmlpreview function](https://htmlpreview.github.io/) available on
+GitHub, you can also experiment with the repository-based web resource.  This resource
+proxies the repository `web_demo` directory into a live page.
+
+Navigate to the [default webhost page](http://htmlpreview.github.io/?https://github.com/acumos/face-privacy-filter/blob/master/web_demo/face-privacy.html)
+and confirm that the resource loads properly.  The image at the bottom of this guide
+is a good reference for correct page loading and display.
+
+After confirming correct page load, simply replace the value in the `Transform URL`
+field to point at your deployed instance.  For example, if you're running a
+dumped model locally, the URL might point at a localhost port.
+
+
+### Local webserver run
 If you want to run the test locally, you can use the built-in python
 webserver with the line below while working in the `web_demo` directory
 (assuming you're running python3).
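
The actual command sits just past this hunk; presumably it resembles Python's built-in webserver, roughly `python3 -m http.server` run from `web_demo/`. A hedged equivalent as a script (the port number is an assumption):

    # Serve the current directory (web_demo/) so face-privacy.html, the JS, and
    # the .proto files are reachable by the browser demo.
    import http.server
    import socketserver

    with socketserver.TCPServer(("", 5000), http.server.SimpleHTTPRequestHandler) as httpd:
        httpd.serve_forever()
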
index 3f69a1e..e1b7def 100644 (file)
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
-__version__ = "0.3.0"
+__version__ = "0.3.1"
 MODEL_NAME = 'face_privacy_filter'
index 20e20f8..2015e31 100644 (file)
@@ -28,49 +28,62 @@ import numpy as np
 import pandas as pd
 
 
-def model_create_pipeline(transformer, funcName):
+def model_create_pipeline(transformer, funcName, inputIsSet, outputIsSet):
     from acumos.session import Requirements
     from acumos.modeling import Model, List, create_namedtuple
+    from face_privacy_filter._version import MODEL_NAME, __version__ as VERSION
     import sklearn
     import cv2
     from os import path
 
     # derive the input type from the transformer
-    type_list, type_name = transformer._type_in  # it looked like this {'test': int, 'tag': str}
-    input_type = [(k, type_list[k]) for k in type_list]   # flat, no lists
+    input_type, type_name = transformer._type_in  # it looked like this [('test', int), ('tag', str)]
     type_in = create_namedtuple(type_name, input_type)
-    name_multiple_in = type_name + "s"
-    input_set = create_namedtuple(type_name + "Set", [(name_multiple_in, List[type_in])])
+    input_set = type_in
+    name_multiple_in = type_name
+    if inputIsSet:
+        name_multiple_in = type_name + "s"
+        input_set = create_namedtuple(type_name + "Set", [(name_multiple_in, List[type_in])])
 
     # derive the output type from the transformer
-    type_list, type_name = transformer._type_out
-    output_type = [(k, type_list[k]) for k in type_list]  # flat, no lists
+    output_type, type_name = transformer._type_out
     type_out = create_namedtuple(type_name, output_type)
-    name_multiple_out = type_name + "s"
-    output_set = create_namedtuple(type_name + "Set", [(name_multiple_out, List[type_out])])
+    output_set = type_out
+    if outputIsSet:
+        name_multiple_out = type_name + "s"
+        output_set = create_namedtuple(type_name + "Set", [(name_multiple_out, List[type_out])])
 
-    def predict_class(val_wrapped: input_set) -> output_set:
-        '''Returns an array of float predictions'''
+    def transform(val_wrapped: input_set) -> output_set:
+        '''Transform from image or detection and return score or image'''
         # print("-===== input -===== ")
         # print(input_set)
-        df = pd.DataFrame(getattr(val_wrapped, name_multiple_in), columns=type_in._fields)
+        if inputIsSet:
+            df = pd.DataFrame(getattr(val_wrapped, name_multiple_in), columns=type_in._fields)
+        else:
+            df = pd.DataFrame([val_wrapped], columns=type_in._fields)
         # print("-===== df -===== ")
         # print(df)
-        tags_df = transformer.predict(df)
+        result_df = transformer.predict(df)
         # print("-===== out df -===== ")
-        # print(tags_df)
-        tags_parts = tags_df.to_dict('split')
+        # print(result_df)
+        # print(result_parts)
+        result_parts = result_df.to_dict('split')
+        print("[{} - {}:{}]: Input {} row(s) ({}), output {} row(s) ({}))".format(
+              "classify", MODEL_NAME, VERSION, len(df), type_in, len(result_df), output_set))
+        output_obj = []
+        if len(df):
+            if outputIsSet:
+                output_obj = output_set([type_out(*r) for r in result_parts['data']])
+            else:
+                output_obj = output_set(*result_parts['data'][0])
         # print("-===== out list -===== ")
-        # print(output_set)
-        print("[{}]: Input {} row(s) ({}), output {} row(s) ({}))".format(
-              funcName, len(df), input_set, len(tags_df), output_set))
-        tags_list = [type_out(*r) for r in tags_parts['data']]
-        return output_set(tags_list)
+        # print(output_obj)
+        return output_obj
 
     # compute path of this package to add it as a dependency
     package_path = path.dirname(path.realpath(__file__))
     objModelDeclare = {}
-    objModelDeclare[funcName] = predict_class
+    objModelDeclare[funcName] = transform
     # add the model dependency manually because of the way we constructed the package;
     # the opencv-python/cv2 dependency is not picked up automagically
     return Model(**objModelDeclare), Requirements(packages=[package_path], reqs=[pd, np, sklearn, 'opencv-python'],
@@ -88,60 +101,56 @@ def main(config={}):
     parser.add_argument('-c', '--csv_input', dest='csv_input', action='store_true', default=False, help='input as CSV format not an image')
     parser.add_argument('-s', '--suppress_image', dest='suppress_image', action='store_true', default=False, help='do not create an extra row for a returned image')
     parser.add_argument('-f', '--function', type=str, default='detect', help='which type of model to generate', choices=['detect', 'pixelate'])
-    parser.add_argument('-a', '--push_address', help='server address to push the model (e.g. http://localhost:8887/v2/models)', default='')
-    parser.add_argument('-A', '--auth_address', help='server address for login and push of the model (e.g. http://localhost:8887/v2/auth)', default='')
+    parser.add_argument('-a', '--push_address', help='server address to push the model (e.g. http://localhost:8887/v2/models)', default=os.getenv('ACUMOS_PUSH', ""))
+    parser.add_argument('-A', '--auth_address', help='server address for login and push of the model (e.g. http://localhost:8887/v2/auth)', default=os.getenv('ACUMOS_AUTH', ""))
     parser.add_argument('-d', '--dump_model', help='dump model to a pickle directory for local running', default='')
     config.update(vars(parser.parse_args()))     # pargs, unparsed = parser.parse_known_args()
 
     if not config['predict_path']:
         print("Attempting to create new model for dump or push...")
-
-        # refactor the raw samples from upstream image classifier
-        if config['function'] == "detect":
-            transform = FaceDetectTransform(include_image=not config['suppress_image'])
-        elif config['function'] == "pixelate":
-            transform = RegionTransform()
-        else:
-            print("Error: Functional mode '{:}' unknown, aborting create".format(config['function']))
-        inputDf = transform.generate_in_df()
-        pipeline, reqs = model_create_pipeline(transform, config['function'])
-
-        # formulate the pipeline to be used
-        model_name = MODEL_NAME + "_" + config['function']
-        if config['push_address']:
-            from acumos.session import AcumosSession
-            print("Pushing new model to '{:}'...".format(config['push_address']))
-            session = AcumosSession(push_api=config['push_address'], auth_api=config['auth_address'])
-            session.push(pipeline, model_name, reqs)  # creates ./my-iris.zip
-
-        if config['dump_model']:
-            from acumos.session import AcumosSession
-            from os import makedirs
-            if not os.path.exists(config['dump_model']):
-                makedirs(config['dump_model'])
-            print("Dumping new model to '{:}'...".format(config['dump_model']))
-            session = AcumosSession()
-            session.dump(pipeline, model_name, config['dump_model'], reqs)  # creates ./my-iris.zip
-
+    elif not os.path.exists(config['input']):
+        print("Prediction requested but target input '{:}' was not found, please check input arguments.".format(config['input']))
+        sys.exit(-1)
+
+    # refactor the raw samples from upstream image classifier
+    if config['function'] == "detect":
+        transform = FaceDetectTransform(include_image=not config['suppress_image'])
+        pipeline, reqs = model_create_pipeline(transform, config['function'], False, True)
+    elif config['function'] == "pixelate":
+        transform = RegionTransform()
+        pipeline, reqs = model_create_pipeline(transform, config['function'], True, False)
     else:
-        if not config['dump_model'] or not os.path.exists(config['dump_model']):
-            print("Attempting to predict from a dumped model, but model not found.".format(config['dump_model']))
-            sys.exit(-1)
-        if not os.path.exists(config['input']):
-            print("Predictino requested but target input '{:}' was not found, please check input arguments.".format(config['input']))
-            sys.exit(-1)
-
-        print("Attempting predict/transform on input sample...")
-        from acumos.wrapped import load_model
-        model = load_model(config['dump_model'])
+        print("Error: Functional mode '{:}' unknown, aborting create".format(config['function']))
+    print(pipeline)
+    print(getattr(pipeline, config['function']))
+
+    # formulate the pipeline to be used
+    model_name = MODEL_NAME + "_" + config['function']
+    if config['push_address']:
+        from acumos.session import AcumosSession
+        print("Pushing new model to '{:}'...".format(config['push_address']))
+        session = AcumosSession(push_api=config['push_address'], auth_api=config['auth_address'])
+        session.push(pipeline, model_name, reqs)  # pushes model directly to servers
+
+    if config['dump_model']:
+        from acumos.session import AcumosSession
+        from os import makedirs
+        if not os.path.exists(config['dump_model']):
+            makedirs(config['dump_model'])
+        print("Dumping new model to '{:}'...".format(config['dump_model']))
+        session = AcumosSession()
+        session.dump(pipeline, model_name, config['dump_model'], reqs)  # creates model subdirectory
+
+    if config['predict_path']:
+        print("Using newly created model for local prediction...")
         if not config['csv_input']:
             inputDf = FaceDetectTransform.generate_in_df(config['input'])
         else:
             inputDf = pd.read_csv(config['input'], converters={FaceDetectTransform.COL_IMAGE_DATA: FaceDetectTransform.read_byte_arrays})
 
-        type_in = model.transform._input_type
-        transform_in = type_in(*tuple(col for col in inputDf.values.T))
-        transform_out = model.transform.from_wrapped(transform_in).as_wrapped()
+        func_action = getattr(pipeline, config['function'])  # simplify to just use loaded model 6/1
+        pred_raw = func_action.wrapped(inputDf)
+        transform_out = func_action.from_wrapped(pred_raw).as_wrapped()
         dfPred = pd.DataFrame(list(zip(*transform_out)), columns=transform_out._fields)
 
         if not config['csv_input']:
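
Pieced together, the new save/load-free prediction path looks roughly like the sketch below; the calls mirror the hunk above, and the image filename is a placeholder:

    # Hedged sketch: predict straight from the freshly created pipeline, without
    # session.dump() followed by load_model(); 'face.jpg' is a placeholder input.
    import pandas as pd
    from face_privacy_filter.transform_detect import FaceDetectTransform
    from face_privacy_filter.filter_image import model_create_pipeline

    transform = FaceDetectTransform(include_image=True)
    pipeline, reqs = model_create_pipeline(transform, "detect", False, True)  # single Image in, DetectionFrameSet out

    inputDf = FaceDetectTransform.generate_in_df("face.jpg")
    func_action = getattr(pipeline, "detect")
    pred_raw = func_action.wrapped(inputDf)
    transform_out = func_action.from_wrapped(pred_raw).as_wrapped()
    dfPred = pd.DataFrame(list(zip(*transform_out)), columns=transform_out._fields)
    print(dfPred)
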
index b5ad6aa..21a44dc 100644 (file)
@@ -116,13 +116,13 @@ class FaceDetectTransform(BaseEstimator, ClassifierMixin):
     @property
     def _type_in(self):
         """Custom input type for this processing transformer"""
-        return {FaceDetectTransform.COL_IMAGE_MIME: str, FaceDetectTransform.COL_IMAGE_DATA: bytes}, "Image"
+        return [(FaceDetectTransform.COL_IMAGE_MIME, str), (FaceDetectTransform.COL_IMAGE_DATA, bytes)], "Image"
 
     @property
     def _type_out(self):
         """Custom input type for this processing transformer"""
         output_dict = FaceDetectTransform.generate_out_dict()
-        return {k: type(output_dict[k]) for k in output_dict}, "DetectionFrame"
+        return [(k, type(output_dict[k])) for k in output_dict], "DetectionFrame"
 
     def score(self, X, y=None):
         return 0
index e791a32..938fbec 100644 (file)
@@ -65,12 +65,12 @@ class RegionTransform(BaseEstimator, ClassifierMixin):
     def _type_in(self):
         """Custom input type for this processing transformer"""
         input_dict = RegionTransform.generate_in_dict()
-        return {k: type(input_dict[k]) for k in input_dict}, "DetectionFrame"
+        return [(k, type(input_dict[k])) for k in input_dict], "DetectionFrame"
 
     @property
     def _type_out(self):
         """Custom input type for this processing transformer"""
-        return {FaceDetectTransform.COL_IMAGE_MIME: str, FaceDetectTransform.COL_IMAGE_DATA: bytes}, "Image"
+        return [(FaceDetectTransform.COL_IMAGE_MIME, str), (FaceDetectTransform.COL_IMAGE_DATA, bytes)], "Image"
 
     def score(self, X, y=None):
         return 0
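
The dict-to-list change in `_type_in`/`_type_out` keeps field order deterministic for the named tuples that `model_create_pipeline` builds from them; a small sketch of the same call pattern (field values here are illustrative only):

    # Minimal sketch, assuming acumos.modeling.create_namedtuple accepts the same
    # ordered [(name, type), ...] list used by the transformers above.
    from acumos.modeling import create_namedtuple

    Image = create_namedtuple("Image", [("mime_type", str), ("image_binary", bytes)])
    msg = Image("image/jpeg", b"...")  # positional construction matches the declared field order
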
diff --git a/web_demo/demo-framework.js b/web_demo/demo-framework.js
new file mode 100644 (file)
index 0000000..fe11fdb
--- /dev/null
@@ -0,0 +1,545 @@
+/*
+  ===============LICENSE_START=======================================================
+  Acumos Apache-2.0
+  ===================================================================================
+  Copyright (C) 2017-2018 AT&T Intellectual Property & Tech Mahindra. All rights reserved.
+  ===================================================================================
+  This Acumos software file is distributed by AT&T and Tech Mahindra
+  under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  This file is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  ===============LICENSE_END=========================================================
+*/
+/**
+ demo-framework.js - send frames to a face privacy service; cloned from image-classes.js
+
+ Videos or camera are displayed locally and frames are periodically sent to GPU image-net classifier service (developed by Zhu Liu) via http post.
+ For webRTC, See: https://gist.github.com/greenido/6238800
+
+ D. Gibbon 6/3/15
+ D. Gibbon 4/19/17 updated to new getUserMedia api, https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
+ D. Gibbon 8/1/17 adapted for system
+ E. Zavesky 10/19/17 adapted for video+image
+ E. Zavesky 05/05/18 adapted for row-based image and other results
+ E. Zavesky 05/30/18 adapted for single image input, github preview, safe posting (forked from model-specific code)
+ */
+
+
+/**
+ * main entry point
+ */
+function demo_init(objSetting) {
+    if (!objSetting) objSetting = {};
+
+    // clone/extend the default input from our main script
+    $(document.body).data('hdparams', $.extend(true, objSetting, {     // store global vars in the body element
+        classificationServer: getUrlParameter('url-image'), // default to what's in our url parameter
+        protoObj: null,   // to be back-filled after protobuf load {'root':obj, 'methods':{'xx':{'typeIn':x, 'typeOut':y}} }
+        protoPayloadInput: null,   //payload for encoded message download (if desired)
+        protoPayloadOutput: null,   //payload for encoded message download (if desired)
+        protoKeys: null,  // currently selected protobuf method (if any)
+        frameCounter: 0,
+        totalFrames: 900000,   // stop after this many frames just to avoid sending frames forever if someone leaves page up
+        frameInterval: 500,            // Milliseconds to sleep between sending frames to reduce server load and reduce results updates
+        frameTimer: -1,                // frame clock for processing
+        maxSrcVideoWidth: 512, // maximum image width for processing
+        serverIsLocal: true,    // server is local versus 'firewall' version
+        imageIsWaiting: false,  // blocking to prevent too many queued frames
+        // functional customizations for each demo
+        documentTitle: "Protobuf Demo",
+        mediaList: [],        //relative URLs of media files
+        protoList: [],        //relative URLs of proto files to include
+        // Objects from DOM elements
+        video: document.getElementById('srcVideo'),
+        srcImgCanvas: document.getElementById('srcImgCanvas'), // we have a 'src' source image
+    }));
+
+    var hd = $(document.body).data('hdparams');
+    if (hd.video) {
+        hd.video.addEventListener("loadedmetadata", newVideo);
+    }
+
+    $("#protoInput").prop("disabled",true).click(downloadBlobIn);
+    $("#protoOutput").prop("disabled",true).click(downloadBlobOut);
+    $("#resultText").hide();
+
+    //add text input tweak
+    $("#serverUrl").change(function() {
+        $(document.body).data('hdparams')['classificationServer'] = $(this).val();
+        updateLink("serverLink");
+    }).val($(document.body).data('hdparams')['classificationServer'])
+    //set launch link at first
+    $("#protoMethod").change(function() {
+        updateProto($(this).attr('id'));
+    });
+
+    //if protobuf is enabled, fire load event for it as well
+    hd.protoObj = {};  //clear from last load
+    $.each(hd.protoList, function(idx, protoTuple) {     //load relevant protobuf tuples
+        protobuf_load.apply(this, protoTuple);      //load each file independently
+    });
+
+    // add buttons to change video
+    $.each(hd.mediaList, function(key) {
+        //TODO: integrate as DIV instead of button
+        var button = $('<button/>').text(videos[key].name).attr('movie', videos[key].url);
+        $("#sourceRibbon").append(button);
+    });
+
+    // add buttons to change video or image
+       $("#sourceRibbon").children("div,button").click(function() {
+        var $this = $(this);
+        $this.siblings().removeClass('selected'); //clear other selection
+        $this.addClass('selected');
+
+        var movieAttr = $this.attr('movie');
+        var objImg = $this.children('img')[0];
+        if (objImg) {
+            movieAttr = $(objImg).attr('movie');
+            objImg = $(objImg);
+        }
+
+        clearInterval(hd.frameTimer);  // stop the processing
+        hd.video.pause();
+
+        if (movieAttr) {
+            switchVideo(movieAttr);
+        }
+        else {
+            $(hd.video).hide();
+            $(srcImgCanvas).show();
+            if (objImg)
+                switchImage(objImg.attr('src'));
+        }
+       }).first().click();
+}
+
+
+function protobuf_load(pathProto, forceSelect) {
+    protobuf.load(pathProto, function(err, root) {
+        if (err) {
+            console.log("[protobuf]: Error!: "+err);
+            throw err;
+        }
+        var domSelect = $("#protoMethod");
+        var numMethods = domSelect.children().length;
+        $.each(root.nested, function(namePackage, objPackage) {    // walk all
+            if ('Model' in objPackage && 'methods' in objPackage.Model) {    // walk to model and functions...
+                var typeSummary = {'root':root, 'methods':{} };
+                $.each(objPackage.Model.methods, function(nameMethod, objMethod) {  // walk methods
+                    typeSummary['methods'][nameMethod] = {};
+                    typeSummary['methods'][nameMethod]['typeIn'] = namePackage+'.'+objMethod.requestType;
+                    typeSummary['methods'][nameMethod]['typeOut'] = namePackage+'.'+objMethod.responseType;
+                    typeSummary['methods'][nameMethod]['service'] = namePackage+'.'+nameMethod;
+
+                    //create HTML object as well
+                    var namePretty = namePackage+"."+nameMethod;
+                    var domOpt = $("<option />").attr("value", namePretty).text(
+                        nameMethod+ " (input: "+objMethod.requestType
+                        +", output: "+objMethod.responseType+")");
+                    if (numMethods==0) {    // first method discovery
+                        domSelect.append($("<option />").attr("value","").text("(disabled, not loaded)")); //add 'disabled'
+                    }
+                    if (forceSelect) {
+                        domOpt.attr("selected", 1);
+                    }
+                    domSelect.append(domOpt);
+                    numMethods++;
+                });
+                $(document.body).data('hdparams').protoObj[namePackage] = typeSummary;   //save new method set
+                $("#protoContainer").show();
+            }
+        });
+        console.log("[protobuf]: Load successful, found "+numMethods+" model methods.");
+    });
+}
+
+// modify the link and update our url
+function updateLink(domId) {
+    var newServer = $(document.body).data('hdparams')['classificationServer'];
+    var sNewUrl = updateQueryStringParameter(window.location.href, "url-image", newServer, "?");
+    $("#"+domId).attr('href', sNewUrl);
+    $("#serverUrl").val(newServer);
+    //window.history.pushState({}, $(document.body).data('hdparams')['documentTitle'], sNewUrl);
+}
+
+// update proto link
+function updateProto(domProtoCombo) {
+    var nameProtoMethod = $("#"+domProtoCombo+" option:selected").attr('value');
+    $(document.body).data('hdparams').protoKeys = null;
+    if (nameProtoMethod && nameProtoMethod.length) {     //valid protobuf type?
+        var partsURL = $(document.body).data('hdparams').classificationServer.split("/");
+        var protoKeys = nameProtoMethod.split(".", 2);       //modified for multiple detect/pixelate models
+        $(document.body).data('hdparams').protoKeys = protoKeys;
+        partsURL[partsURL.length-1] = protoKeys[1];
+        $(document.body).data('hdparams').classificationServer = partsURL.join("/");   //rejoin with new endpoint
+        updateLink("serverLink", $(document.body).data('hdparams').classificationServer);
+    }
+}
+
+// https://stackoverflow.com/a/6021027
+function updateQueryStringParameter(uri, key, value, separator) {
+    var re = new RegExp("([?&])" + key + "=.*?(&|$)", "i");
+    if (uri.match(re)) {
+        return uri.replace(re, '$1' + key + "=" + value + '$2');
+    }
+    if (!separator) //allow forced/override
+       separator = uri.indexOf('?') !== -1 ? "&" : "?";
+    return uri + separator + key + "=" + value;
+}
+
+// https://stackoverflow.com/a/3354511
+window.onpopstate = function(e){
+    if(e.state){
+        //document.getElementById("content").innerHTML = e.state.html;
+        $(document.body).data('hdparams')['documentTitle'] = e.state.pageTitle;
+    }
+};
+
+function getUrlParameter(key) {
+    var re = new RegExp("([?&])" + key + "=(.*?)(&|$)", "i");
+    var match = window.location.search.match(re)
+    if (match) {
+        //console.log(match);
+        return match[match.length-2];
+    }
+};
+
+/**
+ * Change the video source and restart
+ */
+function switchVideo(movieAttr) {
+       var hd = $(document.body).data('hdparams');
+
+    // Set the video source based on URL specified in the 'videos' list, or select camera input
+    $(hd.video).show();
+    $(srcImgCanvas).hide();
+    if (movieAttr == "Camera") {
+        var constraints = {audio: false, video: true};
+        navigator.mediaDevices.getUserMedia(constraints)
+            .then(function(mediaStream) {
+                hd.video.srcObject = mediaStream;
+                hd.video.play();
+            })
+            .catch(function(err) {
+                console.log(err.name + ": " + err.message);
+            });
+    } else {
+        var mp4 = document.getElementById("mp4");
+        mp4.setAttribute("src", movieAttr);
+        hd.video.load();
+        hd.video.autoplay = true;
+        newVideo();
+    }
+}
+
+/**
+ * Called after a new video has loaded (at least the video metadata has loaded)
+ */
+function newVideo() {
+       var hd = $(document.body).data('hdparams');
+       hd.frameCounter = 0;
+       hd.imageIsWaiting = false;
+
+       // set processing canvas size based on source video
+       var pwidth = hd.video.videoWidth;
+       var pheight = hd.video.videoHeight;
+       if (pwidth > hd.maxSrcVideoWidth) {
+               pwidth = hd.maxSrcVideoWidth;
+               pheight = Math.floor((pwidth / hd.video.videoWidth) * pheight); // preserve aspect ratio
+       }
+       hd.srcImgCanvas.width = pwidth;
+       hd.srcImgCanvas.height = pheight;
+
+    updateProto("protoMethod");
+    hd.frameTimer = setInterval(nextFrame, hd.frameInterval); // start the processing
+}
+
+
+/**
+ * process the next video frame
+ */
+function nextFrame() {
+       var hd = $(document.body).data('hdparams');
+       if (hd.video.ended || hd.video.paused) {
+               return;
+       }
+    switchImage(hd.video, true);
+}
+
+function switchImage(imgSrc, isVideo) {
+    var canvas = $(document.body).data('hdparams')['srcImgCanvas'];
+    if (!isVideo) {
+        var img = new Image();
+        img.crossOrigin = "Anonymous";
+        img.onload = function () {
+            var ctx = canvas.getContext('2d');
+            var canvasCopy = document.createElement("canvas");
+            var copyContext = canvasCopy.getContext("2d");
+
+            var ratio = 1;
+
+            //console.log( $(document.body).data('hdparams'));
+            //console.log( [ img.width, img.height]);
+            // https://stackoverflow.com/a/2412606
+            if(img.width > $(document.body).data('hdparams')['canvasMaxW'])
+                ratio = $(document.body).data('hdparams')['canvasMaxW'] / img.width;
+            if(ratio*img.height > $(document.body).data('hdparams')['canvasMaxH'])
+                ratio = $(document.body).data('hdparams')['canvasMaxH'] / img.height;
+
+            canvasCopy.width = img.width;
+            canvasCopy.height = img.height;
+            copyContext.drawImage(img, 0, 0);
+
+            canvas.width = img.width * ratio;
+            canvas.height = img.height * ratio;
+            ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
+            //document.removeChild(canvasCopy);
+            updateProto("protoMethod");
+            doPostImage(canvas, '#resultsDiv', '#destImg', canvas.toDataURL());
+        }
+        img.src = imgSrc;  //copy source, let image load
+    }
+    else if (!$(document.body).data('hdparams').imageIsWaiting) {
+        var ctx = canvas.getContext('2d');
+        var canvasCopy = document.createElement("canvas");
+        var copyContext = canvasCopy.getContext("2d");
+        var ratio = 1;
+
+        if(imgSrc.videoWidth > $(document.body).data('hdparams')['canvasMaxW'])
+            ratio = $(document.body).data('hdparams')['canvasMaxW'] / imgSrc.videoWidth;
+        if(ratio*imgSrc.videoHeight > $(document.body).data('hdparams')['canvasMaxH'])
+            ratio = $(document.body).data('hdparams')['canvasMaxH'] / imgSrc.videoHeight;
+
+        //console.log("Canvas Copy:"+canvasCopy.width+"/"+canvasCopy.height);
+        //console.log("Canvas Ratio:"+ratio);
+        //console.log("Video: "+imgSrc.videoWidth+"x"+imgSrc.videoHeight);
+        canvasCopy.width = imgSrc.videoWidth;     //large as possible
+        canvasCopy.height = imgSrc.videoHeight;
+        copyContext.drawImage(imgSrc, 0, 0);
+
+        canvas.width = canvasCopy.width * ratio;
+        canvas.height = canvasCopy.height * ratio;
+        ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
+        //document.removeChild(canvasCopy);
+        doPostImage(canvas, '#resultsDiv', '#destImg', canvas.toDataURL());
+    }
+}
+
+
+/**
+ * post an image from the canvas to the service
+ */
+function doPostImage(srcCanvas, dstDiv, dstImg, imgPlaceholder) {
+    var dataURL = srcCanvas.toDataURL('image/jpeg', 1.0);
+    var hd = $(document.body).data('hdparams');
+    var sendPayload = null;
+
+    hd.imageIsWaiting = true;
+    var domHeaders = {};
+    dstDiv = $(dstDiv);
+    $("#postSpinner").remove();     //erase previously existing one
+    dstDiv.append($("<div id='postSpinner' class='spinner'>&nbsp;</div>"));
+    if (dstImg)     //convert to jquery dom object
+        dstImg = $(dstImg);
+
+    //console.log("[doPostImage]: Selected method ... '"+typeInput+"'");
+    if (hd.protoKeys) {     //valid protobuf type?
+        var blob = dataURItoBlob(dataURL, true);
+
+        // fields from .proto file at time of writing...
+        // message Image {
+        //   string mime_type = 1;
+        //   bytes image_binary = 2;
+        // }
+
+        //TODO: should we always assume this is input? answer: for now, YES, always image input!
+        var inputPayload = { "mimeType": blob.type, "imageBinary": blob.bytes };
+
+        // ---- method for processing from a type ----
+        var msgInput = hd.protoObj[hd.protoKeys[0]]['root'].lookupType(hd.protoObj[hd.protoKeys[0]]['methods'][hd.protoKeys[1]]['typeIn']);
+        // Verify the payload if necessary (i.e. when possibly incomplete or invalid)
+        var errMsg = msgInput.verify(inputPayload);
+        if (errMsg) {
+            var strErr = "[doPostImage]: Error during type verify for object input into protobuf method. ("+errMsg+")";
+            dstDiv.empty().html(strErr);
+            console.log(strErr);
+            throw Error(strErr);
+        }
+        // Create a new message
+        var msgTransmit = msgInput.create(inputPayload);
+        // Encode a message to an Uint8Array (browser) or Buffer (node)
+        sendPayload = msgInput.encode(msgTransmit).finish();
+
+        //downloadBlob(sendPayload, 'protobuf.bin', 'application/octet-stream');
+        // NOTE: TO TEST THIS BINARY BLOB, use some command-line magic like this...
+        //  protoc --decode=mMJuVapnmIbrHlZGKyuuPDXsrkzpGqcr.FaceImage model.proto < protobuf.bin
+        $("#protoInput").prop("disabled",false);
+        hd.protoPayloadInput = sendPayload;
+
+        //request.setRequestHeader("Content-type", "application/octet-stream;charset=UTF-8");
+        domHeaders["Content-type"] = "text/plain;charset=UTF-8";
+        //request.responseType = 'arraybuffer';
+    }
+    else if (hd.protoList.length) {
+        var strErr = "[doPostImage]: Proto method expected but unavailable in POST, aborting send.";
+        console.log(strErr);
+        throw Error(strErr);
+    }
+    else {
+        var blob = dataURItoBlob(dataURL, false);
+        sendPayload = new FormData();
+        if (hd.serverIsLocal) {
+            serviceURL = hd.classificationServer;
+            sendPayload.append("image_binary", blob);
+            sendPayload.append("mime_type", blob.type);
+        }
+        else {      //disabled now for direct URL specification
+            serviceURL = hd.classificationServerFirewall;
+            sendPayload.append("myFile", blob);
+            sendPayload.append("rtnformat", "json");
+            sendPayload.append("myList", "5"); // limit the number of classes (max 1000)
+        }
+    }
+
+    //$(dstImg).addClaas('workingImage').siblings('.spinner').remove().after($("<span class='spinner'>&nbsp;</span>"));
+    $.ajax({
+        type: 'POST',
+        url: hd.classificationServer,
+        data: sendPayload,
+        crossDomain: true,
+        dataType: 'native',
+        xhrFields: {
+            responseType: 'arraybuffer'
+        },
+        processData: false,
+        headers: domHeaders,
+        error: function (data, textStatus, errorThrown) {
+            //console.log(textStatus);
+            if (textStatus=="error") {
+                textStatus += " (Was the transform URL valid? Was the right method selected?) ";
+            }
+            var errStr = "Error: Failed javascript POST (err: "+textStatus+","+errorThrown+")";
+            console.log(errStr);
+            dstDiv.html(errStr);
+            hd.imageIsWaiting = false;
+            return false;
+        },
+        success: function(data, textStatus, jqXHR) {
+            // what do we do with a good processing result?
+            //
+            //  data: the raw body from the response
+            //  dstImg: the dom element of a destination image
+            //  methodKeys: which protomethod was selected
+            //  dstImg: the dom element of a destination image (if available)
+            //  imgPlaceholder: the exported canvas image from last source
+            //
+            var returnState = processResult(data, dstDiv, hd.protoKeys, dstImg, imgPlaceholder);
+            hd.imageIsWaiting = false;
+            return returnState;
+        }
+       });
+}
+
+/**
+ * convert base64/URLEncoded data component to raw binary data held in a string
+ *
+ * Stoive, http://stackoverflow.com/questions/4998908/convert-data-uri-to-file-then-append-to-formdata
+ */
+function dataURItoBlob(dataURI, wantBytes) {
+    // convert base64/URLEncoded data component to raw binary data held in a string
+    var byteString;
+    if (dataURI.split(',')[0].indexOf('base64') >= 0)
+        byteString = atob(dataURI.split(',')[1]);
+    else
+        byteString = unescape(dataURI.split(',')[1]);
+
+    // separate out the mime component
+    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
+
+    // write the bytes of the string to a typed array
+    var ia = new Uint8Array(byteString.length);
+    for (var i = 0; i < byteString.length; i++) {
+        ia[i] = byteString.charCodeAt(i);
+    }
+    //added for returning bytes directly
+    if (wantBytes) {
+        return {'bytes':ia, 'type':mimeString};
+    }
+    return new Blob([ia], {type:mimeString});
+}
+
+// https://stackoverflow.com/a/12713326
+function Uint8ToString(u8a){
+  var CHUNK_SZ = 0x8000;
+  var c = [];
+  for (var i=0; i < u8a.length; i+=CHUNK_SZ) {
+    c.push(String.fromCharCode.apply(null, u8a.subarray(i, i+CHUNK_SZ)));
+  }
+  return c.join("");
+}
+
+function BlobToDataURI(data, mime) {
+    var b64encoded = btoa(Uint8ToString(data));
+    return "data:"+mime+";base64,"+b64encoded;
+}
+
+// ----- diagnostic tool to download binary blobs ----
+function downloadBlobOut() {
+    return downloadBlob($(document.body).data('hdparams').protoPayloadOutput, "protobuf.out.bin");
+}
+
+function downloadBlobIn() {
+    return downloadBlob($(document.body).data('hdparams').protoPayloadInput, "protobuf.in.bin");
+}
+
+//  https://stackoverflow.com/a/33622881
+function downloadBlob(data, fileName, mimeType) {
+    //if there is no data, filename, or mime provided, make our own
+    if (!data)
+        data = $(document.body).data('hdparams').protoPayloadInput;
+    if (!fileName)
+        fileName = "protobuf.bin";
+    if (!mimeType)
+        mimeType = "application/octet-stream";
+
+    var blob, url;
+    blob = new Blob([data], {
+        type: mimeType
+    });
+    url = window.URL.createObjectURL(blob);
+    downloadURL(url, fileName, mimeType);
+    setTimeout(function() {
+        return window.URL.revokeObjectURL(url);
+    }, 1000);
+};
+
+function downloadURL(data, fileName) {
+    var a;
+    a = document.createElement('a');
+    a.href = data;
+    a.download = fileName;
+    document.body.appendChild(a);
+    a.style = 'display: none';
+    a.click();
+    a.remove();
+};
+
+
+//load image that has been uploaded into a canvas
+function handleImage(e){
+    var reader = new FileReader();
+    reader.onload = function(event){
+        switchImage(event.target.result);
+    }
+    reader.readAsDataURL(e.target.files[0]);
+}
+
+
index 4832ff5..853f93d 100644 (file)
@@ -22,18 +22,6 @@ body {
   font-family:Trebuchet MS,Tahoma,Verdana,Arial,sans-serif;
 }
 
-#resultsDiv {
-  position: absolute;
-  top: 120px;
-  right: 50px;
-  color: #000;
-  text-align: center;
-  font-size: 20px;
-  background-color: rgba(221, 221, 221, 0.8);
-  width: 220px;
-  padding: 10px 0;
-  z-index: 2147483647;
-}
 
 hr {
     margin: 10px 0;
index 40d7e4d..c264983 100644 (file)
@@ -1,4 +1,7 @@
 <html>
+<head>
+<meta content="text/html;charset=utf-8" http-equiv="Content-Type">
+<meta content="utf-8" http-equiv="encoding">
 <!---
   ===============LICENSE_START=======================================================
   Acumos Apache-2.0
@@ -24,17 +27,13 @@ D. Gibbon 8/1/17
 E. Zavesky 10/17/17
 Rewrite to utilize simple image-based processing steps.
 -->
-<head>
 <title>Face Privacy Processing</title>
 <link rel="stylesheet" type="text/css" href="face-privacy.css" />
-<script type="text/javascript" src="protobuf/protobuf.min.js"></script>
-<script type="text/javascript" src="jquery.js"></script>
-<script type="text/javascript" src="face-privacy.js"></script>
 </head>
 <body>
 <div class="mediaRibbon">
     <div>
-        <video class="videoWin" id="srcVideo" width="380" height="270" muted controls>
+        <video class="videoWin" id="srcVideo" width="380" height="270" muted controls crossorigin>
             <source id="mp4" src="images/commercial.mp4" type="video/mp4"></source>
             Your browser does not support the video tag. Please use Chrome or Firefox.
         </video>
@@ -42,12 +41,12 @@ Rewrite to utilize simple image-based processing steps.
         <span>raw image</span>
     </div>
     <div>
-        <img id="destImg" width="380" height="270" />
+        <img id="destImg" width="380" height="270" crossorigin />
         <span>post-processed image</span>
     </div>
 </div>
 <br />
-<div id="resultText">
+<div id="resultsDiv">
    <div>
     If utilized textual results would go here...
     </div>
@@ -104,5 +103,11 @@ Rewrite to utilize simple image-based processing steps.
 <br/>
 <a href='http://www.research.att.com/projects/Video/'>Video and Multimedia Technologies Research</a>
 </div>
+<!-- move script to bottom for faster page load -->
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="protobuf/protobuf.min.js"></script>
+<script type="text/javascript" src="jquery-ajax-native.js"></script>
+<script type="text/javascript" src="demo-framework.js"></script>
+<script type="text/javascript" src="face-privacy.js"></script>
 </body>
 </html>
index ca66edb..59f0727 100644 (file)
@@ -18,7 +18,7 @@
   ===============LICENSE_END=========================================================
 */
 /**
- face-privacy.js - send frames to an face privacy service
+ face-privacy.js - send frames to a face privacy service; cloned from image-classes.js
 
  Videos or camera are displayed locally and frames are periodically sent to GPU image-net classifier service (developed by Zhu Liu) via http post.
  For webRTC, See: https://gist.github.com/greenido/6238800
@@ -28,6 +28,7 @@
  D. Gibbon 8/1/17 adapted for system
  E. Zavesky 10/19/17 adapted for video+image
  E. Zavesky 05/05/18 adapted for row-based image and other results
+ E. Zavesky 05/30/18 forked model generic code to `demo-framework.js`, switch to flat image
  */
 
 "use strict";
 /**
  * main entry point
  */
+
+// called one time when document is ready
 $(document).ready(function() {
     var urlDefault = getUrlParameter('url-image');
     if (!urlDefault)
-        urlDefault = "http://localhost:8884/transform";
-
-       $(document.body).data('hdparams', {     // store global vars in the body element
-               classificationServer: urlDefault,
-               protoObj: null,   // to be back-filled after protobuf load {'root':obj, 'methods':{'xx':{'typeIn':x, 'typeOut':y}} }
-               protoPayloadInput: null,   //payload for encoded message download (if desired)
-               protoPayloadOutput: null,   //payload for encoded message download (if desired)
-               frameCounter: 0,
-               frameInterval: 500,             // Milliseconds to sleep between sending frames to reduce server load and reduce results updates
-               frameTimer: -1,         // frame clock for processing
-               imageIsWaiting: false,  // blocking to prevent too many queued frames
-               // Objects from DOM elements
-               srcImgCanvas: document.getElementById('srcImgCanvas'),  // we have a 'src' source image
-               destImg: document.getElementById('destImg'),    // we have a 'src' source image
-               video: document.getElementById('srcVideo'),
-       });
-    $(document.body).data('hdparams')['canvasMaxH'] = $(document.body).data('hdparams')['srcImgCanvas'].height;
-    $(document.body).data('hdparams')['canvasMaxW'] = $(document.body).data('hdparams')['srcImgCanvas'].width;
-
-    $("#protoInput").prop("disabled",true).click(downloadBlobIn);
-    $("#protoOutput").prop("disabled",true).click(downloadBlobOut);
-    $("#resultText").hide();
-
-       //add text input tweak
-       $("#serverUrl").change(function() {
-           $(document.body).data('hdparams')['classificationServer'] = $(this).val();
-        updateLink("serverLink");
-       }).val($(document.body).data('hdparams')['classificationServer'])
-       //set launch link at first
-    updateLink("serverLink");
-
-       // add buttons to change video
-       $("#sourceRibbon div").click(function() {
-           var $this = $(this);
-           $this.siblings().removeClass('selected'); //clear other selection
-           $this.addClass('selected');
-           var objImg = $this.children('img')[0];
-           var hd = $(document.body).data('hdparams');
-           if (objImg) {
-               switchImage(objImg.src);
-            clearInterval(hd.frameTimer);      // stop the processing
-
-            var movieAttr = $(objImg).attr('movie');
-            if (movieAttr) {
-                // Set the video source based on URL specified in the 'videos' list, or select camera input
-                $(hd.video).show();
-                $(srcImgCanvas).hide();
-                if (movieAttr == "Camera") {
-                    var constraints = {audio: false, video: true};
-                    navigator.mediaDevices.getUserMedia(constraints)
-                        .then(function(mediaStream) {
-                            hd.video.srcObject = mediaStream;
-                            hd.video.play();
-                        })
-                        .catch(function(err) {
-                            console.log(err.name + ": " + err.message);
-                        });
-                } else {
-                    var mp4 = document.getElementById("mp4");
-                    mp4.setAttribute("src", movieAttr);
-                    hd.video.load();
-                    newVideo();
-                }
-            }
-            else {
-                hd.video.pause();
-                $(hd.video).hide();
-                $(srcImgCanvas).show();
-            }
-           }
-       });
-
-       //allow user-uploaded images
-    var imageLoader = document.getElementById('imageLoader');
-    imageLoader.addEventListener('change', handleImage, false);
-
-    //if protobuf is enabled, fire load event for it as well
-    $(document.body).data('hdparams').protoObj = {};  //clear from last load
-    protobuf_load("model.pixelate.proto", true);
-    protobuf_load("model.detect.proto");
-
-    //trigger first click
-    $("#sourceRibbon div")[0].click();
-});
-
-
-function protobuf_load(pathProto, forceSelect) {
-    protobuf.load(pathProto, function(err, root) {
-        if (err) {
-            console.log("[protobuf]: Error!: "+err);
-            domResult.html("<strong>"+"[protobuf]: Error!: "+err+"</strong>");
-            throw err;
-        }
-        var domSelect = $("#protoMethod");
-        var numMethods = domSelect.children().length;
-        $.each(root.nested, function(namePackage, objPackage) {    // walk all
-            if ('Model' in objPackage && 'methods' in objPackage.Model) {    // walk to model and functions...
-                var typeSummary = {'root':root, 'methods':{} };
-                $.each(objPackage.Model.methods, function(nameMethod, objMethod) {  // walk methods
-                    typeSummary['methods'][nameMethod] = {};
-                    typeSummary['methods'][nameMethod]['typeIn'] = namePackage+'.'+objMethod.requestType;
-                    typeSummary['methods'][nameMethod]['typeOut'] = namePackage+'.'+objMethod.responseType;
-                    typeSummary['methods'][nameMethod]['service'] = namePackage+'.'+nameMethod;
-
-                    //create HTML object as well
-                    var namePretty = namePackage+"."+nameMethod;
-                    var domOpt = $("<option />").attr("value", namePretty).text(
-                        nameMethod+ " (input: "+objMethod.requestType
-                        +", output: "+objMethod.responseType+")");
-                    if (numMethods==0) {    // first method discovery
-                        domSelect.append($("<option />").attr("value","").text("(disabled, not loaded)")); //add 'disabled'
-                    }
-                    if (forceSelect) {
-                        domOpt.attr("selected", 1);
-                    }
-                    domSelect.append(domOpt);
-                    numMethods++;
-                });
-                $(document.body).data('hdparams').protoObj[namePackage] = typeSummary;   //save new method set
-                $("#protoContainer").show();
-            }
-        });
-        console.log("[protobuf]: Load successful, found "+numMethods+" model methods.");
+        urlDefault = "http://localhost:8884/classify";
+    demo_init({
+        classificationServer: urlDefault,
+        protoList: [["model.pixelate.proto", true], ["model.detect.proto", false] ]
     });
-}
-
-/**
- * Called after a new video has loaded (at least the video metadata has loaded)
- */
-function newVideo() {
-       var hd = $(document.body).data('hdparams');
-       hd.frameCounter = 0;
-       hd.imageIsWaiting = false;
-       hd.video.play();
-
-       // set processing canvas size based on source video
-       var pwidth = hd.video.videoWidth;
-       var pheight = hd.video.videoHeight;
-       if (pwidth > hd.maxSrcVideoWidth) {
-               pwidth = hd.maxSrcVideoWidth;
-               pheight = Math.floor((pwidth / hd.video.videoWidth) * pheight); // preserve aspect ratio
-       }
-       hd.srcImgCanvas.width = pwidth;
-       hd.srcImgCanvas.height = pheight;
-
-       hd.frameTimer = setInterval(nextFrame, hd.frameInterval); // start the processing
-}
-
-/**
- * process the next video frame
- */
-function nextFrame() {
-       var hd = $(document.body).data('hdparams');
-       if (hd.video.ended || hd.video.paused) {
-               return;
-       }
-    switchImage(hd.video, true);
-}
-
-function updateLink(domId, newServer) {
-    var sPageURL = decodeURIComponent(window.location.search.split('?')[0]);
-    if (newServer==undefined) {
-        newServer = $(document.body).data('hdparams')['classificationServer'];
-    }
-    else {
-        $("#serverUrl").val(newServer);
-    }
-    var sNewUrl = sPageURL+"?url-image="+newServer;
-    $("#"+domId).attr('href', sNewUrl);
-}
-
-function switchImage(imgSrc, isVideo) {
-    var canvas = $(document.body).data('hdparams')['srcImgCanvas'];
-    if (!isVideo) {
-        var img = new Image();
-        img.onload = function () {
-            var ctx = canvas.getContext('2d');
-            var canvasCopy = document.createElement("canvas");
-            var copyContext = canvasCopy.getContext("2d");
-
-            var ratio = 1;
-
-            //console.log( $(document.body).data('hdparams'));
-            //console.log( [ img.width, img.height]);
-            // https://stackoverflow.com/a/2412606
-            if(img.width > $(document.body).data('hdparams')['canvasMaxW'])
-                ratio = $(document.body).data('hdparams')['canvasMaxW'] / img.width;
-            if(ratio*img.height > $(document.body).data('hdparams')['canvasMaxH'])
-                ratio = $(document.body).data('hdparams')['canvasMaxH'] / img.height;
-
-            canvasCopy.width = img.width;
-            canvasCopy.height = img.height;
-            copyContext.drawImage(img, 0, 0);
-
-            canvas.width = img.width * ratio;
-            canvas.height = img.height * ratio;
-            ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
-            //document.removeChild(canvasCopy);
-            doPostImage(canvas, '#destImg', canvas.toDataURL());
-        }
-        img.src = imgSrc;  //copy source, let image load
-    }
-    else if (!$(document.body).data('hdparams').imageIsWaiting) {
-        var ctx = canvas.getContext('2d');
-        var canvasCopy = document.createElement("canvas");
-        var copyContext = canvasCopy.getContext("2d");
-        var ratio = 1;
-
-        if(imgSrc.videoWidth > $(document.body).data('hdparams')['canvasMaxW'])
-            ratio = $(document.body).data('hdparams')['canvasMaxW'] / imgSrc.videoWidth;
-        if(ratio*imgSrc.videoHeight > $(document.body).data('hdparams')['canvasMaxH'])
-            ratio = $(document.body).data('hdparams')['canvasMaxH'] / canvasCopy.height;
-
-        //console.log("Canvas Copy:"+canvasCopy.width+"/"+canvasCopy.height);
-        //console.log("Canvas Ratio:"+ratio);
-        //console.log("Video: "+imgSrc.videoWidth+"x"+imgSrc.videoHeight);
-        canvasCopy.width = imgSrc.videoWidth;     //large as possible
-        canvasCopy.height = imgSrc.videoHeight;
-        copyContext.drawImage(imgSrc, 0, 0);
-
-        canvas.width = canvasCopy.width * ratio;
-        canvas.height = canvasCopy.height * ratio;
-        ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
-        //document.removeChild(canvasCopy);
-        doPostImage(canvas, '#destImg', canvas.toDataURL());
-    }
-}
-
-
-//load image that has been uploaded into a vancas
-function handleImage(e){
-    var reader = new FileReader();
-    reader.onload = function(event){
-        switchImage(event.target.result);
-    }
-    reader.readAsDataURL(e.target.files[0]);
-}
-
-
-
-// https://stackoverflow.com/questions/19491336/get-url-parameter-jquery-or-how-to-get-query-string-values-in-js
-function getUrlParameter(sParam) {
-    var sPageURL = decodeURIComponent(window.location.search.substring(1)),
-        sURLVariables = sPageURL.split('&'),
-        sParameterName,
-        i;
-
-    for (i = 0; i < sURLVariables.length; i++) {
-        sParameterName = sURLVariables[i].split('=');
+});
 
-        if (sParameterName[0] === sParam) {
-            return sParameterName[1] === undefined ? true : sParameterName[1];
-        }
-    }
-};
 
 
-/**
- * post an image from the canvas to the service
- */
-function doPostImage(srcCanvas, dstImg, dataPlaceholder) {
-    var dataURL = srcCanvas.toDataURL('image/jpeg', 1.0);
+// what do we do with a good processing result?
+//
+//  data: the raw body from the response
+//  dstImg: the dom element of a destination image
+//  methodKeys: which protomethod was selected
+//  dstImg: the dom element of a destination image (if available)
+//  imgPlaceholder: the exported canvas image from last source
+//
+function processResult(data, dstDiv, methodKeys, dstImg, imgPlaceholder) {
     var hd = $(document.body).data('hdparams');
-    var sendPayload = null;
-
-    var nameProtoMethod = $("#protoMethod option:selected").attr('value');
-    var methodKeys = null;
-    if (nameProtoMethod && nameProtoMethod.length) {     //valid protobuf type?
-        var partsURL = hd.classificationServer.split("/");
-        methodKeys = nameProtoMethod.split(".", 2);       //modified for multiple detect/pixelate models
-        partsURL[partsURL.length-1] = methodKeys[1];
-        hd.classificationServer = partsURL.join("/");   //rejoin with new endpoint
-        updateLink("serverLink", hd.classificationServer);
-    }
-
-    var serviceURL = hd.classificationServer;
-    var request = new XMLHttpRequest();     // create request to manipulate
-    request.open("POST", serviceURL, true);
-    var domResult = $("#resultText");
-
-    //console.log("[doPostImage]: Selected method ... '"+typeInput+"'");
-    if (nameProtoMethod && nameProtoMethod.length) {     //valid protobuf type?
-        var blob = dataURItoBlob(dataURL, true);
-
-        // fields from .proto file at time of writing...
-        //    message FaceImage {
-        //      repeated string mime_type = 1;   -> becomes "mimeType" (NOTE repeated type)
-        //      repeated bytes image_binary = 2; -> becomes "imageBinary"
-        //    }
-
-        //TODO: should we always assume this is input? answer: for now, YES, always image input!
-        var inputPayload = {'Images':[{ "mimeType": blob.type, "imageBinary": blob.bytes }]};
+    if (methodKeys) {
+        //console.log(request);
+        var bodyEncodedInString = new Uint8Array(data);
+        //console.log(bodyEncodedInString);
+        //console.log(bodyEncodedInString.length);
+        $("#protoOutput").prop("disabled",false);
+        hd.protoPayloadOutput = bodyEncodedInString;
 
         // ---- method for processing from a type ----
-        var msgInput = hd.protoObj[methodKeys[0]]['root'].lookupType(hd.protoObj[methodKeys[0]]['methods'][methodKeys[1]]['typeIn']);
-        // Verify the payload if necessary (i.e. when possibly incomplete or invalid)
-        var errMsg = msgInput.verify(inputPayload);
-        if (errMsg) {
-            var errStr = "[doPostImage]: Error during type verify for object input into protobuf method."+errMsg;
+        var msgOutput = hd.protoObj[methodKeys[0]]['root'].lookupType(hd.protoObj[methodKeys[0]]['methods'][methodKeys[1]]['typeOut']);
+        var objOutput = null;
+        try {
+            objOutput = msgOutput.decode(hd.protoPayloadOutput);
+        }
+        catch(err) {
+            var errStr = "Error: Failed to parse protobuf response, was the right method chosen? (err: "+err.message+")";
             console.log(errStr);
-            domResult.html("<strong>"+errStr+"</strong>");
-            throw Error(errMsg);
+            dstDiv.html(errStr);
+            return false;
         }
-        // Create a new message
-        var msgTransmit = msgInput.create(inputPayload);
-        // Encode a message to an Uint8Array (browser) or Buffer (node)
-        sendPayload = msgInput.encode(msgTransmit).finish();
+        var nameRepeated = null;
 
-        // ----------
+        // NOTE: this code expects either a single flat message or one top-level repeated field of nested results
+        //  e.g.   Image{mime_type, image_binary}
+        //  e.g.   DetectionFrameSet [ DetectionFrame{x, y, ...., mime_type, image_binary}, .... ]
 
-        /*
-        // ---- method for processing from a service ----
-        var serviceInput = hd.protoObj['root'].lookup(hd.protoObj['methods'][nameProtoMethod]['service']);
+        //try to crawl the fields in the protobuf....
+        var numFields = 0;
+        $.each(msgOutput.fields, function(name, val) {           //collect field names
+            if (val.repeated) {     //indicates it's a repeated field (likely an array)
+                nameRepeated = name;      //save this as last repeated field (ideally there is just one)
+            }
+            numFields += 1;
+        });
 
-        function rpcImpl(method, requestData, callback) {
-            // perform the request using an HTTP request or a WebSocket for example
-            var responseData = ...;
-            // and call the callback with the binary response afterwards:
-            callback(null, responseData);
+        var typeNested = methodKeys[0]+"."+msgOutput.name;
+        if (nameRepeated) {
+            objOutput = objOutput[nameRepeated];  // dereference nested object
+            typeNested = methodKeys[0]+"."+msgOutput.fields[nameRepeated].type;
+        }
+        else {
+            objOutput = [objOutput];    // simple singleton wrapper for uniform code below
         }
-        var serviceCall = serviceInput.create(rpcImpl, false, false); //request dlimited? response delimited?
 
-        serviceCall.sayHello(sendPayload).then(response) {
-            console.log('Greeting:', response.message);
+        //grab the nested array type and print out the fields of interest
+        var msgOutputNested = hd.protoObj[methodKeys[0]]['root'].lookupType(typeNested);
+        //console.log(msgOutputNested);
+        var domTable = $("<tr />");
+        var arrNames = [];
+        $.each(msgOutputNested.fields, function(name, val) {           //collect field names
+            var nameClean = val.name;
+            if (nameClean != 'imageBinary') {
+                domTable.append($("<th />").html(nameClean));
+                arrNames.push(nameClean);
+            }
         });
-        // ---------------------------
-        */
-
-        //downloadBlob(sendPayload, 'protobuf.bin', 'application/octet-stream');
-        // NOTE: TO TEST THIS BINARY BLOB, use some command-line magic like this...
-        //  protoc --decode=mMJuVapnmIbrHlZGKyuuPDXsrkzpGqcr.FaceImage model.proto < protobuf.bin
-        $("#protoInput").prop("disabled",false);
-        hd.protoPayloadInput = sendPayload;
-
-        // append our encoded chunk
-        //console.log(sendPayload);
-        //console.log(typeof(blob.type));
-        // console.log(nameProtoMethod);
-        request.setRequestHeader("Content-type", "text/plain;charset=UTF-8");
-        request.responseType = 'arraybuffer';
-    }
-    else {
-        var blob = dataURItoBlob(dataURL, false);
-        sendPayload = new FormData();
-        sendPayload.append("image_binary", blob);
-        sendPayload.append("mime_type", blob.type);
-    }
-    //$(dstImg).addClaas('workingImage').siblings('.spinner').remove().after($("<span class='spinner'>&nbsp;</span>"));
-    $(document.body).data('hdparams').imageIsWaiting = true;
-    var $dstImg = $(dstImg);
-    if ($dstImg.attr('src')=='') {
-        $dstImg.attr('src', dataPlaceholder);
-        //$(dstImg).addClass('workingImage').attr('src', dataPlaceholder);
-    }
-
-    hd.imageIsWaiting = true;
-    request.onreadystatechange=function() {
-        if (request.readyState==4 && request.status>=200 && request.status<300) {
-            if (methodKeys!=null) {     //valid protobuf type?
-                //console.log(request);
-                var bodyEncodedInString = new Uint8Array(request.response);
-                //console.log(bodyEncodedInString);
-                //console.log(bodyEncodedInString.length);
-                $("#protoOutput").prop("disabled",false);
-                hd.protoPayloadOutput = bodyEncodedInString;
-
-                // ---- method for processing from a type ----
-                var msgOutput = hd.protoObj[methodKeys[0]]['root'].lookupType(hd.protoObj[methodKeys[0]]['methods'][methodKeys[1]]['typeOut']);
-                var objOutput = null;
-                try {
-                    objOutput = msgOutput.decode(hd.protoPayloadOutput);
-                }
-                catch(err) {
-                    var errStr = "Error: Failed to parse protobuf response, was the right method chosen? (err: "+err.message+")";
-                    console.log(errStr);
-                    domResult.html(errStr);
-                    hd.imageIsWaiting = false;
-                    return false;
+        domTable = $("<table />").append(domTable);     // create embedded table
+
+        // loop through all members of array to do two things:
+        //  (1) find the biggest/best image
+        //  (2) print out the textual fields
+        var objBest = null;
+        $.each(objOutput, function(idx, val) {
+            if ('imageBinary' in val) {
+                // at this time, we only support ONE output image, so we will loop through
+                //  to grab the largest image (old code could grab the one with region == -1)
+                if (objBest==null || val.imageBinary.length>objBest.imageBinary.length) {
+                    objBest = val;
                 }
-                var nameRepeated = null;
-
-                // NOTE: this code expects one top-level item to be an array of nested results
-                //  e.g.   ImageSet [ Image{mime_type, image_binary}, .... ]
-                //  e.g.   DetectionFrameSet [ DetectionFrame{x, y, ...., mime_type, image_binary}, .... ]
-
-                //try to crawl the fields in the protobuf....
-                var numFields = 0;
-                $.each(msgOutput.fields, function(name, val) {           //collect field names
-                    if (val.repeated) {     //indicates it's a repeated field (likely an array)
-                        nameRepeated = name;      //save this as last repeated field (ideally there is just one)
-                    }
-                    numFields += 1;
-                });
-                if (numFields > 1) {
-                    var errStr = "Error: Expected array/repeated structure in response, but got non-flat array result ("+numFields+" fields)";
-                    console.log(errStr);
-                    domResult.html(errStr);
-                    hd.imageIsWaiting = false;
-                    return false;
-                }
-                var objRecv = objOutput[nameRepeated];
-
-                //grab the nested array type and print out the fields of interest
-                var typeNested = methodKeys[0]+"."+msgOutput.fields[nameRepeated].type;
-                var msgOutputNested = hd.protoObj[methodKeys[0]]['root'].lookupType(typeNested);
-                //console.log(msgOutputNested);
-                var domTable = $("<tr />");
-                var arrNames = [];
-                $.each(msgOutputNested.fields, function(name, val) {           //collect field names
-                    var nameClean = val.name;
-                    if (nameClean != 'imageBinary') {
-                        domTable.append($("<th />").html(nameClean));
-                        arrNames.push(nameClean);
-                    }
-                });
-                domTable = $("<table />").append(domTable);     // create embedded table
-
-                // loop through all members of array to do two things:
-                //  (1) find the biggest/best image
-                //  (2) print out the textual fields
-                var objBest = null;
-                $.each(objRecv, function(idx, val) {
-                    if ('imageBinary' in val) {
-                        // at this time, we only support ONE output image, so we will loop through
-                        //  to grab the largest image (old code could grab the one with region == -1)
-                        if (objBest==null || val.imageBinary.length>objBest.imageBinary.length) {
-                            objBest = val;
-                        }
-                    }
-
-                    var domRow = $("<tr />");
-                    $.each(arrNames, function(idx, name) {      //collect data from each column
-                        domRow.append($("<td />").html(val[name]));
-                    });
-                    domTable.append(domRow);
-                });
-                domResult.empty().append($("<strong />").html("Results")).show();
-                domResult.append(domTable);
-
-
-                //did we find an image? show it now!
-                if (objBest != null) {
-                    var strImage = btoa(String.fromCharCode.apply(null, objBest.imageBinary));
-                    $dstImg.attr('src', "data:"+objBest.mimeType+";base64,"+strImage).removeClass('workingImage');
-                }
-                else {
-                    var errStr = "Error: No valid image data was found, aborting display.";
-                    console.log(errStr);
-                    domResult.html(errStr);
-                    hd.imageIsWaiting = false;
-                    return false;
-                }
-            }
-            else {       //legacy code where response was in base64 encoded image...
-                var responseJson = $.parseJSON(request.responseText);
-                var respImage = responseJson[0];
-                // https://stackoverflow.com/questions/21227078/convert-base64-to-image-in-javascript-jquery
-                $dstImg.attr('src', "data:"+respImage['mime_type']+";base64,"+respImage['image_binary']).removeClass('workingImage');
-                //genClassTable($.parseJSON(request.responseText), dstDiv);
             }
-            hd.imageIsWaiting = false;
-        }
-       }
-       request.send(sendPayload);
-       $(document.body).data('hdparams').imageIsWaiting = false;
-}
-
 
-/**
- * convert base64/URLEncoded data component to raw binary data held in a string
- *
- * Stoive, http://stackoverflow.com/questions/4998908/convert-data-uri-to-file-then-append-to-formdata
- */
-function dataURItoBlob(dataURI, wantBytes) {
-    // convert base64/URLEncoded data component to raw binary data held in a string
-    var byteString;
-    if (dataURI.split(',')[0].indexOf('base64') >= 0)
-        byteString = atob(dataURI.split(',')[1]);
-    else
-        byteString = unescape(dataURI.split(',')[1]);
-
-    // separate out the mime component
-    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
-
-    // write the bytes of the string to a typed array
-    var ia = new Uint8Array(byteString.length);
-    for (var i = 0; i < byteString.length; i++) {
-        ia[i] = byteString.charCodeAt(i);
+            var domRow = $("<tr />");
+            $.each(arrNames, function(idx, name) {      //collect data from each column
+                domRow.append($("<td />").html(val[name]));
+            });
+            domTable.append(domRow);
+        });
+        dstDiv.empty().append($("<strong />").html("Results")).show();
+        dstDiv.append(domTable);
+
+        //did we find an image? show it now!
+        if (objBest != null) {
+            //some images are too big for direct btoa/array processing...
+            //dstImg.attr('src', "data:"+objBest.mimeType+";base64,"+strImage).removeClass('workingImage');
+            dstImg.attr('src', BlobToDataURI(objBest.imageBinary)).removeClass('workingImage');
+        }
+        else {
+            var errStr = "Error: No valid image data was found, aborting display.";
+            console.log(errStr);
+            dstDiv.html(errStr);
+            return false;
+        }
     }
-    //added for returning bytes directly
-    if (wantBytes) {
-        return {'bytes':ia, 'type':mimeString};
+    else {       //legacy code where response was in base64 encoded image...
+        var responseJson = $.parseJSON(data);
+        var respImage = responseJson[0];
+        // https://stackoverflow.com/questions/21227078/convert-base64-to-image-in-javascript-jquery
+        dstImg.attr('src', "data:"+respImage['mime_type']+";base64,"+respImage['image_binary']).removeClass('workingImage');
     }
-    return new Blob([ia], {type:mimeString});
 }
-
-function Uint8ToString(u8a){
-  var CHUNK_SZ = 0x8000;
-  var c = [];
-  for (var i=0; i < u8a.length; i+=CHUNK_SZ) {
-    c.push(String.fromCharCode.apply(null, u8a.subarray(i, i+CHUNK_SZ)));
-  }
-  return c.join("");
-}
-
-
-// ----- diagnostic tool to download binary blobs ----
-function downloadBlobOut() {
-    return downloadBlob($(document.body).data('hdparams').protoPayloadOutput, "protobuf.out.bin");
-}
-
-function downloadBlobIn() {
-    return downloadBlob($(document.body).data('hdparams').protoPayloadInput, "protobuf.in.bin");
-}
-
-//  https://stackoverflow.com/a/33622881
-function downloadBlob(data, fileName, mimeType) {
-  //if there is no data, filename, or mime provided, make our own
-  if (!data)
-      data = $(document.body).data('hdparams').protoPayloadInput;
-  if (!fileName)
-      fileName = "protobuf.bin";
-  if (!mimeType)
-      mimeType = "application/octet-stream";
-
-  var blob, url;
-  blob = new Blob([data], {
-    type: mimeType
-  });
-  url = window.URL.createObjectURL(blob);
-  downloadURL(url, fileName, mimeType);
-  setTimeout(function() {
-    return window.URL.revokeObjectURL(url);
-  }, 1000);
-};
-
-function downloadURL(data, fileName) {
-  var a;
-  a = document.createElement('a');
-  a.href = data;
-  a.download = fileName;
-  document.body.appendChild(a);
-  a.style = 'display: none';
-  a.click();
-  a.remove();
-};
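Note: the display path above calls a BlobToDataURI helper that is not part of this hunk; it ships in the new web_demo/demo-framework.js. A minimal sketch of such a helper, assuming a Uint8Array input and a JPEG default mime type (the real implementation may determine the mime type differently), follows the chunked-conversion idea used by the removed Uint8ToString function:

function BlobToDataURI(u8a, mimeType) {
    var CHUNK_SZ = 0x8000;   // convert in chunks to avoid call-stack limits on large images
    var parts = [];
    for (var i = 0; i < u8a.length; i += CHUNK_SZ) {
        parts.push(String.fromCharCode.apply(null, u8a.subarray(i, i + CHUNK_SZ)));
    }
    return "data:" + (mimeType || "image/jpeg") + ";base64," + btoa(parts.join(""));
}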
diff --git a/web_demo/jquery-ajax-native.js b/web_demo/jquery-ajax-native.js
new file mode 100755 (executable)
index 0000000..cbbf4db
--- /dev/null
@@ -0,0 +1,173 @@
+//     jQuery Ajax Native Plugin
+
+//     (c) 2015 Tarik Zakaria Benmerar, Acigna Inc.
+//      jQuery Ajax Native Plugin may be freely distributed under the MIT license.
+(function (root, factory) {
+    if (typeof define === 'function' && define.amd) {
+        // AMD. Register as an anonymous module.
+        define(['jquery'], factory);
+    } else if (typeof exports === 'object') {
+        // Node. Does not work with strict CommonJS, but
+        // only CommonJS-like environments that support module.exports,
+        // like Node.
+        module.exports = factory(require('jquery'));
+    } else {
+        // Browser globals (root is window)
+        factory(root.jQuery);
+    }
+}(this, function ( $ ) {
+    var ajaxSettings = $.ajaxSettings;
+    ajaxSettings.responseFields.native = 'responseNative';
+    ajaxSettings.converters[ '* native' ] = true;
+    var support = {},
+        xhrId = 0,
+        xhrSuccessStatus = {
+            // file protocol always yields status code 0, assume 200
+            0: 200,
+            // Support: IE9
+            // #1450: sometimes IE returns 1223 when it should be 204
+            1223: 204
+        },
+        xhrCallbacks = {},
+        xhrSupported = jQuery.ajaxSettings.xhr();
+    // Support: IE9
+    // Open requests must be manually aborted on unload (#5280)
+    if ( window.ActiveXObject ) {
+        $( window ).on( "unload", function() {
+            for ( var key in xhrCallbacks ) {
+                xhrCallbacks[ key ]();
+            }
+        });
+    }
+    support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported );
+    support.ajax = xhrSupported = !!xhrSupported;
+
+    //Native Data Type Ajax Transport
+    $.ajaxTransport('native', function ( options ) {
+        var callback;
+        // Cross domain only allowed if supported through XMLHttpRequest
+        if ( support.cors || xhrSupported && !options.crossDomain ) {
+            return {
+                send: function( headers, complete ) {
+                    var i,
+                        xhr = options.xhr(),
+                        id = ++xhrId,
+                        responses = {};
+
+                    xhr.open( options.type, options.url, options.async, options.username, options.password );
+
+                    // Apply custom fields if provided
+                    if ( options.xhrFields ) {
+                        for ( i in options.xhrFields ) {
+                            xhr[ i ] = options.xhrFields[ i ];
+                        }
+                    }
+
+                    // Override mime type if needed
+                    if ( options.mimeType && xhr.overrideMimeType ) {
+                        xhr.overrideMimeType( options.mimeType );
+                    }
+
+                    // X-Requested-With header
+                    // For cross-domain requests, seeing as conditions for a preflight are
+                    // akin to a jigsaw puzzle, we simply never set it to be sure.
+                    // (it can always be set on a per-request basis or even using ajaxSetup)
+                    // For same-domain requests, won't change header if already provided.
+                    if ( !options.crossDomain && !headers["X-Requested-With"] ) {
+                        headers["X-Requested-With"] = "XMLHttpRequest";
+                    }
+
+                    // Set headers
+                    for ( i in headers ) {
+                        xhr.setRequestHeader( i, headers[ i ] );
+                    }
+
+                    // Callback
+                    callback = function( type ) {
+                        return function() {
+                            if ( callback ) {
+                                delete xhrCallbacks[ id ];
+                                callback = xhr.onload = xhr.onerror = null;
+
+                                if ( type === "abort" ) {
+                                    xhr.abort();
+                                } else if ( type === "error" ) {
+                                    complete(
+                                        // file: protocol always yields status 0; see #8605, #14207
+                                        xhr.status,
+                                        xhr.statusText
+                                    );
+                                } else {
+                                    // The native response associated with the responseType
+                                    // Stored in the xhr.response attribute (XHR2 Spec)
+                                    if ( xhr.response ) {
+                                        responses.native = xhr.response;
+                                    }
+
+                                    complete(
+                                        xhrSuccessStatus[ xhr.status ] || xhr.status,
+                                        xhr.statusText,
+                                        responses,
+                                        xhr.getAllResponseHeaders()
+                                    );
+                                }
+                            }
+                        };
+                    };
+
+                    // Listen to events
+                    xhr.onload = callback();
+                    xhr.onerror = callback("error");
+
+                    // Create the abort callback
+                    callback = xhrCallbacks[ id ] = callback("abort");
+
+                    try {
+                        // Do send the request (this may raise an exception)
+                        xhr.send( options.hasContent && options.data || null );
+                    } catch ( e ) {
+                        // #14683: Only rethrow if this hasn't been notified as an error yet
+                        if ( callback ) {
+                            throw e;
+                        }
+                    }
+                },
+
+                abort: function() {
+                    if ( callback ) {
+                        callback();
+                    }
+                }
+            };
+        }
+    });
+
+
+    //$.getNative wrapper
+    $.getNative = function ( url, callback ) {
+        return $.ajax({
+            dataType: 'native',
+            url: url,
+            xhrFields: {
+                responseType: 'arraybuffer'
+            },
+            success: callback
+        });
+    };
+
+    //$.getBlob wrapper
+    $.getBlob = function ( url, callback ) {
+        return $.ajax({
+            dataType: 'native',
+            url: url,
+            xhrFields: {
+                responseType: 'blob'
+            },
+            success: callback
+        });
+    };
+
+    //Return the jQuery Object
+    return $;
+
+}));
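The plugin above adds a 'native' dataType so jQuery can hand back the raw XHR response (e.g. an ArrayBuffer) instead of text. A usage sketch of how the reworked demo might POST the encoded protobuf payload through it (sendPayload, hd.classificationServer, methodKeys and processResult follow the demo code in this change; the exact wiring in demo-framework.js may differ):

$.ajax({
    type: "POST",
    url: hd.classificationServer,            // the configured model endpoint
    data: sendPayload,                       // Uint8Array from msgInput.encode(...).finish()
    processData: false,                      // do not let jQuery serialize the bytes
    contentType: "text/plain;charset=UTF-8",
    dataType: "native",                      // use the native transport defined above
    xhrFields: { responseType: "arraybuffer" },
    success: function (data) {               // data is the raw ArrayBuffer response
        processResult(data, $("#resultText"), methodKeys, $("#dstImg"), imgPlaceholder);
    }
});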
index ed664f8..b1d913c 100644 (file)
@@ -1,12 +1,8 @@
 syntax = "proto3";
-package nnjTkneQgiZzmyiPFLQEudNBTxOvOBrK;
+package wvHeYuPGGFSeCSLULBxxJquKipiUljuZ;
 
 service Model {
-  rpc detect (ImageSet) returns (DetectionFrameSet);
-}
-
-message ImageSet {
-  repeated Image Images = 1;
+  rpc detect (Image) returns (DetectionFrameSet);
 }
 
 message Image {
index fcc0a50..4bc53d7 100644 (file)
@@ -2,8 +2,8 @@ syntax = "proto3";
 package HipTviKTkIkcmyuMCIAIDkeOOQQYyJne;
 
 service Model {
-  rpc pixelate (DetectionFrameSet) returns (ImageSet);
-  rpc detect (ImageSet) returns (ImageSet);
+  rpc pixelate (DetectionFrameSet) returns (Image);
+  rpc detect (Image) returns (Image);
 }
 
 message DetectionFrameSet {
@@ -21,10 +21,6 @@ message DetectionFrame {
   bytes image_binary = 8;
 }
 
-message ImageSet {
-  repeated Image Images = 1;
-}
-
 message Image {
   string mime_type = 1;
   bytes image_binary = 2;
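The .proto changes above capture the headline i/o change of this commit: the ImageSet wrapper (repeated Image) is dropped, so clients now encode a single Image message. A sketch of the corresponding client-side payload, using the same protobuf.js chain as the removed doPostImage code (names such as blob, hd.protoObj and methodKeys follow that code; the exact wiring in image-classes.js may differ):

// old payload shape (removed):  {'Images': [{ "mimeType": blob.type, "imageBinary": blob.bytes }]}
// new payload shape: one Image message, with proto fields camelCased by protobuf.js
var inputPayload = { "mimeType": blob.type, "imageBinary": blob.bytes };
var msgInput = hd.protoObj[methodKeys[0]]['root'].lookupType(
    hd.protoObj[methodKeys[0]]['methods'][methodKeys[1]]['typeIn']);   // resolves to <package>.Image
var errMsg = msgInput.verify(inputPayload);                            // optional sanity check
var sendPayload = msgInput.encode(msgInput.create(inputPayload)).finish();   // Uint8Array ready to POST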