- first pass as rename and dependency update
[face-privacy-filter.git] / face_privacy_filter / transform_detect.py
1 #! python
2 # -*- coding: utf-8 -*-
3 """
4 Wrapper for face detection task; wrapped in classifier for pipeline terminus
5 """
6 import cv2
7 import os
8 import pandas as pd
9 import numpy as np
10 from sklearn.base import BaseEstimator, ClassifierMixin
11 import base64
12
13
class FaceDetectTransform(BaseEstimator, ClassifierMixin):
    """
    A sklearn estimator mixin that detects faces in images.

    Input is indexable like a DataFrame with columns [mime_type, image_binary],
    where image_binary is a base64-encoded image.  Output is a DataFrame with
    one row per detected face (columns: image, region, x, y, w, h, mime_type,
    image_binary) plus, when ``include_image`` is True, one leading row per
    source image carrying the original bytes, flagged with region == -1.
    """
    CASCADE_DEFAULT_FILE = "data/haarcascade_frontalface_alt.xml.gz"
    COL_FACE_X = 'x'
    COL_FACE_Y = 'y'
    COL_FACE_W = 'w'
    COL_FACE_H = 'h'
    COL_REGION_IDX = 'region'
    COL_IMAGE_IDX = 'image'
    COL_IMAGE_MIME = 'mime_type'
    COL_IMAGE_DATA = 'image_binary'
    VAL_REGION_IMAGE_ID = -1  # region index marking a pass-through image row (no face)

    def __init__(self, cascade_path=None, include_image=True):
        self.include_image = include_image    # should output transform include image?
        self.cascade_path = cascade_path    # abs path outside of module
        self.cascade_obj = None  # late-load this component
        # Canonical output column order.  BUG FIX: predict() previously read
        # self.output_names_, which was never assigned anywhere in the class
        # and raised AttributeError on first use.
        self.output_names_ = list(FaceDetectTransform.generate_out_dict().keys())

    def get_params(self, deep=False):
        """Return constructor parameters (sklearn estimator API)."""
        return {'include_image': self.include_image}

    @staticmethod
    def generate_in_df(path_image="", bin_stream=b""):
        """Munge a file path or raw stream into a one-row input DataFrame.

        If ``path_image`` exists on disk its bytes replace ``bin_stream``.
        """
        if path_image and os.path.exists(path_image):
            # context manager closes the handle (previously leaked an open file)
            with open(path_image, 'rb') as f:
                bin_stream = f.read()
        return pd.DataFrame([['image/jpeg', bin_stream]],
                            columns=[FaceDetectTransform.COL_IMAGE_MIME,
                                     FaceDetectTransform.COL_IMAGE_DATA])

    @staticmethod
    def generate_out_image(row, path_image):
        """Write the image bytes held in an output row to ``path_image``."""
        with open(path_image, 'wb') as f:
            f.write(row[FaceDetectTransform.COL_IMAGE_DATA][0])

    @staticmethod
    def generate_out_dict(idx=VAL_REGION_IMAGE_ID, x=0, y=0, w=0, h=0, image=0, bin_stream=b"", media=""):
        """Build one output-row dict; defaults describe a whole-image (non-face) row."""
        return {FaceDetectTransform.COL_IMAGE_IDX: image, FaceDetectTransform.COL_REGION_IDX: idx,
                FaceDetectTransform.COL_FACE_X: x, FaceDetectTransform.COL_FACE_Y: y,
                FaceDetectTransform.COL_FACE_W: w, FaceDetectTransform.COL_FACE_H: h,
                FaceDetectTransform.COL_IMAGE_MIME: media, FaceDetectTransform.COL_IMAGE_DATA: bin_stream}

    @staticmethod
    def suppress_image(df):
        """Blank the non-region columns on pass-through image rows (region == -1)."""
        # (removed the accidental duplicate w/h entries from the keep list)
        keep_col = [FaceDetectTransform.COL_FACE_X, FaceDetectTransform.COL_FACE_Y,
                    FaceDetectTransform.COL_FACE_W, FaceDetectTransform.COL_FACE_H,
                    FaceDetectTransform.COL_REGION_IDX, FaceDetectTransform.COL_IMAGE_IDX]
        blank_cols = [col for col in df.columns if col not in keep_col]
        # set columns that aren't in our known column list to empty strings; search where face index==-1 (no face)
        df.loc[df[FaceDetectTransform.COL_REGION_IDX] == FaceDetectTransform.VAL_REGION_IMAGE_ID, blank_cols] = ""
        return df

    @property
    def _acumos_type_in(self):
        """Custom input type for this processing transformer."""
        from acumos.modeling import List, create_namedtuple
        # base input for detect is image itself
        ImageRow = create_namedtuple('ImageRow', [(FaceDetectTransform.COL_IMAGE_MIME, str),
                                                  (FaceDetectTransform.COL_IMAGE_DATA, bytes)])
        # represents a collection of flattened image arrays
        return List[ImageRow]

    @property
    def _acumos_type_out(self):
        """Custom output type for this processing transformer."""
        from acumos.modeling import List, create_namedtuple
        output_dict = FaceDetectTransform.generate_out_dict()
        tuple_types = [(k, type(v)) for k, v in output_dict.items()]
        # base output for detect is several parts
        DetectionRow = create_namedtuple('DetectionRow', tuple_types)
        # represents a collection of flattened image arrays
        return List[DetectionRow]

    def score(self, X, y=None):
        """No meaningful score for a pure transform; fixed at 0 (sklearn API)."""
        return 0

    def fit(self, X, y=None):
        """Stateless transform: nothing to fit."""
        return self

    def predict(self, X, y=None):
        """
        Detect faces in each input image.

        Assumes X indexes like [[mime_type, binary_string] ... ] where
        mime_type is an image-specifying mime type and binary_string is the
        base64-encoded image bytes.  Returns a DataFrame of detections
        (see class docstring for the column layout).
        """
        self._load_cascade()  # lazily create the model on first call

        df_parts = []
        for image_idx in range(len(X)):
            image_byte = X[FaceDetectTransform.COL_IMAGE_DATA][image_idx]
            if isinstance(image_byte, str):  # accept str or bytes payloads
                image_byte = image_byte.encode()
            image_byte = bytearray(base64.b64decode(image_byte))
            file_bytes = np.asarray(image_byte, dtype=np.uint8)
            img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
            faces = self.detect_faces(img)

            rows = []
            if self.include_image:  # create and append the pass-through image row
                dict_image = FaceDetectTransform.generate_out_dict(w=img.shape[1], h=img.shape[0], image=image_idx)
                dict_image[FaceDetectTransform.COL_IMAGE_MIME] = X[FaceDetectTransform.COL_IMAGE_MIME][image_idx]
                dict_image[FaceDetectTransform.COL_IMAGE_DATA] = X[FaceDetectTransform.COL_IMAGE_DATA][image_idx]
                rows.append(dict_image)
            for idxF, face_rect in enumerate(faces):  # walk through detected faces
                rows.append(FaceDetectTransform.generate_out_dict(idxF, face_rect[0], face_rect[1],
                                                                  face_rect[2], face_rect[3], image=image_idx))
            df_parts.append(pd.DataFrame(rows, columns=self.output_names_))

        if not df_parts:  # empty input -> empty frame with the known columns
            return pd.DataFrame(columns=self.output_names_)
        # DataFrame.append and reindex_axis were removed from pandas;
        # build per-image frames with a fixed column order and concat once.
        return pd.concat(df_parts, ignore_index=True)

    def _load_cascade(self):
        """Lazily create the OpenCV cascade classifier (no-op once loaded)."""
        if self.cascade_obj is not None:
            return
        if self.cascade_path is not None:
            self.cascade_obj = cv2.CascadeClassifier(self.cascade_path)
        else:   # none provided, load what came with the package
            pathRoot = os.path.dirname(os.path.abspath(__file__))
            pathFile = os.path.join(pathRoot, FaceDetectTransform.CASCADE_DEFAULT_FILE)
            self.cascade_obj = cv2.CascadeClassifier(pathFile)

    def detect_faces(self, img):
        """Run the Haar cascade on ``img``; returns a sequence of (x, y, w, h) rects."""
        if self.cascade_obj is None:
            return []
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = self.cascade_obj.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        return faces