Algorithm:
- First, find images with large areas of skin-colored pixels (a minimal sketch of this step follows the list).
- Then, within those skin regions, find elongated areas and group them into candidate human limbs or connected groups of limbs, using specialized grouping modules that encode substantial prior knowledge of human body structure.
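The first step can be illustrated with a crude color threshold. The rule below is a well-known RGB heuristic chosen purely for illustration; it is an assumption, not the detector used in the original work:

```python
import numpy as np
from PIL import Image

def skin_mask(img):
    """Return a boolean mask of roughly skin-colored pixels
    using a common rule-of-thumb RGB threshold."""
    rgb = np.asarray(img.convert('RGB'), dtype=np.int16)
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    return ((r > 95) & (g > 40) & (b > 20)
            & (rgb.max(axis=-1) - rgb.min(axis=-1) > 15)
            & (np.abs(r - g) > 15) & (r > g) & (r > b))

img = Image.open('photo.jpg')  # hypothetical input file
mask = skin_mask(img)
print('skin-colored fraction: {:.1%}'.format(mask.mean()))
```

Images whose skin-colored fraction exceeds some threshold would then be handed to the limb-grouping stage.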
```python
# NSFW occlusion experiment
from io import BytesIO

import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageDraw
import requests
import scipy.sparse as sp
from clarifai.client import ClarifaiApi

CLARIFAI_APP_ID = '...'
CLARIFAI_APP_SECRET = '...'
clarifai = ClarifaiApi(app_id=CLARIFAI_APP_ID,
                       app_secret=CLARIFAI_APP_SECRET,
                       base_url='https://api.clarifai.com')


def batch_request(imgs, bboxes):
    """Use the API to tag a batch of occluded images."""
    assert len(bboxes) < 128
    # Convert to JPEG bytes (JPEG has no alpha channel, so drop it first).
    buffers = []
    for img in imgs:
        buf = BytesIO()
        img.convert('RGB').save(buf, format='JPEG')
        buffers.append(buf)
    # Call the API and parse the response. The "sfw" probability rises
    # when NSFW content is occluded, so it serves as the score for each box.
    output = []
    response = clarifai.tag_images(buffers, model='nsfw-v1.0')
    for result, bbox in zip(response['results'], bboxes):
        sfw_idx = result['result']['tag']['classes'].index('sfw')
        sfw_score = result['result']['tag']['probs'][sfw_idx]
        output.append((sfw_score, bbox))
    return output


def build_bboxes(img, boxsize=72, stride=25):
    """Generate all the bboxes used in the experiment."""
    width = height = boxsize
    bboxes = []
    for top in range(0, img.size[1], stride):
        for left in range(0, img.size[0], stride):
            bboxes.append((left, top, left + width, top + height))
    return bboxes


def draw_occlusions(img, bboxes):
    """Overlay each bbox on its own copy of the test image."""
    images = []
    for bbox in bboxes:
        img2 = img.copy()
        draw = ImageDraw.Draw(img2)
        draw.rectangle(bbox, fill='gray')
        images.append(img2)
    return images


def alpha_composite(img, heatmap):
    """Blend a PIL image and a numpy heatmap in a nice way."""
    if img.mode == 'RGB':
        img.putalpha(100)
    cmap = plt.get_cmap('jet')
    rgba = cmap(heatmap)
    rgba[:, :, 3] = 0.7  # alpha overlay
    rgba_img = Image.fromarray(np.uint8(rgba * 255))
    return Image.blend(img, rgba_img, 0.8)


def get_nsfw_occlude_mask(img, boxsize=64, stride=25):
    """Generate bboxes and occluded images, call the API, blend the results."""
    bboxes = build_bboxes(img, boxsize=boxsize, stride=stride)
    print('api calls needed: {}'.format(len(bboxes)))
    scored_bboxes = []
    batch_size = 125
    for i in range(0, len(bboxes), batch_size):
        bbox_batch = bboxes[i:i + batch_size]
        occluded_images = draw_occlusions(img, bbox_batch)
        scored_bboxes.extend(batch_request(occluded_images, bbox_batch))
    heatmap = np.zeros(img.size)
    sparse_masks = []
    for idx, (score, bbox) in enumerate(scored_bboxes):
        mask = np.zeros(img.size)
        mask[bbox[0]:bbox[2], bbox[1]:bbox[3]] = score
        sparse_masks.append(sp.csr_matrix(mask))
        heatmap = heatmap + (mask - heatmap) / (idx + 1)  # running mean
    return alpha_composite(img, 80 * np.transpose(heatmap)), sparse_masks


# Download the full Lena image.
r = requests.get('https://clarifai-img.s3.amazonaws.com/blog/len_full.jpeg')
img = Image.open(BytesIO(r.content))
img.putalpha(255)

# Set boxsize and stride (warning: a low stride leads to thousands of API calls).
boxsize = 64
stride = 48
blended, masks = get_nsfw_occlude_mask(img, boxsize=boxsize, stride=stride)

# Visualize.
blended.show()
```
A deconvolutional network (deconvnet) can be thought of as a convolutional network that uses the same components (filtering, pooling) in reverse: instead of mapping pixels to features, it maps features back to pixels. To study a specific activation of the convolutional network, we set all other activations in that layer to zero and pass the feature maps as input to the attached deconvnet layer. We then successively apply 1) unpooling, 2) rectification, and 3) filtering to reconstruct the activity in the layer below that gave rise to the chosen activation. The procedure is repeated until we reach the original pixel layer.
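As a minimal sketch of one such reconstruction step, the toy NumPy code below unpools using stored max-location "switches", rectifies, and then filters with the flipped kernel; the single-channel case and 2x2 pooling are simplifying assumptions, not the original implementation:

```python
import numpy as np
from scipy.signal import convolve2d

def unpool(pooled, switches, size=2):
    """Put each pooled value back at the max location recorded
    during the forward pass; all other positions stay zero."""
    h, w = pooled.shape
    out = np.zeros((h * size, w * size))
    for i in range(h):
        for j in range(w):
            di, dj = switches[i, j]  # argmax offsets from the forward pass
            out[i * size + di, j * size + dj] = pooled[i, j]
    return out

def deconv_step(feature_map, switches, kernel):
    x = unpool(feature_map, switches)   # 1) unpooling
    x = np.maximum(x, 0)                # 2) rectification (ReLU)
    # 3) filtering with the flipped version of the forward kernel
    return convolve2d(x, kernel[::-1, ::-1], mode='same')
```

Repeating `deconv_step` layer by layer projects the chosen activation back down to pixel space.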
[...]
The procedure is similar to backpropagating a single strong activation (as opposed to the usual gradients): for example, computing ∂h/∂x, where h is the element of the feature map with the strong activation and x is the original input image.
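In a modern autograd framework, the same quantity can be computed by backpropagating only the chosen activation to the input. A sketch in PyTorch; the network, the layer cut-off, and picking the maximal activation are arbitrary assumptions for illustration:

```python
import torch
from torchvision import models

model = models.vgg16(weights='IMAGENET1K_V1').features.eval()

x = torch.rand(1, 3, 224, 224, requires_grad=True)  # stand-in for the image
feats = model[:10](x)   # feature maps of an intermediate conv layer

# Backpropagate only the strongest activation h: all other elements
# of the feature map contribute nothing to the gradient.
h = feats.max()
h.backward()
grad = x.grad           # dh/dx, with the same shape as the input image
```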
Source: https://habr.com/ru/post/282071/