Run adb devices in the Ubuntu console (the phone itself must be connected via USB, and adb must be installed: sudo apt-get install android-tools-adb):

malefic@lepeshka:~$ adb devices
List of devices attached
64cb5c59 recovery
malefic@lepeshka:~$ adb sideload SuperSU_Bootloader_FIXED.zip
sending: 'sideload' 100%
malefic@lepeshka:~$ adb devices
List of devices attached
64cb5c59 recovery
malefic@lepeshka:~$ adb shell mount -o rw -t ext4 /dev/block/platform/msm_sdcc.1/by-name/system /system
malefic@lepeshka:~$ adb shell "echo \"persist.service.adb.enable=1\" >> /system/build.prop"
malefic@lepeshka:~$ adb reboot
malefic@lepeshka:~$ adb devices
List of devices attached
64cb5c59 device
On the language selection screen, we tap in sequence: the lower left corner (above the emergency call button), the lower right corner, the lower left corner again, the lower right corner again, and then press volume up.
malefic@lepeshka:~$ adb shell input tap 10 1150
malefic@lepeshka:~$ adb shell input tap 710 1150
malefic@lepeshka:~$ adb shell input tap 10 1150
malefic@lepeshka:~$ adb shell input tap 710 1150
malefic@lepeshka:~$ adb shell input tap 50 600
malefic@lepeshka:~$ adb shell input tap 650 600
malefic@lepeshka:~$ adb shell input swipe 100 100 500 100
malefic@lepeshka:~$ adb install droid+VNC+server+v1.1RC0.apk
4055 KB/s (2084419 bytes in 0.501s)
pkg: /data/local/tmp/droid+VNC+server+v1.1RC0.apk
Success
malefic@lepeshka:~$ adb shell input keyevent 26
malefic@lepeshka:~$ adb shell input swipe 100 100 500 100
malefic@lepeshka:~$ adb shell am start -a android.intent.action.MAIN -n org.onaips.vnc/.MainActivity
malefic@lepeshka:~$ adb shell input tap 50 900
malefic@lepeshka:~$ adb shell input tap 350 300
malefic@lepeshka:~$ adb shell input tap 600 1000
malefic@lepeshka:~$ adb forward tcp:5801 tcp:5801
malefic@lepeshka:~$ adb forward tcp:5901 tcp:5901
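With both ports forwarded, the phone's screen should now be reachable from the desktop: droid VNC server accepts VNC clients on the forwarded port 5901 and serves its browser-based viewer on port 5801.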
The main script ties everything together:

import sys
import os

from models import getImage, sess
from gdrive import getImagesFromGDrive, createImageFromGDriveObject

if __name__ == '__main__':
    # fetch the list of not yet recognized images from Google Drive
    images, http = getImagesFromGDrive()

    for img_info in images:
        # download the image and convert it to the OpenCV format
        img = createImageFromGDriveObject(img_info, http)
        file_name = img_info['title']

        try:
            # find the image record in the database or create a new one
            dbimage = getImage(os.path.basename(file_name))
            dbimage.img = img
            dbimage.download_url = img_info["downloadUrl"]
            dbimage.img_link = img_info['webContentLink'].replace('&export=download', '')
        except ValueError as e:
            print e
            continue

        # recognize the readings
        dbimage.identifyDigits()
        sess.commit()
Here:

getImagesFromGDrive - a function that returns a list of unrecognized images from Google Drive
createImageFromGDriveObject - a function that downloads the image itself and converts it into the OpenCV format
getImage - a function that looks up an image record in the database and creates it if there is none
identifyDigits - a method that recognizes the readings on a given image
http - an authorized client for access to Google Drive (read in detail about access to the Drive API here)
sess - the database session object (the SQLAlchemy library is used)

Getting the list of new images from Google Drive:

import os
from datetime import tzinfo, timedelta, date
from dateutil.relativedelta import relativedelta
from apiclient.discovery import build

from models import getLastRecognizedImage

def getImagesFromGDrive():
    # id of the Google Drive folder with the photos
    FOLDER_ID = '0B5mI3ROgk0mJcHJKTm95Ri1mbVU'
    # authorized http client
    http = getAuthorizedHttp()
    # Drive API service
    drive_service = build('drive', 'v2', http=http)
    # move images older than a month to the trash
    month_ago = date.today() + relativedelta(months=-1)
    q = "'%s' in parents and mimeType = 'image/jpeg' and trashed = false and modifiedDate<'%s'" % (FOLDER_ID, month_ago.isoformat())
    files = drive_service.files().list(q=q, maxResults=1000).execute()
    for image in files.get('items'):
        drive_service.files().trash(fileId=image['id']).execute()
    # find the last image whose readings were recognized
    last_image = getLastRecognizedImage()
    # request only images newer than that one
    page_size = 1000
    result = []
    pt = None
    # the API returns at most 1000 files per request, so if there
    # are more than 1000 they are fetched page by page
    while True:
        q = "'%s' in parents and trashed = false and mimeType = 'image/jpeg' and modifiedDate>'%s'" % (FOLDER_ID, last_image.check_time.replace(tzinfo=TZ()).isoformat('T'))
        files = drive_service.files().list(q=q, maxResults=page_size, pageToken=pt).execute()
        result.extend(files.get('items'))
        pt = files.get('nextPageToken')
        if not pt:
            break
    # process the oldest images first
    result.reverse()
    return result, http
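The code above references a TZ class (a tzinfo subclass) whose definition did not survive here; a minimal sketch with an assumed fixed UTC offset (substitute your own):

class TZ(tzinfo):
    # fixed-offset timezone; the +4 hours offset is an assumption
    def utcoffset(self, dt):
        return timedelta(hours=4)
    def dst(self, dt):
        return timedelta(0)
    def tzname(self, dt):
        return "UTC+04"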
Authorization with Google is handled by getAuthorizedHttp:

import httplib2
import ConfigParser

from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage

def getAuthorizedHttp():
    # read CLIENT_ID and CLIENT_SECRET from config.ini
    config = ConfigParser.ConfigParser()
    config.read([os.path.dirname(__file__)+'/config.ini'])
    CLIENT_ID = config.get('gdrive', 'CLIENT_ID')
    CLIENT_SECRET = config.get('gdrive', 'CLIENT_SECRET')

    # OAuth 2.0 scope that will be authorized.
    # Check https://developers.google.com/drive/scopes for all available scopes.
    OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'

    # Redirect URI for installed apps
    REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'

    # stored credentials live in client_secrets.json
    storage = Storage(os.path.dirname(__file__) + '/client_secrets.json')
    credentials = storage.get()

    # if there are no stored credentials, run the authorization flow
    if not credentials:
        # Perform OAuth2.0 authorization flow.
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE, REDIRECT_URI)
        authorize_url = flow.step1_get_authorize_url()
        # the user opens the link in a browser and confirms access
        print 'Go to the following link in your browser: ' + authorize_url
        # then pastes the verification code back
        code = raw_input('Enter verification code: ').strip()
        credentials = flow.step2_exchange(code)
        # save the credentials for the next launches
        storage.put(credentials)

    # create an authorized http client
    http = httplib2.Http()
    credentials.authorize(http)
    return http
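The config.ini read above carries the OAuth client pair; a minimal example with placeholder values:

[gdrive]
CLIENT_ID = your-client-id.apps.googleusercontent.com
CLIENT_SECRET = your-client-secret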
On the first launch, the script prints an authorization link and asks for a verification code; the obtained credentials are stored in the client_secrets.json file, and it will not ask anything on subsequent launches.

Downloading an image and converting it to the OpenCV format:

import cv2
import numpy as np

def downloadImageFromGDrive(downloadUrl, http=None):
    if http == None:
        http = getAuthorizedHttp()
    # download the image body
    resp, content = http.request(downloadUrl)
    # convert the raw bytes to an OpenCV image
    img_array = np.asarray(bytearray(content), dtype=np.uint8)
    return cv2.imdecode(img_array, cv2.IMREAD_COLOR)

def createImageFromGDriveObject(img_info, http=None):
    return downloadImageFromGDrive(img_info['downloadUrl'], http)
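A quick usage sketch (mine, not from the article) that pulls the first pending image and prints its size:

from gdrive import getImagesFromGDrive, createImageFromGDriveObject

images, http = getImagesFromGDrive()
if images:
    img = createImageFromGDriveObject(images[0], http)
    # an OpenCV image is a numpy array: (height, width, channels)
    print img.shape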
The digit strip is cut out of the photo by the extractDigitsFromImage method:

def extractDigitsFromImage(self):
    img = self.img

    # the photo comes rotated by 90 degrees, turn it upright
    h, w, k = img.shape
    M = cv2.getRotationMatrix2D((w/2, h/2), 270, 1)
    img = cv2.warpAffine(img, M, (w, h))

    # crop the frame to a centered square
    img = img[0:h, (w-h)/2:h+(w-h)/2]
    h, w, k = img.shape
To get a reference point, I saved a distinctive fragment of the meter as a sample.jpg file and wrote the following code to find its coordinates:

    # load the template
    sample = cv2.imread(os.path.dirname(__file__)+"/sample.jpg")
    sample_h, sample_w, sample_k = sample.shape

    # find the template on the image
    res = cv2.matchTemplate(img, sample, cv2.TM_CCORR_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # the center of the found fragment
    x_center = max_loc[0] + sample_w/2
    y_center = max_loc[1] + sample_h/2

    # if the center sits too far to the right, cut off the left
    # fifth of the image and shift the center accordingly
    if x_center > w*0.6:
        offset = int(0.2*w)
        img = img[0:h, offset:w]
        h, w, k = img.shape
        x_center = x_center - offset
    # convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detect edges with the Canny algorithm
    edges = cv2.Canny(gray, 100, 200)
The HoughLines method also takes as parameters the step sizes of the search over the distance and the rotation angle, and a threshold value for the minimum number of points that must lie on a line. The smaller this threshold, the more lines the algorithm finds.

    # find lines
    lines = cv2.HoughLines(edges, 1, np.pi/180, threshold=100)
    # look for the nearest lines above and below the template center
    rho_below = rho_above = np.sqrt(h*h + w*w)
    line_above = None
    line_below = None
    for line in lines:
        rho, theta = line[0]
        sin = np.sin(theta)
        cos = np.cos(theta)
        # skip lines that are far from horizontal
        if (sin < 0.7):
            continue
        # rho of the parallel line passing through the template center
        rho_center = x_center*cos + y_center*sin
        # the nearest line above the center
        if rho_center > rho and rho_center - rho < rho_above:
            rho_above = rho_center - rho
            line_above = {"rho": rho, "theta": theta, "sin": sin, "cos": cos}
        # the nearest line below the center
        if rho_center < rho and rho - rho_center < rho_below:
            rho_below = rho - rho_center
            line_below = {"rho": rho, "theta": theta, "sin": sin, "cos": cos}

    # give up if one of the lines was not found
    if line_below == None or line_above == None:
        mylogger.warn("No lines found")
        return False

    # sanity check: the distances to the two lines must be comparable
    if rho_below/rho_above > 1.7 or rho_below/rho_above < 0.6:
        mylogger.warn("Wrong lines found: %f" % (rho_below/rho_above))
        return False
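For reference, HoughLines returns each line in normal form, rho = x*cos(theta) + y*sin(theta), which is why the distance from the template center to a line is compared against rho_center = x_center*cos + y_center*sin above. A small debugging sketch (mine, not part of the original pipeline) that draws the detected lines:

    # draw the found lines on a copy of the image for visual inspection
    debug = img.copy()
    for line in lines:
        rho, theta = line[0]
        cos_t, sin_t = np.cos(theta), np.sin(theta)
        # the point of the line closest to the origin
        x0, y0 = cos_t*rho, sin_t*rho
        # two distant points along the line direction (-sin, cos)
        p1 = (int(x0 - 2000*sin_t), int(y0 + 2000*cos_t))
        p2 = (int(x0 + 2000*sin_t), int(y0 - 2000*cos_t))
        cv2.line(debug, p1, p2, (0, 0, 255), 2)
    cv2.imwrite("/tmp/hough_debug.jpg", debug)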
    # rotate the image so that the found lines become horizontal
    M = cv2.getRotationMatrix2D((0, (line_below["rho"]-line_above["rho"])/2+line_above["rho"]), line_above["theta"]/np.pi*180-90, 1)
    img = cv2.warpAffine(img, M, (w, h))
    # crop everything above the upper and below the lower line
    img = img[int(line_above["rho"]):int(line_below["rho"]), 0:w]
    h, w, k = img.shape
    # binarize with an adaptive threshold
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thres = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
    # load the template of the right border of the digit strip
    sample_right = cv2.imread(os.path.dirname(__file__)+"/sample_right.jpg", cv2.IMREAD_GRAYSCALE)
    # find the template
    res = cv2.matchTemplate(thres, sample_right, cv2.TM_CCORR_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # the right border of the digits, with a small margin
    x_right = max_loc[0] - 6
    # close small holes with a morphological transformation
    kernel = np.ones((7,7), np.uint8)
    thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)
    # find the left border: walk from the left edge along the
    # middle row until the first black pixel
    x_left = 0
    while x_left < w:
        if thres[h/2, x_left] == 0:
            break
        x_left += 1
    # crop the digit strip between the found borders
    img = img[:, x_left:x_right]
    h, w, k = img.shape
    # check that the result has a plausible aspect ratio
    if float(w)/float(h) < 6.5 or float(w)/float(h) > 9.5:
        mylogger.warn("Image has bad ratio: %f" % (float(w)/float(h)))
        return False

    self.digits_img = img
    return True
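To tune the thresholds it helps to run the extraction on a local file. A hypothetical sketch, assuming the methods above are defined on the Image class from models.py and the file name matches the gaz.%Y-%m-%d.%H.%M.%S.jpg pattern it expects:

import cv2
from models import getImage

# the file name below is a made-up example matching the expected pattern
dbimage = getImage("gaz.2014-04-25.12.00.00.jpg")
dbimage.img = cv2.imread("/tmp/gaz.2014-04-25.12.00.00.jpg")
if dbimage.extractDigitsFromImage():
    # save the cropped digit strip for visual inspection
    cv2.imwrite("/tmp/digits_strip.jpg", dbimage.digits_img)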
The splitDigits method splits the digit strip found by the previous function into separate digits:

def splitDigits(self):
    # if the digit strip has not been extracted yet, do it now
    if None == self.digits_img:
        if not self.extractDigitsFromImage():
            return False

    img = self.digits_img
    h, w, k = img.shape
Each fragment is binarized with the adaptiveThreshold method; the parameters were chosen empirically:

    # cut the strip into 8 equal parts and walk over the first seven digits
    for i in range(1, 8):
        digit = img[0:h, (i-1)*w/8:i*w/8]
        dh, dw, dk = digit.shape

        # grayscale / binarize
        digit_gray = cv2.cvtColor(digit, cv2.COLOR_BGR2GRAY)
        digit_bin = cv2.adaptiveThreshold(digit_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 9, 0)
        # remove noise with a morphological opening
        kernel = np.ones((2,2), np.uint8)
        digit_bin = cv2.morphologyEx(digit_bin, cv2.MORPH_OPEN, kernel)
        # find the contours
        other, contours, hierarchy = cv2.findContours(digit_bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # look for the biggest contour near the center of the fragment
        biggest_contour = None
        biggest_contour_area = 0
        for cnt in contours:
            M = cv2.moments(cnt)
            # skip contours with a small area
            if cv2.contourArea(cnt) < 30:
                continue
            # skip contours with a short perimeter
            if cv2.arcLength(cnt, True) < 30:
                continue
            # the centroid of the contour
            cx = M['m10']/M['m00']
            cy = M['m01']/M['m00']
            # skip contours far from the horizontal center - most
            # likely they are pieces of the neighboring digits
            if cx/dw < 0.3 or cx/dw > 0.7:
                continue
            # keep the biggest remaining contour
            if cv2.contourArea(cnt) > biggest_contour_area:
                biggest_contour = cnt
                biggest_contour_area = cv2.contourArea(cnt)
                biggest_contour_cx = cx
                biggest_contour_cy = cy

        # if no suitable contour was found, leave the digit for manual recognition
        if biggest_contour == None:
            digit = self.dbDigit(i, digit_bin)
            digit.markDigitForManualRecognize(use_for_training=False)
            mylogger.warn("Digit %d: no biggest contour found" % i)
            continue
        # erase everything outside the biggest contour
        mask = np.zeros(digit_bin.shape, np.uint8)
        cv2.drawContours(mask, [biggest_contour], 0, 255, -1)
        digit_bin = cv2.bitwise_and(digit_bin, digit_bin, mask=mask)
        # the size of the window to cut out around the contour center
        rw = dw/2.0
        rh = dh/1.4
        # make sure the window does not go beyond the fragment
        if biggest_contour_cy - rh/2 < 0:
            biggest_contour_cy = rh/2
        if biggest_contour_cx - rw/2 < 0:
            biggest_contour_cx = rw/2
All digits are normalized to a common size, digit_base_h = 24 by digit_base_w = 16 pixels, and the result is saved in the database.

        # cut out the window around the contour center
        digit_bin = digit_bin[int(biggest_contour_cy-rh/2):int(biggest_contour_cy+rh/2), int(biggest_contour_cx-rw/2):int(biggest_contour_cx+rw/2)]
        # scale to the base size and re-binarize after the resize
        digit_bin = cv2.resize(digit_bin, (digit_base_w, digit_base_h))
        digit_bin = cv2.threshold(digit_bin, 128, 255, cv2.THRESH_BINARY)[1]
        # store the digit in the database
        digit = self.dbDigit(i, digit_bin)

    return True
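A small inspection sketch (mine, not from the article) that dumps the split digits of one image as PNG files; the image id 1 is an arbitrary example:

import base64
from models import sess, Digit

for digit in sess.query(Digit).filter_by(image_id=1):
    # getEncodedBody returns the digit as a base64-encoded PNG
    with open("/tmp/digit_%d.png" % digit.i, "wb") as f:
        f.write(base64.b64decode(digit.getEncodedBody()))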
Recognition starts with the identifyDigits method, which is called from the main program for each image:

def identifyDigits(self):
    # nothing to do if the readings are already recognized
    if self.result != '':
        return True
    # the digits may not have been split out yet
    if len(self.digits) == 0:
        # without the source image nothing can be done
        if self.img == None:
            return False
        # split the image into separate digits
        if not self.splitDigits():
            return False
        # save the digits to the database
        sess.commit()
    # recognize each digit separately
    for digit in self.digits:
        digit.identifyDigit()
    # collect the per-digit results
    str_digits = map(str, self.digits)
    # if at least one digit is unrecognized, the image fails
    if '?' in str_digits:
        return False
    # join the digits into the final readings
    self.result = ''.join(str_digits)
    return True
Each digit is handled by the identifyDigit method:

def identifyDigit(self):
    # nothing to do if the digit is already recognized
    if self.result != '?':
        return True
    if not KNN.recognize(self):
        # recognition failed, leave the digit for manual processing
        self.markDigitForManualRecognize()
        # the 7th digit is almost always "0", so when recognition
        # fails it is taken to be 0 anyway
        if self.i == 7:
            self.result = 0
            return True
        return False
    else:
        self.use_for_training = True
        return True
The identifyDigit method is also trivial; the actual recognition happens in the KNN.recognize method, which uses the nearest-neighbors algorithm from OpenCV:

@staticmethod
def recognize(dbdigit):
    # train the model on first use
    if not KNN._trained:
        KNN.train()

    # check that the digit has the expected resolution
    h, w = dbdigit.body.shape
    if h != digit_base_h or w != digit_base_w:
        dbdigit.markDigitForManualRecognize(use_for_training=False)
        mylogger.warn("Digit %d has bad resolution: %dx%d" % (dbdigit.i, h, w))
        return False

    # unroll the image into a flat float vector
    sample = dbdigit.body.reshape(digit_base_h*digit_base_w).astype(np.float32)
    test_data = np.array([sample])

    # look for the 5 nearest neighbors
    knn = KNN.getKNN()
    ret, result, neighbours, dist = knn.find_nearest(test_data, k=5)

    # analyze the answer
    if result[0,0] != neighbours[0,0]:
        # the nearest neighbor differs from the returned class
        dbdigit.markDigitForManualRecognize()
        return False
    if neighbours[0,1] != neighbours[0,0] or neighbours[0,2] != neighbours[0,0]:
        # the three nearest neighbors do not agree
        dbdigit.markDigitForManualRecognize()
        return False
    if dist[0,0] > 3000000 or dist[0,1] > 3500000 or dist[0,2] > 4000000:
        # the nearest neighbors are too far away
        dbdigit.markDigitForManualRecognize()
        return False

    # the answer looks reliable - store it
    dbdigit.result = str(int(ret))
    return True
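For a sense of scale of these thresholds: the samples are binary 24x16 images with pixel values 0 or 255, so assuming the distances are squared Euclidean (which matches the magnitudes here), each mismatched pixel contributes 255^2 = 65025, and the threshold of 3000000 corresponds to roughly 46 differing pixels, about 12% of the 384-pixel image.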
The model is trained in KNN.train:

@staticmethod
def getKNN():
    # lazily create the cv2.KNearest instance
    if KNN._knn == None:
        KNN._knn = cv2.KNearest()
    return KNN._knn

@staticmethod
def train():
    knn = KNN.getKNN()
    # take all verified digits that are marked for training
    train_digits = sess.query(Digit).filter(Digit.result != '?').filter_by(use_for_training=True).all()
    train_data = []
    responses = []
    for dbdigit in train_digits:
        h, w = dbdigit.body.shape
        # skip digits with the wrong resolution
        if h*w != digit_base_h*digit_base_w:
            continue
        # unroll the image into a flat float vector
        sample = dbdigit.body.reshape(digit_base_h*digit_base_w).astype(np.float32)
        train_data.append(sample)
        responses.append(int(dbdigit.result))
    # train the KNN model
    knn.train(np.array(train_data), np.array(responses))
    KNN._trained = True
Finally, here is the full models.py, in case the reader has questions about functions that are used above but not described.

import datetime
from sqlalchemy import Column, Integer, String, Text, Boolean, ForeignKey, DateTime, PickleType
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import base64
import cv2
import numpy as np
import os
import logging
import sys

dbengine = create_engine('sqlite:///' + os.path.dirname(__file__) + '/../db/images.db', echo=False)
Session = sessionmaker(bind=dbengine)
sess = Session()

Base = declarative_base()

# image class
class Image(Base):
    __tablename__ = 'images'

    id = Column(Integer, primary_key=True)
    file_name = Column(String)
    img_link = Column(Text)
    download_url = Column(Text)
    check_time = Column(DateTime)
    result = Column(String(8))
    digits = relationship("Digit", backref="image")

    img = None         # source image
    digits_img = None  # cropped source image

    def __init__(self, file_name):
        self.file_name = file_name
        self.check_time = datetime.datetime.strptime(file_name, "gaz.%Y-%m-%d.%H.%M.%S.jpg")
        self.result = ""

    def __repr__(self):
        return "<Image ('%s','%s','%s')>" % (self.id, self.file_name, self.result)

    def dbDigit(self, i, digit_img):
        # find the i-th digit of this image or create a new record
        digit = sess.query(Digit).filter_by(image_id=self.id).filter_by(i=i).first()
        if not digit:
            digit = Digit(self, i, digit_img)
            sess.add(digit)
        else:
            digit.body = digit_img
        return digit

# digit class
class Digit(Base):
    __tablename__ = 'digits'

    id = Column(Integer, primary_key=True)
    image_id = Column(Integer, ForeignKey("images.id"))
    i = Column(Integer)
    body = Column(PickleType)
    result = Column(String(1))
    use_for_training = Column(Boolean)

    def __init__(self, image, i, digit_img):
        self.image_id = image.id
        self.i = i
        self.body = digit_img
        self.markDigitForManualRecognize()

    def __repr__(self):
        return "%s" % self.result

    def markDigitForManualRecognize(self, use_for_training=False):
        self.result = '?'
        self.use_for_training = use_for_training

    def getEncodedBody(self):
        # encode the digit image as a base64 PNG
        enc = cv2.imencode('.png', self.body)[1]
        b64 = base64.b64encode(enc)
        return b64

Base.metadata.create_all(bind=dbengine)

# function to get Image object by file_name
def getImage(file_name):
    image = sess.query(Image).filter_by(file_name=file_name).first()
    if not image:
        image = Image(file_name)
        sess.add(image)
        # store image object to base
        sess.commit()
    image.digits_img = None
    return image

def getLastRecognizedImage():
    return sess.query(Image).filter(Image.result != '').order_by(Image.check_time.desc()).first()

def dgDigitById(digit_id):
    digit = sess.query(Digit).get(digit_id)
    return digit
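A short usage sketch (mine): print the ten most recent recognized readings from the database:

from models import sess, Image

for image in sess.query(Image).filter(Image.result != '').order_by(Image.check_time.desc()).limit(10):
    print image.check_time, image.result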
Source: https://habr.com/ru/post/220869/