728x90
반응형
** TensorFlow 2.0부터는 라이브러리 구조가 변경되어 아래 코드를 바로 사용할 수 없음.
1. AI_PI.py
- 사전에 학습된 가중치 파일과 모델을 라즈베리파이에 설치하여 작동
- 판단 이후는 GPIO를 통하여 외부 장치(LED) 등으로 결과를 알려줌 (미구현)
# python 3.4
from socket import *
from matplotlib.image import imread
import tensorflow as tf
import numpy as np
# SOCKET
# TCP server settings: bind on all interfaces, fixed port, 2 KiB recv chunks.
HOST = ''
PORT = 12345
SIZE = 2048
MSGLEN = 0
# NETWORK
# CNN hyper-parameters; n_input = 100*100*3 flattened RGB face image,
# n_classes = 2 (authorized person vs. other).
learning_rate = 0.001
n_input = 30000
n_classes = 2
dropout = 0.75
# TF 1.x graph inputs: flattened image batch, one-hot labels, dropout keep prob.
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
# Conv filter weights (3x3 kernels, widening 8->128 channels), one dense
# layer and the output projection. Variable names must match the checkpoint
# that tf.train.Saver restores in testNetwork().
weights ={
'wc1' : tf.get_variable("wc1", shape=[3, 3, 3, 8], initializer =tf.contrib.layers.xavier_initializer()),
'wc2' : tf.get_variable("wc2", shape=[3, 3, 8, 16], initializer =tf.contrib.layers.xavier_initializer()),
'wc3' : tf.get_variable("wc3", shape=[3, 3, 16, 32], initializer =tf.contrib.layers.xavier_initializer()),
'wc4' : tf.get_variable("wc4", shape=[3, 3, 32, 64], initializer =tf.contrib.layers.xavier_initializer()),
'wc5' : tf.get_variable("wc5", shape=[3, 3, 64, 128], initializer =tf.contrib.layers.xavier_initializer()),
'wd1' : tf.get_variable("wd1", shape=[1*1*128, 1024], initializer =tf.contrib.layers.xavier_initializer()),
'wout' : tf.get_variable("wout", shape=[1024, n_classes], initializer =tf.contrib.layers.xavier_initializer())
}
# Per-layer biases, zero-initialized; sizes mirror the weight output channels.
biases = {
'bc1' : tf.Variable(tf.zeros([8])),
'bc2' : tf.Variable(tf.zeros([16])),
'bc3' : tf.Variable(tf.zeros([32])),
'bc4' : tf.Variable(tf.zeros([64])),
'bc5' : tf.Variable(tf.zeros([128])),
'bd1' : tf.Variable(tf.zeros([1024])),
'bout': tf.Variable(tf.zeros([n_classes]))
}
def conv2d(img, w, b):
    """Convolution (stride 1, VALID padding) with bias add and ReLU."""
    conv = tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='VALID')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def max_pool(img, k):
    """k x k max-pooling with stride k (VALID padding)."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(img, ksize=window, strides=window, padding='VALID')
def conv_net(x, weights, biases, dropout):
    """Run the face-recognition CNN and return class logits.

    x       -- [batch, 30000] flattened 100x100x3 images
    weights -- dict with keys 'wc1'..'wc5', 'wd1', 'wout'
    biases  -- dict with keys 'bc1'..'bc5', 'bd1', 'bout'
    dropout -- keep probability applied after every pooling / dense stage
    Returns a [batch, n_classes] logits tensor.
    """
    net = tf.reshape(x, shape=[-1, 100, 100, 3])
    # Five identical stages: 3x3 conv -> 2x2 max-pool -> dropout.
    # VALID padding shrinks 100x100 down to 1x1 by the fifth stage.
    for i in range(1, 6):
        net = conv2d(net, weights['wc%d' % i], biases['bc%d' % i])
        net = max_pool(net, k=2)
        net = tf.nn.dropout(net, dropout)
    # Flatten to the dense layer's input width (1*1*128).
    flat = tf.reshape(net, [-1, weights['wd1'].get_shape().as_list()[0]])
    dense = tf.nn.relu(tf.add(tf.matmul(flat, weights['wd1']), biases['bd1']))
    dense = tf.nn.dropout(dense, dropout)
    return tf.add(tf.matmul(dense, weights['wout']), biases['bout'])
def autoConnection():
    """Broadcast this machine's LAN IP on UDP port 8000 so the client can
    discover the server automatically.
    """
    # Discover the outbound interface IP: "connecting" a UDP socket sends no
    # packets but makes getsockname() report the local address that would be
    # used to reach the target.
    probe = socket(AF_INET, SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        priip = probe.getsockname()[0]
    finally:
        probe.close()
    # BUG FIX: the original broadcast via sendto() on the *connected* probe
    # socket, which raises EISCONN on Linux; use a dedicated socket instead.
    bcast = socket(AF_INET, SOCK_DGRAM)
    try:
        bcast.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        bcast.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        bcast.sendto(priip.encode(), ('255.255.255.255', 8000))
    finally:
        bcast.close()
    print('ip : ', priip)
def testNetwork():
    """Accept one TCP client, receive a face image, classify it with the
    pretrained CNN and reply b'1' (authorized) or b'0' (other).
    """
    print('Start server')
    srv = socket(AF_INET, SOCK_STREAM)
    srv.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    srv.bind((HOST, PORT))
    srv.listen(1)
    autoConnection()
    # BUG FIX: the original did `s = s.accept()`, clobbering and leaking the
    # listening socket; keep them separate and close both.
    conn, addr = srv.accept()
    print('connected ... ', addr)
    # File transfer
    print('File receiving')
    # NOTE(review): the client sends the size as a bare decimal string and we
    # read exactly 4 bytes, so the image must be 1000-9999 bytes -- fragile
    # protocol, confirm with the client side.
    filesize = int(conn.recv(4).decode())
    total = 0
    with open('recv.jpg', 'wb') as f:
        while total < filesize:
            data = conn.recv(2048)
            if not data:  # peer closed early; avoid the original infinite loop
                break
            total += len(data)
            f.write(data)
    # Session run
    print('Start Network')
    img = imread('recv.jpg')
    float_img = img.astype(np.float32)
    float_img *= 1.0 / 255.0
    float_img = float_img.reshape([1, 100 * 100 * 3])
    pred = conv_net(x, weights, biases, keep_prob)
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # BUG FIX: the original also built an AdamOptimizer and ran a training
    # step at inference time with a broken/empty feed_dict (no labels exist
    # here); the optimizer's slot variables would additionally break
    # Saver.restore against a weights-only checkpoint. Removed.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, 'tmp/model.ckpt')
        # Feed the one-hot label of the authorized class so `accuracy`
        # reduces to "did the network predict class 0 for this image".
        feed = {x: float_img,
                y: np.array([[1.0, 0.0]], dtype=np.float32),
                keep_prob: 1.0}
        prd, acc = sess.run([pred, accuracy], feed_dict=feed)
        if acc > 0.9:
            conn.send(b'1')  # authorized face
        else:
            conn.send(b'0')  # other
    conn.close()
    srv.close()
# Run one accept/receive/classify cycle, then terminate the process.
testNetwork()
print('exit')
exit()
2. AI_PC.py
- 라즈베리 파이에서 PiCamera가 OpenCV로 인식되지 않아 노트북카메라와 소켓을 활용함
(차후 수정 시 없어지는 부분)
- 카메라에 사람 얼굴이 인식되면 캡쳐 후 전송 (Haar Cascade 인식기를 사용)
- 동일 네트워크 상에 라즈베리파이가 존재하면 자동 연결
# python3.5
import cv2
import numpy as np
from socket import *
import os
import time
# Haar cascade load
# Pretrained OpenCV Haar cascades for frontal faces and eyes; the XML files
# must sit next to this script. Eye detection is used to reject false faces.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Path the captured 100x100 face crop is written to before transfer.
IMAGE_NAME = 'face.jpg'
# face detection & capture
def face_detect():
    """Grab webcam frames until a face containing at least one detectable
    eye is found, then save a 100x100 equalized crop to IMAGE_NAME.

    Returns 1 once a crop was captured, 0 if the camera cannot be
    opened or a frame read fails.
    """
    cap = cv2.VideoCapture(0)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:  # camera unplugged / read failure: give up cleanly
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            if len(faces) > 0:
                print('\nFace Detection! -> ', end='')
            for (x, y, w, h) in faces:
                # BUG FIX: the original sliced columns with x:x+h (the
                # *height*), skewing the crop for non-square detections.
                crop = frame[y:y + h, x:x + w]
                roi_gray = gray[y:y + h, x:x + w]
                eyes = eye_cascade.detectMultiScale(roi_gray)
                if len(eyes) > 0:
                    # Per-channel histogram equalization on BGR -- presumably
                    # matches the network's training preprocessing; confirm.
                    for c in range(3):
                        crop[:, :, c] = cv2.equalizeHist(crop[:, :, c])
                    out_img = cv2.resize(crop, (100, 100),
                                         interpolation=cv2.INTER_CUBIC)
                    cv2.imwrite(IMAGE_NAME, out_img)
                    print('Capture!\n')
                    return 1
        return 0
    finally:
        cap.release()  # original leaked the capture device
# NETWORK
# Client main: discover the server via its UDP broadcast on port 8000, then
# stream the captured face image over TCP and print the server's verdict.
print('Start Client')
s_broad = socket(AF_INET, SOCK_DGRAM)
s_broad.bind(('', 8000))
msg, addr = s_broad.recvfrom(1024)
s_broad.close()  # original leaked this socket
HOST = msg.decode()
PORT = 12345
print('Server IP :', HOST)
s = socket(AF_INET, SOCK_STREAM)
print('Server connect ...', end='')
time.sleep(1)  # give the server a moment to reach accept() after broadcasting
s.connect((HOST, PORT))
print('OK')
while 1:
    if face_detect():
        filesize = os.path.getsize(IMAGE_NAME)
        # NOTE(review): the server reads exactly 4 bytes for the size, so
        # the JPEG must be 1000-9999 bytes -- fragile protocol, confirm.
        s.send(str(filesize).encode())
        with open(IMAGE_NAME, 'rb') as f:
            while 1:
                data = f.read(2048)
                if not data:
                    break
                s.sendall(data)  # sendall: plain send may write partially
        result = s.recv(10)
        if result == b'1':
            print('\nGood day Master\n')
        else:
            print('\nAuthorization Error.\n')
        break
s.close()
3. 얼굴 이미지 학습 시 사용한 모델 (CNN)
728x90
반응형
'기타 > 토이 프로젝트' 카테고리의 다른 글
윈도우 원격제어 프로그램 (0) | 2022.03.08 |
---|---|
아두이노로 키보드 입력 방지 우회하기 (2) | 2021.09.10 |
미국 주식 시뮬레이터 (0) | 2021.07.25 |
Python으로 UiPath Custom Activity 만들기 (Captcha) (2) | 2021.07.17 |
레이싱 드론만들기 #1 재료 구매 (0) | 2021.06.09 |
파일 자동 분류/정리 시스템 (0) | 2017.07.11 |
댓글