SUStech_EdgeCrowdCountingSystem #51

Open · wants to merge 3 commits into master
510 changes: 510 additions & 0 deletions arc_design_contest/2019/SUStech_EdgeCrowdCountingSystem/README.md

Large diffs are not rendered by default.

@@ -0,0 +1,147 @@
import time
import cv2
import os
import sys
from itertools import cycle
import tensorflow as tf
import numpy as np
import socket


def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def inf(x):
    """Build the three-column MCNN and return the predicted density map."""
    # tf.reset_default_graph()
    # s net ###########################################################
    w_conv1_1 = tf.get_variable('w_conv1_1', [5, 5, 1, 24])
    b_conv1_1 = tf.get_variable('b_conv1_1', [24])
    h_conv1_1 = tf.nn.relu(conv2d(x, w_conv1_1) + b_conv1_1)

    h_pool1_1 = max_pool_2x2(h_conv1_1)

    w_conv2_1 = tf.get_variable('w_conv2_1', [3, 3, 24, 48])
    b_conv2_1 = tf.get_variable('b_conv2_1', [48])
    h_conv2_1 = tf.nn.relu(conv2d(h_pool1_1, w_conv2_1) + b_conv2_1)

    h_pool2_1 = max_pool_2x2(h_conv2_1)

    w_conv3_1 = tf.get_variable('w_conv3_1', [3, 3, 48, 24])
    b_conv3_1 = tf.get_variable('b_conv3_1', [24])
    h_conv3_1 = tf.nn.relu(conv2d(h_pool2_1, w_conv3_1) + b_conv3_1)

    w_conv4_1 = tf.get_variable('w_conv4_1', [3, 3, 24, 12])
    b_conv4_1 = tf.get_variable('b_conv4_1', [12])
    h_conv4_1 = tf.nn.relu(conv2d(h_conv3_1, w_conv4_1) + b_conv4_1)

    # m net ###########################################################
    w_conv1_2 = tf.get_variable('w_conv1_2', [7, 7, 1, 20])
    b_conv1_2 = tf.get_variable('b_conv1_2', [20])
    h_conv1_2 = tf.nn.relu(conv2d(x, w_conv1_2) + b_conv1_2)

    h_pool1_2 = max_pool_2x2(h_conv1_2)

    w_conv2_2 = tf.get_variable('w_conv2_2', [5, 5, 20, 40])
    b_conv2_2 = tf.get_variable('b_conv2_2', [40])
    h_conv2_2 = tf.nn.relu(conv2d(h_pool1_2, w_conv2_2) + b_conv2_2)

    h_pool2_2 = max_pool_2x2(h_conv2_2)

    w_conv3_2 = tf.get_variable('w_conv3_2', [5, 5, 40, 20])
    b_conv3_2 = tf.get_variable('b_conv3_2', [20])
    h_conv3_2 = tf.nn.relu(conv2d(h_pool2_2, w_conv3_2) + b_conv3_2)

    w_conv4_2 = tf.get_variable('w_conv4_2', [5, 5, 20, 10])
    b_conv4_2 = tf.get_variable('b_conv4_2', [10])
    h_conv4_2 = tf.nn.relu(conv2d(h_conv3_2, w_conv4_2) + b_conv4_2)

    # l net ###########################################################
    w_conv1_3 = tf.get_variable('w_conv1_3', [9, 9, 1, 16])
    b_conv1_3 = tf.get_variable('b_conv1_3', [16])
    h_conv1_3 = tf.nn.relu(conv2d(x, w_conv1_3) + b_conv1_3)

    h_pool1_3 = max_pool_2x2(h_conv1_3)

    w_conv2_3 = tf.get_variable('w_conv2_3', [7, 7, 16, 32])
    b_conv2_3 = tf.get_variable('b_conv2_3', [32])
    h_conv2_3 = tf.nn.relu(conv2d(h_pool1_3, w_conv2_3) + b_conv2_3)

    h_pool2_3 = max_pool_2x2(h_conv2_3)

    w_conv3_3 = tf.get_variable('w_conv3_3', [7, 7, 32, 16])
    b_conv3_3 = tf.get_variable('b_conv3_3', [16])
    h_conv3_3 = tf.nn.relu(conv2d(h_pool2_3, w_conv3_3) + b_conv3_3)

    w_conv4_3 = tf.get_variable('w_conv4_3', [7, 7, 16, 8])
    b_conv4_3 = tf.get_variable('b_conv4_3', [8])
    h_conv4_3 = tf.nn.relu(conv2d(h_conv3_3, w_conv4_3) + b_conv4_3)

    # merge ###########################################################
    h_conv4_merge = tf.concat([h_conv4_1, h_conv4_2, h_conv4_3], 3)

    w_conv5 = tf.get_variable('w_conv5', [1, 1, 30, 1])
    b_conv5 = tf.get_variable('b_conv5', [1])
    h_conv5 = conv2d(h_conv4_merge, w_conv5) + b_conv5

    y_pre = h_conv5

    return y_pre


graph1 = tf.Graph()
with graph1.as_default():
    x = tf.placeholder(tf.float32, [None, None, None, 1], name="input")
    y_act = tf.placeholder(tf.float32, [None, None, None, 1])
    y_pre = inf(x)

frame_path = './data/'
filenames = os.listdir(frame_path)
img_iter = cycle([cv2.imread(os.path.join(frame_path, name), 0)
                  for name in filenames])

key = 0
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 640, 480)

SERVER_ADDR = ('192.168.31.67', 80)

# 'import socket' imports the module, so the constructor is socket.socket()
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(SERVER_ADDR)
print('connect to server (%s) success' % str(SERVER_ADDR))

with tf.Session(graph=graph1) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, 'modelA/model.ckpt')

    while key & 0xFF != 28:
        frame = next(img_iter)
        # feed a normalized float copy to the network; keep the raw frame for display
        img = (np.array(frame, dtype=np.float32) - 127.5) / 128
        x_in = np.reshape(img, (1, img.shape[0], img.shape[1], 1))
        timea = time.time()
        y_p_den = sess.run(y_pre, feed_dict={"input:0": x_in})
        timeb = time.time()
        cv2.imshow('image', frame)

        tm = time.strftime("%Y%m%d%H%M%S", time.localtime())
        # the density map sums to a fractional head count; round before zero-padding
        real_num = "{:0>4d}".format(int(round(np.sum(y_p_den))))
        send_data = 'A' + real_num + tm
        client_socket.send(send_data.encode())

        print("prediction result: %s" % real_num)
        print('send message %s' % send_data)
        print('=============================================================')
        print('cost time: %.2f s' % (timeb - timea))

        sys.stdout.flush()
        key = cv2.waitKey(1000)
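# Illustrative sketch (not part of this pull request): the message sent above is
# 'A' + a zero-padded 4-digit count + a 14-character timestamp, so a minimal
# TCP receiver on the server side could look like the following. The bind
# address and buffer size are assumptions for illustration only.
#
#   import socket
#
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(('0.0.0.0', 80))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   while True:
#       msg = conn.recv(64).decode()
#       if not msg:
#           break
#       count = int(msg[1:5])    # zero-padded head count
#       stamp = msg[5:19]        # YYYYMMDDHHMMSS timestamp
#       print('client %s reports %d people at %s' % (addr[0], count, stamp))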
@@ -0,0 +1,2 @@
model_checkpoint_path: "model.ckpt"
all_model_checkpoint_paths: "model.ckpt"
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1 @@

@@ -0,0 +1,134 @@
import numpy as np
import cv2
import tensorflow as tf


class MCNN:
    def __init__(self, dataset=None):
        self.dataset = dataset
        self.LEARNING_RATE = 1e-4

        self.x = tf.placeholder(tf.float32, [None, None, None, 1])
        self.y_act = tf.placeholder(tf.float32, [None, None, None, 1])
        self.y_pre = self.inf(self.x)

        # Loss: root-mean-square (Euclidean) distance between density maps
        self.loss = tf.sqrt(tf.reduce_mean(tf.square(self.y_act - self.y_pre)))
        self.act_sum = tf.reduce_sum(self.y_act)
        self.pre_sum = tf.reduce_sum(self.y_pre)

        # Mean Absolute Error of the integrated head counts
        self.MAE = tf.abs(self.act_sum - self.pre_sum)

        self.train_step = tf.train.AdamOptimizer(
            self.LEARNING_RATE).minimize(self.loss)

    def conv2d(self, x, w):
        return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(self, x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

    def inf(self, x):
        # tf.reset_default_graph()
        # s net ###########################################################
        w_conv1_1 = tf.get_variable('w_conv1_1', [5, 5, 1, 24])
        b_conv1_1 = tf.get_variable('b_conv1_1', [24])
        h_conv1_1 = tf.nn.relu(self.conv2d(x, w_conv1_1) + b_conv1_1)

        h_pool1_1 = self.max_pool_2x2(h_conv1_1)

        w_conv2_1 = tf.get_variable('w_conv2_1', [3, 3, 24, 48])
        b_conv2_1 = tf.get_variable('b_conv2_1', [48])
        h_conv2_1 = tf.nn.relu(self.conv2d(h_pool1_1, w_conv2_1) + b_conv2_1)

        h_pool2_1 = self.max_pool_2x2(h_conv2_1)

        w_conv3_1 = tf.get_variable('w_conv3_1', [3, 3, 48, 24])
        b_conv3_1 = tf.get_variable('b_conv3_1', [24])
        h_conv3_1 = tf.nn.relu(self.conv2d(h_pool2_1, w_conv3_1) + b_conv3_1)

        w_conv4_1 = tf.get_variable('w_conv4_1', [3, 3, 24, 12])
        b_conv4_1 = tf.get_variable('b_conv4_1', [12])
        h_conv4_1 = tf.nn.relu(self.conv2d(h_conv3_1, w_conv4_1) + b_conv4_1)

        # m net ###########################################################
        w_conv1_2 = tf.get_variable('w_conv1_2', [7, 7, 1, 20])
        b_conv1_2 = tf.get_variable('b_conv1_2', [20])
        h_conv1_2 = tf.nn.relu(self.conv2d(x, w_conv1_2) + b_conv1_2)

        h_pool1_2 = self.max_pool_2x2(h_conv1_2)

        w_conv2_2 = tf.get_variable('w_conv2_2', [5, 5, 20, 40])
        b_conv2_2 = tf.get_variable('b_conv2_2', [40])
        h_conv2_2 = tf.nn.relu(self.conv2d(h_pool1_2, w_conv2_2) + b_conv2_2)

        h_pool2_2 = self.max_pool_2x2(h_conv2_2)

        w_conv3_2 = tf.get_variable('w_conv3_2', [5, 5, 40, 20])
        b_conv3_2 = tf.get_variable('b_conv3_2', [20])
        h_conv3_2 = tf.nn.relu(self.conv2d(h_pool2_2, w_conv3_2) + b_conv3_2)

        w_conv4_2 = tf.get_variable('w_conv4_2', [5, 5, 20, 10])
        b_conv4_2 = tf.get_variable('b_conv4_2', [10])
        h_conv4_2 = tf.nn.relu(self.conv2d(h_conv3_2, w_conv4_2) + b_conv4_2)

        # l net ###########################################################
        w_conv1_3 = tf.get_variable('w_conv1_3', [9, 9, 1, 16])
        b_conv1_3 = tf.get_variable('b_conv1_3', [16])
        h_conv1_3 = tf.nn.relu(self.conv2d(x, w_conv1_3) + b_conv1_3)

        h_pool1_3 = self.max_pool_2x2(h_conv1_3)

        w_conv2_3 = tf.get_variable('w_conv2_3', [7, 7, 16, 32])
        b_conv2_3 = tf.get_variable('b_conv2_3', [32])
        h_conv2_3 = tf.nn.relu(self.conv2d(h_pool1_3, w_conv2_3) + b_conv2_3)

        h_pool2_3 = self.max_pool_2x2(h_conv2_3)

        w_conv3_3 = tf.get_variable('w_conv3_3', [7, 7, 32, 16])
        b_conv3_3 = tf.get_variable('b_conv3_3', [16])
        h_conv3_3 = tf.nn.relu(self.conv2d(h_pool2_3, w_conv3_3) + b_conv3_3)

        w_conv4_3 = tf.get_variable('w_conv4_3', [7, 7, 16, 8])
        b_conv4_3 = tf.get_variable('b_conv4_3', [8])
        h_conv4_3 = tf.nn.relu(self.conv2d(h_conv3_3, w_conv4_3) + b_conv4_3)

        # merge ###########################################################
        h_conv4_merge = tf.concat([h_conv4_1, h_conv4_2, h_conv4_3], 3)

        w_conv5 = tf.get_variable('w_conv5', [1, 1, 30, 1])
        b_conv5 = tf.get_variable('b_conv5', [1])
        h_conv5 = self.conv2d(h_conv4_merge, w_conv5) + b_conv5

        y_pre = h_conv5

        return y_pre

    def predict(self, img_path='./data/IMG_3.jpg'):
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, 'model' + self.dataset + '/model.ckpt')

            # img_path = '.\\data\\original\\
            # shanghaitech\\part_'+ self.dataset +'\\test_data\\images\\'

            print('Image Loading!')

            data = []
            img = cv2.imread(img_path, 0)
            img = np.array(img)
            img = (img - 127.5) / 128
            data.append([img])

            print('Image loaded!')

            d = data[0]
            x_in = np.reshape(d[0], (1, d[0].shape[0], d[0].shape[1], 1))
            y_p_den = sess.run(self.y_pre, feed_dict={self.x: x_in})
            y_p = np.sum(y_p_den)
            print('y_p : ', y_p)
            return y_p_den, y_p
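# Illustrative usage sketch (not part of the original file): assuming this class
# is importable and a checkpoint is stored under 'modelA/' (dataset='A'), as the
# client script above expects, prediction on a single image would look like:
#
#   net = MCNN(dataset='A')
#   density_map, head_count = net.predict('./data/IMG_3.jpg')
#   print('estimated count:', head_count)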
@@ -0,0 +1,100 @@
/* ------------------------------------------
* Copyright (c) 2017, Synopsys, Inc. All rights reserved.

* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:

* 1) Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.

* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.

* 3) Neither the name of the Synopsys, Inc., nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.

* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
--------------------------------------------- */

/**
* \defgroup BOARD_EMSK_DRV_MID_FS_SDCARD EMSK Fatfs Middleware SDCard Driver
* \ingroup BOARD_EMSK_DRIVER
* \brief EMSK Fatfs Middleware SDCard Interface Driver
* \details
 * Implements the SD card driver for FatFs on top of the middleware FatFs diskio
 * abstraction layer; the SD card interface can be SPI, SDIO, and so on.
*/
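
/*
 * Illustrative sketch (not part of the original driver): once this diskio
 * driver is registered, application code reaches the card through the normal
 * FatFs API. The drive prefix "0:" and the file name are assumptions for
 * illustration, and the exact open flags depend on the FatFs revision bundled
 * with embARC.
 *
 *   FATFS fs;
 *   FIL fp;
 *   UINT written;
 *
 *   if (f_mount(&fs, "0:", 1) == FR_OK &&
 *       f_open(&fp, "0:count.log", FA_WRITE | FA_CREATE_ALWAYS) == FR_OK) {
 *       f_write(&fp, "A0042\r\n", 7, &written);
 *       f_close(&fp);
 *   }
 */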

/**
* \file
* \ingroup BOARD_EMSK_DRV_MID_FS_SDCARD
 * \brief SD card driver for FatFs on the EMSK board
*/

/**
* \addtogroup BOARD_EMSK_DRV_MID_FS_SDCARD
* @{
*/
#ifdef MID_FATFS /* only available when enable fatfs middleware */

#include "embARC_toolchain.h"
#include "ff_diskio.h"
#include "emsk_sdcard.h"
#include <time.h>

uint32_t diskio_get_fattime(void)
{
	struct tm *p_tm;
	time_t cur_time;
	uint32_t fattime;

	cur_time = time(NULL);
	p_tm = localtime(&cur_time);
	/* Pack into the FAT timestamp layout: years since 1980 [31:25], month [24:21],
	 * day [20:16], hour [15:11], minute [10:5], second/2 [4:0]. */
	fattime = ((p_tm->tm_year+1900-1980)&0x7f) << 25;
	fattime |= ((p_tm->tm_mon+1)&0xf) << 21;
	fattime |= ((p_tm->tm_mday)&0x1f) << 16;
	fattime |= ((p_tm->tm_hour)&0x1f) << 11;
	fattime |= ((p_tm->tm_min)&0x3f) << 5;
	fattime |= ((p_tm->tm_sec>>1)&0x1f);

	return fattime;
}

FATFS_DISKIO *get_fatfs_diskio(uint32_t drvid)
{
	switch (drvid) {
#if (USE_EMSK_SDCARD_SPI_0)
	case EMSK_SDCARD_0_DRVID:
		return &sdcard_spi_0_diskio;
		break;
#endif
	default:
		break;
	}
	return NULL;
}

/** Called from the 1 ms periodic interrupt to update the disk I/O status */
void emsk_sdcard_1ms_update(void)
{
#if (USE_EMSK_SDCARD_SPI_0)
	sdcard_spi_0_diskio.diskio_timerproc();
#endif
}


#endif /* MID_FATFS */

/** @} end of group BOARD_EMSK_DRV_MID_FS_SDCARD */