repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-12-25 21:25
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : kafka-producer.py
# ----------------------------------------------
from kafka import KafkaProducer
from time import sleep
def start_producer():
    """Send numbered test messages to 'my_test_topic1', one every 3 seconds."""
    # The six in-cluster brokers follow a regular naming/port pattern, so the
    # bootstrap string is generated instead of spelled out literally.
    brokers = ",".join(
        "kafka-%d-0.kafka-%d-inside-svc.kafka.svc.cluster.local:%d" % (i, i, 32010 + i)
        for i in range(6)
    )
    producer = KafkaProducer(bootstrap_servers=brokers)
    for n in range(100000):
        message = "msg is %d" % n
        print(message)
        producer.send('my_test_topic1', message.encode('utf-8'))
        sleep(3)
# Entry point: run the producer loop when executed as a script.
if __name__ == '__main__':
    start_producer()
| Python | 27 | 42.148148 | 108 | /part-kafka/kafka-producer.py | 0.472103 | 0.411159 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-01 10:39
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test02.py
# ----------------------------------------------
if __name__ == "__main__":
    # Converting between gbk and utf-8 encodings.
    # gbk is a legacy encoding for Chinese characters.
    t1 = "中国加油"
    t1_gbk = t1.encode("gbk")
    print(t1.encode("gbk"))
    # utf-8 bytes
    t2_utf = t1_gbk.decode("gbk").encode("utf-8")
    print(t2_utf)
    print(t1.encode("utf-8"))
    # Splitting a string with a regex.
    s1 = "info : xiaoZhang 33 shandong"
    import re
    # No capturing group: separators are dropped from the result.
    c1 = re.compile(r'\s*[:\s]\s*')
    l1 = re.split(c1, s1)
    print(l1)
    # Capturing group: separators are kept in the result.
    c2 = re.compile(r'(\s*:\s*|\s)')
    l2 = re.split(c2, s1)
    print(l2)
    # Use (?:...) when parentheses are needed without capturing.
    c3 = re.compile(r'(?:\s*:\s*|\s)')
    l3 = re.split(c3, s1)
    print(l3)
    # Strip trailing whitespace.
    a = "你好 中国 "
    a = a.rstrip()
    print(a)
    # Convert a string to lower case.
    b = "sdsHOJOK"
    print(b.lower())
    # Difference between single, double and triple quotes:
    # single and double quotes behave the same -- escapes are interpreted.
    a = '-\t-\\-\'-%-/-\n'
    b = "-\t-\\-\'-%-/-\n"
    print(a)
    print(b)
    # Raw string: backslash escapes are kept literally.
    c = r"-\t-\\-\'-%-/-\n"
print(c) | Python | 47 | 22.723404 | 49 | /part-interview/test02.py | 0.431777 | 0.391382 |
wuljchange/interesting_python | refs/heads/master | from collections import defaultdict
counter_words = defaultdict(list)
# Find, for each word in a file, the line numbers on which it occurs.
def locate_word(test_file):
    """Return a mapping word -> list of 1-based line numbers where it appears.

    Bug fix: the original appended into the module-level ``counter_words``,
    so repeated calls (or calls on different files) accumulated stale results.
    A fresh local accumulator is used instead; the return value is unchanged
    for a single call.  The file is also iterated lazily instead of loading
    every line into memory with readlines().
    """
    counts = defaultdict(list)
    with open(test_file, 'r') as f:
        for num, line in enumerate(f, 1):
            # Whitespace-delimited tokens; punctuation stays attached.
            for word in line.split():
                counts[word].append(num)
    return counts
if __name__ == "__main__":
    # Demo: report the line numbers where the word 'test' occurs.
    file = 'data/test.txt'
    ret = locate_word(file)
    print(ret.get('test', []))
| Python | 19 | 22.631578 | 43 | /part-text/test-enumerate.py | 0.621381 | 0.619154 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-08 11:30
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test19.py
# ----------------------------------------------
# 单例模式的 N 种实现方法,就是程序在不同位置都可以且仅可以取到同一个实例
# Singleton implemented as a plain function decorator.
def singleton(cls):
    """Wrap *cls* so every call to the wrapper returns one shared instance."""
    instances = {}
    def get_instance():
        # Construct the one-and-only instance lazily, on first use.
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls()
            return instances[cls]
    return get_instance
@singleton
class Cls(object):
    # __init__ prints only once: singleton() caches the first instance.
    def __init__(self):
        print("__init__")
# Singleton implemented as a class-based decorator.
class Singleton:
    """Callable wrapper that hands out a single shared instance of a class."""
    def __init__(self, cls):
        self._cls = cls
        self._instance = {}
    def __call__(self, *args, **kwargs):
        # Build the instance on the first call only; later calls reuse it.
        cached = self._instance.get(self._cls)
        if cached is None:
            cached = self._cls()
            self._instance[self._cls] = cached
        return cached
@Singleton
class Cls2:
    # Printed once: the Singleton wrapper constructs Cls2 a single time.
    def __init__(self):
        print("__init__2")
# Singleton implemented by overriding __new__.
class Singleton1(object):
    """Singleton via __new__.

    Note: __init__ still runs on *every* ``Singleton1()`` call, even though
    the same instance is returned each time.
    """
    # Class attribute shared by all instantiations; holds the unique instance.
    _instance = None
    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Bug fix: ``object.__new__(cls, *args, **kwargs)`` raises
            # TypeError on Python 3 as soon as any constructor arguments are
            # passed (e.g. by a subclass with an __init__ taking parameters).
            # object.__new__ must be called with the class only.
            cls._instance = super(Singleton1, cls).__new__(cls)
        return cls._instance
    def __init__(self):
        print("__init__3")
# Singleton implemented with a metaclass.
class Singleton3(type):
    """Metaclass caching the first instance of every class that uses it."""
    _instance = {}
    def __call__(cls, *args, **kwargs):
        # The first instantiation goes through type.__call__ (so __new__ and
        # __init__ run normally); afterwards the cached object is returned.
        try:
            return cls._instance[cls]
        except KeyError:
            obj = super(Singleton3, cls).__call__(*args, **kwargs)
            cls._instance[cls] = obj
            return obj
class Singleton4(metaclass=Singleton3):
    # Instantiation is intercepted by Singleton3.__call__, so this runs once.
    def __init__(self):
        print("__init__4")
if __name__ == "__main__":
    # Each pair of constructions must yield the very same object, so every
    # printed comparison below should be True.
    c = Cls()
    d = Cls()
    print(id(c) == id(d))
    e = Cls2()
    f = Cls2()
    print(id(e) == id(f))
    g = Singleton1()
    h = Singleton1()
    print(id(g) == id(h))
    i = Singleton4()
    j = Singleton4()
    print(id(i) == id(j))
wuljchange/interesting_python | refs/heads/master | import numpy as np
if __name__ == "__main__":
    """
    Use the numpy module for element-wise array arithmetic.
    """
    x = [1, 2, 3, 4]
    y = [5, 6, 7, 8]
    # Plain lists: + concatenates and * repeats (no numeric math).
    print(x+y)
    print(x*2)
    nx = np.array(x)
    ny = np.array(y)
    # ndarrays: arithmetic is element-wise and scalars broadcast.
    print(nx*2)
    print(nx+10)
    print(nx+ny)
    print(np.sqrt(nx))
    print(np.cos(nx))
    # 2-D array operations
    a = np.array([[1, 2, 3], [2, 3, 4]])
    # select row 1
    print(a[1])
    # select column 1
    print(a[:, 1])
print(np.where(a > 1, a, 0)) | Python | 25 | 17.32 | 40 | /part-data/test-numpy.py | 0.459519 | 0.407002 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-01 11:28
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test03.py
# ----------------------------------------------
if __name__ == "__main__":
    # 1: de-duplicate list elements via a set (order is not preserved).
    aList = [1, 2, 3, 2, 1]
    b = set(aList)
    print(list(b))
    # 2: splitting a string, trivial.
    s1 = "1,2,3"
    print(s1.split(","))
    # 3: elements common to two lists, and elements unique to each.
    a = [1, 2, 5, 3, 2]
    b = [4, 5, 6, 1, 2]
    common_l = list(set(a) & set(b))
    print(common_l)
    only_in_a = list(set(a) - set(common_l))
    only_in_b = list(set(b) - set(common_l))
    print(only_in_a)
    print(only_in_b)
    # Flatten a nested list with a single comprehension.
    a = [[1, 2], [3, 4], [5, 6]]
    b = [j for i in a for j in i]
    print(b)
    # numpy alternative: flatten(), then convert back to a list.
    import numpy as np
    c = np.array(a).flatten().tolist()
    print(c)
    # Merge lists with list.extend (in place).
    a = [1, 2, 3]
    b = [4, 5, 6]
    a.extend(b)
    print(a)
    # Shuffle a list in place.
    import random
    a = [1, 2, 3, 4, 5]
    random.shuffle(a)
    print(a)
print(random.randint(1, 10)) | Python | 43 | 23.953489 | 48 | /part-interview/test03.py | 0.445896 | 0.392724 |
wuljchange/interesting_python | refs/heads/master | if __name__ == "__main__":
    # Scratch experiments with sets, defaultdict and string joining; the
    # commented-out fragments below are earlier experiments kept for reference.
    names = set()
    dct = {"test": "new"}
    data = ['wulinjiang1', 'test', 'test', 'wulinjiang1']
    print('\n'.join(data))
    from collections import defaultdict
    data1 = defaultdict(list)
    # print(data1)
    # for d in data:
    #     data1[d].append("1")
    # print(data1)
    content = 'aydsad'
    # data1 is empty here, so this loop body never runs.
    for k, v in data1.items():
        print(k)
        content += '\n'.join(v)
        print('\n'.join(v))
    print(content)
    # An empty defaultdict is falsy; only prints if data1 has entries.
    if data1:
        print(True)
    # dct = {"test1": "wulinjiang1",}
    # for i in range(3):
    #     dct.update({'content': i})
    # print(dct)
    # for d in data:
    #     names.add(d)
    # for name in names:
    #     print(name)
    # with open('deployments.yaml') as fp:
    #     content = fp.readlines()
    # print(content[25].format('http://www.baidu.com'))
    # content[25] = content[25].format('http://www.baidu.com')
    # with open('deployments.yaml', 'w') as fp:
    #     for c in content:
    #         fp.writeline
# fp.writeline | Python | 34 | 28.764706 | 62 | /part-yaml/test-file.py | 0.522255 | 0.504451 |
wuljchange/interesting_python | refs/heads/master | from collections import defaultdict
if __name__ == "__main__":
    d = {
        "1": 1,
        "2": 2,
        "5": 5,
        "4": 4,
    }
    print(d.keys())
    print(d.values())
    # NOTE(review): on Python 3 this prints a zip object, not the pairs;
    # wrap it in list() to see the contents.
    print(zip(d.values(), d.keys()))
    # Pairing (value, key) makes max/min compare by value first,
    # with the key as tiebreaker.
    max_value = max(zip(d.values(), d.keys()))
    min_value = min(zip(d.values(), d.keys()))
    print(max_value)
print(min_value) | Python | 17 | 20.17647 | 46 | /part-struct/test-dict.py | 0.476323 | 0.454039 |
wuljchange/interesting_python | refs/heads/master | # ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-07 18:50
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-sanic.py
# ----------------------------------------------
from sanic import Sanic
from sanic import response
from pprint import pprint
app = Sanic()
@app.route('/', methods=['POST'])
async def g(request):
    """POST handler: collect, sort and pretty-print the JSON payload.

    Assumes the body is a JSON object mapping keys to lists of objects
    (dicts) -- TODO confirm against the actual producer of these requests.
    """
    data = request.json
    resp = []
    # Bug fix: iterating a dict directly yields only its keys, so
    # ``for k, v in data`` raised ValueError on unpacking; .items() yields
    # the (key, value) pairs this loop clearly intends to consume.
    for k, v in data.items():
        for d in v:
            resp.append(sorted(d.items()))
    pprint(sorted(resp))
    return response.json(True)
if __name__ == "__main__":
    # Development server; debug=True is not suitable for production.
    app.run(host='0.0.0.0', port=10000, debug=True)
| Python | 28 | 21.428572 | 51 | /part-sanic/test_g_10000.py | 0.488854 | 0.453822 |
opn7d/Lab2 | refs/heads/master | from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# Read the tab-separated sentiment dataset (no header row in the file).
df = pd.read_csv('train.tsv',
                 header=None,
                 delimiter='\t', low_memory=False)
# Label the columns.
df.columns = ['PhraseID', 'SentenceID', 'Phrase', 'Sentiment']
sentences = df['Phrase'].values
y = df['Sentiment'].values
# Bag-of-words features over the 2000 most frequent tokens.
tokenizer = Tokenizer(num_words=2000)
tokenizer.fit_on_texts(sentences)
sentences = tokenizer.texts_to_matrix(sentences)
# Encode sentiment labels as consecutive integers.
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)
# Number of features
# print(input_dim)
# Single hidden layer MLP; input_dim matches the tokenizer's num_words.
model = Sequential()
model.add(layers.Dense(300, input_dim=2000, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['acc'])
history=model.fit(X_train,y_train, epochs=5, verbose=True, validation_data=(X_test,y_test), batch_size=256)
| Python | 31 | 35.612904 | 107 | /Question4 | 0.736564 | 0.715419 |
jfstepha/minecraft-ros | refs/heads/master | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 23:52:09 2013
@author: jfstepha
"""
# parts of this code borrowed from:
# Minecraft save file creator from kinect images created by getSnapshot.py
# By: Nathan Viniconis
#
# in it, he said: "You can use this code freely without any obligation to the original or myself"
from math import sqrt
import sys
from pymclevel import mclevel
from pymclevel.box import BoundingBox
import re
import argparse
import os
import yaml
# Palette of candidate Minecraft blocks as (Name, ID, (RGB1,RGB2,..), Data).
# The RGB tuples are used by getBlockFromColor() to color-match each voxel.
# Commented-out entries are deliberately excluded from matching.
possibleBlocks = ( \
("Smooth Stone", 1, ( \
(125,125, 125),),0), \
("Dirt", 3, ( \
(133,96,66),),0), \
("Cobblestone", 4, ( \
(117,117,117),),0), \
("Wooden Plank", 5, ( \
(156,127,78),),0), \
("Bedrock", 7, ( \
(83,83,83),),0), \
#("Lava", 11, ( \
# (255,200,200),),0), \
("Sand", 12, ( \
(217,210,158),),0), \
("Gravel", 13, ( \
(136, 126, 125),),0), \
("Gold Ore", 14, ( \
(143,139,124),),0), \
("Iron Ore", 15, ( \
(135,130,126),),0), \
("Coal Ore", 16, ( \
(115,115,115),),0), \
("Wood", 17, ( \
(154,125,77),),0), \
("Sponge", 19, ( \
(182,182,57),),0), \
#("Glass", 20, ( \
# (60,66,67),),0), \
("White Wool", 35, ( \
(221,221,221),),0), \
("Orange Wool", 35, ( \
(233,126,55),),1), \
("Magenta Wool", 35, ( \
(179,75,200),),2), \
("Light Blue Wool", 35, ( \
(103,137,211),),3), \
("Yellow Wool", 35, ( \
(192,179,28),),4), \
("Light Green Wool", 35, ( \
(59,187,47),),5), \
("Pink Wool", 35, ( \
(217,132,153),),6), \
("Dark Gray Wool", 35, ( \
(66,67,67),),7), \
("Gray Wool", 35, ( \
(157,164,165),),8), \
("Cyan Wool", 35, ( \
(39,116,148),),9), \
("Purple Wool", 35, ( \
(128,53,195),),10), \
("Blue Wool", 35, ( \
(39,51,153),),11), \
("Brown Wool", 35, ( \
(85,51,27),),12), \
("Dark Green Wool", 35, ( \
(55,76,24),),13), \
("Red Wool", 35, ( \
(162,44,42),),14), \
("Black Wool", 35, ( \
(26,23,23),),15), \
("Gold", 41, ( \
(249,236,77),),0), \
("Iron", 42, ( \
(230,230,230),),0), \
("TwoHalves", 43, (
(159,159,159),),0),
("Brick", 45, ( \
(155,110,97),),0), \
#("TNT", 46, ( \
# (200,50,50),),0), \
("Mossy Cobblestone", 48, ( \
(90,108,90),),0), \
("Obsidian", 49, ( \
(20,18,29),),0), \
("Diamond Ore", 56, ( \
(129,140,143),),0), \
("Diamond Block", 57, ( \
(99,219,213),),0), \
("Workbench", 58, ( \
(107,71,42),),0), \
("Redstone Ore", 73, ( \
(132,107,107),),0), \
#("Ice", 79, ( \
# (125,173,255),),0), \
("Snow Block", 80, ( \
(239,251,251),),0), \
("Clay", 82, ( \
(158,164,176),),0), \
("Jukebox", 84, ( \
(107,73,55),),0), \
("Pumpkin", 86, ( \
(192,118,21),),0), \
("Netherrack", 87, ( \
(110,53,51),),0), \
("Soul Sand", 88, ( \
(84,64,51),),0), \
("Glowstone", 89, ( \
(137,112,64),),0) \
)
# /////////////////////////////////////////////////////////////////////////////
# Calculates the Euclidean distance between two RGB colors
# /////////////////////////////////////////////////////////////////////////////
def getColorDist(colorRGB, blockRGB):
    """Return the Euclidean distance between two (R, G, B) triples.

    The original comment called this a "Manhattan distance", which was
    wrong: the code squares and sums the channel deltas and takes the
    square root.  Plain multiplications replace pow() for clarity/speed.
    """
    dr = colorRGB[0] - blockRGB[0]
    dg = colorRGB[1] - blockRGB[1]
    db = colorRGB[2] - blockRGB[2]
    return sqrt(dr * dr + dg * dg + db * db)
# /////////////////////////////////////////////////////////////////////////////
# For a given RGB color, determines which block should represent it
# /////////////////////////////////////////////////////////////////////////////
def getBlockFromColor(RGB):
    """Return the possibleBlocks entry whose palette color is closest to RGB.

    Returns -1 if no candidate beats the initial distance bound (cannot
    happen with the current palette, but preserved for safety).

    Bug fix: the original advanced its index once per candidate *color* but
    then used that index into the *block* list -- wrong for any entry that
    lists more than one RGB.  Track the best block directly instead.
    """
    best_block = None
    best_dist = 300000
    for block in possibleBlocks:
        for blockRGB in block[2]:
            curDist = getColorDist(RGB, blockRGB)
            # Strict < keeps the first of equally distant candidates,
            # matching the original tie-breaking behavior.
            if curDist < best_dist:
                best_dist = curDist
                best_block = block
    if best_block is None:
        return -1
    return best_block
########################################################
########################################################
class Octomap2Minecraft():
########################################################
########################################################
##########################################
    def __init__(self):
    ##########################################
        # Octomap bounding-box extremes, initialised to impossible sentinel
        # values; read_input() overwrites them and check_empty() detects any
        # that were never set.
        self.min_x = 1e99
        self.min_y = 1e99
        self.min_z = 1e99
        self.max_x = -1e99
        self.max_y = -1e99
        self.max_z = -1e99
        self.size_x = 0
        self.size_y = 0
        self.size_z = 0
        self.resolution = 0
        # Populated by read_settings().
        self.settings = {}
###############################################
def read_settings(self, filename):
###############################################
defaults = {
"level_name" : "robot_octo",
"origin_x" : 0,
"origin_y" : 100,
"origin_z" : 0,
"spawn_x" : 246,
"spawn_y" : 1,
"spawn_z" : 77,
"oversize" : 100,
"clear_height" : 256,
"base_item" : "3:0"}
parser = argparse.ArgumentParser(description='Translate a ROS map to a minecraft world')
parser.add_argument("--settings", default=filename, dest="filename")
for setting in defaults.keys():
parser.add_argument("--"+setting, dest=setting)
args = parser.parse_args()
print( "reading settings from %s" % args.filename)
stream = open(args.filename)
settings_file = yaml.load(stream)
for setting in defaults.keys():
if vars(args)[setting] == None:
if setting in settings_file:
self.settings[ setting ] = settings_file[ setting ]
else:
self.settings[ setting ] = defaults[ setting ]
else:
self.settings[ setting ] = vars(args)[setting]
print( "settings: %s" % (str(self.settings)))
##########################################
def check_empty(self):
##########################################
retval = False
if self.min_x == 1e99:
print "no value for min_x found"
retval = True
if self.min_y == 1e99:
print "no value for min_y found"
retval = True
if self.min_z == 1e99:
print "no value for min_z found"
retval = True
if self.max_x == -1e99:
print "no value for max_x found"
retval = True
if self.max_y == -1e99:
print "no value for max_y found"
retval = True
if self.max_z == -1e99:
print "no value for max_z found"
retval = True
if self.size_x == 0:
print "no value for size_x found"
retval = True
if self.size_y == 0:
print "no value for size_y found"
retval = True
if self.size_z == 0:
print "no value for size_z found"
retval = True
if self.resolution == 0:
print "no value for resolution found"
retval = True
return retval
##########################################
    def read_input(self):
    ##########################################
        # Parse an octomap text dump from stdin.  Expected layout:
        #   line 1: "#octomap dump"
        #   header lines: min/max/size triples and a resolution line
        #   then one "block x y z (r g b) size" line per voxel.
        # The map is initialised as soon as the first "block" line appears.
        print "starting"
        firstline = True
        beforefirstblock = True
        linecount = 0
        actual_min = 256
        print "opening file"
        for line in sys.stdin:
            if firstline:
                firstline = False
                if re.match("^#octomap dump", line) :
                    print "first line found"
                else:
                    print "ERROR: First line is not ""#octomap dump"""
                    exit(-1)
            if beforefirstblock:
                # Header triple: "min: x .. y .. z ..", "max: ...", "size: ...".
                a = re.match("(\w+): x (-?\d+.?\d*) y (-?\d+.?\d*) z (-?\d+.?\d*)", line)
                if a:
                    print("found values: %s" % str(a.groups()))
                    if (a.groups()[0] == 'min'):
                        self.min_x = float(a.groups()[1])
                        self.min_y = float(a.groups()[2])
                        self.min_z = float(a.groups()[3])
                    if (a.groups()[0] == 'max'):
                        self.max_x = float(a.groups()[1])
                        self.max_y = float(a.groups()[2])
                        self.max_z = float(a.groups()[3])
                    if (a.groups()[0] == 'size'):
                        self.size_x = float(a.groups()[1])
                        self.size_y = float(a.groups()[2])
                        self.size_z = float(a.groups()[3])
                a = re.match("resolution: (-?\d+.\d+)", line)
                if a:
                    print("found resolution: %s" % str(a.groups()))
                    self.resolution = float(a.groups()[0])
                if re.match("^block", line):
                    # All header values must be in place before building blocks.
                    if self.check_empty():
                        print "ERROR: not all values found!"
                        exit(-1)
                    self.init_map()
                    beforefirstblock = False
            if beforefirstblock == False:
                # Voxel line: position, RGB color and cube edge length.
                a = re.match("block (-?\d+.?\d*) (-?\d+.?\d*) (-?\d+.?\d*) \((\d+) (\d+) (\d+)\) (-?\d+.?\d*)", line)
                if a:
                    linecount += 1
                    if linecount % 1000 == 0 :
                        print "processed %d lines" % linecount
                    self.add_block(a.groups())
                else:
                    print "ERROR: line improperly formed: %s" % line
        print("saving map")
        self.level.saveInPlace()
###############################################
    def readBlockInfo(self, keyword):
    ###############################################
        # keyword has the form "blockID:data", e.g. "3:0" -> dirt.
        blockID, data = map(int, keyword.split(":"))
        blockInfo = self.level.materials.blockWithID(blockID, data)
        return blockInfo
###############################################
    def create_map(self):
    ###############################################
        # Refuse to clobber an existing world directory or an already
        # installed Minecraft save with the same name.
        if (os.path.exists( self.settings["level_name"])) :
            print("ERROR: %s directory already exists. Delete it or pick a new name" % self.settings["level_name"])
            sys.exit()
        if (os.path.exists( os.getenv("HOME") + "/.minecraft/saves/" + self.settings["level_name"])) :
            print("ERROR: Minecraft world %s already exists. Delete it (at ~/.minecraft/saves/%s) or pick a new name" % (self.settings["level_name"], self.settings["level_name"]))
            sys.exit()
        print("creating map file")
        # NOTE(review): the shell command embeds "level_name" unquoted -- a
        # name with spaces or shell metacharacters will break or inject here.
        os.system("pymclevel/mce.py " + self.settings["level_name"] + " create")
###############################################
    def init_map(self):
    ###############################################
        filename = self.settings["level_name"]
        self.level = mclevel.fromFile(filename)
        # Game type 1 is passed for "Player" -- presumably creative mode;
        # TODO confirm against the pymclevel API.
        self.level.setPlayerGameType(1, "Player")
        pos = [self.settings["spawn_x"], self.settings["spawn_y"], self.settings["spawn_z"]]
        self.level.setPlayerPosition( pos )
        self.level.setPlayerSpawnPosition( pos )
        # Octomap extent in blocks at the map resolution.
        rows = self.size_x / self.resolution
        cols = self.size_y / self.resolution
        o_x = self.settings["origin_x"]
        o_y = self.settings["origin_y"]
        o_z = self.settings["origin_z"]
        ovs = self.settings["oversize"]
        # Working volume, padded by "oversize" on every horizontal side.
        box = BoundingBox( (o_x - ovs, o_y, o_z - ovs ),
                           ( rows + ovs * 2, ovs, cols + ovs * 2))
        print("creating chunks")
        chunksCreated = self.level.createChunksInBox( box )
        print("Created %d chunks" % len( chunksCreated ) )
        print("filling air")
        self.level.fillBlocks( box, self.level.materials.blockWithID(0,0) )
        print("filled %d blocks" % box.volume )
        print("filling base layer")
        # 10-block-thick floor of base_item beneath the whole area.
        box = BoundingBox( (o_x - ovs, o_y - 10, o_z - ovs ),
                           ( rows + ovs * 2, 10, cols + ovs * 2))
        item = self.readBlockInfo( self.settings["base_item"] )
        self.level.fillBlocks( box, item )
        print("filled %d blocks" % box.volume )
###############################################
    def add_block(self, blk):
    ###############################################
        # blk is a regex-match tuple of strings: (x, y, z, r, g, b, size),
        # as produced by read_input().
        o_x = self.settings["origin_x"]
        o_y = self.settings["origin_y"]
        o_z = self.settings["origin_z"]
        blk_size = float(blk[6]) / self.resolution
        # Map octomap coordinates into world block coordinates: x is mirrored
        # against max_x, y/z are offset from the minima.
        x1 = (self.max_x - float(blk[0])) / self.resolution + o_x
        y1 = (float(blk[1]) - self.min_y) / self.resolution + o_y
        z1 = (float(blk[2]) - self.min_z) / self.resolution + o_z
        r = (int(blk[3]))
        g = (int(blk[4]))
        b = (int(blk[5]))
        box = BoundingBox( ( x1, y1, z1 ), (blk_size, blk_size, blk_size) )
        # Pick the palette block whose color best matches this voxel.
        closest_block = getBlockFromColor( ( r,g,b))
        blockID = closest_block[1]
        data = closest_block[3]
        item = self.level.materials.blockWithID(blockID, data)
        self.level.fillBlocks( box, item )
###############################################
    def move_map(self):
    ###############################################
        print("moving to minecraft saves")
        # NOTE(review): same unquoted-shell-argument caveat as create_map().
        os.system("mv %s ~/.minecraft/saves/" % self.settings["level_name"])
if __name__ == "__main__":
    # Pipeline: load settings, create an empty world, stream the octomap
    # dump from stdin into blocks, then install the world into ~/.minecraft.
    o = Octomap2Minecraft()
    o.read_settings("map_octo.yaml")
    o.create_map()
    o.read_input()
    o.move_map()
| Python | 425 | 32.875294 | 180 | /src/octomap_2_minecraft.py | 0.414213 | 0.366791 |
jfstepha/minecraft-ros | refs/heads/master | #!/usr/bin/env python
import re
import numpy
import yaml
import sys
import argparse
# Fix: narrow the bare "except:" (which also swallowed SystemExit and
# KeyboardInterrupt) to the import failure it is meant to explain.
try:
    from pymclevel import mclevel
    from pymclevel.box import BoundingBox
except ImportError:
    print ("\nERROR: pymclevel could not be imported")
    print ("    Get it with git clone git://github.com/mcedit/pymclevel.git\n\n")
    raise
import os
############################################################################
############################################################################
class Map2d2Minecraft():
############################################################################
############################################################################
###############################################
    def __init__(self):
    ###############################################
        # Populated by read_settings(); maps setting name -> value.
        self.settings = {}
###############################################
    def readBlockInfo(self, keyword):
    ###############################################
        # keyword has the form "blockID:data", e.g. "12:0" -> sand.
        blockID, data = map(int, keyword.split(":"))
        blockInfo = self.level.materials.blockWithID(blockID, data)
        return blockInfo
###############################################
def read_settings(self, filename):
###############################################
defaults = {
"level_name" : "robot_map",
"map_file" : "/home/jfstepha/ros_workspace/maps/map_whole_house_13_02_17_fixed.pgm",
"occ_thresh" : 200,
"empty_thresh" : 250,
"empty_item" : "12:0",
"empty_height" : 1,
"occupied_item" : "5:0",
"occupied_height" : 15,
"unexplored_item" : "3:0",
"origin_x" : 0,
"origin_y" : 100,
"origin_z" : 0,
"spawn_x" : 246,
"spawn_y" : 1,
"spawn_z" : 77,
"oversize" : 100,
"clear_height" : 256,
"do_ceiling" : True,
"ceiling_item" : "89:0"}
parser = argparse.ArgumentParser(description='Translate a ROS map to a minecraft world')
parser.add_argument("--settings", default=filename, dest="filename")
for setting in defaults.keys():
parser.add_argument("--"+setting, dest=setting)
args = parser.parse_args()
print( "reading settings from %s" % args.filename)
this_dir, this_file = os.path.split( os.path.realpath(__file__) )
stream = open( os.path.join( this_dir, args.filename ) )
settings_file = yaml.load(stream)
for setting in defaults.keys():
if vars(args)[setting] == None:
if setting in settings_file:
self.settings[ setting ] = settings_file[ setting ]
else:
self.settings[ setting ] = defaults[ setting ]
else:
self.settings[ setting ] = vars(args)[setting]
print( "settings: %s" % (str(self.settings)))
###############################################
    def do_convert(self, image):
    ###############################################
        # Build the world from a grayscale occupancy-grid image: pixels above
        # empty_thresh become floor (plus optional ceiling), pixels below
        # occ_thresh become wall columns, anything between stays on the
        # "unexplored" base layer.
        filename = self.settings["level_name"]
        self.level = mclevel.fromFile(filename)
        self.level.setPlayerGameType(1, "Player")
        pos = [self.settings["spawn_x"], self.settings["spawn_y"], self.settings["spawn_z"]]
        self.level.setPlayerPosition( pos )
        self.level.setPlayerSpawnPosition( pos )
        rows = image.shape[0]
        cols = image.shape[1]
        o_x = self.settings["origin_x"]
        o_y = self.settings["origin_y"]
        o_z = self.settings["origin_z"]
        ovs = self.settings["oversize"]
        # Working volume, padded by "oversize" around the map.
        box = BoundingBox( (o_x - ovs, o_y - ovs, o_z - ovs ),
                           ( rows + ovs * 2, ovs * 2, cols + ovs * 2))
        print("creating chunks")
        chunksCreated = self.level.createChunksInBox( box )
        print("Created %d chunks" % len( chunksCreated ) )
        print("filling air")
        self.level.fillBlocks( box, self.level.materials.blockWithID(0,0) )
        print("filled %d blocks" % box.volume )
        print("filling base layer")
        # 10-block-thick floor of "unexplored" material under everything.
        box = BoundingBox( (o_x - ovs, o_y - 10, o_z - ovs ),
                           ( rows + ovs * 2, 10, cols + ovs * 2))
        item = self.readBlockInfo( self.settings["unexplored_item"] )
        self.level.fillBlocks( box, item )
        print("filled %d blocks" % box.volume )
        print("creating map")
        for r in range( rows ):
            print(" row %d / %d" % (r, rows) );
            for c in range( cols ):
                x = o_x + r
                y = o_y
                z = o_z + c
                # Image row 0 is the top of the map, hence the vertical flip.
                if image[rows-r-1,c] > self.settings["empty_thresh"]:
                    item = self.readBlockInfo( self.settings["empty_item"])
                    self.level.setBlockAt(x,y,z, item.ID)
                    if self.settings["do_ceiling"] :
                        item = self.readBlockInfo( self.settings["ceiling_item"])
                        y2 = y + self.settings["occupied_height"]
                        self.level.setBlockAt(x,y2,z, item.ID)
                if image[rows-r-1,c] < self.settings["occ_thresh"]:
                    # Occupied cell: raise a full-height column of wall blocks.
                    h = self.settings["occupied_height"]
                    item = self.readBlockInfo( self.settings["occupied_item"])
                    box = BoundingBox( (x,y,z),(1,h,1) )
                    self.level.fillBlocks( box, item )
        print("saving map")
        self.level.saveInPlace()
        print("done")
###############################################
def read_pgm(self, filename, byteorder='>'):
###############################################
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return numpy.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
###############################################
    def create_map(self):
    ###############################################
        # Refuse to clobber an existing world directory or installed save.
        if (os.path.exists( self.settings["level_name"])) :
            print("ERROR: %s directory already exists. Delete it or pick a new name" % self.settings["level_name"])
            sys.exit()
        if (os.path.exists( os.getenv("HOME") + "/.minecraft/saves/" + self.settings["level_name"])) :
            print("ERROR: Minecraft world %s already exists. Delete it (at ~/.minecraft/saves/%s) or pick a new name" % (self.settings["level_name"], self.settings["level_name"]))
            sys.exit()
        print("creating map file")
        # NOTE(review): the shell command embeds "level_name" unquoted -- a
        # name with spaces or shell metacharacters will break or inject here.
        os.system("pymclevel/mce.py " + self.settings["level_name"] + " create")
###############################################
    def move_map(self):
    ###############################################
        print("moving to minecraft saves")
        # NOTE(review): same unquoted-shell-argument caveat as create_map().
        os.system("mv %s ~/.minecraft/saves/" % self.settings["level_name"])
if __name__ == "__main__":
    # Pipeline: settings -> read PGM map -> create empty world -> convert
    # occupancy grid to blocks -> install into ~/.minecraft/saves.
    map2d2minecraft = Map2d2Minecraft()
    map2d2minecraft.read_settings("map_2d.yaml")
    image = map2d2minecraft.read_pgm(map2d2minecraft.settings["map_file"], byteorder='<')
    map2d2minecraft.create_map()
    map2d2minecraft.do_convert( image )
    map2d2minecraft.move_map()
| Python | 198 | 39.924244 | 180 | /src/map_2d_2_minecraft.py | 0.448914 | 0.438179 |
Cryptek768/MacGyver-Game | refs/heads/master | import pygame
import random
from Intel import *
# Level class (wall placement and map parsing).
class Level:
    """Loads the maze layout from a text file and draws its walls."""
    # Set up the level: map file path, start position and sprites.
    def __init__(self, map_pool):
        self.map_pool = map_pool
        self.map_structure = []
        self.position_x = 0
        self.position_y = 0
        self.sprite_x = int(0 /30)
        self.sprite_y = int(0 /30)
        self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()
        self.image_Guardian = pygame.image.load(Guardian).convert_alpha()
        self.background = pygame.image.load(Background).convert()
    # Parse the map file into a list of rows of single characters.
    def level(self):
        with open (self.map_pool, "r") as map_pool:
            level_structure = []
            for line in map_pool:
                line_level = []
                for char in line:
                    # Bug fix: the original compared against '/n' (a
                    # two-character string no single char can ever equal),
                    # so newline characters leaked into every row.
                    if char != '\n':
                        line_level.append(char)
                level_structure.append(line_level)
        self.map_structure = level_structure
    # Draw the background, then a wall tile for every '1' in the map.
    def display_wall (self, screen):
        wall = pygame.image.load(Wall).convert_alpha()
        screen.blit(self.background, (0, 0))
        num_line = 0
        for ligne_horiz in self.map_structure:
            num_col = 0
            for ligne_verti in ligne_horiz:
                position_x = num_col * Sprite_Size
                position_y = num_line * Sprite_Size
                if ligne_verti == str(1):
                    screen.blit(wall, (position_x, position_y))
                num_col +=1
            num_line +=1
| Python | 47 | 32.702129 | 73 | /Maze.py | 0.529123 | 0.519926 |
Cryptek768/MacGyver-Game | refs/heads/master | import pygame
import random
from Intel import *
# Item placement class.
class Items:
    # Set up the class: load the three item sprites.
    def __init__(self, map_pool):
        self.item_needle = pygame.image.load(Object_N).convert_alpha()
        self.item_ether = pygame.image.load(Object_E).convert_alpha()
        self.item_tube = pygame.image.load(Object_T).convert_alpha()
    # Item spawning method.
    # NOTE(review): this method cannot work as written -- ``items`` is the
    # module-level list from Intel and is never consumed, so ``while items``
    # loops forever; ``self.map_structure`` is not an Items attribute (Main.py
    # passes a Level instance as ``self``); and ``self.image_(Object_N)``
    # calls a non-existent attribute.  Needs a rewrite before use.
    def items_spawn(self, screen):
        while items:
            rand_x = random.randint(0, 14)
            rand_y = random.randint(0, 14)
            if self.map_structure [rand_x][rand_y] == 0:
                screen.blit(self.image_(Object_N), (rand_x, rand_y))
| Python | 22 | 29.954546 | 70 | /Items.py | 0.594595 | 0.584637 |
Cryptek768/MacGyver-Game | refs/heads/master | # Information des variables Global et des images
# Global constants and image asset paths.
Sprite_Size_Level = 15   # tiles per side of the maze
Sprite_Size = 30         # pixels per tile
Size_Level = Sprite_Size_Level * Sprite_Size  # window size in pixels
Background = 'images/Background.jpg'
Wall = 'images/Wall.png'
MacGyver = 'images/MacGyver.png'
Guardian = 'images/Guardian.png'
Object_N = 'images/Needle.png'
Object_E = 'images/Ether.png'
Object_T = 'images/Tube.png'
# Names of the collectable items (constant names, not the image paths).
items = ["Object_N","Object_E","Object_T"]
| Python | 14 | 27.357143 | 48 | /Intel.py | 0.70073 | 0.690998 |
Cryptek768/MacGyver-Game | refs/heads/master | import pygame
from Intel import *
class Characters:
    """Player (MacGyver) and Guardian sprites plus movement logic.

    NOTE(review): Main.py calls these methods unbound, passing a *Level*
    instance as ``self`` -- so ``self.map_structure`` resolves to Level's
    attribute, and ``self.available_tiles`` in blit_g is never defined
    anywhere.  This coupling should be confirmed before refactoring.
    """
    def __init__(self, map_pool):
        self.map_pool = map_pool
        self.position_x = 0
        self.position_y = 0
        self.sprite_x = int(0 /30)
        self.sprite_y = int(0 /30)
        self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()
        self.image_Guardian = pygame.image.load(Guardian).convert_alpha()
    # Draw MacGyver (the original comment said "Guardian" -- it was swapped).
    def blit_mg(self, screen):
        screen.blit(self.image_Macgyver, (self.position_x, self.position_y))
    # Draw the Guardian (the original comment said "MacGyver" -- swapped).
    # NOTE(review): num_line/num_col are set to 14 and never incremented, so
    # every blit lands on the same tile; likely a bug.
    def blit_g(self, screen):
        num_line = 14
        for line in self.map_structure:
            num_col = 14
            for ligne_verti in line:
                position_x = num_col * Sprite_Size
                position_y = num_line * Sprite_Size
                if ligne_verti == str(3):
                    screen.blit(self.image_Guardian, (position_x, position_y))
                else:
                    # NOTE(review): dead branch -- repeats the condition that
                    # was just false, so this never executes.
                    if ligne_verti == str(3):
                        self.available_tiles.append((num_col, num_line))
    # MacGyver (player) movement: one tile per keypress, blocked by '1' walls.
    def move_mg(self, direction, screen):
        if direction == 'down':
            if self.sprite_y < (Sprite_Size_Level - 1):
                if self.map_structure[self.sprite_y+1][self.sprite_x] != '1':
                    self.position_y += 30
                    self.sprite_y += 1
        elif direction == 'up':
            if self.sprite_y > 0:
                if self.map_structure[self.sprite_y-1][self.sprite_x] != '1':
                    self.position_y -= 30
                    self.sprite_y -= 1
        elif direction == 'left':
            if self.sprite_x > 0:
                if self.map_structure[self.sprite_y][self.sprite_x-1] != '1':
                    self.position_x -= 30
                    self.sprite_x -= 1
        elif direction == 'right':
            if self.sprite_x < (Sprite_Size_Level - 1):
                if self.map_structure[self.sprite_y][self.sprite_x+1] != '1':
                    self.position_x += 30
                    self.sprite_x += 1
| Python | 56 | 42.089287 | 82 | /Characters.py | 0.442284 | 0.426893 |
Cryptek768/MacGyver-Game | refs/heads/master | import pygame
from Maze import *
from Intel import *
from Characters import *
from Items import *
from pygame import K_DOWN, K_UP, K_LEFT, K_RIGHT
# Main game class: event handling, movement and rendering.
class Master:
    # NOTE(review): master() takes no self/cls, so it must be invoked as
    # Master.master(); consider making it a @staticmethod.
    def master():
        pygame.init()
        screen = pygame.display.set_mode((Size_Level, Size_Level))
        maze = Level("Map.txt")
        maze.level()
        # Refresh loop.
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == K_DOWN:
                        Characters.move_mg(maze, 'down', screen)
                    if event.key == K_UP:
                        Characters.move_mg(maze, 'up', screen)
                    if event.key == K_LEFT:
                        Characters.move_mg(maze, 'left', screen)
                    if event.key == K_RIGHT:
                        Characters.move_mg(maze, 'right', screen)
            maze.display_wall(screen)
            # Characters methods are called unbound with the Level instance
            # as `self`; they rely on Level's attributes, not Characters'.
            Characters.blit_mg(maze, screen)
            Characters.move_mg(maze, 'direction', screen)
            Characters.blit_g(maze, screen)
            Items. items_spawn(maze, screen)
            pygame.display.flip()
if __name__ =="__main__":
master()
| Python | 36 | 34.833332 | 66 | /Main.py | 0.512821 | 0.512066 |
daphnejwang/MentoreeMatch | refs/heads/master | import tabledef
from tabledef import Topic
TOPICS = {1: "Arts & Crafts",
2: "Career & Business",
3: "Community & Environment",
4: "Education & Learning",
5: "Fitness",
6: "Food & Drinks",
7: "Health & Well Being",
8: "Language & Ethnic Identity",
9: "Life Experiences",
10: "Literature & Writing",
11: "Motivation",
12: "New Age & Spirituality",
13: "Outdoors & Adventure",
14: "Parents & Family",
15: "Peer Pressure",
16: "Pets & Animals",
17: "Religion & Beliefs",
18: "Self-improvement/Growth",
19: "Sports & Recreation",
20: "Support",
21: "Tech",
22: "Women"}
def seed_topic_table():
topics = []
for items in TOPICS:
topics.append(Topic(title=TOPICS[items]))
print "~~~~~ TOPICS ~~~~~~~"
print topics
tabledef.dbsession.add_all(topics)
tabledef.dbsession.commit()
seed_topic_table() | Python | 36 | 22.361111 | 43 | /Project/topic_seed.py | 0.635714 | 0.594048 |
daphnejwang/MentoreeMatch | refs/heads/master | from flask_oauthlib.client import OAuth
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
import jinja2
import tabledef
from tabledef import *
from sqlalchemy import update
from xml.dom.minidom import parseString
import os
import urllib
import json
from Project import app
import pdb
from tabledef import User
oauth = OAuth(app)
linkedin = oauth.remote_app(
'linkedin',
consumer_key='75ifkmbvuebxtg',
consumer_secret='LAUPNTnEbsBu7axq',
request_token_params={
'scope': 'r_fullprofile,r_basicprofile,r_emailaddress',
'state': 'RandomString',
},
base_url='https://api.linkedin.com/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)
def authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['linkedin_token'] = (resp['access_token'], '')
linkedin_json_string = linkedin.get('people/~:(id,first-name,last-name,industry,headline,site-standard-profile-request,certifications,educations,summary,specialties,positions,picture-url,email-address)')
session['linkedin_id'] = linkedin_json_string.data['id']
tabledef.import_linkedin_user(linkedin_json_string.data)
return jsonify(linkedin_json_string.data)
@linkedin.tokengetter
def get_linkedin_oauth_token():
return session.get('linkedin_token')
def change_linkedin_query(uri, headers, body):
auth = headers.pop('Authorization')
headers['x-li-format'] = 'json'
if auth:
auth = auth.replace('Bearer', '').strip()
if '?' in uri:
uri += '&oauth2_access_token=' + auth
else:
uri += '?oauth2_access_token=' + auth
return uri, headers, body
def save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).update({
'mentor': mentoree_choice,
'age':age_range,
'gender':gender_input,
'description':description_input,
'new_user':False})
for topics in mentor_topics:
mentor_selected_topics = tabledef.MentoreeTopic(topic_id = topics, mentor_id=session['linkedin_id'])
tabledef.dbsession.add(mentor_selected_topics)
return tabledef.dbsession.commit()
def update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
user = tabledef.dbsession.query(User).filter_by(linkedin_id=session['linkedin_id']).first()
user.mentor = mentoree_choice
user.age = age_range
user.gender = gender_input
user.description = description_input
current_selected_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=session['linkedin_id']).all()
for curr_topics in current_selected_topics:
tabledef.dbsession.delete(curr_topics)
# pdb.set_trace()
for topics in mentor_topics:
mentor_selected_topics = tabledef.MentoreeTopic(topic_id = topics, mentor_id=session['linkedin_id'])
tabledef.dbsession.add(mentor_selected_topics)
return tabledef.dbsession.commit()
linkedin.pre_request = change_linkedin_query
| Python | 93 | 35.827957 | 207 | /Project/linkedin.py | 0.697518 | 0.695183 |
daphnejwang/MentoreeMatch | refs/heads/master | import tabledef
from tabledef import User, MentoreeTopic, Topic, Email
import requests
import sqlalchemy
from sqlalchemy import update
import datetime
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
# import pdb
def save_email_info_to_database(sender, mentor, subject, subject_body):
today = datetime.datetime.now()
email_info = tabledef.Email(sender_id=sender, receiver_id=mentor, subject=subject, text_body=subject_body, sent_date=today)
print "!!~~~!!^^^ email info"
print email_info
tabledef.dbsession.add(email_info)
return tabledef.dbsession.commit()
def send_email(sender_email, mentor_email, subject, subject_body):
return requests.post(
"https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
data={"from": sender_email,
"to": mentor_email,
"subject": subject,
"text": subject_body})
def get_email_history_per_mentor(linkedin_id):
email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).filter_by(receiver_id=linkedin_id).all()
return email_hist
def get_sent_email_history_per_sender():
email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).all()
return email_hist
def get_email_history():
email_hist = tabledef.dbsession.query(Email).filter_by(receiver_id=session['linkedin_id']).all()
for mail in email_hist:
print "~!@#$%^&*( email history!! !@#$%^&"
print mail.subject
return email_hist
def get_email_with_id(email_id):
email_id = tabledef.dbsession.query(Email).filter_by(id=email_id).all()
eid = email_id[0]
return eid
def format_json(row):
formatted_json_dict={}
for column in row.__table__.columns:
formatted_json_dict[column.name] = str(getattr(row, column.name))
return formatted_json_dict
def delete_email(id):
deleted_email=tabledef.dbsession.query(Email).filter_by(id=id).first()
tabledef.dbsession.delete(deleted_email)
tabledef.dbsession.commit()
# return requests.post(
# "https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
# auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
# data={"from": "Excited User <me@samples.mailgun.org>",
# "to": "daphnejwang@gmail.com",
# "subject": "Hello",
# "text": "Testing some Mailgun awesomness!"}) | Python | 65 | 36.246155 | 133 | /Project/email_module.py | 0.663636 | 0.655785 |
daphnejwang/MentoreeMatch | refs/heads/master | from flask_oauthlib.client import OAuth
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
import jinja2
import tabledef
from tabledef import User, MentoreeTopic, Topic
import linkedin
from xml.dom.minidom import parseString
import pdb
# from Project import app
def search(searchtopics):
search_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()
return search_results
def search_topic_display(searchtopics):
search_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()
search_topic = tabledef.dbsession.query(tabledef.Topic).filter_by(topic_id=search_results[0].topic_id).first()
search_topic_title = search_topic.title
print search_topic_title
return search_topic_title
def mentor_detail_display(linkedin_id):
# pdb.set_trace()
ment_data = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=linkedin_id).first()
# print "!!~~~~~~~~~~~ment_data.positions[0].positions_title~~~~~~~~~~~~~~~~~~~~~~!!"
# print ment_data.positions[0].positions_title
# ment_data.positions.positions_title
return ment_data
def mentor_personal_topics(linkedin_id):
# pdb.set_trace()
ment_pers_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=linkedin_id).all()
# for topics in ment_pers_topics:
# print "((((((~~~~~~~~~~~topics.topic_id~~~~~~~~~~~~~~~~~~~~~~))"
# print topics.topic_id
return ment_pers_topics
| Python | 38 | 38.052631 | 111 | /Project/search.py | 0.745283 | 0.742588 |
daphnejwang/MentoreeMatch | refs/heads/master | # from flask import Flask, render_template, redirect, request, flash, url_for, session
# import jinja2
# import tabledef
# from tabledef import Users, MentorCareer, MentorSkills
# from xml.dom.minidom import parseString
# import os
# import urllib
# app = Flask(__name__)
# app.secret_key = "topsecretkey"
# app.jinja_env.undefined = jinja2.StrictUndefined
# @app.route("/")
# def index():
# print "hello"
# return "hello"
# @app.route("/login", methods=["GET"])
# def get_userlogin():
# error = None
# f = urllib.urlopen("http://127.0.0.1:5000/login")
# print "!~~~~!~~~~!"
# print f.read()
# # url = os.environ['HTTP_HOST']
# # xmlDoc = parseString(url)
# # print xmlDoc
# # linkedin_auth = {}
# return render_template("login.html", error = error)
# @app.route("/login", methods=["POST"])
# def login_user():
# found_user = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()
# print "found user", found_user
# error = None
# if found_user:
# print "User found"
# session['user'] = found_user.id
# return redirect("/")
# else:
# print "User not found"
# #flash('Invalid username/password.')
# error = "Invalid Username"
# return render_template('login.html', error = error)
# # return redirect("/")
# @app.route("/create_newuser", methods=["GET"])
# def get_newuser():
# return render_template("newuser.html")
# @app.route("/create_newuser", methods=["POST"])
# def create_newuser():
# # print "SESSION", tabledef.dbsession
# user_exists = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()
# print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
# print "USER EXISTS", user_exists
# if user_exists != None:
# flash(" User already exists. Please login")
# return redirect("/create_newuser")
# else:
# user = User(email=request.form['email'], password= request.form['password'], age=request.form['age'], sex=request.form['sex'], occupation=request.form['occupation'], zipcode=request.form['zipcode'])
# tabledef.dbsession.add(user)
# tabledef.dbsession.commit()
# flash("Successfully added new user!")
# return redirect("/")
# if __name__ == "__main__":
# app.run(debug = True) | Python | 68 | 33.514706 | 208 | /Project/mentorsearch.py | 0.604433 | 0.599318 |
daphnejwang/MentoreeMatch | refs/heads/master | import tabledef
from tabledef import User, MentoreeTopic, Topic, Email, Endorsement
import requests
import sqlalchemy
from sqlalchemy import update
import datetime
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
# import pdb
def save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body):
today = datetime.datetime.now()
endorsement_info = tabledef.Endorsement(sender_id=sender, receiver_id=mentor, title=endorsement_title, endorsements_text=endorsement_body, sent_date=today)
print "!!~~~!!^^^ endorsement_info info"
print endorsement_info
tabledef.dbsession.add(endorsement_info)
return tabledef.dbsession.commit()
def get_endorsement_info_per_mentor(linkedin_id):
endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=linkedin_id).all()
# for endorsements in endorsement_hist:
# print "!^^^^^^^^^^^^^^^^endorsement history!! ^^^^^^^^^^^^^^^^^^^^^"
# print endorsements.sender.picture_url
return endorsement_hist
def get_endorsement_info_for_self():
profile_endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=session['linkedin_id']).all()
for endorsements in profile_endorsement_hist:
print "!^^^^^^^^^^^^^^^^endorsements_text!!^^^^^^^^^^^^^^^^"
print endorsements.endorsements_text
return profile_endorsement_hist
| Python | 31 | 45.064518 | 159 | /Project/endorsements.py | 0.720588 | 0.720588 |
daphnejwang/MentoreeMatch | refs/heads/master | from flask_oauthlib.client import OAuth
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
import jinja2
import tabledef
import search
from tabledef import User, MentoreeTopic, Topic
import linkedin
from xml.dom.minidom import parseString
from Project import app
import json
from flask import redirect
import pagination
import email_module
import endorsements
app.debug = True
app.secret_key = 'iLoveHelloKitty'
# Pagination
PER_PAGE = 5
def url_for_other_page(page, mentee_topic_choice):
args = dict(request.view_args.items() + request.args.to_dict().items())
args['page'] = page
args['mentee_topic_choice'] = mentee_topic_choice
return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
# LOGIN Pages
@app.route('/login')
def login():
return linkedin.linkedin.authorize(callback=url_for('get_linkedin_data', _external=True))
@app.route('/logout')
def logout():
session.pop('linkedin_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
@linkedin.linkedin.authorized_handler
def get_linkedin_data(resp):
user_json = linkedin.authorized(resp)
user_json = user_json.data
user_string = json.loads(user_json)
user = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=user_string["id"]).first()
if user and user.new_user:
return redirect(url_for('addinfo_page'))
# print linkedin.authorize(callback=url_for('authorized', _external=True))
return redirect(url_for('index'))
# HOME & ACCOUNT CREATION Pages
@app.route('/')
def homepage():
return render_template('home_page.html')
@app.route('/home')
def index():
if 'linkedin_token' in session:
me = linkedin.linkedin.get('people/~')
jsonify(me.data)
# linkedin_data = json.loads(linkedin_json_string)
topics = tabledef.Topic.query.order_by("topic_id").all()
return render_template('index.html', topics=topics)
return redirect(url_for('login'))
@app.route('/additionalinfo', methods=["GET"])
def addinfo_page():
return render_template('additionalinfo.html')
@app.route('/additionalinfo', methods=["POST"])
def addinfo():
mentoree_choice = request.form.get('mentoree-radios')
age_range = request.form.get('agerange')
gender_input = request.form.get('gender_radios')
description_input = request.form.get('description')
mentor_topics = request.form.getlist('mentortopics')
linkedin.save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)
# current_user = tabledef.dbsession.query(tabledef.User).filter_by(linkedintoken=session['linkedin_token']).first()
return redirect(url_for('index'))
@app.route('/home', defaults={'page': 1}, methods=["POST"])
@app.route('/home/page/<int:page>/<mentee_topic_choice>')
def search_results(page, mentee_topic_choice = None):
mentee_topic_choice = mentee_topic_choice or request.form.get('searchtopics')
print "~~~~~~~~~~~~~~~~mentee_topic_choice"
print mentee_topic_choice
mentor_data = search.search(mentee_topic_choice)
if mentor_data:
start_index = (page - 1) * (PER_PAGE)
end_index = (page) * (PER_PAGE)
ment_count = len(mentor_data)
users = mentor_data[start_index:end_index]
# users = mentor_data.paginate(page, PER_PAGE, False)
if not users and page != 1:
abort(404)
pagination_per_page = pagination.Pagination(page, PER_PAGE, ment_count)
search_topic = search.search_topic_display(mentee_topic_choice)
return render_template('searchresults.html', search_topic_display=search_topic,
pagination=pagination_per_page, users=users, mentee_topic_choice=mentee_topic_choice)
messages = flash('Sorry! There are no mentors under this search topic')
return redirect(url_for('index'))
# MENTOR DETAIL PAGES
@app.route('/mentor_detail/<linkedin_id>', methods=["GET"])
def mentor_page(linkedin_id):
ment_data = search.mentor_detail_display(linkedin_id)
user_data = search.mentor_detail_display(session['linkedin_id'])
endorsement_history = endorsements.get_endorsement_info_per_mentor(linkedin_id)
return render_template('mentor_detail.html', ment_data=ment_data, user_data=user_data, endorsement_history=endorsement_history)
@app.route('/mentor_detail', methods=["POST"])
def add_endorsement():
sender = session['linkedin_id']
sender_data= search.mentor_detail_display(sender)
mentor = request.form.get('mentor_id')
print "~~~~~~~~~~~~~~~~MENTOR ID on main"
print mentor
mentor_data = search.mentor_detail_display(mentor)
endorsement_title = request.form.get('endorsement_title')
endorsement_body = request.form.get('endorsement_txt')
endorsements.save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body)
return redirect(url_for('mentor_page', linkedin_id=mentor))
# SELF PROFILE PAGES
@app.route('/profile', methods=["GET"])
def self_page():
if 'linkedin_id' in session:
ment_data = search.mentor_detail_display(session['linkedin_id'])
profile_endorsement_hist = endorsements.get_endorsement_info_for_self()
return render_template('self_profile.html', ment_data=ment_data, profile_endorsement_hist=profile_endorsement_hist)
return redirect(url_for('login'))
@app.route('/profile', methods=["POST"])
def update_self_page():
if 'linkedin_id' in session:
ment_data = search.mentor_detail_display(session['linkedin_id'])
update_data = tabledef.update_linkedin_user()
return render_template('self_profile.html', ment_data=ment_data)
return redirect(url_for('self_page'))
@app.route('/edit_profile', methods=["GET"])
def mentor_page_update():
if 'linkedin_id' in session:
ment_data = search.mentor_detail_display(session['linkedin_id'])
ment_pers_topics = search.mentor_personal_topics(session['linkedin_id'])
topics = tabledef.Topic.query.order_by("topic_id").all()
return render_template('edit_self_profile.html', ment_data=ment_data, ment_pers_topics=ment_pers_topics, topics=topics)
return redirect(url_for('login'))
@app.route('/edit_profile', methods=["POST"])
def mentor_page_update_post():
mentoree_choice = request.form.get('mentoree-radios')
age_range = request.form.get('agerange')
gender_input = request.form.get('gender_radios')
description_input = request.form.get('description')
mentor_topics = request.form.getlist('mentortopics')
linkedin.update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)
return redirect(url_for('self_page'))
# EMAIL FORM Page
@app.route('/email/<linkedin_id>', methods=["GET"])
def email_get(linkedin_id):
ment_data = search.mentor_detail_display(linkedin_id)
user_data = search.mentor_detail_display(session['linkedin_id'])
email_history = email_module.get_email_history_per_mentor(linkedin_id)
return render_template('email_form.html', ment_data=ment_data, user_data=user_data, email_history=email_history)
@app.route('/email', methods=["POST"])
def email_post():
sender = session['linkedin_id']
sender_data= search.mentor_detail_display(sender)
sender_email = sender_data.email
mentor = request.form.get('mentor_id')
mentor_data = search.mentor_detail_display(mentor)
mentor_email = mentor_data.email
subject = request.form.get('subject')
subject_body = request.form.get('message')
email_module.save_email_info_to_database(sender, mentor, subject, subject_body)
email_module.send_email(sender_email, mentor_email, subject, subject_body)
messages = flash('Success! Your message has been sent successfully.')
return redirect(url_for('email_get', linkedin_id=mentor, messages=messages))
# EMAIL INBOX Page
@app.route('/email_history', methods=["GET"])
def email_history():
user_data = search.mentor_detail_display(session['linkedin_id'])
email_history = email_module.get_email_history()
return render_template('email_history.html', user_data=user_data, email_history=email_history)
@app.route('/email_sent_history', methods=["GET"])
def email_sent_history():
user_data = search.mentor_detail_display(session['linkedin_id'])
email_history = email_module.get_sent_email_history_per_sender()
return render_template('email_sent_history.html', user_data=user_data, email_history=email_history)
@app.route('/email_detail/<email_id>', methods=["GET"])
def email_detail(email_id):
eid = email_module.get_email_with_id(email_id)
email_selected = {}
email_selected["id"] = eid.id
email_selected["receiver_id"] = eid.receiver_id
email_selected["sender_id"] = eid.sender_id
email_selected["sent_date"] = eid.sent_date.strftime("%d/%m/%Y")
email_selected["subject"] = eid.subject
email_selected["text_body"] = eid.text_body
email_selected["sender"] = {}
email_selected["sender"]["first_name"] = eid.sender.first_name
email_selected["sender"]["last_name"] = eid.sender.last_name
return json.dumps(email_selected)
@app.route('/delete_email/<int:id>', methods=["GET"])
def delete_email(id):
if 'linkedin_id' not in session:
return 'error'
email_module.delete_email(id)
return str(id)
@app.route('/about', methods=["GET"])
def about_us():
return render_template('about_us.html')
| Python | 241 | 38.489628 | 131 | /Project/main.py | 0.698803 | 0.697963 |
daphnejwang/MentoreeMatch | refs/heads/master | from Project import app
# app.run(debug=True)
app.run(debug=True)
app.secret_key = 'development'
| Python | 5 | 18.6 | 30 | /server.py | 0.744898 | 0.744898 |
daphnejwang/MentoreeMatch | refs/heads/master | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Boolean, Text, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker, scoped_session
import pdb
import os
DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///mentoring.db")
engine = create_engine(DATABASE_URL, echo=False)
dbsession = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))
Base = declarative_base()
Base.query = dbsession.query_property()
### Class declarations
class User(Base):
__tablename__ = "users"
# use linkedin ID, therefore never duplicating a user
linkedin_id = Column(String(50), primary_key = True)
linkedintoken = Column(String(50), nullable = True)
new_user = Column(Boolean, nullable = True)
first_name = Column(String(64), nullable = True)
last_name = Column(String(64), nullable = True)
email = Column(String(255), nullable = True)
#~~~# Data From Additional Info Page
mentor = Column (Boolean, nullable = True)
age = Column(String(50), nullable = True)
gender = Column(String(50), nullable = True)
description = Column(String(1000), nullable = True)
#~~~#
industry = Column(String(64), nullable = True)
headline = Column(String(100), nullable = True)
picture_url = Column(String(200), nullable = True)
certifications = Column(String(200), nullable = True)
summary = Column(String(500), nullable=True)
educations = relationship("Education")
positions = relationship("Position")
def import_linkedin_user(data):
user = User();
# parsing siteStandardProfileRequest to get authToken
user.linkedin_id = data.get('id',None)
user.new_user = True
token = data.get('siteStandardProfileRequest', None)
if token != None:
token_data = token['url']
start = token_data.find('authToken=')+10
end = token_data.find('=api', start)
user.linkedintoken = token_data[start:end]
user.first_name = data.get('firstName', None)
user.last_name = data.get('lastName', None)
user.email = data.get('emailAddress', None)
user.industry = data.get('industry', None)
user.headline = data.get('headline',None)
educations = data.get('educations',None)
education_models = []
# pdb.set_trace()
ed_values = educations.get('values',None)
if ed_values != None:
for entry in ed_values:
education = Education()
education.linkedin_id = user.linkedin_id
if 'startDate' in entry:
edstartyear = entry['startDate']['year']
# print edstartyear
education.educations_start_year = edstartyear
if 'endDate' in entry:
edendyear = entry['endDate']['year']
# print edendyear
education.educations_end_year = edendyear
if 'schoolName' in entry:
schlname = entry['schoolName']
# print schlname
education.educations_school_name = schlname
if 'fieldOfStudy' in entry:
edfield = entry['fieldOfStudy']
# print edfield
education.educations_field_of_study = edfield
if 'degree' in entry:
eddegree = entry['degree']
# print eddegree
education.educations_degree = eddegree
education_models.append(education)
positions = data.get('positions',None)
position_models = []
pos_values = positions.get('values',None)
if pos_values != None:
for entry in pos_values:
position = Position()
position.linkedin_id = user.linkedin_id
if 'startDate' in entry:
posstartyear = entry['startDate']['year']
# print posstartyear
position.positions_start_year = posstartyear
if 'endDate' in entry:
posendyear = entry['endDate']['year']
# print posendyear
position.positions_end_year = posendyear
if 'title' in entry:
postitle = entry['title']
# print postitle
position.positions_title = postitle
if 'company' in entry:
co_entry = entry['company']
if 'name' in co_entry:
print "~~~~~~~~~~~~~~~~~~~~~~ company name"
print entry
print entry['company']
coname = entry['company']['name']
print coname
position.positions_company_name = coname
position_models.append(position)
cert = data.get('certifications',None)
if cert != None:
cert_name = cert['values'][0]['name']
user.certifications = cert_name
mentor_topics = MentoreeTopic()
mentor_topics.linkedin_id = user.linkedin_id
user.summary = data.get('summary',None)
user.picture_url = data.get('pictureUrl', None)
current_user_id = user.linkedin_id
# print "~~!!^_^!!~~"
existing_user = dbsession.query(User).filter_by(linkedin_id = current_user_id).first()
if existing_user == None:
dbsession.add(user)
dbsession.add(mentor_topics)
for model in education_models:
# print "model"
# print model
dbsession.add(model)
for models in position_models:
dbsession.add(models)
dbsession.commit()
return user
def update_linkedin_user(data):
user = dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).first();
# parsing siteStandardProfileRequest to get authToken
user.linkedin_id = data.get('id',None)
user.new_user = True
token = data.get('siteStandardProfileRequest', None)
if token != None:
token_data = token['url']
start = token_data.find('authToken=')+10
end = token_data.find('=api', start)
user.linkedintoken = token_data[start:end]
user.first_name = data.get('firstName', None)
user.last_name = data.get('lastName', None)
user.email = data.get('emailAddress', None)
user.industry = data.get('industry', None)
user.headline = data.get('headline',None)
educations = data.get('educations',None)
education_models = []
# pdb.set_trace()
ed_values = educations.get('values',None)
if ed_values != None:
for entry in ed_values:
education = Education()
education.linkedin_id = user.linkedin_id
if 'startDate' in entry:
edstartyear = entry['startDate']['year']
# print edstartyear
education.educations_start_year = edstartyear
if 'endDate' in entry:
edendyear = entry['endDate']['year']
# print edendyear
education.educations_end_year = edendyear
if 'schoolName' in entry:
schlname = entry['schoolName']
# print schlname
education.educations_school_name = schlname
if 'fieldOfStudy' in entry:
edfield = entry['fieldOfStudy']
# print edfield
education.educations_field_of_study = edfield
if 'degree' in entry:
eddegree = entry['degree']
# print eddegree
education.educations_degree = eddegree
education_models.append(education)
positions = data.get('positions',None)
position_models = []
pos_values = positions.get('values',None)
if pos_values != None:
for entry in pos_values:
position = Position()
position.linkedin_id = user.linkedin_id
if 'startDate' in entry:
posstartyear = entry['startDate']['year']
# print posstartyear
position.positions_start_year = posstartyear
if 'endDate' in entry:
posendyear = entry['endDate']['year']
# print posendyear
position.positions_end_year = posendyear
if 'title' in entry:
postitle = entry['title']
# print postitle
position.positions_title = postitle
if 'company' in entry:
co_entry = entry['company']
if 'name' in co_entry:
print "~~~~~~~~~~~~~~~~~~~~~~ company name"
print entry
print entry['company']
coname = entry['company']['name']
print coname
position.positions_company_name = coname
position_models.append(position)
cert = data.get('certifications',None)
if cert != None:
cert_name = cert['values'][0]['name']
user.certifications = cert_name
mentor_topics = MentoreeTopic()
mentor_topics.linkedin_id = user.linkedin_id
user.summary = data.get('summary',None)
user.picture_url = data.get('pictureUrl', None)
current_user_id = user.linkedin_id
# print "~~!!^_^!!~~"
existing_user = dbsession.query(User).filter_by(linkedin_id = current_user_id).first()
if existing_user == None:
dbsession.add(user)
dbsession.add(mentor_topics)
for model in education_models:
# print "model"
# print model
dbsession.add(model)
for models in position_models:
dbsession.add(models)
dbsession.commit()
return user
class Education(Base):
__tablename__="educations"
id = Column(Integer, primary_key=True)
linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = True)
# educations
educations_start_year = Column(Integer, nullable = True)
educations_end_year = Column(Integer, nullable = True)
educations_school_name = Column(String(200), nullable = True)
educations_field_of_study = Column(String(200), nullable = True)
educations_degree = Column(String(200), nullable = True)
# ment_user = relationship("User", backref=backref("educations", order_by=id))
class Position(Base):
__tablename__="positions"
id = Column(Integer, primary_key=True)
linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = True)
positions_start_year = Column(Integer, nullable = True)
positions_end_year = Column(Integer, nullable = True)
positions_company_name = Column(String(200), nullable = True)
positions_industry = Column(String(200), nullable = True)
positions_title = Column(String(200), nullable = True)
# ment_user = relationship("User", backref=backref("positions", order_by=id))
class MentoreeTopic(Base):
__tablename__ = "mentoree_topics"
id = Column(Integer, primary_key=True)
topic_id = Column(Integer, ForeignKey('topics.topic_id'), nullable=True)
mentor_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=True)
ment_user = relationship("User", backref=backref("mentoree_topics", order_by=id))
class Topic(Base):
__tablename__ = "topics"
topic_id = Column(Integer, primary_key=True)
title = Column(String(100), nullable=True)
class Endorsement(Base):
__tablename__ = "endorsements"
id = Column(Integer, primary_key=True)
sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)
receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)
title = Column(String(100), nullable=True)
endorsements_text = Column(String(500), nullable=True)
sent_date = Column(DateTime, nullable=True)
sender = relationship("User", primaryjoin="User.linkedin_id==Endorsement.sender_id")
receiver = relationship("User", primaryjoin="User.linkedin_id==Endorsement.receiver_id")
class Email(Base):
__tablename__ = "emails"
id = Column(Integer, primary_key=True)
sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)
receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)
subject = Column(String(100), nullable=True)
text_body = Column(String(50000), nullable=True)
sent_date = Column(DateTime, nullable=True)
sender = relationship("User", primaryjoin="User.linkedin_id==Email.sender_id")
receiver = relationship("User", primaryjoin="User.linkedin_id==Email.receiver_id")
class Quote(Base):
__tablename__ = "quotes"
id = Column(Integer, primary_key=True)
quote_author = Column(String(100), nullable=True)
quote = Column(String(10000), nullable=True)
def createTable():
Base.metadata.create_all(engine)
def main():
"""In case we need this for something"""
pass
if __name__ == "__main__":
main()
| Python | 339 | 37.292034 | 96 | /Project/tabledef.py | 0.610739 | 0.603343 |
daphnejwang/MentoreeMatch | refs/heads/master | #import tabledef
#from tabledef import User, MentoreeTopic, Topic
import requests
print requests
# import pdb
# def send_message(recipient, subject, text):
# return requests.post(
# "https://api.mailgun.net/v2/samples.mailgun.org/messages",
# auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
# data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>",
# "to": recipient.email_address,
# "subject": subject,
# "text": "Testing some Mailgun awesomness!"})
def send_message():
# pdb.set_trace()
print dir(requests)
x = requests.post(
"https://api.mailgun.net/v2/samples.mailgun.org/messages",
auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>",
"to": "Daphnejwang@gmail.com",
"subject": "testing email",
"text": "Testing some Mailgun awesomness!"})
return 'hi'
# key = 'YOUR API KEY HERE'
# sandbox = 'YOUR SANDBOX URL HERE'
# recipient = 'YOUR EMAIL HERE'
# request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(sandbox)
# request = requests.post(request_url, auth=('api', key), data={
# 'from': 'hello@example.com',
# 'to': recipient,
# 'subject': 'Hello',
# 'text': 'Hello from Mailgun'
# })
# print 'Status: {0}'.format(request.status_code)
# print 'Body: {0}'.format(request.text)
send_message()
| Python | 43 | 29.953489 | 73 | /Project/email_.py | 0.583772 | 0.579264 |
JUNGEEYOU/QuickSort | refs/heads/master | def quick_sort(array):
"""
분할 정복을 이용한 퀵 정렬 재귀함수
:param array:
:return:
"""
if(len(array)<2):
return array
else:
pivot = array[0]
less = [i for i in array[1:] if i <= pivot]
greater = [i for i in array[1:] if i > pivot]
return quick_sort(less) + [pivot] + quick_sort(greater)
exam1 = [4, 2, 1, 7, 10]
print(quick_sort(exam1))
| Python | 16 | 23.625 | 63 | /1_basic_quick_sort.py | 0.527919 | 0.497462 |
JUNGEEYOU/QuickSort | refs/heads/master | def sum_func(arr):
"""
:param arr:
:return:
"""
if len(arr) <1:
return 0
else:
return arr[0] + sum_func(arr[1:])
arr1 = [1, 4, 5, 9]
print(sum_func(arr1)) | Python | 14 | 13.142858 | 41 | /2_sum_function.py | 0.467005 | 0.416244 |
JUNGEEYOU/QuickSort | refs/heads/master | def find_the_largest_num(arr):
"""
:param arr:
:return:
"""
| Python | 6 | 11.833333 | 30 | /3_find_the_largest_num.py | 0.480519 | 0.480519 |
Terfno/tdd_challenge | refs/heads/master | import sys
import io
import unittest
from calc_price import Calc_price
from di_sample import SomeKVSUsingDynamoDB
class TestCalculatePrice(unittest.TestCase):
def test_calculater_price(self):
calc_price = Calc_price()
assert 24 == calc_price.calculater_price([10, 12])
assert 62 == calc_price.calculater_price([40, 16])
assert 160 == calc_price.calculater_price([100, 45])
assert 171 == calc_price.calculater_price([50, 50, 55])
assert 1100 == calc_price.calculater_price([1000])
assert 66 == calc_price.calculater_price([20,40])
assert 198 == calc_price.calculater_price([30,60,90])
assert 40 == calc_price.calculater_price([11,12,13])
def test_input_to_data(self):
calc_price = Calc_price()
input = io.StringIO('10,12,3\n40,16\n100,45\n')
calc_price.input_to_data(input)
input = io.StringIO('1,25,3\n40,16\n\n100,45\n')
calc_price.input_to_data(input)
def test_calculater(self):
calc_price = Calc_price()
self.assertEqual(calc_price.calculater(io.StringIO('1,25,3\n40,16\n\n100,45\n')),[32,62,0,160])
| Python | 31 | 36.322582 | 103 | /test/calc_price.py | 0.641314 | 0.547969 |
Terfno/tdd_challenge | refs/heads/master | class STACK():
def isEmpty(self):
return True
def top(self):
return 1
| Python | 5 | 17.799999 | 22 | /stack.py | 0.542553 | 0.531915 |
Terfno/tdd_challenge | refs/heads/master | import sys
class Calc_price():
def calculater_price(self, values):
round=lambda x:(x*2+1)//2
sum = 0
for value in values:
sum += int(value)
ans = sum * 1.1
ans = int(round(ans))
return ans
def input_to_data(self, input):
result = []
lines = []
input = input.read()
input = input.split('\n')
for i in input:
i = i.split(',')
lines.append(i)
lines.pop(-1)
for i in lines:
if i == [''] :
result.append([])
continue
result.append(list(map(lambda x: int(x), i)))
return result
def calculater(self,input):
result = []
input = self.input_to_data(input)
for i in input:
result.append(self.calculater_price(i))
return result
if __name__ == '__main__':
calc_price = Calc_price()
print(calc_price.calculater(sys.stdin))
| Python | 39 | 24.282051 | 57 | /calc_price.py | 0.483773 | 0.476673 |
Terfno/tdd_challenge | refs/heads/master | import unittest
from stack import STACK
class TestSTACK(unittest.TestCase):
@classmethod
def setUpClass(cls):
stack=STACK()
def test_isEmpty(self):
self.assertEqual(stack.isEmpty(), True)
def test_push_top(self):
self.assertEqual(stack.top(),1)
| Python | 13 | 21.153847 | 47 | /test/stack.py | 0.666667 | 0.663194 |
ksoltan/robot_learning | refs/heads/master | #!/usr/bin/env python
from keras.models import load_model
import tensorflow as tensorflow
# import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
# import glob
# from PIL import Image
# from scipy.misc import imread, imresize
import rospy
import cv2 # OpenCV
from sensor_msgs.msg import CompressedImage, LaserScan
from visualization_msgs.msg import Marker, MarkerArray
from geometry_msgs.msg import PoseArray, Point, PoseStamped, Pose, PoseWithCovarianceStamped
from neato_node.msg import Bump
import tf
# from data_processing_utilities.msgs import ImageScanStamped
class MLTag(object):
# TODO: Add cmd_vel command based on where person supposedly is
# TODO: Add logic if robot does not see person
# TODO: Tag logic
def __init__(self, model_name='convolutional_model_v5.h5'):
rospy.init_node("ml_tag_node")
self.my_model = load_model(model_name)
self.my_graph = tensorflow.get_default_graph()
self.scan_ranges = []
self.is_tagger = True # Switch state based on whether robot is tagging or running away
self.got_scan = False
self.ready_to_process = False
self.camera_subscriber = rospy.Subscriber("/camera/image_raw/compressed", CompressedImage, self.process_image)
self.scan_subscriber = rospy.Subscriber("/scan", LaserScan, self.process_scan)
self.bump_subscriber = rospy.Subscriber("/bump", Bump, self.process_bump)
# Publisher for logging
self.object_from_scan_publisher = rospy.Publisher("/object_from_scan", PoseStamped, queue_size=10)
# Transform
self.tf_listener = tf.TransformListener()
# Visuzaliations
self.position_publisher = rospy.Publisher('/positions_pose_array', PoseArray, queue_size=10)
self.position_pose_array = PoseArray()
self.position_pose_array.header.frame_id = "base_link"
# self.image_scan_publisher = rospy.Publisher('/image_scan_pose', ImageScanStamped, queue_size=10)
# self.last_scan_msg = None
# self.last_image_msg = None
self.object_publisher = rospy.Publisher('/object_marker', Marker, queue_size=10)
self.my_object_marker = Marker()
self.my_object_marker.header.frame_id = "base_link"
self.my_object_marker.color.a = 0.5
self.my_object_marker.color.g = 1.0
self.my_object_marker.type = Marker.SPHERE
self.my_object_marker.scale.x = 0.25
self.my_object_marker.scale.y = 0.25
self.my_object_marker.scale.z = 0.25
self.model_object_publisher = rospy.Publisher('/model_object_marker', Marker, queue_size=10)
self.my_model_object_marker = Marker()
self.my_model_object_marker.header.frame_id = "base_link"
self.my_model_object_marker.color.a = 0.5
self.my_model_object_marker.color.b = 1.0
self.my_model_object_marker.type = Marker.SPHERE
self.my_model_object_marker.scale.x = 0.25
self.my_model_object_marker.scale.y = 0.25
self.my_model_object_marker.scale.z = 0.25
def process_image(self, compressed_image_msg):
# Display compressed image:
# http://wiki.ros.org/rospy_tutorials/Tutorials/WritingImagePublisherSubscriber
#### direct conversion to CV2 ####
# if(self.got_scan and not self.ready_to_process):
np_arr = np.fromstring(compressed_image_msg.data, np.uint8)
image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
# Show image
cv2.imshow('cv_img', image_np)
cv2.waitKey(2)
# Resize image
height, width = image_np.shape[:2]
new_width = 200
new_height = int(height * new_width * 1.0 / width)
image_np_resized = cv2.resize(image_np, (new_width, new_height), interpolation = cv2.INTER_CUBIC)
img_tensor = np.expand_dims(image_np_resized, axis=0) # Add 4th dimension it expects
with self.my_graph.as_default():
# Without using graph, it gives error: Tensor is not an element of this graph.
# Could fix this by not doing image processing in the callback, and in the main run loop.
# https://stackoverflow.com/questions/47115946/tensor-is-not-an-element-of-this-graph
predicted = self.my_model.predict(img_tensor)
# print("Model predict: x: {}, y:{}, theta: {}".format(predicted[0][0], predicted[0][1], math.degrees(math.atan2(predicted[0][0], predicted[0][1]))))
self.my_model_object_marker.pose.position.x = predicted[0][0]
self.my_model_object_marker.pose.position.y = predicted[0][1]
self.model_object_publisher.publish(self.my_model_object_marker)
# self.last_image_msg = compressed_image_msg
# self.got_scan = False
# self.ready_to_process = True
def process_scan(self, scan_msg):
self.scan_ranges = scan_msg.ranges
self.visualize_positions_in_scan()
self.visualize_object_from_scan()
# if(not self.ready_to_process):
# self.scan_ranges = scan_msg.ranges
# self.last_scan_msg = scan_msg
# self.got_scan = True
def process_bump(self, bump_msg):
pass
def find_poses_in_scan(self):
# Use front field of view of the robot's lidar to detect a person's x, y offset
field_of_view = 40
maximum_range = 2 # m
# Cycle through ranges and filter out 0 or too far away measurements
# Calculate the x, y coordinate of the point the lidar detected
poses = []
for angle in range(-1 * field_of_view, field_of_view):
r = self.scan_ranges[angle]
# print("angle: {}, r = {}".format(angle, r))
if(r > 0 and r < maximum_range):
try:
# Confirm that transform exists.
(trans,rot) = self.tf_listener.lookupTransform('/base_link', '/base_laser_link', rospy.Time(0))
# Convert angle to radians. Adjust it to compensate for lidar placement.
theta = math.radians(angle + 180)
x_pos = r * math.cos(theta)
y_pos = r * math.sin(theta)
# Use transform for correct positioning in the x, y plane.
p = PoseStamped()
p.header.stamp = rospy.Time.now()
p.header.frame_id = 'base_laser_link'
p.pose.position.x = x_pos
p.pose.position.y = y_pos
p_model = PoseStamped()
p_model.header.stamp = rospy.Time.now()
p_model.header.frame_id = 'base_laser_link'
p_model.pose.position.x = self.my_model_object_marker.pose.position.x
p_model.pose.position.y = self.my_model_object_marker.pose.position.y
p_base_link = self.tf_listener.transformPose('base_link', p)
p_model_base_link = self.tf_listener.transformPose('base_link', p_model)
# print("{}, {} at angle {}".format(p_base_link.pose.position.x, p_base_link.pose.position.y, math.degrees(theta)))
print("Lidar predict: x: {}, y:{}, theta: {}".format(p_base_link.pose.position.x, p_base_link.pose.position.y, math.degrees(theta)))
print("Lidar predict: x: {}, y:{}, theta: {}".format(p_model_base_link.pose.position.x, p_model_base_link.pose.position.y, math.degrees(theta)))
# Only care about the pose
poses.append(p_base_link.pose)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
# Return a list of poses (no header)
return poses
def find_object_from_scan(self):
# Get the x, y coordinates of objects in the field of view
poses = self.find_poses_in_scan()
min_points_for_object = 3
if(len(poses) < min_points_for_object):
# Not enough points
pose_stamped = PoseStamped()
pose_stamped.header.stamp = rospy.Time.now()
pose_stamped.header.frame_id = "base_link"
self.object_from_scan_publisher.publish(pose_stamped)
return (0, 0)
# Not the most efficient list traversal (double), but we don't have that many values.
center_of_mass = (sum([pose.position.x for pose in poses]) * 1.0 / len(poses),
sum([pose.position.y for pose in poses]) * 1.0 / len(poses))
pose_stamped = PoseStamped()
pose_stamped.header.stamp = rospy.Time.now()
pose_stamped.header.frame_id = "base_link"
pose_stamped.pose.position.x = center_of_mass[0]
pose_stamped.pose.position.y = center_of_mass[1]
self.object_from_scan_publisher.publish(pose_stamped)
return center_of_mass
def visualize_positions_in_scan(self):
poses = self.find_poses_in_scan()
self.position_pose_array.poses = poses
self.position_publisher.publish(self.position_pose_array)
def visualize_object_from_scan(self):
x, y = self.find_object_from_scan()
self.my_object_marker.header.stamp = rospy.Time.now()
self.my_object_marker.pose.position.x = x
self.my_object_marker.pose.position.y = y
self.object_publisher.publish(self.my_object_marker)
def run(self):
# while not rospy.is_shutdown():
# if(self.ready_to_process):
# self.visualize_positions_in_scan()
# # Publish an image/scan msg
# self.publish_image_scan()
rospy.spin()
# def publish_image_scan(self):
# msg = ImageScanStamped()
# msg.header.stamp = rospy.Time.now()
# msg.image = self.last_image_msg
# msg.scan = self.last_scan_msg
# x, y = self.visualize_object_from_scan()
# msg.pose.position.x = x
# msg.pose.position.y = y
# self.image_scan_publisher.publish(msg)
# self.ready_to_process = False
if __name__ == "__main__":
tag = MLTag()
tag.run()
| Python | 237 | 41.945148 | 164 | /data_processing_utilities/scripts/ml_tag.py | 0.611712 | 0.602279 |
ksoltan/robot_learning | refs/heads/master | # Given a folder of images and a metadata.csv file, output an npz file with an imgs, spatial x, and spatial x dimensions.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import math
from PIL import Image
from scipy.misc import imread, imresize
def process_scan(ranges):
"""
process a 360 point set of laser data in a certain viewing range.
inputs: list of ranges from the laser scan
output: lists of x and y points within viewing angle and range
"""
max_r = 1.5
view_angle = int(70 / 2) # only look at points in the forwardmost 70 degs
infront = range(-view_angle, view_angle)
# ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]
xs = []
ys = []
# loop through and grab points in desired view range
for i in range(-view_angle, view_angle):
if ranges[i] != 0:
theta = math.radians(90 + i)
r = ranges[i]
xf = r * math.cos(theta)
yf = r * math.sin(theta)
xs.append(xf)
ys.append(yf)
return(xs, ys)
def center_of_mass(x, y):
"""
compute the center of mass in a lidar scan.
inputs: x and y lists of cleaned laser data
output: spatial x and y coordinate of the CoM
"""
if len(x) < 4: # if below a threshold of grouped points
return(np.inf, np.inf)
else:
x_cord = sum(x)/len(x)
y_cord = sum(y)/len(y)
plt.plot(x, y, 'ro')
plt.plot(0,0, 'bo', markersize=15)
plt.plot(x_cord, y_cord, 'go', markersize=15)
plt.ylim(-2,2)
plt.xlim(-2,2) # plt.show()
return (x_cord, y_cord)
def resize_image(img_name):
"""
load and resize images for the final numpy array.
inputs: filename of an image
output: resized image as a numpy array
"""
# new size definition
width = 200
height = 150
new_size = width, height
img = Image.open(img_name, 'r')
resize = img.resize(new_size)
array = np.array(resize)
return array
def find_corresponding_scan(image_time, scan_times, start_idx):
max_tolerance = 0.015
while start_idx < len(scan_times):
diff = abs(scan_times[start_idx] - image_time)
# print("Idx: {}, Diff: {}".format(start_idx, abs(scan_times[start_idx] - image_time)))
if diff < max_tolerance:
return (start_idx, diff)
start_idx += 1
return None
if __name__ == '__main__':
# location definitions
# # Katya
data_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'
# Anil
# data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
folder_name = 'anil_shining_2'
# folder_name = 'latest_person'
path = data_path + folder_name + '/'
metadata_csv = data_path + folder_name + '/' + 'metadata.csv'
# image definitions
os.chdir(path)
filenames = glob.glob("*.jpg")
# pull from metadata
array_form = np.genfromtxt(metadata_csv, delimiter=",")
lidar_all = array_form[:,6:366]
pic_times = array_form[:,0]
lidar_times = array_form[:,-1]
images = []
object_xs = []
object_ys = []
i_s = []
j_s = []
# loop through all images
for i in range(lidar_all.shape[0]-26):
for j in range(i,i+25):
delta = lidar_times[j]-pic_times[i]
if abs(delta) < 0.025:
i_s.append(i)
j_s.append(j)
# print('pic', i)
# print('lid', j)
# print('delta', delta)
# print('------------------')
break
imgs_a = []
xs_a = []
ys_a = []
for i in range(len(i_s)):
img_ind = i_s[i]
lid_ind = j_s[i]
scan_now = lidar_all[lid_ind] # scan data for this index
# process if scan isn't NaN (laser hasn't fired yet)
if not np.isnan(scan_now[10]):
points_x, points_y = process_scan(scan_now)
xp, yp = center_of_mass(points_x, points_y)
# only add if CoM is defined, AKA object is in frame
if xp != np.inf:
# print(pic_times[img_ind]-lidar_times[lid_ind], xp, yp, round(math.degrees(math.atan2(xp, yp)),2))
# add image
img_name = filenames[img_ind]
img_np = resize_image(img_name)
imgs_a.append(img_np)
# add object position
xs_a.append(xp)
ys_a.append(yp)
# verify
# plt.show()
plt.imshow(img_np)
# plt.show()
print(len(imgs_a))
# save all data
save_path = data_path + folder_name + '_data' '.npz'
np.savez_compressed(save_path, imgs=imgs_a, object_x=xs_a, object_y=ys_a)
| Python | 164 | 28.621952 | 121 | /data_preparation/clean_process.py | 0.554755 | 0.541169 |
ksoltan/robot_learning | refs/heads/master | # Given a folder of images and a metadata.csv file, output an npz file with an imgs, mouse_x, and mouse_y columns.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
from PIL import Image
from scipy.misc import imread, imresize
folder_name = 'ball_dataset_classroom'
# Katya
data_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'
# Anil
# data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
path = data_path + folder_name + '/'
metadata_name = 'metadata.csv'
os.chdir(path)
filenames = glob.glob("*.jpg")
# Get size of images
filename = filenames[0]
sample_img = Image.open(filename, 'r')
print("height: {}, width: {}, aspect: {}".format(sample_img.height, sample_img.width, 1.0 * sample_img.height/sample_img.width))
aspect = 1.0 * sample_img.height / sample_img.width
width = 200
height = int(width*aspect)
new_size = width, height
# Create numpy array of all x and y mouse positions
METADATA_CSV = data_path + folder_name + '/' + metadata_name
df = pd.read_csv(METADATA_CSV, ',')[['image_file_name', 'object_from_scan_x', 'object_from_scan_y']]
print(df.head())
print(df.info())
images = []
object_xs = []
object_ys = []
# Loop through lidar predicted object positions and save only those that do not contain 0, 0
for index in range(len(df.object_from_scan_x)):
x = df.object_from_scan_x[index]
y = df.object_from_scan_y[index]
if(x == 0.0 and y == 0.0):
continue
# Add image
img_name = filenames[index]
img = Image.open(img_name, 'r')
resize = img.resize(new_size)
array = np.array(resize)
images.append(array)
# Add object position
object_xs.append(x)
object_ys.append(y)
#
# # plt.imshow(data)
# # plt.show()
# index = 0
# images = []
# # Create numpy array of resized images
# for name in filenames:
# img = Image.open(name, 'r')
# resize = img.resize(new_size)
# array = np.array(resize)
# # images[:,:,:,index] = array
# images.append(array)
# index += 1
SAVE_FILENAME = data_path + folder_name + '_data' '.npz'
np.savez_compressed(SAVE_FILENAME, imgs=images, object_x=object_xs, object_y=object_ys)
test_data = np.load(SAVE_FILENAME)
print(test_data['object_x'].shape)
| Python | 80 | 27.674999 | 128 | /data_preparation/image_processing.py | 0.678727 | 0.670881 |
ksoltan/robot_learning | refs/heads/master | #!/usr/bin/env python
"""quick script for trying to pull spatial x, y from metadata"""
from __future__ import print_function
from geometry_msgs.msg import PointStamped, PointStamped, Twist
from std_msgs.msg import Header
from neato_node.msg import Bump
from sensor_msgs.msg import LaserScan
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import time, numpy, math, rospy, statistics
def process_scan(ranges):
""" process a 360 point set of laser data in a certain viewing range """
max_r = 1.5
view_angle = 80 # only look at points in the forwardmost 70 degs
infront = ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]
xs = []
ys = []
# loop through and grab points in desired view range
for i in range(len(ranges)):
if i<len(infront):
if infront[i] !=0 and infront[i] < max_r:
if i >= view_angle/2:
theta = math.radians(90-(view_angle-i))
else:
theta = math.radians(i+90)
r = infront[i]
xf = math.cos(theta)*r
yf = math.sin(theta)*r
xs.append(xf)
ys.append(yf)
return(xs, ys)
def center_of_mass(x, y):
""" with arguments as lists of x and y values, compute center of mass """
if len(x) < 4: # if below a threshold of grouped points
return(0, 0) # TODO pick a return value for poor scans
x_cord = sum(x)/len(x)
y_cord = sum(y)/len(y)
plt.plot(x_cord, y_cord, 'go', markersize=15)
return (x_cord, y_cord)
if __name__ == '__main__':
path = '/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
folder = 'mydataset'
look_in = path+folder + '/' # final path for metadata
filename = 'metadata.csv'
file_csv = look_in + filename
array_form = numpy.genfromtxt(file_csv, delimiter=",")
lidar_all = array_form[:, 6:366]
lidar_label = []
ind = 0
for i in range(lidar_all.shape[0]):
scan_now = lidar_all[i,:]
if not numpy.isnan(scan_now[10]):
points_x, points_y = process_scan(scan_now)
xp, yp = center_of_mass(points_x, points_y)
if xp != 0:
# lidar_label[ind,0] = i
# lidar_label[ind,1] = xp
# lidar_label[ind,2] = yp
# ind += 1
lidar_label.append([i, xp, yp])
print(ind, i, xp, yp, math.degrees(math.atan2(xp, yp)))
# plt.plot(points_x, points_y, 'ro')
# plt.plot(0,0, 'bo', markersize=15)
# plt.show()
lidar_label = numpy.array(lidar_label)
print(lidar_label[:,0])
SAVE_FILENAME = path + folder + '.npz'
numpy.savez_compressed(SAVE_FILENAME, indices=lidar_label[:,0], xs=lidar_label[:,1], ys=lidar_label[:,2])
"""
# loop through images and get spatial x and y
for i in range(lidar_all.shape[0]):
lidar_here = lidar_all[i,:]
xs, ys = process_scan(lidar_here)
xp, yp = center_of_mass(xs, ys)
lidar_label[i,0] = xp
lidar_label[i,1] = yp
print(xp, yp)
"""
| Python | 95 | 32.799999 | 109 | /data_preparation/lidar_processing.py | 0.56649 | 0.54905 |
DSGDSR/pykedex | refs/heads/master | import sys, requests, json
from io import BytesIO
from PIL import Image
from pycolors import *
from funcs import *
print( pycol.BOLD + pycol.HEADER + "Welcome to the pokedex, ask for a pokemon: " + pycol.ENDC, end="" )
pokemon = input()
while True:
response = getPokemon(pokemon)
if response.status_code == 404:
print( "This pokemon name is not valid, try again: ", end="" )
pokemon = input()
continue
data = response.json()
#############################################################
########################### IMAGE ###########################
#############################################################
#imgburl = "https://assets.pokemon.com/assets/cms2/img/pokedex/full/" + str(data["id"]) + ".png"
imgburl = "https://img.pokemondb.net/artwork/" + str(data["name"]) + ".jpg"
imgr = requests.get(imgburl)
img = Image.open(BytesIO(imgr.content))
w, h = img.size
img.resize((w, h)).show()
#############################################################
######################### BASE INFO #########################
#############################################################
print( "\n" + pycol.BOLD + pycol.UNDERLINE + data["name"].capitalize() + pycol.ENDC + " (ID: " + str(data["id"]) + ")" + "\n" +
"Weight: " + str(data["weight"]/10) + "kg\n" +
"Height: " + str(data["height"]/10) + "m\n" +
"Base experience: " + str(data["base_experience"]) )
########################### TYPES ###########################
types, abilities = [], []
for t in data["types"]:
types.append(t["type"]["name"])
print( "Types: " + ', '.join(types) )
######################### ABILITIES #########################
for a in data["abilities"]:
ab = a["ability"]["name"]
if a["is_hidden"]:
ab = ab + " (hidden ab.)"
abilities.append(ab)
print( "Abilities: " )
for ab in abilities:
print( " - " + ab.capitalize() )
########################### STATS ###########################
print( "Stats: " )
for s in data["stats"]:
print(getStrBar((s["stat"]["name"] + ":").ljust(17), s["base_stat"]))
######################## EVOL CHAIN #########################
print("Evolutions:\n" + " " + getEvolChain(data["id"]))
print()
#############################################################
#############################################################
######################## END OF LOOP ########################
#############################################################
print( "Do you wanna ask for another pokemon? (Y/n) ", end="" )
answer = input()
if answer == 'n':
break
else:
print( "Enter the pokemon name: ", end="" )
pokemon = input()
| Python | 74 | 37.202702 | 131 | /main.py | 0.386983 | 0.383445 |
DSGDSR/pykedex | refs/heads/master | import requests, math
def getPokemon(pokemon):
return requests.get("http://pokeapi.co/api/v2/pokemon/"+pokemon)
def getEvolChain(id):
url = "http://pokeapi.co/api/v2/pokemon-species/" + str(id)
resp = requests.get(url)
data = resp.json()
evol = requests.get(data["evolution_chain"]["url"]).json()["chain"]
evols = evol["species"]["name"].capitalize()
while evol["evolves_to"]:
evol = evol["evolves_to"][0]
evols = evols + " -> " + evol["species"]["name"].capitalize()
return evols
def getStrBar(stat, base):
# ▓▓▓▓▓▓▓▓░░░░░░░
num = math.ceil(base/20)
stat = stat.capitalize()
statStr = " - " + stat + "▓" * num + "░" * (10-num) + " " + str(base)
return statStr
if __name__ == "__main__":
print(getStrBar("speed", 90))
#print(getPokemon("pikachu")) | Python | 28 | 28.678572 | 73 | /funcs.py | 0.575904 | 0.56506 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
def parse_info(claim):
offsets = claim.strip().split("@")[1].split(":")[0].split(",")
inches_from_left = int(offsets[0].strip())
inches_from_top = int(offsets[1].strip())
dims = claim.strip().split("@")[1].split(":")[1].split("x")
width = int(dims[0].strip())
height = int(dims[1].strip())
return (inches_from_left, inches_from_top, width, height)
def part_one(square, input):
collision_cnt = 0
for claim in input:
info = parse_info(claim)
for i in range(info[1], info[1] + info[3]):
for j in range(info[0], info[0] + info[2]):
if square[i][j] == "#":
square[i][j] = "X"
collision_cnt += 1
elif square[i][j] == ".":
square[i][j] = "#"
print("sol p1: " + str(collision_cnt))
return square
def part_two(filled_square, input):
for claim in input:
info = parse_info(claim)
overlapping = False
for i in range(info[1], info[1] + info[3]):
if overlapping:
break
for j in range(info[0], info[0] + info[2]):
if filled_square[i][j] == "X":
overlapping = True
break
if not overlapping:
print("sol p2: " + claim.split("#")[1].split("@")[0].strip())
if __name__ == '__main__':
input = sys.stdin.readlines()
lst = ["." for _ in range(0, 1000)]
square = [copy.copy(lst) for _ in range(0, 1000)]
filled_square = part_one(square, input)
part_two(filled_square, input)
| Python | 54 | 28.888889 | 73 | /day3/main.py | 0.506196 | 0.483891 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
from string import ascii_lowercase
def step_time(letter, sample):
if not sample:
return 60 + ord(letter) - 64
else:
return ord(letter) - 64
def get_names():
names = dict()
cnt = 0
for i in ascii_lowercase:
if cnt == len(input) - 1:
break
names[i.upper()] = []
cnt += 1
return names
def delete_item(item):
for i in names.keys():
if i == item:
del names[i]
break
for i in names.keys():
if item in names[i]:
names[i].remove(item)
def get_waiting_lists(names):
waiting_lists = []
for i in names.keys():
waiting_lists.append((names[i], i))
return waiting_lists
def get_admissible_item(waiting_lists):
tmp = copy.copy(waiting_lists)
valid = False
while not valid:
valid = True
if len(tmp) == 0:
return None
tmp_best = min(tmp)
if len(tmp_best[0]) == 0:
for w in workers:
if w[2] == tmp_best[1]:
valid = False
else:
valid = False
if not valid:
tmp.remove(tmp_best)
return tmp_best[1]
if __name__ == '__main__':
input = sys.stdin.readlines()
num_of_workers = 5
sample = False
names = get_names()
workers = []
for i in range(0, num_of_workers):
# (idx, available, working item, time_left)
workers.append((i, True, "", 0))
for i in input:
before = i.strip().split("must")[0].split("Step")[1].strip()
after = i.strip().split("can")[0].split("step")[1].strip()
names[after].append(before)
time = 0
while len(names.keys()) > 0:
for w in workers:
# worker available
if w[1]:
waiting_lists = get_waiting_lists(names)
item = get_admissible_item(waiting_lists)
if item == None:
pass
# print("no item available for worker" + str(w[0]))
else:
workers[workers.index(w)] = (w[0], False, item, step_time(item, sample))
# print("time " + str(time) + " worker" + str(w[0]) + " starts to work on item " + str(item) + " needs time: " + str(step_time(item, sample)))
# worker busy
else:
time_left = w[3] - 1
if time_left != 0:
workers[workers.index(w)] = (w[0], False, w[2], time_left)
else:
delete_item(str(w[2]))
# print("time " + str(time) + " worker" + str(w[0]) + " finished working on item " + str(w[2]))
waiting_lists = get_waiting_lists(names)
item = get_admissible_item(waiting_lists)
if item == None:
workers[workers.index(w)] = (w[0], True, "", 0)
# print("no item available for worker" + str(w[0]))
else:
workers[workers.index(w)] = (w[0], False, item, step_time(item, sample))
# print("time " + str(time) + " worker" + str(w[0]) + " starts to work on item " + str(item) + " needs time: " + str(step_time(item, sample)))
continue
time += 1
print("sol p2: " + str(time - 1))
| Python | 115 | 28.573914 | 166 | /day7/p2.py | 0.479271 | 0.466333 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
import string
from string import ascii_lowercase
def get_names():
names = dict()
cnt = 0
for i in ascii_lowercase:
if cnt == len(input) - 1:
break
names[i.upper()] = []
cnt += 1
return names
def delete_item(item):
for i in names.keys():
if i == item:
del names[i]
break
for i in names.keys():
if item in names[i]:
names[i].remove(item)
def parse_input():
for i in input:
before = i.strip().split("must")[0].split("Step")[1].strip()
after = i.strip().split("can")[0].split("step")[1].strip()
names[after].append(before)
if __name__ == '__main__':
input = sys.stdin.readlines()
names = get_names()
parse_input()
order = []
while len(names) > 0:
deps = []
for i in names.keys():
deps.append(names[i])
min_list = min(deps)
for j in names.keys():
if names[j] == min_list:
order.append(j)
delete_item(j)
break
print("sol p1: " + "".join(order))
| Python | 52 | 20.923077 | 68 | /day7/p1.py | 0.496491 | 0.488596 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
import string
from string import ascii_lowercase
# 42384 too low
if __name__ == '__main__':
input = sys.stdin.read().split()
print(input)
stack = []
tree = []
tmp_input = copy.copy(input)
open_meta_data = 0
idx = 0
while len(tmp_input) > open_meta_data:
print("len: " + str(len(tmp_input)))
print("need: " + str(int(input[idx + 1]) + 2))
print("open meta len: " + str(open_meta_data))
need = int(input[idx + 1]) + 2
if need + open_meta_data > len(tmp_input):
print("DONE")
break
node = (input[idx], input[idx + 1], [])
print("looking at: " + str(node))
# if len(tmp_input) <= open_meta_data:
# print("len of rest: " + str(len(tmp_input)))
# print("open meta data: " + str(open_meta_data))
# print("current need: " + str(node[1]))
# print("DONE")
# break
for i in range(0, len(tmp_input) - 1):
if tmp_input[i] == node[0] and tmp_input[i + 1] == node[1]:
tmp_idx = i
if node[0] == '0':
print("remove: " + str(tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))]))
del tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))]
else:
print("remove::: " + str(tmp_input[tmp_idx : tmp_idx + 2]))
del tmp_input[tmp_idx : tmp_idx + 2]
# no childs
if node[0] == '0':
print("handle now")
print(node)
for i in range(idx + 2, idx + 2 + int(node[1])):
node[2].append(input[i])
tree.append(node)
else:
open_meta_data += int(node[1])
print("append to stack")
stack.append(node)
print(node)
idx += 2
if node[0] == '0':
idx += int(node[1])
print("TODO: " + str(tmp_input))
for i in stack:
node = (i[0], i[1], [])
for j in range(0, int(i[1])):
node[2].append(tmp_input[j])
del tmp_input[0 : int(i[1])]
tree.append(node)
res = 0
for i in tree:
res += sum([int(x) for x in i[2]])
print("sol p1: " + str(res))
| Python | 87 | 24.804598 | 87 | /day8/main.py | 0.462151 | 0.441346 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
from string import ascii_lowercase
def remove_unit(tmp_input, idx):
    """Remove the reacting pair at positions idx and idx+1 from the polymer, in place."""
    tmp_input.pop(idx)
    tmp_input.pop(idx)
def react_polymer(tmp_input):
    """Fully react a polymer and return the list of surviving units.

    Two adjacent units react (annihilate) when they are the same letter in
    opposite case ('a'/'A'). The original implementation rescanned the whole
    list from the start after every single reaction (worst-case O(n^2));
    this single left-to-right pass with a stack is O(n) and yields the same
    result, since pair-annihilation reduction is confluent (the final polymer
    is independent of reaction order).

    :param tmp_input: list of single-character units
    :return: list of units remaining after all reactions
    """
    stack = []
    for unit in tmp_input:
        # top of stack reacts with the incoming unit?
        if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
            stack.pop()
        else:
            stack.append(unit)
    return stack
if __name__ == '__main__':
    # AoC 2018 day 5: part 1 reacts the raw polymer from stdin; part 2 finds
    # the shortest polymer obtainable by first deleting one unit type
    # (both cases) and then reacting.
    input = sys.stdin.read().strip()
    polymer_lengths = []
    print("sol p1: " + str(len(react_polymer(list(input)))))
    for unit_type in ascii_lowercase:
        # strip every occurrence of this unit type (both polarities), then react
        tmp_input = list(input.replace(unit_type, "").replace(unit_type.upper(), ""))
        tmp_input = react_polymer(tmp_input)
        polymer_lengths.append(len(tmp_input))
    print("sol p2: " + str(min(polymer_lengths)))
| Python | 35 | 25.4 | 101 | /day5/main.py | 0.584416 | 0.577922 |
tbohne/AoC18 | refs/heads/master | import sys
if __name__ == '__main__':
    # AoC 2018 day 1: apply signed frequency changes from stdin repeatedly.
    # Part 1 prints the frequency after one full pass; part 2 prints the
    # first frequency reached twice (may take several passes).
    input = sys.stdin.readlines()
    curr_freq = 0
    reached_twice = False
    list_of_freqs = []  # NOTE(review): a set would make the membership test O(1)
    while not reached_twice:
        for change in input:
            sign = change[0]
            change = int(change.replace(sign, ""))
            if (sign == "+"):
                curr_freq += change
            else:
                curr_freq -= change
            if curr_freq in list_of_freqs:
                reached_twice = True
                print("sol p2: " + str(curr_freq))
                break
            else:
                list_of_freqs.append(curr_freq)
            # after exactly one full pass the part-1 answer is known
            if len(list_of_freqs) == len(input):
                print("sol p1: " + str(curr_freq))
| Python | 30 | 22.866667 | 50 | /day1/main.py | 0.458101 | 0.452514 |
tbohne/AoC18 | refs/heads/master | import sys
import copy
from string import ascii_lowercase
def manhattan_dist(c1, c2):
    """Return the Manhattan (L1) distance between two 2-tuples of coordinates."""
    d_second = abs(c1[1] - c2[1])
    d_first = abs(c1[0] - c2[0])
    return d_second + d_first
def part_two():
    """Count grid cells whose summed distance to every coordinate is below 10000.

    Scans the full 1000x1000 grid using the module-level coord_by_name mapping
    and prints the size of the safe region (AoC 2018 day 6, part 2).
    """
    total = 0
    for row in range(1000):
        for col in range(1000):
            dist_sum = sum(manhattan_dist((col, row), pos)
                           for pos in coord_by_name.values())
            if dist_sum < 10000:
                total += 1
    print("sol p2: " + str(total))
def part_one():
    """AoC 2018 day 6 part 1: find the largest finite closest-point area.

    Works on the module-level globals `square`, `list_of_coords`,
    `coordinate_names` and `coord_by_name` built in __main__. Each free grid
    cell is labelled (lowercase) with its unique nearest coordinate; cells
    tied between coordinates stay '.'. Coordinates whose area touches the
    hull (x/y extremes) are considered infinite and excluded via the
    *_remove lists. Prints the winning coordinate name and its area size.
    """
    # --- label every free cell with its unique nearest coordinate ---
    for i in range(0, 1000):
        for j in range(0, 1000):
            if square[i][j] == ".":
                min_dist = 99999
                name = ""
                collision_dist = min_dist
                for coords in list_of_coords:
                    distance = abs(i - coords[1]) + abs(j - coords[0])
                    if distance < min_dist:
                        min_dist = distance
                        name = coordinate_names[coords].lower()
                    elif distance == min_dist:
                        collision_dist = min_dist
                # tie between two coordinates -> cell belongs to nobody
                if collision_dist == min_dist:
                    square[i][j] = "."
                else:
                    square[i][j] = name
    area_cnt = dict()
    # --- collect coordinates on the bounding extremes (infinite areas) ---
    y_min = 2000
    x_min = 2000
    x_max = 0
    y_max = 0
    x_min_remove = []
    x_max_remove = []
    y_min_remove = []
    y_max_remove = []
    for c in list_of_coords:
        if c[0] <= x_min:
            x_min = c[0]
            x_min_remove.append(coordinate_names[c])
        for i in x_min_remove:
            if coord_by_name[i][0] > x_min:
                x_min_remove.remove(i)
        if c[0] >= x_max:
            x_max = c[0]
            x_max_remove.append(coordinate_names[c])
        for i in x_max_remove:
            if coord_by_name[i][0] < x_max:
                x_max_remove.remove(i)
        if c[1] <= y_min:
            y_min = c[1]
            y_min_remove.append(coordinate_names[c])
        for i in y_min_remove:
            if coord_by_name[i][1] > y_min:
                y_min_remove.remove(i)
        if c[1] >= y_max:
            y_max = c[1]
            y_max_remove.append(coordinate_names[c])
        for i in y_max_remove:
            if coord_by_name[i][1] < y_max:
                y_max_remove.remove(i)
    # --- also exclude coordinates that "see" a border closer than any rival ---
    # NOTE(review): the four stanzas below are the same check for each border;
    # a coordinate closer to a border than every coordinate sitting on that
    # border owns an unbounded strip and is removed too.
    for i in coordinate_names.values():
        dist = abs(coord_by_name[i][1] - x_max)
        man_dists = []
        for j in coordinate_names.values():
            if coord_by_name[j][1] == x_max:
                man_dist = manhattan_dist((coord_by_name[i][0], x_max), coord_by_name[j])
                man_dists.append(man_dist)
        if min(man_dists) > dist:
            x_max_remove.append(i)
        dist = abs(coord_by_name[i][1] - x_min)
        man_dists = []
        for j in coordinate_names.values():
            if coord_by_name[j][1] == x_min:
                man_dist = manhattan_dist((coord_by_name[i][0], x_min), coord_by_name[j])
                man_dists.append(man_dist)
        if min(man_dists) > dist:
            x_min_remove.append(i)
        dist = abs(coord_by_name[i][0] - y_max)
        man_dists = []
        for j in coordinate_names.values():
            if coord_by_name[j][0] == y_max:
                man_dist = manhattan_dist((y_max, coord_by_name[i][1]), coord_by_name[j])
                man_dists.append(man_dist)
        if min(man_dists) > dist:
            y_max_remove.append(i)
        dist = abs(coord_by_name[i][0] - y_min)
        man_dists = []
        for j in coordinate_names.values():
            if coord_by_name[j][0] == y_min:
                man_dist = manhattan_dist((y_min, coord_by_name[i][1]), coord_by_name[j])
                man_dists.append(man_dist)
        if min(man_dists) > dist:
            y_min_remove.append(i)
        area_cnt[i] = 0
    # --- count cells per surviving (finite-area) coordinate ---
    for i in range(0, 1000):
        for j in range(0, 1000):
            if square[i][j].islower():
                if square[i][j].upper() not in x_max_remove and square[i][j].upper() not in x_min_remove and square[i][j].upper() not in y_max_remove and square[i][j].upper() not in y_min_remove:
                    area_cnt[square[i][j].upper()] += 1
    # +1 accounts for the coordinate's own cell (marked uppercase, not counted above)
    max = 0
    caused_by = ""
    for i in area_cnt.keys():
        cnt = 0
        if i != 0:
            cnt = area_cnt[i] + 1
        if cnt > max:
            max = cnt
            caused_by = i
    print(caused_by + ": " + str(max))
if __name__ == '__main__':
    # Build the global state used by part_one/part_two from stdin:
    # one "x, y" coordinate per line. Each coordinate gets a short name
    # ('A'..'Z', then 'AA', 'BB', ... for inputs longer than 26).
    input = sys.stdin.readlines()
    test = dict()       # index -> generated coordinate name
    tmp_cnt = 0
    for c in ascii_lowercase:
        test[tmp_cnt] = c.upper()
        tmp_cnt += 1
    rest = len(input) - 26
    for c in ascii_lowercase:
        if rest > 0:
            rest -= 1
            test[tmp_cnt] = c.upper() + c.upper()
            tmp_cnt += 1
    cnt = 0
    # 1000x1000 grid, '.' = unclaimed cell
    lst = ["." for _ in range(0, 1000)]
    square = [copy.copy(lst) for _ in range(0, 1000)]
    list_of_coords = []         # (x, y) tuples in input order
    coordinate_names = dict()   # (x, y) -> name
    coord_by_name = dict()      # name -> (y, x)  (note the swapped order!)
    for i in input:
        coords = (int(i.strip().split(",")[0]), int(i.strip().split(",")[1].strip()))
        list_of_coords.append(coords)
        square[coords[1]][coords[0]] = test[cnt]
        coordinate_names[coords] = test[cnt]
        coord_by_name[test[cnt]] = (coords[1], coords[0])
        cnt += 1
    part_one()
    part_two()
| Python | 176 | 29.210228 | 195 | /day6/main.py | 0.470002 | 0.447245 |
tbohne/AoC18 | refs/heads/master | import sys
def part_one(input):
    """Compute the AoC 2018 day 2 checksum for a list of box IDs.

    Counts how many IDs contain some letter exactly twice and how many
    contain some letter exactly three times, and returns the product.

    The original rebuilt ``boxID.count(letter)`` for every letter of every
    ID (O(len^2) per ID); a single Counter pass per ID is O(len) and gives
    the same answer.

    :param input: iterable of box ID strings (trailing newlines are harmless)
    :return: (#IDs with a double letter) * (#IDs with a triple letter)
    """
    from collections import Counter

    exactly_two = 0
    exactly_three = 0
    for boxID in input:
        counts = Counter(boxID).values()
        if 2 in counts:
            exactly_two += 1
        if 3 in counts:
            exactly_three += 1
    return exactly_two * exactly_three
def part_two(input):
    """Return the letters shared by the two box IDs that differ in exactly one position."""
    for id_a in input:
        for id_b in input:
            if id_a == id_b:
                continue
            common = [a for a, b in zip(id_a, id_b) if a == b]
            if len(id_a) - len(common) == 1:
                return "".join(common).strip()
if __name__ == '__main__':
    # AoC 2018 day 2: box IDs arrive one per line on stdin.
    input = sys.stdin.readlines()
    print("sol p1: " + str(part_one(input)))
    print("sol p2: " + part_two(input))
| Python | 30 | 25.799999 | 88 | /day2/main.py | 0.549751 | 0.532338 |
tbohne/AoC18 | refs/heads/master | import sys
from datetime import datetime
def calc_timespan(t1, t2):
    """Return t2 - t1 as a timedelta; both arguments are 'HH:MM' strings."""
    fmt = '%H:%M'
    later = datetime.strptime(t2, fmt)
    earlier = datetime.strptime(t1, fmt)
    return later - earlier
def parse_info(record=None):
    """Parse one guard-log line '[date time] action' into (date, time, action).

    Defect fixed: the original read the undeclared global loop variable ``i``,
    so it only worked when called from inside the ``for i in input`` loop in
    __main__. The line is now an explicit parameter; calling with no argument
    still falls back to the global ``i`` so existing call sites keep working.

    :param record: log line such as '[1518-11-01 00:00] Guard #10 begins shift';
                   when None, the module-global ``i`` is used (legacy behavior)
    :return: tuple (date, time, action) of stripped strings
    """
    line = i if record is None else record
    stamp = line.split("[")[1].split("]")[0]     # 'date time' between brackets
    date = stamp.split(" ")[0].strip()
    time = stamp.split(" ")[1].strip()
    action = line.split("[")[1].split("]")[1].strip()
    return (date, time, action)
if __name__ == '__main__':
    # AoC 2018 day 4: sorted log lines give, per guard, the minutes slept.
    # sleep_times: guard id -> total minutes asleep
    # sleeping_minutes: guard id -> list of every minute-of-hour slept (with repeats)
    input = sys.stdin.readlines()
    input.sort()   # timestamps sort lexicographically == chronologically
    current_guard_id = ""
    start_sleeping = -1
    sleep_times = dict()
    sleeping_minutes = dict()
    for i in input:
        info = parse_info()   # relies on the loop variable `i` (see parse_info)
        if current_guard_id != "":
            if "falls" in i:
                start_sleeping = info[1]
            elif "wakes" in i:
                if not current_guard_id in sleep_times.keys():
                    sleep_times[current_guard_id] = 0
                if not current_guard_id in sleeping_minutes.keys():
                    sleeping_minutes[current_guard_id] = []
                # duration asleep in whole minutes (hours*60 + minutes)
                time_to_add_in_minutes = int(str(calc_timespan(start_sleeping, info[1])).split(":")[0]) * 60
                time_to_add_in_minutes += int(str(calc_timespan(start_sleeping, info[1])).split(":")[1])
                start = int(start_sleeping.split(":")[1])
                end = int(info[1].split(":")[1]) - 1   # wake minute itself is awake
                sleeping_minutes[current_guard_id].append(start)
                sleeping_minutes[current_guard_id].append(end)
                for idx in range(start + 1, start + time_to_add_in_minutes - 1):
                    sleeping_minutes[current_guard_id].append(idx % 60)
                current_sleep_time = sleep_times[current_guard_id] + time_to_add_in_minutes
                sleep_times[current_guard_id] = int(current_sleep_time)
        if "#" in info[2]:
            current_guard_id = info[2].split("#")[1].split("begins")[0].strip()
    # strategy 1: the guard who sleeps most, at his most-slept minute
    lazy_guard = max(sleep_times, key = sleep_times.get)
    # min, guard
    strategy1 = [max(sleeping_minutes[lazy_guard], key = sleeping_minutes[lazy_guard].count), int(lazy_guard)]
    # strategy 2: the guard/minute pair with the highest single-minute count
    # min, count, guard
    strategy2 = [0, 0, 0]
    for i in sleep_times.keys():
        tmp_min = max(sleeping_minutes[i], key = sleeping_minutes[i].count)
        if sleeping_minutes[i].count(tmp_min) > strategy2[1]:
            strategy2[0] = tmp_min
            strategy2[1] = sleeping_minutes[i].count(tmp_min)
            strategy2[2] = i
    print("sol p1: " + str(strategy1[0] * strategy1[1]))
    print("sol p2: " + str(int(strategy2[2]) * strategy2[0]))
| Python | 70 | 35.257141 | 110 | /day4/main.py | 0.552403 | 0.530339 |
w5688414/selfdriving_cv | refs/heads/master | import numpy as np
import tensorflow as tf
def weight_ones(shape, name):
    """Return a trainable tf.Variable of the given shape filled with 1.0."""
    ones = tf.constant(1.0, shape=shape, name=name)
    return tf.Variable(ones)
def weight_xavi_init(shape, name):
    """Return a variable of the given shape created with Xavier initialization."""
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name=name, shape=shape, initializer=xavier)
def bias_variable(shape, name):
    """Return a trainable bias tf.Variable initialised to 0.1."""
    filler = tf.constant(0.1, shape=shape, name=name)
    return tf.Variable(filler)
class Network(object):
    """Helper that builds named conv/fc/bn/dropout layers for a TF graph.

    Keeps per-layer-type counters so every op gets a unique name, and
    records created weights (self._weights) and intermediate activations
    (self._features) for later inspection.
    """
    def __init__(self, train_state):
        """ We put a few counters to see how many times we called each function """
        self._count_conv = 0
        self._count_pool = 0
        self._count_bn = 0
        self._count_dropouts = 0
        self._count_activations = 0
        self._count_fc = 0
        self._count_lstm = 0
        self._count_soft_max = 0
        self._conv_kernels = []
        self._conv_strides = []
        self._weights = {}
        self._features = {}
        # boolean placeholder: True during training (enables dropout branches)
        self._train_state = train_state
    """ Our conv is currently using bias """
    def conv(self, x, kernel_size, stride, output_size, padding_in='SAME'):
        """Add a 2D convolution (Xavier weights + bias); returns the pre-activation output."""
        self._count_conv += 1
        filters_in = x.get_shape()[-1]
        shape = [kernel_size, kernel_size, filters_in, output_size]
        weights = weight_xavi_init(shape, 'W_c_' + str(self._count_conv))
        bias = bias_variable([output_size], name='B_c_' + str(self._count_conv))
        self._weights['W_conv' + str(self._count_conv)] = weights
        self._conv_kernels.append(kernel_size)
        self._conv_strides.append(stride)
        conv_res = tf.add(tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding=padding_in,
                                       name='conv2d_' + str(self._count_conv)), bias,
                          name='add_' + str(self._count_conv))
        self._features['conv_block' + str(self._count_conv - 1)] = conv_res
        return conv_res
    def max_pool(self, x, ksize=3, stride=2):
        """Add a SAME-padded max-pooling layer."""
        self._count_pool += 1
        return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1],
                              padding='SAME', name='max_pool' + str(self._count_pool))
    def bn(self, x):
        """Add a batch-norm layer (always inference mode: is_training=False)."""
        self._count_bn += 1
        return tf.contrib.layers.batch_norm(x, is_training=False,
                                            updates_collections=None, scope='bn' + str(self._count_bn))
    def activation(self, x):
        """Add a ReLU activation."""
        self._count_activations += 1
        return tf.nn.relu(x, name='relu' + str(self._count_activations))
    def dropout(self, x, prob=1):
        """Add a dropout layer with keep-probability `prob`."""
        print ("Dropout", self._count_dropouts)
        self._count_dropouts += 1
        output = tf.nn.dropout(x, prob,
                               name='dropout' + str(self._count_dropouts))
        return output
    def fc(self, x, output_size):
        """Add a fully connected layer (Xavier weights + bias), no activation."""
        self._count_fc += 1
        filters_in = x.get_shape()[-1]
        shape = [filters_in, output_size]
        weights = weight_xavi_init(shape, 'W_f_' + str(self._count_fc))
        bias = bias_variable([output_size], name='B_f_' + str(self._count_fc))
        return tf.nn.xw_plus_b(x, weights, bias, name='fc_' + str(self._count_fc))
    def conv_block(self, x, kernel_size, stride, output_size, padding_in='SAME', dropout_prob=None):
        """conv -> bn -> (dropout when training) -> ReLU."""
        print (" === Conv", self._count_conv, " : ", kernel_size, stride, output_size)
        with tf.name_scope("conv_block" + str(self._count_conv)):
            x = self.conv(x, kernel_size, stride, output_size, padding_in=padding_in)
            x = self.bn(x)
            if dropout_prob is not None:
                # dropout only when the train_state placeholder is True
                x = tf.cond(self._train_state,
                            true_fn=lambda: self.dropout(x, dropout_prob),
                            false_fn=lambda: x)
            return self.activation(x)
    def fc_block(self, x, output_size, dropout_prob=None):
        """fc -> (dropout when training) -> ReLU; records the pre-activation tensor."""
        print (" === FC", self._count_fc, " : ", output_size)
        with tf.name_scope("fc" + str(self._count_fc + 1)):
            x = self.fc(x, output_size)
            if dropout_prob is not None:
                x = tf.cond(self._train_state,
                            true_fn=lambda: self.dropout(x, dropout_prob),
                            false_fn=lambda: x)
            self._features['fc_block' + str(self._count_fc + 1)] = x
            return self.activation(x)
    def get_weigths_dict(self):
        """Return the dict of created weight variables (name -> variable)."""
        return self._weights
    def get_feat_tensors_dict(self):
        """Return the dict of recorded intermediate feature tensors."""
        return self._features
def make_network():
    """Build the conditional-imitation-style driving network graph.

    Image (88x200x3) goes through 8 conv blocks + 2 fc blocks; measured
    speed through 2 fc blocks; both are concatenated and mapped to a
    3-value control output (steer/gas/brake per the commented branch
    config below). Returns a dict with the loss, placeholders and output.
    """
    inp_img = tf.placeholder(tf.float32, shape=[None, 88, 200, 3], name='input_image')
    inp_speed = tf.placeholder(tf.float32, shape=[None, 1], name='input_speed')
    target_control = tf.placeholder(tf.float32, shape=[None, 3], name='target_control')
    #target_command = tf.placeholder(tf.float32, shape=[None, 4], name='target_command')
    train_state = tf.placeholder(tf.bool, shape=[], name='train_state')
    network_manager = Network(train_state)
    with tf.name_scope('Network'):
        # image tower: 8 conv blocks, then flatten + 2 fc blocks
        xc = network_manager.conv_block(inp_img, 5, 2, 32, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 1, 32, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 2, 64, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 1, 64, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 2, 128, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 1, 128, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')
        print (xc)
        xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')
        print (xc)
        x = tf.reshape(xc, [-1, int(np.prod(xc.get_shape()[1:]))], name='reshape')
        print (x)
        x = network_manager.fc_block(x, 512, dropout_prob=0.7)
        print (x)
        x = network_manager.fc_block(x, 512, dropout_prob=0.7)
    with tf.name_scope("Speed"):
        # speed tower: 2 small fc blocks on the scalar speed input
        speed = network_manager.fc_block(inp_speed, 128, dropout_prob=0.5)
        speed = network_manager.fc_block(speed, 128, dropout_prob=0.5)
    # fuse image and speed features, then regress the 3 control values
    j = tf.concat([x, speed], 1)
    j = network_manager.fc_block(j, 512, dropout_prob=0.5)
    control_out = network_manager.fc_block(j, 256, dropout_prob=0.5)
    control_out = network_manager.fc_block(control_out, 256)
    control_out = network_manager.fc(control_out, 3)
    loss = tf.reduce_mean(tf.square(tf.subtract(control_out, target_control)))
    tf.summary.scalar('loss', loss)
    '''
    branch_config = [["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"], \
                     ["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"]]
    branches = []
    losses = []
    for i in range(0, len(branch_config)):
        with tf.name_scope("Branch_" + str(i)):
            branch_output = network_manager.fc_block(j, 256, dropout_prob=0.5)
            branch_output = network_manager.fc_block(branch_output, 256)
            branches.append(network_manager.fc(branch_output, len(branch_config[i])))
            losses.append(tf.square(tf.subtract(branches[i], target_control)))
            print (branch_output)
    losses = tf.convert_to_tensor(losses)
    losses = tf.reduce_mean(tf.transpose(losses, [1, 2, 0]), axis=1) * target_command;
    loss = tf.reduce_sum(losses)
    '''
    return {'loss': loss,
            'train_state': train_state,
            'inputs': [inp_img, inp_speed],
            'labels': [target_control],
            'outputs': [control_out]}
| Python | 197 | 37.7868 | 103 | /carla-train/network_fine_tune.py | 0.565951 | 0.548024 |
w5688414/selfdriving_cv | refs/heads/master | import tensorflow as tf
from tensorflow.python_io import TFRecordWriter
import numpy as np
import h5py
import glob
import os
from tqdm import tqdm
from IPython import embed
def _bytes_feature(value):
    """Wrap a single bytes value into a tf.train.Feature (bytes_list)."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _float_feature(value):
    """Wrap an iterable of floats into a tf.train.Feature (float_list)."""
    wrapped = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=wrapped)
# Convert every .h5 episode under input_roots into one TFRecord file.
# Each h5 file contributes 200 (image, targets) examples.
input_roots = '/data/dataTrain/val_*/'
output_name = '/data/dataTrain/val.tfrecords'

writer = TFRecordWriter(output_name)
h5files = glob.glob(os.path.join(input_roots, '*.h5'))
for h5file in tqdm(h5files):
    try:
        # context manager guarantees the h5 file is closed even on error
        # (the original leaked the handle when an exception fired mid-loop)
        with h5py.File(h5file, 'r') as data:
            for i in range(200):
                img = data['CameraRGB'][i]
                target = data['targets'][i]
                feature_dict = {'image': _bytes_feature(img.tostring()),
                                'targets': _float_feature(target)}
                example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
                writer.write(example.SerializeToString())
    except Exception as exc:
        # was a bare `except:` that swallowed everything (incl. KeyboardInterrupt)
        # and hid the cause; report the file AND the error, keep converting
        print('filename: {} ({})'.format(h5file, exc))
writer.close()
| Python | 43 | 25.046511 | 88 | /carla-train/h5_to_tfrecord.py | 0.65 | 0.640179 |
w5688414/selfdriving_cv | refs/heads/master | import tensorflow as tf
import numpy as np
import glob
import os
import h5py
from imgaug.imgaug import Batch, BatchLoader, BackgroundAugmenter
import imgaug.augmenters as iaa
import cv2
from IPython import embed
BATCHSIZE = 120
st = lambda aug: iaa.Sometimes(0.4, aug)
oc = lambda aug: iaa.Sometimes(0.3, aug)
rl = lambda aug: iaa.Sometimes(0.09, aug)
seq = iaa.Sequential([
rl(iaa.GaussianBlur((0, 1.5))),
rl(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)),
oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)),
oc(iaa.CoarseDropout((0.0, 0.10), size_percent=(0.08, 0.2),per_channel=0.5)),
oc(iaa.Add((-40, 40), per_channel=0.5)),
st(iaa.Multiply((0.10, 2.5), per_channel=0.2)),
rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)),
], random_order=True)
'''
def augmentation(imgs):
return imgs
'''
def parse_proto(example_proto):
    """Decode one serialized TFRecord example.

    Returns (image, speed, target_control, target_command) where image is an
    88x200x3 uint8 tensor, speed is targets[10] with an added axis,
    target_control is targets[0:3] and target_command is targets[24] mod 4.
    """
    feature_spec = {'image': tf.FixedLenFeature([], tf.string),
                    'targets': tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True)}
    parsed = tf.parse_single_example(example_proto, features=feature_spec)
    image = tf.reshape(tf.decode_raw(parsed['image'], tf.uint8), [88, 200, 3])
    targets = parsed['targets']
    speed = targets[10]
    target_control = targets[0:3]
    target_command = targets[24] % 4
    return image, speed[None], target_control, target_command
class DataProvider:
    """Streams shuffled, batched minibatches from a TFRecord file.

    Wraps a repeating tf.data pipeline (shuffle -> parse_proto -> batch)
    behind a reinitializable iterator bound to the given session.
    """
    def __init__(self, filename, session):
        dataset = tf.data.TFRecordDataset(filename)
        dataset = dataset.repeat().shuffle(buffer_size=2000).map(parse_proto).batch(BATCHSIZE)
        iterator = tf.data.Iterator.from_structure(dataset.output_types,
                                                   dataset.output_shapes)
        dataset_init = iterator.make_initializer(dataset)
        session.run(dataset_init)
        self.dataset = dataset
        self.session = session
        self.next = iterator.get_next()
    def get_minibatch(self, augment = False):
        """Fetch one minibatch; optionally run the imgaug pipeline on the images."""
        data = self.session.run(self.next)
        imgs = data[0].astype('float32')
        if augment:
            imgs = seq.augment_images(imgs)
        # Batch.data carries (speed, target_control, target_command)
        return Batch(images=imgs, data=data[1:])
    def show_imgs(self):
        """Debug helper: display one augmented minibatch image by image (blocks on keypress)."""
        batch = self.get_minibatch(True)
        for img in batch.images:
            cv2.imshow('img', img)
            cv2.waitKey(0)
# Test tf.data & imgaug background loader APIs: prints the wall-clock time
# of each minibatch fetch in an endless loop (Ctrl-C to stop).
if __name__ == '__main__':
    import time
    sess = tf.Session()
    dp = DataProvider('/mnt/AgentHuman/train.tfrecords', sess)
    while True:
        a = time.time()
        dp.get_minibatch()
        b = time.time()
        print(b-a)
| Python | 81 | 32.444443 | 100 | /carla-train/data_provider.py | 0.612915 | 0.580443 |
w5688414/selfdriving_cv | refs/heads/master | import numpy as np
import tensorflow as tf
from network import make_network
from data_provider import DataProvider
from tensorflow.core.protobuf import saver_pb2
import time
import os
# Output locations for TensorBoard logs and checkpoints.
log_path = './log'
save_path = './data'
if __name__ == '__main__':
    # Fine-tune the driving network: resume from step-7500, then loop forever
    # (validation every 50 steps, checkpoint every 500 steps).
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        train_provider = DataProvider('/data/dataTrain/train.tfrecords', sess)
        val_provider = DataProvider('/data/dataTrain/val.tfrecords', sess)
        network = make_network()
        lr = 0.0001
        lr_placeholder = tf.placeholder(tf.float32, [])
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_placeholder,
                                           beta1=0.7, beta2=0.85)
        optimizer = optimizer.minimize(network['loss'])
        sess.run(tf.global_variables_initializer())
        merged_summary_op = tf.summary.merge_all()
        saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V2)
        # resume fine-tuning from the step-7500 checkpoint
        saver.restore(sess, os.path.join(save_path, 'step-7500.ckpt'))
        step = 0
        while True:
            if step % 50 == 0:
                val_batch = val_provider.get_minibatch()
                val_loss = sess.run(network['loss'],
                                    feed_dict={network['inputs'][0]: val_batch.images,
                                               network['inputs'][1]: val_batch.data[0],
                                               network['labels'][0]: val_batch.data[1]})
                print('VALIDATION--------loss: %.4f' % val_loss)
            if step % 500 == 0:
                model_path = os.path.join(save_path, 'step-%d.ckpt' % step)
                saver.save(sess, model_path)
                print("Checkpoint saved to %s" % model_path)
            a = time.time()
            batch = train_provider.get_minibatch(augment=True)
            imgs = batch.images
            speed, target_control, _ = batch.data
            b = time.time()
            _, train_loss = sess.run([optimizer, network['loss']],
                                     feed_dict={network['inputs'][0]: imgs,
                                                network['inputs'][1]: speed,
                                                network['labels'][0]: target_control,
                                                lr_placeholder: lr})
            c = time.time()
            # b-a = data prep time, c-b = GPU step time
            print('step: %d loss %.4f prepare: %.3fs gpu: %.3fs' % (step, train_loss, b-a, c-b))
            step += 1
| Python | 62 | 37.016129 | 96 | /carla-train/train.py | 0.53794 | 0.520136 |
w5688414/selfdriving_cv | refs/heads/master | import tensorflow as tf
import glob
import h5py
import numpy as np
from network import make_network
# read an example h5 file and run one forward pass of the trained network
datasetDirTrain = '/home/eric/self-driving/AgentHuman/SeqTrain/'
datasetDirVal = '/home/eric/self-driving/AgentHuman/SeqVal/'
datasetFilesTrain = glob.glob(datasetDirTrain+'*.h5')
datasetFilesVal = glob.glob(datasetDirVal+'*.h5')
print("Len train:{0},len val{1}".format(len(datasetFilesTrain),len(datasetFilesVal)))
# take one RGB frame and a dummy speed of 100 as the network inputs
data = h5py.File(datasetFilesTrain[1], 'r')
image_input = data['rgb'][1]
input_speed =np.array([[100]])
# network expects a batch dimension: (1, 88, 200, 3)
image_input = image_input.reshape(
    (1, 88, 200, 3))
with tf.Session() as sess:
    network = make_network()
    saver = tf.train.Saver()
    # restore the latest checkpoint from ./data, if any
    ckpt = tf.train.latest_checkpoint("./data")
    if ckpt:
        saver.restore(sess, ckpt)
    output=sess.run(network['outputs'], feed_dict={network['inputs'][0]:image_input,
                                 network['inputs'][1]: input_speed})
    print(output)
sess.close() | Python | 27 | 34.222221 | 85 | /carla-train/predict.py | 0.698947 | 0.676842 |
rojoso/pydot | refs/heads/master | from PIL import Image
from numpy import *
from pylab import *
import os
import sift
# Compute a SIFT descriptor file for every image in ./pages, writing the
# results into ./sifts (paths are relative because we chdir into sifts/).
imlist = os.listdir('pages')
nbr_images = len(imlist)

imlist_dir = [str('../pages/'+imlist[n]) for n in range(nbr_images)]
imname = [imlist[n][:-4] for n in range(nbr_images)]   # filename without extension

# was os.mkdir('sifts'): crashed with FileExistsError on every re-run
os.makedirs('sifts', exist_ok=True)
os.chdir('sifts')

for n in range(nbr_images):
    sift.process_image(imlist_dir[n], str(imname[n]+'.sift'))
| Python | 18 | 20.5 | 68 | /auto-sift.py | 0.682051 | 0.679487 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import torch
import numpy as np
import os
# Scratch check: enumerating a list of dicts leaves the list itself untouched.
l = [{'test': 0, 'test2': 1}, {'test': 3, 'test2': 4}]
print(l)
for idx, _item in enumerate(l):
    print(idx)
print(l)
| Python | 13 | 11.230769 | 54 | /test.py | 0.575 | 0.5375 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import os, shutil, gc
from argparse import ArgumentParser
from time import sleep
import h5py
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import io, signal
from scipy.signal.windows import nuttall, taylor
from .util import *
def proc(args):
    """Turn raw channel-extracted radar captures into Range-Doppler-Angle cubes.

    Reads every .npy file under dataset/<pathin>/chext, splits each track into
    4 memory-friendly parts, and for every 256-chirp radar cube computes a
    range FFT (with calibration + MTI background subtraction), a Doppler FFT
    and an angle FFT. Saves, per part, the denoised RDA cube under
    .../proc/denoised and the raw range-angle maxima under .../proc/raw.

    Fix vs. original: ``dtype=np.int`` was removed in NumPy >= 1.24
    (deprecated since 1.20); the builtin ``int`` is the documented
    replacement and is what np.int always aliased.

    :param args: namespace with .pathin, .pathout (optional) and
                 .saveprefix (optional) attributes
    """
    rawpath = f'dataset/{args.pathin}/chext'
    savepath = f'dataset/{args.pathout}/proc' if args.pathout else f'dataset/{args.pathin}/proc'
    print(f'[LOG] Proc | Starting: {args.pathin}')

    # Create the subsequent save folders
    # if os.path.isdir(savepath):
    #     shutil.rmtree(savepath)
    if not os.path.isdir(savepath):
        os.makedirs(savepath + '/raw/')
        os.mkdir(savepath + '/denoised/')

    # # # PARAMETERS INIT # # #
    c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12) # speed of light
    f_start = 76e9
    f_stop = 78e9
    # Tramp_up = 180e-6
    # Tramp_down = 32e-6
    Tp = 250e-6
    # T_int = 66.667e-3
    N = 512
    # N_frames = 1250
    N_loop = 256
    # Tx_power = 100
    kf = 1.1106e13
    BrdFuSca = 4.8828e-5
    fs = 2.8571e6
    fc = (f_start + f_stop)/2

    # # # CONFIGURE SIGNAL PROCESSING # # #
    # # Range dimension
    NFFT = 2**10 # number of fft points in range dim
    nr_chn = 16 # number of channels
    # fft will be computed using a hanning window to lower border effects
    win_range = np.broadcast_to(np.hanning(N-1), (N_loop, nr_chn, N-1)).T # integral of the window for normalization
    # print(win_range.shape)
    sca_win = np.sum(win_range[:, 0, 0])
    v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf) # vector of range values for each range bin
    r_min = 0 # min range considered
    r_max = 10 # max range considered
    arg_rmin = np.argmin(np.abs(v_range - r_min)) # index of the min range considered value
    arg_rmax = np.argmin(np.abs(v_range - r_max)) # index of the max range considered value
    vrange_ext = v_range[arg_rmin:arg_rmax+1] # vector of range values from rmin to rmax

    # # Doppler dimension
    NFFT_vel = 256 # number of fft points in Doppler dim
    win_vel = np.broadcast_to(np.hanning(N_loop).reshape(1, 1, -1), (vrange_ext.shape[0], nr_chn, N_loop))
    scawin_vel = np.sum(win_vel[0, 0, :])
    vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp) # vector of considered frequencies in Doppler dim
    v_vel = vfreq_vel*c0/(2*fc) # transform freqs into velocities
    v_vel = np.delete(v_vel, np.arange(124, 132)) # delete velocities close to 0

    # # Angle dimension
    NFFT_ant = 64 # number of fft points in angle dim
    win_ant = np.broadcast_to(taylor(nr_chn, nbar=20, sll=20).reshape(1,-1,1), (vrange_ext.shape[0], nr_chn, NFFT_vel))
    scawin_ant = np.sum(win_ant[0, :, 0])
    # win_ant = np.tile(win_ant, (len(vrange_ext), 1))
    # vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180 # vector of considered angles [-90, 90-dtheta]
    # print(vang_deg)
    # print(deg2rad_shift(vang_deg))
    # ant_idx = np.concatenate([np.arange(nr_chn), np.arange(nr_chn+1, 2*nr_chn)]) # indices of virtual antenna elements
    # ant_idx = np.arange(nr_chn)
    cal_data = io.loadmat('dataprep/calibration.mat')['CalData'] # load complex calibration weights for each antenna element
    cal_data = cal_data[:16] # keep weights for TX1 only
    mcal_data = np.broadcast_to(cal_data, (N-1, cal_data.shape[0], N_loop))

    # # # PROCESS THE RDA SLICES FOR EACH FRAME # # #
    for i, fname in enumerate(os.listdir(rawpath)):
        frawname = fname.split('.')[0]
        logprefix = f'[LOG] Proc | {i+1} / {len(os.listdir(rawpath))} {frawname}'
        print(f'{logprefix} {fname}', end='\r')
        Data_orig = np.load(f'{rawpath}/{fname}')
        # print(f'{logprefix} Original data shape: {Data_orig.shape}', end='\r')

        parts = [0, 1, 2, 3]
        SIDELOBE_LEVEL = 3      # dB below the per-range maximum that survives
        LINTHR_HIGH = -97       # range-dependent noise-floor threshold (near)
        LINTHR_LOW = -107       # range-dependent noise-floor threshold (far)

        for part in parts: # split processing in parts for memory, each track is split in 4
            savename = f'{args.saveprefix}_seq_{frawname.split("_")[2]}_sub_{part}' \
                if args.saveprefix else f'{frawname}_sub_{part}'
            logprefix = f'[LOG] Proc | {i*len(parts)+part+1} / {len(os.listdir(rawpath))*len(parts)} {frawname}'
            print(f'{logprefix} {savename}', end='\r')

            Data = Data_orig[:, :, part*32000:(part+1)*32000] # each part has 32k blocks (128k/4)
            # np.int was removed in NumPy 1.24; builtin int is the replacement
            split_locs = np.arange(Data.shape[2], step=N_loop, dtype=int)[1:]
            Data = np.stack(np.split(Data, split_locs, axis=2)[:-1], axis=-1) # split data into a sequence of radar cubes
            print(f'{logprefix} Time-split \t\t\t', end='\r')
            nsteps = Data.shape[-1] # last dim is time
            rda_data = np.zeros((len(vrange_ext), NFFT_ant, NFFT_vel, nsteps), dtype=np.float32)
            raw_ra = np.zeros((len(vrange_ext), NFFT_ant, nsteps), dtype=np.float32)
            for j in range(nsteps): # loop on the timesteps
                print(f'{logprefix} Timestep: {j+1} \t\t\t', end='\r')
                RawRadarCube = Data[1:, :, :, j]
                # print(RawRadarCube.shape)
                # Range fft: window, calibration and scaling are applied
                range_profile = np.fft.fft(RawRadarCube*win_range*mcal_data, NFFT, axis=0)*BrdFuSca/sca_win
                rp_ext = range_profile[arg_rmin:arg_rmax+1] # extract only ranges of interest (0 to 10 m)
                # background subtraction for MTI
                rp_ext -= np.mean(rp_ext, axis=2, keepdims=True)
                # Doppler fft
                range_doppler = np.fft.fftshift(np.fft.fft(rp_ext*win_vel, NFFT_vel, axis=2)/scawin_vel, axes=2)
                # Angle fft
                range_angle_doppler = np.fft.fftshift(np.fft.fft(range_doppler*win_ant, NFFT_ant, axis=1)/scawin_ant, axes=1)
                # absolute value + 20log10 to compute power
                range_angle_doppler = 20*np.log10(np.abs(range_angle_doppler))
                # fig, ax = plt.subplots(1, 2)
                # ax[0].imshow(range_angle_doppler.max(2))
                # ax[1].imshow(range_angle_doppler.max(1))
                # plt.show()
                raw_ra[..., j] = range_angle_doppler.max(2) # store raw range-angle image
                # at this point you have the RDA representation and you can apply further denoising
                # range-dependent noise floor: subtract, clip negatives to zero
                rdep_thr = np.linspace(LINTHR_HIGH, LINTHR_LOW, range_angle_doppler.shape[0]).reshape((-1, 1, 1))
                range_angle_doppler -= rdep_thr
                range_angle_doppler[range_angle_doppler < 0] = 0
                # sidelobe suppression: keep only bins within SIDELOBE_LEVEL dB
                # of the per-(range, Doppler) maximum over angle
                maxs = np.max(range_angle_doppler, axis=1).reshape(range_angle_doppler.shape[0], 1, range_angle_doppler.shape[2])
                # maxs = np.max(range_angle_doppler, axis=(0, 2)).reshape(1, range_angle_doppler.shape[1], 1)
                threshold = maxs - SIDELOBE_LEVEL
                range_angle_doppler[range_angle_doppler < threshold] = 0
                rda_data[..., j] = range_angle_doppler
                # fig, ax = plt.subplots(1, 2)
                # ax[0].imshow(range_angle_doppler.max(2))
                # ax[1].imshow(range_angle_doppler.max(1))
                # plt.show()
            print(f'{logprefix} Saving: {savename} \t\t\t')
            np.save(f'{savepath}/denoised/{savename}.npy', rda_data)
            np.save(f'{savepath}/raw/{savename}.npy', raw_ra)
            # free the per-part arrays before the next allocation
            del Data, rda_data, split_locs, raw_ra
            gc.collect()
        del Data_orig
        gc.collect()
print('\n') | Python | 167 | 49.856289 | 149 | /dataprep/processing.py | 0.553344 | 0.525789 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import os
# import shutil, time, pickle
# from argparse import ArgumentParser
# import matplotlib
import matplotlib.patches as patches
from matplotlib import pyplot as plt
# from matplotlib import rc
import numpy as np
from sklearn.cluster import DBSCAN
# from .channel_extraction import ChannelExtraction
from .util import Cluster, deg2rad_shift, get_box
from .kalman_tracker import KalmanTracker
def truth(args):
    """Generate ground-truth labelled range-angle images for each sequence.

    For every denoised RDA (range x angle x doppler x time) file under
    dataset/<pathin>/proc, points above a power threshold are clustered with
    DBSCAN, clusters are tracked over time with per-target Kalman filters
    (Hungarian association on Mahalanobis distance), and one labelled
    range-angle image per time-step is written via plot4train().

    Args
    - args: CLI namespace from dataprep.parse_arg(); uses pathin, pathout,
      saveprefix and reso.
    """
    action = 'save'  # plot4train() mode: 'save' writes PNGs to disk
    rawpath = f'dataset/{args.pathin}/proc'
    savepath = f'dataset/{args.pathout}/final' if args.pathout else f'dataset/{args.pathin}/final'
    print(f'[LOG] Truth | Starting: {args.pathin}')
    # Create the subsequent save folders
    # if os.path.isdir(savepath):
    #     shutil.rmtree(savepath)
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    for i, fname in enumerate(os.listdir(rawpath + '/denoised')):
        frawname = args.saveprefix if args.saveprefix else args.pathin
        frawname = f'{frawname}_ra_{fname.split("_")[2]}{fname.split("_")[4].split(".")[0]}'
        logprefix = f'[LOG] Truth | {i+1} / {len(os.listdir(rawpath + "/denoised"))}'
        print(f'{logprefix} {frawname}', end='\r')
        # starting index in the loaded data
        start = 10
        # load RDA data, MUST have 4D shape: (N_range_bins, N_angle_bins, N_doppler_bins, N_timesteps)
        rda_data = np.load(f'{rawpath}/denoised/{fname}')[..., start:]
        raw_ra_seq = np.load(f'{rawpath}/raw/{fname}')[..., start:]
        # path where to save the resulting figures
        # initialize clustering/tracker parameters
        MAX_AGE = 10           # max consecutive misses before a track is dropped from selection
        MIN_DET_NUMBER = 15    # min hits before a track is considered confirmed
        MIN_PTS_THR = 30       # min points for a DBSCAN cluster to be kept
        MIN_SAMPLES = 40       # DBSCAN min_samples
        EPS = 0.04             # DBSCAN eps (on normalized coordinates)
        thr = 20               # power threshold for point selection
        # assoc_score = 'Mahalanobis' # either 'IOU' or 'Mahalanobis'
        # CLASS_CONF_THR = 0.0
        # init radar parameters
        c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)  # speed of light from mu0/eps0
        f_start = 76e9
        f_stop = 78e9
        # Tramp_up = 180e-6
        # Tramp_down = 32e-6
        Tp = 250e-6
        # T_int = 66.667e-3
        # N = 512
        # N_loop = 256
        # Tx_power = 100
        kf = 1.1106e13
        # BrdFuSca = 4.8828e-5
        fs = 2.8571e6
        fc = (f_start + f_stop)/2
        # compute range angle doppler intervals
        NFFT = 2**10
        # nr_chn = 16
        v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)
        r_min = 0.5
        r_max = 10
        arg_rmin = np.argmin(np.abs(v_range - r_min))
        arg_rmax = np.argmin(np.abs(v_range - r_max))
        vrange_ext = v_range[arg_rmin:arg_rmax+1]
        NFFT_ant = 64
        vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180
        NFFT_vel = 256
        vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)
        v_vel = vfreq_vel*c0/(2*fc)
        # delta_r = vrange_ext[1] - vrange_ext[0]
        # delta_v = v_vel[1] - v_vel[0]
        # delta_a = vang_deg[1] - vang_deg[0]
        track_id_list = list(range(1000))  # list with possible track id numbers
        tracking_list = []
        # loop over the time-steps
        for timestep in range(rda_data.shape[-1]):
            print(f'{logprefix} {frawname} Timestep: {timestep} \t\t\t', end='\r')
            # select RDA map of the current time-step
            data = rda_data[..., timestep]
            data = data[arg_rmin:arg_rmax + 1]
            # plt.imshow(data.max(1))
            # plt.show()
            # compute normalized maps for DBSCAN
            norm_ang = (vang_deg - np.min(vang_deg)) / (np.max(vang_deg) - np.min(vang_deg))
            norm_vel = (v_vel - np.min(v_vel)) / (np.max(v_vel) - np.min(v_vel))
            norm_ran = (vrange_ext - np.min(vrange_ext)) / (np.max(vrange_ext) - np.min(vrange_ext))
            rav_pts = np.asarray(np.meshgrid(vrange_ext, vang_deg, v_vel, indexing='ij'))
            # print(rav_pts[1, :, :, 0])
            norm_rav_pts = np.asarray(np.meshgrid(norm_ran, norm_ang, norm_vel, indexing='ij'))
            # select values which are over the threshold
            raw_ra = raw_ra_seq[arg_rmin:arg_rmax + 1, :, timestep]
            full_indices = (data > thr)
            data[data < thr] = 0
            rav_pts = rav_pts[:, full_indices]
            power_values_full = data[full_indices]
            norm_rav_pts = norm_rav_pts[:, full_indices]
            rav_pts_lin = rav_pts.reshape(rav_pts.shape[0], -1)
            # save range and angle for tracking
            ra_totrack = np.copy(rav_pts_lin[:2, :])
            ra_totrack[1] = deg2rad_shift(ra_totrack[1])
            normrav_pts_lin = norm_rav_pts.reshape(norm_rav_pts.shape[0], -1)
            if rav_pts.shape[1] > MIN_SAMPLES:
                # apply DBSCAN on normalized RDA map
                labels = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit_predict(normrav_pts_lin.T)
                unique, counts = np.unique(labels, return_counts=True)
                if not len(unique):
                    print('[WAR] Truth | DBSCAN found no clusters! Skipping frame.')
                    continue
            else:
                print('[WAR] Truth | No points to cluster! Skipping frame.')
                continue
            # loop over the detected clusters
            detected_clusters = []  # list containing all the detected clusters
            for cluster_id in unique:
                if cluster_id == -1:  # -1 is the label for noise in DBSCAN, skip it
                    continue
                number = counts[unique == cluster_id]
                if number < MIN_PTS_THR:
                    continue
                # initialize new cluster object and fill its fields
                new_cluster = Cluster(cluster_id)
                new_cluster.cardinality = number
                new_cluster.elements = ra_totrack[:, labels == cluster_id]  # range and angle
                new_cluster.dopplers = rav_pts_lin[2, labels == cluster_id]
                w = np.squeeze(power_values_full[labels == cluster_id])
                weights = w/np.sum(w)  # normalized powers
                # power-weighted centroid in polar coordinates, then cartesian
                new_cluster.center_polar = np.average(new_cluster.elements, weights=weights, axis=1).reshape(2, 1)
                new_cluster.center_cartesian = np.array([new_cluster.center_polar[0]*np.cos(new_cluster.center_polar[1]),
                                                         new_cluster.center_polar[0]*np.sin(new_cluster.center_polar[1])],
                                                        dtype=np.float64).reshape(-1, 1)
                new_cluster.box = get_box(new_cluster)
                detected_clusters.append(new_cluster)
            if not timestep:  # happens only in the first time-step
                # bootstrap one Kalman tracker per detected cluster
                for cl in detected_clusters:
                    tracking_list.append(KalmanTracker(id_=track_id_list.pop(0),
                                                       s0=np.array([cl.center_cartesian[0], 0, cl.center_cartesian[1], 0],
                                                                   dtype=np.float64).reshape(-1,1)))
                    tracking_list[-1].box = cl.box
                sel_tracking_list = np.copy(tracking_list)
            elif timestep:  # happens in all other time-steps
                # prepare the data association building the cost matrix
                detected_centers = [x.center_cartesian for x in detected_clusters]
                prev_cartcenters = []
                prev_centers = []
                if len(tracking_list) > 0:
                    for trk in tracking_list:
                        prev_cartcenters.append(trk.xy)
                        prev_centers.append(trk.rtheta)
                cost_matrix = np.zeros((len(detected_centers), len(prev_cartcenters)))
                for i in range(len(detected_centers)):
                    for j in range(len(prev_cartcenters)):
                        # cost is the Mahalanobis distance
                        cost_matrix[i, j] = KalmanTracker.get_mahalanobis_distance(
                            detected_centers[i] - prev_cartcenters[j],
                            tracking_list[j].get_S())
                cost_matrix = np.asarray(cost_matrix)
                # hungarian algorithm for track association
                matches, undet, _ = KalmanTracker.hungarian_assignment(cost_matrix)
                # handle matched tracks
                if len(matches) > 0:
                    for detec_idx, track_idx in matches:
                        # get observation, polar coords center of the detected cluster
                        obs = detected_clusters[detec_idx].center_polar
                        # get tracker object of the detection
                        current_tracker = tracking_list[track_idx]
                        # KF predict-update step
                        current_tracker.predict()
                        current_tracker.update(obs.reshape(2, 1))
                        current_tracker.box = get_box(detected_clusters[detec_idx])
                        current_tracker.hits += 1
                        current_tracker.misses_number = 0
                        # imaging(current_tracker, detected_clusters[detec_idx], data, labels, full_indices.ravel())
                else:
                    print('[WAR] Truth | No detections-tracks matches found! Skipping frame.')
                    continue
                # deal with undetected tracks
                if len(undet) > 0:
                    for track_idx in undet:
                        old_tracker = tracking_list[track_idx]
                        old_tracker.misses_number += 1
                        # predict only as no obs is detected
                        old_tracker.predict()
                        # NOTE(review): h and w both read box[0] (the range coordinate);
                        # box[2]/box[3] look like the intended extents — confirm.
                        old_tracker.box = get_box(None,
                                                  c=old_tracker.xy,
                                                  h=old_tracker.box[0],
                                                  w=old_tracker.box[0])
                # filter out tracks outside room borders (ghost targets)
                tracking_list = [t for t in tracking_list if (t.xy[0] > -1.70) and (t.xy[0] < 2.30)]  # kill tracks outside the room boundaries
                # select the valid tracks, i.e., the ones with less than the max. misses and enough hits
                sel_tracking_list = [t for t in tracking_list if (t.misses_number <= MAX_AGE) and (t.hits >= MIN_DET_NUMBER)]
            # save/show the labelled range-angle image (timestep zero-padded to 4 digits)
            plot4train(f'{savepath}/{frawname}{int(4-len(str(timestep)))*"0"}{timestep}',
                       data,
                       raw_ra,
                       sel_tracking_list,
                       vrange_ext,
                       vang_deg,
                       args.reso,
                       action)
    print(f'[LOG] Truth | Truth data ready: {savepath}')
def imaging(tracker, cluster, data, labels, full_indices):
    """Debug view of a single cluster: keep only this cluster's points and show
    the resulting range-doppler (left) and range-angle (right) maps, with the
    tracker position marked on the range-angle map."""
    masked = data.ravel().copy()
    selected = masked[full_indices]
    selected[labels != cluster.label] = 0  # zero out every other cluster
    masked[full_indices] = selected
    masked = masked.reshape(data.shape)
    range_angle = masked.max(2)
    range_doppler = masked.max(1)
    plt.subplot(121)
    plt.imshow(range_doppler, aspect='auto')
    plt.subplot(122)
    plt.imshow(range_angle, aspect='auto', extent=(np.pi, 0.25065, 0.5, 10))
    plt.scatter(tracker.rtheta[1], tracker.rtheta[0], marker='x', c='r')
    plt.colorbar()
    plt.show()
    plt.close()
def plot(path, data_points, ra, noisy_ramap, t_list, action, index, ranges, angles):
    """Side-by-side view of the point cloud and the RA image with the tracked
    bounding boxes overlaid; saved as PNG or shown depending on *action*.
    (data_points is currently unused.)"""
    track_boxes = np.array([trk.box for trk in t_list])
    angles = deg2rad_shift(angles)
    _, axes = plt.subplots(1, 2)
    axes[0].set_title('Point-cloud representation')
    axes[1].set_title('RA map image representation')
    axes[0].scatter(ra[1], ra[0], marker='.')
    axes[1].imshow(noisy_ramap, aspect='auto')
    axes[0].set_xlabel(r'$\theta$ [rad]')
    axes[0].set_ylabel(r'$R$ [m]')
    axes[0].set_xlim([0.25065, np.pi])
    axes[0].set_ylim([0.5, 10])
    axes[0].grid()
    for box, trk in zip(track_boxes, t_list):
        # real-valued box on the point cloud, pixel-level box on the image
        add_bb(box, axes[0], trk.id)
        add_bb(adjust_bb(box, ranges, angles), axes[1], trk.id)
    if action == 'save':
        plt.savefig(path + f'fig_{index}', format='png', dpi=300)
        plt.close()
    elif action == 'plot':
        plt.title(f'Frame {index}')
        plt.show()
        plt.close()
def plot4train(path, data_points, noisy_ramap, t_list, ranges, angles, reso=416, action='save'):
    """Render the RA image at reso x reso with no axes and save it with up to
    four pixel-space bounding boxes encoded in the file name."""
    boxes = np.array([kt.box for kt in t_list])
    angles = deg2rad_shift(angles)
    canvas = plt.figure(figsize=(1, 1), dpi=reso, frameon=False)
    axis = canvas.add_axes([0, 0, 1, 1])
    axis.axis('off')
    axis.imshow(noisy_ramap, aspect='auto')
    width_scale = reso / len(angles)
    height_scale = reso / len(ranges)
    pixel_boxes = []
    for box in boxes[:min(4, len(boxes))]:
        # convert the real-valued box to pixel coords (x, y, w, h ordering)
        pix = adjust_bb(box, ranges, angles, width_scale, height_scale)
        pixel_boxes.append([int(pix[1][0]), int(pix[0][0]), int(pix[3][0]), int(pix[2][0])])
    if pixel_boxes and action == 'save':
        plt.savefig(f'{path}_{pixel_boxes}.png'.replace(' ', ''), format='png', dpi=reso)
    elif action == 'plot':
        plt.show()
    plt.close()
def add_bb(bb, ax, note):
    """Overlay one rectangle on *ax*; bb is [row_center, col_center, height, width].
    (*note* is accepted for interface compatibility but not drawn.)"""
    left = bb[1] - bb[3]/2
    top = bb[0] - bb[2]/2
    rect = patches.Rectangle((left, top),
                             bb[3],   # width
                             bb[2],   # height
                             linewidth=1,
                             edgecolor='r',
                             facecolor='none')
    ax.add_patch(rect)
def adjust_bb(bb_real, r, a, w_scale = 1, h_scale = 1):
    '''
    Map a real-valued box [r_center, a_center, r_extent, a_extent] onto image
    pixel coordinates by snapping each edge to its nearest range/angle bin,
    which avoids the bias introduced by non-uniform angle-bin spacing.
    '''
    def nearest(axis, value):
        # index of the bin closest to `value` along the given axis vector
        return np.argmin(np.abs(axis - value))

    r_c, a_c, r_ext, a_ext = bb_real[0], bb_real[1], bb_real[2], bb_real[3]
    pixel_box = np.zeros(bb_real.shape[0])
    pixel_box[0] = nearest(r, r_c) * h_scale
    pixel_box[1] = nearest(a, a_c) * w_scale
    pixel_box[2] = np.abs(nearest(r, r_c - r_ext/2) - nearest(r, r_c + r_ext/2)) * h_scale
    pixel_box[3] = np.abs(nearest(a, a_c + a_ext/2) - nearest(a, a_c - a_ext/2)) * w_scale
    return pixel_box.reshape(-1, 1)
| Python | 335 | 42.546268 | 145 | /dataprep/truth.py | 0.530948 | 0.511139 |
import h5py
import numpy as np
import os, shutil
def chext(args):
    """Extract per-channel radar data from every raw HDF5 capture.

    Reads raw/<pathin>/* and stores each file as a float32 .npy array under
    dataset/<pathout or pathin>/chext via channel_extraction().

    Args
    - args: CLI namespace from dataprep.parse_arg(); uses pathin, pathout
      and saveprefix.
    """
    rawpath = f'raw/{args.pathin}'
    savepath = f'dataset/{args.pathout}/chext' if args.pathout else f'dataset/{args.pathin}/chext'
    print(f'[LOG] ChExt | Starting: {args.pathin}')
    # Create the subsequent save folders
    # if os.path.isdir(savepath):
    #     shutil.rmtree(savepath)
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    for i, fname in enumerate(os.listdir(rawpath)):
        logprefix = f'[LOG] ChExt | {i+1} / {len(os.listdir(rawpath))}'
        savename = f'{args.saveprefix}_seq_{i}' if args.saveprefix else f'{fname.split("_")[0]}_seq_{fname.split("_")[1].split(".")[0]}'
        # BUG FIX: the literal word 'fname' was printed instead of the file name
        print(f'{logprefix} {fname}', end='\r')
        channel_extraction(
            f'{rawpath}/{fname}',
            savepath,
            savename,
            action='SAVE',
            logprefix=logprefix)
    print('\n')
def channel_extraction(loadpath, savepath, savename, action, logprefix='', nr_chn=16):
    """Read datasets 'Chn1'..'Chn<nr_chn>' from one HDF5 file into a single
    float32 array of shape (samples, nr_chn, chirps).

    Args
    - loadpath: path of the input HDF5 file
    - savepath/savename: output folder and stem used when action == 'SAVE'
    - action: 'SAVE' writes <savepath>/<savename>.npy, 'RETURN' returns the array
    - logprefix: prefix for progress messages
    - nr_chn: number of receiver channels to extract
    """
    # BUG FIX: open read-only — the file is never modified, and 'r+' both risks
    # accidental writes and fails on write-protected inputs.
    with h5py.File(loadpath, 'r') as h5data:
        print(f'{logprefix} Initializing: {loadpath}', end='\r')
        Data = np.zeros((h5data['Chn1'].shape[1], nr_chn, h5data['Chn1'].shape[0]), dtype=np.float32)
        for i in range(nr_chn):
            print(f'{logprefix} Extracting channel {i+1} \t\t\t', end='\r')
            channel = np.asarray(h5data['Chn{}'.format(i+1)])
            # transpose so channel axis sits in the middle: (samples, chn, chirps)
            Data[:, i, :] = channel.T
        print(f'{logprefix} Finalizing {savepath}', end='\r')
        if action == 'SAVE':
            print(f'{logprefix} Saving', end='\r')
            np.save(f'{savepath}/{savename}', Data)
            print(f'{logprefix} Saved: {savepath}/{savename} Data shape: {Data.shape}')
        elif action == 'RETURN':
            return Data
        else:
            print('[ERR] ChExt | Invalid action, please select SAVE or RETURN')
import os
import shutil
from dataclasses import dataclass, field
from typing import List
import h5py
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
@dataclass
class Cluster:
    # cluster object, contains detected cluster points and additional values
    label: int                  # DBSCAN cluster label
    cardinality: int = 0        # number of points belonging to the cluster
    elements: List = field(default_factory=list)   # (range, angle) points
    dopplers: List = field(default_factory=list)   # doppler value of each point
    # BUG FIX: the np.ndarray defaults were class-level objects shared by every
    # instance (classic mutable-default pitfall); default_factory gives each
    # instance its own array.
    center_polar: np.ndarray = field(default_factory=lambda: np.empty((2, 1)))
    center_cartesian: np.ndarray = field(default_factory=lambda: np.empty((2, 1)))
    box: np.ndarray = field(default_factory=lambda: np.empty((4, 1)))
def polar2cartesian(xp):
    """Convert (range, angle[rad]) to a (2, 1) cartesian column vector."""
    rng, ang = xp[0], xp[1]
    xy = [rng * np.cos(ang), rng * np.sin(ang)]
    return np.array(xy, dtype=np.float64).reshape(-1, 1)
def cartesian2polar(xy):
    """Convert cartesian (x, y) to a (2, 1) (range, angle[rad]) column vector."""
    radius = np.sqrt(xy[0]**2 + xy[1]**2)
    angle = np.arctan2(xy[1], xy[0])
    return np.array([radius, angle]).reshape(-1, 1)
def deg2rad_shift(angles):
    """Degrees -> radians, then mirror about pi/2 (0 deg maps to pi/2 rad).
    Works element-wise on arrays; the input is not modified."""
    shifted = np.copy(angles)
    shifted = np.pi*shifted/180
    shifted = -shifted + np.pi/2
    return shifted
def shift_rad2deg(angles):
    """Inverse of deg2rad_shift: un-mirror about pi/2, then radians -> degrees.
    Works element-wise on arrays; the input is not modified."""
    shifted = np.copy(angles)
    shifted = -shifted + np.pi/2
    shifted = 180*shifted/np.pi
    return shifted
def get_box(cluster, c=None, h=0.5, w=0.3):
    """Build a (4, 1) box [r_center, a_center, r_extent, a_extent].

    With a cluster: centre from cluster.center_polar, extents from the spread
    of its points. With cluster=None: centre *c* and default extents h, w.
    """
    if cluster is None:
        # fallback box around an externally supplied centre with default size
        return np.array([c[0], c[1], h, w]).reshape(4, 1)
    ranges, angs = cluster.elements[0], cluster.elements[1]
    extent_r = ranges.max() - ranges.min()
    extent_a = angs.max() - angs.min()
    return np.array([cluster.center_polar[0].squeeze(),
                     cluster.center_polar[1].squeeze(),
                     extent_r,
                     extent_a]).reshape(4, 1)
def IOU_score(a, b):
    """Intersection-over-union of two corner-format boxes [x1, y1, x2, y2];
    0.0 when the boxes do not overlap."""
    inter_w = min(a[2], b[2]) - max(a[0], b[0])
    inter_h = min(a[3], b[3]) - max(a[1], b[1])
    if (inter_w < 0) or (inter_h < 0):
        return 0.0
    intersection = inter_w * inter_h
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return intersection / (area_a + area_b - intersection)
| Python | 70 | 28.942858 | 96 | /dataprep/util.py | 0.570637 | 0.532779 |
import argparse
import sys, gc
from .channel_extraction import chext
from .processing import proc
from .truth import truth
def parse_arg():
    """Parse the dataprep sub-command flags (everything after argv[1])."""
    cli = argparse.ArgumentParser(description='Data preprocessing module', add_help=True)
    cli.add_argument('--pathin', type=str, required=True,
                     help="Path for the input folder")
    cli.add_argument('--pathout', type=str,
                     help="Path for the output folder")
    cli.add_argument('--saveprefix', type=str,
                     help="Prefix for the save file")
    cli.add_argument('--chext', action='store_true',
                     help="Perform channel extraction")
    cli.add_argument('--proc', action='store_true',
                     help="Perform signal processing (FFT and denoising)")
    cli.add_argument('--truth', action='store_true',
                     help="Perform ground truth (clustering, tracking) bouding box calculations")
    cli.add_argument('--objcount', type=int, default=1,
                     help="Number of objects per image (default: 1)")
    cli.add_argument('--reso', type=int, default=416,
                     help="Input image resolution (def: 416)")
    cli.add_argument('--v', type=int, default=0,
                     help="Verbose (0 minimal (def), 1 normal, 2 all")
    # argv[0] is the program, argv[1] the sub-command name
    return cli.parse_args(sys.argv[2:])
def main():
    """Entry point: run each enabled preprocessing stage, collecting garbage
    after every stage check to keep memory bounded."""
    args = parse_arg()
    for enabled, stage in ((args.chext, chext), (args.proc, proc), (args.truth, truth)):
        if enabled:
            stage(args)
        gc.collect()
| Python | 47 | 30.276596 | 92 | /dataprep/__init__.py | 0.646939 | 0.638095 |
import torch
# import torch.nn as nn
# import torch.nn.functional as F
# import torch.optim as optim
# import torchvision
import torchvision.transforms as transforms
import os, sys
# import pickle, time, random
import numpy as np
# from PIL import Image
import argparse
from .darknet import DarkNet
from .dataset import *
from .util import *
def parse_arg():
    """Parse the prediction sub-command flags (everything after argv[1])."""
    cli = argparse.ArgumentParser(description='MmWaveYoLo Prediction module', add_help=True)
    cli.add_argument('--cfg', type=str, default='yolov3micro',
                     help="Name of the network config (default: yolov3micro)")
    cli.add_argument('--pathin', type=str,
                     help="Path for the input folder (default: testset)")
    cli.add_argument('--pathout', type=str,
                     help="Path for the output folder")
    # note: parsed as a string, so the default 'False' is still truthy
    cli.add_argument('--video', type=str, default='False',
                     help="Create video after prediction (default: False)")
    cli.add_argument('--datasplit', type=float, default=0,
                     help="Dataset split percentage (default: 0 (single set))")
    cli.add_argument('--seed', type=float, default=0,
                     help="Seed for the random shuffling (default: 0, (no shuffle))")
    cli.add_argument('--bs', type=int, default=8,
                     help="Batch size (default: 8)")
    cli.add_argument('--ckpt', type=str, default='10.0',
                     help="Checkpoint name <'epoch'.'iteration'>")
    cli.add_argument('--nms', type=float, default=0.5,
                     help="NMS threshold (default: 0.5)")
    cli.add_argument('--obj', type=float, default=0.5,
                     help="Objectiveness threshold (default: 0.5)")
    cli.add_argument('--iou', type=float, default=0.5,
                     help="Intersection over Union threshold (default: 0.5)")
    cli.add_argument('--reso', type=int, default=416,
                     help="Input image resolution (default: 416)")
    cli.add_argument('--v', type=int, default=0,
                     help="Verbose (0 minimal (default), 1 normal, 2 all")
    return cli.parse_args(sys.argv[2:])
def predict():
    """Run the trained DarkNet on a dataset split and evaluate the predictions.

    Builds the network from cfg/<cfg>.cfg, loads checkpoint <epoch>.<iteration>
    from save/checkpoints/, runs inference over the test loader, accumulates
    per-prediction TP/FP entries via correctness(), and writes a
    precision/recall plot (and optionally an animation) under results/<pathout>.
    """
    torch.cuda.empty_cache()
    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    pathout = f"results/{args.pathout}"
    num_workers = 2
    # NETWORK
    darknet = DarkNet(pathcfg, args.reso, args.obj, args.nms)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)
    # IMAGE PREPROCESSING!!!
    transform = transforms.Compose([
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ToTensor()
    ])
    # ====================================================
    # Test data allocation
    _, testloader = getDataLoaders(pathin, transform, train_split=args.datasplit, batch_size=args.bs, \
        num_workers=num_workers, collate_fn=collate, random_seed=args.seed)
    # ====================================================
    # placeholders, immediately overwritten by the checkpoint name below
    start_epoch = 2
    start_iteration = 0
    # LOAD A CHECKPOINT!!!
    start_epoch, start_iteration = args.ckpt.split('.')
    start_epoch, start_iteration, state_dict, _, _, _, _ = load_checkpoint(
        f'save/checkpoints/',
        int(start_epoch),
        int(start_iteration)
    )
    darknet.load_state_dict(state_dict)
    # ====================================================
    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    darknet.to(device)  # Put the network on device
    if args.v > 0:
        print(next(darknet.parameters()).device)
    # Create the subsequent save folders
    # if os.path.isdir(pathout):
    #     shutil.rmtree(pathout)
    if not os.path.isdir(pathout):
        os.makedirs(pathout)
    # PREDICT
    print(f'[LOG] PREDICT | Test set: {len(testloader.dataset)}')
    darknet.eval()  # set network to evaluation mode
    outcomes = np.zeros(4)  # NOTE(review): currently unused
    predList = []
    countLabels = 0
    with torch.no_grad():
        for bidx, (paths, inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            predictions = darknet(inputs)
            for idx, path in enumerate(paths):
                print(f'[LOG] PREDICT | Predicting {(bidx*args.bs)+idx+1}/{len(testloader.dataset)}', end='\r')
                savename = path.split('/')[-1].split('_')[2]
                try:
                    # rows of the batched prediction tensor belonging to this image
                    prediction = predictions[predictions[:, 0] == idx]
                except Exception:
                    prediction = torch.Tensor([])
                    print(f'[ERROR] TEST | No prediction? {prediction}')
                tempL, _= correctness(prediction, targets[idx], reso=darknet.reso, iou_thresh=args.iou)
                predList.extend(tempL)
                countLabels += targets[idx].size(0)
                # draw_prediction(path, prediction, targets[idx], darknet.reso, \
                #     names=[''], pathout=f'{pathout}/preds', savename=f'{savename}.png')
    # NOTE(review): args.video defaults to the *string* 'False', which is truthy,
    # so this branch likely always runs — confirm the intended check.
    if args.video:
        animate_predictions(pathout, args.video)
    print(countLabels)
    predList = precision_recall(predList, countLabels)
    plot_precision_recall(predList, pathout=f'{pathout}/map', savename='')
    # plot_precision_recall(predList, pathout=f'{pathout}/map', savename=f'iou{args.iou}.png')
    # ====================================================
| Python | 144 | 36.611111 | 111 | /yolo/predict.py | 0.599335 | 0.590473 |
from __future__ import division
import torch
import os
from operator import itemgetter
import numpy as np
import cv2
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
def draw_prediction(img_path, prediction, target, reso, names, pathout, savename):
    """Draw prediction result

    Args
    - img_path: (str) Path to image
    - prediction: (np.array) Prediction result with size [#bbox, 8]
        8 = [batch_idx, x1, y1, x2, y2, objectness, cls_conf, class idx]
    - target: (np.array) Prediction result with size [#bbox, 5]
        8 = [batch_idx, x1, y1, x2, y2, class idx]
    - reso: (int) Image resolution
    - names: (list) Class names  # NOTE(review): currently unused — class captions are not drawn
    - pathout: (str) Folder to save prediction result into (created if missing)
    - savename: (str) File name of the saved image
    """
    img = Image.open(img_path).convert('RGB')
    w, h = img.size
    h_ratio = h / reso
    w_ratio = w / reso
    draw = ImageDraw.Draw(img)
    # Drawing targets (labels)
    # NOTE(review): the broad except blocks below silently swallow any drawing
    # error (best-effort rendering); only a generic message is printed.
    try:
        for i in range(target.shape[0]):
            bbox = target[i, 0:4].numpy()
            bbox = xywh2xyxy(bbox, target=True)  # targets are centre-format in 0..1
            caption = f'truth #{i}'
            color = (255, 255, 255)
            x1, y1, x2, y2 = bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h
            draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),
                           outline=color, width=2)
            draw.rectangle((x1 * w_ratio, y2 * h_ratio + 15,
                            x2 * w_ratio, y2 * h_ratio),
                           fill=color)
            draw.text((x1 * w_ratio + 2, y2 * h_ratio),
                      caption, fill='black')
    except Exception:
        print(f'[ERR] TEST | Could not draw target')
    # Drawing predictions
    try:
        for i in range(prediction.shape[0]):
            bbox = prediction[i, 1:5]  # already corner-format in pixel coords
            conf = '%.2f' % prediction[i, -3]
            caption = f'pred {conf}'
            color = (0, 0, 255)
            x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
            # line width scales with objectness so confident boxes stand out
            draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),
                           outline=color, width=int(1+prediction[i, -3]*5))
            draw.rectangle((x1 * w_ratio, y1 * h_ratio - 15,
                            x2 * w_ratio, y1 * h_ratio),
                           fill=color)
            draw.text((x1 * w_ratio + 2, y1 * h_ratio - 15),
                      caption, fill='white')
    except Exception:
        print(f'[ERR] TEST | Could not draw prediction')
    # img.show()
    os.makedirs(pathout, exist_ok=True)
    img.save(f'{pathout}/{savename}')
    img.close()
def animate_predictions(path, savetype='gif'):
    """Assemble the per-frame PNGs under <path>/preds into an animation:
    <path>/sequence.gif (PIL) or <path>/sequence.avi (OpenCV) at 5 fps."""
    fps = 5
    if savetype == 'gif':
        frames = [Image.open(f'{path}/preds/{f}').copy()
                  for f in sorted(os.listdir(f'{path}/preds')) if f.endswith('.png')]
        os.makedirs(path, exist_ok=True)
        frames[0].save(f'{path}/sequence.gif', save_all=True, \
            optimize=False, append_images=frames[1:], loop=0, \
            duration=int(1000/fps))
        print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.gif')
    elif savetype == 'avi':
        names = [img for img in sorted(os.listdir(f'{path}/preds')) if img.endswith(".png")]
        first = cv2.imread(f'{path}/preds/{names[0]}')
        height, width, _ = first.shape
        writer = cv2.VideoWriter(f'{path}/sequence.avi', 0, fps, (width, height))
        for name in names:
            writer.write(cv2.imread(f'{path}/preds/{name}'))
        cv2.destroyAllWindows()
        writer.release()
        print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.avi')
def IoU(box1, box2):
    """Compute IoU between box1 and box2 (corner format [..., x1, y1, x2, y2]).

    Works element-wise over matching leading dimensions and returns a tensor
    of IoU values. Widths/heights use the +1 pixel-inclusive convention.
    """
    # move to CPU so the arithmetic below never mixes devices
    # (idiom fix: plain truthiness instead of '== True')
    if box1.is_cuda:
        box1 = box1.cpu()
    if box2.is_cuda:
        box2 = box2.cpu()
    #Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[...,0], box1[...,1], box1[...,2], box1[...,3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[...,0], box2[...,1], box2[...,2], box2[...,3]
    #get the corrdinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    #Intersection area (clamped to 0 for disjoint boxes)
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
    #Union Area
    b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou
# TP / FP / FN / TN calculations
def correctness(prediction, target, reso=416, iou_thresh=0.5):
    """Classify each predicted box of one image as TP or FP against the targets.

    Args
    - prediction: tensor [#pred, 8], rows (batch_idx, x1, y1, x2, y2, obj, cls_conf, cls)
    - target: tensor [#gt, 5], centre-format boxes in 0..1 (scaled by reso here)
    - reso: input resolution used to scale targets to pixel coordinates
    - iou_thresh: minimum IoU for a prediction to count as a true positive

    Returns
    - tempList: one [objectness_str, best_iou, is_false_positive] entry per prediction
    - tempCor: np.array [TP, FP, FN, TN] counts for this image
    """
    flagP = np.zeros([prediction.size(0), 2])  # per prediction: (best IoU, matched target idx)
    flagP[:,1] -= 1                            # -1 marks "no target matched yet"
    tempCor = np.zeros(4)
    flagT = np.zeros(target.size(0))-1         # -1 until a target is claimed by a TP
    tempList = []
    if prediction.size(0) != 0:
        # greedy matching: each prediction remembers its best-overlapping target
        for i, p in enumerate(prediction):
            for j, t in enumerate(target):
                iou = IoU(p[1:5], xywh2xyxy(t[0:4]*reso)).numpy()[0]
                if iou > flagP[i, 0]:
                    flagP[i,:] = [iou, j]
        for i in range(flagP.shape[0]):
            # NOTE(review): when flagP[i,1] is -1 this indexes flagT[-1]
            # (the last target) — confirm intended.
            if flagP[i,0] >= iou_thresh and flagT[int(flagP[i,1])] == -1:
                # True Positive: iou >= thresh
                tempCor[0] += 1
                flagT[int(flagP[i,1])] = 1
                tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], False])
            else:
                # False Positive: iou < thresh or duplicates
                # BUG FIX: was 'tempCor[1] = 1', which capped the FP count at 1
                tempCor[1] += 1
                tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], True])
    # False Negative: no prediction matched any target
    if np.count_nonzero(flagP[:, 1] == -1) == prediction.size(0):
        tempCor[2] += 1
    return tempList, tempCor
# Precision and recall calculations
def precision_recall(predList, countLabels):
    """Append cumulative TP/FP counts, precision and recall to each entry.

    predList entries are [objectness(str), iou, is_false_positive]; the list is
    sorted in place (objectness desc, TPs before FPs, IoU desc) and each entry
    is extended with [cum_TP, cum_FP, precision, recall].
    """
    # stable sorts applied from least- to most-significant key
    predList.sort(key=itemgetter(1), reverse=True)   # IoU, descending
    predList.sort(key=itemgetter(2))                 # TPs (False) before FPs (True)
    predList.sort(key=itemgetter(0), reverse=True)   # objectness, descending
    cum_tp = 0
    cum_fp = 0
    for entry in predList:
        if entry[2]:
            cum_fp += 1
        else:
            cum_tp += 1
        precision = float(cum_tp / (cum_tp + cum_fp))
        recall = float(cum_tp / countLabels)
        entry.extend([cum_tp, cum_fp, precision, recall])
    return predList
# Drawing precision/recall curve
def plot_precision_recall(predList, pathout, savename=''):
    """Plot the precision/recall curve (top) and objectness trace (bottom).

    Args
    - predList: output of precision_recall(); last two columns are precision/recall
    - pathout: folder where the figure is written when savename is given
    - savename: file name of the figure; empty string shows it interactively
    """
    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float dtype
    predArr = np.array(predList, dtype=float)
    # print(np.round(predArr[:,-2:], 2))
    fig, _ = plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]})
    plt.subplot(2, 1, 1)
    plt.plot(predArr[:, -1], predArr[:, -2])
    plt.plot(np.round(predArr[:, -1], 2), np.round(predArr[:, -2], 2))
    plt.grid(True)
    plt.title(f'Precision/Recall graph ({savename})')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.subplot(2, 1, 2)
    plt.plot(predArr[:, 0])
    ax = plt.gca()
    ax.axes.xaxis.set_visible(False)
    # ax.axes.yaxis.set_visible(False)
    plt.rcParams['axes.titley'] = 1.0  # y is in axes-relative coordinates.
    plt.rcParams['axes.titlepad'] = -14  # pad is in points...
    plt.title(f'Objectiveness score')
    if savename != '':
        # BUG FIX: previously created a *directory* at pathout/savename — the
        # exact path savefig then tried to write to; create only the parent.
        os.makedirs(pathout, exist_ok=True)
        plt.savefig(f'{pathout}/{savename}', dpi=100)
        print(f'[LOG] TRAIN | Precision/Recall graph save \"{pathout}/{savename}\"')
    else:
        plt.show()
    plt.close()
def xywh2xyxy(bbox, target=False):
    """Convert centre-format boxes (xc, yc, w, h) to corner format (x1, y1, x2, y2).

    Args
    - bbox: sequence (target=True) or tensor of shape [4] / [..., 4]
    - target: True returns a plain list; False returns a tensor (1D input is
      promoted to shape [1, 4])
    """
    if target:
        xc, yc = bbox[0], bbox[1]
        half_w, half_h = bbox[2] / 2, bbox[3] / 2
        return [xc - half_w, yc - half_h, xc + half_w, yc + half_h]
    bbox_ = bbox.clone()
    if len(bbox_.size()) == 1:
        bbox_ = bbox_.unsqueeze(0)
    # ROBUSTNESS FIX: the old code wrote 'xc + 2*half_w' for the far corner,
    # which was only correct because xc aliased the already-overwritten column.
    # Snapshot the centres so each corner is computed from the originals.
    xc, yc = bbox_[..., 0].clone(), bbox_[..., 1].clone()
    half_w, half_h = bbox_[..., 2] / 2, bbox_[..., 3] / 2
    bbox_[..., 0] = xc - half_w
    bbox_[..., 1] = yc - half_h
    bbox_[..., 2] = xc + half_w
    bbox_[..., 3] = yc + half_h
    return bbox_
#Check if it is working!!!
def xyxy2xywh(bbox, target=False):
    """Convert corner-format boxes (x1, y1, x2, y2) to centre format (xc, yc, w, h).

    With target=True, *bbox* is a plain sequence and a list is returned; otherwise
    *bbox* is a tensor (1D input is promoted to shape [1, 4])."""
    if target:
        width, height = bbox[2] - bbox[0], bbox[3] - bbox[1]
        return [bbox[0] + width/2, bbox[1] + height/2, width, height]
    out = bbox.clone()
    if len(out.size()) == 1:
        out = out.unsqueeze(0)
    width = out[..., 2] - out[..., 0]
    height = out[..., 3] - out[..., 1]
    center_x = out[..., 0] + width/2
    center_y = out[..., 1] + height/2
    out[..., 0] = center_x
    out[..., 1] = center_y
    out[..., 2] = width
    out[..., 3] = height
    return out
def load_checkpoint(checkpoint_dir, epoch, iteration):
    """Load checkpoint <epoch>.<iteration>.ckpt from *checkpoint_dir*.

    Args
    - checkpoint_dir: (str) path to the checkpoint folder
    - epoch: (int) epoch of the checkpoint
    - iteration: (int) iteration of the checkpoint within the epoch

    Returns
    - (start_epoch, start_iteration, state_dict, tlosses, vlosses,
       optimizer, scheduler) as stored in the checkpoint

    Raises
    - Exception when the checkpoint file does not exist
    """
    ckpt_path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')
    if not os.path.isfile(ckpt_path):
        raise Exception("Checkpoint in epoch %d doesn't exist" % epoch)
    ckpt = torch.load(ckpt_path)
    # sanity check: the file content must match the requested coordinates
    assert epoch == ckpt['epoch'], "epoch != checkpoint's start_epoch"
    assert iteration == ckpt['iteration'], "iteration != checkpoint's start_iteration"
    return (ckpt['epoch'], ckpt['iteration'], ckpt['state_dict'],
            ckpt['tlosses'], ckpt['vlosses'], ckpt['optimizer'], ckpt['scheduler'])
def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):
    """Persist *save_dict* as <checkpoint_dir>/<epoch>.<iteration>.ckpt.

    Args
    - checkpoint_dir: (str) checkpoint folder (created if missing)
    - epoch: (int) epoch of the checkpoint file
    - iteration: (int) iteration of the checkpoint within the epoch
    - save_dict: (dict) payload; must carry matching 'epoch'/'iteration' keys
    """
    os.makedirs(checkpoint_dir, exist_ok=True)
    ckpt_path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')
    # the payload must agree with the file name coordinates
    assert epoch == save_dict['epoch'], "[ERROR] epoch != save_dict's start_epoch"
    assert iteration == save_dict['iteration'], "[ERROR] iteration != save_dict's start_iteration"
    if os.path.isfile(ckpt_path):
        print("[WARNING] Overwrite checkpoint in epoch %d, iteration %d" %
              (epoch, iteration))
    try:
        torch.save(save_dict, ckpt_path)
    except Exception:
        raise Exception("[ERROR] Fail to save checkpoint")
    print("[LOG] Checkpoint %d.%d.ckpt saved" % (epoch, iteration))
def parse_cfg(cfgfile):
    """
    Takes a configuration file

    Returns a list of blocks. Each blocks describes a block in the neural
    network to be built. Block is represented as a dictionary in the list
    """
    # ROBUSTNESS FIX: context manager guarantees the file is closed even if
    # reading raises (the old open/close pair leaked the handle on error)
    with open(cfgfile, 'r') as file:
        lines = file.read().split('\n')              # store the lines in a list
    lines = [x for x in lines if len(x) > 0]         # get read of the empty lines
    lines = [x for x in lines if x[0] != '#']        # get rid of comments
    lines = [x.rstrip().lstrip() for x in lines]     # get rid of fringe whitespaces

    block = {}
    blocks = []
    for line in lines:
        if line[0] == "[":               # This marks the start of a new block
            if len(block) != 0:          # block still holds the previous section
                blocks.append(block)     # add it the blocks list
                block = {}               # re-init the block
            block["type"] = line[1:-1].rstrip()
        else:
            key, value = line.split("=")
            block[key.rstrip()] = value.lstrip()
    blocks.append(block)                 # flush the final section

    return blocks
def plot_losses(tlosses, vlosses=None, savepath=''):
    """Plot the training (and optionally validation) loss curves; save the
    figure under *savepath* when given, otherwise show it interactively."""
    epochs = len(tlosses)
    plt.plot(range(0, epochs), tlosses)
    if vlosses:
        plt.plot(range(0, len(vlosses)), vlosses)
        plt.legend(['Train loss', 'Valid loss'], loc='upper left')
        plt.title(f'Training and Validation loss ({epochs} Epochs) ')
    else:
        plt.legend(['Train loss'], loc='upper left')
        plt.title(f'Training loss ({epochs} Epochs) ')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    if savepath != '':
        os.makedirs(savepath, exist_ok=True)
        plt.savefig(f'{savepath}/loss_{epochs}.png', dpi=100)
        print(f'[LOG] TRAIN | Loss graph save \"{savepath}/loss_{epochs}.png\"')
    else:
        plt.show()
    plt.close()
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | from __future__ import division
import torch, torch.nn as nn, torch.nn.functional as F
# from torch.autograd import Variable
import numpy as np
# import cv2
# from pprint import pprint
from .util import *
# =================================================================
# MAXPOOL (with stride = 1, NOT SURE IF NEEDED)
class MaxPool1s(nn.Module):
    """Max-pool that preserves spatial size (stride 1).

    The input is replicate-padded by kernel_size-1 on the right/bottom so the
    pooled output has the same height/width as the input.
    """

    def __init__(self, kernel_size):
        super(MaxPool1s, self).__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1  # extra pixels needed to preserve size at stride 1

    def forward(self, x):
        padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode="replicate")
        # BUG FIX: stride was previously self.pad (= kernel_size - 1), which is
        # only 1 by coincidence for kernel_size == 2; the stride-1 semantics
        # this layer promises must hold for any kernel size.
        pooled_x = nn.MaxPool2d(self.kernel_size, stride=1)(padded_x)
        return pooled_x
# EMPTY LAYER
class EmptyLayer(nn.Module):
    """Placeholder module for 'route' and 'shortcut' cfg blocks.

    The actual concatenation/addition is performed in the network's forward
    pass; this layer only keeps the module list aligned with the cfg blocks.
    """

    def __init__(self):
        super().__init__()
# YOLO / PREDICTION LAYER
class YOLOLayer(nn.Module):
    """YOLO prediction head for one detection scale.

    Decodes the raw convolutional feature map into box predictions
    (x, y, w, h, objectness, class scores). In training mode it instead
    builds a YOLOLoss from the decoded predictions and returns the loss dict.

    NOTE(review): every tensor is moved with ``.cuda()``, so this layer
    requires a CUDA device — confirm before running on CPU-only hosts.
    """
    def __init__(self, anchors, num_classes, reso, ignore_thresh):
        """
        Args:
        - anchors: list of (w, h) anchor pairs for this scale (input-pixel units)
        - num_classes: number of object classes
        - reso: network input resolution (square)
        - ignore_thresh: IoU threshold (stored; not read in this visible code)
        """
        super(YOLOLayer, self).__init__()
        self.anchors = anchors
        self.num_classes = num_classes
        self.reso = reso
        self.ignore_thresh = ignore_thresh
    def forward(self, x, y_true=None):
        """Decode feature map ``x``; return predictions (eval) or loss (train).

        Args:
        - x: (Tensor) raw head output, [bs, (5+nC)*nA, gs, gs]
        - y_true: ground-truth labels, required only in training mode
        """
        bs, _, gs, _ = x.size()
        stride = self.reso // gs  # no pooling used, stride is the only downsample
        num_attrs = 5 + self.num_classes  # tx, ty, tw, th, p0 + class scores
        nA = len(self.anchors)
        # Anchors are defined in input pixels; rescale them to grid cells.
        scaled_anchors = torch.Tensor(
            [(a_w / stride, a_h / stride) for a_w, a_h in self.anchors]).cuda()
        # Re-organize [bs, (5+nC)*nA, gs, gs] => [bs, nA, gs, gs, 5+nC]
        x = x.view(bs, nA, num_attrs, gs, gs).permute(
            0, 1, 3, 4, 2).contiguous()
        pred = torch.Tensor(bs, nA, gs, gs, num_attrs).cuda()
        # Sigmoid constrains the cell-relative offsets and objectness to (0, 1).
        pred_tx = torch.sigmoid(x[..., 0]).cuda()
        pred_ty = torch.sigmoid(x[..., 1]).cuda()
        pred_tw = x[..., 2].cuda()
        pred_th = x[..., 3].cuda()
        pred_conf = torch.sigmoid(x[..., 4]).cuda()
        if self.training == True:
            pred_cls = x[..., 5:].cuda()  # raw logits: softmax happens inside cross entropy
        else:
            pred_cls = F.softmax(x[..., 5:], dim=-1).cuda()  # class probabilities
        # Per-cell grid coordinates, broadcast over batch and anchors.
        grid_x = torch.arange(gs).repeat(gs, 1).view(
            [1, 1, gs, gs]).float().cuda()
        grid_y = torch.arange(gs).repeat(gs, 1).t().view(
            [1, 1, gs, gs]).float().cuda()
        anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))
        anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))
        # Standard YOLO decoding: offsets added to cell index, sizes scale anchors.
        pred[..., 0] = pred_tx + grid_x
        pred[..., 1] = pred_ty + grid_y
        pred[..., 2] = torch.exp(pred_tw) * anchor_w
        pred[..., 3] = torch.exp(pred_th) * anchor_h
        pred[..., 4] = pred_conf
        pred[..., 5:] = pred_cls
        if not self.training:
            # Convert grid units back to input pixels and flatten all boxes.
            pred[..., :4] *= stride
            return pred.view(bs, -1, num_attrs)
        else:
            loss = YOLOLoss([bs, nA, gs], scaled_anchors, self.num_classes, pred, [pred_tx, pred_ty, pred_tw, pred_th])
            loss = loss(x, y_true.float())
            return loss
# YOLOv3 Loss
class YOLOLoss(nn.Module):
    """YOLOv3 training loss for one detection scale.

    Builds ground-truth target tensors on the prediction grid (assigning each
    ground-truth box to its best-matching anchor by IoU) and computes MSE
    losses for the box offsets, cross-entropy for the class, and BCE for the
    objectness confidence.

    NOTE(review): all target tensors are allocated with ``.cuda()`` — this
    loss requires a CUDA device.
    """
    def __init__(self, shape, scaled_anchors, num_classes, pred, pred_t):
        """
        Args:
        - shape: [bs, nA, gs] — batch size, number of anchors, grid size
        - scaled_anchors: anchors in grid-cell units
        - num_classes: number of object classes
        - pred: decoded predictions [bs, nA, gs, gs, 5+nC]
        - pred_t: list of raw offset tensors [tx, ty, tw, th]
        """
        super(YOLOLoss, self).__init__()
        self.bs = shape[0]
        self.nA = shape[1]
        self.gs = shape[2]
        self.scaled_anchors = scaled_anchors
        self.num_classes = num_classes
        self.predictions = pred
        self.pred_conf = pred[..., 4]
        self.pred_cls = pred[..., 5:]
        self.pred_tx = pred_t[0]
        self.pred_ty = pred_t[1]
        self.pred_tw = pred_t[2]
        self.pred_th = pred_t[3]
    def forward(self, x, y_true):
        """Return a dict of per-component losses given labels ``y_true``.

        ``y_true[idx]`` holds per-image boxes as [xc, yc, w, h, cls] with
        coordinates normalized to [0, 1] (they are multiplied by the grid
        size below).
        """
        # Ground-truth targets and the mask of cells that contain an object.
        gt_tx = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_ty = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_tw = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_th = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_conf = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_cls = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        obj_mask = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        for idx in range(self.bs):
            for y_true_one in y_true[idx]:
                y_true_one = y_true_one.cuda()
                gt_bbox = y_true_one[:4] * self.gs
                gt_cls_label = int(y_true_one[4])
                gt_xc, gt_yc, gt_w, gt_h = gt_bbox[0:4]
                # Grid cell that contains the box center.
                gt_i = gt_xc.long().cuda()
                gt_j = gt_yc.long().cuda()
                # Pick the anchor whose prediction best overlaps this box.
                pred_bbox = self.predictions[idx, :, gt_j, gt_i, :4]
                ious = IoU(xywh2xyxy(pred_bbox), xywh2xyxy(gt_bbox))
                best_iou, best_a = torch.max(ious, 0)
                w, h = self.scaled_anchors[best_a]
                # Inverse of the decoding: log-scale sizes, cell-relative offsets.
                gt_tw[idx, best_a, gt_j, gt_i] = torch.log(gt_w / w)
                gt_th[idx, best_a, gt_j, gt_i] = torch.log(gt_h / h)
                gt_tx[idx, best_a, gt_j, gt_i] = gt_xc - gt_i.float()
                gt_ty[idx, best_a, gt_j, gt_i] = gt_yc - gt_j.float()
                gt_conf[idx, best_a, gt_j, gt_i] = best_iou
                gt_cls[idx, best_a, gt_j, gt_i] = gt_cls_label
                obj_mask[idx, best_a, gt_j, gt_i] = 1
        # Sum (not mean) reductions: total loss scales with object count.
        MSELoss = nn.MSELoss(reduction='sum')
        BCELoss = nn.BCELoss(reduction='sum')
        CELoss = nn.CrossEntropyLoss(reduction='sum')
        loss = dict()
        # Xc, Yc, W, H loss calculation (masked to object cells only)
        loss['x'] = MSELoss(self.pred_tx * obj_mask, gt_tx * obj_mask)
        loss['y'] = MSELoss(self.pred_ty * obj_mask, gt_ty * obj_mask)
        loss['w'] = MSELoss(self.pred_tw * obj_mask, gt_tw * obj_mask)
        loss['h'] = MSELoss(self.pred_th * obj_mask, gt_th * obj_mask)
        # CLASS loss calculation
        # loss['cls'] = BCELoss(pred_cls * obj_mask, cls_mask * obj_mask)
        loss['cls'] = CELoss((self.pred_cls * obj_mask.unsqueeze(-1)).view(-1, self.num_classes),
                             (gt_cls * obj_mask).view(-1).long())
        # OBJECTIVENESS loss calculation
        # loss['conf'] = MSELoss(self.pred_conf * obj_mask * 5, gt_conf * obj_mask * 5) + \
        #     MSELoss(self.pred_conf * (1 - obj_mask), gt_conf * (1 - obj_mask))
        # Background (no-object) cells are down-weighted by lambda_noobj.
        lambda_noobj = 0.5
        loss['conf'] = BCELoss(self.pred_conf * obj_mask, (gt_conf * obj_mask).detach()) + \
            lambda_noobj * BCELoss(self.pred_conf * (1 - obj_mask), (gt_conf * (1 - obj_mask)).detach())
        # pprint(loss)
        return loss
# Non-Max Suppression
class NMSLayer(nn.Module):
    """
    NMS layer which performs Non-maximum Suppression on decoded predictions:
    1. Filter background (objectness below conf_thresh)
    2. Group predictions by their argmax class
    3. Sort each group by class confidence
    4. Greedily keep the best box, discard overlaps above nms_thresh

    NOTE(review): the result tensor is created with ``.cuda()``, so this
    layer requires a CUDA device.
    """
    def __init__(self, conf_thresh=0.65, nms_thresh=0.55):
        """
        Args:
        - conf_thresh: (float) fore-ground (objectness) confidence threshold
        - nms_thresh: (float) IoU threshold above which boxes are suppressed
        """
        super(NMSLayer, self).__init__()
        self.conf_thresh = conf_thresh
        self.nms_thresh = nms_thresh
    def forward(self, x):
        """
        Args
          x: (Tensor) prediction feature map, with size [bs, num_bboxes, 5 + nC]
        Returns
          predictions: (Tensor) prediction result with size [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]
        """
        bs, _, _ = x.size()
        predictions = torch.Tensor().cuda()
        for idx in range(bs):
            pred = x[idx]
            try:
                # Keep foreground boxes and convert centers/sizes to corners.
                non_zero_pred = pred[pred[:, 4] > self.conf_thresh]
                non_zero_pred[:, :4] = xywh2xyxy(non_zero_pred[:, :4])
                max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)
                max_idx = max_idx.float().unsqueeze(1)
                max_score = max_score.float().unsqueeze(1)
                # Rows become [x1, y1, x2, y2, p_obj, max_conf, cls_idx].
                non_zero_pred = torch.cat(
                    (non_zero_pred[:, :5], max_score, max_idx), 1)
                classes = torch.unique(non_zero_pred[:, -1])
            except Exception:  # no object predicted for this image
                print('No object predicted')
                continue
            for cls in classes:
                # NMS is applied per class, best-confidence first.
                cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]
                conf_sort_idx = torch.sort(cls_pred[:, 5], descending=True)[1]
                cls_pred = cls_pred[conf_sort_idx]
                max_preds = []
                while cls_pred.size(0) > 0:
                    # Keep the top box, drop everything that overlaps it too much
                    # (the kept box removes itself since IoU with itself is 1).
                    max_preds.append(cls_pred[0].unsqueeze(0))
                    ious = IoU(max_preds[-1], cls_pred)
                    cls_pred = cls_pred[ious < self.nms_thresh]
                if len(max_preds) > 0:
                    max_preds = torch.cat(max_preds).data
                    # Prepend the image index within the batch to every row.
                    batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)
                    seq = (batch_idx, max_preds)
                    predictions = torch.cat(seq, 1) if predictions.size(
                        0) == 0 else torch.cat((predictions, torch.cat(seq, 1)))
        return predictions
# =================================================================
# NETWORK
class DarkNet(nn.Module):
    """Darknet-style YOLOv3 network built from a .cfg file.

    The cfg is parsed into blocks; ``create_modules`` turns each block into a
    PyTorch module, and ``forward`` replays the blocks, caching intermediate
    outputs so 'route' and 'shortcut' layers can reference earlier layers.
    """
    def __init__(self, cfg, reso=416, thr_obj=0.5, thr_nms=0.5):
        """
        Args:
        - cfg: (str) path to the darknet cfg file
        - reso: (int) network input resolution
        - thr_obj: (float) objectness threshold used by NMS
        - thr_nms: (float) IoU threshold used by NMS
        """
        super(DarkNet, self).__init__()
        self.blocks = parse_cfg(cfg)
        self.reso, self.thr_obj, self.thr_nms = reso, thr_obj, thr_nms
        self.net_info, self.module_list = self.create_modules(self.blocks)
        self.nms = NMSLayer(self.thr_obj, self.thr_nms)
    def forward(self, x, y_true=None, CUDA=False):
        """Run the network.

        Training mode returns a dict of loss components (including 'total');
        eval mode returns NMS-filtered detections.

        NOTE(review): inside the yolo branch, ``loss['total']`` is updated
        with the loop variable ``value`` on every component iteration —
        verify the accumulation matches the intended sum of components.
        """
        modules = self.blocks[1:]  # blocks[0] is the [net] info block
        predictions = torch.Tensor().cuda() if CUDA else torch.Tensor()
        outputs = dict()  # cache per-layer outputs for route/shortcut layers
        loss = dict()
        for i, module in enumerate(modules):
            if module["type"] == "convolutional" or module["type"] == "upsample":
                x = self.module_list[i](x)
                outputs[i] = x
            elif module["type"] == "shortcut":
                # Residual connection: add the output of a previous layer.
                from_ = int(module["from"])
                x = outputs[i-1] + outputs[i+from_]
                outputs[i] = x
            elif module["type"] == "route":
                # Concatenate (or pass through) outputs of referenced layers.
                layers = module["layers"]
                layers = [int(a) for a in layers]
                if (layers[0]) > 0:
                    layers[0] = layers[0] - i  # absolute index -> relative
                if len(layers) == 1:
                    x = outputs[i + (layers[0])]
                else:
                    if (layers[1]) > 0:
                        layers[1] = layers[1] - i
                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]
                    x = torch.cat((map1, map2), 1)
                outputs[i] = x
            elif module["type"] == 'yolo':
                if self.training == True:
                    # Accumulate the loss dict returned by each YOLO head.
                    loss_part = self.module_list[i][0](x, y_true)
                    for key, value in loss_part.items():
                        value = value
                        loss[key] = loss[key] + \
                            value if key in loss.keys() else value
                        loss['total'] = loss['total'] + \
                            value if 'total' in loss.keys() else value
                else:
                    # Collect detections from every scale into one tensor.
                    x = self.module_list[i][0](x)
                    predictions = x if len(predictions.size()) == 1 else torch.cat(
                        (predictions, x), 1)
                outputs[i] = outputs[i-1]  # yolo layer does not feed the next layer
            # Print the layer information
            # print(i, module["type"], x.shape)
        # return prediction result only when evaluated
        if self.training == True:
            return loss
        else:
            predictions = self.nms(predictions)
            return predictions
    def create_modules(self, blocks):
        """Translate parsed cfg blocks into an ``nn.ModuleList``.

        Returns (net_info, module_list); also tracks per-layer output channel
        counts so 'route' layers can compute their concatenated width.
        """
        net_info = blocks[0]  # captures the input/pre-processing information
        module_list = nn.ModuleList()
        in_channels = 3  # RGB input
        out_channels_list = []
        for index, block in enumerate(blocks[1:]):
            module = nn.Sequential()
            # Convolutional Layer
            if (block["type"] == "convolutional"):
                activation = block["activation"]
                try:
                    # Conv layers followed by BatchNorm carry no bias.
                    batch_normalize = int(block["batch_normalize"])
                    bias = False
                except:
                    batch_normalize = 0
                    bias = True
                out_channels = int(block["filters"])
                kernel_size = int(block["size"])
                padding = (kernel_size - 1) // 2 if int(block["pad"]) else 0
                stride = int(block["stride"])
                conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias = bias)
                module.add_module("conv_{0}".format(index), conv)
                if batch_normalize:
                    bn = nn.BatchNorm2d(out_channels)
                    module.add_module("batch_norm_{0}".format(index), bn)
                if activation == "leaky":
                    activn = nn.LeakyReLU(0.1, inplace = True)
                    module.add_module("leaky_{0}".format(index), activn)
            # Up Sample Layer
            elif (block["type"] == "upsample"):
                stride = int(block["stride"])  # = 2 in Yolov3
                upsample = nn.Upsample(scale_factor = stride, mode = "nearest")
                module.add_module("upsample_{}".format(index), upsample)
            # Shortcut Layer (placeholder; the addition happens in forward)
            elif block["type"] == "shortcut":
                shortcut = EmptyLayer()
                module.add_module("shortcut_{}".format(index), shortcut)
            # Route Layer (placeholder; the concat happens in forward)
            elif (block["type"] == "route"):
                route = EmptyLayer()
                module.add_module("route_{0}".format(index), route)
                block["layers"] = block["layers"].split(',')
                start = int(block["layers"][0])
                if len(block['layers']) == 1:
                    start = int(block['layers'][0])
                    out_channels = out_channels_list[index + start]
                elif len(block['layers']) == 2:
                    start = int(block['layers'][0])
                    end = int(block['layers'][1])
                    # NOTE(review): second term indexes out_channels_list[end]
                    # (absolute), unlike the first (index + start) — confirm
                    # whether `index + end` was intended for negative offsets.
                    out_channels = out_channels_list[index + start] + out_channels_list[end]
            # Yolo Layer
            elif block["type"] == "yolo":
                # Select this scale's anchors via the mask indices.
                mask = block["mask"].split(",")
                mask = [int(x) for x in mask]
                anchors = block["anchors"].split(",")
                anchors = [int(a) for a in anchors]
                anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)]
                anchors = [anchors[i] for i in mask]
                num_classes = int(block['classes'])
                ignore_thresh = float(block['ignore_thresh'])
                prediction = YOLOLayer(anchors, num_classes, self.reso, ignore_thresh)
                module.add_module("prediction_{}".format(index), prediction)
            module_list.append(module)
            in_channels = out_channels
            out_channels_list.append(out_channels)
        return (net_info, module_list)
    def load_weights(self, path, cutoff=None):
        """Load darknet weights from disk.

        YOLOv3 is fully convolutional, so only conv layers' weights are
        loaded. For each convolutional block the weight file stores, in order:
          1. (optional) bn_biases => bn_weights => bn_mean => bn_var
          2. (optional) conv_bias (only when there is no batch norm)
          3. conv_weights

        Args
        - path: (str) path to .weights file
        - cutoff: (optional, int) stop before this module index
        """
        fp = open(path, 'rb')
        header = np.fromfile(fp, dtype=np.int32, count=5)  # file header (version info)
        weights = np.fromfile(fp, dtype=np.float32)
        fp.close()
        header = torch.from_numpy(header)
        ptr = 0  # read cursor into the flat weights array
        for i, module in enumerate(self.module_list):
            block = self.blocks[i]
            if cutoff is not None and i == cutoff:
                print("Stop before", block['type'], "block (No.%d)" % (i+1))
                break
            if block['type'] == "convolutional":
                batch_normalize = int(
                    block['batch_normalize']) if 'batch_normalize' in block else 0
                conv = module[0]
                if batch_normalize > 0:
                    bn = module[1]
                    num_bn_biases = bn.bias.numel()
                    bn_biases = torch.from_numpy(
                        weights[ptr:ptr+num_bn_biases])
                    bn_biases = bn_biases.view_as(bn.bias.data)
                    bn.bias.data.copy_(bn_biases)
                    ptr += num_bn_biases
                    bn_weights = torch.from_numpy(
                        weights[ptr:ptr+num_bn_biases])
                    bn_weights = bn_weights.view_as(bn.weight.data)
                    bn.weight.data.copy_(bn_weights)
                    ptr += num_bn_biases
                    bn_running_mean = torch.from_numpy(
                        weights[ptr:ptr+num_bn_biases])
                    bn_running_mean = bn_running_mean.view_as(bn.running_mean)
                    bn.running_mean.copy_(bn_running_mean)
                    ptr += num_bn_biases
                    bn_running_var = torch.from_numpy(
                        weights[ptr:ptr+num_bn_biases])
                    bn_running_var = bn_running_var.view_as(bn.running_var)
                    bn.running_var.copy_(bn_running_var)
                    ptr += num_bn_biases
                else:
                    # No batch norm: the conv layer has its own bias vector.
                    num_biases = conv.bias.numel()
                    conv_biases = torch.from_numpy(weights[ptr:ptr+num_biases])
                    conv_biases = conv_biases.view_as(conv.bias.data)
                    conv.bias.data.copy_(conv_biases)
                    ptr = ptr + num_biases
                num_weights = conv.weight.numel()
                conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
                conv_weights = conv_weights.view_as(conv.weight.data)
                conv.weight.data.copy_(conv_weights)
                ptr = ptr + num_weights | Python | 451 | 39.986694 | 128 | /yolo/darknet.py | 0.495888 | 0.485663 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import argparse
import sys
import yolo
import dataprep
def parse_arg():
    """Parse only the action keyword from the command line.

    Deliberately consumes just ``sys.argv[1:2]`` so each sub-module can parse
    its own remaining flags.
    """
    usage_text = '''python . <action> [<args>]
Actions:
   train      Network training module
   predict    Object detection module
   dataprep   Data preprocessing module
        '''
    parser = argparse.ArgumentParser(
        description='mmWave YOLOv3', add_help=True, usage=usage_text)
    parser.add_argument('Action', type=str, help='Action to run')
    return parser.parse_args(sys.argv[1:2])
# Parse the action keyword; each sub-module parses its own flags afterwards.
args = parse_arg()
if args.Action == 'train' or args.Action == 'predict':
    # Both training and prediction are handled by the yolo package.
    yolo.main(args)
elif args.Action == 'dataprep':
    dataprep.main()
else:
    print('Unknown action. Check "python . --help"')
| Python | 28 | 24.107143 | 80 | /__main__.py | 0.624467 | 0.620199 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
# from torchvision import transforms
import os
# import random
import numpy as np
from PIL import Image
# anchors_wh = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
# [59, 119], [116, 90], [156, 198], [373, 326]],
# np.float32) / 416
class MmwaveDataset(torch.utils.data.Dataset):
    """Dataset of mmWave radar frames stored as image files.

    Bounding boxes are encoded in each file name, either as one box
    ``..._[xc,yc,w,h].ext`` or several boxes ``..._[[xc,yc,w,h],[...]].ext``
    (pixel units; normalized to [0, 1] when loaded).
    """

    def __init__(self, data_dir, data_size = 0, transforms = None):
        """
        Args:
        - data_dir: (str) directory containing the sample images
        - data_size: (int) number of samples to expose; 0 means all files
        - transforms: (callable or None) torchvision-style transform; when
          None, a raw float32 channel-first array is returned instead

        Raises:
        - ValueError: if data_size is negative or exceeds the file count
        """
        files = sorted(os.listdir(data_dir))
        self.files = [f"{data_dir}/{x}" for x in files]
        if data_size < 0 or data_size > len(files):
            # Fix: the original `assert("...")` asserted a non-empty string
            # literal, which is always truthy — invalid sizes were silently
            # accepted. Fail loudly instead.
            raise ValueError("Data size should be between 0 to number of files in the dataset")
        if data_size == 0:
            data_size = len(files)
        self.data_size = data_size
        self.transforms = transforms

    def __len__(self):
        return self.data_size

    def __getitem__(self, idx):
        """Return (image_path, image, labels), or (image, None) for a sample
        with a degenerate (zero-sized) box so `collate` can filter it out."""
        image_path = self.files[idx]
        image = Image.open(image_path)
        img_w, img_h = image.size
        image = self.preProcessImage(image)
        labels = [] # to make it array of bbs (for multiple bbs in the future)
        # The bounding-box string is the last '_'-separated token of the name.
        labels_str = image_path.split("_")[-1]
        if "[[" in labels_str:
            # Multi-box form: up to 4 boxes of [xc, yc, w, h].
            labels_str = labels_str.split('[[')[1].split(']]')[0].split('],[')
            labels = np.zeros((4, 5))
            for i, l in enumerate(labels_str):
                label = np.zeros(5)
                label[:4] = np.array([int(a) for a in l.split(',')]) # [xc, yc, w, h]
                # Normalizing labels to [0, 1]
                label[0] /= img_w #Xcenter
                label[1] /= img_h #Ycenter
                label[2] /= img_w #Width
                label[3] /= img_h #Height
                labels[i, :] = label
        else:
            labels_str = labels_str.split('[')[1].split(']')[0].split(',') # get the bb info from the filename
            labels = np.zeros((1, 5))
            labels[0, :4] = np.array([int(a) for a in labels_str]) # [xc, yc, w, h]
            if np.any(labels[0, :4] == 0):
                # Degenerate box: signal an unusable sample (filtered in collate).
                return image, None
            # Normalizing labels to [0, 1]
            labels[0, 0] /= img_w #Xcenter
            labels[0, 1] /= img_h #Ycenter
            labels[0, 2] /= img_w #Width
            labels[0, 3] /= img_h #Height
            # labels[0, 4] = 0 # class label (0 = person)
        return image_path, image, labels

    def preProcessImage(self, image):
        """Convert to RGB and apply transforms, or fall back to a raw
        float32 array in channel-first order."""
        image = image.convert('RGB')
        if self.transforms:
            return self.transforms(image)
        else:
            image = np.array(image)
            image = image.transpose(2,1,0)
            return image.astype(np.float32)
def collate(batch):
    """Filter unusable samples, then apply the default collation.

    The dataset marks samples with a degenerate bounding box by returning
    (image, None); any sample whose second element is None is dropped here
    before the remaining batch is spliced together.
    """
    kept = [sample for sample in batch if sample[1] is not None]
    return default_collate(kept)
def getDataLoaders(data_dir, transforms, train_split=0, batch_size=8, \
        num_workers=2, collate_fn=collate, random_seed=0):
    """Build train/validation DataLoaders over an MmwaveDataset.

    Args:
    - data_dir: (str) dataset directory
    - transforms: transform pipeline forwarded to MmwaveDataset
    - train_split: (float in [0, 1]) training fraction; 0 or 1 disables the
      split and returns (None, loader-over-the-whole-dataset)
    - batch_size: (int) samples per batch
    - num_workers: (int) worker processes for every returned loader
    - collate_fn: batch collation function (default drops None samples)
    - random_seed: (int) nonzero enables shuffling and seeds the split

    Returns:
    - (train_loader or None, validation_or_full_loader)
    """
    if train_split < 0 or train_split > 1:
        raise Exception(f"data_loader | Split ({train_split}) coefficient should be 0 < x < 1")
    dataset = MmwaveDataset(data_dir=data_dir, transforms=transforms)
    shuffle = random_seed != 0
    # Single set: no splitting requested
    if train_split == 0 or train_split == 1:
        return None, torch.utils.data.DataLoader(dataset, batch_size=batch_size,
            shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)
    # Seed the generator so the train/validation split is reproducible
    generator = torch.Generator()
    if random_seed != 0:
        generator.manual_seed(random_seed)
    train_size = int(train_split * len(dataset))
    test_size = len(dataset) - train_size
    trainset, testset = torch.utils.data.random_split(dataset, [train_size, test_size], generator=generator)
    # Train and validation loaders.
    # Fix: honor the num_workers parameter (it was previously hard-coded to 2
    # here, silently ignoring the caller's value).
    return torch.utils.data.DataLoader(trainset, batch_size=batch_size,
            shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn), \
        torch.utils.data.DataLoader(testset, batch_size=batch_size,
            shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)
| Python | 116 | 36.387932 | 110 | /yolo/dataset.py | 0.568826 | 0.545077 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import gc
from .train import train
from .predict import predict
def main(args):
    """Dispatch the requested YOLO action ('train' or 'predict')."""
    gc.collect()  # reclaim leftover memory before the heavy work starts
    action = args.Action
    if action == 'train':
        train()
    elif action == 'predict':
        predict()
    gc.collect()  # and once more after the run completes
| Python | 12 | 17 | 34 | /yolo/__init__.py | 0.603687 | 0.603687 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import torch
import torch.nn as nn
# import torch.nn.functional as F
import torch.optim as optim
# import torchvision
import torchvision.transforms as transforms
# import os, pickle, random
import time, sys
import numpy as np
# from PIL import Image
import argparse
from .darknet import DarkNet
from .dataset import *
from .util import *
def parse_arg():
    """Build and apply the CLI parser for the training module.

    Consumes ``sys.argv[2:]`` — the first two entries (program name and the
    action keyword) were already handled by the top-level dispatcher.
    """
    parser = argparse.ArgumentParser(description='mmWaveYoLov3 Training module', add_help=True)
    specs = (
        ('--cfg', str, 'yolov3micro', "Name of the network config"),
        ('--pathin', str, 'trainset', "Input dataset name"),
        ('--datasplit', float, 0.8, "Dataset split percentage (def: 0.8 (80 (train):20 (validation))"),
        ('--seed', float, 42, "Seed for the random shuffle (default: 42, 0 for no shuffling)"),
        ('--bs', int, 8, "Batch size (default: 8, 0 for single batch)"),
        ('--ckpt', str, '0.0', "Checkpoint name as <'epoch'.'iteration'>"),
        ('--ep', int, 5, "Total epoch number (default: 5)"),
        ('--lr', float, 1e-5, "Learning rate (default: 1e-5)"),
        ('--reso', int, 416, "Input image resolution (default: 416)"),
        ('--v', int, 0, "Verbose (0 minimal (default), 1 normal, 2 all"),
    )
    for flag, cast, default, note in specs:
        parser.add_argument(flag, type=cast, default=default, help=note)
    return parser.parse_args(sys.argv[2:])
def train():
    """Train the DarkNet model defined by the CLI arguments.

    Loads an optional checkpoint, builds data loaders from the dataset split,
    runs the epoch loop (training + validation), and periodically saves
    checkpoints and loss plots under save/.
    """
    torch.cuda.empty_cache()
    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    num_workers = 2
    # NETWORK
    darknet = DarkNet(pathcfg, args.reso)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)
    # LOAD A CHECKPOINT!!!
    start_epoch, start_iteration = [0, 0]
    tlosses, vlosses = [], []
    optimizer, scheduler = None, None
    # --ckpt is "<epoch>.<iteration>"; "0.0" means start from scratch.
    start_epoch, start_iteration = [int(x) for x in args.ckpt.split('.')]
    # NOTE(review): both operands test start_epoch — the second was probably
    # meant to be start_iteration != 0; confirm before relying on resume.
    if start_epoch != 0 and start_epoch != 0:
        start_epoch, start_iteration, state_dict, \
            tlosses, vlosses, \
            optimizer, scheduler = load_checkpoint(
            f'save/checkpoints/',
            int(start_epoch),
            int(start_iteration)
        )
        darknet.load_state_dict(state_dict)
    # ====================================================
    # OPTIMIZER & HYPERPARAMETERS
    # (only created fresh when not restored from a checkpoint)
    if optimizer == None:
        # optimizer = optim.SGD(filter(lambda p: p.requires_grad, darknet.parameters()), \
        #     lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, darknet.parameters()), \
            lr=args.lr, betas=[0.9,0.999], eps=1e-8, weight_decay=0, amsgrad=False)
    if scheduler == None:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    # IMAGE PREPROCESSING!!!
    transform = transforms.Compose([
        # transforms.RandomResizedCrop(size=args.reso, interpolation=3),
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor()
    ])
    # ====================================================
    # Train and Validation data allocation
    trainloader, validloader = getDataLoaders(pathin, transform, \
        train_split=args.datasplit, batch_size=args.bs, \
        num_workers=num_workers, collate_fn=collate, random_seed=args.seed)
    # ====================================================
    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1: # Use Multi GPU if available
        darknet = nn.DataParallel(darknet)
    darknet.to(device) # Put the network on device
    if args.v > 0:
        print(next(darknet.parameters()).device)
    # TRAIN
    print(f'[LOG] TRAIN | Training set: {len(trainloader.dataset)}')
    print(f'[LOG] TRAIN | Validation set: {len(validloader.dataset)}')
    print(f'[LOG] TRAIN | Starting to train from epoch {start_epoch} iteration {start_iteration}')
    if start_epoch > args.ep:
        print(f'[ERR] TRAIN | Total epochs ({args.ep}) is less then current epoch ({start_epoch})')
        return
    for epoch in range(start_epoch, args.ep):
        print(f'[LOG] TRAIN | Starting Epoch #{epoch+1}')
        darknet.train() # set network to training mode
        tloss, vloss = [], []
        start = time.time()
        for batch_idx, (_, inputs, targets) in enumerate(trainloader):
            optimizer.zero_grad() # clear the grads from prev passes
            inputs, targets = inputs.to(device), targets.to(device) # Images, Labels
            outputs = darknet(inputs, targets, device) # Loss
            outputs['total'].backward() # Gradient calculations
            tloss.append(outputs['total'].item())
            optimizer.step()
            end = time.time()
            # Per-iteration loss components at higher verbosity levels
            if args.v == 1:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ')
            elif args.v == 2:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ' \
                    f'w: {outputs["w"].item():.2f} h: {outputs["h"].item():.2f} ' \
                    f'cls: {outputs["cls"].item():.2f} ' \
                    f'conf: {outputs["conf"].item()}')
            if (batch_idx % 100) == 99:
                print(f'[LOG] TRAIN | Batch #{batch_idx+1}\
                    Loss: {np.mean(tloss)}\
                    Time: {end - start}s')
                start = time.time()
        # Save train loss for the epoch
        tlosses.append(np.mean(tloss))
        scheduler.step()
        # VALIDATION (no gradients needed)
        with torch.no_grad():
            for batch_idx, (_, inputs, targets) in enumerate(validloader):
                inputs, targets = inputs.to(device), targets.to(device)
                voutputs = darknet(inputs, targets)
                vloss.append(voutputs['total'].item())
            # Validation loss!
            print(f'[LOG] VALID | Epoch #{epoch+1} \
                Loss: {np.mean(vloss)}')
        # Save valid loss for the epoch
        vlosses.append(np.mean(vloss))
        # ====================================================
        # Periodic checkpoint + loss plot every 10 epochs
        if (epoch % 10) == 9:
            save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
                'epoch': epoch+1,
                'iteration': 0,
                'state_dict': darknet.state_dict(),
                'tlosses': tlosses,
                'vlosses': vlosses,
                'optimizer': optimizer,
                'scheduler': scheduler
            })
            plot_losses(tlosses, vlosses, f'save/losses')
    # Final checkpoint and loss plot after the last epoch
    save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
        'epoch': epoch+1,
        'iteration': 0,
        'state_dict': darknet.state_dict(),
        'tlosses': tlosses,
        'vlosses': vlosses,
        'optimizer': optimizer,
        'scheduler': scheduler
    })
    plot_losses(tlosses, vlosses, f'save/losses')
| Python | 196 | 37.397961 | 99 | /yolo/train.py | 0.568088 | 0.554936 |
enverbashirov/YOLOv3-mMwave-Radar | refs/heads/master | import matplotlib.animation as animation
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
class KalmanTracker:
    """Constant-velocity Kalman filter used as a single-target tracker.

    State vector s = [x, vx, y, vy]^T; observations are (range, azimuth)
    pairs, converted to Cartesian (x, y) before the update. Observations
    whose Mahalanobis distance exceeds REJECT_THR are gated out unless
    ``disable_rejection_check`` is set.
    """

    def __init__(self, id_, s0=None, disable_rejection_check=False):
        """
        Args:
        - id_: tracker identifier
        - s0: (ndarray or None) initial state column vector; zeros if None
        - disable_rejection_check: (bool) accept every observation if True
        """
        # Filter-related parameters
        self.dt = 66.667e-3  # T_int of the radar TX
        # state transition matrix (constant-velocity model per axis)
        self.F = np.kron(np.eye(2), np.array([[1, self.dt], [0, 1]]))
        # # state-acceleration matrix
        self.G = np.array([0.5*(self.dt**2), self.dt]).reshape(2, 1)
        # # observation matrix (observe x and y positions only)
        self.H = np.array([[1, 0, 0, 0],
                           [0, 0, 1, 0]])
        # measurement covariance matrix
        self.R = np.array([[0.5, 0], [0, 0.5]])  # [wagner2017radar]
        # initial state covariance
        self.P = 0.2*np.eye(4)
        # state noise variance
        self.sigma_a = 8  # [wagner2017radar]
        # state noise covariance
        self.Q = np.kron(np.eye(2), np.matmul(self.G, self.G.T)*self.sigma_a**2)
        self.n = self.F.shape[1]
        self.m = self.H.shape[1]
        # initial state
        self.s = np.zeros((self.n, 1)) if s0 is None else s0
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        # chi-squared gate (2 dof)
        self.REJECT_THR = 4.605
        self.disable_rejection_check = disable_rejection_check
        #########################################################
        # Tracker-related parameters
        self.misses_number = 0
        self.hits = 0
        self.id = id_
        self.box = np.array([])
        self.state_memory = []
        self.identity_label = 'UNK'  # initialize as unknown cluster
        self.id_dict = {-1: 'UNK', 0: 'S1', 1: 'S2', 2:'S3', 3:'S4'}
        # self.id_dict = {-1: 'UNK', 0: 'JP', 1: 'FM', 2:'GP', 3:'RF'}

    def transform_obs(self, z):
        """Convert a (range, azimuth) observation to a Cartesian column vector."""
        z_prime = np.array([z[0]*np.cos(z[1]), z[0]*np.sin(z[1])]).reshape(-1, 1)
        return z_prime

    def reject_obs(self, i, S):
        """Gate test: True if innovation ``i`` is too unlikely under covariance ``S``."""
        chi_squared = np.matmul(np.matmul(i.T, np.linalg.inv(S)), i)[0, 0]
        return chi_squared >= self.REJECT_THR

    def predict(self):
        """Propagate state and covariance one time step; return (s, xy)."""
        # a_x = np.random.normal(0, self.sigma_a)
        # a_y = np.random.normal(0, self.sigma_a)
        self.s = np.matmul(self.F, self.s)
        # check that x has the correct shape
        assert self.s.shape == (self.n, 1)
        self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        return self.s, self.xy

    def update(self, z):
        """Kalman update with observation ``z`` = (range, azimuth).

        The observation is ignored (state kept as predicted) when it fails
        the Mahalanobis gate and rejection checking is enabled.
        """
        z = self.transform_obs(z)
        # innovation
        y = z - np.matmul(self.H, self.s)
        S = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R
        if (not self.reject_obs(y, S)) or self.disable_rejection_check:
            K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(S))
            self.s = self.s + np.matmul(K, y)
            assert self.s.shape == (self.n, 1)
            self.P = np.matmul(np.eye(self.n) - np.matmul(K, self.H), self.P)
            self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
            self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
            self.state_memory.append(self.xy)
            return self.s, self.xy
        else:
            self.state_memory.append(self.xy)
            return self.s, self.xy

    def get_S(self):
        """Return the current innovation covariance S = H P H^T + R."""
        return np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R

    @staticmethod
    def get_mahalanobis_distance(x, C):
        # returns Mahalanobis distance given the difference vector x and covariance C
        return np.matmul(np.matmul(x.T, np.linalg.inv(C)), x)[0, 0]

    @staticmethod
    def hungarian_assignment(score_matrix):
        """Detection-to-track assignment via the Hungarian algorithm.

        Returns (matches, undetected_tracks, unmatched_detections) where
        matches is an (k, 2) array of [detection_idx, track_idx] pairs.
        """
        # Fix: `import scipy as sp` alone does not guarantee the optimize
        # submodule is loaded (lazy submodule loading only exists in recent
        # SciPy releases), so import the solver explicitly.
        from scipy.optimize import linear_sum_assignment
        det_idx, tr_idx = linear_sum_assignment(score_matrix)
        unmatched, undetected = [], []
        for t in range(score_matrix.shape[1]):
            if t not in tr_idx:
                undetected.append(t)
        for d in range(score_matrix.shape[0]):
            if d not in det_idx:
                unmatched.append(d)
        matches = []
        for d, t in zip(det_idx, tr_idx):
            matches.append(np.array([d, t]).reshape(1, 2))
        if len(matches) == 0:
            matches = np.empty((0, 2), dtype=int)
        else:
            matches = np.concatenate(matches, axis=0)
        return matches, np.array(undetected), np.array(unmatched)
| Python | 108 | 43.111111 | 127 | /dataprep/kalman_tracker.py | 0.529134 | 0.502462 |
michelequinto/xUDP | refs/heads/master | files = [ "xaui_init.vhd",
"mdio/mdio.v",
"mdio/mdio_ctrl.vhd",
"vsc8486_init.vhd",
"clk_wiz_v3_3_0.vhd",
"xUDP_top.vhd",
__import__('os').path.relpath( __import__('os').environ.get('XILINX') ) + "/verilog/src/glbl.v" ]
modules = { "local" : [ "../../../rtl/vhdl/ipcores/xilinx/xaui"]}
# "../../../rtl/verilog/ipcores/xge_mac" ]}
| Python | 10 | 39.700001 | 107 | /syn/xilinx/src/Manifest.py | 0.449074 | 0.43287 |
michelequinto/xUDP | refs/heads/master | action = "simulation"
include_dirs = [ "../../environment", "../../sequences/"]
vlog_opt = '+incdir+' + \
__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv+' + \
__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/mvc_base+' + \
__import__('os').environ.get('QUESTA_MVC_HOME') + '/include+' + \
__import__('os').environ.get('QUESTA_MVC_HOME') + '/examples/ethernet/common+' + \
__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/ethernet/ '
top_module = "top"
sim_tool = "modelsim"
files = ["src/genericTest.sv"]
modules = { "local" : [ "../../../../../syn/xilinx/src",
"../../../../../rtl/verilog/ipcores/xge_mac/" ] }
| Python | 17 | 40.470589 | 84 | /bench/sv/FullDesign/tests/genericTest/Manifest.py | 0.561702 | 0.561702 |
michelequinto/xUDP | refs/heads/master | files = [ "./xaui_v10_4.vhd",
"./xaui_v10_4/simulation/demo_tb.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper_gtx.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_example_design.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_tx_sync.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_block.vhd",
"./xaui_v10_4/example_design/xaui_v10_4_chanbond_monitor.vhd" ]
| Python | 8 | 60.25 | 73 | /rtl/vhdl/ipcores/xilinx/xaui/Manifest.py | 0.606122 | 0.520408 |
michelequinto/xUDP | refs/heads/master | files = [ "utilities.vhd",
"arp_types.vhd",
"axi_types.vhd",
"ipv4_types.vhd",
"xUDP_Common_pkg.vhdl",
"axi_tx_crossbar.vhd",
"arp_REQ.vhd",
"arp_RX.vhd",
"arp_STORE_br.vhd",
"arp_SYNC.vhd",
"arp_TX.vhd",
"arp.vhd",
"IPv4_RX.vhd",
"IPv4_TX.vhd",
"IPv4.vhd",
"IPv4_Complete_nomac.vhd",
"UDP_RX.vhd",
"UDP_TX.vhd",
"UDP_Complete_nomac.vhd",
"xge_mac_axi.vhd"]
| Python | 20 | 26.4 | 36 | /rtl/vhdl/Manifest.py | 0.419708 | 0.410584 |
michelequinto/xUDP | refs/heads/master | action = "simulation"
include_dirs = ["./include"]
#vlog_opt = '+incdir+' + \
#"../../../../../rtl/verilog/ipcores/xge_mac/include"
#__import__('os').path.dirname(__import__('os').path.abspath(__import__('inspect').getfile(__import__('inspect').currentframe())))
#os.path.abspath(__import__('inspect').getfile(inspect.currentframe())))
files = [ "./include/utils.v",
"./include/CRC32_D64.v",
"./include/CRC32_D8.v",
"./verilog/tx_dequeue.v",
"./verilog/sync_clk_core.v",
"./verilog/generic_fifo.v",
"./verilog/stats.v",
"./verilog/rx_hold_fifo.v",
"./verilog/tx_enqueue.v",
"./verilog/rx_dequeue.v",
"./verilog/sync_clk_wb.v",
"./verilog/tx_data_fifo.v",
"./verilog/fault_sm.v",
"./verilog/generic_mem_small.v",
"./verilog/wishbone_if.v",
"./verilog/generic_mem_medium.v",
"./verilog/meta_sync_single.v",
"./verilog/stats_sm.v",
"./verilog/rx_stats_fifo.v",
"./verilog/tx_hold_fifo.v",
"./verilog/rx_data_fifo.v",
"./verilog/xge_mac.v",
"./verilog/rx_enqueue.v",
"./verilog/generic_fifo_ctrl.v",
"./verilog/sync_clk_xgmii_tx.v",
"./verilog/tx_stats_fifo.v",
"./verilog/meta_sync.v" ]
| Python | 37 | 35.513512 | 130 | /rtl/verilog/ipcores/xge_mac/Manifest.py | 0.517012 | 0.511834 |
RoboBrainCode/Backend | refs/heads/master | from django.http import HttpResponse
from feed.models import BrainFeeds, ViewerFeed, GraphFeedback
import json
import numpy as np
from django.core import serializers
import dateutil.parser
from django.views.decorators.csrf import ensure_csrf_cookie
from django.db.transaction import commit_on_success
# This is a temporary function. It will be later moved to learning_plugins
def save_graph_feedback(request):
_id_node = (request.GET.get('id','-1')) # default k=10
_feedback_type = request.GET.get('feedback_type','')
_node_handle = request.GET.get('node_handle','')
_action_type = request.GET.get('action_type','')
graph_feedback = GraphFeedback(
id_node = _id_node,
feedback_type = _feedback_type,
node_handle = _node_handle,
action_type = _action_type
)
graph_feedback.save()
return HttpResponse(json.dumps(graph_feedback.to_json()), content_type="application/json")
# Returns k most recent feeds from BrainFeed table.
def return_top_k_feeds(request):
# Number of feeds required
top_k = int(request.GET.get('k','10')) # default k=10
max_len = ViewerFeed.objects.count()
upper_limit = min(max_len, top_k)
feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[:upper_limit])
brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)
# Reordering brainfeeds from the DB in order of feed_ids in O(n)
# s.t. feed_ids == [bf.id for bf in brainfeeds]
feed_map_order = {feed_ids[i] : i for i in xrange(len(feed_ids))}
brainfeeds = [0] * len(feed_ids)
for bf in list(brainfeeds_db):
brainfeeds[feed_map_order[bf.id]] = bf
# Deleting entries from brainfeeds where brainfeeds == 0
delete_entries = []
for bf in brainfeeds:
if bf == 0:
delete_entries.append(0)
for bf in delete_entries:
brainfeeds.remove(bf)
update_scores_top_k(brainfeeds)
json_feeds = [feed.to_json() for feed in brainfeeds]
return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# This function allows infinite scrolling.
def infinite_scrolling(request):
# Feeds already present
current_feeds = int(request.GET.get('cur','10')) # default cur=10
# Number of extra feeds required
extra_feeds = int(request.GET.get('k','10')) # default k=10
max_len = ViewerFeed.objects.count()
upper_limit = min(max_len, current_feeds + extra_feeds)
feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[current_feeds:upper_limit])
brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)
# Reordering brainfeeds from the DB in order of feed_ids in O(n)
# s.t. feed_ids == [bf.id for bf in brainfeeds]
feed_map_order = {feed_ids[i] : i for i in xrange(len(feed_ids))}
brainfeeds = [0] * len(feed_ids)
for bf in list(brainfeeds_db):
brainfeeds[feed_map_order[bf.id]] = bf
# Deleting entries from brainfeeds where brainfeeds == 0
delete_entries = []
for bf in brainfeeds:
if bf == 0:
delete_entries.append(0)
for bf in delete_entries:
brainfeeds.remove(bf)
update_scores_scroll(brainfeeds, current_feeds, extra_feeds)
json_feeds = [feed.to_json() for feed in brainfeeds]
return HttpResponse(json.dumps(json_feeds), content_type="application/json")
@commit_on_success
def update_scores_top_k(brainfeeds):
for feeds in brainfeeds:
feeds.update_score = True
feeds.log_normalized_feed_show += 1.0
feeds.save()
@commit_on_success
def update_scores_scroll(brainfeeds, current_feeds, extra_feeds):
page_number = current_feeds/max(1.0,extra_feeds) + 1.0
for feeds in brainfeeds:
feeds.update_score = True
feeds.log_normalized_feed_show += np.log10(1.0+page_number)
feeds.save()
# Filters feeds using the hash word
def filter_feeds_with_hashtags(request):
hashword = request.GET.get('hashword')
# Number of extra feeds required
k = int(request.GET.get('k','10')) # default k=10
if not hashword:
error_response = {
'Error': 'hashword not provided.'
}
return HttpResponse(json.dumps(error_response), content_type='application/json')
brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(hashtags__contains=hashword).order_by('-created_at')[:k]
json_feeds = [feed.to_json() for feed in brain_feeds]
return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# Filters feeds with types
def filter_feeds_with_type(request):
feedtype = request.GET.get('type')
print(feedtype)
# Number of extra feeds required
k = int(request.GET.get('k','10')) # default k=10
if not feedtype:
error_response = {
'Error': 'type not provided.'
}
return HttpResponse(json.dumps(error_response), content_type='application/json')
brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(source_text=feedtype).order_by('-created_at')[:k]
json_feeds = [feed.to_json() for feed in brain_feeds]
return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# Return feeds created after datetime. Input time should be in ISO string format. It is them parsed to UTC format
def return_feeds_since(request):
time_since = dateutil.parser.parse(request.GET.get('datetime'))
# Number of extra feeds required
k = int(request.GET.get('k','10')) # default k=10
if not time_since:
error_response = {
'Error': 'time_since not provided.'
}
return HttpResponse(json.dumps(error_response), content_type='application/json')
brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(created_at__gte=time_since).order_by('-created_at')[:k]
json_feeds = [feed.to_json() for feed in brain_feeds]
return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# Records upvotes for a feed
@ensure_csrf_cookie
def upvotes_recorder(request):
if request.method == 'GET':
return HttpResponse('Ok')
elif request.method == 'POST':
payload = json.loads(request.body)
feedid = payload['feedid']
vote_dir = payload['vote']
change = payload['change']
if not feedid:
error_response = {
'Error': 'No feedid provided'
}
return HttpResponse(json.dumps(error_response), content_type='application/json')
if not vote_dir == -1 and not vote_dir == 1:
error_response = {
'Error': 'voteid {0} not provided. Can only be 1 or -1'.format(vote_dir)
}
return HttpResponse(json.dumps(error_response), content_type='application/json')
brain_feed = BrainFeeds.objects.get(id=feedid)
votes = {}
if vote_dir == 1:
brain_feed.upvotes += 1
if change:
brain_feed.downvotes -= 1
if vote_dir == -1:
brain_feed.downvotes += 1
if change:
brain_feed.upvotes -= 1
votes = {
'upvotes': max(brain_feed.upvotes, 0),
'downvotes': max(brain_feed.downvotes, 0)
}
brain_feed.save()
return HttpResponse(json.dumps(votes), content_type='application/json')
| Python | 207 | 34.642513 | 120 | /feed/views.py | 0.647696 | 0.639837 |
RoboBrainCode/Backend | refs/heads/master | from django.forms import widgets
from rest_framework import serializers
from feed.models import JsonFeeds
from djangotoolbox.fields import ListField
import drf_compound_fields.fields as drf
from datetime import datetime
class TagFieldS(serializers.Serializer):
media = serializers.CharField(required=False)
class FeedSerializer(serializers.Serializer):
pk = serializers.Field() # Note: `Field` is an untyped read-only field.
feedtype = serializers.CharField(required=False)
text = serializers.CharField(required=False)
source_text = serializers.CharField(required=False)
source_url = serializers.CharField(required=False)
hashtags = serializers.CharField(required=False)
created_at = serializers.DateTimeField(required=False)
upvotes = serializers.IntegerField(required=False)
media = drf.ListField(serializers.CharField(),required=False)# serializers.CharField(required=False,many=True)
mediamap = drf.ListField(serializers.CharField(),required=False)
mediatype = drf.ListField(serializers.CharField(),required=False)
keywords = drf.ListField(serializers.CharField(),required=False)
graphStructure = drf.ListField(serializers.CharField(),required=False)
mediashow = drf.ListField(serializers.CharField(),required=False)
username = serializers.CharField(required=False)
def restore_object(self, attrs, instance=None):
"""
Create or update a new snippet instance, given a dictionary
of deserialized field values.
Note that if we don't define this method, then deserializing
data will simply return a dictionary of items.
"""
if instance:
# Update existing instance
#instance.feedtype = attrs.get('feedtype', instance.feedtype)
#instance.code = attrs.get('code', instance.code)
#instance.linenos = attrs.get('linenos', instance.linenos)
#instance.language = attrs.get('language', instance.language)
#instance.style = attrs.get('style', instance.style)
return instance
# Create new instance
attrs['created_at']=datetime.now()
return JsonFeeds(**attrs)
| Python | 49 | 44.265305 | 114 | /rest_api/serializer.py | 0.709197 | 0.709197 |
RoboBrainCode/Backend | refs/heads/master | from django.http import HttpResponse
import json
from django.contrib.auth.models import User
from django.views.decorators.csrf import ensure_csrf_cookie
from django import forms
from django.contrib.auth import login, logout
from django.contrib.auth import authenticate
from base64 import b64decode
@ensure_csrf_cookie
def create_user_rb(request):
if request.method == 'GET':
return HttpResponse('Ok')
elif request.method == 'POST':
payload = json.loads(request.body)
username = payload['username']
email = payload['email']
password = payload['password']
if email and User.objects.filter(email=email).exclude(username=username).count():
return HttpResponse('This email address is already in use! Try logging in.', status=401)
if email and User.objects.filter(email=email, username=username).count():
return HttpResponse('This account already exists! Try logging in.', status=401)
user = User.objects.create_user(username, email, password)
user.save()
return HttpResponse('Ok')
@ensure_csrf_cookie
def login_rb(request):
if request.user.is_authenticated():
user = request.user
user_data = {
'id': user.id,
'username': user.username,
'email': user.email,
'loggedin': 'True'
};
return HttpResponse(json.dumps(user_data), content_type='application/json')
if request.method == 'GET':
return HttpResponse('Ok')
elif request.method == 'POST':
decodedCredentials = b64decode(request.body)
if not ':' in decodedCredentials:
return HttpResponse('Not logged in', status=401)
email, password = decodedCredentials.split(':')
user = authenticateEmail(email, password)
if not user:
return HttpResponse('Invalid Credentials', status=401)
user = authenticate(username=user.username, password=password)
if not user:
return HttpResponse('Invalid Credentials', status=401)
login(request, user)
user_data = {
'id': user.id,
'username': user.username,
'email': user.email
};
return HttpResponse(json.dumps(user_data), content_type='application/json')
def authenticateEmail(email=None, password=None):
try:
user = User.objects.get(email=email)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def logout_rb(request):
logout(request)
return HttpResponse('Logged Out')
| Python | 71 | 33 | 94 | /auth/auth.py | 0.699254 | 0.690555 |
RoboBrainCode/Backend | refs/heads/master | import ConfigParser
import pymongo as pm
from datetime import datetime
import numpy as np
import importlib
import sys
sys.path.insert(0,'/var/www/Backend/Backend/')
def readConfigFile():
"""
Reading the setting file to use.
Different setting files are used on Production and Test robo brain
"""
global setfile
config = ConfigParser.ConfigParser()
config.read('/tmp/backend_uwsgi_setting')
env = config.get('uwsgi','env')
setting_file_name = env.strip().split('.')[1]
setfile = importlib.import_module(setting_file_name)
def establishConnection():
"""
Establishes connection to remote db
"""
global brain_feeds, viewer_feeds
client = pm.MongoClient(host,port)
db = client[dbname]
brain_feeds = db['brain_feeds']
viewer_feeds = db['viewer_feeds']
def viewerFeedsUpdate():
"""
Sorts Brain Feeds on Basis of score and pushes them to ViewerFeeds table
"""
feeds_ordered = brain_feeds.find().sort('score',pm.DESCENDING)
overall_counter = 0
feeds_to_push = []
first_time = True
for feeds in feeds_ordered:
try:
new_feed = {}
new_feed['_id'] = overall_counter
new_feed['feedid'] = feeds['_id'].__str__()
feeds_to_push.append(new_feed)
overall_counter += 1
print "{0} {1} {2}".format(overall_counter,feeds['score'],feeds['source_url'])
if overall_counter % 100 == 0:
if first_time:
viewer_feeds.drop()
first_time = False
viewer_feeds.insert(feeds_to_push)
feeds_to_push = []
except:
print "**************skipping*************"
def viewerFeedsUpdate_deprecated():
"""
DEPRECATED
Equally represent each project
"""
different_projects = brain_feeds.distinct('source_url')
different_projects = sorted(different_projects,key=len)
feeds_each_project = {}
feeds_count = {}
for url in different_projects:
feeds_each_project[url] = brain_feeds.find({'source_url':url},{'created_at':1}).sort('created_at',pm.DESCENDING)
feeds_count[url] = feeds_each_project[url].count()
feeds_to_push = []
overall_counter = 0
level = 0
first_time = True
while True:
toBreak = True
remaining_projects = []
for url in different_projects:
if feeds_count[url] > level:
print url
new_feed = {}
new_feed['_id'] = overall_counter
new_feed['feedid'] = feeds_each_project[url][level]['_id'].__str__()
feeds_to_push.append(new_feed)
overall_counter += 1
remaining_projects.append(url)
toBreak = False
if overall_counter % 100 == 0:
if first_time:
viewer_feeds.drop()
first_time = False
viewer_feeds.insert(feeds_to_push)
feeds_to_push = []
different_projects = remaining_projects
if toBreak:
break
level += 1
if __name__=="__main__":
global host, dbname, port, setfile, brain_feeds, viewer_feeds
# Reading the setting file for db address
readConfigFile()
host = setfile.DATABASES['default']['HOST']
dbname = setfile.DATABASES['default']['NAME']
port = int(setfile.DATABASES['default']['PORT'])
# Extablishing connection to remote db
establishConnection()
viewerFeedsUpdate()
| Python | 116 | 30.25 | 121 | /UpdateViewerFeeds/updateViewerFeed.py | 0.566345 | 0.560828 |
RoboBrainCode/Backend | refs/heads/master | # Create your views here.
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from feed.models import JsonFeeds
from rest_api.serializer import FeedSerializer
from datetime import datetime
from rest_framework import permissions
@api_view(['GET', 'POST'])
def feed_list(request):
#List all snippets, or create a new snippet.
if request.method == 'GET':
feeds = JsonFeeds.objects.all()[:25]
serializer = FeedSerializer(feeds, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = FeedSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| Python | 24 | 36.125 | 78 | /rest_api/views.py | 0.713647 | 0.704698 |
RoboBrainCode/Backend | refs/heads/master | from django.db import models
from djangotoolbox.fields import ListField
from datetime import datetime
from django.db.models.signals import post_save
from queue_util import add_feed_to_queue
#from feed.models import BrainFeeds
class GraphFeedback(models.Model):
id_node = models.TextField()
feedback_type = models.TextField()
node_handle = models.TextField()
action_type = models.TextField()
def to_json(self):
return {"_id":self.id,
"id_node":self.id_node,
"feedback_type":self.feedback_type,
"node_handle":self.node_handle,
"action_type":self.action_type
}
class Meta:
db_table = "graph_feedback"
class BrainFeeds(models.Model):
toshow = models.BooleanField(default=True)
feedtype = models.TextField() #originally feedtype -> type
text = models.TextField()
source_text = models.TextField()
source_url = models.TextField(db_index=True)
meta = {'indexes':['source_url']}
media = ListField()
mediatype = ListField()
created_at = models.DateTimeField(default=datetime.now())
hashtags = models.TextField(db_index=True)
meta = {'indexes':['hashtags']}
upvotes = models.IntegerField(default=0)
downvotes = models.IntegerField(default=0)
jsonfeed_id = models.TextField()
username = models.TextField()
score = models.FloatField(default=0.0,db_index=True)
meta = {'indexes':['score']}
update_score = models.BooleanField(default=True,db_index=True)
meta = {'indexes':['update_score']}
log_normalized_feed_show = models.FloatField(default=1.0)
def to_json(self):
return {"_id":self.id,
"toshow":self.toshow,
"feedtype":self.feedtype,
"text":self.text,
"source_text":self.source_text,
"source_url":self.source_url,
"media":self.media,
"mediatype":self.mediatype,
"created_at":self.created_at.isoformat(),
"hashtags":self.hashtags,
"upvotes":self.upvotes,
"downvotes":self.downvotes,
"jsonfeed_id":self.jsonfeed_id,
"username":self.username,
"score":self.score,
"log_normalized_feed_show":self.log_normalized_feed_show,
"update_score":self.update_score
}
class Meta:
db_table = 'brain_feeds'
get_latest_by = 'created_at'
class JsonFeeds(models.Model):
feedtype = models.TextField() #originally feedtype -> type
text = models.TextField()
source_text = models.TextField()
source_url = models.TextField()
mediashow = ListField()
media = ListField()
mediatype = ListField()
mediamap = ListField()
keywords = ListField()
graphStructure = ListField()
created_at = models.DateTimeField()
hashtags = models.TextField(default=datetime.now, blank=True)
meta = {'indexes':['hashtags']}
upvotes = models.IntegerField(default=0)
downvotes = models.IntegerField(default=0)
username = models.TextField()
def to_json(self):
return {"_id":self.id,
"feedtype":self.feedtype,
"text":self.text,
"source_text":self.source_text,
"source_url":self.source_url,
"mediashow":self.mediashow,
"media":self.media,
"mediatype":self.mediatype,
"mediamap":self.mediamap,
"keywords":self.keywords,
"graphStructure":self.graphStructure,
"created_at":self.created_at.isoformat(),
"hashtags":self.hashtags,
"upvotes":self.upvotes,
"downvotes":self.downvotes,
"username":self.username
}
class Meta:
db_table = 'json_feeds'
def postSaveJson(**kwargs):
instance = kwargs.get('instance')
print "Post Saving JsonFeed: ", instance.to_json()
add_feed_to_queue(instance.to_json())
#Saving JsonFeed to BrainFeed
brain_feed = BrainFeeds(
feedtype=instance.feedtype,
text=instance.text,
source_text=instance.source_text,
source_url=instance.source_url,
hashtags=instance.hashtags,
jsonfeed_id=instance.id,
username=instance.username
)
media = []
mediatype = []
for mediashow,_media,_mediatype in zip(instance.mediashow,instance.media,instance.mediatype):
if mediashow.lower() == 'true':
media.append(_media)
mediatype.append(_mediatype)
brain_feed.media = media
brain_feed.mediatype = mediatype
brain_feed.save()
#Saving viewer feed
"""
numitem = ViewerFeed.objects.all().count()
viewer_feed = ViewerFeed(
id = numitem,
feedid = brain_feed.id
)
viewer_feed.save()
"""
#Saving JsonFeed to GraphDB
post_save.connect(postSaveJson, JsonFeeds)
class ViewerFeed(models.Model):
feedid = models.TextField()
id = models.IntegerField(db_index=True,primary_key=True)
meta = {'indexes':['id']}
def to_json(self):
return {"_id":self.id,"id":self.id,"feedid":self.feedid}
class Meta:
db_table = 'viewer_feeds'
| Python | 164 | 30.524391 | 97 | /feed/models.py | 0.618762 | 0.617215 |
RoboBrainCode/Backend | refs/heads/master | from django.conf.urls import patterns, url
from feed import views
urlpatterns = patterns('',
url(r'most_recent/', views.return_top_k_feeds, name='most_recent'),
url(r'infinite_scroll/', views.infinite_scrolling, name='infinite_scrolling'),
url(r'filter/', views.filter_feeds_with_hashtags, name='filter'),
url(r'filter_type/', views.filter_feeds_with_type, name='filter_type'),
url(r'since/', views.return_feeds_since, name='since'),
url(r'upvotes/', views.upvotes_recorder, name='upvotes'),
url(r'graph_feedback/', views.save_graph_feedback, name='graph_feedback'),
)
| Python | 12 | 48.833332 | 82 | /feed/urls.py | 0.700669 | 0.700669 |
RoboBrainCode/Backend | refs/heads/master | from django.conf.urls import patterns, url
import auth
urlpatterns = patterns('',
url(r'create_user/', auth.create_user_rb, name='create_user'),
url(r'login/', auth.login_rb, name='login'),
url(r'logout/', auth.logout_rb, name='logout')
)
| Python | 8 | 30.5 | 66 | /auth/urls.py | 0.670635 | 0.670635 |
RoboBrainCode/Backend | refs/heads/master | from __future__ import with_statement
from fabric.api import cd, env, local, settings, run, sudo
from fabric.colors import green, red
from fabric.contrib.console import confirm
def prod_deploy(user='ubuntu'):
print(red('Deploying to production at robobrain.me...'))
if not confirm('Are you sure you want to deploy to production?'):
print(red('Aborting deploy.'))
env.host_string = '54.149.21.165'
env.key_filename = 'conf/www.pem'
env.user = user
env.shell = '/bin/zsh -l -c'
with cd('/var/www/Backend'):
# sudo('su - ubuntu')
print(green('Checking out test...'))
run('git checkout test')
print(green('Pulling latest version of test...'))
run('git pull origin test')
print(green('Checking out production...'))
run('git checkout production')
print(green('Rebasing onto test...'))
run('git rebase test')
print(green('Pushing production upstream...'))
run('git push origin production')
print(green('Reloading server...'))
sudo('uwsgi --reload /tmp/robobrain-master.pid')
print(red('Done!'))
def test_deploy(user='ubuntu'):
env.host_string = '54.148.225.192'
env.key_filename = 'conf/www.pem'
env.user = user
env.shell = '/bin/zsh -l -c'
print(red('Deploying to test at test.robobrain.me...'))
with cd('/var/www/Backend'):
print(green('Checking out master...'))
run('git checkout master')
print(green('Pulling latest version of master...'))
run('git pull origin master')
print(green('Checking out test...'))
run('git checkout test')
print(green('Rebasing onto master...'))
run('git rebase master')
print(green('Pulling latest version of test...'))
run('git pull origin test')
print(green('Push the latest version of test...'))
run('git push origin test')
print(green('Reloading server...'))
sudo('uwsgi --reload /tmp/robobrain-master.pid')
print(red('Done!'))
| Python | 51 | 36.196079 | 67 | /fabfile.py | 0.656299 | 0.647338 |
RoboBrainCode/Backend | refs/heads/master | from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = patterns('rest_api.views',
url(r'^feeds/$', 'feed_list'),
#url(r'^snippets/(?P<pk>[0-9]+)$', 'snippet_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
| Python | 9 | 31.555555 | 61 | /rest_api/urls.py | 0.713311 | 0.706485 |
RoboBrainCode/Backend | refs/heads/master | #!/usr/bin/python
import boto
import json
import traceback
from boto.sqs.message import RawMessage
from bson import json_util
conn = boto.sqs.connect_to_region(
"us-west-2",
aws_access_key_id='AKIAIDKZIEN24AUR7CJA',
aws_secret_access_key='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')
feed_queue = conn.create_queue('feed_queue')
def add_feed_to_queue(json_feed):
m = RawMessage()
try:
m.set_body(json.dumps(json_feed, default=json_util.default))
feed_queue.write(m)
except Exception, e:
print traceback.format_exc()
print json_feed
if __name__ == '__main__':
add_feed_to_queue({
"username" : "arzav",
"_id": "546e6a2f5caae434656bbc36",
"feedtype" : "",
"mediashow" : [ ],
"text" : "#Simhat_Torah is a synonym of #Rejoicing_in_the_Law",
"hashtags" : " simhat_torah rejoicing_in_the_law",
"mediatype" : [ ],
"source_url" : "http://wordnet.princeton.edu/",
"source_text" : "WordNet",
"mediamap" : [ ],
"media" : [ ],
"keywords": ["Simhat_Torah","Rejoicing_in_the_Law","synonym","wordnet"],
"upvotes" : 0,
"graphStructure": ["#same_synset: #0 -> #1", "#same_synset: #1 -> #0"]})
| Python | 40 | 29.525 | 81 | /feed/queue_util.py | 0.576577 | 0.55774 |
KAcee77/django_sputnik_map | refs/heads/main | from django.apps import AppConfig
class DjangoSputnikMapsConfig(AppConfig):
name = 'django_sputnik_maps'
| Python | 5 | 21.200001 | 41 | /django_sputnik_maps/apps.py | 0.783784 | 0.783784 |
KAcee77/django_sputnik_map | refs/heads/main | from django.conf import settings
from django.forms import widgets
class AddressWidget(widgets.TextInput):
'''a map will be drawn after the address field'''
template_name = 'django_sputnik_maps/widgets/mapwidget.html'
class Media:
css = {
'all': ('https://unpkg.com/leaflet@1.0.1/dist/leaflet.css',
settings.STATIC_URL + 'django_sputnik_maps/css/jquery-ui.min.css',
settings.STATIC_URL + 'django_sputnik_maps/css/base.css',)
}
js=(
"https://unpkg.com/leaflet@1.0.1/dist/leaflet.js",
settings.STATIC_URL + 'django_sputnik_maps/js/base.js',
settings.STATIC_URL + 'django_sputnik_maps/js/jquery-3.5.1.js',
settings.STATIC_URL + 'django_sputnik_maps/js/jquery-ui.min.js',
)
| Python | 21 | 37.904762 | 86 | /django_sputnik_maps/widgets.py | 0.608802 | 0.5978 |
KAcee77/django_sputnik_map | refs/heads/main | from django.db import models
from django_sputnik_maps.fields import AddressField
# all fields must be present in the model
class SampleModel(models.Model):
region = models.CharField(max_length=100)
place = models.CharField(max_length=100)
street = models.CharField(max_length=100)
house = models.IntegerField()
lat = models.FloatField()
lon = models.FloatField()
address = AddressField(max_length=200)
| Python | 12 | 34.916668 | 51 | /sample/models.py | 0.729358 | 0.701835 |
KAcee77/django_sputnik_map | refs/heads/main | from django.db import models
class AddressField(models.CharField):
pass | Python | 5 | 14.6 | 37 | /django_sputnik_maps/fields.py | 0.779221 | 0.779221 |
KAcee77/django_sputnik_map | refs/heads/main | from .widgets import AddressWidget | Python | 1 | 34 | 34 | /django_sputnik_maps/__init__.py | 0.882353 | 0.882353 |
KAcee77/django_sputnik_map | refs/heads/main | # from django.db import models
from django.contrib import admin
from django_sputnik_maps.fields import AddressField
from django_sputnik_maps.widgets import AddressWidget
from .models import SampleModel
@admin.register(SampleModel)
class SampleModelAdmin(admin.ModelAdmin):
formfield_overrides = {
AddressField: {
'widget': AddressWidget
}
}
| Python | 15 | 24.333334 | 53 | /sample/admin.py | 0.734908 | 0.734908 |
Code-Institute-Submissions/ultimate-irish-quiz | refs/heads/master | import os
from flask import Flask, render_template, redirect, request, url_for
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from os import path
if path.exists("env.py"):
import env
MONGO_URI = os.environ.get("MONGO_URI")
app = Flask(__name__)
app.config["MONGO_DBNAME"] = 'quiz_questions'
app.config["MONGO_URI"] = MONGO_URI
mongo = PyMongo(app)
# Route for Home Page
@app.route('/')
@app.route('/get_questions')
def get_questions():
return render_template("question_and_answer.html",
question_and_answer=mongo.db.question_and_answer.find())
# Route to Add a Question
@app.route('/add_question')
def add_question():
return render_template('addquestion.html',
categories=mongo.db.categories.find())
# Route to Insert Question
@app.route('/insert_question', methods=['POST'])
def insert_question():
question_and_answer = mongo.db.question_and_answer
question_and_answer.insert_one(request.form.to_dict())
return redirect(url_for('get_questions'))
# Route to Edit Question
@app.route('/edit_question/<question_and_answer_id>')
def edit_question(question_and_answer_id):
the_question = mongo.db.question_and_answer.find_one(
{"_id": ObjectId(question_and_answer_id)})
all_categories = mongo.db.categories.find()
return render_template('editquestion.html',
question_and_answer=the_question,
categories=all_categories)
# Route to Update Question
@app.route('/update_question/<question_and_answer_id>', methods=['POST'])
def update_question(question_and_answer_id):
question_and_answer = mongo.db.question_and_answer
question_and_answer.update({'_id': ObjectId(question_and_answer_id)},
{
'category_name': request.form.get('category_name'),
'question': request.form.get('question'),
'answer': request.form.get('answer')
})
return redirect(url_for('get_questions'))
# Route to Delete Question
@app.route('/delete_question/<question_and_answer_id>')
def delete_question(question_and_answer_id):
mongo.db.question_and_answer.remove(
{'_id': ObjectId(question_and_answer_id)})
return redirect(url_for('get_questions'))
# Route for Shop Link
@app.route('/shop')
def get_shop():
return render_template("shop.html")
# Route for Under Construction Link
@app.route('/under_construction')
def get_under_construction():
return render_template("under_construction.html")
# Route for General Knowledge category
@app.route('/get_general_knowledge')
def get_general_knowledge():
question_and_answer = list(mongo.db.question_and_answer.find(
{'category_name': 'General Knowledge'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for Geography category
@app.route('/get_geography')
def get_geography():
question_and_answer = list(
mongo.db.question_and_answer.find({'category_name': 'Geography'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for History category
@app.route('/get_history')
def get_history():
question_and_answer = list(
mongo.db.question_and_answer.find({'category_name': 'History'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for Music category
@app.route('/get_music')
def get_music():
question_and_answer = list(
mongo.db.question_and_answer.find({'category_name': 'Music'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for Politics category
@app.route('/get_politics')
def get_politics():
question_and_answer = list(
mongo.db.question_and_answer.find({'category_name': 'Politics'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for Sports category
@app.route('/get_sport')
def get_sport():
question_and_answer = list(
mongo.db.question_and_answer.find({'category_name': 'Sport'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
# Route for TV and Film category
@app.route('/get_tv_and_film')
def get_tv_and_film():
question_and_answer = list(mongo.db.question_and_answer.find({
'category_name': 'TV and Film'}))
return render_template("categories.html",
question_and_answer=question_and_answer)
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=int(os.environ.get('PORT')),
debug=True)
| Python | 168 | 27.839285 | 83 | /app.py | 0.649948 | 0.649948 |
MMaazT/TSP-using-a-Genetic-Algorithm | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 28 13:31:51 2019
@author: mmaaz
"""
from itertools import permutations
import random as rand
import matplotlib.pyplot as plt
# Distance table for the 6-city TSP instance: each city maps to a list of
# (neighbour, distance) pairs covering every other city.
# NOTE(review): the table is symmetric for every pair except B<->E
# (B lists ('E', 5) while E lists ('B', 8)) — confirm which value is intended.
cityDict ={'A': [('B', 8), ('C',10), ('D', 3), ('E', 4), ('F',6)],
           'B': [('A', 8), ('C',9), ('D', 5), ('E', 5), ('F',12)],
           'C': [('A', 10), ('B',9), ('D', 7), ('E', 6), ('F',2)],
           'D': [('A', 3), ('B',5), ('C', 7), ('E', 8), ('F',11)],
           'E': [('A', 4), ('B',8), ('C', 6), ('D', 8), ('F',8)],
           'F': [('A', 6), ('B',12), ('C', 2), ('D', 11), ('E',8)]}
def main():
    """Evolve 10 random tours of cities A, B, D, E, F for 100 generations
    (the survival step prepends the fixed start city 'C' via addC), then
    print the per-generation best/average history and plot the best curve.
    """
    population = rand.sample(list(permutations('ABDEF')), 10)
    best_history = []
    average_history = []
    for _ in range(100):
        scored = fitnessFunction(population)
        parents = parentSelection(scored)
        offspring = insertMutations(crossOver(parents))
        survivors = survivalSelection(offspring, scored)
        generation_best = bestSoFar(survivors)
        generation_avg = averageBest(survivors)
        population = removeC(survivors)
        best_history.append(generation_best)
        average_history.append(round(generation_avg, 5))
    print(best_history)
    print(average_history)
    plotBest(best_history)
def fitnessFunction(candidates, graph=None):
    """Score candidate tours by summed edge distance.

    Args:
        candidates: iterable of tours (sequences of city labels).
        graph: optional adjacency mapping ``{city: [(neighbour, dist), ...]}``.
            Defaults to the module-level ``cityDict``, preserving the
            original call signature.

    Returns:
        List of ``(tour_as_list, cost)`` pairs.  The cost is the sum of the
        edge weights between consecutive cities; no closing edge back to the
        start is added (open tour, matching the original behaviour).

    Raises:
        KeyError: if a consecutive pair has no edge in ``graph`` (the
            original silently contributed 0 for a missing edge; failing
            loudly is safer and cannot trigger on the complete cityDict).
    """
    if graph is None:
        graph = cityDict
    # Hoist the O(m) per-edge linear scan into a one-time dict build so each
    # edge weight is a constant-time lookup.
    lookup = {city: dict(pairs) for city, pairs in graph.items()}
    tourCost = []
    for individual in candidates:
        cost = sum(lookup[a][b] for a, b in zip(individual, individual[1:]))
        tourCost.append((list(individual), cost))
    return tourCost
def parentSelection(tourCost):
    """Select four parents via binary tournaments without replacement.

    Each tournament samples two distinct individuals from the remaining
    pool and keeps the one with the larger tour cost; the winner is then
    removed from the pool, so the four selected parents are distinct
    entries.  The input list itself is not modified.

    The original spelled out the four tournaments as four copy-pasted
    blocks; this loop performs the identical sequence of ``rand.sample``
    calls and comparisons.

    NOTE(review): ``>`` keeps the HIGHER-cost (worse) tour of each pair,
    which is backwards for TSP minimisation; ``<`` may have been intended.
    Kept as-is to preserve behaviour — confirm intent before changing.

    Args:
        tourCost: list of ``(tour_list, cost)`` pairs.

    Returns:
        List of four ``(tour_list, cost)`` pairs drawn from ``tourCost``.
    """
    pool = tourCost[:]
    parents = []
    for _ in range(4):
        first, second = rand.sample(pool, 2)
        winner = first if first[1] > second[1] else second
        parents.append(winner)
        pool.remove(winner)
    return parents
def _ox_child(segment_parent, donor_parent, start, end):
    """Build one order-crossover child.

    Copies segment_parent[start:end+1] into place, then fills the remaining
    slots with donor_parent's genes in donor order, starting just after the
    segment and wrapping around (skipping genes already present).
    """
    child = [0, 0, 0, 0, 0]
    child[start:end + 1] = segment_parent[start:end + 1]
    # Visit the donor's genes starting at 'end', wrapping to the front.
    donor_order = donor_parent[end:] + donor_parent[:end]
    slot = end
    for gene in donor_order:
        if gene not in child:
            slot += 1
            child[slot % 5] = gene
    return child


def crossOver(parents):
    """Order crossover (OX) on two parent pairs, producing four offspring.

    Refactor: the original repeated the same segment-copy-and-fill logic
    four times; it now delegates to _ox_child.  The two random cut points
    are drawn exactly as before (and the end is widened by one when the
    points coincide), so random-number consumption is unchanged.

    :param parents: four (tour, cost) tuples from parentSelection
    :return: list of four offspring tours (lists of city labels)
    """
    start12 = rand.randint(0, 3)
    end12 = rand.randint(start12, 4)
    if start12 == end12:
        end12 += 1
    start34 = rand.randint(0, 3)
    end34 = rand.randint(start34, 4)
    if start34 == end34:
        end34 += 1
    return [
        _ox_child(parents[0][0], parents[1][0], start12, end12),
        _ox_child(parents[1][0], parents[0][0], start12, end12),
        _ox_child(parents[2][0], parents[3][0], start34, end34),
        _ox_child(parents[3][0], parents[2][0], start34, end34),
    ]
def insertMutations(crossOffsprings):
    """Apply an insert mutation to every offspring with probability 0.20.

    When the mutation fires, a gene from the tail (index 3 or 4) is moved
    to the slot right after a random head index (0..2); the relative order
    of the remaining genes is preserved.

    Bug fix: the original removed the duplicate with ``list.remove``, which
    deletes the *first* occurrence — i.e. the freshly inserted copy — so
    the list came back unchanged and the mutation was silently a no-op.
    ``pop`` on the shifted source index removes the correct element.

    :param crossOffsprings: list of offspring tours (mutated in place)
    :return: the same list, possibly mutated
    """
    if round(rand.random(), 2) <= 0.20:
        for offspring in crossOffsprings:
            dest = rand.randint(0, 2)
            src = rand.randint(3, 4)
            offspring.insert(dest + 1, offspring[src])
            # The insert shifted the original gene one slot to the right.
            offspring.pop(src + 1)
    return crossOffsprings
def survivalSelection(mutCross, parents):
    """Merge the offspring into the population and keep the 10 cheapest tours.

    Offspring are priced with fitnessFunction first, then every tour gets
    the fixed start city 'C' re-attached (addC) before ranking by cost.

    :param mutCross: list of (possibly mutated) offspring tours
    :param parents: current population as (tour, cost) tuples
    :return: the ten lowest-cost (tour, cost) tuples, cheapest first
    """
    population = parents + fitnessFunction(mutCross)
    ranked = sorted(addC(population), key=lambda entry: entry[1])
    return ranked[:10]
def addC(parents):
    """Prepend the fixed start city 'C' to every tour and re-price the tours.

    Note: the tour lists inside *parents* are modified in place.

    :param parents: list of (tour, cost) tuples
    :return: freshly priced (tour, cost) tuples from fitnessFunction
    """
    tours = []
    for entry in parents:
        entry[0].insert(0, 'C')
        tours.append(entry[0])
    return fitnessFunction(tours)
def removeC(parents):
    """Strip the fixed start city 'C' from every tour and return the bare tours.

    Note: the tour lists inside *parents* are modified in place.

    :param parents: list of (tour, cost) tuples whose tours contain 'C'
    :return: list of the tour lists, each without its first 'C'
    """
    tours = []
    for entry in parents:
        entry[0].remove('C')
        tours.append(entry[0])
    return tours
def bestSoFar(finalParents):
    """Return the cost of the best individual (the population is sorted, so
    the first entry is the cheapest tour)."""
    best_tour, best_cost = finalParents[0]
    return best_cost
def averageBest(finalParents):
    """Return the mean tour cost of the population.

    Bug fix: the original divided the summed costs by the hard-coded
    constant 6 even though survivalSelection keeps 10 individuals, so the
    reported "average" was inflated; divide by the actual population size.

    :param finalParents: list of (tour, cost) tuples
    :return: mean cost, or 0 for an empty population
    """
    if not finalParents:
        return 0
    return sum(cost for _, cost in finalParents) / len(finalParents)
def plotAverage(ave):
    """Plot the average population fitness per generation.

    Builds the matplotlib figure but does not call plt.show(); the caller
    decides when to display it.
    """
    plt.plot(ave)
    plt.xlabel('Generation Number')
    plt.ylabel('Fitness')
    plt.title('Travelling Salesman Problem using Genetic Algorithm: Average At Each Generation')
def plotBest(best):
    """Plot the best (lowest) tour cost per generation.

    Builds the matplotlib figure but does not call plt.show(); the caller
    decides when to display it.
    """
    plt.plot(best)
    plt.xlabel('Generation Number')
    plt.ylabel('Fitness')
    plt.title('Travelling Salesman Problem using Genetic Algorithm: Best At Each Generation')
# Bug fix: the guard compared __name__ against 'main' instead of
# '__main__', so the script never ran when executed directly.
if __name__ == '__main__':
    main()
| Python | 205 | 26.814634 | 96 | /TSP.py.py | 0.571229 | 0.5104 |
joanap/FooterPagination | refs/heads/master | import unittest
from src import footer_pagination
class SimpleTests(unittest.TestCase):
    """Unit tests for the pure helper functions in footer_pagination.

    A set of pages is modelled as a (first, last) tuple; None denotes an
    absent/empty set.
    """
    def test_beginning_pages(self):
        """Test the initial status of the set of pages in the beginning
        """
        self.assertSequenceEqual((1, 1), footer_pagination.init_beginning_pages(5, 1))
    def test_end_pages(self):
        """Test the initial status of the set of pages in the end pages
        """
        self.assertSequenceEqual((5, 5), footer_pagination.init_end_pages(5, 1))
    def test_around_pages(self):
        """Test the initial status of the set of around pages
        """
        self.assertSequenceEqual((4, 4), footer_pagination.init_around_pages(4, 0, 5))
    def test_overlapping_pages(self):
        """Test overlapping sets of pages
        """
        self.assertTrue(footer_pagination.are_overlapping_pages((1, 3), (2, 4)))
    def test_not_overlapping_pages(self):
        """Test not overlapping sets of pages
        """
        self.assertFalse(footer_pagination.are_overlapping_pages((1, 3), (6, 7)))
    def test_merge_pages(self):
        """Tests merging of two overlapping sets of pages
        """
        self.assertSequenceEqual((1, 4), footer_pagination.merge_pages((1, 3), (2, 4)))
    def test_update_overlap_pages(self):
        """Test the update of two sets of pages that overlap
        """
        self.assertSequenceEqual(((1, 4), None), footer_pagination.update_pages((1, 3), (2, 4)))
    def test_update_not_overlap_pages(self):
        """Test the update of two sets of pages that do not overlap
        """
        self.assertSequenceEqual(((1, 3), (6, 7)), footer_pagination.update_pages((1, 3), (6, 7)))
    def test_find_first_page(self):
        """Test if the first page is contained in the sets of pages
        """
        self.assertTrue(footer_pagination.find_page([(1, 2), (3, 5), None], 1))
    def test_not_find_first_page(self):
        """Test that the first page is absent from the sets of pages
        """
        self.assertFalse(footer_pagination.find_page([(2, 3), (4, 5), None], 1))
    def test_exist_remaining_pages(self):
        """Test when two sets of pages have remaining pages between them
        """
        self.assertTrue(footer_pagination.exist_remaining_pages((1, 3), (6, 7)))
    def test_not_exist_remaining_pages(self):
        """Test when two sets of pages do not have remaining pages between them
        """
        self.assertFalse(footer_pagination.exist_remaining_pages((1, 7), (8, 9)))
def main():
    """Discover and run every unittest test case defined in this module."""
    unittest.main()


if __name__ == '__main__':
    main()
joanap/FooterPagination | refs/heads/master | import sys
# Expected length of sys.argv: script name plus four user arguments.
INPUT_LEN = 5
# Pages are numbered starting at 1.
FIRST_PAGE = 1
# Indices of the first/last page inside a (first, last) pages tuple.
FIRST_PAGE_INDEX = 0
LAST_PAGE_INDEX = 1
# Placeholder printed where pages are skipped.
REMAINING_PAGES = "..."
def init_beginning_pages(total_pages, boundaries):
    """Define the initial status for the set of pages in the beginning: return first and last page

    Bug fix: the original omitted ``return`` on two branches, so a boundary
    count covering the whole range always produced ``None`` instead of the
    documented tuple (and the zero-boundary branch relied on an implicit
    None).  Mirrors init_end_pages, which handles both cases explicitly.

    :param total_pages: total number of pages
    :param boundaries: how many pages we want to link in the beginning, or end
    :return: (first beginning page, last beginning page), or None when boundaries is 0
    """
    if boundaries == 0:
        return None
    if boundaries < total_pages:
        return FIRST_PAGE, boundaries
    return FIRST_PAGE, total_pages
def init_end_pages(total_pages, boundaries):
    """Define the initial status for the set of pages in the end: return first and last page

    :param total_pages: total number of pages
    :param boundaries: how many pages we want to link in the beginning, or end
    :return: (first end page, last end page), or None when boundaries is 0
    """
    if boundaries == 0:
        return None
    first = total_pages - boundaries + 1
    if first > 1:
        return first, total_pages
    return FIRST_PAGE, total_pages
def init_around_pages(current_page, around, total_pages):
    """Define the initial status for the set of around pages: return first and last page

    The window [current_page - around, current_page + around] is clamped to
    the valid range [FIRST_PAGE, total_pages].

    :param current_page: current page
    :param around: how many pages we want to link before and after the current page
    :param total_pages: total number of pages
    :return: (first around page, last around page)
    """
    first = max(FIRST_PAGE, current_page - around)
    last = min(total_pages, current_page + around)
    return first, last
def initial_pages_status(current_page, total_pages, boundaries, around):
    """Build the initial sets of pages: beginning, around and end.

    :param current_page: current page
    :param total_pages: total number of pages
    :param boundaries: how many pages we want to link in the beginning, or end
    :param around: how many pages we want to link before and after the current page
    :return: list [beginning_pages, around_pages, end_pages]
    """
    return [
        init_beginning_pages(total_pages, boundaries),
        init_around_pages(current_page, around, total_pages),
        init_end_pages(total_pages, boundaries),
    ]
def are_overlapping_pages(pages1, pages2):
    """Return True when the two page ranges share at least one page.

    None (an absent set) never overlaps anything.

    :param pages1: set of pages, or None
    :param pages2: set of pages, or None
    :return: True if pages1 and pages2 overlap
    """
    if pages1 is None or pages2 is None:
        return False
    return (pages1[FIRST_PAGE_INDEX] <= pages2[LAST_PAGE_INDEX]
            and pages2[FIRST_PAGE_INDEX] <= pages1[LAST_PAGE_INDEX])
def merge_pages(pages1, pages2):
    """Return the smallest page range covering both overlapping ranges.

    :param pages1: set of pages
    :param pages2: set of pages
    :return: merged (first, last) set of pages
    """
    first = min(pages1[FIRST_PAGE_INDEX], pages2[FIRST_PAGE_INDEX])
    last = max(pages1[LAST_PAGE_INDEX], pages2[LAST_PAGE_INDEX])
    return first, last
def update_pages(pages1, pages2):
    """Merge two page ranges when they overlap, otherwise leave them as-is.

    :param pages1: set of pages
    :param pages2: set of pages
    :return: (merged, None) if they overlap, else (pages1, pages2) unchanged
    """
    if not are_overlapping_pages(pages2, pages1):
        return pages1, pages2
    return merge_pages(pages2, pages1), None
def update_all_pages(initial_pages_status):
    """Iterate the sets of pages and check if the current set of pages overlap the next sets of pages; unify sets
    that overlap.

    :param initial_pages_status: initial pages status
    :return: final pages status with no overlapping.
    """
    # Compare every set against each later set (in place): an overlapping
    # pair is merged into the earlier slot and the later slot becomes None.
    for pages_index, item in enumerate(initial_pages_status):
        for i in range(pages_index, len(initial_pages_status) - 1):
            new_pages_status = update_pages(initial_pages_status[pages_index], initial_pages_status[i+1])
            # NOTE(review): update_pages always returns a tuple, so this
            # guard is always true; kept as-is to preserve behaviour.
            if new_pages_status is not None:
                # Only overwrite slots that still hold a set; a slot already
                # cleared to None stays None.
                if initial_pages_status[pages_index] is not None:
                    initial_pages_status[pages_index] = new_pages_status[0]
                if initial_pages_status[i+1] is not None:
                    initial_pages_status[i+1] = new_pages_status[1]
    return initial_pages_status
def exist_remaining_pages(pages1, pages2):
    """Return True when at least one page lies strictly between the two ranges.

    None (an absent set) never has remaining pages next to it.

    :param pages1: set of pages, or None
    :param pages2: set of pages, or None
    :return: True if exist remaining pages between pages1 and pages2
    """
    if pages1 is None or pages2 is None:
        return False
    return pages2[FIRST_PAGE_INDEX] - pages1[LAST_PAGE_INDEX] > 1
def print_range(pages):
    """Print every page in the range, space-separated, with no newline.

    :param pages: set of pages to print, or None (prints nothing)
    """
    if pages is None:
        return
    first, last = pages[FIRST_PAGE_INDEX], pages[LAST_PAGE_INDEX]
    print(*range(first, last + 1), sep=' ', end='')
def find_page(pages_list, page_to_found):
    """Return True when the page is an endpoint of any range in the list.

    :param pages_list: list with sets of pages (None entries are skipped)
    :param page_to_found: page to find in the list
    :return: True if the page is the first or last page of some set
    """
    return any(
        pages is not None
        and page_to_found in (pages[FIRST_PAGE_INDEX], pages[LAST_PAGE_INDEX])
        for pages in pages_list
    )
def remove_none(pages_list):
    """Drop the None placeholders from a list of page sets, keeping order.

    :param pages_list: list of sets of pages
    :return: new list without None elements
    """
    return list(filter(lambda pages: pages is not None, pages_list))
def print_output(pages_list, last_page, boundaries):
    """Concatenate and print footer pagination

    :param pages_list: sets of pages
    :param last_page: total pages
    :param boundaries: how many pages we want to link in the beginning, or end
    """
    pages_list_without_none = remove_none(pages_list)
    # With no boundary pages the first page may be missing entirely; show a
    # leading ellipsis in that case.
    if boundaries == 0 and not find_page(pages_list_without_none, FIRST_PAGE):
        print(REMAINING_PAGES + " ", end='')
    for pages_index, current_pages in enumerate(pages_list_without_none):
        print_range(current_pages)
        if pages_index + 1 < len(pages_list_without_none):
            # Separate consecutive ranges with '...' when pages were skipped
            # between them, otherwise just a single space.
            if exist_remaining_pages(current_pages, pages_list_without_none[pages_index + 1]):
                print(" " + REMAINING_PAGES + " ", end='')
            else:
                print(" ", end='')
    # Symmetric trailing ellipsis when the last page is not linked.
    if boundaries == 0 and not find_page(pages_list_without_none, last_page):
        print(" " + REMAINING_PAGES, end='')
def validate_input(current_page, total_pages, boundaries, around):
    """
    Raises an exception if input is invalid
    :param current_page: current page
    :param total_pages: total number of pages
    :param boundaries: how many pages we want to link in the beginning, or end
    :param around: how many pages we want to link before and after the current page
    """
    if min(current_page, total_pages) <= 0:
        raise ValueError("Current page and total pages must be greater than 0")
    if min(boundaries, around) < 0:
        raise ValueError("Boundaries and around must be greater or equal to 0")
    if current_page > total_pages:
        raise ValueError("Current page must be lower than total pages")
def get_footer_pagination(current_page, total_pages, boundaries, around):
    """Build and print footer pagination according page, total_pages, boundaries and around

    :param current_page: current page
    :param total_pages: total number of pages
    :param boundaries: how many pages we want to link in the beginning, or end
    :param around: how many pages we want to link before and after the current page
    """
    pages = initial_pages_status(current_page, total_pages, boundaries, around)
    print_output(update_all_pages(pages), total_pages, boundaries)
def main():
    """Read arguments current_page, total_page, boundaries and around and build the corresponding footer pagination
    """
    # sys.argv[0] is the script name, so INPUT_LEN (5) means four user args.
    if len(sys.argv) == INPUT_LEN:
        current_page = int(sys.argv[1])
        total_pages = int(sys.argv[2])
        boundaries = int(sys.argv[3])
        around = int(sys.argv[4])
        try:
            validate_input(current_page, total_pages, boundaries, around)
            get_footer_pagination(current_page, total_pages, boundaries, around)
        except ValueError as err:
            # Invalid ranges are reported on stdout rather than crashing.
            print(err)
    else:
        print("Missing arguments")
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
Saumya-singh-02/Quiz-app | refs/heads/master | from django.urls import path
from .views import(
QuizListView,
quiz_view,
quiz_data_view,
save_quiz_view
)
# URL namespace for reversing, e.g. reverse('quizes:main-view').
app_name = 'quizes'

urlpatterns = [
    path('', QuizListView.as_view(), name='main-view'),
    path('<pk>/', quiz_view, name='quiz-view'),
    path('<pk>/save/', save_quiz_view, name='save-view'),
    path('<pk>/data/', quiz_data_view, name='quiz-data-view'),
]
Saumya-singh-02/Quiz-app | refs/heads/master | from django.contrib import admin
from .models import Result
# Expose quiz Result records in the Django admin interface.
admin.site.register(Result)
| Python | 4 | 28.25 | 32 | /results/admin.py | 0.811966 | 0.811966 |
class Solution:
    def has_increasing_subsequence(self, nums):
        """Return True if *nums* contains a strictly increasing subsequence
        of length three (LeetCode 334), else False.

        Greedy O(n) time / O(1) space: track the smallest value seen so far
        and the smallest value that has something smaller before it; any
        number larger than both completes a triplet.

        Bug fix: the original second update was ``min(next_smallest, min)``,
        comparing against the builtin ``min`` function instead of ``num``,
        which raises TypeError on any input that reaches that branch.
        """
        smallest = float('inf')
        next_smallest = float('inf')
        for num in nums:
            if num <= smallest:
                smallest = num
            elif num <= next_smallest:
                next_smallest = num
            else:
                # num > next_smallest > smallest, in left-to-right order.
                return True
        return False
class Solution(object):
    """Find the numbers in 1..n missing from an array of length n (LeetCode 448)."""

    def dissapeared_numbers(self, numbers):
        """Return the values in 1..len(numbers) absent from *numbers*.

        Auxiliary-list approach: every present value zeroes its candidate
        slot, then the zeroed slots are compacted away.  O(n) time, O(n)
        extra space; the input list is not modified.
        """
        if not numbers:
            return []
        candidates = list(range(1, len(numbers) + 1))
        for value in numbers:
            candidates[value - 1] = 0
        self.delete_zeros(candidates)
        return candidates

    def delete_zeros(self, arr):
        """Remove every 0 from *arr* in place, preserving the order of the rest."""
        write = 0
        for value in arr:
            if value != 0:
                arr[write] = value
                write += 1
        # Truncate the leftover tail in one operation.
        del arr[write:]

    def dissapeared_numbers2(self, numbers):
        """Return the missing values using the sign-marking trick (O(1) extra space).

        Each present value v negates the entry at index v-1; entries still
        positive afterwards mark missing values.

        Fixes: unused loop variables removed, and the signs are restored
        while collecting the result so the caller's list comes back
        unchanged (the original left it permanently negated).
        """
        if not numbers:
            return []
        for value in numbers:
            slot = abs(value) - 1
            if numbers[slot] > 0:
                numbers[slot] = -numbers[slot]
        missing = []
        for index in range(len(numbers)):
            if numbers[index] >= 0:
                missing.append(index + 1)
            else:
                numbers[index] = -numbers[index]  # restore the marker
        return missing
# Manual smoke test: 5 and 6 are the values from 1..8 missing below.
solution = Solution()
numbers = [4, 3, 2, 7, 8, 2, 3, 1]
print(solution.dissapeared_numbers2(numbers))
aymane081/python_algo | refs/heads/master | # 495
# time: O(n)
# space: O(1)
class Solution:
    """Teemo Attacking (LeetCode 495): total poisoned time in O(n) / O(1)."""

    def find_poisoned_duration(self, timeSeries, duration):
        """Return the total number of time units the target is poisoned.

        Each attack poisons for *duration* units and overlapping attacks do
        not stack, so each gap between attacks contributes
        min(gap, duration) and the final attack always runs its full course.

        Bug fix: the original appended float('inf') to the caller's list as
        a loop sentinel, permanently corrupting the input; the sentinel is
        no longer needed.

        :param timeSeries: ascending attack times
        :param duration: poison duration per attack
        :return: total poisoned time units
        """
        if not timeSeries:
            return 0
        total = duration  # the last attack always contributes in full
        for prev, curr in zip(timeSeries, timeSeries[1:]):
            total += min(curr - prev, duration)
        return total
# time: O(width of attack window * number of attacks)
# space: O(1)
class Solution2:
    """Brute-force reference version: simulate every time tick."""

    def find_poisoned_duration(self, timeSeries, duration):
        """Return total poisoned units by walking every tick of the window."""
        if not timeSeries:
            return 0
        total = 0
        remaining = 0
        for tick in range(timeSeries[0], timeSeries[-1] + 1):
            if tick in timeSeries:  # an attack refreshes the poison timer
                remaining = duration
            if remaining:
                total += 1
                remaining -= 1
        # Poison left over after the final attack still counts.
        return total + remaining
solution = Solution()
print(solution.find_poisoned_duration([1], 2))  # expect 2
print(solution.find_poisoned_duration([1], 2))  # same call, same result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.