seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# Exercise 3: Print characters from a string that are present at an even index number
def even_index_chars(text):
    """Return the characters of *text* at even indices (0, 2, 4, ...).

    The original loop used `range(0, len(str) - 1, 2)`, which silently
    dropped the last even index for odd-length strings, shadowed the
    builtin `str`, and re-tested an always-true `x % 2 == 0` condition.
    """
    return text[::2]

if __name__ == '__main__':
    user_text = input('Please enter a string: ')
    print(user_text)
    for ch in even_index_chars(user_text):
        print(ch)
| TheCoderGuru/python_practice | printCharacters.py | printCharacters.py | py | 234 | python | en | code | 1 | github-code | 13 |
# Advent of Code 2016, day 1: taxicab navigation.
# Reads comma-separated instructions such as "R2, L3" from input.txt, walks
# a unit grid (direction 0=N, 1=E, 2=S, 3=W), and prints the Manhattan
# distance of the final position (part 1) and of the first position that is
# visited twice (part 2).
#
# Fixes over the original: no longer shadows the builtin `input`, closes the
# file via a context manager, and uses a set for O(1) visited lookups.
with open("input.txt") as f:
    instructions = f.read().split(', ')
direction = 0          # 0 = north, 1 = east, 2 = south, 3 = west
x = 0
y = 0
visited = set()
twice = False
for step in instructions:
    # Turn right or left, wrapping around the four compass directions.
    if step[0] == "R":
        direction = (direction + 1) % 4
    elif step[0] == "L":
        direction = (direction - 1) % 4
    # Walk one unit at a time so every intermediate cell is recorded.
    for _ in range(int(step[1:])):
        if direction == 0:
            y += 1
        elif direction == 1:
            x += 1
        elif direction == 2:
            y -= 1
        elif direction == 3:
            x -= 1
        if not twice:
            if (x, y) in visited:
                # Part 2: first location entered a second time.
                print("part 2:" + str(abs(x)+abs(y)))
                twice = True
            else:
                visited.add((x, y))
print("part 1:" + str(abs(x)+abs(y)))
# part 1: 209
# part 2: 136
# def faculteit(i, j=1):
# if i <= 0:
# print(j)
# return
# faculteit(i-1, j*i)
# faculteit(5)
| Lesley55/AdventOfCode | 2016/1/part1.py | part1.py | py | 949 | python | en | code | 1 | github-code | 13 |
72636525779 | import unittest
from selenium import webdriver
from PO.app_creat import app_creat_Page
from PO.app_edit_del import app_edit_Page,app_del_Page
from PO.app_search import app_search_Page
import time
class TestApp(unittest.TestCase):
    """Selenium UI tests for the gateway's application management pages:
    create, edit, detail (secret show/hide), delete and search flows.

    NOTE(review): tests are order-dependent (test3 relies on the app created
    in test1; test7 deletes it) — method names enforce ordering alphabetically.
    """
    #driver = webdriver.Chrome()
    @classmethod
    def setUpClass(cls):
        """Start one shared browser, inject the auth cookie, open the gateway."""
        cls.driver = webdriver.Chrome()
        cls.url = "http://10.0.95.8:8091/apigw"
        sp = app_creat_Page(cls.driver)
        sp.open(cls.url)
        # Add the login cookie to the browser, then reload so it takes effect.
        sp.send_cookie()
        sp.open(cls.url)
        time.sleep(3)
        cls.driver.implicitly_wait(20)
        # Error messages produced while the script runs are collected here.
        cls.verificationErrors = []
    @classmethod
    def tearDownClass(cls):
        """Quit the shared browser, swallowing (but printing) any error."""
        #cls.driver = webdriver.Chrome()
        try:
            cls.driver.quit()
        # If the verificationErrors list gathered above were non-empty, its
        # collected error messages would be reported at this point.
        except Exception as e:
            print(e)
    def test1_app_creat(self):
        """Create an application."""
        # Instantiate the app page object
        sp = app_creat_Page(self.driver)
        sp.click_diaoyong_api_loc()
        sp.click_app_loc()
        sp.mouse_loc()
        sp.click_app_creat_loc()
        sp.input_content_loc('app_test1','test')
        sp.click_queding_loc()
        time.sleep(2)
    def test2_app_creat(self):
        """Create an application -- empty application name is rejected."""
        sp = app_creat_Page(self.driver)
        time.sleep(2)
        sp.click_app_creat_loc()
        sp.input_content_loc('','test')
        sp.click_queding_loc()
        # Assertion: page shows the "must not be empty" validation message.
        self.assertEqual(sp.get_name_null(),'不能为空')
    def test3_app_creat(self):
        """Create an application -- duplicate application name is rejected."""
        sp = app_creat_Page(self.driver)
        sp.open(self.url)
        sp.click_diaoyong_api_loc()
        sp.click_app_loc()
        time.sleep(2)
        sp.mouse_loc()
        sp.click_app_creat_loc()
        sp.input_content_loc('app_test1','test')
        sp.click_queding_loc()
        time.sleep(2)
        # Assertion: "the given application name already exists" message.
        self.assertEqual(sp.get_name_repeat(), '指定的应用名称已存在,请重新修改')
    def test4_app_edit(self):
        """Edit an application."""
        sp = app_edit_Page(self.driver)
        sp.open(self.url)
        sp.click_diaoyong_api_loc()
        sp.click_app_loc()
        sp.mouse_loc()
        time.sleep(2)
        sp.click_app_edit_loc()
        sp.clear_content_loc()
        sp.input_content_loc('xiugai1','test1')
        sp.click_queding_loc()
        time.sleep(2)
    def test5_app_xiangqing(self):
        """Application detail -- show and hide the secret key."""
        xiangqing = app_del_Page(self.driver)
        xiangqing.click_diaoyong_api_loc()
        xiangqing.click_app_loc()
        xiangqing.mouse_loc()
        xiangqing.click_name_loc()
        xiangqing.click_xianshi_loc()
        time.sleep(2)
        xiangqing.click_yincang_loc()
    def test6_app_del(self):
        """Cancel out of the delete-application dialog."""
        delete = app_del_Page(self.driver)
        delete.click_diaoyong_api_loc()
        delete.click_app_loc()
        delete.mouse_loc()
        delete.click_app_del_loc()
        delete.click_quxiao_loc()
        time.sleep(2)
    def test7_app_del(self):
        """Delete an application."""
        delete = app_del_Page(self.driver)
        delete.click_diaoyong_api_loc()
        delete.click_app_loc()
        delete.mouse_loc()
        delete.click_app_del_loc()
        delete.click_queding_loc()
        time.sleep(2)
    def test8_app_search(self):
        """Search for an application by exact name."""
        search = app_search_Page(self.driver)
        search.click_diaoyong_api_loc()
        search.click_app_loc()
        search.mouse_loc()
        search.input_name_loc('dada')
        search.click_search_loc()
        time.sleep(2)
        # Assertion: the result list shows the searched name.
        self.assertEqual(search.get_content_loc(), 'dada')
    def test9_app_search(self):
        """Fuzzy (partial-name) search returns no results for 'daxi'."""
        search = app_search_Page(self.driver)
        search.click_diaoyong_api_loc()
        search.click_app_loc()
        search.mouse_loc()
        search.input_name_loc('daxi')
        search.click_search_loc()
        time.sleep(2)
        # Assertion: "no data" placeholder is displayed.
        self.assertEqual(search.get_null_content_loc(), '暂无数据')
| yinxiong007/api-automated-testing | api-auto-test/testcase/test_app.py | test_app.py | py | 4,350 | python | en | code | 0 | github-code | 13 |
44037328483 | import sys, math, time, zlib, colorsys, random, os, contextlib, array
#
# ttyfb 0.1 PREVIEW for Python
# 2021-04-17 Thomas Perl <m@thp.io>
# Based on code from the PyUGAT XMas Puzzle (2019-12-18)
#
# Copyright 2021 Thomas Perl
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__all__ = (
'w', 'h',
'resize', 'clear', 'fill',
'getpixel', 'putpixel',
'render', 'to_stdout', 'to_file',
'line', 'circle', 'rectangle',
'Vertex', 'triangle',
'text_big', 'text_small',
'view_image',
'no_cursor',
'lerp', 'lerp_rgb',
)
fontdat = bytearray(zlib.decompress(b'x\x9c=R\xc1\x8a\x13A\x10-6d\xe8C\xbbY5\x87\x16\x9a'
b' \x83\x07\t\x1e\x06\x0fc\\\xda\x8e' # Vincent font by Quinn Evans, public domain 2010
b'\x11\x0f\x11\xf6\xba \xe2\xa1!\xa4=\xcc\x10\x07\x84M\xc0\xa1\xfb\xdb\xf2!9\xed\x87'
b'\xc4W5q;\x93\xd4\xbc\xaa\xeaW\xf5\xaaBt9~u\xbf\xba\xff\xe6\xc9\xe7&7{O\xe5>go\x15)\xeb'
b'/v\x9e\xe7\xca\x0e\x18\x96\xac\xf7\xde\xd2\xf9\xfcx<\x1e\x1f\xcf\xc0eY\n>\x9dN\xc0\x93'
b'\xd1\xf8\xe3z]\x91B2\x07\xcc\xf4\xe6\xa6\xea\x02M^=\x7f\xf9\xe1!\x90\x9a\xdb\xbd\x9d+\n'
b'\x87\x94\xd3!\xd0h\xb2\xcc\xcb\xc9\x88\xeb \xa4(\xe2\xe0\x87\x96\xdb\xed\xa7\xf1xL\xfa'
b'\xfd\xfa\xcd\x8b\xbbJz\xcd9_\xf2\xbc\xf4c\xad%|\x87>5\xf84\xac\x01\x9fQ\xc8\xee\xba\x0e'
b'\xf94-s9%\xe4\xa3q`\xa8\xf3\xd6\xaa\xff\xf2\xc98g\x0c\x19\xaa\xeb\xa9\xe0\xba\xce\xf8'
b'\xd4\xa4f\xaf\xed\x95C\x9fQ\x9b*\x16\xe4\xa2{\xbb\x8dK2\xc6H3\x85\xc6\x9b.(T\xec\t\xb8'
b'h\xb3\xad\x11PP>\xd0W\x15\xfb\x89\xdcS\xb1\x10\xe4"\xbb]\xdc=\xc4\xe8\xc0\xb7\x00A\x02'
b'.t\x15\xc4\xda\x02~m\xdf\xdd%\xad)\x85\xbe`\xecb\xe89?E\xcd\x95\x81#?l=\xc7\x89U\x18'
b'\xba\xd8J\xcap\x7f\\\x9e\x1b@\x9fR\x98\xf9Y\xef\xe2\xeb\xcf\xef_\x1c\xf7\x11\x13\x8f'
b'\x1c\xe4\xfcp\x9d\x10\xc0\xd7\xf3"\x80S\x08}@_\x83\r\x12\xdf!\x1e/\xf70=\x1cG3\xadu'
b'\xdb.(\xb6\x87\xee\xd0FH\xe5\x93h\xb3\xcf\xcdf\xb3\xa1\xcd\x9f\xbf\xbf\x7f\xc1:\xe1'
b'\x1d\xf8{\xe1\x8bq\xe7\n\xa9\xcf\xf7\xc0\xefXO\xfa!\xc4\xf2\x7f\x80\xc4\xc1:C\xe0j\xf2'
b'\x1e|\x9b\xda\xd6\xe0\x13/\xf2\x92\xcc5\xd1L\xae\xcd\x06\xc1\x90\x7f\x10| 5-WO{\xa0L'
b'Zf#\xeb)<\xf8\xd1\xac\xe8\xe7\x89\xb0\xfe\xa2\x80\x1b~\xc6)8\xb2uU\xf5\x15V\xcf\xde'
b'\xc2]\xf2#\xe6M\xa2_\xd3\xa0\x1f\xc3kY\x87\x19\x0e\xee\xf3\x04\x1a\xd8^\xe6%\x13\xe7'
b'}\xf5\x83~rm\xeb\x9eMH\x1c\x8c\xc9\x7fvW=\xef1\r\xf7\x07\xfd\xf2\xe2\x84\xafi\x1a\xc6'
b'@.\x8a\x9b\xfb\x01\x11\x06\xe0\xe8\x1a\xcb7\xe6\x9a\xeb\xcb\xb7\xc3\xbf\xd4\x98\x0e'
b'\xf1\xdb\x96I\x14\xa6\xb6Z\xe5\x7f\xdb\x0c\xd0\xc5'))
w, h = os.get_terminal_size()
h *= 2
# Based on: https://jonasjacek.github.io/colors/
RGB16 = (
(0, 0, 0),
(128, 0, 0),
(0, 128, 0),
(128, 128, 0),
(0, 0, 128),
(128, 0, 128),
(0, 128, 128),
(192, 192, 192),
(128, 128, 128),
(255, 0, 0),
(0, 255, 0),
(255, 255, 0),
(0, 0, 255),
(255, 0, 255),
(0, 255, 255),
(255, 255, 255),
)
class Demo:
    """Global framebuffer state and output-mode configuration."""
    # Pristine background frame (palette index 16 everywhere) and the working
    # pixel buffer: w*h unsigned ints, each a packed 0xRRGGBB color.
    clear = array.array('I', ([16]*(w*h)))
    buffer = array.array('I', clear)
    # Character overlay drawn on top of the pixels by text_small().
    textbuffer = [' ']*(w*h)
    hires = True       # half-block chars give two pixels per terminal row
    antialias = False  # average the two half-pixels instead (when not hires)
    motionblur = False # clear() fades the previous frame instead of wiping
    # Color output modes understood by colorfmt()/render().
    MODE_TRUECOLOR = 0
    MODE_256_DITHER = 1
    MODE_256_FLAT = 2
    MODE_16_DITHER = 3
    MODE_16_FLAT = 4
    MODES = (
        MODE_TRUECOLOR,
        MODE_256_DITHER,
        MODE_256_FLAT,
        MODE_16_DITHER,
        MODE_16_FLAT,
    )
    MODE_NAMES = {
        MODE_TRUECOLOR: 'truecolor',
        MODE_256_DITHER: '256-dither',
        MODE_256_FLAT: '256-flat',
        MODE_16_DITHER: '16-dither',
        MODE_16_FLAT: '16-flat',
    }
    mode = MODE_TRUECOLOR  # currently active output mode
def resize(nw, nh):
    """Resize the framebuffer to nw x nh pixels, discarding its contents."""
    global w, h
    w, h = nw, nh
    blank = array.array('I', [16] * (w * h))
    Demo.clear = blank
    Demo.buffer = array.array('I', blank)
    Demo.textbuffer = [' '] * (w * h)
def clear():
    """Reset the frame: wipe pixels (or fade them when motion blur is on)."""
    # With motionblur, darken the previous frame instead of wiping it.
    Demo.buffer[:] = (darker(c) for c in Demo.buffer) if Demo.motionblur else Demo.clear
    Demo.textbuffer = [' ']*(w*h)
def fill(rgb):
    """Fill the entire framebuffer with one RGB color."""
    v = make_555(rgb)
    Demo.buffer[:] = array.array('I', [v]*len(Demo.clear))
def pixelfont(text):
    """Rasterize ASCII *text* with the embedded 8x8 bitmap font.

    Returns (width, height, pixels): pixels is a row-major bytearray of
    width*height bytes, 0xff where a glyph pixel is set, 0 elsewhere.
    """
    height = 8
    width = len(text) * 8
    pixels = bytearray(height * width)
    for i, c in enumerate(text.encode('ascii')):
        # Each glyph occupies 8 consecutive bytes in fontdat, one per row.
        char = fontdat[c*8:(c+1)*8]
        for y, row in enumerate(char):
            for x in range(8):
                # NOTE(review): the test uses bits 8..1 (1 << (8-x)), not
                # 7..0 — presumably matching this font's packing; confirm.
                if row & (1 << (8-x)) != 0:
                    pixels[(i * 8 + x) + y * width] = 0xff
    return width, height, pixels
def make_555(rgb):
    """Pack a clamped (r, g, b) triple into one 0xRRGGBB integer."""
    packed = 0
    for channel in rgb:
        packed = (packed << 8) | int(max(0, min(255, channel)))
    return packed
def make_256(rgb):
    """Map an (r, g, b) triple onto the xterm 256-color palette.

    Near-grey colors land on the 24-step grayscale ramp (indices 232-255);
    everything else is quantized into the 6x6x6 color cube (16-231).
    """
    r, g, b = (int(max(0, min(255, channel))) for channel in rgb)
    if abs(r - g) < 5 and abs(r - b) < 5:
        # Grayscale ramp, driven by the red channel.
        return int(232 + (255 - 232) * max(0, min(1, r / 255)))
    cube = [int(max(0, min(5, v / 255 * 5))) for v in rgb]
    return 16 + 36 * cube[0] + 6 * cube[1] + cube[2]
def parse_256(value):
    """Convert an xterm-256 palette index back to an (r, g, b) triple.

    Inverse of make_256: indices 0-15 come from the fixed RGB16 table,
    16-231 from the 6x6x6 color cube, and 232-255 from the grayscale ramp.
    """
    if value < 16:
        return RGB16[value]
    elif value < 232:
        value -= 16
        # Decompose the cube index: value = 36*r + 6*g + b, each 0..5.
        # Integer division (//) is required here — the original used float
        # '/', which left fractional quotients and skewed the g/r channels.
        b = value % 6
        value //= 6
        g = value % 6
        value //= 6
        r = value
        return (r*255/5, g*255/5, b*255/5)
    else:
        # Grayscale ramp: map 232..255 back onto 0..255.
        value -= 232
        value = value * 255 / (255 - 232)
        return (value, value, value)
def rgb_diff(a, b):
    """Manhattan distance between two RGB triples."""
    total = 0
    for ca, cb in zip(a, b):
        total += abs(ca - cb)
    return total
def lighter(value):
    """Return *value* with every channel brightened by 20%, capped at 255."""
    out = []
    for channel in value:
        out.append(min(255, int(channel * 1.2)))
    return tuple(out)
lut_256_to_16 = [next(idx for idx, value in sorted(enumerate(RGB16), key=lambda iv: rgb_diff(iv[1], lighter(parse_256(i)))))
for i in range(256)]
def putpixel_555(pos, value):
    """Write packed color *value* at (x, y); out-of-bounds writes are ignored."""
    x, y = pos
    if y < 0 or y >= h or x < 0 or x >= w:
        return
    Demo.buffer[int(y)*w+int(x)] = value
# https://en.wikipedia.org/wiki/Ordered_dithering
# 4x4 Bayer threshold matrices, centered around zero and scaled to the
# quantization amplitude (64 for the 256-color path, 128 for 16 colors).
dither_4x4 = [[(v/16-0.5) * 64 for v in row] for row in
              [(0, 8, 2, 10),
               (12, 4, 14, 6),
               (3, 11, 1, 9),
               (15, 7, 13, 5)]]
dither_4x4_broad = [[(v/16-0.5) * 128 for v in row] for row in
                    [(0, 8, 2, 10),
                     (12, 4, 14, 6),
                     (3, 11, 1, 9),
                     (15, 7, 13, 5)]]
def dither256(v, pos):
    """Offset channel value v by the Bayer threshold at screen position pos."""
    # Pure black/white are left untouched so flat extremes stay clean.
    if v in (0, 255):
        return v
    x, y = pos
    return v + dither_4x4[int(y)%4][int(x)%4]
def dither16(v, pos):
    """Like dither256 but with the wider amplitude for the 16-color palette."""
    if v in (0, 255):
        return v
    x, y = pos
    return v + dither_4x4_broad[int(y)%4][int(x)%4]
def putpixel(pos, rgb):
    """Clamp and write an (r, g, b) pixel at pos (out of bounds is ignored)."""
    putpixel_555(pos, make_555(rgb))
def parse_555(value):
    """Unpack a 0xRRGGBB integer into an (r, g, b) tuple."""
    return tuple((value >> shift) & 0xff for shift in (16, 8, 0))
def darker(c):
    """Scale every channel of a packed color to 80% (used by motion blur)."""
    return make_555((int(x*0.8) for x in parse_555(c)))
def getpixel(pos):
    """Read the (r, g, b) pixel at pos; out-of-bounds reads return black."""
    x, y = pos
    if y < 0 or y >= h or x < 0 or x >= w:
        return (0, 0, 0)
    value = Demo.buffer[int(y)*w+int(x)]
    return parse_555(value)
def lerp(a, b, alpha):
    """Linearly interpolate between a (alpha=0) and b (alpha=1)."""
    weight_b = alpha
    weight_a = 1 - alpha
    return a * weight_a + b * weight_b
def lerp_rgb(a, b, alpha):
    """Channel-wise linear interpolation of two color triples."""
    channels = []
    for ca, cb in zip(a, b):
        channels.append(lerp(ca, cb, alpha))
    return tuple(channels)
def line(a, b, rgb_a, rgb_b=None):
    """Draw a line from a to b with color interpolated from rgb_a to rgb_b.

    DDA-style rasterization: step one pixel at a time along the major axis
    (whichever of dx/dy has the larger magnitude). If rgb_b is omitted the
    line is a single solid color.
    """
    if rgb_b is None:
        rgb_b = rgb_a
    x0, y0 = a
    x1, y1 = b
    dx = x1 - x0
    dy = y1 - y0
    if abs(dx) > abs(dy):
        # Mostly horizontal: one pixel per column.
        if dx < 0:
            # Swap endpoints so stepping always goes in +x.
            x0, x1 = x1, x0
            y0, y1 = y1, y0
            dx *= -1
            dy *= -1
        for x in range(abs(int(dx+1))):
            alpha = x / abs(dx)
            if dx < 0:
                x *= -1
            y = int(x*dy/dx)
            putpixel((x0+x, y0+y), lerp_rgb(rgb_a, rgb_b, alpha))
    else:
        # Mostly vertical (or a single point): one pixel per row.
        if dy < 0:
            x0, x1 = x1, x0
            y0, y1 = y1, y0
            dx *= -1
            dy *= -1
        for y in range(abs(int(dy+1))):
            # 'or 1' guards the degenerate dy == 0 case (single pixel).
            alpha = y / abs(dy or 1)
            if dy < 0:
                y *= -1
            x = int(y*dx/(dy or 1))
            putpixel((x0+x, y0+y), lerp_rgb(rgb_a, rgb_b, alpha))
def circle(center, radius, color):
    """Draw a circle outline using Bresenham's midpoint circle algorithm."""
    # https://iq.opengenus.org/bresenhams-circle-drawing-algorithm/
    def draw(x, y):
        # Plot the point mirrored into all eight octants at once.
        putpixel((center[0]+x, center[1]+y), color)
        putpixel((center[0]-x, center[1]+y), color)
        putpixel((center[0]+x, center[1]-y), color)
        putpixel((center[0]-x, center[1]-y), color)
        putpixel((center[0]+y, center[1]+x), color)
        putpixel((center[0]-y, center[1]+x), color)
        putpixel((center[0]+y, center[1]-x), color)
        putpixel((center[0]-y, center[1]-x), color)
    x = 0
    y = radius
    # Decision parameter: tracks whether the midpoint falls inside the circle.
    decision = 3 - 2 * radius
    draw(x, y)
    while y >= x:
        x += 1
        if decision > 0:
            y -= 1
            decision += 4 * (x - y) + 10
        else:
            decision += 4 * x + 6
        draw(x, y)
def triangle(v):
    """Rasterize a filled, color-interpolated triangle from three Vertex objects.

    Classic scanline fill: vertices are sorted by y, then each scanline
    interpolates x and color along the long edge (v0->v2) and along the
    split upper (v0->v1) / lower (v1->v2) edges, and fills the span between.
    """
    v = sorted(v, key=lambda p: p.pos.y)
    height = v[2].pos.y - v[0].pos.y
    if height == 0:
        # Degenerate triangle; draw nothing
        return
    h_upper = v[1].pos.y - v[0].pos.y
    # Per-scanline tables: long-edge x/color (x1/c1), split-edge x/color
    # (x2/c2), and span widths (xs).
    x1, x2, xs, c1, c2 = [], [], [], [], []
    for i in range(height):
        alpha = float(i) / float(height)
        x1.append(v[0].pos.x + int(float(v[2].pos.x - v[0].pos.x) * alpha))
        c1.append(v[0].color + (v[2].color - v[0].color) * alpha)
        if i < h_upper:
            alpha = float(i) / float(h_upper)
            x2.append(v[0].pos.x + int(float(v[1].pos.x - v[0].pos.x) * alpha))
            c2.append(v[0].color + (v[1].color - v[0].color) * alpha)
        else:
            alpha = float(i - h_upper) / float(height - h_upper)
            x2.append(v[1].pos.x + int(float(v[2].pos.x - v[1].pos.x) * alpha))
            c2.append(v[1].color + (v[2].color - v[1].color) * alpha)
        xs.append(abs(x2[i]-x1[i]))
    y = v[0].pos.y
    for i in range(height):
        # NOTE(review): 's' is computed but never used.
        s = 1 if x1[i] < x2[i] else 0
        xd = 1 if x1[i] < x2[i] else -1
        x = x1[i]
        # Color increment per pixel across the span (guard zero-width spans).
        colord = (c2[i] - c1[i]) / xs[i] if xs[i] != 0 else RGB(0, 0, 0)
        color = c1[i]
        for j in range(xs[i]+1):
            putpixel((x, y), (color.r, color.g, color.b))
            x += xd
            color += colord
        y += 1
class to_file(object):
    """Render sink that appends everything written to a file-like object."""
    def __init__(self, fp):
        self.fp = fp
    def __call__(self, *args):
        write = self.fp.write
        for chunk in args:
            write(chunk)
def to_stdout(*args):
    """Render sink that writes every chunk directly to stdout."""
    for arg in args:
        sys.stdout.write(arg)
def colorfmt(bg, value, x, y):
    """Build the SGR parameter string for packed color *value*.

    bg is 1 for the background channel, 0 for foreground (selects the
    38/48 or 30/40-range codes); (x, y) feeds the ordered-dither matrices
    in the dithered modes.
    """
    if Demo.mode == Demo.MODE_TRUECOLOR:
        # 24-bit color: ESC[38;2;r;g;b / ESC[48;2;r;g;b.
        return f'{3+bg}8;2;{value>>16&0xff};{value>>8&0xff};{value&0xff}'
    elif Demo.mode == Demo.MODE_256_DITHER:
        rgb = parse_555(value)
        rgb = tuple(dither256(v, (x, y)) for v in rgb)
        value = make_256(rgb)
        return f'{3+bg}8;5;{value}'
    elif Demo.mode == Demo.MODE_256_FLAT:
        value = make_256(parse_555(value))
        return f'{3+bg}8;5;{value}'
    elif Demo.mode == Demo.MODE_16_DITHER:
        rgb = parse_555(value)
        rgb = tuple(dither16(v, (x, y)) for v in rgb)
        closest = lut_256_to_16[make_256(rgb)]
        if closest > 7:
            # Bright colors use the aixterm 90/100 SGR range.
            return f'{9+bg}{closest-8}'
        else:
            return f'{3+bg}{closest}'
    elif Demo.mode == Demo.MODE_16_FLAT:
        rgb = parse_555(value)
        closest = lut_256_to_16[make_256(rgb)]
        if closest > 7:
            return f'{9+bg}{closest-8}'
        else:
            return f'{3+bg}{closest}'
    else:
        raise ValueError(Demo.mode)
def render(out):
out('\033[H\033[0m')
current_fore_color = 15
# Fix for xterm black-on-white
out(f'\033[38;5;15m')
current_back_color = 0
# Force the background color too
out(f'\033[48;5;0m')
for y in range(int(h/2)-(1-h%2)):
for x in range(w):
if Demo.hires:
upper_color = Demo.buffer[(y*2+0)*w+x]
lower_color = Demo.buffer[(y*2+1)*w+x]
elif Demo.antialias:
c0 = getpixel((x, y*2+0))
c1 = getpixel((x, y*2+1))
color = (int((c0[0]+c1[0])/2),
int((c0[1]+c1[1])/2),
int((c0[2]+c1[2])/2))
upper_color = lower_color = make_555(color)
else:
upper_color = lower_color = Demo.buffer[(y*2)*w+x]
if upper_color != lower_color:
upper_color = colorfmt(1, upper_color, x, y*2)
lower_color = colorfmt(0, lower_color, x, y*2+1)
else:
upper_color = colorfmt(1, upper_color, x, y*2)
lower_color = colorfmt(0, lower_color, x, y*2)
ch = Demo.textbuffer[y*w+x]
if Demo.hires and ch != ' ':
# Fix for text_small in hires mode
lower_color = upper_color
if current_fore_color != 15:
current_fore_color = 15
# Fix for xterm black-on-white
out(f'\033[38;5;15m')
if upper_color == lower_color == 0:
if current_back_color != 0:
out(f'\033[{upper_color}m')
current_back_color = 0
out(ch)
elif upper_color == lower_color:
if current_back_color != upper_color:
out(f'\033[{upper_color}m')
current_back_color = upper_color
out(ch)
else:
if current_back_color != upper_color:
out(f'\033[{upper_color}m')
current_back_color = upper_color
if current_fore_color != lower_color:
out(f'\033[{lower_color}m')
current_fore_color = lower_color
out('▄')
if y < h-1:
out('\r\n')
out(f'\033[0m')
sys.stdout.flush()
def text_big(text, pos, rgb, scale=(1, 1), rotate=0):
ww, hh, pixels = pixelfont(text)
cx = pos[0] + ww * scale[0] / 2
cy = pos[1] + hh * scale[1] / 2
sr = math.sin(rotate)
cr = math.cos(rotate)
for y in range(int(hh*scale[1])):
scry = pos[1] + y
for x in range(int(ww*scale[0])):
if pixels[int(y/scale[1])*ww+int(x/scale[0])]:
scrx = pos[0] + x
if rotate:
lx = scrx - cx
ly = scry - cy
lx, ly = lx * cr - ly * sr, lx * sr + ly * cr
lx += cx
ly += cy
putpixel((lx, ly), rgb)
else:
putpixel((scrx, scry), rgb)
def text_small(text, pos):
    """Write *text* into the character overlay buffer at character cell pos."""
    x, y = pos
    for i, c in enumerate(text):
        Demo.textbuffer[y*w+x+i] = c
def dark_rectangle(x, y, w, h, darken=0.5):
    # Fake "transparent black" background rectangle: multiplies the existing
    # pixels instead of true alpha blending. Covers h+2 rows — presumably
    # extra padding for text descenders; confirm against callers.
    for yy in range(y, y+h+2):
        for xx in range(x, x+w):
            c = getpixel((xx, yy))
            c = (c[0]*darken, c[1]*darken, c[2]*darken)
            putpixel((xx, yy), c)
def rectangle(x, y, w, h, color):
    """Fill a w x h axis-aligned rectangle whose top-left corner is (x, y)."""
    for row in range(y, y + h):
        for col in range(x, x + w):
            putpixel((col, row), color)
def yields_frames(func):
    """Decorator: adapt a generator-based effect to a per-frame callable.

    The decorated generator is instantiated once; the returned function
    ignores the frame index j and simply advances the generator one step
    per call.
    """
    generator = func()
    def func(j):
        next(generator)
    return func
class LinesPoint:
def __init__(self):
self.pos = Vec2(random.randint(0, w), random.randint(0, h))
self.vel = Vec2(random.uniform(0.1, 5), random.uniform(0.1, 5))
self.color = (random.randint(10, 255),
random.randint(10, 255),
random.randint(10, 255))
def update(self):
self.pos += self.vel
if self.pos.x > w:
self.vel.x *= -0.9
self.pos.x = w
elif self.pos.x < 0:
self.vel.x *= -0.9
self.pos.x = 0
if self.pos.y > h:
self.vel.y *= -0.9
self.pos.y = h
elif self.pos.y < 0:
self.vel.y *= -0.9
self.pos.y = 0
@yields_frames
def lines_demo():
points = [LinesPoint() for _ in range(20)]
y = 0
while True:
for a, b in zip(points[1:], points):
line((a.pos.x, a.pos.y), (b.pos.x, b.pos.y), a.color, b.color)
for point in points:
point.update()
y += 1
yield
def years_coroutine():
frames_per_line = 10
frames_afterglow = 105
shaders = [
('Lines', 'It can draw lines.', lines_demo),
('Circles', 'And circles, too!', circles_demo),
('Triangles', 'Colored and smooth-shaded.', bouncing_triangles),
('Pixels', 'Render whatever you want.', rain),
]
for title, description, background in shaders:
year_text = [description]
for year_lines in range(len(year_text)+1):
for line_frame in range(frames_per_line + (frames_afterglow if year_lines == (len(year_text)) else 0)):
frames_this_page = (year_lines * frames_per_line + line_frame)
page_intro_ratio = frames_this_page / frames_per_line / 3 if year_lines <= 3 else 1
yoff = min(0, -20*(1-easing_bounce(page_intro_ratio)))
yoff = int(yoff)
text_big(title, (1, yoff + 1), (0, 0, 0), (1, 2))
text_big(title, (2, yoff + 1), (255, 255, 255), (1, 2))
dark_rectangle(1, 20, max(len(line) for line in year_text)+4, 2*len(year_text)+2, 0.3)
for y, line in enumerate(year_text[:year_lines+1]):
if y == year_lines-1:
exposed = int(line_frame/2)
line = line.split()
fixed = line[:exposed]
shuffled = line[exposed:]
random.shuffle(shuffled)
line = ' '.join(fixed + shuffled)
elif y == year_lines:
line = line.split()
random.shuffle(line)
line = ' '.join(line)
text_small(line, (3, 11+y))
yield background
yield lightning
class Pos:
def __init__(self, x, y):
self.x = x
self.y = y
class RGB:
    """A color triple supporting channel-wise arithmetic for shading math."""
    def __init__(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b
    def __iter__(self):
        # Enables tuple(rgb) and *rgb argument unpacking.
        yield self.r
        yield self.g
        yield self.b
    def __mul__(self, f):
        return RGB(self.r * f, self.g * f, self.b * f)
    def __truediv__(self, f):
        return RGB(self.r / f, self.g / f, self.b / f)
    def __add__(self, other):
        return RGB(self.r + other.r, self.g + other.g, self.b + other.b)
    def __sub__(self, other):
        return RGB(self.r - other.r, self.g - other.g, self.b - other.b)
class Vertex:
def __init__(self, x, y, color=None):
self.pos = Pos(int(x), int(y))
self.color = color or RGB(255, 255, 255)
def __add__(self, other):
return Vertex(self.pos.x + other.pos.x, self.pos.y + other.pos.y, self.color)
def rotate(self, j):
s = math.sin(j/180*math.pi)
c = math.cos(j/180*math.pi)
x = self.pos.x
y = self.pos.y
return Vertex(x*c-y*s, x*s+y*c)
def recolor(self, color):
res = Vertex(self.pos.x, self.pos.y)
res.color = color
return res
class Bounce:
def __init__(self, **kwargs):
self.x = int(w/2)
self.y = int(h)
self.dx = 2
self.dy = 0.5
self.rot = 1
self.drot = 0
self.rotup = 10
self.hued = 0.05
self.size = 20
self.__dict__.update(kwargs)
def update(self):
self.x += self.dx
if self.x > w or self.x < 0:
self.dx *= -1
self.drot += self.rotup
self.y += self.dy
if self.y > h or self.y < 0:
self.dy *= -1
self.drot += self.rotup
self.rot += self.drot
self.drot *= 0.95
hh, l, s = colorsys.rgb_to_hls(*(self.color / 255))
self.color = RGB(*colorsys.hls_to_rgb(hh+0.001, l, s)) * 255
def triangle(self):
hls = colorsys.rgb_to_hls(self.color.r/255, self.color.g/255, self.color.b/255)
color1 = RGB(*colorsys.hls_to_rgb((hls[0]+self.hued)%1, hls[1], hls[2])) * 255
color2 = RGB(*colorsys.hls_to_rgb((hls[0]+2*self.hued)%1, hls[1], hls[2])) * 255
center = Vertex(int(self.x), int(self.y))
return [center.recolor(self.color) + Vertex(self.size, 0).rotate(self.rot),
center.recolor(color1) + Vertex(self.size, 0).rotate(self.rot+120),
center.recolor(color2) + Vertex(self.size, 0).rotate(self.rot+240)]
@yields_frames
def bouncing_triangles():
bounces = [
Bounce(color=RGB(255, 0, 0)),
Bounce(dx=-.9, dy=-1, color=RGB(0, 255, 0)),
Bounce(dx=.9, dy=1, color=RGB(0, 0, 255)),
Bounce(dx=-1.1, dy=-1.1, color=RGB(0, 255, 255)),
Bounce(dx=1.1, dy=1.1, color=RGB(255, 0, 255)),
Bounce(dx=-1.2, dy=-1.2, color=RGB(255, 255, 0)),
]
bounces = []
for i in range(1, 16):
if i & 7 in (0, 7):
continue
bounces.append(Bounce(
dx=(1.9+0.1*i)*math.sin(i*30*180/math.pi),
dy=(1.9+0.1*i)*math.cos(i*30*180/math.pi),
size=20-i,
color=RGB(255 if (i & 1) else 0, 255 if (i & 2) else 0, 255 if (i & 4) else 0)))
while True:
for bounce in bounces:
bounce.update()
triangle(bounce.triangle())
yield
class Vec2:
    """A simple 2D float vector with arithmetic and length helpers."""
    def __init__(self, x, y):
        self.x = float(x)
        self.y = float(y)
    def __add__(self, other):
        return Vec2(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        return Vec2(self.x - other.x, self.y - other.y)
    def __mul__(self, scalar):
        return Vec2(self.x * scalar, self.y * scalar)
    # NOTE: the old __div__ hook was removed — it is a Python-2-only
    # operator method and dead code here (this file is Python-3-only:
    # it uses f-strings); __truediv__ below is what Python 3 calls.
    def __truediv__(self, scalar):
        return Vec2(self.x / scalar, self.y / scalar)
    def length(self):
        """Euclidean length of the vector."""
        return math.sqrt(self.x**2 + self.y**2)
    def __eq__(self, other):
        # Approximate equality: vectors closer than 1e-4 compare equal.
        # (Defining __eq__ without __hash__ makes Vec2 unhashable.)
        return (self - other).length() < .0001
    def normalize(self):
        """Return a unit-length copy; the zero vector normalizes to (0, 0)."""
        l = self.length()
        if l == 0:
            return Vec2(0, 0)
        return self / l
@yields_frames
def circles_demo():
i = 0
while True:
for y in range(0, h, 4):
x = (10+i+y*8)%(w+10)-5
circle((x, y), 8 + abs(8*math.sin(i*0.1)), (255, x*255/w, y*255/h))
i += 1
yield
def view_image(filename):
from PIL import Image
if isinstance(filename, Image.Image):
im = filename
else:
im = Image.open(filename)
ww, hh = im.size
ox = (w - ww) / 2
oy = (h - hh) / 2
px = im.load()
for y in range(hh):
for x in range(ww):
putpixel((ox+x, oy+y), px[x, y][:3])
del px
@yields_frames
def rain():
a = [0]*(w*h)
b = [0]*(w*h)
def sample(dx, dy):
xx = x + dx
yy = y + dy
if xx < 0 or xx >= w or yy < 0 or yy >= h:
return a[y*w+x]
return a[yy*w+xx]
j = 0
while True:
a[random.randint(0, len(a)-1)] += 1000
for y in range(h):
for x in range(w):
b[y*w+x] = (sample(0, -1) +
sample(0, +1) +
sample(0, 0) * 0.5 +
sample(-1, 0) +
sample(+1, 0)) / 2
c = b[y*w+x]
putpixel((x, y), (0, min(c*4, 255), min(c*3, 255)))
b[y*w+x] *= 0.44
a, b = b, a
j += 1
yield
@yields_frames
def lightning():
class Bolt:
def __init__(self):
self.x0 = random.randint(0, w)
self.x1 = max(0, min(w, self.x0 + random.randint(-20, +20)))
self.dx0 = random.uniform(-0.5, -0.2) if self.x0 > w/2 else \
random.uniform(0.2, 0.5)
self.y0 = 0
self.y1 = h
self.steps = 8
self.lifetime = 0
self.alpha_start =0
def update(self):
self.x0 += self.dx0
self.lifetime += 1
def branch(self):
bolt = Bolt()
where = random.uniform(0, 1)
bolt.y0 = self.y0 * (1 - where) + self.y1 * where
bolt.x0 = self.x0 * (1 - where) + self.x1 * where
bolt.x1 = bolt.x0 + (self.x1 - self.x0)
bolt.y1 = bolt.y0 + random.randint(10, 40)
bolt.dx0 = self.dx0 * (1-where)
bolt.alpha_start = where
return bolt
bolts = [Bolt()]
def gencolor(blend):
r = random.randint(0, 250)
return (r*blend/5,
random.randint(0, r)*blend/5,
random.randint(200, 255)*blend/5)
j = 0
while True:
for bolt in list(bolts):
if (bolt.lifetime) > 30:
bolts.remove(bolt)
if j % 8 == 0:
bolt = Bolt()
bolts.append(bolt)
bolts.append(bolt.branch())
for bolt in bolts:
bolt.update()
last = Vec2(bolt.x0, bolt.y0)
lastcolor = gencolor(bolt.lifetime)
for step in range(bolt.steps):
alpha = (step+1) / bolt.steps
alpha = bolt.alpha_start + alpha * (1 - bolt.alpha_start)
pos = Vec2(lerp(bolt.x0, bolt.x1, alpha) +
random.randint(-3, +3),
lerp(bolt.y0, bolt.y1, alpha))
r = random.randint(0, 150)
color = gencolor(bolt.lifetime * (1-alpha))
line((last.x, last.y), (pos.x, pos.y), lastcolor, color)
last = pos
lastcolor = color
j += 1
yield
class Vec3(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __iter__(self):
return iter((self.x, self.y, self.z))
def __mul__(self, f):
if isinstance(f, Vec3):
return Vec3(self.x * f.x, self.y * f.y, self.z * f.z)
else:
return Vec3(self.x * f, self.y * f, self.z * f)
__rmul__ = __mul__
def __neg__(self):
return self * -1
def __add__(self, other):
return Vec3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Vec3(self.x - other.x, self.y - other.y, self.z - other.z)
def dot(self, other):
return self.x * other.x + self.y * other.y + self.z * other.z
def cross(self, other):
return Vec3(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def length(self):
return math.sqrt(self.length_squared())
def length_squared(self):
return self.dot(self)
def normalized(self):
return self / self.length()
def __truediv__(self, f):
return Vec3(self.x / f, self.y / f, self.z / f)
class Matrix4x4(object):
def __init__(self, m=None):
self.side = 4
if m:
self.matrix = m
else:
self.matrix = [1 if x == y else 0 for y in range(self.side) for x in range(self.side)]
def __mul__(self, other):
a = self.matrix
b = other.matrix
return Matrix4x4([
a[0] * b[0] + a[1] * b[4] + a[2] * b[8] + a[3] * b[12],
a[0] * b[1] + a[1] * b[5] + a[2] * b[9] + a[3] * b[13],
a[0] * b[2] + a[1] * b[6] + a[2] * b[10] + a[3] * b[14],
a[0] * b[3] + a[1] * b[7] + a[2] * b[11] + a[3] * b[15],
a[4] * b[0] + a[5] * b[4] + a[6] * b[8] + a[7] * b[12],
a[4] * b[1] + a[5] * b[5] + a[6] * b[9] + a[7] * b[13],
a[4] * b[2] + a[5] * b[6] + a[6] * b[10] + a[7] * b[14],
a[4] * b[3] + a[5] * b[7] + a[6] * b[11] + a[7] * b[15],
a[8] * b[0] + a[9] * b[4] + a[10] * b[8] + a[11] * b[12],
a[8] * b[1] + a[9] * b[5] + a[10] * b[9] + a[11] * b[13],
a[8] * b[2] + a[9] * b[6] + a[10] * b[10] + a[11] * b[14],
a[8] * b[3] + a[9] * b[7] + a[10] * b[11] + a[11] * b[15],
a[12] * b[0] + a[13] * b[4] + a[14] * b[8] + a[15] * b[12],
a[12] * b[1] + a[13] * b[5] + a[14] * b[9] + a[15] * b[13],
a[12] * b[2] + a[13] * b[6] + a[14] * b[10] + a[15] * b[14],
a[12] * b[3] + a[13] * b[7] + a[14] * b[11] + a[15] * b[15],
])
__rmul__ = __mul__
def map_vec3(self, v3):
p = (v3.x, v3.y, v3.z, 1.)
p = [sum(p[row] * self.matrix[i * 4 + row] for row in range(4)) for i, v in enumerate(p)]
return Vec3(p[0] / p[3], p[1] / p[3], p[2] / p[3])
@staticmethod
def translation(x, y, z):
return Matrix4x4([1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1])
@staticmethod
def rotation(angle, x, y, z):
x, y, z = Vec3(x, y, z).normalized()
c = math.cos(angle / 180 * math.pi)
s = math.sin(angle / 180 * math.pi)
return Matrix4x4([
x * x * (1 - c) + 1 * c, x * y * (1 - c) - z * s, x * z * (1 - c) + y * s, 0,
y * x * (1 - c) + z * s, y * y * (1 - c) + 1 * c, y * z * (1 - c) - x * s, 0,
x * z * (1 - c) - y * s, y * z * (1 - c) + x * s, z * z * (1 - c) + 1 * c, 0,
0, 0, 0, 1,
])
@classmethod
def perspective(cls, fovy, aspect, zNear, zFar):
f = math.cos(fovy / 2) / math.sin(fovy / 2)
return cls([f / aspect, 0, 0, 0, 0, f, 0, 0, 0, 0, (zFar + zNear) /
(zNear - zFar), (2 * zFar * zNear) / (zNear - zFar), 0, 0, -1, 0])
def easing_bounce(p):
    """Piecewise-parabolic bounce easing, mapping p in [0, 1] to [0, 1]."""
    if p < 4 / 11:
        return (121 * p * p) / 16.0
    if p < 8 / 11:
        return (363 / 40.0 * p * p) - (99 / 10.0 * p) + 17 / 5.0
    if p < 9 / 10:
        return (4356 / 361.0 * p * p) - (35442 / 1805.0 * p) + 16061 / 1805.0
    return (54 / 5.0 * p * p) - (513 / 25.0 * p) + 268 / 25.0
@yields_frames
def cube():
front = [
# front
Vec3(-10, +10, +10),
Vec3(-10, -10, +10),
Vec3(+10, +10, +10),
Vec3(+10, -10, +10),
# back
Vec3(-10, +10, -10),
Vec3(-10, -10, -10),
Vec3(+10, +10, -10),
Vec3(+10, -10, -10),
# left
Vec3(-10, +10, -10),
Vec3(-10, -10, -10),
Vec3(-10, +10, +10),
Vec3(-10, -10, +10),
# right
Vec3(+10, +10, -10),
Vec3(+10, -10, -10),
Vec3(+10, +10, +10),
Vec3(+10, -10, +10),
# top
Vec3(+10, +10, -10),
Vec3(-10, +10, -10),
Vec3(+10, +10, +10),
Vec3(-10, +10, +10),
# bottom
Vec3(+10, -10, -10),
Vec3(-10, -10, -10),
Vec3(+10, -10, +10),
Vec3(-10, -10, +10),
]
colors = [tuple(int(255*x) for x in colorsys.hls_to_rgb(0.05+0.6*i/6, 0.5, 0.9)) for i in range(6)]
tris = [
# front
((0, 1, 2), colors[0]),
((2, 1, 3), colors[0]),
# back
((4, 6, 5), colors[1]),
((6, 7, 5), colors[1]),
# left
((8, 9, 10), colors[2]),
((10, 9, 11), colors[2]),
# right
((12, 14, 13), colors[3]),
((14, 15, 13), colors[3]),
# top
((16, 17, 18), colors[4]),
((18, 17, 19), colors[4]),
# bottom
((20, 22, 21), colors[5]),
((22, 23, 21), colors[5]),
]
def clipspace2screenspace(v):
return Vec2(v.x*fx+w/2, v.y*fy+h/2)
def clipspace2screenspace_cube(v):
pos = clipspace2screenspace(v)
pos.y += 30*(easing_bounce(min(1, (j-20)/30))-1)
return (pos.x, pos.y)
def rnz():
return random.uniform(0.01, 1)*random.choice([-1, 1])
stars = [Vec3(rnz(), rnz(), rnz()).normalized() * 40 for _ in range(90)]
j = 0
while True:
tm = 0.05 * j
s = math.sin(tm)
c = math.cos(tm)
delayed = max(0, min(1, (j-30)/50))
axis = Vec3(s*delayed, c*delayed, s*c*delayed if delayed != 0 else 1).normalized()
factor = 1.5+(0.5+0.5*s)*min(1, j/60)
fx = w*factor
fy = h*factor
p = Matrix4x4.perspective(116/180*math.pi, 16/10, 0.01, 100)
m = Matrix4x4.rotation(j*4*delayed, axis.x, axis.y, axis.z)
t = Matrix4x4.translation(0, 0, -50)
rotated = [p.map_vec3(t.map_vec3(m.map_vec3(v))) for v in front]
stars_mapped = [p.map_vec3(t.map_vec3(m.map_vec3(s))) for s in stars]
for star in stars_mapped:
pos = clipspace2screenspace(star)
putpixel((pos.x, pos.y), (128, 128, 128))
idx = 0
for (tri, color) in tris:
a, b, c = [rotated[idx] for idx in tri]
normal = (b - a).cross(c - a)
if idx % 2 == 0:
n = normal
# https://stackoverflow.com/a/9120171/1047040
if normal.z > 0:
triangle([Vertex(*clipspace2screenspace_cube(v), RGB(*color)*(0.2+n.z*14))
for v in [a, b, c]])
idx += 1
j += 1
yield
def come_from_center_coroutine(lines):
sc = (60, 60, 60)
sh = h
th = 8*2
for j in range(80):
ts = min(1, j/10)
ts = ts
y0 = (sh-((th)*len(lines)*ts))/2 - 1
for idx, line in enumerate(lines):
tw = len(line)*8
y = (sh-(th*ts)) / 2 * (1 - ts) + (y0+th*idx) * ts
y += int(9 * math.sin(ts*math.pi))
x = (w-tw*ts)/2-1
text_big(line, (x+1-2*(idx%2), y), sc, (ts, ts*2))
text_big(line, (x, y), (255, 255, 255), (ts, ts*2))
yield
def fade_to_black_coroutine(frames):
    """Overlay: darken the whole framebuffer towards black over *frames* steps.

    Yields once per frame; each frame scales every pixel by a factor that
    shrinks linearly from 1 to near 0.
    """
    for i in range(frames):
        # Hoisted out of the pixel loops: the factor is constant per frame,
        # so there is no need to recompute it w*h times.
        darken = 1 - i/frames
        for y in range(h):
            for x in range(w):
                c = getpixel((x, y))
                c = (c[0]*darken, c[1]*darken, c[2]*darken)
                putpixel((x, y), c)
        yield
@contextlib.contextmanager
def no_cursor():
    """Context manager: clear the terminal and hide the cursor for the demo.

    On exit (including exceptions) the screen is cleared again, attributes
    are reset and the cursor is re-shown, so the terminal is left usable.
    """
    try:
        # ANSI: clear screen, hide cursor
        to_stdout('\033[2J', '\033[?25l')
        yield
    finally:
        # ANSI: clear screen, home cursor, reset attributes, show cursor
        to_stdout('\033[2J\033[H\033[0m', '\033[?25h')
def run_demo(out):
    """Drive the demo: render the background shader while playing overlays in order.

    Each overlay is a generator; when one is exhausted the next begins.
    An overlay may replace the background shader by yielding a callable.
    Frames are paced to roughly 25 fps (0.04 s per frame).
    """
    Demo.mode = Demo.MODE_256_DITHER
    background_shader = cube
    overlays = [
        come_from_center_coroutine(['thp.io', 'presents']),
        come_from_center_coroutine(['ttyfb 0.1', 'preview', 'for python']),
        years_coroutine(),
        come_from_center_coroutine(['create', 'something', 'awesome!']),
        fade_to_black_coroutine(60),
    ]
    overlay = overlays.pop(0)
    with no_cursor():
        j = 0
        while True:
            started_time = time.time()
            clear()
            background_shader(j)
            try:
                # An overlay yielding a truthy value swaps in a new shader.
                background_shader = next(overlay) or background_shader
            except StopIteration:
                if not overlays:
                    break
                overlay = overlays.pop(0)
            render(out)
            j += 1
            # Sleep away whatever is left of the 40 ms frame budget.
            time.sleep(max(0, 0.04-(time.time() - started_time)))
# Entry point: run the demo until it finishes or the user presses Ctrl-C.
if __name__ == '__main__':
    try:
        run_demo(to_stdout)
    except KeyboardInterrupt:
        # Swallow Ctrl-C; no_cursor()'s finally block already restored the terminal.
        ...
| thp/ttyfb | ttyfb.py | ttyfb.py | py | 36,132 | python | en | code | 3 | github-code | 13 |
# Small dictionary demo: string keys mapped to mixed value types.
myDict = {
    "laptop" : "An electronic machine",
    "parth " : "A simple boy",   # note: key deliberately contains a trailing space
    "number" : [1,3,5],
    "anotherDict" : {"Parth": "Coder"}
}

# Dictionary keys are case-sensitive: they must be looked up exactly as
# declared ("Laptop"/"Number" would raise KeyError).
print(myDict["laptop"])
print(myDict["number"])

# It's an example of nested key
# Dictionary - Key:Value
print(myDict["anotherDict"]["Parth"])
| parthvashishtha/Python | Py.learning_files/Dictionary_syntax.py | Dictionary_syntax.py | py | 292 | python | en | code | 0 | github-code | 13 |
# Sort the letters of every word in the string alphabetically (case-insensitive).
s = 'Hello World'
new_s = " ".join("".join(sorted(word.lower())) for word in s.split(' '))
print(new_s)
35160618917 | from datetime import datetime
from logging import debug
import logging
from model.ImagePost import ImagePost
from model.scoring import compute_score
from model.scoring import get_time_penalty
from persistence.Database import Database
from persistence.ImageStore import ImageStore
from scraper.integration import get_all_ylyl_image_posts, FILE_BASE_URL
# Size of the winner grid shown by the frontend.
NUMBER_OF_WINNERS = 9


class Scraper:
    """Orchestrates one scrape cycle: fetch YLYL posts, pick winners, sync files."""

    def __init__(self, db: Database, dl_folder: ImageStore):
        self.db = db
        self.dl_folder = dl_folder

    def main(self):
        """Run a full update pass against the database and image store."""
        db = self.db
        blacklist = db.get_blacklist()
        posts = get_all_ylyl_image_posts()
        # Blacklist entries whose post has disappeared can be dropped.
        clean_blacklist(db, blacklist, posts.keys())
        existing_winners = db.get_grid_items()
        # Replace stale winner objects with their freshly scraped versions.
        refreshed_winners = refresh_winners(existing_winners, posts)
        new_winners = update_winners(posts.values(), refreshed_winners, blacklist)
        # Sync the image store before persisting the new grid.
        clean_download_folder(self.dl_folder, new_winners)
        download_files(self.dl_folder, new_winners, blacklist)
        db.save_grid_items(new_winners)
        print_status(new_winners)
def refresh_winners(existing_winners: list[ImagePost], posts: dict[int, ImagePost]):
    """Replace each stored winner with its freshly scraped version, when available."""
    return [refresh(entry, posts) for entry in existing_winners]


def refresh(winner, posts):
    """Return the scraped post matching *winner*'s id, or *winner* itself if absent."""
    return posts.get(winner.id, winner)
def get_top_candidates(n, posts: list[ImagePost]) -> list[tuple[ImagePost, int]]:
    """Score every post and return the best *n* as (post, score) pairs, worst-first.

    The result is reversed so that callers iterating it see ascending scores.
    """
    scored = [(entry, compute_score(entry)) for entry in posts]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    best = scored[:n]
    best.reverse()
    return best
def clean_blacklist(db: Database, blacklist: list[int], posts: list[int]):
    """Remove blacklist entries whose post id is no longer among the scraped posts."""
    for entry in blacklist:
        if entry not in posts:
            db.remove_from_blacklist(entry)
def update_winners(posts: list[ImagePost], winners: list[ImagePost], blacklist: list[int]) -> list[ImagePost]:
    """Merge freshly scraped posts into the current winner grid.

    Each top candidate replaces the current lowest-scoring winner whenever it
    outscores it. Blacklisted winners are forced to a score of -1000000 so
    they are evicted first.
    """
    # Never consider blacklisted posts or posts that are already winners.
    to_exclude = blacklist + [post.id for post in winners]
    filtered_posts = list(filter(lambda post: post.id not in to_exclude, posts))
    candidates = get_top_candidates(NUMBER_OF_WINNERS, filtered_posts)
    current_winners = [(post, compute_score(post)) for post in winners]
    current_winners = [(post, -1000000 if post.id in blacklist else score)for post, score in current_winners]
    for candidate, score in candidates:
        position, existing_score = lowest_score(current_winners)
        # NOTE(review): when `winners` has fewer than NUMBER_OF_WINNERS entries,
        # lowest_score() returns the sentinel 1000000, so no candidate can ever
        # be added and the grid cannot grow from empty — confirm whether the
        # grid is expected to be pre-seeded elsewhere.
        if score > existing_score:
            current_winners[position] = (candidate, score)
    return [winner[0] for winner in current_winners]
def lowest_score(posts: list[tuple[ImagePost, int]]):
    """Return (index, score) of the lowest-scored entry.

    Ties keep the earliest entry; an empty list yields the sentinel (0, 1000000).
    """
    best_index, best_value = 0, 1000000
    for index, (_, value) in enumerate(posts):
        if value < best_value:
            best_index, best_value = index, value
    return best_index, best_value
def download_files(dl_folder: ImageStore, winners: list[ImagePost], blacklist: list[int]):
    """Download the image and thumbnail for every non-blacklisted winner.

    Keeps the original best-effort semantics: the first failure is logged and
    aborts the remaining downloads.
    """
    for post in winners:
        if post.id not in blacklist:
            try:
                dl_folder.download_file(FILE_BASE_URL + post.image, post.image)
                dl_folder.download_file(FILE_BASE_URL + post.thumb, post.thumb)
            except Exception as e:
                # str(e) is valid for any exception; the previous `e.reason`
                # only exists on some urllib error types, so the handler
                # itself could raise AttributeError.
                print("Failed to download files: " + str(e))
                return
def clean_download_folder(dl_folder: ImageStore, winners: list[ImagePost]):
    """Keep only the winners' images and thumbnails in the download folder."""
    images = [entry.image for entry in winners]
    thumbs = [entry.thumb for entry in winners]
    dl_folder.clean(images + thumbs)
def print_status(winners: list[ImagePost]):
    """Log each winner's id, score, staleness and time penalty at debug level."""
    debug("Status:")
    for position, winner in enumerate(winners):
        seconds_since_seen = round(datetime.now().timestamp() - winner.last_seen.timestamp())
        debug("Position " + str(position) +
              " --- id: " + str(winner.id) +
              ", score: " + str(compute_score(winner)) +
              ", last seen: " + str(seconds_since_seen) + "s ago" +
              ", time penalty: " + str(get_time_penalty(winner)))
| how2die/chan-backend | src/scraper/Scraper.py | Scraper.py | py | 3,941 | python | en | code | 0 | github-code | 13 |
22434578705 | from fastapi import APIRouter, HTTPException
from elasticsearch.exceptions import NotFoundError, ConnectionError
from typing import Optional
from app.connections import es, test_logger
import app.routers.envLog as envLog
router = APIRouter(
    tags=["search"]
)


@router.get("/search_cv")
def read_item(q: Optional[str] = None, contactInfoOnly: bool = False):
    """Search CVs in Elasticsearch; optionally strip the 'info' field.

    Returns the raw ES hits, an empty list when the index is missing, and a
    500 when the ES instance is unreachable.
    """
    excluded_fields = "info" if contactInfoOnly else ""
    try:
        test_logger.info('Search executed : ' + str(q))
        if q:
            logs = es.search(index="cv_search",
                             query={"match": {"info": q}},
                             _source_excludes=excluded_fields)
        else:
            logs = es.search(index="cv_search",
                             query={"match_all": {}},
                             _source_excludes=excluded_fields)
        return logs['hits']['hits']
    except NotFoundError:
        return []
    except ConnectionError:
        envLog.logFunction("error", 'Tried to reach "/search_cv", status : 500 - Internal Server Error (Cant reach ES instance)')
        raise HTTPException(status_code=500, detail="Internal Server Error")
| AlessandroRinaudo/elastic-search-project | app/routers/search.py | search.py | py | 1,114 | python | en | code | 0 | github-code | 13 |
def cyclic_sort(nums):
    """In-place cyclic sort: value v belongs at index v-1; duplicates stay put.

    Returns the same (mutated) list for convenience.
    """
    pos = 0
    while pos < len(nums):
        target = nums[pos] - 1
        if nums[pos] == pos + 1 or nums[pos] == nums[target]:
            # Already home, or its home slot holds an equal value (duplicate).
            pos += 1
        else:
            nums[pos], nums[target] = nums[target], nums[pos]
    return nums
def find_duplicate(nums):
    """Return the distinct duplicated values in nums (values in 1..len(nums))."""
    arr = cyclic_sort(nums)
    duplicates = []
    for idx, value in enumerate(arr):
        # After the cyclic sort a slot holding the wrong value marks a duplicate.
        if value != idx + 1 and value <= len(arr) and value not in duplicates:
            duplicates.append(value)
    return duplicates
def cyclic_sort_and_duplicates(nums):
    """Cyclic-sort *nums* in place while collecting each distinct duplicate once."""
    pos = 0
    duplicates = []
    while pos < len(nums):
        target = nums[pos] - 1
        if nums[pos] != pos + 1 and nums[pos] != nums[target]:
            nums[pos], nums[target] = nums[target], nums[pos]
        else:
            pos += 1
            # The element just passed is a duplicate if it is out of place but
            # an equal value already sits in its home slot, and we have not
            # recorded it yet.
            value = nums[pos - 1]
            if value != pos and value == nums[value - 1] and value not in duplicates:
                duplicates.append(value)
    return duplicates


print(cyclic_sort_and_duplicates([1, 4, 4, 3, 2]))
print(cyclic_sort_and_duplicates([2, 1, 3, 3, 5, 4]))
print(cyclic_sort_and_duplicates([2,1]))
| Abelatnafu/educativeio | pattern_cyclic_sort/find_the_duplicate_number.py | find_the_duplicate_number.py | py | 1,329 | python | en | code | 0 | github-code | 13 |
15918621202 | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from copy import deepcopy
# All eight winning lines on a board numbered 1-9 (rows, columns, diagonals).
winStates = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 4, 7], [2, 5, 8], [3, 6, 9], [1, 5, 9], [3, 5, 7]]
# Turn flags: player1 plays X (via onclick), player2 plays O (the AI).
player1 = True
player2 = False
class node :
    """Game-tree node for the tic-tac-toe AI.

    Holds the X positions, O positions, remaining empty cells, the move that
    produced this state, and a heuristic used by play() to pick the AI's move.
    """

    def __init__(self, statex, stateo, empty, newstep):
        self.xstate = statex          # cells (1-9) occupied by X
        self.ostate = stateo          # cells (1-9) occupied by O
        self.empty = empty            # cells still free
        self.newstep = newstep        # move that led to this state
        self.Children = []
        self.similar_states = []      # symmetric/rotated equivalents of children
        self.Huristic = self.getHuristic(self.newstep)

    def Equal(self,node):
        """True when both states contain exactly the same X and O cells."""
        if set(self.xstate) == set(node.xstate)and set(self.ostate) == set(node.ostate):
            return True
        return False

    def getHuristic(self,newstep):
        """Score this state: 999 opponent win, 99 completes a line, else count
        of winning lines still open for the opponent."""
        H = 0
        global winStates
        # Whose turn it is flips with the parity of the remaining empty cells.
        if not len(self.empty) % 2 == 0:
            xlist = self.xstate
            olist = self.ostate
        else:
            olist = self.xstate
            xlist = self.ostate
        # NOTE(review): the endgame guard (<= 5 empty cells) only gates the
        # opponent-win check — confirm this matches the intended evaluation.
        if len(self.empty) <= 5:
            for L in winStates:
                if L[0] in olist and (L[1] in olist) and (L[2] in olist):
                    H = 999
                    return H
        # newstep completing a line with two existing marks is near-winning.
        for L in winStates:
            if ((L[0] in xlist) and (L[1] in xlist)) and (L[2] == newstep):
                H = 99
                return H
            elif ((L[0] in xlist) and (L[2]in xlist)) and (L[1] == newstep):
                H = 99
                return H
            elif ((L[1] in xlist) and (L[2] in xlist)) and (L[0] == newstep):
                H = 99
                return H
        # Otherwise: count lines the opponent touches that X has not blocked.
        for L in winStates:
            if (L[0] in olist) or (L[1] in olist) or (L[2] in olist):
                if (L[0] not in xlist) and (L[1] not in xlist)and (L[2] not in xlist):
                    H = H+1
        return H

    def Build(self):
        """Expand one ply: create a child for every empty cell (O on even
        parity of remaining cells, X otherwise)."""
        if len(self.empty) % 2 == 0:
            for o in self.empty:
                E = list(self.empty)
                oS = list(self.ostate)
                E.pop(E.index(o))
                oS.append(o)
                child = node(self.xstate, oS, E, o)
                self.add_child(child)
        else:
            for x in self.empty:
                E = list(self.empty)
                xS = list(self.xstate)
                E.pop(E.index(x))
                xS.append(x)
                child = node(xS, self.ostate, E, x)
                child.newstep = x
                self.add_child(child)

    def add_child(self, Child):
        """Append Child unless an equivalent (rotated/reflected) child exists."""
        if not len(self.Children):
            self.Children.append(Child)
        else:
            if not self.chick_repeatation(Child):
                self.Children.append(Child)

    def chick_repeatation(self, Child):
        """Return True when Child duplicates an existing child under the
        symmetry group (rotations and the right reflection); the duplicate is
        recorded in the existing child's similar_states."""
        Child1 = deepcopy(Child)
        for x in range(len(self.Children)):
            if Child1.Equal(self.Children[x]):
                print("right")   # NOTE(review): leftover debug output
                self.Children[x].similar_states.append(Child)
                return True
            else:
                s = True
                while s:
                    if Child1.Equal(self.Children[x]) or Child1.reflect().Equal(self.Children[x]) :
                        self.Children[x].similar_states.append(Child)
                        return True
                    Child1 = Child1.rotate()
                    # Stop once a full rotation cycle returns to the original.
                    if Child1.Equal(Child):
                        s = False
        return False

    def reflect(self):
        """Return a copy of this state mirrored left-right."""
        self1 = deepcopy(self)
        reflection_right_list = [3, 2, 1, 6, 5, 4, 9, 8, 7]
        for x in range(len(self1.xstate)):
            self1.xstate[x] = reflection_right_list[self1.xstate[x]-1]
        for O in range(len(self1.ostate)):
            self1.ostate[O] = reflection_right_list[self1.ostate[O]-1]
        for e in range(len(self1.empty)):
            self1.empty[e] = reflection_right_list[self1.empty[e]-1]
        return self1

    def rotate(self):
        """Return a copy of this state rotated 90 degrees."""
        copy = deepcopy(self)
        rotatelist = [7, 4, 1, 8, 5, 2, 9, 6, 3]
        for x in range(len(copy.xstate)):
            copy.xstate[x] = rotatelist[copy.xstate[x]-1]
        for O in range(len(copy.ostate)):
            copy.ostate[O] = rotatelist[copy.ostate[O]-1]
        for e in range(len(copy.empty)):
            copy.empty[e] = rotatelist[copy.empty[e]-1]
        return copy

    def play(self):
        """Pick the highest-heuristic child and return [its move, the child].

        NOTE(review): implicitly returns None when there are no children —
        callers index the result, so confirm Build() always ran first.
        """
        if len(self.Children):
            max = deepcopy(self.Children[0])   # shadows builtin max
            for x in range(len(self.Children)):
                if max.Huristic <= self.Children[x].Huristic:
                    max = deepcopy(self.Children[x])
            return list([max.newstep, max])
# Initial empty board state shared by all handlers.
xlist = []
olist = []
empty = [1, 2, 3, 4, 5, 6, 7, 8, 9]
CurrentGUIState = node(xlist, olist, empty, 0)

# Tk window and widget theme.
root = Tk()
root.title("TicTacToy")
style = ttk.Style()
style.theme_use('classic')
def ChickWinning(State):
    """Return True when the side whose turn flag is set has completed a line."""
    if player1:
        marks = State.xstate
    elif player2:
        marks = State.ostate
    else:
        return False
    for line in winStates:
        if all(cell in marks for cell in line):
            return True
    return False
def switchstate():
    """Toggle the global turn flags between player 1 and player 2."""
    global player1
    global player2
    player1, player2 = player2, player1
def X_O(location, value):
    """Render *value* on the board button at *location* (1-9) and disable it."""
    board = {1: but1, 2: but2, 3: but3, 4: but4,
             5: but5, 6: but6, 7: but7, 8: but8}
    # Location 9 (and any unexpected value) falls through to the last button,
    # matching the original if/elif chain's final else.
    board.get(location, but9).config(text=value, state="disabled")
def let_player2_play():
    """Let the AI (O) choose and play its move, then hand the turn back.

    On an AI win, shows a dialog and disables the remaining cells.
    """
    global CurrentGUIState
    laststate = deepcopy(CurrentGUIState)   # NOTE(review): unused — confirm intent
    c = CurrentGUIState.play()
    newlocation = c[0]
    CurrentGUIState = c[1]
    X_O(newlocation, "O")
    if not ChickWinning(CurrentGUIState):
        switchstate()
    else:
        messagebox.showinfo(title="congratulations", message="you Lose")
        # Blank out and disable whatever cells remain.
        for i in CurrentGUIState.empty:
            X_O(i, " ")
def onclick(location):
    """Handle a human click: play X at *location*, then let the AI respond.

    Detects the three terminal outcomes: human win, draw (board full),
    or the game continuing with the AI's move.
    """
    # Move the clicked cell from empty to X's positions.
    CurrentGUIState.xstate.append(CurrentGUIState.empty.pop(CurrentGUIState.empty.index(location)))
    CurrentGUIState.Build()
    if player1:
        X_O(location, "X")
        if not ChickWinning(CurrentGUIState):
            if len(CurrentGUIState.empty):
                switchstate()
                let_player2_play()
            else:
                messagebox.showinfo(title="VOid", message="there is No Winner")
        else:
            messagebox.showinfo(title="congratulations", message="winner winner")
            for i in CurrentGUIState.empty:
                X_O(i, " ")
# 3x3 grid of board buttons; clicking one plays X at that cell number.
but1 = ttk.Button(root, text=' ', command=lambda: onclick(1))
but1.grid(row=0, column=0, sticky='snew', ipadx=40, ipady=40)
but2 = ttk.Button(root, text=' ', command=lambda: onclick(2))
but2.grid(row=0, column=1, sticky='snew', ipadx=40, ipady=40)
but3 = ttk.Button(root, text=' ', command=lambda: onclick(3))
but3.grid(row=0, column=2, sticky='snew', ipadx=40, ipady=40)
but4 = ttk.Button(root, text=' ', command=lambda: onclick(4))
but4.grid(row=1, column=0, sticky='snew', ipadx=40, ipady=40)
but5 = ttk.Button(root, text=' ', command=lambda: onclick(5))
but5.grid(row=1, column=1, sticky='snew', ipadx=40, ipady=40)
but6 = ttk.Button(root, text=' ', command=lambda: onclick(6))
but6.grid(row=1, column=2, sticky='snew', ipadx=40, ipady=40)
but7 = ttk.Button(root, text=' ', command=lambda: onclick(7))
but7.grid(row=2, column=0, sticky='snew', ipadx=40, ipady=40)
but8 = ttk.Button(root, text=' ', command=lambda: onclick(8))
but8.grid(row=2, column=1, sticky='snew', ipadx=40, ipady=40)
but9 = ttk.Button(root, text=' ', command=lambda: onclick(9))
but9.grid(row=2, column=2, sticky='snew', ipadx=40, ipady=40)

# Start the Tk event loop (blocks until the window is closed).
root.mainloop()
| anaas8/Tic-Tac-Toe | TicTacToe.py | TicTacToe.py | py | 8,461 | python | en | code | 0 | github-code | 13 |
def solution(s):
    """Convert a string mixing digits and English number words into the number.

    Example: "one4seveneight" -> 1478. The original stub only printed the
    index of the first matching word and always returned 0.
    """
    words = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    for digit, word in enumerate(words):
        s = s.replace(word, str(digit))
    return int(s)


solution("oneoneone")
73473280017 | from rest_framework.routers import DefaultRouter
from django.urls import include, path
from .views import (UserViewSet, TagViewSet, IngredientViewSet,
RecipeViewSet, FavoriteRecipeView, ShoppingCartView,
download_shopping_cart)
# DRF router exposing the standard CRUD routes for each viewset.
router = DefaultRouter()
router.register('tags', TagViewSet)
router.register('ingredients', IngredientViewSet)
router.register('recipes', RecipeViewSet)
router.register('users', UserViewSet)

# The literal download path is listed before the router include so it is
# matched before any router-generated recipe routes.
urlpatterns = [path('recipes/download_shopping_cart/', download_shopping_cart),
               path('', include(router.urls)),
               path('recipes/<int:recipe_id>/favorite/',
                    FavoriteRecipeView.as_view()),
               path('recipes/<int:recipe_id>/shopping_cart/',
                    ShoppingCartView.as_view()),
               ]
| unnamestr/foodgram-project-react | backend/api/urls.py | urls.py | py | 826 | python | en | code | 0 | github-code | 13 |
class RuntimeConfig(object):
    """Command-line configuration for the COVID19 data-visualization app.

    Parsed options are exposed as the dict ``self.args`` with keys
    ``data_root``, ``port`` and ``debug``.
    """

    def __init__(self, argv):
        from argparse import ArgumentParser

        arg_parser = ArgumentParser(description='COVID19 Data Visualization')
        arg_parser.add_argument(
            '-d', '--data-root',
            help='The path to the root COVID19 data directory')
        arg_parser.add_argument(
            '-p', '--port', type=int, default=8090,
            help='The port from which the application should be served')
        arg_parser.add_argument(
            '--debug', type=bool, default=False,
            help='Debug the application')
        # argv[0] is the program name, so parse everything after it.
        self.args = vars(arg_parser.parse_args(argv[1:]))
3189756409 | import json
import jieba
# Load the aggregated picture metadata produced earlier in the build pipeline.
input_pic = json.loads(open('build/all.json', 'r', encoding='utf-8').read())
# Inverted index: token -> list of picture PIDs whose description contains it.
tags = {}
def p_content(_pic):
    """Tokenize the picture's description and index its PID under every token."""
    for token in jieba.cut(_pic['p_content']):
        pid_list = tags.setdefault(token, [])
        if _pic['PID'] not in pid_list:
            pid_list.append(_pic['PID'])
# Index every picture: today's entries plus each archived day.
for v in input_pic['today']:
    p_content(v)
for v in input_pic['sort_map']:
    for pic in input_pic['archive'][v]:
        p_content(pic)

# Persist the inverted index for the frontend.
with open('build/tags.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(tags, ensure_ascii=False))
    f.close()   # redundant: the with-block already closes the file
| gggxbbb/TuPics | tags.py | tags.py | py | 622 | python | en | code | 2 | github-code | 13 |
25542591251 | import json
import os
import re
from datetime import date, datetime
import requests
from django.core import serializers
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from dotenv import load_dotenv
from data2.datamanager import *
from data.dep_estadual import deputados_estaduais as dep_e
from data.dep_federal import deputados_federais as dep_f
from .models import Registrador
# Load environment variables from .env, then read the Telegram bot token.
load_dotenv()
TOKEN = os.getenv('TOKEN')
def inserir_dado(usuario="", user_id="", data='', rep_dep="", is_writable="",locale_is="", tipo=""):
    """Create and persist a new Registrador row with the given field values."""
    registro = Registrador(nomeuser=usuario, user_ident=user_id, data=data,
                           rep_dep=rep_dep, is_writable=is_writable,
                           locale_is=locale_is, tipo=tipo)
    registro.save()
def todo_banco():
    """Return every Registrador row, deserialized from Django's JSON form."""
    serialized = serializers.serialize("json", Registrador.objects.all())
    return json.loads(serialized)
def buscar_id_user(idd_user):
    """Look up the user by Telegram chat id.

    Returns the row's "fields" dict, or an empty list when no row exists.
    """
    serialized = serializers.serialize("json", Registrador.objects.filter(user_ident=idd_user))
    records = json.loads(serialized)
    if not records:
        return records
    return records[0]["fields"]
def edit_data(alvo, novo_valor, idd_user):
    """Update a single field of the user's Registrador row and save it.

    ``alvo`` is the public field key; 'user_id' maps onto the model attribute
    'user_ident', all other keys match their attribute names 1:1. Unknown
    keys change nothing but the row is still saved.
    """
    attribute_by_key = {
        'nomeuser': 'nomeuser',
        'user_id': 'user_ident',
        'data': 'data',
        'rep_dep': 'rep_dep',
        'is_writable': 'is_writable',
        'locale_is': 'locale_is',
        'tipo': 'tipo',
    }
    registro = Registrador.objects.get(user_ident=idd_user)
    if alvo in attribute_by_key:
        setattr(registro, attribute_by_key[alvo], novo_valor)
    registro.save()
def remover_elm(idd_user):
    """Delete the user's Registrador row; returns Django's delete() summary."""
    return Registrador.objects.get(user_ident=idd_user).delete()
#---------------------------------#------------------------------#----------------------------#
def form_data(timestam):
    """Format a Unix timestamp as a dd/mm/yyyy date string (local time)."""
    return date.fromtimestamp(timestam).strftime('%d/%m/%Y')
def get_message(text, chat_id):
    """Send *text* to *chat_id* via the Telegram sendMessage API.

    NOTE(review): duplicates send_message — presumably kept for compatibility.
    """
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    payload = {'chat_id': chat_id, 'text': text}
    requests.post(endpoint, data=payload)
def send_message(text, chat_id):
    """Send a plain text message to the given Telegram chat."""
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    payload = {'chat_id': chat_id, 'text': text}
    requests.post(endpoint, data=payload)
def send_message_ACESS(dep_cat, chat_id):
    """Ask the aide which deputy (by number) they assist, listing every option."""
    text = f'Informe o numero que representa o {dep_cat} o senhor acessora\n'
    roster = dep_f if dep_cat == "Federal" else dep_e if dep_cat == "Estadual" else []
    for dep in roster:
        text += f'{dep["id"]}. {dep["nome"]}\n'
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    requests.post(endpoint, data={'chat_id': chat_id, 'text': text})
def send_image(file_path, chat_id):
    """Upload the image at *file_path* to the given Telegram chat.

    Uses a with-block so the file handle is closed even on request failure
    (the original opened the file and never closed it).
    """
    url = f'https://api.telegram.org/bot{TOKEN}/sendPhoto'
    data = {'chat_id': chat_id, }
    with open(file_path, 'rb') as photo:
        requests.post(url, data=data, files={'photo': photo})
def inicial(text, chat_id):
    """Show the persistent main-menu reply keyboard (CRUD options)."""
    keyboard = {
        "keyboard": [
            [{"text": "✅ CADASTRAR"}, {"text": "👁🗨 CONSULTAR"}],
            [{"text": "✏ EDITAR"}, {"text": "🗑 DELETAR"}],
        ],
        "resize_keyboard": True,
        "one_time_keyboard": True,
    }
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    payload = {'chat_id': chat_id, 'text': text, 'reply_markup': json.dumps(keyboard)}
    requests.post(endpoint, data=payload)
def send_menu(text, chat_id):
    """Ask whether the user is a deputy or an aide (inline keyboard)."""
    markup = json.dumps({
        "inline_keyboard": [[
            {"text": "DEPUTADO", "callback_data": "D"},
            {"text": "ACESSOR", "callback_data": "A"},
        ]]
    })
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    requests.post(endpoint, data={'chat_id': chat_id, 'text': text, 'reply_markup': markup})
def send_menu_dep1(text, chat_id):
    """Ask a deputy whether they are federal or state level (inline keyboard)."""
    markup = json.dumps({
        "inline_keyboard": [[
            {"text": "FEDERAL", "callback_data": "FE"},
            {"text": "ESTADUAL", "callback_data": "ES"},
        ]]
    })
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    requests.post(endpoint, data={'chat_id': chat_id, 'text': text, 'reply_markup': markup})
# Asks whether the aide assists a state or a federal deputy.
def choose_dep_acess(text, chat_id):
    """Ask which deputy category the aide assists (inline keyboard)."""
    markup = json.dumps({
        "inline_keyboard": [[
            {"text": "FEDERAL", "callback_data": "ACFE"},
            {"text": "ESTADUAL", "callback_data": "ACES"},
        ]]
    })
    endpoint = f'https://api.telegram.org/bot{TOKEN}/sendMessage'
    requests.post(endpoint, data={'chat_id': chat_id, 'text': text, 'reply_markup': markup})
def catch_dep(est, id):
    """Return the name of deputy number *id* in category *est*.

    ``est`` is 'Estadual' or 'Federal'; returns None when the category is
    unknown or no deputy has that id. The original version also printed
    per-iteration debug output, which polluted stdout and is removed here.
    """
    id = int(id)
    if est == 'Estadual':
        roster = dep_e
    elif est == 'Federal':
        roster = dep_f
    else:
        return None
    for dep in roster:
        if dep['id'] == id:
            return dep['nome']
    return None
@csrf_exempt
def teleg(requests):
    """Telegram webhook endpoint driving the registration conversation.

    Dispatches on the update type: bot commands (/start, /help), plain
    messages (menu choices and free-text answers, routed by the user's
    ``is_writable`` state), and inline-keyboard callbacks. Always answers
    HTTP 200 so Telegram does not retry.
    """
    if requests.method == 'POST':
        json_list = json.loads(requests.body)
        # It is a bot command (messages with "entities")
        if("message" in json_list.keys()):
            id_chatt = json_list["message"]["chat"]["id"]
            if("entities" in json_list['message'].keys()):
                # The /help command
                if json_list['message']['text'] == '/help':
                    edit_data("nome", "Dinossauro", id_chatt)
                    edit_data("is_writable", "", id_chatt)
                # The /start command: create the row if it does not exist yet
                elif json_list['message']['text'] == '/start':
                    if not(buscar_id_user(id_chatt)):
                        inserir_dado(user_id=id_chatt, data=form_data(json_list["message"]["date"]))
                        usuer = buscar_id_user(id_chatt)
                    usuer = buscar_id_user(id_chatt)
                    inicial("Selecione a opção", id_chatt)
            else:
                # It is a plain message: menu selection or a free-text answer
                usuer = buscar_id_user(id_chatt)
                print(usuer)
                if len(usuer) == 0:
                    # Unknown user: force them through /start first
                    send_message("Você deve enviar o comando /start para iniciar o cadastro", id_chatt)
                elif json_list['message']['text'] == '✅ CADASTRAR':
                    edit_data("is_writable", "", id_chatt)
                    send_menu("Qual cargo o senhor ocupa", id_chatt)
                elif json_list['message']['text'] == '✏ EDITAR':
                    edit_data("is_writable", "", id_chatt)
                    send_message("Vou editar seus dados", id_chatt)
                elif json_list['message']['text'] == '👁🗨 CONSULTAR':
                    edit_data("is_writable", "", id_chatt)
                    send_message("Vou editar seus dados", id_chatt)
                elif json_list['message']['text'] == '🗑 DELETAR':
                    edit_data("is_writable", "", id_chatt)
                    send_message("Vou deletar seus dados", id_chatt)
                # Free text while waiting for the user's display name
                elif usuer["is_writable"] == "nomeuser":
                    edit_data("nomeuser", json_list['message']['text'], json_list["message"]["chat"]["id"])
                    edit_data("is_writable", "", json_list["message"]["chat"]["id"])
                    nome_d = buscar_id_user(json_list["message"]["chat"]["id"])
                    if nome_d['tipo'] == 'Deputado':
                        send_message(f'Seja Bem Vindo Sr {nome_d["tipo"]} {nome_d["locale_is"]} {nome_d["nomeuser"]}', nome_d["user_ident"])
                    if nome_d['tipo'] == 'Acessor':
                        send_message(f'Seja Bem Vindo Sr {nome_d["tipo"]} {nome_d["nomeuser"]}', nome_d["user_ident"])
                        choose_dep_acess("Qual categoria de deputado o senhor acessora", nome_d["user_ident"])
                # Free text while waiting for the deputy number the aide assists
                elif usuer["is_writable"] == "rep_dep":
                    tip_dep = usuer["locale_is"]
                    # Highest valid deputy number per category (77 state, 52 federal)
                    tip_num = 77 if tip_dep == 'Estadual' else 52 if tip_dep == 'Federal' else None
                    rest = re.findall(r'\d+',json_list['message']['text'])
                    if len(rest) == 0:
                        send_message("Por favor. Digite um numero válido", usuer["user_ident"])
                    elif int(rest[0]) < 0 or int(rest[0]) > tip_num:
                        send_message("Por favor. Digite um numero válido", usuer["user_ident"])
                    else:
                        dept = catch_dep(usuer["locale_is"],int(rest[0]))
                        edit_data("rep_dep", dept, json_list["message"]["chat"]["id"])
                        edit_data("is_writable", "", json_list["message"]["chat"]["id"])
                        usuer = buscar_id_user(json_list["message"]["chat"]["id"])
                        send_message(f"Obrigado senhor acessor do Deputado {usuer['locale_is']} {usuer['rep_dep']} ", usuer["user_ident"])
                        send_message("Cadastro concluído com sucesso 😃", usuer["user_ident"])
                        send_message("Aguarde a mensagem de aprovação do administrador para receber todas as atualizações", usuer["user_ident"])
                        print(usuer)
                    # i_files = os.getcwd()
                    # i_file = os.path.join(i_files,'telegram', 'img', 'mao.png')
                else:
                    send_message("Desculpe não entendi seu comando", id_chatt)
        # It is an inline-keyboard callback
        elif("callback_query" in json_list.keys()):
            id_chatt = json_list["callback_query"]["message"]["chat"]["id"]
            escolha = json_list["callback_query"]["data"]   # NOTE(review): unused
            usuer = buscar_id_user(id_chatt)
            # Deputy confirmed as federal
            if json_list["callback_query"]["data"] == 'FE':
                print("Tipo de User")
                print(usuer['tipo'])
                if(usuer['tipo']=="Acessor"):
                    send_message("O Senor não está cadastrado como deputado. Por gentileza realizar a correção clicando na opção EDITAR", json_list["callback_query"]["message"]["chat"]["id"])
                else:
                    send_message("Obrigado pela confirmação Senhor Deputado", json_list["callback_query"]["message"]["chat"]["id"])
                    edit_data("locale_is", "Federal", json_list["callback_query"]["message"]["chat"]["id"])
                    send_message("Por qual nome o senhor gostaria de ser chamado?", json_list["callback_query"]["message"]["chat"]["id"])
                    edit_data("is_writable", "nomeuser", json_list["callback_query"]["message"]["chat"]["id"])
            # Deputy confirmed as state-level
            elif json_list["callback_query"]["data"] == 'ES':
                send_message("Obrigado pela confirmação Senhor Deputado", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("locale_is", "Estadual", json_list["callback_query"]["message"]["chat"]["id"])
                send_message("Por qual nome o senhor gostaria de ser chamado?", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("is_writable", "nomeuser", json_list["callback_query"]["message"]["chat"]["id"])
            # Role chosen: deputy
            elif json_list["callback_query"]["data"] == 'D':
                edit_data("tipo", "Deputado", json_list["callback_query"]["message"]["chat"]["id"])
                send_menu_dep1("Por favor no informe a que categoria o Senhor pertence", json_list["callback_query"]["message"]["chat"]["id"])
            # Role chosen: aide
            elif json_list["callback_query"]["data"] == 'A':
                edit_data("tipo", "Acessor", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("locale_is", "", json_list["callback_query"]["message"]["chat"]["id"])
                send_message("Olá Senhor Acessor", json_list["callback_query"]["message"]["chat"]["id"])
                send_message("Por qual nome o senhor gostaria de ser chamado?", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("is_writable", "nomeuser", json_list["callback_query"]["message"]["chat"]["id"])
            # Aide assists a federal deputy
            elif json_list["callback_query"]["data"] == 'ACFE':
                edit_data("locale_is", "Federal", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("is_writable", "rep_dep", json_list["callback_query"]["message"]["chat"]["id"])
                send_message_ACESS("Federal", json_list["callback_query"]["message"]["chat"]["id"])
            # Aide assists a state deputy
            elif json_list["callback_query"]["data"] == 'ACES':
                edit_data("locale_is", "Estadual", json_list["callback_query"]["message"]["chat"]["id"])
                edit_data("is_writable", "rep_dep", json_list["callback_query"]["message"]["chat"]["id"])
                send_message_ACESS("Estadual", json_list["callback_query"]["message"]["chat"]["id"])
    return HttpResponse("OK")
| cleytonfs777/emendastelebot | telegram/views.py | views.py | py | 14,015 | python | pt | code | 0 | github-code | 13 |
1707501930 | #!/usr/bin/env python
from datetime import datetime
from elasticsearch import Elasticsearch
es_conn = Elasticsearch(
['192.168.200.10'],
http_auth=('elastic', 'rPz1ZRnowQw5ckgF9Jow'),
scheme="http",
port=9200,
)
# List indices of elasticsearch server
indices_list=es_conn.indices.get_alias('*')
print (indices_list)
| taflilou/vagrantupselastic | datasender/elasticsearch/listindices.py | listindices.py | py | 339 | python | en | code | 0 | github-code | 13 |
21125925896 | import re
import argparse
import itertools
import numpy as np
import os, sys
import pandas as pd
import scipy.constants as sc
from formDataStructures import openHDF5, dataKey, printProgress
def extract_off_diag(mtx):
"""
extract off-diagonal entries in mtx
The output vector is order in a column major manner
:param mtx: input matrix to extract the off-diagonal entries
:return:
"""
Q = mtx.shape[0]
extract_cond = np.reshape((1 - np.eye(Q)).astype(bool), (-1, 1), order='F')
return np.extract(extract_cond, mtx[:, :])
def parseArgs():
"""
Parse command-line arguments.
:return: dictionary of valid arguments
"""
printProgress()
def parseRange(code):
if code is None:
return None
else:
range = eval(code)
return np.sort(range)
parser = argparse.ArgumentParser(
description="""
Read HDF5 file produced by formDataStructures.py and <DO SOMETHING WITH FRI>
""",
epilog="""
Example usage:
python real_data.py --dataFile '/Users/pan/Google Drive/RadioAstData/BOOTES24_SB180-189.2ch8s_SIM.hdf5'
--timeRange np.r_[0:2500:50]
--freqRange np.r_[0]
--stationCount 12
--FoV 5
--imageWidth 505
--lsqImage '/Users/pan/Google Drive/RadioAstData/bootes_background_eig48_station48.hdf5'
--catalog '/Users/pan/Google Drive/RadioAstData/skycatalog.npz'
--cleanData './data/CLEAN_data.npz'
""",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('--dataFile', type=str, required=True, help='HDF5 file produced by formDataStructures.py')
parser.add_argument('--timeRange', type=str, required=True, help="""
List of (integer) time indices to process.
The format is np.r_[<write all indices here>].
""")
parser.add_argument('--freqRange', type=str, required=True, help="""
List of (integer) frequency indices to process.
The format is np.r_[<write all indices here>].
""")
parser.add_argument('--stationCount', type=int, required=True, help="""
Integer K specifying that only the first K stations should be used.
If K is small, then only the core stations are being used.
""")
parser.add_argument('--FoV', type=float, required=True,
help='Field of View (degrees)')
parser.add_argument('--imageWidth', type=int, required=True,
help='Width of image (pixels)')
parser.add_argument('--lsqImage', type=str, default=None, required=False,
help='HDF5 file produced by generateImages.py')
parser.add_argument('--catalog', type=str, default=None, required=False,
help='(Optional) Catalog data file')
parser.add_argument('--nvss_catalog', type=str, default=None, required=False,
help='(Optional) NVSS catalog data file')
parser.add_argument('--cleanData', type=str, default=None, required=False,
help='(Optional) CLEAN image with wsclean')
parser.add_argument('--csData', required=False,
help='(Optional) CS image with wsclean')
parser.add_argument('--trim_data', default=False, action='store_true',
help='If present, then the data is trimmed (due to failed stations)')
args = vars(parser.parse_args())
if args['dataFile'] == 'None':
args['dataFile'] = None
if args['lsqImage'] == 'None':
args['lsqImage'] = None
if args['catalog'] == 'None':
args['catalog'] = None
if args['nvss_catalog'] == 'None':
args['nvss_catalog'] = None
if args['dataFile'] is not None:
args['dataFile'] = os.path.abspath(args['dataFile'])
if args['lsqImage'] is not None:
args['lsqImage'] = os.path.abspath(args['lsqImage'])
args['timeRange'] = parseRange(args['timeRange'])
args['freqRange'] = parseRange(args['freqRange'])
return args
def getPointingDirection(args):
"""
Returns the pointing direction.
:param args: output of parseArgs()
:return: (longitude [-pi,pi], latitude [-pi/2,pi/2])
"""
store = openHDF5(args)
pointing_direction = store['POINTING_DIRECTION']
store.close()
return pointing_direction
def computeGridPoints(args):
"""
Calculate the grid-points on which the random field must be drawn.
:param args: output of parseArgs()
:return: (args['imageWidth']**2,3) array of XYZ grid-points
"""
FoV = args['FoV'] * np.pi / 180.
x = y = np.linspace(-np.sin(FoV / 2.), np.sin(FoV / 2.), args['imageWidth'])
[X, Y] = np.meshgrid(x, y)
Z = np.sqrt(1 - X ** 2 - Y ** 2)
gridPoints = np.column_stack((
X.reshape(-1),
Y.reshape(-1),
Z.reshape(-1)
))
return gridPoints
def loadData(timeIndex, freqIndex, args):
"""
Load data from the input HDF5 file and transform relevant fields from UVW to XYZ coordinates
:param timeIndex: time index
:param freqIndex: freq index
:param args: output of parseArgs()
:return: (S, STATION_ID, STATION_XYZ, gridPoints_XYZ, wavelength, pointing_direction)
"""
store = openHDF5(args)
FoV_radian = args['FoV'] * np.pi / 180
S = store[dataKey('S', timeIndex, freqIndex)].iloc[:args['stationCount'], :args['stationCount']]
wavelength = sc.speed_of_light / store['FREQ_MAP'].loc[freqIndex].values
STATION_ID = store[dataKey('STATION_ID', timeIndex, freqIndex)][:args['stationCount']]
STATION_UVW = store[dataKey('STATION_UVW', timeIndex, freqIndex)]
STATION_UVW = pd.concat(
[station for (_, station) in STATION_UVW.groupby(by='stationID')][:args['stationCount']],
ignore_index=True
)
pointing_direction = store['POINTING_DIRECTION'].values
gridPoints_UVW = computeGridPoints(args)
store.close()
return S, STATION_ID, STATION_UVW, gridPoints_UVW, wavelength, pointing_direction, FoV_radian
if __name__ == '__main__':
args = parseArgs()
if args['lsqImage'] is None:
lsqImg_available = False
else:
lsqImg_available = True
if args['catalog'] is None:
catalog_available = False
else:
catalog_available = True
if args['nvss_catalog'] is None:
nvss_catalog_available = False
else:
nvss_catalog_available = True
if args['cleanData'] is None:
clean_data_availabe = False
else:
clean_data_availabe = True
if args['csData'] is None:
cs_data_available = False
else:
cs_data_available = True
# print(type(args['stationCount']), args['stationCount'])
num_subband = args['freqRange'].size
num_sti = args['timeRange'].size
num_station = args['stationCount']
num_antenna = 24 # <= at each time at most 24 out of 48 antennas are working
# the station count is not always consecutive (some stations are not working)
max_station_num = loadData(0, 0, args)[1].size
num_station = min(num_station, max_station_num)
args['stationCount'] = num_station
freq_subbands_hz = np.zeros(num_subband, dtype=float)
# since not all antennas are always working, we initialise the matrix filled with nan.
# later, we can use np.isnan to determine which antenna are involved.
array_coordinate = np.full((num_antenna, num_station, num_sti, 3), np.nan, dtype=float)
visi_noisy = np.zeros((num_station * (num_station - 1), num_sti, num_subband), dtype=complex)
for freq_count, freqIndex in enumerate(args['freqRange']):
for time_count, timeIndex in enumerate(args['timeRange']):
S, STATION_ID, STATION_UVW, gridPoints_UVW, \
wavelength, pointing_direction, FoV_radian = \
loadData(int(timeIndex), int(freqIndex), args)
if args['trim_data']:
# find failed stations
validStationIDs = np.where(~np.all(S == 0, axis=0))
# trim data
STATION_UVW = STATION_UVW[STATION_UVW['stationID'].isin(*validStationIDs)]
# frequencies of different subbands
freq_subbands_hz[freq_count] = sc.speed_of_light / wavelength
# antenna coordinates
antenna_idx = np.mod(STATION_UVW.loc[:, 'antennaID'].values, num_antenna)
'''
because some stations may not be working, we change the station_id to a
sequentially increasing sequence -- we will use station id later to store
antenna coordinates
'''
'''
for staion_id_count, station_id_loop in enumerate(STATION_ID.values):
STATION_UVW['stationID'].replace(station_id_loop, staion_id_count, inplace=True)
'''
station_idx = STATION_UVW.loc[:, 'stationID'].values
array_coordinate[antenna_idx, station_idx, time_count, 0] = \
STATION_UVW.loc[:, 'u'].values * wavelength
array_coordinate[antenna_idx, station_idx, time_count, 1] = \
STATION_UVW.loc[:, 'v'].values * wavelength
array_coordinate[antenna_idx, station_idx, time_count, 2] = \
STATION_UVW.loc[:, 'w'].values * wavelength
# noisy visibility measurements
visi_noisy[:, time_count, freq_count] = extract_off_diag(S.as_matrix())
# plotting grid point
x_plt = gridPoints_UVW[:, 0].reshape(args['imageWidth'], args['imageWidth'])
y_plt = gridPoints_UVW[:, 1].reshape(args['imageWidth'], args['imageWidth'])
z_plt = gridPoints_UVW[:, 2].reshape(args['imageWidth'], args['imageWidth'])
# telescope focusing point
sky_focus = pointing_direction.squeeze()
sky_ra = sky_focus[0]
sky_dec = sky_focus[1]
if lsqImg_available:
# load least square image
lsqImg_store = pd.HDFStore(args['lsqImage'], mode='r')
# some frames are missing from the hdf5 file
indexing_keys = lsqImg_store.keys()
pattern = r'/DATA/t(?P<time>\d+)/IMAGE'
valid_indices = [int(re.match(pattern, key).group('time'))
for key in indexing_keys if re.match(pattern, key) != None]
img_lsq = np.zeros(lsqImg_store['/DEC'].shape)
for loop_count in filter(lambda x: x in args['timeRange'], valid_indices):
loop_file_name = '/DATA/t{t:=04d}/IMAGE'.format(t=loop_count)
img_lsq += lsqImg_store[loop_file_name]
# (optional) catalog
if catalog_available:
catalog_data = np.load(args['catalog'])
skycatalog_intensities = catalog_data['Intensities_skyctalog']
skycatalog_U = catalog_data['U_skycatalog']
skycatalog_V = catalog_data['V_skycatalog']
skycatalog_W = catalog_data['W_skycatalog']
else:
skycatalog_intensities = None
skycatalog_U = None
skycatalog_V = None
skycatalog_W = None
if nvss_catalog_available:
nvss_catalog_data = np.load(args['nvss_catalog'])
nvss_skycatalog_intensities = nvss_catalog_data['Intensities_skyctalog']
nvss_skycatalog_U = nvss_catalog_data['U_skycatalog']
nvss_skycatalog_V = nvss_catalog_data['V_skycatalog']
nvss_skycatalog_W = nvss_catalog_data['W_skycatalog']
else:
nvss_skycatalog_intensities = None
nvss_skycatalog_U = None
nvss_skycatalog_V = None
nvss_skycatalog_W = None
# (optional) CLEAN image
if clean_data_availabe:
clean_data = np.load(args['cleanData'])
img_clean = clean_data['img_clean']
img_dirty = clean_data['img_dirty']
x_plt_CLEAN = clean_data['x_plt_CLEAN_rad']
y_plt_CLEAN = clean_data['y_plt_CLEAN_rad']
# (optional) CS image
if cs_data_available:
cs_data = np.load(args['csData'])
img_cs = cs_data['img_clean']
# save extracted data
data_file_name = ('./data/' +
os.path.splitext(os.path.basename(args['dataFile']))[0] +
'_{0}STI_{1:.0f}MHz_{2}Station_{3}Subband.npz'
).format(num_sti, np.mean(freq_subbands_hz) / 1e6,
num_station, num_subband)
npz_data_dict = {
'freq_subbands_hz': freq_subbands_hz,
'array_coordinate': array_coordinate,
'visi_noisy': visi_noisy,
'RA_rad': sky_ra,
'DEC_rad': sky_dec,
'FoV': np.degrees(FoV_radian),
'skycatalog_intensities': skycatalog_intensities,
'skycatalog_U': skycatalog_U,
'skycatalog_V': skycatalog_V,
'skycatalog_W': skycatalog_W,
'nvss_skycatalog_intensities': nvss_skycatalog_intensities,
'nvss_skycatalog_U': nvss_skycatalog_U,
'nvss_skycatalog_V': nvss_skycatalog_V,
'nvss_skycatalog_W': nvss_skycatalog_W,
'x_plt': x_plt_CLEAN if clean_data_availabe else x_plt,
'y_plt': y_plt_CLEAN if clean_data_availabe else y_plt,
'z_plt': z_plt,
}
if clean_data_availabe:
npz_data_dict['img_clean'] = img_clean
npz_data_dict['img_dirty'] = img_dirty
if cs_data_available:
npz_data_dict['img_cs'] = img_cs
if lsqImg_available:
npz_data_dict['img_lsq'] = img_lsq
np.savez(data_file_name, **npz_data_dict)
| hanjiepan/LEAP | real_data.py | real_data.py | py | 13,433 | python | en | code | 1 | github-code | 13 |
74880239058 | import io
import os
import numpy as np
import pytest
from hypothesis import HealthCheck, example, given, settings
import roffio
from .generators.roff_tag_data import roff_data
def test_write_adds_metadata():
f = io.BytesIO()
roffio.write(f, {})
f.seek(0)
read_contents = roffio.read(f)
assert read_contents["version"]["major"] == 2
assert read_contents["version"]["minor"] == 0
assert read_contents["filedata"]["byteswaptest"] == 1
def test_overwrite_version_major_errors():
with pytest.raises(ValueError, match="change roff file version"):
roffio.write(io.BytesIO(), {"version": {"major": -1}})
def test_overwrite_version_minor_errors():
with pytest.raises(ValueError, match="change roff file version"):
roffio.write(io.BytesIO(), {"version": {"minor": -1}})
def test_overwrite_byteswaptest_errors():
with pytest.raises(ValueError, match="not possible to set the byteswaptest"):
roffio.write(io.BytesIO(), {"filedata": {"byteswaptest": -1}})
def test_overwrite_filetype():
f = io.BytesIO()
roffio.write(f, {"filedata": {"filetype": "surface"}})
f.seek(0)
assert roffio.read(f)["filedata"]["filetype"] == "surface"
def test_overwrite_creation_date():
f = io.BytesIO()
roffio.write(f, {"filedata": {"creationDate": "today"}})
f.seek(0)
assert roffio.read(f)["filedata"]["creationDate"] == "today"
def test_just_one_eof():
f = io.BytesIO()
roffio.write(f, {"eof": {}})
f.seek(0)
assert roffio.read(f)["eof"] == {}
@given(roff_data)
@example({"filedata": {"filetype": "generic"}, "tag": {"x": 1}})
def test_read_write_is_identity(roff_data):
f = io.BytesIO()
roffio.write(f, roff_data)
f.seek(0)
read_contents = roffio.read(f)
read_contents.pop("version")
read_contents.pop("filedata")
read_contents.pop("eof")
roff_data.pop("version", None)
roff_data.pop("filedata", None)
roff_data.pop("eof", None)
assert read_contents == roff_data
@given(roff_data)
def test_binary_write_read_is_ascii_write_read(roff_contents):
bf = io.BytesIO()
af = io.StringIO()
roffio.write(bf, roff_contents, roff_format=roffio.Format.BINARY)
roffio.write(af, roff_contents, roff_format=roffio.Format.ASCII)
bf.seek(0)
af.seek(0)
read_binary_contents = roffio.read(bf)
read_ascii_contents = roffio.read(af)
read_binary_contents.pop("filedata")
read_ascii_contents.pop("filedata")
assert read_binary_contents == read_ascii_contents
@pytest.mark.parametrize(
"roff_format, buffer",
[(roffio.Format.BINARY, io.BytesIO()), (roffio.Format.ASCII, io.StringIO())],
)
def test_read_write_multitag(roff_format, buffer):
contents = [
("tagname", {"keyname": 1.0}),
("tagname", {"keyname": 2.0}),
]
roffio.write(buffer, contents, roff_format=roff_format)
buffer.seek(0)
values = roffio.read(buffer)
assert values["tagname"] == [{"keyname": 1.0}, {"keyname": 2.0}]
@pytest.mark.parametrize(
"roff_format, buffer",
[(roffio.Format.BINARY, io.BytesIO()), (roffio.Format.ASCII, io.StringIO())],
)
def test_read_write_multikey(roff_format, buffer):
contents = {
"tagname": [
("keyname", 1.0),
("keyname", 2.0),
],
}
roffio.write(buffer, contents, roff_format=roff_format)
buffer.seek(0)
values = roffio.read(buffer)
assert values["tagname"] == {"keyname": [1.0, 2.0]}
def test_read_write_warn_cast():
buff = io.BytesIO()
contents = {"t": {"a": np.array([1, 2], dtype=np.int64)}}
with pytest.warns(UserWarning, match="cast"):
roffio.write(buff, contents)
buff.seek(0)
assert np.array_equal(roffio.read(buff)["t"]["a"], np.array([1, 2], dtype=np.int32))
@given(roff_data)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_read_write_pathlib(tmp_path, roff_data):
filepath = tmp_path / "data.roff"
roffio.write(filepath, roff_data)
read_contents = roffio.read(filepath)
read_contents.pop("version")
read_contents.pop("filedata")
read_contents.pop("eof")
roff_data.pop("version", None)
roff_data.pop("filedata", None)
roff_data.pop("eof", None)
assert read_contents == roff_data
@given(roff_data)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_read_write_filestr(tmpdir, roff_data):
filepath = os.path.join(tmpdir, "data.roff")
roffio.write(filepath, roff_data)
read_contents = roffio.read(filepath)
read_contents.pop("version")
read_contents.pop("filedata")
read_contents.pop("eof")
roff_data.pop("version", None)
roff_data.pop("filedata", None)
roff_data.pop("eof", None)
assert read_contents == roff_data
@pytest.mark.parametrize(
"roff_format, filelike",
[(roffio.Format.BINARY, io.BytesIO()), (roffio.Format.ASCII, io.StringIO())],
)
def test_read_write_list(roff_format, filelike):
data = {"t": {"k": ["a", "b"]}}
roffio.write(filelike, data, roff_format=roff_format)
filelike.seek(0)
read_contents = roffio.read(filelike)
read_contents.pop("version")
read_contents.pop("filedata")
read_contents.pop("eof")
read_contents["t"]["k"] = list(read_contents["t"]["k"])
assert read_contents == data
| equinor/roffio | tests/test_read_write.py | test_read_write.py | py | 5,344 | python | en | code | 3 | github-code | 13 |
11151785274 | ##################################################################
#
# iDEA Simulator
# elf32instr.py
#
# Modelling elf32-bigmips instructions
# Fredrik Brosser 2013-05-14
#
##################################################################
# Imports
import sys
import re
class elf32instr:
## Constructor
def __init__(self, label, address, opcode, mnemonic, nOperands, operands, indirect):
self.label = label;
self.address = address;
self.opcode = opcode;
self.mnemonic = mnemonic;
self.nOperands = nOperands;
self.operands = operands;
self.indirect = indirect;
self.indirectReg = ""
self.isMemInstr = False
# List of MIPS Registers
self.registerList = ['zero', 'at', 'v0', 'v1', 'a0', 'a1' , 'a2', 'a3',
't0', 't1', 't2', 't3', 't4', 't5', 't6', 't7',
's0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8',
't8' , 't9',
'k0', 'k1',
'gp', 'sp',
's8', 'ra']
# Mapping of MIPS Registers to iDEA (naming conventions only)
self.registerMapping = {'zero':'$r0', 'at':'$r1', 'v0':'$r2', 'v1':'$r3',
'a0':'$r4', 'a1':'$r5', 'a2':'$r6', 'a3':'$r7',
't0':'$r8', 't1':'$r9', 't2':'$r10', 't3':'$r11',
't4':'$r12', 't5':'$r13', 't6':'$r14', 't7':'$r15',
's0':'$r16', 's1':'$r17', 's2':'$r18', 's3':'$r19',
's4':'$r20', 's5':'$r21', 's6':'$r22', 's7':'$r23',
't8':'$r24' , 't9':'$r25',
'k0':'$r26', 'k1':'$r27',
'gp':'$r28', 'sp':'$r29',
's8':'$r30', 'ra':'$r31'}
# Label instruction as a memory instruction (for offset value parsing)
if(mnemonic == "sw" or mnemonic == "lw"):
self.isMemInstr = True
else :
self.isMemInstr = False
# Instruction uses an offset value
if(indirect is not None):
self.indirectReg = indirect.translate(None, '()')
# Get the instruction information as a single string
def getData(self):
labelStr = (self.label + ": ") if self.label is not '' else ""
printStr = (labelStr + self.address + ":\t" + self.opcode + "\t" + self.mnemonic + "\t")
for i in range (0, self.nOperands):
printStr += self.operands[i]
if(i<self.nOperands-1):
printStr += ","
if(self.indirect is not None):
printStr += self.indirect
return printStr
# Get the instruction information as a single string for execution in the simulator
def getSimData(self):
printStr = (self.mnemonic)
if(self.nOperands > 0):
printStr += (" ")
for i in range (0, self.nOperands):
if(self.operands[i] in self.registerList):
printStr += self.registerMapping[self.operands[i]]
else:
printStr += self.operands[i]
if(i<self.nOperands-1):
printStr += ", "
if(self.indirect is not None):
printStr += ("(" + self.registerMapping[self.indirect.strip('()')] + ")")
return printStr
| warclab/idea | simulator/src/elf32instr.py | elf32instr.py | py | 2,862 | python | en | code | 14 | github-code | 13 |
20561598764 | list = []
score = int(input("how many numbers do you want to be added up"))
print("Enter The Numbers You Want Added Up")
for x in range(0,score):
score1 = int(input())
list.append(score1)
print("This Is Your Numbers", list)
AN = list # This Puts The List Into A Variable
S = sum(AN) # This Sums The Variable List To A Number Simple
print(S) # this Prints The Sum Of The Variable List | 19JIvan/2017-Year-10-Programming | DoneForSchool/list of numbers 2.py | list of numbers 2.py | py | 398 | python | en | code | 0 | github-code | 13 |
7583922411 | # 주사위의 개수
def solution1(box, n):
answer = 1
for i in box:
answer = answer * (i // n)
return answer
# 합성수 찾기
def solution2(n):
cnt = 0
for num in range(1,n+1):
i = 2
while i < num:
if num % i == 0:
cnt += 1
break
i += 1
return cnt
def solution2_2(n):
result = 0
for i in range(4, n +1):
for j in range(2, int(i ** 0.5) + 1):
if i % j == 0:
output += 1
break
return output
# 최댓값 만들기 1
def solution3(numbers):
numbers.sort(reverse=True)
return numbers[0]*numbers[1]
def solution3_2(numbers):
numbers.sort()
return numbers[-1]*numbers[-2]
# 팩토리얼
def solution4(n):
k = 1
for i in range(1, 11):
k = k * i
if k == n:
return i
elif k > n:
return i - 1
| hjhyun98/Programmers-Algorithm | python/lv0/day11.py | day11.py | py | 942 | python | en | code | 0 | github-code | 13 |
21586924731 | import os
import imgaug as ia
from imgaug.augmenters.meta import SomeOf
import numpy as np
from imgaug import augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from PIL import Image
import setting
ia.seed(1)
def xywh_to_bbox(label, x, y, w, h):
return BoundingBox(x1=x - w / 2,
y1=y - h / 2,
x2=x + w / 2,
y2=y + h / 2,
label=label)
def read_yolo_annotations(inpath, image_width, image_height):
"""
Read annotations (in YOLO format) form file
:param inpath: filepath to annotation file
:type inpath: str
:param image_width: width of image
:type image_width: int
:param image_height: height of image
:type image_height: int
:return: parsed bounding box annotations
:rtype: BoundingBoxesOnImage
"""
with open(inpath, 'r') as fp:
lines = fp.readlines()
bb_list = []
for line in lines:
items = line.split(' ')
if len(items) < 5:
print('Invalid anno line: {}'.format(line))
label, x, y, w, h = items
x = float(x) * image_width
y = float(y) * image_height
w = float(w) * image_width
h = float(h) * image_height
label = int(label)
bb_list.append(xywh_to_bbox(label, x, y, w, h))
bbs = BoundingBoxesOnImage(bounding_boxes=bb_list,
shape=(image_height, image_width))
return bbs
def write_yolo_annotations(outpath, annotations, image_width, image_height):
"""
Write annotations into file following the YOLO format
:param outpath: filepath to save
:type outpath: str
:param annotations: annotations of bounding boxes
:type annotations: BoundingBoxesOnImage
:param image_width: width of image
:type image_width: int
:param image_height: height of image
:type image_height: int
"""
with open(outpath, 'w') as f:
for anno in annotations.remove_out_of_image().clip_out_of_image():
label = anno.label
x = anno.center_x / image_width
y = anno.center_y / image_height
w = anno.width / image_width
h = anno.height / image_height
f.write('{} {} {} {} {}\n'.format(label, x, y, w, h))
def get_box(obj_w, obj_h, min_x, min_y, max_x, max_y):
"""
Generate a random bounding box for object to paste
:param obj_w: width of object
:type obj_w: int
:param obj_h: height of object
:type obj_h: int
:param min_x: minimum value of position x
:type min_x: int
:param min_y: minimum value of position y
:type min_y: int
:param max_x: maximum value of position x
:type max_x: int
:param max_y: maximum value of position y
:type max_y: int
:return: generated bboxes
:rtype: list[int]
"""
x1, y1 = np.random.randint(min_x, max_x,
1), np.random.randint(min_y, max_y, 1)
x2, y2 = x1 + obj_w, y1 + obj_h
return [x1[0], y1[0], x2[0], y2[0]]
def intersects(box, new_box):
"""
Check whether two bounding boxes are intersected
:param box: one bounding box
:type box: list[int]
:param new_box: another bounding box
:type new_box: list[int]
:return: whether two bounding boxes are intersected
:rtype: bool
"""
box_x1, box_y1, box_x2, box_y2 = box
x1, y1, x2, y2 = new_box
return not (box_x2 < x1 or box_x1 > x2 or box_y1 > y2 or box_y2 < y1)
def get_group_object_positions(object_group, image_background, dataset_object,
aug_object):
"""
Generate positions for grouped object to paste on background image
:param object_group: group of objects to appear
:type object_group: list[int]
:param image_background: background image
:type image_background: numpy.array
:param dataset_object: dataset of object images
:type dataset_object: dataset.ObjectImageFolderDataset
:param aug_object: augment instance for object
:type aug_object: iaa.Sequential
:return: size and bounding oxes of grouped objects
"""
bkg_w, bkg_h = image_background.size
boxes = []
objs = []
labels = []
obj_sizes = []
for i in object_group:
# load data
obj, label = dataset_object[i]
# TODO move transforms into dataset getting method
# resize obj
factor = min([
(setting.OBJECT_INIT_SCALE_FACTOR * image_background.size[dim]) /
obj.size[dim] for dim in range(len(obj.size))
])
obj_size = tuple(
int(obj.size[dim] * factor) for dim in range(len(obj.size)))
obj_w, obj_h = obj_size
obj = obj.resize((obj_w, obj_h))
obj = resize_image(obj)
# augment obj
obj_aug = Image.fromarray(aug_object.augment_images([np.array(obj)])[0])
# add to list
objs.append(obj_aug)
labels.append(label)
obj_sizes.append(obj_aug.size)
for w, h in obj_sizes:
# set background image boundaries
if len(boxes) == 0 or not setting.OBJECT_IN_LINE:
min_x, min_y = 2 * w, 2 * h
max_x, max_y = bkg_w - 10 * w, bkg_h - 10 * h
else:
min_x = boxes[-1][2] + 1
min_y = boxes[-1][1] + 1
max_x = min(bkg_w - 2 * w,
boxes[-1][2] + np.random.randint(2, 3, 1)[0])
max_y = min(bkg_h - 2 * h,
boxes[-1][1] + np.random.randint(2, 3, 1)[0])
if min_x >= max_x or min_y >= max_y:
print('Ignore invalid box: ', w, h, min_x, min_y, max_x, max_y)
continue
# get new box coordinates for the obj on the bkg
while True:
new_box = get_box(w, h, min_x, min_y, max_x, max_y)
for box in boxes:
res = intersects(box, new_box)
if res:
break
else:
break # only executed if the inner loop did NOT break
continue # only executed if the inner loop DID break
# append our new box
boxes.append(new_box)
return objs, labels, obj_sizes, boxes
def resize_image(image):
"""
Resize image by random scale factor
"""
resize_rate = np.random.choice(
setting.OBJECT_AUG_SCALE_FACTOR) + np.random.uniform(low=-0.1, high=0.1)
image = image.resize(
[int(image.width * resize_rate),
int(image.height * resize_rate)], Image.BILINEAR)
return image
def sometimes(aug):
"""
Return a shortcut for iaa.Sometimes
:param aug: augmentation method
:type aug: iaa.meta.Augmenter
:return: wrapped augmentation method
:rtype: iaa.meta.Augmenter
"""
return iaa.Sometimes(0.5, aug)
def build_augment_sequence_for_object():
"""
Build augmentation sequence for object
:return: aug for object
:rtype: iaa.Sequential
"""
return iaa.Sequential([
sometimes(
iaa.CropAndPad(
percent=(-0.05, 0.075), pad_mode=ia.ALL, pad_cval=(0, 255))),
sometimes(iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-1, 1))),
sometimes(iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True)),
iaa.SomeOf((0, 2), [
iaa.OneOf([
iaa.GaussianBlur((0, 1.0)),
iaa.AverageBlur(k=(1, 3)),
iaa.MedianBlur(k=(1, 3)),
]),
iaa.Affine(scale={
'x': (0.9, 1.1),
'y': (0.9, 1.1)
},
rotate=(-5, 5),
order=[0, 1],
cval=(0, 255),
mode=ia.ALL),
iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25),
iaa.PiecewiseAffine(scale=(0.01, 0.05)),
]),
iaa.PerspectiveTransform(scale=(0.06, 0.1),
keep_size=False,
fit_output=True,
cval=(0, 255),
mode=ia.ALL),
],
random_order=True)
def build_augment_sequence_for_background():
"""
Build augmentation sequence for background
:return: aug for background
:rtype: iaa.Sequential
"""
return iaa.Sequential(
[
sometimes(
iaa.CropAndPad(percent=(-0.05, 0.075),
pad_mode=ia.ALL,
pad_cval=(0, 255))),
sometimes(
iaa.Affine(
scale={
'x': (0.9, 1.1),
'y': (0.9, 1.1)
},
translate_percent={
'x': (-0.03, 0.03),
'y': (-0.03, 0.03)
},
rotate=(-5, 5), # rotate by -45 to +45 degrees
order=[0, 1],
cval=(0, 255),
mode=ia.ALL)),
iaa.SomeOf(
(0, 2),
[
iaa.OneOf([
iaa.GaussianBlur((0, 3.0)),
iaa.AverageBlur(k=(2, 7)),
iaa.MedianBlur(k=(3, 7)),
]),
iaa.Sharpen(alpha=(0, 1.0),
lightness=(0.75, 1.5)), # sharpen images
iaa.AdditiveGaussianNoise(
loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
iaa.OneOf([
iaa.Dropout((0.01, 0.015), per_channel=0.1),
iaa.CoarseDropout((0.01, 0.015),
size_percent=(0.01, 0.015),
per_channel=0.1),
]),
iaa.Add((-10, 10), per_channel=0.5),
]),
iaa.PerspectiveTransform(scale=(0.02, 0.05), keep_size=False)
],
random_order=True)
| corenel/synthetic-image-generator | util.py | util.py | py | 10,098 | python | en | code | 0 | github-code | 13 |
4254364155 | import configargparse
import logging
import os
import platform
import random
import subprocess
import sys
import numpy as np
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
def main(cmd_args):
parser = configargparse.ArgumentParser(
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
# general configuration
parser.add('--config', is_config_file=True, help='config file path')
parser.add('--config2', is_config_file=True,
help='second config file path that overwrites the settings in `--config`.')
parser.add('--config3', is_config_file=True,
help='third config file path that overwrites the settings in `--config` and `--config2`.')
parser.add_argument('--ngpu', default=0, type=int,
help='Number of GPUs')
parser.add_argument('--backend', default='pytorch', type=str,
choices=['chainer', 'pytorch'],
help='Backend library')
parser.add_argument('--outdir', type=str, required=True,
help='Output directory')
parser.add_argument('--debugmode', default=1, type=int,
help='Debugmode')
parser.add_argument('--dict', required=True,
help='Dictionary')
parser.add_argument('--seed', default=1, type=int,
help='Random seed')
parser.add_argument('--debugdir', type=str,
help='Output directory for debugging')
parser.add_argument('--resume', '-r', default='', nargs='?',
help='Resume the training from snapshot')
parser.add_argument('--minibatches', '-N', type=int, default='-1',
help='Process only N minibatches (for debug)')
parser.add_argument('--verbose', '-V', default=0, type=int,
help='Verbose option')
parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help="Tensorboard log dir path")
# task related
parser.add_argument('--train-json', type=str, default=None,
help='Filename of train label data (json)')
parser.add_argument('--valid-json', type=str, default=None,
help='Filename of validation label data (json)')
# network architecture
parser.add_argument('--model-module', type=str, default=None,
help='model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)')
# encoder
parser.add_argument('--num-spkrs', default=1, type=int,
choices=[1, 2],
help='Number of speakers in the speech.')
parser.add_argument('--etype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture')
parser.add_argument('--elayers-sd', default=4, type=int,
help='Number of encoder layers for speaker differentiate part. (multi-speaker asr mode only)')
parser.add_argument('--elayers', default=4, type=int,
help='Number of encoder layers (for shared recognition part in multi-speaker asr mode)')
parser.add_argument('--eunits', '-u', default=300, type=int,
help='Number of encoder hidden units')
parser.add_argument('--eprojs', default=320, type=int,
help='Number of encoder projection units')
parser.add_argument('--subsample', default="1", type=str,
help='Subsample input frames x_y_z means subsample every x frame at 1st layer, '
'every y frame at 2nd layer etc.')
# loss
parser.add_argument('--ctc_type', default='warpctc', type=str,
choices=['builtin', 'warpctc'],
help='Type of CTC implementation to calculate loss.')
# attention
parser.add_argument('--atype', default='dot', type=str,
choices=['noatt', 'dot', 'add', 'location', 'coverage',
'coverage_location', 'location2d', 'location_recurrent',
'multi_head_dot', 'multi_head_add', 'multi_head_loc',
'multi_head_multi_res_loc'],
help='Type of attention architecture')
parser.add_argument('--adim', default=320, type=int,
help='Number of attention transformation dimensions')
parser.add_argument('--awin', default=5, type=int,
help='Window size for location2d attention')
parser.add_argument('--aheads', default=4, type=int,
help='Number of heads for multi head attention')
parser.add_argument('--aconv-chans', default=-1, type=int,
help='Number of attention convolution channels \
(negative value indicates no location-aware attention)')
parser.add_argument('--aconv-filts', default=100, type=int,
help='Number of attention convolution filters \
(negative value indicates no location-aware attention)')
parser.add_argument('--spa', action='store_true',
help='Enable speaker parallel attention.')
# decoder
parser.add_argument('--dtype', default='lstm', type=str,
choices=['lstm', 'gru'],
help='Type of decoder network architecture')
parser.add_argument('--dlayers', default=1, type=int,
help='Number of decoder layers')
parser.add_argument('--dunits', default=320, type=int,
help='Number of decoder hidden units')
parser.add_argument('--mtlalpha', default=0.5, type=float,
help='Multitask learning coefficient, alpha: alpha*ctc_loss + (1-alpha)*att_loss ')
parser.add_argument('--lsm-type', const='', default='', type=str, nargs='?', choices=['', 'unigram'],
help='Apply label smoothing with a specified distribution type')
parser.add_argument('--lsm-weight', default=0.0, type=float,
help='Label smoothing weight')
parser.add_argument('--sampling-probability', default=0.0, type=float,
help='Ratio of predicted labels fed back to decoder')
# recognition options to compute CER/WER
parser.add_argument('--report-cer', default=True, action='store_true',
help='Compute CER on development set')
parser.add_argument('--report-wer', default=True, action='store_true',
help='Compute WER on development set')
parser.add_argument('--nbest', type=int, default=1,
help='Output N-best hypotheses')
parser.add_argument('--beam-size', type=int, default=4,
help='Beam size')
parser.add_argument('--penalty', default=0.0, type=float,
help='Incertion penalty')
parser.add_argument('--maxlenratio', default=0.0, type=float,
help="""Input length ratio to obtain max output length.
If maxlenratio=0.0 (default), it uses a end-detect function
to automatically find maximum hypothesis lengths""")
parser.add_argument('--minlenratio', default=0.0, type=float,
help='Input length ratio to obtain min output length')
parser.add_argument('--ctc-weight', default=0.3, type=float,
help='CTC weight in joint decoding')
parser.add_argument('--rnnlm', type=str, default=None,
help='RNNLM model file to read')
parser.add_argument('--rnnlm-conf', type=str, default=None,
help='RNNLM model config file to read')
parser.add_argument('--lm-weight', default=0.1, type=float,
help='RNNLM weight.')
#parser.add_argument('--sym-space', default='<space>', type=str, help='Space symbol')
parser.add_argument('--sym-space', default='\u2581', type=str, help='Space symbol')
parser.add_argument('--sym-blank', default='<blank>', type=str,
help='Blank symbol')
# model (parameter) related
parser.add_argument('--dropout-rate', default=0.0, type=float,
help='Dropout rate for the encoder')
parser.add_argument('--dropout-rate-decoder', default=0.0, type=float,
help='Dropout rate for the decoder')
# minibatch related
parser.add_argument('--sortagrad', default=0, type=int, nargs='?',
help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs")
parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES,
help='How to count batch_size. The default (auto) will find how to count by args.')
parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int,
help='Maximum seqs in a minibatch (0 to disable)')
parser.add_argument('--batch-bins', default=0, type=int,
help='Maximum bins in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-in', default=0, type=int,
help='Maximum input frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-out', default=0, type=int,
help='Maximum output frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-inout', default=0, type=int,
help='Maximum input+output frames in a minibatch (0 to disable)')
parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=800, type=int, metavar='ML',
help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.')
parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=150, type=int, metavar='ML',
help='When --batch-count=seq, batch size is reduced if the output sequence length > ML')
parser.add_argument('--n-iter-processes', default=0, type=int,
help='Number of processes of iterator')
parser.add_argument('--preprocess-conf', type=str, default=None,
help='The configuration file for the pre-processing')
# optimization related
parser.add_argument('--opt', default='adadelta', type=str,
choices=['adadelta', 'adam', 'noam'],
help='Optimizer')
parser.add_argument('--accum-grad', default=1, type=int,
help='Number of gradient accumuration')
parser.add_argument('--eps', default=1e-8, type=float,
help='Epsilon constant for optimizer')
parser.add_argument('--eps-decay', default=0.1, type=float,
help='Decaying ratio of epsilon')
parser.add_argument('--weight-decay', default=0.0, type=float,
help='Weight decay ratio')
parser.add_argument('--criterion', default='acc', type=str,
choices=['loss', 'acc'],
help='Criterion to perform epsilon decay')
parser.add_argument('--threshold', default=1e-4, type=float,
help='Threshold to stop iteration')
parser.add_argument('--epochs', '-e', default=30, type=int,
help='Maximum number of epochs')
parser.add_argument('--early-stop-criterion', default='validation/main/acc', type=str, nargs='?',
help="Value to monitor to trigger an early stopping of the training")
parser.add_argument('--patience', default=3, type=int, nargs='?',
help="Number of epochs to wait without improvement before stopping the training")
parser.add_argument('--grad-clip', default=5, type=float,
help='Gradient norm threshold to clip')
parser.add_argument('--num-save-attention', default=3, type=int,
help='Number of samples of attention to be saved')
# speech translation related
parser.add_argument('--context-residual', default=False, type=strtobool, nargs='?',
help='')
parser.add_argument('--use-frontend', type=strtobool, default=False,
help='The flag to switch to use frontend system.')
# WPE related
parser.add_argument('--use-wpe', type=strtobool, default=False,
help='Apply Weighted Prediction Error')
parser.add_argument('--wtype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture '
'of the mask estimator for WPE. '
'')
parser.add_argument('--wlayers', type=int, default=2,
help='')
parser.add_argument('--wunits', type=int, default=300,
help='')
parser.add_argument('--wprojs', type=int, default=300,
help='')
parser.add_argument('--wdropout-rate', type=float, default=0.0,
help='')
parser.add_argument('--wpe-taps', type=int, default=5,
help='')
parser.add_argument('--wpe-delay', type=int, default=3,
help='')
parser.add_argument('--use-dnn-mask-for-wpe', type=strtobool,
default=False,
help='Use DNN to estimate the power spectrogram. '
'This option is experimental.')
# Beamformer related
parser.add_argument('--use-beamformer', type=strtobool,
default=True, help='')
parser.add_argument('--btype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture '
'of the mask estimator for Beamformer.')
parser.add_argument('--blayers', type=int, default=2,
help='')
parser.add_argument('--bunits', type=int, default=300,
help='')
parser.add_argument('--bprojs', type=int, default=300,
help='')
parser.add_argument('--badim', type=int, default=320,
help='')
parser.add_argument('--ref-channel', type=int, default=-1,
help='The reference channel used for beamformer. '
'By default, the channel is estimated by DNN.')
parser.add_argument('--bdropout-rate', type=float, default=0.0,
help='')
# Feature transform: Normalization
parser.add_argument('--stats-file', type=str, default=None,
help='The stats file for the feature normalization')
parser.add_argument('--apply-uttmvn', type=strtobool, default=True,
help='Apply utterance level mean '
'variance normalization.')
parser.add_argument('--uttmvn-norm-means', type=strtobool,
default=True, help='')
parser.add_argument('--uttmvn-norm-vars', type=strtobool, default=False,
help='')
# Feature transform: Fbank
parser.add_argument('--fbank-fs', type=int, default=16000,
help='The sample frequency used for '
'the mel-fbank creation.')
parser.add_argument('--n-mels', type=int, default=80,
help='The number of mel-frequency bins.')
parser.add_argument('--fbank-fmin', type=float, default=0.,
help='')
parser.add_argument('--fbank-fmax', type=float, default=None,
help='')
#extra parses added by vinit
parser.add_argument('--pairwise', type=strtobool, default=False,
help='Set true if batches need to be generated as pairs')
parser.add_argument('--pair-threshold', type=float, default=0.05,
help='Percentage threshold to decide proportion of nC2 pairs')
parser.add_argument('--pair-cutoff', type=float, default=10,
help='Maximum pairs of a given sentence')
parser.add_argument('--pair-lambda', type=float, default=1.0,
help='Lambda weight for siamese loss')
parser.add_argument('--pair-alpha', type=float, default=0.001,
help='alpha(lr) weight for siamese loss')
parser.add_argument('--oversamp-epsilon', type=float, default=1e-6,
help='epsilon threshold to remove oversampling due to cross entropy during pairwise')
args, _ = parser.parse_known_args(cmd_args)
from espnet.utils.dynamic_import import dynamic_import
if args.model_module is not None:
model_class = dynamic_import(args.model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
if args.model_module is None:
args.model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
if 'chainer_backend' in args.model_module:
args.backend = 'chainer'
if 'pytorch_backend' in args.model_module:
args.backend = 'pytorch'
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
logging.basicConfig(
level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
logging.warning('Skip DEBUG/INFO messages')
# check CUDA_VISIBLE_DEVICES
if args.ngpu > 0:
# python 2 case
if platform.python_version_tuple()[0] == '2':
if "clsp.jhu.edu" in subprocess.check_output(["hostname", "-f"]):
cvd = subprocess.check_output(["/usr/local/bin/free-gpu", "-n", str(args.ngpu)]).strip()
logging.info('CLSP: use gpu' + cvd)
os.environ['CUDA_VISIBLE_DEVICES'] = cvd
# python 3 case
else:
if "clsp.jhu.edu" in subprocess.check_output(["hostname", "-f"]).decode():
cvd = subprocess.check_output(["/usr/local/bin/free-gpu", "-n", str(args.ngpu)]).decode().strip()
logging.info('CLSP: use gpu' + cvd)
os.environ['CUDA_VISIBLE_DEVICES'] = cvd
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is None:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
elif args.ngpu != len(cvd.split(",")):
logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
sys.exit(1)
# display PYTHONPATH
logging.info('python path = ' + os.environ.get('PYTHONPATH', '(None)'))
# set random seed
logging.info('random seed = %d' % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# load dictionary for debug log
if args.dict is not None:
with open(args.dict, 'rb') as f:
dictionary = f.readlines()
char_list = [entry.decode('utf-8').split(' ')[0]
for entry in dictionary]
char_list.insert(0, '<blank>')
char_list.append('<eos>')
args.char_list = char_list
else:
args.char_list = None
# train
logging.info('backend = ' + args.backend)
if args.num_spkrs == 1:
if args.backend == "chainer":
from espnet.asr.chainer_backend.asr import train
train(args)
elif args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError("Only chainer and pytorch are supported.")
elif args.num_spkrs > 1:
if args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError("Only pytorch is supported.")
# Script entry point: forward the command-line arguments (minus argv[0]) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| vinitunni/CoupledLoss-LAS-ESPNet | espnet/bin/asr_train.py | asr_train.py | py | 20,747 | python | en | code | 2 | github-code | 13 |
14234557366 | # -*- coding: utf-8 -*-
"""
# 数据:20类新闻文本
# 模型:svc
# 调参:gridsearch
"""
### 加载模块
import numpy as np
import pandas as pd
### 载入数据
from sklearn.datasets import fetch_20newsgroups # 20类新闻数据
news = fetch_20newsgroups(subset='all') # 生成20类新闻数据
### 数据分割
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(news.data[:300],
news.target[:300],
test_size=0.25, # 测试集占比25%
random_state=33) # 随机数
### pipe-line
from sklearn.feature_extraction.text import TfidfVectorizer # 特征提取
from sklearn.svm import SVC # 载入模型
from sklearn.pipeline import Pipeline # pipe_line模式
clf = Pipeline([('vect', TfidfVectorizer(stop_words='english', analyzer='word')),
('svc', SVC())])
### 网格搜索
from sklearn.model_selection import GridSearchCV
parameters = {'svc__gamma': np.logspace(-1, 1)} # 参数范围(字典类型)
gs = GridSearchCV(clf, # 模型
parameters, # 参数字典
n_jobs=1, # 使用1个cpu
verbose=0, # 不打印中间过程
cv=5) # 5折交叉验证
gs.fit(X_train, y_train) # 在训练集上进行网格搜索
### 最佳参数在测试集上模型分数
print("best:%f using %s" % (gs.best_score_,gs.best_params_))
### 测试集下的分数
print("test datasets score" % gs.score(X_test, y_test))
### 模型不同参数下的分数
# 方式一(0.20版本将删除)
print(gs.grid_scores_)
# 方式二(0.20推荐的方式)
means = gs.cv_results_['mean_test_score']
params = gs.cv_results_['params']
for mean, param in zip(means,params):
print("%f with: %r" % (mean,param)) | wanglei5205/Machine_learning | GridSearchCV_example/GridSearchCV_example.py | GridSearchCV_example.py | py | 2,109 | python | en | code | 75 | github-code | 13 |
8236500376 | from django.shortcuts import redirect
from django.utils.deprecation import MiddlewareMixin
from account.models import User
class AuthMiddleware(MiddlewareMixin):
    """Login/permission middleware.

    Redirects anonymous users to the login page and restricts the admin
    pages to users whose ``isadmin`` flag is set.
    """

    # Pages that can be visited without being logged in.
    WHITELIST = ("/login/", "/image/code/")

    # Pages that additionally require the admin flag.
    ADMIN_PAGES = ("/adminUser/", "/adminBook/",
                   "/adminRecord/", "/adminAddBook/",
                   "/bookedit/", "/deletebook/", "/useredit/",
                   "/deleteuser/", "/usernew/", "/usereditpassword/")

    def process_request(self, request):
        """Return None to let the request through, or a redirect response."""
        # Skip authentication for pages that do not require a login.
        if request.path_info in self.WHITELIST:
            return
        info_dict = request.session.get("info")
        # (Removed a leftover debug print of the session contents.)
        if not info_dict:
            # Not logged in: send the user to the login page.
            return redirect('/login/')
        if request.path_info in self.ADMIN_PAGES:
            user = User.objects.filter(id=info_dict["id"]).first()
            # Guard against a stale session pointing at a deleted user.
            if user is None or not user.isadmin:
                # Logged in but not an admin: fall back to the search page.
                return redirect("/searchBook/")
            # Cache the admin flag in the session for later requests.
            info_dict.update({"isadmin": True})
            request.session["info"] = info_dict
        return
| yllgl/BookAdminSystem | account/middleware/auth.py | auth.py | py | 1,131 | python | en | code | 0 | github-code | 13 |
38961957442 | import torch
import torch.nn as nn
from torchvision.models import resnet18
import copy
from sr_mobile_pytorch.trainer.utils import imagenet_normalize
class ContentLossVGG(nn.Module):
    """VGG19-based perceptual (content) loss.
    Compares deep VGG feature maps of the super-resolved image (sr) and
    the ground-truth high-resolution image (hr) with an L1 distance.
    """
    def __init__(self, device):
        super().__init__()
        self.device = device
        self.mae_loss = nn.L1Loss()
        # Pretrained VGG19 fetched via torch.hub (requires network access).
        self.vgg = torch.hub.load("pytorch/vision:v0.10.0", "vgg19", pretrained=True)
        # Keep only the convolutional feature layers 0..35 of VGG19.
        self.model = nn.Sequential(*[self.vgg.features[i] for i in range(36)]).eval()
        # Freeze the extractor: it is only used for computing the loss.
        for param in self.model.parameters():
            param.requires_grad = False
        self.model = self.model.to(device)
    def forward(self, hr, sr):
        # Both inputs are normalized with ImageNet statistics first;
        # assumes hr/sr are in the value range imagenet_normalize expects
        # -- TODO confirm against the caller.
        sr = imagenet_normalize(sr)
        hr = imagenet_normalize(hr)
        sr_features = self.model(sr)
        hr_features = self.model(hr)
        return self.mae_loss(hr_features, sr_features)
class ContentLossResNetSimCLR(nn.Module):
    """Perceptual (content) loss using a SimCLR-pretrained ResNet-18.
    The ReLU activations of every BasicBlock are captured with forward
    hooks, and the sr/hr activations are compared with an L1 distance.
    """
    def __init__(self, feature_extactor_path, device):
        super().__init__()
        self.device = device
        self.mae_loss = nn.L1Loss()
        self.model = self.load_resnet_feature_extractor(feature_extactor_path, device)
        # ReLU outputs of the two BasicBlocks in each of the four stages.
        self.layers = [
            "layer1.0.relu",
            "layer1.1.relu",
            "layer2.0.relu",
            "layer2.1.relu",
            "layer3.0.relu",
            "layer3.1.relu",
            "layer4.0.relu",
            "layer4.1.relu",
        ]
        self._features = {layer: torch.empty(0) for layer in self.layers}
        for layer_id in self.layers:
            layer = dict(self.model.named_modules())[layer_id]
            layer.register_forward_hook(self.save_outputs_hook(layer_id))
    def save_outputs_hook(self, layer_id):
        """Return a hook storing *layer_id*'s output in ``self._features``.
        Fixed: the output is intentionally NOT detached any more.  The
        original hook called ``output.detach()``, which cut the autograd
        graph so this loss term contributed no gradient to the network
        being trained.
        """
        def fn(_, __, output):
            self._features[layer_id] = output
        return fn
    def load_resnet_feature_extractor(self, model_path, device):
        """Load SimCLR checkpoint weights into a torchvision ResNet-18.
        Strips the ``backbone.`` prefix used by the SimCLR training code,
        skips the projection-head (fc) weights, freezes all parameters and
        returns the model in eval mode on *device*.
        """
        resnet = resnet18(pretrained=False)
        weights = torch.load(model_path, map_location=device)
        state_dict = weights["state_dict"]
        for k in list(state_dict.keys()):
            if k.startswith("backbone.") and not k.startswith("backbone.fc"):
                state_dict[k[len("backbone.") :]] = state_dict[k]
                del state_dict[k]
        resnet.load_state_dict(state_dict, strict=False)
        for param in resnet.parameters():
            param.requires_grad = False
        return resnet.eval().to(device)
    def forward(self, hr, sr):
        """Sum of per-layer L1 feature distances between hr and sr.
        Assumes inputs are images in the 0-255 range -- TODO confirm.
        """
        hr, sr = hr / 255.0, sr / 255.0
        self.model(sr)
        # Shallow copy: the hooks overwrite self._features on the next
        # forward pass, but the captured tensors stay referenced here.
        # (deepcopy is unnecessary and fails on non-leaf autograd tensors.)
        sr_features = dict(self._features)
        self.model(hr)
        hr_features = dict(self._features)
        loss = torch.tensor(0.0, device=self.device)
        for layer in self.layers:
            loss += self.mae_loss(sr_features[layer], hr_features[layer])
        return loss
class GANLoss:
    """Binary cross-entropy (on raw logits) adversarial losses for GAN training."""
    def __init__(self):
        # One shared criterion; expects unnormalized discriminator logits.
        self.bce_loss = nn.BCEWithLogitsLoss()
    def generator_loss(self, sr_out):
        # The generator is rewarded when its fakes are classified as real (all ones).
        all_real = torch.ones_like(sr_out)
        return self.bce_loss(sr_out, all_real)
    def discriminator_loss(self, hr_out, sr_out):
        # The discriminator should label real HR images 1 and generated SR images 0.
        real_term = self.bce_loss(hr_out, torch.ones_like(hr_out))
        fake_term = self.bce_loss(sr_out, torch.zeros_like(sr_out))
        return real_term + fake_term
| bookbot-hive/sr_mobile_pytorch | sr_mobile_pytorch/trainer/losses.py | losses.py | py | 3,198 | python | en | code | 8 | github-code | 13 |
4026690817 | #searching for a sstring in a group of strings
# Search for a key string in a list of strings entered by the user.
strings = []
n = int(input('How many strings?'))
for i in range(n):
    print('Enter string:', end='')
    strings.append(input())
key = input('Enter the key to search:')
# Report every position at which the key occurs.  'Not found' is printed
# once after the scan (the original printed it for every non-matching
# element; the commented-out flag check showed the intended behaviour).
found = False
for i, value in enumerate(strings):
    if value == key:
        found = True
        print('Found at', i + 1)
if not found:
    print('Not found')
| Athira-Vijayan/Python | strings/search.py | search.py | py | 376 | python | en | code | 0 | github-code | 13 |
12686314408 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Recursively swap the leading pair, then splice in the
        # already-processed remainder of the list.
        if head is None or head.next is None:
            # Zero or one node left: nothing to swap.
            return head
        first, second = head, head.next
        rest = self.swapPairs(second.next)
        # Reverse the pair and attach the processed tail.
        second.next = first
        first.next = rest
        return second
class Al:
    """Tiny arithmetic demo class.

    NOTE(review): the static/class methods operate on the *module-level*
    globals ``a`` and ``b`` (read from user input below), not on the
    instance attributes stored by __init__.
    """

    def __init__(self, a, b):
        # Stored for completeness; the methods below use the globals.
        self.a = a
        self.b = b

    @staticmethod
    def addition():
        c = a + b
        print(c)

    @staticmethod
    def subtraction():
        c = a - b
        print(c)

    @classmethod
    def division(cls):
        # Fixed: the original read cls.a / cls.b, but 'a' and 'b' are never
        # set on the class (only on instances), so every call raised
        # AttributeError.  Use the module globals like the sibling methods.
        c = a / b
        print(c)


a = int(input("enter a value:"))
b = int(input("enter b value:"))
cls1 = Al(a, b)
cls1.addition()
cls1.subtraction()
cls1.division()
Al.division()
# Al.subtraction()
| raajeshkumar5035/python_projects | class_example3.py | class_example3.py | py | 484 | python | en | code | 0 | github-code | 13 |
8883098456 | import cv2 as cv
from cv2 import VideoCapture
cap = VideoCapture(0)
while True:
    ret, frame = cap.read()
    # Stop if the camera could not deliver a frame (e.g. device unplugged).
    if not ret:
        break
    cv.imshow("Camera feed", frame)
    # Fixed: waitKey returns an int key code, so comparing it to the string
    # "q" was always False and the loop could never be exited with 'q'.
    if cv.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv.destroyAllWindows()
26535507726 | import numpy as np
# Network dimensions: 1 input feature, 2 hidden units, 2 output units.
INPUT_LAYER_SIZE = 1
HIDDEN_LAYER_SIZE = 2
OUTPUT_LAYER_SIZE = 2
def init_weights():
    """Draw He-initialized weight matrices for the hidden and output layers."""
    # He initialization scales standard-normal draws by sqrt(2 / fan_in).
    hidden_scale = np.sqrt(2.0 / INPUT_LAYER_SIZE)
    output_scale = np.sqrt(2.0 / HIDDEN_LAYER_SIZE)
    Wh = np.random.randn(INPUT_LAYER_SIZE, HIDDEN_LAYER_SIZE) * hidden_scale
    Wo = np.random.randn(HIDDEN_LAYER_SIZE, OUTPUT_LAYER_SIZE) * output_scale
    return Wh, Wo
def init_bias():
    """Return constant 0.1 bias row vectors for the hidden and output layers."""
    Bh = 0.1 * np.ones((1, HIDDEN_LAYER_SIZE))
    Bo = 0.1 * np.ones((1, OUTPUT_LAYER_SIZE))
    return Bh, Bo
def relu(Z):
    """Element-wise rectified linear unit: max(Z, 0)."""
    return np.maximum(Z, 0)
def feed_forward(X):
    """Run one forward pass through the 2-layer ReLU network.

    X -- input matrix.
    Fresh biases and random weights are drawn on every call; the return
    value is the output-layer activations (the predictions).
    """
    Bh, Bo = init_bias()
    Wh, Wo = init_weights()
    # Hidden layer: affine transform followed by ReLU.
    hidden = relu(np.dot(X, Wh) + Bh)
    # Output layer: same pattern applied to the hidden activations.
    prediction = relu(np.dot(hidden, Wo) + Bo)
    return prediction
# Demo: one forward pass on scalar input 1 with freshly drawn random weights.
result = feed_forward(1)
print(result)
3726554060 | import gym
class AutoStopEnv(gym.Wrapper):
    """A env wrapper that stops rollout at step max_path_length."""

    def __init__(self, env=None, env_name="", max_path_length=100):
        """Wrap *env* (or gym.make(*env_name*)) and truncate episodes at *max_path_length* steps."""
        if env_name:
            super().__init__(gym.make(env_name))
        else:
            super().__init__(env)
        self._rollout_step = 0
        self._max_path_length = max_path_length

    def step(self, actions):
        """Step the wrapped env, forcing done=True once the step limit is reached."""
        self._rollout_step += 1
        next_obs, reward, done, info = self.env.step(actions)
        if self._rollout_step == self._max_path_length:
            done = True
            self._rollout_step = 0
        return next_obs, reward, done, info

    def reset(self, **kwargs):
        """Reset the wrapped env and the step counter.

        Fixed: the counter was previously only cleared when the limit was
        hit, so an episode that terminated early left a stale count behind
        and the following episode was truncated too soon.
        """
        self._rollout_step = 0
        return self.env.reset(**kwargs)
| jaekyeom/IBOL | garaged/tests/wrappers.py | wrappers.py | py | 734 | python | en | code | 28 | github-code | 13 |
6573935112 | """
Final Project by Luit Meinen, last edited on the 20th of January.
Required libraries: Chess, pyqt5, speech_recognition and pyttsx3
Main class: runs the QSVGWidget and starts the game loop thread
"""
import chess
import chess.svg
import sys
from PyQt5.QtSvg import QSvgWidget
from PyQt5.QtWidgets import QApplication
from game_loop import GameLoop
class MainWindow(QSvgWidget):
    """Top-level widget that displays the chess board as an SVG.
    Owns the GameLoop worker thread and re-renders the board every time
    the thread emits its send_board signal.
    """
    def __init__(self):
        super().__init__()
        self.setGeometry(0, 0, 1000, 1000)
        # create the window
        self.window = QSvgWidget(parent=self)
        self.window.setGeometry(10, 10, 950, 950)
        # initialize the board svg
        self.chessboardSvg = ""
        # create and start the game_loop.py thread
        self.gameLoop = GameLoop()
        self.gameLoop.start()
        self.gameLoop.send_board.connect(self.load_board)
    def load_board(self, board, flipped):
        # Render the position to SVG bytes (flipped for the black
        # perspective) and hand them to the inner SVG widget.
        self.chessboardSvg = chess.svg.board(board, flipped=flipped).encode("UTF-8")
        self.window.load(self.chessboardSvg)
if __name__ == "__main__":
app = QApplication([])
window = MainWindow()
window.show()
sys.exit(app.exec_())
| LoudMines/OTB-AI | main.py | main.py | py | 1,149 | python | en | code | 0 | github-code | 13 |
74564788498 | #!/usr/bin/env python
"""
_Workflow_
Unittest for the WMCore.DataStructs.Workflow class.
"""
import unittest
from WMCore.DataStructs.Workflow import Workflow
from WMCore.DataStructs.Fileset import Fileset
class WorkflowTest(unittest.TestCase):
    """
    _WorkflowTest_

    Unit tests for the DataStructs Workflow class.
    """
    def testDefinition(self):
        """
        Tests to make sure Workflow is defined correctly
        """
        testSpec = "test"
        testOwner = "mnorman"
        testName = "testName"
        testWorkflow = Workflow(spec = testSpec, owner = testOwner, name = testName)
        self.assertEqual(testWorkflow.spec, testSpec)
        self.assertEqual(testWorkflow.owner, testOwner)
        self.assertEqual(testWorkflow.name, testName)
        return
    def testAddOutput(self):
        """
        _testAddOutput_

        Tests the addOutput functionality of the DataStructs Workflow.
        """
        filesetA = Fileset(name = "filesetA")
        filesetB = Fileset(name = "filesetB")
        filesetC = Fileset(name = "filesetC")
        testWorkflow = Workflow(spec = "test", owner = "mnorman")
        # out1 gets two (output, merged-output) mappings; out2 gets a
        # single mapping with no merged fileset.
        testWorkflow.addOutput("out1", filesetA, filesetB)
        testWorkflow.addOutput("out1", filesetB, filesetA)
        testWorkflow.addOutput("out2", filesetC)
        self.assertEqual(len(testWorkflow.outputMap["out1"]), 2,
                         "Error: There should be two mappings for out1.")
        # Fixed message: this assertion checks for exactly ONE mapping
        # (the original message was copy-pasted from the out1 check).
        self.assertEqual(len(testWorkflow.outputMap["out2"]), 1,
                         "Error: There should be one mapping for out2.")
        self.assertTrue({"output_fileset": filesetA,
                         "merged_output_fileset": filesetB} in testWorkflow.outputMap["out1"],
                        "Error: Fileset A should be in the output map.")
        self.assertTrue({"output_fileset": filesetB,
                         "merged_output_fileset": filesetA} in testWorkflow.outputMap["out1"],
                        "Error: Fileset B should be in the output map.")
        self.assertEqual(filesetC, testWorkflow.outputMap["out2"][0]["output_fileset"],
                         "Error: Fileset C should be in the output map.")
        self.assertEqual(None, testWorkflow.outputMap["out2"][0]["merged_output_fileset"],
                         "Error: The merged output should be None.")
        return
if __name__ == '__main__':
unittest.main()
| dmwm/WMCore | test/python/WMCore_t/DataStructs_t/Workflow_t.py | Workflow_t.py | py | 2,383 | python | en | code | 44 | github-code | 13 |
48489337574 | #Vanshika Shah
#! /usr/bin/env python3
# Echo Server
import sys
import socket
import struct
import random
# Read server IP address and port from command-line arguments
serverIP = sys.argv[1]
serverPort = int(sys.argv[2])
# Create a UDP socket. Notice the use of SOCK_DGRAM for UDP packets
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Assign server IP address and port number to socket
serverSocket.bind((serverIP, serverPort))
print("The server is ready to receive on port: " + str(serverPort) + "\n")
# loop forever listening for incoming UDP messages
while True:
    # Draw rand in [0, 10] to simulate packet loss (~4/11 drop rate).
    rand = random.randint(0,10) #https://www.programiz.com/python-programming/examples/random-number
    # Receive data from client
    data, address = serverSocket.recvfrom(1024)
    # Packet layout: two signed shorts (message type, sequence number).
    seqNum = struct.unpack('hh', data)[1]
    # Reply packet: type 2 echoing the client's sequence number.
    response = struct.pack('hh', 2, seqNum)
    # Server responds only when rand >= 4; otherwise the ping is "dropped".
    # (The original comment said "responds if random < 4", which was inverted.)
    if rand >= 4:
        print("Responding to ping request with sequence number: " + str(seqNum) )
        serverSocket.sendto(response, address)
    else:
        print("Message with sequence number " + str(seqNum) + " dropped")
| vns25/Computer-Networks | HW2/ping-server.py | ping-server.py | py | 1,104 | python | en | code | 0 | github-code | 13 |
21569130295 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 9 10:59:02 2022
@author: hoshino
"""
import numpy as np
import pandas as pd
from modules.concn_effect_relationship import concentration_effect_relationship
# モデルの構造の選択
MODEL_TYPE = {'C':'Cyclic', 'R':'Reciprocal', 'B':'BindingModel'}['R']
# In Vivo と In Vitro の選択
InVivo_InVitro = ['InVivo', 'InVitro'][1]
# 反応の計算時間
cal_time = 5.0
# 最適化結果のパラメータの読み込み
if MODEL_TYPE == 'Cyclic':
from minimized_parameters_cyclic import parameters
elif MODEL_TYPE == 'Reciprocal':
from minimized_parameters_reciprocal import parameters
elif MODEL_TYPE == 'BindingModel':
from minimized_parameters_bindingmodel import parameters
###########################
# KD1とKD2の変化に対する EC50, gammaの変化特性
# Sweep KD2 over five decades while holding KD1 fixed, and record how the
# EC50 (C50) and Hill coefficient (gamma) respond.
KD2_LIST = (10**-8)*(10**np.linspace(0,5,100))
KD1_LIST = (10**-8)*np.ones(100)

# Drug concentration grid (depends on the in vivo / in vitro setting).
if InVivo_InVitro == 'InVivo':
    D = 10**np.linspace(-8.0 , -4, 101)
if InVivo_InVitro == 'InVitro':
    D = 10**np.linspace(-10 , -5, 101)

# Dissociation rate constants to sweep (model dependent).
if MODEL_TYPE == 'Cyclic' or MODEL_TYPE == 'Reciprocal':
    k_diss_D_list = [1,10,60]
elif MODEL_TYPE == 'BindingModel':
    k_diss_D_list = [9999] # dummy for avoiding not implemented error

# Collect one record per (k_dissD, KD1, KD2) combination.  Rows are
# accumulated in a plain list and converted to a DataFrame once at the
# end: DataFrame.append was removed in pandas 2.0 and was quadratic.
records = []
for k_dissD in k_diss_D_list:
    for KD1, KD2 in zip(KD1_LIST, KD2_LIST):
        parameters['k_dissD1'] = k_dissD
        parameters['k_dissD2'] = k_dissD
        parameters['k_assocD1'] = k_dissD/KD1
        parameters['k_assocD2'] = k_dissD/KD2
        (c50, gamma), cod, _ = \
            concentration_effect_relationship(InVivo_InVitro, MODEL_TYPE, parameters,
                                              free_fraction=1.0, fitting=True,
                                              d_list=D, cal_time = cal_time)
        print(f'C50={c50:.3e}, gamma={gamma:.3f}')
        records.append({'KD1': KD1,
                        'KD2': KD2,
                        'k_dissD': k_dissD,
                        'muD': KD1/KD2,
                        'C50': c50,
                        'gamma': gamma,
                        'cod': cod})
result = pd.DataFrame(records)
result.to_csv(f'fig_{MODEL_TYPE.lower()}/parameter_sweep_{InVivo_InVitro.lower()}.csv')
| hoshino06/simultaneous_ndnb_modeling | fig_parameter_sweep.py | fig_parameter_sweep.py | py | 2,288 | python | en | code | 0 | github-code | 13 |
25585970110 | from exfil.aws.exfil import ExfilS3
from exfil.dns.exfil import ExfilDNS
from exfil.email.exfil import exfilEmail
from exfil.ftp.exfil import exfilFTP
from exfil.git.exfil import exfiltrate_to_github
from exfil.http_advanced.graphql.exfil import ExfilGraphQL
from exfil.http_advanced.grpc.exfil import ExfilGRPC
from exfil.http_advanced.websocket.exfil import ExfilWebSocket
from exfil.http_standard.exfil import ExfilHTTP
from exfil.icmp.exfil import exfilICMP
from exfil.rpc.exfil import exfilRPC
from exfil.ssh.exfil import exfilSSH
from exfil.tcp.exfil import ExfilTCP
from exfil.udp.exfil import exfilUDP
import logging
# can be file or data testcases
def run_dns(config, testcase, location, file = False, port=53):
    """Run DNS exfiltration test cases.

    *location* selects the record type ("A", "AAAA", "TXT") or "*" for all
    three.  Returns a list of result dicts; the list is empty when the DNS
    server is missing from the config or the location is invalid.
    """
    logging.debug("Running run_dns(location = %s, file = %s)" % (location, file))
    ret = []
    if "server" not in config["exfil"]["dns"] or not isinstance(config["exfil"]["dns"]["server"], str):
        logging.warning("DNS server not specified in config file. Skipping DNS Exfiltration.")
        print("[-] DNS server not specified in config file. Skipping DNS Exfiltration.")
        return ret
    server = config["exfil"]["dns"]["server"]
    logging.debug("DNS server specified in config file: %s" % server)

    def _entry(record_type):
        # Build one result record for a single DNS record type.
        # (Fixed: the TXT branch previously labelled its record "TXt".)
        result = {
            "method": "DNS",
            "location": record_type,
            "testcase": testcase,
            "ret": ExfilDNS(server, record_type, testcase, file, port)
        }
        logging.debug("Finished %s DNS exfiltration" % record_type)
        return result

    if location == "*":
        logging.debug("Running DNS exfiltration for all locations")
        for record_type in ("TXT", "A", "AAAA"):
            ret.append(_entry(record_type))
        return ret
    elif location in ("A", "AAAA", "TXT"):
        logging.debug("Running %s DNS exfiltration" % location)
        ret.append(_entry(location))
    else:
        logging.warning("Invalid DNS location specified. Skipping DNS Exfiltration.")
        print("[-] Invalid DNS location specified. Skipping DNS Exfiltration.")
    logging.debug("run_dns returning")
    return ret
# can be file or data testcases
def run_email(config, testcase, location, file = False):
    """Run email (SMTP) exfiltration test cases.

    Reads the SMTP settings from config["exfil"]["email"]; *location*
    selects where the payload is embedded ("SUBJECT", "BODY",
    "ATTACHMENT", or "*" for all applicable).  Attachments are only used
    when *file* is True.  Returns a list of result dicts (empty when a
    mandatory setting -- server or to -- is missing).
    """
    logging.debug("Running run_email(location = %s, file = %s)" % (location, file))
    ret = []
    # Defaults; server and to are mandatory and abort the run when absent.
    server = ""
    port = 25
    to = ""
    e_from = "dlp@dlp.com"
    from_password = ""
    tls = False
    if "server" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["server"], str):
        logging.debug("Email server specified in config file: %s" % config["exfil"]["email"]["server"])
        server = config["exfil"]["email"]["server"]
    else:
        logging.warning("Invalid Email Server specified. Skipping Email Exfiltration.")
        print("[-] Invalid Email Server specified. Skipping Email Exfiltration.")
        return []
    if "port" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["port"], int):
        logging.debug("Email port specified in config file: %s" % config["exfil"]["email"]["port"])
        port = config["exfil"]["email"]["port"]
    else:
        logging.warning("Invalid Email Port specified. Defaulting to 25.")
        print("[-] Invalid Email Port specified. Defaulting to 25.")
    if "to" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["to"], str):
        logging.debug("Email To specified in config file: %s" % config["exfil"]["email"]["to"])
        to = config["exfil"]["email"]["to"]
    else:
        logging.warning("Invalid Email To specified. Skipping Email Exfiltration.")
        print("[-] Invalid Email To specified. Skipping Email Exfiltration.")
        return []
    if "from" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["from"], str):
        logging.debug("Email From specified in config file: %s" % config["exfil"]["email"]["from"])
        e_from = config["exfil"]["email"]["from"]
    else:
        logging.warning("Invalid Email From specified. Defaulting to dlp@dlp.com")
        print("[-] Invalid Email From specified. Defaulting to dlp@dlp.com")
    if "from_password" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["from_password"], str):
        logging.debug("Email From Password specified in config file: %s" % config["exfil"]["email"]["from_password"])
        from_password = config["exfil"]["email"]["from_password"]
    else:
        logging.warning("Invalid Email From Password specified. Defaulting to no password.")
        print("[-] Invalid Email From Password specified. Defaulting to no password.")
    if "tls" in config["exfil"]["email"] and isinstance(config["exfil"]["email"]["tls"], bool):
        logging.debug("Email TLS specified in config file: %s" % config["exfil"]["email"]["tls"])
        tls = config["exfil"]["email"]["tls"]
    else:
        logging.warning("Invalid Email TLS specified. Defaulting to False.")
        print("[-] Invalid Email TLS specified. Defaulting to False.")
    # Build one result record per requested payload location.
    if location == "*":
        logging.debug("Running Email exfiltration with location = *")
        ret.append({
            "method": "Email",
            "location": "SUBJECT",
            "testcase": testcase,
            "ret": exfilEmail(server, port, to, e_from, from_password, "subject", testcase, tls)
        })
        logging.debug("Finished SUBJECT Email exfiltration")
        ret.append({
            "method": "Email",
            "location": "BODY",
            "testcase": testcase,
            "ret": exfilEmail(server, port, to, e_from, from_password, "body", testcase, tls)
        })
        logging.debug("Finished BODY Email exfiltration")
        if file:
            ret.append({
                "method": "Email",
                "location": "ATTACHMENT",
                "testcase": testcase,
                "ret": exfilEmail(server, port, to, e_from, from_password, "attachment", testcase, tls)
            })
            logging.debug("Finished ATTACHMENT Email exfiltration")
    elif location == "SUBJECT":
        logging.debug("Running SUBJECT Email exfiltration")
        ret.append({
            "method": "Email",
            "location": "SUBJECT",
            "testcase": testcase,
            "ret": exfilEmail(server, port, to, e_from, from_password, "subject", testcase, tls)
        })
        logging.debug("Finished SUBJECT Email exfiltration")
    elif location == "BODY":
        logging.debug("Running BODY Email exfiltration")
        ret.append({
            "method": "Email",
            "location": "BODY",
            "testcase": testcase,
            "ret": exfilEmail(server, port, to, e_from, from_password, "body", testcase, tls)
        })
        logging.debug("Finished BODY Email exfiltration")
    elif location == "ATTACHMENT" and file:
        logging.debug("Running ATTACHMENT Email exfiltration")
        ret.append({
            "method": "Email",
            "location": "ATTACHMENT",
            "testcase": testcase,
            "ret": exfilEmail(server, port, to, e_from, from_password, "attachment", testcase, tls)
        })
        logging.debug("Finished ATTACHMENT Email exfiltration")
    logging.debug("Finished Email exfiltration")
    return ret
# can only be file testcases
def run_ftp(config, testcase):
    """Exfiltrate a file testcase over FTP/FTPS.

    Reads config["exfil"]["ftp"]: server (required), directory (default "/"),
    username (default "anonymous"), password (default ""), tls (default False).
    Returns a one-element list of result dicts, or [] when the server is
    missing/invalid.
    """
    logging.debug("running run_ftp")
    ret = []
    server = ""
    directory = "/"
    username = "anonymous"
    password = ""
    tls = False
    ftp_cfg = config["exfil"]["ftp"]
    if "server" in ftp_cfg and isinstance(ftp_cfg["server"], str):
        logging.debug("FTP server specified in config file: %s" % ftp_cfg["server"])
        server = ftp_cfg["server"]
    else:
        logging.warning("Invalid FTP Server specified. Skipping FTP Exfiltration.")
        print("[-] Invalid FTP Server specified. Skipping FTP Exfiltration.")
        return []
    if "directory" in ftp_cfg and isinstance(ftp_cfg["directory"], str):
        logging.debug("FTP directory specified in config file: %s" % ftp_cfg["directory"])
        directory = ftp_cfg["directory"]
    else:
        # Bug fix: this branch used to `return []` even though both messages
        # promise to default to "/"; now it actually defaults and continues.
        logging.warning("Invalid FTP Directory specified. Defaulting to /")
        print("[-] Invalid FTP Directory specified. Defaulting to /")
    if "username" in ftp_cfg and isinstance(ftp_cfg["username"], str):
        logging.debug("FTP username specified in config file: %s" % ftp_cfg["username"])
        username = ftp_cfg["username"]
    else:
        logging.warning("Invalid FTP Username specified. Defaulting to 'anonymous'.")
        print("[-] Invalid FTP Username specified. Defaulting to 'anonymous'.")
    if "password" in ftp_cfg and isinstance(ftp_cfg["password"], str):
        logging.debug("FTP password specified in config file: %s" % ftp_cfg["password"])
        password = ftp_cfg["password"]
    else:
        logging.warning("Invalid FTP Password specified. Defaulting to no password.")
        print("[-] Invalid FTP Password specified. Defaulting to no password.")
    if "tls" in ftp_cfg and isinstance(ftp_cfg["tls"], bool):
        logging.debug("FTP TLS specified in config file: %s" % ftp_cfg["tls"])
        tls = ftp_cfg["tls"]
    else:
        logging.warning("Invalid FTP TLS specified. Defaulting to False.")
        print("[-] Invalid FTP TLS specified. Defaulting to False.")
    ret.append({
        "method": "FTP",
        "location": directory,
        "testcase": testcase,
        "ret": exfilFTP(server, directory, testcase, tls, username, password)
    })
    logging.debug("Finished FTP exfiltration")
    return ret
# can be file or data testcase
def run_git(config, testcase, file = False):
    """Exfiltrate a data or file testcase by pushing it to a GitHub repo.

    Reads config["exfil"]["git"]: token, owner, repo, path (all required
    strings). Returns a one-element list of result dicts, or [] when any
    required key is missing/invalid.
    """
    logging.debug("running run_git(file=%s)" % str(file))
    ret = []
    # Bug fix: these are string settings; they were initialized as lists.
    token = ""
    owner = ""
    repo = ""
    path = ""
    git_cfg = config["exfil"]["git"]
    if "token" in git_cfg and isinstance(git_cfg["token"], str):
        logging.debug("Git token specified in config file: %s" % git_cfg["token"])
        token = git_cfg["token"]
    else:
        logging.warning("Invalid Git Token specified. Skipping Git Exfiltration.")
        print("[-] Invalid Git Token specified. Skipping Git Exfiltration.")
        return []
    if "owner" in git_cfg and isinstance(git_cfg["owner"], str):
        logging.debug("Git owner specified in config file: %s" % git_cfg["owner"])
        owner = git_cfg["owner"]
    else:
        logging.warning("Invalid Git Owner specified. Skipping Git Exfiltration.")
        print("[-] Invalid Git Owner specified. Skipping Git Exfiltration.")
        return []
    if "repo" in git_cfg and isinstance(git_cfg["repo"], str):
        logging.debug("Git repo specified in config file: %s" % git_cfg["repo"])
        repo = git_cfg["repo"]
    else:
        logging.warning("Invalid Git Repo specified. Skipping Git Exfiltration.")
        print("[-] Invalid Git Repo specified. Skipping Git Exfiltration.")
        return []
    if "path" in git_cfg and isinstance(git_cfg["path"], str):
        logging.debug("Git path specified in config file: %s" % git_cfg["path"])
        path = git_cfg["path"]
    else:
        logging.warning("Invalid Git Path specified. Skipping Git Exfiltration.")
        print("[-] Invalid Git Path specified. Skipping Git Exfiltration.")
        return []
    ret.append({
        # Bug fix: this entry was mislabeled "FTP" (copy-paste); it is Git.
        "method": "Git",
        "location": repo + ":" + path,
        "testcase": testcase,
        "ret": exfiltrate_to_github(token, owner, repo, path, testcase, file)
    })
    logging.debug("Finished Git exfiltration")
    return ret
# can be file or data testcase
def run_graphql(config, testcase, file = False):
    """Exfiltrate a data or file testcase through a GraphQL endpoint.

    Requires config["exfil"]["graphql"]["url"]; returns [] when it is
    missing or not a string.
    """
    logging.debug("running run_graphql(file=%s)" % str(file))
    graphql_cfg = config["exfil"]["graphql"]
    endpoint = graphql_cfg.get("url")
    if not isinstance(endpoint, str):
        logging.warning("Invalid GraphQL URL specified. Skipping GraphQL Exfiltration.")
        print("[-] Invalid GraphQL URL specified. Skipping GraphQL Exfiltration.")
        return []
    logging.debug("GraphQL URL specified in config file: %s" % endpoint)
    results = [{
        "method": "GraphQL",
        "location": endpoint,
        "testcase": testcase,
        "ret": ExfilGraphQL(endpoint, testcase, file),
    }]
    logging.debug("Finished GraphQL exfiltration")
    return results
# can be file or data testcase
async def run_websockets(config, testcase, file = False):
    """Exfiltrate a data or file testcase over a WebSocket connection.

    Requires config["exfil"]["websockets"]["url"]; a missing ws:// / wss://
    scheme is prepended with ws://. Returns [] when the URL is invalid.
    """
    logging.debug("running run_websockets(file=%s)" % str(file))
    ws_cfg = config["exfil"]["websockets"]
    endpoint = ws_cfg.get("url")
    if not isinstance(endpoint, str):
        logging.warning("Invalid websockets URL specified. Skipping websockets Exfiltration.")
        print("[-] Invalid websockets URL specified. Skipping websockets Exfiltration.")
        return []
    logging.debug("websockets URL specified in config file: %s" % endpoint)
    if not endpoint.startswith(('ws://', 'wss://')):
        logging.debug("WebSocket URI scheme missing. Prepending ws:// to the URL.")
        endpoint = 'ws://' + endpoint
    outcome = await ExfilWebSocket(endpoint, testcase, file)
    results = [{
        "method": "WebSockets",
        "location": endpoint,
        "testcase": testcase,
        "ret": outcome,
    }]
    logging.debug("Finished WebSockets exfiltration")
    return results
# can be file or data testcase
def run_grpc(config, testcase, file = False):
    """Exfiltrate a data or file testcase over gRPC.

    Requires config["exfil"]["grpc"]["server"] and ["port"] (both strings);
    returns [] when either is missing/invalid.
    """
    logging.debug("running run_grpc(file=%s)" % str(file))
    grpc_cfg = config["exfil"]["grpc"]
    host = grpc_cfg.get("server")
    if not isinstance(host, str):
        logging.warning("Invalid gRPC Server specified. Skipping gRPC Exfiltration.")
        print("[-] Invalid gRPC Server specified. Skipping gRPC Exfiltration.")
        return []
    logging.debug("gRPC Server specified in config file: %s" % host)
    grpc_port = grpc_cfg.get("port")
    if not isinstance(grpc_port, str):
        logging.warning("Invalid gRPC Port specified. Skipping gRPC Exfiltration.")
        print("[-] Invalid gRPC Port specified. Skipping gRPC Exfiltration.")
        return []
    logging.debug("gRPC Port specified in config file: %s" % grpc_port)
    results = [{
        "method": "gRPC",
        "location": host + ":" + grpc_port,
        "testcase": testcase,
        "ret": ExfilGRPC(host, grpc_port, testcase, file)
    }]
    logging.debug("Finished gRPC exfiltration")
    return results
# can only be a data testcase. TODO: add file support
def run_http(config, testcase):
    """Exfiltrate a data testcase over HTTP and HTTPS.

    Reads config["exfil"]["http"]: url (required), http_port (default 80),
    https_port (default 443), method (default GET; "*" = all verbs) and
    location (default urlparam; "*" = all locations). Every selected
    (method, location) pair is attempted over both http:// and https://.
    Returns a list of result dicts, or [] when the URL is missing/invalid.

    Bug fixes vs. the previous version: the partially-wildcarded branches
    referenced undefined loop variables (`loc` when only the method was "*",
    `meth` when only the location was "*"), raising NameError, and one HTTPS
    entry was mislabeled "HTTP". The four wildcard branches are now a single
    nested loop, which makes those mistakes impossible.
    """
    logging.debug("running run_http()")
    ret = []
    url = ""
    method = "GET"
    location = "urlparam"
    http_port = 80
    https_port = 443
    valid_methods = ["GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"]
    valid_locations = ["urlparam", "urlquery", "header", "body", "cookies"]
    http_cfg = config["exfil"]["http"]
    if "url" in http_cfg and isinstance(http_cfg["url"], str):
        logging.debug("HTTP URL specified in config file: %s" % http_cfg["url"])
        url = http_cfg["url"]
    else:
        logging.warning("Invalid HTTP URL specified. Skipping HTTP Exfiltration.")
        print("[-] Invalid HTTP URL specified. Skipping HTTP Exfiltration.")
        return []
    if "http_port" in http_cfg and isinstance(http_cfg["http_port"], int):
        logging.debug("HTTP Port specified in config file: %s" % http_cfg["http_port"])
        http_port = http_cfg["http_port"]
    else:
        logging.warning("Invalid HTTP Port specified. Defaulting to 80.")
    if "https_port" in http_cfg and isinstance(http_cfg["https_port"], int):
        logging.debug("HTTPS Port specified in config file: %s" % http_cfg["https_port"])
        https_port = http_cfg["https_port"]
    else:
        logging.warning("Invalid HTTPS Port specified. Defaulting to 443.")
    if "method" in http_cfg and isinstance(http_cfg["method"], str) \
            and http_cfg["method"].upper() in valid_methods + ["*"]:
        logging.debug("HTTP Method specified in config file: %s" % http_cfg["method"])
        method = http_cfg["method"].upper()
    else:
        logging.warning("Invalid HTTP Method specified. Defaulting to GET.")
        print("[-] Invalid HTTP Method specified. Defaulting to GET.")
    if "location" in http_cfg and isinstance(http_cfg["location"], str) \
            and http_cfg["location"].lower() in valid_locations + ["*"]:
        logging.debug("HTTP Location specified in config file: %s" % http_cfg["location"])
        location = http_cfg["location"].lower()
    else:
        logging.warning("Invalid HTTP Location specified. Defaulting to urlparam.")
        print("[-] Invalid HTTP Location specified. Defaulting to urlparam.")
    # "*" expands to every known verb/location; otherwise use the single value.
    methods = valid_methods if method == "*" else [method]
    locations = valid_locations if location == "*" else [location]
    for meth in methods:
        for loc in locations:
            logging.debug("Running HTTP exfiltration for method %s and location %s" % (meth, loc))
            ret.append({
                "method": "HTTP",
                "location": "http://" + meth + " " + url + ":" + str(http_port) + " - " + loc,
                "testcase": testcase,
                "ret": ExfilHTTP("http://" + url + ":" + str(http_port), meth, loc, testcase)
            })
            ret.append({
                "method": "HTTPS",
                "location": "https://" + meth + " " + url + ":" + str(https_port) + " - " + loc,
                "testcase": testcase,
                "ret": ExfilHTTP("https://" + url + ":" + str(https_port), meth, loc, testcase)
            })
            logging.debug("Finished HTTP exfiltration for method %s and location %s" % (meth, loc))
    logging.debug("Finished HTTP exfiltration")
    return ret
# can only be a data testcase.
def run_icmp(config, testcase):
    """Exfiltrate a data testcase inside ICMP packets.

    Requires config["exfil"]["icmp"]["ip"]; returns [] when it is
    missing or not a string.
    """
    logging.debug("running run_icmp()")
    icmp_cfg = config["exfil"]["icmp"]
    target_ip = icmp_cfg.get("ip")
    if not isinstance(target_ip, str):
        logging.warning("Invalid ICMP IP specified. Skipping ICMP Exfiltration.")
        print("[-] Invalid ICMP IP specified. Skipping ICMP Exfiltration.")
        return []
    logging.debug("ICMP IP specified in config file: %s" % target_ip)
    results = [{
        "method": "ICMP",
        "location": target_ip,
        "testcase": testcase,
        "ret": exfilICMP(target_ip, testcase)
    }]
    logging.debug("Finished ICMP exfiltration")
    return results
# can be a data or file testcase
def run_rpc(config, testcase, file = False):
    """Exfiltrate a data or file testcase over RPC.

    Requires config["exfil"]["rpc"]["server"] and ["port"], both lists
    (multiple endpoints are handed to exfilRPC as-is). Returns [] when
    either is missing/invalid.
    """
    logging.debug("running run_rpc()")
    ret = []
    server = []
    port = []
    rpc_cfg = config["exfil"]["rpc"]
    if "server" in rpc_cfg and isinstance(rpc_cfg["server"], list):
        logging.debug("RPC Server specified in config file: %s" % rpc_cfg["server"])
        server = rpc_cfg["server"]
    else:
        logging.warning("Invalid RPC Server specified. Skipping RPC Exfiltration.")
        print("[-] Invalid RPC Server specified. Skipping RPC Exfiltration.")
        return []
    if "port" in rpc_cfg and isinstance(rpc_cfg["port"], list):
        logging.debug("RPC Port specified in config file: %s" % rpc_cfg["port"])
        port = rpc_cfg["port"]
    else:
        logging.warning("Invalid RPC Port specified. Skipping RPC Exfiltration.")
        print("[-] Invalid RPC Port specified. Skipping RPC Exfiltration.")
        return []
    ret.append({
        "method": "RPC",
        # Bug fix: server and port are lists here, so `server + ":" + port`
        # raised TypeError; format them into a string instead.
        "location": "%s:%s" % (server, port),
        "testcase": testcase,
        "ret": exfilRPC(server, port, testcase, file)
    })
    logging.debug("Finished RPC exfiltration")
    return ret
# can be a data or file testcase
def run_ssh(config, testcase, file = False):
    """Exfiltrate a data or file testcase over SSH/SCP.

    Requires config["exfil"]["ssh"]: server, port, username and password
    (all strings). Returns [] when any of them is missing/invalid.
    """
    logging.debug("running run_ssh(file = %s)" % file)
    ssh_cfg = config["exfil"]["ssh"]
    settings = {}
    # Every field is mandatory; bail out on the first invalid one.
    for field, label in (("server", "Server"), ("port", "Port"),
                         ("username", "Username"), ("password", "Password")):
        value = ssh_cfg.get(field)
        if not isinstance(value, str):
            logging.warning("Invalid SSH %s specified. Skipping SSH Exfiltration." % label)
            print("[-] Invalid SSH %s specified. Skipping SSH Exfiltration." % label)
            return []
        logging.debug("SSH %s specified in config file: %s" % (label, value))
        settings[field] = value
    results = [{
        "method": "SSH",
        "location": "%s:%s@%s:%s" % (settings["username"], settings["password"],
                                     settings["server"], settings["port"]),
        "testcase": testcase,
        "ret": exfilSSH(settings["server"], settings["port"], settings["username"],
                        settings["password"], testcase, file)
    }]
    logging.debug("Finished SSH exfiltration")
    return results
# can only be data testcase. TODO: add file support
def run_tcp(config, testcase):
    """Exfiltrate a data testcase over a raw TCP connection.

    Requires config["exfil"]["tcp"]["ip"] and ["port"] (both strings);
    returns [] when either is missing/invalid.
    """
    logging.debug("running run_tcp()")
    tcp_cfg = config["exfil"]["tcp"]
    target_ip = tcp_cfg.get("ip")
    if not isinstance(target_ip, str):
        logging.warning("Invalid TCP IP specified. Skipping TCP Exfiltration.")
        print("[-] Invalid TCP IP specified. Skipping TCP Exfiltration.")
        return []
    logging.debug("TCP IP specified in config file: %s" % target_ip)
    target_port = tcp_cfg.get("port")
    if not isinstance(target_port, str):
        logging.warning("Invalid TCP Port specified. Skipping TCP Exfiltration.")
        print("[-] Invalid TCP Port specified. Skipping TCP Exfiltration.")
        return []
    logging.debug("TCP Port specified in config file: %s" % target_port)
    results = [{
        "method": "TCP",
        "location": target_ip + ":" + target_port,
        "testcase": testcase,
        "ret": ExfilTCP(target_ip, target_port, testcase),
    }]
    logging.debug("Finished TCP exfiltration")
    return results
# can only be data testcase. TODO: add file support
def run_udp(config, testcase):
    """Exfiltrate a data testcase over UDP datagrams.

    Requires config["exfil"]["udp"]["ip"] (str) and ["port"] (int);
    returns [] when either is missing/invalid.
    """
    logging.debug("running run_udp()")
    udp_cfg = config["exfil"]["udp"]
    target_ip = udp_cfg.get("ip")
    if not isinstance(target_ip, str):
        logging.warning("Invalid UDP IP specified. Skipping UDP Exfiltration.")
        print("[-] Invalid UDP IP specified. Skipping UDP Exfiltration.")
        return []
    logging.debug("UDP IP specified in config file: %s" % target_ip)
    target_port = udp_cfg.get("port")
    if not isinstance(target_port, int):
        logging.warning("Invalid UDP Port specified. Skipping UDP Exfiltration.")
        print("[-] Invalid UDP Port specified. Skipping UDP Exfiltration.")
        return []
    logging.debug("UDP Port specified in config file: %s" % target_port)
    results = [{
        "method": "UDP",
        "location": target_ip + ":" + str(target_port),
        "testcase": testcase,
        "ret": exfilUDP(target_ip, target_port, testcase),
    }]
    logging.debug("Finished UDP exfiltration")
    return results
# can be both data and file testcase
def run_s3(config, testcase, file = False):
    """Exfiltrate a data or file testcase to an AWS S3 bucket.

    Reads config["exfil"]["s3"]: bucket (required), plus optional
    access_key_id, secret_access_token, session_token, username and
    password (all default to ""). Returns [] when the bucket is
    missing/invalid.
    """
    logging.debug("running run_s3(file=%s)" % str(file))
    ret = []
    bucket = ""
    access_key_id = ""
    secret_access_token = ""
    session_token = ""
    username = ""
    password = ""
    s3_cfg = config["exfil"]["s3"]
    if "bucket" in s3_cfg and isinstance(s3_cfg["bucket"], str):
        logging.debug("S3 Bucket specified in config file: %s" % s3_cfg["bucket"])
        bucket = s3_cfg["bucket"]
    else:
        logging.warning("Invalid S3 Bucket specified. Skipping S3 Exfiltration.")
        print("[-] Invalid S3 Bucket specified. Skipping S3 Exfiltration.")
        return []
    if "access_key_id" in s3_cfg and isinstance(s3_cfg["access_key_id"], str):
        logging.debug("S3 Access Key ID specified in config file: %s" % s3_cfg["access_key_id"])
        access_key_id = s3_cfg["access_key_id"]
    else:
        logging.warning("Invalid S3 Access Key ID specified. Defaulting to no access key ID.")
        # Typo fix: message previously read "Defualting".
        print("[-] Invalid S3 Access Key ID specified. Defaulting to no access key ID.")
    if "secret_access_token" in s3_cfg and isinstance(s3_cfg["secret_access_token"], str):
        logging.debug("S3 Secret Access Token specified in config file: %s" % s3_cfg["secret_access_token"])
        secret_access_token = s3_cfg["secret_access_token"]
    else:
        logging.warning("Invalid S3 Secret Access Token specified. Defaulting to no secret access token.")
        print("[-] Invalid S3 Secret Access Token specified. Defaulting to no secret access token.")
    if "session_token" in s3_cfg and isinstance(s3_cfg["session_token"], str):
        logging.debug("S3 Session Token specified in config file: %s" % s3_cfg["session_token"])
        session_token = s3_cfg["session_token"]
    else:
        logging.warning("Invalid S3 Session Token specified. Defaulting to no session token.")
        print("[-] Invalid S3 Session Token specified. Defaulting to no session token.")
    # Bug fix: username and password were declared and passed to ExfilS3 but
    # never read from the config. Honor them when present; staying silent on
    # absence keeps existing configs' output unchanged.
    if "username" in s3_cfg and isinstance(s3_cfg["username"], str):
        logging.debug("S3 Username specified in config file: %s" % s3_cfg["username"])
        username = s3_cfg["username"]
    if "password" in s3_cfg and isinstance(s3_cfg["password"], str):
        logging.debug("S3 Password specified in config file.")
        password = s3_cfg["password"]
    ret.append({
        "method": "AWS S3",
        "location": bucket,
        "testcase": testcase,
        "ret": ExfilS3(bucket, testcase, file, access_key_id, secret_access_token, session_token, username, password),
    })
    logging.debug("Finished S3 exfiltration")
    return ret
17055934464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MerchantQueryResult(object):
    """Data model for an Alipay merchant query result.

    Generated-SDK style: one private attribute per field with trivial
    property accessors, plus dict (de)serialization helpers used by the
    Alipay OpenAPI transport layer.
    """
    def __init__(self):
        self._alias_name = None
        self._cert_no = None
        self._city = None
        self._detail_address = None
        self._distinct = None
        self._mcc_code = None
        self._merchant_type = None
        self._name = None
        self._province = None
    @property
    def alias_name(self):
        return self._alias_name
    @alias_name.setter
    def alias_name(self, value):
        self._alias_name = value
    @property
    def cert_no(self):
        return self._cert_no
    @cert_no.setter
    def cert_no(self, value):
        self._cert_no = value
    @property
    def city(self):
        return self._city
    @city.setter
    def city(self, value):
        self._city = value
    @property
    def detail_address(self):
        return self._detail_address
    @detail_address.setter
    def detail_address(self, value):
        self._detail_address = value
    @property
    def distinct(self):
        return self._distinct
    @distinct.setter
    def distinct(self, value):
        self._distinct = value
    @property
    def mcc_code(self):
        return self._mcc_code
    @mcc_code.setter
    def mcc_code(self, value):
        self._mcc_code = value
    @property
    def merchant_type(self):
        return self._merchant_type
    @merchant_type.setter
    def merchant_type(self, value):
        self._merchant_type = value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def province(self):
        return self._province
    @province.setter
    def province(self, value):
        self._province = value
    def to_alipay_dict(self):
        # Serialize to the wire dict. NOTE: fields are emitted only when
        # truthy, so None AND empty strings are silently omitted; nested
        # objects exposing to_alipay_dict() are serialized recursively.
        params = dict()
        if self.alias_name:
            if hasattr(self.alias_name, 'to_alipay_dict'):
                params['alias_name'] = self.alias_name.to_alipay_dict()
            else:
                params['alias_name'] = self.alias_name
        if self.cert_no:
            if hasattr(self.cert_no, 'to_alipay_dict'):
                params['cert_no'] = self.cert_no.to_alipay_dict()
            else:
                params['cert_no'] = self.cert_no
        if self.city:
            if hasattr(self.city, 'to_alipay_dict'):
                params['city'] = self.city.to_alipay_dict()
            else:
                params['city'] = self.city
        if self.detail_address:
            if hasattr(self.detail_address, 'to_alipay_dict'):
                params['detail_address'] = self.detail_address.to_alipay_dict()
            else:
                params['detail_address'] = self.detail_address
        if self.distinct:
            if hasattr(self.distinct, 'to_alipay_dict'):
                params['distinct'] = self.distinct.to_alipay_dict()
            else:
                params['distinct'] = self.distinct
        if self.mcc_code:
            if hasattr(self.mcc_code, 'to_alipay_dict'):
                params['mcc_code'] = self.mcc_code.to_alipay_dict()
            else:
                params['mcc_code'] = self.mcc_code
        if self.merchant_type:
            if hasattr(self.merchant_type, 'to_alipay_dict'):
                params['merchant_type'] = self.merchant_type.to_alipay_dict()
            else:
                params['merchant_type'] = self.merchant_type
        if self.name:
            if hasattr(self.name, 'to_alipay_dict'):
                params['name'] = self.name.to_alipay_dict()
            else:
                params['name'] = self.name
        if self.province:
            if hasattr(self.province, 'to_alipay_dict'):
                params['province'] = self.province.to_alipay_dict()
            else:
                params['province'] = self.province
        return params
    @staticmethod
    def from_alipay_dict(d):
        # Build a MerchantQueryResult from a response dict; an empty or
        # missing dict yields None.
        if not d:
            return None
        o = MerchantQueryResult()
        if 'alias_name' in d:
            o.alias_name = d['alias_name']
        if 'cert_no' in d:
            o.cert_no = d['cert_no']
        if 'city' in d:
            o.city = d['city']
        if 'detail_address' in d:
            o.detail_address = d['detail_address']
        if 'distinct' in d:
            o.distinct = d['distinct']
        if 'mcc_code' in d:
            o.mcc_code = d['mcc_code']
        if 'merchant_type' in d:
            o.merchant_type = d['merchant_type']
        if 'name' in d:
            o.name = d['name']
        if 'province' in d:
            o.province = d['province']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MerchantQueryResult.py | MerchantQueryResult.py | py | 4,677 | python | en | code | 241 | github-code | 13 |
35263514196 | """
===============================
Utils file for plotting results
===============================
Util functions for selecting the results that we will plot.
"""
from typing import Dict
import sys
from scipy.stats import sem
from utils.config import channels_mag, channels_grad1, channels_grad2, meg_rdm, meg_sensors, similarity_folder
sys.path.append('../../MFRS/')
from utils.general import load_npy
def get_filtered_measures(sim_dict: dict, layer: str, channels_list: list, measure: str = "pearson", epsilon: float = 0.05):
    """
    Extract one correlation value per channel for the given layer, keeping a
    value only when it is statistically significant.

    Args:
    - sim_dict (dict): Dictionary with keys "layer_name channel_name" and values like {"measure": [r, p]}.
    - layer (str): Layer name to extract values for.
    - channels_list (list): List of channel names to iterate over.
    - measure (str): Name of the measure to extract. Default "pearson".
    - epsilon (float): Significance threshold on the p-value. Default 0.05.

    Returns:
    - filtered_values (list): One entry per channel: the r value when p < epsilon,
      else 0 (missing or malformed entries also yield 0).
    - extremum (float): Maximum absolute value among the filtered values
      (0 when channels_list is empty).
    """
    filtered_values = []
    for channel_name in channels_list:
        key = f"{layer} {channel_name}"
        value = sim_dict.get(key, {}).get(measure, [])
        # Bug fix: a missing/short entry used to raise IndexError on value[1];
        # treat it like a non-significant result instead.
        if len(value) >= 2 and value[1] < epsilon:
            filtered_values.append(value[0])
        else:
            filtered_values.append(0)
    # Guard against an empty channel list (max() of an empty sequence raises).
    extremum = max(max(filtered_values), abs(min(filtered_values))) if filtered_values else 0
    return filtered_values, extremum
def get_layers_similarity(sim_dict, layer_list, correlation_measure="pearson", epsilon=0.05):
    """
    Split each layer's similarity results into 3 lists (one per sensor type:
    magnetometers, first and second gradiometers) and track per-type extrema.

    Args:
    - sim_dict (dict): Dictionary with keys "layer_name channel_name" and values like {"measure": [r, p]}.
    - layer_list (list): Layer names to iterate over.
    - correlation_measure (str): Correlation measure name. Default "pearson".
    - epsilon (float): Significance threshold on the p-value. Default 0.05.

    Returns:
    - layer_similarities (dict): layer name -> [mag values, grad1 values, grad2 values].
    - extremum_values (list): Maximum absolute value per sensor type.
    """
    sensor_groups = [channels_mag, channels_grad1, channels_grad2]
    layer_similarities = {}
    extremum_values = [0, 0, 0]
    for layer_name in layer_list:
        per_sensor_values = []
        for group_idx, group in enumerate(sensor_groups):
            values, group_extremum = get_filtered_measures(
                sim_dict, layer_name, group,
                measure=correlation_measure, epsilon=epsilon)
            per_sensor_values.append(values)
            if group_extremum > extremum_values[group_idx]:
                extremum_values[group_idx] = group_extremum
        layer_similarities[layer_name] = per_sensor_values
    return layer_similarities, extremum_values
def extract_layers_max_sim_values(sim_dict: dict, sensor_type: str, channels_list: list):
    """
    Extract per-layer maxima for a given sensor type and locate the layer and
    sensor with the overall highest similarity.

    Args:
    - sim_dict (dict): layer name -> [mag values, grad1 values, grad2 values].
    - sensor_type (str): One of 'mag', 'grad1', 'grad2'.
    - channels_list (list): Sensor names for that sensor type.

    Returns:
    - values_list (list): Per-layer maximum for the given sensor type.
    - max_index (int): Index of the layer with the highest similarity.
    - max_layer_name (str): Name of that layer.
    - mask (list): Boolean mask over channels_list marking the best sensor.
    """
    sensor_type_idx = {"mag": 0, "grad1": 1, "grad2": 2}
    idx = sensor_type_idx[sensor_type]
    values_list = [max(values[idx]) for values in sim_dict.values()]
    max_value = max(values_list)
    max_index = values_list.index(max_value)
    max_layer_name = next((key for key, value in sim_dict.items() if max(value[idx]) == max_value), None)
    max_sensor_idx = sim_dict[max_layer_name][idx].index(max_value)
    # Bug fix: the mask previously hard-coded channels_grad2 for every sensor
    # type; use the caller-provided channels_list so the mask matches the
    # requested sensor type.
    mask = [channels_list[max_sensor_idx] == sensor for sensor in channels_list]
    return values_list, max_index, max_layer_name, mask
def get_bootstrap_values(bootstrap_data: Dict[str, Dict[str, list]]) -> Dict[str, Dict[str, float]]:
    """
    Compute the standard error of the mean (SEM) of the bootstrap samples for
    every layer / sensor-type pair.

    Args:
    - bootstrap_data (dict): bootstrap_data[layer][sensor_type] = list of bootstrap values.

    Returns:
    - boot_sem (dict): boot_sem[layer][sensor_type] = SEM of those values.
    """
    return {
        layer: {sensor_type: sem(samples) for sensor_type, samples in per_sensor.items()}
        for layer, per_sensor in bootstrap_data.items()
    }
| BabaSanfour/MFRS | similarity_analysis/plot_utils.py | plot_utils.py | py | 5,447 | python | en | code | 1 | github-code | 13 |
18592198752 |
import webapp2
import jinja2
import os
import urllib2
import json
import logging
from google.appengine.api import users
from google.appengine.ext import ndb
# Module-level Jinja2 environment that loads templates from this file's directory.
jinja_environment = jinja2.Environment(
    loader = jinja2.FileSystemLoader(
        os.path.dirname(__file__)))
class SignupHandler(webapp2.RequestHandler):
    """Serves the signup form and echoes the submitted profile on the homepage template."""

    def get(self):
        # Render the empty signup form.
        signup_template = jinja_environment.get_template('signup.html')
        self.response.write(signup_template.render())

    def post(self):
        # Pull the submitted fields; ages and income are numeric.
        parent_name = self.request.get('parent')
        parent_age = int(self.request.get('parentAge'))
        parent_job = self.request.get('pJob')
        household_income = int(self.request.get('money'))
        child_count = self.request.get('children')
        child_age = int(self.request.get('kAge'))
        homepage_template = jinja_environment.get_template('homepage.html')
        self.response.write(homepage_template.render({
            'name': parent_name,
            'parentAge': parent_age,
            'pJob': parent_job,
            'kAmount': child_count,
            'pAge': parent_age,
            'kAge': child_age,
            'money': household_income,
        }))
class Home(ndb.Model):
    # Datastore entity storing one user's household profile; `user` holds the
    # App Engine user id of the owner.
    name = ndb.StringProperty()  # parent's name
    page=ndb.IntegerProperty()  # parent's age
    job= ndb.StringProperty()  # parent's job title
    income=ndb.IntegerProperty()  # household income
    kamount =ndb.IntegerProperty()  # number of children
    kage= ndb.IntegerProperty()  # child's age
    user = ndb.StringProperty()  # users.get_current_user().user_id()
class HomeHandler(webapp2.RequestHandler):
    """Reads and writes the logged-in user's Home profile and renders the homepage."""

    def get(self):
        current_user = users.get_current_user()
        profile = Home.query(Home.user == str(current_user.user_id())).get()
        if not profile:
            # No stored profile yet -- send the user back to signup.
            self.redirect('/')
            return
        homepage = jinja_environment.get_template('homepage.html')
        self.response.write(homepage.render({
            'name': profile.name,
            'parentAge': profile.page,
            'pJob': profile.job,
            'kAmount': profile.kamount,
            'kAge': profile.kage,
            'money': profile.income,
        }))

    def post(self):
        # Parse the submitted form; ages, income and child count are numeric.
        parent_name = self.request.get('parent')
        parent_age = int(self.request.get('parentAge'))
        parent_job = self.request.get('pJob')
        household_income = int(self.request.get('money'))
        child_count = int(self.request.get('children'))
        child_age = int(self.request.get('kAge'))
        current_user = users.get_current_user()
        # Persist the profile keyed to the current App Engine user.
        profile = Home(name=parent_name, page=parent_age, job=parent_job,
                       income=household_income, kamount=child_count,
                       kage=child_age, user=current_user.user_id())
        profile.put()
        homepage = jinja_environment.get_template('homepage.html')
        self.response.write(homepage.render({
            'name': parent_name,
            'parentAge': parent_age,
            'pJob': parent_job,
            'kAmount': child_count,
            'pAge': parent_age,
            'kAge': child_age,
            'money': household_income,
        }))
class BabyHandler(webapp2.RequestHandler):
    # NOTE(review): urllib2 is Python 2 only -- this handler assumes the
    # GAE Python 2.7 runtime.
    def get(self):
        """Fetch 10 random user profiles from randomuser.me and render BSFv3.html."""
        response = urllib2.urlopen('https://randomuser.me/api/?results=10')
        content = response.read()
        content_dictionary = json.loads(content)
        template = jinja_environment.get_template('BSFv3.html')
        self.response.out.write(template.render( {
            'contents' : content_dictionary}))
# class Resume (ndb.Model):
# resumetitle =ndb.StringProperty()
# name = ndb.StringProperty()
# jobtitle = ndb.StringProperty()
# email = ndb.StringProperty()
# phonenumber = StringProperty()
# personalwebsitelink = StringProperty()
# professionalprofile = StringProperty()
# skillentrys = ListProperty()
# pastjobs = ListProperty()
# degrees = ListProperty()
class ResumeHandler(webapp2.RequestHandler):
    """Builds a resume page from the multi-entry form in startresume.html."""

    def _collect_pairs(self, first_field, second_field):
        # Gather the numbered repeating form fields first_field0/second_field0,
        # first_field1/second_field1, ... until either one is absent.
        # request.get returns the int sentinel -1 when the field is missing.
        # Replaces three copy-pasted while loops in the original.
        firsts, seconds = [], []
        num = 0
        while True:
            a = self.request.get('%s%r' % (first_field, num), default_value=-1)
            b = self.request.get('%s%r' % (second_field, num), default_value=-1)
            if a == -1 or b == -1:
                break
            firsts.append(a)
            seconds.append(b)
            num += 1
        return firsts, seconds

    def get(self):
        """Show the empty resume form."""
        template = jinja_environment.get_template('startresume.html')
        self.response.write(template.render())

    def post(self):
        """Render finishedresume.html from the submitted form values."""
        name = self.request.get('name')
        capname = name.upper()
        # (original fetched 'jobtitle' twice; once is enough)
        job_title = self.request.get('jobtitle')
        capjob_title = job_title.upper()
        email = self.request.get('email')
        phone_number = self.request.get('phonenumber')
        personal_websitelink = self.request.get('personalwebsite')
        professional_profile = self.request.get('professionalprofile')
        skill_name = self.request.get('skillname')
        skill_description = self.request.get('skill')
        job_position = self.request.get('jobposition')
        jp_description = self.request.get('jobposition_description')
        education_entry = self.request.get('educationentry')
        # Repeating sections: skills, past jobs and degrees.
        skillnames, skilldes = self._collect_pairs('skillname', 'skill')
        pastjobs, pastjobdes = self._collect_pairs('jobposition', 'des')
        degrees, schools = self._collect_pairs('degree', 'school')
        template = jinja_environment.get_template('finishedresume.html')
        self.response.write(template.render(
            {
                'name': capname,
                'jobtitle': capjob_title,
                'email': email,
                'phonenumber': phone_number,
                'personalwebsite': personal_websitelink,
                'professionalprofile': professional_profile,
                'skillname': skill_name,
                'skill': skill_description,
                'jobposition': job_position,
                'jobposition_description': jp_description,
                'educationentry': education_entry,
                'skillnames': skillnames,
                'skilldes': skilldes,
                'pastjobs': pastjobs,
                'pastjobdes': pastjobdes,
                'degrees': degrees,
                'schools': schools
            }))
# URL routing table: maps each path to its request handler class.
app = webapp2.WSGIApplication([
    ('/baby', BabyHandler),
    ('/',SignupHandler),
    ('/home', HomeHandler ),
    ('/resume',ResumeHandler),
    ], debug=True)
| quinaroonie/googleproject.github.io | main.py | main.py | py | 8,351 | python | en | code | 0 | github-code | 13 |
17158638477 | """ Inverse Kinematic based on numerical root finding method.
- Method : Inverse Pseudo Inverse Jacobian
- Return : 1 Possible Theta
"""
import numpy as np
from clampMag import clampMag
class ik_jacobian_pseudo_inverse:
    """Iterative inverse-kinematics solver using the Moore-Penrose
    pseudo-inverse of the robot Jacobian."""

    def __init__(self, max_iteration, robot_class):
        self.max_iter = max_iteration  # iteration cap for unreachable poses
        self.robot = robot_class
        self.theta_history = np.array([[]])

    def pseudoinverse_jac(self, theta_current, x_desired):
        """Return joint angles that drive the end effector toward x_desired.

        Iterates theta <- theta + pinv(J(theta)) . (x_desired - fk(theta))
        until the Cartesian error norm drops below 1e-3 or max_iter is hit.
        """
        error = x_desired - self.robot.forward_kinematic(theta_current)
        iteration = 0
        while iteration < self.max_iter and np.linalg.norm(error) > 0.001:
            error = x_desired - self.robot.forward_kinematic(theta_current)
            jacobian = self.robot.jacobian(theta_current)
            theta_current = theta_current + np.linalg.pinv(jacobian).dot(error)
            iteration += 1
        return theta_current
| Phayuth/robotics_manipulator | inverse_kinematic_numerical/numerical_jacpseudoinv.py | numerical_jacpseudoinv.py | py | 974 | python | en | code | 0 | github-code | 13 |
34355313618 | import rospy
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
from keras.models import load_model
import cv2
OBJECT_DETECTION_MODEL_PATH = 'models/detection/frozen_inference_graph.pb'
CLASSIFICATION_MODEL_PATH = 'models/classification/classification_model.h5'
class TLClassifier(object):
    """Two-stage traffic-light pipeline: a frozen TF object detector finds
    the light's bounding box, then a Keras model classifies its color."""
    def __init__(self):
        self.detection_graph = None
        self.classification_graph = None
        self.sess = None
        self.image_tensor = None
        self.boxes = None
        self.scores = None
        self.classes = None
        self.num_detections = None
        self.classification_model = None
        self.__init_object_detection()
        self.__init_classification()
    def __init_object_detection(self):
        """Load the frozen detection graph and cache its I/O tensors."""
        self.detection_graph = tf.Graph()
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(OBJECT_DETECTION_MODEL_PATH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.sess = tf.Session(graph=self.detection_graph, config=config)
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        self.boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        self.scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
    def __init_classification(self):
        """Load the Keras color classifier and pre-build its predict function
        so it is usable from other threads."""
        self.classification_model = load_model(CLASSIFICATION_MODEL_PATH)
        self.classification_graph = tf.get_default_graph()
        self.classification_model._make_predict_function()
    def __box_normal_to_pixel(self, box, dim):
        # Convert a [ymin, xmin, ymax, xmax] box in normalized [0, 1]
        # coordinates to pixel coordinates for an image of size dim = (h, w).
        height, width = dim[0], dim[1]
        box_pixel = [int(box[0] * height), int(box[1] * width), int(box[2] * height), int(box[3] * width)]
        return np.array(box_pixel)
    def detect_traffic_light(self, image):
        """Return (box, score) of the best traffic-light detection, or
        (None, None) when no plausible light is found."""
        best_box, best_score = None, None
        # The detector expects a batch dimension.
        expanded_image = np.expand_dims(image, axis=0)
        with self.detection_graph.as_default():
            boxes, scores, classes, num_detections = self.sess.run([
                self.boxes, self.scores, self.classes, self.num_detections
            ], feed_dict={self.image_tensor: expanded_image})
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        cls = classes.tolist()
        # Keep only detections of class id 10 (traffic light in the COCO
        # label map -- presumably the map this frozen graph was trained with).
        tl_idxs = [idx for idx, v in enumerate(cls) if int(v) == 10]
        if len(tl_idxs) > 0 and scores[tl_idxs[0]] >= 0.2:
            tl_idx = tl_idxs[0]
            dim = image.shape[0:2]
            box = self.__box_normal_to_pixel(boxes[tl_idx], dim)
            box_h = box[2] - box[0]
            box_w = box[3] - box[1]
            # Real traffic lights are tall boxes; +0.01 avoids division by zero.
            ratio = box_h / (box_w + 0.01)
            if box_w >= 20 and box_h >= 20 and ratio >= 1.5:
                best_box = box
                best_score = scores[tl_idx]
        return best_box, best_score
    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # Model was trained on [0, 1]-scaled pixels.
        x = image / 255.
        with self.classification_graph.as_default():
            pred = self.classification_model.predict(x)
        predicted_class = pred.argmax()
        # Model class 3 is remapped to 4 (presumably UNKNOWN in the
        # styx_msgs/TrafficLight enum -- confirm against the message spec).
        return 4 if predicted_class == 3 else predicted_class
| deybvagm/CarND-Capstone | ros/src/tl_detector/light_classification/tl_classifier.py | tl_classifier.py | py | 3,878 | python | en | code | 0 | github-code | 13 |
1954952154 | # -*- coding: utf-8 -*-
"""
Problem 58 (Spiral primes)
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right diagonal,
but what is more interesting is that 8 out of the 13 numbers lying along both diagonals are prime;
that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above,
a square spiral with side length 9 will be formed. If this process is continued,
what is the side length of the square spiral for which
the ratio of primes along both diagonals first falls below 10%?
"""
import math
def solution():
    """Grow the spiral layer by layer, tracking the fraction of diagonal
    (corner) numbers that are prime; print each side length and ratio and
    stop once the ratio first falls below 10%.

    Variable names are Polish: wynik = ratio, wielkosc_spirali = side
    length, prime_on_corner / corner_number = running prime/diagonal counts.
    Starts from side 3, whose diagonals 1, 3, 5, 7, 9 contribute 3 primes
    out of 5 values.
    """
    wynik = 1
    wielkosc_spirali = 3
    prime_on_corner = 3
    loop = 1
    corner_number = 5
    while wynik > 0.1:
        # for j in range(0, 3):
        wielkosc_spirali += 2
        # Walk the numbers of the new layer; every (side-1)-th one is a corner.
        for i in range(pow(wielkosc_spirali - 2, 2) + 1, pow(wielkosc_spirali, 2) + 1):
            if loop % (wielkosc_spirali - 1) == 0:
                corner_number += 1
                if is_prime(i):
                    # print("Pierwsza na cornerze: {}".format(i))
                    prime_on_corner += 1
            loop += 1
        wynik = prime_on_corner / corner_number
        loop = 1
        print("Wynik: {} {}".format(wielkosc_spirali, wynik))
def is_prime(number):
    """Trial-division primality test.

    Fixes the original's acceptance of 0 and 1 as prime, and skips even
    divisors after checking 2 explicitly.  solution() only ever passes
    values >= 10, so its behavior is unchanged.
    """
    if number < 2:
        return False
    if number < 4:  # 2 and 3 are prime
        return True
    if number % 2 == 0:
        return False
    for i in range(3, int(math.sqrt(number)) + 1, 2):
        if number % i == 0:
            return False
    return True
solution()
| KubiakJakub01/ProjectEuler | src/Problem58.py | Problem58.py | py | 1,638 | python | en | code | 0 | github-code | 13 |
41057903315 | '''
本节视频
https://www.bilibili.com/video/BV1J54y1u7Vo/ “Python”高级教程 什么是内部函式?内部函式的作用,如何定义内部函式
本节文章
https://learnscript.net/zh-hant/python/senior/define-and-call-nested-functions/ 如何定义和呼叫巢状函式
'''
###
def main():
    # Main function: implements a silly chat bot.
    ###
    def show_message(text):
        # Nested function: prints a message from the bot,
        # prefixed with the current timestamp.
        import datetime
        time = datetime.datetime.now()
        print(f'{time} 机器人:“{text}”')
    ### The bot greets the user.
    show_message('你好,我是机器人!')
    # The user chats with the bot by typing messages.
    while True:
        text = input('请输入讯息:')
        if text:
            # The bot only ever gives the same silly reply.
            show_message('哦,这样啊!')
        else:
            # Empty input ends the conversation.
            break
    # The bot says goodbye!
    show_message('谢谢使用,再见!')
main()
# ERROR 找不到函式 show_message
# show_message('你还在吗?') | codebeatme/python | src/zh-hant/senior/nested_functions.py | nested_functions.py | py | 1,158 | python | zh | code | 1 | github-code | 13 |
32286146787 | import pygame
from src.Entity.Animals.Animal import Animal
class Wolf(Animal):
    """Wolf entity: map symbol 'W', initiative 9, strength 5."""

    def __init__(self, world, position):
        sprite = pygame.image.load('assets/wolf.png')
        sprite = pygame.transform.scale(sprite, (world.scale, world.scale))
        super().__init__(sprite, world, position, 'W', 9, 5)

    def collision(self, entity):
        """Meeting another wolf triggers breeding; anything else is handled
        by the generic Animal collision rules."""
        if not isinstance(entity, Wolf):
            return super().collision(entity)
        spawn_at = self.breed()
        if spawn_at is not None:
            cub = Wolf(self._world, spawn_at)
            self._world.setMapElement(spawn_at[0], spawn_at[1], cub)
        return False
| Adrian-Sciepura/virtual-world-simulator | Python/virtual-world-simulator/src/Entity/Animals/Wolf.py | Wolf.py | py | 677 | python | en | code | 0 | github-code | 13 |
73537232336 | """doc"""
def main(num):
    """Print the discounted price for *num* units.

    Discount tiers: 10-20 -> 7%, 21-30 -> 10%, 31-40 -> 15%, 41-60 -> 18%,
    above 60 -> 20%.  Fewer than 10 units gets no quote at all.
    """
    rates = [0.07, 0.10, 0.15, 0.18, 0.20]
    tiers = [range(10, 21), range(21, 31), range(31, 41), range(41, 61)]
    discount = 0
    for rate, tier in zip(rates, tiers):
        if num in tier:
            discount = rate
    if discount == 0:
        if num < 10:
            return print("I don't care.")
        if num > 60:
            discount = rates[-1]
    print("%.3f" % ((1 - discount) * num))
main(int(input()))
| film8844/KMITL-Computer-Programming-Year-1 | week11/week12_[Week 11] ManU.py | week12_[Week 11] ManU.py | py | 476 | python | en | code | 0 | github-code | 13 |
70871056979 | import numpy as np
import imageio
import matplotlib.pyplot as plt
from numba import cuda
@cuda.jit
def colorToGrayscaleConvertion(Pout, Pin, width, height):
    """CUDA kernel: convert an interleaved-RGB image to grayscale.

    Pout: flat uint8 output buffer, one value per pixel (row-major).
    Pin:  flat input buffer with 3 channel values per pixel (R, G, B).
    Each thread converts the single pixel addressed by its 2-D grid position.
    """
    col = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    row = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    CHANNELS = 3
    # Guard: the grid may overhang the image edges.
    if col < width and row < height:
        gray_index = row * width + col        # pixel offset in the gray image
        rgb_index = gray_index * CHANNELS     # 3 channel values per pixel
        red = Pin[rgb_index]
        green = Pin[rgb_index + 1]
        blue = Pin[rgb_index + 2]
        # Luminance weights as used in the PMPP textbook example.
        Pout[gray_index] = np.uint8(0.21 * red + 0.71 * green + 0.07 * blue)
| lvllvl/python-api | cudas/colorToGrayscale.py | colorToGrayscale.py | py | 995 | python | en | code | 0 | github-code | 13 |
7494512293 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import Drawable
class Powerpoint(Drawable):
    """Placeholder presentation class; all state starts as None.

    NOTE(review): ``import Drawable`` binds a *module*, so subclassing
    ``Drawable`` here only works if that module itself is a class or the
    import is changed to ``from Drawable import Drawable`` -- confirm.
    """
    def __init__(self):
        # NOTE(review): triple leading underscores still trigger Python name
        # mangling (_Powerpoint___...); a double underscore or a single one
        # was probably intended.
        self.___list_draw_slides = None
        self.___int_width = None
        self.___int_height = None
        self.___organizer_organizer = None
        #PDF converter
        #from fpdf import FPDF
        #pdf = FPDF()
        #pdf.add_page()
        #pdf.set_font("Arial", size=12)
        #pdf.cell(200, 10, txt="Welcome to Python!", ln=1, align="C")
        #pdf.output("simple_demo.pdf")
    #from Tkinter import *
    #import mp3play
    #root = Tk() # create tkinter window
    #f = mp3play.load('Sound.mp3'); play = lambda: f.play()
    #button = Button(root, text = 'Play', command = play)
    #button.pack()
    #root.mainloop()
# Scratch script experimenting with json.dumps and dict access.
# import json
# person = {'first': 'Jason', 'last':'Friedrich'}
# print(person)
# # print(json.dumps(person_dict))
# # person_json = json.dumps(person_dict)
# # print(person_json)
import json
# Build a nested dict and serialize it (compact and with indent=10).
person_dict = {'FirstName': 'Jason', 'LastName': 'Friedrich'}
person_dict['City']='Bochum'
staff_dict ={}
staff_dict['Evil Creator']=person_dict
staff_json = json.dumps(staff_dict)
staff_json10 = json.dumps(staff_dict, indent=10)
# last_name = staff_json('LastName')
# print(staff_json)
# print(staff_json10)
# Access values straight from the dict (not from the JSON string).
print(person_dict['LastName'])
last_name = person_dict['LastName']
# last_name = staff_json.upper()
# last_name10 = staff_json10.upper()
last_nameUP = last_name.upper()
print(last_nameUP)
print('!')
print('!')
print('!')
# print(last_name10)
print(last_name)
# person_dict['first']='Christopher'
31624788854 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 09:46:57 2016
@author: ajaver
"""
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
if __name__ == '__main__':
    # Paths to one experiment's masked video and its derived result files.
    #base directory
    masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch1_18112015_075624.hdf5'
    skeletons_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/Results/CSTCTest_Ch1_18112015_075624_skeletons.hdf5'
    intensities_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/Results/CSTCTest_Ch1_18112015_075624_intensities.hdf5'
    # Load the trajectories table from the intensities HDF5 store.
    with pd.HDFStore(intensities_file, 'r') as fid:
        trajectories_data = fid['/trajectories_data']
    # Min/max intensity-map id per tracked worm.
    dd = trajectories_data.groupby('worm_index_joined').agg({'int_map_id':(np.min,np.max)})
    print(dd)
    #%%
    # Intensity maps per frame (proxy for worms detected per frame).
    worm_N = trajectories_data.groupby('frame_number').agg({'int_map_id':'count'})
    worm_N.plot()
38279681886 | # usage: split test set from whole set
import os
import shutil
import random
# Source folder plus train/test destination folders, for images and masks.
source_path = os.path.abspath(r'inputs/lecdata/images')
target_path = os.path.abspath(r'inputs/pancreas/images')
target_path_1 = os.path.abspath(r'inputs/pancreas_test/images')
source_mask_path = os.path.abspath(r'inputs/lecdata/masks/0')
target_mask_path = os.path.abspath(r'inputs/pancreas/masks/0')
target_mask_path_1 = os.path.abspath(r'inputs/pancreas_test/masks/0')
images = os.listdir(source_path)
# Randomly choose 509 file names as the held-out test split.
test_images = random.sample(images, 509)
test_images.sort()
# print(test_images)
for file in images:
    # NOTE(review): split('.', 1) yields a single element for names without
    # a dot, so portion[1] would raise IndexError -- assumes every directory
    # entry has an extension.
    portion = file.split('.',1)
    if portion[1] != "png":
        continue
    src_file = os.path.join(source_path, file)
    mask_file = os.path.join(source_mask_path, file)
    # Copy the image and its mask to either the train or the test split.
    if file not in test_images:
        shutil.copy(src_file, target_path)
        shutil.copy(mask_file, target_mask_path)
    else:
        shutil.copy(src_file, target_path_1)
        shutil.copy(mask_file, target_mask_path_1)
print('copy files finished!')
13566514692 | import argparse
import sys
from collections import OrderedDict
class GroupArgParser(argparse.ArgumentParser):
    """ArgumentParser variant with named argument groups and per-group help
    via --helpGroup.

    NOTE(review): this relies on Python 2 features (``cStringIO`` and
    ``dict.keys() + list`` concatenation in add_helpGroup) and on private
    argparse internals (_ArgumentGroup, _group_actions, _get_formatter).
    """
    def __init__(self, usage, conflict_handler):
        # group name (upper-cased) -> MyArgGroup, in insertion order
        self.groups_dict = OrderedDict()
        self.briefHelp = None
        self.examples = ""
        super(GroupArgParser, self).__init__(usage=usage, conflict_handler=conflict_handler)
    def set_examples(self, examples):
        # Extra example text appended to the brief (-h) help output.
        self.examples = examples
    def add_group(self, name, desc=None):
        """Create and register a named argument group; returns the group."""
        # group = argparse._ArgumentGroup(self, name, desc)
        group = self.MyArgGroup(self, name, desc)
        self.groups_dict[name.upper()] = group
        return group
    def update_action_groups(self):
        # Attach the registered groups to argparse's internal group list so
        # they appear in the full help output.
        for group in self.groups_dict.values():
            self._action_groups.append(group)
    def add_helpGroup(self, addHelp=None):
        """Register --helpGroup and capture the brief help text (before the
        groups are attached) for later use by -h/--help."""
        help='Print individual group help (the group name is not case-sensitive), where "ALL" will print all groups together.'
        if addHelp:
            help += ' ' + addHelp
        choices_m = self.MyList(self.groups_dict.keys() + ['ALL'])
        self.add_argument('--helpGroup', choices=choices_m, action=self.print_groupHelp, help=help)
        # Capture print_help() output into a buffer by swapping sys.stdout.
        from cStringIO import StringIO
        old_stdout = sys.stdout
        sys.stdout = self.briefHelp = StringIO()
        self.print_help()
        sys.stdout = old_stdout
        self.update_action_groups()
        self.add_argument('-h', '--help', action=self.print_briefHelp, nargs=0, help="Print this help")
    def shareWithGroup(self, action, group):
        # share option action to another group
        if action and group:
            if action not in group._group_actions:
                group._group_actions.append(action)
    class MyArgGroup(argparse._ArgumentGroup):
        # Argument group that can also display actions owned by other groups.
        def shareWithMe(self, action):
            self._group_actions.append(action)
    class MyList(list):
        # list subclass that uses upper() when testing for 'in'
        def __contains__(self, other):
            return super(GroupArgParser.MyList,self).__contains__(other.upper())
    class print_briefHelp(argparse.Action):
        # -h/--help action: prints the captured brief help plus examples.
        def __call__(self, parser, namespace, values, option_string=None):
            briefHelp = parser.briefHelp
            if briefHelp != None:
                briefHelp.seek(0)
                print(''.join(briefHelp.readlines()))
                print(parser.examples)
            sys.exit(0)
    class print_groupHelp(argparse.Action):
        # --helpGroup action: prints one group's help, or everything for ALL.
        def __init__(self, option_strings, dest, nargs=None, **kwargs):
            if nargs is not None:
                raise ValueError("nargs not allowed")
            super(GroupArgParser.print_groupHelp, self).__init__(option_strings, dest, **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            values = values.upper()
            groups = parser.groups_dict
            if values == 'ALL':
                parser.print_help()
            elif values in groups.keys():
                group = groups[values]
                formatter = parser._get_formatter()
                formatter.start_section(group.title)
                formatter.add_text(group.description)
                formatter.add_arguments(group._group_actions)
                formatter.end_section()
                print(formatter.format_help())
            else:
                raise Exception("!!!ERROR!!! Unknown group name=%s" % values)
            sys.exit(0)
| afortiorama/panda-client | pandatools/Group_argparse.py | Group_argparse.py | py | 3,441 | python | en | code | null | github-code | 13 |
7410573435 | # https://github.com/JamieLoughnane/python-tweet
import sys
# Fail with a helpful install hint when tweepy is missing.
try:
    import tweepy
except ModuleNotFoundError:
    sys.exit("Tweepy not found! Please enter 'pip install tweepy' into your Command Prompt/Terminal, for help using pip visit: https://pip.pypa.io/en/stable/")
print("First create a Twitter app at https://developer.twitter.com/ and then come back here to enter your keys found on the 'Keys and tokens' page of your app")
# Collect API credentials and the tweet text interactively.
consumer_key = input("Consumer Key: ")
consumer_secret = input("Consumer Secret: ")
access_token = input("Access Token: ")
access_token_secret = input("Access Token Secret: ")
tweet = input("Enter your tweet: ")
# Authenticate with OAuth 1.0a and post the tweet.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
try:
    api.update_status(status=tweet)
# NOTE(review): tweepy.error.TweepError is the tweepy v3 exception; it was
# removed in tweepy v4 -- confirm the pinned tweepy version.
except tweepy.error.TweepError as e:
    error = e.api_code
    # Map the most common Twitter API error codes to friendly messages.
    if error == 89:
        print("Tweet failed: Authentication error")
    elif error == 170:
        print("Tweet failed: No tweet entered")
    elif error == 187:
        print("Tweet failed: Duplicate tweet")
    elif error == 186:
        print(f"Tweet failed: Too many characters (the limit is 280 characters and you entered {len(tweet)} characters)")
    else:
        print("Tweet failed: Unknown error")
    sys.exit()
print("Tweet sent!")
13779331458 | from PIL import Image,ImageEnhance
from selenium import webdriver
import requests
import images
url = 'http://jwxt.upc.edu.cn/verifycode.servlet'
browser = webdriver.Chrome()
browser.get(url)
# Locate the captcha <img> on the page and derive its crop rectangle
# (41x17 pixels with a 2-pixel inset).
loc = browser.find_element_by_tag_name('img').location
left = loc['x']+2
top = loc['y']+2
right = left + 41
bot = top + 17
# Collect captcha samples 200..400.
for index in range(200,401):
    # Save a full-page screenshot, then refresh for the next captcha.
    browser.get_screenshot_as_file('images/full.png')
    browser.refresh()
    img = Image.open('images/full.png')
    img = img.crop((left,top,right,bot))
    # Grayscale, then binarize with a fixed threshold of 127.
    img = img.convert('L')
    threshold = 127
    table = []
    for i in range(256):
        if i < threshold:
            table.append(0)
        else:
            table.append(1)
    img = img.point(table,'1')
    img.save('images/'+ str(index) + '.png')
    # Hand the sample to the cutter;
    # it is kept only if it can be segmented.
    images.import_cutting(index)
browser.quit()
14688032180 | #!/usr/bin/python3
with open('aoc2020-25-input.txt', 'r') as f:
[doorpub, cardpub] = map(int, f.read().strip().split('\n'))
# Test data
#cardpub = 5764801
#doorpub = 17807724
def partone(card=None, door=None):
    """Advent of Code 2020 day 25: recover the door's encryption key.

    Repeatedly multiplies by the subject number 7 (mod 20201227) to find the
    card's loop size, while raising the door's public key by the same count;
    when the running value matches *card*, the accumulator is the shared key.

    Generalized: the public keys can now be passed in explicitly; by default
    they come from the module-level values read from the input file, so the
    existing ``partone()`` call is unchanged.
    """
    if card is None:
        card = cardpub
    if door is None:
        door = doorpub
    result = 1
    encrypted = 1
    while result != card:
        result = (result * 7) % 20201227
        encrypted = (encrypted * door) % 20201227
    return encrypted
print('Advent of Code 2020, day 25 part 1')
print(partone())
| annaoskarson/aoc2020 | aoc2020-25.py | aoc2020-25.py | py | 435 | python | en | code | 2 | github-code | 13 |
69837459539 | import re
import pickle
class Hmm:
    """Hidden Markov model for Chinese word segmentation with BMES states.

    oh: emission probabilities {observation: {state: p}},
    hh: transition probabilities {state: {state: p}},
    start: initial state distribution, aligned with self.h = list(hh.keys()).
    """
    def __init__(self, name="segmodel"):
        # Load (emission, transition, start) tables from a pickled model file.
        with open(name, "rb") as model:
            self.oh, self.hh, self.start = pickle.load(model)
        self.h = list(self.hh.keys())
        self.doc = []
        self.lenh = len(self.h)
        self.result = []
    def sentence(self, doc):
        """Segment *doc* into words, appending them to self.result.

        Chinese runs are labelled by viterbi() with B/M/E/S tags; non-Chinese
        runs (punctuation, latin, digits) are appended unchanged.
        NOTE(review): ``regc`` is never used, and ``tmp is not ""`` compares
        identity rather than equality -- confirm intent.
        """
        regc = re.compile(r"[\u4e00-\u9fa5]")
        regx = re.compile(r"[\u4e00-\u9fa5]+|[\W+]|[a-zA-Z]+|\d+")
        self.doc = regx.findall(doc)
        tmp = ""
        for s in self.doc:
            if "\u4e00" <= s[0] <= "\u9fa5":
                # B/M extend the current word; E/S close it.
                for i, item in enumerate(self.viterbi(s)):
                    print(item)
                    if item == "B":
                        tmp += s[i]
                    elif item == "M":
                        tmp += s[i]
                    elif item == "E":
                        tmp += s[i]
                        self.result.append(tmp)
                        tmp = ""
                    elif item == "S":
                        tmp = s[i]
                        self.result.append(tmp)
                        tmp = ""
                    else:
                        print("ERROR: tokenizer has been destroyed by atm")
            else:
                # Flush any partial word before emitting the non-Chinese run.
                if tmp is not "":
                    self.result.append(tmp)
                    tmp = ""
                self.result.append(s)
        print(self.result)
    def hmm(self, doc):
        pass
    # Forward algorithm
    # NOTE: plain float products underflow for long sequences; log-space
    # probabilities would be needed for robustness (same for backward/viterbi).
    def forward(self):
        """Return P(self.doc) via the forward recursion.

        NOTE(review): the transition term indexes hh[...][self.h[i-1]] with
        the *time* index i -- looks like it should be the state index;
        confirm against the training code.
        """
        alph = []
        start = [st*ho for st, ho in zip(self.start, self.oh[self.doc[0]].values())]
        alph.append(start)
        for i in range(1, len(self.doc)):
            temp = [sum([alph[i-1][index]*self.hh[self.h[index]][self.h[i-1]] for index in range(self.lenh)]) * ho
                    for ho in self.oh[self.doc[i]].values()]
            alph.append(temp)
        return sum(alph[-1])
    # Backward algorithm
    def backward(self):
        """Return P(self.doc) via the backward recursion."""
        beta = [[] for i in range(len(self.doc))]
        end = [1 for i in range(self.lenh)]
        beta[-1] = end
        for i in range(1, len(self.doc)):
            beta[-i-1] = [sum([self.hh[qi][self.h[j]] * self.oh[self.doc[-i]][self.h[j]] * beta[-i][j]
                               for j in range(self.lenh)])
                          for qi in self.h]
        return sum([self.start[i] * self.oh[self.doc[0]][self.h[i]] * beta[0][i]
                    for i in range(self.lenh)])
    # Viterbi algorithm
    def viterbi(self, observertion):
        """Return the most likely state sequence for the observation string.

        delt holds the best path probability per state and step; phi holds
        the predecessor state names used for backtracking.
        """
        delt = [[self.start[i]*self.oh[observertion[0]][self.h[i]]
                 for i in range(self.lenh)]]
        phi = [[0 for i in range(self.lenh)]]
        for t in range(1, len(observertion)):
            dt, pt = [], []
            for i in range(self.lenh):
                p = [delt[t-1][j]*self.hh[self.h[j]][self.h[i]] for j in range(self.lenh)]
                m = max(p)
                pt.append(self.h[p.index(m)])
                dt.append(m * self.oh[observertion[t]][self.h[i]])
            delt.append(dt)
            phi.append(pt)
        # Backtrack from the best final state.
        mp = delt[-1].index(max(delt[-1]))
        dequence = [self.h[mp]]
        for i in range(len(observertion)-1):
            mp = self.h.index(phi[-i-1][mp])
            dequence.insert(0, self.h[mp])
        return dequence
if __name__ == "__main__":
hmms = Hmm("pku_4_data")
hmms.h = ['1', '2', '3']
hmms.lenh = len(hmms.h)
hmms.hh = {'1': {'1': 0.5, '2': 0.2, '3': 0.3},
'2': {'1': 0.3, '2': 0.5, '3': 0.2},
'3': {'1': 0.2, '2': 0.3, '3': 0.5}}
hmms.oh = {'r': {'1': 0.5, '2': 0.4, '3': 0.7},
'w': {'1': 0.5, '2': 0.6, '3': 0.3}}
hmms.start = [0.2, 0.4, 0.4]
print(hmms.viterbi("rrw"))
| blackKeyMoe/cnlp | src_of_everything/hmm.py | hmm.py | py | 3,878 | python | en | code | 0 | github-code | 13 |
33268295463 | import spotipy.util as util
from creds import client_id, client_secret
# Request a user token for the 'spotify' account covering all read scopes
# the project needs; opens a browser for the OAuth consent flow.
username = 'spotify'
scope = 'ugc-image-upload user-read-private user-read-email user-follow-read user-library-read user-top-read user-read-recently-played playlist-read-collaborative playlist-read-private'
token = util.prompt_for_user_token(username,
                           scope,
                           client_id=client_id,
                           client_secret=client_secret,
                           redirect_uri='https://puginarug.com/')
| lukeveitch/SpotifyArtProject | Backend/auth.py | auth.py | py | 531 | python | en | code | 0 | github-code | 13 |
1417256315 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
    """Product category; categories nest via ``parent`` and may mark a brand."""
    title = models.CharField('Category name', max_length=100)
    parent = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True, related_name='child')
    brend = models.BooleanField('this is a brand')  # NOTE(review): 'brend' is a typo for 'brand'; renaming needs a migration
    slug = models.SlugField(max_length=100, unique=True, verbose_name='url')
    def __str__(self):
        # Render the full ancestry path, e.g. "Root -> Child -> Leaf".
        full_path = [self.title]
        k = self.parent
        while k is not None:
            full_path.append(k.title)
            k = k.parent
        return ' -> '.join(full_path[::-1])
    class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
class Product(models.Model):
    """A product belonging to one category."""
    category = models.ForeignKey(Category, on_delete=models.CASCADE, blank=True, related_name='products')
    name = models.CharField('Product name', max_length=100)
    description = models.TextField('product description')
    img = models.ImageField(upload_to='media')
    web_id = models.IntegerField('Web ID')  # external/site identifier
    price = models.IntegerField('price')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'Product'
        verbose_name_plural = 'Products'
class Slider(models.Model):
    """One slide of the homepage carousel."""
    title = models.CharField('slider title', max_length=100)
    slogan = models.TextField('slogan')
    comment = models.TextField('comment')
    img = models.ImageField(upload_to='media')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = 'Slider'
        verbose_name_plural ='Sliders'
class SupterSlider(models.Model):
    """The single featured ("super") slide.

    NOTE(review): class name is a typo for SuperSlider; renaming requires a
    migration, so it is only flagged here.
    """
    title = models.CharField('slider title', max_length=100)
    slogan = models.TextField('slogan')
    comment = models.TextField('comment')
    img = models.ImageField(upload_to='media')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = 'Super slider (only one)'
class Blog(models.Model):
    """Blog article with author, cover image and body text."""
    title = models.CharField('Blog title', max_length=100)
    author = models.CharField('Auther name', max_length=100)
    img = models.ImageField('img', upload_to='media')
    text = models.TextField('text')
    created_at = models.DateTimeField(auto_now_add=True)
    def get(self):
        # Kept for backward compatibility with any existing callers.
        return self.title
    def __str__(self):
        # Django uses __str__ (not get) for admin/shell display; without it
        # instances render as "Blog object (pk)".
        return self.title
    class Meta:
        verbose_name = 'Blog'
        verbose_name_plural = 'Blogs'
class Contacts(models.Model):
    """Company contact details shown on the contacts page."""
    company_name = models.CharField('company name', max_length=100)
    location = models.TextField('campny location')
    city = models.CharField('City name', max_length=100)
    number_nuber = models.CharField('phone number', max_length=100)  # NOTE(review): field name typo; renaming needs a migration
    email = models.CharField('email adres', max_length=100)
    def get(self):
        # Kept for backward compatibility with any existing callers.
        return self.company_name
    def __str__(self):
        # Django uses __str__ (not get) for admin/shell display.
        return self.company_name
    class Meta:
        verbose_name = 'contac'
        verbose_name_plural = 'contacts'
class Comment(models.Model):
    """A user's comment on a product."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    prod = models.ForeignKey(Product, on_delete=models.CASCADE)
    post_time = models.DateTimeField(auto_now_add=True)
    com = models.TextField()  # comment body
    def get(self):
        # Kept for backward compatibility with any existing callers.
        return self.prod
    def __str__(self):
        # Django uses __str__ (not get) for admin/shell display; delegate
        # to the related product's name.
        return str(self.prod)
    class Meta:
        verbose_name = 'Comment'
        verbose_name_plural = 'Comments'
| VahagnZakaryan/Eshoper | main/models.py | models.py | py | 3,253 | python | en | code | 1 | github-code | 13 |
35012994492 |
if __name__ == '__main__':
    # Stdin variant (as submitted to HackerRank):
    # n, m = input().split()
    # integer_list = map(int, input().split())
    # set_a = map(int, input().split())
    # set_b = map(int, input().split())
    # Local variant: read the same four lines from a saved test case.
    # Improvements over the original: the file is closed via a context
    # manager, and A/B are plain sets -- the original built dicts whose
    # stored indices were never used.
    with open('python/no-idea/test_case_8.txt') as f:
        n, m = f.readline().split()
        integer_list = list(map(int, f.readline().split()))
        set_a = set(map(int, f.readline().split()))
        set_b = set(map(int, f.readline().split()))
    # Happiness: +1 for each array element found in A, -1 for each in B.
    happiness = 0
    for i in integer_list:
        if i in set_a:
            happiness += 1
        if i in set_b:
            happiness -= 1
    print(happiness)
| Crisheld/HackerRank-solutions | python/no-idea/solution.py | solution.py | py | 734 | python | en | code | 1 | github-code | 13 |
# Flatten one level of nested containers out of l, then print how often each
# distinct value occurs.
l=[10,2,3,4,5,5,5,6,6,7,10,[11,22,33,44,55,66],111,222,333,234,'umesh']
# # l1=[]
# # l1=l[0]
# # j=0
# # for i in l[1::]:
# #     if(type(i)==int):
# #         if (l1[j]!=i):
# #             l1.append(i)
# #
# #     else:
# #         pass
# c=0
# print(l)
# for i in l:
#     for j in l:
#         if(i==j):
#             c=c+1
#     if(c>0):
#         for k in range(c+1):
#             l.remove(i)
#     print(i," ",c)
#     c=0
#
l1 = []
for i in l:
    # Top-level ints are kept as-is.
    if type(i)==int :
        l1.append(i)
    # Nested sequences contribute their int/str members.
    if type(i) == list or type(i) ==tuple or type(i)==set:
        for j in i :
            if type(j) == int or type(j) == str:
                l1.append(j)
    # Dicts contribute both keys and values when int/str.
    if type(i) == dict :
        for k in i.items() :
            for g in k :
                if type(g) == int or type(g) == str:
                    l1.append(g)
print(l1)
# One line per distinct value with its occurrence count.
for i in set(l1):
    print(i, " ----> ", l1.count(i))
| Tandon07/Practical-Contents | July9oops_day3/prac.py | prac.py | py | 957 | python | en | code | 1 | github-code | 13 |
12994903249 | """
rulemining.py file
File which contains the full mining capability using the binary INK representation.
This file is adapted from:
Bayesian Rule Set mining by Tong Wang and Peter (Zhen) Li
reference: Wang, Tong, et al. "Bayesian rule sets for interpretable classification.
Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016.
"""
import math
import random
import numpy as np
import pandas as pd
from scipy import sparse
import ink.miner.utils as utils
from ink.miner.task_agnostic_mining import agnostic_fit
from ink.miner.task_specific_mining import specific_fit
__author__ = 'Bram Steenwinckel'
__copyright__ = 'Copyright 2020, INK'
__credits__ = ['Filip De Turck, Femke Ongenae']
__license__ = 'IMEC License'
__version__ = '0.1.0'
__maintainer__ = 'Bram Steenwinckel'
__email__ = 'bram.steenwinckel@ugent.be'
np.seterr(over='ignore')
np.seterr(divide='ignore', invalid='ignore')
pd.options.mode.chained_assignment = None
class RuleSetMiner(object):
    """
    The INK RuleSetMiner.
    Class which can mine both task specific and task agnostic rules.
    :param support: Support measure, only rules with this level of support will be taken into account.
    :type support: int
    :param max_rules: Maximal number of rules which can be mined.
    :type max_rules: int
    :param max_len_rule_set: Maximal number of rules used to separate the classes during task-specific mining.
    :type max_len_rule_set: int
    :param max_iter: Maximal number of iterations used for the task-specific miner.
    :type max_iter: int
    :param chains: Maximal number of chains used for the task-specific miner.
    :type chains: int
    :param forest_size: Maximal number of forest within the classifier for the task-specific miner.
    :type forest_size: int
    :param criteria: Criteria used to screen the generated rules. Possible criteria's are precision, specificity,
                     sensitivity, mcc (matthew correlation coefficient) or cross-entropy (default).
    :type criteria: str
    :param propose_threshold: Threshold used to propose new combinations of possible rules for the task-specific mining.
    :type propose_threshold: int
    :param verbose: Parameter to show tqdm tracker (default False).
    :type: bool
    """
    def __init__(self, support=10, max_rules=10e13, max_len_rule_set=5, max_iter=10, chains=1000, forest_size=1000,
                 criteria='precision', rule_complexity = 2, propose_threshold=0.1, verbose=False):
        self.max_rules = max_rules
        self.max_iter = max_iter
        self.chains = chains
        self.support = support
        self.max_rule_set = max_len_rule_set
        self.verbose = verbose
        # Beta-Binomial prior parameters for the positive (alpha_1/beta_1) and
        # negative (alpha_2/beta_2) likelihood terms; used in precompute() and
        # __compute_prob().
        self.alpha_1 = 100
        self.beta_1 = 1
        self.alpha_2 = 100
        self.beta_2 = 1
        # Per-rule-length prior parameters; filled in by set_parameters() when
        # left at None.
        self.alpha_l = None
        self.beta_l = None
        self.propose_threshold = propose_threshold
        self.forest_size = forest_size
        # Indices (into self.rules) of the rules selected after fitting.
        self.predicted_rules = []
        self.dct_check = {}
        self.criteria = criteria
        self.attributeNames = None
        self.itemNames = None
        self.rule_explanations = None
        self.rules_len = None
        # P0: maximum prior probability (see precompute()).
        self.P0 = None
        self.const_denominator = None
        self.Lup = None
        self.patternSpace = []
        self.rules = []
        self.rule_complexity = rule_complexity
    def fit(self, data, label=None):
        """
        Fit function to train the classifier or generate agnostic rules
        :param data: Tuple value containing 1) a sparse binary representation, 2) list of indices, 3) column features.
        :type data: tuple
        :param label: List containing the labels for each index (task-specific) or None (task-agnostic)
        :return: Rules
        """
        # Dispatch on the presence of labels: supervised (task-specific) vs
        # unsupervised (task-agnostic) mining.
        if label is not None:
            return specific_fit(self, data, label)
        else:
            return agnostic_fit(self, data)
    def predict(self, data):
        """
        Predict function used to predict new data against the learned task-specific rules.
        :param data: Tuple value containing 1) a sparse binary representation, 2) list of indices, 3) column features.
        :type data: tuple
        :return: Predicted labels
        :rtype: list
        """
        df = pd.DataFrame(data[0].todense())
        df.index = data[1]
        df.columns = data[2]
        X = df.astype('bool')
        # replace this with multiprocessing code
        yhat = np.zeros(X.shape[0], dtype=int)
        for rule in self.predicted_rules:
            # A rule fires for a sample only when all of its items hold (AND).
            yhat_items = np.ones(X.shape[0], dtype=int)
            for item in self.rules[rule]:
                if self.itemNames[item] in X.columns:
                    yhat_items = X[self.itemNames[item]].values & yhat_items
                else:
                    if self.itemNames[item].startswith('count.'):
                        if '<' in self.itemNames[item]:
                            # Unseen 'count. ... <' items are treated as satisfied
                            # (presumably an absent count falls below the
                            # threshold bin — TODO confirm against the binning).
                            yhat_items = np.ones(X.shape[0], dtype=int) & yhat_items
                        else:
                            yhat_items = np.zeros(X.shape[0], dtype=int) & yhat_items
                    else:
                        # Items never seen in the data can never be satisfied.
                        yhat_items = np.zeros(X.shape[0], dtype=int) & yhat_items
            if self.verbose:
                print(yhat_items)
            # The rule-set prediction is the OR over all selected rules.
            yhat = yhat | yhat_items
        return yhat
    def print_rules(self, rules):
        """
        Function to represent the rules in a human-readable format.
        :param rules: Output generated from the task-specific fit function
        :type rules: list
        :return:
        """
        for rule in rules:
            # Prefer a stored explanation when available, otherwise resolve the
            # raw item indices to their names.
            if self.rule_explanations.get(rule) is None:
                rules_list = [self.itemNames[item] for item in self.rules[rule]]
            else:
                rules_list = self.rule_explanations[rule][0]
            reformatted_rules = utils.rewrite_rules(rules_list, self.attributeNames)
            print(reformatted_rules)
    def set_parameters(self, X):
        """
        Function to set some initial parameters based on the data.
        :param X: Tuple value containing 1) a sparse binary representation, 2) list of indices, 3) column features.
        :type X: tuple
        :return:
        """
        # number of possible rules, i.e. rule space italic(A) prior
        self.patternSpace = np.ones(self.max_rule_set + 1)
        # This patternSpace is an approximation
        # because the original code allows
        # the following situation, take tic-tac-toe
        # 1_O == 1 and 1_O_neg == 1, which is impossible
        numAttributes = len(X[2])
        for i in range(1, self.max_rule_set + 1):
            # patternSpace[i] = C(numAttributes, i): number of item
            # combinations of length i.
            tmp = 1
            for j in range(numAttributes - i + 1, numAttributes + 1):
                tmp *= j
            self.patternSpace[i] = tmp / math.factorial(i)
        if self.alpha_l is None:
            self.alpha_l = [1 for _ in range(self.max_rule_set + 1)]
        if self.beta_l is None:
            self.beta_l = [(self.patternSpace[i] * 100 + 1) for i in range(self.max_rule_set + 1)]
    def precompute(self, y):
        """
        Precompute values based on the given labels.
        :param y: List of labels.
        :return:
        """
        # Upper bound on the likelihood: all positives correctly covered,
        # no false positives.
        TP, FP, TN, FN = sum(y), 0, len(y) - sum(y), 0
        # self.Lup : p(S|A;alpha_+,beta_+,alpha_-,beta_-)
        # conference paper formula(6)
        self.Lup = (utils.log_betabin(TP, TP + FP, self.alpha_1, self.beta_1)
                    + utils.log_betabin(TN, FN + TN, self.alpha_2, self.beta_2))
        # self.const_denominator : log((|Al|+beta_l-1)/(alpha_l+|Al|-1))
        # conference paper formula(9) denominator
        self.const_denominator = [np.log((self.patternSpace[i] + self.beta_l[i] - 1)
                                         / (self.patternSpace[i] + self.alpha_l[i] - 1))
                                  for i in range(self.max_rule_set + 1)]
        Kn_count = np.zeros(self.max_rule_set + 1, dtype=int)
        # P0 : maximum prior
        # Ml=0, |Al|= rule space
        # conference paper formula(3)
        # because of log property, + is *
        self.P0 = sum([utils.log_betabin(Kn_count[i], self.patternSpace[i], self.alpha_l[i],
                                         self.beta_l[i]) for i in range(1, self.max_rule_set + 1)])
    def screen_rules(self, X_trans, y):
        """
        Function to pre_screen the generated rules based on the enabled criteria
        :param X_trans: Binary data frame.
        :param y: Label list
        :return: RMatrix
        """
        tmp_rules_len = [len(rule) for rule in self.rules]
        # Rule-by-item indicator matrix; item indices are 1-based, hence j - 1.
        ruleMatrix = np.zeros((len(self.rules), len(X_trans.columns)), dtype=int)
        for i, rule in enumerate(self.rules):
            for j in rule:
                ruleMatrix[i][j - 1] = 1
        ruleMatrix = sparse.csc_matrix(ruleMatrix.transpose())
        mat = (sparse.csc_matrix(X_trans) * ruleMatrix).todense()
        # Z is the matrix for data points covered by rules
        # (a sample satisfies a rule iff it matches all of the rule's items).
        Z = (mat == tmp_rules_len)
        Zpos = Z[np.where(y > 0)]
        # TP for each rule
        TP = np.asarray(np.sum(Zpos, axis=0))[0]
        # supp is threshold percentile of how TP a rule is
        supp_select = np.where(TP >= self.support * sum(y) / 100.0)[0]
        if len(supp_select) <= self.max_rules:
            self.rules = np.asarray(self.rules)[supp_select]
            RMatrix = np.array(Z[:, supp_select])
            self.rules_len = [len(rule) for rule in self.rules]
        else:
            # Too many candidates: rank the supported rules by the configured
            # screening criterion and keep the top max_rules.
            FP = np.array(np.sum(Z, axis=0))[0] - TP
            TN = len(y) - np.sum(y) - FP
            FN = np.sum(y) - TP
            p1 = TP.astype(float) / (TP + FP)
            p2 = FN.astype(float) / (FN + TN)
            pp = (TP + FP).astype(float) / (TP + FP + TN + FN)
            if self.criteria == 'precision':
                select = np.argsort(p1[supp_select])[::-1][:self.max_rules].tolist()
            elif self.criteria == 'specificity':
                p3 = TN.astype(float) / (TN + FP)
                select = np.argsort(p3[supp_select])[::-1][:self.max_rules].tolist()
            elif self.criteria == 'sensitivity':
                p4 = TP.astype(float) / (TP + FN)
                select = np.argsort(p4[supp_select])[::-1][:self.max_rules].tolist()
            elif self.criteria == 'mcc':
                # NOTE(review): despite the 'mcc' label this is the F1 score
                # 2TP / (2TP + FP + FN) — confirm whether MCC was intended.
                p5 = (2*TP.astype(float)) / (2*TP.astype(float) + FP + FN)
                select = np.argsort(p5[supp_select])[::-1][:self.max_rules].tolist()
            else:
                # Default: information gain ratio based on conditional entropy.
                cond_entropy = (-pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1))
                                - (1 - pp) * (p2 * np.log(p2)
                                              + (1 - p2) * np.log(1 - p2)))
                # Patch the indeterminate 0*log(0) cases.
                cond_entropy[p1 * (1 - p1) == 0] = (-((1 - pp) * (p2 * np.log(p2)
                                                                 + (1 - p2) * np.log(1 - p2)))[p1 * (1 - p1) == 0])
                cond_entropy[p2 * (1 - p2) == 0] = (-(pp * (p1 * np.log(p1)
                                                            + (1 - p1) * np.log(1 - p1)))[p2 * (1 - p2) == 0])
                cond_entropy[p1 * (1 - p1) * p2 * (1 - p2) == 0] = 0
                pos = (TP + FN).astype(float) / (TP + FP + TN + FN)
                info = - pos * np.log(pos) - (1 - pos) * np.log(1 - pos)
                info[np.where((pos == 1) | (pos == 0))[0]] = 0
                IGR = (info - cond_entropy) / info
                IGR[np.where(info == 0)[0]] = 0
                select = np.argsort(IGR[supp_select])[::-1][:self.max_rules].tolist()
            ind = list(supp_select[select])
            self.rules = [self.rules[i] for i in ind]
            RMatrix = np.array(Z[:, ind])
            self.rules_len = [len(rule) for rule in self.rules]
        return RMatrix
    def __normalize(self, rules_new):
        """Drop every rule whose item set is a superset of another selected
        rule (the more general rule is kept); on any error, return the input
        list unchanged (as a copy)."""
        try:
            rules_len = [len(self.rules[index]) for index in rules_new]
            # Process longer (more specific) rules first.
            rules = [rules_new[i] for i in np.argsort(rules_len)[::-1][:len(rules_len)]]
            p1 = 0
            while p1 < len(rules):
                for p2 in range(p1 + 1, len(rules), 1):
                    if set(self.rules[rules[p2]]).issubset(set(self.rules[rules[p1]])):
                        rules.remove(rules[p1])
                        p1 -= 1
                        break
                p1 += 1
            return rules[:]
        except (ValueError, Exception):
            return rules_new[:]
    def __find_rules_z(self, rules, RMatrix=None):
        """Return a boolean coverage vector marking the samples covered by at
        least one rule of `rules` (using cached coverage from
        rule_explanations when available)."""
        if len(rules) == 0:
            return np.zeros(RMatrix.shape[0], dtype=int)
        Z = np.zeros(RMatrix.shape[0], dtype=int)
        for rule in rules:
            if self.rule_explanations.get(rule) is None:
                Z = RMatrix[:, rule] + Z
            else:
                Z = self.rule_explanations[rule][1] + Z
        Z = Z > 0
        return Z
    def __propose(self, rules_curr, rules_norm, RMatrix, Y, q):
        """Propose a neighbouring rule set for the MCMC search by applying an
        'add', 'cut', 'cut'+'add' (replace) or 'clean' move, guided by a
        misclassified example. Returns (rules_curr, rules_norm)."""
        nRules = len(self.rules)
        Yhat = (np.sum(RMatrix[:, rules_curr], axis=1) > 0).astype(int)
        incorr = np.where(Y != Yhat)[0]
        N = len(rules_curr)
        if len(incorr) == 0:
            ex = None
            move = ['clean']
            # it means the HBOA correctly classified all points but there could be redundant patterns,
            # so cleaning is needed
        else:
            # Pick a misclassified example; its label decides whether coverage
            # must grow (add) or shrink (cut).
            ex = random.sample(list(incorr), 1)[0]
            t = np.random.random()
            if Y[ex] == 1 or N == 1:
                if t < 1.0 / 2 or N == 1:
                    move = ['add']  # action: add
                else:
                    move = ['cut', 'add']  # action: replace
            else:
                if t < 1.0 / 2:
                    move = ['cut']  # action: cut
                else:
                    move = ['cut', 'add']  # action: replace
        if move[0] == 'cut':
            """ cut """
            if np.random.random() < q:
                # Exploration: cut a random rule covering the bad example.
                candidate = list(set(np.where(RMatrix[ex, :] == 1)[0]).intersection(rules_curr))
                if len(candidate) == 0:
                    candidate = rules_curr
                cut_rule = random.sample(list(candidate), 1)[0]
            else:
                # Exploitation: prefer cutting the rule whose removal keeps
                # precision highest.
                p = []
                all_sum = np.sum(RMatrix[:, rules_curr], axis=1)
                for index, rule in enumerate(rules_curr):
                    Yhat = ((all_sum - np.array(RMatrix[:, rule])) > 0).astype(int)
                    TP, FP, TN, FN = utils.get_confusion(Yhat, Y)
                    p.append(TP.astype(float) / (TP + FP + 1))
                p = [x - min(p) for x in p]
                p = np.exp(p)
                p = np.insert(p, 0, 0)
                p = np.array(list(utils.accumulate(p)))
                if p[-1] == 0:
                    index = random.sample(range(len(rules_curr)), 1)[0]
                else:
                    p = p / p[-1]
                    # sample an index proportionally to the accumulated weights
                    index = utils.find_lt(p, np.random.random())
                cut_rule = rules_curr[index]
            rules_curr.remove(cut_rule)
            rules_norm = self.__normalize(rules_curr)
            move.remove('cut')
        if len(move) > 0 and move[0] == 'add':
            """ add """
            if np.random.random() < q:
                add_rule = random.sample(range(nRules), 1)[0]
            else:
                # Among rules covering currently-uncovered samples, add the one
                # with the best precision proxy.
                Yhat_neg_index = list(np.where(np.sum(RMatrix[:, rules_curr], axis=1) < 1)[0])
                mat = np.multiply(RMatrix[Yhat_neg_index, :].transpose(), Y[Yhat_neg_index])
                # TP = np.array(np.sum(mat,axis = 0).tolist()[0])
                TP = np.sum(mat, axis=1)
                FP = np.array((np.sum(RMatrix[Yhat_neg_index, :], axis=0) - TP))
                # TN = np.sum(Y[Yhat_neg_index] == 0) - FP
                # FN = sum(Y[Yhat_neg_index]) - TP
                p = (TP.astype(float) / (TP + FP + 1))
                p[rules_curr] = 0
                add_rule = random.sample(list(np.where(p == max(p))[0]), 1)[0]
            if add_rule not in rules_curr:
                rules_curr.append(add_rule)
                rules_norm = self.__normalize(rules_curr)
        if len(move) > 0 and move[0] == 'clean':
            # Remove rules that contribute no coverage at all (TP + FP == 0
            # once the rule is left out).
            remove = []
            for i, rule in enumerate(rules_norm):
                Yhat = (np.sum(
                    RMatrix[:, [rule for j, rule in enumerate(rules_norm) if (j != i and j not in remove)]],
                    axis=1) > 0).astype(int)
                TP, FP, TN, FN = utils.get_confusion(Yhat, Y)
                if TP + FP == 0:
                    remove.append(i)
            for x in remove:
                # NOTE(review): 'remove' collects positions i into rules_norm,
                # but this deletes by *value* x — it removes rule ids equal to
                # the flagged position rather than the flagged entries. Looks
                # like a latent bug; confirm against the upstream BRS code.
                if x in rules_norm:
                    rules_norm.remove(x)
            # NOTE(review): this return makes the one below unreachable for the
            # 'clean' move; the two are redundant duplicates.
            return rules_curr, rules_norm
        return rules_curr, rules_norm
    def __compute_prob(self, rules, RMatrix, Y):
        """Return ([TP, FP, TN, FN], [log prior, log likelihood(+), log
        likelihood(-)]) for the given rule set (paper formulas 3 and 6)."""
        Yhat = (np.sum(RMatrix[:, rules], axis=1) > 0).astype(int)
        TP, FP, TN, FN = utils.get_confusion(Yhat, Y)
        Kn_count = list(np.bincount([self.rules_len[x] for x in rules], minlength=self.max_rule_set + 1))
        prior_ChsRules = sum([utils.log_betabin(Kn_count[i], self.patternSpace[i], self.alpha_l[i], self.beta_l[i])
                              for i in range(1, len(Kn_count), 1)])
        likelihood_1 = utils.log_betabin(TP, TP + FP, self.alpha_1, self.beta_1)
        likelihood_2 = utils.log_betabin(TN, FN + TN, self.alpha_2, self.beta_2)
        return [TP, FP, TN, FN], [prior_ChsRules, likelihood_1, likelihood_2]
    def exec_chain(self, t):
        """
        Function to execute chaining in parallel.
        :param t: Tuple with number of rules, split, the RMatrix, y, T0 and chain indicator
        :type t: tuple
        :return: Chaining results
        :rtype: list
        """
        nRules, split, RMatrix, y, T0, chain = t
        # random.seed()
        # np.random.seed()
        lst = []
        # Start from a small random rule set and a very low score sentinel.
        N = random.sample(range(1, min(8, nRules), 1), 1)[0]
        rules_curr = random.sample(range(nRules), N)
        rules_curr_norm = self.__normalize(rules_curr)
        pt_curr = -100000000000
        lst.append(
            [-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules[i] for i in rules_curr]])
        for i in range(self.max_iter):
            if i >= split:
                # After the split point, restart from a previously accepted
                # state, sampled with preference for more recent entries.
                p = np.array(range(1 + len(lst)))
                p = np.array(list(utils.accumulate(p)))
                p = p / p[-1]
                index = utils.find_lt(p, np.random.random())
                rules_curr = lst[index][2].copy()
                rules_curr_norm = lst[index][2].copy()
            rules_new, rules_norm = self.__propose(rules_curr.copy(), rules_curr_norm.copy(), RMatrix, y,
                                                   self.propose_threshold)
            cfmatrix, prob = self.__compute_prob(rules_new, RMatrix, y)
            # Simulated-annealing temperature schedule and acceptance ratio.
            T = T0 ** (1 - i / self.max_iter)
            pt_new = sum(prob)
            alpha = np.exp(float(pt_new - pt_curr) / T)
            if pt_new > sum(lst[-1][1]):
                lst.append([i, prob, rules_new, [self.rules[i] for i in rules_new], cfmatrix])
            if np.random.random() <= alpha:
                rules_curr_norm, rules_curr, pt_curr = rules_norm.copy(), rules_new.copy(), pt_new
        return lst
| IBCNServices/INK | ink/miner/rulemining.py | rulemining.py | py | 19,168 | python | en | code | 14 | github-code | 13 |
72060672657 | #!/usr/bin/env python
# coding=utf-8
"""
Holding functions to manipulate city object
"""
# import sympy.geometry.point as point
import shapely.geometry.point as point
import pycity_calc.cities.scripts.city_generator.city_generator as citgen
import pycity_base.classes.demand.SpaceHeating as SpaceHeating
import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand
import pycity_base.classes.demand.Apartment as Apartment
import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as cit
import pycity_calc.visualization.city_visual as citvis
def gen_test_city(timestep=3600, year=2017, try_path=None,
                  location=(51.529086, 6.944689), altitude=55):
    """
    Build a small pycity_calc test district: three residential buildings
    (each one apartment with SLP space-heating and electrical demand) plus
    a three-node street network.

    Parameters
    ----------
    timestep : int
        Timestep in seconds
    year : int, optional
        Year of analysis, used for timer and co2 settings (default: 2017)
    try_path : str, optional
        Path to TRY weather file (default: None --> default TRY file is used)
    location : tuple, optional
        (latitude, longitude) of the simulated system's position
        (default: (51.529086, 6.944689) for Bottrop, Germany)
    altitude : float, optional
        Altitude of location in m (default: 55 - City of Bottrop)

    Returns
    -------
    city : object
        City object of pycity_calc
    """
    #  Environment shared by all buildings
    env = citgen.generate_environment(timestep=timestep,
                                      year_timer=year,
                                      year_co2=year,
                                      try_path=try_path,
                                      location=location,
                                      altitude=altitude)
    #  Empty city object
    city = cit.City(environment=env)
    #  Building positions (x, y)
    building_coords = [(15, 25), (25, 10), (40, 45)]
    for x_coord, y_coord in building_coords:
        #  Demands with standardized load profiles (method=1)
        heat = SpaceHeating.SpaceHeating(env,
                                         method=1,
                                         profile_type='HEF',
                                         livingArea=100,
                                         specificDemand=130)
        elec = ElectricalDemand.ElectricalDemand(env, method=1,
                                                 annualDemand=3000,
                                                 profileType="H0")
        #  Apartment holding both demands
        apartment = Apartment.Apartment(env)
        apartment.addMultipleEntities([heat, elec])
        #  Extended building with the apartment attached
        building = build_ex.BuildingExtended(env,
                                             build_year=1970,
                                             mod_year=2003,
                                             build_type=0)
        building.addEntity(entity=apartment)
        city.add_extended_building(extended_building=building,
                                   position=point.Point(x_coord, y_coord))
    #  Street network: three nodes on y=20, joined by two street edges
    street_nodes = [city.add_street_node(position=point.Point(x, 20))
                    for x in (10, 30, 50)]
    city.add_edge(street_nodes[0], street_nodes[1], network_type='street')
    city.add_edge(street_nodes[1], street_nodes[2], network_type='street')
    return city
def get_min_x_y_coord(city):
    """
    Returns min x- and y-coordinates as tuple, found within city object.
    Requires position parameter (shapely point) on every node!

    Parameters
    ----------
    city : object
        City object of pycity_calc

    Returns
    -------
    tuple_min : tuple (of floats)
        Tuple holding minimal x-/y-coordinates (x_min, y_min)
        ((None, None) if the city holds no nodes)
    """
    nodes = list(city.nodes())
    if not nodes:
        #  Preserve historical behaviour for an empty graph
        return (None, None)
    #  Built-in min() replaces the manual sentinel scan over all nodes
    x_min = min(city.nodes[n]['position'].x for n in nodes)
    y_min = min(city.nodes[n]['position'].y for n in nodes)
    return (x_min, y_min)
def set_zero_coordinate(city, buffer=10):
    """
    Function manipulates position attributes of all nodes within city.
    Finds zero point with info of smallest x- and y-coordinates (plus buffer)
    Requires, that all nodes in city hold attribute 'position'!

    Parameters
    ----------
    city : object
        City object of pycity
    buffer : float, optional
        Buffer that should be used between found min x- and y-coordinates
        and newly defined zero point (default: 10).
        E.g. if buffer == 0, zero point is defined with (x_min/y_min)

    Raises
    ------
    AssertionError
        If any node misses the 'position' attribute (raised before any
        node is moved).
    """
    #  Validate up front, so no node is shifted before an error is raised
    for n in city.nodes():
        if 'position' not in city.nodes[n]:
            msg = 'Error: No position attribute on node ' + str(n)
            raise AssertionError(msg)
    #  Minimal coordinates define the new origin (shifted by buffer)
    (x_min, y_min) = get_min_x_y_coord(city)
    if buffer != 0:
        x_min -= buffer
        y_min -= buffer
    #  Translate every node position towards the new origin
    for n in city.nodes():
        pos = city.nodes[n]['position']
        #  Generate and store shifted point
        city.nodes[n]['position'] = point.Point(pos.x - x_min, pos.y - y_min)
if __name__ == '__main__':
    #  Demo: build the test district, shift all coordinates towards a new
    #  zero point (with 5 m buffer) and plot the district before and after.
    buffer = 5
    #  Generate test city object
    city = gen_test_city()
    #  Plot city
    citvis.plot_city_district(city, plt_title='Before zero point conversion')
    #  Convert points
    set_zero_coordinate(city, buffer=buffer)
    #  Plot city
    citvis.plot_city_district(city, plt_title='After zero point conversion')
| RWTH-EBC/pyCity_calc | pycity_calc/toolbox/modifiers/mod_city_geo_pos.py | mod_city_geo_pos.py | py | 6,372 | python | en | code | 7 | github-code | 13 |
1346915311 | import math
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from pyhealth.datasets import SampleEHRDataset
from pyhealth.models import BaseModel
from pyhealth.tokenizer import Tokenizer
# VALID_OPERATION_LEVEL = ["visit", "event"]
class Attention(nn.Module):
    """Scaled dot-product attention (parameter-free).

    Returns both the attended values and the attention weights; masked
    positions get weight exactly 0 after the softmax.
    """

    def forward(self, query, key, value, mask=None, dropout=None):
        # Similarity scores scaled by sqrt(d_k).
        d_k = query.size(-1)
        scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Push masked positions towards -inf before the softmax.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = torch.softmax(scores, dim=-1)
        if mask is not None:
            # Force masked weights to exactly zero.
            weights = weights.masked_fill(mask == 0, 0)
        if dropout is not None:
            weights = dropout(weights)
        return weights.matmul(value), weights
class MultiHeadedAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Keeps the last attention map — and, when requested, its gradient — on the
    module so the model can be inspected for interpretability.

    Args:
        h: number of attention heads; must divide d_model.
        d_model: model (embedding) dimension.
        dropout: dropout rate applied to the attention weights.
    """
    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # Separate bias-free projections for query, key and value.
        self.linear_layers = nn.ModuleList(
            [nn.Linear(d_model, d_model, bias=False) for _ in range(3)]
        )
        self.output_linear = nn.Linear(d_model, d_model, bias=False)
        self.attention = Attention()
        self.dropout = nn.Dropout(p=dropout)
        # Saved for interpretability; refreshed on every forward pass.
        self.attn_gradients = None
        self.attn_map = None
    # helper functions for interpretability
    def get_attn_map(self):
        # Attention weights of the most recent forward pass.
        return self.attn_map
    def get_attn_grad(self):
        # Gradient of the attention weights (only set when register_hook=True
        # and backward() has run).
        return self.attn_gradients
    def save_attn_grad(self, attn_grad):
        self.attn_gradients = attn_grad
    # register_hook option allows us to save the gradients in backwarding
    def forward(self, query, key, value, mask=None, register_hook = False):
        batch_size = query.size(0)
        # 1) Do all the linear projections in batch from d_model => h x d_k
        query, key, value = [
            l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
            for l, x in zip(self.linear_layers, (query, key, value))
        ]
        # 2) Apply attention on all the projected vectors in batch.
        if mask is not None:
            # Broadcast the same mask over all heads.
            mask = mask.unsqueeze(1)
        x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
        self.attn_map = attn  # save the attention map
        if register_hook:
            attn.register_hook(self.save_attn_grad)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.output_linear(x)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward network (GELU + dropout).

    Rows whose mask is entirely zero have their output zeroed.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = nn.GELU()

    def forward(self, x, mask=None):
        hidden = self.activation(self.w_1(x))
        out = self.w_2(self.dropout(hidden))
        if mask is not None:
            # A position is kept iff its mask row has at least one nonzero.
            keep = mask.sum(dim=-1) > 0
            out[~keep] = 0
        return out
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(layernorm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = nn.LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        normed = self.norm(x)
        return x + self.dropout(sublayer(normed))
class TransformerBlock(nn.Module):
    """Transformer block.

    MultiHeadedAttention + PositionwiseFeedForward, each wrapped in a
    SublayerConnection (pre-norm residual), followed by dropout.

    Args:
        hidden: hidden size of transformer.
        attn_heads: head sizes of multi-head attention.
        dropout: dropout rate.
    """

    def __init__(self, hidden, attn_heads, dropout):
        super(TransformerBlock, self).__init__()
        self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
        self.feed_forward = PositionwiseFeedForward(
            d_model=hidden, d_ff=4 * hidden, dropout=dropout
        )
        self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask=None, register_hook=False):
        """Forward propagation.

        Args:
            x: [batch_size, seq_len, hidden]
            mask: [batch_size, seq_len, seq_len]

        Returns:
            A tensor of shape [batch_size, seq_len, hidden]
        """
        def _self_attention(h):
            # Self-attention: query, key and value all come from h.
            return self.attention(h, h, h, mask=mask, register_hook=register_hook)

        def _feed_forward(h):
            return self.feed_forward(h, mask=mask)

        x = self.input_sublayer(x, _self_attention)
        x = self.output_sublayer(x, _feed_forward)
        return self.dropout(x)
class TransformerLayer(nn.Module):
    """Transformer layer.

    Paper: Ashish Vaswani et al. Attention is all you need. NIPS 2017.

    Stacks ``num_layers`` TransformerBlock modules. Used inside the
    Transformer model, but also usable as a standalone layer.

    Args:
        feature_size: the hidden feature size.
        heads: the number of attention heads. Default is 1.
        dropout: dropout rate. Default is 0.5.
        num_layers: number of transformer layers. Default is 1.
        register_hook: True to save gradients of attention layer, Default is False.

    Examples:
        >>> from pyhealth.models import TransformerLayer
        >>> input = torch.randn(3, 128, 64)  # [batch size, sequence len, feature_size]
        >>> layer = TransformerLayer(64)
        >>> emb, cls_emb = layer(input)
        >>> emb.shape
        torch.Size([3, 128, 64])
        >>> cls_emb.shape
        torch.Size([3, 64])
    """

    def __init__(self, feature_size, heads=1, dropout=0.5, num_layers=1):
        super(TransformerLayer, self).__init__()
        blocks = [TransformerBlock(feature_size, heads, dropout)
                  for _ in range(num_layers)]
        self.transformer = nn.ModuleList(blocks)

    def forward(
        self, x: torch.tensor, mask: Optional[torch.tensor] = None, register_hook=False
    ) -> Tuple[torch.tensor, torch.tensor]:
        """Forward propagation.

        Args:
            x: a tensor of shape [batch size, sequence len, feature_size].
            mask: an optional tensor of shape [batch size, sequence len], where
                1 indicates valid and 0 indicates invalid.

        Returns:
            emb: a tensor of shape [batch size, sequence len, feature_size],
                containing the output features for each time step.
            cls_emb: a tensor of shape [batch size, feature_size], containing
                the output features for the first time step.
        """
        if mask is not None:
            # Expand the per-position validity mask into a pairwise
            # (seq_len x seq_len) attention mask via an outer product.
            mask = torch.einsum("ab,ac->abc", mask, mask)
        for block in self.transformer:
            x = block(x, mask, register_hook)
        return x, x[:, 0, :]
class Transformer(BaseModel):
    """Transformer model.

    This model applies a separate Transformer layer for each feature, and then
    concatenates the final hidden states of each Transformer layer. The concatenated
    hidden states are then fed into a fully connected layer to make predictions.

    Note:
        We use separate Transformer layers for different feature_keys.
        Currently, we automatically support different input formats:
            - code based input (need to use the embedding table later)
            - float/int based value input
        We follow the current convention for the transformer model:
            - case 1. [code1, code2, code3, ...]
                - we will assume the code follows the order; our model will encode
                each code into a vector and apply transformer on the code level
            - case 2. [[code1, code2]] or [[code1, code2], [code3, code4, code5], ...]
                - we will assume the inner bracket follows the order; our model first
                use the embedding table to encode each code into a vector and then use
                sum pooling to get one vector for one inner bracket; then use
                transformer on the bracket level
            - case 3. [[1.5, 2.0, 0.0]] or [[1.5, 2.0, 0.0], [8, 1.2, 4.5], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run transformer directly
                on the inner bracket level, similar to case 1 after embedding table
            - case 4. [[[1.5, 2.0, 0.0]]] or [[[1.5, 2.0, 0.0], [8, 1.2, 4.5]], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run transformer directly
                on the inner bracket level, similar to case 2 after embedding table

    dataset: the dataset to train the model. It is used to query certain
        information such as the set of all tokens.
    feature_keys: list of keys in samples to use as features,
        e.g. ["conditions", "procedures"].
    label_key: key in samples to use as label (e.g., "drugs").
    mode: one of "binary", "multiclass", or "multilabel".
    embedding_dim: the embedding dimension. Default is 128.
    **kwargs: other parameters for the Transformer layer.

    Examples:
        >>> from pyhealth.datasets import SampleEHRDataset
        >>> samples = [
        ...         {
        ...             "patient_id": "patient-0",
        ...             "visit_id": "visit-0",
        ...             "list_codes": ["505800458", "50580045810", "50580045811"],  # NDC
        ...             "list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
        ...             "list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]],  # ATC-4
        ...             "list_list_vectors": [
        ...                 [[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
        ...                 [[7.7, 8.5, 9.4]],
        ...             ],
        ...             "label": 1,
        ...         },
        ...         {
        ...             "patient_id": "patient-0",
        ...             "visit_id": "visit-1",
        ...             "list_codes": [
        ...                 "55154191800",
        ...                 "551541928",
        ...                 "55154192800",
        ...                 "705182798",
        ...                 "70518279800",
        ...             ],
        ...             "list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
        ...             "list_list_codes": [["A04A", "B035", "C129"]],
        ...             "list_list_vectors": [
        ...                 [[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
        ...             ],
        ...             "label": 0,
        ...         },
        ...     ]
        >>> dataset = SampleEHRDataset(samples=samples, dataset_name="test")
        >>>
        >>> from pyhealth.models import Transformer
        >>> model = Transformer(
        ...         dataset=dataset,
        ...         feature_keys=[
        ...             "list_codes",
        ...             "list_vectors",
        ...             "list_list_codes",
        ...             "list_list_vectors",
        ...         ],
        ...         label_key="label",
        ...         mode="multiclass",
        ...     )
        >>>
        >>> from pyhealth.datasets import get_dataloader
        >>> train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)
        >>> data_batch = next(iter(train_loader))
        >>>
        >>> ret = model(**data_batch)
        >>> print(ret)
        {
            'loss': tensor(4.0555, grad_fn=<NllLossBackward0>),
            'y_prob': tensor([[1.0000e+00, 1.8206e-06],
                            [9.9970e-01, 3.0020e-04]], grad_fn=<SoftmaxBackward0>),
            'y_true': tensor([0, 1]),
            'logit': tensor([[ 7.6283, -5.5881],
                            [ 1.0898, -7.0210]], grad_fn=<AddmmBackward0>)
        }
        >>>
    """
    def __init__(
        self,
        dataset: SampleEHRDataset,
        feature_keys: List[str],
        label_key: str,
        mode: str,
        pretrained_emb: str = None,
        embedding_dim: int = 128,
        **kwargs
    ):
        super(Transformer, self).__init__(
            dataset=dataset,
            feature_keys=feature_keys,
            label_key=label_key,
            mode=mode,
            pretrained_emb=pretrained_emb,
        )
        self.embedding_dim = embedding_dim
        # validate kwargs for Transformer layer
        if "feature_size" in kwargs:
            raise ValueError("feature_size is determined by embedding_dim")
        # the key of self.feat_tokenizers only contains the code based inputs
        self.feat_tokenizers = {}
        self.label_tokenizer = self.get_label_tokenizer()
        # the key of self.embeddings only contains the code based inputs
        self.embeddings = nn.ModuleDict()
        # the key of self.linear_layers only contains the float/int based inputs
        self.linear_layers = nn.ModuleDict()
        # add feature transformation layers
        for feature_key in self.feature_keys:
            input_info = self.dataset.input_info[feature_key]
            # sanity check
            if input_info["type"] not in [str, float, int]:
                raise ValueError(
                    "Transformer only supports str code, float and int as input types"
                )
            elif (input_info["type"] == str) and (input_info["dim"] not in [2, 3]):
                raise ValueError(
                    "Transformer only supports 2-dim or 3-dim str code as input types"
                )
            elif (input_info["type"] in [float, int]) and (
                input_info["dim"] not in [2, 3]
            ):
                raise ValueError(
                    "Transformer only supports 2-dim or 3-dim float and int as input types"
                )
            # for code based input, we need Type
            # for float/int based input, we need Type, input_dim
            self.add_feature_transform_layer(feature_key, input_info)
        # one independent Transformer stack per feature key
        self.transformer = nn.ModuleDict()
        for feature_key in feature_keys:
            self.transformer[feature_key] = TransformerLayer(
                feature_size=embedding_dim, **kwargs
            )
        output_size = self.get_output_size(self.label_tokenizer)
        # transformer's output feature size is still embedding_dim
        self.fc = nn.Linear(len(self.feature_keys) * self.embedding_dim, output_size)
    def forward(self, **kwargs) -> Dict[str, torch.Tensor]:
        """Forward propagation.

        The label `kwargs[self.label_key]` is a list of labels for each patient.

        Args:
            **kwargs: keyword arguments for the model. The keys must contain
                all the feature keys and the label key.

        Returns:
            A dictionary with the following keys:
                loss: a scalar tensor representing the loss.
                y_prob: a tensor representing the predicted probabilities.
                y_true: a tensor representing the true labels.
        """
        patient_emb = []
        for feature_key in self.feature_keys:
            input_info = self.dataset.input_info[feature_key]
            dim_, type_ = input_info["dim"], input_info["type"]
            # for case 1: [code1, code2, code3, ...]
            if (dim_ == 2) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_2d(
                    kwargs[feature_key]
                )
                # (patient, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, event)
                # padded positions embed to all-zeros, so this marks valid events
                mask = torch.any(x !=0, dim=2)
            # for case 2: [[code1, code2], [code3, ...], ...]
            elif (dim_ == 3) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_3d(
                    kwargs[feature_key]
                )
                # (patient, visit, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, visit, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, visit, embedding_dim)
                # sum-pool the event embeddings of each visit
                x = torch.sum(x, dim=2)
                # (patient, visit)
                mask = torch.any(x !=0, dim=2)
            # for case 3: [[1.5, 2.0, 0.0], ...]
            elif (dim_ == 2) and (type_ in [float, int]):
                x, mask = self.padding2d(kwargs[feature_key])
                # (patient, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, event, embedding_dim)
                x = self.linear_layers[feature_key](x)
                # (patient, event)
                mask = mask.bool().to(self.device)
            # for case 4: [[[1.5, 2.0, 0.0], [1.8, 2.4, 6.0]], ...]
            elif (dim_ == 3) and (type_ in [float, int]):
                x, mask = self.padding3d(kwargs[feature_key])
                # (patient, visit, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, visit, embedding_dim)
                # sum-pool events per visit, then project the value vector
                x = torch.sum(x, dim=2)
                x = self.linear_layers[feature_key](x)
                # a visit counts as valid via its first value slot — assumes
                # padding3d masks whole visits uniformly; TODO confirm
                mask = mask[:, :, 0]
                mask = mask.bool().to(self.device)
            else:
                raise NotImplementedError
            # transform x to (patient, event, embedding_dim)
            if self.pretrained_emb != None:
                # presumably add_feature_transform_layer registered a projection
                # for pretrained embeddings — verify against BaseModel
                x = self.linear_layers[feature_key](x)
            # keep only the CLS (first-step) embedding per feature
            _, x = self.transformer[feature_key](x, mask, kwargs.get('register_hook'))
            patient_emb.append(x)
        # concatenate per-feature CLS embeddings before the classifier head
        patient_emb = torch.cat(patient_emb, dim=1)
        logits = self.fc(patient_emb)
        # obtain y_true, loss, y_prob
        y_true = self.prepare_labels(kwargs[self.label_key], self.label_tokenizer)
        loss = self.get_loss_function()(logits, y_true)
        y_prob = self.prepare_y_prob(logits)
        results = {"loss": loss, "y_prob": y_prob, "y_true": y_true, "logit": logits}
        if kwargs.get("embed", False):
            results["embed"] = patient_emb
        return results
# Smoke test: build a tiny two-sample synthetic dataset covering every
# supported feature format, then run one forward and one backward pass
# through the Transformer model defined above.
if __name__ == "__main__":
    from pyhealth.datasets import SampleEHRDataset

    samples = [
        {
            "patient_id": "patient-0",
            "visit_id": "visit-0",
            "single_vector": [1, 2, 3],
            "list_codes": ["505800458", "50580045810", "50580045811"], # NDC
            "list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
            "list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]], # ATC-4
            "list_list_vectors": [
                [[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
                [[7.7, 8.5, 9.4]],
            ],
            "label": 1,
        },
        {
            "patient_id": "patient-0",
            "visit_id": "visit-1",
            "single_vector": [1, 5, 8],
            "list_codes": [
                "55154191800",
                "551541928",
                "55154192800",
                "705182798",
                "70518279800",
            ],
            "list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
            "list_list_codes": [["A04A", "B035", "C129"]],
            "list_list_vectors": [
                [[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
            ],
            "label": 0,
        },
    ]
    # dataset
    dataset = SampleEHRDataset(samples=samples, dataset_name="test")
    # data loader
    from pyhealth.datasets import get_dataloader
    train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)
    # model: exercise one feature key of each supported input format
    # (code list, vector list, nested code lists, nested vector lists)
    model = Transformer(
        dataset=dataset,
        feature_keys=[
            "list_codes",
            "list_vectors",
            "list_list_codes",
            "list_list_vectors",
        ],
        label_key="label",
        mode="multiclass",
    )
    # data batch
    data_batch = next(iter(train_loader))
    # try the model: the forward pass returns loss, y_prob, y_true, logit
    ret = model(**data_batch)
    print(ret)
    # try loss backward
    ret["loss"].backward()
| sunlabuiuc/PyHealth | pyhealth/models/transformer.py | transformer.py | py | 20,506 | python | en | code | 778 | github-code | 13 |
70871055379 | from flask_cors import CORS
import sys
sys.path.append('.')
from cudas.colorToGrayscale import colorToGrayscaleConvertion
from cudas.imageBlur import imageBlur
from flask import Flask, request, jsonify, send_from_directory
from werkzeug import urls
from werkzeug.utils import secure_filename
from PIL import Image
import os
import base64
from io import BytesIO
import numpy as np
import math
from numba import cuda
import logging
print( "SYSTEM.PATH == ", sys.path )
# Folder for temporary image uploads; created on first run if missing.
UPLOAD_FOLDER = 'uploads'
if not os.path.exists( UPLOAD_FOLDER ):
    os.makedirs( UPLOAD_FOLDER )
logging.basicConfig(filename='app.log', level=logging.DEBUG)
# Serve the built frontend from ../dist; CORS is wide open here, which is a
# development convenience — tighten the allowed origins before production.
app = Flask(__name__, static_folder='../dist')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app, resources={r"/*": {"origins": "*"}})
@app.route('/process_image', methods=['POST'])
def process_image():
    """Decode a Base64 image from the form body, run the requested CUDA
    processing, and send the processed image back.

    Form fields:
        imageData: data-URL string ("data:image/...;base64,<payload>").
        type: processing type understood by process_with_cuda.

    Returns the processed image file on success, or a JSON error with
    HTTP 400 on any failure.
    """
    try:
        imageData = request.form.get('imageData')
        processing_type = request.form.get('type')

        # Convert the Base64 encoded data to a PIL Image (strip the
        # "data:...;base64," prefix before decoding).
        image_data = base64.b64decode(imageData.split(",")[1])
        image = Image.open(BytesIO(image_data))

        # Save the image temporarily and process it
        image_path = os.path.join(UPLOAD_FOLDER, "temp_image.png")
        image.save(image_path)

        processed_image_path = process_with_cuda(image_path, processing_type)

        # BUG FIX: process_with_cuda returns a path that already contains
        # UPLOAD_FOLDER; send_from_directory expects a path relative to its
        # directory argument, so pass only the file name.
        return send_from_directory(UPLOAD_FOLDER, os.path.basename(processed_image_path))
    except Exception as e:
        app.logger.error(f"Error processing image: {e}")
        return jsonify({"error": str(e)}), 400
def process_with_cuda(image_path, processing_type):
    """Run the CUDA kernel selected by *processing_type* over an image.

    Parameters
    ----------
    image_path : str
        Path of the input image on disk.
    processing_type : str
        Either 'color-to-grayscale' or 'image-blur'.

    Returns
    -------
    str
        Path of the processed image written inside UPLOAD_FOLDER.

    Raises
    ------
    ValueError
        For an unsupported *processing_type* (the original code fell
        through and crashed with an UnboundLocalError instead).
    """
    # Load image and flatten it for GPU processing
    image_data = image_to_rgb_array(image_path)
    height, width, channels = image_data.shape
    flattened_image_data = image_data.flatten()

    # Allocate device memory and copy the input to the device
    pin_device = cuda.to_device(flattened_image_data)
    pout_device = cuda.device_array((height * width * channels,), dtype=np.uint8)

    # Grid dimensions: round UP so the right/bottom edge pixels are covered
    # when the image size is not a multiple of the block size (the original
    # int() division truncated, leaving edge pixels unprocessed).
    # NOTE(review): assumes the kernels bounds-check their indices — confirm.
    threads_per_block = (16, 16)
    blocks_per_grid_x = math.ceil(width / threads_per_block[0])
    blocks_per_grid_y = math.ceil(height / threads_per_block[1])
    blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)

    # Launch the CUDA kernel and convert the result back to a PIL image
    if processing_type == 'color-to-grayscale':
        colorToGrayscaleConvertion[blocks_per_grid, threads_per_block](pout_device, pin_device, width, height)
        # Grayscale output is single-channel
        processed_image_data = pout_device.copy_to_host().reshape(height, width)
        processed_image = Image.fromarray(processed_image_data, 'L')
    elif processing_type == 'image-blur':
        imageBlur[blocks_per_grid, threads_per_block](pout_device, pin_device, width, height)
        # Blur output keeps all 3 RGB channels
        processed_image_data = pout_device.copy_to_host().reshape(height, width, 3)
        processed_image = Image.fromarray(processed_image_data, 'RGB')
    else:
        raise ValueError(f"Unsupported processing type: {processing_type}")

    processed_image_path = os.path.join(UPLOAD_FOLDER, "processed_image.png")
    processed_image.save(processed_image_path)
    return processed_image_path
def image_to_rgb_array(image_path):
    """Load the image at *image_path* as an (H, W, 3) uint8 RGB array.

    Image.open(...).convert('RGB') already discards any alpha channel, so
    the original post-hoc check for a fourth channel was dead code and has
    been removed.
    """
    image = Image.open(image_path).convert('RGB')
    return np.array(image)
# Basic GET endpoint used as a liveness check.
@app.route('/hello', methods=['GET'])
def hello_world():
    """Always respond with a static greeting payload."""
    payload = {"message": "Hello, World!"}
    return jsonify(payload)
# Basic POST endpoint that mirrors its JSON body back to the caller.
@app.route('/echo', methods=['POST'])
def echo():
    """Return the parsed JSON request body, or a JSON error with HTTP 400
    if the body cannot be parsed or serialized."""
    try:
        payload = request.get_json()
        return jsonify(payload)
    except Exception as exc:
        app.logger.error(f"Error processing request: {exc}")
        return jsonify({"error": str(exc)}), 400
@app.route('/test', methods=['GET'])
def test_endpoint():
    """Log and answer a reachability probe."""
    app.logger.info("Test endpoint called")
    response = {"message": "Hello, World from Flask!"}
    return jsonify(response)
@app.route('/api/test', methods=['GET'])
def api_test_endpoint():
    """Reachability probe under the /api prefix."""
    response = {"message": "Hello, World from API Test Endpoint!"}
    return jsonify(response)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """Serve static frontend assets when they exist; otherwise fall back
    to the SPA entry point (index.html)."""
    asset = os.path.join(app.static_folder, path)
    if path and os.path.exists(asset):
        return send_from_directory(app.static_folder, path)
    return send_from_directory(app.static_folder, 'index.html')
if __name__ == '__main__':
    # Honor the PORT environment variable (commonly injected by hosting
    # platforms), defaulting to 5000 for local development.
    port = int( os.environ.get( "PORT", 5000 )) # Use PORT if it's there
    app.run( debug=True, host='0.0.0.0', port=port )
| lvllvl/python-api | api/api.py | api.py | py | 5,118 | python | en | code | 0 | github-code | 13 |
22396321332 | import numpy as np
import pandas as pd
import time
from matplotlib.widgets import Slider
# nucleosynth
from nucleosynth.tracers import load_save, tracer_tools
from nucleosynth import paths
from nucleosynth import network
from nucleosynth import plotting
from nucleosynth import printing
from nucleosynth import tools
"""
Class representing an individual mass tracer from a model
"""
class Tracer:
"""Object representing an individual mass tracer from a skynet model
common variables/terminology
----------------------------
abu_var : 'X' or 'Y'
mass fraction (X) and number fraction (Y)
iso_group : 'A' or 'Z'
nuclides of constant A (isobars) and Z (isotopes)
attributes
----------
columns : {table_name: pd.DataFrame}
Tables of tracer properties (density, temperature, etc.) versus time,
from original STIR data, and resulting SkyNet output
composition : {abu_var: pd.DataFrame}
Tables of X and Y versus time
files : h5py.File
Raw hdf5 tracer output files from skynet
mass : float
mass coordinate of tracer (interior mass, Msun)
model : str
Name of the core-collapse model (typically named after the progenitor model)
most_abundant : {abu_var: pd.DataFrame}
Table of most abundant isotopes, by X and Y, as subset of network
network : pd.DataFrame
Table of isotopes used in model (name, Z, A)
network_unique : {iso_group: [int]}
unique A and Z in network
paths : str
Paths to model input/output directories
reload : bool
whether to force reload from raw file (i.e. don't load cache)
save : bool
whether to save tables to cache for faster loading
steps : [int]
list of skynet model steps
summary : {}
collection of summary quantities
sums : {abu_var: iso_group: pd.DataFrame}
Y and X tables, grouped and summed over A and Z
time : pd.Series
Pointer to 'time' column of self.columns
tracer_id : int
The tracer ID/index
verbose : bool
Option to print output
"""
    def __init__(self, tracer_id, model, load_all=True,
                 steps=(1, 2), save=True, reload=False,
                 verbose=True):
        """
        parameters
        ----------
        tracer_id : int
            ID/index of the mass tracer to load
        model : str
            name of the core-collapse model the tracer belongs to
        steps : [int]
            skynet model steps to load
        load_all : bool
            eagerly load every table on construction (see load_all())
        save : bool
            cache loaded tables for faster subsequent loads
        reload : bool
            force reload from raw files, ignoring any cache
        verbose : bool
            print progress output
        """
        self.tracer_id = tracer_id
        self.model = model
        self.verbose = verbose
        self.steps = steps
        self.save = save
        self.reload = reload
        # The attributes below start empty and are populated by the
        # load_*/get_* methods (typically driven by load_all()).
        self.files = None
        self.network = None
        self.composition = None
        self.network_unique = None
        self.most_abundant = None
        self.sums = None
        self.time = None
        self.summary = dict.fromkeys(['total_heating'])
        self.columns = dict.fromkeys(['skynet', 'stir'])
        # Mass coordinate (interior mass, Msun) is available immediately.
        self.mass = load_save.get_stir_mass_element(tracer_id, self.model)
        self.title = f'{self.model}, tracer_{self.tracer_id}'
        self.paths = paths.get_model_paths(self.model)
        if load_all:
            self.load_all()
# ===============================================================
# Loading/extracting
# ===============================================================
def load_all(self):
"""Load all tracer data
"""
t0 = time.time()
self.load_files()
self.load_stir()
self.load_columns()
self.load_network()
self.load_composition()
self.load_sums()
self.get_most_abundant()
self.get_sumy_abar()
self.get_zbar()
self.get_summary()
t1 = time.time()
self.printv(f'Load time: {t1-t0:.3f} s')
def load_files(self):
"""Load raw tracer files
"""
self.files = load_save.load_files(self.tracer_id,
tracer_steps=self.steps,
model=self.model,
verbose=self.verbose)
def load_stir(self):
"""Load stir tracer table
"""
self.printv('Loading stir tracer')
self.columns['stir'] = load_save.load_stir_tracer(self.tracer_id, model=self.model)
def load_columns(self):
"""Load table of scalars
"""
self.printv('Loading columns')
columns = load_save.load_table(self.tracer_id,
model=self.model,
tracer_steps=self.steps,
table_name='columns',
tracer_files=self.files,
save=self.save, reload=self.reload,
verbose=False)
self.columns['skynet'] = columns
self.time = columns['time']
def load_network(self):
"""Load table of network isotopes
"""
self.printv('Loading network')
self.network = load_save.load_table(self.tracer_id,
model=self.model,
tracer_steps=self.steps,
table_name='network',
tracer_files=self.files,
save=self.save, reload=self.reload,
verbose=False)
self.get_network_unique()
def load_composition(self):
"""Load composition tables (X, Y)
"""
self.printv('Loading composition tables')
self.composition = load_save.load_composition(self.tracer_id,
tracer_steps=self.steps,
model=self.model,
tracer_files=self.files,
tracer_network=self.network,
reload=self.reload,
save=self.save,
verbose=False)
def load_sums(self):
"""Get X, Y sums over A, Z
"""
self.printv('Loading composition sums')
self.sums = load_save.load_sums(self.tracer_id,
tracer_steps=self.steps,
model=self.model,
tracer_files=self.files,
tracer_network=self.network,
reload=self.reload,
save=self.save,
verbose=False)
# ===============================================================
# Analysis
# ===============================================================
def get_network_unique(self):
"""Get unique Z and A in network
"""
self.network_unique = network.get_network_unique(self.network)
    def get_sumy_abar(self):
        """Get sumY and Abar versus time from Y table

        abar (mean mass number) is the reciprocal of the total number
        fraction: abar = 1 / sum(Y).
        """
        columns = self.columns['skynet']
        columns['sumy'] = network.get_sumy(self.composition['Y'])
        columns['abar'] = 1 / columns['sumy']
def get_zbar(self):
"""Get Zbar versus time from Y table
"""
columns = self.columns['skynet']
columns['zbar'] = network.get_zbar(self.composition['Y'],
tracer_network=self.network,
ye=columns['ye'])
    def get_summary(self):
        """Get summary quantities

        Fills self.summary with scalar diagnostics:
          - 'total_heating': heating derived from the skynet columns
            (see tracer_tools.get_total_heating for the exact definition)
          - 'max_ni56': peak ni56 mass fraction over the tracer history
        """
        self.summary['total_heating'] = tracer_tools.get_total_heating(
                                                table=self.columns['skynet'])
        self.summary['max_ni56'] = self.composition['X']['ni56'].max()
def get_most_abundant(self):
"""Get most abundant isotopes in network
"""
most_abundant = dict.fromkeys(['X', 'Y'])
for abu_var in most_abundant:
most_abundant[abu_var] = network.get_most_abundant(
self.composition[abu_var],
tracer_network=self.network,
abu_var=abu_var)
self.most_abundant = most_abundant
# ===============================================================
# Accessing Data
# ===============================================================
def select_composition(self, abu_var, z=None, a=None):
"""Return composition (X or Y) for given Z and/or A
parameters
----------
abu_var : 'X' or 'Y'
z : int
atomic number
a : int
atomic mass number
"""
return network.select_composition(self.composition[abu_var],
tracer_network=self.network, z=z, a=a)
def select_network(self, z=None, a=None):
"""Return subset of network with given Z and/or A
parameters
----------
z : int
atomic number
a : int
atomic mass number
"""
return network.select_isotopes(self.network, z=z, a=a)
# ===============================================================
# Plotting
# ===============================================================
def plot_columns(self, columns, max_cols=1, y_scale=None, x_scale=None,
legend=False, title=True, ylims=None, xlims=None,
sub_figsize=(8, 4), label=None, column_table='skynet',
linestyle='-', marker='', sharex=True):
"""Plot column quantity versus time
parameters
----------
columns : [str]
list of quantities to plot in subplots
max_cols : int
how many subplots to put side-by-side
y_scale : 'log' or 'linear'
x_scale : 'log' or 'linear'
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
sub_figsize : [width, height]
label : str
linestyle : str
marker : str
sharex : bool
column_table : 'skynet' or 'stir'
"""
fig, ax = plotting.setup_subplots(n_sub=len(columns), max_cols=max_cols,
sub_figsize=sub_figsize,
sharex=sharex, squeeze=False)
for i, column in enumerate(columns):
row = int(np.floor(i / max_cols))
col = i % max_cols
ax_title = title if i == 0 else False
axis = ax[row, col]
if column in ['X', 'Y']:
self.plot_composition(abu_var=column, y_scale=y_scale,
x_scale=x_scale, ylims=ylims, xlims=xlims,
ax=axis, legend=legend, title=ax_title,
linestyle=linestyle, marker=marker)
else:
self.plot_column(column, ax=axis, y_scale=y_scale,
x_scale=x_scale, ylims=ylims, xlims=xlims, label=label,
legend=legend, linestyle=linestyle, marker=marker,
title=ax_title, column_table=column_table)
return fig
def plot_column(self, column, y_scale=None, x_scale=None,
ax=None, legend=False, title=True,
ylims=None, xlims=None, figsize=(8, 6), label=None,
linestyle='-', marker='', column_table='skynet'):
"""Plot column quantity versus time
parameters
----------
column : str
quantity to plot on y-axis (from Tracer.columns)
y_scale : 'log' or 'linear'
x_scale : 'log' or 'linear'
ax : Axes
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
label : str
linestyle : str
marker : str
column_table : 'skynet' or 'stir'
which table to plot from
"""
table = self.columns[column_table]
self.check_columns(column, column_table)
fig, ax = plotting.check_ax(ax=ax, figsize=figsize)
ax.plot(table['time'], table[column], ls=linestyle,
marker=marker, label=label)
plotting.set_ax_all(ax, y_var=column, x_var='time', y_scale=y_scale,
x_scale=x_scale, ylims=ylims, xlims=xlims, legend=legend,
title=title, title_str=self.title)
return fig
def plot_compare_tables(self, column, y_scale=None, x_scale=None,
ax=None, legend=True, title=True,
ylims=None, xlims=None, figsize=(8, 6),
marker='', column_tables=('skynet', 'stir')):
"""Plot column(s) from multiple tables for comparison
parameters
----------
column : str
quantity to plot on y-axis (from Tracer.columns)
y_scale : 'log' or 'linear'
x_scale : 'log' or 'linear'
ax : Axes
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
marker : str
column_tables : 'skynet' or 'stir'
which table to plot from
"""
self.check_columns(column, tables=column_tables)
fig, ax = plotting.check_ax(ax=ax, figsize=figsize)
for column_table in column_tables:
self.plot_column(column=column, column_table=column_table, ax=ax,
label=column_table, legend=legend, marker=marker,
x_scale=x_scale, y_scale=y_scale, xlims=xlims,
ylims=ylims, title=title)
def plot_composition(self, abu_var, isotopes=None,
y_scale=None, x_scale=None, ylims=None, xlims=None,
ax=None, legend=True, title=True,
figsize=(8, 6), linestyle='-', marker=''):
"""Plot network composition versus time
parameters
----------
abu_var : 'X' or 'Y'
isotopes : [str]
list of isotopes to plot. If None, default to 10 most abundant
y_scale : 'log' or 'linear'
x_scale : 'log' or 'linear'
ax : Axes
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
linestyle : str
marker : str
"""
table = self.composition[abu_var]
fig, ax = plotting.check_ax(ax=ax, figsize=figsize)
if isotopes is None:
isotopes = self.most_abundant[abu_var]['isotope']
for i, isotope in enumerate(isotopes):
ax.plot(self.time, table[isotope], ls=linestyle,
marker=marker, label=isotope)
plotting.set_ax_all(ax, y_var=abu_var, x_var='time', y_scale=y_scale,
x_scale=x_scale, ylims=ylims, xlims=xlims, legend=legend,
title=title, title_str=self.title)
return fig
def plot_sums(self, timestep, abu_var, iso_group, y_scale=None,
ax=None, legend=False, title=True,
ylims=None, xlims=None, figsize=(8, 6), label=None,
linestyle='-', marker='o'):
"""Plot composition sums
parameters
----------
timestep : int
index of timestep to plot
abu_var : 'X' or 'Y'
iso_group : 'A' or 'Z'
which iso-number to group by on x-axis
y_scale : 'log' or 'linear'
ax : Axes
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
label : str
linestyle : str
marker : str
"""
fig, ax = plotting.check_ax(ax=ax, figsize=figsize)
x = self.network_unique[iso_group]
y = self.sums[iso_group][abu_var].loc[timestep]
t = self.time[timestep]
title_str = f"{self.title}, t={t:.3e} s"
ax.plot(x, y, ls=linestyle, marker=marker, label=label)
plotting.set_ax_all(ax, y_var=abu_var, x_var=iso_group, y_scale=y_scale,
x_scale='linear', ylims=ylims, xlims=xlims, legend=legend,
title=title, title_str=title_str)
return fig
def plot_sums_slider(self, abu_var, iso_group,
y_scale=None, title=True, ylims=None, xlims=None,
legend=False, figsize=(8, 6), linestyle='-', marker='o'):
"""Plot composition sums with interactive slider
parameters
----------
abu_var : 'X' or 'Y'
iso_group : 'A' or 'Z'
which iso-number to group by on x-axis
y_scale : 'log' or 'linear'
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
linestyle : str
marker : str
"""
fig, profile_ax, slider_ax = plotting.setup_slider_fig(figsize=figsize)
step_min, step_max = self._get_slider_steps()
slider = Slider(slider_ax, 'timestep', step_min, step_max,
valinit=step_max, valstep=1)
self.plot_sums(step_max, abu_var=abu_var, iso_group=iso_group,
y_scale=y_scale, ax=profile_ax, legend=legend,
title=title, ylims=ylims, xlims=xlims, figsize=figsize,
linestyle=linestyle, marker=marker)
def update(step):
y = self.sums[iso_group][abu_var].loc[step]
profile_ax.lines[0].set_ydata(y)
t = self.time[step]
title_str = f"{self.title}, t={t:.3e} s"
profile_ax.set_title(title_str)
fig.canvas.draw_idle()
slider.on_changed(update)
return fig, slider
def plot_sums_all(self, timestep, abu_var, y_scale=None,
ax=None, legend=False, title=True,
ylims=None, xlims=None, figsize=(8, 6),
linestyle='-', marker='o'):
"""Plot all isotope composition sums
parameters
----------
timestep : int
index of timestep to plot
abu_var : 'X' or 'Y'
y_scale : 'log' or 'linear'
ax : Axes
legend : bool
title : bool
ylims : [min, max]
xlims : [min, max]
figsize : [width, height]
linestyle : str
marker : str
"""
fig, ax = plotting.check_ax(ax=ax, figsize=figsize)
for z in self.network_unique['Z']:
subnet = self.select_network(z=z)
subcomp = self.select_composition(abu_var=abu_var, z=z)
x = subnet['A']
y = subcomp.loc[timestep]
label = network.get_element_str(z=z).title()
ax.plot(x, y, ls=linestyle, marker=marker, label=label)
t = self.time[timestep]
title_str = f"{self.title}, t={t:.3e} s"
plotting.set_ax_all(ax, y_var=abu_var, x_var='A', y_scale=y_scale,
x_scale='linear', ylims=ylims, xlims=xlims, legend=legend,
title=title, title_str=title_str)
return fig
# ===============================================================
# Convenience
# ===============================================================
    def printv(self, string):
        """Print string if verbose is True

        parameters
        ----------
        string : str
            message forwarded to printing.printv
        """
        printing.printv(string, verbose=self.verbose)
def _get_slider_steps(self):
"""Return numbers of steps for slider bar
"""
columns = self.columns['skynet']
step_min = columns.index[0]
step_max = columns.index[-1]
return step_min, step_max
def check_columns(self, columns, tables):
"""Check if column(s) exist in provided table(s)
parameters
----------
columns : str or [str]
tables : str or [str]
"""
columns = tools.ensure_sequence(columns)
tables = tools.ensure_sequence(tables)
for column_table in tables:
table = self.columns[column_table]
for column in columns:
if column not in table:
raise ValueError(f"column '{column}' not in "
f"tracer table '{column_table}'")
| zacjohnston/nucleosynth | nucleosynth/tracers/tracer.py | tracer.py | py | 21,054 | python | en | code | 2 | github-code | 13 |
X, Y, Z = None, None, None
i = 0
# NOTE(review): this is a quiz scaffold — with X/Y/Z left as None the
# comparison below raises a TypeError on Python 3; fill in integer
# values before running.
while i < X:
    print("--X--", end="")
    j = 0
    while j < Y:
        print("!Y!", end="")
        k = 0
        while k < Z:
            print("Z", end="")
            k += 1
        j += 1
        print(" ", end="")
    i += 1
print("done")
| z5267282/thesis | backend/test-questions-theory/q1.py | q1.py | py | 281 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python3

# Takes the JSON output of googletest and prints information about the
# longest running tests and test suites.

import json
import sys

from terminaltables import AsciiTable

if len(sys.argv) != 2:
    print('Usage: (1) Run googletest with --gtest_output="json:output.json"')
    # BUG FIX: the original second line was entirely inside one string
    # literal, so it printed the literal text '" + sys.argv[0] + "'.
    print('       (2) ' + sys.argv[0] + ' output.json')
    sys.exit(1)

with open(sys.argv[1]) as f:
    data = json.load(f)

# Map suite name -> runtime (s) and "suite.test" -> runtime (s).
testsuites = {}
tests = {}
for testsuite in data["testsuites"]:
    testsuites[testsuite["name"]] = float(testsuite["time"].replace("s", ""))
    for test in testsuite["testsuite"]:
        tests[testsuite["name"] + "." + test["name"]] = float(test["time"].replace("s", ""))

# Sort descending by runtime.
testsuites_sorted = sorted(testsuites.items(), key=lambda item: -item[1])
tests_sorted = sorted(tests.items(), key=lambda item: -item[1])

ENTRIES_SHOWN = 20
# BUG FIX: clamp so small test runs don't raise an IndexError.
shown = min(ENTRIES_SHOWN, len(testsuites_sorted), len(tests_sorted))

table = []
table += [[str(shown) + " most expensive test suites", "s", str(shown) + " most expensive tests", "s"]]
for i in range(shown):
    table += [[testsuites_sorted[i][0], testsuites_sorted[i][1], tests_sorted[i][0], tests_sorted[i][1]]]

print(AsciiTable(table).table)
import numpy as np
import matplotlib.pyplot as plt

# Load the tab-separated web-traffic log: column 0 is the hour index,
# column 1 is the hit count for that hour.
# NOTE: np.genfromtxt replaces the old scipy.genfromtxt alias, which has
# been removed from modern SciPy releases.
data = np.genfromtxt("web_traffic.tsv", delimiter="\t")  # tsv for tab data
x = data[:, 0]
y = data[:, 1]

plt.scatter(x, y)
plt.title("Web Traffic last Month")
plt.xlabel("Time")
plt.ylabel("Hits/hours")
plt.xticks()
plt.autoscale(tight=True)
plt.grid()
plt.show()
73605346578 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
import collections
from typing import Optional
# @Author: 花菜
# @File: 104二叉树的最大深度.py
# @Time : 2022/11/2 17:42
# @Email: lihuacai168@gmail.com
class TreeNode:
    """Binary-tree node holding a value plus optional left/right links."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    """LeetCode 104: maximum depth of a binary tree.

    BUG FIX: the original class defined two methods both named
    ``maxDepth``; the second (BFS) definition silently shadowed the first
    (recursive) one, leaving it as dead code.  Both are kept here under
    distinct names, with ``maxDepth`` preserving the previously-effective
    BFS behavior.
    """

    def maxDepthRecursive(self, root: "Optional[TreeNode]") -> int:
        """Depth via post-order recursion: 1 + max depth of the subtrees."""
        if not root:
            return 0
        leftHeight = self.maxDepthRecursive(root.left)
        rightHeight = self.maxDepthRecursive(root.right)
        return max(leftHeight, rightHeight) + 1

    def maxDepth(self, root) -> int:
        """Depth via level-order BFS: count the number of levels visited."""
        if not root:
            return 0
        que = collections.deque([root])
        depth = 0
        while que:
            depth += 1  # one full tree level per outer iteration
            for _ in range(len(que)):
                node = que.popleft()
                if node.left:
                    que.append(node.left)
                if node.right:
                    que.append(node.right)
        return depth
| lihuacai168/LeetCode | 二叉树/二叉树的深度和高度/104二叉树的最大深度.py | 104二叉树的最大深度.py | py | 1,160 | python | en | code | 4 | github-code | 13 |
586732175 | # Example code for discussing indegree and outdegree in a directed graph
class DirectedGraph:
    """Directed graph that only tracks per-vertex degree counters.

    Vertices are the integers 0..vertices-1.  Edges are counted, not
    stored, so the class supports degree queries but not traversal.
    """

    def __init__(self, vertices):
        self.vertices = vertices
        self.edges = 0
        # One counter per vertex for incoming and outgoing edges.
        self.indegree = dict.fromkeys(range(vertices), 0)
        self.outdegree = dict.fromkeys(range(vertices), 0)

    def add_edge(self, v, w):
        """Record a directed edge v -> w."""
        self.outdegree[v] = self.outdegree[v] + 1
        self.indegree[w] = self.indegree[w] + 1
        self.edges = self.edges + 1

    def vertex_indegree_outdegree(self, vertex):
        """Return the (indegree, outdegree) pair of *vertex*."""
        return self.indegree[vertex], self.outdegree[vertex]
# Create a directed graph
directed_graph = DirectedGraph(4)
directed_graph.add_edge(0, 1)
directed_graph.add_edge(0, 3)
directed_graph.add_edge(1, 2)
directed_graph.add_edge(2, 3)
# Discuss indegree and outdegree for vertex 0
indegree_of_0, outdegree_of_0 = directed_graph.vertex_indegree_outdegree(0)
print(f"The indegree of vertex 0 is: {indegree_of_0}")
print(f"The outdegree of vertex 0 is: {outdegree_of_0}")
| Hienu/TranDanhHieu_CTDL | Đề tài giữa kỳ_DK009/15 Graphs/002 Graphs - Degree of a Vertex/c.py | c.py | py | 940 | python | en | code | 0 | github-code | 13 |
72829503699 | import bpy
from bpy.props import *
from ... base_types import AnimationNode
class sequenceNode(bpy.types.Node, AnimationNode):
    """Animation node emitting a repeating multi-channel pulse sequence.

    Between Start Frame and End Frame the node cycles through st_n
    channels, holding the active channel at ``step`` while the others
    stay 0.
    """
    bl_idname = "an_sequenceNode"
    bl_label = "Multi-Channel Sequencer"
    bl_width_default = 180

    # Validation message shown in the node UI (empty when inputs are OK).
    message1 = StringProperty("")

    def create(self):
        """Declare the node's input and output sockets."""
        self.newInput("Integer", "Start Frame", "start")
        self.newInput("Integer", "End Frame", "endf")
        self.newInput("Integer", "Number of Steps", "st_n")
        self.newInput("Float", "Step Value", "step")
        self.newOutput("Integer List", "Output as IntegerList", "out_l")
        self.newOutput("Integer", "Current Pulse Index", "idx")

    def draw(self,layout):
        """Show the validation message, if any, in the node body."""
        if (self.message1 != ""):
            layout.label(self.message1, icon = "ERROR")

    def execute(self, start, endf, st_n, step):
        """Compute the pulse list and active index for the current frame.

        Returns (None, None) when the inputs are inconsistent: the full
        sequence would not fit before endf, step is too small, or fewer
        than two channels were requested.
        """
        self.use_custom_color = True
        self.useNetworkColor = False
        self.color = (0.8,0.9,1)
        frame = bpy.context.scene.frame_current
        if endf < (start + (step * st_n)) or step < 0.01 or st_n < 2:
            self.message1 = "Check Input Values"
            out_l = None
            idx = None
        else:
            self.message1 = ""
            out_l = []
            idx = 0
            for i in range(0,st_n):
                out_l.append(0)
            # NOTE(review): range(start, endf) excludes endf itself, and
            # the modulo period is a float when step is fractional —
            # confirm both behaviors are intended.
            if frame in range(start,endf):
                frm = (frame - start) % (step * st_n)
                idx = int(frm // step)
                out_l[idx] = step
        return out_l, idx
| Clockmender/My-AN-Nodes | nodes/general/sequence.py | sequence.py | py | 1,493 | python | en | code | 16 | github-code | 13 |
16979220959 | from math import sqrt, isnan
import csv
dataFile = '../data/error_test.csv'
algorithmDescriptionIdx = 1
def readData():
    """Parse dataFile and return (headers, preprocessed rows).

    After two header rows, the CSV is laid out in pairs: an even row
    carries the scalar fields of one test case, and the following odd row
    carries its histogram counts (appended to the record as an int list).
    """
    result = []
    headers = []
    with open(dataFile , 'r') as file:
        reader = csv.reader(file, skipinitialspace=True, delimiter=';')
        rowCounter = 0
        for r in reader:
            if rowCounter <= 1:
                # NOTE(review): both leading rows are folded into one flat
                # header list — confirm the file really has two header rows.
                headers += r
            elif rowCounter % 2 == 0:
                x = []
                x += r
            else:
                x.append([int(y) for y in r])
                result.append(x)
            rowCounter += 1
    return headers,preprocess(result)
def preprocess(data):
    """Merge the paired BagMinHash1/BagMinHash2 result rows.

    Rows from either BagMinHash variant are relabeled to a common
    algorithm name.  The two variants are asserted to have produced
    structurally identical output (same row count, same field count,
    same field lengths), after which only the BagMinHash1 rows are kept.
    All other rows pass through untouched, ahead of the merged rows.
    """
    relabel = {
        "BagMinHash1 (float)": "BagMinHash (float)",
        "BagMinHash2 (float)": "BagMinHash (float)",
        "BagMinHash1 (binary)": "BagMinHash (binary)",
        "BagMinHash2 (binary)": "BagMinHash (binary)",
    }
    buckets = {label: [] for label in relabel}
    passthrough = []
    for row in data:
        label = row[algorithmDescriptionIdx]
        if label in relabel:
            row[algorithmDescriptionIdx] = relabel[label]
            buckets[label].append(row)
        else:
            passthrough.append(row)
    # Sanity-check that variant 1 and variant 2 agree structurally.
    for variant in ("float", "binary"):
        first = buckets["BagMinHash1 (%s)" % variant]
        second = buckets["BagMinHash2 (%s)" % variant]
        assert(len(first) == len(second))
        for row1, row2 in zip(first, second):
            assert(len(row1) == len(row2))
            for cell1, cell2 in zip(row1, row2):
                assert(len(cell1) == len(cell2))
    return passthrough + buckets["BagMinHash1 (float)"] + buckets["BagMinHash1 (binary)"]
# Parse the results file once at import time.
headers, data = readData()
# Column indices into each parsed row; the asserts below validate them
# against the header names read from the file.
caseDescriptionIdx = 0
algorithmDescriptionIdx = 1
numIterationsIdx = 2
hashSizeIdx = 3
trueJaccardIndexIdx = 4
histogramDataIdx = 5
assert(headers[caseDescriptionIdx] == "caseDescription")
assert(headers[algorithmDescriptionIdx] == "algorithmDescription")
assert(headers[numIterationsIdx] == "numIterations")
assert(headers[hashSizeIdx] == "hashSize")
assert(headers[trueJaccardIndexIdx] == "trueJaccardIndex")
assert(headers[histogramDataIdx] == "histogramEqualSignatureComponents")
def extractCaseDescriptions(data):
    """Return the distinct case descriptions, in first-seen order."""
    return list(dict.fromkeys(row[caseDescriptionIdx] for row in data))
def getTrueJaccardIndex(caseDescription, data):
    """Return the true Jaccard index recorded for *caseDescription*
    (implicitly None when no row matches)."""
    for row in data:
        if row[caseDescriptionIdx] != caseDescription:
            continue
        return float(row[trueJaccardIndexIdx])
def getHistogram(caseDescription, algorithmDescription, data, hash_size=None):
    """Return the histogram matching the given case, algorithm, and
    signature size, or None when no row matches.

    Parameters
    ----------
    hash_size : int, optional
        Signature size (m) to match.  Defaults to the module-level loop
        variable ``m`` for backward compatibility — the original function
        silently depended on that global, which only worked because every
        caller kept it in sync.  Pass it explicitly in new code.
    """
    if hash_size is None:
        hash_size = m
    for d in data:
        if d[caseDescriptionIdx] == caseDescription and int(d[hashSizeIdx]) == hash_size and d[algorithmDescriptionIdx] == algorithmDescription:
            return d[histogramDataIdx]
    return None
def getEmpiricalMSE(caseDescription, m, algorithmDescription, data):
    """Empirical mean squared error of the Jaccard estimate for one case.

    Averages (k/m - J)^2 over the histogram of equal-signature-component
    counts, weighting each bin k by its frequency, and divides by the
    number of iterations.  Returns NaN when no histogram exists for the
    case/algorithm combination.
    """
    # NOTE(review): getHistogram ignores the local m and matches on the
    # module-level loop variable m instead — this works only because the
    # caller keeps the two in sync; confirm before reusing elsewhere.
    histo = getHistogram(caseDescription, algorithmDescription, data)
    if histo is None:
        return float('nan')
    assert(m + 1 == len(histo))
    J = getTrueJaccardIndex(caseDescription, data)
    s = 0
    for k in range(0, m + 1):
        s += histo[k] * pow(k / m - J, 2)
    return s/getN(data)
def getN(data):
    """Return the iteration count shared by every row (None when data is
    empty); asserts that all rows agree on the value."""
    counts = {int(row[numIterationsIdx]) for row in data}
    assert(len(counts) <= 1)
    return counts.pop() if counts else None
def calculateZScore(empiricalMSE, J, c, m):
    """Standardize empiricalMSE against its theoretical distribution.

    For a size-m signature over c repetitions the empirical MSE has
    expected value J(1-J)/m; the result is the deviation of empiricalMSE
    from that mean in units of its expected standard deviation.
    """
    mean = J * (1 - J) / m
    variance = pow(mean, 2) / c * (2. - 6. / m) + mean / (c * pow(m, 2.))
    return (empiricalMSE - mean) / sqrt(variance)
case_descriptions = extractCaseDescriptions(data)
# Signature sizes (number of hash values m) evaluated.
m_values = [4, 16, 64, 256, 1024, 4096]
# Algorithms in table (column) order.
algorithms = [
    "BagMinHash (float)",
    "BagMinHash (binary)",
    "ICWS",
    "0-Bit",
    "CCWS",
    "PCWS",
    "I2CWS"
]
# LaTeX column headings (with citation keys) per algorithm.
algorithm_labels = {
    "BagMinHash (float)" : "BagMinHash (float)",
    "BagMinHash (binary)" : "BagMinHash (binary)",
    "ICWS" : "\\acs*{ICWS} \\cite{Ioffe2010}",
    "I2CWS" : "\\acs*{I2CWS} \\cite{Wu2017}",
    "0-Bit" : "0-bit \\cite{Li2015}",
    "PCWS" : "\\acs*{PCWS} \\cite{Wu2017a}",
    "CCWS" : "\\acs*{CCWS} \\cite{Wu2016}"
}
# |z|-scores at or above this threshold are highlighted red/bold in the table.
redLimit = 3.
print("\\begin{tabular}{lrr" + (2*len(algorithms))*"r" + "}")
print("\\toprule")
print("& &")
for alg in algorithms:
print("& \\multicolumn{2}{c}{" + algorithm_labels[alg] + "}")
print("\\\\")
i = 4
for alg in algorithms:
print("\\cmidrule(l){" + str(i) + "-" + str(i+1) + "}")
i += 2
print("test case & \\symHashSize & $\\symExpectation(\\symEmpiricalMSE)$")
for alg in algorithms:
print("& $\\symEmpiricalMSE$ & $\\symZScore$-score")
print("\\\\")
n = getN(data)
for case_description in case_descriptions:
print("\\midrule")
i = 0
for m in m_values:
J = getTrueJaccardIndex(case_description, data)
if i == 0:
print("\\multirowcell{4}[1em][l]{" + case_description + " \\\\ " + "$\\symJaccard = " + "\\num[group-digits = false]{" + "{:.6g}".format(J) + "}" + "$}")
i += 1
print("& " + str(m))
expectedMSE = J*(1.-J)/m
print("& \\numsci{" + ' {:.2E}'.format(expectedMSE) + "}")
for alg in algorithms:
mse = getEmpiricalMSE(case_description, m, alg, data)
z = calculateZScore(mse, J, n, m)
print("&")
if not isnan(mse):
print("\\numsci{" + ' {:.2E}'.format(mse) + "}")
else:
print("N/A")
print("&")
if not isnan(z):
if (abs(z) >= redLimit):
print("\\color{red}\\bf")
if (abs(z) >= 10):
print("\\numsci{" + ' {:.2E}'.format(z) + "}")
else:
print("\\num{" + ' {:.2f}'.format(z) + "}")
else:
print("\\num{" + ' {:.2f}'.format(z) + "}")
else:
print("N/A")
print("\\\\")
print("\\bottomrule")
print("\\end{tabular}")
| oertl/bagminhash | python/error_table.py | error_table.py | py | 6,778 | python | en | code | 25 | github-code | 13 |
70766814099 | import pygame
import random
from Deck import Deck
from Player import Player
from computer import Computer
class Turn:
    """Tracks whose turn it is among *players_num* players and the direction of play."""
    def __init__(self, players_num):
        # The first entry of the players list always starts the game.
        self.randomTurn = 0            # number of completed normal turns
        self.players_num = players_num
        self.current_player = 0
        self.direction = 1             # +1 = forward, -1 = reversed
    def next_direction(self):
        """Advance the turn one player in the current direction; return the new index."""
        index = (self.current_player + self.direction) % self.players_num
        self.current_player = index
        self.randomTurn += 1
        return index
    def skip_direction(self):
        """Skip the next player (advance two steps in the current direction).

        Bug fix: the old code used ((index + 1) + direction) % n, which left
        the turn on the SAME player when direction == -1 instead of skipping.
        """
        index = (self.current_player + 2 * self.direction) % self.players_num
        self.current_player = index
        return index
    def reverse_direction(self):
        """Flip the direction of play (reverse card)."""
        self.direction *= -1
class Game:
    """Shared state of one UNO game: draw/discard decks, players and UNO-call state."""
    def __init__(self, players):
        self.dumy_deck = Deck() # draw pile initially holding every generated card
        self.discard_deck = Deck()
        self.discard_deck.reset() # pile that collects played/discarded cards
        self.color = ''
        self.players = players
        self.winner = self.players[0]
        self.say_uno = False
    # Build the deck and deal the starting hands.
    def distrib_card(self, card_num,computer_game_mode,player_num):
        """Shuffle the draw pile and deal card_num cards to every player (stage depends on mode)."""
        self.dumy_deck.shuffle()
        for player in self.players:
            if "mode A" in computer_game_mode:
                self.dumy_deck = player.setCard(self.dumy_deck, player_num,card_num,stage = 'A')
            elif 'mode B' in computer_game_mode:
                print("mode B@")
                self.dumy_deck = player.setCard( self.dumy_deck,player_num, card_num,stage = 'B')
            else:
                self.dumy_deck = player.setCard( self.dumy_deck,player_num, card_num)
    def show_winner(self):
        print(self.winner, " wins!")
    def is_game_over(self):
        """Return True (recording and announcing the winner) if any player emptied their hand."""
        is_end = False
        for player in self.players:
            if len(player.getHand()) == 0:
                self.winner = player
                self.show_winner()
                is_end = True
        return is_end
    # Add a card to the discard pile.
    def add_to_discard(self, card):
        self.discard_deck.addCard(card)
    # Draw card(s) from the draw pile into the given player's hand.
    # NOTE(review): this calls setCard(deck, num) while distrib_card calls
    # setCard(deck, player_num, card_num, ...) -- confirm Player.setCard accepts both forms.
    def pop_from_dumy(self, current_player, num=1):
        self.dumy_deck = current_player.setCard(self.dumy_deck, num)
    # UNO eligibility:
    # UNO may be called once any player is down to exactly two cards.
    def can_press_uno(self, player):
        can_press = False
        if len(player.hand) == 2:
            can_press = True
        return can_press
    # The human player calls UNO.
    def press_uno_by_user(self, player, current_player):
        """Handle *player* calling UNO; penalize the two-card player if caught first.

        Returns True on a successful call, False when UNO cannot be said.
        NOTE(review): returns None when the two-card player is the caller or UNO
        was already said this turn -- confirm callers handle a None result.
        """
        if self.can_press_uno(current_player):
            required_player = current_player
            for selected_player in self.players:
                if len(selected_player.hand) == 2:
                    required_player = selected_player
            if (required_player != player) and not self.say_uno:
                # another player's hand is down to two cards: they draw one penalty card
                self.pop_from_dumy(required_player, 1)
                self.say_uno = True
                print(f"{player.name} said UNO!")
                print(required_player.hand)
                return True
        else:
            print("UNO cannot be said at this time.")
            return False
    # A randomly chosen computer player calls UNO.
    def press_uno_by_computer(self, current_player):
        random_computer = random.randint(1, len(self.players)-1)
        return self.press_uno_by_user(self.players[random_computer], current_player)
    # Once someone says UNO in a turn, further calls are blocked via self.say_uno;
    # call this every turn so say_uno is reset to False.
    def reset_say_uno(self):
        self.say_uno = False
| SE12Team/UNO | Game.py | Game.py | py | 4,105 | python | ko | code | 0 | github-code | 13 |
34487831176 | from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.db import connection
from order.forms import OrderForm
import string
from random import *
import datetime
def order(request):
    """Record a purchase of song *sid* for the logged-in user and bump its download count.

    GET renders an empty order form; a valid POST inserts a Purchases row and
    redirects to the buyer's record page.
    """
    if request.method == 'POST':
        form = OrderForm(request.POST)
        if form.is_valid():
            sid = str(form.cleaned_data.get('sid'))
            with connection.cursor() as cursor:
                # look up the internal uid for the authenticated username
                username = str(request.user)
                cursor.execute("SELECT uid FROM Users WHERE login = %s", [username])
                uid = cursor.fetchone()
                # NOTE(review): fetchone() returns None when no Users row matches,
                # which would raise TypeError below -- confirm the row always exists.
                uid = str(uid[0])
                # NOTE(review): the column list names 2 columns but 3 placeholders/values
                # are bound (sid, uid, today's date) -- verify the intended schema.
                cursor.execute(
                    "INSERT INTO Purchases (sid,uid) "+
                    "VALUES "+
                    "(%s,%s,%s)",[sid,uid,datetime.date.today().strftime("%Y-%m-%d")]
                )
                cursor.execute(
                    "UPDATE Songs SET numDownloads = numDownloads+1 WHERE sid = %s", [sid]
                )
            return redirect('/myrecord/' + uid)
    else:
        form = OrderForm()
    return render(request, 'order/order.html', {'form': form})
def generate_uid():
    """Return a random 10-character identifier drawn from letters, punctuation and digits."""
    alphabet = string.ascii_letters + string.punctuation + string.digits
    length = randint(10, 10)  # always 10; mirrors the original fixed range call
    return "".join(choice(alphabet) for _ in range(length))
| purplxholic/database_proj | order/views.py | views.py | py | 1,332 | python | en | code | 0 | github-code | 13 |
38072479248 | # steering file for BS->ESD step -- data configuration
# see myTopOptions.py for more info
#doCBNT=False
# Athena jobOptions: executed via include() in the Athena exec context, where
# names like BSRDOInput, include and ServiceMgr are injected at run time.
from RecExConfig.RecFlags import rec
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags as acf
import glob
# Default the event count and input bytestream file when not supplied by the caller.
if not ('EvtMax' in dir()):
    acf.EvtMax=10
if not 'BSRDOInput' in dir():
    acf.BSRDOInput=["../testAllPT_data/EF._0001.data"]
# Fall back to the shared AFS reference area for inputs that do not exist locally.
for i, f in enumerate(BSRDOInput):
    if not glob.glob(f):
        BSRDOInput[i] = "/afs/cern.ch/atlas/project/trigger/pesa-sw/validation/references/data"+f[2:]
if not 'doWriteESD' in dir():
    rec.doWriteESD=True
#testCosmicV1=True
# Disable every reconstruction output except ESD writing.
rec.doWriteRDO=False
rec.doWriteAOD=False
rec.doAOD=False
rec.doESD=False
rec.doWriteTAG=False
rec.doCBNT=False
doTrigger=True
#doTrigger=False
#-------
from AthenaCommon.GlobalFlags import GlobalFlags
GlobalFlags.DataSource.set_data()
#GlobalFlags.InputFormat.set_bytestream()
readBS=True
#from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
#svcMgr+=DBReplicaSvc(UseCOOLSQLite=False)
#useCOMCONDDB=True
#setDetDescr = 'ATLAS-GEO-04-00-00'
#setGlobalTag = 'COMCOND-HLTC-000-00'
#EvtMax=25
#setModifiers = ['noCSCReadout',
# 'enableHotIDMasking',
# 'disableCaloAllSamples',
# 'softTRTsettings',
# 'openThresholdRPCCabling',
#special streaming setup
# 'enable7BitL1TTStreaming']
from TriggerJobOpts.TriggerFlags import TriggerFlags
# Trigger was already run upstream; only read back the persistent result.
TriggerFlags.doLVL2= False
TriggerFlags.doEF = False
#include ("RecExCommon/RecExCommon_flags.py")
include.block("RecExCond/RecExCommon_flags.py")
TriggerFlags.doHLTpersistency=True
TriggerFlags.writeBS=False
TriggerFlags.abortOnConfigurationError=True
from AthenaCommon.GlobalFlags import globalflags
# Lock geometry/conditions so later includes cannot override them.
globalflags.DetDescrVersion.set_Value_and_Lock('ATLAS-GEO-04-00-00')
globalflags.ConditionsTag.set_Value_and_Lock('COMCOND-HLTC-000-00')
globalflags.InputFormat.set_Value_and_Lock('bytestream')
globalflags.DataSource.set_Value_and_Lock('data')
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
#TriggerFlags.MuonSlice.doMuonCalibrationStream = athenaCommonFlags.isOnline()
athenaCommonFlags.BSRDOInput=BSRDOInput
# should be done afterwards so that TriggerFlags are configured ok
# has been run at RDO->BS step (even EF ?)
# doTrigger=False
#from RecExConfig.RecFlags import recAlgs
#recAlgs.doTrigger=False
# main jobOption
#include ("RecExCommon/RecExCommon_topOptions.py")
include("TriggerRelease/Trigger_topOptions_standalone.py")
# the correct tag should be specified
#from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
#svcMgr+=DBReplicaSvc(UseCOOLSQLite=False)
ServiceMgr.IOVDbSvc.GlobalTag="COMCOND-HLTC-000-00"
#ServiceMgr.IOVDbSvc.GlobalTag="OFLCOND-CSC-00-01-00"
12408203380 | import os
import pandas as pd
# Note: The first row or column integer is 1, not 0.
# Folder containing the CSV exports to convert in place.
directory = 'C:/Users/natha/OneDrive/Desktop/Summer 2023 Image analysis/Ua vs Ui Data/'
files = [] # list of the paths of all excel docs in the folder
# iterate over files in directory; convert each CSV to XLSX and delete the original
# NOTE(review): there is no extension filter, so any non-CSV file in the folder
# would make pd.read_csv fail or produce garbage -- consider filename.endswith('.csv').
for filename in os.listdir(directory):
    f = directory + filename
    # checking if it is a file
    if os.path.isfile(f):
        files.append(f)
        # reading the csv file
        cvsDataframe = pd.read_csv(f)
        # creating an output excel file
        resultExcelFile = pd.ExcelWriter(f + ".xlsx")
        # converting the csv file to an excel file
        cvsDataframe.to_excel(resultExcelFile, index=False)
        # saving the excel file
        resultExcelFile.close()
        # delete the source CSV and record the new workbook path
        os.remove(f)
        files.append(f + ".xlsx")
| theburger222/Summer_2023_Image_Processing | Convert CSV to XLSX.py | Convert CSV to XLSX.py | py | 888 | python | en | code | 0 | github-code | 13 |
8253716377 | from hyperopt import hp
from hyperopt.pyll.base import scope
import pytest
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from training_templates.tuners import XGBoostHyperoptTuner, Tuner
from training_templates.data_utils import sample_pandas_dataframe, train_val_split
@pytest.fixture
def transformed_features(default_training_args):
    """Fixture: split the sample frame on 'Survived' and return preprocessed train/val features plus labels."""
    df = sample_pandas_dataframe()
    X_train, X_val, y_train, y_val = train_val_split(df, "Survived", 0.8)
    preprocessing_pipeline = default_training_args['preprocessing_pipeline']
    # fit on the training split only; apply the fitted transform to validation
    X_train_transformed = preprocessing_pipeline.fit_transform(X_train)
    X_val_transformed = preprocessing_pipeline.transform(X_val)
    return(X_train_transformed, X_val_transformed, y_train, y_val)
def get_init_model_func(model):
    """Return a factory that instantiates *model*, optionally with keyword params."""
    def init_model(model_params=None):
        # Empty/None params -> default construction; otherwise unpack as kwargs.
        return model(**model_params) if model_params else model()
    return init_model
@pytest.fixture
def objective_fn_args(transformed_features):
    """Fixture: keyword arguments shared by every tuner objective function."""
    X_train_transformed, X_val_transformed, y_train, y_val = transformed_features
    args = {"X_train_transformed": X_train_transformed,
            "X_val_transformed": X_val_transformed,
            "y_train": y_train,
            "y_val": y_val,
            "random_state": 123}
    return args
def test_sklearn_hyperopt_tuner(objective_fn_args, default_tuner):
    """The default (sklearn) tuner should return best params with the expected types."""
    model_init = get_init_model_func(RandomForestClassifier)
    objective_fn_args['init_model'] = model_init
    best_params = default_tuner.tune(**objective_fn_args)
    assert isinstance(best_params, dict)
    assert type(best_params["n_estimators"]) == int
    assert type(best_params["max_features"]) == float
    assert type(best_params["criterion"]) == str
def test_xgboost_hyperopt_tuner(objective_fn_args, default_tuner_args):
    """Tuner.load_tuner("xgboost") should tune an XGBClassifier and return typed best params."""
    model = get_init_model_func(xgb.XGBClassifier)
    objective_fn_args['init_model'] = model
    # search space for the hyperopt objective; quniform yields floats, so cast depth to int
    hyperparameter_space = {
        'max_depth': scope.int(hp.quniform('max_depth', 1, 10, 1)),
        'eval_metric': 'auc',
        'early_stopping_rounds': 50
    }
    default_tuner_args["hyperparameter_space"] = hyperparameter_space
    model_name = "xgboost"
    tuner = Tuner.load_tuner(model_name, default_tuner_args)
    #tuner = XGBoostHyperoptTuner(**default_tuner_args)
    best_params = tuner.tune(**objective_fn_args)
    assert isinstance(best_params, dict)
    assert type(best_params["max_depth"]) == int
39219229134 | import socket
import requests
import re
import threading
import json
#testpx
#
timeout = 300
nodatatime = 5
def getPX():
    """Fetch a fresh proxy address from the local proxy-pool service.

    Returns an (ip, port) tuple. NOTE(review): when the pool is empty the
    "proxy" key is missing, so parsing/int() below would raise -- confirm
    the pool service always returns a proxy.
    """
    p = requests.get("http://127.0.0.1:5010/get/").json().get("proxy")
    p = str(p)
    ip = str(p.split(":")[0])
    port = int(p.split(":")[1])
    print("new ip is:" + ip + ":" + str(port))
    return ip,port
def targetToClient(conn,toPX):
    """Pump bytes from the upstream proxy socket (toPX) back to the client (conn).

    Gives up after more than `nodatatime` consecutive empty/error reads, or
    after `timeout` relay iterations.  Fixes two defects: the loop counter `i`
    was never incremented (so the timeout was never enforced, unlike the twin
    clientToTarget), and `data` could be unbound when the very first recv raised.
    """
    global timeout
    global nodatatime
    i = 0
    j = 0
    while i < timeout:
        data = b""  # fix: avoid NameError in sendall when recv raises immediately
        try:
            data = toPX.recv(1024)
            if not data:
                if j > nodatatime:
                    conn.close()
                    toPX.close()
                    return
                j += 1
        except:
            if j > nodatatime:
                conn.close()
                toPX.close()
                return
            j += 1
            #print("get data from px error")
        try:
            conn.sendall(data)
        except:
            #print("send data to client error")
            pass
        i += 1
def clientToTarget(conn,toPX):
    """Pump bytes from the client socket (conn) up to the proxy socket (toPX).

    Stops after `timeout` iterations or more than `nodatatime` consecutive
    empty/error reads. NOTE(review): if recv raises on the very first pass,
    `data` is unbound and sendall(data) raises NameError (only printed here).
    """
    global timeout
    global nodatatime
    j = 0
    i = 0
    while i < timeout:
        try:
            data = conn.recv(1024)
            if not data:
                if j > nodatatime:
                    conn.close()
                    toPX.close()
                    return
                j += 1
        except:
            if j > nodatatime:
                conn.close()
                toPX.close()
                print("close")
                return
            j += 1
            print("get data from client error")
        try:
            toPX.sendall(data)
        except:
            print("send data to px error")
        i += 1
def AConnectFromClient(conn,addr):
    """Handle one accepted client: pick a proxy, connect, and spawn the two relay threads.

    NOTE(review): if toPX.connect raises, `toPX` may stay unbound/unconnected
    and the thread-spawn lines below still run -- consider returning on failure.
    """
    print("new connect from client")
    #pxip = "218.75.158.153"
    #pxport = 3128
    pxip,pxport = getPX()
    try:
        toPX = socket.socket()
        toPX.connect((pxip,pxport))
    except:
        print("connect px error")
    threading.Thread(target=clientToTarget,args=(conn,toPX)).start()
    threading.Thread(target=targetToClient,args=(conn,toPX)).start()
if __name__ == "__main__":
sever = socket.socket()
host = "127.0.0.1"
port = 3080
sever.bind((host,port))
sever.listen(20)
print("sever is ok!!")
while True:
try:
conn,addr = sever.accept()
threading.Thread(target=AConnectFromClient,args=(conn,addr)).start()
except:
print("connect from client error")
| cctes/proxyTunnel | proxyTunnel测试版.py | proxyTunnel测试版.py | py | 2,580 | python | en | code | 7 | github-code | 13 |
def latin_square(N, array):
    """Return (trace, bad_rows, bad_cols) for an N x N grid.

    trace is the sum of the main diagonal; bad_rows / bad_cols count rows and
    columns holding fewer than N distinct values (i.e. containing duplicates).
    """
    trace = sum(array[k][k] for k in range(N))
    bad_rows = sum(1 for k in range(N) if len(set(array[k])) != N)
    bad_cols = sum(1 for k in range(N)
                   if len(set(row[k] for row in array)) != N)
    return trace, bad_rows, bad_cols
# Read T test cases from stdin; each case is N followed by an N x N matrix.
tests = int(input())
for i in range(tests):
    N = int(input())
    array = []
    for j in range(N):
        line = list(map(int, input().split()))
        array.append(line)
    k, r, c = latin_square(N, array)
    # Code Jam style output: trace, duplicated-row count, duplicated-column count
    print("Case #" + str(i+1) + ": ", k, r, c)
| tikcho/CodingPracticePython | Vestigium.py | Vestigium.py | py | 537 | python | en | code | 0 | github-code | 13 |
33517037069 | import pandas as pd
import numpy as np
# user data (one row per user)
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv("dataset/ml-100k/u.user", sep="|", names=u_cols, encoding="latin-1")
# print(users)
# movie data
# a movie may carry a 1 in two or more genres
# genre flags are one-hot encoded
i_cols = ['movie_id', 'title', 'release date', 'video release date',
          'IMDB URL', 'unknown', 'Action', 'Adventure', 'Animation',
          'Childerns\'s', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
          'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
          'Thriller', 'War', 'Western']
movies = pd.read_csv('dataset/ml-100k/u.item', sep="|", names=i_cols, encoding="latin-1")
# user rating data
r_cols = ['user_id', 'movie_id', 'rating', 'timestamp']
ratings = pd.read_csv('dataset/ml-100k/u.data', sep='\t', names=r_cols, encoding="latin-1")
# rows are axis 0 and columns axis 1, hence axis=1 to drop the timestamp column
#print(ratings.drop('timestamp', axis=1))
ratings = ratings.drop('timestamp', axis=1)
# keep only movie_id and title (drop the remaining movie columns); no index set
movies = movies[['movie_id', 'title']]
# x keeps the rating data intact; y holds user_id for splitting by user
x = ratings.copy()
y = ratings['user_id'] # stratified-sampling style key
# split train / test 75% : 25%
split_index = int(len(x)*0.75)
x_train = x[:split_index]
x_test = x[split_index:]
y_train = y[:split_index]
y_test = y[split_index:]
# print(y_train, y_test)
def RMSE(y_true, y_pred):
    """Root-mean-square error between two equally sized rating sequences."""
    diff = np.asarray(y_true) - np.asarray(y_pred)
    return np.sqrt((diff ** 2).mean())
# Per-model RMSE scorer (RMSE between the model's predictions and the actual test ratings)
def score(model):
    """Score a rating model: predict every (user, movie) pair in x_test and return the RMSE."""
    id_pairs = zip(x_test['user_id'], x_test['movie_id'])
    y_pred = np.array([model(user, movie) for (user, movie) in id_pairs])
    y_true = np.array(x_test['rating'])
    return RMSE(y_true, y_pred)
# Build the full user x movie rating matrix from the train data
# (user_id as index, one column per movie, the given ratings as values)
rating_matrix = x_train.pivot(index='user_id', columns='movie_id', values='rating')
# print(rating_matrix)
# Baseline model: predict each movie's overall mean rating
def best_seller(user_id, movie_id):
    """Predict the movie's train-set mean rating; fall back to 3.0 for unseen movies.

    NOTE(review): relies on the global train_mean assigned a few lines below,
    so it must not be called before that assignment has run.
    """
    # try-except guards movies that appear in the test set but not in the train set
    try:
        rating = train_mean[movie_id]
    except:
        rating = 3.0
    return rating
# mean rating per movie (train split only)
train_mean = x_train.groupby(['movie_id'])['rating'].mean()
# Running the model: RMSE increases because the model was not tested on its own data
# print(score(best_seller))
# merge user attributes into the train ratings
merged_ratings = pd.merge(x_train, users)
users = users.set_index('user_id')
# mean rating per (movie, gender)
g_mean = merged_ratings[['movie_id', 'sex', 'rating']].groupby(['movie_id', 'sex'])['rating'].mean()
# print(g_mean)
## Gender-based prediction model
def cf_gender(user_id, movie_id):
    """Predict a rating from the mean rating the user's gender gave the movie (default 3.0)."""
    if movie_id in rating_matrix:
        gender = users.loc[user_id]['sex']
        # fall back to 3.0 when nobody of this gender rated the movie
        if gender in g_mean[movie_id]:
            gender_rating = g_mean[movie_id][gender]
        else :
            gender_rating = 3.0
    else:
        gender_rating = 3.0
    return gender_rating
print(score(cf_gender))
4058986758 | import unittest.mock as mock
from ..errors import ClientError
from ..models import UserAccount
from ..core import GameServer, UserSession
from ..world import GameWorld
from .tm_test_case import TildemushTestCase
class CommandTest(TildemushTestCase):
    """Exercises GameServer.handle_command parsing, validation and dispatch."""
    def setUp(self):
        """Create a server plus a session logged in as the 'vilmibm' account."""
        super().setUp()
        self.log_mock = mock.Mock()
        self.server = GameServer(GameWorld, logger=self.log_mock)
        self.user_session = UserSession(None, GameWorld, None)
        self.vil = UserAccount.create(username='vilmibm', password='foobarbazquux')
        msg = 'LOGIN vilmibm:foobarbazquux'
        self.server.handle_login(self.user_session, msg)
    def test_parses_command(self):
        """Each COMMAND message should split into (verb, rest) and dispatch to the world."""
        command_msgs = [
            ('COMMAND go somewhere',
             ('go', 'somewhere')),
            ('COMMAND look',
             ('look', '')),
            ('COMMAND fly-away',
             ('fly-away', '')),
            ('COMMAND neatly-eat a banana',
             ('neatly-eat', 'a banana')),
            ('COMMAND write a really long and involved novel',
             ('write', 'a really long and involved novel')),
            ('COMMAND say hello, all; how are you?',
             ('say', 'hello, all; how are you?')),
            ("COMMAND whisper and then i says, 'hey i'm eatin here'",
             ('whisper', "and then i says, 'hey i'm eatin here'")),
            ('COMMAND hideous!pathological;command.why some arguments',
             ('hideous!pathological;command.why', 'some arguments'))]
        with mock.patch('tmserver.world.GameWorld.dispatch_action') as world_dispatch_mock:
            for msg, expected in command_msgs:
                self.server.handle_command(self.user_session, msg)
                world_dispatch_mock.assert_called_with(*([self.vil.player_obj] + list(expected)))
    def test_detects_malformed_command(self):
        """Messages with bad spacing or a missing verb must raise a ClientError."""
        malformed_msgs = [
            'COMMAND go  somewhere',
            'COMMAND  go somewhere', # this might seem harsh but the client should be collapsing spaces
            'COMMANDgo',
            'COMMAND',
            'COMMAND ',
            'COMMAND  ']
        for malformed in malformed_msgs:
            with self.assertRaisesRegex(
                    ClientError,
                    'malformed command message: {}'.format(malformed)):
                self.server.handle_command(self.user_session, malformed)
    def test_rejects_unauthenticated_command(self):
        """A session that never logged in cannot issue commands."""
        user_session = UserSession(None, GameWorld, None)
        with self.assertRaisesRegex(
                ClientError,
                'not logged in'):
            self.server.handle_command(user_session, 'COMMAND go')
| vilmibm/tildemush | server/tmserver/tests/command_test.py | command_test.py | py | 2,654 | python | en | code | 44 | github-code | 13 |
74868329298 | from django.db import models
from democrance.commons.mixins import ModelWithTimestamp
class PolicyType(ModelWithTimestamp):
    """
    Lookup table that standardises the available policy types.

    "Why not use an enumeration?" -- enums make changes complicated, require
    database migrations, and need programmatic insertion; a row per type does not.
    """
    # Indexed so lookups by type name stay fast.
    name = models.TextField(
        help_text="The name of this policy type",
        db_index=True
    )
    def __str__(self):
        return f"{self.name}"
class Policy(ModelWithTimestamp):
    """
    The insurance policy model used for Democrance.

    Links a customer to a policy type; premium and cover are nullable so a
    policy can exist before it has been priced.
    """
    customer = models.ForeignKey(
        to='user.User',
        on_delete=models.DO_NOTHING,
        related_name="policies",
        help_text="The customer that this policy belongs to",
        db_index=True
    )
    # NOTE(review): the field name 'type' shadows the builtin; renaming would
    # require a migration, so it is only flagged here.
    type = models.ForeignKey(
        PolicyType,
        on_delete=models.DO_NOTHING,
        help_text="The type of policy associated with this cover",
        db_index=True
    )
    premium = models.IntegerField(
        default=None,
        null=True,
        blank=True,
        help_text="The premium to be paid for this cover"
    )
    cover = models.IntegerField(
        default=None,
        null=True,
        blank=True,
        help_text="The amount that this policy seeks to cover"
    )
    def __str__(self):
        return f"{self.pk} {self.customer}"
| duoi/democrance-project | policy/models.py | models.py | py | 1,422 | python | en | code | 0 | github-code | 13 |
37594041421 | def min_max(lista):
prod = 1
min = 1
max = lista[0] * lista[1]
for i in range(len(lista)):
for j in range(i + 1, len(lista)):
prod = lista[i] * lista[j]
if prod > max:
max = prod
elif prod < min:
min = prod
return min, max
if __name__ == "__main__":
lista = [1, 2, 3, 4, 5]
print(min_max(lista))
| HeresG/gabi | ALGORITMI 2 HGI/lab5b.py | lab5b.py | py | 404 | python | en | code | 0 | github-code | 13 |
74908601296 | import requests
import time
# Option 1: Not good, because the parameter is too long
# url = "https://movie.douban.com/j/chart/top_list?type=13&interval_id=100:90&action=&start=0&limit=20"
# headers = {
# "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Mobile Safari/537.36"
# }
# requests.exceptions.JSONDecodeError: [Errno Expecting value] : 0
# this means that the server is not sending a valid JSON response
# response = requests.get(url)
# print(response.json())
# response = requests.get(url, headers=headers)
# print(response.text)
# lis = response.json()
# print(lis)
# Option 2:
# Fetch pages of the Douban chart API (type 13, rating interval 100:90), 20 entries per page.
for i in range(1):
    start = i * 20
    url = "https://movie.douban.com/j/chart/top_list"
    headers = {
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Mobile Safari/537.36"
    }
    dic = {
        "type": "13",
        "interval_id": "100:90",
        "action": "",
        "start": start, # 0, 20, 40, 60, 80
        "limit": "20"
    }
    # query parameters go in params=..., avoiding option 1's over-long hand-built URL
    response = requests.get(url, params=dic, headers=headers)
    print(response.json())
    # be polite to the server: pause between page requests
    time.sleep(1)
| TBSAAA/Web-crawler | 01_data_filter_regular_expression/douban_rank.py | douban_rank.py | py | 1,214 | python | en | code | 0 | github-code | 13 |
4600378853 | #将int数字翻转过来 要注意溢出问题 也可以将int转换为字符串翻转字符串再转换为int数字 当然也要注意溢出问题
class Solution(object):
    """LeetCode 7 -- Reverse Integer.

    Reverse the decimal digits of a 32-bit signed integer; an alternative to
    digit-by-digit arithmetic is reversing the string form, done here.
    """

    def reverse(self, x):
        """Return x with its digits reversed (sign preserved).

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1].  Fixes the original upper-bound check,
        which compared against 2147483648 instead of 2**31 - 1 = 2147483647.
        """
        sign = -1 if x < 0 else 1
        result = sign * int(str(abs(x))[::-1])
        # overflow must be handled manually: Python ints do not wrap at 32 bits
        if result < -2**31 or result > 2**31 - 1:
            return 0
        return result
if __name__ == "__main__":
res = Solution().reverse(1534236469)
print(res) | FaceWaller/MyLeetCode | 7.Reverse Integer(翻转int).py | 7.Reverse Integer(翻转int).py | py | 586 | python | ja | code | 0 | github-code | 13 |
19241693770 | import os
import copy
import torch
import logging
import itertools
import contextlib
import numpy as np
import seaborn as sns
from PIL import Image
from collections import OrderedDict
from pathlib import Path
from .evaluator import DatasetEvaluator
from trackron.utils import comm
from trackron.config import CfgNode
# Flat 256-color RGB palette (seaborn colors scaled to 0-255) for indexed segmentation PNGs.
_PALETTE = (np.array(sns.color_palette(n_colors=256)) * 255).astype('uint8').ravel()
def calc_err_center(pred_bb, anno_bb, normalized=False):
  """Per-frame Euclidean distance between predicted and annotated box centers.

  Boxes are [x, y, w, h] rows; with normalized=True, centers are divided by
  the annotation's width/height before measuring the distance.
  """
  def _center(boxes):
    return boxes[:, :2] + 0.5 * (boxes[:, 2:] - 1.0)

  pred_center = _center(pred_bb)
  anno_center = _center(anno_bb)
  if normalized:
    scale = anno_bb[:, 2:]
    pred_center = pred_center / scale
    anno_center = anno_center / scale
  return torch.sqrt(((pred_center - anno_center) ** 2).sum(dim=1))
def calc_iou_overlap(pred_bb, anno_bb):
  """Per-frame IoU between [x, y, w, h] boxes (inclusive pixel coordinates)."""
  top_left = torch.max(pred_bb[:, :2], anno_bb[:, :2])
  pred_br = pred_bb[:, :2] + pred_bb[:, 2:] - 1.0
  anno_br = anno_bb[:, :2] + anno_bb[:, 2:] - 1.0
  bottom_right = torch.min(pred_br, anno_br)
  overlap_sz = (bottom_right - top_left + 1.0).clamp(0)
  # areas: intersection from the clamped overlap, union by inclusion-exclusion
  inter_area = overlap_sz.prod(dim=1)
  union_area = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - inter_area
  return inter_area / union_area
def calc_seq_err_robust(pred_bb, anno_bb, dataset="otb", target_visible=None):
  """Compute per-frame tracking errors for one sequence, with dataset-specific fixes.

  Returns (err_overlap, err_center, err_center_normalized, valid) tensors; frames
  with degenerate annotations (or invisible targets) are masked with -1/Inf
  depending on the dataset convention.  The first predicted box is replaced by
  the first annotation (OPE initialization frame).
  """
  pred_bb = pred_bb.clone()
  # Check if invalid values are present
  if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():
    raise Exception('Error: Invalid results')
  if torch.isnan(anno_bb).any():
    if dataset == 'uav':
      pass
    else:
      raise Exception('Warning: NaNs in annotation')
  # Carry the previous prediction forward over zero-sized (missing) predictions.
  if (pred_bb[:, 2:] == 0.0).any():
    for i in range(1, pred_bb.shape[0]):
      if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():
        pred_bb[i, :] = pred_bb[i - 1, :]
  # Reconcile prediction/annotation length mismatches (truncate or zero-pad).
  if pred_bb.shape[0] != anno_bb.shape[0]:
    if dataset == 'lasot':
      if pred_bb.shape[0] > anno_bb.shape[0]:
        # For monkey-17, there is a mismatch for some trackers.
        pred_bb = pred_bb[:anno_bb.shape[0], :]
      else:
        raise Exception('Mis-match in tracker prediction and GT lengths')
    else:
      # print('Warning: Mis-match in tracker prediction and GT lengths')
      if pred_bb.shape[0] > anno_bb.shape[0]:
        pred_bb = pred_bb[:anno_bb.shape[0], :]
      else:
        pad = torch.zeros(
            (anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)
        pred_bb = torch.cat((pred_bb, pad), dim=0)
  pred_bb[0, :] = anno_bb[0, :]
  # A frame is valid when its annotation has positive width and height
  # (and, when provided, the target is visible).
  if target_visible is not None:
    target_visible = torch.tensor(target_visible, dtype=torch.bool)
    valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible
  else:
    valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)
  err_center = calc_err_center(pred_bb, anno_bb)
  err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)
  err_overlap = calc_iou_overlap(pred_bb, anno_bb)
  # handle invalid anno cases
  if dataset in ['uav']:
    err_center[~valid] = -1.0
  else:
    err_center[~valid] = float("Inf")
  err_center_normalized[~valid] = -1.0
  err_overlap[~valid] = -1.0
  if dataset == 'lasot':
    err_center_normalized[~target_visible] = float("Inf")
    err_center[~target_visible] = float("Inf")
  if torch.isnan(err_overlap).any():
    raise Exception('Nans in calculated overlap')
  return err_overlap, err_center, err_center_normalized, valid
def save_tracker_output(seq_name, out_dir: Path, output: dict):
  """Saves the output of the tracker.

  Writes, per recognized key of *output*:
    - 'target_bbox': one .txt of boxes per sequence (or per object id in
      multi-object mode, where each entry is a dict of {obj_id: box}),
    - 'time': per-frame timings as <seq>_time.txt (per object id when dicts),
    - 'segmentation': one palettized PNG mask per frame under <out_dir>/<seq_name>/.
  """
  base_results_path = out_dir / seq_name
  # Save boxes as tab-separated floats with 2 decimals.
  def save_bb(file, data):
    tracked_bb = np.array(data).astype(float)
    np.savetxt(file, tracked_bb, delimiter='\t', fmt='%1.2f')
    # tracked_bb = np.array(data).astype(int)
    # np.savetxt(file, tracked_bb, delimiter='\t', fmt='%d')
  # Save per-frame execution times as floats.
  def save_time(file, data):
    exec_times = np.array(data).astype(float)
    np.savetxt(file, exec_times, delimiter='\t', fmt='%f')
  # Turn a list of per-frame {obj_id: value} dicts into {obj_id: [values...]}.
  def _convert_dict(input_dict):
    data_dict = {}
    for elem in input_dict:
      for k, v in elem.items():
        if k in data_dict.keys():
          data_dict[k].append(v)
        else:
          data_dict[k] = [
              v,
          ]
    return data_dict
  for key, data in output.items():
    # If data is empty
    if not data:
      continue
    if key == 'target_bbox':
      if isinstance(data[0], (dict, OrderedDict)):
        data_dict = _convert_dict(data)
        for obj_id, d in data_dict.items():
          bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)
          save_bb(bbox_file, d)
      else:
        # Single-object mode
        bbox_file = '{}.txt'.format(base_results_path)
        save_bb(bbox_file, data)
    elif key == 'time':
      if isinstance(data[0], dict):
        data_dict = _convert_dict(data)
        for obj_id, d in data_dict.items():
          timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)
          save_time(timings_file, d)
      else:
        timings_file = '{}_time.txt'.format(base_results_path)
        save_time(timings_file, data)
    elif key == 'segmentation':
      base_results_path.mkdir(exist_ok=True)
      for idx, mask in enumerate(output['segmentation']):
        png_path = base_results_path / '{:05d}.png'.format(idx)
        img = Image.fromarray(mask)
        img.putpalette(_PALETTE)
        img.save(png_path, format='PNG')
# Datasets with usable ground truth: metric computation is enabled only for these.
_EVAL_SETS = ['otb', 'lasot']
class SOTEvaluator(DatasetEvaluator):
  """Evaluator for single-object tracking (SOT) results.

  Collects per-sequence predicted boxes via :meth:`process` and, for datasets
  listed in ``_EVAL_SETS``, computes AUC / OP50 / OP75 / precision /
  normalized-precision in :meth:`evaluate`.
  """

  def __init__(self,
               dataset_name,
               distributed=True,
               output_dir=None,
               tasks=None):
    """
    Args:
      dataset_name: benchmark name; metrics are only computed when its
        lowercase form is in _EVAL_SETS.
      distributed: gather predictions across workers before evaluating.
      output_dir: optional directory where raw tracker output is written.
      tasks: explicit task list; passing a config node is deprecated.
    """
    self._logger = logging.getLogger(__name__)
    self._distributed = distributed
    self._output_dir = output_dir
    self._dataset_name = dataset_name
    self._do_evaluation = dataset_name.lower() in _EVAL_SETS
    if tasks is not None and isinstance(tasks, CfgNode):
      # Fix: the old code evaluated an unbound local (kpt_oks_sigmas) here,
      # raising NameError whenever a config node was passed.
      self._logger.warning(
          "SOT Evaluator instantiated using config, this is deprecated behavior."
          " Please pass in explicit arguments instead.")
      self._tasks = None  # Infering it from predictions should be better
    else:
      self._tasks = tasks
    self._cpu_device = torch.device("cpu")

  def reset(self):
    """Discard all predictions accumulated so far."""
    self._predictions = []

  def process(self, inputs, outputs):
    """Record tracker output for one sequence (optionally dumping it to disk).

    Args:
      inputs: the sequence object (provides name, target_visible and,
        when evaluation is enabled, ground_truth_rect).
      outputs: dict of tracker results; 'target_bbox' and 'proposals' are kept.
    """
    prediction = {"sequence": inputs, "visible": inputs.target_visible}
    if self._output_dir is not None:
      save_tracker_output(inputs.name, self._output_dir, outputs)
    if "target_bbox" in outputs:
      target_bbox = torch.tensor(outputs["target_bbox"], dtype=torch.float32)
      prediction["target_bbox"] = target_bbox
    if "proposals" in outputs:
      prediction["proposals"] = outputs["proposals"].to(self._cpu_device)
    if self._do_evaluation:
      gt_boxes = inputs.ground_truth_rect
      if isinstance(gt_boxes, (dict, OrderedDict)):
        # multi-object annotations: keep the per-object box lists only
        gt_boxes = list(gt_boxes.values())
      prediction['gt_boxes'] = torch.tensor(gt_boxes, dtype=torch.float32)
    if len(prediction) > 1:
      self._predictions.append(prediction)

  def evaluate(self):
    """Gather predictions (if distributed), optionally save them, and compute metrics.

    Returns a copy of the results dict; empty when evaluation is disabled,
    when this is not the main process, or when no predictions were received.
    """
    if not self._do_evaluation:
      return {}
    if self._distributed:
      comm.synchronize()
      predictions = comm.gather(self._predictions, dst=0)
      predictions = list(itertools.chain(*predictions))
      if not comm.is_main_process():
        return {}
    else:
      predictions = self._predictions
    if len(predictions) == 0:
      self._logger.warning("[SOT evaluator] Did not receive valid predictions.")
      return {}
    if self._output_dir:
      file_path = Path(self._output_dir) / "target_bboxes.pth"
      with file_path.open("wb") as f:
        torch.save(predictions, f)
    self._results = OrderedDict()
    if "target_bbox" in predictions[0]:
      self._eval_tracking_boxes(predictions)
    # Copy so the caller can do whatever with results
    return copy.deepcopy(self._results)

  def _tasks_from_predictions(self, predictions):
    """
    Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
    """
    tasks = {"bbox"}
    for pred in predictions:
      if "segmentation" in pred:
        tasks.add("segm")
      if "keypoints" in pred:
        tasks.add("keypoints")
    return sorted(tasks)

  def _eval_tracking_boxes(self, predictions):
    """Compute OPE tracking metrics over all sequences into self._results['sot'].

    (The unused local `tasks = self._tasks or self._tasks_from_predictions(...)`
    from the original was removed; it had no effect.)
    """
    threshold_set_overlap = torch.arange(0.0,
                                         1.0 + 0.05,
                                         0.05,
                                         dtype=torch.float32)
    threshold_set_center = torch.arange(0, 51, dtype=torch.float32)
    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float32) / 100.0
    avg_overlap_all = torch.zeros((len(predictions)), dtype=torch.float32)
    ave_success_rate_plot_overlap = torch.zeros(
        (len(predictions), threshold_set_overlap.numel()), dtype=torch.float32)
    ave_success_rate_plot_center = torch.zeros(
        (len(predictions), threshold_set_center.numel()), dtype=torch.float32)
    ave_success_rate_plot_center_norm = torch.zeros(
        (len(predictions), threshold_set_center.numel()), dtype=torch.float32)
    pred_boxes = [p['target_bbox'] for p in predictions]
    gt_boxes = [p['gt_boxes'] for p in predictions]
    visibles = [p.get('visible', None) for p in predictions]
    for seq_id, (pred_bb, anno_bb, target_visible) in enumerate(
        zip(pred_boxes, gt_boxes, visibles)):
      # Per-frame measures; invalid frames are masked inside the helper.
      err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(
          pred_bb, anno_bb, self._dataset_name, target_visible)
      avg_overlap_all[seq_id] = err_overlap[valid_frame].mean()
      seq_length = anno_bb.shape[0]
      if seq_length <= 0:
        raise Exception('Seq length zero')
      # Success/precision curves as percentages of frames passing each threshold.
      ave_success_rate_plot_overlap[
          seq_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(
              1, -1)).sum(0).float() / seq_length * 100
      ave_success_rate_plot_center[
          seq_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(
              1, -1)).sum(0).float() / seq_length * 100
      ave_success_rate_plot_center_norm[seq_id, :] = (
          err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(
              1, -1)).sum(0).float() / seq_length * 100
    auc_curve = ave_success_rate_plot_overlap.mean(0)
    sot_results = {
        'AUC': auc_curve.mean(-1).item(),
        'OP50': auc_curve[threshold_set_overlap == 0.50].item(),
        'OP75': auc_curve[threshold_set_overlap == 0.75].item(),
        'Precision': ave_success_rate_plot_center.mean(0)[20].item(),
        'NormPrecision': ave_success_rate_plot_center_norm.mean(0)[20].item()
    }
    self._results['sot'] = sot_results
| Flowerfan/Trackron | trackron/evaluation/sot_evaluation.py | sot_evaluation.py | py | 11,074 | python | en | code | 46 | github-code | 13 |
31769083144 | # -*- coding: utf-8 -*-
"""
Code to standardize dataframe based on groupby columns
Creates a new dataframe with standardized values
Created on 3/30/2021
@author: Giovanni R Budi
"""
import pandas as pd
import numpy as np
def make_columns_float(dataframe, cols):
    """
    Cast the given columns of *dataframe* to float64, modifying it in place.

    Parameters
    ----------
    dataframe : pandas dataframe
        dataframe whose columns are converted
    cols : list of column names (strings)
        columns to convert to float64
    """
    for column_name in cols:
        dataframe[column_name] = dataframe[column_name].astype('float64')
def get_summary_data(dataframe, groupcolumns, summarycolumns):
"""
Generates a dataframe with summary statistics (mean and standard deviation) of columns based on the grouped columns
Parameters
dataframe : pandas dataframe
intial dataframe
groupcolumns : list of column names (strings)
list of columns to group by
summarycolumns: list of column names (strings)
list of columns to gather summary statistics for
Returns
-------
df_summary : pandas dataframe
dataframe with summary statistics
"""
df_summary = dataframe.groupby(groupcolumns)[summarycolumns].agg(['mean', 'std'])
df_summary.columns = ['_'.join(x) for x in df_summary.columns.ravel()]
df_summary.reset_index(inplace=True)
return df_summary
def standardize_dataframe(dataframe, dropcolumns, standardizecolumns, keep):
"""
Generates standardized dataframe on specified columns
Parameters
----------
dataframe : pandas dataframe
initial dataframe to be standardized
dropcolumns : list of column names (strings)
columns to drop in initial dataframe
standardizecolumns : list of column names (strings)
columns to standardize in initial dataframe
keep: boolean
option to keep original columns for list of standardized columns
Returns
-------
df_standardized : TYPE
DESCRIPTION.
"""
make_columns_float(dataframe, standardizecolumns)
df_standardized = dataframe.copy()
for col in standardizecolumns:
df_mean = dataframe[col].mean()
df_std = dataframe[col].std()
df_standardized[col + "_standardized"] = (df_standardized[col] - df_mean)/df_std
df_standardized.drop(columns = dropcolumns, inplace=True)
if keep == False:
df_standardized.drop(columns = standardizecolumns, inplace=True)
return df_standardized
# Standardized column values in dataframe with group by from specified columns
def standardize_dataframe_by_group(dataframe, groupcolumns, dropcolumns, standardizecolumns, keep):
"""
Generates standardized dataframe based on groupby columns
Parameters
----------
dataframe : pandas dataframe
initial dataframe to be standardized
groupcolumns : list of column names (strings)
list of columns to group by
dropcolumns : list of column names (strings)
columns to drop in initial dataframe
standardizecolumns : list of column names (strings)
columns to standardize in initial dataframe
keep: boolean
option to keep original columns for list of standardized columns
Returns
-------
df_standardized : pandas dataframe
standardized dataframe
"""
make_columns_float(dataframe, standardizecolumns)
df_summary = get_summary_data(dataframe, groupcolumns, standardizecolumns)
df_standardized = pd.merge(dataframe, df_summary, on=groupcolumns, how='left')
for col in standardizecolumns:
df_standardized[col + '_standardized'] = (df_standardized[col] - df_standardized[col + '_mean'])/df_standardized[col + '_std']
df_standardized.drop(columns = [col + '_mean', col + '_std'], inplace=True)
df_standardized.drop(columns = dropcolumns, inplace=True)
if keep == False:
df_standardized.drop(columns = standardizecolumns, inplace=True)
return df_standardized
| giometry/Data-Analysis-Snippets | Standardization/standardize.py | standardize.py | py | 4,018 | python | en | code | 0 | github-code | 13 |
34016459785 | import os
from pendulum import datetime, duration
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from utils.slack_operator import task_fail_slack_alert
DEPLOYMENT_ENVIRONMENT = os.getenv("ENVIRONMENT", "development")
default_args = {
"owner": "airflow",
"description": "Test if the Slack notifier is working",
"depends_on_past": False,
"start_date": datetime(2015, 12, 1, tz="America/Chicago"),
"email_on_failure": False,
"email_on_retry": False,
"retries": 0,
"execution_timeout": duration(minutes=5),
"on_failure_callback": task_fail_slack_alert,
}
def task_fail():
raise Exception("Task failure test successfully triggered")
with DAG(
dag_id=f"test_slack_notifier_{DEPLOYMENT_ENVIRONMENT}",
default_args=default_args,
schedule_interval=None,
tags=["slack"],
catchup=False,
) as dag:
t1 = PythonOperator(
task_id="task_fail",
python_callable=task_fail,
)
t1
| cityofaustin/atd-airflow | dags/test_slack_notifier.py | test_slack_notifier.py | py | 1,002 | python | en | code | 2 | github-code | 13 |
21539403109 | import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import sys, os
import datetime
import cPickle as pickle
from collections import OrderedDict
from itertools import izip
import os, sys
import logging
reload(logging)
logger = logging.getLogger(os.path.basename(sys.argv[0]))
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from neural_network_weights import NeuralNetworkWeights
class RNN(NeuralNetworkWeights):
def __init__(self):
logger.info("Using RNN...")
def load_parameters(self, params, word_embeddings):
""" Directly load given parameters into the network.
"""
self.word_embeddings = theano.shared(value = word_embeddings,
name = 'word_embeddings',
borrow = True)
# load (aka. deep copy) parameters in params into network
c=0
self.params = []
names = ['W', 'W_in', 'bh']
for n,p in zip(names, params):
self.params.append(theano.shared(name = p.name,
value = p.get_value(borrow=True)))
setattr(self, n, self.params[c])
#logger.info("self.%s = %s (type %s)" % (n, str(self.params[c]), str(type(self.params[c]))))
c+=1
assert( len(self.params) == c )
def init_parameters(self,
n_in, # word embeddings dimension
n_hidden, # multimodal embeddings dimension
vocabulary_size,
word_embeddings = None):
""" Initialise network parameters with default values/distributions
(using sizes provided as parameters' shapes).
"""
# word embeddings
if word_embeddings is None:
word_embeddings = self.norm_weight(vocabulary_size, n_in)
self.word_embeddings = theano.shared(value = word_embeddings,
name = 'word_embeddings',
borrow = True)
# recurrent weights as a shared variable
W_init = self.norm_weight(n_hidden)
self.W = theano.shared(value=W_init, name='W', borrow=True)
# input to hidden layer weights
W_in_init = self.norm_weight(n_in, n_hidden)
self.W_in = theano.shared(value=W_in_init, name='W_in', borrow=True)
## hidden to output layer weights
#W_out_init = self.norm_weight(n_hidden, n_out)
#self.W_out = theano.shared(value=W_out_init, name='W_out', borrow=True)
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh', borrow=True)
#by_init = np.zeros((n_out,), dtype=theano.config.floatX)
#self.by = theano.shared(value=by_init, name='by', borrow=True)
self.params = [self.W, self.W_in, self.bh]
#self.params = [self.W, self.W_in, self.W_out, self.bh, self.by]
def create(self,
minibatch_sentences, # (n_timesteps x n_examples x word embeddings dimension)
minibatch_mask = None, # masks for minibatch_sentences
activation=T.nnet.sigmoid):
assert(not self.params is None and not len(self.params) == 0)
# minibatch_sentences is 3D tensor
# (n_words_in_input_sentences x n_sentences_in_minibatch x word_embeddings_dimensionality)
n_timesteps = minibatch_sentences.shape[0]
n_examples = minibatch_sentences.shape[1]
n_in = self.word_embeddings.shape[1]
n_hidden = self.W.shape[0]
#self.input = self.word_embeddings[minibatch_sentences.flatten()]
input = self.word_embeddings[minibatch_sentences]
input.reshape([n_timesteps, n_examples, n_in])
if minibatch_mask == None:
minibatch_mask = T.alloc(1., minibatch_sentences.shape[0], 1)
#minibatch_mask = np.ones((n_timesteps, n_examples, 1))
mask = minibatch_mask.reshape([n_timesteps, n_examples, 1])
self.activation = activation
# for every parameter, we maintain it's last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
self.updates = OrderedDict()
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using sigmoid activation function)
# and linear output activation function (currently unused)
def step(x_t, mask, h_tm1):
h_t = self.activation( T.dot(x_t, self.W_in) + T.dot(h_tm1, self.W) + self.bh )
#y_t = T.dot(h_t, self.W_out) + self.by
#return [h_t, y_t]
return h_t
h0 = T.unbroadcast(T.alloc(0., n_examples, n_hidden), 0)
# mapping from word embeddings layer into first hidden layer
#projected_input = T.dot(self.input, self.W_first) + self.b_first
#projected_input = self.input
# the hidden state `h` for the entire sequences, and the output for the
# entire sequences `y_pred` (first dimension is always time)
#[h, y_pred], _ = theano.scan(step,
h, updates = theano.scan(step,
sequences=[input, mask],
outputs_info=[h0],
n_steps=input.shape[0])
self.last_h = h[-1]
self.last_h.name = 'last_h'
# create a predict function
self._predict = theano.function([minibatch_sentences, minibatch_mask],
self.last_h)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
#self.L1 += abs(self.W_out.sum())
self.L1.name = 'L1_regulariser'
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
#self.L2_sqr += (self.W_out ** 2).sum()
self.L2_sqr.name = 'L2_regulariser'
self.loss = lambda h: self.mse_h(h)
#self.loss = lambda y: self.mse(y)
def mse_h(self, h):
# error between output and hidden memory final state
return T.mean((self.last_h - h) ** 2)
def predict(self, X):
return self._predict(X, np.ones_like(X, dtype=theano.config.floatX)) | iacercalixto/mme-positive-examples-mse | RNN_sentence_embedder_mse.py | RNN_sentence_embedder_mse.py | py | 6,973 | python | en | code | 2 | github-code | 13 |
10311843530 | from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)
line_bot_api = LineBotApi('73Mu8Bojy7PwkWxy+bV0eFVUVasQzliOpdStK1TK4j3Ed39P3U9HFT5cvlZyiqDi66k84dv/AE4eoIN3iuyuUVYevWRh1IlRg0FJ4bC6I2ae/UrM2l7aOfhSJENxiHX0gkVPHSRo/SrqyO2krMKwEgdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('44b266ad1513f57b4ef8d44a82884c1e')
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text))
if __name__ == "__main__":
app.run() | sing0510/line-bot | app.py | app.py | py | 1,259 | python | en | code | 0 | github-code | 13 |
27194522756 | import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
from model_components import Block
class GPTLanguageModel(nn.Module):
"""
Implements a GPT language model.
This model is based on the transformer architecture, specifically designed for generative pre-training
of language models. It consists of token and position embedding layers, followed by a sequence of transformer
blocks, and a final layer to generate predictions for the next token in the sequence.
Attributes:
token_embedding_table (nn.Embedding): Embedding layer for tokens.
position_embedding_table (nn.Embedding): Embedding layer for token positions.
blocks (nn.Sequential): Sequential container of transformer blocks.
ln_f (nn.LayerNorm): Final layer normalization.
lm_head (nn.Linear): Linear layer to map the output to the vocabulary size.
"""
def __init__(self, vocab_size, n_embd, n_head, n_layer, block_size, dropout):
"""
Initializes the GPTLanguageModel instance.
Args:
vocab_size (int): Size of the vocabulary.
n_embd (int): The size of each embedding vector.
n_head (int): The number of attention heads in each transformer block.
n_layer (int): The number of transformer blocks in the model.
block_size (int): Size of the sequence block considered in attention.
dropout (float): Dropout rate for regularization in the network.
"""
super().__init__()
self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
self.position_embedding_table = nn.Embedding(block_size, n_embd)
self.blocks = nn.Sequential(*[Block(n_embd, n_head, block_size, dropout) for _ in range(n_layer)])
self.ln_f = nn.LayerNorm(n_embd) # Final layer normalization
self.lm_head = nn.Linear(n_embd, vocab_size)
self.apply(self._init_weights)
def _init_weights(self, module):
"""
Initializes weights of the model's layers.
This method is applied to each module in the model. It initializes the weights of linear and embedding
layers following a normal distribution, which is a common practice in training deep learning models.
Args:
module (nn.Module): A module in the model.
"""
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(self, index, targets=None, device='cpu'):
"""
Forward pass of the GPTLanguageModel.
Processes an input sequence (index) and computes the logits for each token in the sequence.
If targets are provided, it also computes the loss, which can be used for training.
Args:
index (torch.Tensor): A tensor of token indices with shape (batch_size, sequence_length).
targets (torch.Tensor, optional): A tensor of target token indices with the same shape as 'index'.
device (str, optional): The device ('cpu' or 'cuda') to perform computations on.
Returns:
Tuple[torch.Tensor, Optional[torch.Tensor]]: A tuple containing logits and, if targets are provided, the loss.
"""
B, T = index.shape
tok_emb = self.token_embedding_table(index) # Token embeddings (B, T, C)
pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # Positional embeddings (T, C)
x = tok_emb + pos_emb # Combine token and position embeddings (B, T, C)
x = self.blocks(x) # Pass through transformer blocks (B, T, C)
x = self.ln_f(x) # Apply final layer normalization (B, T, C)
logits = self.lm_head(x) # Project to vocabulary size (B, T, vocab_size)
if targets is None:
loss = None
else:
B, T, C = logits.shape
logits = logits.view(B * T, C)
targets = targets.view(B * T)
loss = F.cross_entropy(logits, targets)
return logits, loss
def generate(self, index, max_new_tokens, device='cpu'):
"""
Generates new tokens given a context (index).
This function autoregressively generates new tokens based on the provided context.
It predicts the next token, appends it to the context, and repeats this process.
Args:
index (torch.Tensor): A tensor of token indices with shape (batch_size, current_sequence_length).
max_new_tokens (int): The maximum number of new tokens to generate.
device (str, optional): The device ('cpu' or 'cuda') to perform computations on.
Returns:
torch.Tensor: The tensor containing the original and newly generated token indices.
"""
max_seq_length = 64 # Assuming this is your model's maximum sequence length
for _ in range(max_new_tokens):
if index.size(1) >= max_seq_length:
index = index[:, -max_seq_length + 1:] # Keep the most recent tokens
logits, _ = self.forward(index, device=device) # Predict next token
logits = logits[:, -1, :] # Focus on the last time step
probs = F.softmax(logits, dim=-1) # Softmax to get probabilities
index_next = torch.multinomial(probs, num_samples=1) # Sample next token
index = torch.cat((index, index_next), dim=1) # Append to the sequence
return index
| ahmedmshazly/gpt_class_activity | new/gpt_model.py | gpt_model.py | py | 5,649 | python | en | code | 0 | github-code | 13 |
19065316041 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# https://github.com/tpaviot/pythonocc-demos/blob/master/examples/core_classic_occ_bottle.py
import os
from OCC.gp import gp_Pln, gp_Dir, gp_Pnt, gp_OY, gp_Trsf
from OCC.STEPControl import STEPControl_Reader
from OCC.TopAbs import TopAbs_FACE
from OCC.TopExp import TopExp_Explorer
from OCC.BRepAlgoAPI import BRepAlgoAPI_Section
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeFace, BRepBuilderAPI_Transform
from occlib.Topology import Topo
from occlib.EdgeParse import EdgeOnSurface
from occlib.BoundingBox import get_boundingbox
from occlib.DXFwriter import write
from occlib.Scene import Arc3D
if __name__ == "__main__":
objects = set()
# Read the file and get the shape
reader = STEPControl_Reader()
tr = reader.WS().GetObject().TransferReader().GetObject()
reader.ReadFile(os.path.abspath(os.path.join('.', 'models', 'TPI_PH_CNF95XX.STEP')))
reader.TransferRoots()
shape = reader.OneShape()
# Get bounding box
xmin, ymin, zmin, xmax, ymax, zmax = get_boundingbox(shape)
# Build section plane
XYZ = (1, 1, 0)
lim_coord1 = (xmin, xmax)
lim_coord2 = (ymin, ymax)
section_height = zmax-0.18
# A horizontal plane is created from which a face is constructed to intersect with
# the building. The face is transparently displayed along with the building.
section_plane = gp_Pln(
gp_Pnt(0, 0, section_height),
gp_Dir(0, 0, 1)
)
section_face = BRepBuilderAPI_MakeFace(section_plane, xmin, xmax, ymin, ymax).Face()
# Quick way to specify the Y axis
xAxis = gp_OY()
# Set up the mirror
aTrsf = gp_Trsf()
aTrsf.SetMirror(xAxis)
# Explore the faces of the shape (these are known to be named)
exp = TopExp_Explorer(shape, TopAbs_FACE)
while exp.More():
s = exp.Current()
tp = Topo(s)
for face in tp.faces():
section = BRepAlgoAPI_Section(section_face, face).Shape()
# Apply the mirror transformation
aBRespTrsf = BRepBuilderAPI_Transform(section, aTrsf)
# Get the mirrored shape back out of the transformation and convert back to a wire
aMirroredShape = aBRespTrsf.Shape()
section_edges = list(Topo(aMirroredShape).edges())
for edge in section_edges:
obj = EdgeOnSurface(edge, section_plane, lim_coord1, lim_coord2, XYZ)
if type(obj) == Arc3D:
obj.t2, obj.t1 = obj.t1, obj.t2
objects.add(obj)
exp.Next()
path = "cross_section2.dxf"
write(objects, XYZ, path) | KodeWorker/3DModelAnalysis | dev/20190808/dev_cross_section2_write_dxf.py | dev_cross_section2_write_dxf.py | py | 2,884 | python | en | code | 0 | github-code | 13 |
38267959054 | """The image on the webpage is an anchor to another webpage with a similar
url. Appended to the end of the url however is a web-query entitled "nothing"
with a value of 44827. The content of the new page is "and the next nothing is",
followed by a number. This hints that we should alter the web-query by changing
the value of the webquery to the number found on the content of this page.
Another hint is provided in the source code of the original page informing us
that we need not go past 400 new pages. The pattern described above will repeat
until a special webpage is found containing the name of the next page."""
import sys
import requests
import webbrowser
from bs4 import BeautifulSoup
webpage = "http://www.pythonchallenge.com/pc/def/linkedlist.php"
r = requests.get(webpage)
soup = BeautifulSoup(r.content, "html.parser")
next_page = "http://www.pythonchallenge.com/pc/def/" + soup.find("a")["href"]
r = requests.get(next_page)
soup = BeautifulSoup(r.content, "html.parser")
num_scraped = 1
while True:
sys.stdout.write(f"\rOn page {num_scraped}")
sys.stdout.flush()
# If an html file name is present in the webpage, then the final page
# has been found.
if ".html" in soup.text:
break
# One page informs us to divide the previous number by two. When this
# page is found, do so and continue as normal.
if "Divide by two" in soup.text:
num /= 2
# The number at the end of each page's content is the value for the next
# web query; find it and use it to find the next webpage.
else:
num = int(soup.text.split()[-1])
next_page = "http://www.pythonchallenge.com/pc/def/linkedlist.php" \
f"?nothing={num}"
r = requests.get(next_page)
soup = BeautifulSoup(r.content, "html.parser")
num_scraped += 1
split_page = webpage.split("linkedlist.php")
new_page = f"{split_page[0]}{soup.text}"
webbrowser.open(new_page)
| cjonsmith/python-challenge | problem_04.py | problem_04.py | py | 1,923 | python | en | code | 0 | github-code | 13 |
74560006416 | # Hi 0191121332, please visit http://202.207.12.156:9014/context/3ff280105813f582c7c38dabedd901bc fill text
import requests
import json
import numpy as np
url ="http://202.207.12.156:9014/step_06"
r = requests.get(url)
q = r.text
q = json.loads(q)
# q = eval(q)
# print(q)
# print((type(q)))
n = q["questions"]
print(n)
cb = np.full((15,15),".")
a=0
b=0
nn=0
ss = ''
cbs=''
for i in range(0,len(n),2):
a = ord(n[i])-96
b = ord(n[i+1])-96
print(n[i],ord(n[i]),a)
if nn%2==0:
cb[a-1][b-1]='x'
else:
cb[a-1][b-1]='o'
nn+=1
if i != 0:
cbs += ','
for i in range(0,15):
for j in range(0,15):
cbs += cb[i][j]
params ={
"ans":cbs
}
url="http://202.207.12.156:9014/step_06"
r =requests.get(url,params=params)
print(r.url)
print(r.text)
| GritYolo/AI_Summer | 6.py | 6.py | py | 818 | python | en | code | 0 | github-code | 13 |
72060646417 | #!/usr/bin/env python
# coding=utf-8
"""
Script with functions to dimension local heating and decentralized electrical
networks.
Currently, no support for separate heating_and_deg network dimensioning
(first lhn, then deg dimensioning; plus overlapping),
if street routing is used!
If you want to have a heating_and_deg network via street routing, use
add_lhn_to_city with street routing and heating_and_deg as network type.
"""
import os
import math
import pickle
import pycity_base.functions.process_city as prcity
import pycity_calc.visualization.city_visual as cityvis
import pycity_calc.toolbox.dimensioning.dim_functions as dimfunc
import pycity_calc.toolbox.networks.network_ops as netop
def estimate_u_value(d_i):
    """
    Return the estimated U-value (in W/mK) of a heating pipe.

    The estimate is a power-law fit over the inner pipe diameter, based on
    values published by: C. Beier, S. Bargel, C. Doetsch, LowEx in der Nah-
    und Fernwaerme. Abschlussbericht, 2010.

    Parameters
    ----------
    d_i : float
        Inner diameter of pipe in meters

    Returns
    -------
    u_pipe : float
        U-value of pipe in W/mK
    """
    return 0.9264 * d_i ** 0.501
def calc_pipe_power_loss(length, u_pipe, temp_vl, temp_rl, temp_environment):
    """
    Return the thermal loss power of a heating pipe pair in Watt.

    The loss is proportional to pipe length, pipe U-value and the combined
    temperature difference of inlet plus return pipe against the environment.

    Parameters
    ----------
    length : float
        Total length of lhn grid in m
    u_pipe : float
        U-value of pipe in W/mK
    temp_vl : float
        Inlet temperature of LHN in degree Celsius
    temp_rl : float
        Flowback temperature of LHN in degree Celsius
    temp_environment : float
        Environmental temperature in degree Celsius

    Returns
    -------
    q_dot_loss : float
        Thermal power loss of pipelines in Watt
    """
    # Combined driving temperature difference of inlet and return pipe
    # against the surrounding environment (in Kelvin)
    delta_temp = temp_vl + temp_rl - 2 * temp_environment
    return u_pipe * length * delta_temp
def calc_diameter_of_lhn_network(max_th_power, length, temp_vl, temp_rl,
                                 temp_environment, c_p=4190, rho=1000, v_max=2,
                                 round_up=True):
    """
    Estimate the required inner pipe diameter of an LHN network.

    The required mass flow (design power plus pipe losses) and the pipe
    diameter depend on each other, so both are solved by fixed-point
    iteration: starting from a 30 % loss surcharge on the design power, the
    diameter, the resulting U-value and pipe losses, and the updated mass
    flow are recomputed until the mass flow changes by less than 0.1 %
    between iterations.

    Parameters
    ----------
    max_th_power : float
        Maximal thermal power in W (maximal power taken by final user from lhn
        grid)
    length : float
        Total length of lhn grid in m
    temp_vl : float
        Inlet temperature of LHN in degree Celsius
    temp_rl : float
        Flowback temperature of LHN in degree Celsius
    temp_environment : float
        Environmental temperature in degree Celsius
    c_p : float, optional
        Specific heat capacity of medium in J / (kg*K)
        (default: 4190 for water)
    rho : float, optional
        Density of medium in kg/m^3 (default: 1000 for water)
    v_max : float, optional
        Maximal allowed velocity within lhn system (in m/s)
        (default: 2)
    round_up : bool, optional
        Round up to next full cm value
        (default: True)
        False - Do not round up

    Returns
    -------
    d_i : float
        Inner pipe diameter for system dimensioning in meters
    """
    # Input validation
    assert temp_vl > temp_rl
    for value in (max_th_power, c_p, rho, length):
        assert value > 0, ('Input parameters of calc_diameter_of_lhn_network' +
                           ' [max_th_power, c_p, rho, length] must be larger' +
                           ' than zero!')

    # Usable heat per kg of medium (in J/kg)
    spec_heat = (temp_vl - temp_rl) * c_p

    # Initial mass flow guess in kg/s; factor 1.3 accounts for lhn heating
    # losses, which are not yet known in the first iteration
    m_flow = max_th_power * 1.3 / spec_heat

    while True:
        m_flow_prev = m_flow

        # Inner diameter (in m) required to carry m_flow_prev at v_max
        d_i = round(2 * math.sqrt(m_flow_prev / (math.pi * v_max * rho)), 5)

        # Heat loss of the pipe network for this diameter (in W)
        u_pipe = estimate_u_value(d_i)
        q_dot_loss = calc_pipe_power_loss(length=length, u_pipe=u_pipe,
                                          temp_vl=temp_vl, temp_rl=temp_rl,
                                          temp_environment=temp_environment)

        # Updated mass flow covering user demand plus network losses
        m_flow = (max_th_power + q_dot_loss) / spec_heat

        # Converged when the relative mass flow change drops below 0.1 %
        if abs(m_flow_prev - m_flow) / m_flow_prev < 0.001:
            break

    if round_up:
        # Round up inner diameter to the next full centimeter
        d_i = math.ceil(d_i * 100) / 100

    return d_i
def add_lhn_to_city(city, list_build_node_nb=None, temp_vl=90,
                    temp_rl=50, c_p=4186, rho=1000,
                    use_street_network=False, network_type='heating',
                    plot_stepwise=False):
    """
    Function adds local heating network (LHN) to city district.
    LHN can either be installed along minimum spanning tree
    (use_street_network = False)
    or along street network (use_street_network = True).

    Parameters
    ----------
    city : object
        City object of pycity_calc
    list_build_node_nb : list, optional
        List of building nodes, which should be connected to LHN network
        (default: None). If set to None, connects all buildings to LHN.
    temp_vl : float, optional
        Inlet flow temperature in degree Celsius
        (default: 90)
    temp_rl : float, optional
        Return flow temperature in degree Celsius
        (default: 50)
    c_p : float, optional
        Specific heat capacity of medium within lhn system in J/kgK
        (default: 4186 - for water)
    rho : float, optional
        Density of medium within lhn system in kg/m^3
        (default: 1000 - for water)
    use_street_network : bool, optional
        Defines if street network should be used to generate lhn system
        (default: False)
        False - Use minimum spanning tree to generate lhn system
        True - Only allow routing along street network
        If no street network exists within city object, minimium spanning tree
        is used
    network_type : str, optional
        Desired network (Default: 'heating')
        Options: 'heating' or 'heating_and_deg' (deg: decentralized, el. grid)
    plot_stepwise : bool, optional
        Plot stepwise graph search and lhn generation (default: False)

    Returns
    -------
    res_tuple : tuple (of floats)
        Results tuple of kind (d_i, length)
        d_i : float
            Inner diameter of pipe in meters
        length : float
            Total network length in meters

    Raises
    ------
    AssertionError
        If one node within list_build_node_nb does not have a building
        entity, or if input parameters are invalid.
    ValueError
        If one node within list_build_node_nb is already connected to an
        existing 'heating' or 'heating_and_deg' network edge.
    """
    # Validate input parameters
    assert temp_vl > temp_rl
    assert c_p > 0, 'c_p must be larger than zero!'
    assert rho > 0, 'rho must be larger than zero!'
    assert network_type in ['heating', 'heating_and_deg']
    assert list_build_node_nb != []

    if list_build_node_nb is None:
        # get list of all building entities
        list_build_node_nb = city.get_list_build_entity_node_ids()
    else:
        # Check if all node ids within list_build_node_nb belong to buildings
        for n in list_build_node_nb:
            assert n in city.get_list_build_entity_node_ids(), \
                ('Node ' + str(n) + ' does not have a building entity.')

    # Check if one building is already connected to lhn.
    # If existing heating connection is found, ValueError is raised.
    # NOTE(review): O(len(list_build_node_nb) * city node count) scan;
    # presumably acceptable for typical district sizes - confirm for
    # very large city graphs.
    for u in list_build_node_nb:
        for v in city.nodes():
            if city.has_edge(u, v):
                if 'network_type' in city.edges[u, v]:
                    if (city.edges[u, v]['network_type'] == 'heating' or
                            city.edges[u, v][
                                'network_type'] == 'heating_and_deg'):
                        print('u', u)
                        print('v', v)
                        raise ValueError('Building within building list ' +
                                         'already holds lhn network!')

    print('Start process to add LHN to city\n')

    # # Start with lhn processing
    # #------------------------------------------------------------------
    # Use street networks
    # #------------------------------------------------------------------
    if use_street_network:  # Route along street networks

        # Get minimum network spanning tree, based on street network
        (min_span_graph, list_new_nodes) = \
            netop.gen_min_span_tree_along_street(city=city,
                                                 nodelist=list_build_node_nb,
                                                 plot_graphs=plot_stepwise)

    # Use building minimum spanning tree
    # #------------------------------------------------------------------
    else:  # Use minimum spanning tree between building nodes

        # Generate subgraph with building of list, exclusively
        subcity = prcity.get_subcity(city=city, nodelist=list_build_node_nb)

        print('Subcity node ids:')
        print(subcity.nodes(data=False))
        print()

        print('Calculate minimum spanning tree.')
        # Generate minimum spanning tree (with copy of subcity)
        min_span_graph = \
            netop.get_min_span_tree_for_x_y_positions(city=subcity,
                                                      nodelist=
                                                      list_build_node_nb)

        print('Minimum spanning tree edges:')
        print(min_span_graph.edges(data=False))
        print()

    # Sum up weight to total length of network
    length = netop.sum_up_weights_of_edges(min_span_graph)
    print('Total network length in m:', math.ceil(length))
    print()

    # Extract ground temperature of environment
    temp_ground = city.environment.temp_ground

    # Get max thermal power of all buildings within list
    # (without domestic hot water demand)
    max_th_power = dimfunc.get_max_p_of_city(city_object=city,
                                             get_thermal=True,
                                             with_dhw=False,
                                             nodelist=list_build_node_nb)
    print('Max. thermal power in kW:', round(max_th_power / 1000, 1))
    print()

    # Dimension inner pipe diameter for max. power plus network losses
    d_i = calc_diameter_of_lhn_network(max_th_power=max_th_power,
                                       temp_vl=temp_vl,
                                       temp_rl=temp_rl,
                                       temp_environment=temp_ground,
                                       c_p=c_p, rho=rho,
                                       length=length,
                                       round_up=True)
    print('Chosen inner diameter of LHN pipes in m:', d_i)
    print()

    # Use street networks
    # #------------------------------------------------------------------
    if use_street_network:
        # create a list which saves information about created LHN nodes
        # list_lhn_node[0] holds min_span_tree nodes already processed,
        # list_lhn_node[1] holds the LHN node id created from the node at
        # the same index. This prevents multiple LHN node creation.
        list_lhn_node=[[],[]]
        # Loop over all edges of minimum spanning graph
        for u, v in min_span_graph.edges():
            # check if u and v are buildingnodes or if they have already been used to create an LHN node
            if u not in list_build_node_nb:
                #u is not a buildingnode
                if u not in list_lhn_node[0]:
                    # u was not set already as a LHN node
                    # Get current position
                    pos_curr = min_span_graph.nodes[u]['position']
                    # Generate new id (skip ids already used in city graph)
                    id1 = city.new_node_number()
                    while id1 in city.nodes():
                        id1 += 1
                    list_lhn_node[0].append(u) # save the min_span_tree_node
                    list_lhn_node[1].append(id1) # save the new_lhn_node
                    # Add new network node to city
                    city.add_node(id1, position=pos_curr,
                                  node_type=network_type)
                else:
                    # u was set already as a LHN node
                    # look up which id the LHN node has.
                    # NOTE(review): assumes u is always found within
                    # list_lhn_node[0]; otherwise 'index' would be unbound.
                    for i in range(len(list_lhn_node[0])):
                        if list_lhn_node[0][i] == u:
                            index = i
                    id1 = list_lhn_node[1][index]
            else:
                # u is a buildingnode
                id1=u
            if v not in list_build_node_nb:
                # v is not a buildingnode
                if v not in list_lhn_node[0]:
                    # v was not set already as a LHN node
                    # Get current position
                    pos_curr = min_span_graph.nodes[v]['position']
                    # Generate new id (skip ids already used in city graph)
                    id2 = city.new_node_number()
                    while id2 in city.nodes():
                        id2 += 1
                    list_lhn_node[0].append(v) # save the min_span_tree_node
                    list_lhn_node[1].append(id2) # save the new_lhn_node
                    # Add new network node to city
                    city.add_node(id2, position=pos_curr,
                                  node_type=network_type)
                else:
                    # v was set already as a LHN node
                    # look up which id the LHN node has.
                    # NOTE(review): assumes v is always found within
                    # list_lhn_node[0]; otherwise 'index' would be unbound.
                    for i in range(len(list_lhn_node[0])):
                        if list_lhn_node[0][i] == v:
                            index = i
                    id2 = list_lhn_node[1][index]
            else:
                # v is a buildingnode
                id2 = v
            # Connect both (possibly newly created) nodes with an LHN edge
            city.add_edge(id1, id2, network_type=network_type,
                          temp_vl=temp_vl,
                          temp_rl=temp_rl, c_p=c_p, rho=rho, d_i=d_i)

    # Use building minimum spanning tree
    # #------------------------------------------------------------------
    else:  # Use minimum spanning tree between building nodes
        # Loop over minium spanning tree edges and add lhn to city
        for u, v, data in min_span_graph.edges(data=True):
            set_heat_deg = False
            # If deg network already exists, replace it with heating_and_deg
            if city.has_edge(u, v):
                if 'network_type' in city.edges[u, v]:
                    if city.edges[u, v]['network_type'] == 'electricity':
                        print('Found existing el. network between node ' +
                              str(u) + ' and node ' + str(v) + '. Going '
                              'to replace is with type heating_and_deg.')
                        # Add heating_and_deg edge to city
                        city.add_edge(u, v, network_type='heating_and_deg',
                                      temp_vl=temp_vl,
                                      temp_rl=temp_rl, c_p=c_p, rho=rho,
                                      d_i=d_i)
                        set_heat_deg = True
            # If there has not been a deg connection, add regular network edge
            if set_heat_deg is False:
                # Add network edge to city
                city.add_edge(u, v, network_type=network_type, temp_vl=temp_vl,
                              temp_rl=temp_rl, c_p=c_p, rho=rho, d_i=d_i)

    return (d_i, length)
def _get_or_create_deg_node(city, min_span_graph, node, list_build_node_nb,
                            node_map):
    """Map a minimum-spanning-tree node to a city node id for the DEG.

    Building nodes are used directly. Street/intersection nodes are
    materialised in the city as new 'electricity' network nodes exactly
    once; *node_map* remembers the mapping so later edges reuse the same
    city node instead of creating duplicates.

    Parameters
    ----------
    city : object
        City object of pycity_calc
    min_span_graph : object
        Minimum spanning tree graph (holds node 'position' attributes)
    node : int
        Node id within min_span_graph
    list_build_node_nb : list
        List of building node ids
    node_map : dict
        Maps already-processed non-building graph nodes to their city
        node ids (mutated in place)

    Returns
    -------
    node_id : int
        City node id to use for the DEG edge
    """
    if node in list_build_node_nb:
        #  Building nodes keep their own id
        return node
    if node in node_map:
        #  Non-building node was already materialised as a DEG node
        return node_map[node]
    #  Create a new network node at the graph node's position
    pos_curr = min_span_graph.nodes[node]['position']
    new_id = city.new_node_number()
    while new_id in city.nodes():  # make sure the id is really unused
        new_id += 1
    city.add_node(new_id, position=pos_curr, node_type='electricity')
    node_map[node] = new_id
    return new_id


def add_deg_to_city(city, list_build_node_nb=None, use_street_network=False):
    """
    Function adds decentralized electrical grid (DEG) to city district.

    DEG can either be installed along minimum spanning tree
    (use_street_network = False)
    or along street network (use_street_network = True).

    Raise assertion error if one node within list_build_node_nb does not
    have a building entity.

    Parameters
    ----------
    city : object
        City object of pycity_calc
    list_build_node_nb : list, optional
        List of building nodes, which should be connected to DEG network.
        (default: None). If None is set, connects all buildings within city.
    use_street_network : bool, optional
        Defines if street network should be used to generate deg system
        (default: False)
        False - Use minimum spanning tree to generate deg system
        True - Only allow routing along street network
        If no street network exists within city object, minimum spanning
        tree is used
    """
    assert list_build_node_nb != []

    if list_build_node_nb is None:
        #  Get list of all building entities
        list_build_node_nb = city.get_list_build_entity_node_ids()
    else:
        #  Check if all node ids within list_build_node_nb belong to
        #  buildings (single pass; the original code repeated this check)
        for n in list_build_node_nb:
            assert n in city.get_list_build_entity_node_ids(), \
                ('Node ' + str(n) + ' does not have a building entity.')

    print('Start process to add DEG to city\n')

    #  Use street networks
    #  #------------------------------------------------------------------
    if use_street_network:  # Route along street networks
        #  Get minimum network spanning tree, based on street network
        (min_span_graph, list_new_nodes) = \
            netop.gen_min_span_tree_along_street(city=city,
                                                 nodelist=list_build_node_nb,
                                                 plot_graphs=False)
    #  Use building minimum spanning tree
    #  #------------------------------------------------------------------
    else:  # Use minimum spanning tree
        #  Generate subgraph with buildings of list, exclusively
        subcity = prcity.get_subcity(city=city, nodelist=list_build_node_nb)
        print('Subcity node ids:')
        print(subcity.nodes(data=False))
        print()
        print('Calculate minimum spanning tree.')
        #  Generate minimum spanning tree (with copy of subcity)
        min_span_graph = \
            netop.get_min_span_tree_for_x_y_positions(
                city=subcity, nodelist=list_build_node_nb)
        print('Minimum spanning tree edges:')
        print(min_span_graph.edges(data=False))
        print()

    #  Sum up weight to total length of network
    length = netop.sum_up_weights_of_edges(min_span_graph)
    print('Total network length in m:', math.ceil(length))
    print()

    #  Use street networks
    #  #------------------------------------------------------------------
    if use_street_network:
        #  Maps min-span-tree street nodes to the DEG node ids created for
        #  them. Prevents multiple DEG node creation for the same node.
        node_map = {}
        #  Loop over all edges of minimum spanning graph
        for u, v in min_span_graph.edges():
            id1 = _get_or_create_deg_node(city, min_span_graph, u,
                                          list_build_node_nb, node_map)
            id2 = _get_or_create_deg_node(city, min_span_graph, v,
                                          list_build_node_nb, node_map)
            city.add_edge(id1, id2, network_type='electricity')
    #  Use building minimum spanning tree
    #  #------------------------------------------------------------------
    else:
        #  Loop over minimum spanning tree edges and add deg to city
        for u, v in min_span_graph.edges():
            found_network = False
            if city.has_edge(u, v):
                if 'network_type' in city.edges[u, v]:
                    if city.edges[u, v]['network_type'] == 'heating':
                        print('Found existing heating network between node ' +
                              str(u) + ' and node ' + str(v) + '. Going '
                              'to replace it with type heating_and_deg.')
                        #  Upgrade heating edge to combined heating_and_deg
                        city.add_edge(u, v, network_type='heating_and_deg')
                        found_network = True
                    elif city.edges[u, v]['network_type'] == \
                            'heating_and_deg':
                        print('Found existing heating_and_deg network '
                              'between node ' + str(u) + ' and node ' +
                              str(v) + '. Do nothing.')
                        found_network = True
            if found_network is False:
                #  Add deg edge to city
                city.add_edge(u, v, network_type='electricity')
# TODO: Add function to erase complete network
if __name__ == '__main__':
    #  Demo: load a pickled city district, add a heating network (LHN) to
    #  all buildings, add a DEG on part of it and plot the results.

    #  Path to pickle city file
    city_filename = 'city_clust_simple.pkl'

    #  Input file is expected under <pycity_calc>/toolbox/analyze/input/
    this_path = os.path.dirname(os.path.abspath(__file__))
    pycity_calc_path = os.path.dirname(os.path.dirname(this_path))
    load_path = os.path.join(pycity_calc_path, 'toolbox', 'analyze',
                             'input', city_filename)

    #  If True, networks are routed along the street graph; otherwise a
    #  plain building-to-building minimum spanning tree is used
    use_street_network = True

    #  Load pickle city file
    city = pickle.load(open(load_path, mode='rb'))

    #  Extract list of all building nodes (should be connected to lhn)
    nodelist = city.nodelist_building

    #  Add heating network to city district
    #  (temp_vl/temp_rl in degree Celsius; c_p in J/(kg*K), rho in kg/m^3
    #  -- presumably properties of water; verify against add_lhn_to_city)
    add_lhn_to_city(city, list_build_node_nb=nodelist, temp_vl=90,
                    temp_rl=50, c_p=4186, rho=1000,
                    use_street_network=use_street_network,
                    network_type='heating',
                    plot_stepwise=False)

    #  Get infos about city graph
    print('City edge info:')
    print(city.edges(data=True))
    print('Edges without data:')
    print(city.edges(data=False))

    #  Plot city district
    cityvis.plot_city_district(city=city, plot_lhn=True, plot_deg=True)

    #  Add deg to city (on existing heating network)
    #  Results in heating_and_deg edge
    add_deg_to_city(city=city, list_build_node_nb=[1001, 1002],
                    use_street_network=use_street_network)

    #  Get infos about city graph
    print('City edge info:')
    print(city.edges(data=True))
    print('Edges without data:')
    print(city.edges(data=False))

    #  List all heating-connected node ids (incl. pure network nodes)
    list_lhn = \
        netop.get_list_with_energy_net_con_node_ids(city=city,
                                                    network_type='heating',
                                                    build_node_only=False)

    print()
    print('LHN list: ', list_lhn)
    print('Length lhn list: ', len(list_lhn[0]))

    #  Same query, restricted to building nodes only
    list_lhn = \
        netop.get_list_with_energy_net_con_node_ids(city=city,
                                                    network_type='heating',
                                                    build_node_only=True)

    print()
    print('LHN list (building nodes, only): ', list_lhn)

    #  Plot city district
    cityvis.plot_city_district(city=city, plot_lhn=True, plot_deg=True,
                               plot_build_labels=True, plot_heat_labels=True)

    # #  Plot multi city district
    # cityvis.plot_multi_city_district(city=city, main_save_path=this_path,
    #                                  equal_axis=False, fig_adjust='a4_half',
    #                                  dpi=300)

    #  Collect all network nodes that carry heat (pure 'heating' or
    #  combined 'heating_and_deg' nodes)
    list_heat_nodes = []
    for n in city.nodes():
        if 'node_type' in city.nodes[n]:
            if (city.nodes[n]['node_type'] == 'heating' or
                city.nodes[n]['node_type'] == 'heating_and_deg'):
                list_heat_nodes.append(n)

    print()
    print('List heating network nodes: ', list_heat_nodes)
    print('Number of heating nodes: ', len(list_heat_nodes))
| RWTH-EBC/pyCity_calc | pycity_calc/toolbox/dimensioning/dim_networks.py | dim_networks.py | py | 26,636 | python | en | code | 7 | github-code | 13 |
import os
import re

#  Do with /bioSamples/list_biosamples.txt if for all data
#  Do with /bioSamples/list_randomInit_biosamples.txt if for labeled data
ALL_IDS_PATH = "/bioSamples/list_biosamples.txt"
LABELED_IDS_PATH = "/bioSamples/list_randomInit_biosamples.txt"
JSON_DIR = "/bioSamples/allJsons"
OUTPUT_PATH = "/bioSamples/keepLoading.txt"


def sample_id_from_filename(filename):
    """Return the biosample id encoded in *filename*.

    Strips any directory components and a trailing '.json' extension.
    The original code used ``re.sub(".json", ...)``, which is both
    unescaped (the dot matches any character) and unanchored, so an id
    containing e.g. 'Xjson' in the middle would be mangled.
    """
    base = os.path.basename(filename)
    return re.sub(r"\.json$", "", base)


def read_id_set(path):
    """Read one id per line from *path* into a set (whitespace stripped)."""
    with open(path, "r") as handle:
        return {line.rstrip() for line in handle}


def main():
    """Write the ids that still need downloading to OUTPUT_PATH."""
    ids = read_id_set(ALL_IDS_PATH)
    print(len(ids))

    #  Ids already covered: the labeled list plus every json already on disk
    already_got = read_id_set(LABELED_IDS_PATH)
    for current_file in os.listdir(JSON_DIR):
        already_got.add(sample_id_from_filename(current_file))

    ids -= already_got
    print(len(ids))

    with open(OUTPUT_PATH, "w") as write_file:
        #  'sample_id' instead of 'id' to avoid shadowing the builtin
        for sample_id in ids:
            write_file.write(sample_id + "\n")


if __name__ == "__main__":
    main()
| toolzakinbo/racegeo | scripts/download.py | download.py | py | 859 | python | en | code | 0 | github-code | 13 |
21264388266 | #
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
# @lc code=start
from typing import List
class Solution:
    """LeetCode 79 -- Word Search.

    Two equivalent DFS/backtracking strategies:

    * ``exist``       -- marks the visited cell in-place with a '#'
                         sentinel and restores it on backtrack.
    * ``word_search`` -- tracks visited cells in a separate boolean grid.
    """

    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if *word* can be built from sequentially adjacent
        cells of *board*, using each cell at most once per path."""
        for r in range(len(board)):
            for c in range(len(board[0])):
                # Only start a DFS from cells matching the first letter.
                if board[r][c] != word[0]:
                    continue
                if self.helper(board, word, 0, r, c):
                    return True
        return False

    def helper(self, board, word, index, r, c):
        """DFS from (r, c) matching word[index:]. Mutates *board* during
        the search but restores it before returning."""
        # Out of bounds or character mismatch: dead end.
        if r < 0 or r >= len(board) or c < 0 or c >= len(board[0]):
            return False
        if board[r][c] != word[index]:
            return False
        if index == len(word) - 1:
            return True
        # Mark the visited cell with '#' so one path cannot reuse it
        # (e.g. "ABCB" must not revisit the 'B').
        temp = board[r][c]
        board[r][c] = '#'
        # Explore the 4 neighbours; 'or' short-circuits on first success.
        found = (self.helper(board, word, index + 1, r - 1, c) or
                 self.helper(board, word, index + 1, r, c + 1) or
                 self.helper(board, word, index + 1, r + 1, c) or
                 self.helper(board, word, index + 1, r, c - 1))
        # Restore the cell to its original value.
        board[r][c] = temp
        return found

    def word_search(self, board, word):
        """Same problem, but visited cells are tracked in a boolean grid
        instead of mutating *board*."""
        visited = [[False] * len(board[0]) for _ in range(len(board))]
        for r in range(len(board)):
            for c in range(len(board[0])):
                if board[r][c] != word[0]:
                    continue
                if self.helper_2(board, word, r, c, 0, visited):
                    return True
        return False

    def helper_2(self, board, word, row, col, index, visited):
        """DFS from (row, col) matching word[index:], using *visited* to
        avoid reusing a cell within the current path."""
        if row < 0 or row >= len(board) or col < 0 \
                or col >= len(board[0]) or visited[row][col]:
            return False
        if board[row][col] != word[index]:
            return False
        if index == len(word) - 1:
            return True
        visited[row][col] = True
        # Bug fix: the recursive calls previously targeted ``helper``,
        # whose signature is (board, word, index, r, c) -- passing the six
        # arguments below raised a TypeError. They must recurse into
        # ``helper_2`` instead.
        found = (self.helper_2(board, word, row - 1, col, index + 1, visited) or
                 self.helper_2(board, word, row, col + 1, index + 1, visited) or
                 self.helper_2(board, word, row + 1, col, index + 1, visited) or
                 self.helper_2(board, word, row, col - 1, index + 1, visited))
        visited[row][col] = False
        return found
# @lc code=end
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "ABCCED"
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "SEE"
# 如果不标记已经访问过的cell, "ABC" C向左重复访问B, 会得到true的结果.
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "ABCB"
s = Solution()
print(s.exist(board, word)) | sundaycat/Leetcode-Practice | solution/79. word-search.py | 79. word-search.py | py | 3,067 | python | en | code | 0 | github-code | 13 |
10937316950 | from .models import AirTrafficController, ArrivalFlight, DepartureFlight, Lane
from .src.consts import AOD, DOM_ID, KEY, MODAL_FIELD, STRING, VALUE
from .src.database_operation import\
get_earliest_object_from_a_day, \
get_latest_datetime_from_a_model, \
get_list_from_object_field
from .src.specific_functions import \
create_pagination_return_page_and_number_of_pages, \
generate_flight_management_panel_dom_parameters, \
set_flight_management_panel_non_status_and_status
from django.shortcuts import render
def index(request):
    """Render the airport management dashboard.

    Builds the arrival/departure flight table paginations (opened on the
    last page, i.e. the most recently added flights) and pre-populates the
    flight management panel with the earliest arrival flight of the latest
    recorded day.
    """
    #  Create paginations.
    flight_table_paginations = {}
    flight_table_paginations_parameters = [
        [KEY.ARRIVAL_FLIGHT_TABLE_PAGINATION, ArrivalFlight],
        [KEY.DEPARTURE_FLIGHT_TABLE_PAGINATION, DepartureFlight]
    ]

    #  Create arrival flight table pagination and departure flight
    #  pagination. Return the last page of the pagination; the last page
    #  holds the latest flights added to the database.
    for flight_table_pagination_parameters in\
            flight_table_paginations_parameters:
        flight_table_paginations[flight_table_pagination_parameters[0]] =\
            create_pagination_return_page_and_number_of_pages(
                flight_table_pagination_parameters[1],
                MODAL_FIELD.SCHEDULED_DATETIME,
                VALUE.PAGINATION_OBJECTS_COUNT_PER_PAGE,
                KEY.LAST
            )

    #  For the initial page, set the flight management panel only to
    #  display the earliest flight from the latest recorded day. This
    #  could be changed based on the preference.
    latest_datetime_from_arrivalflight = get_latest_datetime_from_a_model(
        ArrivalFlight,
        MODAL_FIELD.SCHEDULED_DATETIME
    )

    #  Get the earliest `ArrivalFlight` document from the latest day as
    #  the first document shown in the flight management panel.
    earliest_arrivalflight_from_the_latest_day =\
        get_earliest_object_from_a_day(
            ArrivalFlight,
            MODAL_FIELD.SCHEDULED_DATETIME,
            latest_datetime_from_arrivalflight
        )

    #  Create parameters for flight management panel DOM.
    flight_management_panel_initial_dom =\
        generate_flight_management_panel_dom_parameters(
            earliest_arrivalflight_from_the_latest_day
        )

    #  Dictionary that will be used to render views. Dictionary for
    #  initially displayed flight management panel.
    #  PENDING: Could be refactored alongside with the
    #  `table_requests_flight()` function.
    parameters = {}
    parameters = set_flight_management_panel_non_status_and_status(
        parameters,
        flight_management_panel_initial_dom
        [KEY.FMP_NON_STATUS_DOM_PARAMETERS],
        flight_management_panel_initial_dom[KEY.FMP_STATUS]
    )

    #  Assigning airport manager into client's render view.
    parameters[KEY.AIRPORT_MANAGER] = request.user

    #  Assigning all ATCs into client's render view.
    parameters[KEY.ATC_OBJECTS] = AirTrafficController.objects.all()

    #  Assigning all Lanes into client's render view.
    parameters[KEY.LANE_OBJECTS] = Lane.objects.all()

    #  Parameters to help set initial flight online ATCs form.
    parameters[KEY.FLIGHT_ONLINE_ATC_FORM_ARRIVALDEPARTURE] = AOD.ARRIVAL
    parameters[KEY.FLIGHT_ONLINE_ATC_FORM_FLIGHT_ID] =\
        earliest_arrivalflight_from_the_latest_day.id
    parameters[KEY.FLIGHT_ONLINE_ATC_FORM_FLIGHT_ONLINE_ATCS_ID] =\
        get_list_from_object_field(
            earliest_arrivalflight_from_the_latest_day.online_atcs, "id")

    #  Parameters to help to set initial flight lane form.
    parameters[KEY.FLIGHT_LANE_FORM_ARRIVALDEPARTURE] =\
        parameters[KEY.FLIGHT_ONLINE_ATC_FORM_ARRIVALDEPARTURE]
    parameters[KEY.FLIGHT_LANE_FORM_FLIGHT_ID] =\
        parameters[KEY.FLIGHT_ONLINE_ATC_FORM_FLIGHT_ID]
    #  An unassigned lane is rendered as an empty string.
    #  (Was `lane == None`; identity comparison is the correct idiom.)
    parameters[KEY.FLIGHT_LANE_FORM_FLIGHT_LANE_ID] =\
        "" if earliest_arrivalflight_from_the_latest_day.lane is None \
        else earliest_arrivalflight_from_the_latest_day.lane.id

    #  Both arrival flight table and departure flight table properties.
    parameters[KEY.TABLES_PROPERTIES] = [
        {
            KEY.ARRIVALDEPARTUREFLIGHT_OBJECTS:
                flight_table_paginations[
                    KEY.ARRIVAL_FLIGHT_TABLE_PAGINATION][KEY.OBJECTS],
            KEY.TABLE_PAGINATION_NUMBER_OF_PAGES:
                flight_table_paginations[
                    KEY.ARRIVAL_FLIGHT_TABLE_PAGINATION]
                [KEY.NUMBER_OF_PAGES],
            KEY.TABLE_TITLE: STRING.ARRIVAL_TABLE_TITLE,
            KEY.TABLE_ID: DOM_ID.ARRIVAL_FLIGHT_TABLE,
            KEY.TABLE_ERROR_ID: DOM_ID.ARRIVAL_FLIGHT_TABLE_ERROR,
            KEY.TABLE_PAGINATION_ID: DOM_ID.ARRIVAL_FLIGHT_TABLE_PAGINATION,
            KEY.TABLE_PAGINATION_NUMBER_OF_PAGES_ID:
                DOM_ID.ARRIVAL_FLIGHT_TABLE_PAGINATION_NUMBER_OF_PAGES,
            KEY.TABLE_REQUESTING_ID:
                DOM_ID.ARRIVAL_FLIGHT_TABLE_REQUESTING
        },
        {
            KEY.ARRIVALDEPARTUREFLIGHT_OBJECTS:
                flight_table_paginations[
                    KEY.DEPARTURE_FLIGHT_TABLE_PAGINATION][KEY.OBJECTS],
            KEY.TABLE_PAGINATION_NUMBER_OF_PAGES:
                flight_table_paginations[
                    KEY.DEPARTURE_FLIGHT_TABLE_PAGINATION]
                [KEY.NUMBER_OF_PAGES],
            KEY.TABLE_TITLE: STRING.DEPARTURE_TABLE_TITLE,
            KEY.TABLE_ID: DOM_ID.DEPARTURE_FLIGHT_TABLE,
            KEY.TABLE_ERROR_ID: DOM_ID.DEPARTURE_FLIGHT_TABLE_ERROR,
            KEY.TABLE_PAGINATION_ID:
                DOM_ID.DEPARTURE_FLIGHT_TABLE_PAGINATION,
            KEY.TABLE_PAGINATION_NUMBER_OF_PAGES_ID:
                DOM_ID.DEPARTURE_FLIGHT_TABLE_PAGINATION_NUMBER_OF_PAGES,
            KEY.TABLE_REQUESTING_ID:
                DOM_ID.DEPARTURE_FLIGHT_TABLE_REQUESTING
        }
    ]

    #  Render index.html with the dictionary as parameter.
    return render(request, "airport_management/index.html", parameters)
import sys


def max_triangle_path(triangle):
    """Return the maximum top-to-bottom path sum in *triangle*.

    *triangle* is a list of rows; row i holds i + 1 integers. From any
    cell you may step to one of the two cells directly below it.

    Bottom-up DP: the triangle is folded row by row into one accumulator,
    using O(N) extra memory. Returns 0 for an empty triangle.

    The original script seeded dp rows 1 and 2 unconditionally, so it
    raised IndexError for a triangle of height 1; this version handles
    any height >= 0.
    """
    if not triangle:
        return 0
    # acc[j] = best path sum starting at cell (current_row, j)
    acc = list(triangle[-1])
    for row in reversed(triangle[:-1]):
        acc = [value + max(acc[j], acc[j + 1])
               for j, value in enumerate(row)]
    return acc[0]


def main():
    """Read a triangle from stdin (BOJ 1932 format) and print the answer."""
    n = int(sys.stdin.readline())
    triangle = [list(map(int, sys.stdin.readline().split()))
                for _ in range(n)]
    print(max_triangle_path(triangle))


if __name__ == '__main__':
    main()
| gitdog01/AlgoPratice | random/dp/1932/main.py | main.py | py | 805 | python | en | code | 0 | github-code | 13 |
import json
import os
import random
import time

# Cache remembers when the current profile expires and which one is active.
# (The original imported `exists` from the private `genericpath` module;
# the public API is os.path.)
CACHE_FILE = "/home/sejapoe/.cache/color-changer.json"
# Config holds the profiles (lists of shell commands), the rotation
# duration and the isRandom / hasEpochAnchor flags.
CONFIG_FILE = "/home/sejapoe/.config/color-changer.json"


def load_cache():
    """Load the rotation cache, creating a default one on first run."""
    if not os.path.exists(CACHE_FILE):
        cache = {"currentEnd": 0, "currentIndex": -1}
        with open(CACHE_FILE, "w") as outp:
            outp.write(json.dumps(cache))
        return cache
    with open(CACHE_FILE, "r") as inpt:
        return json.load(inpt)


def save_cache(cache):
    """Persist *cache* so the schedule survives restarts."""
    with open(CACHE_FILE, "w") as outp:
        outp.write(json.dumps(cache))


def pick_next_index(config, current_index):
    """Choose the next profile index; never repeats the current one."""
    count = len(config["profiles"])
    if config["isRandom"]:
        candidates = [i for i in range(count) if i != current_index]
        return random.choice(candidates)
    return (current_index + 1) % count


def main():
    """Rotate color profiles forever, sleeping until each one expires."""
    cache = load_cache()
    with open(CONFIG_FILE, "r") as inpt:
        config = json.load(inpt)

    while True:
        wait = cache["currentEnd"] - int(time.time())
        print(wait)
        if wait > 0:
            time.sleep(wait)

        new_index = pick_next_index(config, cache["currentIndex"])
        # A profile is a list of shell commands to apply the color scheme.
        for cmd in config["profiles"][new_index]:
            os.system(cmd)

        current_end = int(time.time()) + config["duration"]
        if config["hasEpochAnchor"]:
            # Snap the deadline to a multiple of the duration so every
            # instance switches at the same wall-clock moments.
            current_end = (current_end // config["duration"]) \
                * config["duration"]

        cache["currentEnd"] = current_end
        cache["currentIndex"] = new_index
        save_cache(cache)


if __name__ == "__main__":
    main()
| sejapoe/color-changer | color-changer.py | color-changer.py | py | 1,202 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.