seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12963151156 | from __future__ import print_function
import concurrent.futures
from core.colors import info
def flash(function, links, thread_count):
    """Process the URLs and use a thread pool to execute a function.

    Args:
        function: callable invoked once per link.
        links: iterable (typically a set) of URLs to process.
        thread_count: number of worker threads; also controls how often the
            progress line is refreshed.
    """
    # Materialise the iterable so len() is stable for progress reporting.
    links = list(links)
    # ``with`` guarantees the pool is shut down and its worker threads are
    # joined even if ``function`` raises (the original never shut it down).
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=thread_count) as threadpool:
        futures = [threadpool.submit(function, link) for link in links]
        for i, _ in enumerate(concurrent.futures.as_completed(futures)):
            # Refresh the progress line every ``thread_count`` completions
            # and on the final one.
            if i + 1 == len(links) or (i + 1) % thread_count == 0:
                print('%s Progress: %i/%i' % (info, i + 1, len(links)),
                      end='\r')
    print('')
| s0md3v/Photon | core/flash.py | flash.py | py | 673 | python | en | code | 10,128 | github-code | 36 |
3224536254 | from var import *
from output import *
def battle_stats():
    """Menu: show the stats screen for either combatant during battle."""
    entity = V.player  # default if an unrecognised option is typed
    T.clear_text()
    T.print("(1) {}\n(2) {}\n(0) Back".format(V.player.name, V.mob.name), "\n", V.c_text2)
    select = T.input(": ")
    if select == "0": return
    if select == "1": entity = V.player
    if select == "2": entity = V.mob
    V.entity_stats(entity, "battle")
def battle_attack():
    """Menu: let the player attack with the weapon in the chosen hand."""
    hand = "right hand"  # default if an unrecognised option is typed
    T.clear_text()
    T.print("(1) Left Hand\n(2) Right Hand\n(0) Back", "\n", V.c_text2)
    select = T.input(": ")
    if select == "0": return
    if select == "1": hand = "left hand"
    if select == "2": hand = "right hand"
    # A successful combat roll lands the hit.
    if V.roll_skill(V.player, "combat"):
        dmg = V.player.get_damage(hand)
        V.mob.take_damage(dmg)
        # NOTE(review): the message subtracts armor but take_damage() receives
        # the raw dmg -- confirm armor is applied inside take_damage.
        T.text("{} attacked {} for {} damage".format(V.player.name, V.mob.name, max(1, dmg-V.mob.get_armor())))
    else:
        T.text("{} missed".format(V.player.name))
    # Acting costs stats and hands the turn to the AI.
    V.player.take_stat_damage()
    V.ai_turn = True
    V.state = "battle"
def battle_magic():
    """Menu: cast one of the player's known spells on either combatant."""
    entity = V.mob  # default target if an unrecognised option is typed
    T.clear_text()
    T.print("(1) {}\n(2) {}\n(0) Back".format(V.player.name, V.mob.name), "\n", V.c_text2)
    sel = T.input(": ")
    if sel == "0":
        V.state = "battle"
        return
    if sel == "1": entity = V.player
    if sel == "2": entity = V.mob
    T.clear_text()
    T.print("Select a spell to use", "\n", V.c_text1)
    # List each known spell as "[level] name ... cost", padded to menu width.
    for i in magic:
        if i in V.player.spells:
            margin = T.menu_width - ( len(i) + len(str(V.player.spells[i])) + len(str(magic[i]['cost'])) )
            T.print("[{}] {}{}{}".format(V.player.spells[i], i, " "*margin, magic[i]['cost']), "\n", V.c_text2)
    T.print("(0) Back", "\n", V.c_text2)
    sel = T.input(": ")
    if sel == "0":
        V.state = "battle"
        return
    elif sel in V.player.spells:
        # Successful cast roll applies the spell, otherwise it fizzles;
        # either way casting costs stats and ends the player's turn.
        if V.roll_skill(V.player, 'cast'):
            V.player.use_spell(sel, entity)
            T.text("{} used {} on {}".format(V.player.name, sel, entity.name))
        else:
            T.text("{} miscasted {} on {}".format(V.player.name, sel, entity.name))
        V.player.take_stat_damage()
        V.ai_turn = True
        V.state = "battle"
def battle_item():
    """Menu: let the player pick and use an item, then return to battle.

    Fix: the original signature was ``battle_item(self)`` even though this is
    a module-level function invoked as ``battle_item()`` from battle_player(),
    which raised TypeError; the stray parameter is removed.
    """
    sel = V.inventory_selection(V.player.inventory, "battle")
    if sel != "nothing":
        V.player.use_item(sel)
        T.text("{} used {}".format(V.player.name, sel))
    V.state = "battle"
def battle_win():
    """Handle victory: award experience and gold, clear afflictions, loot."""
    # Experience scales with the mob's base exp, level and growth curve.
    exp_bonus = int(mobs[V.mob.race]['exp']*(V.mob.level*mobs[V.mob.race]['curve']))
    V.player.gain_experience(exp_bonus)
    # Winner takes 75% of the loser's gold.
    gold = int(V.mob.gold*0.75)
    V.player.gold = int(V.player.gold+gold)
    V.mob.gold = int(V.mob.gold-gold)
    if V.player.confused: V.player.confused = False
    if V.player.stunned: V.player.stunned = False
    T.text("{} won the battle, gaining {} experience and {} gold".format(V.player.name, exp_bonus, gold))
    # Move the mob's equipped gear into its inventory so it can be looted.
    for item in V.mob.equip:
        if V.mob.equip[item] != "nothing":
            V.mob.add_item(V.mob.equip[item])
    # Offer every inventory stack to the player, one prompt per item type.
    for item in V.mob.inventory:
        T.print("(1) Take {}\n(0) Skip".format(item), "\n", V.c_text2)
        sel = T.input(": ")
        if sel == "0": pass
        else:
            plural = ""
            if V.mob.inventory[item] > 1: plural = "s"
            for i in range(V.mob.inventory[item]):
                V.player.add_item(item)
            T.text("{} looted {} {}{} from {}".format(V.player.name, V.mob.inventory[item], item, plural, V.mob.name))
    V.state = "main_menu"
def battle_lose():
    """Handle defeat: forfeit 75% of gold, fully restore and cure the player."""
    gold = int(V.player.gold*0.75)
    V.player.gold = int(V.player.gold-gold)
    V.mob.gold = int(V.mob.gold+gold)
    # Restore to full: upper-case HP/MP are the maximums, lower-case current.
    V.player.hp = V.player.HP
    V.player.mp = V.player.MP
    if V.player.poisoned: V.player.poisoned = False
    if V.player.confused: V.player.confused = False
    if V.player.stunned: V.player.stunned = False
    if V.player.burned: V.player.burned = False
    T.text("{} lost the battle, losing {} gold".format(V.player.name, gold))
    V.state = "main_menu"
def battle_ai():
    """Take the mob's turn: possibly lose it to an affliction, drink a potion
    when low, then cast an affordable spell or fall back to a weapon attack.

    Fixes over the original:
      * ``affliction`` was misspelled ``affiction`` when set, so the
        "couldn't attack" message always printed an empty reason.
      * the both-stats-low potion branch was unreachable because the
        single-stat ``elif`` branches always matched first.
      * ``can_cast`` could remain True with no castable spell, in which case
        the mob neither cast nor attacked and ``V.ai_turn`` was never
        cleared; now the mob always acts and the turn always ends.
    """
    affliction = ""
    can_perform_action = True
    # 10% chance per active affliction to lose the turn entirely.
    if V.mob.stunned:
        if random.randint(0, 100) < 10:
            affliction = "being stunned"
            can_perform_action = False
    if V.mob.confused:
        if random.randint(0, 100) < 10:
            affliction = "confusion"
            can_perform_action = False
    if can_perform_action:
        if V.mob.hp < V.mob.HP/2 and V.mob.mp < V.mob.MP/2:
            # Both stats low: prefer an item that restores both.
            try:
                for i in V.mob.inventory:
                    if 'hp' in items[i] and 'mp' in items[i]:
                        if V.mob.hp+items[i]['hp'] <= V.mob.HP or V.mob.mp+items[i]['mp'] <= V.mob.MP:
                            V.mob.use_item(i)
                            T.text("{} used {}".format(V.mob.name, i))
            except: pass  # best effort: unknown items are simply skipped
        elif V.mob.hp < V.mob.HP/2:
            try:
                for i in V.mob.inventory:
                    if 'hp' in items[i] and not 'mp' in items[i]:
                        if V.mob.hp+items[i]['hp'] <= V.mob.HP:
                            V.mob.use_item(i)
                            T.text("{} used {}".format(V.mob.name, i))
            except: pass
        elif V.mob.mp < V.mob.MP/2:
            try:
                for i in V.mob.inventory:
                    if 'mp' in items[i] and not 'hp' in items[i]:
                        if V.mob.mp+items[i]['mp'] <= V.mob.MP:
                            V.mob.use_item(i)
                            T.text("{} used {}".format(V.mob.name, i))
            except: pass
        # The fuller the mana pool, the likelier the mob tries to cast.
        wants_to_cast = random.randint(0, V.mob.MP) < V.mob.mp
        can_cast = False
        spell = "nothing"
        entity = V.mob
        if wants_to_cast and len(V.mob.spells) > 0:
            for s in V.mob.spells:
                if magic[s]['cost'] <= V.mob.mp:
                    can_cast = True
                    # Damage spells target the player, healing spells the mob.
                    if 'damage' in magic[s]:
                        entity = V.player
                    if 'hp' in magic[s]:
                        entity = V.mob
                    spell = s
        if can_cast and spell != "nothing":
            if V.roll_skill(V.mob, 'cast'):
                V.mob.use_spell(spell, entity)
                T.text("{} casted {} on {}".format(V.mob.name, spell, entity.name))
            else:
                T.text("{} miscasted {} on {}".format(V.mob.name, spell, entity.name))
            V.mob.take_stat_damage()
            V.ai_turn = False
        else:
            # No castable spell: attack with whichever hand holds a weapon.
            hand = "right hand"
            if V.mob.equip['left hand'] != "nothing": hand = "left hand"
            elif V.mob.equip['right hand'] != "nothing": hand = "right hand"
            if V.roll_skill(V.mob, 'combat'):
                dmg = V.mob.get_damage(hand)
                V.player.take_damage(dmg)
                T.text("{} attacked {} for {} damage".format(V.mob.name, V.player.name, max(1, dmg-V.player.get_armor())))
            else:
                T.text("{} missed".format(V.mob.name))
            V.mob.take_stat_damage()
            V.ai_turn = False
    else:
        T.text("{} couldn't attack due to {}".format(V.mob.name, affliction))
        V.mob.take_stat_damage()
        V.ai_turn = False
def battle_player():
    """Show the player's battle menu and dispatch the chosen action."""
    T.print("(1) Attack\n(2) Stats\n(3) Magic\n(4) Item\n(0) Run", "\n", V.c_text2)
    sel = T.input(": ")
    if sel == "0": V.state = "main_menu"
    elif sel == "1": battle_attack()
    elif sel == "2": battle_stats()
    elif sel == "3": battle_magic()
    elif sel == "4": battle_item()
def battle():
    """Draw the battle HUD, check win/lose, then run the active side's turn."""
    T.clear_text()
    p_health = "HP:{}/{}".format(V.player.hp, V.player.HP)
    m_health = "HP:{}/{}".format(V.mob.hp, V.mob.HP)
    p_mana = "MP:{}/{}".format(V.player.mp, V.player.MP)
    m_mana = "MP:{}/{}".format(V.mob.mp, V.mob.MP)
    # Pad each row so the player's column is left-aligned and the mob's
    # right-aligned within the menu width.  (The unused ``health_margin``
    # local from the original was removed.)
    T.print("{}{}{}".format(V.player.name," "*(T.menu_width-(len(V.player.name)+len(V.mob.name))),V.mob.name), "\n", V.c_text1)
    T.print("{}{}{}".format(p_health," "*(T.menu_width-(len(p_health)+len(m_health))),m_health), "\n", V.c_text1)
    T.print("{}{}{}".format(p_mana," "*(T.menu_width-(len(p_mana)+len(m_mana))),m_mana), "\n", V.c_text1)
    print()
    if V.player.hp <= 0:
        # battle_lose() is a module-level function like battle_win(); the
        # original called V.battle_lose(), which does not exist on the shared
        # state object.
        battle_lose()
        return
    if V.mob.hp <= 0:
        battle_win()
        return
    if V.ai_turn: battle_ai()
    else: battle_player()
def prepare_battle():
    """Spawn a random mob, announce the encounter and enter the battle state.

    Fix: removed the unused ``chcs`` local and the redundant duplicate
    ``not V.ai_turn`` check by nesting the retreat prompt.
    """
    # Randomly decide who has the initiative.
    V.ai_turn = random.choice([True, False])
    V.randomize_mob()
    if V.ai_turn: txt = "{} is engaging {}".format(V.mob.race.capitalize(), V.player.name)
    else: txt = "{} is engaging {}".format(V.player.name, V.mob.race)
    T.clear_text()
    T.text(txt)
    # Only offer a retreat when the player has the initiative.
    if not V.ai_turn:
        T.print("(1) Engage\n(0) Retreat", "\n", V.c_text2)
        sel = T.input(": ")
        if sel == "0":
            V.state = "main_menu"
            return
    V.state = "battle"
| ihave13digits/PythonTextRPG | ESbattle.py | ESbattle.py | py | 8,987 | python | en | code | 4 | github-code | 36 |
70806985703 | import sys
sys.stdin = open('input.txt')
def solution():
    """Advance the flood-escape simulation by one minute (one BFS layer).

    Expands the hedgehog frontier first, then the water frontier, using the
    module-level ``move``/``water`` sets and the shared ``visited`` grid
    (positive = hedgehog travel time, -1 = water or rock).
    Returns the next (hedgehog, water) frontiers.
    """
    new_move = set()  # cells the hedgehog occupies after this minute
    new_water = set()  # cells the water occupies after this minute
    for y, x in move:  # expand every current hedgehog cell
        for k in range(4):  # four orthogonal neighbours
            r = y + dr[k]
            c = x + dc[k]
            if 0 <= r < R and 0 <= c < C and not visited[r][c]:  # free cell
                visited[r][c] = visited[y][x] + 1  # record travel time
                new_move.add((r, c))
    for y, x in water:  # expand every current water cell
        for k in range(4):
            r = y + dr[k]
            c = x + dc[k]
            if 0 <= r < R and 0 <= c < C and 0 <= visited[r][c] and land[r][c] != 'D':  # water never enters the den
                visited[r][c] = -1  # mark flooded so the hedgehog cannot enter
                new_water.add((r, c))
    return new_move, new_water
# BOJ 3055: the hedgehog must reach the beaver's den before the water does.
# (Broken multi-line comments from the original dump were repaired and
# translated to English; the code itself is unchanged.)
dr = [-1, 0, 1, 0]
dc = [0, 1, 0, -1]
R, C = map(int, sys.stdin.readline().split())  # rows, columns
land = [list(sys.stdin.readline().strip()) for _ in range(R)]  # forest map
visited = [[0] * C for _ in range(R)]  # >0: hedgehog distance, -1: water/rock
water = set()  # water cells that expand on the next minute
move = set()   # hedgehog cells that expand on the next minute
for i in range(R):
    for j in range(C):
        if land[i][j] == 'D':  # the beaver's den (destination)
            end = (i, j)
        elif land[i][j] == 'S':  # hedgehog's start position
            move.add((i, j))
            visited[i][j] = 1  # start cell counts as distance 1
        elif land[i][j] == '*':  # water
            water.add((i, j))
            visited[i][j] = -1  # block the hedgehog
        elif land[i][j] == 'X':  # rock
            visited[i][j] = -1  # blocks both water and hedgehog
while move:  # while the hedgehog can still spread
    if visited[end[0]][end[1]]:  # reached the den -> stop
        break
    move, water = solution()  # next hedgehog and water frontiers
    move = move.difference(water)  # water floods cells the hedgehog just entered
answer = visited[end[0]][end[1]] - 1  # minutes needed (start counted as 1)
if answer > 0:
    print(answer)
else:
    print('KAKTUS')
4255373744 | from unittest import TestCase, main
from leet.merge_k_sorted_lists.main import Solution
from data_structures.list_node import ListNode
s = Solution()
class TestSuite(TestCase):
    """Tests for Solution.mergeKLists (LeetCode 23, merge k sorted lists)."""

    def test_1(self):
        # Three sorted input lists: 1->4->5, 1->3->4, 2->6.
        list1 = ListNode(1, ListNode(4, ListNode(5)))
        list2 = ListNode(1, ListNode(3, ListNode(4)))
        list3 = ListNode(2, ListNode(6))
        lists = [
            list1,
            list2,
            list3
        ]
        # Expected merged result: 1->1->2->3->4->4->5->6.
        expected = (
            ListNode(1, ListNode(1, ListNode(
                2, ListNode(3, ListNode(
                    4, ListNode(4, ListNode(
                        5, ListNode(6))))))))
        )
        # ``s`` is the module-level Solution instance defined above.
        self.assertTrue(ListNode.compare(
            expected, s.mergeKLists(lists)))
if __name__ == '__main__':
main()
| blhwong/algos_py | leet/merge_k_sorted_lists/test.py | test.py | py | 769 | python | en | code | 0 | github-code | 36 |
27433129284 | import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits import mplot3d
import math
class Interpolator:
    """Inverse-distance-weighted (Shepard-style) interpolation of action
    qualities over a set of knot points, plus two update rules that pull the
    interpolated surface toward a newly observed (action, quality) sample.

    ``actions`` holds the knot coordinates (2-D actions) and ``qualities``
    the quality value at each knot.
    """

    def __init__(self):
        DEFAULT_STEP = 1  # 10 ** 5
        self.c = 0.9  # Smoothing factor
        self.e = 0.1  # sys.float_info.epsilon  # Really small number
        self.actions = []  # knot coordinates
        self.qualities = []  # quality value at each knot
        self.knots_count = 0
        self.step = DEFAULT_STEP

    def distance(self, chosen_action, i, q_max):
        """Weighting 'distance' to knot *i*: squared Euclidean distance plus
        a quality penalty, offset by ``e`` so it is never zero."""
        return np.linalg.norm(np.subtract(chosen_action, self.actions[i])) ** 2 + self.c * (
            q_max - self.qualities[i]) + self.e

    def wsum(self, chosen_action):
        """Sum of knot qualities weighted by inverse distance."""
        output = 0
        q_max = max(self.qualities)
        for i in range(self.knots_count):
            output += self.qualities[i] / self.distance(chosen_action, i, q_max)
        return output

    def norm(self, chosen_action):
        """Normalisation term: sum of the inverse distances."""
        output = 0
        q_max = max(self.qualities)
        for i in range(self.knots_count):
            output += 1 / self.distance(chosen_action, i, q_max)
        return output

    def get_quality(self, action):
        """Interpolated quality at *action* (0 when the ratio is NaN)."""
        value = self.wsum(action) / self.norm(action)
        if math.isnan(value):
            return 0
        else:
            return value

    def update_function_2(self, action, quality, update_action=True):
        """Gradient-style update of knot qualities (and optionally positions)
        so the interpolated value at *action* moves toward *quality*.

        NOTE(review): the derivative terms are appended while ``num``/``den``
        are still partial sums, so each knot's derivative only sees the knots
        processed before it -- confirm this is intended.
        """
        q = np.array(self.qualities)
        knot_count = len(q)
        optimal_action = action
        action = np.array(self.actions)
        Q_new = quality
        num = 0
        den = 0
        deriv_q = []
        deriv_u0 = []
        deriv_u1 = []
        for it in range(0, knot_count):
            weight = np.linalg.norm(optimal_action - action[it]) + self.c * (q.max() - q[it] + self.e)
            den = den + (1.0 / weight)
            num = num + (q[it] / weight)
            deriv_q.append((den * (weight + q[it] * self.c) - num * self.c) / pow((weight * den), 2))
            deriv_u0.append(((num - den * q[it]) * 2 * (action[it][0] - optimal_action[0])) / (pow(weight * den, 2)))
            deriv_u1.append(((num - den * q[it]) * 2 * (action[it][1] - optimal_action[1])) / (pow(weight * den, 2)))
        Q_dash = num / den  # current interpolated estimate at the action
        error = Q_new - Q_dash
        # Move every knot along its derivative, scaled by the error.
        for it in range(0, knot_count):
            q[it] = q[it] + error * deriv_q[it]
            action[it][0] = action[it][0] + error * deriv_u0[it]
            action[it][1] = action[it][1] + error * deriv_u1[it]
        if update_action:
            # presumably knot positions are only written back when requested
            # while qualities are always updated -- TODO confirm; the
            # original indentation is ambiguous in this dump.
            self.actions = action
        self.qualities = q

    def update_function(self, action, quality, update_action=False):
        """Relaxation update: pull each knot's quality toward *quality*,
        weighted by the inverse squared distance to *action*.

        ``update_action`` is accepted for interface parity with
        ``update_function_2`` but is unused here.
        """
        knot_count = len(self.qualities)
        # print("qualities:", self.qualities)
        # Normalise ``qualities`` to a flat Python list of floats.
        if type(self.qualities) == np.ndarray:
            self.qualities = self.qualities.tolist()
        if type(self.qualities[0]) == list:
            self.qualities = [e[0] for e in self.qualities]
        max_list = self.qualities + [float(quality)]
        q_max = max(max_list)
        for it in range(0, knot_count):
            self.qualities[it] += self.e * \
                (quality - self.qualities[it]) \
                / self.distance(action, it, q_max) ** 2

    def set_u(self, actions):
        """Set the knot actions and refresh the knot count."""
        self.actions = actions
        self.knots_count = len(self.actions)

    def set_q(self, qualities):
        """Set the knot qualities."""
        self.qualities = qualities

    def set_step(self, step):
        self.step = step

    def get_u(self):
        return self.actions

    def get_q(self):
        return self.qualities
if __name__ == "__main__":
from output_visualizer import OutputVisualizer
import cv2
u = []
interpolator = Interpolator()
for i in np.arange(-1, 1.1, 0.5):
for j in np.arange(-1, 1.1, 0.5):
u.append(np.array([i, j]))
q = [0.04448929, 0.5086165, 0.76275706, -0.2851543, 0.39455223,
-0.19585085, -0.52812827, 0.25080782, 0.4987614, 0.26595366,
-0.3598364, 0.41622806, 0.10484912, -0.11532316, -0.11455766,
-0.14297369, -0.04747943, 0.19820265, 0.5723205, 0.13500524,
-0.24156858, 0.15854892, 0.22840545, 0.35542938, -0.5061423]
visualizer = OutputVisualizer()
visualizer.render(np.append(u, [[e] for e in q], axis=1))
cv2.waitKey(3000)
interpolator.set_q(q)
interpolator.set_u(u)
# for _ in range(5):
# interpolator.update_function_2(np.array([0, 0]), 2) # , update_action=False)
# interpolator.update_function(np.array([-1, 0]), 2)#, update_action=False)
interpolator.update_function(np.array([-0.5, 1.0]), -0.6402964293956757) # , update_action=False)
q = interpolator.get_q()
u = interpolator.get_u()
visualizer.render(np.append(u, [[e] for e in q], axis=1))
cv2.waitKey(3000)
# print(interpolator.get_quality(np.array([0.75, 0])))
'''
fig = plt.figure()
ax = plt.axes() # projection="3d")
X = []
Y = []
Z = []
for throttle in np.arange(-1, 1.1, 0.1):
for steering in np.arange(-1, 1.1, 0.1):
X.append(throttle)
Y.append(steering)
Z.append(interpolator.get_quality(np.array([throttle, steering])))
'''
# ax.plot_trisurf(np.array(X), np.array(Y), np.array(Z), cmap=cm.bwr)
# throttles = [a[0] for a in u]
# steerings = [a[1] for a in u]
# ax.plot_trisurf(np.array(throttles), np.array(steerings), np.array(q))
# interpolator.update_function(np.array([1, 0]), 20)
# interpolator.update_function(np.array([1, 0]), 20)
'''
X = []
Y = []
Z = []
for throttle in np.arange(-1, 1.1, 0.1):
for steering in np.arange(-1, 1.1, 0.1):
X.append(throttle)
Y.append(steering)
Z.append(interpolator.get_quality(np.array([throttle, steering])))
ax.plot_trisurf(np.array(X), np.array(Y), np.array(Z), cmap=cm.bwr)
'''
'''
u = interpolator.get_u()
q = np.reshape(interpolator.get_q(), (-1, 5))
throttles = np.reshape([a[0] for a in u], (-1, 5))
steerings = np.reshape([a[1] for a in u], (-1, 5))
ax.contourf(np.array(throttles), np.array(steerings), np.array(q), cmap=cm.bwr)
plt.show()
'''
| TimoLoomets/FSTT_dynamics | interpolator.py | interpolator.py | py | 6,172 | python | en | code | 0 | github-code | 36 |
43679325041 | import re
listOfTexts = {"test test testing tester tester test test", "test test testing tester tester test test", "test test testing tester tester test test. The car"}
commonWords = {}
def extract(texts):
    """Count word occurrences across *texts*.

    Args:
        texts: iterable of strings.

    Returns:
        dict mapping each word to the number of times it appears.

    Fix: the original accumulated into the module-global ``commonWords``, so
    a second call double-counted earlier results; a local dict keeps each
    call independent.  The regex is also a raw string now (``\\w`` was an
    invalid escape in a plain string).
    """
    counts = {}
    for text in texts:
        # Replace every non-word character with a space, then split.
        for word in re.sub(r"[^\w]", " ", text).split():
            counts[word] = counts.get(word, 0) + 1
    return counts
wordsUsed = extract(listOfTexts)
sortedList = [(k, wordsUsed[k]) for k in sorted(wordsUsed, key=wordsUsed.get, reverse=True)]
for k,v in sortedList:
print("'"+k+"' is used "+str(v)+" times.")
| mhabash99/Common-Word-Extractor | commongWordExtractor.py | commongWordExtractor.py | py | 697 | python | en | code | 0 | github-code | 36 |
28483046981 | import numpy as np
import cv2
import sys
import argparse
# Creating the parser
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
help="path to the model used to make the prediction and generate the class activation maps")
# Parsing the arguments
args = vars(ap.parse_args())
from utilities.gapModels import MobileNetGAP
from utilities.classifier import Classifier
from utilities.helpers import *
print("โณ" + BLUE + " Loading model ... " + END)
model = MobileNetGAP(path=args["model"])
clf = Classifier(model, name='mobilenet')
print("๐พ" + BLUE + " Model loaded." + END)
def addContours(input_img, output_img, draw_bounding_box=True, draw_contours=False, threshold=100):
    """
    >>> Work In Progress <<<
    Detects the bounding boxes and/or contours in the input image and adds
    them to the output image.

    Args:
        input_img: BGR image the contours are detected in (e.g. the CAM).
        output_img: BGR image the boxes/outlines are drawn onto (in place).
        draw_bounding_box: draw one axis-aligned rectangle per contour.
        draw_contours: draw the contour outlines themselves.
        threshold: gray level used to binarise the input image.

    Returns the modified output_img.
    """
    # Convert image to gray
    gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
    # Threshold the image
    _, threshed_img = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
    # Get the external contours.  findContours returns 3 values in OpenCV
    # 3.x but only 2 in OpenCV 4.x; taking the second-to-last element works
    # for both versions (the original 3-tuple unpacking broke on 4.x).
    contours = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if draw_contours:
        # Draw the contour outlines in green.
        cv2.drawContours(output_img, contours, -1, (0, 255, 0), 5)
    if draw_bounding_box:
        # Draw a blue bounding box around each contour.
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(output_img, (x, y), (x + w, y + h), (255, 0, 0), 3)
    return output_img
def show_detection(img, prediction):
    """Overlay detection feedback on *img* and return it.

    Draws a probability bar along the top edge and, when the presence
    probability exceeds 0.6, a thick white alert frame around the image.
    (The unused ``middle`` local from the original was removed.)
    """
    height, width, _ = img.shape
    # Alert frame when the detection is confident.
    if prediction > 0.6:
        cv2.rectangle(img, (10, 10), (width - 10, height - 10), (255, 255, 255), thickness=40)
    # Dark background bar, then a filled portion proportional to prediction.
    cv2.rectangle(img, (0, 0), (width, 40), (56, 38, 50), thickness=-1)
    cv2.rectangle(img, (0, 0), (int(width * prediction), 40), (118, 230, 0), thickness=-1)
    return img
cap = cv2.VideoCapture(0)
while 1:
ret, img = cap.read()
# Get the cam and prediction made by the model
cam, prediction = clf.cam(img, class_number=1)
# Detect the contours and or bounding boxes in the cam
# img = addContours(input_img=cam, output_img=img, draw_bounding_box=True, draw_contours=False, threshold=100)
# Add the cam to the original image
img = cv2.addWeighted(cam, 0.5, img, 0.8, 0)
# Indicators of the probability of presence of a human
img = show_detection(img, prediction[1])
cv2.imshow('img',img)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| Selim78/real-time-human-detection | webcam_cam.py | webcam_cam.py | py | 2,632 | python | en | code | 3 | github-code | 36 |
27628025167 | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node."""
    def __init__(self, val=0, next=None):
        self.val = val  # node payload
        self.next = next  # following node, or None at the tail
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val  # node payload
        self.left = left  # left child, or None
        self.right = right  # right child, or None
class Solution:
    """Convert a sorted singly linked list into a height-balanced BST
    (LeetCode 109) and read the result back level by level."""

    def balanced(self, node):
        """Build a balanced BST from the sorted Python list *node*.

        The middle element becomes the root; the halves on either side
        become the left and right subtrees.  Returns None for an empty list.
        """
        if node:
            mid = len(node) // 2
            root = TreeNode(node[mid])
            root.left = self.balanced(node[:mid])
            root.right = self.balanced(node[mid + 1:])
            return root

    def sortedListToBST(self, head):
        """Copy the linked-list values into a list, then build a balanced BST."""
        values = []
        while head:
            values.append(head.val)
            head = head.next
        print('Linked List: ', values)
        return self.balanced(values)

    def PrintTree(self, tree):
        """Return the tree's values in breadth-first (level) order.

        Fix: the original tracked the previous level and branched on it, but
        both branches performed the identical append, so the level
        bookkeeping was dead code and has been removed.
        """
        queue = [tree]
        order = []
        while queue:
            node = queue.pop(0)
            order.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        return order
if __name__ == '__main__':
sol = Solution()
lst = ListNode(-10)
lst.next = ListNode(-3)
lst.next.next = ListNode(0)
lst.next.next.next = ListNode(5)
lst.next.next.next.next = ListNode(9)
res = sol.sortedListToBST(lst)
tree_lst = sol.PrintTree(res)
print('Final Level by Level Output: ', tree_lst)
| ArramBhaskar98/LeetCode | 2021/05.May_2021/06.SLL_AVL.py | 06.SLL_AVL.py | py | 1,380 | python | en | code | 0 | github-code | 36 |
21398256786 | # /usr/bin/python
# -*- coding: utf-8 -*-
"""
This program is to:
reconstruct sentences from a given data file
CS137B, programming assignment #1, Spring 2015
"""
import re
__author__ = 'Keigh Rim'
__date__ = '2/1/2015'
__email__ = 'krim@brandeis.edu'
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--i",
        help="name of a data file"
    )
    parser.add_argument(
        "--o",
        help="name of the output file"
    )
    args = parser.parse_args()

    path = "../dataset/"
    sent = ""
    tags = ""
    with open(path + args.i) as in_file, open("../" + args.o, 'w') as out_file:
        for line in in_file:
            if re.search(r"^\s+$", line):
                # Blank line = sentence boundary: emit the token line, the
                # tag line, then a separating blank line.
                out_file.write(sent + "\n")
                out_file.write(tags + "\n")
                out_file.write("\n")
                sent = ""
                tags = ""
            else:
                # Column 1 holds the token, column 2 its tag.
                sent += line.split("\t")[1] + "\t"
                tags += line.split("\t")[2] + "\t"
        # Flush the final sentence when the file does not end with a blank
        # line (the original silently dropped it).
        if sent:
            out_file.write(sent + "\n")
            out_file.write(tags + "\n")
            out_file.write("\n")
| keighrim/bananaNER | scripts/sent_reconst.py | sent_reconst.py | py | 1,087 | python | en | code | 1 | github-code | 36 |
class MirrorReflection(object):
    """LeetCode 858: which receptor a laser fired in a p x p mirrored room
    eventually hits, decided by the parity of p and q after removing their
    common factors of two."""

    def mirrorReflection(self, p, q):
        """
        :type p: int  side length of the square room
        :type q: int  distance from receptor 0 where the ray meets the east wall
        :rtype: int   receptor number 0, 1 or 2
        """
        m, n = q, p
        # Strip common factors of two.  Floor division keeps the values as
        # ints on Python 3 (the original used ``/``, producing floats).
        while m % 2 == 0 and n % 2 == 0:
            m, n = m // 2, n // 2
        if m % 2 == 0 and n % 2 == 1:
            return 0
        elif m % 2 == 1 and n % 2 == 1:
            return 1
        elif m % 2 == 1 and n % 2 == 0:
            return 2
if __name__ == '__main__':
a = MirrorReflection()
print(a.mirrorReflection(12, 1))
print(a.mirrorReflection(12, 11))
print(a.mirrorReflection(11, 1))
print(a.mirrorReflection(11, 4)) | lyk4411/untitled | beginPython/leetcode/MirrorReflection.py | MirrorReflection.py | py | 649 | python | en | code | 0 | github-code | 36 |
# BOJ 13305 (gas stations): greedy minimum fuel cost driving left to right.
import sys
input = sys.stdin.readline

n = int(input())  # number of cities
length = list(map(int, input().split()))  # n-1 road lengths
oil = list(map(int, input().split()))     # fuel price at each city

min_oil = int(1e9)  # cheapest price seen so far
result = 0
# Buy the fuel for each road at the cheapest price encountered up to that
# city.  (The original's ``if i == 0`` / ``else`` branches were identical,
# so the special case was removed.)
for i in range(len(length)):
    min_oil = min(min_oil, oil[i])
    result += length[i] * min_oil
print(result)
74779840744 | from .hash_table_common import DEFAULT_CAPACITY_ANTILOG
from Common.map import Map
class _HashMapBucketNode(object):
    """Single entry in a bucket's chain: a (key, val) pair plus next link."""
    def __init__(self):
        self.key = None  # entry key
        self.val = None  # entry value
        self.next = None  # next node in the chain, or None
class HashMap(Map):
    """Simple hash dictionary using separate chaining to resolve collisions.

    Buckets are singly linked chains of _HashMapBucketNode; the table doubles
    once the entry count reaches the bucket count (load factor 1).
    """

    def __init__(self):
        self._buckets = [None] * (1 << DEFAULT_CAPACITY_ANTILOG)
        self._len = 0

    def __len__(self):
        return self._len

    def __getitem__(self, k):
        """Return the value stored under *k*; raise KeyError when absent."""
        i = self._get_bucket_index(k)
        head = self._buckets[i]
        while head is not None:
            if head.key == k:
                return head.val
            head = head.next
        raise KeyError(str(k))

    def __setitem__(self, k, v):
        """Insert *v* under *k*, overwriting any existing entry."""
        bucket_index = self._get_bucket_index(k)
        head = self._buckets[bucket_index]
        override = False
        while head is not None:
            if head.key == k:
                head.val = v
                override = True
                break
            head = head.next
        if override:
            return
        # Grow the table before inserting a genuinely new key at capacity.
        if self._len == len(self._buckets):
            self._table_doubling()
            bucket_index = self._get_bucket_index(k)
        # Prepend the new node to its (possibly relocated) bucket chain.
        head = self._buckets[bucket_index]
        new_head = _HashMapBucketNode()
        new_head.key = k
        new_head.val = v
        new_head.next = head
        self._buckets[bucket_index] = new_head
        self._len += 1

    def __contains__(self, k):
        bucket_index = self._get_bucket_index(k)
        head = self._buckets[bucket_index]
        while head is not None:
            if head.key == k:
                return True
            head = head.next
        return False

    def __iter__(self):
        """Yield (key, value) pairs in bucket order.

        Fix: the original yielded ``head.value``, but the node attribute is
        named ``val``, so iterating always raised AttributeError.
        """
        for bucket in self._buckets:
            head = bucket
            while head:
                yield (head.key, head.val)
                head = head.next

    @property
    def capacity(self):
        """Current number of buckets."""
        return len(self._buckets)

    def pop(self, k):
        """Remove the entry stored under *k*; raise KeyError when absent."""
        bucket_index = self._get_bucket_index(k)
        head = self._buckets[bucket_index]
        prev = None
        while head is not None:
            if head.key == k:
                # Unlink the node from its chain.
                if prev is not None:
                    prev.next = head.next
                else:
                    self._buckets[bucket_index] = head.next
                self._len -= 1
                return
            prev = head
            head = head.next
        raise KeyError(str(k))

    def keys(self):
        for bucket in self._buckets:
            head = bucket
            while head:
                yield head.key
                head = head.next

    def values(self):
        for bucket in self._buckets:
            head = bucket
            while head:
                yield head.val
                head = head.next

    def _get_bucket_index(self, k):
        return hash(k) % len(self._buckets)

    def _table_doubling(self):
        """Double the bucket count and rehash every node in place."""
        old_buckets = self._buckets
        self._buckets = [None] * (len(old_buckets) * 2)
        for bucket in old_buckets:
            head = bucket
            while head is not None:
                _next = head.next
                new_bucket_index = self._get_bucket_index(head.key)
                head.next = self._buckets[new_bucket_index]
                self._buckets[new_bucket_index] = head
                head = _next
37635314680 | # Given a string s and an integer k, return the length of the longest substring of s that contains at most k distinct characters.
# Example 1:
# Input: s = "eceba", k = 2
# Output: 3
# Explanation: The substring is "ece" with length 3.
# Example 2:
# Input: s = "aa", k = 1
# Output: 2
# Explanation: The substring is "aa" with length 2.
# Constraints:
# 1 <= s.length <= 5 * 104
# 0 <= k <= 50
class Solution:
    def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
        """Length of the longest substring of *s* containing at most *k*
        distinct characters.

        Classic sliding window: grow the window on the right, shrink it
        from the left whenever it holds more than *k* distinct characters,
        and track the widest valid window seen.
        """
        best = float('-inf')
        left = 0
        window_counts = {}
        for right, ch in enumerate(s):
            window_counts[ch] = window_counts.get(ch, 0) + 1
            # Too many distinct characters: advance the left edge until one
            # character drops out of the window entirely.
            while len(window_counts) > k:
                dropped = s[left]
                window_counts[dropped] -= 1
                if window_counts[dropped] == 0:
                    del window_counts[dropped]
                left += 1
            best = max(best, right - left + 1)
        return best
| sunnyyeti/Leetcode-solutions | 340 Longest Substring wit At Most K Distinct Characters.py | 340 Longest Substring wit At Most K Distinct Characters.py | py | 1,143 | python | en | code | 0 | github-code | 36 |
36289889662 | import os
import os.path
import shutil
import tarfile
import hashlib
import argparse
import fnmatch
import sys
STR_EMPTY = ''
STR_SLASH = '/'
STR_POINT = '.'
STR_TAB = '\t'
STR_EOL = '\n'
STR_CAT_EXT = '.cat'
STR_TAR_EXT = '.tar'
STR_GZ_EXT = '.tar.gz'
STR_BZ2_EXT = '.tar.bz2'
STR_DIR_LIST = 'DIR_LIST'
STR_DIR = 'DIR'
STR_DIR_END = 'DIR_END'
STR_FILE = 'FILE'
STR_FILE_END = 'FILE_END'
STR_DIR_LIST_END = 'DIR_LIST_END'
STR_HASH_LIST = 'HASH_LIST'
STR_HASH = 'HASH'
STR_HASH_LIST_END = 'HASH_LIST_END'
def calc_hash(path): # IOError
h = hashlib.sha256()
with open(path, 'rb') as f: # IOError
block = f.read(h.block_size)
while block:
h.update(block)
block = f.read(h.block_size)
return h.hexdigest()
class CatalogFormatError(Exception):
pass
class HashNameError(Exception):
pass
class FileInfo():
def __init__(self, is_dir):
self.marked = False # for 'include'
self.isDir = is_dir
self.hash = STR_EMPTY
self.size = -1
def hash_name(info): # HashNameError
if info.isDir or (info.hash == STR_EMPTY) or (info.size == -1):
raise HashNameError()
return info.hash + '.' + str(info.size)
class FileList(): # OSError, IOError, CatalogFormatError
def __init__(self):
self.dict = {}
def _get_dir_list(self, root_dir, rel_dir=STR_EMPTY): # OSError
if rel_dir == STR_EMPTY:
self.dict.clear()
current_dir = root_dir + rel_dir
current_dir_list = os.listdir(current_dir) # OSError
for f in current_dir_list:
full_path = current_dir + STR_SLASH + f
rel_path = rel_dir + STR_SLASH + f
if os.path.isdir(full_path): # OSError
path_info = FileInfo(True)
path_info.mtime = int(os.path.getmtime(full_path)) # OSError
self.dict[rel_path] = path_info
self._get_dir_list(root_dir, rel_path)
elif os.path.isfile(full_path): # OSError
path_info = FileInfo(False)
# read mtime, size and hash directly before file checking / archiving
self.dict[rel_path] = path_info
def read_dir_list(self, source_path):
try:
self._get_dir_list(source_path)
except IOError as e:
print('ERROR: Can not read: ' + e.filename)
return
def _unmark_all(self):
for key in self.dict:
self.dict[key].marked = False
# include only matched files/folders
# use for "find"
def include(self, pattern_list):
if (pattern_list is not None) and (len(pattern_list) > 0):
# unmark all records
self._unmark_all()
# mark included
for pattern in pattern_list:
for key in self.dict:
if fnmatch.fnmatch(key, pattern):
self.dict[key].marked = True
# remove not marked (not included)
key_list = list(self.dict.keys())
for key in key_list:
if not self.dict[key].marked:
del self.dict[key]
# include not only matched files/folders but also all parent folders for matched files/folders
# use for "create" and "restore"
def include_hierarchy(self, pattern_list):
if (pattern_list is not None) and (len(pattern_list) > 0):
# unmark all records
self._unmark_all()
# mark included
key_list = list(self.dict.keys())
for pattern in pattern_list:
for key in key_list:
if fnmatch.fnmatch(key, pattern):
self.dict[key].marked = True
# mark folders with marked files/folders
d = os.path.dirname(key)
while d != STR_SLASH:
self.dict[d].marked = True
d = os.path.dirname(d)
# remove not marked (not included)
key_list = list(self.dict.keys())
for key in key_list:
if not self.dict[key].marked:
del self.dict[key]
# check and if not exist all parent folders for files/folders in list
def fix_hierarchy(self):
key_list = list(self.dict.keys())
for key in key_list:
d = os.path.dirname(key)
while d != STR_SLASH:
if d not in key_list:
path_info = FileInfo(False)
path_info.marked = False # for 'include'
path_info.isDir = True
path_info.mtime = self.dict[key].mtime
self.dict[d] = path_info
d = os.path.dirname(d)
def exclude(self, pattern_list):
if (pattern_list is not None) and (len(pattern_list) > 0):
for pattern in pattern_list:
key_list = list(self.dict.keys())
for key in key_list:
if fnmatch.fnmatch(key, pattern):
del self.dict[key]
def save(self, file_object): # IOError
# file_object = open('file.name', mode='w', encoding='utf-8')
file_object.write(STR_DIR_LIST + STR_EOL)
key_list = list(self.dict.keys())
key_list.sort()
for key in key_list:
if self.dict[key].isDir:
file_object.write(STR_DIR + STR_EOL)
file_object.write(key + STR_EOL)
file_object.write(str(self.dict[key].mtime) + STR_EOL)
file_object.write(STR_DIR_END + STR_EOL)
else:
file_object.write(STR_FILE + STR_EOL)
file_object.write(key + STR_EOL)
file_object.write(str(self.dict[key].mtime) + STR_EOL)
file_object.write(str(self.dict[key].size) + STR_EOL)
file_object.write(self.dict[key].hash + STR_EOL)
file_object.write(STR_FILE_END + STR_EOL)
file_object.write(STR_DIR_LIST_END + STR_EOL)
    def load(self, file_object):  # IOError, CatalogFormatError
        """Parse the file/dir section from an already-open text file into
        self.dict (mode='r', encoding='utf-8').

        Implemented as a line-driven state machine; any line that is not
        valid in the current state raises CatalogFormatError. Lines before
        the STR_DIR_LIST header are silently skipped; the method returns as
        soon as STR_DIR_LIST_END is seen.
        """
        # parser states
        wait_list = 0
        wait_dir_file = 1
        wait_path = 2
        wait_mtime = 3
        wait_size = 4
        wait_hash = 5
        wait_dir_end = 6
        wait_file_end = 7
        self.dict.clear()
        file_object.seek(0, os.SEEK_SET)
        state = wait_list
        # fields of the record currently being parsed
        info_is_dir = False
        info_path = STR_EMPTY
        info_mtime = -1
        info_size = -1
        info_hash = STR_EMPTY
        for s in file_object:
            line = s.strip()
            if (state == wait_list) and (line == STR_DIR_LIST):
                state = wait_dir_file
            elif ((state == wait_dir_file) and
                  ((line == STR_DIR) or (line == STR_FILE) or (line == STR_DIR_LIST_END))):
                if line == STR_DIR:
                    info_is_dir = True
                    state = wait_path
                elif line == STR_FILE:
                    info_is_dir = False
                    state = wait_path
                elif line == STR_DIR_LIST_END:
                    # end of section reached
                    return
            elif state == wait_path:
                info_path = line
                state = wait_mtime
            elif state == wait_mtime:
                info_mtime = int(line)
                # directories carry no size/hash fields
                if info_is_dir:
                    state = wait_dir_end
                else:
                    state = wait_size
            elif state == wait_size:
                info_size = int(line)
                state = wait_hash
            elif state == wait_hash:
                info_hash = line
                state = wait_file_end
            elif (state == wait_dir_end) and (line == STR_DIR_END):
                # commit the directory record
                self.dict[info_path] = FileInfo(True)
                self.dict[info_path].mtime = info_mtime
                info_is_dir = False
                state = wait_dir_file
            elif (state == wait_file_end) and (line == STR_FILE_END):
                # commit the file record
                self.dict[info_path] = FileInfo(False)
                self.dict[info_path].mtime = info_mtime
                self.dict[info_path].size = info_size
                self.dict[info_path].hash = info_hash
                state = wait_dir_file
            else:
                raise CatalogFormatError()  # malformed catalogue line
def load_file(self, file_name):
try:
file_object = open(file_name, mode='r', encoding='utf-8')
try:
self.load(file_object)
except IOError:
print('ERROR: Can not read reference catalogue file!')
return
except CatalogFormatError:
print('ERROR: Reference catalogue is damaged!')
return
finally:
file_object.close()
except IOError:
print('ERROR: Can not open reference catalogue file!')
# key = hash + u'.' + unicode(size)
# value = arch name
# FileList.dict[key].hashName
class HashList():  # IOError, CatalogFormatError
    """Maps 'hash.size' keys to the name of the archive slice set storing
    that content (used for deduplication across backups)."""
    def __init__(self):
        self.dict = {}
    def save(self, file_object):  # IOError
        """Append the hash section (sorted by key) to an already-open text
        file (mode='w', encoding='utf-8')."""
        file_object.write(STR_HASH_LIST + STR_EOL)
        for key in sorted(self.dict.keys()):
            file_object.write(STR_HASH + STR_TAB + key + STR_TAB + self.dict[key] + STR_EOL)
        file_object.write(STR_HASH_LIST_END + STR_EOL)
    def load(self, file_object):  # IOError, CatalogFormatError
        """Parse the hash section from an already-open text file into
        self.dict; lines before the STR_HASH_LIST header are skipped."""
        self.dict.clear()
        file_object.seek(0, os.SEEK_SET)
        in_list = False
        for raw_line in file_object:
            line = raw_line.strip()
            if not in_list:
                if line == STR_HASH_LIST:
                    in_list = True
            elif line == STR_HASH_LIST_END:
                return
            else:
                fields = line.split(STR_TAB)
                if (len(fields) == 3) and (fields[0] == STR_HASH):
                    self.dict[fields[1]] = fields[2]
                else:
                    raise CatalogFormatError()
    def load_file(self, file_name):
        """Open file_name and load the hash list from it, printing an error
        message (instead of raising) on failure."""
        try:
            file_object = open(file_name, mode='r', encoding='utf-8')
            try:
                try:
                    self.load(file_object)
                except IOError:
                    print('ERROR: Can not read reference catalogue file!')
                except CatalogFormatError:
                    print('ERROR: Reference catalogue is damaged!')
            finally:
                file_object.close()
        except IOError:
            print('ERROR: Can not open reference catalogue file!')
# not correct for unicode file names
class TarFileWriter:  # OSError, IOError, tarfile.TarError
    """Writes files into a multi-volume tar archive.

    Volumes are named <name>.<part><ext>; each volume is capped at
    MaxPartSize (rounded down to a whole tarfile.RECORDSIZE). A file that
    does not fit in the current volume is split: a truncated member fills
    the volume exactly and the remainder continues under the same member
    name in the next volume (TarFileReader.extract re-concatenates them).
    """
    def __init__(self, name, max_part_size, arch_type='tar'):
        self.TarName = name
        self.PartNumber = 0
        self.PartSize = 0
        self.PartFile = None
        self.Closed = True
        # the cap must be a whole number of tar records
        self.MaxPartSize = (max_part_size // tarfile.RECORDSIZE) * tarfile.RECORDSIZE
        self.Type = arch_type.lower()
        # bug fix: compare the normalized self.Type -- the original compared
        # the raw arch_type, so e.g. 'TAR' was rejected despite the .lower()
        if self.Type == 'tar':
            self.Ext = STR_TAR_EXT
            self.Mode = 'w:'
        elif self.Type == 'gz':
            self.Ext = STR_GZ_EXT
            self.Mode = 'w:gz'
        elif self.Type == 'bz2':
            self.Ext = STR_BZ2_EXT
            self.Mode = 'w:bz2'
        else:
            raise IOError()
    def close(self):  # IOError
        """Finalize and close the current volume, if one is open."""
        if not self.Closed:
            self.PartFile.close()
            self.PartFile = None
            self.Closed = True
    def __new_part(self):  # IOError
        """Close the current volume and open the next numbered one."""
        self.close()
        self.PartNumber += 1
        self.PartFile = tarfile.open(self.TarName + STR_POINT + str(self.PartNumber) + self.Ext, self.Mode)
        self.PartSize = 0
        self.Closed = False
    def add(self, file_path, tar_name):  # OSError, IOError, tarfile.TarError
        """Store file_path under member name tar_name, splitting it across
        volumes when it does not fit into the current one."""
        if self.Closed:
            self.__new_part()
        # prepare file object
        file_size = os.path.getsize(file_path)  # OSError
        file_tar_info = self.PartFile.gettarinfo(file_path)  # tarfile.TarError
        file_tar_info.name = tar_name
        with open(file_path, 'rb') as file_object:  # IOError
            # copy file to tar; always reserve 3 blocks: one member header
            # plus the two zero blocks terminating a tar archive
            while (self.PartSize + file_size + 3*tarfile.BLOCKSIZE) > self.MaxPartSize:
                # fill the current volume exactly with a truncated member,
                # then continue from the same file position in the next one
                file_size_to_save = self.MaxPartSize - self.PartSize - 3*tarfile.BLOCKSIZE
                file_tar_info.size = file_size_to_save
                self.PartFile.addfile(file_tar_info, file_object)  # tarfile.TarError
                self.PartSize = self.PartSize + tarfile.BLOCKSIZE + file_size_to_save
                assert (self.PartSize + 2*tarfile.BLOCKSIZE) == self.MaxPartSize
                self.__new_part()
                file_size -= file_size_to_save
            file_tar_info.size = file_size
            self.PartFile.addfile(file_tar_info, file_object)  # tarfile.TarError
            # recalculate PartSize: header block plus payload rounded up to blocks
            self.PartSize = self.PartSize + tarfile.BLOCKSIZE + (file_size // tarfile.BLOCKSIZE) * tarfile.BLOCKSIZE
            if (file_size % tarfile.BLOCKSIZE) > 0:
                self.PartSize += tarfile.BLOCKSIZE
            assert (self.PartSize + 2*tarfile.BLOCKSIZE) <= self.MaxPartSize
        # no room left for even an empty member -> seal the volume now
        if (self.PartSize + 3*tarfile.BLOCKSIZE) >= self.MaxPartSize:
            self.close()
# not correct for unicode file names
class TarFileReader:  # KeyError, IOError, tarfile.TarError
    """Reads members back from a multi-volume tar archive written by
    TarFileWriter, re-joining members that were split across volumes."""
    def __init__(self, name):
        self.TarName = name
        self.PartNumber = 0
        self.PartFile = None
        self.Closed = True
        # detect the compression type from the extension of the first volume
        if os.path.isfile(name + '.1' + STR_TAR_EXT):
            self.Ext = STR_TAR_EXT
        elif os.path.isfile(name + '.1' + STR_GZ_EXT):
            self.Ext = STR_GZ_EXT
        elif os.path.isfile(name + '.1' + STR_BZ2_EXT):
            self.Ext = STR_BZ2_EXT
        else:
            raise IOError()
    def close(self):  # IOError
        """Close the currently open volume, if any."""
        if not self.Closed:
            self.PartFile.close()
            self.PartFile = None
            self.Closed = True
    def __next_part(self):  # IOError
        """Close the current volume and open the next numbered one."""
        self.close()
        self.PartNumber += 1
        self.PartFile = tarfile.open(self.TarName + STR_POINT + str(self.PartNumber) + self.Ext)
        # bug fix: mark the reader as open -- the original left Closed True,
        # so close() was a no-op and every opened volume leaked its handle
        self.Closed = False
    def extract(self, tar_name, file_path):  # KeyError, IOError, tarfile.TarError
        """Extract member tar_name into file_path, appending continuation
        members with the same name found in subsequent volumes."""
        self.PartNumber = 0
        # find the first volume that contains this member
        found = False
        no_file = False
        while not (found or no_file):
            try:
                self.__next_part()
                file_tar_info = self.PartFile.getmember(tar_name)
                found = True
            except IOError:
                # no more volumes on disk
                no_file = True
            except KeyError:
                # not in this volume; try the next one
                pass
        if found:
            with open(file_path, 'wb') as file_object:  # IOError
                while found:
                    # copy this volume's piece into the output file
                    tar_buffer = self.PartFile.extractfile(file_tar_info)  # tarfile.TarError
                    file_size = file_tar_info.size
                    while file_size > 0:
                        if file_size > tarfile.BLOCKSIZE:
                            file_size_to_save = tarfile.BLOCKSIZE
                        else:
                            file_size_to_save = file_size
                        file_object.write(tar_buffer.read(tarfile.BLOCKSIZE))  # IOError, tarfile.TarError
                        file_size = file_size - file_size_to_save
                    tar_buffer.close()  # tarfile.TarError
                    # look for a continuation piece in the next volume
                    try:
                        self.__next_part()
                        file_tar_info = self.PartFile.getmember(tar_name)  # tarfile.TarError
                    except IOError:
                        found = False
                    except KeyError:
                        found = False
        else:
            raise KeyError()
def sh_create(sh_args):
    """Handle the 'create' sub-command: back up sh_args.source into the
    repository as a multi-volume archive named sh_args.name.

    Reuses hashes from an optional reference catalogue (differential
    backup: unchanged files are listed but their content is not re-stored),
    then writes the catalogue (file list + hash list) as <name>.cat.
    Interactive Abort/Ignore/Retry prompts appear on per-file errors unless
    --ignore was given.
    """
    # check source
    if not os.path.isdir(sh_args.source):
        print('ERROR: Source not found!')
        return
    # check repository
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!')
        return
    # check if files with backup name exist
    if os.path.isfile(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT):
        print('ERROR: Such archive already exists!')
        return
    # create empty reference and hash lists
    reference_list = FileList()
    hash_list = HashList()
    # load reference and hash lists
    if sh_args.reference is not None:
        # check if reference file exists
        ref_path = sh_args.repository + '/' + sh_args.reference + STR_CAT_EXT
        if not os.path.isfile(ref_path):
            print('ERROR: Reference not found!')
            return
        reference_list.load_file(ref_path)
        hash_list.load_file(ref_path)
    # create list of files/dirs in source destination
    source_list = FileList()
    source_list.read_dir_list(sh_args.source)
    # include / exclude files / dirs
    source_list.include_hierarchy(sh_args.include)
    source_list.exclude(sh_args.exclude)
    # compression
    compr = 'tar'
    if sh_args.compression is not None:
        compr = sh_args.compression
    # create TarFileWriter
    writer = TarFileWriter(sh_args.repository + STR_SLASH + sh_args.name, sh_args.size, compr)
    # check files and if new/changed add to archive
    c_all = 0
    c_new = 0
    size_all = 0
    size_new = 0
    key_list = list(source_list.dict)
    key_list.sort()
    for file_name in key_list:
        file_path = sh_args.source + file_name
        if not source_list.dict[file_name].isDir:
            ok = False
            while not ok:
                try:
                    # get date and size
                    source_list.dict[file_name].mtime = int(os.path.getmtime(file_path))
                    source_list.dict[file_name].size = os.path.getsize(file_path)
                    # check if such file is in reference (same mtime and size
                    # -> reuse the stored hash instead of rehashing)
                    if (not sh_args.recalculate) and (file_name in reference_list.dict) and \
                       (not reference_list.dict[file_name].isDir) and \
                       (source_list.dict[file_name].mtime == reference_list.dict[file_name].mtime) and \
                       (source_list.dict[file_name].size == reference_list.dict[file_name].size):
                        source_list.dict[file_name].hash = reference_list.dict[file_name].hash
                    else:
                        # calculate hash
                        source_list.dict[file_name].hash = calc_hash(file_path)
                    # add file to archive only if this content (hash.size key)
                    # is not already stored somewhere
                    tar_name = hash_name(source_list.dict[file_name])
                    if tar_name not in hash_list.dict:
                        hash_list.dict[tar_name] = sh_args.name
                        writer.add(sh_args.source + file_name, tar_name)
                        c_new += 1
                        size_new = size_new + source_list.dict[file_name].size
                    size_all = size_all + source_list.dict[file_name].size
                    ok = True
                except (OSError, IOError) as e:
                    print('ERROR: Can not read: ' + e.filename)
                    if sh_args.ignore:
                        answer = 'i'
                    else:
                        answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                    if answer == 'a':
                        writer.close()
                        return
                    elif answer == 'i':
                        # drop the unreadable file from the catalogue
                        del source_list.dict[file_name]
                        ok = True
                except tarfile.TarError:
                    print('ERROR: Can not write files to archive!')
                    answer = input('Abort (a) / Retry (other): ')
                    if answer == 'a':
                        writer.close()
                        return
        c_all += 1
        if not sh_args.quiet:
            sys.stdout.write("\rFiles (New/All): %s / %s, Size (New/All): %.02f Mb / %.02f Mb" % (
                c_new, c_all, size_new/1024.0/1024.0, size_all/1024.0/1024.0))
            sys.stdout.flush()
    # close TarFileWriter
    writer.close()
    if not sh_args.quiet:
        sys.stdout.write(STR_EOL)
        sys.stdout.flush()
    # save catalogue
    try:
        file_object = open(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT,
                           mode='w', encoding='utf-8')
        try:
            source_list.save(file_object)
            hash_list.save(file_object)
        except IOError:
            print('ERROR: Can not create catalogue file!')
            return
        finally:
            file_object.close()
    except IOError:
        print('ERROR: Can not create catalogue file!')
def sh_find(sh_args):
    """Handle the 'find' sub-command: print every catalogue entry matching
    the name mask and the include/exclude filters, one 'catalogue: path'
    line per match."""
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!\n')
        return
    # catalogue files whose name matches the requested basename mask
    mask = sh_args.name + STR_CAT_EXT
    cat_list = sorted(entry for entry in os.listdir(sh_args.repository)
                      if fnmatch.fnmatch(entry, mask))
    if not cat_list:
        print('ERROR: No catalogue found!\n')
        return
    # look for matching files and dirs in every selected catalogue
    for cat in cat_list:
        file_list = FileList()
        file_list.load_file(sh_args.repository + STR_SLASH + cat)
        file_list.include(sh_args.include)
        file_list.exclude(sh_args.exclude)
        for key in sorted(file_list.dict.keys()):
            print(cat + ': ' + key)
def sh_restore(sh_args):
    """Handle the 'restore' sub-command: extract the backup sh_args.name
    from the repository into sh_args.destination.

    Files already present with matching mtime, size and hash are skipped;
    restored entries get their catalogued mtime back via os.utime. With
    --delete, files/dirs in the destination that are absent from the backup
    are removed afterwards. Interactive Abort/Ignore/Retry prompts appear
    on per-file errors unless --ignore was given.
    """
    # check repository
    if not os.path.isdir(sh_args.repository):
        print('ERROR: Repository not found!\n')
        return
    # check existence of catalogue file
    if not os.path.isfile(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT):
        print('ERROR: Catalogue not found!\n')
        return
    # check destination existence
    if not os.path.isdir(sh_args.destination):
        print('ERROR: Destination not found!\n')
        return
    # read FileList and HashList from catalogue
    source_list = FileList()
    hash_list = HashList()
    source_list.load_file(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT)
    hash_list.load_file(sh_args.repository + STR_SLASH + sh_args.name + STR_CAT_EXT)
    # include / exclude files / dirs
    source_list.fix_hierarchy()
    source_list.include_hierarchy(sh_args.include)
    source_list.exclude(sh_args.exclude)
    # create not existing dirs and extract new or changed files
    c_all = 0
    c_new = 0
    size_all = 0
    size_new = 0
    key_list = list(source_list.dict)
    key_list.sort()
    for file_name in key_list:
        file_path = sh_args.destination + file_name
        # make directory (for a file: its parent directory)
        if source_list.dict[file_name].isDir:
            file_dir = file_path
        else:
            (file_dir, stub) = os.path.split(file_path)
        ok = False
        while not ok:
            try:
                # a regular file occupying the dir's place must be removed first
                if os.path.isfile(file_dir):
                    os.remove(file_dir)
                if not os.path.isdir(file_dir):
                    os.makedirs(file_dir)
                ok = True
            except OSError as e:
                print('ERROR: Can not create directory: ' + e.filename)
                if sh_args.ignore:
                    answer = 'i'
                else:
                    answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                if answer == 'a':
                    return
                elif answer == 'i':
                    ok = True
        # restore file
        if not source_list.dict[file_name].isDir:
            # the hash list tells us which archive slice set holds the content
            hash_key = hash_name(source_list.dict[file_name])
            backup_file = hash_list.dict[hash_key]
            ok = False
            while not ok:
                # NOTE(review): if TarFileReader(...) raises IOError before
                # 'reader' is assigned (missing slices), the finally clause
                # below hits an unbound 'reader' -- confirm and guard.
                try:
                    # check if such file exists (identical -> skip extraction)
                    reader = TarFileReader(sh_args.repository + STR_SLASH + backup_file)
                    if os.path.isfile(file_path) and \
                       (source_list.dict[file_name].mtime == int(os.path.getmtime(file_path))) and \
                       (source_list.dict[file_name].size == os.path.getsize(file_path)) and \
                       (source_list.dict[file_name].hash == calc_hash(file_path)):
                        pass
                    else:
                        # a directory occupying the file's place must go first
                        if os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                        reader.extract(hash_key, file_path)
                        c_new += 1
                        size_new = size_new + source_list.dict[file_name].size
                    ok = True
                except (OSError, IOError) as e:
                    print('ERROR: Can not restore file: ' + e.filename)
                    if sh_args.ignore:
                        answer = 'i'
                    else:
                        answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                    if answer == 'a':
                        return
                    elif answer == 'i':
                        ok = True
                finally:
                    reader.close()
            c_all += 1
            size_all = size_all + source_list.dict[file_name].size
        # set time (applied to both files and directories)
        ok = False
        while not ok:
            try:
                os.utime(file_path, (source_list.dict[file_name].mtime,
                                     source_list.dict[file_name].mtime))
                ok = True
            except OSError as e:
                print('ERROR: Can not update time for: ' + e.filename)
                if sh_args.ignore:
                    answer = 'i'
                else:
                    answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                if answer == 'a':
                    return
                elif answer == 'i':
                    ok = True
        sys.stdout.write("\rFiles (New/All): %s / %s, Size (New/All): %.02f Mb / %.02f Mb" % (
            c_new, c_all, size_new/1024.0/1024.0, size_all/1024.0/1024.0))
        sys.stdout.flush()
    sys.stdout.write(STR_EOL)
    sys.stdout.flush()
    # get FileList for destination (only needed for --delete)
    if sh_args.delete:
        destination_list = FileList()
        destination_list.read_dir_list(sh_args.destination)
        # remove old files
        key_list = list(destination_list.dict.keys())
        key_list.sort()
        for file_name in key_list:
            file_path = sh_args.destination + file_name
            if (not destination_list.dict[file_name].isDir) and \
               (file_name not in source_list.dict):
                ok = False
                while not ok:
                    try:
                        os.remove(file_path)
                        ok = True
                    except OSError as e:
                        print('ERROR: Can not delete file: ' + e.filename)
                        if sh_args.ignore:
                            answer = 'i'
                        else:
                            answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                        if answer == 'a':
                            return
                        elif answer == 'i':
                            ok = True
        # remove old dirs
        key_list = list(destination_list.dict.keys())
        key_list.sort()
        for file_name in key_list:
            file_path = sh_args.destination + file_name
            if destination_list.dict[file_name].isDir and \
               (file_name not in source_list.dict):
                ok = False
                while not ok:
                    try:
                        # may already be gone if a parent dir was removed
                        if os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                        ok = True
                    except OSError as e:
                        print('ERROR: Can not delete directory: ' + e.filename)
                        if sh_args.ignore:
                            answer = 'i'
                        else:
                            answer = input('Abort (a) / Ignore (i) / Retry (other): ')
                        if answer == 'a':
                            return
                        elif answer == 'i':
                            ok = True
# source - the directory tree being archived
# destination - the directory into which files are extracted
# repository - the directory where the archive is stored
# name - archive name (no extension, no path; the archive lives in [repository])
# reference - path/name of the reference catalogue, with extension
# slice - archive volume; the number precedes the extension, e.g. [backup].3.tar
# catalogue - archive catalogue file - [backup].cat
parser = argparse.ArgumentParser(description='version 0.6.2')
subparsers = parser.add_subparsers()
parser_create = subparsers.add_parser('create') #
parser_create.add_argument('source', help='Directory tree that will be backed up.') # dir
parser_create.add_argument('repository', help='Directory in which backup will be stored.') # dir
parser_create.add_argument('name', help='Basename for backup.') # name
parser_create.add_argument('-r', '--reference',
                           help='Reference basename for differential backup. '
                                'Reference catalog should be stored in the same repository.') # path
parser_create.add_argument('-s', '--size', type=int, default=1024*1024*1020, help='Size of one slice.')
parser_create.add_argument('-i', '--include', nargs='*',
                           help='Mask list. Files/Dirs matching at least one mask will be included in backup. '
                                'If no mask specified all Files/Dirs will be included.')
parser_create.add_argument('-e', '--exclude', nargs='*',
                           help='Mask list. Files/Dirs matching at least one mask will be excluded from backup.')
parser_create.add_argument('-q', '--quiet', action='store_true',
                           help='Nothing is displayed if operation succeeds.') # !!!
parser_create.add_argument('-g', '--ignore', action='store_true', help='Ignore all errors.')
parser_create.add_argument('-c', '--compression', help="'tar'-default, 'gz' or 'bz2'")
parser_create.add_argument('-a', '--recalculate', action='store_true',
                           help="Recalculate all hashes again. Don't use hashes from reference.")
parser_create.set_defaults(func=sh_create)
parser_find = subparsers.add_parser('find') # simple regular expressions
parser_find.add_argument('repository', help='Directory in which backup is stored.') # dir
parser_find.add_argument('name', help='Mask for backup basename. '
                                      'Several backups can be looked thorough.') # name pattern (without ext)
parser_find.add_argument('-i', '--include', nargs='*',
                         help='Mask list. Files/Dirs matching at least one mask will be shown. '
                              'If no mask specified all Files/Dirs will be shown.')
parser_find.add_argument('-e', '--exclude', nargs='*',
                         help='Mask list. Files/Dirs matching at least one mask will not be shown.')
parser_find.set_defaults(func=sh_find)
parser_restore = subparsers.add_parser('restore') # restore backup
parser_restore.add_argument('repository', help='Directory in which backup is stored.') # dir
parser_restore.add_argument('name', help='Basename for backup to be restored.') # name
parser_restore.add_argument('destination', help='Directory which will be restored.') # dir
parser_restore.add_argument('-i', '--include', nargs='*',
                            help='Mask list. Files/Dirs matching at least one mask will be restored. '
                                 'If no mask specified all Files/Dirs will be restored.')
parser_restore.add_argument('-e', '--exclude', nargs='*',
                            help='Mask list. Files/Dirs matching at least one mask will not be restored.')
parser_restore.add_argument('-d', '--delete', action='store_true',
                            help='Delete Files/Dirs not existing in backup.')
parser_restore.add_argument('-g', '--ignore', action='store_true', help='Ignore all errors.')
parser_restore.set_defaults(func=sh_restore)
# dispatch to the handler registered via set_defaults for the chosen sub-command
args = parser.parse_args()
args.func(args)
# // integer division: the result is an integer (the fractional part is discarded)
# % modulo (remainder of division)
# tar file format
# 1 file info - BLOCKSIZE (512)
# 1 file data - filled by zeros to BLOCKSIZE (512)
# 2 file info - BLOCKSIZE (512)
# 2 file data - filled by zeros to BLOCKSIZE (512)
# N file info - BLOCKSIZE (512)
# N file data - filled by zeros to BLOCKSIZE (512)
# two finishing zero blocks - BLOCKSIZE * 2 (512 * 2)
# filled by zeros to RECORDSIZE (BLOCKSIZE * 20) (512 * 20)
# tarfile.BLOCKSIZE = 512
# tarfile.RECORDSIZE = BLOCKSIZE * 20
| 2e8/siddar | siddar.py | siddar.py | py | 34,249 | python | en | code | 0 | github-code | 36 |
def gcd(a: int, b: int) -> int:
    """Greatest common divisor of two non-negative integers (Euclid's algorithm)."""
    assert a >= 0 and b >= 0
    while b:
        a, b = b, a % b
    return a
def extended_gcd(a: int, b: int) -> tuple:
    """Return (d, x, y) with d = gcd(a, b) and d == a*x + b*y (Bezout)."""
    assert a >= 0 and b >= 0
    if b == 0:
        result = (a, 1, 0)
    else:
        d, x_prev, y_prev = extended_gcd(b, a % b)
        result = (d, y_prev, x_prev - (a // b) * y_prev)
    # sanity check: the Bezout identity must hold at every level
    d, x, y = result
    assert d == a * x + b * y
    return result
# demo: Bezout coefficients for 24 and 7
if __name__ == "__main__":
    res = extended_gcd(24, 7)
    print(res)
73578590183 | # coding: utf-8
# bug fix: was '_all_', which has no special meaning to Python; '__all__'
# is what actually controls 'from module import *'
__all__ = [ 'processing', 'processing_outputs' ]

import os
import sys

# make the package importable when this file is run as a script
# (the package root sits three directory levels above this file)
parent_dir = os.path.abspath(__file__ + 3 * '/..')
sys.path.insert(0, parent_dir)

import inclusion
from inclusion.config import main
from inclusion.utils import utils
from inclusion.condor.job_writer import JobWriter

import re
import argparse
def produce_trigger_outputs_sample(args, sample, ext):
    """
    Produces all outputs of the submitTriggerEff task for one sample:
    the list of expected output file paths, one per input ROOT file.
    Limitation: As soon as one file is not produced, luigi
    reruns everything.
    """
    assert(ext in ('root', 'txt'))
    extension = '.' + ext
    t = []
    # extracts the numeric suffix of each input file ('..._output_NNN.root')
    exp = re.compile('.+output(_[0-9]{1,5}).root')
    inputs, _ = utils.get_root_inputs(sample, args.indir)
    # bug fix: 'proc' was undefined here (NameError); the per-sample output
    # folder is named after the sample being processed
    folder = os.path.join( args.outdir, sample )
    for inp in inputs:
        number = exp.search(inp)
        proc_folder = os.path.dirname(inp).split('/')[-1]
        basename = args.tprefix + '_' + proc_folder + number.group(1)
        basename += args.subtag + extension
        t.append( os.path.join(folder, basename) )
    return t
@utils.set_pure_input_namespace
def produce_trigger_outputs(args, ext='root'):
    """
    Produces all outputs of the submitTriggerEff task, split into the data
    sample list and the MC sample list.
    Limitation: As soon as one file is not produced, luigi
    reruns everything.
    """
    tdata = []
    for proc in args.data_vals:
        tdata += produce_trigger_outputs_sample(args, proc, ext)
    tmc = []
    for proc in args.mc_vals:
        tmc += produce_trigger_outputs_sample(args, proc, ext)
    return tdata, tmc
@utils.set_pure_input_namespace
def processing_outputs(args):
    """Build the JobWriter output descriptors for data and MC, plus the
    (key, value) process tuples, for the selected mode ('histos'/'counts')."""
    mode_names = {'histos': 'Histos', 'counts': 'Counts'}
    if args.mode not in mode_names:
        raise ValueError('Mode {} is not supported.'.format(args.mode))
    name = mode_names[args.mode]
    _data_tup = tuple(zip(args.data_keys, args.data_vals))
    _mc_tup = tuple(zip(args.mc_keys, args.mc_vals))
    out_data = JobWriter.define_output(
        data_folders=[name + '_' + v for v in args.data_vals],
        localdir=args.localdir, tag=args.tag)
    out_mc = JobWriter.define_output(
        data_folders=[name + '_' + v for v in args.mc_vals],
        localdir=args.localdir, tag=args.tag)
    return (out_data, out_mc, _data_tup, _mc_tup)
@utils.set_pure_input_namespace
def processing(args):
    """For every data and MC process, write the HTCondor job files: a shell
    wrapper around the production script, the condor submission file and
    its queue lines (one per input ROOT file)."""
    outs_data, outs_mc, _data_procs, _mc_procs = processing_outputs(args)
    # unite Data and MC lists
    outs_job = outs_data[0] + outs_mc[0]
    outs_submit = outs_data[1] + outs_mc[1]
    outs_check = outs_data[2] + outs_mc[2]
    outs_log = outs_data[3] + outs_mc[3]
    _all_processes = _data_procs + _mc_procs
    for i, (kproc, vproc) in enumerate(_all_processes):
        filelist, _ = utils.get_root_inputs(vproc, args.indir)
        #### Write shell executable (python scripts must be wrapped in shell files to run on HTCondor)
        pars = {'outdir' : args.outdir,
                'dataset' : kproc,
                'sample' : vproc,
                'isdata' : int(vproc in args.data_vals),
                'file' : '${1}',
                'subtag' : args.subtag,
                'channels' : ' '.join(args.channels),
                'tprefix' : args.tprefix,
                'configuration' : args.configuration}
        script = ('produce_trig_histos.py' if args.mode == 'histos'
                  else 'produce_trig_counts.py')
        comm = utils.build_script_command(name=script, sep=' ', **pars)
        if args.mode == 'histos':
            # histogram mode needs extra binning/variable options
            pars1 = {'binedges_fname' : args.binedges_filename,
                     'intersection_str' : args.intersection_str,
                     'variables' : ' '.join(args.variables,),
                     'nocut_dummy_str' : args.nocut_dummy_str}
            comm += utils.build_script_command(name=None, sep=' ', **pars1)
        jw = JobWriter()
        jw.write_shell(filename=outs_job[i], command=comm, localdir=args.localdir)
        jw.add_string('echo "Process {} done in mode {}."'.format(vproc,args.mode))
        #### Write submission file
        jw.write_condor(filename=outs_submit[i],
                        real_exec=utils.build_script_path(script),
                        shell_exec=outs_job[i],
                        outfile=outs_check[i],
                        logfile=outs_log[i],
                        queue=main.queue,
                        machine='llrt3condor')
        # one queue entry per input file (newlines stripped)
        qlines = []
        for listname in filelist:
            qlines.append('  {}'.format( listname.replace('\n','') ))
        jw.write_queue( qvars=('filename',),
                        qlines=qlines )
# -- Parse options
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Command line parser')
    parser.add_argument('--binedges_dataset', dest='binedges_dataset', required=True, help='in directory')
    parser.add_argument('--localdir', dest='localdir', default=os.getcwd(), help='job out directory')
    parser.add_argument('--indir', dest='indir', required=True, help='in directory')
    parser.add_argument('--outdir', dest='outdir', required=True, help='out directory')
    parser.add_argument('--tag', dest='tag', required=True, help='tag')
    parser.add_argument('--subtag', dest='subtag', required=True, help='subtag')
    parser.add_argument('--tprefix', dest='tprefix', required=True, help='target prefix')
    parser.add_argument('--mc_processes', dest='mc_processes', required=True, nargs='+', type=str,
                        help='list of MC process names')
    parser.add_argument('--data_keys', dest='data_keys', required=True, nargs='+', type=str,
                        help='list of datasets')
    parser.add_argument('--data_vals', dest='data_vals', required=True, nargs='+', type=str,
                        help='list of datasets')
    parser.add_argument('--channels', dest='channels', required=True, nargs='+', type=str,
                        help='Select the channels over which the workflow will be run.' )
    parser.add_argument('--variables', dest='variables', required=True, nargs='+', type=str,
                        help='Select the variables over which the workflow will be run.' )
    parser.add_argument('--intersection_str', dest='intersection_str', required=False, default=main.inters_str,
                        help='String used to represent set intersection between triggers.')
    parser.add_argument('--nocut_dummy_str', dest='nocut_dummy_str', required=True,
                        help='Dummy string associated to trigger histograms were no cuts are applied.')
    parser.add_argument('--configuration', dest='configuration', required=True,
                        help='Name of the configuration module to use.')
    args = parser.parse_args()
    # bug fix: 'submitTriggerEff' is neither defined nor imported in this
    # module; the entry point implemented above is processing()
    # NOTE(review): the parser does not define every attribute processing()
    # reads (e.g. 'mode', 'mc_keys', 'mc_vals', 'binedges_filename') --
    # confirm the intended CLI against the luigi task that normally builds
    # this namespace
    processing( args )
| bfonta/inclusion | inclusion/condor/processing.py | processing.py | py | 6,926 | python | en | code | 0 | github-code | 36 |
24288441205 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 20:50:05 2022
@author: Yifang
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import traceAnalysis as Ananlysis
import SPADdemod
def getSignalTrace (filename, traceType='Constant',HighFreqRemoval=True,getBinTrace=False,bin_window=20):
    '''Read a SPAD trace from filename and demodulate it.

    traceType: 'Freq', 'Constant' or 'TimeDiv'.
    'Constant' returns a single trace (binned by bin_window if getBinTrace);
    'Freq' and 'TimeDiv' return (Red, Green, Signal) where Signal is the
    red-green subtraction. HighFreqRemoval applies a 1 kHz low-pass first.
    '''
    trace=Ananlysis.Read_trace (filename,mode="SPAD")
    if HighFreqRemoval==True:
        trace=Ananlysis.butter_filter(trace, btype='low', cutoff=1000, fs=9938.4, order=10)
    if traceType=='Constant':
        if getBinTrace==True:
            # bug fix: get_bin_trace was called twice in a row; the first
            # result (plotted with color='m') was immediately discarded
            trace_binned=Ananlysis.get_bin_trace(trace,bin_window=bin_window)
            return trace_binned
        else:
            return trace
    if traceType=='Freq':
        # frequency-shift demodulation of the two channels
        #Red,Green= SPADdemod.DemodFreqShift (trace,fc_g=1000,fc_r=2000,fs=9938.4)
        Red,Green= SPADdemod.DemodFreqShift_bandpass (trace,fc_g=1009,fc_r=1609,fs=9938.4)
        #Red=Ananlysis.butter_filter(Red, btype='low', cutoff=200, fs=9938.4, order=10)
        #Green=Ananlysis.butter_filter(Green, btype='low', cutoff=200, fs=9938.4, order=10)
        Signal=Ananlysis.getSignal_subtract(Red,Green,fs=9938.4)
        return Red,Green,Signal
    if traceType=='TimeDiv':
        #need to be modified for different time division traces
        lmin,lmax=SPADdemod.hl_envelopes_max(trace, dmin=2, dmax=2, split=True)
        fig, ax = plt.subplots(figsize=(12, 3))
        ax.plot(lmax,trace[lmax], color='r')
        ax.plot(lmin,trace[lmin], color='g')
        # interpolate each envelope back onto the full time base
        x_green, Green=SPADdemod.Interpolate_timeDiv (lmin,trace)
        x_red, Red=SPADdemod.Interpolate_timeDiv (lmax,trace)
        Signal=Ananlysis.getSignal_subtract(Red,Green,fs=9938.4)
        fig, ax = plt.subplots(figsize=(12, 3))
        ax=Ananlysis.plot_trace(Signal,ax, label="Signal")
        return Red,Green,Signal
#%%
# Interactive analysis cells (Spyder-style '#%%' sections). Cells are meant
# to be run selectively, so some reference variables (Red, Green, trace_binned)
# produced by earlier cells for a given acquisition mode.
# Sampling Frequency
fs = 9938.4
#dpath= "C:/SPAD/SPADData/20220611/1516996_Freq_2022_6_11_16_8_21"
dpath="D:/SPAD/SPADData/20220913/1534725_HPC_50g_2022_9_13_16_3_57"
#%%
# read and demodulate the raw SPAD trace
filename=Ananlysis.Set_filename (dpath,"traceValue.csv")
#Red,Green,Signal=getSignalTrace (filename,traceType='TimeDiv',HighFreqRemoval=True,getBinTrace=False)
Signal_raw=getSignalTrace (filename,traceType='Constant',HighFreqRemoval=True,getBinTrace=False,bin_window=100)
#%%
import traceAnalysis as Ananlysis
bin_window=100
Signal_bin=Ananlysis.get_bin_trace(Signal_raw,bin_window=bin_window)
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Signal_bin,ax, fs=99.38, label="Binned to 100Hz",color="b")
#%%
import traceAnalysis as Ananlysis
bin_window=200
Red_bin=Ananlysis.get_bin_trace(Red,bin_window=bin_window)
Green_bin=Ananlysis.get_bin_trace(Green,bin_window=bin_window)
#%%
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Red_bin[0:500],ax, fs=49.7, label="Binned to 50Hz",color="r")
#ax.set_xlim([0, 0.1])
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Green_bin[0:500],ax, fs=49.7, label="Binned to 50Hz",color="g")
#ax.set_xlim([0, 0.1])
#%%
'''unmixing time division'''
# envelope detection separates the interleaved red/green channels
lmin,lg=SPADdemod.hl_envelopes_max(Green, dmin=4, dmax=7, split=True)
lmin,lr=SPADdemod.hl_envelopes_max(Red, dmin=4, dmax=7, split=True)
fig, ax = plt.subplots(figsize=(12, 3))
ax.plot(lg,Green[lg], color='g')
ax.plot(lr,Red[lr], color='r')
x_red, Red=SPADdemod.Interpolate_timeDiv (lr,Red)
x_green, Green=SPADdemod.Interpolate_timeDiv (lg,Green)
#%%
Signal=Ananlysis.getSignal_subtract(Red,Green,fs=49.7)
#%%
fig, (ax0, ax1,ax2) = plt.subplots(nrows=3)
ax0=Ananlysis.plot_trace(Green,ax0, fs=49.7, label="Green Signal 200Hz",color='g')
ax1=Ananlysis.plot_trace(Red,ax1, fs=49.7, label="Red Signal 200Hz", color='r')
ax2=Ananlysis.plot_trace(Signal,ax2, fs=49.7, label="Substract Signal 200Hz", color='b')
fig.tight_layout()
#%%
Signal=Ananlysis.butter_filter(Signal, btype='low', cutoff=100, fs=9938.4, order=5)
fig, ax = plt.subplots(figsize=(12, 2.5))
Ananlysis.plot_trace(Signal,ax, fs=9938.4, label="100Hz Low pass")
#%%
'''temporal bin dual channel'''
bin_window=200
Green_bin=Ananlysis.get_bin_trace(Green,bin_window=bin_window)
Red_bin=Ananlysis.get_bin_trace(Red,bin_window=bin_window)
Signal_binned=Ananlysis.get_bin_trace(Signal,bin_window=bin_window)
fig, (ax0, ax1,ax2) = plt.subplots(nrows=3)
ax0=Ananlysis.plot_trace(Green_bin,ax0, fs=99.384/2, label="Green Signal Binned 50Hz",color='g')
ax1=Ananlysis.plot_trace(Red_bin,ax1, fs=99.384/2, label="Red Signal Binned 50Hz", color='r')
ax2=Ananlysis.plot_trace(Signal_binned,ax2, fs=99.384/2, label="Substract Signal Binned 50Hz", color='b')
fig.tight_layout()
#%%
# spectrogram of the subtracted signal
fs=200
fig, ax = plt.subplots(figsize=(8, 2))
powerSpectrum, freqenciesFound, time, imageAxis = ax.specgram(Signal,Fs=fs,NFFT=1024, detrend='linear',vmin=-130)
ax.set_xlabel('Time (Second)')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 100])
#%%
# independent component analysis on the two binned channels
signal1,signal2=Ananlysis.getICA (Red_bin,Green_bin)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal1,ax, label="Signal1")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal2,ax, label="Signal2")
#%%
fig, (ax0, ax1) = plt.subplots(nrows=2)
ax0=Ananlysis.plot_trace(signal1,ax0, fs=99.384/2, label="signal1 Signal Binned 50Hz",color='g')
ax1=Ananlysis.plot_trace(signal2,ax1, fs=99.384/2, label="signal2 Signal Binned 50Hz", color='r')
fig.tight_layout()
#%%
'''temporal bin'''
bin_window=200
signal1_bin=Ananlysis.get_bin_trace(signal1,bin_window=bin_window)
signal2_bin=Ananlysis.get_bin_trace(signal2,bin_window=bin_window)
fig, (ax0, ax1) = plt.subplots(nrows=2)
ax0=Ananlysis.plot_trace(signal1_bin,ax0, fs=99.384/2, label="signal1 Signal Binned 50Hz",color='r')
ax1=Ananlysis.plot_trace(signal2_bin,ax1, fs=99.384/2, label="signal2 Signal Binned 50Hz", color='g')
fig.tight_layout()
#%%
Red,Green,Signal = getSignalTrace (filename, traceType='TimeDiv',HighFreqRemoval=False,getBinTrace=False)
#%%
# Plot the spectrogram
fig, ax = plt.subplots(figsize=(8, 2))
powerSpectrum, freqenciesFound, time, imageAxis = ax.specgram(signal2_bin, Fs=fs/200,NFFT=1024, detrend='linear',vmin=-130)
ax.set_xlabel('Time (Second)')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 250])
fig.colorbar(imageAxis,ax=ax)
#%%
# power spectral density (Welch) and band-power summaries
Ananlysis.PSD_plot (Signal,fs=9938.4/200,method="welch",color='tab:blue',linewidth=1)
fig=Ananlysis.plot_PSD_bands (Signal,fs=9938.4)
#%%
fig=Ananlysis.plot_PSD_bands (trace_binned,fs=9938.4/20)
#%% Low pass filter
'''Get trend and detrend'''
# trace_trend=Ananlysis.butter_filter(trace_clean, btype='low', cutoff=10, fs=9938.4, order=5)
# trace_detrend = Ananlysis.get_detrend(trace_binned)
#%%
'''USE FASTICE method'''
#Red,Green,signal1, signal2 = FreqShift_getICA (trace_clean,fc_g=1000,fc_r=2000,fs=9938.4)
#%%
'''PHOTOMETRY DATA ANALYSIS'''
# the same pipeline applied to photometry recordings (130 Hz sampling)
dpath= "C:/SPAD/SPADData/20220616"
filename=Ananlysis.Set_filename (dpath,csv_filename="1516995_cont-2022-06-16-145825.csv")
Green,Red=Ananlysis.Read_trace (filename,mode="photometry")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Green,ax, fs=130, label="GCamp6 Raw")
Gcamp=Ananlysis.butter_filter(Green, btype='low', cutoff=10, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Gcamp,ax, fs=130, label="GCamp6 10Hz lowpass")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Red,ax, fs=130, label="Isospestic Raw", color='m')
Iso=Ananlysis.butter_filter(Red, btype='low', cutoff=10, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Iso,ax, fs=130, label="Isospestic 10Hz lowpass", color='m')
#%%
sig=Ananlysis.getSignal_subtract(Red,Green,fs=130)
sig=Ananlysis.butter_filter(sig, btype='low', cutoff=20, fs=130, order=5)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(sig,ax, fs=130, label="Isospestic")
#%%
Signal=Ananlysis.getSignal_subtract(Red,Green,fs=130)
#%%
signal1,signal2=Ananlysis.getICA (Red,Green)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal1,ax, label="Signal1")
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(signal2,ax, label="Signal2")
#%%
Signal=Ananlysis.butter_filter(Signal, btype='low', cutoff=20, fs=130, order=10)
fig, ax = plt.subplots(figsize=(12, 3))
ax=Ananlysis.plot_trace(Signal,ax, fs=130, label="trace")
11399653224 |
"""
Fits, etc. to extracted spectra
"""
import os
import time
import warnings
import numpy as np
import scipy.ndimage as nd
from scipy.optimize import nnls
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.gridspec import GridSpec
import astropy.io.fits as pyfits
from grizli import utils
utils.set_warnings()
# Build the 3.3um PAH emission templates on a log-spaced rest-frame
# 2.4-4.5um grid.  PAH fitting is silently disabled if this fails.
try:
    import eazy
    wave = np.exp(np.arange(np.log(2.4), np.log(4.5), 1./4000))*1.e4
    _temp = utils.pah33(wave)
    PAH_TEMPLATES = {}
    for t in _temp:
        # Skip the 3.47um component
        if '3.47' in t:
            continue
        _tp = _temp[t]
        PAH_TEMPLATES[t] = eazy.templates.Template(name=t, arrays=(_tp.wave, _tp.flux))
except:
    # NOTE(review): bare `except:` hides *any* failure here (not just a
    # missing `eazy` install); consider `except Exception` + logging
    print('Failed to initialize PAH_TEMPLATES')
    PAH_TEMPLATES = {}
import grizli.utils_c
import astropy.units as u
import eazy.igm
# Inoue (2014) IGM absorption model used by the template generators below
igm = eazy.igm.Inoue14()
from . import drizzle
from . import utils as msautils
# Global rescaling factor applied to spectrum uncertainties
SCALE_UNCERTAINTY = 1.0
# try:
#     from prospect.utils.smoothing import smoothspec
# except (FileNotFoundError, TypeError):
#     if 'SPS_HOME' not in os.environ:
#         sps_home = 'xxxxdummyxxxx' #os.path.dirname(__file__)
#         print(f'msaexp: setting environment variable SPS_HOME={sps_home} '
#               'to be able to import prospect.utils.smoothing')
#         os.environ['SPS_HOME'] = sps_home
# Use FFT-based convolution in the sedpy/eazy smoothing helpers
FFTSMOOTH = False
__all__ = ["fit_redshift", "fit_redshift_grid", "plot_spectrum",
           "read_spectrum", "calc_uncertainty_scale",
           "SpectrumSampler"]
def test():
    """
    Development scratch routine: grid-search a redshift for a demo spectrum
    by fitting scaled emission-line + spline templates.

    Requires local data files ('macsj0647_1169.v1.spec.fits' and the sfhz
    template) and is not part of the public API.
    """
    from importlib import reload
    import msaexp.spectrum
    from tqdm import tqdm
    import msaexp.resample_numba
    from grizli import utils
    # Reload twice so cross-module references pick up edited code
    reload(msaexp.resample_numba); reload(msaexp.spectrum)
    reload(msaexp.resample_numba); reload(msaexp.spectrum)
    from msaexp.spectrum import SpectrumSampler
    import eazy.templates
    self = SpectrumSampler('macsj0647_1169.v1.spec.fits')
    t = eazy.templates.Template('templates/sfhz/fsps_4590.fits')
    z = 4.2418
    res = self.resample_eazy_template(t, z=z)
    line = self.resample_eazy_template(t, z=z)
    lw, lr = utils.get_line_wavelengths()
    k = 'highO32'
    # Redshift grid +/- 0.1 around the initial guess
    zg = np.linspace(z-0.1, z+0.1, 256)
    chi2 = zg*0.
    # Continuum splines (13) and a coarse line-scaling spline set (3)
    bspl = self.bspline_array(nspline=13, log=True)
    bspl2 = self.bspline_array(nspline=3, log=True)
    scale_disp = 1.2
    velocity_sigma = 100
    for i, zi in tqdm(enumerate(zg)):
        # NOTE(review): `fast_emission_line` does not accept `nsig`
        # (see its signature below), so this call raises TypeError as written
        lines = [self.fast_emission_line(w*(1+zi)/1.e4,
                                         line_flux=r,
                                         scale_disp=scale_disp,
                                         velocity_sigma=velocity_sigma,
                                         nsig=4)
                     for w, r in zip(lw[k], lr[k])]
        # Design matrix: summed line complex scaled by coarse splines + continuum
        A = np.vstack([np.array(lines).sum(axis=0)*bspl2] + [bspl])
        Ax = (A / self.spec['full_err'])
        yx = self.spec['flux'] / self.spec['full_err']
        # Weighted least squares on the valid pixels
        x = np.linalg.lstsq(Ax[:,self.valid].T, yx[self.valid].data, rcond=None)
        model = A.T.dot(x[0])
        resid = (self.spec['flux'] - model)/self.spec['full_err']
        chi2[i] = (resid[self.valid]**2).sum()
    # Refit at the chi2 minimum to recover the best model
    zi = zg[np.argmin(chi2)]
    lines = [self.fast_emission_line(w*(1+zi)/1.e4,
                                     line_flux=r,
                                     scale_disp=scale_disp,
                                     velocity_sigma=velocity_sigma,
                                     nsig=4)
                 for w, r in zip(lw[k], lr[k])]
    A = np.vstack([np.array(lines).sum(axis=0)*bspl2] + [bspl])
    Ax = (A / self.spec['full_err'])
    yx = self.spec['flux'] / self.spec['full_err']
    x = np.linalg.lstsq(Ax[:,self.valid].T, yx[self.valid].data, rcond=None)
    model = A.T.dot(x[0])
class SpectrumSampler(object):
    """
    Helper object that samples templates and emission lines onto the
    wavelength grid of an observed, extracted 1D spectrum
    """
    # 1D spectrum table, populated by `initialize_spec`
    spec = {}
    # Observed wavelengths, microns
    spec_wobs = None
    # Tabulated spectral resolution R = lambda/dlambda, defined as FWHM
    spec_R_fwhm = None
    # Boolean mask of valid 1D data
    valid = None

    def __init__(self, spec_input, **kwargs):
        """
        Helper functions for sampling templates onto the wavelength grid
        of an observed spectrum

        Parameters
        ----------
        spec_input : str, `~astropy.io.fits.HDUList`
            - `str` : spectrum filename, usually `[root].spec.fits`
            - `~astropy.io.fits.HDUList` : FITS data

        kwargs : dict
            NOTE: extra keywords are accepted but intentionally ignored so
            callers can pass mixed option sets (see `fit_redshift_grid`)

        Attributes
        ----------
        resample_func : func
            Template resampling function, from
            `msaexp.resample_numba.resample_template_numba` if possible and
            `msaexp.resample.resample_template` otherwise

        sample_line_func : func
            Emission line function, from
            `msaexp.resample_numba.sample_gaussian_line_numba` if possible
            and `msaexp.resample.sample_gaussian_line` otherwise

        spec : `~astropy.table.Table`
            1D spectrum table from the `SPEC1D` HDU of ``file``

        spec_wobs : array-like
            Observed wavelengths, microns

        spec_R_fwhm : array-like
            Tabulated spectral resolution `R = lambda / dlambda`, assumed to be
            defined as FWHM

        valid : array-like
            Boolean array of valid 1D data
        """
        # Prefer the numba-accelerated implementations when available
        try:
            from .resample_numba import resample_template_numba as resample_func
            from .resample_numba import sample_gaussian_line_numba as sample_line_func
        except ImportError:
            from .resample import resample_template as resample_func
            from .resample import sample_gaussian_line as sample_line_func

        self.resample_func = resample_func
        self.sample_line_func = sample_line_func

        self.initialize_spec(spec_input)
        self.initialize_emission_line()

    def __getitem__(self, key):
        """
        Return column ``key`` of the `spec` table
        """
        return self.spec[key]

    @property
    def meta(self):
        """
        Metadata of the `spec` table
        """
        return self.spec.meta

    def initialize_emission_line(self, nsamp=64):
        """
        Initialize the unit emission-line kernel: a discrete delta function at
        x=1 sampled on ``2*nsamp + 1`` points spanning +/-10% around the line
        center, normalized to unit integral

        Parameters
        ----------
        nsamp : int
            Half-width of the sampling grid, pixels
        """
        self.xline = np.linspace(-nsamp, nsamp, 2*nsamp+1)/nsamp*0.1+1
        self.yline = self.xline*0.
        self.yline[nsamp] = 1

        # ``np.trapz`` was renamed ``np.trapezoid`` in numpy 2.0; support both
        _trapz = getattr(np, 'trapezoid', None) or np.trapz
        self.yline /= _trapz(self.yline, self.xline)

    def initialize_spec(self, spec_input, **kwargs):
        """
        Read spectrum data and set the `spec`, `spec_wobs`, `spec_R_fwhm` and
        `valid` attributes

        Parameters
        ----------
        spec_input : str, `~astropy.io.fits.HDUList`
            Filename, usually `[root].spec.fits`, or an open HDU list

        kwargs : dict
            Keyword arguments passed to `msaexp.spectrum.read_spectrum`
        """
        self.spec_input = spec_input

        if isinstance(spec_input, str):
            self.file = spec_input
        else:
            self.file = None

        self.spec = read_spectrum(spec_input, **kwargs)
        self.spec_wobs = self.spec['wave'].astype(np.float32)
        self.spec_R_fwhm = self.spec['R'].astype(np.float32)

        # Valid where both the flux and the uncertainty are finite
        self.valid = np.isfinite(self.spec['flux']/self.spec['full_err'])

    def resample_eazy_template(self, template, z=0, scale_disp=1.0, velocity_sigma=100., fnu=True, nsig=4):
        """
        Smooth and resample an `eazy.templates.Template` object onto the
        observed wavelength grid of a spectrum

        Parameters
        ----------
        template : `eazy.templates.Template`
            Template object

        z : float
            Redshift

        scale_disp : float
            Factor multiplied to the tabulated spectral resolution before
            sampling

        velocity_sigma : float
            Gaussian velocity broadening factor, km/s

        fnu : bool
            Return resampled template in f-nu flux densities

        nsig : int
            Number of standard deviations to sample for the convolution

        Returns
        -------
        res : array-like
            Template flux density smoothed and resampled at the spectrum
            wavelengths
        """
        templ_wobs = template.wave.astype(np.float32)*(1+z)/1.e4

        if fnu:
            templ_flux = template.flux_fnu(z=z).astype(np.float32)
        else:
            templ_flux = template.flux_flam(z=z).astype(np.float32)

        res = self.resample_func(self.spec_wobs,
                                 self.spec_R_fwhm*scale_disp,
                                 templ_wobs,
                                 templ_flux,
                                 velocity_sigma=velocity_sigma,
                                 nsig=nsig)
        return res

    def emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100., nsig=4):
        """
        Make an emission line template - *deprecated in favor of*
        `~msaexp.spectrum.SpectrumSampler.fast_emission_line`

        Parameters
        ----------
        line_um : float
            Line center, microns

        line_flux : float
            Line normalization

        scale_disp : float
            Factor by which to scale the tabulated resolution FWHM curve

        velocity_sigma : float
            Velocity sigma width in km/s

        nsig : int
            Number of sigmas of the convolution kernel to sample

        Returns
        -------
        res : array-like
            Gaussian emission line sampled at the spectrum wavelengths
        """
        # Resample the unit delta-function kernel shifted to `line_um`
        res = self.resample_func(self.spec_wobs,
                                 self.spec_R_fwhm*scale_disp,
                                 self.xline*line_um,
                                 self.yline,
                                 velocity_sigma=velocity_sigma,
                                 nsig=nsig)
        return res*line_flux/line_um

    def fast_emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100.):
        """
        Make an emission line template with numerically correct pixel
        integration function

        Parameters
        ----------
        line_um : float
            Line center, microns

        line_flux : float
            Line normalization

        scale_disp : float
            Factor by which to scale the tabulated resolution FWHM curve

        velocity_sigma : float
            Velocity sigma width in km/s

        Returns
        -------
        res : array-like
            Gaussian emission line sampled at the spectrum wavelengths
        """
        res = self.sample_line_func(self.spec_wobs,
                                    self.spec_R_fwhm*scale_disp,
                                    line_um,
                                    line_flux=line_flux,
                                    velocity_sigma=velocity_sigma,
                                    )
        return res

    def bspline_array(self, nspline=13, log=False, get_matrix=True):
        """
        Initialize bspline templates for continuum fits

        Parameters
        ----------
        nspline : int
            Number of spline functions to sample across the wavelength range

        log : bool
            Sample in log(wavelength)

        get_matrix : bool
            If true, return array data.  Otherwise, return template objects

        Returns
        -------
        bspl : array-like
            bspline data, depending on ``get_matrix``
        """
        if get_matrix:
            bspl = utils.bspline_templates(wave=self.spec_wobs*1.e4,
                                           degree=3,
                                           df=nspline,
                                           log=log,
                                           get_matrix=get_matrix
                                           )
            # Transpose so that templates are rows of the design matrix
            bspl = bspl.T
        else:
            bspl = utils.bspline_templates(wave=self.spec_wobs*1.e4,
                                           degree=3,
                                           df=nspline,
                                           log=log,
                                           get_matrix=get_matrix
                                           )
        return bspl

    def redo_1d_extraction(self, **kwargs):
        """
        Redo 1D extraction from 2D arrays with
        `msaexp.drizzle.make_optimal_extraction`

        Parameters
        ----------
        kwargs : dict
            Keyword arguments passed to
            `msaexp.drizzle.make_optimal_extraction`

        Returns
        -------
        output : `~msaexp.spectrum.SpectrumSampler`
            A new `~msaexp.spectrum.SpectrumSampler` object

        Examples
        --------

        .. plot::
            :include-source:

            # Compare 1D extractions

            from msaexp import spectrum
            import matplotlib.pyplot as plt

            sp = spectrum.SpectrumSampler('https://s3.amazonaws.com/msaexp-nirspec/extractions/ceers-ddt-v1/ceers-ddt-v1_prism-clear_2750_1598.spec.fits')

            fig, axes = plt.subplots(2,1,figsize=(8,5), sharex=True, sharey=True)

            # Boxcar extraction, center pixel +/- 2 pix
            ax = axes[0]
            new = sp.redo_1d_extraction(ap_radius=2, bkg_offset=-6)
            ax.plot(sp['wave'], sp['flux'], alpha=0.5, label='Original optimal extraction')
            ax.plot(new['wave'], new['aper_flux'], alpha=0.5, label='Boxcar, y = 23 ± 2')
            ax.grid()
            ax.legend()

            # Extractions above and below the center
            ax = axes[1]
            low = sp.redo_1d_extraction(ap_center=21, ap_radius=1)
            hi = sp.redo_1d_extraction(ap_center=25, ap_radius=1)
            ax.plot(low['wave'], low['aper_flux']*1.5, alpha=0.5, label='Below, y = 21 ± 1', color='b')
            ax.plot(hi['wave'], hi['aper_flux']*3, alpha=0.5, label='Above, y = 25 ± 1', color='r')
            ax.set_xlim(0.9, 5.3)
            ax.grid()
            ax.legend()

            ax.set_xlabel(r'$\lambda$')
            for ax in axes:
                ax.set_ylabel(r'$\mu\mathrm{Jy}$')

            fig.tight_layout(pad=1)
        """
        if isinstance(self.spec_input, pyfits.HDUList):
            out_hdul = drizzle.extract_from_hdul(self.spec_input, **kwargs)
        else:
            with pyfits.open(self.file) as hdul:
                out_hdul = drizzle.extract_from_hdul(hdul, **kwargs)

        output = SpectrumSampler(out_hdul)
        return output

    def drizzled_hdu_figure(self, **kwargs):
        """
        Run `msaexp.utils.drizzled_hdu_figure` on array data

        Parameters
        ----------
        kwargs : dict
            Keyword arguments passed to `msaexp.utils.drizzled_hdu_figure`

        Returns
        -------
        fig : `~matplotlib.figure.Figure`
            Spectrum figure
        """
        if isinstance(self.spec_input, pyfits.HDUList):
            fig = msautils.drizzled_hdu_figure(self.spec_input, **kwargs)
        else:
            with pyfits.open(self.file) as hdul:
                fig = msautils.drizzled_hdu_figure(hdul, **kwargs)

        return fig
def smooth_template_disp_eazy(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function.

    *NB:* Not identical to the preferred
    `~msaexp.spectrum.SpectrumSampler.resample_eazy_template`

    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object

    wobs_um : array-like
        Target observed-frame wavelengths, microns

    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``

    z : float
        Target redshift

    velocity_fwhm : float
        Velocity dispersion FWHM, km/s

    scale_disp : float
        Scale factor applied to ``disp['R']``

    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.

    with_igm : bool
        NOTE(review): accepted for signature compatibility with the other
        ``smooth_template_disp_*`` functions but not used here — confirm
        whether `templ.to_observed_frame` applies IGM absorption internally.

    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    # Combined FWHM in velocity units: intrinsic velocity width plus the
    # instrumental resolution from the (scaled) dispersion curve
    dv = np.sqrt(velocity_fwhm**2 + (3.e5/disp['R']/scale_disp)**2)
    disp_ang = disp['WAVELENGTH']*1.e4
    # Gaussian sigma of the line-spread function in Angstroms (FWHM/2.35)
    dlam_ang = disp_ang*dv/3.e5/2.35

    def _lsf(wave):
        # LSF sigma interpolated to `wave`, clamped at the tabulated ends
        return np.interp(wave,
                         disp_ang,
                         dlam_ang,
                         left=dlam_ang[0], right=dlam_ang[-1],
                         )

    # Accept both plain arrays and Quantity-like inputs with a `.value`
    if hasattr(wobs_um,'value'):
        wobs_ang = wobs_um.value*1.e4
    else:
        wobs_ang = wobs_um*1.e4

    flux_model = templ.to_observed_frame(z=z,
                                         lsf_func=_lsf,
                                         clip_wavelengths=None,
                                         wavelengths=wobs_ang,
                                         smoothspec_kwargs={'fftsmooth':FFTSMOOTH},
                                         )

    if flambda:
        flux_model = np.squeeze(flux_model.flux_flam())
    else:
        flux_model = np.squeeze(flux_model.flux_fnu())

    return flux_model
def smooth_template_disp_sedpy(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function using
    the `sedpy`/`prospector` LSF smoothing function

    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object

    wobs_um : array-like
        Target observed-frame wavelengths, microns

    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``

    z : float
        Target redshift

    velocity_fwhm : float
        Velocity dispersion FWHM, km/s

    scale_disp : float
        Scale factor applied to ``disp['R']``

    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.

    with_igm : bool
        Apply `templ.igm_absorption` before smoothing

    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    from sedpy.smoothing import smoothspec

    wobs = templ.wave*(1+z)

    # Trim to slightly beyond the output range to limit the convolution cost
    trim = (wobs > wobs_um[0]*1.e4*0.95)
    trim &= (wobs < wobs_um[-1]*1.e4*1.05)

    if flambda:
        fobs = templ.flux_flam(z=z)
    else:
        fobs = templ.flux_fnu(z=z)

    if with_igm:
        fobs *= templ.igm_absorption(z)

    wobs = wobs[trim]
    fobs = fobs[trim]

    R = np.interp(wobs, disp['WAVELENGTH']*1.e4, disp['R'],
                  left=disp['R'][0], right=disp['R'][-1])*scale_disp

    # Combined FWHM: intrinsic velocity width plus instrumental resolution
    dv = np.sqrt(velocity_fwhm**2 + (3.e5/R)**2)
    # Gaussian sigma of the LSF in Angstroms
    dlam_ang = wobs*dv/3.e5/2.35

    def _lsf(wave):
        return np.interp(wave, wobs, dlam_ang)

    tsmooth = smoothspec(wobs, fobs,
                         smoothtype='lsf', lsf=_lsf,
                         outwave=wobs_um*1.e4,
                         fftsmooth=FFTSMOOTH,
                         )

    return tsmooth
def smooth_template_disp(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function by
    direct trapezoidal integration against a per-pixel Gaussian kernel

    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object

    wobs_um : array-like
        Target observed-frame wavelengths, microns

    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``

    z : float
        Target redshift

    velocity_fwhm : float
        Velocity dispersion FWHM, km/s

    scale_disp : float
        Scale factor applied to ``disp['R']``

    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.

    with_igm : bool
        Apply `templ.igm_absorption` before smoothing

    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    # Observed-frame template wavelengths, microns
    wobs = templ.wave*(1+z)/1.e4

    if flambda:
        fobs = templ.flux_flam(z=z)
    else:
        fobs = templ.flux_fnu(z=z)

    if with_igm:
        fobs *= templ.igm_absorption(z)

    disp_r = np.interp(wobs, disp['WAVELENGTH'], disp['R'])*scale_disp

    # Combined FWHM in microns: instrumental + velocity broadening
    fwhm_um = np.sqrt((wobs/disp_r)**2 + (velocity_fwhm/3.e5*wobs)**2)
    # Floor the Gaussian sigma at half the local grid spacing so the kernel
    # is always resolved by the template sampling
    sig_um = np.maximum(fwhm_um/2.35, 0.5*np.gradient(wobs))

    # Kernel matrix: rows = output wavelengths, columns = template samples
    x = wobs_um[:,np.newaxis] - wobs[np.newaxis,:]
    gaussian_kernel = 1./np.sqrt(2*np.pi*sig_um**2)*np.exp(-x**2/2/sig_um**2)

    # ``np.trapz`` was renamed ``np.trapezoid`` in numpy 2.0; support both
    _trapz = getattr(np, 'trapezoid', None) or np.trapz
    tsmooth = _trapz(gaussian_kernel*fobs, x=wobs, axis=1)

    return tsmooth
SMOOTH_TEMPLATE_DISP_FUNC = smooth_template_disp_eazy
def fit_redshift(file='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z0=[0.2, 10], zstep=None, eazy_templates=None, nspline=None, scale_disp=1.3, vel_width=100, Rline=None, is_prism=False, use_full_dispersion=False, ranges=None, sys_err=0.02, **kwargs):
    """
    Fit spectrum for the redshift

    Parameters
    ----------
    file : str
        Spectrum filename

    z0 : (float, float)
        Redshift range
        (NOTE(review): mutable default list; read-only here so benign)

    zstep : (float, float)
        Step sizes in `dz/(1+z)` for the coarse and fine passes

    eazy_templates : list, None
        List of `eazy.templates.Template` objects.  If not provided, just use
        dummy spline continuum and emission line templates

    nspline : int
        Number of splines to use for dummy continuum

    scale_disp : float
        Scale factor of nominal dispersion files, i.e., `scale_disp > 1`
        *increases* the spectral resolution

    vel_width : float
        Velocity width the emission line templates

    Rline : float
        Original spectral resolution used to sample the line templates

    is_prism : bool
        Is the spectrum from the prism?

    use_full_dispersion : bool
        Convolve `eazy_templates` with the full wavelength-dependent
        dispersion function

    ranges : list of tuples
        Wavelength ranges for the subplots

    sys_err : float
        Systematic uncertainty added in quadrature with nominal uncertainties

    Returns
    -------
    fig : Figure
        Diagnostic figure

    sp : `~astropy.table.Table`
        A copy of the 1D spectrum as fit with additional columns describing the
        best-fit templates

    data : dict
        Fit metadata

    Side effects: writes `[froot].chi2.png`, `[froot].zfit.png`,
    `[froot].spec.zfit.fits`, `[froot].zfit.yaml`, `[froot].yaml` and,
    with templates, `[froot].spl.png`.
    """
    import yaml

    # Dump floats with fixed 6-decimal precision in the output YAML files
    def float_representer(dumper, value):
        text = '{0:.6f}'.format(value)
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)
    yaml.add_representer(float, float_representer)

    #is_prism |= ('clear' in file)
    spec = read_spectrum(file, sys_err=sys_err, **kwargs)
    is_prism |= spec.grating in ['prism']

    # Output file root from the input filename
    if 'spec.fits' in file:
        froot = file.split('.spec.fits')[0]
    else:
        froot = file.split('.fits')[0]

    # Default coarse/fine grid steps by disperser
    if zstep is None:
        if (is_prism):
            step0 = 0.002
            step1 = 0.0001
        else:
            step0 = 0.001
            step1 = 0.00002
    else:
        step0, step1 = zstep

    if Rline is None:
        if is_prism:
            Rline = 1000
        else:
            Rline = 5000

    # First pass: coarse grid over the full z0 range
    zgrid = utils.log_zgrid(z0, step0)
    zg0, chi0 = fit_redshift_grid(file, zgrid=zgrid,
                                  line_complexes=False,
                                  vel_width=vel_width,
                                  scale_disp=scale_disp,
                                  eazy_templates=eazy_templates,
                                  Rline=Rline,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

    zbest0 = zg0[np.argmin(chi0)]

    # Second pass: fine grid +/- 0.005*(1+z) around the coarse minimum
    zgrid = utils.log_zgrid(zbest0 + np.array([-0.005, 0.005])*(1+zbest0),
                            step1)
    zg1, chi1 = fit_redshift_grid(file, zgrid=zgrid,
                                  line_complexes=False,
                                  vel_width=vel_width,
                                  scale_disp=scale_disp,
                                  eazy_templates=eazy_templates,
                                  Rline=Rline,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

    zbest = zg1[np.argmin(chi1)]

    # Chi-squared vs redshift diagnostic figure
    fz, az = plt.subplots(1,1,figsize=(6,4))
    az.plot(zg0, chi0)
    az.plot(zg1, chi1)
    az.set_ylim(chi1.min()-50, chi1.min() + 10**2)
    az.grid()
    az.set_xlabel('redshift')
    az.set_ylabel(r'$\chi^2$')
    az.set_title(os.path.basename(file))
    fz.tight_layout(pad=1)
    fz.savefig(froot+'.chi2.png')

    # Default subplot wavelength ranges and spline counts per disperser
    if is_prism:
        if ranges is None:
            ranges = [(3427, 5308), (6250, 9700)]
        if nspline is None:
            nspline = 41
    else:
        if ranges is None:
            ranges = [(3680, 4400), (4861-50, 5008+50), (6490, 6760)]
        if nspline is None:
            nspline = 23

    # Full fit + figure at the best redshift
    fig, sp, data = plot_spectrum(file, z=zbest, show_cont=True,
                                  draws=100, nspline=nspline,
                                  figsize=(16, 8), vel_width=vel_width,
                                  ranges=ranges, Rline=Rline,
                                  scale_disp=scale_disp,
                                  eazy_templates=eazy_templates,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

    if eazy_templates is not None:
        # Also run the spline-only fit for comparison
        spl_fig, sp2, spl_data = plot_spectrum(file, z=zbest, show_cont=True,
                                  draws=100, nspline=nspline,
                                  figsize=(16, 8), vel_width=vel_width,
                                  ranges=ranges, Rline=Rline,
                                  scale_disp=scale_disp,
                                  eazy_templates=None,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

        # Copy the spline-fit products into the main metadata as `spl_*`
        for k in ['coeffs', 'covar', 'model', 'mline', 'fullchi2', 'contchi2']:
            if k in spl_data:
                data[f'spl_{k}'] = spl_data[k]

        spl_fig.savefig(froot+'.spl.png')

        sp['spl_model'] = sp2['model']

    sp['wave'].unit = u.micron
    sp['flux'].unit = u.microJansky

    sp.write(froot+'.spec.zfit.fits', overwrite=True)

    # Chi-squared grids, saved separately
    zdata = {}
    zdata['zg0'] = zg0.tolist()
    zdata['chi0'] = chi0.tolist()
    zdata['zg1'] = zg1.tolist()
    zdata['chi1'] = chi1.tolist()

    # Significance proxy: median minus minimum of the coarse chi2 grid
    data['dchi2'] = float(np.nanmedian(chi0) - np.nanmin(chi0))

    # Drop large/non-serializable entries before writing YAML
    for k in ['templates','spl_covar','covar']:
        if k in data:
            _ = data.pop(k)

    with open(froot+'.zfit.yaml', 'w') as fp:
        yaml.dump(zdata, stream=fp)

    with open(froot+'.yaml', 'w') as fp:
        yaml.dump(data, stream=fp)

    fig.savefig(froot+'.zfit.png')

    return fig, sp, data
# Names of the hydrogen recombination lines (Balmer, Paschen, Brackett)
# as used in the grizli line-wavelength dictionaries
H_RECOMBINATION_LINES = ['Ha+NII', 'Ha','Hb','Hg','Hd',
                         'PaA','PaB','PaG','PaD','Pa8',
                         'BrA','BrB','BrG','BrD']
def make_templates(sampler, z, bspl={}, eazy_templates=None, vel_width=100, broad_width=4000, broad_lines=[], scale_disp=1.3, use_full_dispersion=False, disp=None, grating='prism', halpha_prism=['Ha+NII'], oiii=['OIII'], o4363=[], sii=['SII'], lorentz=False, with_pah=True, **kwargs):
    """
    Generate fitting templates sampled on the wavelength grid of a spectrum

    sampler : `~msaexp.spectrum.SpectrumSampler`
        Spectrum wrapper providing the observed wavelength grid, validity
        mask and the template/line resampling functions

    z : float
        Redshift

    bspl : array
        Spline template design matrix for the dummy continuum
        (see `SpectrumSampler.bspline_array`)

    eazy_templates : list
        Optional list of `eazy.templates.Template` template objects to use in
        place of the spline + line templates.  A two-element list of dicts is
        interpreted as explicit ``(lw, lr)`` line wavelength/ratio tables; a
        single-element list is fit as that one template scaled by the splines.

    vel_width : float
        Velocity width of the individual emission line templates, km/s

    broad_width : float
        Velocity width used for lines listed in ``broad_lines``, km/s

    halpha_prism : ['Ha+NII'], ['Ha','NII']
        Line template names to use for Halpha and [NII], i.e., ``['Ha+NII']``
        fits with a fixed line ratio and `['Ha','NII']` fits them separately
        but with a fixed line ratio 6548:6584 = 1:3

    oiii : ['OIII'], ['OIII-4959','OIII-5007']
        Similar for [OIII]4959+5007, ``['OIII']`` fits as a doublet with fixed
        ratio 4959:5007 = 1:2.98 and ``['OIII-4949', 'OIII-5007']`` fits them
        independently.

    o4363 : [] or ['OIII-4363']
        How to fit [OIII]4363.

    sii : ['SII'], ['SII-6717','SII-6731']
        [SII] doublet

    lorentz : bool
        Use Lorentzian profile for lines
        (NOTE(review): not used by this function; only the deprecated
        `old_make_templates` honors it)

    with_pah : bool
        Include the 3.3um PAH templates when the feature falls in range

    Returns
    -------
    templates : list
        List of the computed template names

    tline : array
        Boolean list of which templates are line components

    _A : (NT, NWAVE) array
        Design matrix of templates sampled at the spectrum wavelengths
    """
    from grizli import utils

    wobs = sampler.spec_wobs
    # NOTE(review): `wrest` is computed but never used below
    wrest = wobs/(1+z)*1.e4
    wmask = sampler.valid

    wmin = wobs[wmask].min()
    wmax = wobs[wmask].max()

    templates = []
    tline = []

    if eazy_templates is None:
        # Spline continuum + individual emission-line templates
        lw, lr = utils.get_line_wavelengths()

        _A = [bspl*1]
        for i in range(bspl.shape[0]):
            templates.append(f'spl {i}')
            tline.append(False)

        if grating in ['prism']:
            hlines = ['Hb', 'Hg', 'Hd']

            if z > 4:
                # Blue lines partially resolved by the prism at high z
                oiii = ['OIII-4959','OIII-5007']
                hene = ['HeII-4687', 'NeIII-3867','HeI-3889']
                o4363 = ['OIII-4363']
            else:
                hene = ['HeI-3889']

            hlines += halpha_prism + ['NeIII-3968']
            fuv = ['OIII-1663']
            oii_7320 = ['OII-7325']
            extra = []
        else:
            # Higher-resolution gratings: fit multiplet components separately
            hlines = ['Hb', 'Hg', 'Hd','H8','H9', 'H10', 'H11', 'H12']
            hene = ['HeII-4687', 'NeIII-3867']
            o4363 = ['OIII-4363']
            oiii = ['OIII-4959','OIII-5007']
            sii = ['SII-6717', 'SII-6731']

            hlines += ['Ha', 'NII-6549', 'NII-6584']
            hlines += ['H7', 'NeIII-3968']
            fuv = ['OIII-1663', 'HeII-1640', 'CIV-1549']
            oii_7320 = ['OII-7323', 'OII-7332']
            extra = ['HeI-6680', 'SIII-6314']

        line_names = []
        line_waves = []

        # Select lines that fall within the valid observed wavelength range
        for l in [*hlines, *oiii, *o4363, 'OII',
                  *hene,
                  *sii,
                  *oii_7320,
                  'ArIII-7138', 'ArIII-7753', 'SIII-9068', 'SIII-9531',
                  'OI-6302', 'PaD', 'PaG', 'PaB', 'PaA', 'HeI-1083',
                  'BrA','BrB','BrG','BrD','PfB','PfG','PfD','PfE',
                  'Pa8','Pa9','Pa10',
                  'HeI-5877',
                  *fuv,
                  'CIII-1906', 'NIII-1750', 'Lya',
                  'MgII', 'NeV-3346', 'NeVI-3426',
                  'HeI-7065', 'HeI-8446',
                  *extra
                  ]:

            if l not in lw:
                continue

            lwi = lw[l][0]*(1+z)

            if lwi < wmin*1.e4:
                continue

            if lwi > wmax*1.e4:
                continue

            line_names.append(l)
            line_waves.append(lwi)

        # Process lines in order of observed wavelength
        so = np.argsort(line_waves)
        line_waves = np.array(line_waves)[so]

        for iline in so:
            l = line_names[iline]
            lwi = lw[l][0]*(1+z)

            # Redundant with the selection above, kept as a guard
            if lwi < wmin*1.e4:
                continue

            if lwi > wmax*1.e4:
                continue

            name = f'line {l}'

            # Sum multiplet components with their fixed flux ratios
            for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                lwi = lwi0*(1+z)/1.e4
                if l in broad_lines:
                    vel_i = broad_width
                else:
                    vel_i = vel_width

                line_i = sampler.fast_emission_line(lwi,
                                                    line_flux=lri/np.sum(lr[l]),
                                                    scale_disp=scale_disp,
                                                    velocity_sigma=vel_i,)
                if i == 0:
                    line_0 = line_i
                else:
                    line_0 += line_i

            # 1/1.e4: micron -> Angstrom flux normalization
            _A.append(line_0/1.e4)
            templates.append(name)
            tline.append(True)

        if with_pah:
            # Add the 3.3um PAH complex when it falls in range
            xpah = 3.3*(1+z)
            if ((xpah > wmin) & (xpah < wmax)) | (0):
                for t in PAH_TEMPLATES:
                    tp = PAH_TEMPLATES[t]
                    tflam = sampler.resample_eazy_template(tp,
                                          z=z,
                                          velocity_sigma=vel_width,
                                          scale_disp=scale_disp,
                                          fnu=False)
                    _A.append(tflam)
                    templates.append(t)
                    tline.append(True)

        _A = np.vstack(_A)

        # Apply IGM absorption with a floor of 0.01
        # NOTE(review): `ll` is computed but never used
        ll = wobs.value*1.e4/(1+z) < 1215.6
        igmz = igm.full_IGM(z, wobs.value*1.e4)
        _A *= np.maximum(igmz, 0.01)
    else:
        if isinstance(eazy_templates[0], dict) & (len(eazy_templates) == 2):
            # lw, lr dicts
            lw, lr = eazy_templates
            _A = [bspl*1]
            for i in range(bspl.shape[0]):
                templates.append(f'spl {i}')
                tline.append(False)

            for l in lw:
                name = f'line {l}'
                line_0 = None
                # Sum the in-range multiplet components
                for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                    lwi = lwi0*(1+z)/1.e4
                    if lwi < wmin:
                        continue
                    elif lwi > wmax:
                        continue

                    if l in broad_lines:
                        vel_i = broad_width
                    else:
                        vel_i = vel_width

                    line_i = sampler.fast_emission_line(lwi,
                                                line_flux=lri/np.sum(lr[l]),
                                                scale_disp=scale_disp,
                                                velocity_sigma=vel_i,)
                    if line_0 is None:
                        line_0 = line_i
                    else:
                        line_0 += line_i

                # Only keep the template if at least one component was in range
                if line_0 is not None:
                    _A.append(line_0/1.e4)
                    templates.append(name)
                    tline.append(True)

            _A = np.vstack(_A)

            # NOTE(review): `ll` is computed but never used
            ll = wobs.value*1.e4/(1+z) < 1215.6
            igmz = igm.full_IGM(z, wobs.value*1.e4)
            _A *= np.maximum(igmz, 0.01)

        elif len(eazy_templates) == 1:
            # Scale single template by spline
            t = eazy_templates[0]
            for i in range(bspl.shape[0]):
                templates.append(f'{t.name} spl {i}')
                tline.append(False)

            tflam = sampler.resample_eazy_template(t,
                                  z=z,
                                  velocity_sigma=vel_width,
                                  scale_disp=scale_disp,
                                  fnu=False)
            _A = np.vstack([bspl*tflam])

            # NOTE(review): `ll` is computed but never used
            ll = wobs.value*1.e4/(1+z) < 1215.6
            igmz = igm.full_IGM(z, wobs.value*1.e4)
            _A *= np.maximum(igmz, 0.01)
        else:
            # General template list: one design-matrix row per template
            templates = []
            tline = []
            _A = []
            for i, t in enumerate(eazy_templates):
                tflam = sampler.resample_eazy_template(t,
                                  z=z,
                                  velocity_sigma=vel_width,
                                  scale_disp=scale_disp,
                                  fnu=False)
                _A.append(tflam)
                templates.append(t.name)
                tline.append(False)

            _A = np.vstack(_A)

    return templates, np.array(tline), _A
def old_make_templates(wobs, z, wfull, wmask=None, bspl={}, eazy_templates=None, vel_width=100, broad_width=4000, broad_lines=[], scale_disp=1.3, use_full_dispersion=False, disp=None, grating='prism', halpha_prism=['Ha+NII'], oiii=['OIII'], o4363=[], sii=['SII'], lorentz=False, **kwargs):
    """
    Generate fitting templates.

    Deprecated: superseded by `make_templates`, which samples lines with
    the `SpectrumSampler` pixel-integrating functions instead of
    `grizli.utils.SpectrumTemplate`.

    wobs : array
        Observed-frame wavelengths of the spectrum to fit, microns

    z : float
        Redshift

    wfull : array
        Full wavelength array of the templates

    wmask : array-like
        Boolean mask on `wobs` for valid data

    bspl : dict
        Spline templates for dummy continuum

    eazy_templates : list
        Optional list of `eazy.templates.Template` template objects to use in
        place of the spline + line templates

    vel_width : float
        Velocity width of the individual emission line templates

    halpha_prism : ['Ha+NII'], ['Ha','NII']
        Line template names to use for Halpha and [NII], i.e., ``['Ha+NII']``
        fits with a fixed line ratio and `['Ha','NII']` fits them separately
        but with a fixed line ratio 6548:6584 = 1:3

    oiii : ['OIII'], ['OIII-4959','OIII-5007']
        Similar for [OIII]4959+5007, ``['OIII']`` fits as a doublet with fixed
        ratio 4959:5007 = 1:2.98 and ``['OIII-4949', 'OIII-5007']`` fits them
        independently.

    o4363 : [] or ['OIII-4363']
        How to fit [OIII]4363.

    sii : ['SII'], ['SII-6717','SII-6731']
        [SII] doublet

    lorentz : bool
        Use Lorentzian profile for lines

    Returns
    -------
    templates : list
        List of the computed template objects

    tline : array
        Boolean list of which templates are line components

    _A : (NT, NWAVE) array
        Design matrix of templates interpolated at `wobs`
    """
    from grizli import utils

    lw, lr = utils.get_line_wavelengths()

    # Rest-frame wavelengths, Angstroms
    wrest = wobs/(1+z)*1.e4

    if wmask is None:
        wmask = np.isfinite(wobs)

    wmin = wobs[wmask].min()
    wmax = wobs[wmask].max()

    if eazy_templates is None:
        # Spline continuum + emission-line templates
        templates = {}
        for k in bspl:
            templates[k] = bspl[k]

        if grating in ['prism']:
            hlines = ['Hb', 'Hg', 'Hd']

            if z > 4:
                # Blue lines partially resolved by the prism at high z
                oiii = ['OIII-4959','OIII-5007']
                hene = ['HeII-4687', 'NeIII-3867','HeI-3889']
                o4363 = ['OIII-4363']
            else:
                hene = ['HeI-3889']

            hlines += halpha_prism + ['NeIII-3968']
            fuv = ['OIII-1663']
            oii_7320 = ['OII-7325']
            extra = []
        else:
            # Higher-resolution gratings: fit multiplet components separately
            hlines = ['Hb', 'Hg', 'Hd','H8','H9', 'H10', 'H11', 'H12']
            hene = ['HeII-4687', 'NeIII-3867']
            o4363 = ['OIII-4363']
            oiii = ['OIII-4959','OIII-5007']
            sii = ['SII-6717', 'SII-6731']

            hlines += ['Ha', 'NII-6549', 'NII-6584']
            hlines += ['H7', 'NeIII-3968']
            fuv = ['OIII-1663', 'HeII-1640', 'CIV-1549']
            oii_7320 = ['OII-7323', 'OII-7332']
            extra = ['HeI-6680', 'SIII-6314']

        for l in [*hlines, *oiii, *o4363, 'OII',
                  *hene,
                  *sii,
                  *oii_7320,
                  'ArIII-7138', 'ArIII-7753', 'SIII-9068', 'SIII-9531',
                  'OI-6302', 'PaD', 'PaG', 'PaB', 'PaA', 'HeI-1083',
                  'BrA','BrB','BrG','BrD','PfB','PfG','PfD','PfE',
                  'Pa8','Pa9','Pa10',
                  'HeI-5877',
                  *fuv,
                  'CIII-1906', 'NIII-1750', 'Lya',
                  'MgII', 'NeV-3346', 'NeVI-3426',
                  'HeI-7065', 'HeI-8446',
                  *extra
                  ]:

            if l not in lw:
                continue

            lwi = lw[l][0]*(1+z)

            # Skip lines outside of the valid wavelength range
            if lwi < wmin*1.e4:
                continue

            if lwi > wmax*1.e4:
                continue

            name = f'line {l}'

            # Build the (multiplet) line as a sum of Gaussian/Lorentzian
            # SpectrumTemplate components with fixed flux ratios
            for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                lwi = lwi0*(1+z)

                disp_r = np.interp(lwi/1.e4, disp['WAVELENGTH'],
                                   disp['R'])*scale_disp

                if l in broad_lines:
                    vel_i = broad_width
                else:
                    vel_i = vel_width

                # Combined instrumental + velocity FWHM, Angstroms
                fwhm_ang = np.sqrt((lwi/disp_r)**2 + (vel_i/3.e5*lwi)**2)

                if i == 0:
                    templates[name] = utils.SpectrumTemplate(wave=wfull,
                                                             flux=None,
                                                             central_wave=lwi,
                                                             fwhm=fwhm_ang,
                                                             name=name,
                                                             lorentz=lorentz)
                    templates[name].flux *= lri/np.sum(lr[l])
                else:
                    templates[name].flux += utils.SpectrumTemplate(wave=wfull,
                                                         flux=None,
                                                         central_wave=lwi,
                                                         fwhm=fwhm_ang,
                                                         lorentz=lorentz,
                                                         name=name).flux*lri/np.sum(lr[l])

        _, _A, tline = utils.array_templates(templates,
                                             max_R=10000,
                                             wave=wobs.astype(float)*1.e4,
                                             apply_igm=False)

        # Apply IGM absorption with a floor of 0.01
        # NOTE(review): `ll` is computed but never used
        ll = wobs.value*1.e4/(1+z) < 1215.6
        igmz = igm.full_IGM(z, wobs.value*1.e4)
        _A *= np.maximum(igmz, 0.01)
    else:
        templates = {}

        if use_full_dispersion:
            # Convolve each template with the full wavelength-dependent
            # dispersion function
            _A = []
            tline = np.zeros(len(eazy_templates), dtype=bool)

            for i, t in enumerate(eazy_templates):
                templates[t.name] = 0.

                tflam = SMOOTH_TEMPLATE_DISP_FUNC(t,
                                                  wobs,
                                                  disp,
                                                  z,
                                                  velocity_fwhm=vel_width,
                                                  scale_disp=scale_disp,
                                                  flambda=True)

                _A.append(tflam)
                tline[i] = t.name.startswith('line ')

            _A = np.array(_A)
        else:
            for t in eazy_templates:
                tflam = t.flux_flam(z=z)
                templates[t.name] = utils.SpectrumTemplate(wave=t.wave,
                                                  flux=tflam, name=t.name)

            # ToDo: smooth with dispersion
            _, _A, tline = utils.array_templates(templates,
                                                 max_R=10000,
                                                 wave=wrest,
                                                 z=z, apply_igm=True)

            # Light smoothing in lieu of the full dispersion convolution
            for i in range(len(templates)):
                _A[i,:] = nd.gaussian_filter(_A[i,:], 0.5)

    return templates, tline, _A
def fit_redshift_grid(file='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', zgrid=None, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, line_complexes=True, Rline=1000, eazy_templates=None, use_full_dispersion=True, sys_err=0.02, use_aper_columns=False, **kwargs):
    """
    Fit redshifts on a grid
    Parameters
    ----------
    zgrid : array
        Redshifts to fit.  NOTE(review): there is no default handling for
        ``zgrid=None`` below (``chi2 = zgrid*0.`` would raise), so the caller
        must always supply an array.
    others : see `msaexp.spectrum.fit_redshift`
    Returns
    -------
    zgrid : array
        Copy of `zgrid`
    chi2 : array
        Chi-squared of the template fits at redshifts from `zgrid`
    """
    import time
    import os
    from tqdm import tqdm
    import astropy.io.fits as pyfits
    import numpy as np
    from grizli import utils
    import grizli.utils_c
    import astropy.units as u
    import eazy.igm
    import matplotlib.pyplot as plt
    #spec = read_spectrum(file, sys_err=sys_err)
    # Wrap the spectrum in a sampler object that provides the spline basis
    sampler = SpectrumSampler(file, **kwargs)
    spec = sampler.spec
    # Choose between aperture-extracted and standard flux columns;
    # use_aper_columns > 1 additionally applies the tabulated aperture correction
    if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):
        if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):
            ap_corr = spec['aper_corr']*1
        else:
            ap_corr = 1
        flam = spec['aper_flux']*spec['to_flam']*ap_corr
        eflam = spec['aper_full_err']*spec['to_flam']*ap_corr
    else:
        flam = spec['flux']*spec['to_flam']
        eflam = spec['full_err']*spec['to_flam']
    wobs = spec['wave']
    mask = spec['valid']
    # Invalid pixels are NaN-ed out so they never enter the fit
    flam[~mask] = np.nan
    eflam[~mask] = np.nan
    #spline = utils.bspline_templates(wave=spec['wave']*1.e4, degree=3, df=nspline)
    # Continuum spline basis evaluated on the spectrum wavelength grid
    bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)
    chi2 = zgrid*0.
    #bspl = utils.bspline_templates(wave=spec['wave']*1.e4, degree=3, df=nspline) #, log=True)
    # w0 = utils.log_zgrid([spec['wave'].min()*1.e4,
    #                      spec['wave'].max()*1.e4], 1./Rline)
    # Evaluate chi2 of the template fit at each trial redshift
    for iz, z in tqdm(enumerate(zgrid)):
        templates, tline, _A = make_templates(sampler, z,
                                              bspl=bspl,
                                              eazy_templates=eazy_templates,
                                              vel_width=vel_width,
                                              scale_disp=scale_disp,
                                              use_full_dispersion=use_full_dispersion,
                                              disp=spec.disp,
                                              grating=spec.grating,
                                              **kwargs,
                                              )
        # Drop templates with no coverage in the valid pixels
        okt = _A[:,mask].sum(axis=1) > 0
        # Inverse-variance weighting of the design matrix and data vector
        _Ax = _A[okt,:]/eflam
        _yx = flam/eflam
        # Unconstrained least squares for spline-only fits, non-negative
        # least squares when physical (eazy) templates are used
        if eazy_templates is None:
            _x = np.linalg.lstsq(_Ax[:,mask].T,
                                 _yx[mask], rcond=None)
        else:
            _x = nnls(_Ax[:,mask].T, _yx[mask])
        coeffs = np.zeros(_A.shape[0])
        coeffs[okt] = _x[0]
        _model = _A.T.dot(coeffs)
        chi = (flam - _model) / eflam
        chi2_i = (chi[mask]**2).sum()
        # print(z, chi2_i)
        chi2[iz] = chi2_i
    return zgrid, chi2
def calc_uncertainty_scale(file=None, data=None, method='bfgs', order=3, update=True, verbose=True, init=(1, 3), **kwargs):
    """
    Compute a polynomial scaling of the spectrum uncertainties. The procedure is to fit for
    coefficients of a polynomial multiplied to the `err` array of the spectrum such that
    `(flux - model)/(err*scl)` residuals are `N(0,1)`
    Parameters
    ----------
    file : str
        Spectrum filename
    data : tuple
        Precomputed outputs from `msaexp.spectrum.plot_spectrum`
    method : str
        Optimization method for `scipy.optimize.minimize`
    order : int
        Degree of the correction polynomial
    update : bool
        Update the global `msaexp.spectrum.SCALE_UNCERTAINTY` array with the fit result
    verbose : bool
        Print status messages
    init : (float, float)
        Masking for the fit initialization
    kwargs : dict
        Keyword arguments for `msaexp.spectrum.plot_spectrum` if `data` not specified
    Returns
    -------
    spec : `~astropy.table.Table`
        The spectrum as fit
    escale : array
        The wavelength-dependent scaling of the uncertainties
    res : object
        Output from `scipy.optimize.minimize`
    """
    from scipy.stats import norm
    from scipy.optimize import minimize
    global SCALE_UNCERTAINTY
    # Reset any previous global scaling before (re)fitting
    SCALE_UNCERTAINTY = 1.0
    if data is None:
        spec, spl, _ = plot_spectrum(file=file, eazy_templates=None,
                                     get_spl_templates=True,
                                     **kwargs
                                     )
    else:
        spec, spl = data
    # Valid pixels: positive uncertainty, nonzero and finite flux
    ok = (spec['err'] > 0) & (spec['flux'] != 0)
    ok &= np.isfinite(spec['err']+spec['flux'])
    if init is not None:
        # Initialization pass: fit a spline model with scaled uncertainties
        # and clip |residual| > init[1] sigma outliers before the optimization
        err = init[0]*spec['err']
        if 'escale' in spec.colnames:
            err *= spec['escale']
        # 2% systematic floor added in quadrature
        err = np.sqrt(err**2 + (0.02*spec['flux'])**2)
        _Ax = spl/err
        _yx = spec['flux']/err
        _x = np.linalg.lstsq(_Ax[:,ok].T, _yx[ok], rcond=None)
        _model = spl.T.dot(_x[0])
        ok &= np.abs((spec['flux']-_model)/err) < init[1]
    def objfun_scale_uncertainties(c):
        # Negative log-likelihood of spline-fit residuals given an error
        # array scaled by 10**polyval(c, wave)
        err = 10**np.polyval(c, spec['wave'])*spec['err']
        if 'escale' in spec.colnames:
            err *= spec['escale']
        err = np.sqrt(err**2 + (0.02*spec['flux'])**2)
        _Ax = spl/err
        _yx = spec['flux']/err
        _x = np.linalg.lstsq(_Ax[:,ok].T, _yx[ok], rcond=None)
        _model = spl.T.dot(_x[0])
        lnp = norm.logpdf((spec['flux']-_model)[ok],
                          loc=_model[ok]*0.,
                          scale=err[ok]).sum()
        if verbose > 1:
            print(c, lnp)
        return -lnp/2.
    # objfun_scale_uncertainties([0.0])
    c0 = np.zeros(order+1)
    #c0[-1] = np.log10(3)
    res = minimize(objfun_scale_uncertainties, c0, method=method)
    if update:
        if verbose:
            print('Set SCALE_UNCERTAINTY: ', res.x)
        # Store polynomial coefficients globally; read_spectrum applies them
        SCALE_UNCERTAINTY = res.x
    return spec, 10**np.polyval(res.x, spec['wave']), res
def setup_spectrum(file, **kwargs):
    """Deprecated alias for `msaexp.spectrum.read_spectrum`.

    Kept for backward compatibility; all arguments are forwarded unchanged.
    """
    spectrum = read_spectrum(file, **kwargs)
    return spectrum
def read_spectrum(inp, spectrum_extension='SPEC1D', sys_err=0.02, err_mask=(10,0.5), err_median_filter=[11, 0.8], **kwargs):
    """
    Read a spectrum and apply flux and/or uncertainty scaling
    Flux scaling `corr` is applied if there are `POLY[i]` keywords in the spectrum
    metadata, with
    .. code-block:: python
        :dedent:
        >>> coeffs = [header[f'POLY{i}'] for i in range(order+1)]
        >>> corr = np.polyval(coeffs, np.log(spec['wave']*1.e4))
    Parameters
    ----------
    inp : str or `~astropy.io.fits.HDUList`
        Fits filename of a file that includes a `~astropy.io.fits.BinTableHDU` table of
        an extracted spectrum. Alternatively, can be an `~astropy.io.fits.HDUList`
        itself
    spectrum_extension : str
        Extension name of 1D spectrum in file or HDUList input
    sys_err : float
        Systematic uncertainty added in quadrature with `err` array
    err_mask : float, float or None
        Mask pixels where ``err < np.percentile(err[err > 0], err_mask[0])*err_mask[1]``
    err_median_filter : int, float or None
        Mask pixels where
        ``err < nd.median_filter(err, err_median_filter[0])*err_median_filter[1]``
    Returns
    -------
    spec : `~astropy.table.Table`
        Spectrum table. Existing columns in `file` should be
        - ``wave`` : observed-frame wavelength, microns
        - ``flux`` : flux density, `~astropy.units.microJansky`
        - ``err`` : Uncertainty on ```flux```
        Columns calculated here are
        - ``corr`` : flux scaling
        - ``escale`` : extra scaling of uncertainties
        - ``full_err`` : Full uncertainty including `sys_err`
        - ``R`` : spectral resolution
        - ``valid`` : Data are valid
    """
    global SCALE_UNCERTAINTY
    import scipy.ndimage as nd
    # Accept a filename, an HDUList, or anything utils.read_catalog can parse
    if isinstance(inp, str):
        if 'fits' in inp:
            with pyfits.open(inp) as hdul:
                if spectrum_extension in hdul:
                    spec = utils.read_catalog(hdul[spectrum_extension])
                else:
                    spec = utils.read_catalog(inp)
        else:
            spec = utils.read_catalog(inp)
    elif isinstance(inp, pyfits.HDUList):
        if spectrum_extension in inp:
            spec = utils.read_catalog(inp[spectrum_extension])
        else:
            msg = f'{spectrum_extension} extension not found in HDUList input'
            raise ValueError(msg)
    else:
        spec = utils.read_catalog(inp)
    # Apply polynomial flux correction stored in POLY0..POLY9 header keywords
    if 'POLY0' in spec.meta:
        pc = []
        for pi in range(10):
            if f'POLY{pi}' in spec.meta:
                pc.append(spec.meta[f'POLY{pi}'])
        corr = np.polyval(pc, np.log(spec['wave']*1.e4))
        spec['flux'] *= corr
        spec['err'] *= corr
        spec['corr'] = corr
    else:
        spec['corr'] = 1.
    # Extra uncertainty scaling: either polynomial coefficients (short array),
    # a per-pixel array matching the spectrum length, or a scalar.
    # NOTE(review): if SCALE_UNCERTAINTY is array-like with len >= 6 and
    # len != len(spec), no 'escale' column is set here and the usage below
    # would raise KeyError -- confirm whether that case can occur.
    if 'escale' not in spec.colnames:
        if hasattr(SCALE_UNCERTAINTY,'__len__'):
            if len(SCALE_UNCERTAINTY) < 6:
                spec['escale'] = 10**np.polyval(SCALE_UNCERTAINTY, spec['wave'])
            elif len(SCALE_UNCERTAINTY) == len(spec):
                spec['escale'] = SCALE_UNCERTAINTY
        else:
            spec['escale'] = SCALE_UNCERTAINTY
            # print('xx scale scalar', SCALE_UNCERTAINTY)
    # Replace masked values with zeros so the arithmetic below is defined
    for c in ['flux','err']:
        if hasattr(spec[c], 'filled'):
            spec[c] = spec[c].filled(0)
    valid = np.isfinite(spec['flux']+spec['err'])
    valid &= spec['err'] > 0
    valid &= spec['flux'] != 0
    # Mask pixels with implausibly small uncertainties (percentile threshold)
    if (valid.sum() > 0) & (err_mask is not None):
        _min_err = np.nanpercentile(spec['err'][valid], err_mask[0])*err_mask[1]
        valid &= spec['err'] > _min_err
    # ... and pixels well below the local running-median uncertainty
    if err_median_filter is not None:
        med = nd.median_filter(spec['err'][valid], err_median_filter[0])
        medi = np.interp(spec['wave'], spec['wave'][valid], med, left=0, right=0)
        valid &= spec['err'] > err_median_filter[1]*medi
    # Full uncertainty: scaled err + fractional systematic floor in quadrature
    spec['full_err'] = np.sqrt((spec['err']*spec['escale'])**2 +
                               (sys_err*spec['flux'])**2)
    if 'aper_err' in spec.colnames:
        spec['aper_full_err'] = np.sqrt((spec['aper_err']*spec['escale'])**2 +
                                   (sys_err*spec['aper_flux'])**2)
    spec.meta['sys_err'] = sys_err
    # Zero out everything at invalid pixels and record the mask
    spec['full_err'][~valid] = 0
    spec['flux'][~valid] = 0.
    spec['err'][~valid] = 0.
    spec['valid'] = valid
    # Attach the instrumental dispersion curve for this grating
    grating = spec.meta['GRATING'].lower()
    _filter = spec.meta['FILTER'].lower()
    _data_path = os.path.dirname(__file__)
    disp = utils.read_catalog(f'{_data_path}/data/jwst_nirspec_{grating}_disp.fits')
    spec.disp = disp
    spec['R'] = np.interp(spec['wave'], disp['WAVELENGTH'], disp['R'],
                          left=disp['R'][0], right=disp['R'][-1])
    spec.grating = grating
    spec.filter = _filter
    # Conversion factor from the native flux unit to f-lambda (cgs, 1e-20)
    flam_unit = 1.e-20*u.erg/u.second/u.cm**2/u.Angstrom
    um = spec['wave'].unit
    if um is None:
        um = u.micron
    spec.equiv = u.spectral_density(spec['wave'].data*um)
    spec['to_flam'] = (1*spec['flux'].unit).to(flam_unit, equivalencies=spec.equiv).value
    spec.meta['flamunit'] = flam_unit.unit
    spec.meta['fluxunit'] = spec['flux'].unit
    spec.meta['waveunit'] = spec['wave'].unit
    # Strip units so downstream numpy operations work on bare arrays
    spec['wave'] = spec['wave'].value
    spec['flux'] = spec['flux'].value
    spec['err'] = spec['err'].value
    return spec
def plot_spectrum(inp='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z=9.505, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, show_cont=True, draws=100, figsize=(16, 8), ranges=[(3650, 4980)], Rline=1000, full_log=False, write=False, eazy_templates=None, use_full_dispersion=True, get_spl_templates=False, scale_uncertainty_kwargs=None, plot_unit=None, spline_single=True, sys_err=0.02, return_fit_results=False, use_aper_columns=False, label=None, **kwargs):
    """
    Make a diagnostic figure
    Parameters
    ----------
    inp : str, `~astropy.io.fits.HDUList` or `SpectrumSampler`
        Input spectrum file / object
    z : float
        Redshift at which templates are evaluated
    vel_width : float
        Velocity width (km/s) of the line templates
    scale_disp : float
        Multiplicative scaling of the instrumental dispersion curve
    nspline : int
        Number of continuum spline templates
    draws : int
        Number of random coefficient draws shown on the plot
    ranges : list of (float, float)
        Rest-frame wavelength windows (Angstroms) for the zoom panels
    return_fit_results : bool
        Just return the fit results -
        ``templates, coeffs, flam, eflam, _model, mask, full_chi2``
    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The diagnostic figure
    spec : `~astropy.table.Table`
        The spectrum, with ``model`` and ``mline`` columns added
    data : dict
        Summary of the fit (coefficients, covariance, chi2, EWs, ...)
    """
    global SCALE_UNCERTAINTY
    lw, lr = utils.get_line_wavelengths()
    # Accept a filename, HDUList, or an existing SpectrumSampler
    if isinstance(inp, str):
        sampler = SpectrumSampler(inp, **kwargs)
        file = inp
    elif isinstance(inp, pyfits.HDUList):
        sampler = SpectrumSampler(inp, **kwargs)
        file = None
    else:
        file = None
        sampler = inp
    if (label is None) & (file is not None):
        label = os.path.basename(file)
    spec = sampler.spec
    # Select aperture or standard flux columns (see fit_redshift_grid)
    if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):
        if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):
            ap_corr = spec['aper_corr']*1
        else:
            ap_corr = 1
        flam = spec['aper_flux']*spec['to_flam']*ap_corr
        eflam = spec['aper_full_err']*spec['to_flam']*ap_corr
    else:
        flam = spec['flux']*spec['to_flam']
        eflam = spec['full_err']*spec['to_flam']
    wrest = spec['wave']/(1+z)*1.e4
    wobs = spec['wave']
    mask = spec['valid']
    flam[~mask] = np.nan
    eflam[~mask] = np.nan
    # Continuum spline basis
    bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)
    # bspl = utils.bspline_templates(wave=spec['wave']*1.e4,
    #                                degree=3,
    #                                df=nspline)
    w0 = utils.log_zgrid([spec['wave'].min()*1.e4,
                          spec['wave'].max()*1.e4], 1./Rline)
    # Build the combined spline + line (or eazy) template matrix at redshift z
    templates, tline, _A = make_templates(sampler, z,
                                          bspl=bspl,
                                          eazy_templates=eazy_templates,
                                          vel_width=vel_width,
                                          scale_disp=scale_disp,
                                          use_full_dispersion=use_full_dispersion,
                                          disp=spec.disp,
                                          grating=spec.grating,
                                          **kwargs,
                                          )
    # Optionally refit the wavelength-dependent uncertainty scaling first
    if scale_uncertainty_kwargs is not None:
        _, escl, _ = calc_uncertainty_scale(file=None,
                                            data=(spec, _A),
                                            **scale_uncertainty_kwargs)
        eflam *= escl
        spec['escale'] *= escl
    # Least-squares (or NNLS for physical templates) fit of the coefficients
    okt = _A[:,mask].sum(axis=1) > 0
    _Ax = _A[okt,:]/eflam
    _yx = flam/eflam
    if eazy_templates is None:
        _x = np.linalg.lstsq(_Ax[:,mask].T,
                             _yx[mask], rcond=None)
    else:
        _x = nnls(_Ax[:,mask].T, _yx[mask])
    coeffs = np.zeros(_A.shape[0])
    coeffs[okt] = _x[0]
    # Full model, line-only model and continuum-only model
    _model = _A.T.dot(coeffs)
    _mline = _A.T.dot(coeffs*tline)
    _mcont = _model - _mline
    full_chi2 = ((flam - _model)**2/eflam**2)[mask].sum()
    cont_chi2 = ((flam - _mcont)**2/eflam**2)[mask].sum()
    if return_fit_results:
        return templates, coeffs, flam, eflam, _model, mask, full_chi2
    # Coefficient covariance for uncertainties and random draws
    try:
        oktemp = okt & (coeffs != 0)
        AxT = (_A[oktemp,:]/eflam)[:,mask].T
        covar_i = utils.safe_invert(np.dot(AxT.T, AxT))
        covar = utils.fill_masked_covar(covar_i, oktemp)
        covard = np.sqrt(covar.diagonal())
        has_covar = True
    except:
        has_covar = False
        covard = coeffs*0.
        N = len(templates)
        covar = np.eye(N, N)
    print(f'\n# line flux err\n# flux x 10^-20 erg/s/cm2')
    if label is not None:
        print(f'# {label}')
    print(f'# z = {z:.5f}\n# {time.ctime()}')
    # Per-template coefficients and observed-frame equivalent widths
    cdict = {}
    eqwidth = {}
    for i, t in enumerate(templates):
        cdict[t] = [float(coeffs[i]), float(covard[i])]
        if t.startswith('line '):
            lk = t.split()[-1]
            # Equivalent width:
            # coeffs, line fluxes are in units of 1e-20 erg/s/cm2
            # _mcont, continuum model is in units of 1-e20 erg/s/cm2/A
            # so observed-frame equivalent width is roughly
            # eqwi = coeffs[i] / _mcont[ wave_obs[i] ]
            if lk in lw:
                lwi = lw[lk][0]*(1+z)/1.e4
                continuum_i = np.interp(lwi, spec['wave'], _mcont)
                eqwi = coeffs[i]/continuum_i
            else:
                eqwi = np.nan
            eqwidth[t] = eqwi
            print(f'{t:>20} {coeffs[i]:8.1f} ± {covard[i]:8.1f} (EW={eqwi:9.1f})')
    if 'srcra' not in spec.meta:
        spec.meta['srcra'] = 0.0
        spec.meta['srcdec'] = 0.0
        spec.meta['srcname'] = 'unknown'
    # Store the models back in the native flux units of the table
    spec['model'] = _model/spec['to_flam']
    spec['mline'] = _mline/spec['to_flam']
    # JSON-serializable summary of the fit
    data = {'z': float(z),
            'file':file,
            'label':label,
            'ra': float(spec.meta['srcra']),
            'dec': float(spec.meta['srcdec']),
            'name': str(spec.meta['srcname']),
            'wmin':float(spec['wave'][mask].min()),
            'wmax':float(spec['wave'][mask].max()),
            'coeffs':cdict,
            'covar':covar.tolist(),
            'wave': [float(m) for m in spec['wave']],
            'flux': [float(m) for m in spec['flux']],
            'err': [float(m) for m in spec['err']],
            'escale': [float(m) for m in spec['escale']],
            'model': [float(m) for m in _model/spec['to_flam']],
            'mline':[float(m) for m in _mline/spec['to_flam']],
            'templates':templates,
            'dof': int(mask.sum()),
            'fullchi2': float(full_chi2),
            'contchi2': float(cont_chi2),
            'eqwidth': eqwidth,
            }
    for k in ['z','wmin','wmax','dof','fullchi2','contchi2']:
        spec.meta[k] = data[k]
    # Figure layout: one row of zoom panels plus a full-spectrum panel
    #fig, axes = plt.subplots(len(ranges)+1,1,figsize=figsize)
    if len(ranges) > 0:
        fig = plt.figure(figsize=figsize, constrained_layout=True)
        gs = GridSpec(2, len(ranges), figure=fig)
        axes = []
        for i, _ra in enumerate(ranges):
            axes.append(fig.add_subplot(gs[0,i]))
        axes.append(fig.add_subplot(gs[1,:]))
    else:
        fig, ax = plt.subplots(1,1,figsize=figsize)
        axes = [ax]
    # Individual (nonzero) continuum spline components for display
    _Acont = (_A.T*coeffs)[mask,:][:,:nspline]
    _Acont[_Acont < 0.001*_Acont.max()] = np.nan
    # Random draws from the coefficient covariance to visualize uncertainty
    if (draws is not None) & has_covar:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mu = np.random.multivariate_normal(coeffs[oktemp], covar_i, size=draws)
        #print('draws', draws, mu.shape, _A.shape)
        mdraws = _A[oktemp,:].T.dot(mu.T)
    else:
        mdraws = None
    # Optional conversion from f-lambda to a requested plot unit
    if plot_unit is not None:
        unit_conv = (1*spec.meta['flamunit']).to(plot_unit,
                                         equivalencies=spec.equiv).value
    else:
        unit_conv = np.ones(len(wobs))
    for ax in axes:
        if 1:
            ax.errorbar(wobs, flam*unit_conv, eflam*unit_conv,
                        marker='None', linestyle='None',
                        alpha=0.5, color='k', ecolor='k', zorder=100)
        ax.step(wobs, flam*unit_conv, color='k', where='mid', lw=1, alpha=0.8)
        # ax.set_xlim(3500, 5100)
        #ax.plot(_[1]['templz']/(1+z), _[1]['templf'])
        ax.step(wobs[mask], (_mcont*unit_conv)[mask],
                color='pink', alpha=0.8, where='mid')
        ax.step(wobs[mask], (_model*unit_conv)[mask],
                color='r', alpha=0.8, where='mid')
        # Shaded bands at common strong-line wavelengths (rest-frame A)
        cc = utils.MPL_COLORS
        for w, c in zip([3727, 4980, 6565, 9070, 9530, 1.094e4, 1.282e4,
                         1.875e4],
                        [cc['purple'], cc['b'], cc['g'], 'darkred', 'darkred',
                         cc['pink'], cc['pink'], cc['pink']]):
            wz = w*(1+z)/1.e4
            dw = 70*(1+z)/1.e4
            ax.fill_between([wz-dw, wz+dw], [0,0], [100,100],
                            color=c, alpha=0.07, zorder=-100)
        if mdraws is not None:
            ax.step(wobs[mask], (mdraws.T*unit_conv).T[mask,:],
                    color='r', alpha=np.maximum(1./draws, 0.02), zorder=-100, where='mid')
        if show_cont:
            ax.plot(wobs[mask], (_Acont.T*unit_conv[mask]).T,
                    color='olive', alpha=0.3)
        ax.fill_between(ax.get_xlim(), [-100, -100], [0, 0], color='0.8',
                        alpha=0.5, zorder=-1)
        # Shade wavelengths blueward of Lyman-alpha at this redshift
        ax.fill_betweenx([0, 100], [0,0], [1215.67*(1+z)/1.e4]*2,
                         color=utils.MPL_COLORS['orange'], alpha=0.2,
                         zorder=-1)
        ax.grid()
    # axes[0].set_xlim(1000, 2500)
    # ym = 0.15; axes[0].set_ylim(-0.1*ym, ym)
    # Zoom panels: convert the requested rest-frame windows to observed frame
    for i, r in enumerate(ranges):
        axes[i].set_xlim(*[ri*(1+z)/1.e4 for ri in r])
        # print('xxx', r)
    # Full-spectrum panel x-range per filter/grating configuration
    if spec.filter == 'clear':
        axes[-1].set_xlim(0.6, 5.29)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.1))
        axes[-1].xaxis.set_major_locator(MultipleLocator(0.5))
    elif spec.filter == 'f070lp':
        axes[-1].set_xlim(0.69, 1.31)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))
    elif spec.filter == 'f100lp':
        axes[-1].set_xlim(0.99, 1.91)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))
        axes[-1].xaxis.set_major_locator(MultipleLocator(0.1))
    elif spec.filter == 'f170lp':
        axes[-1].set_xlim(1.69, 3.21)
    elif spec.filter == 'f290lp':
        axes[-1].set_xlim(2.89, 5.31)
    else:
        axes[-1].set_xlim(wrest[mask].min(), wrest[mask].max())
    axes[-1].set_xlabel(f'obs wavelenth, z = {z:.5f}')
    #axes[0].set_title(os.path.basename(file))
    # Auto-scale each panel, excluding the strongest lines from the max
    for ax in axes:
        xl = ax.get_xlim()
        ok = wobs > xl[0]
        ok &= wobs < xl[1]
        ok &= np.abs(wrest-5008) > 100
        ok &= np.abs(wrest-6564) > 100
        ok &= mask
        if ok.sum() == 0:
            ax.set_visible(False)
            continue
        ymax = np.maximum((_model*unit_conv)[ok].max(), 10*np.median((eflam*unit_conv)[ok]))
        ymin = np.minimum(-0.1*ymax, -3*np.median((eflam*unit_conv)[ok]))
        ax.set_ylim(ymin, ymax*1.3)
        # print(xl, ymax)
        if ok.sum() > 0:
            if (np.nanmax((flam/eflam)[ok]) > 20) & (full_log):
                ax.set_ylim(0.005*ymax, ymax*5)
                ax.semilogy()
    if len(axes) > 0:
        gs.tight_layout(fig, pad=0.8)
    else:
        fig.tight_layout(pad=0.8)
    # Footer annotations: label and timestamp
    if label is not None:
        fig.text(0.015*12./12, 0.005, f'{label}',
                 ha='left', va='bottom',
                 transform=fig.transFigure, fontsize=8)
    fig.text(1-0.015*12./12, 0.005, time.ctime(),
             ha='right', va='bottom',
             transform=fig.transFigure, fontsize=6)
    return fig, spec, data
| gbrammer/msaexp | msaexp/spectrum.py | spectrum.py | py | 68,700 | python | en | code | 17 | github-code | 36 |
25107333860 | from Actors.Actor import Actor
from Actors.Maze import Maze
from Actors.Direction import Direction
from Util.Timer import Timer
import pyrr
import math
import numpy
class Pacman(Actor):
    """The player-controlled Pacman actor.

    Moves through a `Maze` at a constant speed: turn requests arrive via
    `notify` and are queued as a "turn signal" that is applied as soon as
    the maze layout allows.  Also drives a simple mouth open/close
    animation used for rendering.
    """

    # `position` is assumed to be a 2d (x cell, z cell) vector.
    def __init__(self, position : list, direction : Direction, speed : float, id : str):
        super().__init__(position, direction, speed, id)
        self.animation = self.__PacmanAnimation()
        self.updateTimer = Timer()
        self.updateTimer.restart()
        self.isMoving = False

    def update(self, maze : Maze):
        """Advance Pacman by ``speed * elapsed_time``, honoring walls and queued turns."""
        deltaTime = self.updateTimer.getElapsedTime()
        distance = deltaTime * self.speed
        self.updateTimer.restart()
        if self.canApplyTurnSignal(maze):
            self.applyTurnSignal()
        currentCellPos = Maze.worldCoordsToCellCoords(self.position)
        # Stop at the cell center when the next cell in the travel direction is blocked.
        if not maze.isVacantSpotInSpecifiedDirection(currentCellPos, self.currectDirection) and maze.isTheMiddleOfTheCell(self.position, self.currectDirection):
            self.isMoving = False
            return
        self.position += self.frontVector * pyrr.Vector3([distance, distance, distance])
        self.isMoving = True

    def notify(self, message : str):
        """Handle observer messages of the form ``"turn/<key>"`` (WASD keys)."""
        tokens = message.split('/')
        if tokens[0] != "turn":
            return
        direction = tokens[1]
        keyToDirection = {
            'a': Direction.LEFT,
            'w': Direction.UP,
            'd': Direction.RIGHT,
            's': Direction.DOWN,
        }
        # BUG FIX: previously an unrecognized key (or empty payload) left
        # `directionValue` unbound and raised NameError; such messages are
        # now silently ignored.
        directionValue = keyToDirection.get(direction[0]) if direction else None
        if directionValue is None:
            return
        self.setTurnSignal(directionValue)

    def getLowerThenUpperJawRotations(self):
        """Return the current (lower, upper) jaw rotation angles in degrees."""
        return self.animation.getLowerThenUpperJawRotations()

    class __PacmanAnimation:
        """Drives the periodic mouth open/close sweep of the jaws."""

        def __init__(self):
            # Duration of one full sweep (presumably same units as Timer
            # elapsed time -- TODO confirm).
            self.animationPeriod = 300
            # Maximum jaw opening angle, degrees.
            self.amplitude = 60.0
            self.openMouse = True
            self.currentRotationLowerJaw = 0.0
            self.currentRotationUpperJaw = 0.0
            self.timer = Timer()
            self.timer.restart()

        def getLowerThenUpperJawRotations(self):
            """Advance the animation by the elapsed time and return both angles."""
            deltaTime = self.timer.getElapsedTime()
            delta_x_degrees = (deltaTime * self.amplitude) / self.animationPeriod
            # Reverse the sweep near closed (< 10 deg) and at full amplitude.
            if self.currentRotationLowerJaw < 10.0:
                self.openMouse = True
            elif self.currentRotationLowerJaw > self.amplitude:
                self.openMouse = False
            if self.openMouse:
                self.currentRotationLowerJaw += delta_x_degrees
            else:
                self.currentRotationLowerJaw -= delta_x_degrees
            # The upper jaw mirrors the lower jaw.
            self.currentRotationUpperJaw = -self.currentRotationLowerJaw
            self.timer.restart()
            return self.currentRotationLowerJaw, self.currentRotationUpperJaw
| VolodymyrVakhniuk/Pacman | src/Actors/Pacman.py | Pacman.py | py | 3,050 | python | en | code | 1 | github-code | 36 |
26388489404 | import os
import pickle
import types
import shutil
import time
import multiprocessing as mtp
# Example of the message envelope exchanged between cells (see cellComm.send/recv).
packageSample={"mode":"join", "from":"id123", "to":["id234", "id789"], "time":"20181012122123","type":"unknown", "tag":["dog", "white"], "dataSet":"image object"}
class cellComm(object):
    """File-based message passing between "cell" directories.

    A message is a dict pickled to ``package.pkl`` inside the target cell's
    directory; the receiving side is woken up by spawning its ``comm.py``
    (and, in turn, its ``cell.py``) in a child process.
    """

    def __init__(self):
        # Well-known file names inside each cell directory.
        self.package = "package.pkl"
        self.pklTemp = "temp.pkl"
        self.commScript = "comm.py"
        self.cellScript = "cell.py"
        self.database = "cell.db"
        self.cwdir = os.path.dirname(__file__)
        self.pwdir = os.path.dirname(self.cwdir)

    # Check whether a package file has arrived and, if so, awake this cell.
    def listen(self):
        if os.path.exists(self.package):
            with open(self.package, "rb") as pfile:
                data = pickle.load(pfile)
            if isinstance(data, dict):
                self.awakeCell()
            # NOTE(review): the package is removed immediately after spawning
            # the cell process, which may race with the child reading it.
            os.remove(self.package)

    # Return the data passed on, or False if the package is not a dict.
    # Returns None (implicitly) when no package file is present.
    def recv(self):
        if os.path.exists(self.package):
            with open(self.package, "rb") as pfile:
                self.data = pickle.load(pfile)
            if isinstance(self.data, dict):
                return self.data
            else:
                os.remove(self.package)
                return False

    # send() accepts a packaged dictionary object (see `packageSample`) and
    # copies it into every destination cell listed under the "to" key.
    def send(self, packageObject):
        self.packageObject = packageObject
        if isinstance(self.packageObject, dict):
            with open(self.pklTemp, "wb") as temp:
                pickle.dump(self.packageObject, temp)
            for item in self.packageObject["to"]:
                shutil.copy(self.pklTemp, (item + r"/" + self.package))
                self.awakeComm(item + r"/" + self.commScript)
            os.remove(self.pklTemp)

    @staticmethod
    def _exec_script(path):
        """Execute a Python script in a fresh namespace.

        Python 3 replacement for the removed ``execfile`` builtin.
        """
        with open(path) as f:
            code = compile(f.read(), path, "exec")
        exec(code, {"__name__": "__main__", "__file__": path})

    # Awake "comm.py" of the target cell to activate communication.
    def awakeComm(self, desObject):
        # BUG FIX: the original did mtp.Process(target=execfile(desObject)),
        # which executed the script immediately in the parent (execfile is
        # also Python-2-only) and passed its return value as the target.
        # Pass the callable and its argument separately instead.
        p = mtp.Process(target=self._exec_script, args=(desObject,))
        p.daemon = False
        p.start()

    # Awake "cell.py" of this cell to activate its body.
    def awakeCell(self):
        p = mtp.Process(target=self._exec_script, args=(self.cellScript,))
        p.daemon = False
        p.start()

    def run(self):
        self.listen()
| babyproject/scripts | learn_python_commA.py | learn_python_commA.py | py | 2,162 | python | en | code | 0 | github-code | 36 |
44648975893 | #!/usr/bin/env python
# coding: utf-8
from copy import deepcopy
import pandas as pd
import numpy as np
# Show full (untruncated) cell contents when printing pandas DataFrames.
pd.set_option("display.max_colwidth", None)
def run_adult_experiments_trees_taxonomies(
    name_output_dir="output",
    type_experiment="one_at_time",
    type_criterion="divergence_criterion",
    min_support_tree=0.1,
    # NOTE(review): mutable default list -- safe only as long as callers
    # never mutate it.
    min_sup_divergences=[0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.15, 0.2],
    metric="d_outcome",
    verbose=False,
    ouput_folder_dir=".",
    minimal_gain = 0,
):
    """Run tree-based divergence experiments on the adult-income dataset.

    Builds a tree discretization of the dataset, then extracts divergent
    frequent patterns (with and without generalization) for each minimum
    support in `min_sup_divergences`, writing pattern counts, maximum
    divergences and timings as JSON files under
    ``ouput_folder_dir/name_output_dir/...``.
    """
    # out["FP"] collects pattern counts, out["max"] the maximum divergence,
    # each keyed by generalization type and minimum support.
    info_list = ["FP", "max"]
    type_gens = ["with_gen", "without_gen"]
    out = {k: {} for k in info_list}
    for i in info_list:
        out[i] = {k: {} for k in type_gens}
    # # Dataset
    # NOTE(review): this line is a no-op (maps None to None, identity otherwise).
    minimal_gain = None if minimal_gain == None else minimal_gain
    dataset_name = "adult_income_taxonomy"
    import os
    import pandas as pd
    filename_d = os.path.join(
        os.path.curdir, "datasets", "ACSPUMS", "adult_dataset_income_tax.csv"
    )
    dfI = pd.read_csv(filename_d)
    attributes = list(dfI.columns.drop("income"))
    continuous_attributes = ["AGEP", "WKHP"]
    # NOTE(review): this overwrites the `metric` parameter -- the function
    # always uses "d_outcome" regardless of the argument; confirm intended.
    metric = "d_outcome"
    target = "income"
    dfI = dfI[attributes + [target]]
    import os
    # # Tree divergence - FPR
    all_time_results = {}
    time_results = {"with_gen": {}, "without_gen": {}}
    df_analyze = dfI.copy()
    import time
    start_time_tree = time.time()
    from tree_discretization_ranking import TreeDiscretization_ranking
    tree_discr = TreeDiscretization_ranking()
    # ## Extract tree
    generalization_dict, discretizations = tree_discr.get_tree_discretization(
        df_analyze,
        type_splitting=type_experiment,
        min_support=min_support_tree,
        metric=metric,
        continuous_attributes=list(continuous_attributes),
        storeTree=True,
        type_criterion=type_criterion,
        minimal_gain=minimal_gain,
        target_col=target
        # minimal_gain = 0.0015
    )
    time_results["tree_time"] = time.time() - start_time_tree
    import json
    # Merge the tree-derived generalizations with the hand-curated taxonomies
    with open(os.path.join(os.path.curdir, "datasets", "ACSPUMS", "adult_taxonomies.json"), "r") as fp:
        generalization_dict_tax = json.load(fp)
    generalization_dict_all = deepcopy(generalization_dict)
    generalization_dict_all.update(generalization_dict_tax)
    for min_sup_divergence in min_sup_divergences:
        if verbose:
            print(min_sup_divergence, end=" ")
        # ## Extract divergence - 1 function
        for apply_generalization in [False, True]:
            if apply_generalization == False:
                type_gen = "without_gen"
            else:
                type_gen = "with_gen"
            from utils_extract_divergence_generalized_ranking import (
                extract_divergence_generalized,
            )
            # Overlapping patterns are only meaningful for the
            # "all_attributes" experiment with generalization enabled
            allow_overalp = (
                True if type_experiment == "all_attributes" else False
            )
            if (allow_overalp) and (apply_generalization is False):
                continue
            start_time_divergence = time.time()
            FP_fm = extract_divergence_generalized(
                df_analyze,
                discretizations,
                generalization_dict,
                continuous_attributes,
                min_sup_divergence=min_sup_divergence,
                apply_generalization=apply_generalization,
                target_name=target,
                FPM_type="fpgrowth",
                metrics_divergence=[metric],
                allow_overalp=allow_overalp,
                type_experiment=type_experiment,
            )
            time_results[type_gen][min_sup_divergence] = (
                time.time() - start_time_divergence
            )
            if verbose:
                print(f"({round( time.time() - start_time_divergence,2)})", end = " ")
            from divexplorer_generalized.FP_Divergence import FP_Divergence
            fp_i = FP_Divergence(FP_fm, metric)
            FP_fm = fp_i.getDivergence(th_redundancy=0)
            # Record the number of patterns and the maximum divergence
            out["FP"][type_gen][
                float(min_sup_divergence)
            ] = FP_fm.shape[0]
            out["max"][type_gen][
                float(min_sup_divergence)
            ] = max(FP_fm[metric])
    all_time_results = time_results
    if verbose:
        print()
    # Persist timings and summary statistics as JSON
    outputdir = os.path.join(
        ouput_folder_dir,
        name_output_dir,
        dataset_name,
        type_criterion,
        f"stree_{min_support_tree}",
        metric,
    )
    from pathlib import Path
    Path(outputdir).mkdir(parents=True, exist_ok=True)
    import json
    filename = os.path.join(
        outputdir,
        f"info_time.json",
    )
    with open(
        filename,
        "w",
    ) as fp:
        json.dump(all_time_results, fp)
    for i in info_list:
        output = out[i]
        filename = os.path.join(
            outputdir,
            f"info_ALL_{i}.json",
        )
        with open(
            filename,
            "w",
        ) as fp:
            json.dump(output, fp)
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--name_output_dir",
        default="output_res",
        help="specify the name of the output folder",
    )
    parser.add_argument(
        "--type_criterion",
        type=str,
        default="divergence_criterion",
        help='specify the experiment type among ["divergence_criterion", "entropy"]',
    )
    parser.add_argument(
        "--min_sup_tree",
        type=float,
        default=0.1,
        help="specify the minimum support for the tree induction",
    )
    # NOTE: --show_fig and --no_compute_divergence are accepted for CLI
    # compatibility but are not supported by
    # run_adult_experiments_trees_taxonomies and are currently unused.
    parser.add_argument(
        "--show_fig",
        action="store_true",
        help="specify show_fig to show the tree graph.",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="specify verbose to print working in progress status.",
    )
    parser.add_argument(
        "--no_compute_divergence",
        action="store_true",
        help="specify no_compute_divergence to not compute the divergence scores.",
    )
    parser.add_argument(
        "--type_experiments",
        nargs="*",
        type=str,
        default=[
            "one_at_time",
            "all_attributes",
        ],  # , "all_attributes_continuous"], #"",
        help="specify the types of experiments to evaluate among ['one_at_time', 'all_attributes', 'all_attributes_continuous']",
    )
    parser.add_argument(
        "--min_sup_divs",
        nargs="*",
        type=float,
        default=[
            0.01,
            0.02,
            0.03,
            0.04,
            0.05,
            0.1,
            0.15,
            0.2,
            0.25,
            0.3,
            0.35,
        ],
        help="specify a list of min supports of interest, with values from 0 to 1",
    )
    parser.add_argument(
        "--metrics",
        nargs="*",
        type=str,
        default=["d_outcome"],  # , "d_fnr", "d_error"]
        help="specify a list of metric of interest, ['d_fpr', 'd_fnr', 'd_error', 'd_accuracy', 'd_outcome']",
    )
    parser.add_argument(
        "--minimal_gain",
        type=float,
        default=0.0,
        help="specify the minimal_gain for the tree induction",
    )
    args = parser.parse_args()
    # BUG FIX: the original call passed keyword arguments that do not exist
    # in run_adult_experiments_trees_taxonomies's signature
    # (type_experiments, metrics, show_fig, no_compute_divergence), raising
    # TypeError at startup.  The function handles a single experiment type
    # and metric at a time, so iterate over the requested combinations.
    for type_experiment in args.type_experiments:
        for metric in args.metrics:
            run_adult_experiments_trees_taxonomies(
                name_output_dir=args.name_output_dir,
                type_experiment=type_experiment,
                type_criterion=args.type_criterion,
                min_support_tree=args.min_sup_tree,
                min_sup_divergences=args.min_sup_divs,
                metric=metric,
                verbose=args.verbose,
                minimal_gain=args.minimal_gain,
            )
| elianap/h-divexplorer | experiments_adult_trees_taxonomies.py | experiments_adult_trees_taxonomies.py | py | 7,907 | python | en | code | 2 | github-code | 36 |
9037585810 | import tbapy
import pyperclip
# Prompt for The Blue Alliance event key to query (e.g. "2023casd").
event = input("Enter the event key: ")
def getEventMatchTeams(key):
    """Return qualification-match alliances for a TBA event.

    Parameters
    ----------
    key : str
        The Blue Alliance event key, e.g. ``"2023casd"``.

    Returns
    -------
    list of dict
        One entry per qualification match, in match order, each of the form
        ``{'red': [t1, t2, t3], 'blue': [t1, t2, t3]}`` with the ``'frc'``
        prefix stripped from team keys.
    """
    # SECURITY NOTE: the API key is hard-coded in source; consider moving it
    # to an environment variable before sharing this script.
    tba = tbapy.TBA('KiFI9IObf1xbtTKuLzSu6clL006qHK1Lh5Xy65i1zSDutDcvsYJWwliU1svWKVzX')
    matches = tba.event_matches(key, simple=True)
    alliances = []
    for match in matches:
        if match['comp_level'] != 'qm':
            continue
        # Strip the 'frc' prefix from every team key in one pass instead of
        # six hand-written per-index assignments.
        alliances.append({
            'match_num': match['match_number'],
            'red': [team[3:] for team in match['alliances']['red']['team_keys']],
            'blue': [team[3:] for team in match['alliances']['blue']['team_keys']],
        })
    alliances.sort(key=lambda x: x['match_num'])
    # The match number was only needed for sorting
    for match in alliances:
        del match['match_num']
    return alliances
teams = getEventMatchTeams(event)
# Print the alliance list and copy it to the system clipboard for pasting elsewhere.
print("\n" + str(teams))
pyperclip.copy(str(teams))
input("\n\033[1m\033[92mCopied to clipboard!\033[0m\n\nPress any key to continue.")
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Disable the "Chrome is being controlled by automated software" info bar.
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("--disable-infobars")
driver = webdriver.Chrome(executable_path="/usr/bin/chromedriver",chrome_options=chrome_options)
driver.get("https://www.baidu.com/")
# "wd" is the name attribute of Baidu's search input box.
elem = driver.find_element_by_name("wd")
elem.send_keys("lufei")
# Press Enter to submit the search.
elem.send_keys(Keys.RETURN)
9241363999 | import csv
person = {'name': 'Bob', 'age': 20, 'job': 'gardener', 'take him away!': True}
# Example payload: written out as key/value rows by write_to_file below.
def read_file(file_name):
    """Print every row of a semicolon-delimited CSV file as a dict.

    The column names are fixed (first_name, last_name, email, gender,
    balance); any header row present in the file is printed as data too.
    """
    column_names = ['first_name', 'last_name', 'email', 'gender', 'balance']
    with open(file_name, 'r', encoding='utf-8') as source:
        for record in csv.DictReader(source, column_names, delimiter=';'):
            print(record)
def write_to_file(file_name):
    """Write the module-level ``person`` dict to a CSV file as key/value rows.

    Each item of ``person`` becomes one row with columns ``key`` and
    ``value``, preceded by a header row, using ';' as the delimiter.
    """
    # BUG FIX: newline='' is required when writing with the csv module --
    # without it every row is followed by a blank line on Windows.
    with open(file_name, 'w', encoding='utf-8', newline='') as f:
        fieldnames = ['key', 'value']
        writer = csv.DictWriter(f, fieldnames, delimiter=';')
        writer.writeheader()
        for key, value in person.items():
            writer.writerow({'key': key, 'value': value})
    # Typo fixed in the status message ("writen" -> "written").
    print(f'Data was successfully written to {file_name}')
# Demo: dump the sample `person` dict to bob.csv when the module is run.
write_to_file('bob.csv')
18567551028 | from upwardmobility.items import UpwardMobilityItem
from upwardmobility.loaders import CompanyLoader
from upwardmobility.utils import *
class PaDeptOfCorporationsSpider(scrapy.Spider):
    """Scrape business registrations from the PA Dept of State search API.

    Issues a POST search for every two-letter prefix (aa..zz) against the
    businesssearch endpoint and loads each unique record into an
    `UpwardMobilityItem` via `CompanyLoader`.
    """
    name = 'pa_dept_of_corporations'
    allowed_domains = ['file.dos.pa.gov']
    start_urls = ['https://file.dos.pa.gov/search/business']
    # Request headers mimicking a browser; the API accepts an undefined auth token
    headers = {
        'Accept': '*/*',
        'content-type': 'application/json',
        'Origin': 'https://file.dos.pa.gov',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
        'authorization': 'undefined',
    }
    # Base search payload; SEARCH_VALUE is filled in per request in parse()
    json_data = {
        'SEARCH_FILTER_TYPE_ID': '1',
        'FILING_TYPE_ID': '',
        'STATUS_ID': '',
        'FILING_DATE': {
            'start': None,
            'end': None,
        },
    }
    post_link = 'https://file.dos.pa.gov/api/Records/businesssearch'
    # Seen record IDs for de-duplication across search prefixes.
    # NOTE(review): class-level mutable list shared by all instances and
    # growing unboundedly over a crawl -- confirm this is acceptable.
    buf = []
    def parse(self, response):
        # Enumerate all two-letter prefixes to cover the full business index
        for searchText1 in string.ascii_lowercase:
            for searchText2 in string.ascii_lowercase:
                SEARCH_VALUE = f"{searchText1}{searchText2}"
                self.json_data['SEARCH_VALUE'] = SEARCH_VALUE
                yield scrapy.Request(
                    url=self.post_link,
                    method='POST',
                    body=json.dumps(self.json_data),
                    callback=self.get_data,
                    headers=self.headers
                )
    def get_data(self, response):
        # API returns {'rows': {<row_key>: {...record...}, ...}}
        json_data = json.loads(response.text)['rows']
        for t in json_data:
            d = json_data[t]
            # Skip records already emitted for another search prefix
            if d['ID'] in self.buf:
                continue
            self.buf.append(d['ID'])
            l = CompanyLoader(item=UpwardMobilityItem(), response=response)
            l.add_value('source', 'PA Dept of Corporations')
            # TITLE looks like "Name (license number)"
            l.add_value('business_name', d['TITLE'][0].split("(")[0])
            l.add_value('license_number', d['TITLE'][0].split("(")[1].split(")")[0])
            l.add_value('license_status', d['STATUS'])
            l.add_value('license_issue_date', d['FILING_DATE'])
            l.add_value('industry_type', d['ENTITY_TYPE'])
            # Heuristic: if AGENT contains ", PA" treat it as an address
            # ("street..., city, PA zip"); otherwise as a secondary name
            if ', PA' in d['AGENT']:
                addresses = d['AGENT'].split(',')
                state_zip = addresses[-1]
                city = addresses[-2]
                street_address = ''.join(addresses[:-2])
                l.add_value('street_address', street_address.strip())
                l.add_value('city', city.strip())
                l.add_value('state', state_zip.strip().split(' ')[0].strip())
                l.add_value('postal_code', state_zip.strip().split(' ')[-1].strip())
                l.add_value('country', 'USA')
            else:
                l.add_value('secondary_business_name', d['AGENT'])
            yield l.load_item()
| mscandale-iabbb/research_public | upwardmobility/spiders/pa_dept_of_corporations.py | pa_dept_of_corporations.py | py | 2,830 | python | en | code | 0 | github-code | 36 |
28519216717 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_access_within_threshold_variable import abstract_access_within_threshold_variable
class employment_of_activity_id_DDD_within_DDD_minutes_SSS_travel_time(abstract_access_within_threshold_variable):
    """total number of jobs for zones within DDD minutes SSS (mode) travel time,
    """
    _return_type = "int32"
    function = "sum"
    def __init__(self, activitynum, number, mode):
        # DDD/SSS placeholders: activity id, minutes threshold, travel mode.
        self.activity_id=activitynum
        self.threshold = number
        # Column of the travel_data table holding times for this mode.
        self.travel_data_attribute = "travel_data.%s" % mode
        #*(business.activity_id==self.activity_id)
        # OPUS expression: per-zone employment restricted to this activity id,
        # aggregated up the building -> parcel -> zone hierarchy.
        self.zone_attribute_to_access = "emp_access=zone.aggregate(business.employment*(business.activity_id==%d),intermediates=[building,parcel])" % self.activity_id
        abstract_access_within_threshold_variable.__init__(self)
| psrc/urbansim | sanfrancisco/zone/employment_of_activity_id_DDD_within_DDD_minutes_SSS_travel_time.py | employment_of_activity_id_DDD_within_DDD_minutes_SSS_travel_time.py | py | 1,034 | python | en | code | 4 | github-code | 36 |
28514815877 | import os
#from opus_gui.configurations.xml_configuration import XMLConfiguration
from opus_gui.results_manager.run.indicator_framework.visualizer.visualization_factory import VisualizationFactory
from opus_gui.results_manager.run.indicator_framework_interface import IndicatorFrameworkInterface
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.table import Table
from opus_core.storage_factory import StorageFactory
from opus_gui.util.exception_formatter import formatExceptionInfo
from opus_gui.general_manager.general_manager_functions import get_available_spatial_dataset_names
class OpusResultVisualizer(object):
    """Runs the indicator-framework visualization pipeline (maps / charts /
    tables) for a set of computed indicators, reporting progress back to the
    GUI through injected callbacks."""
    def __init__(self,
                 project,
                 indicator_type,
                 indicators,
                 kwargs = None):
        # Callbacks are assigned by the owning GUI element after construction.
        self.finishedCallback = None
        self.errorCallback = None
        self.guiElement = None
        self.config = None
        self.firstRead = True
        self.project = project
        # e.g. 'mapnik_map', 'tab', 'table_esri', ... (see _visualize).
        self.indicator_type = indicator_type
        self.indicators = indicators
        self.visualizations = []
        # Datasets that can be drawn spatially; map types are restricted to these.
        self.spatial_datasets = get_available_spatial_dataset_names(project = self.project)
        if kwargs == None: kwargs = {}
        self.kwargs = kwargs
    def run(self, args, cache_directory = None):
        """Execute the visualization and signal success/failure via callbacks."""
        succeeded = False
        try:
            # find the directory containing the eugene xml configurations
            not_visualized = self._visualize(args, cache_directory = cache_directory)
            succeeded = True
        except:
            succeeded = False
            errorInfo = formatExceptionInfo(custom_message = 'Unexpected error in the results visualizer')
            if self.errorCallback is not None:
                self.errorCallback(errorInfo)
        if self.finishedCallback is not None:
            self.finishedCallback(succeeded)
    def _visualize(self, args, cache_directory = None):
        """Build computed-indicator objects and hand them to the factory.

        Returns the list of indicators that could not be visualized
        (non-spatial datasets requested as maps)."""
        self.visualizations = []
        indicators_to_visualize = {}
        interface = IndicatorFrameworkInterface(self.project)
        not_visualized = []
        #get common years
        years = set([])
        for indicator in self.indicators:
            years |= set(indicator['years'])
        # Cache SourceData objects so each source is only constructed once.
        source_data_objs = {}
        for indicator in self.indicators:
            indicator_name = indicator['indicator_name']
            source_data_name = indicator['source_data_name']
            dataset_name = indicator['dataset_name']
            # Map outputs require a spatial dataset; skip and report otherwise.
            if (self.indicator_type == 'mapnik_map' or self.indicator_type == 'mapnik_animated_map') and dataset_name not in self.spatial_datasets:
                not_visualized.append(indicator)
                continue
            if source_data_name not in source_data_objs:
                source_data = interface.get_source_data(
                                     source_data_name = source_data_name,
                                     years = list(years),
                                     cache_directory = cache_directory
                                     )
                source_data_objs[source_data_name] = source_data
            else:
                source_data = source_data_objs[source_data_name]
            indicator = interface.get_indicator(
                                 indicator_name = indicator_name,
                                 dataset_name = dataset_name)
            computed_indicator = interface.get_computed_indicator(indicator = indicator,
                                                                  source_data = source_data,
                                                                  dataset_name = dataset_name)
            computed_indicator.gui_indicator_name = indicator_name
            #####################
            #hack to get plausible primary keys...
            # Reads the first stored year's flt storage and treats every
            # column ending in '_id' as a primary key.
            cache_directory = source_data.cache_directory
            _storage_location = os.path.join(cache_directory,
                                             'indicators',
                                             '_stored_data',
                                             repr(source_data.years[0]))
            storage = StorageFactory().get_storage(
                           type = 'flt_storage',
                           storage_location = _storage_location)
            cols = storage.get_column_names(
                        table_name = dataset_name)
            primary_keys = [col for col in cols if col.find('_id') != -1]
            computed_indicator.primary_keys = primary_keys
            ##################
            name = computed_indicator.get_file_name(
                                suppress_extension_addition = True)
            indicators_to_visualize[name] = computed_indicator
        # Translate the GUI-level indicator_type into the factory's
        # visualization type plus its extra keyword arguments.
        viz_args = {}
        if self.indicator_type == 'mapnik_map':
            viz_type = self.indicator_type
        elif self.indicator_type == 'mapnik_animated_map':
            viz_type = self.indicator_type
        elif self.indicator_type == 'matplotlib_chart':
            viz_type = self.indicator_type
        elif self.indicator_type == 'tab':
            viz_type = 'table'
            if 'output_style' not in viz_args:
                viz_args['output_style'] = Table.ALL
            viz_args['output_type'] = 'tab'
        elif self.indicator_type == 'table_esri':
            viz_type = 'table'
            if 'output_style' not in viz_args:
                viz_args['output_style'] = Table.ALL
            viz_args['output_type'] = 'esri'
        elif self.indicator_type == 'table_per_year':
            viz_type = 'table'
            if 'output_style' not in viz_args:
                viz_args['output_style'] = Table.PER_YEAR
            viz_args['output_type'] = 'tab'
        elif self.indicator_type == 'table_per_attribute':
            viz_type = 'table'
            if 'output_style' not in viz_args:
                viz_args['output_style'] = Table.PER_ATTRIBUTE
            viz_args['output_type'] = 'tab'
        # Caller-supplied args take precedence over constructor kwargs.
        viz_args.update(self.kwargs)
        viz_args.update(args)
#        try:
#            import pydevd;pydevd.settrace()
#        except:
#            pass

        viz_factory = VisualizationFactory()
        self.visualizations = viz_factory.visualize(
                                  indicators_to_visualize = indicators_to_visualize.keys(),
                                  computed_indicators = indicators_to_visualize,
                                  visualization_type = viz_type, **viz_args)
        if self.visualizations is None:
            self.visualizations = []
        return not_visualized
    def get_visualizations(self):
        """Return the visualizations produced by the last run (may be empty)."""
        return self.visualizations
17392986734 | # I'm going to write a python script that creates a shell script that creates and executes a python script that prints "Hello, world"
import os
file_name = "hello_world.sh"

# Generate a shell script that invokes Python to print the greeting.
# print(...) with parentheses runs on both Python 2 and 3; the original
# Python 2 print statement fails under any modern interpreter.
with open(file_name, 'w') as g:
    g.write("python -c \"print('Hello, World!')\"")

# 0o755 (rwxr-xr-x) is enough to execute; os.chmod avoids shelling out and
# does not grant world-write the way the previous "chmod 777" did.
os.chmod(file_name, 0o755)
os.system("./" + file_name)
| ekeilty17/Personal-Projects-In-Python | Useful/convoluted_script.py | convoluted_script.py | py | 324 | python | en | code | 1 | github-code | 36 |
list_item = {}
#category = []


class Budget():
    """A simple budget tracker for a single spending category.

    Each instance keeps its own transaction history (``mylist``) and running
    ``balance``; the latest transaction is also mirrored into the
    module-level ``list_item`` dict (kept for backward compatibility).
    """

    # Known category names (informational only; instances accept any string).
    category = ["food", "clothing", "entertainment"]

    def __init__(self, category):
        self.category = category
        self.mylist = []            # per-instance transaction history
        self.balance = 100          # starting balance
        self.withdraw_amount = 50   # retained for backward compatibility

    def check_funds(self, amount):
        """Return True if the current balance covers *amount*."""
        return self.balance >= amount

    def deposit(self, amount, category):
        """Add *amount* to the balance and record the transaction."""
        print("\nYou deposited N" + str(amount) + " for " + self.category)
        entry = {
            "amount": amount,
            "category": self.category
        }
        self.mylist.append(entry)
        list_item.update(entry)
        self.balance = self.balance + amount

    def withdraw(self, amount, category):
        """Deduct *amount* from the balance if funds allow.

        Returns False (and leaves the balance unchanged) on insufficient
        funds.  Fixes the original bug that deducted from the fixed
        ``withdraw_amount`` field instead of the live balance.
        """
        if not self.check_funds(amount):
            print("\nInsufficient funds for {} ".format(self.category))
            return False
        print("You have some funds available.")
        self.balance -= amount
        print("\nYou have withdrawn N" + str(amount) + " from " + self.category)
        print("\nYour balance is N{} ".format(self.balance))
        list_item.update({
            "amount": self.balance,
            "category": self.category})

    def compute_balance(self):
        """Print a label for, and return, the current category balance."""
        print("\nYour balance for {} is N".format(self.category), end="")
        return self.balance

    def transfer(self, amount, category):
        """Move *amount* out of this category, recorded as a negative entry.

        Fixes the original crash on ``self.ledger`` (never defined);
        transfers are recorded in ``mylist`` like every other transaction.
        Returns False when the balance cannot cover the transfer.
        """
        if not self.check_funds(amount):
            print("\nInsufficient funds for {} ".format(self.category))
            return False
        self.balance -= amount
        self.mylist.append({"amount": -amount, "category": self.category})
        print(self.mylist, end="")
        print(" transfer for {} ".format(self.category))
user_1 = Budget("food")
user_1.deposit(900, "food")
#user_1.check_funds(400)
#user_1.withdraw(10, "food")
print(user_1.compute_balance())
#user_1.transfer(0, "food")
#print(list_item)
| RuthJane/budget_task | my_budget_test.py | my_budget_test.py | py | 2,433 | python | en | code | 0 | github-code | 36 |
10899114156 | #!/usr/bin/env python3
import subprocess
# Compile the .dylib
subprocess.run(['make'], check=True)
# Convert the .dylib to a JS array literal
payload = open('stage2.dylib', 'rb').read()
js = 'var stage2 = new Uint8Array(['
js += ','.join(map(str, payload))
js += ']);\n'
with open('stage2.js', 'w') as f:
f.write(js)
| saelo/cve-2018-4233 | stage2/make.py | make.py | py | 326 | python | en | code | 175 | github-code | 36 |
18190854190 | from csv import DictReader, DictWriter
from io import StringIO
import functools
import tempfile
import os
# helper to map from column names in the CSV dump to the schema
dumpNameMapping = {
'_id': 'mongo_id',
'admin': 'admin',
'profile.adult': 'adult',
'status.completedProfile': 'completed',
'status.admitted': 'admitted',
'verified': 'verified',
'timestamp': 'timestamp',
'email': 'email',
'profile.name': 'name',
'profile.school': 'school',
'profile.graduationYear': 'gradYear',
'profile.gender': 'gender',
'profile.description': 'description',
'profile.essay': 'essay'
}
def get_applicants_from_csv(csv_bytes):
    """Yield one schema-translated dict per row of an uploaded CSV dump.

    Column names are mapped via ``dumpNameMapping`` and the string booleans
    'true'/'false' are converted to 1/0.

    NOTE(review): ``csv_bytes.save(...)`` suggests a Flask/Werkzeug
    FileStorage upload object — confirm at the call site.
    """
    # while it may be cleaner to do this in memory, it Just Doesn't Work Properly (tm) without hitting disk first--i suspect it has something to do with character encodings
    with tempfile.TemporaryDirectory() as tempdir:
        fn = os.path.join(tempdir, 'dump.csv')
        csv_bytes.save(fn)
        dr = DictReader(open(fn))
        for row in dr:
            translated = {}
            for key in dumpNameMapping:
                if row[key] == 'true':
                    translated[dumpNameMapping[key]] = 1
                elif row[key] == 'false':
                    translated[dumpNameMapping[key]] = 0
                else:
                    translated[dumpNameMapping[key]] = row[key]
            yield translated
def insert_applicant(cursor, applicant):
    """Insert one translated applicant record into the Applicants table.

    *applicant* must contain every key listed in ``column_order``; values
    are bound through SQL parameters, never interpolated.
    """
    column_order = (
        'mongo_id',
        'admin',
        'adult',
        'completed',
        'admitted',
        'verified',
        'timestamp',
        'email',
        'name',
        'school',
        'gradYear',
        'gender',
        'description',
        'essay',
    )
    row_values = [applicant[column] for column in column_order]
    placeholders = ', '.join('?' for _ in column_order)
    statement = 'INSERT INTO Applicants ({}) VALUES ({})'.format(
        ', '.join(column_order), placeholders)
    cursor.execute(statement, row_values)
def create_csv(conn):
    """Build a CSV summary (name/email/average rating/vote count) of all
    completed applicants and return it as a string.

    NOTE(review): rows are accessed as ``row['name']`` — assumes
    ``conn.row_factory`` is set to ``sqlite3.Row`` (or similar); confirm
    where the connection is created.
    """
    c = conn.cursor()
    c.execute('''
        SELECT name, email, mongo_id FROM Applicants WHERE completed=1
    ''')
    rows = c.fetchall()
    if rows is None:
        rows = []
    out = []
    for row in rows:
        # One extra query per applicant to collect that applicant's votes.
        c.execute('''
            SELECT rating
            FROM Votes
            WHERE
                app_id=?
        ''', (row['mongo_id'],))
        votes = c.fetchall()
        if len(votes) != 0:
            # Mean rating, rounded to 3 significant figures.
            voteaverage = functools.reduce( lambda a,v: a + v['rating'], votes, 0 ) / len(votes)
            voteaverage = float('{:.3}'.format(voteaverage))
        else:
            voteaverage = 0
        out.append({
            'name': row['name'],
            'email': row['email'],
            'mongo_id': row['mongo_id'],
            'rating': voteaverage,
            'votes': len(votes)
        })
    # Render to CSV in memory and return the text.
    buf = StringIO()
    csv = DictWriter(buf, fieldnames=['mongo_id', 'name', 'email', 'rating', 'votes'])
    csv.writeheader()
    for app in out:
        csv.writerow(app)
    csv_text = buf.getvalue()
    buf.close()
    return csv_text
| compsoc-edinburgh/htb20-voter | app/data.py | data.py | py | 3,307 | python | en | code | 0 | github-code | 36 |
28667648389 | def printSudoku(vals):
if type(vals) != str:
raise TypeError("Illegal Sudoku type {}".format(type(vals)))
if len(vals) != 81:
raise ValueError("Illegal Sudoku length {} != 81".format(len(vals)))
cry = ""
for q in vals:
if q not in ["1","2","3","4","5","6","7","8","9"]:
cry = cry + str(q + ", ")
if len(cry) > 0:
raise ValueError("Illegal value(s) {} for a Sudoku digit".format(cry[:-2]))
printed = 0
for i in range(19):
if i%6 == 0:
print("#####################################")
elif i%2 ==0:
print("#---+---+---#---+---+---#---+---+---#")
else:
for j in range(37):
if j % 12 == 0:
print('#', end ="")
elif j%4 == 0:
print('|', end ="")
elif j %2 == 0:
print(vals[printed], end ="")
printed+=1
else:
print(" ", end ="")
print() | albertiho/python2018 | python/2/sudoku with exceptions.py | sudoku with exceptions.py | py | 911 | python | en | code | 0 | github-code | 36 |
31690737543 | import json
import os
from typing import TextIO
from ctranslate2.converters.transformers import TransformersConverter
def model_converter(model, model_output):
    """Convert an OpenAI Whisper checkpoint to a CTranslate2 model.

    *model* is the size suffix (e.g. "base", "small"); the float16-quantized
    model is written to *model_output*.  Conversion errors are printed and
    swallowed so a failed conversion does not abort the caller.
    """
    converter = TransformersConverter("openai/whisper-" + model)
    try:
        converter.convert(model_output, None, "float16", False)
    except Exception as e:
        print(e)
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
    """Render a non-negative number of seconds as [HH:]MM:SS<marker>mmm.

    The hours field is omitted unless it is non-zero or
    *always_include_hours* is set.
    """
    assert seconds >= 0, "non-negative timestamp expected"
    total_ms = round(seconds * 1000.0)
    hours, remainder = divmod(total_ms, 3_600_000)
    minutes, remainder = divmod(remainder, 60_000)
    secs, millis = divmod(remainder, 1_000)
    prefix = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
    return f"{prefix}{minutes:02d}:{secs:02d}{decimal_marker}{millis:03d}"
class ResultWriter:
    """Base class for transcript writers.

    Subclasses set ``extension`` and implement ``write_result``; calling the
    instance writes the transcript next to the audio file's basename inside
    ``output_dir``.
    """
    # File extension (without the dot) produced by the concrete writer.
    extension: str
    def __init__(self, output_dir: str):
        self.output_dir = output_dir
    def __call__(self, result: dict, audio_path: str):
        """Write *result* to "<output_dir>/<audio basename>.<extension>"."""
        audio_basename = os.path.basename(audio_path)
        output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
        with open(output_path, "w", encoding="utf-8") as f:
            self.write_result(result, file=f)
    def write_result(self, result: dict, file: TextIO):
        raise NotImplementedError
class WriteTXT(ResultWriter):
    """Plain-text transcript: one stripped segment text per line."""
    extension: str = "txt"
    def write_result(self, result: dict, file: TextIO):
        for segment in result["segments"]:
            print(segment.text.strip(), file=file, flush=True)
class WriteVTT(ResultWriter):
    """WebVTT subtitle output: header line followed by timestamped cues."""
    extension: str = "vtt"
    def write_result(self, result: dict, file: TextIO):
        print("WEBVTT\n", file=file)
        for segment in result["segments"]:
            # A literal "-->" inside cue text would break VTT cue parsing.
            print(
                f"{format_timestamp(segment.start)} --> {format_timestamp(segment.end)}\n"
                f"{segment.text.strip().replace('-->', '->')}\n",
                file=file,
                flush=True,
            )
class WriteSRT(ResultWriter):
    """SubRip (.srt) output: 1-based cue numbers, comma decimal marker."""
    extension: str = "srt"
    def write_result(self, result: dict, file: TextIO):
        for i, segment in enumerate(result["segments"], start=1):
            # write srt lines
            print(
                f"{i}\n"
                f"{format_timestamp(segment.start, always_include_hours=True, decimal_marker=',')} --> "
                f"{format_timestamp(segment.end, always_include_hours=True, decimal_marker=',')}\n"
                f"{segment.text.strip().replace('-->', '->')}\n",
                file=file,
                flush=True,
            )
class WriteTSV(ResultWriter):
    """
    Write a transcript to a file in TSV (tab-separated values) format containing lines like:
    <start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text>

    Using integer milliseconds as start and end times means there's no chance of interference from
    an environment setting a language encoding that causes the decimal in a floating point number
    to appear as a comma; also is faster and more efficient to parse & store, e.g., in C++.
    """
    extension: str = "tsv"
    def write_result(self, result: dict, file: TextIO):
        print("start", "end", "text", sep="\t", file=file)
        for segment in result["segments"]:
            print(round(1000 * segment.start), file=file, end="\t")
            print(round(1000 * segment.end), file=file, end="\t")
            # Tabs inside the text would corrupt the column layout.
            print(segment.text.strip().replace("\t", " "), file=file, flush=True)
class WriteJSON(ResultWriter):
    """Raw JSON dump of the entire result dict."""
    extension: str = "json"
    def write_result(self, result: dict, file: TextIO):
        # NOTE(review): assumes result is JSON-serializable; confirm that
        # segment objects are converted to plain dicts upstream.
        json.dump(result, file)
| ahmetoner/whisper-asr-webservice | app/faster_whisper/utils.py | utils.py | py | 3,802 | python | en | code | 1,105 | github-code | 36 |
15171813390 | # Python 3 program to
# compute sum of digits in
# number.
# Function to get sum of digits
def getSum(n):
    """Return the sum of the decimal digits of *n* (the sign is ignored).

    Uses integer floor division: the original ``int(n / 10)`` relied on
    float division, which silently corrupts digits for integers larger
    than 2**53, and mishandled negative inputs entirely.
    """
    n = abs(n)
    total = 0
    while n:
        total += n % 10
        n //= 10
    return total
# Driver code
if __name__ == "__main__":
n = 687
# Function call
print(getSum(n))
| amallaltlai/algorithms-python | program-for-sum-of-the-digits-of-a-given-number.py | program-for-sum-of-the-digits-of-a-given-number.py | py | 280 | python | en | code | 0 | github-code | 36 |
74796094503 | from flask import Flask, render_template, Response, jsonify, request
import settings
from flask import abort
app = Flask(__name__,
static_url_path='',
static_folder='static',
template_folder='templates')
log = settings.logging
@app.route('/')
def index():
    """Serve the landing page template."""
    return render_template('index.html')
options = [
{
'id': 1,
'title': u'Add new device',
'description': u'More description for add device option',
'status': True
},
{
'id': 2,
'title': u'View device',
'description': u'More description for view device option',
'status': False
}
]
@app.route('/options/api/v1.0/options', methods=['GET'])
def get_options():
    """Return the full list of options as JSON."""
    return jsonify({'options': options})
@app.route('/options/api/v1.0/options/<int:option_id>', methods=['GET'])
def get_task(option_id):
    """Return the single option with the given id as JSON, or a 404."""
    match = next((opt for opt in options if opt['id'] == option_id), None)
    if match is None:
        abort(404)
    # Response key kept as 'task' for backward compatibility with clients.
    return jsonify({'task': match})
if __name__ == '__main__':
    # Log startup BEFORE app.run(): run() blocks until the server shuts
    # down, so the original trailing log line never fired during operation.
    log.debug("Started up cq app")
    app.run(host='0.0.0.0', port=80, threaded=True, debug=True)
    # app.run(host='0.0.0.0', port=443, threaded=True, ssl_context=(
    #     '/etc/letsencrypt/live/cq.jarzebak.eu/cert.pem', '/etc/letsencrypt/live/cq.jarzebak.eu/privkey.pem'))
| jarzab3/flask_docker | cq_iot/app.py | app.py | py | 1,346 | python | en | code | 0 | github-code | 36 |
16239759623 | # list comprehension = a way to create a new list with less syntax
# can mimic certain lambda functions, easier to read
# list = [expression for item in iterable]
squares = [] # empty list
for i in range(1, 11): # for loop
squares.append(i*i) # append() method adds an item to the end of the list
print(squares) # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
squares = [i*i for i in range(1, 11)] # list comprehension
print(squares) # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100] | Merlijnos/Python-Full-Course | list comprehension.py | list comprehension.py | py | 476 | python | en | code | 0 | github-code | 36 |
7537133333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Jan 10, 2013
@author: daniel
This utility file provides a method for moving information between Sugar
instances.
'''
from collections import defaultdict
from sugarcrm import Sugarcrm
from code import interact
from sugarcrm.sugarentry import SugarEntry
com_url = 'http://<host>:<port>/service/v4_1/rest.php'
com_usr = '<user>'
com_pwd = '<pass>'
pro_url = 'http://<host>:<port>/service/v4_1/rest.php'
pro_usr = '<user>'
pro_pwd = '<pass>'
modules = ['Accounts', 'Contacts', 'Opportunities', 'Leads', 'Notes',
'Prospects', 'Tasks']
cache = defaultdict(dict)
# Fill in user values from one system to the other here.
# The mapping is From -> To. Ie: Old System -> New System.
cache['Users']['1'] = '1'
# This map holds fields that need to pull from other cached values.
relate = {
'Contacts': {
'account_id': 'Accounts',
'assigned_user_id': 'Users'
},
'Opportunities': {
'account_id': 'Accounts',
'assigned_user_id': 'Users'
},
'Leads': {
'account_id': 'Accounts',
'assigned_user_id': 'Users',
'contact_id': 'Contacts',
'opportunity_id': 'Opportunities',
},
'Prospects': {
'assigned_user_id': 'Users',
'lead_id': 'Leads'
},
'Tasks': {
'assigned_user_id': 'Users',
'contact_id': 'Contacts'
},
'Calls': {
'assigned_user_id': 'Users',
'contact_id': 'Contacts'
},
'Notes': {
'account_id': 'Accounts',
'assigned_user_id': 'Users',
'contact_id': 'Contacts',
'lead_id': 'Leads',
'opportunity_id': 'Opportunities'
},
'Accounts': {
'assigned_user_id': 'Users'
}
}
SPro = Sugarcrm(pro_url, pro_usr, pro_pwd)
SCom = Sugarcrm(com_url, com_usr, com_pwd)
# A second lookup, this one for required module level connections that
# must be generated.
mod_links = {
'Tasks': [SCom.modules['Leads'],
SCom.modules['Notes'],
SCom.modules['Opportunities'],
SCom.modules['Accounts']],
'Notes': [SCom.modules['Opportunities'],
SCom.modules['Leads'], ]
}
def makeProEntry(oldEntry, oldID = None):
    """Copy one Sugar entry from the old instance (SCom) into the new (SPro).

    Related-record ids are remapped through the module-level ``cache``
    (old id -> new id), module-level relations from ``mod_links`` are
    re-created, and the new id is recorded in ``cache`` for later lookups.
    """
    module = oldEntry._module
    mod_name = module._name
    newEntry = SugarEntry(SPro, mod_name)
    for field in module._fields.keys():
        # The new instance assigns its own id; remember the old one for the cache.
        if field == 'id':
            oldID = oldEntry[field]
            continue
        # Foreign-key fields are remapped to the already-migrated record's id.
        if field in relate[mod_name]:
            ref_mod = relate[mod_name][field]
            newEntry[field] = cache[ref_mod].get(oldEntry[field], '')
            continue
        newEntry[field] = oldEntry[field]
    newEntry.save()
    # Re-create module-level relationships for related records already migrated.
    for relmod in mod_links.get(mod_name, []):
        for relentry in oldEntry.get_related(relmod):
            if relentry['id'] in cache[relmod._name]:
                # NOTE(review): SPro.newSugarEntry is not defined in this
                # file — confirm the Sugarcrm client provides it (elsewhere
                # entries are built via SugarEntry(SPro, name)).
                newrelentry = SPro.newSugarEntry(relmod._name)
                newrelentry['id'] = cache[relmod._name][relentry['id']]
                newEntry.relate(newrelentry)
    if oldID is not None:
        cache[mod_name][oldID] = newEntry['id']
if __name__ == '__main__':
interact(local = globals())
| gddc/python_webservices_library | sugarcrm/utils/S2S.py | S2S.py | py | 2,952 | python | en | code | 46 | github-code | 36 |
7660703922 | import numpy as np
def loadDataSet(filename):
    """Load a tab-separated numeric data file.

    Each line becomes a list of floats; the target value stays in the last
    column (no separate label vector is returned).
    """
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(filename) as fr:
        return [list(map(float, line.strip().split("\t"))) for line in fr]
def binSplitDataSet(dataSet, feature, value):
    """Binary-split *dataSet* on column *feature* at threshold *value*.

    Returns (rows where feature > value, rows where feature <= value).
    """
    above = np.nonzero(dataSet[:, feature] > value)[0]
    below = np.nonzero(dataSet[:, feature] <= value)[0]
    return dataSet[above, :], dataSet[below, :]
def regLeaf(dataSet):
    """Leaf model for a regression tree: the mean of the target column.

    Called by chooseBestSplit/createTree when a node becomes a leaf.
    """
    targets = dataSet[:, -1]
    return np.mean(targets)
def regErr(dataSet):
    """Total squared error of the target column (variance * sample count)."""
    n_samples = dataSet.shape[0]
    return np.var(dataSet[:, -1]) * n_samples
def chooseBestSplit(dataSet,leafType=regLeaf,errType=regErr,ops=(1,4)):
    """Find the best binary split of *dataSet*.

    Returns (feature index, split value), or (None, leaf value) when no
    acceptable split exists.  ops[0] is the minimum error reduction a split
    must achieve; ops[1] is the minimum number of samples per child.
    """
    m,n=dataSet.shape
    S=errType(dataSet) # total error before splitting
    tolS=ops[0] # smallest tolerated error reduction
    tolN=ops[1] # smallest tolerated child sample count
    if len(set(dataSet[:,-1].T.tolist()[0]))==1: # all targets equal: make a leaf
        return None,leafType(dataSet)
    # initialize best error / best feature / best split value
    bestS=np.inf
    bestIndex=0 # index of the best feature so far
    bestValue=0
    for featIndex in range(n-1): # try every feature
        for splitVal in set(dataSet[:,featIndex].T.tolist()[0]):
            mat0,mat1=binSplitDataSet(dataSet,featIndex,splitVal)
            if mat0.shape[0]<tolN or mat1.shape[0]<tolN: # child too small: skip
                continue
            # combined error of the two children
            newS=errType(mat0)+errType(mat1)
            # keep the split with the lowest combined error
            if newS<bestS:
                bestS=newS
                bestIndex=featIndex
                bestValue=splitVal
    if S-bestS<tolS: # improvement too small: make a leaf
        return None,leafType(dataSet)
    # re-split on the winning feature/value to re-check child sizes
    mat0,mat1=binSplitDataSet(dataSet,bestIndex,bestValue)
    if mat0.shape[0]<tolN or mat1.shape[0]<tolN:
        return None,leafType(dataSet)
    return bestIndex,bestValue
def createTree(dataSet, leafType=regLeaf, errType=regErr, ops=(1, 4)):
    """
    Recursive CART tree builder.
    leafType: function that builds a leaf node's model
    errType: function that measures error on a data subset
    ops: (min error reduction, min samples per child) pre-pruning knobs
    """
    feat, val = chooseBestSplit(
        dataSet, leafType, errType, ops)  # best feature and split value
    if feat==None:  # no acceptable split: val is the leaf model
        return val
    retTree = {}
    retTree['spInd'] = feat
    retTree['spVal'] = val
    # split the data on the chosen feature/value
    lSet, rSet = binSplitDataSet(dataSet, feat, val)
    retTree['left'] = createTree(
        lSet, leafType, errType, ops)  # recurse on the left subset
    retTree['right'] = createTree(rSet, leafType, errType, ops)
    return retTree
def linearSolve(dataSet):
    """Fit ordinary least squares to *dataSet* (last column is the target).

    Returns (ws, X, y) where X carries a leading bias column of ones.

    Raises numpy.linalg.LinAlgError when X^T X is singular.  The original
    returned ``np.array([0, 0, 0])`` here, which silently unpacked into
    zeros at the call sites and made singular fits look like perfect
    (zero-error) models.
    """
    m, n = dataSet.shape
    x = np.mat(np.ones((m, n)))
    y = np.mat(np.ones((m, 1)))
    x[:, 1:n] = dataSet[:, :n-1]  # features; column 0 stays 1.0 as the bias
    y = dataSet[:, -1]
    xTx = x.T*x
    if np.linalg.det(xTx) == 0:
        raise np.linalg.LinAlgError(
            'This matrix is singular, cannot do inverse; '
            'try increasing the second value of ops')
    ws = xTx.I*(x.T*y)
    return ws, x, y
def modelLeaf(dataSet):
    """Leaf model for a model tree: the fitted linear-regression weights."""
    weights, _, _ = linearSolve(dataSet)
    return weights
def modelErr(dataSet):
    """Squared prediction error of a linear model fitted to *dataSet*."""
    weights, features, targets = linearSolve(dataSet)
    residuals = targets - features * weights
    return np.sum(np.power(residuals, 2))
def regTreeEval(model, inDat):
    """Evaluate a regression-tree leaf: the leaf value IS the prediction
    (inDat is accepted only for signature compatibility with modelTreeEval)."""
    prediction = float(model)
    return prediction
def modelTreeEval(model, inDat):
    """Evaluate a model-tree leaf: apply the linear weights to the input row.

    A bias term of 1 is prepended to match the X matrix built in linearSolve.
    """
    n_cols = inDat.shape[1]
    augmented = np.mat(np.ones((1, n_cols + 1)))
    augmented[:, 1:n_cols + 1] = inDat
    return float(augmented * model)
def treeForeCast(tree, inDat, modelEval=regTreeEval):
    """Predict for one input row by walking the tree.

    modelEval is the leaf evaluator: regTreeEval for regression trees,
    modelTreeEval for model trees.
    """
    if not isTree(tree):
        # Bug fix: the original hard-coded regTreeEval here, ignoring the
        # modelEval argument, so model-tree leaves were evaluated incorrectly.
        return modelEval(tree, inDat)
    # Compare the input's split-feature value against the node threshold
    # to choose the left (greater) or right (less-or-equal) branch.
    if inDat[tree['spInd']] > tree['spVal']:
        branch = tree['left']
    else:
        branch = tree['right']
    if isTree(branch):
        return treeForeCast(branch, inDat, modelEval)
    return modelEval(branch, inDat)
def createForeCast(tree, testData, modelEval=regTreeEval):
    """Predict for every row of *testData*; returns an (m, 1) forecast matrix."""
    m = len(testData)
    yHat = np.mat(np.zeros((m, 1)))
    for i in range(m):
        yHat[i, 0] = treeForeCast(tree, testData[i], modelEval) # one row per call
    return yHat
def isTree(obj):
    """Return True if *obj* is an internal tree node (the dict structure
    built by createTree) rather than a leaf model."""
    return isinstance(obj,dict)
4157086617 | import os
import glob
import imageio.v2 as imageio
from wand.image import Image
import PySimpleGUI as sg
from moviepy.editor import ImageSequenceClip
# Function to create an MP4 movie from images.
# Function to create an MP4 movie from images.
def create_mp4(input_folder, output_path, fps):
    """Encode all PNG/JPEG images in *input_folder* (sorted by filename)
    into an MPEG-4 video at *fps* frames per second.  Does nothing when the
    folder contains no matching images."""
    # Get a sorted list of image paths.
    image_paths = sorted(
        [
            os.path.join(input_folder, file)
            for file in os.listdir(input_folder)
            if file.lower().endswith((".png", ".jpg", ".jpeg"))
        ]
    )
    # Read in images.
    images = [imageio.imread(path) for path in image_paths]
    if images:
        # Create and write video file.
        clip = ImageSequenceClip(images, fps=fps)
        clip.write_videofile(output_path, codec="mpeg4")
# Function to create a GIF from images.
# Function to create a GIF from images.
def create_gif(input_folder, output_path, duration, loop):
    """Combine all images in *input_folder* (sorted by filename) into an
    animated GIF at *output_path*.

    NOTE(review): *duration* is passed straight to imageio, whose GIF writer
    interprets it as time per frame — the GUI labels this field "delay time
    in ms"; confirm the unit matches the installed imageio version.
    """
    # Ensure output path ends with .gif.
    if not output_path.lower().endswith(".gif"):
        output_path += ".gif"
    # Get a sorted list of image paths.
    image_paths = sorted(
        [
            os.path.join(input_folder, file)
            for file in os.listdir(input_folder)
            if file.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".exr"))
        ]
    )
    # Read in images.
    images = [imageio.imread(path) for path in image_paths]
    if images:
        # Create and write GIF file.
        imageio.mimsave(output_path, images, duration=duration, loop=loop)
# Function to process images with various effects.
def process_images(
input_folder,
output_folder,
dither,
num_colors,
pixelate,
pixelate_factor,
resize,
width,
height,
rotate,
angle,
blur,
radius,
mirror,
):
processing_done = False
for img_path in glob.glob(os.path.join(input_folder, "*")):
if img_path.lower().endswith((".png", ".jpg", ".jpeg", ".tiff", ".exr")):
# Apply requested image processing operations.
with Image(filename=img_path) as img:
if dither:
img.quantize(number_colors=int(num_colors), dither="riemersma")
processing_done = True
if pixelate:
img.resize(
int(img.width // pixelate_factor),
int(img.height // pixelate_factor),
)
img.resize(
img.width * pixelate_factor, img.height * pixelate_factor
)
processing_done = True
if resize:
img.resize(width, height)
processing_done = True
if rotate:
img.rotate(angle)
processing_done = True
if blur:
img.gaussian_blur(radius=radius)
processing_done = True
if mirror:
img.flop()
processing_done = True
img.save(
filename=os.path.join(output_folder, os.path.basename(img_path))
)
return processing_done
# Set up PySimpleGUI.
sg.theme("DarkBlue")
# Define the layout of the GUI.
layout = [
[
sg.Text(
"Process Images bakes input folder images to output folder with selected effects"
)
],
[
sg.Text("Input Folder", size=(15, 1)),
sg.InputText(key="-IN-"),
sg.FolderBrowse(),
],
[
sg.Text("Output Folder", size=(15, 1)),
sg.InputText(key="-OUT-"),
sg.FolderBrowse(),
],
[
sg.Checkbox("Dither", key="-DITHER-"),
sg.InputText(key="-NUM_COLORS-", default_text="256"),
],
[
sg.Checkbox("Pixelate", key="-PIXELATE-"),
sg.InputText(key="-PIXELATE_FACTOR-", default_text="1"),
],
[
sg.Checkbox("Resize", key="-RESIZE-"),
sg.InputText(key="-WIDTH-", default_text="512"),
sg.InputText(key="-HEIGHT-", default_text="512"),
],
[
sg.Checkbox("Rotate", key="-ROTATE-"),
sg.InputText(key="-ANGLE-", default_text="90"),
],
[sg.Checkbox("Blur", key="-BLUR-"), sg.InputText(key="-RADIUS-", default_text="0")],
[sg.Checkbox("Mirror", key="-MIRROR-", default=False)],
[
sg.Text("MP4 output path"),
sg.InputText(key="-MP4-"),
sg.FileSaveAs(file_types=(("MP4 Files", "*.mp4"),)),
],
[sg.Text("MP4 FPS"), sg.InputText(key="-FPS-", default_text="24")],
[
sg.Text("GIF output path"),
sg.InputText(key="-GIF-"),
sg.FileSaveAs(file_types=(("GIF Files", "*.gif"),)),
],
[sg.Text("GIF delay time in ms"), sg.InputText(key="-DURATION-", default_text="5")],
[sg.Text("GIF loop count"), sg.InputText(key="-LOOP-", default_text="0")],
[
sg.Button("Process Images"),
sg.Button("Create MP4"),
sg.Button("Create GIF"),
sg.Button("Exit"),
],
]
# Define icon path.
icon_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data_set_icon_246682.ico"
)
# Create the GUI window.
window = sg.Window("ImageChef", layout, background_color="", icon=icon_path)
# Main event loop.
while True:
event, values = window.read()
if event in (None, "Exit"):
break
elif event == "Process Images":
try:
pixelate_factor = int(values["-PIXELATE_FACTOR-"])
width = int(values["-WIDTH-"])
height = int(values["-HEIGHT-"])
angle = float(values["-ANGLE-"])
radius = float(values["-RADIUS-"])
if not values["-IN-"] or not os.path.isdir(values["-IN-"]):
raise ValueError("Invalid or non-existent input folder")
if not values["-OUT-"]:
sg.popup(
"No output folder specified, processed images will be saved in the input folder."
)
except ValueError as e:
sg.popup(f"Invalid input: {e}")
continue
processing_done = process_images(
values["-IN-"],
values["-OUT-"],
values["-DITHER-"],
values["-NUM_COLORS-"],
values["-PIXELATE-"],
pixelate_factor,
values["-RESIZE-"],
width,
height,
values["-ROTATE-"],
angle,
values["-BLUR-"],
radius,
values["-MIRROR-"],
)
if processing_done:
sg.popup("Processing Done")
else:
sg.popup("No Processing Done")
elif event == "Create MP4":
try:
fps = int(values["-FPS-"])
if not values["-IN-"] or not os.path.isdir(values["-IN-"]):
raise ValueError("Invalid or non-existent input folder")
if not values["-MP4-"]:
raise ValueError("Output path for MP4 is empty")
create_mp4(values["-IN-"], values["-MP4-"], fps)
except ValueError as e:
sg.popup(f"Invalid input for MP4 creation: {e}")
elif event == "Create GIF":
try:
duration = float(values["-DURATION-"])
loop = int(values["-LOOP-"])
if not values["-OUT-"] or not os.path.isdir(values["-OUT-"]):
raise ValueError("Invalid or non-existent output folder")
if not values["-GIF-"]:
raise ValueError("Output path for GIF is empty")
create_gif(values["-OUT-"], values["-GIF-"], duration, loop)
except ValueError as e:
sg.popup(f"Invalid input for GIF creation: {e}")
window.close()
| avyaktam/ImageChef | ImageChef.py | ImageChef.py | py | 7,866 | python | en | code | 0 | github-code | 36 |
43156184597 | from VendApi import *
from VendApi2 import *
import CsvUtil as cu
# Vend API credentials; fill in before running.
domain = ''
token = ''
#the product_id to search for in the inventory endpoint
prod_id = '2598c236-c76c-2a64-4aee-a410a54af7d2'
####################################
api = VendApi(domain, token)
api2 = VendApi2(domain, token)
inventories = api2.getInventories()
outlets = api.getOutlets()
# Index the outlets by name and by id for quick lookup below.
onametoout = api.getKeyToObjs(outlets, 'name')
oidtoout = api.getKeyToObjs(outlets, 'id')
# print(oidtoout)
outtoinventory = {}
onametoinventory ={}
raw_inventory = []
#outlet to inventory record
for inv in inventories:
    curr_pid = inv['product_id']
    if curr_pid != prod_id:
        # Only keep inventory rows for the product we are searching for.
        continue
    raw_inventory.append(inv)
    curr_oid = inv['outlet_id']
    curr_out_name = oidtoout[curr_oid]['name']
    # outtoinventory[f'{curr_out_name}({curr_oid})'] = inv['inventory_level']
    # onametoinventory[curr_out_name] = inv['inventory_level']
    # Key by "OutletName(outlet_id)" so outlets with duplicate names stay distinct.
    onametoinventory[f'{curr_out_name}({curr_oid})'] = inv['inventory_level']
print("\n\n###################### OUTLET TO INVENTORY MAPPED JSON ######################\n")
print(onametoinventory)
print('\n-------------------------------------\n')
print("\n###################### RAW INVENTORY RECORDS FROM THE /api/2.0/inventory ENDPOINT ######################\n")
print(raw_inventory)
| minstack/VScripts | InventoryRecordsLookup/inventory_records.py | inventory_records.py | py | 1,307 | python | en | code | 0 | github-code | 36 |
26469668124 | from ...key import Address
from ...common import Int, concatBytes, _hint
from ...hint import MNFT_COLLECTION_POLICY, MNFT_COLLECTION_REGISTER_FORM, MNFT_MINT_FORM, MNFT_NFT_ID, MNFT_SIGNER, MNFT_SIGNERS
class CollectionRegisterForm:
    """Form payload for registering an NFT collection (target, symbol, name, royalty, URI, whitelist)."""

    def __init__(self, target, symbol, name, royalty, uri, whites):
        # royalty is a percentage in [0, 100).
        assert royalty >= 0 and royalty < 100, 'Invalid royalty; CollectionRegisterForm'
        self.hint = _hint(MNFT_COLLECTION_REGISTER_FORM)
        self.target = Address(target)
        self.symbol = symbol
        self.name = name
        self.royalty = Int(royalty)
        self.uri = uri
        # Whitelisted account addresses.
        self.whites = []
        for w in whites:
            self.whites.append(Address(w))

    def bytes(self):
        """Concatenate every field's byte form, in fixed order, for signing."""
        bTarget = self.target.bytes()
        bSymbol = self.symbol.encode()
        bName = self.name.encode()
        bRoyalty = self.royalty.bytes()
        bUri = self.uri.encode()
        _whites = bytearray()
        for w in self.whites:
            _whites += w.bytes()
        bWhites = bytes(_whites)
        return concatBytes(bTarget, bSymbol, bName, bRoyalty, bUri, bWhites)

    def dict(self):
        """JSON-ready representation of the form."""
        form = {}
        form['_hint'] = self.hint.hint
        form['target'] = self.target.address
        form['symbol'] = self.symbol
        form['name'] = self.name
        form['royalty'] = self.royalty.value
        form['uri'] = self.uri
        whites = []
        for w in self.whites:
            whites.append(w.address)
        form['whites'] = whites
        return form
class CollectionPolicy:
    """Policy of an NFT collection: name, royalty percentage, URI and whitelist."""

    def __init__(self, name, royalty, uri, whites):
        # royalty is a percentage in [0, 100).
        assert royalty >= 0 and royalty < 100, 'Invalid royalty; CollectionPolicy'
        self.hint = _hint(MNFT_COLLECTION_POLICY)
        self.name = name
        self.royalty = Int(royalty)
        self.uri = uri
        # Whitelisted account addresses.
        self.whites = []
        for w in whites:
            self.whites.append(Address(w))

    def bytes(self):
        """Serialize as name || royalty || uri || whitelist addresses."""
        bName = self.name.encode()
        bRoyalty = self.royalty.bytes()
        bUri = self.uri.encode()
        _whites = bytearray()
        for w in self.whites:
            _whites += w.bytes()
        bWhites = bytes(_whites)
        return concatBytes(bName, bRoyalty, bUri, bWhites)

    def dict(self):
        """JSON-ready representation of the policy."""
        policy = {}
        policy['_hint'] = self.hint.hint
        policy['name'] = self.name
        policy['royalty'] = self.royalty.value
        policy['uri'] = self.uri
        whites = []
        for w in self.whites:
            whites.append(w.address)
        policy['whites'] = whites
        return policy
class NFTSigner:
    """A single signer: an account, its share percentage, and whether it has signed."""

    def __init__(self, account, share, signed):
        # share is a percentage in [0, 100].
        assert share >= 0 and share <= 100, 'Invalid share; NFTSigner'
        self.hint = _hint(MNFT_SIGNER)
        self.account = Address(account)
        self.share = Int(share)
        self.signed = signed

    def bytes(self):
        """Serialize as account || share || signed-flag (single byte, 0 or 1)."""
        bAccount = self.account.bytes()
        bShare = self.share.bytes()
        bSigned = bytes([0])
        if self.signed:
            bSigned = bytes([1])
        return concatBytes(bAccount, bShare, bSigned)

    def dict(self):
        """JSON-ready representation."""
        signer = {}
        signer['_hint'] = self.hint.hint
        signer['account'] = self.account.address
        signer['share'] = self.share.value
        signer['signed'] = self.signed
        return signer
class NFTSigners:
    """Collection of NFTSigner entries together with the total share they hold."""

    def __init__(self, total, signers):
        # total is a percentage in [0, 100].
        assert total >= 0 and total <= 100, 'Invalid total share; NFTSigners'
        self.hint = _hint(MNFT_SIGNERS)
        self.total = Int(total)
        self.signers = signers

    def bytes(self):
        """Serialize as total || each signer's bytes, in order."""
        bTotal = self.total.bytes()
        _signers = bytearray()
        for s in self.signers:
            _signers += s.bytes()
        # Freeze to immutable bytes, matching the serialization style of every
        # other class in this module (the original left a mutable bytearray).
        bSigners = bytes(_signers)
        return concatBytes(bTotal, bSigners)

    def dict(self):
        """JSON-ready representation."""
        signers = {}
        signers['_hint'] = self.hint.hint
        signers['total'] = self.total.value
        _signers = []
        for s in self.signers:
            _signers.append(s.dict())
        signers['signers'] = _signers
        return signers
class MintForm:
    """Form payload for minting an NFT: content hash, URI, creators and copyrighters."""

    def __init__(self, hash, uri, creators, copyrighters):
        self.hint = _hint(MNFT_MINT_FORM)
        # NOTE(review): `hash` shadows the builtin; kept for API compatibility.
        self.hash = hash
        self.uri = uri
        # creators / copyrighters must expose bytes() and dict() (e.g. NFTSigners).
        self.creators = creators
        self.copyrighters = copyrighters

    def bytes(self):
        """Serialize as hash || uri || creators || copyrighters."""
        bHash = self.hash.encode()
        bUri = self.uri.encode()
        bCreators = self.creators.bytes()
        bCopyrighters = self.copyrighters.bytes()
        return concatBytes(bHash, bUri, bCreators, bCopyrighters)

    def dict(self):
        """JSON-ready representation."""
        form = {}
        form['_hint'] = self.hint.hint
        form['hash'] = self.hash
        form['uri'] = self.uri
        form['creators'] = self.creators.dict()
        form['copyrighters'] = self.copyrighters.dict()
        return form
class NFTID:
    """Identifier of an NFT: collection symbol plus a 1-based index within it."""

    def __init__(self, collection, idx):
        # idx is an Int wrapper; indices start at 1.
        assert idx.value > 0, 'idx must be over zero; NFTID'
        self.hint = _hint(MNFT_NFT_ID)
        self.collection = collection
        self.idx = idx

    def bytes(self):
        """Serialize as collection || idx."""
        bCollection = self.collection.encode()
        bIdx = self.idx.bytes()
        return concatBytes(bCollection, bIdx)

    def dict(self):
        """JSON-ready representation."""
        # NOTE(review): local name `id` shadows the builtin; harmless here.
        id = {}
        id['_hint'] = self.hint.hint
        id['collection'] = self.collection
        id['idx'] = self.idx.value
        return id
| ProtoconNet/mitum-py-util | src/mitumc/operation/nft/base.py | base.py | py | 5,368 | python | en | code | 2 | github-code | 36 |
9495049897 | ### Add fixed time effects and controls for infection levels and national lockdown
# Initial imports
import pandas as pd
import statsmodels.api as sm
import numpy as np
from scipy import stats
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
#Import household control data
data = pd.read_csv('household_regression_data_weeks.csv')
# Calculate the national lockdown variable for each week
lockdown_weekly = data.groupby('Week')['National_lockdown'].mean().reset_index()
#Import the y_weekly data
y_weekly = pd.read_csv('y_weekly_inf.csv')
#Merge on the national lockdown control and save the file
y_weekly = y_weekly.merge(lockdown_weekly, on='Week')
y_weekly.to_csv('y_weekly_full.csv')
# Fit the OLS regression model
# Y ~ treatment indicator + week effect + lagged student infection level
# + national-lockdown control.
model = smf.ols(formula='Y ~ Treatment + Week + Student_infection_lag + National_lockdown', data=y_weekly).fit()
print(model.summary())
# Plot the residuals
sns.residplot(x=model.predict(), y=y_weekly['Y'], lowess=True, line_kws={'color': 'red'})
plt.title('Residual Plot')
plt.xlabel('Predicted Values')
plt.ylabel('Residuals')
plt.show()
# Plot the real data and model over time
plt.plot(y_weekly['Week'], y_weekly['Y'], label='Real Data')
plt.plot(y_weekly['Week'], model.predict(), label='Model')
# Dashed marker at week 10 -- presumably the intervention week; confirm.
plt.axvline(x=10, linestyle='--', color='black')
plt.title('Real Data vs. Model Over Time')
plt.xlabel('Week of Study')
plt.ylabel('% Participation')
plt.ylim((0,1))
plt.legend()
plt.show()
| rg522/psych_owner | aggregate_model_4.py | aggregate_model_4.py | py | 1,484 | python | en | code | 0 | github-code | 36 |
2152603533 | import calendar
import unittest
from datetime import date, datetime, timedelta
from codenotes import parse_args
from codenotes.util.args import date_args_empty, dates_to_search
class TestDateArgsNeededEmpty(unittest.TestCase):
    """date_args_empty is True only when neither search text nor a date flag is given."""

    def test_no_args(self):
        # Bare `task search`: nothing to filter by.
        args = parse_args(["task", "search"])
        self.assertTrue(date_args_empty(args))

    def test_only_date(self):
        args = parse_args(["task", "search", "--today"])
        self.assertFalse(date_args_empty(args))

    def test_only_text(self):
        args = parse_args(["task", "search", "New", "task", "added"])
        self.assertFalse(date_args_empty(args))

    def test_text_and_date(self):
        args = parse_args(["task", "search", "New", "task", "added", "--today"])
        self.assertFalse(date_args_empty(args))
class TestDateToSearch(unittest.TestCase):
    """dates_to_search maps a date flag to a single date or a [first, last] pair."""

    def test_today(self):
        search_date = datetime.now().date()
        args = parse_args(["task", "search", "--today"])
        self.assertEqual(dates_to_search(args), search_date)

    def test_yesterday(self):
        search_date = datetime.now().date() - timedelta(days=1)
        args = parse_args(["task", "search", "--yesterday"])
        self.assertEqual(dates_to_search(args), search_date)

    def test_month(self):
        # calendar.monthrange returns (weekday of day 1, number of days).
        now = datetime.now()
        num_days = calendar.monthrange(now.year, now.month)[1]
        days = [date(now.year, now.month, 1), date(now.year, now.month, num_days)]
        args = parse_args(["task", "search", "--month"])
        self.assertListEqual(dates_to_search(args), days)

    def test_week(self):
        # Monday-based week containing today.
        now = datetime.now().date()
        first_day = now - timedelta(days=now.weekday())
        last_day = first_day + timedelta(days=6)
        days = [first_day, last_day]
        args = parse_args(["task", "search", "--week"])
        self.assertListEqual(dates_to_search(args), days)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| EGAMAGZ/codenotes | tests/util/test_args.py | test_args.py | py | 1,944 | python | en | code | 0 | github-code | 36 |
2028059224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 06:56:45 2018
@author: Javier Alejandro Acevedo Barroso
"""
import numpy as np
import matplotlib.pyplot as plt
# Observed data points and their reported uncertainties.
x_obs = np.array([-2.0,1.3,0.4,5.0,0.1, -4.7, 3.0, -3.5,-1.1])
y_obs = np.array([ -1.931, 2.38, 1.88, -24.22, 3.31, -21.9, -5.18, -12.23, 0.822])
# NOTE(review): some sigma entries are negative; they only enter the
# likelihood squared, but confirm against the data source.
sigma_y_obs = ([ 2.63, 6.23, -1.461, 1.376, -4.72, 1.313, -4.886, -1.091, 0.8054])
plt.errorbar(x_obs, y_obs, yerr=sigma_y_obs, fmt='o')
def model(x, a, b, c):
    """Quadratic model a*x^2 + b*x + c, evaluated elementwise."""
    return a * x * x + b * x + c


def loglikelihood(x_obs, y_obs, sigma_y_obs, a, b, c):
    """Gaussian log-likelihood (up to an additive constant) of the model."""
    residuals = (y_obs - model(x_obs, a, b, c)) / sigma_y_obs
    return -0.5 * np.sum(residuals ** 2)
def logprior(a, b, c):
    """Log-prior; currently an (improper) flat prior over all of R^3.

    The commented-out code below is the previous bounded uniform prior.
    """
    # p = -np.inf
    # if a < 2 and a >-2 and b >-10 and b<10 and c > -10 and c < 10:
    #     p = 0.0
    # return p
    return 0
N= 50000
# Metropolis-Hastings chains for the three quadratic-model parameters.
lista_a = [np.random.random()]
lista_b = [np.random.random()]
lista_c = [np.random.random()]
logposterior = [loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[0], lista_b[0], lista_c[0]) + logprior(lista_a[0], lista_b[0], lista_c[0])]
# Proposal step widths for each parameter.
sigma_delta_a = 0.2
sigma_delta_b = 1
sigma_delta_c = 1.0
for i in range(1,N):
    propuesta_a = lista_a[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_a)
    propuesta_b = lista_b[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_b)
    # BUGFIX: the c proposal was seeded from lista_b[i-1], so the c chain
    # random-walked around b's value instead of its own previous state.
    propuesta_c = lista_c[i-1] + np.random.normal(loc=0.0, scale=sigma_delta_c)
    logposterior_viejo = loglikelihood(x_obs, y_obs, sigma_y_obs, lista_a[i-1], lista_b[i-1], lista_c[i-1]) + logprior(lista_a[i-1], lista_b[i-1], lista_c[i-1])
    logposterior_nuevo = loglikelihood(x_obs, y_obs, sigma_y_obs, propuesta_a, propuesta_b, propuesta_c) + logprior(propuesta_a, propuesta_b,propuesta_c)
    # Metropolis acceptance probability.
    r = min(1,np.exp(logposterior_nuevo-logposterior_viejo))
    alpha = np.random.random()
    if(alpha<r):
        lista_a.append(propuesta_a)
        lista_b.append(propuesta_b)
        lista_c.append(propuesta_c)
        logposterior.append(logposterior_nuevo)
    else:
        # Rejected: repeat the previous sample.
        lista_a.append(lista_a[i-1])
        lista_b.append(lista_b[i-1])
        lista_c.append(lista_c[i-1])
        logposterior.append(logposterior_viejo)
lista_a = np.array(lista_a)
lista_b = np.array(lista_b)
lista_c = np.array(lista_c)
logposterior = np.array(logposterior)
realx = np.linspace(-5,5,100)
#rta = (lista_a.argmax(),lista_b.argmax(),lista_c.argmax())
# Posterior-mean point estimate for (a, b, c).
rta = (lista_a.mean(),lista_b.mean(),lista_c.mean())
plt.plot(realx, model(realx,rta[0],rta[1],rta[2]))
plt.title("a = %.3f b = %.3f c = %.3f" % (rta))
h2 = plt.figure()
#plt.plot(lista_a[100:], label='pendiente')
#plt.plot(lista_b[100:], label='intercepto')
#plt.plot(lista_c[100:], label='c')
#plt.plot(logposterior[100:], label='loglikelihood')
#plt.legend()
h2 = plt.figure()
plt.plot(lista_a, lista_b, alpha=0.5)
plt.scatter(lista_a, lista_b, alpha=0.4, c=np.exp(logposterior))
plt.colorbar()
h2 = plt.figure()
plt.plot(lista_b, lista_c, alpha=0.5)
# BUGFIX: this figure traces (b, c); the scatter plotted (a, b).
plt.scatter(lista_b, lista_c, alpha=0.4, c=np.exp(logposterior))
plt.colorbar()
h2 = plt.figure()
plt.plot(lista_a, lista_c, alpha=0.5)
# BUGFIX: likewise, this figure shows (a, c).
plt.scatter(lista_a, lista_c, alpha=0.4, c=np.exp(logposterior))
plt.colorbar() | ClarkGuilty/2018 | metodosComputacionales2/JavierAcevedo_Ejercicio6.py | JavierAcevedo_Ejercicio6.py | py | 3,157 | python | es | code | 0 | github-code | 36 |
22869409786 | # Time Complexity of Solution:
# Best O(n); Average O(n^2); Worst O(n^2).
#
# Approach:
# Insertion sort is good for collections that are very small or nearly sorted.
# Otherwise it's not a good sorting algorithm:
# it moves data around too much.
# Each time an insertion is made, all elements in a greater position are shifted
def insertion_sort(arr):
    """Sort arr in place with insertion sort.

    Best O(n) on nearly-sorted input, average/worst O(n^2): each pass shifts
    the larger elements one slot right to open a position for the current value.
    """
    for right in range(1, len(arr)):
        key = arr[right]
        slot = right
        # Shift everything greater than `key` one position to the right.
        while slot > 0 and arr[slot - 1] > key:
            arr[slot] = arr[slot - 1]
            slot -= 1
        arr[slot] = key
# Demo: sort a sample list in place and show before/after.
alist = [5, 2, 1, 9, 0, 4, 6]
print('Before', alist)
insertion_sort(alist)
print('After ', alist)
| DerevenetsArtyom/pure-python | algorithms/Problem_Solving_Algorithms_Data Structures/sorting_and_search/insertion_sort.py | insertion_sort.py | py | 1,194 | python | en | code | 0 | github-code | 36 |
28915101395 | import copy
from pathlib import Path
from collections import defaultdict
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
try:
import datasets as hf_datasets
except ImportError:
pass
def get_texts_df(dir_path):
    """Read every *.txt file directly inside dir_path into a DataFrame.

    Returns columns ["id", "text"], where id is the file stem and text is the
    file content with trailing whitespace stripped; rows are sorted by id.
    """
    txt_files = sorted(
        (p for p in dir_path.iterdir() if p.is_file() and p.suffix == ".txt"),
        key=lambda p: p.stem,
    )
    records = [(p.stem, p.read_text().rstrip()) for p in txt_files]
    return pd.DataFrame(records, columns=["id", "text"])
def get_dfs(path):
    """Load the data folder: train texts, the train labels CSV, and test texts."""
    path = Path(path)
    test_texts = get_texts_df(path / "test")
    train_df = pd.read_csv(path / "train.csv")
    train_texts = get_texts_df(path / "train")
    return train_texts, train_df, test_texts
def get_block_dataset(df, text_column, tokenizer, max_len, seed):
    """Tokenize df[text_column] and pack the token stream into fixed-size blocks.

    Documents are shuffled, tokenized without special tokens, and each is
    terminated with the tokenizer's SEP id.  The concatenated stream is then
    cut into chunks of max_len - 2 tokens (max_len defaults to the model's
    maximum) and each chunk is wrapped with the tokenizer's special tokens.
    Returns a datasets.Dataset with only an input_ids column.
    """
    if max_len is None:
        max_len = tokenizer.model_max_length
    dataset = hf_datasets.Dataset.from_pandas(df)
    dataset = dataset.shuffle(seed)

    def tokenize(examples):
        # No special tokens here; a single SEP marks each document boundary.
        tokenized = tokenizer(
            examples[text_column], add_special_tokens=False, return_attention_mask=False
        )
        for tokens in tokenized.input_ids:
            tokens.append(tokenizer.sep_token_id)
        return tokenized

    dataset = dataset.map(
        tokenize,
        batched=True,
        remove_columns=dataset.column_names,
        desc="Tokenizing examples...",
    )

    def blockify(examples):
        # Concatenate the batch into one stream, then take sub_max-sized
        # chunks, leaving room for the two special tokens added per block.
        # A trailing partial chunk is dropped.
        all = []
        for sub in examples["input_ids"]:
            all.extend(sub)
        sub_max = max_len - 2
        block_starts = range(0, len(all) - sub_max + 1, sub_max)
        blocks = [
            tokenizer.build_inputs_with_special_tokens(all[i : i + sub_max])
            for i in block_starts
        ]
        examples = {"input_ids": blocks}
        return examples

    dataset = dataset.map(
        blockify,
        batched=True,
        desc="Chunking examples...",
    )
    return dataset
def split_offsets(line):
    """Return (start, end) character offsets for each whitespace-split token of line."""
    spans = []
    cursor = 0
    for token in line.split():
        start = line.index(token, cursor)
        cursor = start + len(token)
        spans.append((start, cursor))
    return spans
# Discourse element types; their order fixes the numeric label ids below.
labels = [
    "Lead",
    "Position",
    "Evidence",
    "Claim",
    "Concluding Statement",
    "Counterclaim",
    "Rebuttal",
]
def get_label_ids(labels):
    """Build BIO-style id maps for the given labels.

    Id 0 is the "outside" sentinel (None, 0); label i gets a begin id
    2*i + 1 (flag True) and an inside id 2*i + 2 (flag False).
    Returns (label_to_id, id_to_label).
    """
    label_to_id = {(None, 0): 0}
    id_to_label = {0: (None, 0)}
    for index, label in enumerate(labels):
        begin_id = 2 * index + 1
        for id_, entry in ((begin_id, (label, True)), (begin_id + 1, (label, False))):
            label_to_id[entry] = id_
            id_to_label[id_] = entry
    return label_to_id, id_to_label
# Module-level id maps shared by target construction and decoding.
label_to_id, id_to_label = get_label_ids(labels)
max_labels = len(label_to_id)
def get_answer_dict(df):
    """Map each essay id to its list of (word_indices, discourse_type) spans.

    predictionstring is a space-separated word-index string; spans are sorted
    (by their index lists) within each essay.
    """
    answers = defaultdict(list)
    for row in df.itertuples():
        indices = [int(tok) for tok in row.predictionstring.split()]
        answers[row.id].append((indices, row.discourse_type))
    for spans in answers.values():
        spans.sort()
    return answers
def get_clean_answers(answers):
    """Return a deep copy of `answers` with small overlaps between consecutive
    spans trimmed away.

    When the previously kept span's last word id reaches into the current
    span (prev last >= current first): if the previous span is a single word,
    the current span loses its first word (or is dropped entirely if it only
    has one word); otherwise the previous span gives up its last word.
    """
    answers = copy.deepcopy(answers)
    for _, answer in answers.items():
        # Sentinel so the first span never triggers the overlap branch.
        prev_words = [-1]
        cleaned_answer = []
        for words, label in answer:
            if prev_words[-1] >= words[0]:
                if len(prev_words) == 1:
                    if len(words) == 1:
                        continue
                    words.pop(0)
                else:
                    # Mutates the previously appended span's list in place.
                    prev_words.pop()
            cleaned_answer.append((words, label))
            prev_words = words
        # Rewrite the essay's span list in place.
        answer.clear()
        answer.extend(cleaned_answer)
    return answers
def check_answer_dict(answers):
    """Validate every document's spans: non-empty, with word ids strictly
    increasing across the document's spans taken in order."""
    for spans in answers.values():
        last = -1
        for word_ids, _label in spans:
            if not word_ids:
                return False
            for wid in word_ids:
                if wid <= last:
                    return False
                last = wid
    return True
def get_word_dict(texts):
    """Map each essay id in the texts frame to its per-word character offsets."""
    return {row.id: split_offsets(row.text) for row in texts.itertuples()}
def overlap(a, b, c, d):
    """True iff the half-open intervals [a, b) and [c, d) intersect."""
    return not (b <= c or d <= a)
def intersect_ranges(ranges, items):
    """For each sorted, non-overlapping range (lo, hi), collect the indices of
    the sorted, non-overlapping items (start, end) that intersect it.

    Single forward sweep: the cursor never rewinds, so each item index is
    offered to at most one range (the first whose upper bound exceeds the
    item's start).  Returns one index list per range.
    """
    groups = []
    cursor = 0
    n_items = len(items)
    for lo, hi in ranges:
        matched = []
        while cursor < n_items and items[cursor][0] < hi:
            if lo < items[cursor][1]:
                matched.append(cursor)
            cursor += 1
        groups.append(matched)
    return groups
def get_target(token_offsets, answers, word_offsets, overflow_to_sample):
    """Build the (num_chunks, seq_len) LongTensor of label ids for a batch.

    token_offsets      -- per-chunk (start, end) character offsets per token.
    answers            -- per-sample list of (word_indices, label) spans.
    word_offsets       -- per-sample (start, end) character offsets per word.
    overflow_to_sample -- maps each chunk back to its source sample.

    The first matched token of a span's first occurrence (across chunks) gets
    the "begin" id (label_to_id[(label, True)]); every other matched token
    gets the "inside" id.  Unmatched tokens stay 0.
    """
    # Character range covered by each answer span, per sample.
    answer_ranges = [
        [(word_offset[words[0]][0], word_offset[words[-1]][1]) for words, _ in answer]
        for answer, word_offset in zip(answers, word_offsets)
    ]
    # Tracks (sample, span) pairs whose begin token has already been emitted.
    answer_seen = set()
    target = torch.zeros(token_offsets.size()[:-1], dtype=torch.long)
    for i, token_offset in enumerate(token_offsets.tolist()):
        j = overflow_to_sample[i].item()
        answer_tokens = intersect_ranges(answer_ranges[j], token_offset)
        for k, answer_token in enumerate(answer_tokens):
            label = answers[j][k][1]
            label1 = label_to_id[(label, False)]
            label0 = label_to_id[(label, True)] if (j, k) not in answer_seen else label1
            target[i, answer_token[0:1]] = label0
            target[i, answer_token[1:]] = label1
            if len(answer_token) > 0:
                answer_seen.add((j, k))
    return target
class FeedbackDataset(Dataset):
    """Torch dataset over essays and their labeled discourse spans.

    __getitem__ yields (text, answer_spans, word_offsets); the collate fn
    tokenizes a batch (with overflow chunks) and builds per-token targets.
    """

    def __init__(self, texts, df, tokenizer, max_len, stride, pad_to_multiple_of):
        self.texts = texts
        # Overlap-trimmed gold spans keyed by essay id.
        self.answers = get_answer_dict(df)
        self.answers = get_clean_answers(self.answers)
        # Per-essay word character offsets.
        self.words = get_word_dict(texts)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.stride = stride
        self.pad_to_multiple_of = pad_to_multiple_of

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts.loc[idx, "text"]
        text_id = self.texts.loc[idx, "id"]
        answer = self.answers[text_id]
        words = self.words[text_id]
        return text, answer, words

    def get_collate_fn(self):
        def collate_fn(examples):
            text, answer, words = [list(a) for a in zip(*examples)]
            # Long essays overflow into extra chunks with `stride` overlap;
            # offsets are returned so targets can be aligned to characters.
            inputs = self.tokenizer(
                text,
                add_special_tokens=True,
                padding=True,
                truncation=True,
                return_overflowing_tokens=True,
                return_offsets_mapping=True,
                max_length=self.max_len,
                stride=self.stride,
                return_tensors="pt",
                pad_to_multiple_of=self.pad_to_multiple_of,
            )
            target = get_target(
                inputs.offset_mapping,
                answer,
                words,
                inputs.overflow_to_sample_mapping,
            )
            return len(examples), inputs, target, words, answer

        return collate_fn
def get_matches(preds, golds):
    """Greedily pair predicted spans with gold spans.

    A pair matches when the word overlap is strictly more than half of the
    gold span AND strictly more than half of the predicted span.  Each gold
    span is used at most once; predictions are tried in order and take the
    first still-unused gold they match.  Returns (pred_idx, gold_idx) pairs.
    """
    gold_sets = [set(g) for g in golds]
    used = set()
    matches = []
    for i, pred in enumerate(preds):
        pred_set = set(pred)
        for j, gold_set in enumerate(gold_sets):
            if j in used:
                continue
            common = len(pred_set & gold_set)
            if common > 0.5 * len(gold_set) and common > 0.5 * len(pred_set):
                used.add(j)
                matches.append((i, j))
                break
    return matches
def _score_single(tp, fp, fn, pred_words, pred_labels, answer_words, answer_labels):
    """Accumulate per-label TP/FP/FN counts for one document into tp/fp/fn.

    Every prediction starts as an FP and every gold span as an FN; each
    label-consistent match then converts one FP + one FN into a TP.
    """
    matches = get_matches(pred_words, answer_words)
    for l in pred_labels:
        fp[l] += 1
    for l in answer_labels:
        fn[l] += 1
    for i, j in matches:
        l = pred_labels[i]
        if l != answer_labels[j]:
            # Span overlap matched but the labels disagree: no credit.
            continue
        tp[l] += 1
        fp[l] -= 1
        fn[l] -= 1
def score(preds_batch, words_batch, answers_batch):
    """Per-label (TP, FP, FN) from raw character-range predictions."""
    return score_words(pred_to_words(preds_batch, words_batch), answers_batch)
def score_words(preds_batch, answers_batch):
    """Per-label (TP, FP, FN) over a batch of word-level predictions."""
    tp = defaultdict(int)
    fp = defaultdict(int)
    fn = defaultdict(int)
    for preds, answers in zip(preds_batch, answers_batch):
        # zip(*) fails on an empty list, hence the explicit fallback.
        pred_words, pred_labels = zip(*preds) if preds else ([], [])
        answer_words, answer_labels = zip(*answers)
        _score_single(tp, fp, fn, pred_words, pred_labels, answer_words, answer_labels)
    return {l: (tp[l], fp[l], fn[l]) for l in labels}
def pred_to_words(preds_batch, words_batch):
    """Convert (char_range, label) predictions to (word_indices, label) spans
    using each document's word offsets; predictions covering no words are dropped."""
    pred_words_batch = []
    for preds, words in zip(preds_batch, words_batch):
        if not preds:
            pred_words_batch.append([])
            continue
        pred_ranges, pred_labels = zip(*preds)
        pred_words = intersect_ranges(pred_ranges, words)
        # Keep only predictions whose range captured at least one word.
        pred_words = [(a, b) for a, b in list(zip(pred_words, pred_labels)) if a]
        pred_words_batch.append(pred_words)
    return pred_words_batch
def _confusion_matrix_single(
    matrix, pred_words, pred_labels, answer_words, answer_labels, to_id
):
    """Accumulate one document into the label-confusion matrix.

    Matched pairs count at (pred_label, gold_label); unmatched predictions go
    to the (pred, None) column and unmatched gold spans to the (None, gold) row.
    """
    matches = get_matches(pred_words, answer_words)
    pred_seen = [False for _ in range(len(pred_labels))]
    answer_seen = [False for _ in range(len(answer_labels))]
    for i, j in matches:
        pred_seen[i] = True
        answer_seen[j] = True
        p = to_id[pred_labels[i]]
        a = to_id[answer_labels[j]]
        matrix[p, a] += 1
    for seen, p in zip(pred_seen, pred_labels):
        if seen:
            continue
        matrix[to_id[p], to_id[None]] += 1
    for seen, a in zip(answer_seen, answer_labels):
        if seen:
            continue
        matrix[to_id[None], to_id[a]] += 1
def confusion_matrix_words(preds_batch, answers_batch):
    """Confusion-matrix DataFrame over word-level predictions.

    The extra 'Unmatched' row/column holds spans without a counterpart.
    """
    full_labels = labels + [None]
    to_id = {l: i for i, l in enumerate(full_labels)}
    matrix = np.zeros((len(full_labels), len(full_labels)), dtype=np.int64)
    for preds, answers in zip(preds_batch, answers_batch):
        pred_words, pred_labels = zip(*preds) if preds else ([], [])
        answer_words, answer_labels = zip(*answers)
        _confusion_matrix_single(
            matrix, pred_words, pred_labels, answer_words, answer_labels, to_id
        )
    # Rename the None bucket for display.
    full_labels[-1] = "Unmatched"
    df = pd.DataFrame(matrix, index=full_labels, columns=full_labels)
    return df
def confusion_matrix(preds_batch, words_batch, answers_batch):
    """Confusion matrix from raw character-range predictions."""
    return confusion_matrix_words(
        pred_to_words(preds_batch, words_batch), answers_batch
    )
| jeffdshen/kaggle-public | feedback/datasets.py | datasets.py | py | 10,868 | python | en | code | 0 | github-code | 36 |
26533716957 | '''
Fig1. Cascade Matrix heatmap
'''
import numpy as np
import matplotlib.pyplot as plt
# from numba import jit
import matplotlib.colors as colors
# @jit(nopython = True)
def random_cascade_matrix(mu_L, mu_U, sigma_L, sigma_U, gamma, N=250):
    """Draw an N x N random cascade interaction matrix.

    Lower-triangular entries get mean mu_L/N and fluctuation sigma_L/sqrt(N);
    the mirrored upper entry gets mean mu_U/N with fluctuation sigma_U/sqrt(N),
    correlated with its partner through gamma.  The diagonal stays zero.
    """
    sqrt_N = np.sqrt(N)
    cross = np.sqrt(1 - gamma ** 2)
    mat = np.zeros((N, N))
    for row in range(N):
        for col in range(row):
            z_lower = np.random.normal(0, 1)
            z_upper = np.random.normal(0, 1)
            mat[row, col] = mu_L / N + sigma_L * z_lower / sqrt_N
            mat[col, row] = mu_U / N + sigma_U * (gamma * z_lower + cross * z_upper) / sqrt_N
    return mat
# Three-panel figure comparing the matrix models; LaTeX text rendering.
fig, axs = plt.subplots(1, 3, figsize = (10, 3))
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
# for display
def block_matrix(mu, sizes):
    """Expand the B x B block-mean matrix mu into an N x N matrix.

    Block (i, j) becomes a sizes[i] x sizes[j] constant block filled with
    mu[i, j]; N is the sum of the block sizes.
    """
    total = np.sum(sizes)
    starts = np.hstack((np.array([0]), np.cumsum(sizes)[:-1]))
    out = np.zeros((total, total))
    n_blocks = np.size(sizes)
    for bi in range(n_blocks):
        for bj in range(n_blocks):
            out[starts[bi]:starts[bi] + sizes[bi],
                starts[bj]:starts[bj] + sizes[bj]] = mu[bi, bj]
    return out
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap that samples cmap only on [minval, maxval],
    using n evenly spaced samples."""
    new_cmap = colors.LinearSegmentedColormap.from_list(
        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)))
    return new_cmap
arr = np.linspace(0, 50, 100).reshape((10, 10))
cmap = plt.get_cmap('viridis')
# Trim the extremes of viridis for the cascade panels.
new_cmap = truncate_colormap(cmap, 0.12, 0.98)
mu1 = np.random.uniform(0, 1, 16).reshape((4, 4))
sizes1 = np.array([5, 4, 7, 3])
# Upper-triangular block means encode the cascade ordering.
mu_cascade = np.array([[1, 2, 2, 2], [0, 1, 2, 2], [0, 0, 1, 2], [0, 0, 0, 1]])
sizes_cascade = np.array([7, 5, 6, 4])
J_cascade = block_matrix(mu_cascade, sizes_cascade)
J1 = block_matrix(mu1, sizes1)
axs[0].matshow(J1, cmap = "viridis")
axs[1].matshow(J_cascade, cmap = new_cmap)
# sigma = 0, gamma = 0: only the mean structure of the cascade model is shown.
axs[2].matshow(random_cascade_matrix(-1, 1, 0.0, 0.0, 0.0, 500), cmap = new_cmap)
# print(random_cascade_matrix)
# for i in range(3):
# Turn off tick labels
axs[0].set_xticklabels([])
axs[0].set_xticks([])
axs[1].set_xticklabels([])
axs[1].set_xticks([])
axs[2].set_xticklabels([])
axs[2].set_xticks([])
axs[0].get_yaxis().set_visible(False)
axs[1].get_yaxis().set_visible(False)
axs[2].get_yaxis().set_visible(False)
axs[0].set_xlabel("Block Structured")
axs[1].set_xlabel("Cascade Model, Finite B")
axs[2].set_xlabel("Cascade Model, infinite B")
plt.tight_layout()
# panel labels
axs[0].annotate('(a)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
axs[1].annotate('(b)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
axs[2].annotate('(c)', xy=(0.05, 0.05), xycoords='axes fraction', zorder = 10, ma = 'center', bbox=dict(facecolor='white', alpha=0.6, boxstyle='round'))
fig.tight_layout()
plt.savefig("Interaction Matrix Cartoon.pdf") | LylePoley/Cascade-Model | Figures/Fig1.py | Fig1.py | py | 2,924 | python | en | code | 0 | github-code | 36 |
37712754399 | from django.shortcuts import render
from custom_model_field_app.forms import PersonForm
# Create your views here.
def customview(request):
    """Render the person form; on POST, bind the submitted data, validate and save.

    BUGFIX: the original built an *unbound* PersonForm() and then called
    is_valid() on it inside the POST branch -- an unbound form is never valid,
    so nothing could ever be saved.  The form must be bound to request.POST.
    (The trailing `return render(...)` line of this view is unchanged.)
    """
    if request.method == 'POST':
        form = PersonForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = PersonForm()
return render(request,'custom.html',{'form':form}) | m17pratiksha/django_models | models/custom_model_field_app/views.py | views.py | py | 323 | python | en | code | 0 | github-code | 36 |
35401950406 | def swap_case(s):
    # Walk the string once, flipping the case of each alphabetic character.
    count_str = len(s)
    modified_str = ''
    for i in range(count_str):
        if s[i].isupper():
            a = s[i].lower()
        elif s[i].islower():
            a = s[i].upper()
        else:
            # Non-letters pass through unchanged.
            a = s[i]
        modified_str = modified_str + a
    # NOTE(review): str.swapcase() implements exactly this transformation.
    return modified_str


if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)
| gardayulada/HackerRankSolutions | SwapCase.py | SwapCase.py | py | 396 | python | en | code | 0 | github-code | 36 |
35398321598 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from pants.backend.core.tasks.check_exclusives import ExclusivesMapping
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants_test.task_test_base import TaskTestBase
class DummyJvmTask(JvmTask):
    """Minimal concrete JvmTask; execute is a no-op so base-class helpers can be tested."""

    def execute(self):
        pass
class JvmTaskTest(TaskTestBase):
    """Test some base functionality in JvmTask."""

    @classmethod
    def task_type(cls):
        return DummyJvmTask

    def setUp(self):
        super(JvmTaskTest, self).setUp()
        self.workdir = safe_mkdtemp()
        # t1/t2 share the exclusives value 'a'; t3 carries the conflicting 'b'.
        self.t1 = self.make_target('t1', exclusives={'foo': 'a'})
        self.t2 = self.make_target('t2', exclusives={'foo': 'a'})
        self.t3 = self.make_target('t3', exclusives={'foo': 'b'})
        # Force exclusive propagation on the targets.
        self.t1.get_all_exclusives()
        self.t2.get_all_exclusives()
        self.t3.get_all_exclusives()
        context = self.context(target_roots=[self.t1, self.t2, self.t3])
        # Create the exclusives mapping.
        exclusives_mapping = ExclusivesMapping(context)
        exclusives_mapping.add_conflict('foo', ['a', 'b'])
        exclusives_mapping._populate_target_maps(context.targets())
        context.products.safe_create_data('exclusives_groups', lambda: exclusives_mapping)
        self.task = self.create_task(context, self.workdir)

    def tearDown(self):
        super(JvmTaskTest, self).tearDown()
        safe_rmtree(self.workdir)

    def test_get_base_classpath_for_compatible_targets(self):
        # Same exclusives value: must not raise.
        self.task.get_base_classpath_for_compatible_targets([self.t1, self.t2])

    def test_get_base_classpath_for_incompatible_targets(self):
        with self.assertRaises(TaskError):
            self.task.get_base_classpath_for_compatible_targets([self.t1, self.t3])
| fakeNetflix/square-repo-pants | tests/python/pants_test/tasks/test_jvm_task.py | test_jvm_task.py | py | 1,897 | python | en | code | 0 | github-code | 36 |
14125615992 | infile = open("input/in22_real.txt","r")
# infile = open("input/in22_test.txt","r")
# Parse the two decks: skip each deck's header line, then read card values
# until the blank line separating the players.
p1 = []
next(infile)
for line in infile:
    if line == '\n':
        break
    p1.append(int(line.strip()))
p2 = []
next(infile)
for line in infile:
    if line == '\n':
        break
    p2.append(int(line.strip()))
def score(p):
    """Combat score: bottom card counts 1x, next 2x, ..., top card len(p)x."""
    total = 0
    for multiplier, card in enumerate(reversed(p), start=1):
        total += multiplier * card
    return total


def day22_part1(p1, p2):
    """Play regular Combat until one deck empties; return the winner's score.

    Both decks are mutated in place; the higher top card wins each round and
    takes both cards (winner's card first).
    """
    while p1 and p2:
        top1, top2 = p1.pop(0), p2.pop(0)
        if top1 > top2:
            p1 += [top1, top2]
        else:
            p2 += [top2, top1]
    return score(p1 if p1 else p2)
def day22_part2(p1, p2, depth, Nmax):
    """Play Recursive Combat (AoC 2020 day 22, part 2).

    p1, p2 -- the two decks (lists of card values), mutated in place.
    depth  -- 0 for the outer game; sub-games recurse with depth + 1.
    Nmax   -- kept for backward compatibility; no longer used.  The original
              aborted after Nmax turns as a loop heuristic ("probably
              repeating, call it quits"), which could misjudge a game and, at
              depth 0, returned True instead of a score.  Exact repeated-state
              detection below implements the official rule instead.

    Returns the winning score for the outer game (depth == 0), or True/False
    (player 1 won / lost) for a sub-game.
    """
    seen = set()
    while p1 and p2:
        # Official rule: if this exact configuration occurred earlier in this
        # game, player 1 immediately wins the game.
        state = (tuple(p1), tuple(p2))
        if state in seen:
            return True if depth else score(p1)
        seen.add(state)
        c1 = p1.pop(0)
        c2 = p2.pop(0)
        if len(p1) >= c1 and len(p2) >= c2:
            # Both players have enough cards left: recurse on deck copies.
            p1_wins = day22_part2(p1[:c1], p2[:c2], depth + 1, Nmax)
        else:
            p1_wins = c1 > c2
        if p1_wins:
            p1.extend([c1, c2])
        else:
            p2.extend([c2, c1])
    if depth:
        return bool(p1)
    return score(p1) if p1 else score(p2)
# Solve part 1 on copies so the original decks stay intact for part 2.
print(day22_part1(p1[:],p2[:]))
print(day22_part2(p1[:],p2[:],0,600)) | arguhuh/AoC | 2020/code22.py | code22.py | py | 1,182 | python | en | code | 0 | github-code | 36 |
42779512033 | from fastexcel import read_excel
from openpyxl import load_workbook
from xlrd import open_workbook
def pyxl_read(test_file_path: str):
    """Benchmark: iterate every cell value of every sheet via openpyxl.

    Opened read-only with data_only=True so formula cells yield cached values.
    """
    wb = load_workbook(test_file_path, read_only=True, keep_links=False, data_only=True)
    for ws in wb:
        # The original also called ws.iter_rows() and immediately overwrote
        # the result with ws.values; the dead call is removed so the
        # benchmark measures only the .values iteration path.
        rows = ws.values
        for row in rows:
            for value in row:
                value  # touch the value; the benchmark measures access cost only
def xlrd_read(test_file_path: str):
    """Benchmark: iterate every cell value of every sheet via xlrd."""
    workbook = open_workbook(test_file_path)
    for sheet in workbook.sheets():
        for row_idx in range(sheet.nrows):
            for cell_value in sheet.row_values(row_idx):
                cell_value  # touch the value; the benchmark measures access cost only
def fastexcel_read(test_file_path: str):
    """Benchmark: load every sheet by name and materialise it to Arrow."""
    reader = read_excel(test_file_path)
    for sheet_name in reader.sheet_names:
        reader.load_sheet_by_name(sheet_name).to_arrow()
| ToucanToco/fastexcel | python/tests/benchmarks/readers.py | readers.py | py | 787 | python | en | code | 16 | github-code | 36 |
73122227944 | # Ignas Kleveckas S2095960
import sys
import time
import os
import select
from socket import *
class Sender(object):
    """Go-Back-N sender: transfers file_name to remote_host:port over UDP.

    Packet layout: 2-byte big-endian sequence number, 1-byte end-of-file
    flag, then up to buffer_length bytes of payload.  ACKs are cumulative.
    """

    def __init__(self, remote_host, port, file_name, retry_timeout, window_size):
        # receive input
        self.remote_host = remote_host
        self.port = port
        self.file_name = file_name
        # retry_timeout is given in milliseconds; store seconds.
        self.retry_timeout = retry_timeout / 1000
        self.window_size = window_size
        # configure socket
        self.so = socket(AF_INET, SOCK_DGRAM)
        # initialise remaining values
        self.sequence_no = 1
        self.window_base = 1
        self.transfer_time = 0
        self.file_size = os.path.getsize(file_name)
        self.buffer_length = 1024
        self.total_packets = 1 + (self.file_size / self.buffer_length)
        self.timeout = False
        self.timer = False
        # Sent packets are kept for retransmission, indexed by sequence number.
        self.packets = [bytearray((0).to_bytes(1, 'big'))] #dummy value in 0 position

    def packet_monitor(self):
        """Poll for ACKs while the retry timer runs; flag/handle a timeout."""
        if not self.timeout:
            time_left = self.retry_timeout - (time.time() - self.time_sent)
            if time_left >= 0:
                # check if there is an incoming packet (non-blocking select)
                ready = select.select([self.so], [], [], 0)
                if ready[0]:
                    self.receive()
            else:
                self.timeout = True
        else:
            self.process_timeout()

    def receive(self):
        """Read one cumulative ACK and slide the window base past it."""
        response, addr = self.so.recvfrom(self.buffer_length)
        ack_no = int.from_bytes(response, 'big')
        #print("ACK received " + str(ack_no))
        self.window_base = ack_no + 1
        # if the ack for the base packet was received, stop the timer
        if self.window_base == self.sequence_no:
            self.timer = False
        # otherwise reset the timer
        else:
            self.time_sent = time.time()
            self.timer = True

    def process_timeout(self):
        """Go-Back-N recovery: restart the timer and resend the whole window."""
        self.timer = True
        self.timeout = False
        self.time_sent = time.time()
        # resend the packets in the window (after window base)
        for i in range(self.window_base, self.sequence_no):
            packet = self.packets[i]
            self.so.sendto(packet, (self.remote_host, self.port))
            #print("Packet retransmitted " + str(i))

    def send_file(self):
        """Send the whole file; returns the measured throughput (KB/s)."""
        myfile = open(self.file_name, "rb")
        data = myfile.read(self.buffer_length)
        end_of_file = 0
        start = time.time()
        # Loop until every packet is acknowledged and the file is exhausted.
        while self.window_base < self.total_packets or data:
            #if time.time() - start > 180: # TESTING
            #break
            # Send the next packet only if the window has room and data remains.
            if self.sequence_no < self.window_base + self.window_size and data:
                # prepare header
                if len(data) < self.buffer_length:
                    end_of_file = 1
                else:
                    end_of_file = 0
                packet = bytearray(self.sequence_no.to_bytes(2, 'big'))
                packet.extend(end_of_file.to_bytes(1, 'big'))
                packet.extend(data)
                # send packet and add it to the packet list
                self.packets.append(packet)
                self.so.sendto(packet, (self.remote_host, self.port))
                #print("Packet sent " + str(self.sequence_no))
                # if sending the base packet, start the timer
                if self.window_base == self.sequence_no:
                    self.time_sent = time.time()
                    self.timer = True
                self.sequence_no += 1
                # read data for the next packet
                data = myfile.read(self.buffer_length)
                if len(data) < self.buffer_length:
                    end_of_file = 1
            if self.timer:
                self.packet_monitor()
        total_time = time.time() - start
        # file_size is bytes, so dividing by 1000*seconds gives KB/s.
        throughput = self.file_size / (1000 * total_time)
        print(throughput)
        # close the file and socket
        self.so.close()
        myfile.close()
        return throughput
if __name__ == '__main__':
sender = Sender(sys.argv[1], int(sys.argv[2]), sys.argv[3], int(sys.argv[4]), int(sys.argv[5]))
sender.send_file() | ikleveckas/Reliable-data-transfer-over-UDP | Sender3.py | Sender3.py | py | 4,285 | python | en | code | 0 | github-code | 36 |
72639783783 | # https://pypi.org/project/RPi.bme280/
import smbus2
import bme280
import syslog
import threading
def logmsg(level, msg):
    """Write *msg* to syslog at *level*, tagged with the current thread name.

    Uses threading.current_thread().name: the camelCase aliases
    currentThread()/getName() were deprecated in 3.10 and removed in 3.13.
    """
    syslog.syslog(level, 'station: {}: {}'.format(threading.current_thread().name, msg))
def logdbg(msg):
    """Log *msg* at DEBUG severity."""
    logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
    """Log *msg* at INFO severity."""
    logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
    """Log *msg* at ERROR severity."""
    logmsg(syslog.LOG_ERR, msg)
port = 1
address = 0x76
bus = smbus2.SMBus(port)
calibration_params = bme280.load_calibration_params(bus, address)
import numpy as np
import time as tm
import os
#with open('station.log', 'w') as f:
# pass
target_t_f = 85.
t_safe = 90.
from importlib import reload
cadence = 5 # seconds
last_time_s = None
last_error = None
while True:
data = bme280.sample(bus, address, calibration_params)
t_f = data.temperature * 9. / 5 + 32.
with open('status', 'r') as f:
status = f.readline()
output = '{:30} {:10s} {:5.2f} {:8.2f} {:5.2f}'.format(str(data.timestamp).replace(' ','_'), status, t_f, data.pressure, data.humidity)
with open('station.log', 'a') as f:
#f.write('{} error={:.3f} dt={:6.3f}s p={:.3f} i={:.3f} d={:.3f} pid={:.3f}'.format(output, error, dt, p, i, d, pid))
f.write('{}\n'.format(output))
time_s = tm.mktime(data.timestamp.timetuple())
if t_f > t_safe:
print('temperature {:.2f} > safe temperature {:.2f}; safe mode'.format(t_f, t_safe))
os.system('./safe')
#elif t_f < target_t_f:
# os.system('./on')
#else:
# os.system('./off')
else:
error = t_f - target_t_f
kp = 1
ki = kp / 300 # 5-minute integration time
kd = kp * 300 # 5-minute derivative time
p = kp * error
if last_time_s:
dt = time_s - last_time_s
i += ki * 0.5 * (error + last_error) * dt
d = kd * (error - last_error) / dt
else:
dt = 0
i = 0
d = 0
# now instead of comparing t_f to target_t_f, compare p+i+d to zero
last_time_s = time_s
last_error = error
pid = p + i + d
full_output = '{} error={:.3f} dt={:6.3f}s p={:.3f} i={:.3f} d={:.3f} pid={:.3f}'.format(output, error, dt, p, i, d, pid)
print(full_output)
loginf(full_output)
if pid > 0:
try:
os.system('./off')
except Exception as e:
logerr(str(e.args[0]))
with open('station.e', 'a') as ferr:
ferr.write('{} {}\n'.format(str(e), full_output))
elif pid < 0:
try:
os.system('./on')
except Exception as e:
logerr(str(e.args[0]))
with open('station.e', 'a') as ferr:
ferr.write('{} {}\n'.format(str(e), full_output))
else:
logerr('error {} not a number'.format(pid))
raise ValueError('error not a number')
tm.sleep(cadence)
| chkvch/fermentation_station | station.py | station.py | py | 2,558 | python | en | code | 0 | github-code | 36 |
def solution(nums: list[int]) -> int:
    """Find the duplicated value in nums via Floyd's tortoise-and-hare.

    nums is treated as a linked list (index i points to nums[i]); the
    duplicate is the entry point of the resulting cycle.  O(1) extra
    space, and the input list is not modified.
    """
    tortoise = hare = 0
    # Phase 1: advance at different speeds until the pointers collide.
    while True:
        tortoise = nums[tortoise]
        hare = nums[nums[hare]]
        if tortoise == hare:
            break
    # Phase 2: restart one pointer at 0; stepping both one node at a
    # time, they meet exactly at the cycle entrance (the duplicate).
    finder = 0
    while finder != tortoise:
        finder = nums[finder]
        tortoise = nums[tortoise]
    return finder
# Quick sanity check: the duplicated value in this list is 2.
nums = [1, 3, 2, 2, 4]
print(solution(nums))
| HomayoonAlimohammadi/Training | Leetcode/287_FindDuplicateNumber.py | 287_FindDuplicateNumber.py | py | 328 | python | en | code | 2 | github-code | 36 |
19542054640 | import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Change default path
# sys.path.append('PROGMOD_')
# Function for formatting file to array of specific variables
# data = txt file
# sh_index = state history index, i.e. controller or environment
# var_index = variable index, i.e. timestamp, joint_angle etc...
def txt_to_variable_array(data, sh_index, var_index):
    """Extract one variable series from a delimited state-history string.

    Format: state histories separated by '|', variables within a history
    separated by '/', and samples within a variable separated by '&'.

    Arguments:
        data -- raw text of one log file
        sh_index -- state-history index (e.g. controller vs. environment)
        var_index -- variable index (e.g. timestamp, joint_angle, ...)

    Returns:
        (in_numbers, seperated, vars): the samples as floats, the samples
        as strings with empty tokens removed, and the raw '&'-joined
        string for the selected variable.
    """
    sh = data.split("|")[sh_index]
    vars = sh.split("/")[var_index]
    # Drop empty tokens (e.g. from a trailing '&') in a single pass,
    # instead of collecting indices and popping them in reverse.
    seperated = [token for token in vars.split("&") if token != ""]
    # Convert from strings to float
    in_numbers = [float(numerical_string) for numerical_string in seperated]
    return (in_numbers, seperated, vars)
def plotxy (x, y, y2, xlab, ylab, xunit, yunit):
    """Plot two series against x and show the figure (blocking).

    y is labelled "Original", y2 "Improved"; axis tick labels are
    formatted with one decimal followed by the given unit strings.
    """
    # PLOT XY DATA. NOTE(review): the "ro-" shorthand requests red markers,
    # but the explicit color= argument overrides it on both lines.
    plt.plot(x, y, "ro-", color="blue", label="Original")
    plt.plot(x, y2, "ro-", color="red", label="Improved")
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # SET UNITS on the tick formatters of the current axes
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter(f'%.1f {xunit}'))
    plt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter(f'%.1f {yunit}'))
    plt.grid()
    plt.legend()
    plt.show()
# Main
def main():
    """Plot message counts vs. process counts for the two algorithms."""
    # Changing default matplotlib font: https://jonathansoma.com/lede/data-studio/matplotlib/changing-fonts-in-matplotlib/
    matplotlib.rcParams['font.serif'] = "Palatino Linotype" # Change default serif font
    matplotlib.rcParams['font.family'] = "serif" # Set default family to serif
    # Hard-coded measurement data: x = number of processes,
    # y = number of messages for the original / improved election algorithm.
    x_num = [2, 5, 10, 15, 17, 20, 22, 25, 30, 32]
    y_num_original = [3 ,24, 99, 224, 288, 399, 483, 624, 899, 1023]
    y_num_improved = [4, 13, 28, 43, 49, 58, 64, 73, 88, 94]
    plotxy(x_num, y_num_original, y_num_improved, "No. of processes", "No. of messages", "", "")
if __name__ == "__main__":
main() | Andreas691667/P1LeaderElection_Group2 | UML & Graphs/plot.py | plot.py | py | 2,036 | python | en | code | 0 | github-code | 36 |
40327125119 | '''--------------SANKE,WATER AND GUN GAME------------------
-------------DEVELOPED BY : RANNJEET PRAJAPATI--------'''''
import random
def game(comp, your):
    """Decide one round of snake-water-gun.

    Arguments:
        comp -- computer's choice: 's' (snake), 'w' (water) or 'g' (gun)
        your -- player's choice, same encoding

    Returns:
        True if the player wins, False if the computer wins, None for a
        tie or unrecognised input.

    Fix: in the original, the tie branch (`elif comp == your`) was
    unreachable because comp always matched one of the earlier branches;
    ties only worked through an accidental implicit-None fall-through.
    The tie is now checked explicitly first.
    """
    if comp == your:
        return None
    # Each key beats its value: snake drinks water, water douses gun,
    # gun shoots snake.
    beats = {'s': 'w', 'w': 'g', 'g': 's'}
    if your == beats.get(comp):
        return False  # computer's choice beats the player's
    if comp == beats.get(your):
        return True   # player's choice beats the computer's
    return None       # unrecognised input (matches the old fall-through)
# Draw the computer's move uniformly at random (1=snake, 2=water, 3=gun).
print("computer's turn: Snake(s),water(w),Gun(g)")
randno= random.randint(1,3)
if randno==1:
    comp='s'
elif randno==2:
    comp='w'
elif randno==3:
    comp='g'
# Read the player's move from stdin (expects 's', 'w' or 'g').
print("Your turn: Snake(s),water(w),Gun(g)")
your=input()
print("computer chooses:",comp)
print("you chooses:",your)
# game() returns True = player wins, False = computer wins, None = tie.
result=game(comp,your)
if result==True:
    print ("You win!")
elif result==False:
    print("You lose!")
elif result==None:
    print("Game is tie!")
| ranjeetprajapati12/snake_water_gun_game | snake_water_gun.py | snake_water_gun.py | py | 1,019 | python | en | code | 1 | github-code | 36 |
73885161703 | from PyQt5 import QtCore
from PyQt5.QtCore import QObject, QThreadPool, pyqtSignal
from PyQt5.QtWidgets import QWidget, QScrollArea
from cvstudio.util import GUIUtilities
from cvstudio.view.widgets import ImageButton
from cvstudio.view.widgets.loading_dialog import QLoadingDialog
from cvstudio.view.widgets.response_grid import ResponseGridLayout, GridCard
from cvstudio.view.wizard import ModelWizard
class ModelsGridWidget(QWidget, QObject):
    """Grid of model cards; currently only shows a "new model" button."""
    # Emitted when the user clicks the new-model button.
    new_item_action = pyqtSignal()
    def __init__(self, parent=None):
        super(ModelsGridWidget, self).__init__(parent)
        self.grid_layout = ResponseGridLayout()
        self.grid_layout.setAlignment(QtCore.Qt.AlignTop)
        self.grid_layout.cols = 8
        self.setLayout(self.grid_layout)
        self._entries = None
    def build_new_button(self):
        """Build the grid card holding the "new model" image button."""
        new_item_widget: GridCard = GridCard(with_actions=False, with_title=False)
        btn_new_item = ImageButton(GUIUtilities.get_icon("new_folder.png"))
        # Re-emit button clicks as this widget's new_item_action signal.
        btn_new_item.clicked.connect(lambda: self.new_item_action.emit())
        new_item_widget.body = btn_new_item
        return new_item_widget
    def bind(self):
        """Populate the grid; today the only card is the new-model button."""
        cards_list = []
        new_item_button = self.build_new_button()
        cards_list.append(new_item_button)
        self.grid_layout.widgets = cards_list
        super(ModelsGridWidget, self).update()
class ModelsTabWidget(QScrollArea):
    """Scrollable "models" tab wrapping a ModelsGridWidget."""
    def __init__(self, parent=None):
        super(ModelsTabWidget, self).__init__(parent)
        self.setCursor(QtCore.Qt.PointingHandCursor)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.data_grid = ModelsGridWidget()
        # Open the model wizard whenever the grid asks for a new item.
        self.data_grid.new_item_action.connect(self.data_grid_new_item_action_slot)
        self.setWidget(self.data_grid)
        self.setWidgetResizable(True)
        self._thread_pool = QThreadPool()
        self._loading_dialog = QLoadingDialog()
        self.load()
    def data_grid_new_item_action_slot(self):
        """Show the new-model wizard modally."""
        new_model_wizard = ModelWizard()
        new_model_wizard.exec_()
    def load(self):
        """(Re)build the grid contents."""
        self.data_grid.bind()
| haruiz/CvStudio | cvstudio/view/widgets/tab_models.py | tab_models.py | py | 2,168 | python | en | code | 34 | github-code | 36 |
286897447 | import pandas as pd
import numpy as np
import time
import pylab as pl
import math
import os
import pickle
import gzip
from operator import itemgetter
from matplotlib import collections as mc
import copy
from random import randint
from itertools import *
def save_in_file_fast(arr, file_name):
    """Pickle *arr* to *file_name* (binary mode).

    Fix: the original passed `open(file_name, 'wb')` directly to
    pickle.dump and never closed the handle; the context manager
    guarantees the file is flushed and closed even on error.
    """
    with open(file_name, 'wb') as f:
        pickle.dump(arr, f)
def load_from_file_fast(file_name):
    """Unpickle and return the object stored in *file_name*.

    Fix: the original passed `open(file_name, 'rb')` directly to
    pickle.load without ever closing the handle; the context manager
    closes it deterministically.
    """
    with open(file_name, 'rb') as f:
        return pickle.load(f)
def SieveOfEratosthenes(n):
    """Return a boolean table of length n + 1 marking the primes.

    flags[i] is True when i is prime, for every i >= 2.  Indices 0 and 1
    are intentionally left True, exactly as in the original version (its
    only caller in this file starts reading at index 2).
    """
    flags = [True] * (n + 1)
    p = 2
    while p * p <= n:
        if flags[p]:
            # Cross off 2p, 3p, ... in one strided slice assignment.
            flags[2 * p:n + 1:p] = [False] * len(range(2 * p, n + 1, p))
        p += 1
    return flags
def get_primes():
    """Return the primes below 200000, cached as a pickle in OUTPUT_PATH.

    NOTE(review): on the first run this returns `plist` as a *list*, but
    it pickles `set(plist)`, so every later run returns a *set*.  The
    only consumer here (`PRIMES` membership tests) tolerates both, but
    the return type is inconsistent — confirm before reusing elsewhere.
    """
    cache_path = OUTPUT_PATH + 'prime_list.pkl'
    if not os.path.isfile(cache_path):
        n = 200000
        prime = SieveOfEratosthenes(n)
        plist = []
        # Start at 2: the sieve leaves indices 0 and 1 marked True.
        for p in range(2, n):
            if prime[p]:
                plist.append(p)
        save_in_file_fast(set(plist), cache_path)
    else:
        plist = load_from_file_fast(cache_path)
    return plist
# Globals
INPUT_PATH = 'input/'
OUTPUT_PATH = './'
CITIES = pd.read_csv('input/cities.csv')
PRIMES = get_primes()
all_ids = CITIES['CityId'].values
all_x = CITIES['X'].values
all_y = CITIES['Y'].values
CITIES_HASH = dict()
for i, id in enumerate(all_ids):
CITIES_HASH[id] = (all_x[i], all_y[i])
def isPrime(num):
    """Membership test against the module-level PRIMES collection.

    NOTE(review): PRIMES only holds primes below 200000, so this returns
    False for any larger prime — fine for this dataset's city IDs,
    presumably, but verify before reuse.
    """
    return num in PRIMES
ORIGINAL = pd.read_csv('optimized_submission.csv')['Path'].values
def get_complete_score(tour):
    """Total path length of *tour* under the prime-penalty scoring rule.

    Every 10th step (1-indexed) costs 10% extra unless the city the step
    departs from has a prime CityId — assumes the Kaggle Santa 2018
    "Traveling Santa" rules; confirm against the competition definition.
    """
    score = 0.0
    for i in range(0, len(tour)-1):
        p1 = CITIES_HASH[tour[i]]
        p2 = CITIES_HASH[tour[i+1]]
        # Euclidean distance between consecutive cities.
        stepSize = math.sqrt((p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1]))
        if ((i + 1) % 10 == 0) and (tour[i] not in PRIMES):
            stepSize *= 1.1
        score += stepSize
    return score
def get_score(tour, start, end):
    """Partial tour length over steps [start, end) (same rule as
    get_complete_score); used to evaluate local swaps cheaply."""
    score = 0.0
    for i in range(start, end):
        p1 = CITIES_HASH[tour[i]]
        p2 = CITIES_HASH[tour[i+1]]
        stepSize = math.sqrt((p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1]))
        # Every 10th step is 10% longer when leaving a non-prime CityId.
        if ((i + 1) % 10 == 0) and (tour[i] not in PRIMES):
            stepSize *= 1.1
        score += stepSize
    return score
def swap(arr, i, j):
    """Swap arr[i] and arr[j] in place (tuple assignment, no temp var)."""
    arr[i], arr[j] = arr[j], arr[i]
def save_to_csv(arr):
    """Write the tour to 'optimized_submission.csv' with a 'Path' column
    (Kaggle submission format), overwriting any previous file."""
    sub = pd.DataFrame(np.array(arr), columns = ["Path"])
    sub.to_csv('optimized_submission.csv', index=None)
def optimization2():
    """Local-search pass: for each 10th position, try swapping it with the
    position +/- radius away and keep the best scoring swap.

    Runs forever with an ever-growing radius, periodically saving progress
    to CSV.  NOTE(review): the `while True` loop has no break, so the
    final `return` is unreachable; `numIter` is never used.
    """
    numIter = 50000000
    iteration = 0
    modified = copy.deepcopy(ORIGINAL)
    total_reduction = 0.0
    last_saved_reduction = 0.0
    radius = 0
    while True:
        radius += 1
        print("radius increase to: " + str(radius))
        # Visit every 10th step — the positions the prime penalty applies to.
        for step in range(9, len(ORIGINAL) - 1, 10):
            best_reduction = 0
            for i in [-radius, radius]:
                # save every 100000 iterations
                if iteration % 100000 == 0:
                    print("iteration: " + str(iteration))
                    if last_saved_reduction > total_reduction:
                        last_saved_reduction = total_reduction
                        print("saving to csv")
                        print("total reduction so far: " + str(total_reduction))
                        save_to_csv(modified)
                        print("score so far: " + str(get_complete_score(modified)))
                    else:
                        print("no improvement")
                iteration += 1
                index = step + i
                if (index < len(ORIGINAL) and index > 0):
                    # Score only the affected sub-path before/after the swap.
                    start = min(step, index) - 1
                    end = min(max(step, index) + 1, len(ORIGINAL) - 1)
                    original_score = get_score(modified, start, end)
                    swap(modified, step, index)
                    modified_score = get_score(modified, start, end)
                    score_diff = modified_score - original_score
                    if score_diff < best_reduction:
                        best_reduction = score_diff
                        best_swap = [step, index]
                    # swap back (candidate is only applied if it was best)
                    swap(modified, step, index)
            if (best_reduction < 0):
                swap(modified, best_swap[0], best_swap[1])
                total_reduction += best_reduction
    return modified, total_reduction
def optimization3():
    """Window-permutation search: around each 10th position, try every
    permutation of the surrounding window and keep the best ordering.

    NOTE(review): with radius starting at 10 the window holds ~20 cities,
    so `permutations(...)` enumerates ~20! orderings — astronomically
    slow; confirm this ever completes a single window in practice.
    NOTE(review): writes go to `modified[origin - radius + i]`, but the
    scored window starts at `max(origin - radius, 0)`; when
    origin < radius the write index is negative and silently targets the
    END of the list — likely a bug.  The final `return` is also
    unreachable (`while True` with no break) and `numIter` is unused.
    """
    numIter = 50000000
    iteration = 0
    modified = copy.deepcopy(ORIGINAL)
    total_reduction = 0.0
    last_saved_reduction = 0.0
    radius = 9
    last_checked_iteration = 0
    while True:
        radius += 1
        print("radius increase to: " + str(radius))
        for origin in range(9, len(ORIGINAL) - 1, 10):
            best_reduction = 0
            start = max(origin - radius, 0)
            end = min(origin + radius, len(ORIGINAL) - 1)
            path_to_check = copy.deepcopy(modified[start:end])
            original_score = get_score(modified, max(start - 1, 0), min(end + 1, len(ORIGINAL) - 1))
            best_combo = copy.deepcopy(path_to_check)
            for combo in permutations(path_to_check, len(path_to_check)):
                iteration += 1
                for i in range(len(combo)):
                    modified[origin - radius + i] = combo[i]
                modified_score = get_score(modified, max(start - 1, 0), min(end + 1, len(ORIGINAL) - 1))
                score_diff = modified_score - original_score
                if score_diff < best_reduction:
                    best_reduction = score_diff
                    best_combo = copy.deepcopy(combo)
            total_reduction += best_reduction
            # Re-apply the best ordering found for this window.
            for j in range(len(best_combo)):
                modified[origin - radius + j] = best_combo[j]
            # save appx. every 100000 iterations
            if iteration > last_checked_iteration + 100000:
                last_checked_iteration = iteration
                print("iteration: " + str(iteration))
                if last_saved_reduction > total_reduction:
                    last_saved_reduction = total_reduction
                    print("saving to csv")
                    print("total reduction so far: " + str(total_reduction))
                    save_to_csv(modified)
                    print("score so far: " + str(get_complete_score(modified)))
                else:
                    print("no improvement")
    return modified, total_reduction
print("starting ...")
naive_modified3, total_reduction = optimization3()
print("end result")
print("improve score by: " + str(-total_reduction))
print("end!") | shun-lin/kaggle | prime_paths/optimizing-function.py | optimizing-function.py | py | 6,846 | python | en | code | 0 | github-code | 36 |
74853194983 | from langchain.chat_models import ChatVertexAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage
# Demo script: both calls below hit the Vertex AI service over the network
# (presumably requires GCP credentials in the environment — verify).
chat = ChatVertexAI()
# 1) Direct message-list invocation.
messages = [
    SystemMessage(content="You are a helpful assistant that answer questions."),
    HumanMessage(content="Why is the sky blue?"),
]
print(chat(messages))
# 2) Same question, but built through a chat prompt template.
template = """
You are a helpful assistant that answer questions.
"""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
    [system_message_prompt, human_message_prompt]
)
# get a chat completion from the formatted messages
resp = chat(chat_prompt.format_prompt(text="why is the sky blue.").to_messages())
print(resp)
| GoogleCloudPlatform/solutions-genai-llm-workshop | LAB001-2-ChatModel/0-run.py | 0-run.py | py | 927 | python | en | code | 55 | github-code | 36 |
4124871021 | import abc
import numpy as np
from nn.utils.label_mapper import LabelMapper
from datetime import datetime
class LearningAlgorithmTypes(object):
SGD = "stochastic gradient descent"
class LearningAlgorithmFactory(object):
@staticmethod
def create_learning_algorithm_from_type(learning_algorithm_type):
if learning_algorithm_type == LearningAlgorithmTypes.SGD:
return SGD()
else:
raise NotImplementedError("Requested learning algorithm type not yet implemented")
class AbstractLearningAlgorithm(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def learn(self, *args, **kwargs):
raise NotImplementedError()
class SGD(AbstractLearningAlgorithm):
def __init__(self):
super(SGD, self).__init__()
def learn(self, network, training_data_set, number_of_epochs, learning_rate, size_of_batch, **kwargs):
for epoch in range(number_of_epochs):
print("Epoch: " + str(epoch) + " Start time: " + str(datetime.now()))
np.random.shuffle(training_data_set.data_instances)
for batch in np.array_split(training_data_set.data_instances,
len(training_data_set.data_instances) / size_of_batch):
self.__update_weights_and_bias(network, batch, learning_rate)
def __update_weights_and_bias(self, network, batch, learning_rate):
number_of_training_instances = len(batch)
updated_biases = map(lambda layer_biases: np.zeros(layer_biases.shape), network.biases)
updated_weights = map(lambda layer_weights: np.zeros(layer_weights.shape), network.weights)
for data_instance in batch:
# Computing the partial derivatives of the function cost w.r.t. each weight and bias. These partial
# derivatives are the gradient's components.
delta_biases, delta_weights = self.__back_propagate(network, data_instance.features_values_vector,
data_instance.label)
# Accumulating the delta of weights and biases for each training sample of the batch in order to adjust
# the network's weights and biases in the opposite direction of the gradient.
updated_biases = [new_bias + delta for new_bias, delta in zip(updated_biases, delta_biases)]
updated_weights = [new_weight + delta for new_weight, delta in zip(updated_weights, delta_weights)]
# Updating the network's weights and biases in the opposite direction of the cost function's gradient
network.weights = [current_weight - (learning_rate / number_of_training_instances) * new_weight
for current_weight, new_weight in zip(network.weights, updated_weights)]
network.biases = [current_bias - (learning_rate / number_of_training_instances) * new_bias
for current_bias, new_bias in zip(network.biases, updated_biases)]
def __back_propagate(self, network, output_vector, expected_output_label):
last_layer = -1
updated_biases = map(lambda layer_biases: np.zeros(layer_biases.shape), network.biases)
updated_weights = map(lambda layer_weights: np.zeros(layer_weights.shape), network.weights)
output_vectors_by_layer = [output_vector.reshape(1, len(output_vector))]
input_vectors_by_layer = []
for bias, weights in zip(network.biases, network.weights):
next_layer_input = np.dot(output_vector, weights) + bias.T
input_vectors_by_layer.append(next_layer_input)
output_vector = network.neuron.compute(next_layer_input)
output_vectors_by_layer.append(output_vector)
delta = network.cost_computer.compute_cost_derivative(output_vector=output_vectors_by_layer[last_layer],
expected_output_vector=LabelMapper().map_label_to_vector(
expected_output_label)) * \
network.neuron.compute_derivative(input_vectors_by_layer[last_layer])
updated_biases[last_layer] = delta.T
updated_weights[last_layer] = np.dot(output_vectors_by_layer[last_layer - 1].T, delta)
for layer_index in xrange(2, network.number_of_layers):
z = input_vectors_by_layer[-layer_index]
sp = network.neuron.compute_derivative(z)
delta = np.dot(delta, network.weights[-layer_index + 1].T) * sp
updated_biases[-layer_index] = delta.T
updated_weights[-layer_index] = np.dot(output_vectors_by_layer[-layer_index - 1].T, delta)
return updated_biases, updated_weights
| ADozois/ML_Challenge | nn/models/learning/learning_algorithms.py | learning_algorithms.py | py | 4,743 | python | en | code | 0 | github-code | 36 |
7805060084 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os, sys, datetime, json
from core import info_collection
from conf import settings
# import check python version model
from plugins.detector import check_version
class ArgvHandler(object):
def __init__(self, argvs):
self.argvs = argvs
self.parse_argv()
def parse_argv(self):
if len(self.argvs) > 1:
if hasattr(self, self.argvs[1]):
func = getattr(self, self.argvs[1])
func()
else:
self.help()
else:
self.help()
def help(self):
msg = '''
collect
report
'''
print(msg)
def collect(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
print(asset_data)
def __load_asset_id(self):
'''
deal local asset_id
:return: asset_id
'''
asset_id_file = settings.Params["asset_id"]
has_asset_id = False
if os.path.isfile(asset_id_file):
asset_id = open(asset_id_file).read().strip()
asset_id = int(asset_id)
if asset_id.isdigit():
return asset_id
else:
has_asset_id = False
def __import_model(self, cmd1, cmd2):
'''
deal urllib request command
:param cmd1: urllib py3 or urllib2 py2 request comand
:param cmd2: urllib.parse py3 or urllib py2 comand
:return: return request command and urlencode command
'''
cmd_urllib1 = __import__(cmd1)
cmd_urllib2 = __import__(cmd2)
return cmd_urllib1, cmd_urllib2
def __deal_urllib(self, cmd_urllib1, cmd_urllib2, url, data=None, pyversion=None, method=None):
'''
deal python2 or python3 urllib request
:param cmd_urllib1: urllib request model
:param cmd_urllib2: urllib urlencode model
:param url: antOps server url
:param data: system info data
:param pyversion: python version
:param method: get or post
:return: return server callback info
'''
if method == "get":
req = cmd_urllib1.Request(url)
res_data = cmd_urllib1.urlOpen(req, timeout=settings.Params["request_timeout"])
callback = res_data.read()
print("--->server response: ", callback)
return callback
elif method =="post":
data_encode = cmd_urllib2.urlencode(data, encoding='utf-8')
if pyversion == 3:
# ่งฃๅณ POST data should be bytes or an iterable of bytes. It cannot be of type str.
# # ่ฟ้้่ฆๆต่ฏ๏ผๅญๅจbug
req = cmd_urllib1.request.Request(url=url, data=bytes(data_encode, encoding='utf-8')) # python 3.x version
res_data = cmd_urllib1.urlOpen(req, timeout=settings.Params["request_timeout"])
callback = res_data.read()
callback = str(callback, encoding='utf-8')
elif pyversion == 2:
req = cmd_urllib1.Request(url=url, data=data_encode)
res_data = cmd_urllib1.urlOpen(req, timeout=settings.Params["request_timeout"])
callback = res_data.read()
callback = json.load(callback)
print("\033[31;1m[%s]:[%s]\033[0m response:\n%s" % (method, url, callback))
return callback
def __submit_data(self, url, data, method):
'''
This model is compability python2 and python3
:param url: antOps server url
:param data: system info data
:param method: get or post
:return: return server callback info
'''
if url in settings.Params["urls"]:
if type(settings.Params["port"]) is int:
url = "http://%s:%s%s" % (
settings.Params["server"], settings.Params["port"], settings.Params["urls"][url])
else:
url = "http://%s/%s" % (settings.Params["server"], settings.Params["urls"][url])
print("Connectins.. \n \033[32;2m[%s] \033[0m, it may take a minute..." % url)
if method == "get":
args = ""
for k, v in data.items():
args += "&%s=%s" % (k, v)
args = args[1:]
url_with_args = "%s?%s" % (url, args)
try:
pversion = check_version.check_python()
if pversion == 3:
cmd_urllib1, cmd_urllib2 = self.__import_model("urllib.request", "urllib.parse")
callback = self.__deal_urllib(cmd_urllib1, cmd_urllib2, url_with_args, method="get")
return callback
elif pversion == 2:
cmd_urllib1, cmd_urllib2 = self.__import_model("urllib2", "urllib")
callback = self.__deal_urllib(cmd_urllib1, cmd_urllib2, url_with_args, method="get")
return callback
except cmd_urllib1.request.URLError:
sys.exit("\033[31;1m%s\033[0m" % cmd_urllib1.request.URLError)
elif method == "post":
try:
pversion = check_version.check_python()
if pversion == 3:
cmd_urllib1, cmd_urllib2 = self.__import_model("urllib.request", "urllib.parse")
callback = self.__deal_urllib(cmd_urllib1, cmd_urllib2, url, data=data, pyversion=pversion, method="get")
return callback
elif pversion == 2:
cmd_urllib1, cmd_urllib2 = self.__import_model("urllib2", "urllib")
callback = self.__deal_urllib(cmd_urllib1, cmd_urllib2, url, data=data, pyversion=pversion, method="get")
return callback
except Exception:
sys.exit("\033[31;1m%s\033[0m" % Exception)
else:
raise KeyError
def __update_asset_id(self, asset_id):
asset_id_file = settings.Params["asset_id"]
f = open(asset_id_file, "w")
f.write(str(asset_id))
f.close()
def report(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
asset_id = self.__load_asset_id() # load from asset_id file
if asset_id:
asset_data["asset_id"] = asset_id
post_url = "asset_update"
else:
asset_data["asset_id"] = None
post_url = "asset_report"
data = {"asset_data": json.dumps(asset_data)}
response = self.__submit_data(post_url, data, method="post")
if "asset_id" in response:
self.__update_asset_id(response["asset_id"])
self.log_record(response)
def log_record(self, log_mesg):
'''
log deal model
:param log_mesg: server callback info
:return: None
'''
f = open(settings.Params["log_file"], "a")
if log_mesg is str:
pass
if type(log_mesg) is dict:
if "info" in log_mesg:
for msg in log_mesg["info"]:
log_format = "%s\tINFO\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
f.write(log_format)
if "error" in log_mesg:
for msg in log_mesg["error"]:
log_format = "%s\tERROR\t%s\n" %(datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"),msg)
f.write(log_format)
if "warning" in log_mesg:
for msg in log_mesg["warning"]:
log_format = "%s\tWARNING\t%s\n" %(datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"),msg)
f.write(log_format)
f.close() | szlyunnan/AntOpsv2 | antOpsClient/core/antMain.py | antMain.py | py | 7,879 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    """Count lattice paths from (1, 1) to (m, n) moving only down/right.

    Top-down recursion with a memo table padded by one row/column so it
    can be indexed directly with 1-based coordinates.
    """
    def uniquePaths(self, m, n):
        """Return the number of monotone paths in an m x n grid.

        :type m: int
        :type n: int
        :rtype: int
        """
        cache = [[0] * (n + 1) for _ in range(m + 1)]
        return self.dfs(m, n, cache)
    def dfs(self, m, n, memo):
        """Paths from (1, 1) to cell (m, n); zero once off the grid."""
        if m < 0 or n < 0:
            return 0
        if m == 1 and n == 1:
            return 1
        # Fill the memo entry on first visit (path counts are >= 1, so a
        # zero entry always means "not computed yet").
        if not memo[m][n]:
            memo[m][n] = self.dfs(m - 1, n, memo) + self.dfs(m, n - 1, memo)
        return memo[m][n]
class Solution2(object):
    """Bottom-up DP version of the unique-paths count."""
    def uniquePaths(self, m, n):
        """Return the number of monotone (down/right) paths in an m x n grid.

        Fixes: the old docstring documented a nonexistent `obstacleGrid`
        parameter (copied from the obstacle variant of the problem), and
        dead commented-out code from that variant has been removed.

        :type m: int
        :type n: int
        :rtype: int
        """
        if m < 0 or n < 0:
            return 0
        # dp[i][j] = number of paths reaching cell (i, j); the extra row 0
        # and column 0 stay zero and act as out-of-grid padding.
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        dp[1][1] = 1
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if i == 1 and j == 1:
                    continue  # seed cell, already set
                dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
        return dp[m][n]
42890165120 | from pirc522 import RFID
import signal
import time
class Rfid_Oku:
    """Reads a card UID from an RC522 RFID reader (Turkish: "oku" = read)."""
    # NOTE(review): oku() takes no `self`, so it only works when called as
    # Rfid_Oku.oku() — calling it on an instance would raise TypeError.
    # Confirm callers before adding @staticmethod.
    def oku():
        """Block until a tag appears; return its UID as a space-joined
        string of 5 bytes, or None if the read fails."""
        rdr = RFID()
        util = rdr.util()
        util.debug = True
        rdr.wait_for_tag()
        (error, data) = rdr.request()
        if not error:
            #print("Card detected!")
            (error, uid) = rdr.anticoll()
            if not error:
                kart_uid = str(uid[0])+" "+str(uid[1])+" "+str(uid[2])+" "+str(uid[3])+" "+str(uid[4])
                return (kart_uid)
    def result(self,sonuc):
        # Identity helper: returns its argument ("sonuc" = result) unchanged.
        return sonuc
| semohy/raspi3apps | Raspberry Pi RFID uygulamasฤฑ/Rfid_Oku.py | Rfid_Oku.py | py | 562 | python | en | code | 0 | github-code | 36 |
34994522849 | #http://www.pythonchallenge.com/pc/def/ocr.html
__author__ = 'chihchieh.sun'
# Read the puzzle text, stripping line endings into one long string.
s = ''.join([line.rstrip() for line in open('level2_ocr.txt')])
# Count how often each character occurs.
OCCURRENCES = {}
for c in s:
    OCCURRENCES[c] = OCCURRENCES.get(c, 0) + 1
# Characters much rarer than average are the hidden message: keep every
# character whose count is below the mean count per distinct character.
avgOC = len(s) // len(OCCURRENCES)
print(''.join([c for c in s if OCCURRENCES[c] < avgOC]))
20867944972 | import numpy as np
from utils import plot_output
# Defining parameters
N = 500  # grid resolution (pixels per side)
L = 5 # Topological charge number
A3 = np.zeros((N, N), dtype='complex_')
# Constructing SPP: polar coordinates centred on the grid midpoint.
x = np.array([i for i in range(N)])
y = np.array([i for i in range(N)])
X, Y = np.meshgrid(x, y)
theta = np.arctan2((X - N/2), (Y - N/2))
r = np.sqrt((X - N/2) * (X - N/2) + (Y - N/2) * (Y - N/2))
# Spiral phase L*theta wrapped into [0, 2*pi).
A1 = L * (theta + np.pi)
A2 = np.fmod(A1, 2*np.pi)
# Binarise the spiral: phase pi on one half of each 2*pi ramp, 0 on the
# other, giving a binary spiral phase plate of charge L.
for p in range(N):
    for q in range(N):
        if np.fmod(A2[p, q], 2*np.pi) <= np.pi:
            A3[p, q] = np.exp(1j * np.pi)
        else:
            A3[p, q] = np.exp(1j * 0)
# Circular aperture: zero the field outside a 30-pixel radius.
A3[r > 30] = 0
plot_output(A3, N, angle=True, spiral=True)
17354207696 | import numpy as np
from gc_utils import dictionary_to_vector
from gc_utils import gradients_to_vector
from gc_utils import relu
from gc_utils import sigmoid
from gc_utils import vector_to_dictionary
from public_tests import *
from testCases import *
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
    """
    Compute the linear cost J(theta) = theta * x (Figure 1).

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    the value of J computed as theta * x
    """
    return x * theta
def backward_propagation(x, theta):
    """
    Return dJ/dtheta for J(theta) = theta * x (Figure 1).

    Since J is linear in theta, the derivative is simply x; theta is
    accepted only to keep the signature symmetric with the forward pass.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    the gradient of the cost with respect to theta
    """
    return x
def gradient_check(
    x,
    theta,
    epsilon=1e-7,
    print_msg=False,
):
    """
    Implement the gradient checking presented in Figure 1: compare the
    analytic gradient from backward_propagation against a centred finite
    difference of forward_propagation.

    Arguments:
    x -- a float input
    theta -- our parameter, a float as well
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
    print_msg -- when True, print a colour-coded pass/fail message

    Returns:
    difference -- relative difference (2) between the approximated gradient
        and the backward propagation gradient. Float output
    """
    # Centred difference: (J(theta+eps) - J(theta-eps)) / (2*eps).
    theta_plus = theta + epsilon
    theta_minus = theta - epsilon
    J_plus = forward_propagation(x, theta_plus)
    J_minus = forward_propagation(x, theta_minus)
    gradapprox = (J_plus - J_minus) / (2 * epsilon)
    grad = backward_propagation(x, theta)
    # Normalised difference guards against scale: ||g - ga|| / (||g|| + ||ga||).
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if print_msg:
        if difference > 2e-7:
            print(
                "\033[93m"
                + "There is a mistake in the backward propagation! difference = "
                + str(difference)
                + "\033[0m"
            )
        else:
            print(
                "\033[92m"
                + "Your backward propagation works perfectly fine! difference = "
                + str(difference)
                + "\033[0m"
            )
    return difference
def forward_propagation_n(X, Y, parameters):
    """
    Implements the forward propagation (and computes the cost) presented in Figure 3.

    Arguments:
    X -- training set for m examples, shape (input size, m)
    Y -- labels for m examples (assumed 0/1 row vector — confirm with caller)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (5, 4)
                    b1 -- bias vector of shape (5, 1)
                    W2 -- weight matrix of shape (3, 5)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)

    Returns:
    cost -- the cost function (logistic cost for m examples)
    cache -- a tuple with the intermediate values (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    """
    # retrieve parameters
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]
    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)
    # Cost: mean binary cross-entropy over the m examples.
    log_probs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1.0 / m * np.sum(log_probs)
    # Cache every intermediate for use by the matching backward pass.
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7, print_msg=False):
    """
    Numerically verify that the analytic gradients match the cost produced
    by forward_propagation_n, using a central finite difference.

    Arguments:
    parameters -- dict with "W1", "b1", "W2", "b2", "W3", "b3"
    gradients -- output of the backward pass (gradients of the cost
                 with respect to the parameters)
    X -- input datapoint, of shape (input size, number of examples)
    Y -- true "label"
    epsilon -- tiny shift used in the finite-difference formula
    print_msg -- when True, print a colored pass/fail summary

    Returns:
    difference -- relative L2 distance between the approximated gradient
                  and the backward-propagation gradient
    """
    theta, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = theta.shape[0]
    gradapprox = np.zeros((num_parameters, 1))
    # Central difference, one scalar parameter at a time.
    for i in range(num_parameters):
        bumped_up = np.copy(theta)
        bumped_up[i] = bumped_up[i] + epsilon
        cost_plus, _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_up))
        bumped_down = np.copy(theta)
        bumped_down[i] = bumped_down[i] - epsilon
        cost_minus, _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_down))
        gradapprox[i] = (cost_plus - cost_minus) / (2 * epsilon)
    # Relative difference: ||grad - approx|| / (||grad|| + ||approx||).
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if print_msg:
        if difference > 2e-7:
            print(f"\033[93mThere is a mistake in the backward propagation! difference = {difference}\033[0m")
        else:
            print(f"\033[92mYour backward propagation works perfectly fine! difference = {difference}\033[0m")
    return difference
| HarryMWinters/ML_Coursework | Course 5, Improving Deep Neural Networks/Week 1/gradient_checking.py | gradient_checking.py | py | 6,273 | python | en | code | 0 | github-code | 36 |
22377527144 | #!/usr/bin/env python3
import nibabel as nib
from nibabel import processing
import numpy as np
import scipy
import matplotlib.pyplot as plt
import matplotlib
from scipy import ndimage
from scipy.interpolate import RegularGridInterpolator
from scipy import optimize
import os, glob
import json
import time
import shutil
def calc_center_of_mass(img_data, affine):
    """Return the image's center of mass in world (xyz) coordinates.

    The voxel-space center of mass is promoted to a homogeneous ijk
    coordinate and mapped through the affine; the returned vector is
    length 4 (x, y, z, 1).
    """
    com = ndimage.center_of_mass(img_data)
    homogeneous_ijk = np.array([com[0], com[1], com[2], 1])
    return np.matmul(affine, homogeneous_ijk)
def calc_affine(original_affine, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, center_of_mass = None):
    """Build a 4x4 rigid-body transform from translations and Euler angles.

    Rotations are applied in x, then y, then z order about the supplied
    center of mass (identity pivot when center_of_mass is None); the
    translation column is then overwritten with (trans_x, trans_y, trans_z).

    Parameters
    ----------
    original_affine : ndarray
        Accepted for interface compatibility but not used.
    trans_x, trans_y, trans_z : float
        Translation components (world units).
    rot_x, rot_y, rot_z : float
        Rotation angles in radians about the x, y and z axes.
    center_of_mass : sequence or None
        World-space pivot for the rotations.

    Returns
    -------
    new_affine : ndarray, shape (4, 4)

    Bug fix: the z-rotation entries were previously written into
    mat_rot_y, so mat_rot_z stayed the identity and any requested
    y rotation was partially overwritten.

    NOTE(review): overwriting the translation column at the end discards
    the translation produced by the center-of-mass compensation above --
    confirm this is intended.
    """
    mat_rot_x = np.eye(4)
    mat_rot_y = np.eye(4)
    mat_rot_z = np.eye(4)
    # Pre-apply a shift of the center of mass to the origin so the
    # rotations pivot about it.
    temp_COM_mat = np.eye(4)
    if center_of_mass is not None:
        temp_COM_mat[0, 3] = -1 * center_of_mass[0]
        temp_COM_mat[1, 3] = -1 * center_of_mass[1]
        temp_COM_mat[2, 3] = -1 * center_of_mass[2]
    # Rotation about x
    mat_rot_x[1, 1] = np.cos(rot_x)
    mat_rot_x[2, 2] = np.cos(rot_x)
    mat_rot_x[1, 2] = -np.sin(rot_x)
    mat_rot_x[2, 1] = np.sin(rot_x)
    # Rotation about y
    mat_rot_y[0, 0] = np.cos(rot_y)
    mat_rot_y[2, 2] = np.cos(rot_y)
    mat_rot_y[2, 0] = -np.sin(rot_y)
    mat_rot_y[0, 2] = np.sin(rot_y)
    # Rotation about z (fixed: written into mat_rot_z, not mat_rot_y)
    mat_rot_z[0, 0] = np.cos(rot_z)
    mat_rot_z[1, 1] = np.cos(rot_z)
    mat_rot_z[0, 1] = -np.sin(rot_z)
    mat_rot_z[1, 0] = np.sin(rot_z)
    # Apply x, then y, then z rotation about the pivot, then set the
    # translation column explicitly.
    new_affine = np.matmul(mat_rot_x, temp_COM_mat)
    new_affine = np.matmul(mat_rot_y, new_affine)
    new_affine = np.matmul(mat_rot_z, new_affine)
    new_affine = np.matmul(np.linalg.inv(temp_COM_mat), new_affine)
    new_affine[0, 3] = trans_x
    new_affine[1, 3] = trans_y
    new_affine[2, 3] = trans_z
    return new_affine
def grab_orig_inds_xyz_mat(image_data, affine):
    """Return world coordinates and values for every voxel of a 3D image.

    Returns a (4, n_voxels) array of homogeneous xyz coordinates (voxel
    indices mapped through the affine, C-order flattened) and a flat copy
    of the voxel values in the same order.
    """
    ijk = np.indices(image_data.shape)
    n_voxels = ijk.shape[1] * ijk.shape[2] * ijk.shape[3]
    homogeneous = np.vstack([ijk.reshape(3, n_voxels), np.ones((1, n_voxels))])
    orig_xyz = affine @ homogeneous
    orig_vals = image_data.flatten().copy()
    return orig_xyz, orig_vals
def get_new_xyzs(transformation, original_xyzs):
    """Apply a 4x4 transform to a (4, n) stack of homogeneous coordinates."""
    return transformation @ original_xyzs
def grab_image_vals(img_data, img_affine, inds, interp_method = 'linear'):
    """Sample img_data at world-space coordinates.

    `inds` is a (4, n) array of homogeneous xyz coordinates; they are
    mapped back to voxel (ijk) space via the inverse affine and the image
    is interpolated there. Out-of-bounds points yield NaN
    (bounds_error=False).
    """
    grid_axes = tuple(np.arange(0, n) for n in img_data.shape)
    interp = RegularGridInterpolator(grid_axes, img_data, method=interp_method, bounds_error=False)
    ijk = np.matmul(np.linalg.inv(img_affine), inds)
    return interp(ijk[:3, :].T)
def make_alignment_images(full_registered_nifti_path, localizers_arr_path, output_figures_folder, close_figures = True):
    '''Save overlay figures comparing a volumetric image to localizer slices.

    For a handful of slices from each localizer, the full volumetric image
    is resampled onto the localizer slice's world coordinates and a
    three-panel figure (volumetric, localizer, z-scored difference) is
    written to output_figures_folder (created if missing).

    Parameters
    ----------
    full_registered_nifti_path : str
        Path to a full 3d nifti image, presumably already registered to
        the localizers.
    localizers_arr_path : list
        Paths to localizer images used to generate the overlays.
    output_figures_folder : str
        Folder where the PNG overlays are stored.
    close_figures : bool
        When True, each figure is closed after saving.

    Returns
    -------
    None
        NOTE(review): the docstring previously advertised a
        slice_specific_corrs return, but the list below is never filled
        or returned -- confirm whether correlations were meant to be
        computed here.
    '''
    full_img = nib.load(full_registered_nifti_path)
    full_data = full_img.get_fdata()
    full_affine = full_img.affine
    # Interpolator over the full image's voxel grid; out-of-bounds -> NaN.
    i = np.arange(0,full_data.shape[0])
    j = np.arange(0,full_data.shape[1])
    k = np.arange(0,full_data.shape[2])
    interp = RegularGridInterpolator((i, j, k), full_data, method = 'linear', bounds_error = False)
    slice_specific_corrs = []
    if os.path.exists(output_figures_folder) == False:
        os.makedirs(output_figures_folder)
    for i, temp_localizer in enumerate(localizers_arr_path):
        temp_loc_img = nib.load(temp_localizer)
        temp_loc_data = temp_loc_img.get_fdata()
        # Slices are taken along the thinnest axis (the acquisition axis).
        smallest_dim = np.argmin(temp_loc_data.shape)
        num_slices = temp_loc_data.shape[smallest_dim]
        if num_slices > 4:
            # Three interior slices, avoiding the volume edges.
            slices = np.round(np.linspace(0, num_slices, 6)[1:-2]).astype(int)
        else:
            slices = np.linspace(0,num_slices - 1,num_slices).astype(int)
        # World coordinates for every localizer voxel, reshaped back to 4D
        # so individual slices can be pulled out below.
        flattened_inds_vals = grab_orig_inds_xyz_mat(temp_loc_data, temp_loc_img.affine)
        reshaped_inds = flattened_inds_vals[0].reshape((4, temp_loc_data.shape[0], temp_loc_data.shape[1], temp_loc_data.shape[2]))
        for temp_slice_num in slices:
            if smallest_dim == 0:
                temp_slice = reshaped_inds[:,temp_slice_num,...]
                temp_slice_data = temp_loc_data[temp_slice_num,:,:]
            elif smallest_dim == 1:
                temp_slice = reshaped_inds[:,:,temp_slice_num,...]
                temp_slice_data = temp_loc_data[:,temp_slice_num,:]
            elif smallest_dim == 2:
                temp_slice = reshaped_inds[:,:,:,temp_slice_num]
                temp_slice_data = temp_loc_data[:,:,temp_slice_num]
            else:
                raise ValueError('Error: localizer should be 3d image')
            flattened_slice_inds = temp_slice.reshape((temp_slice.shape[0], int(temp_slice.shape[1]*temp_slice.shape[2])))
            #Find values in the full 3d image that correspond to the
            #current slice in the current localizer image
            inds_xyz_to_ijk = np.matmul(np.linalg.inv(full_affine), flattened_slice_inds)
            out_vals = interp(inds_xyz_to_ijk[0:3,:].transpose())
            out_vals = out_vals.reshape((temp_slice.shape[1], temp_slice.shape[2]))
            #Plot: resampled volumetric image, localizer slice, and their
            #z-scored difference, with a shared red grid for alignment cues.
            plt.figure(dpi=200, figsize=(4,10))
            plt.subplot(3,1,1)
            plt.imshow(out_vals)
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.title('Full Volumetric Img.')
            plt.subplot(3,1,2)
            plt.imshow(temp_slice_data)
            plt.title('Localizer Img.')
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.subplot(3,1,3)
            plt.imshow((temp_slice_data - np.nanmean(temp_slice_data))/np.nanstd(temp_slice_data) - (out_vals - np.nanmean(out_vals))/np.nanstd(out_vals))
            plt.title('Difference')
            plt.xticks(np.arange(0,out_vals.shape[0],25), labels='')
            plt.yticks(np.arange(0,out_vals.shape[1],25), labels='')
            plt.gca().grid(color='red', linestyle='-.', linewidth=1)
            plt.tight_layout()
            plt.savefig(os.path.join(output_figures_folder, 'localizer_{}_slice_{}.png'.format(i, temp_slice_num)), bbox_inches='tight')
            if close_figures:
                plt.close()
    return
def calc_loss(af_vals, localizer_imgs, localizer_vals, reference_data, reference_affine, center_of_mass, xyz_s_list):
    """Registration loss: 1 - Pearson correlation.

    Applies the rigid transform described by af_vals (tx, ty, tz, rx, ry,
    rz) to every localizer's voxel coordinates, samples the reference
    image there, and returns 1 minus the correlation between the sampled
    reference values and the localizer values (NaN samples excluded).
    """
    moved_xyz = []
    for img, xyz in zip(localizer_imgs, xyz_s_list):
        transform = calc_affine(img.affine, af_vals[0], af_vals[1], af_vals[2],
                                af_vals[3], af_vals[4], af_vals[5],
                                center_of_mass=center_of_mass)
        moved_xyz.append(get_new_xyzs(transform, xyz))
    all_xyz = np.hstack(moved_xyz)
    reference_vals = grab_image_vals(reference_data, reference_affine, all_xyz, interp_method='linear')
    valid = np.isnan(reference_vals) == False
    return 1 - np.corrcoef(reference_vals[valid], localizer_vals[valid])[0, 1]
def calc_localizer_val_bins(localizer_vals):
    """Quantize intensity values into histogram bin indices.

    The bin width follows Scott's rule (3.49 * std * n^(-1/3), optimal
    for the unimodal case per p. 151 of the Jenkinson paper referenced in
    the original comment). Each value is replaced by the index of the
    histogram bin it falls in; values at or above the last edge go into
    the final bin.

    Bug fix: np.histogram returns (counts, bin_edges). The original code
    indexed bins[0] -- the *counts* -- as if they were edges, so the
    resulting "bins" were essentially arbitrary. The edges (bins[1]) are
    now used, making the output a monotone function of the input value.

    Parameters
    ----------
    localizer_vals : ndarray
        Flat array of intensity values.

    Returns
    -------
    binned_data : ndarray
        Array of the same shape holding float bin indices.
    """
    std = np.std(localizer_vals)
    # Scott's rule bin width.
    bin_width = 3.49 * std * np.power(localizer_vals.shape[0], -1 / 3)
    num_bins = int((np.max(localizer_vals) - np.min(localizer_vals)) / bin_width)
    _, edges = np.histogram(localizer_vals, num_bins)
    binned_data = np.zeros(localizer_vals.shape)
    for i in range(edges.shape[0] - 1):
        binned_data[(localizer_vals >= edges[i]) & (localizer_vals < edges[i + 1])] = i
    # The maximum value sits exactly on the last edge; assign it to the
    # final bin rather than leaving it at 0.
    binned_data[localizer_vals >= edges[-1]] = edges.shape[0] - 2
    return binned_data
def calc_corr_ratio_loss(af_vals, localizer_imgs, localizer_vals, reference_data, reference_affine, mask_data, center_of_mass, xyz_s_list, make_plot = False, image_output_path = None):
    """Correlation-ratio registration loss between reference and localizers.

    Applies the rigid transform in af_vals (tx, ty, tz, rx, ry, rz) to the
    localizer voxel coordinates, samples the reference image and mask
    there, and computes a correlation-ratio-style statistic over the
    binned localizer values (smaller = better alignment, per the calling
    code's "0 is best, 1 is worst" message).

    NOTE(review): n_k below counts matches within unique_loc_vals, which
    is always 1; presumably it was meant to count occurrences in good_loc
    -- confirm against the correlation-ratio definition.
    NOTE(review): `loss = 1 - corr_ratio` is immediately overwritten by
    `loss = corr_ratio`, so the first assignment is dead code.
    """
    affine_transforms = []
    new_xyz_s_list = []
    for i, temp_img in enumerate(localizer_imgs):
        affine_transforms.append(calc_affine(localizer_imgs[i].affine, af_vals[0], af_vals[1], af_vals[2], af_vals[3], af_vals[4], af_vals[5], center_of_mass = center_of_mass)) #transform to apply
        new_xyz_s_list.append(get_new_xyzs(affine_transforms[i], xyz_s_list[i]))
    new_xyz_s_arr = np.hstack(new_xyz_s_list)
    # Sample the reference (linear) and the mask (nearest) at the moved
    # coordinates; keep only in-bounds samples inside the mask.
    reference_vals = grab_image_vals(reference_data, reference_affine, new_xyz_s_arr, interp_method = 'linear')
    mask_vals = grab_image_vals(mask_data, reference_affine, new_xyz_s_arr, interp_method = 'nearest')
    good_ref = reference_vals[(np.isnan(reference_vals) == False)*(mask_vals > 0.5)]
    good_loc = localizer_vals[(np.isnan(reference_vals) == False)*(mask_vals > 0.5)]
    num_good = good_loc.shape[0]
    unique_loc_vals = np.unique(good_loc)
    corr_ratio = 0
    # Weighted within-bin variance of the reference values, one term per
    # distinct localizer bin value.
    for i in range(unique_loc_vals.shape[0]):
        n_k = np.sum(unique_loc_vals == unique_loc_vals[i])
        corr_ratio += (n_k/num_good)*np.var(good_ref[good_loc == unique_loc_vals[i]])
    corr_ratio = corr_ratio/np.var(good_ref)
    loss = 1 - corr_ratio
    loss = corr_ratio
    if make_plot:
        make_corr_ratio_loss_plot(unique_loc_vals, good_loc, good_ref, output_image_path = image_output_path, close_image = False)
    return loss
def make_corr_ratio_loss_plot(unique_loc_vals, good_loc, good_ref, output_image_path = None, close_image = True):
    """Scatter-plot within-bin deviations of the reference values.

    For each localizer bin value, plots the signed log10 magnitude of
    each reference sample's deviation from the bin mean, horizontally
    jittered inside the bin's column (random jitter, so the figure is not
    deterministic). Alternating bins use alternating colors. Saves to
    output_image_path when given; closes the figure when close_image is
    True.
    """
    plt.figure(dpi = 100)
    differences_1 = []
    bin_jitters_1 = []
    differences_2 = []
    bin_jitters_2 = []
    for i in range(unique_loc_vals.shape[0]):
        temp_vals = good_ref[good_loc == unique_loc_vals[i]]
        # Signed log-magnitude of the deviation from the bin mean.
        temp_differences = np.log10(np.absolute(temp_vals - np.mean(temp_vals)))*np.sign(temp_vals - np.mean(temp_vals))
        temp_bin_jitters = np.random.uniform(low = i, high = i + 1, size = temp_differences.shape)
        # Even- and odd-indexed bins go to separate series so adjacent
        # columns render in different colors.
        if np.mod(i,2) == 0:
            differences_1.append(temp_differences)
            bin_jitters_1.append(temp_bin_jitters)
        else:
            differences_2.append(temp_differences)
            bin_jitters_2.append(temp_bin_jitters)
    differences_1 = np.hstack(differences_1)
    bin_jitters_1 = np.hstack(bin_jitters_1)
    differences_2 = np.hstack(differences_2)
    bin_jitters_2 = np.hstack(bin_jitters_2)
    plt.scatter(bin_jitters_1, differences_1, s = 0.05)
    plt.scatter(bin_jitters_2, differences_2, s = 0.05)
    plt.xlabel('Bin Number')
    plt.ylabel('sign(Deviation)*log10(absolute(Deviation))')
    plt.axhline(2.5, linestyle = '--', color = 'grey', linewidth = 1)
    plt.axhline(-2.5, linestyle = '--', color = 'grey', linewidth = 1)
    if type(output_image_path) == type(None):
        pass
    else:
        plt.savefig(output_image_path)
    if close_image:
        plt.close()
    return
def make_readme(affine_readme_path):
    """Write a short usage README for a registration output folder.

    The text explains what the folder contains and shows a snippet for
    applying transform_mat.npy to other images in reference space.
    """
    chunks = [
        'This folder contains registration results from a high res anatomical template to a localizer thats presumed to be in MRS voxel space.\n',
        'The details of what images were registered can be found in registration_summary.json and figures showing the quality of the registration can be found in the figures folder.\n',
        'The new copy of the reference image (now aligned to the localizer) is found at reference_img_aligned_to_localizer.nii.gz\n\n',
        'How to use transform_mat.npy file:\n\n\n',
        'import nibabel as nib\nimport numpy as np\npath_to_image_in_reference_space = ""\ntemp_img = nib.load(path_to_image_in_reference_space)\n',
        'transform_mat = np.load("transform_mat.npy")\ntemp_img.affine = np.matmul(transform_mat, temp_img.affine)\n',
        'nib.save(temp_img, "/some/new/path/for/image/now/in/localizer/space/img.nii.gz")',
    ]
    with open(affine_readme_path, 'w') as f:
        f.write(''.join(chunks))
    return
def localizer_alignment_anat_update_osprey(anat_files_dict, registration_output_folder, localizer_paths):
    '''Registers anat reference image to the localizer image(s)

    Runs a two-stage TNC optimization of a rigid transform (first against
    a 10 mm smoothed reference, then the unsmoothed one) minimizing the
    correlation-ratio loss, keeps the result only if it improves on the
    identity transform, and writes the realigned reference image,
    transform_mat.npy, QC figures and a JSON summary to the output folder.

    Parameters
    ----------
    anat_files_dict : dict
        Has (at minimum) key 'files_nii' and optionally 'files_seg' that will be
        registered to the localizer image. After registration,
        this path will be reset to be the path to the new image
        following registration.
    registration_output_folder : str
        Path to the folder that will be created to store registration
        results. This will be subject/ses and in certain cases run
        specific.
    localizer_paths : list of strings
        The paths to the localizer image or images to be registered.
        You will only have multiple entries in this list if axial
        images were stored in different images than sagital or coronal.

    Returns
    -------
    anat_files_dict : dict
        The same dictionary as before, but now the 'files_nii' key
        has been updated to point to the registered image

    NOTE(review): the `if original_corr > registered_corr` block appears
    twice below with slightly different else-branches (only the first
    copies the pre-registration figure); the second pass recomputes
    inv_affine redundantly -- confirm the duplication is intentional.
    NOTE(review): localizer_sizes is collected but never used.
    '''
    output_folder = registration_output_folder
    if os.path.exists(os.path.join(output_folder, 'figures')) == False:
        os.makedirs(os.path.join(output_folder, 'figures'))
    make_readme(os.path.join(output_folder, 'readme.txt'))
    #Load the reference image
    reference_path = anat_files_dict['files_nii'][0]
    reference_img = nib.load(reference_path)
    reference_data = reference_img.get_fdata()
    #Load and dilate brain mask by 10 iterations ... this keeps the scalp in registration but not neck
    #USING MASK SEEMED TO HURT REGISTRATION FOR LOWRES LOCALIZERS SO AM EXCLUDING THIS FOR NOW
    #mask_data = nib.load(brain_mask_path).get_fdata()
    #mask_data = ndimage.binary_dilation(mask_data, iterations = 10)
    #mask_data = mask_data.astype(float) + 1
    reference_data_10mm_smoothing = processing.smooth_image(reference_img, 10).get_fdata()
    reference_com = calc_center_of_mass(reference_data, reference_img.affine)
    mask_data = np.ones(reference_data.shape) #we arent using a mask right now so this is just a dummy mask
    #reference_com = None
    # Gather world coordinates and values for every localizer voxel.
    localizer_imgs = []
    xyz_s_list = []
    vals = []
    localizer_sizes = []
    for i, temp_path in enumerate(localizer_paths):
        localizer_imgs.append(nib.load(temp_path))
        temp_xyz, temp_vals = grab_orig_inds_xyz_mat(localizer_imgs[i].get_fdata(), localizer_imgs[i].affine)
        xyz_s_list.append(temp_xyz)
        vals.append(temp_vals)
        localizer_sizes.append(localizer_imgs[i].get_fdata().size)
    xyz_s_arr = np.hstack(xyz_s_list)
    localizer_vals = np.hstack(vals)
    localizer_vals = calc_localizer_val_bins(localizer_vals) #NOW THESE ARE BINS
    reference_vals = grab_image_vals(reference_data, reference_img.affine, xyz_s_arr, interp_method = 'linear')
    good_ref = reference_vals[np.isnan(reference_vals) == False]
    good_loc = localizer_vals[np.isnan(reference_vals) == False]
    print('Original Ref/Localizer Correlation Ratio (0 is best, 1 is worst):')
    original_corr = calc_corr_ratio_loss([0,0,0,0,0,0], localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list, make_plot = True, image_output_path = os.path.join(registration_output_folder, 'figures', 'corr_ratio_pre_registration.png'))
    print(original_corr)
    # Translation bounds +/-100 (world units), rotation bounds +/-1.5 rad.
    bounds_10mm = [[-100,100],[-100,100],[-100,100],[-1.5,1.5],[-1.5,1.5],[-1.5,1.5]]
    tic = time.perf_counter()
    options = {'maxfun':5000, 'maxiter':50}
    # Coarse-to-fine: optimize against the smoothed reference first, then
    # refine against the full-resolution reference from that start point.
    results_tnc_10mm = optimize.minimize(calc_corr_ratio_loss, [0,0,0,0,0,0], args=(localizer_imgs, localizer_vals, reference_data_10mm_smoothing, reference_img.affine, mask_data, reference_com, xyz_s_list),
                                method='TNC', jac=None, bounds=bounds_10mm, options=options)
    results_tnc_00mm = optimize.minimize(calc_corr_ratio_loss, results_tnc_10mm.x, args=(localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list),
                                method='TNC', jac=None, bounds=bounds_10mm, options=options)
    toc = time.perf_counter()
    print(f"Ran optimization in {toc - tic:0.4f} seconds")
    ###Illustrate the performance of the new transformation
    affine_transforms = []
    new_xyz_s_list = []
    for i, temp_img in enumerate(localizer_imgs):
        affine_transforms.append(calc_affine(localizer_imgs[i].affine, results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)) #transform to apply
        new_xyz_s_list.append(get_new_xyzs(affine_transforms[i], xyz_s_list[i]))
    new_xyz_s_arr = np.hstack(new_xyz_s_list)
    reference_vals = grab_image_vals(reference_data, reference_img.affine, new_xyz_s_arr, interp_method = 'linear')
    good_ref = reference_vals[np.isnan(reference_vals) == False]
    good_loc = localizer_vals[np.isnan(reference_vals) == False]
    registered_corr = calc_corr_ratio_loss(results_tnc_00mm.x, localizer_imgs, localizer_vals, reference_data, reference_img.affine, mask_data, reference_com, xyz_s_list, make_plot = True, image_output_path = os.path.join(registration_output_folder, 'figures', 'corr_ratio_post_registration.png'))
    # Keep the optimized transform only if it beat the identity; otherwise
    # fall back to the identity and reuse the pre-registration figure.
    if original_corr > registered_corr:
        inv_affine = calc_affine(np.eye(4), results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)
        inv_affine = np.linalg.inv(inv_affine)
    else:
        inv_affine = np.eye(4)
        registered_corr = original_corr
        shutil.copyfile(os.path.join(registration_output_folder, 'figures', 'corr_ratio_pre_registration.png'), os.path.join(registration_output_folder, 'figures', 'corr_ratio_post_registration.png'))
    print('Registered Ref/Localizer Correlation (0 is best, 1 is worst):')
    print(registered_corr)
    if original_corr > registered_corr:
        inv_affine = calc_affine(np.eye(4), results_tnc_00mm.x[0], results_tnc_00mm.x[1], results_tnc_00mm.x[2], results_tnc_00mm.x[3], results_tnc_00mm.x[4], results_tnc_00mm.x[5], reference_com)
        inv_affine = np.linalg.inv(inv_affine)
    else:
        inv_affine = np.eye(4)
        registered_corr = original_corr
    # Re-header the reference data with the updated affine and save it.
    new_affine = np.matmul(inv_affine, reference_img.affine)
    new_img = nib.nifti1.Nifti1Image(reference_data, new_affine)
    registered_output_image_name = os.path.join(output_folder, 'reference_img_aligned_to_localizer.nii.gz')
    nib.save(new_img, registered_output_image_name)
    np.save(os.path.join(output_folder, 'transform_mat.npy'), inv_affine) #This can be used with other images to update their affines
    if 'files_seg' in anat_files_dict.keys():
        new_seg_img = nib.load(anat_files_dict['files_seg'][0][0])
        new_seg_img = nib.Nifti1Image(new_seg_img.get_fdata(), new_affine)
        print('Saving new segmentation image.')
        nib.save(new_seg_img, os.path.join(output_folder, 'reference_seg_aligned_to_localizer.nii.gz'))
        anat_files_dict['files_seg'] = [[os.path.join(output_folder, 'reference_seg_aligned_to_localizer.nii.gz')]]
    make_alignment_images(registered_output_image_name, localizer_paths, os.path.join(output_folder, 'figures'))
    registration_dict = {"reference_img" : reference_path,
                        "localizer_imgs": localizer_paths,
                        "reference_localizer_corr_ratio_pre_registration" : np.round(original_corr, 8),
                        "reference_localizer_corr_ratio_post_registration" : np.round(registered_corr, 8)}
    with open(os.path.join(output_folder, 'registration_summary.json'), 'w') as f:
        f.write(json.dumps(registration_dict, indent = 6))
    anat_files_dict['files_nii'] = [registered_output_image_name]
return anat_files_dict | erikglee/OSPREY_Containerization | code/localizer_alignment.py | localizer_alignment.py | py | 21,949 | python | en | code | 0 | github-code | 36 |
70190181225 | """
"Insertion sort is a simple sorting algorithm that builds the final sorted array (or list) one item at a time.
It is much less efficient on large lists than more advanced algorithms such as quicksort, heapsort, or merge sort."
"""
import random
import time
lista_rand = random.sample(range(1, 860), 10)
print(lista_rand)
start = time.time()
class InsertionSort(object):
    """Sorts a list in place on construction.

    Despite the class name, the implemented algorithm is selection sort
    (as the method name `selection_sort` admits): on each pass the
    minimum of the unsorted suffix is swapped into position i.

    Bug fix: the inner search previously initialized `valor_menor = 1`
    instead of the current position `i`, which produced incorrectly
    sorted output for many inputs (e.g. [1, 3, 2] -> [2, 3, 1]).
    """

    def __init__(self, list):
        # NOTE: parameter name `list` shadows the builtin; kept unchanged
        # for interface compatibility.
        self.list = list
        self.sorted = self.selection_sort()

    def __str__(self):
        # String form of the sorted list.
        return str(self.sorted)

    def selection_sort(self):
        """Sort self.list in place with selection sort and return it."""
        for i in range(len(self.list)):
            valor_menor = i  # index of the smallest element seen so far
            for j in range(i + 1, len(self.list)):
                if self.list[j] < self.list[valor_menor]:
                    valor_menor = j
            self.list[i], self.list[valor_menor] = self.list[valor_menor], self.list[i]
        return self.list
if __name__ == '__main__':
    # Time the sort of the random sample generated above and print the
    # sorted list (InsertionSort sorts lista_rand in place on construction).
    start = time.time()
    InsertionSort(lista_rand)
    print("\n lista ordenada: \n", lista_rand)
    end = time.time()
    print("\nTempo: ", end - start)
| mabittar/desafios_pythonicos | ordenacao/insertion_sortR2.py | insertion_sortR2.py | py | 1,079 | python | en | code | 0 | github-code | 36 |
15982937093 | import numpy as np
from matplotlib.pyplot import *
from scipy import interpolate
dat=np.loadtxt("/home/davidvartanyan/presupernova.dat")
rad=dat[:,2]
rho1=dat[:,4]
i=0
while rad[i]/10**9 < 1:
i+=1
xlim([rad[0],rad[i]])
#loglog(rad[0:i],rho1[0:i],'k')
npoints=1000
radmin=rad[0]
radmax=10**9
radius=np.linspace(0.1,
radmax,npoints)
dr=radius[1]-radius[0]
tck = interpolate.splrep(rad,rho1)
rho2= interpolate.splev(radius,tck,der=0)
#xlabel('radius [$cm$]')
#ylabel('Density [$g/cm^3$]')
#show()
ggrav = 6.67e-8
#######################################
# function definitions
def tov_RHS(x, rho, z):
    """Right-hand side of the spherical Poisson equation as a 1st-order system.

    With phi' = z, returns [dphi/dr, dz/dr] where
    dz/dr = 4*pi*G*rho - 2*z/x. Uses the module-level constant ggrav.
    """
    dphi_dr = z
    dz_dr = 4 * np.pi * ggrav * rho - 2 * z / x
    rhs = np.zeros(2)
    rhs[0] = dphi_dr
    rhs[1] = dz_dr
    return rhs
def tov_integrate_FE(rad, dr, rho, z, phi):
    """Advance (phi, z) by one forward-Euler step of size dr.

    Returns the tuple (phinew, znew) evaluated at rad + dr using the
    right-hand side from tov_RHS.
    """
    state = np.array([float(phi), float(z)])
    stepped = state + dr * tov_RHS(rad, rho, z)
    return (stepped[0], stepped[1])
#######################################
# set up variables
z1f = np.zeros(npoints)
phif = np.zeros(npoints)
# set up boundary values
z1f[0] = 0.0
phif[0] = 0.0
for n in range(npoints-1):
(phif[n+1],z1f[n+1]) = tov_integrate_FE(radius[n],
dr,rho2[n],z1f[n],phif[n])
dm=4*np.pi*radius**2*rho2*dr
M=np.sum(dm)
phiBC2=-ggrav*M/radius[npoints-1]
phiana=2./3*np.pi*ggrav*rho2*(radius**2-3*((10**9.)**2))
phifin=phif+phiBC2-phif[npoints-1]
#p1,=loglog(radius,-phifin)
#p2,=loglog(radius,-phiana)
xlabel('radius[cm]')
#ylabel('Phi')
#legend([p1,p2], ['Numerical Potential', 'Analytical Potential'])
ylabel('Error')
loglog(radius,np.abs((phiana-phifin)/phiana))
show()
#radmax2=np.asarray([10**9]*len(radius))
#origin=np.asarray([4.2118*10**20]*len(radius))
#plot(radius,phif+origin)
#print .67*np.pi*ggrav*rho2*(radius**2-3*radmax2**2)
#print -0.67*np.pi*ggrav*rho2*(radius**2-3*radmax2**2) | dvartany/ay190 | ws12/ws12.py | ws12.py | py | 2,041 | python | en | code | 0 | github-code | 36 |
def max_shared_letters(S):
    """Return the max, over all split points, of the number of distinct
    characters that appear in both halves of S.

    Kept module-level (and public) so it can be unit tested; main() only
    does the I/O. Uses set intersection instead of the original
    O(n*m) list-membership scans.
    """
    best = 0
    for i in range(1, len(S)):
        best = max(best, len(set(S[:i]) & set(S[i:])))
    return best


def main():
    """Read N (consumed but unused beyond the input protocol) and S,
    then print the answer."""
    N = int(input())
    S = input()
    print(max_shared_letters(S))
if __name__ == '__main__':
main() | oamam/atcoder_amama | python/beginner/20180526/B.py | B.py | py | 329 | python | en | code | 0 | github-code | 36 |
43506783122 | #!/usr/bin/env python3
"""This modual holds the class created for task 3"""
import numpy as np
import matplotlib.pyplot as plt
class Neuron:
    """
    Neuron - a single neuron performing binary classification with a
    sigmoid activation and a logistic-regression cost.

    nx = the number of input features to the neuron
    """
    def __init__(self, nx):
        """Initialize weights randomly; bias and cached activation at 0.

        Raises TypeError when nx is not an int, ValueError when nx < 1.
        """
        if not isinstance(nx, int):
            raise TypeError("nx must be an integer")
        if nx < 1:
            raise ValueError("nx must be a positive integer")
        self.__W = np.random.randn(1, nx)
        self.__b = 0
        self.__A = 0

    @property
    def W(self):
        """Weights vector, shape (1, nx)."""
        return self.__W

    @property
    def b(self):
        """Bias scalar."""
        return self.__b

    @property
    def A(self):
        """Cached activation from the last forward pass."""
        return self.__A

    def forward_prop(self, X):
        """
        Calculate the forward propagation (sigmoid of W.X + b) for all
        examples in X and cache it in the private attribute A.
        """
        # X numpy array size = (nx - , m - )
        Z = np.dot(self.W, X) + self.b
        # Z = the (weight*activation)+bias for all data in the set
        A = 1/(1 + np.exp(-1 * Z))
        # applying the sigmoid function to Z (3brown1blue need to rewatch)
        self.__A = A
        return self.__A

    def cost(self, Y, A):
        """Calculates the cost of the model using logistic regression"""
        m = Y.shape[1]
        # 1.0000001 guards against log(0) when an activation saturates at 1.
        loss = -1 * (Y * np.log(A) + (1 - Y) * np.log(1.0000001 - A))
        cost = (1/m) * np.sum(loss)
        return cost

    def evaluate(self, X, Y):
        """Evaluates neuron's predictions: returns (labels, cost) with
        labels thresholded at 0.5."""
        predict = self.forward_prop(X)
        predict = np.where(predict < 0.5, 0, 1)
        return predict, self.cost(Y, self.__A)

    def gradient_descent(self, X, Y, A, alpha=0.05):
        """
        - Calculates one pass of gradient descent on the neuron
        - Gradient decent updates the weights and biases
        """
        m = Y.shape[1]
        W = self.__W
        b = self.__b
        # Standard logistic-regression gradients: Dz = A - Y.
        Dz = A - Y
        Dw = (1/m) * (Dz @ X.T)
        Db = (1/m) * np.sum(Dz)
        self.__W = W - alpha * Dw
        self.__b = b - alpha * Db

    def train(
            self, X, Y,
            iterations=5000, alpha=0.05, verbose=True,
            graph=True, step=100):
        """Train the neuron for `iterations` steps of gradient descent.

        Optionally prints the cost every `step` iterations (verbose) and
        plots cost vs iteration (graph). Returns evaluate(X, Y).
        Note: the loop runs iterations + 1 times and applies one more
        gradient step after the last recorded cost, so the final
        evaluation reflects that extra step.
        """
        c = self.cost
        if not isinstance(iterations, int):
            raise TypeError("iterations must be an integer")
        if iterations < 0:
            raise ValueError("iterations must be a positive integer")
        if not isinstance(alpha, float):
            raise TypeError("alpha must be a float")
        if alpha < 0:
            raise ValueError("alpha must be positive")
        # step is only validated when it will actually be used.
        if verbose or graph:
            if not isinstance(step, int):
                raise TypeError("step must be an integer")
            if step <= 0 or step > iterations:
                raise ValueError("step must be positive and <= iterations")
        it = []
        cost = []
        for i in range(iterations + 1):
            A = self.forward_prop(X)
            # Record/print at every `step`-th iteration and at the end.
            if step == 0 or i % step == 0 or i == iterations:
                if verbose:
                    print("Cost after {} iterations: {}".format(i, c(Y, A)))
                if graph:
                    it.append(i)
                    cost.append(self.cost(Y, A))
            self.gradient_descent(X, Y, A, alpha)
        it = np.array(it)
        cost = np.array(cost)
        if graph:
            plt.plot(it, cost)
            plt.xlabel("iteration")
            plt.ylabel("cost")
            plt.title("Training Cost")
            plt.show()
        return self.evaluate(X, Y)
| chriswill88/holbertonschool-machine_learning | supervised_learning/0x00-binary_classification/7-neuron.py | 7-neuron.py | py | 3,565 | python | en | code | 0 | github-code | 36 |
7707679326 | import os
import sys
import time
import copy
import random
from reprint import output
MAX_oo = 65535
MIN_MAX = 65280
MIN_oo = -65535
'''
print("1111111",end="")
print("\r222222",end="")
โณใ
โโโโโโ
โโโโโโโโโโโโโโโโโโโโโโ
โโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโญโฎโฏโฐโฑโฒโณโดโตโถโทโธโนโบโป
โผโฝโพโฟโโโโโโ
โโโโโโ
โโโโโโโโรจ]โโโโโโโ โกโขโฃโคโฅโฆโงโจโฉโชโซโฌโณ
โ โโโ โฌ โ โ โฉ โ โจโฏ โทโ โโ โโณโฅ๏น๏นโ
'''
turn = 0
max_win = 0
min_win = 0
Max=" ใ"
Min=" โณ "
empty=" "
symbols=[Max, Min, empty]
alpha=[MIN_oo,MIN_oo]
Chess_Board=[[2]*5, [2]*5, [2]*5, [2]*5, [2]*5]
def check_win(chess_board, role):
    """Return True if `role` owns a complete row, column, or diagonal
    of the 5x5 board, else False."""
    lines = [[chess_board[r][c] for c in range(5)] for r in range(5)]
    lines += [[chess_board[r][c] for r in range(5)] for c in range(5)]
    lines.append([chess_board[d][d] for d in range(5)])
    lines.append([chess_board[d][4 - d] for d in range(5)])
    for line in lines:
        if all(cell == role for cell in line):
            return True
    return False
def search(chess_board, role):
    """Count the lines (5 rows + 5 columns + 2 diagonals) that are still
    winnable by `role`, i.e. contain only `role`'s marks or empties (2)."""
    candidates = [[chess_board[r][c] for c in range(5)] for r in range(5)]
    candidates += [[chess_board[r][c] for r in range(5)] for c in range(5)]
    candidates.append([chess_board[d][d] for d in range(5)])
    candidates.append([chess_board[d][4 - d] for d in range(5)])
    value = 0
    for line in candidates:
        if all(cell == role or cell == 2 for cell in line):
            value += 1
    return value
def getGuess(chess_board, role):
    """Static evaluation of a board for `role`.

    Returns MAX_oo on a win for role, MIN_oo on a win for the enemy, and
    otherwise the difference between the number of lines still winnable
    by role and by the enemy.
    """
    enemy = (role + 1) % 2
    if check_win(chess_board, role):
        return MAX_oo
    if check_win(chess_board, enemy):
        return MIN_oo
    return search(chess_board, role) - search(chess_board, enemy)
def MinMax(role):
    """Play one move for `role` via depth-2 minimax with alpha pruning.

    Expands every empty cell as a candidate move, scores each by the
    opponent's best (minimizing) reply using getGuess, picks the move
    with the highest beta and commits it to the global Chess_Board
    (with a short sleep so the UI update is visible).

    Returns
    -------
    MIN_MAX when the board is full (a draw), 1 when the committed move
    wins for `role`, and 0 otherwise.

    Bug fix: the final test previously compared check_win()'s boolean
    result against MAX_oo (65535); True == 65535 is always False, so a
    win was never reported and the game could not end in a victory.
    """
    global Chess_Board
    # Generate one Max node per empty cell, seeded with beta = +infinity.
    open_list = []
    for i in range(5):
        for j in range(5):
            if Chess_Board[i][j] == 2:
                new_chess_board = copy.deepcopy(Chess_Board)
                new_chess_board[i][j] = role
                open_list.append([new_chess_board, MAX_oo])
    if len(open_list) == 0:
        return MIN_MAX
    # Min layer: for each candidate, let the enemy pick the reply that
    # minimizes the evaluation; prune once beta drops to alpha[role].
    for index, min_node in enumerate(open_list):
        alpha_beta_cut = False
        new_Chess_board = min_node[0]
        beta = min_node[1]
        for min_i in range(5):
            for min_j in range(5):
                if new_Chess_board[min_i][min_j] == 2:
                    min_chess_board = copy.deepcopy(new_Chess_board)
                    min_chess_board[min_i][min_j] = (role + 1) % 2
                    guess = getGuess(min_chess_board, role)
                    beta = min(beta, guess)
                    open_list[index][1] = beta
                    if beta <= alpha[role]:
                        alpha_beta_cut = True
                        break
            if alpha_beta_cut:
                break
        if alpha_beta_cut:
            continue
        alpha[role] = max(alpha[role], beta)
    # Commit the best-scoring move.
    open_list.sort(key=lambda x: x[1], reverse=True)
    status = open_list[0]
    Chess_Board = status[0]
    time.sleep(0.5)
    if check_win(Chess_Board, role):
        return 1
    else:
        return 0
output_list=[
"โโโโโฌโโโโฌโโโโฌโโโโฌโโโโ",
"โ{}โ{}โ{}โ{}โ{}โ",
"โโโโโผโโโโผโโโโผโโโโผโโโโค",
"โ{}โ{}โ{}โ{}โ{}โ",
"โโโโโผโโโโผโโโโผโโโโผโโโโค",
"โ{}โ{}โ{}โ{}โ{}โ",
"โโโโโผโโโโผโโโโผโโโโผโโโโค",
"โ{}โ{}โ{}โ{}โ{}โ",
"โโโโโผโโโโผโโโโผโโโโผโโโโค",
"โ{}โ{}โ{}โ{}โ{}โ",
"โโโโโดโโโโดโโโโดโโโโดโโโโ"
]
# Main game loop: redraw the board via reprint's in-place list output,
# alternate turns between Max (0) and Min (1) until someone wins or
# MinMax reports a full board (draw, signalled by MIN_MAX).
with output(output_type='list', initial_len=11) as out:
    while True:
        #with output(output_type='list', initial_len=11) as out:
        # Odd rows of the template hold cells; fill them with the symbols
        # for the corresponding board row. Even rows are static borders.
        for index,value in enumerate(output_list):
            if index%2 == 1:
                vals = [symbols[x] for x in Chess_Board[index//2]]
                out[index]=value.format(*vals)
            else:
                out[index]=value
        if max_win != 0 or min_win != 0:
            break
        # MinMax plays a move for the current side and returns the result
        # flag (1 = win, 0 = continue, MIN_MAX = draw/full board).
        turn_win = MinMax(turn)
        max_win = turn_win if turn==0 else 0
        min_win = turn_win if turn==1 else 0
        turn = (turn+1)%2
    if max_win==1:
        print("Max win!!!")
    if min_win==1:
        print("Min win!!!")
    if min_win==MIN_MAX or max_win==MIN_MAX:
        print("It ends in a draw!!!")
8868610859 | import logging
from aiogram import Dispatcher, types
from aiogram.dispatcher import FSMContext
import aiogram.utils.markdown as fmt
from aiogram.types.message import ContentType
from .. import userchoice
def check_none_name(name):
    """Return `name` unchanged, or an empty string when it is None.

    Used to safely join Telegram first/last names that may be missing.
    """
    if name is None:
        return ''
    return name
async def starting_message(message: types.Message, state: FSMContext):
"""
ะะตัะฒะฐั ะบะพะผะฐะฝะดะฐ, ะพะฝะฐ ะฟัะพะฒะตััะตั ัััะตััะฒะพะฒะฐะฝะธะต ะฟะพะปัะทะพะฒะฐัะตะปั ะธ ะทะฐะฟะพะปะฝัะตั ะตะณะพ ะดะฐะฝะฝัะต
:param message: ัะพะพะฑัะตะฝะธะต
:param state: ัะพััะพัะฝะธะต
"""
await state.finish()
first_name = check_none_name(message.from_user.first_name)
last_name = check_none_name(message.from_user.last_name)
user = userchoice.UserChoice(message.from_user.id, first_name + ' ' + last_name)
user.check_name()
logging.info(f'ะะพะปัะทะพะฒะฐัะตะปั {message.from_user.first_name} {message.from_user.last_name} ะทะฐะปะพะณะธะฝะธะปัั')
await message.answer("ะัะธะฒะตั! ะญัะพั ะฑะพั ััะฐะฒะฝะธะฒะฐะตั ะฟะพะทะฒะพะปัะตั ััะฐะฒะฝะธัั ัะฐะผะธะปะธะธ ะฟะพ "
"<a href='https://ru.wikipedia.org/wiki/"
"%D0%A0%D0%B5%D0%B9%D1%82%D0%B8%D0%BD%D0%B3_%D0%AD%D0%BB%D0%BE'>ัะตะนัะธะฝะณั ะญะปะพ.</a> "
"ะะปั ะฟะตัะตัะฝั ะบะพะผะฐะฝะด ะฝะฐะฑะตัะธ /help", parse_mode=types.ParseMode.HTML)
async def helping_message(message: types.Message):
"""
ะกะพะพะฑัะตะฝะธะต ะฒัะฒะพะดะธั ัััะตััะฒัััะธะต ะบะพะผะฐะฝะดั
:param message: ัะพะพะฑัะตะฝะธะต
"""
await message.answer(fmt.text("ะฏ ะทะฝะฐั ัะปะตะดัััะธะต ะบะพะผะฐะฝะดั:", "/rate - ะฒัะฑัะฐัั ะฑะพะปะตะต ัะผะตัะฝะพะณะพ ะธะท ะฟะฐัั",
"/rating - ะฟะพะบะฐะทะฐัั ัะฟะธัะพะบ ะปะธะดะตัะพะฒ", sep='\n'))
async def wrong_command_message(message: types.Message):
"""
ะกะพะพะฑัะตะฝะธะต ัะตะฐะณะธััะตั ะฝะฐ ะฝะตะฟัะฐะฒะธะปัะฝัะต ะบะพะผะฐะฝะดั
:param message: ัะพะพะฑัะตะฝะธะต
"""
logging.info(f'ะะพะปัะทะพะฒะฐัะตะปั {message.from_user.first_name} {message.from_user.last_name} ะฟะธัะตั {message.text}')
await message.answer("ะั ะฒะฒะตะปะธ ะฝะตะฒะตัะฝัั ะบะพะผะฐะฝะดั. ะะปั ัะพะณะพ, ััะพะฑั ัะทะฝะฐัั, "
"ะบะฐะบะธะต ะบะพะผะฐะฝะดั ะผะพะถะฝะพ ะธัะฟะพะปัะทะพะฒะฐัั, ะฝะฐะฑะตัะธัะต /help")
def register_handlers_common(dp: Dispatcher):
"""
ะ ะตะณะธัััะฐัะธั ะพัะฝะพะฒะฝัั
ัะพะพะฑัะตะฝะธะน ะฒ ะดะธัะฟะตััะตัะต
:param dp: ะดะธัะฟะตััะตั
"""
dp.register_message_handler(starting_message, commands="start", state="*")
dp.register_message_handler(helping_message, commands="help")
dp.register_message_handler(wrong_command_message, content_types=ContentType.ANY)
| KFeyn/naming_bot | app/handlers/common.py | common.py | py | 2,897 | python | ru | code | 0 | github-code | 36 |
37840139507 | import logging
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import (
menu_object, menu_return, menu_secondary, menu_setup
)
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from .classes import CredentialBackend
from .events import event_credential_edited, event_credential_used
from .links import (
link_credential_backend_selection, link_credential_delete,
link_credential_edit, link_credential_list,
link_credential_setup
)
from .permissions import (
permission_credential_delete, permission_credential_edit,
permission_credential_use, permission_credential_view
)
logger = logging.getLogger(name=__name__)
class CredentialsApp(MayanAppConfig):
app_namespace = 'credentials'
app_url = 'credentials'
has_rest_api = True
has_tests = True
name = 'mayan.apps.credentials'
verbose_name = _('Credentials')
def ready(self):
super().ready()
CredentialBackend.load_modules()
StoredCredential = self.get_model(model_name='StoredCredential')
EventModelRegistry.register(model=StoredCredential)
ModelEventType.register(
model=StoredCredential, event_types=(
event_credential_edited, event_credential_used
)
)
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=StoredCredential
)
SourceColumn(
attribute='internal_name', include_label=True, is_sortable=True,
source=StoredCredential
)
SourceColumn(
attribute='get_backend_class_label', include_label=True,
source=StoredCredential
)
ModelPermission.register(
model=StoredCredential, permissions=(
permission_credential_delete, permission_credential_edit,
permission_credential_view, permission_credential_use
)
)
menu_object.bind_links(
links=(link_credential_delete, link_credential_edit),
sources=(StoredCredential,)
)
menu_return.bind_links(
links=(link_credential_list,), sources=(
StoredCredential,
'credentials:stored_credential_backend_selection',
'credentials:stored_credential_create',
'credentials:stored_credential_list'
)
)
menu_secondary.bind_links(
links=(link_credential_backend_selection,), sources=(
StoredCredential,
'credentials:stored_credential_backend_selection',
'credentials:stored_credential_create',
'credentials:stored_credential_list'
)
)
menu_setup.bind_links(
links=(link_credential_setup,)
)
| salmabader/mayan-edms | mayan/apps/credentials/apps.py | apps.py | py | 3,033 | python | en | code | 0 | github-code | 36 |
37821183076 | import tensorflow as tf
import numpy as np
from tensorflow.python.ops.signal import window_ops
from scipy import stats
import decimal, math
import os, sys
import librosa
import soundfile as sf
import functools
import matplotlib.pyplot as plt
from matplotlib import style
from scipy.special import exp1
import math
class tensor_polar():
def __init__(self, N_d, N_s, K, f_s):
"""
Argument/s:
N_d - window duration (samples).
N_s - window shift (samples).
K - number of frequency bins.
f_s - sampling frequency.
"""
self.N_d = N_d
self.N_s = N_s
self.K = K
self.f_s = f_s
self.W = functools.partial(window_ops.hamming_window,
periodic=False)
self.ten = tf.cast(10.0, tf.float32)
self.one = tf.cast(1.0, tf.float32)
def polar_analysis(self, x):
"""
Polar-form acoustic-domain analysis.
Argument/s:
x - waveform.
Returns:
Short-time magnitude and phase spectrums.
"""
STFT = tf.signal.stft(x, self.N_d, self.N_s, self.K,
window_fn=self.W, pad_end=True)
return tf.abs(STFT), tf.math.angle(STFT)
def polar_synthesis(self, STMS, STPS):
"""
Polar-form acoustic-domain synthesis.
Argument/s:
STMS - short-time magnitude spectrum.
STPS - short-time phase spectrum.
Returns:
Waveform.
"""
STFT = tf.cast(STMS, tf.complex64) * tf.exp(1j * tf.cast(STPS, tf.complex64))
return tf.signal.inverse_stft(STFT, self.N_d, self.N_s, self.K, tf.signal.inverse_stft_window_fn(self.N_s, self.W))
def mmse_lsa_np(xi, gamma):
"""
Computes the MMSE-LSA gain function.
Numpy version:
v_1 = np.divide(xi, np.add(1.0, xi))
nu = np.multiply(v_1, gamma)
return np.multiply(v_1, np.exp(np.multiply(0.5, exp1(nu)))) # MMSE-LSA gain function.
Argument/s:
xi - a priori SNR.
gamma - a posteriori SNR.
Returns:
MMSE-LSA gain function.
"""
xi = np.where(xi == 0, np.finfo(float).eps, xi)
gamma = np.where(gamma == 0, np.finfo(float).eps, gamma)
v_1 = np.divide(xi, np.add(1.0, xi))
nu = np.multiply(v_1, gamma)
return np.multiply(v_1, np.exp(np.multiply(0.5, exp1(nu)))) # MMSE-LSA gain function.
class mcra(object):
def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, bin_num, delta, *tupleArg):
self.alpha_d = np.expand_dims(alpha_d,0)
self.alpha_s = np.expand_dims(alpha_s,0)
self.alpha_p = np.expand_dims(alpha_p,0)
if len(lambda_d.shape) == 2:
self.lambda_d = lambda_d
elif len(lambda_d.shape) == 1:
self.lambda_d = np.expand_dims(lambda_d,0)
self.bin_len = bin_num
a = np.hanning(7)
self.matrix = np.eye(self.bin_len)*a[3] \
+ np.eye(self.bin_len, k=-2)*a[1] + np.eye(self.bin_len, k=2)*a[5] \
+ np.eye(self.bin_len, k=-1)*a[2] + np.eye(self.bin_len, k=1)*a[4]
self.matrix = np.expand_dims(self.matrix, 0).repeat(self.lambda_d.shape[0], 0)
self.S = self.S_tmp = self.S_min = np.squeeze(np.matmul(self.matrix, np.expand_dims(self.lambda_d, -1)),-1)
self.frame_L = frame_L
self.delta = np.expand_dims(delta,0)
self.self_alpha_D_hat = np.expand_dims(alpha_d,0)
self.speech_present = np.expand_dims(np.zeros(self.bin_len, float),0)
self.snr_gammar = np.expand_dims(np.ones(self.bin_len, float)*0.1,0)
self.snr_xi = np.expand_dims(np.ones(self.bin_len, float)*0.1,0)
self.alpha_snr = 0.92
self.G_h=mmse_lsa_np(self.snr_xi, self.snr_gammar)
self.G_min = np.expand_dims(np.ones(self.bin_len, float) * 0.09,0)
def update_snr_dd(self, pwr):
snr_gammar_prev = self.snr_gammar
self.snr_gammar = pwr / self.lambda_d
self.snr_xi = self.alpha_snr * np.square(self.G_h) * snr_gammar_prev + (1 - self.alpha_snr) * np.maximum(
self.snr_gammar - 1, 0)
def update_S(self, pwr):
S_f = np.squeeze(np.matmul(self.matrix, np.expand_dims(pwr,-1)),-1)
self.S = self.alpha_s * self.S + (1 - self.alpha_s) * S_f
def tracking_S_win(self, current_frame):
if current_frame % self.frame_L == 0:
self.S_min = np.minimum(self.S, self.S_tmp)
self.S_tmp = self.S
else:
self.S_min = np.minimum(self.S, self.S_min)
self.S_tmp = np.minimum(self.S, self.S_tmp)
def update_speech_present(self):
S_ratio = self.S/self.S_min
p = np.array(S_ratio > self.delta).astype(int)
self.speech_present = self.alpha_p * self.speech_present + (1 - self.alpha_p) * p
def update_alpha_d(self):
self.alpha_D_hat = self.alpha_d + (1-self.alpha_d)*self.speech_present
def update_noise(self, pwr):
self.lambda_d = self.alpha_D_hat * self.lambda_d + (1 - self.alpha_D_hat) * pwr
def update_SNR_GH(self):
self.G_h = mmse_lsa_np(self.snr_xi, self.snr_gammar)
def tracking_noise(self, pwr, c_frame):
self.update_snr_dd(pwr)
self.update_S(pwr)
self.tracking_S_win(c_frame)
self.update_speech_present()
self.update_alpha_d()
self.update_noise(pwr)
self.update_SNR_GH()
return np.squeeze(self.lambda_d), np.squeeze(self.G_h), np.squeeze(self.speech_present)
def mmse_lsa(self, meg, c_frame):
pwr = np.square(meg)
lambda_d, G, P = self.tracking_noise(pwr, c_frame)
return np.squeeze(G * meg)
def omlsa(self, meg, c_frame):
pwr = np.square(meg)
lambda_d, G, P = self.tracking_noise(pwr, c_frame)
return np.squeeze(np.power(G, P) * np.power(self.G_min, (1 - P)) * meg)
class mcra_2(mcra):
def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta, gamma, beta):
super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta)
self.gamma = gamma
self.beta = beta
self.S_minus_one = self.S
def update_S_2(self,meg):
self.S_minus_one = self.S
self.update_S(meg)
def tracking_S_continue(self):
p = np.array(self.S_min < self.S).astype(int)
p_not = np.array(self.S_min >= self.S).astype(int)
self.S_min = self.S*p_not+(self.gamma * self.S_min + (1-self.gamma)*(self.S - self.beta * self.S_minus_one)/(1-self.beta))*p
def tracking_noise(self, pwr, c_frame):
self.update_snr_dd(pwr)
self.update_S_2(pwr)
self.tracking_S_continue()
self.update_speech_present()
self.update_alpha_d()
self.update_noise(pwr)
self.update_SNR_GH()
return self.lambda_d, self.G_h, self.speech_present
class imcra(mcra):
def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta, beta, b_min, gamma0, gamma1, zeta0):
super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta)
self.beta = beta
self.b_min = b_min
self.gamma0 = gamma0
self.gamma1 = gamma1
self.zeta0 = zeta0
self.S_hat = self.S
self.S_min_hat = self.S_min
self.S_tmp_hat = self.S_tmp
self.zero = np.zeros(self.bin_len, float)
self.ones = np.ones(self.bin_len, float)
self.gamma1minus1 = self.gamma1 - self.ones
self.alpha_s_hat = self.alpha_s * 1.2
self.frame_L_hat = frame_L * 0.5
def update_S_hat(self, pwr):
gamma_min = pwr/(self.b_min*self.S_min)
zeta = self.S/(self.b_min*self.S_min)
I_tmp = np.array(np.logical_and((gamma_min < self.gamma0), (zeta < self.zeta0))).astype(int)
win_I = np.matmul(self.matrix, I_tmp)
a_p = np.array(win_I == self.zero).astype(int)
a_p_not = np.array(win_I > self.zero).astype(int)
denominator = win_I + a_p
numerator = win_I*pwr + self.S_hat*a_p#_not
S_f = numerator/denominator
self.S_hat = self.alpha_s_hat * self.S_hat + (1-self.alpha_s_hat)*S_f
def tracking_S_win_hat(self, current_frame):
if current_frame % self.frame_L_hat == 0:
self.S_min_hat = np.minimum(self.S_hat, self.S_tmp_hat)
self.S_tmp_hat = self.S_hat
else:
self.S_min_hat = np.minimum(self.S_hat, self.S_min_hat)
self.S_tmp_hat = np.minimum(self.S_hat, self.S_tmp_hat)
def update_speech_present(self,pwr):
gamma_min_hat = pwr/(self.b_min*self.S_min_hat)
zeta_hat = self.S_hat/(self.b_min*self.S_min_hat)
a = np.array(np.logical_and((gamma_min_hat < self.ones),(zeta_hat < self.zeta0))).astype(int)
b = np.array(np.logical_and((zeta_hat < self.zeta0), np.logical_and((gamma_min_hat < self.gamma1), (gamma_min_hat > self.ones)))).astype(int)
q = a + b*(self.gamma1-gamma_min_hat)/self.gamma1minus1
c_x = 1+self.snr_xi
c_x = np.where(c_x == 0, np.finfo(float).eps, c_x)
v = np.true_divide(self.snr_xi*self.snr_gammar,c_x)
oneminusq = 1-q
oneminusq = np.where(oneminusq == 0, np.finfo(float).eps, oneminusq)
sp_reciprocal = 1+q*(1+self.snr_xi)*np.exp(-v)/oneminusq
sp_reciprocal = np.where(sp_reciprocal == 0, np.finfo(float).eps, sp_reciprocal)
self.speech_present = 1/sp_reciprocal
def tracking_noise(self, pwr, c_frame):
self.update_snr_dd(pwr)
self.update_S(pwr)
self.tracking_S_win(c_frame)
self.update_S_hat(pwr)
self.tracking_S_win_hat(c_frame)
self.update_speech_present(pwr)
self.update_alpha_d()
self.update_noise(pwr)
self.update_SNR_GH()
return np.squeeze(self.lambda_d), np.squeeze(self.G_h), np.squeeze(self.speech_present)
''''''
class mcra_tbrr(mcra):
def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, z_b, z_r, frame_L, bin_num, delta, *tupleArg):
super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, bin_num, delta)
self.mcra_zb = mcra(alpha_d=alpha_d, alpha_s=alpha_s, alpha_p=alpha_p, lambda_d=z_b, frame_L=frame_L, bin_num=bin_num,
delta=delta)
self.mcra_zr = mcra(alpha_d=alpha_d, alpha_s=alpha_s, alpha_p=alpha_p, lambda_d=z_r, frame_L=frame_L, bin_num=bin_num,
delta=delta)
self.Lambda_0 = 1.67
self.Lambda_1 = 1.81
self.gammar_0 = 4.6
self.gammar_0_minus_1 = 4.6-1
self.Omega_low = 1
self.Omega_high = 3
self.Omega_delta =self.Omega_high - self.Omega_low
self.betta = 1.47
def tracking_tbrr(self, pwr_b, pwr_bm, c_frame):
self.Q_zb,self.G_zb, _ = self.mcra_zb.tracking_noise(pwr_b, c_frame)
self.Q_zr,self.G_zr, _ = self.mcra_zr.tracking_noise(pwr_bm, c_frame)
self.Lambda_y = np.squeeze(self.mcra_zb.S/self.mcra_zb.lambda_d)
self.Lambda_bm = np.max(self.mcra_zr.S / self.mcra_zr.lambda_d,axis=0)
self.Omega = (self.mcra_zb.S - self.mcra_zb.lambda_d)/np.max(self.mcra_zr.S - self.mcra_zr.lambda_d,axis=0)
H0 = np.array(self.Lambda_y <= self.Lambda_0).astype(int)
H0_not_mask = 1 - H0
H1_tmp = np.array(self.Lambda_bm <= self.Lambda_1).astype(int)
H1 = H0_not_mask* H1_tmp
H1_not_mask = 1 - H1
Hr = H0_not_mask * H1_not_mask
H0t = np.logical_or(np.array(self.Omega < self.Omega_low), np.array(self.snr_gammar < 1)).astype(int)
H0t_tbrr = H0t * Hr
H0t_tbrr_not_mask = 1 - H0t_tbrr
H_tbrr_mask = Hr * H0t_tbrr_not_mask
H1_tbrr = np.logical_or(np.array(self.Omega > self.Omega_high), np.array(self.snr_gammar > self.gammar_0)).astype(int)
H1_tbrr_r = H1_tbrr * H_tbrr_mask
H1_tbrr_r_not_mask = 1 - H1_tbrr_r
Hr_tbrr_mask = H_tbrr_mask * H1_tbrr_r_not_mask
r_tbrr = np.maximum((self.gammar_0 - self.snr_gammar)/self.gammar_0_minus_1, (self.Omega_high-self.Omega)/self.Omega_delta)
Hr_tbrr = r_tbrr * Hr_tbrr_mask
self.q_tbrr = H0+H0t_tbrr+Hr_tbrr
def update_speech_present(self):
c_x = 1 + self.snr_xi
c_x = np.where(c_x == 0, np.finfo(float).eps, c_x)
v = np.true_divide(self.snr_xi * self.snr_gammar, c_x)
oneminusq = 1 - self.q_tbrr
oneminusq = np.where(oneminusq == 0, np.finfo(float).eps, oneminusq)
sp_reciprocal = 1 + self.q_tbrr * (1 + self.snr_xi) * np.exp(-v) / oneminusq
sp_reciprocal = np.where(sp_reciprocal == 0, np.finfo(float).eps, sp_reciprocal)
self.speech_present = 1 / sp_reciprocal
def tracking_noise(self, pwr, pwr_b, pwr_bm, c_frame):
self.update_snr_dd(pwr)
#self.update_S(pwr)
#self.tracking_S_win(c_frame)
self.tracking_tbrr(pwr_b, pwr_bm, c_frame)
self.update_speech_present()
self.update_alpha_d()
self.update_noise(pwr)
self.update_SNR_GH()
return self.lambda_d, self.G_h, self.speech_present
def omlsa(self, meg, meg_b, meg_bm, c_frame):
pwr = np.square(meg)
pwr_b = np.square(meg_b)
pwr_bm = np.square(meg_bm)
lambda_d, G, P = self.tracking_noise(pwr, pwr_b, pwr_bm,c_frame)
return np.squeeze(np.power(G, P) * np.power(self.G_min, (1 - P)) * meg)
def ensures_dir(directory: str):
if len(directory) > 0 and not os.path.exists(directory):
os.makedirs(directory)
def expw(x):
c_exp_x0 = [
[1.0, 1.0644944589178593, 1.1331484530668263, 1.2062302494209807,
1.2840254166877414, 1.3668379411737963, 1.4549914146182013, 1.5488302986341331,
1.6487212707001282, 1.7550546569602985, 1.8682459574322223, 1.988737469582292,
2.117000016612675, 2.2535347872132085, 2.398875293967098, 2.553589458062927],
[1.0, 1.0039138893383475, 1.007843097206448, 1.0117876835593316,
1.0157477085866857, 1.0197232327137742, 1.023714316602358, 1.0277210211516217,
1.0317434074991028, 1.035781537021624, 1.03983547133623, 1.0439052723011284,
1.0479910020166328, 1.0520927228261099, 1.056210497316932, 1.0603443883214314],
[1.0, 1.0002441704297478, 1.0004884004786945, 1.000732690161397,
1.0009770394924165, 1.0012214484863171, 1.0014659171576668, 1.001710445521037,
1.0019550335910028, 1.0021996813821428, 1.002444388909039, 1.0026891561862772,
1.0029339832284467, 1.0031788700501403, 1.0034238166659546, 1.003668823090489]
]
if x<-709: return 0
elif x>709: return 1.7E+308
s = x * np.log2(np.e)
integer = np.floor(s)
decimal = (s - np.floor(s))*np.log(2)
ep = decimal * 16
q0 = int(np.floor(ep))
ep = ep - np.floor(ep)
ep1 = ep * 16
q1 = int(np.floor(ep1))
ep1 = ep1 - np.floor(ep1)
ep2 = ep1 * 16
q2 = int(np.floor(ep2))
ep2 = ep2 - np.floor(ep2)
h = c_exp_x0[0][q0] * c_exp_x0[1][q1] * c_exp_x0[2][q2]
h1 = np.exp(q0/16)*np.exp(q1/(16*16))*np.exp(q2/(16*16*16))
w = ep2 / 4096
ew = 1 + w + w * w / 2 + w * w * w / 6 + w * w * w * w / 24
eww = np.exp(w)
decimal_final = h * ew
result = decimal_final * 2**integer
golden = np.exp(x)
goldenn = 2**(np.log2(np.e)*x)
pass
def loge(x):
#if x <= 0: return -1.7E+308
#elif x > 100000000: return 18.420680743952367
decimal = 0
shift = 1
inverselist = np.flipud(np.arange(52))
for i in inverselist:
mask = 1 << i
shift /= 2
if mask & x:
decimal += shift
if __name__ == '__main__':
pi = math.pi
M = 256
b = np.exp(1j)
W = np.exp((2*pi/M)*1j)
nature = np.arange(M)
#expw(3.5)
loge(0x5000000000000)
DFT_Matrix = np.ones([M,M],np.complex)
for row in range(M):
DFT_Matrix[row]=W**(-nature*(row))
def exp11(x):
return np.exp(-x)/x
x = np.linspace(0, 8, 256)
y = exp11(x)
plt.plot(x, y)
plt.show()
"""
ไธ่งไฟกๅท
def triangle_wave(x, c, hc): # ๅน
ๅบฆไธบhc๏ผๅฎฝๅบฆไธบc,ๆๅบฆไธบhc/2c็ไธ่งๆณข
if x >= c / 2:
r = 0.0
elif x <= -c / 2:
r = 0.0
elif x > -c / 2 and x < 0:
r = 2 * x / c * hc + hc
else:
r = -2 * x / c * hc + hc
return r
x = np.linspace(-3, 3, 256)
y = np.array([triangle_wave(t, 4.0, 1.0) for t in x])
plt.ylim(-0.2, 1.2)
plt.plot(x, y)
plt.show()
#Y = DFT_Matrix*y
Y = np.matmul(DFT_Matrix, y)
y_idx = np.linspace(0, 2*pi, 256)
plt.plot(y_idx, np.absolute(Y))
"""
"""
็ฉๅฝข่ๅฒไฟกๅท
def rect_wave(x, c, c0): # ่ตท็นไธบc0๏ผๅฎฝๅบฆไธบc็็ฉๅฝขๆณข
if x >= (c + c0):
r = 0.0
elif x < c0:
r = 0.0
else:
r = 1
return r
x = np.linspace(-2, 4, 256)
y = np.array([rect_wave(t, 2.0, -1.0) for t in x])
plt.ylim(-0.2, 4.2)
plt.plot(x, y)
plt.show()
Y = np.matmul(DFT_Matrix, y)
y_idx = np.linspace(0, 2*pi, 256)
plt.plot(y_idx, np.absolute(Y))
"""
from sympy import plot, sin, Symbol
x = np.linspace(0, 8, 256)
y = np.array([sin(np.pi/4*t) for t in x])
plt.ylim(-1.2, 6.2)
plt.plot(x, y)
plt.show()
y=y.astype(np.float64)
Y = np.matmul(DFT_Matrix, y)
y_idx = np.linspace(0, 2 * pi, 256)
plt.plot(y_idx, np.absolute(Y))
dd = 128
nature1 = np.arange(dd)
H_0 = np.exp(-(2 * pi * nature1 / dd) * 1j)
W = np.exp((2 * pi / dd) * 1j)
ranges = range(1, dd)
H_w = np.zeros(dd,np.complex)
for omega in nature1:
#tm = W**(-(nature1*omega))
#tm = np.exp(-(2 * pi * nature1 / dd + omega) * 1j)
tm = np.exp(-(2 * pi * nature1 * omega / dd) * 1j)
H_w[omega] = np.sum(tm)
abs_H = np.abs(H_w)
plt.figure(figsize=(20, 10))
plt.plot(ranges, abs_H[1:], 'b--o', label='H(jw)')
plt.show()
print("Processing observations...") | golfbears/DeepXi | bak/multiphase.py | multiphase.py | py | 18,079 | python | en | code | null | github-code | 36 |
17585811312 | import struct
import typing as t
from pathlib import Path
from starwhale import Link, GrayscaleImage
_TItem = t.Generator[t.Dict[str, t.Any], None, None]
def iter_mnist_item() -> _TItem:
root_dir = Path(__file__).parent.parent / "data"
with (root_dir / "t10k-images-idx3-ubyte").open("rb") as data_file, (
root_dir / "t10k-labels-idx1-ubyte"
).open("rb") as label_file:
_, data_number, height, width = struct.unpack(">IIII", data_file.read(16))
_, label_number = struct.unpack(">II", label_file.read(8))
print(
f">data({data_file.name}) split data:{data_number}, label:{label_number} group"
)
image_size = height * width
for i in range(0, min(data_number, label_number)):
_data = data_file.read(image_size)
_label = struct.unpack(">B", label_file.read(1))[0]
yield {
"img": GrayscaleImage(
_data,
display_name=f"{i}",
shape=(height, width, 1),
),
"label": _label,
}
class LinkRawDatasetProcessExecutor:
_endpoint = "10.131.0.1:9000"
_bucket = "users"
def __iter__(self) -> _TItem:
root_dir = Path(__file__).parent.parent / "data"
with (root_dir / "t10k-labels-idx1-ubyte").open("rb") as label_file:
_, label_number = struct.unpack(">II", label_file.read(8))
offset = 16
image_size = 28 * 28
uri = f"s3://{self._endpoint}/{self._bucket}/dataset/mnist/t10k-images-idx3-ubyte"
for i in range(label_number):
_data = GrayscaleImage(
link=Link(
f"{uri}",
offset=offset,
size=image_size,
),
display_name=f"{i}",
shape=(28, 28, 1),
)
_label = struct.unpack(">B", label_file.read(1))[0]
yield {"img": _data, "label": _label}
offset += image_size
| star-whale/starwhale | example/mnist/mnist/dataset.py | dataset.py | py | 2,105 | python | en | code | 171 | github-code | 36 |
650843683 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class AqdyPipeline(object):
def process_item(self, item, spider):
xfplay_link = {}
xfplay_link["_id"] = self.count
xfplay_link["play_pic_url"] = item["play_pic_url"]
xfplay_link["xfplay_link"] = item["xfplay_link"]
# ๆๅ
ฅๆฐๆฎๅบ
# self.collection.insert(xfplay_link)
self.count += 1
return item
def open_spider(self,spider):
self.client = MongoClient()
self.collection = self.client["pySpider"]["lusi"]
self.count = 1
print("ๆฐๆฎๅบไปฅ่ฟๆฅ...")
def close_spider(self,spider):
self.client.close()
print("ๆฐๆฎๅบ่ฟๆฅๅ
ณ้ญ")
| jihongzhu/python- | aqdy/aqdy/pipelines.py | pipelines.py | py | 887 | python | en | code | 0 | github-code | 36 |
4104421635 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:27:48 2018
@author: anup
"""
from elasticsearch import Elasticsearch
from bs4 import BeautifulSoup as BS
import glob
from preprocess_class import EsPreProcessor
import warnings
from html_processing import *
warnings.filterwarnings('ignore')
from preprocessor_collection import *
def es_index_create(files_location, # location of html files
index_1_params, # name of index 1
pre_processor,
headers_list): # preprocessor
file_list = glob.glob(files_location + '/*.html')
file_names = [filename.split("/")[-1].split('.')[0] for filename in file_list]
# create index in elasticsearch with necessary field limit
es = Elasticsearch() # initialize elasticsearch
doc = {"settings": {"index.mapping.total_fields.limit": 10000}} # setting the field limit
es.indices.create(index = index_1_params[0], body = doc)
es_doc_id = 0
es_doc_id_content_dict = {}
for file_no in range(len(file_list)):
with open(file_list[file_no]) as f:
temp_html_file = [line.rstrip() for line in f]
html_file = ''
html_strip_file = ''
for line in temp_html_file:
html_file += (line + '\n')
html_strip_file += (line)
html = html_strip_file
# extract contents under the headers
section_dict_headers_contents = header_content_extraction(html,headers_list,file_names[file_no])
# assembling contents for the index
section_dict_1 = {**section_dict_headers_contents}
for key, value in section_dict_1.items():
section_dict_1[key] = EsPreProcessor.es_preprocessor_manager(value, pre_processor).es_pre_processed_corpus
for key, value in section_dict_1.items():
es_doc_id += 1
es_doc_id_content_dict[str(es_doc_id)] = eval(key)
es_update_dict = {}
es_update_dict['content'] = value
es.index(index=index_1_params[0], doc_type=index_1_params[1], id=es_doc_id, body = es_update_dict)
return es_doc_id_content_dict
def es_search_processor(es_search_doctype,
es_search_index,
es_search_body):
es_search = Elasticsearch()
es_user_query_search_result = es_search.search(index = es_search_index,
doc_type = es_search_doctype,
body = es_search_body)
return es_user_query_search_result
| anupkhalam/es_xd_standalone | html_indexer.py | html_indexer.py | py | 2,752 | python | en | code | 0 | github-code | 36 |
31068607886 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 15:00
# @Author : cold
# @File : python_mysql.py
from configparser import ConfigParser
import os
class MySQLConfig(ConfigParser):
def __init__(self, config, **kwargs):
# ConfigParser.__init__(self,allow_no_value=True)
super(MySQLConfig, self).__init__(allow_no_value=True)
self.config = config
self.mysql_vars = {}
if os.path.exists(self.config):
self.read(self.config)
self.get_mysqld_vars()
else:
self.get_default_vars()
self.set_mysqld_vars(kwargs)
def set_mysqld_vars(self, kwargs):
for k, v in kwargs.items():
setattr(self,k,v)
self.mysql_vars[k] = str(v)
def get_mysqld_vars(self):
rst = {}
options = self.options('mysqld')
for o in options:
rst[o] = self.get('mysqld', o)
self.set_mysqld_vars(rst)
def get_default_vars(self):
default = {
'port':'3306',
'socket': '/tmp/mysql.sock',
'log-bin':'mysql-bin',
'basedir': '/usr/local/mysql',
'datadir':'/data/mysql',
'binlog_format':'mixed',
'server-id':'1',
'user':'mysql',
}
self.set_mysqld_vars(default)
def set_vars(self,k,v):
self.mysql_vars[k] = v
def save(self):
if not self.has_section('mysqld'):
self.add_section('mysqld')
for k,v in self.mysql_vars.items():
# print(k,v)
self.set('mysqld', k ,v)
with open(self.config,'w') as fd:
# print(fd)
self.write(fd)
if __name__ == '__main__':
mc = MySQLConfig(r'C:\Users\cold\Desktop\my3.cnf', mx=1360)
mc.set_vars('skip-grant1', None)
mc.save()
print(mc.port)
print(mc.socket) | liangtaos/mysqlmanage | python_mysql.py | python_mysql.py | py | 1,876 | python | en | code | 0 | github-code | 36 |
1511337021 | import pandas as pd
import datetime
def load_analysis(analysis_id, data, metadata_record, projects, es, framework):
load_data(data, analysis_id, es, framework)
if framework == 'scp':
metadata_record['cell_count'] = data['annotation_metrics'].shape[0]
elif framework == 'mondrian':
metadata_record['cell_count'] = data['hmmcopy_metrics'].shape[0]
else:
raise Exception(f"Unknown framework, expected 'scp' or 'mondrian', but got '{framework}'")
es.load_record(metadata_record, analysis_id, es.ANALYSIS_ENTRY_INDEX)
missing_labels = es.get_missing_labels()
for label in missing_labels:
es.add_label(label)
es.add_analysis_to_projects(analysis_id, projects)
def clean_analysis(analysis_id, es):
clean_data(analysis_id, es)
es.delete_record_by_id(es.ANALYSIS_ENTRY_INDEX, analysis_id)
es.remove_analysis_from_projects(analysis_id)
def clean_data(analysis_id, es):
for data_type, get_data in GET_DATA.items():
es.delete_index(f"{analysis_id.lower()}_{data_type}")
def process_analysis_entry(analysis_id, library_id, sample_id, description, metadata):
record = {
**metadata
}
record['timestamp'] = datetime.datetime.now().isoformat()
record["dashboard_id"] = analysis_id
record["jira_id"] = analysis_id
record["dashboard_type"] = "single"
record["library_id"] = library_id
record["sample_id"] = sample_id
record["description"] = description
return record
def load_analysis_entry(analysis_id, library_id, sample_id, description, metadata, es):
record = process_analysis_entry(analysis_id, library_id, sample_id, description, metadata)
es.load_record(record, analysis_id, es.ANALYSIS_ENTRY_INDEX)
def load_data(data, analysis_id, es, framework):
"""Load dataframes"""
for data_type, get_data in GET_DATA.items():
df = get_data(data, framework)
es.load_df(df, f"{analysis_id.lower()}_{data_type}")
def get_qc_data(hmmcopy_data, framework=None):
if framework == 'scp':
data = hmmcopy_data['annotation_metrics']
elif framework == 'mondrian':
data = hmmcopy_data['hmmcopy_metrics']
data.rename(columns={'clustering_order': 'order', 'condition': 'experimental_condition'}, inplace=True)
else:
raise Exception(f"Unknown framework, expected 'scp' or 'mondrian', but got '{framework}'")
data['percent_unmapped_reads'] = data["unmapped_reads"] / data["total_reads"]
data['is_contaminated'] = data['is_contaminated'].apply(
lambda a: {True: 'true', False: 'false'}[a])
return data
def get_segs_data(hmmcopy_data, framework=None):
data = hmmcopy_data['hmmcopy_segs'].copy()
data['chrom_number'] = create_chrom_number(data['chr'])
return data
def get_bins_data(hmmcopy_data, framework=None):
data = hmmcopy_data['hmmcopy_reads'].copy()
data['chrom_number'] = create_chrom_number(data['chr'])
return data
def get_gc_bias_data(hmmcopy_data, framework=None):
data = hmmcopy_data['gc_metrics']
gc_cols = list(range(101))
gc_bias_df = pd.DataFrame(columns=['cell_id', 'gc_percent', 'value'])
for n in gc_cols:
new_df = data.loc[:, ['cell_id', str(n)]]
new_df.columns = ['cell_id', 'value']
new_df['gc_percent'] = n
gc_bias_df = gc_bias_df.append(new_df, ignore_index=True)
return gc_bias_df
GET_DATA = {
f"qc": get_qc_data,
f"segs": get_segs_data,
f"bins": get_bins_data,
f"gc_bias": get_gc_bias_data,
}
chr_prefixed = {str(a): '0' + str(a) for a in range(1, 10)}
def create_chrom_number(chromosomes):
chrom_number = chromosomes.map(lambda a: chr_prefixed.get(a, a))
return chrom_number
| shahcompbio/alhenaloader | alhenaloader/load.py | load.py | py | 3,730 | python | en | code | 0 | github-code | 36 |
26868051842 | """A perfect power is a classification of positive integers:
In mathematics, a perfect power is a positive integer that can be expressed as an integer power of another positive integer.
More formally, n is a perfect power if there exist natural numbers m > 1, and k > 1 such that mk = n.
Your task is to check wheter a given integer is a perfect power. If it is a perfect power, return a pair m and k with mk = n as a proof.
Otherwise return Nothing, Nil, null, NULL, None or your language's equivalent.
Note: For a perfect power, there might be several pairs. For example 81 = 3^4 = 9^2, so (3,4) and (9,2) are valid solutions.
However, the tests take care of this, so if a number is a perfect power, return any pair that proves it.
Examples
isPP(4) => [2,2]
isPP(9) => [3,2]
isPP(5) => None
"""
# too slow...
"""def isPP(n):
for i in range(2,n//2):
for j in range(2,n//2):
if i**j == n:
return [i, j]
return None"""
# better, but still too slow...
"""def isPP(n):
for j in range(2, n//2 + 1):
if n**(1/j) in range(2, n//2 + 1):
return [int(n**(1/j)), j]
return None"""
# some clever guy's solution
"""
from math import ceil, log, sqrt
def isPP(n):
for b in xrange(2, int(sqrt(n)) + 1):
e = int(round(log(n, b)))
if b ** e == n:
return [b, e]
return None
"""
# my solution, fast enough! ;)
def isPP(n):
for i in range(2, n//2 + 1):
tmp = n
count = 0
while tmp != 1:
tmp /= i
count += 1
if tmp % i != 0:
break
if tmp == 1:
return [i, count]
return None
# let's try it!
print(isPP(4))
print(isPP(16))
pp = [4, 8, 9, 16, 25, 27, 32, 36, 49, 64, 81, 100, 121, 125, 128, 144, 169, 196, 216, 225, 243, 256, 289, 324, 343, 361, 400, 441, 484]
for item in pp:
print(isPP(item)) | DavorKandic/from_codewars | perfect_power.py | perfect_power.py | py | 1,898 | python | en | code | 0 | github-code | 36 |
10180408027 | #!/usr/bin/python3
'''
Core Flask App
'''
from flask import Flask, jsonify, make_response
from models import storage
from api.v1.views import app_views
from os import getenv
app = Flask(__name__)
app.register_blueprint(app_views)
app.url_map.strict_slashes = False
@app.teardown_appcontext
def closeStorage(ob):
    '''Release the storage session at the end of every request.

    `ob` is the exception (or None) Flask passes to teardown callbacks.
    '''
    storage.close()
@app.errorhandler(404)
def pageNotFound404(error):
    '''Return a JSON body for unknown routes instead of the HTML default.'''
    payload = jsonify({"error": "Not found"})
    return make_response(payload, 404)
if __name__ == "__main__":
    # Fall back to defaults when the HBNB_API_* variables are unset or empty.
    host = getenv('HBNB_API_HOST') or '0.0.0.0'
    port = getenv('HBNB_API_PORT') or '5000'
    app.run(host=host, port=port, threaded=True)
| jamesAlhassan/AirBnB_clone_v3 | api/v1/app.py | app.py | py | 746 | python | en | code | 0 | github-code | 36 |
16185556027 | import string
import sqlalchemy.sql as sasql
from ..util import random_string, sha256_hash
from ..adapter.repository import UserRepo, UserFollowRepo
from .exception import UsecaseException, NotFoundException
class UserUsecase:
    """Business logic for user accounts, follow relations and verification codes."""

    # In-memory verification-code stores keyed by "<type>_<mobile-or-email>".
    # NOTE: these are class-level dicts, shared by every UserUsecase instance.
    _mobile_verify_codes = {}
    _email_verify_codes = {}

    def __init__(self, config: dict, user_repo: UserRepo,
                 user_follow_repo: UserFollowRepo):
        self.config = config
        self.user_repo = user_repo
        self.user_follow_repo = user_follow_repo

    async def create_user(self, **data):
        """Create a user, enforcing username uniqueness; salts and hashes the password."""
        if (await self.info_by_username(data['username'])) is not None:
            raise UsecaseException("็จๆทๅ้ๅค")
        data['salt'] = random_string(64)
        data['password'] = sha256_hash(data['password'], data['salt'])
        return await self.user_repo.create(**data)

    async def modify_user(self, id, **data):
        """Update user fields, enforcing uniqueness of username/mobile/email."""
        if data.get('username') is not None and (await self.info_by_username(data['username'])) is not None:
            raise UsecaseException("็จๆทๅ้ๅค")
        if data.get('mobile') is not None and (await self.info_by_mobile(data['mobile'])) is not None:
            raise UsecaseException("ๆๆบ้ๅค")
        if data.get('email') is not None and (await self.info_by_email(data['email'])) is not None:
            raise UsecaseException("้ฎ็ฎฑ้ๅค")
        if data.get('password') is not None:
            # BUG FIX: self.info() is a coroutine and was previously not
            # awaited, which made user['salt'] fail at runtime.
            user = await self.info(id)
            data['password'] = sha256_hash(data['password'], user['salt'])
        return await self.user_repo.modify(id, **data)

    async def info(self, id):
        """Fetch a user by id; None id yields None, unknown id raises NotFoundException."""
        if id is None:
            return None
        user = await self.user_repo.info(id)
        if user is None:
            raise NotFoundException('็จๆทๆชๆพๅฐ')
        return user

    async def info_by_username(self, username):
        """Look up a user by username; returns None when absent."""
        return await self.user_repo.info(username, 'username')

    async def info_by_mobile(self, mobile):
        """Look up a user by mobile number; returns None when absent."""
        return await self.user_repo.info(mobile, 'mobile')

    async def info_by_email(self, email):
        """Look up a user by email address; returns None when absent."""
        return await self.user_repo.info(email, 'email')

    async def infos(self, ids):
        """Batch-fetch users by a list of ids."""
        return await self.user_repo.infos(ids)

    async def list(self, *, limit=None, offset=None):
        """Page through all users."""
        return await self.user_repo.list(limit=limit, offset=offset)

    async def follow(self, follower_id, following_id):
        """Create the follow edge follower_id -> following_id."""
        return await self.user_follow_repo.create(
            follower_id=follower_id, following_id=following_id)

    async def unfollow(self, follower_id, following_id):
        """Delete the follow edge follower_id -> following_id."""
        await self.user_follow_repo.execute(
            sasql.delete(self.user_follow_repo.table).
            where(sasql.and_(
                self.user_follow_repo.table.c.follower_id == follower_id,
                self.user_follow_repo.table.c.following_id == following_id)))

    async def following(self, user_id, limit=None, offset=None):
        """Page through the users that *user_id* follows, newest edge first."""
        from_ = self.user_repo.table.join(
            self.user_follow_repo.table,
            self.user_follow_repo.table.c.following_id == self.user_repo.table.c.id)
        where = self.user_follow_repo.table.c.follower_id == user_id
        order_by = self.user_follow_repo.table.c.id.desc()
        return await self.user_repo.list(
            from_=from_, where=where, order_by=order_by, limit=limit,
            offset=offset)

    async def follower(self, user_id, limit=None, offset=None):
        """Page through the users that follow *user_id*, newest edge first."""
        from_ = self.user_repo.table.join(
            self.user_follow_repo.table,
            self.user_follow_repo.table.c.follower_id == self.user_repo.table.c.id)
        where = self.user_follow_repo.table.c.following_id == user_id
        order_by = self.user_follow_repo.table.c.id.desc()
        return await self.user_repo.list(
            from_=from_, where=where, order_by=order_by, limit=limit,
            offset=offset)

    async def is_following_users(self, follower_id, following_ids):
        """Return one bool per entry of following_ids: does follower_id follow it?"""
        valid_ids = [v for v in following_ids if v is not None]
        if valid_ids:
            # NOTE(review): the query filters on following_ids (which may
            # contain None) rather than valid_ids — confirm this is intended.
            result = await self.user_follow_repo.execute(
                self.user_follow_repo.table.select()
                .where(sasql.and_(
                    self.user_follow_repo.table.c.follower_id == follower_id,
                    self.user_follow_repo.table.c.following_id.in_(following_ids))))
            d = {v['following_id']: True for v in await result.fetchall()}
        else:
            d = {}
        return [d.get(v, False) for v in following_ids]

    async def send_mobile_verify_code(self, type, mobile):
        """Generate (or reuse) a 6-digit SMS code for (type, mobile) and return it."""
        key = '{}_{}'.format(type, mobile)
        code = self._mobile_verify_codes.get(key)
        if code is None:
            code = random_string(6, string.digits)
            # Simulated send; a real deployment would call a third-party
            # SMS API here to deliver the code.
            self._mobile_verify_codes[key] = code
        return code

    async def check_mobile_verify_code(self, type, mobile, code):
        """Validate and consume a previously issued SMS code."""
        key = '{}_{}'.format(type, mobile)
        sended = self._mobile_verify_codes.get(key)
        if sended is None or sended != code:
            return False
        del self._mobile_verify_codes[key]
        return True

    async def send_email_verify_code(self, type, email):
        """Generate (or reuse) a 6-digit email code for (type, email) and return it."""
        key = '{}_{}'.format(type, email)
        code = self._email_verify_codes.get(key)
        if code is None:
            code = random_string(6, string.digits)
            self._email_verify_codes[key] = code
        # TODO: call a third-party API to actually send the verification email.
        return code

    async def check_email_verify_code(self, type, email, code):
        """Validate and consume a previously issued email code."""
        key = '{}_{}'.format(type, email)
        sended = self._email_verify_codes.get(key)
        if sended is None or sended != code:
            return False
        del self._email_verify_codes[key]
        return True
| jaggerwang/sanic-in-practice | weiguan/usecase/user.py | user.py | py | 5,796 | python | en | code | 42 | github-code | 36 |
28775606066 | #!/usr/bin/env python3.7
# Soubor: view.py
# Datum: 25.03.2019 13:11
# Autor: Marek Noลพka, nozka <@t> spseol <d.t> cz
# Licence: GNU/GPL
############################################################################
from . import app, socketio
from flask import (render_template,
# Markup,
# request,
flash,
# redirect,
# session
)
import threading
import serial
# NOTE(review): this rebinds the name of the `serial` module to a port
# instance; it works, but a distinct name (e.g. `port`) would be clearer.
serial = serial.Serial('/dev/ttyUSB0')
############################################################################
def read_loop():
    """Forward every line read from the serial port to socket.io clients."""
    while True:
        cislo = serial.readline()
        print('@@@@@@@@@@@', cislo)
        socketio.emit('input', {'data': cislo.decode('ascii')})

# Use the public threading API instead of the private helper
# threading._start_new_thread; daemon=True keeps the old "do not block
# interpreter exit" behaviour of _thread-spawned threads.
threading.Thread(target=read_loop, daemon=True).start()
############################################################################
@app.route('/')
def index():
    # Flash a greeting, then render the single-page UI.
    flash('ahoj')
    return render_template('base.html')
@socketio.on('ahoj')
def ahoj(data=None):
    # Debug handler: log whatever the client sent on the 'ahoj' event.
    print(data)
@socketio.on('connected')
def connected(data):
    # Log each client connection announcement.
    print('** Conected **')
| MarrekNozka/socketio-experiment | webface/routes.py | routes.py | py | 1,116 | python | de | code | 0 | github-code | 36 |
40806340636 | """
Problem 30: Digit fifth powers
https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written
as the sum of fourth powers of their digits:
1634 = 1^4 + 6^4 + 3^4 + 4^4
8208 = 8^4 + 2^4 + 0^4 + 8^4
9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
"""
import pytest
@pytest.mark.parametrize('test_input_number,expected_result', [
    (1634, True),
    (8208, True),
    (9474, True),
    (1234, False),
    (42, False),
    (9876, False),
    (3827, False),
    (2, False),
])
def test_can_be_written_as_sum_of_nth_power(test_input_number, expected_result):
    """Each number is (or is not) the sum of the 4th powers of its digits."""
    from src.p030_digit_fifth_powers import can_be_written_as_sum_of_nth_power

    assert can_be_written_as_sum_of_nth_power(test_input_number, 4) == expected_result
def test_get_numbers_that_can_be_written_as_sum_of_nth_power():
    """The only numbers below 1e5 equal to the sum of 4th powers of their digits."""
    from src.p030_digit_fifth_powers import get_numbers_that_can_be_written_as_sum_of_nth_power

    result = list(get_numbers_that_can_be_written_as_sum_of_nth_power(4, int(1e5)))
    assert result == [1634, 8208, 9474]
| FranzDiebold/project-euler-solutions | test/test_p030_digit_fifth_powers.py | test_p030_digit_fifth_powers.py | py | 1,404 | python | en | code | 1 | github-code | 36 |
70863160103 | import json
from datetime import datetime
from dataclasses import dataclass
from tabulate import tabulate
import requests
from exceptions import WrongCommandFormat
from tools import \
format_task, \
http_response_to_str, \
FORMATTED_TASK_COLUMNS
from config import URL, HELP_MSG
@dataclass
class HandlerReturn:
    # Raw HTTP status/summary string, or None for purely local commands.
    http_response: str
    # Text to show the user, or None when there is nothing to print.
    handler_response: str
def get_all_tasks(command):
    """Handle the "list all tasks" command and render the tasks as a table."""
    if len(command) != 1:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ะตะดะธะฝะธัะต.')
    response = requests.get(url=URL + 'all/')
    rows = [format_task(task) for task in json.loads(response.json())]
    table = tabulate(rows, headers=FORMATTED_TASK_COLUMNS)
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response='\n' + table
    )
def create_new_task(command):
    """Handle "create task <title> <text>": POST a new task to the service."""
    if len(command) != 3:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ัััะผ.')
    _, title, text = command
    payload = {'title': title, 'text': text}
    response = requests.post(url=URL + 'new/', json=payload)
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None
    )
def get_task_by_id(command):
    """Handle "get task <id>": fetch one task and render it, or '-' when absent."""
    if len(command) != 2:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ะดะฒัะผ.')
    response = requests.get(url=URL + 'get/', json={'pk': command[1]})
    try:
        found = json.loads(response.json())
    except json.decoder.JSONDecodeError:
        # The service answered with a non-JSON body (e.g. unknown id).
        found = []
    rows = [format_task(task) for task in found]
    if rows:
        rendered = '\n' + tabulate(rows, headers=FORMATTED_TASK_COLUMNS)
    else:
        rendered = '-'
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=rendered
    )
def complete_task_by_id(command):
    """Handle "complete task <id>": mark the task done with today's date."""
    if len(command) != 2:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ะดะฒัะผ.')
    payload = {
        'pk': command[1],
        'completion_date': datetime.now().strftime('%d-%m-%Y'),
    }
    response = requests.post(url=URL + 'complete/', json=payload)
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None
    )
def delete_task_by_id(command):
    """Handle "delete task <id>": remove the task from the service."""
    if len(command) != 2:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ะดะฒัะผ.')
    response = requests.post(url=URL + 'delete/', json={'pk': command[1]})
    return HandlerReturn(
        http_response=http_response_to_str(response=response),
        handler_response=None
    )
def help_handler(command):
    """Handle "help": return the static help text without calling the service."""
    if len(command) != 1:
        raise WrongCommandFormat(reason='ะบะพะปะธัะตััะฒะพ ัะปะพะฒ ะฒ ะบะพะผะฐะฝะดะต ะฝะต ัะฐะฒะฝะพ ะตะดะธะฝะธัะต.')
    return HandlerReturn(http_response=None, handler_response=HELP_MSG)
| yabifurkator/appvelox_task | client/handlers.py | handlers.py | py | 3,325 | python | en | code | 0 | github-code | 36 |
28757254981 | import psycopg2
import psycopg2.pool
from psycopg2.extras import execute_values
import pandas.io.sql as psql
class Dbconnection:
    """psycopg2 connection-pool wrapper bound to a fixed schema."""

    def __init__(self, schema, database, user, password, dbhost, dbport):
        self._properties = dict(
            database=database,
            user=user,
            password=password,
            host=dbhost,
            port=dbport,
            options=f'-c search_path={schema}'
        )
        self._pool = psycopg2.pool.ThreadedConnectionPool(1, 1, **self._properties)

    def conn(self):
        """Borrow a connection from the pool; return it with putconn()."""
        return self._pool.getconn()

    def close(self):
        """Close every connection held by the pool.

        BUG FIX: the old code referenced self._conn, which is never created
        (the direct-connection path is commented out in __init__), so close()
        always raised AttributeError.
        """
        self._pool.closeall()

    def commit(self):
        """Commit the pending transaction on the pooled connection.

        BUG FIX: conn is a method, so the old ``self.conn.commit()`` raised
        AttributeError; borrow a connection, commit, and return it.
        """
        connection = self.conn()
        try:
            connection.commit()
        finally:
            self._pool.putconn(connection)

    def rollback(self):
        """Roll back the pending transaction to discard wrong modifications."""
        connection = self.conn()
        try:
            connection.rollback()
        finally:
            self._pool.putconn(connection)

    def read(self, sql, idTable):
        """
        :param sql: read sql to execute
        :param idTable: the id to filter rows in the select table (or None)
        :return: the selected rows, -1 on error
        """
        connection = None
        try:
            connection = self.conn()
            cursor = connection.cursor()
            if idTable is not None:
                cursor.execute(sql, [idTable])
            else:
                cursor.execute(sql)
            return cursor.fetchall()
        except Exception as e:
            print(e)
            return -1
        finally:
            # BUG FIX: only return the connection to the pool; closing it
            # first would poison the pool with dead connections.
            if connection:
                self._pool.putconn(connection)

    def insert(self, sql, dframe, return_id=False):
        """
        :param sql: insert query to execute
        :param dframe: data frame whose rows are inserted; column order and
            types must be coherent with the input SQL
        :param return_id: True to fetch the autoincrement id of the main table
        :return: the inserted id when return_id is True, -1 otherwise and on error
        """
        connection = None
        id_out = -1
        try:
            connection = self.conn()
            cursor = connection.cursor()
            values_list = [tuple(x) for x in dframe.values]
            # Execute multiple insert
            execute_values(cursor, sql, values_list)
            # If main table retrieve autoincrement ID
            if return_id:
                id_out = cursor.fetchone()[0]
            connection.commit()
            return id_out
        except Exception as e:
            print(e)
            return -1
        finally:
            # BUG FIX: see read(); never close before putconn.
            if connection:
                self._pool.putconn(connection)

    def update(self, sql, idTable):
        """
        :param sql: update sql query
        :param idTable: id to select records to update
        :return: None
        """
        # BUG FIX: self.conn is a method; borrow a real connection and
        # return it to the pool when done.
        connection = self.conn()
        try:
            with connection.cursor() as c:
                c.execute(sql, (idTable,))
        finally:
            self._pool.putconn(connection)

    def remove(self, delete_sql, idTable):
        """
        :param delete_sql: delete sql to execute
        :param idTable: the id of the rows to delete
        """
        connection = self.conn()
        try:
            with connection.cursor() as c:
                c.execute(delete_sql, (idTable,))
        finally:
            self._pool.putconn(connection)
| csipiemonte/unlockpa-unlockbotrasa | code_actions/db/dbconnection.py | dbconnection.py | py | 3,262 | python | en | code | 0 | github-code | 36 |
15983286793 | import re
def calcularBinario(exp, n):
    '''
    Compute one term of the binary-to-decimal expansion.

    Called once per digit of the user's (reversed) binary string; prints
    the partial formula "(n*2^exp) + " as a side effect.

    Params:
        exp (int): position of the digit, i.e. the power of two it carries
        n (int): the digit at that position, 0 or 1

    Return:
        (int): the value n * 2 ** exp
    '''
    print(f"({n}*2^{exp})", end=" + ")
    return n*2**exp
# Validate that the user's input contains only ones and zeros.
patron = re.compile("[^01]+")
nBinario = input("Introduce un nรบmero binario (0's y 1's): ")
while patron.search(nBinario):
    nBinario = input("Solo se pueden introducir unos y ceros: ")
# Reverse the input so index i corresponds to the weight 2**i.
nBinario = nBinario[::-1]
decimal = 0
# Accumulate each digit's contribution while printing the formula.
print("\nFรณrmula para el cรกlculo\n------------------------")
for i, digito in enumerate(nBinario):
    decimal += calcularBinario(i, int(digito))
print(f"\n\nEl nรบmero {nBinario[::-1]} es {decimal} en decimal")
| dvd23m/RetosMoureDev | Reto38_BinarioaDecimal/BinarioToDecimal.py | BinarioToDecimal.py | py | 1,198 | python | es | code | 2 | github-code | 36 |
6200752155 | from __future__ import print_function
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from utils import plot_variance
class FCNet(nn.Module):
    """Simple non-linear fully connected network.

    Each layer is Linear -> activation; the Linear is weight-normalized
    unless variance analysis is enabled.
    """

    def __init__(
        self, dims, activation=nn.ReLU, relu_init=False, var_analysis=False, name=""
    ):
        super(FCNet, self).__init__()
        self.name = name
        self.var_analysis = var_analysis
        if var_analysis:
            # Append four extra layers of the final width for the analysis.
            dims += [dims[-1]] * 4

        def make_block(d_in, d_out):
            # Plain Linear under variance analysis, weight-normalized otherwise.
            linear = nn.Linear(d_in, d_out)
            if not var_analysis:
                linear = weight_norm(linear, dim=None)
            return nn.Sequential(linear, activation())

        blocks = [make_block(dims[k], dims[k + 1]) for k in range(len(dims) - 1)]
        self.main = nn.ModuleList(blocks)
        if relu_init:
            self.init_weights()

    def init_weights(self):
        """Kaiming-normal initialisation (ReLU gain) for weight parameters."""
        for pname, param in self.main.named_parameters():
            if "weight" in pname:
                nn.init.kaiming_normal_(param.data, nonlinearity="relu")

    def forward(self, x):
        for depth, block in enumerate(self.main):
            x = block(x)
            if self.var_analysis and self.training:
                plot_variance(x.cpu(), self.name + " layer " + str(depth))
        return x
if __name__ == "__main__":
    # Quick smoke test: print the layer structure of two small networks.
    fc1 = FCNet([10, 20, 10])
    print(fc1)
    print("============")
    fc2 = FCNet([10, 20])
    print(fc2)
| cliziam/VQA_project_Demo | demo-vqa-webcam/fc.py | fc.py | py | 1,847 | python | en | code | 0 | github-code | 36 |
16539479262 | import json
from common.variables import *
def send_msg(socket, msg):
    """Serialize *msg* to JSON and push the encoded bytes down *socket*."""
    payload = json.dumps(msg).encode(ENCODING)
    socket.send(payload)
def get_msg(client):
    """Receive one JSON message from *client* and return it as a dict.

    Raises ValueError when the decoded payload is not a dict.
    """
    raw = client.recv(MAX_PACKAGE_LENGTH).decode(ENCODING)
    message = json.loads(raw)
    if not isinstance(message, dict):
        raise ValueError
    return message
| MariaAfanaseva/app | HW_3_Afanaseva_Maria/common/utils.py | utils.py | py | 401 | python | en | code | 0 | github-code | 36 |
5234638589 | import tkinter.ttk as ttk
from tkinter import *
import time
root = Tk()
root.title("Hoon GUI")
root.geometry("640x480")  # width x height (an optional "+X+Y" position suffix is supported)

# Earlier experiment kept for reference: an animated ("indeterminate")
# progressbar with a stop button.
# progressbar = ttk.Progressbar(root, maximum = 100, mode="indeterminate") # mode: indeterminate(์ธ์ ๋๋ ์ง ๋ชจ๋ฅด๋ ๊ฒฝ์ฐ)
# progressbar = ttk.Progressbar(root, maximum = 100, mode="determinate") # mode: determinate(์ธ์ ๋๋ ์ง ์๋ ๊ฒฝ์ฐ)
# progressbar.start(10) # 10 ms ๋ง๋ค ์์ง์, progressbar ์คํ
# progressbar.pack()
# def btncmd():
#     progressbar.stop() # ์๋ ์ค์ง
#
# btn = Button(root, text="์ค์ง", command=btncmd)
# btn.pack()

# Determinate progressbar driven explicitly through a DoubleVar (0-100).
p_var2 = DoubleVar()
progressbar2 = ttk.Progressbar(root, maximum=100, length=150, variable=p_var2)
progressbar2.pack()
def btncmd2():
    """Animate the bar from 1 to 100, redrawing the UI at each step."""
    for step in range(1, 101):
        time.sleep(0.01)          # brief pause so the motion is visible
        p_var2.set(step)          # move the progressbar value
        progressbar2.update()     # force an immediate UI redraw
        print(p_var2.get())
# Button that kicks off the progress animation.
btn = Button(root, text="์์", command=btncmd2)
btn.pack()
root.mainloop() | OctoHoon/PythonStudy_GUI | gui_basic/9_progressbar.py | 9_progressbar.py | py | 1,077 | python | ko | code | 0 | github-code | 36 |
28320633011 | import os
import platform
import subprocess
import sys
def create_virtualenv():
    '''Create a ".venv" virtual environment in the current directory.'''
    # Both branches of the old platform check built the exact same command,
    # so the check was dead code: "python -m venv" is cross-platform.
    venv_cmd = "python -m venv .venv"
    subprocess.run(venv_cmd, shell=True, check=True)
def activate_virtualenv():
    '''Run the platform-specific activation command for .venv.

    NOTE(review): the command runs in a child shell, so it cannot alter the
    environment of this Python process — confirm this is intended.
    '''
    activate_cmd = (".venv\\Scripts\\activate" if platform.system() == "Windows"
                    else "source .venv/bin/activate")
    subprocess.run(activate_cmd, shell=True, check=True)
def main():
    """Bootstrap the dev environment: create/activate .venv, then set up SSL."""
    # Check if the virtual environment directory exists
    if not os.path.exists(".venv"):
        print("Creating a virtual environment...")
        create_virtualenv()
    print("Activating the virtual environment...")
    activate_virtualenv()
    ssl()
def ssl():
    """Generate a local root CA by delegating to the helper script."""
    print("Generating local Root CA")
    # subprocess.run(["pip", "install", "trustme"])
    subprocess.run(["python", "./scripts/ssl/setup_ssl.py"])
if __name__ == "__main__":
main() | ImredeAngelo/delta | scripts/init.py | init.py | py | 1,219 | python | en | code | 0 | github-code | 36 |
73881855145 | from nlp_flask_client import NLPClient
import pandas as pd
# Input data set of messages to analyse.
csv_filename = "messaging_data.csv"
data_df = pd.read_csv(csv_filename)
# NLP service endpoint.
IP = "127.0.0.1" # Local
PORT = 5000 # Always running
# NOTE(review): multi_threaded/multi_messages are defined but the flags are
# passed positionally below — confirm they are still meant to be used here.
multi_threaded=True
multi_messages=True
threads_no=20
rows_per_call = 13
client = NLPClient(IP, PORT)
# Testing with a single string
res = client.get_text_stats("Hey just testing this out", no_encryption=False)
print(res)
res = client.get_text_stats("Hey just testing this out", no_encryption=True)
print(res)
# this will send a file, get a new one and save it on the disk
# client.analyse_file(csv_filename, "msg_text")
# this will send the dataframe text rows one by one, get its stats back and return an update dataframe AND also write everything to a csv file
out = client.analyse_dataframe(client.slice_df(data_df, rows_amount=500), 'msg_text', True, True, threads_no, rows_per_call)
# out = client.analyse_dataframe(client.slice_df(data_df, rows_amount=500), 'msg_text', True, False, threads_no, rows_per_call)
# out = client.analyse_dataframe(client.slice_df(data_df, rows_amount=500), 'msg_text', False, True, threads_no, rows_per_call)
# out = client.analyse_dataframe(client.slice_df(data_df, rows_amount=500), 'msg_text', False, False, threads_no, rows_per_call)
# out.to_csv("pandas_stats_result.csv", index=False)
| Gabryxx7/nlp-flask-server | analyse_data.py | analyse_data.py | py | 1,322 | python | en | code | 2 | github-code | 36 |
2938524596 | import sys
input=sys.stdin.readline
class SegmentTree:
    """Point-update / range-sum segment tree over a fixed-size array."""

    def __init__(self, arr):
        self.n = len(arr)
        self.tree = [0] * (4 * self.n)  # 4n is a safe bound on node count
        self.lazy = [0] * (4 * self.n)  # reserved; no range updates are used
        self.build(1, 0, self.n - 1, arr)

    def build(self, node, left, right, arr):
        """Fill the subtree rooted at *node* covering arr[left..right]."""
        if left == right:
            self.tree[node] = arr[left]
            return
        mid = (left + right) // 2
        self.build(2 * node, left, mid, arr)
        self.build(2 * node + 1, mid + 1, right, arr)
        self.tree[node] = self.tree[2 * node] + self.tree[2 * node + 1]

    def query(self, ql, qr, node, left, right):
        """Sum of arr[ql..qr]; call with node=1, left=0, right=n-1."""
        if qr < left or right < ql:
            return 0  # disjoint from the query range
        if ql <= left and right <= qr:
            return self.tree[node]  # fully covered
        mid = (left + right) // 2
        return (self.query(ql, qr, 2 * node, left, mid)
                + self.query(ql, qr, 2 * node + 1, mid + 1, right))

    def update(self, idx, value, node, left, right):
        """Set arr[idx] = value; call with node=1, left=0, right=n-1."""
        if left == right:
            self.tree[node] = value
            return
        mid = (left + right) // 2
        if left <= idx <= mid:
            self.update(idx, value, node * 2, left, mid)
        else:
            self.update(idx, value, node * 2 + 1, mid + 1, right)
        self.tree[node] = self.tree[2 * node] + self.tree[2 * node + 1]
if __name__=="__main__":
    # n = array size, m = number of operations (BOJ 2268, 1-based positions).
    n,m=map(int,input().split())
    arr=[0 for _ in range(n)]
    seg_tree=SegmentTree(arr)
    for _ in range(m):
        cmd,a,b=map(int,input().split())
        if(cmd):
            # cmd != 0: point update — set position a to value b.
            seg_tree.update(a-1,b,1,0,len(arr)-1)
        else:
            # cmd == 0: range-sum query; endpoints may arrive in either order.
            if a>b:
                a,b=b,a
            print(seg_tree.query(a-1,b-1,1,0,len(arr)-1))
| DoSeungJae/Baekjoon | Python/2268.py | 2268.py | py | 1,649 | python | en | code | 1 | github-code | 36 |
18370027108 | from flask import Flask, request, render_template
import json
import pickle
import nltk
import string
import re
#from nltk.classify import NaiveBayesClassifier
app = Flask(__name__)
#preprocess the text
def preprocess(sentence):
    """Lower-case, tokenize, drop stopwords/punctuation, stem, and return a
    bag-of-words dict mapping each stemmed token to True."""
    # NOTE(review): downloading corpora on every call is slow; consider
    # moving these two downloads to application startup.
    nltk.download('stopwords')
    nltk.download('punkt')

    def build_bow_features(words):
        return {word: True for word in words}

    sentence = sentence.lower().replace('\n', '')
    # Use a set for O(1) membership tests instead of scanning a list of
    # stopwords + punctuation for every token.
    useless = set(nltk.corpus.stopwords.words("english")) | set(string.punctuation)
    wordlist = [word for word in nltk.word_tokenize(sentence) if word not in useless]
    # Instantiate the stemmer once rather than once per word.
    stemmer = nltk.stem.SnowballStemmer('english')
    stemmed_words = [stemmer.stem(word) for word in wordlist]
    Bow = build_bow_features(stemmed_words)
    print(Bow)
    return Bow
#load the trained model and do prediction
def predict (txt):
    # Classify the bag-of-words features with the model loaded in __main__.
    prediction = model.classify(txt)
    return prediction
#return the prediction
def submit_txt(txt):
    """Run the full pipeline and map the class label to a display string."""
    features = preprocess(txt)
    status = predict(features)
    if status == 4:
        return 'Positive'
    if status == 0:
        return 'Negative'
    return 'FAIL'
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the UI; on POST, run sentiment analysis for the submitted text."""
    if request.method == 'POST':
        form = request.form
        if form['form_type'] == 'submit_txt':
            return submit_txt(form['txt'])
    return render_template('interface.html')
if __name__ == '__main__':
    # Load the trained classifier once at startup; use a context manager so
    # the file handle is not leaked (the old open() was never closed).
    # NOTE: pickle.load executes arbitrary code — only load trusted files.
    with open('SentimentAnalysisModel2.pkl', 'rb') as model_file:
        model = pickle.load(model_file)
    app.run(host='0.0.0.0')
| AnasE17/SentimentAnalysis | app.py | app.py | py | 1,434 | python | en | code | 0 | github-code | 36 |
952895712 | pkgname = "libpeas"
pkgver = "1.36.0"
pkgrel = 2
build_style = "meson"
configure_args = ["-Ddemos=false", "-Dvapi=true"]
make_check_wrapper = ["weston-headless-run"]
hostmakedepends = [
    "meson",
    "pkgconf",
    "glib-devel",
    "gettext",
    "vala",
    "gobject-introspection",
    "python",
]
makedepends = [
    "glib-devel",
    "python-devel",
    "python-gobject-devel",
    "gtk+3-devel",
]
depends = ["python-gobject"]
checkdepends = ["weston", "fonts-dejavu-ttf"]
# Typo fix: "G0bject" (with a zero) -> "GObject".
pkgdesc = "GObject application plugin library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://wiki.gnome.org/Projects/Libpeas"
source = f"$(GNOME_SITE)/{pkgname}/{pkgver[:-2]}/{pkgname}-{pkgver}.tar.xz"
sha256 = "297cb9c2cccd8e8617623d1a3e8415b4530b8e5a893e3527bbfd1edd13237b4c"
# gtk3 can't handle seatless wayland displays
options = ["!cross", "!check"]
@subpackage("libpeas-devel")
def _devel(self):
    # Standard -devel split: headers, pkg-config files, etc.
    return self.default_devel()
| chimera-linux/cports | main/libpeas/template.py | template.py | py | 960 | python | en | code | 119 | github-code | 36 |
39006060669 | """
๋ฌธ์ ์ ํ: ์ด๋ถ ํ์
๋ฌธ์ : https://www.acmicpc.net/problem/2417
ํ์ด
์ด๋ถ ํ์์ผ๋ก ์ ๊ณฑ๊ทผ ๊ตฌํ๋ ๋ฌธ์ ๋ก ์ด๋ถ ํฌ์์ ๋ํด ์ดํด๊ฐ ์์ด์ผ ํ์ ์์
1.์ด๋ถํ์ start 0, end ์
๋ ฅ๋ฐ์ ์ ์ค์
2.๋ฐ๋ณต ์กฐ๊ฑด์ผ๋ก ์์์์ ์ด ์ข
๋ฃ์์ ๋ณด๋ค ์๊ฑฐ๋ ๊ฐ์๋๊น์ง ์ํ(์ด์กฐ๊ฑด ์ค์!)
3.์ค๊ฐ๊ฐ์ ์ ๊ณฑ์ด ์์ผ๋ฉด start์ ์ค๊ฐ๊ฐ + 1 ์ฒ๋ฆฌ ์๋๊ฒฝ์ฐ end์ ์ค๊ฐ๊ฐ -1
4.๋ง์ง๋ง start๊ฐ end๋ฅผ ๋์ด๊ฐ๋์ ๊ฐ์ด ๊ฒฐ๊ณผ๊ฐ
keypoint: ๊ฐ์ฅ ์์ ์ ์๋ฅผ ๊ตฌํ๋ ๊ฒ์ผ๋ก ์ ๊ณฑ๊ทผ์ ์์ ๊ธฐ์ค์ด start๊ฐ end ์๋ฅผ ์ด๊ณผํ๋ ์์ ์ด ํคํฌ์ธํธ
"""
n = int(input())
# Binary-search the smallest integer x with x*x >= n (i.e. ceil(sqrt(n))).
start, end = 0, n
while start <= end:
    mid = (start + end) // 2
    if mid ** 2 < n:
        # mid is too small — the answer lies strictly above mid.
        start = mid + 1
    else:
        # mid*mid >= n — the answer is mid or smaller.
        end = mid - 1
# The loop ends with `start` holding the answer, printed below.
print(start) | daeyoungshinme/algorithm | ๋ฐฑ์ค/์ด์งํ์/boj2417.py | boj2417.py | py | 852 | python | ko | code | 0 | github-code | 36 |
26852296512 | from draw_rectangle import print_bbox
def get_tokens(txt: str):
    """Split *txt* into whitespace tokens; "nan" or blank input yields []."""
    if txt == "nan" or not txt.strip():
        return []
    return txt.split()
import Levenshtein
def calculate_distance(data, findtxt):
    """Levenshtein distance between two strings, or a large sentinel
    (1000000) when either argument is not a non-empty str."""
    if type(data) is str and type(findtxt) is str and findtxt:
        return Levenshtein.distance(data, findtxt)
    return 1000000
import re
def get_path(url):
    """Parse a gs://bucket/file URL into {'bucket': ..., 'file': ...}.

    The bucket group is greedy, so everything up to the last '/' lands
    in 'bucket' and only the final component in 'file'.
    """
    pattern = r'gs:\/\/(?P<bucket>.*)\/(?P<file>.*)'
    return re.match(pattern, url).groupdict()

# Quick self-check with a sample public URL.
test = "gs://gcs-public-data--labeled-patents/us_084.pdf"
get_path(test)
def label_file(sample, features, ocrdf):
    """For every token of every non-empty feature of *sample*, compute its
    Levenshtein distance to each OCR word in *ocrdf*.

    Returns (distance matrix, flat token list, per-feature token counts,
    the features that actually had tokens).
    """
    labels = [get_tokens(str(sample[feat])) for feat in features]
    lens = [len(tok_list) for tok_list in labels]
    new_features = [feat for feat, count in zip(features, lens) if count > 0]
    new_lens = [count for count in lens if count > 0]
    tokens_to_search = [token for tok_list in labels for token in tok_list]
    data = [ocrdf.apply(lambda row: calculate_distance(row["text"], token), axis=1).to_numpy()
            for token in tokens_to_search]
    return np.array(data), tokens_to_search, new_lens, new_features
def getx(features, tokens, lens, best_variation):
    """Flatten the best variation into three parallel lists:
    OCR row indices, feature names, and target tokens."""
    indices = []
    feature_names = []
    target_tokens = []
    offset = 0
    for feat_idx, token_count in enumerate(lens):
        # Column 3 of the top candidate holds the OCR row index per token.
        indices.extend(best_variation[feat_idx][0, :, 3])
        feature_names.extend([features[feat_idx]] * token_count)
        target_tokens.extend(tokens[offset:offset + token_count])
        offset += token_count
    return indices, feature_names, target_tokens
# Weak labeling pipeline: for each PDF row in the spreadsheet, match the
# labelled feature tokens against the OCR words and write an annotation CSV.
labels_file = "data/patents_dataset.xlsx"
import numpy as np
import pandas as pd
#import xlrd
from tqdm import tqdm
import itertools
import os.path
df = pd.read_excel(labels_file, sheet_name=0)
# for each file
for i in tqdm(range(df.shape[0])):
    try:
        sample = df.iloc[i]
        # Column 0 holds the gs:// URL of the PDF; keep only the file name.
        file_name = get_path(sample[0])["file"]
        annotation_path = "annotation/" + file_name + ".csv"
        if os.path.exists(annotation_path):
            continue
        # Columns 3+ are the labelled features; distances per token vs OCR words.
        features = sample.keys()[3:]
        ocrdf = pd.read_csv("data/" + file_name + ".csv")
        data, tokens, lens, features = label_file(sample, features, ocrdf)
        print(tokens)
        # Normalised box-centre coordinates (row 0 holds the page size).
        ocrdf["x"] = (ocrdf.loc[:, "left"] + ocrdf.loc[:, "width"] / 2) / ocrdf.loc[0, "width"]
        ocrdf["y"] = (ocrdf.loc[:, "top"] + ocrdf.loc[:, "height"] / 2) / ocrdf.loc[0, "height"]
        # consider that words in the same line are closer
        # than in different lines
        ocrdf["x"] = ocrdf["x"]/4
        positions = ocrdf.loc[:, ["x", "y"]].to_numpy()
        myData = data.T
        # Keep the 4 best-matching OCR words (lowest Levenshtein) per token.
        top_n = 4
        top_lev = np.argsort(myData, axis=0)[:top_n]
        top_lev_values = np.sort(myData, axis=0)[:top_n]
        top_postions = ocrdf.loc[top_lev.flatten(), ["x", "y"]]
        top_postions["lev"] = top_lev_values.flatten()
        top_postions["pos"] = top_lev.flatten()
        tokens_matrix = top_postions.to_numpy().reshape(top_lev.shape[0], top_lev.shape[1], 4)
        labels_best_results = []
        labels_best_results_indexes = []
        labels_best_scores = []
        pos = 0
        # for l as length of one of the labels
        # for each label
        for l in lens:
            cluster_matrix = tokens_matrix[:, pos:pos + l, :]
            # (topn candidates, n_tokens current label, {x y lev pos})
            tokens_vars = np.transpose(cluster_matrix, axes=(1, 0, 2))
            # ( n_tokens current label,topn candidates, {x y lev pos})
            postions_scores = []
            variations = []
            for variation in itertools.product(*tokens_vars):
                # for each combination of candidate words for this label
                npvariation = np.array(variation)
                deviations = np.std(npvariation[:, :2], axis=0)
                deviation = np.sqrt(np.sum(np.power(deviations, 2))) # spread around the centroid
                levenstein = np.sum(npvariation[:, 2:3], axis=0) # summed Levenshtein distance
                # score of this candidate combination: tight clusters of
                # close string matches score lowest (best)
                score = np.exp(levenstein) * (deviation + 1)
                postions_scores.append(score)
                variations.append(npvariation)
            postions_scores = np.array(postions_scores)
            variations = np.array(variations)
            # Keep the 3 best-scoring combinations for this label.
            best_variations_indexes = np.argsort(postions_scores, axis=0)[:3]
            best_variations_indexes_scores = postions_scores[best_variations_indexes]
            labels_best_scores.append(best_variations_indexes_scores)
            labels_best_results_indexes.append(best_variations_indexes)
            labels_best_results.append(variations[best_variations_indexes])
            pos += l
        labels_best_results_indexes = np.array(labels_best_results_indexes)
        viable_variations = []
        viable_scores = []
        # Cross-product of the per-label shortlists; keep only assignments
        # where no OCR word is claimed by more than one label.
        lists = [list(range(labels_best_results[0].shape[0])) for _ in range(len(labels_best_results))]
        combinations = [x for x in itertools.product(*lists)]
        print("len(combinations)", len(combinations))
        # NOTE(review): this enumerate rebinds the outer loop variable `i`;
        # harmless today (i is unused afterwards) but fragile.
        for i, variation_indexes in enumerate(combinations):
            all_labels_variation = [labels_best_results[j][k] for j, k in enumerate(variation_indexes)]
            all_labels_scores = [labels_best_scores[j][k] for j, k in enumerate(variation_indexes)]
            variation_score = np.sum(all_labels_scores)
            #join together all position for all labels of a combination
            variation_tokens = []
            for label_candidate in all_labels_variation:
                variation_tokens.extend(label_candidate[0, :, 3])
            #if no repeated tokens in more than one label
            #it is a valid option
            if np.max(np.unique(variation_tokens, return_counts=True)[1]) == 1:
                viable_variations.append(all_labels_variation)
                viable_scores.append(variation_score)
        print("number of evaluated variations",len(viable_variations))
        best_vatiation_index = np.argmin(viable_scores)
        print("best variation index", best_vatiation_index)
        best_variation = viable_variations[best_vatiation_index]
        print(best_variation)
        all_best_tokens_index, all_best_tokens_value, all_best_tokens_target_token = getx(features,tokens,lens,best_variation)
        # Write the winning assignment back onto the OCR frame.
        ocrdf.at[all_best_tokens_index,"label"] = all_best_tokens_value
        ocrdf.at[all_best_tokens_index,"target"] = all_best_tokens_target_token
        # Bounding box per label: union of its words' boxes.
        ocrdf["right"] = ocrdf["left"] + ocrdf["width"]
        ocrdf["bottom"] = ocrdf["top"] + ocrdf["height"]
        tops = ocrdf.groupby(by=["label"], dropna=True)["top"].min()
        bottoms = ocrdf.groupby(by=["label"], dropna=True)["bottom"].max()
        lefts = ocrdf.groupby(by=["label"], dropna=True)["left"].min()
        rights = ocrdf.groupby(by=["label"], dropna=True)["right"].max()
        dfx = pd.merge(lefts, rights, right_index=True, left_index=True)
        dfx = pd.merge(dfx, tops, right_index=True, left_index=True)
        dfx = pd.merge(dfx, bottoms, right_index=True, left_index=True)
        print_bbox("pdf/" + file_name, dfx, "img/" + file_name + ".png")
        ocrdf.to_csv(annotation_path)
        # break # stop after the first file (debugging aid)
    # NOTE(review): a bare except also swallows KeyboardInterrupt, and
    # `file_name` may be stale/unbound in the handler — worth tightening.
    except:
print("error on",file_name) | helderarr/patents_dataset | main.py | main.py | py | 6,975 | python | en | code | 0 | github-code | 36 |
43168841119 | import settings
import os
import click
import inspect
import sys
from configure import db as dbs
from apps import app
from CustomerException import ParameterError
from apps.API.models import (
Model,
Device,
DeviceService,
DeviceServiceData
)
from asyncpg import create_pool
from utils.table_util import CreateTable
###########################################
# NOTE: for projects that do not use an ORM, the two
# commented-out listeners below can be enabled instead,
# to register a global asyncpg connection pool on the app.
###########################################
# @app.listener('before_server_start')
# async def register_db(app, loop):
# conn = "postgres://{user}:{password}@{host}:{port}/{database}".format(
# user=settings.CONFIG.DB_USER, password=settings.CONFIG.DB_PASSWORD,
# host=settings.CONFIG.DB_HOST, port=settings.CONFIG.DB_PORT,
# database=settings.CONFIG.DB_DATABASE
# )
# app.settings['pool'] = await create_pool(
# dsn=conn,
# min_size=10,
# max_size=10,
# max_queries=50000,
# max_inactive_connection_lifetime=300,
# loop=loop
# )
# @app.listener('after_server_stop')
# async def close_connection(app, loop):
# pool = app.settings['pool']
# async with pool.acquire() as conn:
# await conn.close()
@app.listener('before_server_start')
async def register_db(app, loop):
    """Startup hook placeholder.

    Database setup is intentionally a no-op here; see the commented-out
    asyncpg pool listeners above for the global-pool alternative.
    """
@click.group()
def run():
    """Root group for the management CLI; subcommands are attached below."""
@click.command()
@click.argument('db')
def init(db):
    """Create a table for every model class exported by ``apps.API.models``.

    Invoke as ``init db``; any other argument raises (and prints) a
    ``ParameterError``. Prints one ``.`` per table created, then ``OK``.
    """
    # Local import: only needed by this command.
    import importlib

    try:
        if db != 'db':
            raise ParameterError("Parameter Error, Please use 'db'!")
        # importlib.import_module returns the module object directly,
        # replacing the old __import__ + sys.modules lookup.
        models_module = importlib.import_module('apps.API.models')
        for name, obj in inspect.getmembers(models_module, inspect.isclass):
            # Filter out classes merely imported into the models module:
            # only classes whose repr mentions the models package get tables.
            if 'apps.API.models' in str(obj):
                sys.stdout.write('.')
                sys.stdout.flush()
                CreateTable(obj)
        sys.stdout.write('OK')
        sys.stdout.flush()
    except ParameterError as e:
        print(e)
        # NOTE: the old trailing `e = None` was dead code — Python 3
        # unbinds the exception name automatically after the handler.
@click.command()
def shell():
    """Launch an interactive IPython session with the models module loaded."""
    command = 'ipython -i -m "apps.models"'
    os.system(command)
@click.command()
def runserver():
    """Serve the Sanic application on all interfaces, port 8001, 4 workers."""
    server_options = {"host": "0.0.0.0", "port": 8001, "workers": 4}
    app.run(**server_options)
# Attach every subcommand to the root group, preserving registration order.
for _command in (init, shell, runserver):
    run.add_command(_command)

if __name__ == "__main__":
    # app.settings.ACCESS_LOG = False
    run()
| DemonXD/template_sanic_project | manager.py | manager.py | py | 2,643 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.