seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
23667141606 |
import math
class Node:
    """One element of a postings (linked) list.

    Attributes:
        value: document id stored in this node.
        next: following node in the chain (None at the tail).
        skip_next: skip-pointer target, wired up by LinkedList.add_skip_connections.
        term_frequency: occurrences of the term in this document.
        tf_idf: tf-idf score of the term for this document.
    """
    def __init__(self, value=None, next=None, skip_next=None, term_frequency=0, tf_idf=0.0):
        self.value = value
        self.next = next
        self.skip_next = skip_next
        self.term_frequency = term_frequency
        self.tf_idf = tf_idf
class LinkedList:
    """Sorted postings list of Node objects, ordered by document id.

    Each term in the inverted index owns one LinkedList. Besides the plain
    chain the list supports skip pointers (add_skip_connections) and a
    tf-idf-ordered traversal (traverse_list_sort).

    Fixes vs. the original:
      * sorted insertion is now a single O(n) pass (the original re-walked
        the list from the head to find the predecessor: O(n^2));
      * the two insert methods share one helper instead of duplicating it;
      * ``self.length`` is maintained on insert, so add_skip_connections
        works without the caller setting ``length`` by hand (callers that
        still assign ``length`` explicitly simply overwrite the same value).
    """

    def __init__(self):
        self.start_node = None   # head of the chain (smallest doc id)
        self.end_node = None     # tail of the chain (largest doc id)
        self.length, self.n_skips, self.idf = 0, 0, 0.0
        self.skip_length = None  # spacing between skip pointers

    def traverse_list(self):
        """Return all document ids in ascending order (None if the list is empty)."""
        if self.start_node is None:
            print("List has no element")
            return
        traversal = []
        n = self.start_node
        while n is not None:
            traversal.append(n.value)
            n = n.next
        return traversal

    def traverse_list_sort(self):
        """Return document ids sorted by tf-idf descending, ties broken by ascending doc id."""
        if self.start_node is None:
            print("List has no element")
            return
        scored = []
        n = self.start_node
        while n is not None:
            scored.append((n.tf_idf, n.value))
            n = n.next
        # reverse sort on (score, -doc_id): score descending, doc id ascending on ties
        scored.sort(key=lambda t: (t[0], -t[1]), reverse=True)
        return [doc_id for _, doc_id in scored]

    def traverse_skips(self):
        """Return the document ids reachable from the head via skip pointers only."""
        if self.start_node is None:
            return
        traversal = []
        n = self.start_node
        while n is not None:
            traversal.append(n.value)
            n = n.skip_next
        return traversal

    def add_skip_connections(self):
        """Install evenly spaced skip pointers, round(sqrt(length)) apart.

        Places at most floor(sqrt(length)) pointers (one fewer when length
        is a perfect square), starting from the head.
        """
        n_skips = math.floor(math.sqrt(self.length))
        if n_skips * n_skips == self.length:
            n_skips = n_skips - 1
        self.n_skips = n_skips
        self.skip_length = round(math.sqrt(self.length))
        if self.start_node is None:
            print("List has no element")
            return
        anchor = self.start_node   # node whose skip pointer is being wired next
        n = self.start_node
        placed = 0                 # skip pointers installed so far
        steps = 0                  # nodes walked since the current anchor
        while n is not None and placed < n_skips:
            if steps == self.skip_length:
                anchor.skip_next = n
                anchor = n
                steps = 0
                placed += 1
            n = n.next
            steps += 1

    def _insert_node_sorted(self, new_node):
        """Splice new_node into the chain keeping ascending order (single pass)."""
        value = new_node.value
        self.length += 1
        if self.start_node is None:
            # empty list: node is both head and tail
            self.start_node = new_node
            self.end_node = new_node
        elif self.start_node.value >= value:
            # new head (tail unchanged)
            new_node.next = self.start_node
            self.start_node = new_node
        elif self.end_node.value <= value:
            # new tail
            self.end_node.next = new_node
            self.end_node = new_node
        else:
            # middle: walk once, keeping the predecessor alongside
            prev = self.start_node
            cur = self.start_node.next
            while cur is not None and cur.value < value:
                prev, cur = cur, cur.next
            prev.next = new_node
            new_node.next = cur

    def insert_at_end(self, value):
        """Insert a document id, keeping the list sorted ascending."""
        new_node = Node(value=value)
        new_node.term_frequency += 1
        self._insert_node_sorted(new_node)

    def insert_at_end_tf_idf(self, value, tf_idf_ip):
        """Insert a document id with its tf-idf score, keeping the list sorted."""
        new_node = Node(value=value, tf_idf=tf_idf_ip)
        new_node.term_frequency += 1
        self._insert_node_sorted(new_node)
| lbodapat/Search_Engine_Indexing_Query_Retrieval | linkedlist.py | linkedlist.py | py | 5,845 | python | en | code | 0 | github-code | 90 |
33597117897 | from Labs.Lab06_MVC.lab06_AnsMvc.model.degree_minutes_seconds import degree_minutes_seconds
def format_location(location):
    """Format geographic coordinates for display.

    :param location: iterable of (latitude, longitude) in decimal degrees;
        element 0 is latitude, element 1 is longitude.
    :return: string like ``(DDD°MM'SS.SS"N,DDD°MM'SS.SS"E)`` — hemisphere
        letters are omitted for coordinates exactly equal to 0.
    """
    ns = ""
    if location[0] < 0:
        ns = 'S'
    elif location[0] > 0:
        ns = 'N'
    ew = ""
    if location[1] < 0:
        ew = 'W'
    elif location[1] > 0:  # bug fix: original tested location[0] (latitude) here
        ew = 'E'
    format_string = '{:03d}\xb0{:0d}\'{:.2f}"'
    latdegree, latmin, latsecs = degree_minutes_seconds(abs(location[0]))
    latitude = format_string.format(latdegree, latmin, latsecs)
    longdegree, longmin, longsecs = degree_minutes_seconds(abs(location[1]))
    longitude = format_string.format(longdegree, longmin, longsecs)
    return '(' + latitude + ns + ',' + longitude + ew + ')'
| ASPRTK/ITMO | Course.Python/Labs/Lab06_MVC/lab06_AnsMvc/model/format_location.py | format_location.py | py | 1,227 | python | ru | code | 0 | github-code | 90 |
41293396279 | N, M = list(map(int, input().split()))
# BOJ 1018: given the N x M board (N, M read above), find the 8x8 window
# that needs the fewest repaints to become a valid chessboard; both corner
# colors are tried for every window.
board = [input() for _ in range(N)]
# reference 8x8 patterns for each possible top-left color
white_first = [["WB"[(r + c) % 2] for c in range(8)] for r in range(8)]
black_first = [["BW"[(r + c) % 2] for c in range(8)] for r in range(8)]
best = 64
for top in range(N - 7):
    for left in range(M - 7):
        miss_white = sum(
            board[top + r][left + c] != white_first[r][c]
            for r in range(8) for c in range(8)
        )
        miss_black = sum(
            board[top + r][left + c] != black_first[r][c]
            for r in range(8) for c in range(8)
        )
        best = min(best, miss_white, miss_black)
print(best)
8209172362 | import math
import numpy as np
import pygame.draw
from sim.settings import BLUE
from sim.noodle import Noodle
from sim.food import Food
from sim.helpers import circularize, fill_pie
class Pred(Noodle):
    """Predator creature: a Noodle with doubled health that hunts prey
    (presumably the `smart` flag enables chase behaviour in the base class
    — TODO confirm against Noodle)."""

    def __init__(self, x, y, size, speed, sight, view, rep, color=BLUE):
        super().__init__(x, y, size, speed, sight, view, rep, color)
        # predators are tougher than the base creature
        self.health *= 2
        self.max_health *= 2
        self.smart = True

    def draw(self, window):
        """Render the predator as a filled pie slice pointing along its velocity."""
        if np.all(self.vel != 0):
            heading = math.degrees(math.atan2(self.vel[1], self.vel[0]))
        else:
            heading = 0
        heading = circularize(heading)
        outline = fill_pie((int(self.pos[0]), int(self.pos[1])), self.size * 4,
                           heading - 160, heading + 160, 1)
        pygame.draw.polygon(window, self.color, outline)

    def kill(self, preds, foods):
        """Remove this predator once its health is depleted, leaving food behind."""
        if self.health < 0:
            foods.append(Food(int(self.pos[0]), int(self.pos[1]), self.size * 6))
            preds.remove(self)
73251343977 | import sys
import requests
from jnpr.junos.device import Device
####################################
# UDFs #
####################################
def get_hostname(**kwargs):
    """Return the device hostname from the HealthBot facts."""
    return get_device_info_healthbot(**kwargs)['facts']['hostname']
def get_model(**kwargs):
    """Return the device platform/model from the HealthBot facts."""
    return get_device_info_healthbot(**kwargs)['facts']['platform']
def get_version(**kwargs):
    """Return the software release running on the device from the HealthBot facts."""
    return get_device_info_healthbot(**kwargs)['facts']['release']
def get_version_RE0(**kwargs):
    """Return the software version of RE0, read live over a device connection."""
    details = get_device_info(**kwargs)
    with connect_to_device(**details) as dev:
        return dev.facts['version_RE0']
def get_version_RE1(**kwargs):
    """Return the software version of RE1, read live over a device connection."""
    details = get_device_info(**kwargs)
    with connect_to_device(**details) as dev:
        return dev.facts['version_RE1']
def get_re_master(**kwargs):
    """Return the master routing-engine fact (`re_master['default']`) of the device."""
    details = get_device_info(**kwargs)
    with connect_to_device(**details) as dev:
        return dev.facts['re_master']['default']
def get_serial_no(**kwargs):
    """Return the device serial number from the HealthBot facts."""
    return get_device_info_healthbot(**kwargs)['facts']['serial-number']
def get_config(**kwargs):
    """Return the full device configuration in JSON format."""
    details = get_device_info(**kwargs)
    with connect_to_device(**details) as dev:
        return dev.rpc.get_config(options={'format':'json'})
def difference(num1, num2, **kwargs):
    """Return int(num1) - int(num2); prints a message and returns None for
    arguments that cannot be converted to int."""
    try:
        return int(num1) - int(num2)
    except Exception:
        print("Hit Exception, invalid arg type")
def decimal_to_percent(numerator, denominator, **kwargs):
    """Return numerator/denominator as a percentage rounded to 3 decimals;
    returns 0 when the denominator is 0."""
    if denominator == 0:
        return 0
    return round((numerator / denominator) * 100, 3)
def percent_to_decimal(percentage, **kwargs):
    """Convert a percentage (e.g. 50) to its decimal fraction (0.5)."""
    return percentage / 100
def bytes_to_kb(bytes, **kwargs):
    """Convert bytes to kilobytes (1 kB = 10**3 bytes)."""
    return bytes / (10**3)
def bytes_to_mb(bytes, **kwargs):
    """Convert bytes to megabytes (1 MB = 10**6 bytes); accepts numeric strings."""
    return int(bytes) / (10**6)
def bytes_to_gb(bytes, **kwargs):
    """Convert bytes to gigabytes (1 GB = 10**9 bytes)."""
    return bytes / (10**9)
def mb_to_bytes(mb, **kwargs):
    """Convert megabytes to bytes (1 MB = 10**6 bytes)."""
    return mb * (10**6)
def mb_to_gb(mb, **kwargs):
    """Convert megabytes to gigabytes (1 GB = 10**3 MB)."""
    return mb / (10**3)
def gb_to_bytes(gb, **kwargs):
    """Convert gigabytes to bytes (1 GB = 10**9 bytes)."""
    return gb * (10**9)
def gb_to_mb(gb, **kwargs):
    """Convert gigabytes to megabytes (1 GB = 10**3 MB).

    Bug fix: the original multiplied by 10**6 — the GB-to-bytes factor —
    so it returned values 1000x too large.
    """
    return gb * (10**3)
def octets_to_bytes_per_second(intf_name, octets, ifl_id = None, **kwargs):
    """Return the byte rate of an interface counter since the previous sample.

    State (previous counter value, timestamp and rate) lives in
    kwargs['hb_store'], keyed by interface name plus optional IFL id.
    On any error (e.g. zero time delta on the first/point-less sample)
    the previously computed rate is returned.
    """
    counter_key = intf_name if ifl_id is None else intf_name + ifl_id
    store = kwargs.setdefault('hb_store', {})
    prev_value = store.setdefault('prev_value', {})
    prev_time = store.setdefault('prev_time', {})
    prev_bps = store.setdefault('prev_bps', {})
    cur_time = kwargs.get('point_time', 0)
    cur_value = int(octets)
    try:
        rate = ((cur_value - prev_value.get(counter_key, 0))
                / (cur_time - prev_time.get(counter_key, 0)))
    except Exception:
        print("Hit Exception", file=sys.stderr)
        rate = prev_bps.get(counter_key, 0)
    # remember this sample for the next call
    prev_value[counter_key] = cur_value
    prev_time[counter_key] = cur_time
    prev_bps[counter_key] = rate
    return rate
# bps was renamed to octets_to_bytes_per_second.
# The following alias is kept for backward compatibility
# and will be removed after 3 releases.
bps = octets_to_bytes_per_second
def mbps(intf_name, octets, ifl_id = None, **kwargs):
    """Interface byte rate expressed in megabytes per second."""
    return bps(intf_name, octets, ifl_id, **kwargs) / 1000000
def kbps(intf_name, octets, ifl_id = None, **kwargs):
    """Interface byte rate expressed in kilobytes per second."""
    return bps(intf_name, octets, ifl_id, **kwargs) / 1000
def gbps(intf_name, octets, ifl_id = None, **kwargs):
    """Interface byte rate expressed in gigabytes per second."""
    return bps(intf_name, octets, ifl_id, **kwargs) / 1000000000
def bytes(intf_name, octets, ifl_id = None, **kwargs):
    """Return bytes transferred since the previous sample of this counter.

    State (the previous counter value) lives in kwargs['hb_store'],
    keyed by interface name plus optional IFL id.

    NOTE: this shadows the builtin ``bytes``; the name is kept because
    kilo/mega/giga_bytes and external rules call it.

    Bug fix: the original except-branch referenced ``prev_bps``, a name
    that does not exist in this function, so any failure raised NameError
    instead of falling back; the fallback is now 0.
    """
    counter_key = intf_name if ifl_id is None else intf_name + ifl_id
    store = kwargs.setdefault('hb_store', {})
    prev_value = store.setdefault('prev_value', {})
    cur_value = int(octets)
    try:
        bytes_send = cur_value - prev_value.get(counter_key, 0)
    except Exception:
        print("Hit Exception", file=sys.stderr)
        bytes_send = 0
    # remember this sample for the next call
    prev_value[counter_key] = cur_value
    return bytes_send
def kilo_bytes(intf_name, octets, ifl_id = None, **kwargs):
    """Bytes transferred since the previous sample, in kilobytes."""
    return bytes(intf_name, octets, ifl_id, **kwargs) / 1000
def mega_bytes(intf_name, octets, ifl_id = None, **kwargs):
    """Bytes transferred since the previous sample, in megabytes."""
    return bytes(intf_name, octets, ifl_id, **kwargs) / 1000000
def giga_bytes(intf_name, octets, ifl_id = None, **kwargs):
    """Bytes transferred since the previous sample, in gigabytes."""
    return bytes(intf_name, octets, ifl_id, **kwargs) / 1000000000
def value_diff(key_name, value, sub_key_name = None, **kwargs):
    """Return the difference between the current value and the previously
    seen value for the same key.

    key_name is mandatory; sub_key_name disambiguates multiple series under
    one key (stored as "key.subkey"). Previous values are kept in
    kwargs['hb_store']['prev_value'].
    """
    store_key = key_name if sub_key_name is None else key_name + "." + sub_key_name
    store = kwargs.setdefault('hb_store', {})
    prev_values = store.setdefault('prev_value', {})
    current = int(value)
    delta = current - prev_values.get(store_key, 0)
    # remember this sample for the next call
    prev_values[store_key] = current
    return delta
####################################
# UDAs #
####################################
def restart_fpc(fpc_slot, **kwargs):
    """Restart the FPC in the given slot.

    :param fpc_slot: FPC slot number.
    Uses the Device context manager (as the fact getters above already do)
    so the session is closed even when the RPC raises; the original leaked
    the connection on error.
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_chassis_fpc(restart = True, slot = fpc_slot)
def online_fpc(fpc_slot, **kwargs):
    """Bring the FPC in the given slot online.

    :param fpc_slot: FPC slot number.
    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_chassis_fpc(online = True, slot = fpc_slot)
def offline_fpc(fpc_slot, **kwargs):
    """Take the FPC in the given slot offline.

    :param fpc_slot: FPC slot number.
    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_chassis_fpc(offline = True, slot = fpc_slot)
def online_pic(fpc_slot, pic_slot, **kwargs):
    """Bring a PIC online on a specific FPC.

    :param fpc_slot: FPC slot number.
    :param pic_slot: PIC slot number within that FPC.
    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_chassis_pic(online = True, fpc_slot = fpc_slot, pic_slot = pic_slot)
def offline_pic(fpc_slot, pic_slot, **kwargs):
    """Take a PIC offline on a specific FPC.

    :param fpc_slot: FPC slot number.
    :param pic_slot: PIC slot number within that FPC.
    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_chassis_pic(offline = True, fpc_slot = fpc_slot, pic_slot = pic_slot)
def reboot_system(**kwargs):
    """Reboot the device.

    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_reboot()
def reboot_both_routing_engines(**kwargs):
    """Reboot both routing engines of the device.

    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_reboot(both_routing_engines = True)
def reboot_other_routing_engine(**kwargs):
    """Reboot the other (non-connected) routing engine of the device.

    Context-managed connection: closed even if the RPC raises
    (the original leaked it on error).
    """
    device_details = get_device_info(**kwargs)
    with connect_to_device(**device_details) as dev:
        return dev.rpc.request_reboot(other_routing_engine = True)
# Helper Functions
def get_device_info(**kwargs):
    """Fetch connection details (hostname/user/password) for a device from
    the config server; returns False when the server does not answer 200."""
    response = requests.get('http://config-server:9000/api/v2/config/device/%s/' % kwargs['device_id'], verify=False)
    if response.status_code != 200:
        return False
    info = response.json()
    return {
        'hostname': info['host'],
        'user': info['authentication']['password']['username'],
        'password': info['authentication']['password']['password'],
    }
def get_device_info_healthbot(**kwargs):
    """Fetch device facts from the config server, forcing a refresh
    (?update=true) when the first answer is an error or the facts are empty."""
    url = 'http://config-server:9000/api/v2/config/device/%s/facts/' % kwargs['device_id']
    refresh_url = 'http://config-server:9000/api/v2/config/device/%s/facts/?update=true' % kwargs['device_id']
    response = requests.get(url, verify=False)
    if response.status_code != 200:
        response = requests.get(refresh_url, verify=False)
    device_info = response.json()
    if len(device_info['facts']) == 0:
        response = requests.get(refresh_url, verify=False)
        device_info = response.json()
    return device_info
def connect_to_device(hostname=None, user = None, password = None):
    """Open and return a PyEZ Device session (5-minute open timeout,
    normalized RPC replies); the caller is responsible for closing it."""
    device = Device(hostname, user=user, password=password, normalize=True)
    device.open(timeout=300)
    return device
| Juniper/healthbot-rules | juniper_official/System/generic_functions.py | generic_functions.py | py | 11,697 | python | en | code | 41 | github-code | 90 |
18455629839 | from collections import defaultdict
import heapq
# Greedy contest solution: N items each with a kind t and score d; choose K
# items to maximize  sum(chosen d) + (number of distinct kinds)^2
# (presumably an AtCoder "sushi satisfaction" style task — TODO confirm).
N,K=map(int,input().split())
hq=[]            # max-heap over all items (score negated)
tset_all=set()   # all distinct kinds in the input
for i in range(N):
    t,d=map(int,input().split())
    heapq.heappush(hq,(-d,t))
    tset_all.add(t)
#print(heapq)
hq_K=[]                  # min-heap over the currently chosen K items
dsum=0                   # score sum of the chosen items
tdic=defaultdict(int)    # kind -> how many chosen items have that kind
# start with the K highest-scoring items
for i in range(K):
    md,t=heapq.heappop(hq)
    heapq.heappush(hq_K,((-md,t)))
    dsum-=md
    tdic[t]+=1
t0=len(tdic)
max_answer=dsum+(t0**2)
#print(t0,max_answer)
# try every larger number of distinct kinds: evict the lowest-scoring item
# of a duplicated kind and bring in the best remaining item of an unused
# kind (items of kinds chosen only once are popped off hq_K but stay
# counted in dsum/tdic — they just can never be evicted again)
for i in range(t0+1,min(K,len(tset_all))+1):
    loop_flg=True
    while(loop_flg):
        d,t=heapq.heappop(hq_K)
        if tdic[t]>1:
            tdic[t]-=1
            dsum-=d
            while(True):
                md2,t2=heapq.heappop(hq)
                if tdic[t2]==0:
                    tdic[t2]=1
                    dsum-=md2
                    loop_flg=False
                    break
    answer_i=dsum+(i**2)
    #print(i,answer_i)
    max_answer=max(max_answer,answer_i)
print(max_answer)
18372522289 | N = int(input())
# A holds N recorded values; recover the hidden sequence whose consecutive
# elements satisfy the A relations (N is read above). The alternating sum
# pins down the first output value; each next one follows by reflection.
A = list(map(int, input().split()))
answers = [sum(A) - 2 * sum(A[1::2])]
for i in range(N - 1):
    answers.append(2 * A[i] - answers[-1])
print(*answers)
71789653418 | #!/usr/bin/env python3
import itertools
import curses
from enum import Enum
from collections import defaultdict
class Tiles(Enum):
    """Tile ids emitted by the game program, with their drawing glyphs."""
    EMPTY = 0
    WALL = 1
    BLOCK = 2
    PADDLE = 3
    BALL = 4

    def getTexture(self):
        """Return the character used to draw this tile (empty string for EMPTY)."""
        if self is Tiles.EMPTY:
            return ""
        if self is Tiles.WALL:
            return "|"
        if self is Tiles.BLOCK:
            return "█"
        if self is Tiles.PADDLE:
            return "▁"
        return "o"
class Screen():
    """Curses front-end for an Intcode arcade game driven by ``vm``.

    ``vm`` is a project Intcode machine — assumed to expose set_interactive,
    run, has_stdout, get_stdout, give_stdin and is_finished (TODO confirm).
    Draw commands are cached in ``output_buffer[x][y] = tile id``; the pseudo
    column x == -1 carries the score instead of a board cell.
    """
    def __init__(self, vm):
        self.vm = vm
        self.vm.set_interactive(False)
        # column x -> {row y: tile id}
        self.output_buffer = defaultdict(lambda: {})
        self.automatic = False
        self.score = 0
    def run(self, interactive=True):
        """Run the game; interactive mode opens the curses UI and prints the final score."""
        if interactive:
            curses.wrapper(self._run_loop)
            print(self.score)
        else:
            self._run_and_parse_output()
    def _run_loop(self, stdscr):
        """Main curses loop: translate key presses into joystick input for the vm."""
        stdscr.refresh()
        key = None
        while True:
            if key == "q":
                break
            elif key == "a":
                self.automatic = True
            elif key in ["KEY_LEFT", "KEY_RIGHT"]:
                # joystick: -1 moves left, 1 moves right
                self.vm.give_stdin(-1 if key == "KEY_LEFT" else 1)
            elif key == "KEY_UP":
                # 0 keeps the paddle in place for one step
                self.vm.give_stdin(0)
            self._run_and_parse_output()
            key = self.render(stdscr)
            if self.vm.is_finished():
                break
    def _run_and_parse_output(self):
        """Advance the vm and fold its comma-separated (x, y, tile) triples into the buffer."""
        self.vm.run()
        if self.vm.has_stdout():
            output = self.vm.get_stdout().split(",")
            for chunk in chunks(output, 3):
                self.output_buffer[int(chunk[0])][int(chunk[1])] = int(chunk[2])
    def get_output_buffer(self):
        """Return the parsed draw buffer (x -> {y: tile id})."""
        return self.output_buffer
    def render(self, stdscr):
        """Draw the board, score and help windows; return the next key.

        In automatic mode the key is chosen by steering the paddle toward
        the ball instead of reading the keyboard.
        """
        key = None
        width = len(self.output_buffer)
        height = max([len(x) for x in self.output_buffer.values()])
        info_box = curses.newwin(7, width + 2, height + 5 , 0)
        win = curses.newwin(height + 2, width + 2, 0, 0)
        score = curses.newwin(3, 25, height + 2, 0)
        info_box.addstr(1, 1, "Controls:")
        info_box.addstr(2, 1, "LEFT and RIGHT to move paddles.")
        info_box.addstr(3, 1, "UP to advance the ball one position.")
        info_box.addstr(4, 1, "q to quit.")
        info_box.addstr(5, 1, "a for automatic mode.")
        info_box.box()
        win.box()
        score.box()
        while True:
            stdscr.erase()
            if self.automatic:
                # auto-play: find the paddle and ball columns, then move toward the ball
                paddle_position = 0
                ball_position = 0
                for x, r in self.output_buffer.items():
                    if x == -1:
                        # score channel, not a board column
                        continue
                    for y, tile in r.items():
                        if Tiles(tile) is Tiles.PADDLE:
                            paddle_position = x
                        if Tiles(tile) is Tiles.BALL:
                            ball_position = x
                key = "KEY_UP"
                if ball_position > paddle_position:
                    key = "KEY_RIGHT"
                elif ball_position < paddle_position:
                    key = "KEY_LEFT"
            else:
                stdscr.refresh()
            for x, r in self.output_buffer.items():
                for y, tile in r.items():
                    if x == -1:
                        score.addstr(1, 1, "Score: {}".format(tile))
                        self.score = tile
                        continue
                    win.addstr(y + 1, x + 1, Tiles(tile).getTexture())
            info_box.refresh()
            win.refresh()
            score.refresh()
            if not self.automatic:
                key = stdscr.getkey()
            if key in ["KEY_LEFT", "KEY_RIGHT", "KEY_UP", "a", "q"]:
                return key
            else:
                # unknown key: redraw the frames and wait for another one
                info_box.box()
                win.box()
                score.box()
                stdscr.erase()
def chunks(iterable, size):
    """Yield successive tuples of up to ``size`` items from ``iterable``
    (the final tuple may be shorter)."""
    it = iter(iterable)
    while True:
        piece = tuple(itertools.islice(it, size))
        if not piece:
            return
        yield piece
| alu-/advent-of-code-2019 | intcode/screen.py | screen.py | py | 4,067 | python | en | code | 0 | github-code | 90 |
21246789823 | """Models for Cupcake app."""
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()  # shared SQLAlchemy handle; bound to the Flask app in connect_db()
DEFAULT_CUPCAKE_URL = 'https://tinyurl.com/demo-cupcake'  # fallback cupcake image
class Cupcake(db.Model):
    """A cupcake row: flavor, size, rating and an optional image URL."""

    __tablename__ = "cupcakes"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    flavor = db.Column(db.String(50), nullable=False)
    size = db.Column(db.String(15), nullable=False)
    rating = db.Column(db.Integer, nullable=False)
    image_url = db.Column(db.String(500), default=DEFAULT_CUPCAKE_URL, nullable=False)

    def serialize(self):
        """Return a JSON-serializable dict of this cupcake's columns."""
        return {
            attr: getattr(self, attr)
            for attr in ("id", "flavor", "size", "rating", "image_url")
        }
def connect_db(app):
    """Bind the module-level SQLAlchemy handle to a Flask app and push its context."""
    app.app_context().push()
    db.app = app
    db.init_app(app)
17622316108 | #!/usr/bin/python3
"""
Export to json
"""
import json
import requests
import sys
def create_json_file(employee_id):
    """Write <employee_id>.json with all of the employee's TODO tasks.

    Fetches the user and their todos from jsonplaceholder and dumps
    {employee_id: [{task, completed, username}, ...]} to disk.
    """
    base_url = "https://jsonplaceholder.typicode.com/"
    user = requests.get(base_url + "users/{}".format(employee_id)).json()
    todos = requests.get(base_url + "todos",
                         params={"userId": employee_id}).json()
    username = user.get("username")
    payload = {
        employee_id: [
            {
                "task": task["title"],
                "completed": task["completed"],
                "username": username
            }
            for task in todos
        ]
    }
    with open("{}.json".format(employee_id), "w") as file:
        json.dump(payload, file)
if __name__ == "__main__":
    # first CLI argument is the employee id to export
    user_id = sys.argv[1]
    create_json_file(user_id)
| Grace-ngigi/alx-system_engineering-devops | 0x15-api/2-export_to_JSON.py | 2-export_to_JSON.py | py | 912 | python | en | code | 0 | github-code | 90 |
10966169421 | #!/usr/bin/env python3
from lib import prime
# ways[i]: prime-partition style count for i (Project Euler 77 exploration).
# A prime counts as a single way of 1; non-primes accumulate ways over a
# prime first summand. The list has 11 slots but only 0-9 are filled,
# matching the original output.
ways = [0] * 11
for total in range(10):
    if prime(total):
        ways[total] = 1
    else:
        ways[total] = sum(ways[part] for part in range(total) if prime(total - part))
print(ways)
17963362529 | import collections
# Given n stick lengths, pick two pairs of equal lengths maximizing the
# product of the two pair lengths (presumably the AtCoder "make a
# rectangle" task — TODO confirm).
n=int(input())
a=list(map(int,input().split()))
c = collections.Counter(a)
# b: lengths occurring 4+ times (one such value supplies both pairs);
# d: lengths occurring 2-3 times (one pair each).
# Both start with two 0 sentinels so b[0], d[0] and d[1] always exist.
b=[0,0]
d=[0,0]
for i in c:
    if c[i]>=4:
        b.append(i)
    elif c[i]>=2:
        d.append(i)
b.sort(reverse=True)
d.sort(reverse=True)
# choose the best combination of the two largest pair-capable lengths
if b[0]>d[0]:
    print(b[0]*b[0])
elif b[0]<d[1]:
    print(d[0]*d[1])
else:
    print(b[0]*d[0])
70297684778 | from itertools import combinations
from sys import stdin
# BOJ 1038: print the n-th "decreasing number" (digits strictly decrease
# left to right), or -1 when it does not exist. Every subset of the digits
# 0-9 yields exactly one such number when its digits are sorted descending.
n = int(stdin.readline().rstrip())
result = []
for size in range(1, 11):
    for digits in combinations(range(10), size):
        result.append(int(''.join(map(str, sorted(digits, reverse=True)))))
result.sort()
try:
    print(result[n])
except IndexError:  # narrowed from a bare except: only result[n] can fail here
    print(-1)
| JKbin/Study-of-Coding-with-Python | BaekJoon/Gold_V/1038.py | 1038.py | py | 459 | python | en | code | 0 | github-code | 90 |
4239356288 | #
# @lc app=leetcode id=65 lang=python3
#
# [65] Valid Number
#
# @lc code=start
class Solution:
    """LeetCode 65 — Valid Number.

    Accepted grammar: [sign] (digits | digits '.' | '.' digits |
    digits '.' digits), optionally followed by e/E and a signed integer.
    """

    def isNumber(self, s: str) -> bool:
        """Return True iff ``s`` spells a valid decimal or integer number."""

        def if_digits(sub: str) -> bool:
            # non-empty run of ASCII digits only
            return bool(sub) and all(c in "0123456789" for c in sub)

        def if_sign(sub: str) -> bool:
            # a single '+' or '-'
            return len(sub) == 1 and sub in "+-"

        def if_integer(sub: str) -> bool:
            # [sign] digits
            if not sub:
                return False
            if if_sign(sub[0]):
                sub = sub[1:]
            return if_digits(sub)

        def if_signless_decimal(sub: str) -> bool:
            # digits '.' | '.' digits | digits '.' digits (exactly one dot)
            n = len(sub)
            if n == 0:
                return False
            dot_pos = -1
            for i, c in enumerate(sub):
                if c == '.':
                    dot_pos = i
                    break
            if dot_pos == -1:
                return False
            if dot_pos == 0:
                return if_digits(sub[1:])
            if dot_pos == n - 1:
                return if_digits(sub[:n - 1])
            return if_digits(sub[:dot_pos]) and if_digits(sub[dot_pos + 1:])

        def if_decimal(sub: str) -> bool:
            # [sign] signless-decimal
            if not sub:
                return False
            return (sub[0] in "+-" and if_signless_decimal(sub[1:])) or if_signless_decimal(sub)

        n = len(s)
        if n == 0:
            # bug fix: original wrote `if s == 0`, comparing a str to an int
            # (always False), so the empty-input check never fired.
            return False
        # split on the first exponent marker, if any
        e_pos = -1
        for i, c in enumerate(s):
            if c in "eE":
                e_pos = i
                break
        if e_pos == -1:
            return if_decimal(s) or if_integer(s)
        mantissa_ok = if_decimal(s[:e_pos]) or if_integer(s[:e_pos])
        return mantissa_ok and if_integer(s[e_pos + 1:])
# @lc code=end
| wangyerdfz/python_lc | 65.valid-number.py | 65.valid-number.py | py | 3,011 | python | en | code | 0 | github-code | 90 |
74132736297 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 14:51:31 2023
2) Show that for Thiessen polygons drawn around randomly placed points within
continents (for the number of points in each continent, use the true number of groups),
the empirical relationship between geographic variability at the country level and
ethnic heterogeneity based on your Thiessen polygons does not hold. Produce the distribution of
coefficient estimates based on 500 permutations of random points.
Add the estimated coefficient based on the (1).
@author: anyamarchenko
"""
import os
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
import random
from shapely.geometry import Point, Polygon
from scipy.spatial import Voronoi, voronoi_plot_2d
import subprocess # for sleep
import statsmodels.api as sm
def prevent_sleep():
    """Spawn macOS ``caffeinate`` so the machine stays awake; returns the Popen handle."""
    return subprocess.Popen(['caffeinate'])
def allow_sleep(process):
    """Terminate the process returned by prevent_sleep(), letting the machine sleep again."""
    process.terminate()
# Keep the machine awake for the long permutation run (comment out to allow sleep)
process = prevent_sleep()
# Work relative to the project root
base_directory = "/Users/anyamarchenko/Documents/Github/ethnolinguistic"
os.chdir(base_directory)
# Load the Ethnologue language-polygon shapefile
language_gdf = gpd.read_file('ethnologue/Ethnologue_16_shapefile/langa_no_overlap_biggest_clean.shp')
# =============================================================================
# # Re-project if the CRS is geographic
# if language_gdf.crs.is_geographic:
#     # Example: using a World Mercator projection
#     language_gdf = language_gdf.to_crs('EPSG:3395')
# 
# =============================================================================
# language-group count per continent ('CNT' column) = number of random points to draw
languages_per_continent = language_gdf.groupby('CNT').size()
def generate_random_points(geometry, num_points):
    """Rejection-sample ``num_points`` shapely Points uniformly inside ``geometry``.

    Candidates are drawn from the geometry's bounding box and kept only
    when the geometry contains them.
    """
    minx, miny, maxx, maxy = geometry.bounds
    samples = []
    while len(samples) < num_points:
        candidate = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
        if geometry.contains(candidate):
            samples.append(candidate)
    return samples
# Draw one random point per true language group within each continent
random_points = {}
for continent, num_languages in languages_per_continent.items():
    # Combined geometry of all language polygons on this continent
    continent_geometry = language_gdf[language_gdf['CNT'] == continent].unary_union
    random_points[continent] = generate_random_points(continent_geometry, num_languages)
# One row per (finite) Thiessen/Voronoi polygon with its language count
polygon_languages_df = pd.DataFrame(columns=['Continent', 'Polygon', 'Num_Languages', 'Countries'])
# Build Voronoi polygons around the random points and count intersecting languages
for continent, points in random_points.items():
    vor = Voronoi([point.coords[0] for point in points])
    # drop unbounded regions (those containing vertex index -1) and empty ones
    polygons = [Polygon(vor.vertices[region]) for region in vor.regions if -1 not in region and region]
    for poly in polygons:
        contained_languages = language_gdf[language_gdf.intersects(poly)]
        countries = contained_languages['C1'].unique() # unique country codes in this polygon
        new_row = pd.DataFrame([{
            'Continent': continent,
            'Polygon': poly,
            'Num_Languages': len(contained_languages),
            'Countries': ', '.join(countries) # join country codes as a string
        }])
        # NOTE(review): pd.concat per row is quadratic; consider collecting
        # dicts and building the DataFrame once after the loop.
        polygon_languages_df = pd.concat([polygon_languages_df, new_row], ignore_index=True)
# Display the DataFrame
print(polygon_languages_df)
### Clean up polygon_languages_df
# Define a function to randomly select a country from the string
def select_random_country(countries_str):
    """Pick one country code at random from a comma-separated string.

    Returns None for empty or non-string input (e.g. NaN coming out of a
    pandas column), which is what the original's unreachable `return None`
    branch intended: `str.split` never returns an empty list, so the old
    `if countries_list:` check was dead code and non-strings crashed.
    """
    if not isinstance(countries_str, str) or not countries_str:
        return None
    return random.choice(countries_str.split(', '))
# Apply this function to the 'Countries' column and create a new column 'countryname'
polygon_languages_df['countryname'] = polygon_languages_df['Countries'].apply(select_random_country)
# Replace 'Russian Federation' with 'Russia' in the 'countryname' column
# (normalizes country labels so they match the Stata dataset's naming).
polygon_languages_df['countryname'] = polygon_languages_df['countryname'].replace('Russian Federation', 'Russia')
polygon_languages_df['countryname'] = polygon_languages_df['countryname'].replace('Viet Nam', 'Vietnam')
polygon_languages_df['countryname'] = polygon_languages_df['countryname'].replace('Iran', 'Iran, Islamic Rep.')
polygon_languages_df['countryname'] = polygon_languages_df['countryname'].replace('Egypt', 'Egypt, Arab Rep.')
### Merge with country agriculture data
# Load the .dta file
dta_file_path = 'data/Tables1-3a.dta' # Replace with the actual file path
country_data_df = pd.read_stata(dta_file_path)
# Select only the required columns from the country data
country_data_df = country_data_df[['countryname', 'sd_emeanclip', 'emeanclip', 'sdclimclip', 'sd_suitclip', 'abs_latclip']]
# Merge the data into polygon_languages_df
merged_df = polygon_languages_df.merge(country_data_df, on='countryname', how='left')
merged_df = merged_df.drop(columns=['Polygon', 'Countries'])
merged_df['Num_Languages'] = pd.to_numeric(merged_df['Num_Languages'], errors='coerce')
# Save the merged DataFrame to a new .dta file
# NOTE(review): the file is written before the numeric conversions below,
# so the saved .dta keeps the unconverted dtypes — confirm this is intended.
merged_df.to_stata('data/thiessen_polygon.dta')
# Convert the relevant columns to numeric data types
merged_df['sd_emeanclip'] = pd.to_numeric(merged_df['sd_emeanclip'], errors='coerce')
merged_df['sdclimclip'] = pd.to_numeric(merged_df['sdclimclip'], errors='coerce')
merged_df['sd_suitclip'] = pd.to_numeric(merged_df['sd_suitclip'], errors='coerce')
# NOTE(review): 'Num_Languages' was already converted above — duplicate line.
merged_df['Num_Languages'] = pd.to_numeric(merged_df['Num_Languages'], errors='coerce')
# Display the merged DataFrame
print(merged_df)
merged_df = merged_df.dropna(subset=['Num_Languages', 'sd_emeanclip', 'sdclimclip', 'sd_suitclip'])
# Define the independent variables (predictors) and the dependent variable
X = merged_df[['sd_emeanclip', 'sdclimclip', 'sd_suitclip']]
y = merged_df['Num_Languages']
# Adding a constant to the model (for the intercept)
X = sm.add_constant(X)
# Create a model
model = sm.OLS(y, X)
# Fit the model
results = model.fit()
# Display the coefficient table
print(results.summary())
# =============================================================================
# # Create a DataFrame to store results
# polygon_languages_df = pd.DataFrame(columns=['Continent', 'Polygon', 'Num_Languages', 'Largest_Intersecting_Country'])
#
# # Generate Voronoi polygons and count languages
# for continent, points in random_points.items():
# vor = Voronoi([point.coords[0] for point in points])
# polygons = [Polygon(vor.vertices[region]) for region in vor.regions if -1 not in region and region]
#
# # Create a GeoDataFrame for Voronoi polygons
# voronoi_gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(polygons))
# # Set the CRS for Voronoi polygons to match the language data
# voronoi_gdf.crs = language_gdf.crs
#
# for poly in voronoi_gdf.geometry:
# contained_languages = language_gdf[language_gdf.intersects(poly)].copy()
# # Check if there are any intersecting languages
# if contained_languages.empty:
# print(f"No intersecting languages for polygon in {continent}")
# continue
#
# # Calculate intersection area for each country
# intersection_areas = contained_languages.intersection(poly).area
# contained_languages['Intersection_Area'] = intersection_areas
# # Find the country with the largest intersection
# largest_country = contained_languages.loc[contained_languages['Intersection_Area'].idxmax(), 'C1']
#
# new_row = pd.DataFrame([{
# 'Continent': continent,
# 'Polygon': poly,
# 'Num_Languages': len(contained_languages),
# 'Largest_Intersecting_Country': largest_country
# }])
# polygon_languages_df = pd.concat([polygon_languages_df, new_row], ignore_index=True)
#
# # Display the DataFrame
# print(polygon_languages_df)
# =============================================================================
# allow comp to sleep once code is done
allow_sleep(process)
| amarchenko26/ethnolinguistic | create_thiessen.py | create_thiessen.py | py | 8,051 | python | en | code | 0 | github-code | 90 |
23414187383 | from flask import Flask, render_template, request, redirect, url_for, flash
app = Flask(__name__)
ALLOWED_EXTENSIONS = {'jpeg', 'jpg', 'png'}
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['RECAPTCHA_USE_SSL']= False
app.config['RECAPTCHA_PUBLIC_KEY'] ='6LeBCfIZAAAAAO39_L4Gd7f6uCM0PfP_N3XjHxkW'
app.config['RECAPTCHA_PRIVATE_KEY'] ='6LeBCfIZAAAAAJTjq0Xz_ndAW9LByCo1nJJKy'
app.config['RECAPTCHA_OPTIONS'] = {'theme':'black'}
@app.route('/')
def signup():
    # Serve the upload form.  NOTE(review): the view is named 'signup' but
    # simply renders index.html — likely a leftover name from a template.
    return render_template('index.html')
@app.route('/', methods=['POST'])
def signup_post():
    """Process the submitted form: read the uploaded ip/proxy files and the
    option checkboxes, then redirect back to the form page.

    Bug fixed: the original tested `ip[:-1] == ''` — a list slice compared to
    a string, which is always False — so a trailing empty entry was never
    stripped from the ip list the way it is for the proxy list.
    """
    ip = request.files['ip']
    proxy = request.files['proxy']
    octet = request.form.get('octet_count')
    # A checkbox list is non-empty iff the box was ticked.
    speed_check = bool(request.form.getlist('speed'))
    ipv_6_check = bool(request.form.getlist('ipv6'))
    # Uploaded bytes are stringified (b'...' repr, hence the [2:-1] trim) and
    # split on the literal two-character sequence backslash-n of the repr.
    proxy = str(proxy.read())[2:-1].split(r'\n')
    if proxy[-1] == '':
        proxy = proxy[:-1]
    ip = str(ip.read())[2:-1].split(r'\n')
    if ip[-1] == '':  # fixed: was `ip[:-1] == ''`
        ip = ip[:-1]
    print(octet, speed_check, ipv_6_check)
    return redirect(url_for('signup'))
app.secret_key = 'some_secret_key'
if __name__ == "__main__":
app.run(debug=True)
| Qazqazqaz2/proxy_checker | web_interface.py | web_interface.py | py | 1,343 | python | en | code | 0 | github-code | 90 |
def dig_sum(N):
    """Return the sum of the decimal digits of N (0 for N <= 0)."""
    if N <= 0:
        return 0
    return sum(int(digit) for digit in str(N))
N=int(input())
# Minimise dig_sum(i) + dig_sum(N - i) over all splits of N into two
# positive parts.  NOTE(review): ans starts at 100, assuming no digit-sum
# pair exceeds it — holds for the contest's input bounds, confirm otherwise.
ans=100
for i in range(1,N):
    tmp=dig_sum(i)+dig_sum(N-i)
    if tmp<ans:
        ans=tmp
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03331/s944140510.py | s944140510.py | py | 210 | python | fr | code | 0 | github-code | 90 |
34917784157 | from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
import os, sys, glob, re
app = Flask(__name__)
model_path = "rice.h5"
classes = {0:"bacterial_leaf_blight:-{ About bacterial_leaf_blight disease }",1:"blast:-{ about blast disease} ",2:"brownspot:-{ about brownspot disease }"}
def model_predict(image_path):
    """Classify the rice-leaf image at `image_path` with the Keras model.

    Returns a (label, template_name) pair for the predicted disease class.
    """
    print("Predicted")
    image = load_img(image_path, target_size=(224, 224))
    image = img_to_array(image)
    image = image / 255  # scale pixels to [0, 1]
    image = np.expand_dims(image, axis=0)
    # NOTE(review): the model is re-loaded from disk on every call, which is
    # slow; consider loading it once at module import time.
    model = load_model(model_path)
    result = np.argmax(model.predict(image))
    # Map the class index straight to (label, result template).  Replaces the
    # original if/elif chain, which duplicated this mapping and also left the
    # `prediction` variable unused.
    outputs = {
        0: ("bacterial_leaf_blight", "bacterial_leaf_blight.html"),
        1: ("blast", "blast.html"),
        2: ("brownspot", "brownspot.html"),
    }
    label, output_page = outputs[int(result)]
    print(output_page)
    return label, output_page
@app.route('/',methods=['GET'])
def index():
    # Landing page with the image-upload form.
    return render_template('index.html')
@app.route('/predict',methods=['GET','POST'])
def predict():
    """Accept an uploaded leaf image and render the disease result page."""
    print("Entered")
    if request.method == 'POST':
        print("Entered here")
        file = request.files['image'] # fet input
        filename = file.filename
        print("@@ Input posted = ", filename)
        # NOTE(review): filename is used as-is; werkzeug's secure_filename
        # (imported at the top of the file) should sanitize it first.
        file_path = os.path.join('static/user uploaded', filename)
        file.save(file_path)
        print("@@ Predicting class......")
        pred, output_page = model_predict(file_path)
        return render_template(output_page, pred_output = pred, user_image = file_path)
    # NOTE(review): a plain GET falls through and implicitly returns None,
    # which Flask treats as an error — confirm whether GET should render a page.
if __name__ == '__main__':
app.run(debug=True,threaded=False)
| 19wh1a0576/BVRITHYDERABAD | CSE/CSE Major Projects - 2017_21/Rice Crop Disease Detection/app.py | app.py | py | 1,912 | python | en | code | 0 | github-code | 90 |
15834722488 | from aws_cdk import (
aws_rds as rds,
aws_ec2 as ec2,
Duration,
RemovalPolicy,
)
from constructs import Construct
from typing import Optional, Any
from provena.custom_constructs.db_instance import INSTANCE_TYPE
# Setting for RDS instance
BACKUP_RETENTION_DAYS = 10
BACKUP_DURATION = Duration.days(BACKUP_RETENTION_DAYS)
NO_BACKUP_DURATION = Duration.days(0)
# Bump up the version due to minor updates
# if you try to update from snapshot using a specified older version RDS service tries to "update" backwards from new version -> older.
# make sure that this value reflects the actual current version of the restoring instance
#RDS_POSTGRES_VERSION = rds.PostgresEngineVersion.VER_13_4
RDS_POSTGRES_VERSION = rds.PostgresEngineVersion.VER_13_7
DEV_INSTANCE_TYPE = ec2.InstanceType.of(
instance_class=ec2.InstanceClass.BURSTABLE3,
instance_size=ec2.InstanceSize.MICRO
)
DEFAULT_INSTANCE_TYPE = ec2.InstanceType.of(
instance_class=ec2.InstanceClass.BURSTABLE3,
instance_size=ec2.InstanceSize.MEDIUM
)
class DBInstanceFromSnapshot(Construct):
    def __init__(self, scope: Construct,
                 id: str,
                 service_name: str,
                 stage: str,
                 vpc: ec2.Vpc,
                 snapshot_arn: str,
                 backup_duration: Optional[Duration] = NO_BACKUP_DURATION,
                 public: Optional[bool] = True,
                 user_name: str = "keycloak",
                 removal_policy: Optional[RemovalPolicy] = RemovalPolicy.SNAPSHOT,
                 **kwargs: Any) -> None:
        """Creates a database instance from an existing instance snapshot.
        Args:
            scope (cdk.Construct): CDK construct scope
            id (str): The CDK id
            service_name (str): The name of the service (prefixes to most names, e.g. "db")
            stage (str): Deployment stage; DEV/STAGE/TEST get a smaller (micro) instance
            vpc (ec2.Vpc): The VPC in which to deploy the instance
            snapshot_arn (str): The ARN of the instance snapshot
            backup_duration (Optional[cdk.Duration], optional): The amount of time to store backups for. Defaults to backups disabled (zero duration).
            public (Optional[bool], optional): Should the db be exposed to the public - will place it in a public subnet if so. Defaults to True.
            user_name (Optional[str], optional): The name of the root user. Defaults to "keycloak".
            removal_policy (Optional[cdk.RemovalPolicy], optional): What should happen when the instance is removed. Defaults to cdk.RemovalPolicy.SNAPSHOT.
        """
        # Super constructor
        super().__init__(scope, id, **kwargs)
        # NOTE(review): this local INSTANCE_TYPE shadows the INSTANCE_TYPE
        # imported from provena.custom_constructs.db_instance at the top of
        # the file — confirm the import is still needed.
        if stage in ['DEV', 'STAGE', 'TEST']:
            INSTANCE_TYPE = DEV_INSTANCE_TYPE
        else:
            INSTANCE_TYPE = DEFAULT_INSTANCE_TYPE
        # Restore from cluster backup
        self.instance = rds.DatabaseInstanceFromSnapshot(
            self,
            id=f"{service_name}DBSnapshotInstance",
            # Setup credentials to use specified user name
            credentials=rds.SnapshotCredentials.from_generated_secret(
                username=user_name),
            snapshot_identifier=snapshot_arn,
            allocated_storage=20,
            # NOTE: engine version must match the snapshot's actual version —
            # RDS cannot "downgrade" from a newer snapshot (see file comment).
            engine=rds.DatabaseInstanceEngine.postgres(
                version=RDS_POSTGRES_VERSION),
            instance_type=INSTANCE_TYPE,
            publicly_accessible=public,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC if public else \
                ec2.SubnetType.PRIVATE_ISOLATED
            ),
            backup_retention=backup_duration,
            removal_policy=removal_policy,
            vpc=vpc,
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=True,
            port=5432
        )
        # Allow security group access to RDS
        # NOTE(review): this opens the DB port to every IPv4 address when
        # public=True; access still requires valid DB credentials.
        if public:
            self.instance.connections.allow_default_port_from_any_ipv4(
                "Allow traffic from all IPs to db (authenticated)."
            )
        self.secret = self.instance.secret
    def give_connectable_access(self, connection: ec2.IConnectable) -> None:
        # Grant the given connectable (e.g. a service's security group)
        # inbound access on the database's default port.
        self.instance.connections.allow_default_port_from(connection)
| provena/provena | infrastructure/provena/custom_constructs/db_instance_from_snapshot.py | db_instance_from_snapshot.py | py | 4,303 | python | en | code | 3 | github-code | 90 |
18395911839 | N = int(input())
# Group (price, original-index) pairs by restaurant/name key.
D = {}
q = set()
for i in range(N):
    s,p = input().split()
    if s in q:
        D[s].append([int(p),i])
    else:
        D[s] = [[int(p),i]]
        q.add(s)
ans = []
# For each name in alphabetical order, emit original indices by
# descending price (sort(reverse=True) orders on price first).
for i in sorted(list(q)):
    D[i].sort(reverse=True)
    for j in D[i]:
        ans.append(j[1])
# Print 1-based positions, one per line.
[print(i+1) for i in ans]
| Aasthaengg/IBMdataset | Python_codes/p03030/s000431407.py | s000431407.py | py | 317 | python | en | code | 0 | github-code | 90 |
23158454615 | #!/usr/bin/env python3
"""
Description: Wakurtosis load simulator
"""
""" Dependencies """
import sys, logging, yaml, json, time, random, os, argparse, tomllib, glob
import requests
import rtnorm
# from pathlib import Path
# import numpy as np
# import pandas as pd
# import matplotlib.pyplot as plt
# import cloudpickle as pickle
""" Globals """
G_APP_NAME = 'WLS'
G_LOG_LEVEL = 'DEBUG'
G_DEFAULT_CONFIG_FILE = './config/wsl.yml'
G_LOGGER = None
""" Custom logging formatter """
class CustomFormatter(logging.Formatter):
    """Logging formatter that picks a different layout per log level."""
    # Set different formats for every logging level
    time_name_stamp = "[%(asctime)s.%(msecs)03d] [" + G_APP_NAME + "]"
    # Map each level to its format string; 'DEFAULT' is the fallback.
    FORMATS = {
        logging.ERROR: time_name_stamp + " ERROR in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s",
        logging.WARNING: time_name_stamp + " WARNING - %(msg)s",
        logging.CRITICAL: time_name_stamp + " CRITICAL in %(module)s.py %(funcName)s() %(lineno)d - %(msg)s",
        logging.INFO: time_name_stamp + " %(msg)s",
        logging.DEBUG: time_name_stamp + " %(funcName)s() %(msg)s",
        'DEFAULT': time_name_stamp + " %(msg)s",
    }
    def format(self, record):
        # Build a fresh Formatter for the record's level and delegate to it.
        log_fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
        formatter = logging.Formatter(log_fmt, '%d-%m-%Y %H:%M:%S')
        return formatter.format(record)
def check_waku_node(node_address):
    """Return True iff the Waku node at `node_address` answers the debug-info RPC."""
    data = {
        'jsonrpc': '2.0',
        'method': 'get_waku_v2_debug_v1_info',
        # 'method' : 'get_waku_v2_debug_v1_version',
        'id': 1,
        'params' : []}
    G_LOGGER.info('Waku RPC: %s from %s' %(data['method'], node_address))
    # Network errors (node down, refused connection) mean "not reachable".
    try:
        response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'})
    except Exception as e:
        G_LOGGER.debug('%s: %s' % (e.__doc__, e))
        return False
    # A reply that is not valid JSON is also treated as unreachable.
    try:
        response_obj = response.json()
    except Exception as e:
        G_LOGGER.debug('%s: %s' % (e.__doc__, e))
        return False
    G_LOGGER.debug('Response from %s: %s' %(node_address, response_obj))
    return True
def get_waku_msgs(node_address, topic, cursor=None):
    """Fetch one page (up to 100) of stored messages for `topic` from a node.

    `cursor` is a paging cursor from a previous response, or None for the
    first page.  Returns (parsed JSON-RPC response, elapsed milliseconds).
    """
    data = {
        'jsonrpc': '2.0',
        'method': 'get_waku_v2_store_v1_messages',
        'id': 1,
        'params' : [topic, None, None, None, {"pageSize": 100, "cursor": cursor,"forward": True}]
    }
    G_LOGGER.debug('Waku RPC: %s from %s' %(data['method'], node_address))
    s_time = time.time()
    response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'})
    elapsed_ms =(time.time() - s_time) * 1000
    response_obj = response.json()
    # G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms))
    return response_obj, elapsed_ms
# https://rfc.vac.dev/spec/16/#get_waku_v2_relay_v1_messages
def get_last_waku_msgs(node_address, topic):
    """Fetch the latest relay messages for `topic` from a node.

    Returns (parsed JSON-RPC response, elapsed milliseconds).
    """
    data = {
        'jsonrpc': '2.0',
        'method': 'get_waku_v2_relay_v1_messages',
        'id': 1,
        'params' : [topic]}
    G_LOGGER.debug('Waku RPC: %s from %s' %(data['method'], node_address))
    s_time = time.time()
    response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'})
    elapsed_ms =(time.time() - s_time) * 1000
    response_obj = response.json()
    # G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms))
    return response_obj, elapsed_ms
def send_waku_msg(node_address, topic, payload, nonce=1):
    """Publish `payload` on `topic` through the node's relay RPC.

    The payload is wrapped with a nonce and a nanosecond timestamp, JSON
    encoded, and hex encoded as the Waku message body.  Returns (parsed
    JSON-RPC response, elapsed milliseconds).
    """
    # waku_msg = {
    #     'nonce' : nonce,
    #     'timestamp' : time.time_ns(),
    #     'payload' : payload}
    my_payload = {
        'nonce' : nonce,
        'timestamp' : time.time_ns(),
        'payload' : payload
    }
    waku_msg = {
        'payload' : json.dumps(my_payload).encode('utf-8').hex()
    }
    data = {
        'jsonrpc': '2.0',
        'method': 'post_waku_v2_relay_v1_message',
        'id': 1,
        'params' : [topic, waku_msg]}
    G_LOGGER.debug('Waku RPC: %s from %s Topic: %s' %(data['method'], node_address, topic))
    s_time = time.time()
    response = requests.post(node_address, data=json.dumps(data), headers={'content-type': 'application/json'})
    elapsed_ms =(time.time() - s_time) * 1000
    response_obj = response.json()
    G_LOGGER.debug('Response from %s: %s [%.4f ms.]' %(node_address, response_obj, elapsed_ms))
    return response_obj, elapsed_ms
# Generate a random interval using a Poisson distribution
def poisson_interval(rate):
    """Draw an exponential inter-arrival time for a Poisson process of `rate`."""
    interval = random.expovariate(rate)
    return interval
def make_payload(size):
    """Return a random hexadecimal payload string (e.g. '0x1a2b').

    NOTE(review): `size` counts hex digits (4 random bits each), while the
    log message below says "bytes" — confirm which unit is intended.
    """
    payload = hex(random.getrandbits(4*size))
    G_LOGGER.debug('Payload of size %d bytes: %s' %(size, payload))
    return payload
def make_payload_dist(dist_type, min_size, max_size):
    """Build a random hex payload whose size follows `dist_type`.

    Sizes are drawn from [min_size, max_size] ('uniform' or truncated
    'gaussian') and rejected until even.  Unknown distributions log an
    error and return the sentinel '0x00'.

    Bug fixed: the error log for unknown distributions passed no argument
    for its '%s' placeholder; the distribution name is now supplied.
    """
    # Check if min and max packet sizes are the same
    if min_size == max_size:
        G_LOGGER.warning('Packet size is constant: min_size=max_size=%d' %min_size)
        return make_payload(min_size)
    # Payload sizes are even integers uniformly distributed in [min_size, max_size]
    if dist_type == 'uniform':
        size = int(random.uniform(min_size, max_size))
        # Reject non even sizes
        while (size % 2) != 0:
            size = int(random.uniform(min_size, max_size))
        return make_payload(size)
    # Payload sizes are even integers ~"normally" distributed in [min_size, max_size]
    if dist_type == 'gaussian':
        # NOTE(review): mu = (max-min)/2 is the half-range, not the interval
        # midpoint (min+max)/2 — confirm against rtnorm's expected parameters.
        sigma = (max_size - min_size) / 5.
        mu = (max_size - min_size) / 2.
        size = int(rtnorm.rtnorm(min_size, max_size, sigma=sigma, mu=mu, size=1))
        # Reject non even sizes
        while (size % 2) != 0:
            size = int(rtnorm.rtnorm(min_size, max_size, sigma=sigma, mu=mu, size=1))
        return make_payload(size)
    # Fixed: original format string had no argument for %s.
    G_LOGGER.error('Unknown distribution type %s', dist_type)
    return '0x00'
def parse_targets(enclave_dump_path, waku_port=8545):
    """Collect 'host:port' RPC addresses of Waku nodes from a Kurtosis enclave dump.

    Walks the dump directory, reads each waku_* service's spec.json, and
    extracts the host mapping of the node's RPC TCP port.
    """
    targets = []
    G_LOGGER.info('Extracting Waku node addresses from Kurtosus enclance dump in %s' %enclave_dump_path)
    for path_obj in os.walk(enclave_dump_path):
        # path_obj[0] is the directory path; only waku service folders matter.
        if 'waku_' in path_obj[0]:
            with open(path_obj[0] + '/spec.json', "r") as read_file:
                spec_obj = json.load(read_file)
                network_settings = spec_obj['NetworkSettings']
                waku_address = network_settings['Ports']['%d/tcp' %waku_port]
                targets.append('%s:%s' %(waku_address[0]['HostIp'], waku_address[0]['HostPort']))
    G_LOGGER.info('Parsed %d Waku nodes' %len(targets))
    return targets
def get_next_time_to_msg(inter_msg_type, msg_rate, simulation_time):
    """Compute the waiting time (seconds) before the next injected message."""
    if inter_msg_type == 'uniform':
        # Evenly spaced: total simulation time divided by the message count.
        return simulation_time / msg_rate
    if inter_msg_type == 'poisson':
        # Exponential inter-arrival time for a Poisson process.
        return poisson_interval(msg_rate)
    # Unknown scheduling scheme: log and abort the simulator.
    G_LOGGER.error('%s is not a valid inter_msg_type. Aborting.' %inter_msg_type)
    sys.exit()
def get_all_messages_from_node_from_topic(node_address, topic):
    """Count all stored messages for `topic` on a node, following pagination.

    Returns the total message count, or 0 if the first page request errors.
    """
    page_cnt = 0
    msg_cnt = 0
    # Retrieve the first page
    response, elapsed = get_waku_msgs(node_address, topic)
    if 'error' in response:
        G_LOGGER.error(response['error'])
        return 0
    messages = response['result']['messages']
    msg_cnt += len(messages)
    G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic))
    # NOTE(review): the decoded payload below is discarded — this loop only
    # validates that each payload parses as JSON.
    for msg_idx, msg in enumerate(messages):
        # Decode the payload
        payload_obj = json.loads(''.join(map(chr, msg['payload'])))
    # Retrieve further pages
    while(response['result']['pagingOptions']):
        page_cnt += 1
        cursor = response['result']['pagingOptions']['cursor']
        # NOTE(review): `index` is built but never used — the raw cursor is
        # passed to get_waku_msgs instead; confirm which shape the RPC expects.
        index = {"digest" : cursor['digest'], "receivedTime" : cursor['receiverTime']}
        response, elapsed = get_waku_msgs(node_address, topic, cursor)
        if 'error' in response:
            G_LOGGER.error(response['error'])
            break
        messages = response['result']['messages']
        msg_cnt += len(messages)
        G_LOGGER.debug('Got page %d with %d messages from node %s and topic: %s' %(page_cnt, len(messages), node_address, topic))
        for msg_idx, msg in enumerate(messages):
            # Decode the payload
            payload_obj = json.loads(''.join(map(chr, msg['payload'])))
    return msg_cnt
def main():
    """Entry point: load config/targets/topics, inject traffic into the Waku
    network for the configured duration, then count stored messages and
    write a summary.json report."""
    global G_LOGGER
    """ Init Logging """
    G_LOGGER = logging.getLogger(G_APP_NAME)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(CustomFormatter())
    G_LOGGER.addHandler(handler)
    G_LOGGER.info('Started')
    """ Parse command line args. """
    parser = argparse.ArgumentParser()
    # NOTE(review): action="store_true" makes -cfg a boolean flag, so passing
    # a path here cannot work and using the flag sets config_file to True —
    # the default string path only works when the flag is omitted.
    parser.add_argument("-cfg", "--config_file", help="Config file", action="store_true", default=G_DEFAULT_CONFIG_FILE)
    args = parser.parse_args()
    config_file = args.config_file
    """ Load config file """
    try:
        with open(config_file, 'r') as f:
            config = yaml.safe_load(f)
    except Exception as e:
        G_LOGGER.error('%s: %s' % (e.__doc__, e))
        sys.exit()
    # Set loglevel from config
    G_LOGGER.setLevel(config['general']['debug_level'])
    handler.setLevel(config['general']['debug_level'])
    G_LOGGER.debug(config)
    G_LOGGER.info('Configuration loaded from %s' %config_file)
    # Set RPNG seed from config
    random.seed(config['general']['prng_seed'])
    """ Load targets """
    try:
        with open(config['general']['targets_file'], 'r') as read_file:
            targets = json.load(read_file)
    except Exception as e:
        G_LOGGER.error('%s: %s' % (e.__doc__, e))
        sys.exit()
    if len(targets) == 0:
        G_LOGGER.error('Cannot find valid targets. Aborting.')
        sys.exit(1)
    G_LOGGER.debug(targets)
    G_LOGGER.info('%d targets loaded' %len(targets))
    """ Check all nodes are reachable """
    for i, target in enumerate(targets):
        if not check_waku_node('http://%s/' %target):
            G_LOGGER.error('Node %d (%s) is not online. Aborted.' %(i, target))
            sys.exit(1)
    G_LOGGER.info('All %d Waku nodes are reachable.' %len(targets))
    """ Load Topics """
    topics = []
    try:
        # One toml per node; sorted so topics[i] lines up with targets[i].
        tomls = glob.glob('./tomls/*.toml')
        tomls.sort()
        for toml_file in tomls:
            with open(toml_file, mode='rb') as read_file:
                toml_config = tomllib.load(read_file)
                node_topics_str = toml_config['topics']
                topics.append(list(node_topics_str.split(' ')))
    except Exception as e:
        G_LOGGER.error('%s: %s' % (e.__doc__, e))
        sys.exit()
    # Dictionary to count messages of every topic being sent
    topics_msg_cnt = {}
    for node_topics in topics:
        for topic in node_topics:
            topics_msg_cnt[topic] = 0
    G_LOGGER.info('Loaded nodes topics from toml files: %s' %topics_msg_cnt.keys())
    """ Define the subset of emitters """
    num_emitters = int(len(targets) * config['general']['emitters_fraction'])
    if num_emitters == 0:
        G_LOGGER.error('The number of emitters must be greater than zero. Try increasing the fraction of emitters.')
        sys.exit()
    """ NOTE: Emitters will only inject topics they are subscribed to """
    emitters_indices = random.sample(range(len(targets)), num_emitters)
    emitters = [targets[i] for i in emitters_indices]
    emitters_topics = [topics[i] for i in emitters_indices]
    # emitters = random.sample(targets, num_emitters)
    G_LOGGER.info('Selected %d emitters out of %d total nodes' %(len(emitters), len(targets)))
    """ Start simulation """
    stats = {}
    msg_cnt = 0
    failed_cnt = 0
    bytes_cnt = 0
    s_time = time.time()
    last_msg_time = 0
    next_time_to_msg = 0
    G_LOGGER.info('Starting a simulation of %d seconds ...' %config['general']['simulation_time'])
    while True:
        # Check end condition
        elapsed_s = time.time() - s_time
        if elapsed_s >= config['general']['simulation_time']:
            G_LOGGER.info('Simulation ended. Sent %d messages (%d bytes) in %ds.' %(msg_cnt, bytes_cnt, elapsed_s))
            break
        # Send message
        # BUG: There is a constant discrepancy. The average number of messages sent by time interval is slightly less than expected
        # NOTE(review): this is a busy-wait loop (no sleep) — it burns a full
        # CPU core while waiting for the next send slot.
        msg_elapsed = time.time() - last_msg_time
        if msg_elapsed <= next_time_to_msg:
            continue
        G_LOGGER.debug('Time Δ: %.6f ms.' %((msg_elapsed - next_time_to_msg) * 1000.0))
        # Pick an emitter at random from the emitters list
        # NOTE(review): emitter_idx is an index into `targets`, but it is used
        # to index the shorter `emitters`/`emitters_topics` lists below —
        # likely IndexError / wrong-node bug; should index range(len(emitters)).
        emitter_idx = random.choice(emitters_indices)
        node_address = 'http://%s/' %emitters[emitter_idx]
        emitter_topics = emitters_topics[emitter_idx]
        # Pick a topic at random from the topics supported by the emitter
        emitter_topic = random.choice(emitter_topics)
        G_LOGGER.info('Injecting message of topic %s to network through Waku node %s ...' %(emitter_topic, node_address))
        payload = make_payload_dist(dist_type=config['general']['dist_type'].lower(), min_size=config['general']['min_packet_size'], max_size=config['general']['max_packet_size'])
        response, elapsed = send_waku_msg(node_address, topic=emitter_topic, payload=payload, nonce=msg_cnt)
        if response['result']:
            msg_cnt += 1
            topics_msg_cnt[emitter_topic] += 1
        else:
            G_LOGGER.info('Message failed!')
            failed_cnt += 1
        # Compute the time to next message
        next_time_to_msg = get_next_time_to_msg(config['general']['inter_msg_type'], config['general']['msg_rate'], config['general']['simulation_time'])
        G_LOGGER.debug('Next message will happen in %d ms.' %(next_time_to_msg * 1000.0))
        last_msg_time = time.time()
    elapsed_s = time.time() - s_time
    # Retrieve messages from every node and topic
    G_LOGGER.info('Retriving messages from the enclave ...')
    for node_idx, target in enumerate(targets):
        node_address = 'http://%s/' %target
        for topic_idx, topic in enumerate(topics[node_idx]):
            # NOTE(review): this clobbers the injected-message counter, so the
            # "total_messages" reported below is actually the last retrieval
            # count, not the number of messages sent.
            msg_cnt = get_all_messages_from_node_from_topic(node_address, topic)
            msg_lost = topics_msg_cnt[topic] - msg_cnt
            G_LOGGER.info('- Retrieved %d messages on topic %s from node %s. Lost %d message(s).' %(msg_cnt, topic, node_address, msg_lost))
    # Output
    summary = {
        "end_ts" : time.time(),
        "params" : config['general'],
        "topics" : list(topics_msg_cnt.keys()),
        "topics_msg_cnt" : topics_msg_cnt,
        "simulation_time" : elapsed_s,
        "total_messages" : msg_cnt,
        "avg_latency" : 0,
        "max_latency" : 0,
        "min_latency" : 0
    }
    G_LOGGER.info('Simulation sumnmary: %s' %summary)
    with open('./summary.json', 'w') as summary_file:
        summary_file.write(json.dumps(summary, indent=4))
    """ We are done """
    G_LOGGER.info('Ended')
if __name__ == "__main__":
main()
| alrevuelta/wakurtosis | wsl-module/wsl.py | wsl.py | py | 15,209 | python | en | code | null | github-code | 90 |
18429577189 | from collections import deque
N=int(input())
b=list(map(int,input().split()))
flag=0
op=deque()
#1→12→122→1232→11232→121232→1221232→11221232→111221232
#print(b)
# Greedily undo the construction: repeatedly find the rightmost position i
# where b[i] == i+1, remove it and record it at the front of `op`.
while len(b)>0:
    for i in range(len(b)-1,-1,-1):
        if b[i]==i+1:
            #print(b[i])
            op.appendleft(b.pop(i))
            flag=1
            break
    # No removable element found: the sequence is impossible.
    if flag==0:
        break
    flag=0
    #print(op,b)
#print(op)
if len(b)==0:
    for i in range(N):
        print(op.popleft())
else:
    print("-1")
| Aasthaengg/IBMdataset | Python_codes/p03089/s749279218.py | s749279218.py | py | 505 | python | en | code | 0 | github-code | 90 |
25599000076 | from django.contrib import admin
from django.urls import path
from home.views import *
# Branding strings shown in the Django admin UI.
admin.site.site_header = "Raj Tours Admin"
admin.site.site_title = "Raj Tours Admin Portal"
admin.site.index_title = "Welcome to Raj Tours"
# URL routing: admin plus the public pages served by home.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path("",index,name='home'),
    path("about/",about,name='about'),
    path("services/",services,name='services'),
    path("contact/",contactUs,name='contact'),
    path("login/",loginUser,name="login"),
    path("logout/",logoutUser,name="logout" ),
]
| AkshayKamble2312/web_devlopment_projects | firstproject/rajtours/home/urls.py | urls.py | py | 544 | python | en | code | 0 | github-code | 90 |
9891441718 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 28 21:37:43 2018
@author: William Keilsohn
"""
'''
Count the accurance of a given character in a string.
'''
# Import packages
import re
inString = input('Please enter a string: ')
inChar = input('Please enter a single character to search for: ')
def charFinder(string, char):
    """Print how many times `char` occurs literally in `string`.

    Bug fixed: the character is now escaped before being used as a regex
    pattern, so metacharacters like '.' or '*' are counted literally
    instead of being interpreted by the regex engine.
    """
    occurrences = len(re.findall(re.escape(char), string))
    print(occurrences)
charFinder(inString, inChar)
## Print is used to display to the console.
## Return is also a viable option here | wkeilsohn/Python-Interview-Problems | Character_counter.py | Character_counter.py | py | 557 | python | en | code | 0 | github-code | 90 |
18194038099 | # import sys
# input = sys.stdin.readline
import itertools
import collections
from decimal import Decimal
from functools import reduce
# 持っているビスケットを叩き、1枚増やす
# ビスケット A枚を 1円に交換する
# 1円をビスケット B枚に交換する
def main():
    """Read n integers; for each one print the XOR of all the others."""
    n = int(input())
    numbers = input_list()
    ans = []
    # XOR of the whole list; XOR-ing an element back out removes it, so
    # s ^ numbers[i] equals the XOR of every element except numbers[i].
    s = reduce(lambda a, b: a ^ b, numbers)
    for i in range(n):
        ans.append(s ^ numbers[i])
    print(*ans)
def prime_factorize(n):
    """Return the prime factors of n in ascending order, with multiplicity."""
    factors = []
    # Strip all factors of two first so trial division can step by 2.
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 2
        else:
            factors.append(divisor)
            n //= divisor
    # Whatever remains above 1 is itself prime.
    if n != 1:
        factors.append(n)
    return factors
def bfs(H, W, black_cells, dist):
    """Multi-source BFS over an H x W grid.

    `black_cells` is a deque of seed coordinates and `dist` the distance
    grid with -1 marking unvisited cells (mutated in place).  Returns the
    distance of the last cell dequeued, i.e. the maximum distance reached.
    """
    d = 0
    offsets = ((1, 0), (0, 1), (-1, 0), (0, -1))
    while black_cells:
        row, col = black_cells.popleft()
        d = dist[row][col]
        for drow, dcol in offsets:
            nrow, ncol = row + drow, col + dcol
            if not (0 <= nrow < H and 0 <= ncol < W):
                continue
            if dist[nrow][ncol] == -1:
                dist[nrow][ncol] = d + 1
                black_cells.append((nrow, ncol))
    return d
def input_list():
    # Read one whitespace-separated line of integers from stdin.
    return list(map(int, input().split()))
def input_list_str():
    # Read one whitespace-separated line of tokens from stdin as strings.
    return list(map(str, input().split()))
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p02631/s849016927.py | s849016927.py | py | 1,403 | python | en | code | 0 | github-code | 90 |
20748158127 | # Take refresh token and encoded auth string and return new tokens
def get_new_token():
    """Exchange the stored Spotify refresh token for a fresh access token.

    Reads REFRESH_TOKEN and the base64 AUTH_VALUE from user_data.json and
    POSTs them to Spotify's token endpoint.  Returns (access_token,
    expires_in).
    """
    import requests
    import json
    JSON_FILE_DIRECTORY = r'user_data.json'
    TOKEN_URL = 'https://accounts.spotify.com/api/token'
    # NOTE(review): the file handle is never closed; a `with` block would be safer.
    open_file = open(JSON_FILE_DIRECTORY) #Open json into variable
    json_data = json.load(open_file) #Load json into variable
    refresh_token = json_data['REFRESH_TOKEN']
    auth_value = json_data['AUTH_VALUE']
    # Refresh Token request data
    data = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
    }
    # Refresh Token request headers
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + auth_value
    }
    # Exchange refresh token for new acces token
    refresh_response = requests.post(TOKEN_URL, data=data,headers=headers)
    # Convert json data into strings
    access_token = refresh_response.json()['access_token']
    expires_in = refresh_response.json()['expires_in']
    return access_token, expires_in
| Gavie05/Streamer-Queue | token_refresh.py | token_refresh.py | py | 1,106 | python | en | code | 0 | github-code | 90 |
18548902899 | def actual(A, B, K):
min_left = A
max_left = min(A + (K - 1), B)
min_right = max(B - (K - 1), max_left + 1)
max_right = B
left = set(range(min_left, max_left + 1))
right = set(range(min_right, max_right + 1))
unique_nums = left | right
return '\n'.join(map(str, sorted(unique_nums)))
A, B, K = map(int, input().split())
print(actual(A, B, K)) | Aasthaengg/IBMdataset | Python_codes/p03386/s590165116.py | s590165116.py | py | 379 | python | en | code | 0 | github-code | 90 |
73529831976 | import pickle
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
from diagnostics import model_predictions
###############Load config.json and get path variables
with open('config.json','r') as f:
config = json.load(f)
dataset_csv_path = os.path.join(os.path.abspath(os.getcwd()),config['output_folder_path'])
test_data_path = os.path.join(os.path.abspath(os.getcwd()),config['test_data_path'])
model_path = os.path.join(os.path.abspath(os.getcwd()),config['output_model_path'])
##############Function for reporting
def score_model():
    """Score the deployed model on the test data and save a confusion-matrix
    heatmap (confusionmatrix.png) into the model output folder."""
    #calculate a confusion matrix using the test data and the deployed model
    #write the confusion matrix to the workspace
    test_data_file = os.path.join(test_data_path, 'testdata.csv')
    df = pd.read_csv(test_data_file)
    # Features are everything except the id column and the label.
    drop_columns= ['corporation', 'exited']
    X = df.drop(drop_columns, axis=1)
    y = df['exited']
    y_pre = model_predictions(X)
    cf_matrix = metrics.confusion_matrix(y, y_pre)
    cf_matrix_png = os.path.join(model_path, 'confusionmatrix.png')
    ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues')
    ax.set_title('Seaborn Confusion Matrix\n');
    ax.set_xlabel('Predicted Values')
    ax.set_ylabel('Actual Values ');
    ## Ticket labels - List must be in alphabetical order
    ax.xaxis.set_ticklabels(['False','True'])
    ax.yaxis.set_ticklabels(['False','True'])
    # bbox_inches Set it as “tight” for proper fit of the saved figure.
    ax.figure.savefig(cf_matrix_png, dpi=300, bbox_inches='tight')
    plt.show()
    # #The other method
    # disp = metrics.ConfusionMatrixDisplay(confusion_matrix=cf_matrix)
    # disp.plot()
    # plt.title('Confusion Matrix\n')
    # plt.savefig('confusionmatrix.png', dpi=300, bbox_inches='tight')
    # plt.show()
if __name__ == '__main__':
score_model()
| lcwcharles/a-dynamic-risk-assessment-system | reporting.py | reporting.py | py | 1,951 | python | en | code | 0 | github-code | 90 |
74785351977 | minim = -100000
def RodCutting(price, n):
    """Rod cutting via dynamic programming.

    price[j] is the price of a piece of length j+1; n is the rod length.
    Returns (best_value, cuts): the maximum obtainable value and a list of
    piece lengths achieving it.

    Bug fixed: the original initialised max_val once OUTSIDE the length
    loop, so the maximum leaked between lengths — val[i] could keep a stale
    value from a shorter rod while maintain_len[i] stayed empty.
    """
    val = [0] * (n + 1)
    cuts = [[] for _ in range(n + 1)]
    cuts[0] = [0]
    max_piece = len(price)
    for i in range(1, n + 1):
        best = float('-inf')  # reset for every sub-length (the bug fix)
        for j in range(min(max_piece, i)):
            candidate = price[j] + val[i - j - 1]
            if candidate > best:
                best = candidate
                # First piece of length j+1, then the best cuts of the rest.
                cuts[i] = [j + 1] + (cuts[i - j - 1] if i - j - 1 > 0 else [])
        val[i] = best
    return val[n], cuts[n]
arr = [2,6,10,2]
size = len(arr)
print("Maximum Value is " + str(RodCutting(arr, 5)[0]))
print("Number of cuts " + str(RodCutting(arr, 5)[1])) | codejigglers/leetcodes | Rod_cutting_problem.py | Rod_cutting_problem.py | py | 772 | python | en | code | 0 | github-code | 90 |
18443877229 | N=int(input())
A=list(map(int,input().strip().split()))
A.sort()
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm (b must be non-zero)."""
    r = a % b
    while r:
        a, b = b, r
        r = a % b
    return b
# Fold gcd over every element; seeding with A[0] is safe because
# gcd(A[0], A[0]) == A[0].
ans=A[0]
for n in range(N):
    ans=gcd(ans,A[n])
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03127/s469818631.py | s469818631.py | py | 244 | python | en | code | 0 | github-code | 90 |
18257368769 | import sys
import numpy as np
import math as mt
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# Pattern repeats with period (a + b); only the first a positions of each
# cycle are counted within the first n positions.
n, a, b = map(int, readline().split())
# Full cycles contribute a each.
ans = a * (n//(a + b))
# The partial final cycle contributes min(n % (a + b), a).
if n%(a+b) < a:
    ans += n%(a+b)
else:
    ans += a
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02754/s329777931.py | s329777931.py | py | 285 | python | en | code | 0 | github-code | 90 |
43136646063 | import pygame
from scripts.common.utils import State
PLAYER_SPEED = 1
JUMP_HEIGHT = 7
GRAVITY = 0.5
class Player(pygame.sprite.Sprite):
    """Controllable platformer player: keyboard movement, gravity, jumping,
    axis-separated platform collisions, animation state and per-channel
    sound effects."""

    def __init__(self, game, assets, *groups, **kwargs):
        # NOTE(review): pygame's Sprite.__init__ is normally called with
        # *groups; passing the tuple relies on Sprite.add() unwrapping
        # iterables — confirm this is intentional.
        super().__init__(groups)
        self.game = game
        self.assets = assets
        self.image = pygame.image.load("./data/images/player/player.png")
        self.rect = self.image.get_rect()
        self.rect.x = 100
        self.sounds = game.sounds
        # Horizontal speed, optionally overridden by the caller.
        self.player_speed = kwargs.get("player_speed", PLAYER_SPEED)
        self.velocity = pygame.Vector2(0, 0)
        self.can_jump = False
        self.is_alive = True
        self.action = ""
        self.anim_offset = (+2, +2)
        # True when facing left (sprite mirrored on blit).
        self.flip = False
        # The run sound loops forever on its own channel and is only
        # paused/unpaused when the action changes (see set_action).
        self.game.channels["player_run"].set_volume(0.1)
        self.game.channels["player_run"].play(self.sounds["player/run"], loops=-1)
        self.game.channels["player_run"].pause()
        self.health = 100
        self.set_action("idle")

    def update(
        self,
        **kwargs: dict[str, pygame.sprite.Group],
    ):
        """Advance one frame.

        Expects kwargs["platforms"] and kwargs["next"] sprite groups.
        Handles input, gravity, collision (x axis first, then y so each
        axis is corrected independently), facing/animation and the
        fell-off-the-level death check.
        """
        keys = pygame.key.get_pressed()
        self.velocity.x = 0
        if keys[pygame.K_LEFT] or keys[pygame.K_a]:
            self.velocity.x = -self.player_speed
        if keys[pygame.K_RIGHT] or keys[pygame.K_d]:
            self.velocity.x = self.player_speed
        self.velocity.y += GRAVITY
        # Move and resolve each axis separately.
        self.rect.x += self.velocity.x
        self.check_collision_x(kwargs["platforms"], kwargs["next"])
        self.rect.y += self.velocity.y
        self.check_collision_y(kwargs["platforms"], kwargs["next"])
        if self.velocity.x > 0:
            self.flip = False
            self.set_action("run")
        if self.velocity.x < 0:
            self.flip = True
            self.set_action("run")
        if self.velocity.x == 0:
            self.set_action("idle")
        # A very large downward velocity means the player fell off the level.
        if self.velocity.y > 80:
            self.is_alive = False
            self.game.set_state(State.GAME_OVER)
        self.image = pygame.transform.flip(self.animation.img(), self.flip, False)
        self.animation.update()

    def check_collision_x(
        self,
        platforms_group: pygame.sprite.Group,
        next_level_group: pygame.sprite.Group,
    ):
        """Resolve horizontal overlap with platforms; touching a 'next'
        tile while moving horizontally triggers the level transition."""
        hits = pygame.sprite.spritecollide(self, platforms_group, False)
        for hit in hits:
            if self.velocity.x > 0:
                self.rect.right = hit.rect.left
            elif self.velocity.x < 0:
                self.rect.left = hit.rect.right
        hits = pygame.sprite.spritecollide(self, next_level_group, False)
        for hit in hits:
            if self.velocity.x != 0:
                self.game.set_state(State.NEXT_LEVEL)

    def check_collision_y(
        self,
        platforms_group: pygame.sprite.Group,
        next_level_group: pygame.sprite.Group,
    ):
        """Resolve vertical overlap with platforms (landing re-enables the
        jump); touching a 'next' tile while moving vertically triggers the
        level transition."""
        hits = pygame.sprite.spritecollide(self, platforms_group, False)
        for hit in hits:
            if self.velocity.y > 0:
                # Landed on top of a platform.
                self.rect.bottom = hit.rect.top
                self.velocity.y = 0
                self.can_jump = True
            elif self.velocity.y < 0:
                # Bumped the underside of a platform.
                self.rect.top = hit.rect.bottom
                self.velocity.y = 0
        hits = pygame.sprite.spritecollide(self, next_level_group, False)
        for hit in hits:
            if self.velocity.y != 0:
                self.game.set_state(State.NEXT_LEVEL)

    def jump(self):
        """Start a jump if the player is standing on a platform."""
        if self.can_jump:
            self.velocity.y = -JUMP_HEIGHT
            self.can_jump = False
            self.set_action("jump")

    def set_action(self, action: str) -> None:
        """Switch the animation/sound state; no-op when the action is
        unchanged. Pauses/unpauses the looping run channel as needed."""
        if self.action != action:
            self.action = action
            self.animation = self.assets["player/" + action].copy()
            if self.game.sound_enabled:
                sound = self.sounds.get("player/" + action)
                if action == "jump":
                    self.game.channels["player_run"].pause()
                    self.game.channels["player"].play(sound)
                    self.game.channels["player"].set_volume(0.2)
                elif action == "run":
                    self.game.channels["player_run"].unpause()
                elif action == "idle":
                    self.game.channels["player_run"].pause()
                else:
                    self.game.channels["player_run"].pause()

    def hit(self):
        """Apply 20 damage; at 0 health the player dies and the game ends."""
        self.health -= 20
        if self.health <= 0:
            self.is_alive = False
            self.game.set_state(State.GAME_OVER)
| lsglucas/hurricane-in-hawaii | scripts/sprites/player.py | player.py | py | 4,496 | python | en | code | 0 | github-code | 90 |
17956236719 | import math
from typing import List, Counter, Tuple
from collections import Counter
from itertools import permutations
def read_int() -> int:
    """Read one line from stdin and parse it as a single integer."""
    line = input()
    return int(line.strip())
def read_ints() -> List[int]:
    """Read one line of single-space-separated integers from stdin."""
    tokens = input().strip().split(' ')
    return [int(token) for token in tokens]
def solve() -> int:
    """Cheapest walk visiting all R marked towns in some order.

    Floyd-Warshall yields all-pairs shortest distances; a brute-force scan
    over every visiting order of the marked towns picks the minimum.
    """
    n, m, _r = read_ints()
    towns = [t - 1 for t in read_ints()]
    INF = 10 ** 10
    dist = [[INF] * n for _ in range(n)]
    for v in range(n):
        dist[v][v] = 0
    for _ in range(m):
        a, b, c = read_ints()
        dist[a - 1][b - 1] = c
        dist[b - 1][a - 1] = c
    # All-pairs shortest paths.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                alt = dist[i][k] + dist[k][j]
                if alt < dist[i][j]:
                    dist[i][j] = alt
    best = INF
    for order in permutations(towns):
        cost = sum(dist[u][v] for u, v in zip(order, order[1:]))
        best = min(best, cost)
    return best
# Entry point: read the problem input from stdin and print the answer.
if __name__ == '__main__':
    print(solve())
| Aasthaengg/IBMdataset | Python_codes/p03608/s281123310.py | s281123310.py | py | 965 | python | en | code | 0 | github-code | 90 |
32276575457 | import itertools
import os.path as path
import numpy as np
import pandas as pd
from src import constants
def load_feature_set(name):
    """Load the raw tagged feature set 'msd-<name>' as a NumPy array."""
    csv_file = path.join(constants.RAW_TAGGED_FEATURE_SET_PATH,
                         'msd-' + name + '/msd-' + name + '.csv')
    frame = pd.read_csv(csv_file, header=None)
    return np.array(frame)
# CSV files holding the precomputed base/meta id splits (column 0 = track id).
base_file_path = path.join(constants.DATA_PATH, 'marsyas_base_split.csv')
meta_file_path = path.join(constants.DATA_PATH, 'marsyas_meta_split.csv')
ids_base = np.array(pd.read_csv(base_file_path, header=None).values[:, 0])
ids_meta = np.array(pd.read_csv(meta_file_path, header=None).values[:, 0])
dataset = load_feature_set('jmirmfccs_dev')
base_set = []
meta_set = []
# Pick the feature row matching each split id (id stored in column 0).
# NOTE(review): a linear np.where per id is O(n*m); an id -> row-index dict
# would be much faster for large splits.
for id in ids_base:
    i = np.where(dataset[:, 0] == id)
    base_set.append(dataset[i][0])
for id in ids_meta:
    i = np.where(dataset[:, 0] == id)
    meta_set.append(dataset[i][0])
base_set = pd.DataFrame(base_set)
meta_set = pd.DataFrame(meta_set)
# Write the aligned base split for the jmirmfccs feature set.
base_set.to_csv(path_or_buf=path.join(constants.DATA_PATH, 'jmirmfccs_base_split.csv'), header=False, index=False)
meta_set.to_csv(path_or_buf=path.join(constants.DATA_PATH, 'jmirmfccs_meta_split.csv'), header=False, index=False) | EngineerLaroche/MusicTypeDetection | scripts/split_other_dataset.py | split_other_dataset.py | py | 1,179 | python | en | code | 0 | github-code | 90 |
348029338 | from parsimonious.nodes import Node
from parsimonious.grammar import Grammar, NodeVisitor
class Range:
    """Numeric interval with optionally open endpoints and "*" wildcards.

    floor/ceil mark an unbounded lower/upper end (both True == "*", match
    everything). val_type selects how probes are normalised before the
    comparison: "int" rounds to the nearest integer, "float" rounds to
    `precision` decimal places when precision > 0.
    """

    start_open: bool
    end_open: bool
    val_type: str
    precision: int
    start: float
    end: float
    floor: bool
    ceil: bool

    def __init__(
        self,
        start_open: bool = False,
        end_open: bool = False,
        val_type: str = "int",
        precision: int = 0,
        start: float = 0,
        end: float = 0,
        floor: bool = False,
        ceil: bool = False,
    ) -> None:
        # Endpoint values and their openness.
        self.start = start
        self.end = end
        self.start_open = start_open
        self.end_open = end_open
        # Probe normalisation settings.
        self.val_type = val_type
        self.precision = precision
        # Wildcard markers for the lower/upper ends.
        self.floor = floor
        self.ceil = ceil

    def check(self, item: float) -> bool:
        """Return True when *item* falls inside this range."""
        # "*" — unbounded on both sides accepts everything.
        if self.floor and self.ceil:
            return True
        # Normalise the probe to the declared type / precision.
        # (round() uses Python's bankers' rounding, not strict half-up.)
        if self.val_type == "int":
            item = int(round(item, 0))
        elif self.precision > 0 and self.val_type == "float":
            item = round(item, self.precision)
        # Lower bound.
        if self.floor:
            above = True
        elif self.start_open:
            above = item > self.start
        else:
            above = item >= self.start
        # Upper bound.
        if self.ceil:
            below = True
        elif self.end_open:
            below = item < self.end
        else:
            below = item <= self.end
        return above and below
class RangeVisitor(NodeVisitor):
    """Parsimonious visitor that folds a parsed range expression into a
    Range object.

    Convention throughout: when a grammar alternative did NOT match, the
    visited child is a raw parsimonious ``Node``; when it DID match, it is
    the tuple/value a visit_* method returned — hence the ``type(x) == Node``
    checks below.
    """

    def visit_expr(self, node, visited_children):
        """Top-level rule: either a single spot ('*' or a number) or a
        bracketed range with an optional '-> type' suffix."""
        p = Range()
        item = visited_children[0]
        # spot
        if type(item) == Node and item.text.strip() == "*":
            # Bare "*": matches everything.
            p.ceil = True
            p.floor = True
        elif type(item) == tuple:
            # Single number: degenerate closed range [x, x].
            p.start_open = False
            p.end_open = False
            p.val_type = "float" if item[1] else "int"
            p.start = p.end = item[0]
        else:
            # (range, optional type suffix)
            rg, tp = item
            p.start_open = rg[0]
            p.end_open = rg[3]
            # check first spot
            if type(rg[1]) == Node:
                # '*' lower bound.
                p.floor = True
            else:
                p.start = rg[1][0]
                p.val_type = "float" if rg[1][1] else "int"
            # check second spot
            if type(rg[2]) == Node:
                # '*' upper bound.
                p.ceil = True
            else:
                p.end = rg[2][0]
                p.val_type = "float" if rg[2][1] else "int"
            # override type (if specified via '-> int' / '-> float(p)')
            if type(tp) != Node:
                p.val_type = tp[0][0]
                p.precision = tp[0][1]
                if p.val_type == "float" and p.precision > 0:
                    # Pre-round the bounds to the declared precision.
                    p.start = round(p.start, p.precision)
                    p.end = round(p.end, p.precision)
        return p

    def visit_range(self, node, visited_children):
        """'(a, b)' / '[a, b]' → (open?, spot_a, spot_b, close?)."""
        op, n1, _, n2, cl = visited_children
        return op, n1, n2, cl

    def visit_range_open(self, node, visited_children):
        # True for an open '(' bracket, False for '['.
        return node.text.strip() == "("

    def visit_range_close(self, node, visited_children):
        # True for an open ')' bracket, False for ']'.
        return node.text.strip() == ")"

    def visit_type_expr(self, node, visited_children):
        """'-> <type>' suffix; returns the (name, precision) pair."""
        _, _, _, t = visited_children
        return t

    def visit_spot(self, node, visited_children):
        # Either a (value, is_float) tuple or the raw '*' Node.
        return visited_children[0]

    def visit_type(self, node, visited_children):
        return visited_children[0]

    def visit_int(self, node, visited_children):
        # ("int", precision 0)
        return "int", 0

    def visit_float(self, node, visited_children):
        # ("float", precision) — precision 0 when no '(digits)' was given.
        _, p = visited_children
        return "float", 0 if type(p) == Node else p[0]

    def visit_paren(self, node, visited_children):
        # '(digits)' — strip the brackets and keep the number.
        _, d, _ = visited_children
        return d

    def visit_digits(self, node, visited_children):
        # inferred type is int
        return int(node.text)

    def visit_number(self, node, visited_childern):
        # Value plus a flag: the literal is a float iff it contains '.'.
        return float(node.text), "." in node.text

    def generic_visit(self, node, visited_children):
        return visited_children or node
class RangeParser:
    """Compiles the range grammar once and turns source strings such as
    '*', '3.5', '[1, 10)' or '(0, * ] -> float(2)' into Range objects."""

    def __init__(self):
        # PEG grammar for range expressions (parsimonious syntax).
        _rangebnf = r"""
        expr        = spot / (range type_expr?)
        range       = range_open spot comma spot range_close
        range_open  = beg_open / beg_closed
        range_close = end_open / end_closed
        beg_open    = ws? "(" ws?
        end_open    = ws? ")" ws?
        beg_closed  = ws? "[" ws?
        end_closed  = ws? "]" ws?
        comma       = ws? "," ws?
        type_expr   = ws? "->" ws? type
        type        = int / float
        int         = "int"
        float       = "float" paren?
        paren       = beg_open digits end_open
        spot        = number / "*"
        ws          = ~r"\s*"
        digits      = ~r"\d+"
        number      = ~r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
        """
        self.grammar = Grammar(_rangebnf)

    def parse(self, source: str) -> Range:
        """Parse *source* and fold the tree into a Range (raises
        parsimonious.ParseError on malformed input)."""
        g = self.grammar.parse(source)
        sv = RangeVisitor()
        p = sv.visit(g)
        return p
| sethjuarez/fibberio | fibberio/range.py | range.py | py | 5,099 | python | en | code | 5 | github-code | 90 |
657053692 | from ..locators.executive_secretary_locators import ExecutiveSecretaryLocators
from ..components.button import Button
from ..components.text_box import TextBox
class ExecutiveSecretaryPage(Button, TextBox):
    """Page object for the executive-secretary area (claims and cases menus)."""

    def open_new_case_add_steps(self):
        """Open the 'new claim' form through the claim menu, logging progress."""
        if not self.is_element_present(*ExecutiveSecretaryLocators.MENU_CLAIM):
            print('You can not continue your test...')
            return
        self.browser.find_element(*ExecutiveSecretaryLocators.MENU_CLAIM).click()
        if not self.is_element_present(*ExecutiveSecretaryLocators.NEW_CLAIM_LINK):
            print('There is no NEW CLAIM LINK ...')
            return
        self.browser.find_element(*ExecutiveSecretaryLocators.NEW_CLAIM_LINK).click()
        print('You can continue your test..')

    def open_active_cases_list(self):
        """Navigate to the active-cases list through the cases menu."""
        if not self.is_element_present(*ExecutiveSecretaryLocators.MENU_CASES):
            print('You can not continue your test... You do not have access to active cases!')
            return
        self.browser.find_element(*ExecutiveSecretaryLocators.MENU_CASES).click()
        if not self.is_element_present(*ExecutiveSecretaryLocators.ACTIVE_CASES_LINK):
            print('There is NO ACTIVE CASES link...')
            return
        self.browser.find_element(*ExecutiveSecretaryLocators.ACTIVE_CASES_LINK).click()
        print('You can continue your test with active cases...')

    def open_active_case(self):
        """Open the first active case via its edit button."""
        self.click_button(*ExecutiveSecretaryLocators.FIRST_CASE_EDIT_BTN, 'FIRST ACTIVE CASE EDIT')
18135963686 | # User-initiated helper script for parsing Ensembl FASTA Files to dataframes and saving
import pandas as pd
from Bio import SeqIO
def main(fasta_path):
    """Parse an Ensembl protein FASTA into a DataFrame and save it as CSV.

    Extracts gene symbol, Ensembl gene id and transcript id from each
    record's description line; the CSV is written next to the input file
    (".fa*" suffix replaced by ".csv"). Returns the DataFrame.
    """
    rows = []
    for record in SeqIO.parse(fasta_path, "fasta"):
        gene_name = ""
        ensembl_transcript_id = ""
        ensembl_gene_id = ""
        # Description tokens look like "gene:ENSG...", "transcript:ENST...",
        # "gene_symbol:XYZ" — keep the part after the first colon.
        for token in record.description.split(" "):
            if "gene_symbol" in token:
                gene_name = token.split(":", 1)[1]
            elif "gene:" in token:
                ensembl_gene_id = token.split(":", 1)[1]
            elif "transcript:" in token:
                ensembl_transcript_id = token.split(":", 1)[1]
        rows.append([record.id, ensembl_transcript_id, ensembl_gene_id, gene_name, record.seq])
    cols = ["Ensembl_Protein_ID", "Ensembl_Transcript_ID", "Ensembl_Gene_ID", "Gene_Name", "Sequence"]
    df = pd.DataFrame(rows, columns=cols)
    df.to_csv(fasta_path.split(".fa")[0] + ".csv")
    return df
if __name__ == "__main__":
fasta_path = input("Enter the Ensembl FASTA file path: ")
main(fasta_path) | noelgarber/PACM | general_utils/ensembl_fasta_parser.py | ensembl_fasta_parser.py | py | 1,310 | python | en | code | 0 | github-code | 90 |
# N players; player i's input line lists the N-1 opponents (1-based) in the
# order they must be played.
N=int(input())
a=[[0 for j in range(N-1)] for i in range(N)]
for i in range(N):
    line=list(map(int,input().split()))
    for j in range(N-1):
        # Store opponents 0-based.
        a[i][j]=line[j]-1
    # Reverse so the next required opponent sits at the end (cheap pop()).
    a[i]=a[i][::-1]
# Pending matches that can be played on the current day.
stack=[]
def addmatch(i):
    """Queue the pair [i, j] when player i's next required opponent j also
    has i as their next required opponent (uses the module-level a/stack)."""
    if not a[i]:
        return
    j = a[i][-1]
    if a[j][-1] == i:
        stack.append([i, j])
# Seed the stack with every pair that can play on day 1.
for i in range(N):
    addmatch(i)
day=0
# Greedy day-by-day simulation: play every queued match whose participants
# still want each other next, then re-queue the freed participants.
while stack:
    day+=1
    member=set()
    for i in range(len(stack)):
        g=stack.pop()
        y=g[0]
        x=g[1]
        # Re-validate: an earlier pop this day may have consumed the match.
        if len(a[y])>0 and a[y][-1]==x:
            a[y].pop()
            a[x].pop()
            member.add(y)
            member.add(x)
    for m in member:
        addmatch(m)
# Any leftover matches mean the schedule is impossible (-1); the for-else
# prints the day count only when the loop finishes without break.
for i in range(len(a)):
    if len(a[i])>0:
        print(-1)
        break
else:
print(day) | Aasthaengg/IBMdataset | Python_codes/p02925/s644493706.py | s644493706.py | py | 676 | python | en | code | 0 | github-code | 90 |
15662277349 | import urlparse
import time
import datetime
class Throttle:
    """Add delay between two scrapes of the same domain.

    Keeps the timestamp of the last visit per domain and sleeps just long
    enough so that at least ``delay`` seconds separate consecutive requests.
    """
    def __init__(self, delay):
        # Minimum number of seconds between two requests to one domain.
        self.delay = delay
        # domain -> datetime of the most recent visit.
        self.domain = {}

    def wait(self, url):
        """Block (if needed) before a request to *url*, then record the visit."""
        domain = urlparse.urlparse(url).netloc
        last_visit = self.domain.get(domain)
        if self.delay > 0 and last_visit is not None:
            # Bug fix: timedelta.seconds ignores whole days and truncates to
            # an int, so a gap of e.g. 1 day + 1 s looked like 1 s.
            # total_seconds() is the true elapsed time.
            elapsed = (datetime.datetime.now() - last_visit).total_seconds()
            if self.delay > elapsed:
                time.sleep(self.delay - elapsed)
        self.domain[domain] = datetime.datetime.now()
# Read a word and report whether it is a palindrome.
# Bug fixes vs the original: odd-length words were always reported as
# 'Not palindrome' (the while-else fired immediately), and even-length
# non-palindromes looped forever re-printing the verdict.
word = input()
liste = list(word)            # list() turns the string into a list of letters
liste_inverse = liste[::-1]   # reversed copy
if liste == liste_inverse:
    print('Palindrome')
else:
    print('Not palindrome')
36275045257 | # coding: utf-8
import tensorflow as tf
def create_adam_optimizer(learning_rate, momentum):
    # `momentum` is unused for Adam; the parameter exists so every factory
    # in `optimizer_factory` shares the same (learning_rate, momentum)
    # signature.
    return tf.train.AdamOptimizer(learning_rate=learning_rate,
                                  epsilon=1e-4)
def create_sgd_optimizer(learning_rate, momentum):
    # Plain momentum SGD; signature matches the other factories.
    return tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                      momentum=momentum)
def create_rmsprop_optimizer(learning_rate, momentum):
    # RMSProp with momentum; epsilon raised from the default for stability.
    return tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                     momentum=momentum,
                                     epsilon=1e-5)
# Config name -> optimizer factory; every factory takes
# (learning_rate, momentum) even when momentum is ignored.
optimizer_factory = {'adam': create_adam_optimizer,
                     'sgd': create_sgd_optimizer,
                     'rmsprop': create_rmsprop_optimizer}
def mu_law_encode(audio, quantization_channels):
    '''Quantizes waveform amplitudes.

    Applies mu-law companding (ITU-T G.711) to `audio` — assumed to lie in
    [-1, 1] — and quantizes the result to integer levels in
    [0, quantization_channels - 1].
    '''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu) # tf.log1p(x) = log(1+x)
        signal = tf.sign(audio) * magnitude
        # Quantize signal to the specified number of levels.
        # (+0.5 turns the truncation in to_int32 into round-to-nearest.)
        return tf.to_int32((signal + 1) / 2 * mu + 0.5)
def mu_law_decode(output, quantization_channels, quantization=True):
    '''Recovers waveform from quantized values.

    With quantization=True `output` holds integer levels in
    [0, quantization_channels - 1]; with quantization=False it is already a
    companded signal in [-1, 1] and only the inverse mu-law expansion is
    applied.
    '''
    with tf.name_scope('decode'):
        mu = quantization_channels - 1
        # Map values back to [-1, 1].
        if quantization:
            signal = 2 * (tf.to_float(output) / mu) - 1
        else:
            signal = output
        # Perform inverse of mu-law transformation.
        magnitude = (1 / mu) * ((1 + mu)**abs(signal) - 1)
        return tf.sign(signal) * magnitude
| hccho2/Tacotron-Wavenet-Vocoder-Korean | wavenet/ops.py | ops.py | py | 1,985 | python | en | code | 162 | github-code | 90 |
18579186762 | import math
import numpy as np
"""
Finding point of intersection between line and circle: https://stackoverflow.com/questions/30844482/what-is-most-efficient-way-to-find-the-intersection-of-a-line-and-a-circle-in-py
Circle and line segment intersection: https://stackoverflow.com/questions/22747702/finding-x-and-y-axis-line-intercept-points-of-a-circle-python
Function: Find the intersection between a circle and a line segment
Input: Circle center and two points of line segment
"""
def lineSegmentCircleIntersection(circle, pt1, pt2):
    """Return the circle's centre when the circle touches the line through
    pt1/pt2, else None.

    circle: ((cx, cy), radius).
    NOTE(review): despite the name, this tests the perpendicular distance to
    the *infinite* line, not the finite segment — confirm intent.

    Bug fix: the original wrapped the subtractions in
    ``except ZeroDivisionError`` (they can never raise it) while the
    division that actually can fail — by sqrt(a*a + b*b) when pt1 == pt2 —
    was unguarded. Degenerate segments now return None.
    """
    x1, y1 = pt1[0], pt1[1]
    x2, y2 = pt2[0], pt2[1]
    # Implicit line equation a*x + b*y + c = 0 through the two points.
    a = y2 - y1
    b = -(x2 - x1)
    c = y2 * (x2 - x1) - x2 * (y2 - y1)
    denom = math.sqrt(a * a + b * b)
    if denom == 0:
        # pt1 == pt2: no line is defined.
        return None
    (ox, oy), size = circle
    d = abs(a * ox + b * oy + c) / denom
    if d <= size:
        return circle[0]
    return None
"""
Function: Find intersection of ray with the workspace rectangle
2------c------3
| |
| |
b d
| |
| |
1------a------4
Here: vertex: (1,2,3,4)
sides : (a,b,c,d)
Inputs: rectangle, ray start point, direction of ray
Return: intersection Point, intersection Distance, intsection Rectangle Side
"""
def rayIntersectionRect(rectangle, rayOrigin, rayDirection):
    """Intersect a ray with an axis-aligned workspace rectangle.

    rectangle: (startpt, length_x, width_y) with startpt the corner "1"
    (see the diagram above); sides are numbered 1..4 counter-clockwise.
    Returns (intersection point, euclidean distance, 1-based side index),
    or (None, None, None) when the ray misses.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` dtype is used instead.
    """
    startpt, length_x, width_y = rectangle
    # The rectangle's four vertices, counter-clockwise from startpt.
    p1 = (startpt[0], startpt[1])
    p2 = (startpt[0], startpt[1] + width_y)
    p3 = (startpt[0] + length_x, startpt[1] + width_y)
    p4 = (startpt[0] + length_x, startpt[1])
    vertexList = np.array([p1, p2, p3, p4], dtype=float)
    # Find the closest intersection with the rectangle boundary.
    intersect_point = None
    closest_side = None
    euclid_distance = None
    for i in range(len(vertexList)):
        # Side i joins vertex i-1 and vertex i (index -1 wraps around).
        pt = rayIntersectionLineSegment(rayOrigin, rayDirection, vertexList[-1 + i], vertexList[i])
        if pt is not None:
            dist = euclidDist(rayOrigin, pt)
            if euclid_distance is None:
                euclid_distance = dist
            if dist <= euclid_distance:
                intersect_point = pt
                euclid_distance = dist
                closest_side = i + 1
    return intersect_point, euclid_distance, closest_side
"""
Take care when the line segment and the ray are parallel to each other
Help site: https://gist.github.com/danieljfarrell/faf7c4cafd683db13cbc
https://rootllama.wordpress.com/2014/06/20/ray-line-segment-intersection-test-in-2d/
"""
def rayIntersectionLineSegment(rayOrigin, rayDirection, point1, point2):
    """Return the intersection point of a 2-D ray with the segment
    [point1, point2], or None when they do not meet.

    A parallel ray/segment (zero denominator) returns None instead of
    dividing by zero.

    Fixes: ``np.float`` was removed in NumPy 1.24 (use builtin ``float``),
    and ``np.cross`` on 2-element vectors is deprecated/removed in
    NumPy 2.0 — the scalar 2-D cross product is written out explicitly.
    """
    # Convert to numpy arrays; the direction is normalised first.
    rayOrigin = np.array(rayOrigin, dtype=float)
    rayDirection = np.array(norm(rayDirection), dtype=float)
    point1 = np.array(point1, dtype=float)
    point2 = np.array(point2, dtype=float)
    # Ray-Line Segment Intersection Test in 2D.
    v1 = rayOrigin - point1
    v2 = point2 - point1
    v3 = np.array([-rayDirection[1], rayDirection[0]])  # perpendicular to ray
    denom = np.dot(v2, v3)
    if denom == 0:
        # Ray is parallel to the segment.
        return None
    # Scalar 2-D cross product v2 x v1 (replaces deprecated np.cross).
    t1 = (v2[0] * v1[1] - v2[1] * v1[0]) / denom   # distance along the ray
    t2 = np.dot(v1, v3) / denom                    # parameter along segment
    if t1 >= 0.0 and t2 >= 0.0 and t2 <= 1.0:
        return rayOrigin + t1 * rayDirection
    return None
"""
Function: Define a range of angles expanding from : angle - 90 to angle + 90:
delta angle gives a field of view of angle in a discretized form which also sets the resolution of obstacle map
Input: Agent angular orientation (in degrees) and number of partitions of field of view
"""
def angleMap(angle_dir, num_of_partition):
    """Discretise a 180-degree field of view centred on ``angle_dir``.

    Returns a NumPy array of ``num_of_partition + 1`` angles (degrees)
    running from ``angle_dir - 90`` to ``angle_dir + 90``; values >= 360
    are wrapped once into [0, 360). Negative angles are left as-is
    (matches the original behaviour).

    Cleanup: removed the dead ``np.zeros((6, 1))`` pre-allocation and the
    unused ``start_angle`` / ``stop_angle`` locals.
    """
    del_angle = 180.0 / num_of_partition
    angle_map = np.array([angle_dir - 90 + del_angle * i
                          for i in range(num_of_partition + 1)])
    over = angle_map >= 360.0
    angle_map[over] = angle_map[over] - 360.0
    return angle_map
"""
Function: Get normalized form of a vector
Input: n-d vector
"""
def norm(vector):
    """Return *vector* scaled to unit Euclidean length."""
    arr = np.array(vector)
    return arr / np.sqrt(np.dot(arr, arr))
"""
Function: Get magnitude of a n-d vector
Input: n-d vector
"""
def magnitude(vector):
    """Euclidean length of an n-dimensional vector."""
    arr = np.array(vector)
    return np.sqrt(arr.dot(arr))
"""
Function: Convert angle to unit vector in 2D space
Input: Angle (in degrees)
"""
def angleToUnitVector(angle):
    """Convert an angle in degrees to the corresponding 2-D unit vector."""
    theta = angle * np.pi / 180.0
    return np.array([math.cos(theta), math.sin(theta)])
"""
Function: Convert a vector to angle in 2D space
Input: Unit Vector
"""
def vectorToAngle(vector):
    """Angle in degrees of a 2-D vector, via atan2 (range (-180, 180])."""
    x, y = vector
    return math.atan2(y, x) * 180.0 / np.pi
"""
Calculate Euclidean Distance
Input: points - p0, p1
"""
def euclidDist(p0, p1):
    """Euclidean distance between two 2-D points."""
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return math.sqrt(dx * dx + dy * dy)
"""
Check if point lies on the line segment or Not
Inputs: Point - pt & Points - p0, p1
"""
def checkPointOnLine(pt, p0, p1):
    """True when *pt* lies on the segment p0-p1 (within a small tolerance).

    Triangle-inequality test: routing through pt adds ~0 extra length only
    when pt sits on the segment.
    """
    tolerence = 0.001
    detour = euclidDist(p0, pt) + euclidDist(pt, p1) - euclidDist(p0, p1)
    return detour <= tolerence
35281066047 | import torch
import pandas as pd
import numpy as np
import sys
import copy
from tqdm import tqdm
import torch.nn as nn
from sklearn.metrics import confusion_matrix
from models.losses import LogitAdjustLoss, FocalLoss, CrossEntropyLoss, instance_weighted_loss, DiscriminativeLoss
from utils.help_functions import Voting, compute_metrics_from_confusion_matrix, set_seeds
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
import mlflow
import mlflow.pytorch
class TrainModule():
    def __init__(self, cfg, model, train_loader, val_loader, loss_func, use_instance_weight=True,
                 posthoc_adjustment=False):
        """Bundle model, loaders and loss for training/evaluation.

        use_instance_weight: pass per-sample weights into the loss call.
        posthoc_adjustment: apply logit adjustment with hard-coded class
        priors ([0.9, 0.1]) during validation/test.
        """
        self._cfg = cfg
        self._model = model
        self._train_loader = train_loader
        self._val_loader = val_loader
        self._posthoc_adjustment = posthoc_adjustment
        # Previous loss-registry mechanism, kept for reference:
        # self._losses = {
        #     loss['NAME']: getattr(sys.modules[__name__], loss['NAME'])(**loss.get('ARGS', {}))
        #     for loss in cfg.LOSSES
        # }
        # self._loss = self._losses[cfg.MODEL.LOSS_FUNC]
        # self._loss_feat = DiscriminativeLoss(delta_var=0.5, delta_dist=5)
        self._loss = loss_func
        self._use_instance_weight = use_instance_weight
    def train(self, validate_interval=1, verbose=10):
        """Train with cyclical LR restarts, early stopping on validation
        loss, and MLflow metric logging.

        validate_interval: run validation every this many epochs.
        verbose: print progress every this many epochs.
        Returns (model with best weights loaded, best val UAR, best val
        confusion matrix).
        NOTE(review): if validation never improves before epoch 5,
        best_val_acc / best_epoch are unbound at the end — confirm the
        epoch budget always exceeds 5.
        """
        set_seeds(self._cfg.SEED)
        self._model = self._model.to(self._cfg.DEVICE)
        optimizer, scheduler = self.configure_optimizers()
        best_val_loss=1e5
        best_val_UAR = -1e5
        best_model_wts=None
        early = 0
        ######### Start ##############################################################
        print('START TRAINING...')
        for epoch in range(0, self._cfg.TRAIN_ARGS.NUM_EPOCHS):
            # Cyclical Learning Rate: re-create optimizer/scheduler each cycle.
            if epoch % self._cfg.TRAIN_ARGS.CYCLICAL_EPOCHS == 0:
                optimizer, scheduler = self.configure_optimizers()
            # Early Stopping
            if (self._cfg.TRAIN_ARGS.EARLY_STOPPING_PATIENCE > 0) and (early >= self._cfg.TRAIN_ARGS.EARLY_STOPPING_PATIENCE):
                break
            #### training ########################################################
            self._model.train()
            loss_train = 0
            num_samples = 0
            matrix_train = np.zeros((self._cfg.MODEL.NUM_CLASSES,self._cfg.MODEL.NUM_CLASSES))
            for batch_idx, (batch_samples, ins_weights) in enumerate(self._train_loader):
                ecg, label = batch_samples['ecg'].to(self._cfg.DEVICE, dtype=torch.float32), batch_samples['label'].to(self._cfg.DEVICE)
                label = label.squeeze()
                optimizer.zero_grad()
                pred = self._model(ecg)
                # Remembered for the validation logit accumulators below.
                num_classes=pred.size(1)
                if self._use_instance_weight:
                    loss = self._loss(pred, label, ins_weights)
                else:
                    loss = self._loss(pred, label)
                loss.backward()
                optimizer.step()
                # loss (sample-weighted running sum, normalised below)
                loss_train += loss.item()*len(label)
                num_samples += len(label)
                # recall (accumulate the epoch confusion matrix)
                matrix_train += confusion_matrix(label.reshape(1, -1).squeeze().cpu().numpy(),
                                                 torch.argmax(pred, dim=1).reshape(1, -1).squeeze().cpu().numpy(),
                                                 labels=range(self._cfg.MODEL.NUM_CLASSES))
            loss_train = loss_train / num_samples
            UAR_train, acc_train, metrics_train = compute_metrics_from_confusion_matrix(matrix_train)
            scheduler.step()
            mlflow.log_metric(f"train_loss", loss_train, step=epoch)
            mlflow.log_metric(f"train_uar", UAR_train, step=epoch)
            mlflow.log_metric(f"train_acc", acc_train, step=epoch)
            for _i in range(len(metrics_train['recall'])):
                mlflow.log_metric(f"train_recall_{_i}", metrics_train['recall'][_i], step=epoch)
            mlflow.log_metric("lr", optimizer.param_groups[0]['lr'], step=epoch)
            if epoch % verbose == 0:
                print('Training\tEpoch: {}\tLoss: {:.3f}\tUAR: {:.3f}\t{} subjects'.format(
                    epoch, loss_train, UAR_train, num_samples))
            del pred, label, batch_samples, num_samples
            ############ Validation #################################################################################################################
            if epoch % validate_interval == 0:
                self._model.eval()
                loss_val = 0
                num_samples = 0
                matrix_val = np.zeros((self._cfg.MODEL.NUM_CLASSES,self._cfg.MODEL.NUM_CLASSES))
                # Per-class logit accumulators (adjusted and original).
                logit_0 = torch.zeros(num_classes)
                logit_1 = torch.zeros(num_classes)
                num_0 = 0
                num_1 = 0
                logit_0_orig = torch.zeros(num_classes)
                logit_1_orig = torch.zeros(num_classes)
                with torch.no_grad():
                    for batch_idx, (batch_samples, ins_weights) in enumerate(self._val_loader):
                        ecg,label= batch_samples['ecg'].to(self._cfg.DEVICE,dtype=torch.float32),batch_samples['label'].to(self._cfg.DEVICE)
                        label = label.squeeze()
                        optimizer.zero_grad()
                        pred= self._model(ecg)
                        if self._use_instance_weight:
                            loss = self._loss(pred, label, ins_weights)
                        else:
                            loss = self._loss(pred, label)
                        loss_val += loss.item()*len(label)
                        if self._posthoc_adjustment:
                            # Post-hoc logit adjustment with fixed class priors.
                            base_probs = torch.tensor([0.9, 0.1])
                            tau = torch.tensor(1.0)
                            pred_orig = pred
                            pred = pred - torch.log(torch.Tensor(base_probs**tau + 1e-12).to(self._cfg.DEVICE,dtype=torch.float32))
                            # NOTE(review): matrix_val_orig is rebuilt from
                            # matrix_val each batch, so it mixes adjusted
                            # counts from earlier batches with this batch's
                            # original counts — likely a bug.
                            matrix_val_orig = matrix_val.copy()
                            matrix_val_orig += confusion_matrix(label.reshape(1, -1).squeeze().cpu().numpy(),
                                                                torch.argmax(pred_orig, dim=1).reshape(1, -1).squeeze().cpu().numpy(),
                                                                labels=range(self._cfg.MODEL.NUM_CLASSES))
                            logit_0_orig += pred_orig[label==0].sum(dim=0)
                            logit_1_orig += pred_orig[label==1].sum(dim=0)
                        num_samples += len(label)
                        matrix_val += confusion_matrix(label.reshape(1, -1).squeeze().cpu().numpy(),
                                                       torch.argmax(pred, dim=1).reshape(1, -1).squeeze().cpu().numpy(),
                                                       labels=range(self._cfg.MODEL.NUM_CLASSES))
                        logit_0 += pred[label==0].sum(dim=0)
                        logit_1 += pred[label==1].sum(dim=0)
                        # NOTE(review): len(label==0) is the batch size, not
                        # the per-class count — (label==0).sum() was likely
                        # intended, which skews the averaged logits below.
                        num_0 += len(label==0)
                        num_1 += len(label==1)
                # validation loss
                loss_val = loss_val / num_samples
                UAR_val, acc_val, metrics_val = compute_metrics_from_confusion_matrix(matrix_val)
                # UAR
                mlflow.log_metric(f"neg_logit", (logit_0[0]-logit_0[1])/num_0, step=epoch)
                mlflow.log_metric(f"pos_logit", (logit_1[0]-logit_1[1])/num_1, step=epoch)
                mlflow.log_metric(f"neg_logit_orig", (logit_0_orig[0]-logit_0_orig[1])/num_0, step=epoch)
                mlflow.log_metric(f"pos_logit_orig", (logit_1_orig[0]-logit_1_orig[1])/num_1, step=epoch)
                mlflow.log_metric(f"val_loss", loss_val, step=epoch)
                mlflow.log_metric(f"val_uar", UAR_val, step=epoch)
                mlflow.log_metric(f"val_acc", acc_val, step=epoch)
                for _i in range(len(metrics_val['recall'])):
                    mlflow.log_metric(f"val_recall_{_i}", metrics_val['recall'][_i], step=epoch)
                if self._posthoc_adjustment:
                    UAR_val_orig, acc_val_orig, metrics_val_orig = compute_metrics_from_confusion_matrix(matrix_val_orig)
                    mlflow.log_metric(f"val_uar_origin", UAR_val_orig, step=epoch)
                    mlflow.log_metric(f"val_acc_origin", acc_val_orig, step=epoch)
                    for _i in range(len(metrics_val_orig['recall'])):
                        mlflow.log_metric(f"val_origin_recall_{_i}", metrics_val_orig['recall'][_i], step=epoch)
                ################################################################################
                # Track the best checkpoint by validation loss (epochs > 5 only).
                if (epoch > 5) and (loss_val <= best_val_loss):
                    best_model_wts = copy.deepcopy(self._model.state_dict())
                    best_val_loss = loss_val
                    best_val_UAR = UAR_val
                    best_val_acc = acc_val
                    best_metrics = metrics_val['recall']
                    best_matrix = matrix_val
                    best_train_UAR = UAR_train
                    best_epoch = epoch
                    early = 0
                else:
                    early += 1
                if epoch % verbose == 0:
                    print('Validate\tEpoch: {}\tLoss: {:.3f}\tUAR: {:.3f}\tEarly: {}\t{} subjects'.format(
                        epoch, loss_val, UAR_val, early, num_samples),'\n')
                del pred, label, batch_samples
        print('\nFinished TRAINING.')
        # Restore the best checkpoint before returning.
        self._model.load_state_dict(best_model_wts)
        # mlflow.pytorch.log_state_dict(
        #     best_model_wts, artifact_path="epoch_{}-uar_{:.2f}".format(best_epoch, best_val_UAR*100)
        # )
        results = {'best_val_uar': np.round(best_val_UAR,3),
                   'best_val_acc': np.round(best_val_acc,3),
                   'epoch_end': best_epoch,
                   }
        mlflow.log_params(results)
        print('Epoch: {}\tVal UAR: {:.3f}\tTrain UAR: {:.3f}'.format(
            best_epoch, best_val_UAR, best_train_UAR),'\n')
        return self._model, best_val_UAR, best_matrix
    def test(self, test_loader):
        """Evaluate on test_loader, log metrics + confusion-matrix figures
        to MLflow and return (predictions, labels) as Python lists.

        NOTE(review): torch.argmax(pred) is called without a dim, which is
        only correct when each test batch contains exactly one sample —
        confirm the loader's batch size.
        """
        self._model.eval()
        loss_test = 0
        num_samples = 0
        pred_all = [None]*len(test_loader)
        label_all = [None]*len(test_loader)
        pred_orig_all = [None]*len(test_loader)
        matrix_test = np.zeros((self._cfg.MODEL.NUM_CLASSES,self._cfg.MODEL.NUM_CLASSES))
        with torch.no_grad():
            for batch_idx, (batch_samples, ins_weights) in tqdm(enumerate(test_loader)):
                ecg,label= batch_samples['ecg'].to(self._cfg.DEVICE,dtype=torch.float32),batch_samples['label'].to(self._cfg.DEVICE)
                pred= self._model(ecg)
                if self._use_instance_weight:
                    loss = self._loss(pred, label, ins_weights)
                else:
                    loss = self._loss(pred, label)
                loss_test += loss.item()
                if self._posthoc_adjustment:
                    # Post-hoc logit adjustment with fixed class priors.
                    base_probs = torch.tensor([0.9, 0.1])
                    tau = torch.tensor(1.0)
                    pred_orig = pred
                    pred = pred - torch.log(torch.Tensor(base_probs**tau + 1e-12).to(self._cfg.DEVICE,dtype=torch.float32))
                    pred_orig_all[batch_idx] = torch.argmax(pred_orig).cpu().tolist()
                    # matrix_test_orig = matrix_test.copy()
                    # matrix_test_orig += confusion_matrix(label.reshape(1, -1).squeeze().cpu().numpy(),
                    #                                torch.argmax(pred_orig, dim=1).reshape(1, -1).squeeze().cpu().numpy(),
                    #                                labels=range(self._cfg.MODEL.NUM_CLASSES))
                num_samples += 1
                pred_all[batch_idx] = torch.argmax(pred).cpu().tolist()
                label_all[batch_idx] = label.squeeze().cpu().tolist()
                # matrix_test += confusion_matrix(label.reshape(1, -1).squeeze().cpu().numpy(),
                #                                torch.argmax(pred).reshape(1, -1).squeeze().cpu().numpy(),
                #                                labels=range(self._cfg.MODEL.NUM_CLASSES))
        # test loss (averaged per batch, not per sample)
        loss_test = loss_test / num_samples
        # Build the confusion matrix from the collected predictions.
        matrix_test = confusion_matrix(np.asarray(label_all),
                                       np.asarray(pred_all),
                                       labels=range(self._cfg.MODEL.NUM_CLASSES))
        UAR_test, acc_test, metrics_test, fig = compute_metrics_from_confusion_matrix(matrix_test, visualize=True)
        results = {'test_uar': np.round(UAR_test,3),
                   'test_acc': np.round(acc_test,3),
                   'test_loss': np.round(loss_test,3),
                   }
        if self._posthoc_adjustment:
            # Also report metrics for the unadjusted (original) logits.
            matrix_test_orig = confusion_matrix(np.asarray(label_all),
                                                np.asarray(pred_orig_all),
                                                labels=range(self._cfg.MODEL.NUM_CLASSES))
            UAR_test_orig, acc_test_orig, metrics_test_orig, fig_2 = compute_metrics_from_confusion_matrix(matrix_test_orig, visualize=True)
            results['original_test_uar']=np.round(UAR_test_orig,3)
            results['original_test_acc']=np.round(acc_test_orig,3)
            mlflow.log_figure(fig_2, "original_test_confusion_matrix.png")
        mlflow.log_params(results)
        mlflow.log_figure(fig, "test_confusion_matrix.png")
        return pred_all, label_all
def configure_optimizers(self):
if self._cfg.TRAIN_ARGS.OPTIMIZER == 'SGD':
optimizer = torch.optim.SGD(
self._model.parameters(),
lr=self._cfg.TRAIN_ARGS.BASE_LR,
momentum=0.9,
weight_decay=self._cfg.TRAIN_ARGS.WEIGHT_DECAY)
elif self._cfg.TRAIN_ARGS.OPTIMIZER == 'AdamW':
optimizer = torch.optim.AdamW(
self._model.parameters(),
lr=self._cfg.TRAIN_ARGS.BASE_LR,
weight_decay=self._cfg.TRAIN_ARGS.WEIGHT_DECAY
)
elif self._cfg.TRAIN_ARGS.OPTIMIZER == 'Adam':
optimizer = torch.optim.Adam(
self._model.parameters(),
lr=self._cfg.TRAIN_ARGS.BASE_LR,
weight_decay=self._cfg.TRAIN_ARGS.WEIGHT_DECAY
)
else:
raise RuntimeError(f'Unsupported Optimizer {self._cfg.TRAIN_ARGS.OPTIMIZER}')
if self._cfg.TRAIN_ARGS.LR_SCHEDULER == 'ReduceLROnPlateau':
scheduler = {
'scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode='min',
factor=self._cfg.TRAIN_ARGS.LR_SCHEDULER_FACTOR,
patience=self._cfg.TRAIN_ARGS.LR_SCHEDULER_PATIENCE,
min_lr=self._cfg.TRAIN_ARGS.MIN_LR,
verbose=True,
),
'monitor': 'val_loss',
}
elif self._cfg.TRAIN_ARGS.LR_SCHEDULER == 'CosineAnnealingLR':
scheduler = {
'scheduler': torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=self._cfg.TRAIN_ARGS.MAX_EPOCHS,
)
}
elif self._cfg.TRAIN_ARGS.LR_SCHEDULER == 'CosineAnnealingWarmRestarts':
scheduler = {
'scheduler': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer,
T_0=self._cfg.TRAIN_ARGS.WARM_RESTART_EPOCH
)
}
elif self._cfg.TRAIN_ARGS.LR_SCHEDULER == 'LinearWarmupCosineAnnealingLR':
scheduler = {
'scheduler': LinearWarmupCosineAnnealingLR(
optimizer,
warmup_epochs=self._cfg.TRAIN_ARGS.WARM_UP_EPOCH,
max_epochs=self._cfg.TRAIN_ARGS.MAX_EPOCHS,
warmup_start_lr=self._cfg.TRAIN_ARGS.MIN_LR,
eta_min=self._cfg.TRAIN_ARGS.MIN_LR
)
}
elif self._cfg.TRAIN_ARGS.LR_SCHEDULER == 'StepLR':
scheduler = {
'scheduler': torch.optim.lr_scheduler.StepLR(
optimizer,
step_size=self._cfg.TRAIN_ARGS.LR_SCHEDULER_PATIENCE,
gamma=self._cfg.TRAIN_ARGS.LR_SCHEDULER_FACTOR
)
}
else:
raise RuntimeError(f'Unsupported LR scheduler {self._cfg.TRAIN_ARGS.LR_SCHEDULER}')
return optimizer, scheduler['scheduler'] | zili98/ELEC576-Deep-Learning-Final-Project | src/train_function.py | train_function.py | py | 16,799 | python | en | code | 0 | github-code | 90 |
class PrimeFactor():
    """Smallest-prime-factor sieve supporting O(1) primality tests and
    O(log N) factorisations for every integer up to ``n``."""

    def __init__(self, n):
        """Build the sieve of Eratosthenes in O(N log log N)."""
        self.n = n
        # table[k] is the smallest prime factor of k (table[k] == k for primes).
        self.table = list(range(n + 1))
        self.table[2::2] = [2] * (n // 2)
        for p in range(3, int(n ** 0.5) + 2, 2):
            if self.table[p] != p:
                continue  # p is composite; its multiples are already marked
            for q in range(p * p, n + 1, 2 * p):
                if self.table[q] == q:
                    self.table[q] = p

    def is_prime(self, x):
        """O(1) primality test via the sieve."""
        return x >= 2 and self.table[x] == x

    def prime_factors(self, x):
        """Prime factors of ``x`` with multiplicity, smallest first, O(log N)."""
        factors = []
        if x >= 2:
            while self.table[x] != 1:
                p = self.table[x]
                factors.append(p)
                x //= p
        return factors

    def prime_counter(self, x):
        """Prime factorisation of ``x`` as a {prime: exponent} dict, O(log N)."""
        counts = dict()
        if x >= 2:
            while self.table[x] != 1:
                p = self.table[x]
                counts[p] = counts.get(p, 0) + 1
                x //= p
        return counts
#################################################################
# Count divisors of N! (the product 1*2*...*N) that have exactly 75 divisors.
# 75 factors as 75, 25*3, 15*5 or 5*5*3, so a qualifying number's prime
# exponents must match {74}, {24, 2}, {14, 4} or {4, 4, 2}.
N = int(input())
P = PrimeFactor(N)
# Q: prime -> its exponent in the factorisation of N! (summed over i = 1..N).
Q = dict()
for i in range(1,N+1):
    for key, value in P.prime_counter(i).items():
        Q[key] = Q.get(key,0) + value
# a/b/c/d/e: count of primes whose exponent is at least 2/4/24/14/74.
a, b, c, d, e = 0, 0, 0, 0, 0
for value in Q.values():
    if value >= 2: a += 1
    if value >= 4: b += 1
    if value >= 24: c += 1
    if value >= 14: d += 1
    if value >= 74: e += 1
# {4,4,2}: two primes with exp>=4 times a third distinct prime with exp>=2;
# {24,2}: c*(a-1); {14,4}: d*(b-1); {74}: e.
print(b*(b-1)//2*(a-2) + c*(a-1) + d*(b-1) + e)
20857985297 | #!/usr/bin/env python
from __future__ import print_function
from six.moves import input
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from pkg_vb_sim.srv import *
def all_close(goal, actual, tolerance):
    """Return True when ``actual`` matches ``goal`` to within ``tolerance``.

    Accepts a list of floats, a PoseStamped, or a Pose; pose messages are
    converted to flat lists and compared component-wise.
    """
    if type(goal) is list:
        for idx in range(len(goal)):
            if abs(actual[idx] - goal[idx]) > tolerance:
                return False
        return True
    if type(goal) is geometry_msgs.msg.PoseStamped:
        return all_close(goal.pose, actual.pose, tolerance)
    if type(goal) is geometry_msgs.msg.Pose:
        return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
    return True
#To activate gripper in gazebo(client in pkg_vb_sim):-
def activate_gripper_client(activate_vacuum_gripper):
    """Toggle the UR5-1 vacuum gripper in Gazebo through its ROS service.

    Blocks until the service is available, then requests the given on/off
    state and returns the service result (None if the call raised).
    """
    rospy.wait_for_service('/eyrc/vb/ur5_1/activate_vacuum_gripper')
    try:
        gripper_service = rospy.ServiceProxy('/eyrc/vb/ur5_1/activate_vacuum_gripper', vacuumGripper)
        response = gripper_service(activate_vacuum_gripper)
        print("activate_vacuum_gripper:" + str(activate_vacuum_gripper) + " in gazebo")
        return response.result
    except rospy.ServiceException as e:
        print("Service call failed: %s"%e)
def usage():
    """Return the usage hint printed when unexpected CLI arguments are given."""
    return "[]"
class MoveGroupPythonIntefaceTutorial(object):
    """Thin wrapper around the MoveIt commander for the UR5-1 pick-and-place task.

    Owns the robot/scene/move-group handles and exposes helpers to plan to a
    pose and to add/attach/detach/remove the grasped box in the planning
    scene.  (The class name keeps the original 'Inteface' spelling so
    existing callers keep working.)
    """
    def __init__(self):
        """Initialise moveit_commander, the ROS node and all MoveIt handles."""
        super(MoveGroupPythonIntefaceTutorial, self).__init__()
        moveit_commander.roscpp_initialize(sys.argv)
        rospy.init_node('node_t2_ur5_1_pick_place', anonymous=True)
        robot = moveit_commander.RobotCommander()
        scene = moveit_commander.PlanningSceneInterface()
        group_name = "ur5_1_planning_group"
        move_group = moveit_commander.MoveGroupCommander(group_name)
        # Publishes planned trajectories so RViz can display them.
        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                       moveit_msgs.msg.DisplayTrajectory,
                                                       queue_size=20)
        planning_frame = move_group.get_planning_frame()
        print("============ Planning frame: %s" % planning_frame)
        # We can also print the name of the end-effector link for this group:
        eef_link = move_group.get_end_effector_link()
        print("============ End effector link: %s" % eef_link)
        # We can get a list of all the groups in the robot:
        group_names = robot.get_group_names()
        print("============ Available Planning Groups:", robot.get_group_names())
        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print("============ Printing robot state ===========")
        print(robot.get_current_state())
        print("")
        # Misc variables
        self.box_name = 'box'
        self.robot = robot
        self.scene = scene
        self.move_group = move_group
        self.display_trajectory_publisher = display_trajectory_publisher
        self.planning_frame = planning_frame
        self.eef_link = eef_link
        self.group_names = group_names
    def go_to_pose_goal(self, arg_pose):
        """Plan and execute a move of the end effector to ``arg_pose``.

        Logs success/failure of planning and returns True when the reached
        pose is within 0.01 of the goal (see ``all_close``).
        """
        move_group = self.move_group
        move_group.set_pose_target(arg_pose)
        plan = move_group.go(wait=True)
        if (plan == True):
            rospy.loginfo(
                '\033[94m' + ">>> go_to_pose() Success" + '\033[0m')
        else:
            rospy.logerr(
                '\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
        # Calling `stop()` ensures that there is no residual movement
        move_group.stop()
        move_group.clear_pose_targets()
        current_pose = self.move_group.get_current_pose().pose
        return all_close(arg_pose, current_pose, 0.01)
    def wait_for_state_update(self, box_is_known=False, box_is_attached=False, timeout=4):
        """Poll the planning scene until the box reaches the requested
        known/attached state; returns False when ``timeout`` seconds elapse."""
        box_name = self.box_name
        scene = self.scene
        start = rospy.get_time()
        seconds = rospy.get_time()
        while (seconds - start < timeout) and not rospy.is_shutdown():
            attached_objects = scene.get_attached_objects([box_name])
            is_attached = len(attached_objects.keys()) > 0
            is_known = box_name in scene.get_known_object_names()
            if (box_is_attached == is_attached) and (box_is_known == is_known):
                return True
            # Sleep so that we give other threads time on the processor
            rospy.sleep(0.1)
            seconds = rospy.get_time()
        return False
    def add_box(self, timeout=4):
        """Spawn the 15 cm target box in the planning scene at its shelf
        position and wait until the scene reports it as known."""
        box_name = self.box_name
        scene = self.scene
        box_pose = geometry_msgs.msg.PoseStamped()
        box_pose.header.frame_id = "world"
        box_pose.pose.orientation.w = 1.0
        box_pose.pose.position.x = 0.05 # 0.09
        box_pose.pose.position.y = 0.48 # 0.43
        box_pose.pose.position.z = 1.84
        box_name = "box"
        scene.add_box(box_name, box_pose, size=(0.15, 0.15, 0.15))
        self.box_name=box_name
        return self.wait_for_state_update(box_is_known=True, timeout=timeout)
    def attach_box(self, timeout=4):
        """Attach the box to the end effector in the planning scene, allowing
        contact with every link of the grasping group."""
        box_name = self.box_name
        robot = self.robot
        scene = self.scene
        eef_link = self.eef_link
        group_names = self.group_names
        grasping_group = 'ur5_1_planning_group'
        touch_links = robot.get_link_names(group=grasping_group)
        scene.attach_box(eef_link, box_name, touch_links=touch_links)
        return self.wait_for_state_update(box_is_attached=True, box_is_known=False, timeout=timeout)
    def detach_box(self, timeout=4):
        """Detach the box from the end effector (it stays in the scene)."""
        box_name = self.box_name
        scene = self.scene
        eef_link = self.eef_link
        scene.remove_attached_object(eef_link, name=box_name)
        return self.wait_for_state_update(box_is_known=True, box_is_attached=False, timeout=timeout)
    def remove_box(self, timeout=4):
        """Remove the box from the planning scene entirely.

        NOTE(review): the box must be detached before calling this -- confirm
        callers respect that ordering.
        """
        box_name = self.box_name
        scene = self.scene
        scene.remove_world_object(box_name)
        return self.wait_for_state_update(box_is_attached=False, box_is_known=False, timeout=timeout)
def _make_pose(px, py, pz, ox, oy, oz, ow):
    """Build a geometry_msgs Pose from position and orientation components."""
    pose = geometry_msgs.msg.Pose()
    pose.position.x, pose.position.y, pose.position.z = px, py, pz
    pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w = ox, oy, oz, ow
    return pose


def main():
    """Pick a box from the shelf and drop it in the bin with the UR5-1 arm.

    Sequence: spawn the collision box in RViz, move through pre-grasp poses,
    attach the box (RViz scene + Gazebo vacuum gripper), travel through
    intermediate waypoints to the bin, detach/drop, and return towards the
    home pose.  Refactor: the nine repeated 8-line pose-construction blocks
    are collapsed into the ``_make_pose`` helper; values and ordering of all
    prints/moves are unchanged.
    """
    try:
        print("")
        print("----------------------------------------------------------")
        print("Welcome to Task2")
        print("----------------------------------------------------------")
        print("Press Ctrl-D to exit at any time")
        print("")
        tutorial = MoveGroupPythonIntefaceTutorial()
        tutorial.add_box()
        # Intermediate pick poses, chosen to avoid collisions and
        # "go_to_pose() Failed. Solution for Pose not Found." plans.
        # Max-height pose above the shelf:
        ur5_pose_1 = _make_pose(0.000822714776654, 0.10915010937, 1.95105858432,
                                1.32843786881e-07, 0.00164148791415,
                                0.000210623104507, 2.12177767514e-09)
        # Pose with the gripper frustum penetrating the box:
        ur5_pose_2 = _make_pose(0.0533460477845, 0.259171751739, 1.9143211227,
                                9.7605292128e-06, 0.00166394819707,
                                -7.42958421915e-05, 1.0)
        print("Going to max height attaining pose:- ")
        tutorial.go_to_pose_goal(ur5_pose_1)
        rospy.sleep(2)
        print("Going to pick box pose:- ")
        tutorial.go_to_pose_goal(ur5_pose_2)
        rospy.sleep(6)
        tutorial.attach_box()
        print("Box attached in rviz successfully!")
        if len(sys.argv) == 1:
            activate_vacuum_gripper = True
        else:
            print(usage())
            sys.exit(1)
        gazebo_gripper_activate = activate_gripper_client(activate_vacuum_gripper)
        print(gazebo_gripper_activate)
        rospy.sleep(1)
        # Intermediate drop waypoints towards the bucket (again chosen to
        # avoid collisions and unplannable poses).
        ur5_pose_before_drop_1 = _make_pose(-0.0901454549566, -0.062274347901, 1.81182607816,
                                            -7.70672480231e-05, 0.00155566570423,
                                            6.51110000322e-05, 2.12177767514e-09)
        ur5_pose_before_drop_2 = _make_pose(-0.149408652019, -0.221361969977, 1.8796851007,
                                            5.5688246173e-05, 0.00150812428518,
                                            -0.000126352428789, 2.12177767514e-09)
        ur5_pose_before_drop_3 = _make_pose(-0.284998169562, -0.25530143817, 1.8635091385,
                                            5.83672567794e-05, 0.00148177354145,
                                            0.000168866917652, 2.12177767514e-09)
        # Final drop location above the bucket:
        ur5_pose_final_drop_loc = _make_pose(-0.666272224105, -0.241918009411, 1.00553602213,
                                             -3.30905775786e-05, 0.001610721457,
                                             3.13001701172e-05, 2.12177767514e-09)
        print("After attaching box going to dropping pose:- ")
        print("Going to intermediate poses:- ")
        tutorial.go_to_pose_goal(ur5_pose_before_drop_1)
        tutorial.go_to_pose_goal(ur5_pose_before_drop_2)
        tutorial.go_to_pose_goal(ur5_pose_before_drop_3)
        print("Going to final drop location pose")
        tutorial.go_to_pose_goal(ur5_pose_final_drop_loc)
        rospy.sleep(6)
        tutorial.detach_box()
        print("Box is dropped successfully in rviz!")
        if len(sys.argv) == 1:
            activate_vacuum_gripper = False
        else:
            print(usage())
            sys.exit(1)
        gazebo_gripper_activate = activate_gripper_client(activate_vacuum_gripper)
        print(gazebo_gripper_activate)
        rospy.sleep(1)
        print("After dropping box going to initial pose(allZeros position):- ")
        print("Going to intermediate pose(straightUp):- ")
        # Return waypoints.  NOTE(review): the orientation components below
        # look like Euler angles (values near pi), not a unit quaternion --
        # confirm they are intentional.
        ur5_pose_after_drop_1 = _make_pose(0.0953699822384, 0.10912919735, 1.85633325896,
                                           3.06336175362, 1.56998443354,
                                           3.06314357573, 2.12177767514e-09)
        print("Now going to allZeros pose:- ")
        ur5_pose_after_drop_2 = _make_pose(0.817313113173, 0.108761433027, 0.944579923819,
                                           -3.14159265074, 0.000155989015405,
                                           3.1411410382, 2.12177767514e-09)
        tutorial.go_to_pose_goal(ur5_pose_after_drop_1)
        tutorial.go_to_pose_goal(ur5_pose_after_drop_2)
        tutorial.remove_box()
        print("================== Task2 execution completed! =======================")
    except rospy.ROSInterruptException:
        return
    except KeyboardInterrupt:
        return
if __name__ == '__main__':
main()
| hi-18-K/inventory_simulation | Task2/pkg_task2/scripts/node_t2_ur5_1_pick_place.py | node_t2_ur5_1_pick_place.py | py | 11,910 | python | en | code | 0 | github-code | 90 |
17863143812 | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
    """Create a trainable weight tensor drawn from a truncated normal
    distribution (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def main():
    """Train a one-hidden-layer MLP on MNIST with TF1-style graph execution.

    Architecture: 784 -> 300 (ReLU) -> 10 logits; softmax cross-entropy loss
    with momentum SGD; prints test-set accuracy after 1000 mini-batches.
    """
    mnist = input_data.read_data_sets('/mnist', one_hot=True)
    # Layer sizes: input pixels, hidden units, output classes.
    I = 784
    L1 = 300
    O = 10
    INPUT = tf.placeholder(tf.float32, [None, I])
    TARGET = tf.placeholder(tf.float32, [None, O])
    WEIGHT1 = weight_variable([I, L1])
    BIAS1 = bias_variable([L1])
    WEIGHT2 = weight_variable([L1, O])
    BIAS2 = bias_variable([O])
    LAYER1 = tf.nn.relu(tf.matmul(INPUT, WEIGHT1) + BIAS1)
    # Raw logits; softmax is applied inside the loss for numerical stability.
    OUTPUT = tf.matmul(LAYER1, WEIGHT2) + BIAS2
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=TARGET, logits=OUTPUT))
    train_step = tf.train.MomentumOptimizer(0.75, 0.1).minimize(cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={INPUT: batch_xs, TARGET: batch_ys})
    # Test trained model
    correct_prediction = tf.equal(tf.argmax(OUTPUT, 1), tf.argmax(TARGET, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={INPUT: mnist.test.images, TARGET: mnist.test.labels}))
if __name__ == "__main__":
main()
| Alyndre/DeepLearningPython | TensorFlow/TF_mnist.py | TF_mnist.py | py | 1,524 | python | en | code | 0 | github-code | 90 |
18550850459 | #11208467
t="abcdefghijklmnopqrstuvwxyz"
s=input()
if s==t[::-1]:print(-1);exit()
if len(s)!=26:
for i in t:
if i not in s:print(s+i);exit()
i=25
while s[i-1]>s[i]:i-=1
tt=s[i-1]
ss=list(s[i-1:])
ss.sort()
print(s[:i-1]+ss[ss.index(tt)+1]) | Aasthaengg/IBMdataset | Python_codes/p03393/s991491343.py | s991491343.py | py | 245 | python | en | code | 0 | github-code | 90 |
73059217576 | import json
import hw1.morph as morph
from gensim.models import KeyedVectors
import numpy as np
LIMIT = 300
input1 = "example_texts.json"
input2 = "dataset_43428_1.txt"
xml_dict = '../hw1/dict.opcorpora.xml'
xml_corpus = '../hw1/annot.opcorpora.no_ambig.xml'
# Translation tables: one strips mid-sentence punctuation, the other strips
# the sentence terminators listed in end_punctuation.
middle_punctuation_remover = str.maketrans({key: None for key in [',', ':', ';', '«', '»', '-', '(', ')', '—', '%', '&', '*', '^', '$', '#']})
end_punctuation = ['.', '?', '!']
end_punctuation_remover = str.maketrans({key: None for key in end_punctuation})
# Russian demonstrative pronoun forms used to detect anaphora near the start
# of a sentence.
anafors = ['этот', 'это', 'эта', 'эту', 'этих', 'этого', 'этим', 'этой', 'этими', 'этом', 'эти']
anafors_punctuation = ['-', '—']
# Stop-word list loaded from disk; the empty string guards blank tokens.
silly_words = ['']
with open("../hw2/stop-words.txt") as f:
    for line in f:
        silly_words.append(line.strip())
def build_dictionary(text):
    """Count lemma frequencies for the content words of ``text``.

    Stop words, pure digits and tokens shorter than 3 characters are skipped
    (checked on the surface form, before lemmatisation).

    Fixes: the local ``dict`` shadowed the builtin; the None-check counting
    is replaced with the idiomatic ``dict.get`` default.

    Args:
        text: preprocessed text (single spaces between tokens).

    Returns:
        dict mapping lemma -> occurrence count.
    """
    text = text.translate(end_punctuation_remover)
    counts = {}
    for word in text.split(" "):
        if word in silly_words or word.isdigit() or len(word) < 3:
            continue
        lemma, _ = morph.choose_lemma(word, -1)
        counts[lemma] = counts.get(lemma, 0) + 1
    return counts
def build_bigrams(text):
    """Count unordered adjacent lemma pairs (bigrams) in ``text``.

    Bug fix: in the original, the first accepted word never reached the
    ``prev = word`` assignment (the ``prev == ''`` guard triggered ``continue``
    before it), so ``prev`` stayed empty forever and the function always
    returned an empty dict.  ``prev`` is now updated for every counted word;
    stop words are still skipped without breaking the pair chain.

    Returns:
        dict mapping (lemma_a, lemma_b) sorted pairs -> occurrence count.
    """
    text = text.translate(end_punctuation_remover)
    bigrams = {}
    prev = ''
    for word in text.split(" "):
        if word in silly_words:
            continue  # stop words are not counted and do not reset the chain
        word, _ = morph.choose_lemma(word, -1)
        if prev != '':
            # Store pairs in sorted order so (a, b) and (b, a) share a key.
            pair = (prev, word) if prev < word else (word, prev)
            bigrams[pair] = bigrams.get(pair, 0) + 1
        prev = word
    return bigrams
def get_sentence_end(text, start):
    """Index of the first sentence terminator at or after ``start``; -1 if none."""
    for pos in range(start, len(text)):
        if text[pos] in end_punctuation:
            return pos
    return -1
def get_sentences(text):
    """Split ``text`` into sentences, dropping questions and tiny fragments.

    Sentences ending in '?' are skipped entirely; for kept sentences the
    terminator character is included.  Fragments shorter than 3 characters
    are discarded.
    """
    sentences = []
    start = 0
    while start != -1:
        end = get_sentence_end(text, start)
        if end != -1:
            if text[end] == '?':
                # Questions are not useful summary candidates -- skip them.
                start = end + 1
                continue
            end += 1
            sentence = text[start:end].strip()
        else:
            # No terminator left: take the trailing fragment and stop after it.
            sentence = text[start:].strip()
        start = end
        if len(sentence) < 3:
            continue
        sentences.append(sentence)
    return sentences
def preproocess_text(text):
    """Normalise raw text: newlines become sentence breaks, runs of
    whitespace collapse to single spaces, and mid-sentence punctuation is
    stripped.  (The function name keeps its original spelling so existing
    callers keep working.)
    """
    flattened = text.strip().replace("\n", ". ")
    flattened = " ".join(flattened.split())
    return flattened.translate(middle_punctuation_remover)
def find_anafor(words):
    """Return the first anaphoric pronoun in ``words``, or None if absent."""
    return next((word for word in words if word in anafors), None)
'''def make_anafored_sentence(anafor, sentence, weight, sentence_prev, weight_prev):
flag = False
for punct in anafors_punctuation:
if sentence.find(anafor) > sentence.find(punct) != -1:
flag = True
break
if flag
return sentence_prev + sentence, (weight_prev + weight) / 2.'''
def get_closest_words(model, dict, top=3):
    """For every in-vocabulary word of ``dict`` return the ``top`` most
    similar *other* in-vocabulary words (most similar first).

    Bug fix: the original built ``weights`` only for in-vocabulary words but
    zipped it against the full key list, so weights and words were misaligned
    whenever any key was missing from ``model.vocab``.  Both lists now come
    from the same filtered sequence.  The stray ``print("finished")`` debug
    output is removed.

    Args:
        model: word-vector model exposing ``vocab`` and ``similarity(w1, w2)``.
        dict: lemma -> frequency mapping (name kept for keyword-compat;
            shadows the builtin).
        top: number of neighbours to keep per word.

    Returns:
        dict mapping word -> tuple of its ``top`` nearest other words.
    """
    vocab_words = [w for w in dict.keys() if w in model.vocab]
    closest = {}
    for word1 in vocab_words:
        scored = sorted(((model.similarity(word1, w2), w2) for w2 in vocab_words),
                        reverse=True)
        # scored[0] is word1 paired with itself; keep the next ``top`` words.
        closest[word1] = tuple(w for _, w in scored[1:top + 1])
    return closest
def build_with_frequencies(model, text):
    """Extractive summariser: score each sentence by lemma frequency (plus
    similar-word and bigram bonuses), then concatenate the best sentences
    until LIMIT characters are reached.

    Sentences shorter than 4 words or starting with an anaphoric pronoun get
    weight 0; sentences in the first third of the text get a +0.5 position
    bonus.
    """
    text = preproocess_text(text)
    dict = build_dictionary(text)
    closest_dict = get_closest_words(model, dict, 3)
    bigrams = build_bigrams(text)
    sentences = get_sentences(text)
    if len(sentences) == 0:
        # Nothing to rank: fall back to a plain prefix of the text.
        return text[:LIMIT]
    weights = []
    for sentence in sentences:
        weight = 0.
        words = sentence.translate(end_punctuation_remover).split(" ")
        # Skip short sentences and those opening with an anaphoric pronoun.
        if len(words) < 4 or not (find_anafor(words[:4]) is None):
            weights.append(0)
            continue
        prev = ''
        # NOTE(review): ``prev`` is never reassigned inside this loop, so the
        # bigram bonus below can never fire -- looks like a bug; confirm.
        for word in words:
            word, _ = morph.choose_lemma(word, -1)
            if dict.get(word) is None:
                continue
            weight += dict[word]
            # Bonus: a third of the frequency of each semantically close word.
            closest = closest_dict.get(word)
            if not (closest is None):
                for word2 in closest:
                    weight += dict[word2] / 3.
            pair = (prev, word) if prev < word else (word, prev)
            if bigrams.get(pair) is None:
                continue
            weight += bigrams[pair]
        # Normalise by sentence length so long sentences are not favoured.
        weight = weight * 1. / len(sentence)
        # weight = weight + 1. / 3. if sent_len <= text_len / 3. else weight
        weights.append(weight)
    # Position bonus for the first third of the sentences.
    for i in range(len(weights) // 3):
        weights[i] += 1./2.
    weights, sentences = zip(*sorted(zip(weights, sentences), reverse=True))
    # Greedily take the highest-weighted sentences up to the length limit.
    length = 0
    result = ""
    for i in range(len(sentences)):
        result += sentences[i]
        length += len(sentences[i])
        if length >= LIMIT:
            break
    return result
def build_with_beginning(text):
    """Baseline summary: the first LIMIT characters of the flattened text."""
    flattened = text.strip().replace("\n", " ")
    return flattened[:LIMIT].strip()
def main():
    """Entry point: load the morphology resources and word vectors, summarise
    every text in the input JSON, and write the summaries to result.json."""
    morph.read_lemmas(xml_dict)
    morph.read_forms(xml_dict)
    morph.read_corpus(xml_corpus)
    # NOTE(review): hard-coded absolute path to the fastText vectors.
    model = KeyedVectors.load_word2vec_format('/home/katyakos/jb_news/VectorX_mediaplanning/base_topics/vectors/wiki.ru.vec')
    with open(input2) as f:
        data = json.load(f)
    n = len(data)
    referators = []
    for i in range(n):
        referators.append(build_with_frequencies(model, data[i].lower()))
    # Write the summaries as a JSON array (formatted by hand).
    out = open("result.json", "w+")
    out.write("[\n")
    for i in range(n):
        out.write("    \"")
        out.write(referators[i])
        if i != n - 1:
            out.write("\",\n")
        else:
            out.write("\"\n")
    out.write("]")
main()
| KatyaKos/nlp-kr | nlp/hw2/refer.py | refer.py | py | 5,886 | python | en | code | 0 | github-code | 90 |
73367799978 | import conllu
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import torch.multiprocessing as mp
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import os, sys
import numpy
from sklearn.metrics import classification_report
# Main Abstraction
## vocabindex to created from traning file
class PosTagDataset(Dataset):
    """CoNLL-U POS-tagging dataset yielding (word-id, tag-id) LongTensor pairs."""

    def __init__(self, data_file, vocab_index):
        self.vocab_index = vocab_index
        # Fixed UD coarse-tag -> index mapping; index 0 is reserved for padding.
        self.pos_tag_index = {"Pad": 0, "ADJ": 17, "ADP": 1, "ADV": 2, "AUX": 3, "CCONJ": 4, "DET": 5, "INTJ": 6, "NOUN": 7, "NUM": 8, "PART": 9, "PRON": 10, "PROPN": 11, "PUNCT": 12, "SCONJ": 13, "SYM": 14, "VERB": 15, "X": 16}
        self.Sentences, self.Tag_Sequences = get_data(data_file, self.vocab_index, self.pos_tag_index)

    def __len__(self):
        return len(self.Sentences)

    def __getitem__(self, idx):
        sentence, tags = self.Sentences[idx], self.Tag_Sequences[idx]
        return torch.LongTensor(sentence), torch.LongTensor(tags)
class PosTagModel(torch.nn.Module):
    """Embedding -> bidirectional LSTM -> linear tagger producing per-token
    tag scores of shape (batch, seq_len, targe_size)."""

    def __init__(self, vocab_size, targe_size, embedding_dim, hidden_dim, no_layers):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, embedding_dim)
        self.blstm = torch.nn.LSTM(embedding_dim, hidden_dim, no_layers,
                                   batch_first=True, bidirectional=True)
        # The bidirectional LSTM concatenates both directions: hidden_dim * 2.
        self.out_linear = torch.nn.Linear(hidden_dim * 2, targe_size)
        self.out_activation = torch.nn.ReLU()

    def forward(self, X):
        embedded = self.embedding(X)
        lstm_out, _ = self.blstm(embedded)
        return self.out_activation(self.out_linear(lstm_out))
def train_loop(model, loss_fn, optimizer, train_dataloader, device):
    """Run one training epoch over ``train_dataloader``.

    Logits and labels are flattened to (batch*seq, n_tags) and (batch*seq,)
    before the loss; padded positions are handled by the loss itself (it is
    constructed with ignore_index=0 elsewhere in this file).
    """
    model.train()
    for batch, (X, y) in enumerate(train_dataloader):
        # Getting data
        X, y = X.to(device), y.to(device)
        # Forward pass and loss
        pred = model(X)
        y = y.reshape(-1)
        pred = pred.reshape(pred.shape[0] * pred.shape[1], pred.shape[2])
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def eval_model(model, loss_fn, data_loader, device):
    """Evaluate ``model`` on ``data_loader`` without gradient tracking.

    Returns:
        tuple: (loss per non-pad token, accuracy in percent over non-pad
        tokens, CPU tensor of true labels, CPU tensor of predicted labels).

    Bug fix: the report tensors were seeded with ``torch.ones((1,))``, which
    injected one spurious (label 1, prediction 1) sample into the y_true /
    y_pred pair fed to ``classification_report``; they now start empty.
    """
    model.eval()
    y_true_report = torch.empty(0, dtype=torch.long).to(device)
    y_pred_report = torch.empty(0, dtype=torch.long).to(device)
    total_loss, correct, total_pred = 0, 0, 0
    with torch.no_grad():
        for X, y in data_loader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            # Flatten to (batch*seq, n_tags) / (batch*seq,) for the loss.
            y = y.reshape(-1)
            pred = pred.reshape(pred.shape[0] * pred.shape[1], pred.shape[2])
            loss = loss_fn(pred, y)
            total_loss += loss.item()
            # Exclude padding positions (tag id 0) from accuracy and reports.
            mask = y != 0
            correct += (pred.argmax(1)[mask] == y[mask]).type(torch.float).sum().item()
            total_pred += y[mask].shape[0]
            y_true_report = torch.cat((y_true_report, y[mask]), 0)
            y_pred_report = torch.cat((y_pred_report, pred.argmax(1)[mask]), 0)
    return total_loss / total_pred, (correct * 100) / total_pred, y_true_report.to(torch.device("cpu")), y_pred_report.to(torch.device("cpu"))
# Helper Functions
def get_data(data_file, vocab_index, pos_tag_index):
    """Read a CoNLL-U file and encode every sentence as parallel lists of
    word ids (unknown words map to <unk>) and tag ids."""
    sentences, tag_sequences = [], []
    for token_list in conllu.parse_incr(open(data_file, "r", encoding="utf-8")):
        word_ids, tag_ids = [], []
        for token in token_list:
            word_ids.append(vocab_index.get(token["form"], vocab_index["<unk>"]))
            tag_ids.append(pos_tag_index[token["upos"]])
        sentences.append(word_ids)
        tag_sequences.append(tag_ids)
    return sentences, tag_sequences
def get_vocab_index(data_file):
    """Build the word -> index map from the training file.

    Only words seen at least twice get an index (singletons fall back to
    <unk> at lookup time); index 0 is the pad token, 1 is <unk>.
    """
    vocab_index = {"pad": 0, "<unk>": 1}
    seen_once = {}
    for token_list in conllu.parse_incr(open(data_file, "r", encoding="utf-8")):
        for token in token_list:
            form = token["form"]
            if form not in seen_once:
                seen_once[form] = 1
            elif form not in vocab_index:
                vocab_index[form] = len(vocab_index)
    return vocab_index
def custom_collate(batch):
    """Pad a batch of (sentence, tags) tensor pairs to a common length
    (pad value 0, batch-first)."""
    sentences, tags = zip(*batch)
    return (pad_sequence(list(sentences), batch_first=True),
            pad_sequence(list(tags), batch_first=True))
## Running Environment for code
def ddp_setup(rank, world_size):
    """Initialise the NCCL process group for this rank, using a fixed
    single-node rendezvous address."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12540'
    init_process_group(backend='nccl', rank=rank, world_size=world_size)
def main_distributed_GPU(rank, world_size, hyper_params, qeue, Event, Store):
    """Per-process DDP worker: train, push per-epoch losses through ``qeue``,
    wait on ``Event``, then evaluate on the test split.

    Rank 0 additionally saves the model weights when ``Store`` is True.
    """
    # Configuration
    ddp_setup(rank, world_size)
    device = torch.device("cuda", rank)
    # Hyperparameters
    embedding_dim = hyper_params["embedding_dim"]
    hidden_dim = hyper_params["hidden_dim"]
    no_layers = hyper_params["no_layers"]
    epochs = hyper_params["epochs"]
    batch_size = hyper_params["batch_size"]
    lr = hyper_params["lr"]
    # Loading data (the vocab is always built from the training split).
    train_file = "./UD_English-Atis/en_atis-ud-train.conllu"
    vocab_index = get_vocab_index(train_file)
    train_dataset = PosTagDataset(train_file, vocab_index)
    train_dataloader = DataLoader(train_dataset, batch_size, shuffle=False, collate_fn=custom_collate, sampler=DistributedSampler(train_dataset))
    dev_file = "./UD_English-Atis/en_atis-ud-dev.conllu"
    dev_dataset = PosTagDataset(dev_file, vocab_index)
    dev_dataloader = DataLoader(dev_dataset, batch_size, shuffle=False, collate_fn=custom_collate, sampler=DistributedSampler(dev_dataset))
    test_file = "./UD_English-Atis/en_atis-ud-test.conllu"
    test_dataset = PosTagDataset(test_file, vocab_index)
    test_dataloader = DataLoader(test_dataset, batch_size, shuffle=False, collate_fn=custom_collate, sampler=DistributedSampler(test_dataset))
    # Creating model loss function and optimizer (tag id 0 = padding is
    # excluded from the loss via ignore_index).
    vocab_size = len(train_dataset.vocab_index)
    no_pos_tags = len(train_dataset.pos_tag_index)
    loss_fn = torch.nn.CrossEntropyLoss(ignore_index=0, reduction="sum")
    model = PosTagModel(vocab_size, no_pos_tags, embedding_dim, hidden_dim, no_layers).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr)
    model = DDP(model, device_ids=[device])
    # Training: record per-epoch train/validation losses.
    loss_values = torch.zeros((epochs, 2))
    for t in tqdm(range(epochs)):
        train_loop(model, loss_fn, optimizer, train_dataloader, device)
        loss_values[t, 0] = eval_model(model, loss_fn, train_dataloader, device)[0] # Training Loss
        loss_values[t, 1] = eval_model(model, loss_fn, dev_dataloader, device)[0] # validation Loss
    qeue.put(loss_values)
    # Wait until the parent has collected every worker's losses.
    Event.wait()
    test_eval = eval_model(model, loss_fn, test_dataloader, device)
    print("Testing accuracy", test_eval[1])
    print(classification_report(test_eval[2].numpy(), test_eval[3].numpy()))
    if rank == 0 and Store == True:
        param_data = model.module.state_dict()
        torch.save(param_data, "model_weights.pth")
    destroy_process_group()
def get_loss_values(hyperpar, Save):
    """Spawn one DDP worker per visible GPU, average their per-epoch
    train/validation losses, and return the (epochs, 2) loss tensor.

    ``Save`` is forwarded to the workers' ``Store`` flag (rank-0
    checkpointing).
    """
    world_size = torch.cuda.device_count()
    print("Number of GPUs: ", world_size)
    Events = [mp.Event() for _ in range(world_size)]
    qeue = mp.SimpleQueue()
    processes = []
    for rank in range(world_size):
        p = mp.Process(target=main_distributed_GPU, args=(rank, world_size, hyperpar, qeue, Events[rank], Save))
        processes.append(p)
        p.start()
    # Average the per-epoch loss curves across workers.
    Data = torch.zeros((hyperpar["epochs"], 2))
    for _ in range(world_size):
        Data += qeue.get()
    Data = Data / world_size
    # Release the workers blocked on their events so they run the test phase.
    for event in Events:
        event.set()
    for p in processes:
        p.join()
    return Data
if __name__ == "__main__":
hyperpar = {"embedding_dim": 128, "hidden_dim": 128, "no_layers": 2, "epochs": 10, "batch_size": 32, "lr": 0.01}
Data = get_loss_values(hyperpar, False)
print(Data) | P-Balaramakrishna-Varma/NLPA2 | neural_tag.py | neural_tag.py | py | 8,400 | python | en | code | 0 | github-code | 90 |
from django.shortcuts import render, redirect

from admin.form import AdminForm
from admin.models import admin
# Create your views here.
def create(request):
    """Handle the admin creation/login form.

    Bug fixes: ``redirect`` was used without being imported (NameError on
    every POST), and the form was saved without calling ``is_valid()`` (a
    crash on any bad submission).  Invalid POSTs now re-render the bound
    form so validation errors are shown.
    """
    if request.method == "POST":
        form = AdminForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/dashboard")
    else:
        form = AdminForm()
    return render(request, "accounts/adminlogin.html", {'form': form})
12704450191 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Remove every value that occurs more than once in the sorted list,
        keeping only values that appear exactly once."""
        sentinel = ListNode(-1)
        sentinel.next = head
        tail, cursor = sentinel, head
        while cursor:
            if cursor.next and cursor.next.val == cursor.val:
                # Skip the entire run of equal values.
                run_val = cursor.val
                while cursor and cursor.val == run_val:
                    cursor = cursor.next
            else:
                # Unique value: link it onto the kept list.
                tail.next = cursor
                tail = tail.next
                cursor = cursor.next
        tail.next = None
        return sentinel.next
4129671612 | import sys
import numpy as np
# Population-growth simulation: each value is one fish's spawn timer.
# Simulate 80 days and print the final population size.
state = np.array([int(x) for x in sys.stdin.readline().split(',')])
for i in range(80):
    # Fish at 0 spawn: count them, pre-set them to 7 and append newborns at
    # 9; the uniform -1 below leaves them at 6 and 8 respectively.
    zeros = state.shape[0] - np.count_nonzero(state)
    state[state == 0] = 7
    state = np.concatenate([state, np.full((zeros,), 9)])
    state -= 1
print(state.shape[0])
44009984238 | # import libraries
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
def one_hot_encoding(data: pd.DataFrame, categorical_features: list) -> pd.DataFrame:
    """Apply one hot encoding to categorical features in a dataframe.

    Args:
        data (pd.DataFrame): Input dataframe.
        categorical_features (list): List of column names to be encoded.

    Returns:
        pd.DataFrame: A new dataframe with the categorical columns replaced
        by their one-hot encoded counterparts.
    """
    # Instantiate the OneHotEncoder and fit/transform in one step.
    encoder = OneHotEncoder()
    encoded = encoder.fit_transform(data[categorical_features])
    encoded_feature_names = encoder.get_feature_names_out()
    # Bug fix: reuse the source index so pd.concat aligns rows; with the
    # default RangeIndex the concat produced NaN-padded, misaligned rows
    # whenever ``data`` had a non-default index.
    encoded_df = pd.DataFrame(encoded.toarray(), columns=encoded_feature_names,
                              index=data.index)
    return pd.concat([data.drop(columns=categorical_features), encoded_df], axis=1)
def date_transform(data: pd.DataFrame, date_columns: list) -> pd.DataFrame:
    """Extract day, month, and year from date columns in a dataframe.

    Args:
        data (pd.DataFrame): Input dataframe (left unmodified).
        date_columns (list): List of column names to be transformed.

    Returns:
        pd.DataFrame: A new dataframe where every date column is replaced by
        ``<col>_day``, ``<col>_month`` and ``<col>_year`` integer columns.
    """
    # Bug fix: work on a copy -- the docstring promises a *new* dataframe,
    # but the original mutated the caller's frame in place.
    data = data.copy()
    for column in date_columns:
        parsed = pd.to_datetime(data[column])
        data[column + '_day'] = parsed.dt.day
        data[column + '_month'] = parsed.dt.month
        data[column + '_year'] = parsed.dt.year
    # Drop the original date columns from the copy only.
    return data.drop(columns=date_columns)
def scaling(data: pd.DataFrame, numeric_columns: list, scaler) -> pd.DataFrame:
    """Scale the given numeric columns in place with the supplied scaler.

    Args:
        data (pd.DataFrame): Input dataframe (mutated in place).
        numeric_columns (list): List of column names to be scaled.
        scaler: Any object exposing ``fit_transform`` (e.g. an sklearn
            scaler instance).

    Returns:
        pd.DataFrame: The same dataframe with the listed columns replaced
        by their scaled values.
    """
    # Fit on the selected columns and write the scaled values straight back.
    data[numeric_columns] = scaler.fit_transform(data[numeric_columns])
    return data
| mawada-sweis/Clustering-Analysis | src/utils/transform_data.py | transform_data.py | py | 2,355 | python | en | code | 3 | github-code | 90 |
71319205417 | from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from sport_academy.models import Player
class DriverTest(TestCase):
    """Tests for the player search form on the players list view."""

    # Pre-populated database snapshot shared by all tests in this case.
    fixtures = [
        "sport_club_db_data.json"
    ]

    def setUp(self):
        # Authenticate as an existing fixture user; the view presumably
        # requires a logged-in user -- TODO confirm against the view.
        self.user = get_user_model().objects.get(id=1)
        self.client.force_login(self.user)

    def test_search_form_players_by_last_name(self):
        """Searching by last_name filters players case-insensitively."""
        response = self.client.get(
            reverse("sport_academy:players-list") + "?last_name=M"
        )
        # The rendered queryset must match a direct icontains filter.
        self.assertEqual(
            list(response.context["players_list"]),
            list(Player.objects.filter(last_name__icontains="M"))
        )
| anastasiia-tsurkan/iCoach | sport_academy/tests/test_forms.py | test_forms.py | py | 707 | python | en | code | 1 | github-code | 90 |
25788940903 | from hedera import (
Hbar,
PrivateKey,
AccountBalanceQuery,
AccountCreateTransaction,
TransferTransaction,
Transaction,
)
from get_client import client
from jnius import cast
# Demo: create an "exchange" account that requires receiver signatures plus a
# user account, then run a transfer that must be co-signed by the exchange.
exchangeKey = PrivateKey.generate()
userKey = PrivateKey.generate()
print("Exchange Key : ", exchangeKey.toString())
print("User Key : ", userKey.toString())
# the exchange only accepts transfers that it validates through a side channel (e.g. REST API)
# The owner key has to sign this transaction
# when setReceiverSignatureRequired is true
tran = AccountCreateTransaction(
).setInitialBalance(Hbar(1)
).setReceiverSignatureRequired(True
).setKey(exchangeKey
).freezeWith(client
).sign(exchangeKey)
receipt = tran.execute(client).getReceipt(client)
exchangeAccountId = receipt.accountId
print("exchange account = ", exchangeAccountId.toString())
# Ordinary user account with a 5-hbar starting balance.
tran = AccountCreateTransaction().setInitialBalance(Hbar(5)).setKey(userKey)
receipt = tran.execute(client).getReceipt(client)
userAccountId = receipt.accountId
print("user account = ", userAccountId.toString())
# the exchange-provided memo required to validate the transaction
# NOTE: to manually sign, you must freeze the Transaction first
transferTxn = TransferTransaction(
).addHbarTransfer(userAccountId, Hbar(2).negated()
).addHbarTransfer(exchangeAccountId, Hbar(2)
).setTransactionMemo("https://some-exchange.com/user1/account1"
).freezeWith(client
).sign(userKey)
# the exchange must sign the transaction in order for it to be accepted by the network
# assume this is some REST call to the exchange API server
signedTxnBytes = Transaction.fromBytes(transferTxn.toBytes()).sign(exchangeKey).toBytes()
# parse the transaction bytes returned from the exchange
signedTransferTxn = Transaction.fromBytes(signedTxnBytes)
# get the amount we are about to transfer
# we built this with +2, -2
realTransferTxn = cast(TransferTransaction, signedTransferTxn)
transferAmount = realTransferTxn.getHbarTransfers().values().toArray()[0].toString()
print("about to transfer ", transferAmount, "...")
# we now execute the signed transaction and wait for it to be accepted
transactionResponse = signedTransferTxn.execute(client)
# (important!) wait for consensus by querying for the receipt
transactionResponse.getReceipt(client)
# Final balances after the transfer completed.
senderBalanceAfter = AccountBalanceQuery().setAccountId(userAccountId).execute(client).hbars
receiptBalanceAfter = AccountBalanceQuery().setAccountId(exchangeAccountId).execute(client).hbars
print(userAccountId.toString(), " balance = ", senderBalanceAfter.toString())
print(exchangeAccountId.toString(), " balance = ", receiptBalanceAfter.toString())
23410225016 | #!/usr/bin/env python3
import asyncio, random
from irctokens import build, Line
from ircrobots import Bot as BaseBot
from ircrobots import Server as BaseServer
from ircrobots import ConnectionParams
# aaaaaaaaaaaaaAAAAAAAAAAAAAAA
# im too lazy to import more stuffs :tm:
from ircrobots.server import *
from config import *
class Server(BaseServer):
    """One IRC network connection; relays channel traffic to the others."""

    # overwrite connect so i can put try except blocks there
    async def connect(self,
            transport: ITCPTransport,
            params: ConnectionParams):
        """Like BaseServer.connect, but any failure only marks this server
        disconnected instead of propagating and taking down the whole bot."""
        try:
            await sts_transmute(params)
            await resume_transmute(params)
            reader, writer = await transport.connect(
                params.host,
                params.port,
                tls       =params.tls,
                tls_verify=params.tls_verify,
                bindhost  =params.bindhost)
            self._reader = reader
            self._writer = writer
            self.params = params
            await self.handshake()
        except:
            print('connection with {} failed, disconnecting'.format(self.name))
            self.disconnected = True

    async def line_read(self, line: Line):
        """Handle one inbound line: join the relay channel on welcome (001),
        relay channel PRIVMSGs, run admin commands, accept invites."""
        print(f"{self.name} < {line.format()}")
        if line.command == "001":
            print(f"connected to {self.name}")
            # freenode/libera use a different relay channel constant.
            self.chan = FNCHANNEL if self.name in ["freenode","libera"] else CHANNEL
            await self.send(build("JOIN", [self.chan]))
        if line.command == "PRIVMSG" and line.params.pop(0) == self.chan:
            # Normalize CTCP ACTION ("/me ...") into a "*"-prefixed message.
            text = line.params[0].replace("\1ACTION","*").replace("\1","")
            nick = line.source.split('!')[0]
            # Drop our own messages, batched playback, and lines carrying the
            # four-control-character marker that bc() prepends (loop guard).
            if nick == self.nickname or (line.tags and "batch" in line.tags) or "\x0f\x0f\x0f\x0f" in text:
                return
            if self.disconnected:
                return
            # Admin commands addressed as "<botnick>: <cmd> ...":
            # connect/unlink manage networks, anything else is broadcast
            # as a raw IRC command to every network via ac().
            if nick.lower() in self.users and self.users[nick.lower()].account in ADMINS:
                if text[:len(self.nickname)+2].lower() == f'{self.nickname}: '.lower():
                    args = text[len(self.nickname)+2:].split(' ')
                    if args[0] == 'connect' and len(args) > 4:
                        await self.bot.add_server(args[1],ConnectionParams(NICKNAME,args[2],args[3],bool(int(args[4]))))
                        await self.send(build("PRIVMSG",[self.chan,"Connected to {} :3".format(args[1])]))
                        return
                    if args[0] == 'unlink' and len(args) > 1:
                        await self.bot.servers[args[1]].disconnect()
                        del self.bot.servers[args[1]]
                        await self.send(build("PRIVMSG",[self.chan,"Unlinked {} :S".format(args[1])]))
                        return
                    for i in random.sample(list(self.bot.servers),len(self.bot.servers)):
                        asyncio.create_task(self.bot.servers[i].ac(self.name,args))
                    return
            # Insert a zero-width non-joiner after the first character of every
            # occurrence of each NOPING name -- presumably to avoid pinging
            # those users on the receiving networks.
            for npn in NOPING:
                offset = 1
                for loc in find_all_indexes(text.lower(), npn.lower()):
                    text = text[:loc+offset]+"\u200c"+text[loc+offset:]
                    offset += 1
            # Fan the message out to every connected network (bc skips self).
            for i in random.sample(list(self.bot.servers),len(self.bot.servers)):
                asyncio.create_task(self.bot.servers[i].bc(self.name,nick,text))
            #await self.send(build("PRIVMSG ##xfnw :ine and boat ",[text]))
        if line.command == "INVITE":
            await self.send(build("JOIN",[line.params[1]]))

    async def line_send(self, line: Line):
        """Log every outbound line."""
        print(f"{self.name} > {line.format()}")

    async def bc(self,name,nick,msg):
        """Broadcast msg from nick@name into this network's relay channel.

        The leading run of four control characters marks the line as relayed
        (see the loop guard in line_read), and the zero-width non-joiner in
        the nick keeps the original author from being pinged here.
        """
        if self.disconnected or name == self.name or "chan" not in list(dir(self)):
            return
        await self.send(build("PRIVMSG",[self.chan,"\x0f\x0f\x0f\x0f<"+nick[:1]+"\u200c"+nick[1:]+"@"+name+"> "+msg]))

    async def ac(self,name,args):
        """Run an admin-broadcast raw command in this network's channel.

        A word starting with ':' begins an IRC-style trailing parameter that
        swallows the rest of the words into a single argument.
        """
        if self.disconnected or "chan" not in list(dir(self)):
            return
        nargs = []
        isComb = False
        for arg in args:
            if arg[0] == ':':
                isComb = True
                nargs.append(arg[1:])
                continue
            if isComb:
                nargs[-1] += ' '+arg
            else:
                nargs.append(arg)
        await self.send(build(nargs[0],[self.chan]+nargs[1:]))
class Bot(BaseBot):
    """Bot whose per-network connections use our relaying Server subclass."""
    def create_server(self, name: str):
        # Factory hook: ircrobots calls this for every added network.
        return Server(self, name)
def find_all_indexes(input_str, search_str):
    """Return every index at which search_str occurs in input_str.

    Overlapping occurrences are included: the scan resumes one character
    after each hit rather than after the whole match.
    """
    matches = []
    pos = 0
    limit = len(input_str)
    while pos < limit:
        hit = input_str.find(search_str, pos)
        if hit == -1:
            break
        matches.append(hit)
        pos = hit + 1
    return matches
async def main():
    """Connect one Bot to every network listed in SERVERS and run forever."""
    bot = Bot()
    for name, host, port, ssl in SERVERS:
        params = ConnectionParams(NICKNAME, host, port, ssl)
        await bot.add_server(name, params)
    # Runs until every connection is closed.
    await bot.run()

if __name__ == "__main__":
    asyncio.run(main())
| xfnw/relay | bot.py | bot.py | py | 4,938 | python | en | code | 0 | github-code | 90 |
18317568279 | N=int(input())
L=list(map(int,input().split()))
suml=sum(L)
l,ll,i=L[0],0,0
while l<suml/2:
i+=1
ll=l
l+=L[i]
key=min(l-suml/2,suml/2-ll)
print(int(key*2)) | Aasthaengg/IBMdataset | Python_codes/p02854/s925779179.py | s925779179.py | py | 162 | python | en | code | 0 | github-code | 90 |
7671707668 | import pandas as pd
from method.frame.checking_data import DataMining
class ScoreCardProcess(DataMining):
    def __init__(self, data,
                 label: str = 'label',
                 show_plot: bool = False):
        """Scorecard pipeline over *data* with target column *label*.

        use_specified_col stays None until a caller restricts modelling to
        an explicit feature subset.
        """
        self.data = data
        self.label = label
        self.show_plot = show_plot
        # Optional explicit feature list; None means "use all columns".
        self.use_specified_col = None
        DataMining.__init__(self, self.data, self.label)
    def pro_check_data(self, fillna: dict = None,
                       abnor: list = None,
                       remove_blank: bool = True,
                       resample: bool = True,
                       oversampling: bool = False,
                       cek_uni_char: list = ["'", ""]):
        """Clean and balance the working data prior to modelling.

        NOTE(review): ``cek_uni_char`` is a mutable default argument; it is
        only read here, but replacing it with ``None`` + an in-body default
        would be safer.
        """
        # Model on the explicitly specified feature subset only.
        if self.use_specified_col is not None:
            assert isinstance(self.use_specified_col,
                              list), 'Specified columns must be in a list'
            self.data = self.data[[
                self.label] + self.use_specified_col]
            self.renew()
        # Print the class distribution of the target.
        self.check_y_dist()
        # Check column dtypes.
        self.check_dtypes()
        # Strip abnormal characters from string values.
        if cek_uni_char is not None:
            for i in cek_uni_char:
                self.check_uni_char(i)
        if fillna is not None:
            self.fill_missing_values(mapping=fillna)
        # Remove abnormal values.
        if abnor is not None:
            self.filter_abnor_values(abnor)
        if remove_blank:
            self.filter_blank_values()
        # Missing-value check.
        self.check_missing_value(print_result=True)
        # Class balancing via resampling.
        if resample:
            self.filter_data_subtable(
                label=self.label, balance=True, oversampling=oversampling)
        # Describe the final sample.
        self.check_y_dist()
        self.epo = self.data_describe()
    def pro_feature_filter(self, inplace_data: bool = True,
                           var_zip=None,
                           plot_zip: bool = False,
                           iv_limit: float = .02,
                           missing_limit: float = .95,
                           identical_limit: float = .95,
                           var_rm: list = None,
                           var_kp: list = None,
                           positive: str = 'good|1'):
        """Filter features by IV / missing rate / identical rate.

        Variables in ``var_kp`` (plus any explicitly specified columns) are
        always kept; ``var_rm`` entries are always removed. With
        ``inplace_data`` the surviving columns replace ``self.data``.
        """
        # Default: compress every numeric column (no custom zip config).
        if var_zip is None:
            numerical_col = self.data.drop(self.label, axis=1).select_dtypes(
                include=['int', 'float']).columns
            var_zip = {col: None for col in numerical_col}
        if var_kp is None:
            var_kp = list()
        # Explicitly specified modelling columns are always kept as well.
        var_kp2 = list() if self.use_specified_col is None else self.use_specified_col
        self.check_feature_zip(var_zip, c=.3, if0=False, plot=plot_zip)
        # Create test_data from the zipped-feature copy, then filter it.
        self.copy_filter_feature_zip()
        self.test_data = self.sample_var_filter(dt=self.test_data,
                                                x=None,
                                                iv_limit=iv_limit,
                                                missing_limit=missing_limit,
                                                identical_limit=identical_limit, var_rm=var_rm,
                                                var_kp=list(
                                                    set(var_kp + var_kp2)),
                                                return_rm_reason=True, positive=positive)
        if inplace_data:
            self.data = self.data[self.test_data.columns.tolist()]
            self.renew()
        self.epo = self.data_describe()
def feature_process(self, iv_threshold: float = .15,
max_features: int = 6,
corr_threshold: float = .6,
cum_importance: float = .95,
breaks_adj=None, var_remove: list | None = None,
var_keep: list | None = None) -> None:
if var_keep is not None:
assert isinstance(var_keep, list), 'var_keep must be a list'
if var_remove is not None:
assert isinstance(var_remove, list), 'var_remove must be a list'
if self.use_specified_col is not None:
assert isinstance(self.use_specified_col,
list), 'use_specified_col must be a list'
print('使用指定特征建模...')
self.data = self.data[[self.label] + self.use_specified_col]
self.renew()
else:
self.bin0 = self.sample_woe_bin()
| JPL-JUNO/Collections | scorecard/method/process.py | process.py | py | 4,542 | python | en | code | 0 | github-code | 90 |
73951313257 | import os
import csv
import pandas as pd
class AddingStuff:
    """Simple CSV-backed expense store.

    Rows from ``database/expenses.csv`` (located next to this module) are
    held in memory as a list of dicts and written back on modification.
    """

    def __init__(self):
        # In-memory copy of the expense rows (list of dicts).
        self.categories = self.load_csv()

    def _csv_path(self):
        """Absolute path of the backing CSV, resolved relative to this module."""
        path = os.path.abspath(os.path.dirname(__file__))
        return os.path.join(path, 'database/expenses.csv')

    def load_csv(self):
        """Read the expenses CSV from disk into a list of row dicts."""
        arr = []
        with open(self._csv_path()) as csvfile:
            reader = csv.DictReader(csvfile)
            for dict_ in reader:
                arr.append(dict_)
        return arr

    def write_csv(self):
        """Persist the in-memory rows back to the CSV file."""
        # takes data as dictionary and turns it into csv
        df = pd.DataFrame.from_dict(self.categories)
        print(df)
        # Bug fix: this previously wrote to 'database/expenses.csv' relative
        # to the *current working directory*, while load_csv read relative to
        # the module -- writes could silently land in a different file.
        df.to_csv(self._csv_path(), index=False, header=True)

    def sub_(self):
        """Return the total of the 'amount' column, re-read from disk."""
        num = 0
        for category in self.load_csv():
            num += int(category['amount'])
        return num

    def delete_expense(self, name):
        """Remove every row whose 'category' equals *name* and persist."""
        for c in self.categories:
            if c['category'] == name:
                self.categories = [i for i in self.categories if i != c]
                self.write_csv()
| arsh939/Python-Projects | python-budget/addingStuff.py | addingStuff.py | py | 1,075 | python | en | code | 3 | github-code | 90 |
37778018200 | from rest_framework.authtoken.models import Token
from astrobin.middleware.mixins import MiddlewareParentClass
from common.services import AppRedirectionService
REST_FRAMEWORK_TOKEN_COOKIE = 'classic-auth-token'
class RestFrameworkTokenCookieMiddleware(MiddlewareParentClass):
    """Sets a DRF auth-token cookie on ordinary page loads for logged-in users."""

    def _process(self, request):
        # Only act on authenticated, non-AJAX requests that neither carry an
        # Authorization header nor already have the token cookie.
        return (
            hasattr(request, 'user') and
            request.user.is_authenticated and
            not request.is_ajax() and
            not 'HTTP_AUTHORIZATION' in request.META and
            not request.COOKIES.get(REST_FRAMEWORK_TOKEN_COOKIE)
        )

    def process_response(self, request, response):
        if self._process(request):
            # Reuse the user's existing DRF token, creating one on demand.
            token, created = Token.objects.get_or_create(user=request.user)
            response.set_cookie(
                REST_FRAMEWORK_TOKEN_COOKIE,
                token,
                max_age=60 * 60 * 24 * 180,  # 180 days
                domain=AppRedirectionService.cookie_domain(request))
        return response
| astrobin/astrobin | astrobin/middleware/rest_framework_token_cookie_middleware.py | rest_framework_token_cookie_middleware.py | py | 1,011 | python | en | code | 100 | github-code | 90 |
13810613079 | from datetime import timedelta
from functools import wraps
from django.conf import settings
from django.utils import timezone
from user.models import LoginRequest
import requests
def check_recaptcha(view_func):
    """Decorator that validates a Google reCAPTCHA response on POST requests.

    Sets ``request.recaptcha_is_valid`` to True/False for POSTs (always True
    when no secret key is configured, e.g. in development) and leaves it as
    None otherwise; the wrapped view decides how to react.
    """
    @wraps(view_func)
    def _wrapped_view(view, request, *args, **kwargs):
        request.recaptcha_is_valid = None
        if request.method == 'POST':
            if not settings.GOOGLE_RECAPTCHA_SECRET_KEY:
                # No key configured: skip verification entirely.
                request.recaptcha_is_valid = True
            else:
                recaptcha_response = request.POST.get('g-recaptcha-response')
                data = {
                    'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
                    'response': recaptcha_response
                }
                # Server-side verification against Google's endpoint.
                r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)
                result = r.json()
                if result['success']:
                    request.recaptcha_is_valid = True
                else:
                    request.recaptcha_is_valid = False
        return view_func(view, request, *args, **kwargs)
    return _wrapped_view
def get_client_ip(request):
    """Return the originating client IP for *request*.

    Prefers the first address in the X-Forwarded-For header (set by
    proxies); falls back to REMOTE_ADDR when the header is absent or empty.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def reset_tries(request):
    """Reset the failed-login counter for the requesting client's IP.

    Assumes a LoginRequest row already exists for the IP (raises
    LoginRequest.DoesNotExist otherwise) -- presumably called after a
    successful login; TODO confirm against callers.
    """
    client_ip = get_client_ip(request)
    login_request = LoginRequest.objects.get(ip=client_ip)
    login_request.reset_tries()
    login_request.save()
def check_client_ip(view_func):
    """Decorator that rate-limits login attempts per client IP.

    POST requests from the same IP within a 5-minute window count against
    the limit configured in ``settings.LOGIN_TRIES`` (default 4). Sets
    ``request.client_req_is_valid`` to True while under the limit, False
    once exceeded, and leaves it None for non-POST requests; the wrapped
    view decides how to react.
    """
    @wraps(view_func)
    def _wrapped_view(view, request, *args, **kwargs):
        request.client_req_is_valid = None
        if request.method == 'POST':
            client_ip = get_client_ip(request)
            request_time = timezone.now()
            # (removed a leftover debug print of request_time)
            try:
                login_request = LoginRequest.objects.get(ip=client_ip)
                latest_request = login_request.latest_request
                # Requests within 5 minutes of the previous one count
                # against the limit; a longer gap resets the counter.
                if request_time - latest_request < timedelta(minutes=5):
                    login_request.login_tries += 1
                else:
                    login_request.reset_tries()
                if login_request.login_tries < getattr(settings, 'LOGIN_TRIES', 4):
                    login_request.latest_request = request_time
                    login_request.save()
            except LoginRequest.DoesNotExist:
                # First attempt from this IP: start tracking it.
                login_request = LoginRequest.objects.create(ip=client_ip, latest_request=request_time)
                login_request.save()
            if login_request.login_tries < getattr(settings, 'LOGIN_TRIES', 4):
                request.client_req_is_valid = True
            else:
                request.client_req_is_valid = False
        return view_func(view, request, *args, **kwargs)
    return _wrapped_view
| HackAssistant/hackassistant | user/verification.py | verification.py | py | 2,846 | python | en | code | 6 | github-code | 90 |
27088036128 | import re
from llnl.util.argparsewriter import ArgparseWriter
import spack.cmd
import spack.main
from spack.main import SpackCommand
commands = SpackCommand('commands')
parser = spack.main.make_argument_parser()
spack.main.add_all_commands(parser)
def test_commands_by_name():
    """Test default output of spack commands."""
    out = commands()
    # Default format prints every registered command, sorted, one per line.
    assert out.strip().split('\n') == sorted(spack.cmd.all_commands())
def test_subcommands():
    """Test subcommand traversal."""
    out = commands('--format=subcommands')
    # Spot-check a few known multi-level commands.
    assert 'spack mirror create' in out
    assert 'spack buildcache list' in out
    assert 'spack repo add' in out
    assert 'spack pkg diff' in out
    assert 'spack url parse' in out
    assert 'spack view symlink' in out

    # Exhaustive check: every command the writer visits must be listed.
    class Subcommands(ArgparseWriter):
        def begin_command(self, prog):
            assert prog in out
    Subcommands().write(parser)
def test_rst():
    """Do some simple sanity checks of the rst writer."""
    out = commands('--format=rst')

    # Each command should appear both verbatim and as its rst anchor
    # (spaces replaced by dashes).
    class Subcommands(ArgparseWriter):
        def begin_command(self, prog):
            assert prog in out
            assert re.sub(r' ', '-', prog) in out
    Subcommands().write(parser)
| matzke1/spack | lib/spack/spack/test/cmd/commands.py | commands.py | py | 1,203 | python | en | code | 2 | github-code | 90 |
8281775305 | # -*- coding:utf-8 -*-
"""
Created by haven on 16/8/20.
"""
import requests
from config import config
# from Dach import Dache
from Dache import Dache
class Uber(Dache):
    """Uber price/ETA lookup for a trip between two coordinates."""

    def __init__(self, from_lat, from_lon, to_lat, to_lon):
        """Build the query parameters from the endpoints plus the static
        params configured under config['uber']."""
        # Dache.__init__(self)
        # super(from_lat, from_lon, to_lat, to_lon)
        super(Uber, self).__init__(from_lat, from_lon, to_lat, to_lon)
        self.config = config['uber']
        self.params = {
            'start_latitude': from_lat,
            'start_longitude': from_lon,
            'end_latitude': to_lat,
            'end_longitude': to_lon
        }
        # Merge in static config params (server token etc.).
        self.params = dict(self.params, **self.config['params'])

    def price(self):
        """Query the price endpoint; midpoint of the low/high estimates.

        NOTE(review): assumes prices[0] is the single ride and prices[1]
        the pool product -- confirm against the API response ordering.
        """
        ret = requests.get(self.config['url_price'], params=self.params, headers=self.config['headers']).json()
        # print 'get price'
        return {
            'single_price': (ret['prices'][0]['high_estimate'] + ret['prices'][0]['low_estimate']) / 2,
            'pool_price': (ret['prices'][1]['high_estimate'] + ret['prices'][1]['low_estimate']) / 2,
            'distance': ret['prices'][1]['distance'],
            'duration': ret['prices'][1]['duration'],
            'name':'uber'
        }

    def time(self):
        """Query the ETA endpoint; average of the first two product ETAs."""
        ret = requests.get(self.config['url_time'], params=self.params, headers=self.config['headers']).json()
        # print 'get time'
        return {
            'wait_time': (ret['times'][0]['estimate']+ret['times'][1]['estimate'])/2
        }

    def get_info(self):
        """Combined price + wait-time dict for this trip."""
        return dict(self.price(), **self.time())
        # return {
        #     'single': (ret['prices'][0]['high_estimate'] + ret['prices'][0]['low_estimate']) / 2,
        #     'pool': (ret['prices'][1]['high_estimate'] + ret['prices'][1]['low_estimate']) / 2,
        #     'distance': ret['prices'][1]['distance'],
        #     'duration': ret['prices'][1]['duration']
        # }
def get_uber_time(from_lat, from_lon, to_lat, to_lon):
    """Query the Uber ETA endpoint for a trip between two coordinates.

    Returns the decoded JSON response from the API.
    """
    cfg = config['uber']
    query = {
        'start_latitude': from_lat,
        'start_longitude': from_lon,
        'end_latitude': to_lat,
        'end_longitude': to_lon
    }
    # Merge in the static config params (server token etc.).
    query.update(cfg['params'])
    response = requests.get(cfg['url_time'], params=query, headers=cfg['headers'])
    return response.json()
def get_uber_price(from_lat, from_lon, to_lat, to_lon):
    """Query the Uber price-estimate endpoint for a trip between two coordinates.

    Returns the decoded JSON response from the API.
    """
    cfg = config['uber']
    query = {
        'start_latitude': from_lat,
        'start_longitude': from_lon,
        'end_latitude': to_lat,
        'end_longitude': to_lon
    }
    # Merge in the static config params (server token etc.).
    query.update(cfg['params'])
    response = requests.get(cfg['url_price'], params=query, headers=cfg['headers'])
    return response.json()
#
#
# url = 'https://api.uber.com.cn/v1/estimates/price'
#
# parameters = {
# # 'Authorization': 'V0FOwsKs-DgoofNelzCRRV88H5RvmaHM4sTKSslk',
# 'server_token': 'V0FOwsKs-DgoofNelzCRRV88H5RvmaHM4sTKSslk',
# 'start_latitude': 31.193824167211297,
# 'start_longitude': 121.33244751040375,
# 'end_latitude': 31.19882056907011,
# 'end_longitude': 121.43771418515428
# }
#
# headers = {
# # 'Authorization': 'bearer lrY9qKMZqflY-QQe7DWZ0CSwslVgFcn2q6i904j_',
# 'Content-Type': 'application/json'
# }
#
# response = requests.get(url, params=parameters, headers=headers)
#
# data = response.json()
# print(data)
| Teisei/TaxiRobot | lib/RouteCompare/uberApi.py | uberApi.py | py | 3,510 | python | en | code | 0 | github-code | 90 |
9093289282 | import pymel.core as pm
import mtoa.utils as utils
import mtoa.ui.ae.utils as aeUtils
from mtoa.ui.ae.shaderTemplate import ShaderAETemplate
class AEH_ThinFilmInterferenceTemplate(ShaderAETemplate):
def setup(self):
# Add the shader swatch to the AE
self.addSwatch()
self.beginScrollLayout()
# Add a list that allows to replace the shader for other one
self.addCustom('message', 'AEshaderTypeNew',
'AEshaderTypeReplace')
# Begins a "Color Section"
self.beginLayout("TFI Controls", collapse=False)
# Add a control for the "constatColor" shader attribute
self.addControl("interference", label="Interference",
annotation="Interference")
self.addControl("ior_inside", label="Ior inside",
annotation="Ior inside")
self.addControl("ior_outside", label="Ior outside",
annotation="Ior outside")
self.addControl("min_thick", label="Min thickness",
annotation="Min thickness")
self.addControl("max_thick", label="Max thickness",
annotation="Max thickness")
self.addControl("multiplier", label="Multiplier",
annotation="Multiplier")
self.endLayout()
# Begins a "Color Section"
self.beginLayout("Sampling Controls", collapse=False)
self.addControl("color_samples", label="Color samples",
annotation="Color samples")
self.endLayout()
# include/call base class/node attributes
pm.mel.AEdependNodeTemplate(self.nodeName)
# Add Section for the extra controls not displayed before
self.addExtraControls()
self.endScrollLayout() | splicerlabs/H_ThinFilmInterference | source/mtoa/H_ThinFilmInterferenceTemplate.py | H_ThinFilmInterferenceTemplate.py | py | 1,743 | python | en | code | 13 | github-code | 90 |
3690741544 | # coding=utf-8
import cv2
import os.path
import sys
def splitVideo(video_path, out_path, interval, start, end):
"""
拆分视频
:param video_path: 视频路径
:param out_path: 输出影像的文件夹
:param interval: 采样间隔,1表示逐帧输出
:param start: 起始时间,单位为秒
:param end: 结束时间,单位为秒
:return: 空
"""
separator = os.path.sep
# 需要处理的视频文件
cap = cv2.VideoCapture(video_path)
# 获取视频的总帧数、fps
frames = int(cap.get(7))
fps = int(cap.get(5))
print (frames.__str__() + ' frames in total.')
# 计算需要输出的帧数
startIndex = int(start * fps)
endIndex = int(end * fps)
if endIndex > frames:
endIndex = frames
rangeFrames = endIndex - startIndex
# 判断如果小于0,返回
if rangeFrames < 0:
print ('Error.')
exit()
# 输出提示信息
print ((rangeFrames / interval).__str__() + ' frames are going to be outputted.')
print ('---Cutting---')
cap.set(cv2.CAP_PROP_POS_FRAMES, startIndex)
# 循环输出帧
for i in range(startIndex, endIndex, interval):
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
ret, frame = cap.read()
if frame is None:
break
else:
# 输出影像文件
cv2.imwrite(out_path + separator + "%04d" % (startIndex + i + 1) + ".jpg", frame)
print ('Cutting...' + round(((i - startIndex) * 1.0 / (rangeFrames)) * 100, 2).__str__() + "% finished.")
# 释放对象
cap.release()
if sys.argv.__len__() == 2 and sys.argv[1] == "help":
print("用于将视频拆分成一帧帧的图像,便于后续处理,支持设置起始、结束位置以及采样间隔\n")
print("脚本启动命令格式:")
print("scriptname.py:[video_path] [out_path] [interval] [start] [end]")
print("\n函数帮助:")
exec ("help(splitVideo)")
elif sys.argv.__len__() == 6:
splitVideo(sys.argv[1], sys.argv[2], int(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5]))
else:
print("Input \"scriptname.py help\" for help information.")
| zhaoxuhui/TookitsForVideoProcessing | splitVideo.py | splitVideo.py | py | 2,178 | python | en | code | 2 | github-code | 90 |
4962944762 | import os
import sys
import time
import math
import shutil
# import at_cascade with a preference current directory version
current_directory = os.getcwd()
if os.path.isfile( current_directory + '/at_cascade/__init__.py' ) :
sys.path.insert(0, current_directory)
import at_cascade
import dismod_at
# BEGIN_PYTHON
#
# csv_file
csv_file = dict()
#
# option_fit.csv
random_seed = str( int( time.time() ) )
csv_file['option_fit.csv'] = \
'''name,value
max_abs_effect,3.0
'''
#
# option_predict.csv
random_seed = str( int( time.time() ) )
csv_file['option_predict.csv'] = \
'''name,value
db2csv,true
plot,true
'''
#
# node.csv
csv_file['node.csv'] = \
'''node_name,parent_name
n0,
n1,n0
n2,n0
'''
#
# sex_name2income
sex_name2income = { 'female' : 1.0, 'both' : 1.5, 'male' : 2.0 }
#
# covariate.csv
csv_file['covariate.csv'] = \
'''node_name,sex,income,age,time,omega
n0,female,1.0,50,2000,0.02
n1,female,1.0,50,2000,0.02
n2,female,1.0,50,2000,0.02
n0,male,2.0,50,2000,0.02
n1,male,2.0,50,2000,0.02
n2,male,2.0,50,2000,0.02
'''
#
# fit_goal.csv
csv_file['fit_goal.csv'] = \
'''node_name
n1
n2
'''
#
# predict_integrand.csv
csv_file['predict_integrand.csv'] = \
'''integrand_name
Sincidence
prevalence
mulcov_0
mulcov_1
'''
#
# prior.csv
csv_file['prior.csv'] = \
'''name,lower,upper,mean,std,density
gaussian_0_10,-1.0,1.0,0.5,10.0,gaussian
gaussian_eps_10,1e-6,1.0,0.5,10.0,gaussian
gauss_01,,,0.0,1.0,gaussian
'''
#
# parent_rate.csv
csv_file['parent_rate.csv'] = \
'''rate_name,age,time,value_prior,dage_prior,dtime_prior,const_value
iota,0.0,0.0,gaussian_eps_10,,,
'''
#
# child_rate.csv
csv_file['child_rate.csv'] = \
'''rate_name,value_prior
iota,gauss_01
'''
#
# mulcov.csv
csv_file['mulcov.csv'] = \
'''covariate,type,effected,value_prior,const_value
income,rate_value,iota,gaussian_0_10,
one,meas_noise,Sincidence,,1e-3
'''
#
# data_in.csv
# The 0.00 meas_value in this table gets replaced
header = 'data_id,integrand_name,node_name,sex,age_lower,age_upper,'
header += 'time_lower,time_upper,meas_value,meas_std,hold_out,'
header += 'density_name,eta,nu'
csv_file['data_in.csv'] = header + \
'''
0,Sincidence,n0,both,0,10,1990,2000,0.00,1e-4,0,gaussian,,
0,Sincidence,n0,both,0,10,1990,2000,0.00,1e-4,0,gaussian,,
1,Sincidence,n1,female,10,20,2000,2010,0.00,1e-4,0,gaussian,,
1,Sincidence,n1,male,10,20,2000,2010,0.00,1e-4,0,gaussian,,
2,Sincidence,n2,female,20,30,2010,2020,0.00,1e-4,0,gaussian,,
2,Sincidence,n2,male,20,30,2010,2020,0.00,1e-4,0,gaussian,,
'''
#
#
def main() :
#
# fit_dir
fit_dir = 'build/test'
if not os.path.exists(fit_dir) :
os.makedirs(fit_dir)
root_node_name = 'n0'
if os.path.exists( fit_dir + '/' + root_node_name ) :
shutil.rmtree( fit_dir + '/' + root_node_name )
#
# write csv files
for name in csv_file :
file_name = f'{fit_dir}/{name}'
file_ptr = open(file_name, 'w')
file_ptr.write( csv_file[name] )
file_ptr.close()
#
# table
file_name = f'{fit_dir}/covariate.csv'
table = at_cascade.csv.read_table( file_name )
#
# data_in.csv
float_format = '{0:.5g}'
true_mulcov_sex = 0.5
no_effect_iota = 0.1
file_name = f'{fit_dir}/data_in.csv'
table = at_cascade.csv.read_table( file_name )
for row in table :
sex_name = row['sex']
integrand_name = row['integrand_name']
assert integrand_name == 'Sincidence'
#
sex_name = row['sex']
effect = true_mulcov_sex * ( sex_name2income[sex_name] - 1.5)
iota = math.exp(effect) * no_effect_iota
row['meas_value'] = float_format.format( iota )
at_cascade.csv.write_table(file_name, table)
#
# csv.fit, csv.predict
at_cascade.csv.fit(fit_dir)
at_cascade.csv.predict(fit_dir)
#
# number_sample
number_sample = 20
#
# prefix
for prefix in [ 'fit', 'sam' ] :
#
# predict_table
file_name = f'{fit_dir}/{prefix}_predict.csv'
predict_table = at_cascade.csv.read_table(file_name)
#
# node
for node in [ 'n0', 'n1', 'n2' ] :
# sex_name
for sex_name in [ 'female', 'both', 'male' ] :
#
# sample_list
sample_list = list()
for row in predict_table :
if row['integrand_name'] == 'Sincidence' and \
row['node_name'] == node and \
row['sex'] == sex_name :
#
sample_list.append(row)
#
# check sample_list
if node == 'n0' or sex_name != 'both' :
if prefix == 'fit' :
assert len(sample_list) == 1
else :
assert len(sample_list) == number_sample
sum_avgint = 0.0
for row in sample_list :
sum_avgint += float( row['avg_integrand'] )
avgint = sum_avgint / len(sample_list)
income = sex_name2income[sex_name]
effect = true_mulcov_sex * (income - 1.5)
iota = math.exp(effect) * no_effect_iota
rel_error = (avgint - iota) / iota
if abs(rel_error) > 0.01 :
print('rel_error =', rel_error)
assert False
#
# db2csv_file_list
db2csv_name_list = [
'log.csv',
'age_avg.csv',
'hes_fixed.csv',
'trace_fixed.csv',
'mixed_info.csv',
'variable.csv',
'data.csv',
'predict.csv',
]
#
# subdir_list
subdir_list = {
('n0', 'both') : 'n0' ,
('n0', 'female') : 'n0/female' ,
('n0', 'male') : 'n0/male' ,
('n1', 'female') : 'n0/female/n1' ,
('n1', 'male') : 'n0/male/n1' ,
('n2', 'female') : 'n0/female/n2' ,
('n2', 'male') : 'n0/male/n2' ,
}
#
# check for db2csv files
for (node, sex) in subdir_list :
subdir = subdir_list[(node, sex)]
for name in db2csv_name_list + [ 'data_plot.pdf', 'rate_plot.pdf' ] :
file_path = f'{fit_dir}/{subdir}/{name}'
assert os.path.exists(file_path)
#
file_name = f'{fit_dir}/n0/dismod.db'
new = False
connection = dismod_at.create_connection(file_name, new)
tbl_name = 'bnd_mulcov'
bnd_mulcov_table = dismod_at.get_table_dict(connection, tbl_name)
connection.close()
max_mulcov = bnd_mulcov_table[0]['max_mulcov']
max_cov_diff = bnd_mulcov_table[0]['max_cov_diff']
max_abs_effect = 3.0
assert max_cov_diff == 0.5
assert max_mulcov == max_abs_effect / max_cov_diff
#
main()
print('csv_fit: OK')
sys.exit(0)
# END_PYTHON
| bradbell/at_cascade | test/csv_fit.py | csv_fit.py | py | 6,619 | python | en | code | 3 | github-code | 90 |
74405158057 | import numpy as np
from icecube import icetray, dataclasses
from icecube.dataclasses import I3Particle
def pick_em_or_had(type):
em_types = [I3Particle.EMinus, I3Particle.Brems]
had_types = [I3Particle.Hadrons, I3Particle.NuclInt]
if type in had_types:
return 'HAD'
elif type in em_types:
return 'EM'
else:
return 'UDEF'
| clark2668/icetradio | python/util_phys.py | util_phys.py | py | 335 | python | en | code | 0 | github-code | 90 |
21473642975 | # Splash scene - first scene the user sees
import pygwidgets
import pyghelpers
from Constants import *
class SceneSplash(pyghelpers.Scene):
    """Title screen: background art plus Start / Quit / High Scores buttons."""

    def __init__(self, window):
        self.window = window
        self.backgroundImage = pygwidgets.Image(self.window,
                                (0, 0), 'images/splashBackground.jpg')
        self.dodgerImage = pygwidgets.Image(self.window,
                                (150, 30), 'images/dodger.png')
        # Enter key also activates Start.
        self.startButton = pygwidgets.CustomButton(self.window, (250, 500),
                            up='images/startNormal.png',
                            down='images/startDown.png',
                            over='images/startOver.png',
                            disabled='images/startDisabled.png',
                            enterToActivate=True)
        self.quitButton = pygwidgets.CustomButton(self.window, (30, 650),
                            up='images/quitNormal.png',
                            down='images/quitDown.png',
                            over='images/quitOver.png',
                            disabled='images/quitDisabled.png')
        self.highScoresButton = pygwidgets.CustomButton(self.window, (360, 650),
                            up='images/gotoHighScoresNormal.png',
                            down='images/gotoHighScoresDown.png',
                            over='images/gotoHighScoresOver.png',
                            disabled='images/gotoHighScoresDisabled.png')

    def getSceneKey(self):
        """Unique key the scene manager uses to identify this scene."""
        return SCENE_SPLASH

    def handleInputs(self, events, keyPressedList):
        """Route button clicks to scene transitions (or quit)."""
        for event in events:
            if self.startButton.handleEvent(event):
                self.goToScene(SCENE_PLAY)

            elif self.quitButton.handleEvent(event):
                self.quit()

            elif self.highScoresButton.handleEvent(event):
                self.goToScene(SCENE_HIGH_SCORES)

    def draw(self):
        """Draw background, artwork, and all three buttons."""
        self.backgroundImage.draw()
        self.dodgerImage.draw()
        self.startButton.draw()
        self.quitButton.draw()
        self.highScoresButton.draw()
| IrvKalb/Object-Oriented-Python-Code | Chapter_16/Dodger/SceneSplash.py | SceneSplash.py | py | 2,392 | python | en | code | 207 | github-code | 90 |
18607934946 | import os
from jsonc_parser.parser import JsoncParser
import requests
from fastapi.responses import PlainTextResponse
import subprocess
from subprocess import PIPE
CONFIG_FILE = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/server/config.jsonc"
CONFIG = JsoncParser.parse_file(CONFIG_FILE)
async def convertPlantUMLToSVG(plantuml: str) -> str:
    """Render PlantUML source text to SVG markup.

    Depending on CONFIG["plantuml"]["executionType"], either POSTs the
    source to a PlantUML server or pipes it through a local plantuml.jar.

    Bug fix: the server branch previously returned raw ``bytes``
    (``response.content``) while the local branch returned ``str``;
    both branches now return the SVG as a UTF-8 decoded string.
    """
    if CONFIG["plantuml"]["executionType"] == "server":
        serverPost = requests.post(
            CONFIG["plantuml"]["serverType"] + "/svg", data=plantuml
        )
        output = serverPost.content.decode("utf-8")
    else:
        # asyncio subprocess does not work on windows, so run synchronously
        cmd = ["java", "-jar", CONFIG['plantuml']['jarPath'],
               "-pipe",
               "-svg"
               ]
        process = subprocess.run(cmd, input=plantuml.encode("utf-8"),
                                 capture_output=True)
        output = process.stdout.decode("utf-8")
    return output
| acenturyandabit/code2dia | code2dia/convertPlantUMLToSVG.py | convertPlantUMLToSVG.py | py | 897 | python | en | code | 0 | github-code | 90 |
17764060430 | # Leer letras de líneas (ver la entrada a continuación). Cada letra está en un cuarto índice, comenzando desde el índice 1.
# INPUT (each letter sits at every 4th index, starting at index 1):
#     [D]
# [N] [C]
# [Z] [M] [P]
# DESIRED OUTPUT
# [' D ', 'NC', 'ZMP']
# VERSION 1: nested for loops walking the lines and the letters
with open("11. Desafíos/letras.txt") as archivo:
    resultado = []
    for line in archivo:
        grupo = ""
        for index in range(1, len(line), 4):
            grupo += line[index]
        resultado.append(grupo)
    print(resultado)
# VERSION 2: one-liner, but with redundant code ("".join of a single slice)
with open("11. Desafíos/letras.txt") as archivo:
    resultado = ["".join([line[1:len(line):4]])
                 for line in archivo.read().split("\n")]
    print(resultado)
# VERSION 3: minimalist, idiomatic Python (slice with step 4)
with open("11. Desafíos/letras.txt") as archivo:
    resultado = [line[1::4] for line in archivo]
    print(resultado)
| manutorres/python | 11. Desafíos/4. slicing.py | 4. slicing.py | py | 895 | python | es | code | 0 | github-code | 90 |
class Department:
    """A company department that tracks how many employees it has."""

    def __init__(self, name, emps=0):
        self.name = name
        self.emps = emps  # head-count; incremented by Employee.__init__

    def display(self):
        """Print the department's name and employee count."""
        # Fixed typo in the user-facing label: 'Deartment' -> 'Department'.
        print('Department: ', self.name)
        print('Employees: ', self.emps)
class Employee(Department):
    # NOTE(review): Employee subclasses Department but never calls
    # super().__init__(); it only bumps the passed department's counter.
    # Composition ("has a department") may have been intended — confirm.
    def __init__(self, name, age, department):
        """Create an employee and register them with *department*."""
        self.name = name
        self.age = age
        self.department = department
        # Registering the employee increments the department head-count.
        department.emps += 1
    def display(self):
        """Print the employee's name, age and department name."""
        print("Name: ", self.name)
        print("Age: ", self.age)
        print("Department: ", self.department.name)
# Demo: two departments and three employees; each Employee constructor
# increments its department's head-count, which display() then reports.
it_dept = Department("IT")
admin_dept = Department('Admin')
emp1 = Employee("ABC", 20, it_dept)
emp2 = Employee("XYZ", 28, it_dept)
emp3 = Employee("ABC", 20, admin_dept)
it_dept.display()
admin_dept.display()
emp1.display()
emp2.display()
emp3.display()
| tilvaanjali/python_exe1 | main.py | main.py | py | 807 | python | en | code | 0 | github-code | 90 |
73090191335 | """1588. Sum of All Odd Length Sub arrays Given an array of positive integers' arr, return the sum of all possible
odd-length sub arrays of arr. A subarray is a contiguous subsequence of the array.
Example 1:
Input: arr = [1,4,2,5,3]
Output: 58
Explanation: The odd-length subarrays of arr and their sums are:
[1] = 1
[4] = 4
[2] = 2
[5] = 5
[3] = 3
[1,4,2] = 7
[4,2,5] = 11
[2,5,3] = 10
[1,4,2,5,3] = 15
If we add all these together we get 1 + 4 + 2 + 5 + 3 + 7 + 11 + 10 + 15 = 58
Link - https://leetcode.com/problems/sum-of-all-odd-length-subarrays/
"""
# Sample input from the problem statement (expected answer: 58).
arr = [1, 4, 2, 5, 3]
class Solution:
    def sumOddLengthSubArrays(self, arr: list[int]) -> int:
        """Return the sum of all odd-length contiguous subarrays of *arr*.

        Fixes the original, which printed the running total but fell off
        the end without a ``return`` (so callers always received None).

        Uses the O(n) contribution trick: arr[i] occurs in
        (i + 1) * (n - i) subarrays in total, and ceil(count / 2) of
        those have odd length, so each element's contribution is known
        without enumerating subarrays.
        """
        n = len(arr)
        total = 0
        for i, value in enumerate(arr):
            occurrences = (i + 1) * (n - i)       # subarrays containing arr[i]
            odd_occurrences = (occurrences + 1) // 2
            total += value * odd_occurrences
        return total
# Instantiate the solution and run it on the sample input above.
mySol1 = Solution()
print(mySol1.sumOddLengthSubArrays(arr))
| devWorldDivey/mypythonprogrammingtutorials | Python Problems/Leetcode Problem 1588. Sum of All Odd Length Subarrays.py | Leetcode Problem 1588. Sum of All Odd Length Subarrays.py | py | 1,319 | python | en | code | 0 | github-code | 90 |
71186130218 | from assento import Assento
class controladorAssentos():
    """Controller for a cinema room: builds the seat list, renders the
    seat map, sells/refunds seats, and persists state to arquivo.txt."""
    # Class-level (shared) counters:
    PrecoDevolvido = 0    # refunded amount (not updated anywhere visible)
    PessoasNaSala = 0     # people currently in the room
    cont = 0              # number of refunds performed
    ValorApurado = 0      # total revenue collected
    PrecoPessoaSala = 0   # unused here
    def __init__(self):
        # Room dimensions and the flat seat list (row-major).
        self.__linhas = None
        self.__colunas = None
        self.__lista = []
        # NOTE(review): __saldodevolucoes is assigned here but never used.
        self.__saldodevolucoes = None
    def criarsala(self):
        """Create rows*cols Assento objects; the price starts at 20 and
        drops by 1 for each completed row."""
        qtdCadeiras = int(self.__linhas) * int(self.__colunas)
        cadLinha = 0
        valor = 20
        for i in range(qtdCadeiras):
            a = Assento(i, valor, True)
            self.__lista.append(a)
            cadLinha += 1
            # NOTE(review): compares against self.__colunas without int();
            # works only if set_colunas was given an int — confirm callers.
            if cadLinha == self.__colunas:
                valor -= 1
                cadLinha = 0
    def set_linhas(self,linhas):
        self.__linhas = linhas
    def set_colunas(self,colunas):
        self.__colunas = colunas
    def get_linhas(self):
        return self.__linhas
    def get_colunas(self):
        return self.__colunas
    def get_lista(self):
        return self.__lista
    def listadeassentos(self,cadeirapronta):
        """Append an already-built seat to the internal list."""
        self.__lista.append(cadeirapronta)
    def matriz(self):
        """Print the seat map: zero-padded seat numbers for available
        seats, 'xx' for sold ones."""
        sala = ""
        numCadeira = 0
        # v = highest seat number; len(str(v)) is the zero-fill width.
        v = (int(self.__linhas) * int(self.__colunas) - 1)
        for i in range(int(self.__linhas)):
            for j in range(int(self.__colunas)):
                cadeira = self.__lista[numCadeira]
                if cadeira.get_disponivel():
                    sala += str(cadeira.get_numero()).zfill(len(str(v))) + "  "
                else:
                    sala += "xx".zfill(len(str(v))) + "  "
                numCadeira += 1
            sala += "\n"
        print(sala)
    def comprarAssentos(self,cadeiraquero):
        """Sell the requested seat numbers (list of strings).

        Returns True when the purchase succeeded; rejects duplicate or
        already-sold seats via saberRepetidos. Adds each seat's price to
        the class-level ValorApurado.
        """
        retorno = False
        v = (int(self.__linhas) * int(self.__colunas) - 1)
        temRepetidos = self.saberRepetidos(cadeiraquero)
        if (temRepetidos):
            pass
        else:
            for f in self.__lista:
                for e in cadeiraquero:
                    if f.get_numero() == int(e):
                        if f.get_disponivel():
                            f.set_disponivel(False)
                            retorno = True
                            # NOTE(review): this *assigns* a string over the
                            # Assento's set_numero method instead of calling
                            # it; the value is never read — confirm intent.
                            f.set_numero = "xx".zfill(len(str(v))) + "  "
                            controladorAssentos.ValorApurado += int(f.get_preco())
                        else:
                            retorno = False
                        break
        return retorno
    def saberRepetidos(self, lista):
        """Return True when the purchase request is invalid: it contains
        duplicates or references a seat that is no longer available."""
        retorno = False
        l = []
        for i in lista:
            if i not in l:
                l.append(i)
            else:
                # User-facing message: "Invalid purchase, try again!"
                print('\033[1;31mCompra inválida, Tente novamente !\033[m'.format(i))
                retorno = True
                break
        # NOTE(review): `l` is reused as the loop variable here, shadowing
        # the duplicates list built above.
        for l in lista:
            num = int(l)
            a = self.__lista[num]
            if a.get_disponivel() == False:
                retorno = True
                break
        return retorno
    def saberRepetidosD(self, lista):
        """Refund-side validation: True when the request has duplicates or
        references a seat that is still available (nothing to refund)."""
        retorno = False
        l = []
        for i in lista:
            if i not in l:
                l.append(i)
            else:
                print('\033[1;31mCompra inválida, Tente novamente !\033[m'.format(i))
                retorno = True
                break
        for l in lista:
            num = int(l)
            a = self.__lista[num]
            if a.get_disponivel() == True:
                retorno = True
                break
        return retorno
    def devolverAssentos(self,cadeiradevolver):
        """Refund the requested seat numbers, re-marking them available.

        90% of the price is subtracted from ValorApurado (a 10% fee is
        kept); `cont` counts refunds. Returns True on success.
        """
        retorno = False
        v = (int(self.__linhas) * int(self.__colunas) - 1)
        temRepetidos = self.saberRepetidosD(cadeiradevolver)
        if (temRepetidos):
            pass
        else:
            for f in self.__lista:
                for e in cadeiradevolver:
                    if f.get_numero() == int(e):
                        if f.get_disponivel():
                            retorno = False
                            # NOTE(review): same method-shadowing assignment
                            # as in comprarAssentos — confirm intent.
                            f.set_numero = e.zfill(len(str(v))) + "  "
                        else:
                            f.set_disponivel(True)
                            retorno = True
                            controladorAssentos.cont += 1
                            controladorAssentos.ValorApurado -= 0.9*int(f.get_preco())
                        break
        return retorno
    def salvararquivo(self):
        """Persist room state to arquivo.txt: one '>'-separated header
        line, then one ':'-separated line per seat."""
        salvArq = open('arquivo.txt', 'w')
        salvArq.write(str(self.__linhas) + '>')
        salvArq.write(str(self.__colunas) + '>')
        salvArq.write(str(controladorAssentos.PessoasNaSala) + '>')
        salvArq.write(str(controladorAssentos.cont) + '>')
        salvArq.write(str(controladorAssentos.ValorApurado) + '\n')
        for f in self.__lista:
            a = '{}'.format(f.get_numero())
            b = '{}'.format(f.get_preco())
            c = '{}'.format(f.get_disponivel())
            salvArq.write('{}:'.format(a))
            salvArq.write('{}:'.format(b))
            salvArq.write('{}\n'.format(c))
        salvArq.close()
    def teste(self):
        """Return True when arquivo.txt exists (a saved session is present).

        NOTE(review): the handle opened here is never closed, and the bare
        except hides all error types — consider os.path.exists instead.
        """
        try:
            a = open('arquivo.txt', 'r')
            return True
        except:
            return False
    def carregararquivo(self):
        """Load room state saved by salvararquivo: the '>'-header restores
        dimensions/counters, every other line rebuilds one Assento."""
        salvArq = open('arquivo.txt', 'r')
        for c in salvArq.readlines():
            c = c.replace("\n","")
            if '>' in c:
                a = c.split('>')
                self.set_linhas(int(a[0]))
                self.set_colunas(int(a[1]))
                controladorAssentos.PessoasNaSala = int(a[2])
                controladorAssentos.cont = int(a[3])
                controladorAssentos.ValorApurado = float(a[4])
            else:
                b = c.split(':')
                u = int(b[0])
                valor = float(b[1])
                if b[2] == 'True':
                    disponibilidade = True
                else:
                    disponibilidade = False
                ç = Assento(u, valor, disponibilidade)
                self.__lista.append(ç)
| artillisprado/Cinema-Python-II-OO | controladorassento.py | controladorassento.py | py | 5,534 | python | pt | code | 1 | github-code | 90 |
class Node:
    """One node of a doubly linked list: a payload plus prev/next links."""

    def __init__(self, data):
        self.data = data
        self.next = None  # following node (None at the tail)
        self.prev = None  # preceding node (None at the head)

    def __repr__(self):
        # Show only the payload; following the links could recurse forever.
        return f"Node({self.data!r})"
class DoublyLinkedList:
    """Minimal doubly linked list supporting appends at the tail."""

    def __init__(self):
        self.head = None
        self.tail = None

    def append(self, data):
        """Wrap *data* in a Node and attach it after the current tail."""
        node = Node(data)
        if self.head is None:
            # Empty list: the new node becomes both ends.
            self.head = self.tail = node
            return
        node.prev = self.tail
        self.tail.next = node
        self.tail = node
| tonianev/data-structures | src/doubly_linked_list.py | doubly_linked_list.py | py | 490 | python | en | code | 0 | github-code | 90 |
# Challenge 1
def split_gold(golds):
    """Split *golds* between two takers who alternate turns, each taking
    the larger of the two end piles (the front pile wins ties).

    Returns ``[first_taker_total, second_taker_total]``.

    Improvement over the original: scans with two indices instead of
    repeatedly ``pop``-ing, so the caller's list is no longer mutated
    and each pick is O(1) instead of O(n).
    """
    lo, hi = 0, len(golds) - 1
    totals = [0, 0]   # totals[0] = first taker, totals[1] = second taker
    turn = 0
    while lo <= hi:
        if golds[lo] >= golds[hi]:
            totals[turn] += golds[lo]
            lo += 1
        else:
            totals[turn] += golds[hi]
            hi -= 1
        turn ^= 1     # alternate takers
    return totals
# Challenge 2
def english_beggars(golds):
    """Return [beggar-0 total, beggar-1 total] when two beggars take
    alternate piles from the front of *golds*."""
    totals = [0, 0]
    for position, gold in enumerate(golds):
        totals[position % 2] += gold
    return totals

print(english_beggars([1, 2, 3, 4, 5]))
# Challenge 3 Part 1
def josephus_survivor(n, one_every):
    """People 1..n stand in a circle; every *one_every*-th is removed.
    Returns a one-element list holding the survivor's number."""
    people = list(range(1, n + 1))
    index = -1
    while len(people) > 1:
        # Step forward, wrapping around the shrinking circle.
        index = (index + one_every) % len(people)
        people.pop(index)
        # The removal shifts later entries left; compensate by one.
        index -= 1
    return people

print(josephus_survivor(7, 3))
# Challenge 3 Part 2
def josephus_permutation(n, k):
    """Return the full elimination order for people 1..n in a circle,
    removing every k-th person until nobody is left."""
    people = list(range(1, n + 1))
    order = []
    index = -1
    while people:
        index = (index + k) % len(people)
        order.append(people.pop(index))
        index -= 1
    return order

print(josephus_permutation(7, 3))
| coding-plus-equals-one/meeting-materials-2022-2023 | 5_lists_and_dicts/solutions.py | solutions.py | py | 1,559 | python | en | code | 0 | github-code | 90 |
16416910965 | #Actual log parser u_ex180414.log full parse
import re
import os
import functools
import operator
import sys
import cx_Oracle #pip install cx_oracle
#Read a file and parse it(convert it to csv)
# Hard-coded path to the IIS log file that is parsed below.
logPath = os.path.join("C:\\","home","harish","Desktop","u_ex180414.log")
# The handle is iterated line-by-line below, so memory use stays per-line
# even for large files.
f = open(logPath,'r') #will we get stackoverflow error if file is big?
def parseMeterDat(siteId, dt, record):
    """Prepend the site id and timestamp to one meter record (dropping
    the record's trailing comma) and split it into a field list."""
    combined = siteId + "," + dt + "," + record[:-1]
    return combined.split(',')

def parseCsUriQueryDat(siteId, dt, queryDat):
    """Split the dat= payload on ':' and expand each record into a
    field list via parseMeterDat."""
    return [parseMeterDat(siteId, dt, record) for record in queryDat.split(':')]

def parseData(lineStr):
    """Extract id/dt/dat from one log line's cs-uri-query and return the
    list of parsed meter records."""
    match = re.search('.*id=(.*)&dt=(.*)&dat=:(.*,)', lineStr)
    #cs-uri-query
    return parseCsUriQueryDat(match.group(1), match.group(2), match.group(3))
# Keep only GET request lines that carry an id= query parameter.
# Bug fix: the original condition `'GET' and 'id=' in line` only tested
# 'id=' (a non-empty string literal is always truthy); both substrings
# are now actually required, as intended.
parsedList = [parseData(line) for line in f if 'GET' in line and 'id=' in line]
#https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
parsedFlattenedList = functools.reduce(operator.iconcat, parsedList, [])
f.close()
# Oracle XE connection on localhost.
# SECURITY(review): credentials are hard-coded in the DSN; move them to
# configuration or environment variables.
db = cx_Oracle.connect('user/password@localhost:1521/XE')
cursor=db.cursor()
print(db.version)
# Bulk-insert every parsed record through one prepared statement.
cursor.prepare("INSERT INTO ENERGYTAB(SITEID,DateTime,METERID,EBENERGY,DGENERGY,VOLTAGE,CURRENTEN,ACTPOWER,APPPOWER,POWERFACTOR,MAXDEMAND,FLAG) VALUES (:1, :2, :3, :4, :5, :6, :7, :8, :9, :10, :11, :12)")
cursor.executemany(None, parsedFlattenedList)
db.commit()
r = cursor.execute("SELECT COUNT(*) FROM ENERGYTAB")
print(f'inserted {cursor.fetchone()} rows')
#SITEID,DateTime,METERID,EBENERGY,DGENERGY,VOLTAGE,CURRENT,ACTPOWER,APPPOWER,POWERFACTOR,MAXDEMAND,FLAG
#for row in cursor.execute("SELECT * FROM ENERGYTAB"):
#    print(row)
print("Successfully completed parsing log file and loading it into database.")
#References:
#https://www.oracle.com/technetwork/articles/dsl/prez-python-queries-101587.html | Chandrakhasin/conserve-energy | energyLogFileParser.py | energyLogFileParser.py | py | 2,006 | python | en | code | 0 | github-code | 90 |
34998486844 | #! /usr/bin/env python
"""
eight queens, whose gui uses Tkinter
"""
import tkinter as Tk
import queen as Q
import os
Q_font = ("Times", 14)  # default font for the buttons and the counter label

def move_queen(now, next):
    """Return (column, row_delta) pairs for every queen whose row differs
    between layout *now* and layout *next*."""
    moves = []
    for column, (old_row, new_row) in enumerate(zip(now, next)):
        if old_row != new_row:
            moves.append((column, new_row - old_row))
    return moves
class Cboard(Tk.Canvas):
    """Canvas drawing an 8x8 chess board with one queen image per column,
    initially placed at the first eight-queens solution."""
    cell_size = 46
    margin = 5
    # NOTE(review): these are class-level mutable lists appended to in
    # __init__, so they are shared across Cboard instances — fine for a
    # single-board app, but confirm before creating a second board.
    q_images = []
    q_figure = []
    def __init__(self, master):
        cwidth = 8*self.cell_size
        Tk.Canvas.__init__(self, master, relief=Tk.RAISED, bd=4, bg='white',
                           width=cwidth, height=cwidth)
        # All twelve solutions; each is a list of cell codes, one per column.
        self.q_answers = Q.eight_queens()
        for i in range(8):
            for j in range(8):
                # Alternate the two square colours in a checker pattern.
                bcolor = (i-j)%2==0 and "#699C69" or "#D4D49F"
                x0 = i*self.cell_size + self.margin
                y0 = j*self.cell_size + self.margin
                self.create_rectangle(x0, y0, x0+self.cell_size, y0+self.cell_size, fill=bcolor, width=0)
            # One queen image per column; keep a reference so Tk does not
            # garbage-collect the PhotoImage.
            self.q_images.append(Tk.PhotoImage(file=os.path.dirname(__file__)+"/queen.gif"))
            # Decode cell code z into (column, row) and centre the image.
            z = self.q_answers[0][i]
            x = self.cell_size*(int(z / 8)+0.5) + self.margin
            y = self.cell_size*(int(z % 8)+0.5) + self.margin
            self.q_figure.append(self.create_image(x, y, image=self.q_images[i], tags="queen"))
    def refresh(self, now, next):
        """Slide the queens from solution index *now* to *now + next*
        (next is +1 or -1), moving only the queens that changed row."""
        answer_now = self.q_answers[now]
        answer_next = self.q_answers[now+next]
        for i, j in move_queen(answer_now, answer_next):
            self.move(self.q_figure[i], 0, j*self.cell_size)
class Queen(Tk.Frame):
    """Main application frame: title, chess board, next/prev buttons and
    a '<current>/12' counter cycling the twelve eight-queens solutions."""
    def __init__(self, master=None):
        Tk.Frame.__init__(self, master)
        self.master.title("8 Queens")
        # title
        l_title = Tk.Label(self, text='Eight Queens', font=('Times', '24', ('italic', 'bold')),
                           fg='#191970', bg='#EEE8AA', width=12)
        l_title.pack(padx=10, pady=10)
        # chess board
        self.f_board = Cboard(self)
        self.f_board.pack(padx=10, pady=10)
        # buttons and a counter
        self.q_counter = 0  # 0-based index of the solution on display
        self.f_footer = Tk.Frame(self)
        self.f_footer.pack()
        self.s_counter = Tk.StringVar()
        self.s_counter.set("%d/12" % (1 + self.q_counter))
        self.a_button = Tk.Button(self.f_footer, text="next", font=Q_font, command = self.show_next)
        self.a_button.pack(side=Tk.LEFT, padx=5,pady=5)
        self.b_button = Tk.Button(self.f_footer, text="prev", font=Q_font, command = self.show_prev)
        self.b_button.pack(side=Tk.LEFT, padx=5,pady=5)
        self.f_label = Tk.Label(self.f_footer, textvariable = self.s_counter, font=Q_font)
        self.f_label.pack(side=Tk.LEFT, padx=5, pady=5)
    def show_next(self):
        """Advance to the next solution (stops at index 11)."""
        if(self.q_counter < 11):
            self.f_board.refresh(self.q_counter, 1)
            self.change_counter(1)
    def show_prev(self):
        """Step back to the previous solution (stops at index 0)."""
        if(self.q_counter > 0):
            self.f_board.refresh(self.q_counter, -1)
            self.change_counter(-1)
    def change_counter(self, i):
        """Shift the solution index by *i* and refresh the counter text."""
        self.q_counter += i
        self.s_counter.set("%d/12" % (1 + self.q_counter))
##---------------------------------------------------
# Script entry point: build the Queen frame and start the Tk event loop.
if __name__ == "__main__":
    app = Queen()
    app.pack()
    app.mainloop()
| 96no3/PythonStudy | Python/201912/191204/tkinter9/8queens.py | 8queens.py | py | 3,302 | python | en | code | 0 | github-code | 90 |
22685140972 | import json
import base64
import requests
from datetime import datetime
class er_agent():
    """HTTPS client for an ER server's /beta REST API: schedule creation,
    schedule status queries, and target/location listings.

    Connection details and credentials are supplied per-session through
    load_v_drm_schedule(); the password is kept base64-obfuscated and
    only decoded immediately before each request.
    """
    def __init__(self, hostname, log=None):
        """Store this machine's hostname (used in schedule labels) and a
        logger, then reset all session state."""
        self.my_hostname = hostname
        self.log = log
        self.unload_v_drm_schedule()
    def load_v_drm_schedule(self, v_drm_schedule_json):
        """Populate session state from a schedule dict.

        Required keys: IP, LOCATION_ID, PROFILES, TARGET_ID, ID, AP_NO,
        SCHEDULE_ID, MEMORY, THROUGHPUT, PD — a missing key raises
        KeyError.
        """
        self.URL = "https://"+v_drm_schedule_json['IP']+":8339/beta"
        self.my_location_id = v_drm_schedule_json['LOCATION_ID']
        self.my_datatype_profile_id = v_drm_schedule_json['PROFILES']
        self.my_target_id = v_drm_schedule_json['TARGET_ID']
        self.userid = v_drm_schedule_json['ID']
        self.current_ap_no = v_drm_schedule_json['AP_NO']
        self.current_drm_schedule_id = v_drm_schedule_json['SCHEDULE_ID']
        self.memory = v_drm_schedule_json['MEMORY']
        self.throughput = v_drm_schedule_json['THROUGHPUT']
        import base64
        # Obfuscate (not encrypt) the password while it sits in memory.
        self.userpw_encoded = base64.b64encode(v_drm_schedule_json['PD'].encode('ascii'))
    def unload_v_drm_schedule(self):
        """Clear all per-session fields (called by __init__)."""
        self.my_location_id = None
        self.my_datatype_profile_id = None
        self.my_target_id = None
        self.userid = None
        self.current_ap_no = None
        self.current_drm_schedule_id = None
        self.memory = None
        self.throughput = None
        self.userpw_encoded = None
    def request(self, method, url, payload=None):
        """Issue a basic-auth HTTPS request and return the parsed JSON
        body ("" when the body is not valid JSON).

        SECURITY(review): verify=False disables TLS certificate checks.
        NOTE(review): an unknown *method* leaves `res` unbound, so the
        json() call below would raise NameError — confirm callers only
        pass 'post'/'get'/'delete'.
        """
        req_url = self.URL + url
        self.log.debug("ER URL:"+req_url)
        userpw = base64.b64decode(self.userpw_encoded).decode('ascii')
        if 'post' == method:
            headers = {'Content-Type': 'application/json; charset=utf-8'}
            res = requests.post(req_url, headers=headers, auth=(self.userid, userpw), verify=False, data = payload)
        elif 'get' == method:
            res = requests.get(req_url, auth=(self.userid, userpw), verify=False)
        elif 'delete' == method:
            headers = {'Content-Type': 'application/json'}
            res = requests.delete(req_url, headers=headers, auth=(self.userid, userpw), verify=False)
        try:
            ret = res.json()
        except json.JSONDecodeError as e:
            return ""
        return ret
    #region SCHEDULES
    def list_schedules(self, schedule_id=None):
        """GET /schedules (all) or /schedules/<id> (one)."""
        url = '/schedules'
        if None != schedule_id:
            url += '/' + str(schedule_id)
        return self.request('get', url)
    def is_schedule_completed(self, schedule_id):
        """Return True only when the schedule's first target's first
        location reports status 'completed'; any error returns False."""
        try:
            result = self.list_schedules(schedule_id)
            self.log.info(json.dumps(result, indent=4, ensure_ascii=False))
            if 'targets' not in result: return False
            if len(result['targets']) < 1: return False
            if 'locations' not in result['targets'][0]: return False
            if len(result['targets'][0]['locations']) < 1: return False
            if 'status' not in result['targets'][0]['locations'][0]: return False
            if 'completed' != result['targets'][0]['locations'][0]['status']: return False
        except Exception as e:
            import traceback
            self.log.error(traceback.format_exc())
            self.log.error(e)
            return False
        return True
    # data structure of location list
    # [
    #     {
    #         'id':'...'
    #         'subpath':'...'
    #     },
    # ]
    def add_schedule(self, target_id, label, location_list):
        """POST /schedules for *target_id* with the session's profile.

        memory/throughput are added when set; a value of 0 is replaced
        with the defaults 1024 / 50 respectively.
        """
        data = {
            'label':label,
            'targets': {
                'id':target_id,
                'locations': location_list,
            },
            "profiles": [
                self.my_datatype_profile_id,
            ],
        }
        if self.memory != None:
            if self.memory == 0:
                data['memory'] = 1024
            else:
                data['memory'] = self.memory
        if self.throughput != None:
            if self.throughput == 0:
                data['throughput'] = 50
            else:
                data['throughput'] = self.throughput
        self.log.info(json.dumps(data, indent=4, ensure_ascii=False))
        ret = self.request('post', '/schedules', payload=json.dumps(data))
        return ret
    # Desc.: add schedule and returns SCHEDULE_ID
    # NOTE: blank subpath list means all the files in the disk
    # return:
    #    success - SCHEDULE ID (str)
    #    fail - None
    def my_add_schedule(self, subpath_list, postfix = ""):
        """Create a schedule labelled '<host>_<timestamp>_DRM[_postfix]'
        for this session's location/target; return its id or None."""
        new_label = self.my_hostname+"_"+datetime.now().strftime("%Y%m%d %H%M%S_DRM")
        if "" != postfix:
            new_label = new_label + "_" + postfix
        location_id = self.my_location_id
        location_list = []
        for subpath in subpath_list:
            location_list.append({
                'id':location_id,
                'subpath':subpath,
            })
        result = self.add_schedule(self.my_target_id, new_label, location_list)
        self.log.info(json.dumps(result, indent=4))
        # NOTE: schedule id will be just one whether the param subpath is multiple or not
        # success example : {'id': '44'}
        if 'id' in result:
            return result['id']
        else:
            return None
    #endregion
    #region
    def list_locations(self, target_id):
        """GET the locations belonging to one target."""
        ret = self.request('get', '/targets/'+str(target_id)+'/locations')
        return ret
    def summary_targets(self):
        """GET the targets summary."""
        return self.request('get', '/summary/targets')
    def list_targets(self, target_id = ""):
        """GET /targets (all) or /targets/<id> (one)."""
        if "" != target_id:
            target_id = '/'+target_id
        return self.request('get', '/targets'+target_id)
    #endregion
# Manual smoke test: `python lib_er.py list_locations <target_id>` or
# `python lib_er.py list_schedules <schedule_id>`.
if __name__ == '__main__':
    import lib_logging, sys, logging
    log = lib_logging.init_log(logging.DEBUG)
    er = er_agent("DESKTOP1", log)
    print(log)
    # NOTE(review): 'LOCATION_ID' appears twice below (the second, empty
    # value wins), and the MEMORY/THROUGHPUT keys that
    # load_v_drm_schedule reads are missing, so this call raises
    # KeyError — confirm and fix the test dict.
    er.load_v_drm_schedule({
        'IP': "192.168.12.7",
        "LOCATION_ID": '10115313857004559053',
        'LOCATION_ID': '',
        'PROFILES': '',
        'TARGET_ID': '',
        'ID': 'admin',
        'AP_NO': '',
        'SCHEDULE_ID': '',
        'PD': 'fren1212',
    })
    if 'list_locations' == sys.argv[1]:
        print("TARGET ID: " + sys.argv[2])
        result = er.list_locations(sys.argv[2])
        print("=== locations ===")
        print(json.dumps(result, indent=4))
        result = er.list_targets(sys.argv[2])
        print("=== targets ===")
        print(json.dumps(result, indent=4))
    elif 'list_schedules' == sys.argv[1]:
        result = er.list_schedules(sys.argv[2])
        print(json.dumps(result, indent=4))
print(json.dumps(result, indent=4)) | Frentree/python-for-pc | client/lib_er.py | lib_er.py | py | 6,001 | python | en | code | 0 | github-code | 90 |
23959792172 | # Imports
# Standard imports
import importlib
import random
import os
# Globals
# NOTE(review): `global` at module level is a no-op; the assignment alone
# creates the module-level name.
global cardinalDirs
cardinalDirs = ("north", "east", "south", "west")
# Functions
# No idea on credit for clearScreen()
def clearScreen():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)

clearScreen()
class SpeedList(list):
    """A list-like container optimized for deleting entries: indexes of
    surviving elements never shift, at the cost of extra memory.

    Entries live in ``self.data`` keyed by insertion index; ``self.write``
    is the next index to assign and the reported length.

    Fix: the constructor previously used a mutable default argument
    (``arguments=[]``); it now defaults to an immutable empty tuple.
    """

    def __init__(self, arguments=()):
        self.data = {}
        self.write = 0
        for arg in arguments:
            self.append(arg)

    def __getitem__(self, key):
        # Negative keys index from the end, as in a normal list.
        if key < 0 and key*-1 <= self.write:
            return self.data[self.write+key]
        elif key in self.data.keys():
            return self.data[key]
        else:
            raise IndexError

    def __setitem__(self, key, value):
        self.data[key] = value

    def __iter__(self):
        return iter([self.data[i] for i in self.data.keys()])

    def __len__(self):
        return self.write

    def __str__(self):
        return str(self.data)

    def append(self, argument):
        """Store *argument* at the next write index."""
        self.data[self.write] = argument
        self.write += 1
def getModule(codeModule, codePackage):
    """Import module *codeModule* and return its attribute *codePackage*.

    Warning: this returns the package/attribute object itself, not an
    instance of it.
    """
    module = importlib.import_module(codeModule)
    return getattr(module, codePackage)
# Tiles
class Tile:
    """A terrain tile: a type name plus an attached data dict.

    Fix: the *data* argument was previously accepted but ignored
    (``self.data`` was always reset to ``{}``); it is now honored, and
    the mutable default was replaced with ``None``.
    """
    def __init__(self, tileType, data=None):
        self.tileType = tileType
        self.data = {} if data is None else data
    def get(self):
        """Return the tile's type name."""
        return self.tileType
    def getData(self):
        return self.data
    def setData(self, content):
        self.data = content
# Terrain types that generateTile picks from ("ore" also gets a quantity).
# NOTE(review): `global` at module level is a no-op.
global tileTypes
tileTypes = ["grass", "stone", "ore"]
# Objects
class Object:
    """A placed object: a type name plus an attached data dict.

    Fix: the *data* argument was previously accepted but ignored
    (``self.data`` was always reset to ``{}``); it is now honored, and
    the mutable default was replaced with ``None``.
    """
    def __init__(self, objectType, data=None):
        self.tileType = objectType
        self.data = {} if data is None else data
    def get(self):
        return self.tileType
    def getData(self):
        return self.data
class Entity(Object):
    """An Object driven by a program: each tick it asks the program for
    a task and hands the command back to the board.

    NOTE(review): does not call super().__init__(), so the inherited
    ``get()``/``getData()`` would fail on missing attributes — confirm
    whether Entity is meant to have a tileType.
    """
    def __init__(self, entityNumber, program):
        self.entityNumber = entityNumber
        self.program = program
        self.task = None          # pending {"task": ..., "parameters": ...}
        self.taskResponse = None  # previous task's result, fed back to program
    def getEntityNumber(self):
        return self.entityNumber
    def setTask(self, task):
        self.task = task
    def getTask(self):
        """Return (task, parameters) or None when no task is pending."""
        if self.task is None:
            return None
        return self.task["task"], self.task["parameters"]
    def setTaskResponse(self, response):
        self.taskResponse = response
    def getTaskResponse(self):
        return self.taskResponse
    def tick(self, board):
        """Run the program once and return its (task, parameters) command."""
        # Do correct number of lines of code (currently all)
        self.setTask(self.program.run(self.getTaskResponse()))
        out = self.getTask()
        self.setTask(None)
        return out
class Robot(Entity):
    """An Entity with a scan range and a (future) inventory.

    Fix: reuses Entity.__init__ via super() instead of duplicating its
    attribute setup line-for-line.
    """
    def __init__(self, entityNumber, program):
        super().__init__(entityNumber, program)
        self.scanPower = 3     # scan radius used by Board.scan
        self.inventory = None  # placeholder; not populated yet
    def getScanPower(self):
        return self.scanPower
    def setScanPower(self, scanPower):
        self.scanPower = scanPower
# Board
class Board:
    """Procedurally generated, lazily expanded game world.

    ``self.map[x][y]`` is a dict with level 0 (terrain Tile) and level 1
    (Object/Entity or None). Terrain is generated deterministically from
    the seed, so any coordinate can be created on demand. ``entities``
    maps entity numbers to [entity, x, y] records.
    """
    def __init__(self, seed=None):
        # A random 64-bit seed is drawn when none is supplied.
        if seed == None:
            random.seed()
            self.seed = random.randint(1, 2**64)
        else:
            self.seed = seed
        self.map = {}
        self.entities = SpeedList()
        self.tickNumber = 0
    def getSeed(self):
        return self.seed
    def setTile(self, x, y, content, level, autoGenerate=True):
        """Warning: disabling autoGenerate may result in errors"""
        # NOTE(review): `and` binds tighter than `or`, so this reads
        # (autoGenerate and x-missing) or y-missing; the y check can
        # KeyError on self.map[x] when x is missing and autoGenerate is
        # False — confirm intended precedence.
        if autoGenerate and x not in self.map.keys() or y not in self.map[x].keys():
            self.generateTile(x, y)
        self.map[x][y][level] = content
    def generateTile(self, x, y, level=None, force=False):
        """Create the tile dict at (x, y), deterministically seeding the
        terrain from the board seed so regeneration is reproducible."""
        if x not in self.map.keys():
            self.map[x] = {}
        if y not in self.map[x].keys():
            self.map[x][y] = {}
        # NOTE(review): `.keys` (no parentheses) below is the bound method,
        # not the key view; that sub-expression is only dormant because
        # `level == None` short-circuits for all current callers — confirm
        # before calling with level=0/1.
        if level == None or level == 0 and 0 not in self.map[x][y].keys or force:
            # Check for special seeds
            if self.getSeed() == "BLANK":
                tile = None
            # Seed is generic
            else:
                random.seed(str(self.getSeed())+str([x, y])+"0")
                tile = Tile(random.choice(tileTypes))
                if tile.get() == "ore":
                    random.seed(str(self.seed)+str([x, y])+"ore")
                    tile.setData({
                        "quantity": random.randint(10, 20)
                    })
            self.setTile(x, y, tile, 0, False)
        if level == None or level == 1 and 1 not in self.map[x][y].keys or force:
            # Check for special seeds
            if self.getSeed() == "BLANK":
                tile = None
            # Seed is generic
            else:
                #random.seed(str(self.getSeed())+str([x, y])+"1")
                tile = None
            self.setTile(x, y, tile, 1, False)
    def getTile(self, x, y, level=None):
        """Return the tile dict at (x, y), or a single level's content;
        generates the tile first if it does not exist yet."""
        if x not in self.map.keys() or y not in self.map[x].keys():
            self.generateTile(x, y)
        if level == None:
            return self.map[x][y]
        else:
            return self.map[x][y][level]
    def copyTile(self, x1, y1, x2, y2, level):
        """Copy one level's content from (x1, y1) to (x2, y2); entity
        position records are updated to the destination."""
        tile = self.getTile(x1, y1, level)
        self.setTile(x2, y2, tile, level)
        if issubclass(tile.__class__, Entity):
            self.entities[tile.getEntityNumber()][1] = x2
            self.entities[tile.getEntityNumber()][2] = y2
    def moveTile(self, x1, y1, x2, y2, level):
        """Copy then clear the source (unless source == destination)."""
        self.copyTile(x1, y1, x2, y2, level)
        if not x1 == x2 or not y1 == y2:
            self.setTile(x1, y1, None, level)
    def shiftObject(self, x1, y1, direction, force=False):
        """Move the level-1 object at (x1, y1) one cell in *direction*
        ('north' is +y). Returns True on success, False if the target
        cell is occupied (unless *force*). Raises AssertionError for an
        unknown direction."""
        # Set target
        if direction == "north":
            x2 = x1
            y2 = y1+1
        elif direction == "south":
            x2 = x1
            y2 = y1-1
        elif direction == "east":
            x2 = x1+1
            y2 = y1
        elif direction == "west":
            x2 = x1-1
            y2 = y1
        else:
            raise AssertionError
        # Determine if possible
        if self.getTile(x2, y2, 1) == None or force:
            # success
            self.moveTile(x1, y1, x2, y2, 1)
            return True
        else:
            # failure
            return False
    def scan(self, x, y, direction, power):
        """Return a dict-of-dicts of tiles in a triangular cone of depth
        *power* facing *direction* from (x, y), keyed by relative
        coordinates (always includes (0, 0))."""
        # Define area
        area = {
            0: {
                0: None
            }
        }
        # Add to area
        if direction == "north":
            for j in range(1, power+1):
                for i in range(j*-1, j+1):
                    if i not in area.keys():
                        area[i] = {}
                    if j not in area[i].keys():
                        area[i][j] = None
        elif direction == "south":
            for j in range(1, power+1):
                for i in range(j*-1, j+1):
                    if i not in area.keys():
                        area[i] = {}
                    if j not in area[i].keys():
                        area[i][j*-1] = None
        elif direction == "east":
            for i in range(1, power+1):
                for j in range(i*-1, i+1):
                    if i not in area.keys():
                        area[i] = {}
                    if j not in area[i].keys():
                        area[i][j] = None
        elif direction == "west":
            for i in range(1, power+1):
                for j in range(i*-1, i+1):
                    if i*-1 not in area.keys():
                        area[i*-1] = {}
                    if j not in area[i*-1].keys():
                        area[i*-1][j] = None
        else:
            raise AssertionError
        # Replace each blank in area with a tile
        for i in area.keys():
            for j in area[i].keys():
                area[i][j] = self.getTile(i+x, j+y)
                # Decending in priortity
                for k in area[i][j].keys():
                    if issubclass(area[i][j][k].__class__, Entity):
                        area[i][j][k] = area[i][j][k]
                    elif issubclass(area[i][j][k].__class__, (Tile, Object)):
                        area[i][j][k] = area[i][j][k]
        # Return
        return area
    def dig(self, x, y):
        """Decrement the ore quantity at (x, y); True when something was
        mined, False when the terrain is not ore."""
        tile = self.getTile(x, y, 0)
        if isinstance(tile, Tile) and tile.get() == "ore":
            data = tile.getData()
            data["quantity"] -= 1
            tile.setData(data)
            return True
        else:
            return False
    def addObject(self, x, y, name="", force=False):
        """Place a plain Object at (x, y) if the cell is free (or *force*)."""
        if self.getTile(x, y, 1) == None or force:
            self.setTile(x, y, Object(name), 1)
            return True
        else:
            return False
    def addEntity(self, x, y, program, force=False):
        """Place a program-driven Entity at (x, y) if the cell is free."""
        if self.getTile(x, y, 1) == None or force:
            self.entities.append([Entity(len(self.entities), program), x, y])
            self.setTile(x, y, self.entities[-1][0], 1)
            return True
        else:
            return False
    def addRobot(self, x, y, program, force=False):
        """Place a Robot at (x, y) if the cell is free (or *force*)."""
        if self.getTile(x, y, 1) == None or force:
            self.entities.append([Robot(len(self.entities), program), x, y])
            self.setTile(x, y, self.entities[-1][0], 1)
            return True
        else:
            return False
    def getTickNumber(self):
        return self.tickNumber
    def tick(self):
        """Advance the world one step: tick every entity and execute the
        command ('move' or, for Robots, 'scan') it returns, feeding the
        result back as the entity's task response."""
        self.tickNumber += 1
        for entity in self.entities:
            response = entity[0].tick(self)
            if response == None:
                continue
            # Entities
            elif response[0] == "move":
                entity[0].setTaskResponse(self.shiftObject(entity[1], entity[2], response[1]))
            # Robots
            elif response[0] == "scan":
                if issubclass(entity[0].__class__, Robot):
                    power = entity[0].getScanPower()
                    entity[0].setTaskResponse(self.scan(entity[1], entity[2], response[1], power))
                else:
                    raise AssertionError
            else:
                raise AssertionError
    def __str__(self):
        """Render a fixed window around `center` as text: R/E/O for
        level-1 occupants, block characters for terrain, '? ' otherwise."""
        radius = 15 # min 0, but 10 works best
        center = [0, 0]
        edge = "█"
        doEdge = False
        out = ""
        if doEdge:
            out += edge*(radius*4+6)+"\n"
        for y in range(radius, radius*-1-1, -1):
            if doEdge:
                out += edge*2
            for x in range(radius*-1, radius+1):
                tile = self.getTile(x+center[0], y+center[1])
                # Decending by priortity
                # Level 1
                # Robot
                if issubclass(tile[1].__class__, Robot):
                    out += "R "
                # Entity
                elif issubclass(tile[1].__class__, Entity):
                    out += "E "
                # Object
                elif issubclass(tile[1].__class__, Object):
                    out += "O "
                # Level 0
                # Grass
                elif issubclass(tile[0].__class__, Tile) and tile[0].get() == "grass":
                    out += "//"
                # Stone
                elif issubclass(tile[0].__class__, Tile) and tile[0].get() == "stone":
                    out += "▒▒"
                # Ore
                elif issubclass(tile[0].__class__, Tile) and tile[0].get() == "ore":
                    out += "░░"
                # Anything else
                else:
                    out += "? "
            if doEdge:
                out += edge*2
            out += "\n"
        if doEdge:
            out += edge*(radius*4+6)
        return out
| ericl16384/old-python-projects | BotsBuildBots/utilities.py | utilities.py | py | 12,200 | python | en | code | 0 | github-code | 90 |
15481789105 | from __future__ import annotations
from typing import Union as _Union
from typing import List as _List
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ._disease import Disease
from ._demographics import Demographics
from ._demographic import Demographic
from ._parameters import Parameters
from ._wards import Wards
from ._ward import Ward
from ._variableset import VariableSet, VariableSets
from datetime import date
__all__ = ["run", "find_mw_exe", "find_mw_include", "find_mw_lib",
"get_reticulate_command"]
def _write_to_file(obj: any, filename: str, dir: str = ".", bzip: bool = False,
dry_run: bool = False) -> str:
"""Write the passed object to a file called 'filename' in
directory 'dir', returning the
relative path to that file
"""
import os
if dry_run:
return filename
filename = os.path.join(dir, filename)
if hasattr(obj, "to_json"):
return obj.to_json(filename, auto_bzip=bzip)
else:
raise IOError(f"Cannot convert {obj} to a file!")
return filename
def _rmdir(directory):
"""Function modified from one copied from 'mitch' on stackoverflow
https://stackoverflow.com/questions/13118029/deleting-folders-in-python-recursively
"""
if directory is None:
return
from pathlib import Path
directory = Path(directory)
# first, check for removing important directories such as $HOME or root
if directory == Path.home():
raise FileExistsError(f"We WILL NOT remove your "
f"home directory ${directory}")
if directory == Path("/"):
raise FileExistsError(f"We WILL NOT remove the root directory "
f"{directory}")
# get the directory containing '$HOME'
if directory == Path.home().parent:
raise FileExistsError(f"We WILL NOT remove the users/home "
f"directory {directory}")
if not directory.is_dir():
directory.unlink()
return
for item in directory.iterdir():
if item.is_dir():
_rmdir(item)
else:
item.unlink()
directory.rmdir()
def _is_executable(filename):
import os
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
return None
# determining if this is executable
# on windows is really difficult, so just
# assume it is...
return filename
def _find_metawards(dirname):
    """Search *dirname* and its Scripts/ and bin/ subdirectories for a
    metawards executable (with or without .exe); return the first path
    found, or None."""
    candidates = (
        ("metawards",),
        ("metawards.exe",),
        ("Scripts", "metawards"),
        ("Scripts", "metawards.exe"),
        ("bin", "metawards"),
        ("bin", "metawards.exe"),
    )

    for parts in candidates:
        found = _is_executable(os.path.join(dirname, *parts))
        if found:
            return found

    return None
def _find_metawards_include(dirname):
import os
# this is from a metawards installation
m = os.path.abspath(os.path.join(dirname, "include", "metawards"))
if os.path.exists(m):
return m
# this is from a metawards source run (used for testing)
m = os.path.abspath(os.path.join(dirname, "src", "metawards"))
if os.path.exists(m):
return m
return None
def _find_metawards_lib(dirname):
import os
import glob
m = glob.glob(os.path.join(dirname, "lib", "libmetawards_*"))
if m is None:
m = []
if len(m) >= 1:
m = os.path.dirname(os.path.abspath(m[0]))
return m
m = glob.glob(os.path.join(dirname, "libmetawards_*"))
if m is None:
m = []
if len(m) >= 1:
m = os.path.dirname(os.path.abspath(m[0]))
return m
m = glob.glob(os.path.join(dirname, "lib*", "metawards_random.*"))
if m is None:
m = []
if len(m) >= 1:
m = os.path.dirname(os.path.abspath(m[0]))
return m
m = glob.glob(os.path.join(dirname, "metawards_random.*"))
if m is None:
m = []
if len(m) >= 1:
m = os.path.dirname(os.path.abspath(m[0]))
return m
return None
def find_mw_lib():
    """Try to find the directory containing the MetaWards libraries
    (e.g. metawards_random) and return its full path.

    Searches upwards from the installed metawards module's location,
    then upwards from sys.prefix, then recursively through a sibling
    'hostedtoolcache' directory (CI installs). Raises RuntimeError if
    the libraries cannot be found anywhere.
    """
    import metawards as _metawards
    import os as _os
    import sys as _sys
    # Search through the path based on where the metawards module
    # has been installed.
    modpath = _metawards.__file__
    metawards = None
    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards_lib(modpath)
        if metawards:
            break
        newpath = _os.path.dirname(modpath)
        # dirname() of a root path returns itself: we have hit the top.
        if newpath == modpath:
            break
        modpath = newpath
    if metawards is not None:
        return metawards
    # Search from sys.prefix
    modpath = _sys.prefix
    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards_lib(modpath)
        if metawards:
            break
        newpath = _os.path.dirname(modpath)
        if newpath == modpath:
            break
        modpath = newpath
    if metawards is not None:
        return metawards
    # This could have been put in the hostedtoolcache folder...
    p = _os.path.abspath(_os.path.join(_os.path.dirname(_metawards.__file__),
                                       "..", "hostedtoolcache"))
    if _os.path.exists(p):
        # Walk the whole tree looking for a compiled metawards library
        # (.lib on Windows, .a elsewhere); remember its directory.
        for dirpath, dirnames, filenames in _os.walk(p):
            for filename in [f for f in filenames if (f.endswith(".lib") or
                                                      (f.endswith(".a")))]:
                if filename.find("metawards") != -1:
                    metawards = dirpath
    if metawards is None:
        from .utils._console import Console
        Console.error(
            "Cannot find the metawards library directory, when starting from "
            f"{_metawards.__file__}. Please could you "
            "find it and then post an issue on the "
            "GitHub repository (https://github.com/metawards/MetaWards) "
            "as this may indicate a bug in the code.")
        raise RuntimeError("Cannot locate the metawards library directory")
    return metawards
def find_mw_include():
    """Try to find the directory containing the MetaWards include files.

       This raises an exception if the include files cannot be found.
       It returns the full path to the include files
    """
    import metawards as _metawards
    import os as _os
    import sys as _sys

    # Search through the path based on where the metawards module
    # has been installed.
    modpath = _metawards.__file__

    metawards = None

    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards_include(modpath)

        if metawards:
            break

        # walk one directory level up; at the filesystem root
        # dirname() returns its own input, so stop there
        newpath = _os.path.dirname(modpath)

        if newpath == modpath:
            break

        modpath = newpath

    if metawards is not None:
        return metawards

    # Search from sys.prefix (the base of the Python installation/venv)
    modpath = _sys.prefix

    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards_include(modpath)

        if metawards:
            break

        newpath = _os.path.dirname(modpath)

        if newpath == modpath:
            break

        modpath = newpath

    if metawards is None:
        from .utils._console import Console
        Console.error(
            "Cannot find the metawards include directory, when starting from "
            f"{_metawards.__file__}. Please could you "
            "find it and then post an issue on the "
            "GitHub repository (https://github.com/metawards/MetaWards) "
            "as this may indicate a bug in the code.")
        raise RuntimeError("Cannot locate the metawards include directory")

    return metawards
def find_mw_exe():
    """Try to find the MetaWards executable. This should be findable
       if MetaWards has been installed. This raises an exception
       if it cannot be found. It returns the full path to the
       executable
    """
    import metawards as _metawards
    import os as _os
    import sys as _sys

    # Search through the path based on where the metawards module
    # has been installed.
    modpath = _metawards.__file__

    metawards = None

    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards(modpath)

        if metawards:
            break

        # walk one directory level up; at the filesystem root
        # dirname() returns its own input, so stop there
        newpath = _os.path.dirname(modpath)

        if newpath == modpath:
            break

        modpath = newpath

    if metawards is not None:
        return metawards

    # Search from sys.prefix (the base of the Python installation/venv)
    modpath = _sys.prefix

    # Loop only 100 times - this should break before now,
    # We are not using a while loop to avoid an infinite loop
    for i in range(0, 100):
        metawards = _find_metawards(modpath)

        if metawards:
            break

        newpath = _os.path.dirname(modpath)

        if newpath == modpath:
            break

        modpath = newpath

    if metawards is None:
        # We couldn't find it that way - try another route...
        # look next to the Python interpreter itself
        dirpath = _os.path.join(_os.path.dirname(_sys.executable))

        for option in [_os.path.join(dirpath, "metawards.exe"),
                       _os.path.join(dirpath, "metawards"),
                       _os.path.join(dirpath, "Scripts", "metawards.exe"),
                       _os.path.join(dirpath, "Scripts", "metawards")]:
            if _os.path.exists(option):
                metawards = option
                break

    if metawards is None:
        # last attempt - is 'metawards' in the PATH?
        from shutil import which
        metawards = which("metawards")

    if metawards is None:
        from .utils._console import Console
        Console.error(
            "Cannot find the metawards executable. Please could you find "
            "it and add it to the PATH. Or please post an issue on the "
            "GitHub repository (https://github.com/metawards/MetaWards) "
            "as this may indicate a bug in the code.")
        raise RuntimeError("Cannot locate the metawards executable")

    return metawards
def get_reticulate_command():
    """Return the R ``reticulate`` call that selects the Python
       interpreter in which MetaWards is installed.
    """
    import os as _os
    import sys as _sys

    interpreter = _os.path.abspath(_sys.executable)
    return f"reticulate::use_python(\"{interpreter}\", required=TRUE)"
def run(help: bool = None,
        version: bool = None,
        dry_run: bool = None,
        silent: bool = False,
        auto_load: bool = False,
        config: str = None,
        input: _Union[str, VariableSet, VariableSets] = None,
        line: int = None,
        repeats: int = None,
        seed: int = None,
        additional: _Union[str, _List[str]] = None,
        output: str = None,
        disease: _Union[str, Disease] = None,
        model: _Union[str, Wards, Ward] = None,
        demographics: _Union[str, Demographics, Demographic] = None,
        start_date: _Union[str, date] = None,
        start_day: int = None,
        parameters: _Union[str, Parameters] = None,
        repository: str = None,
        population: int = None,
        nsteps: int = None,
        user_variables: _Union[str, VariableSet] = None,
        iterator: str = None,
        extractor: str = None,
        mixer: str = None,
        mover: str = None,
        star_as_E: bool = None,
        star_as_R: bool = None,
        disable_star: bool = None,
        UV: float = None,
        debug: bool = None,
        debug_level: int = None,
        outdir_scheme: str = None,
        nthreads: int = None,
        nprocs: int = None,
        hostfile: str = None,
        cores_per_node: int = None,
        auto_bzip: bool = None,
        no_auto_bzip: bool = None,
        force_overwrite_output: bool = None,
        profile: bool = None,
        no_profile: bool = None,
        mpi: bool = None,
        scoop: bool = None) -> _Union[str, 'pandas.DataFrame']:
    """Run a MetaWards simulation

       Parameters
       ----------
       silent: bool
         Run without printing the output to the screen
       dry_run: bool
         Don't run anything - just print what will be run
       help: bool
         Whether or not to print the full help
       version: bool
         Whether or not to print the metawards version info
       output: str
         The name of the directory in which to write the output. If this
         is not set, then a new, random-named directory will be used.
       force_overwrite_output: bool
         Force overwriting the output directory - this will remove any
         existing directory before running
       auto_load: bool
         Whether or not to automatically load and return a pandas dataframe
         of the output/results.csv.bz2 file. If pandas is available then
         this defaults to True, otherwise False
       disease: Disease or str
         The disease to model (or the filename of the json file containing
         the disease, or name of the disease)
       model: Ward, Wards or str
         The network wards to run (of the filename of the json file
         containing the network, or name of the network))

       There are many more parameters, based on the arguments to
       metawards --help.

       Please set "help" to True to print out a full list of
       help for all of the arguments

       Returns
       -------
       results: str or pandas.DataFrame
         The file containing the output results (output/results.csv.bz2),
         or, if auto_load is True, the pandas.DataFrame containing
         those results
    """
    import sys
    import os
    import tempfile
    from .utils._console import Console

    metawards = find_mw_exe()

    args = []
    tmpdir = None

    # Console options are fixed: plain theme with no spinner or progress
    # bar, as this function is designed for scripted / notebook use
    theme = "simple"
    no_progress = True
    no_spinner = True

    if help:
        args.append("--help")
        output = None
    elif version:
        args.append("--version")
        output = None
    else:
        if output is None and not dry_run:
            # no output directory was specified, so write into a fresh
            # random-named directory that is safe to overwrite
            output = tempfile.mkdtemp(prefix="output_", dir=".")
            force_overwrite_output = True

        if force_overwrite_output:
            args.append("--force-overwrite-output")
        else:
            if output is None:
                output = "output"

            # interactively ask for a new directory until one is free
            while os.path.exists(output):
                import metawards as _metawards
                print(f"Output directory {output} exists.")
                output = _metawards.input("Please choose a new directory: ",
                                          default="error")

                if output is None:
                    return 0

                output = output.strip()

                if len(output) == 0:
                    return 0

                if output.lower() == "error":
                    Console.error("You need to delete the directory or set "
                                  "'force_overwrite_output' to TRUE")
                    return -1

        try:
            if config is not None:
                args.append(f"--config {config}")

            if input is not None:
                if not isinstance(input, str):
                    # serialise the object to a file metawards can read
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    input = _write_to_file(input, "input.dat", dir=tmpdir,
                                           bzip=False, dry_run=dry_run)

                args.append(f"--input {input}")

            if line is not None:
                args.append(f"--line {int(line)}")

            if repeats is not None:
                args.append(f"--repeats {int(repeats)}")

            if seed is not None:
                args.append(f"--seed {int(seed)}")

            if additional is not None:
                if isinstance(additional, list):
                    additional = "\\n".join(additional)
                elif not isinstance(additional, str):
                    additional = str(int(additional))

                if "\"" in additional:
                    if sys.platform.startswith("win"):
                        # BUGFIX: str.replace returns a new string - the
                        # original discarded the result, so the quotes
                        # were never actually replaced
                        additional = additional.replace("\"", "'")
                        args.append(f"--additional \"{additional}\"")
                    else:
                        args.append(f"--additional '{additional}'")
                else:
                    args.append(f"--additional \"{additional}\"")

            if output is not None:
                args.append(f"--output {output}")

            if disease is not None:
                if not isinstance(disease, str):
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    disease = _write_to_file(disease, "disease.json",
                                             dir=tmpdir,
                                             bzip=False, dry_run=dry_run)

                args.append(f"--disease {disease}")

            if model is not None:
                from ._ward import Ward
                from ._wards import Wards

                if isinstance(model, Ward):
                    # promote a single Ward to a Wards collection
                    m = Wards()
                    m.add(model)
                    model = m

                if not isinstance(model, str):
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    model = _write_to_file(model, "model.json", dir=tmpdir,
                                           bzip=True, dry_run=dry_run)

                args.append(f"--model {model}")

            if demographics is not None:
                from ._demographic import Demographic
                from ._demographics import Demographics

                if isinstance(demographics, Demographic):
                    # promote a single Demographic to a Demographics
                    # collection. BUGFIX: the original assigned
                    # "demographics = demographics", discarding the
                    # freshly-built collection "d"
                    d = Demographics()
                    d.add(demographics)
                    demographics = d

                if not isinstance(demographics, str):
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    demographics = _write_to_file(demographics,
                                                  "demographics.json",
                                                  dir=tmpdir,
                                                  bzip=False,
                                                  dry_run=dry_run)

                args.append(f"--demographics {demographics}")

            if start_date is not None:
                from datetime import date

                if isinstance(start_date, date):
                    # BUGFIX: the original called date.isoformat() on the
                    # class rather than on the start_date instance, which
                    # would raise a TypeError
                    start_date = start_date.isoformat()

                args.append(f"--start-date {start_date}")

            if start_day is not None:
                args.append(f"--start-day {int(start_day)}")

            if parameters is not None:
                if not isinstance(parameters, str):
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    parameters = _write_to_file(parameters, "parameters.dat",
                                                dir=tmpdir, bzip=False,
                                                dry_run=dry_run)

                args.append(f"--parameters {parameters}")

            if repository is not None:
                args.append(f"--repository {repository}")

            if population is not None:
                args.append(f"--population {int(population)}")

            if nsteps is not None:
                args.append(f"--nsteps {int(nsteps)}")

            if user_variables is not None:
                if not isinstance(user_variables, str):
                    if tmpdir is None:
                        tmpdir = tempfile.mkdtemp(prefix="input_", dir=".")
                    user_variables = _write_to_file(user_variables,
                                                    "user_variables.dat",
                                                    dir=tmpdir,
                                                    bzip=False,
                                                    dry_run=dry_run)

                args.append(f"--user {user_variables}")

            if iterator is not None:
                args.append(f"--iterator {iterator}")

            if extractor is not None:
                args.append(f"--extractor {extractor}")

            if mixer is not None:
                args.append(f"--mixer {mixer}")

            if mover is not None:
                args.append(f"--mover {mover}")

            # the three star options are mutually exclusive
            if star_as_E:
                args.append("--star-as-E")
            elif star_as_R:
                args.append("--star-as-R")
            elif disable_star:
                args.append("--disable-star")

            if UV is not None:
                args.append(f"--UV {UV}")

            if theme is not None:
                args.append(f"--theme {theme}")

            if no_spinner:
                args.append("--no-spinner")

            if no_progress:
                args.append("--no-progress")

            if debug:
                args.append("--debug")

            if debug_level is not None:
                args.append(f"--debug-level {debug_level}")

            if outdir_scheme is not None:
                args.append(f"--outdir-scheme {outdir_scheme}")

            if nthreads is not None:
                args.append(f"--nthreads {int(nthreads)}")

            if nprocs is not None:
                args.append(f"--nprocs {int(nprocs)}")

            if hostfile is not None:
                args.append(f"--hostfile {hostfile}")

            if cores_per_node is not None:
                args.append(f"--cores-per-node {int(cores_per_node)}")

            if auto_bzip:
                args.append("--auto-bzip")
            elif no_auto_bzip:
                args.append("--no-auto-bzip")

            if profile:
                args.append("--profile")
            elif no_profile:
                args.append("--no-profile")

            if mpi:
                args.append("--mpi")

            if scoop:
                args.append("--scoop")
        except Exception as e:
            # BUGFIX: added the missing newline between the two message
            # parts; the unreachable "return -1" after the raise has
            # been removed
            Console.error(f"[ERROR] Error interpreting the arguments\n"
                          f"[ERROR] {e.__class__}: {e}")
            _rmdir(tmpdir)
            raise

    cmd = f"{metawards} {' '.join(args)}"

    if dry_run:
        Console.info(f"[DRY-RUN] {cmd}")
        return_val = 0
    else:
        if output is not None:
            Console.info(
                f"Writing output to directory {os.path.abspath(output)}")

        Console.info(f"[RUNNING] {cmd}")

        try:
            if sys.platform.startswith("win"):
                # shlex.split doesn't work, but the command can
                # be passed as a single string
                args = cmd
            else:
                import shlex
                args = shlex.split(cmd)

            import subprocess

            # We have to specify all of the pipes (stdin, stdout, stderr)
            # as below as otherwise we will break metawards on Windows
            # (especially needed to allow metawards to run under
            # reticulate via metawards$run. Without these specified
            # we end up with Windows File Errors)
            with subprocess.Popen(args,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  bufsize=1, encoding="utf8",
                                  errors="ignore",
                                  text=True) as PROC:
                while True:
                    line = PROC.stdout.readline()

                    if not line:
                        break

                    if not silent:
                        try:
                            sys.stdout.write(line)
                            sys.stdout.flush()
                        except UnicodeEncodeError:
                            # We get frequent unicode errors when run
                            # within RStudio. It is best just to ignore them
                            pass
                        except Exception as e:
                            Console.error(f"WRITE ERROR: {e.__class__} : {e}")

                return_val = PROC.poll()

                if return_val is None:
                    # get None if everything OK on Windows
                    # (sometimes windows returns 0 as None, which
                    # breaks things!)
                    return_val = 0
        except Exception as e:
            Console.error(f"[ERROR] {e.__class__}: {e}")
            return_val = -1

    # clean up any temporary input directory we created above
    if tmpdir is not None:
        _rmdir(tmpdir)

    if dry_run:
        return

    if output is None:
        # --help / --version runs have nothing to load
        return

    if return_val == 0:
        results = os.path.join(output, "results.csv")

        if not os.path.exists(results):
            # metawards may have auto-bzipped the results
            results += ".bz2"

        if auto_load:
            try:
                import pandas
            except ImportError as e:
                # BUGFIX: the message was a plain string (printed a
                # literal "{e}") and "e" was never bound
                Console.error(f"Cannot import pandas:\n{e}")
                auto_load = False

        if auto_load is None:
            # auto_load defaults to True only if pandas is available
            try:
                import pandas
                auto_load = True
            except ImportError:
                auto_load = False

        if auto_load:
            import pandas as pd
            return pd.read_csv(results)
        else:
            return results
    else:
        output_file = os.path.join(output, "console.log.bz2")
        Console.error(f"Something went wrong with the run. Please look "
                      f"at {output_file} for more information")
        return None
| chryswoods/MetaWards | src/metawards/_run.py | _run.py | py | 26,754 | python | en | code | null | github-code | 90 |
22325934431 |
from os import read
import pygame
from generic_entity import GenericEntity
from player import Player
from generic_enemy import GenericEnemy, phf
from sys import exit
from weapon import Weapon
from setting import*
from ui import*
from level import Level
from game_data import level_0
from random import randint
# Starts & initiates pygame and builds the game objects.
# NOTE(review): this is a flat script - the window, sprites, UI screens and
# the main game loop all live at module level.
WIDTH = 1366
HEIGHT = 768
healthFlag = False
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))

# Core actors: the weapon is shared between the player object and its
# own sprite group so both can update/draw it.
weaponO = Weapon()
playerO = Player(weaponO, screen, healthFlag=False)
enemyO = GenericEnemy(screen=screen)
# every enemy tracks the same player via this class attribute
GenericEnemy.player = playerO
enemy = pygame.sprite.Group()
enemy.add(enemyO)
player = pygame.sprite.Group()
player.add(playerO)
weapon = pygame.sprite.Group()
weapon.add(weaponO)
started = True
enemyCounter = 0
pygame.display.set_caption('Soup')
# You can also change the icon
clock = pygame.time.Clock()
floor_surface = pygame.image.load('Textures/frames/floor_1.png').convert()
ui0 = Ui(screen, WIDTH, HEIGHT, floor_surface, started)
start = Start(screen, WIDTH, HEIGHT, floor_surface, started)
over = GameOver(screen, WIDTH, HEIGHT, floor_surface, started)
img = Images()
# level = Level(level_0, screen)
speed = 5
#running = True
game_over = True

# Main game loop: spawn enemies, handle input events, then redraw.
while True:
    # spawn a new enemy at a random position every 100 frames
    enemyCounter += 1
    if enemyCounter == 100:
        enemy.add(GenericEnemy(screen=screen,
                               x=randint(0, 1300), y=randint(0, 700)))
        enemyCounter = 0
    # player died: append the score to the highscore file, show the
    # game-over screen, then return to the start screen
    if playerO.healthFlag == True:
        with open("highscore.txt", "a+") as f:
            f.write(" " + str(GenericEntity.playerScore))
        over.gameOver()
        playerO.healthFlag = False
        start.startUi()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
        # check for (W, A, S, D)
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_a:
                playerO.left_pressed = True
                # level.x_scroll += speed
            if event.key == pygame.K_d:
                playerO.right_pressed = True
                # level.x_scroll -= speed
            if event.key == pygame.K_w:
                playerO.up_pressed = True
                # level.y_scroll -= speed
            if event.key == pygame.K_s:
                playerO.down_pressed = True
                # level.y_scroll += speed
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_a:
                playerO.left_pressed = False
                # level.x_scroll = 0
            if event.key == pygame.K_d:
                playerO.right_pressed = False
                # level.x_scroll = 0
            if event.key == pygame.K_w:
                playerO.up_pressed = False
                # level.y_scroll = 0
            if event.key == pygame.K_s:
                playerO.down_pressed = False
                # level.y_scroll = 0
        # check for mouse movement and changes rotation true when moving as we don't want the weapon to follow the mouse when the mouse isn't moving
        if event.type == pygame.MOUSEMOTION:
            weaponO.rotation = True
            weaponO.mx, weaponO.my = pygame.mouse.get_pos()
        if event.type == pygame.MOUSEBUTTONDOWN:
            weaponO.attackFlag = 20
            weaponO.attackDelay()
            # li = pygame.sprite.groupcollide(enemy, weapon, False, False)
            # damage any enemy close enough to the player's weapon
            for e in enemy:
                if e.distance <= 50:
                    e.damageFlag = 5
        if event.type == pygame.MOUSEBUTTONUP:
            weaponO.attackFlag = False
        if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            ui0.paused()
    # draw all out elements
    # Updates the display
    screen.fill('black')
    # tile the floor texture across the whole window (16px tiles)
    for i in range(0, HEIGHT, 16):
        for k in range(0, WIDTH, 16):
            screen.blit(floor_surface, (k, i))
    # debug purposes
    # screen.blit(pygame.transform.scale(img.pause_surface,(50,50)),(WIDTH-50,0))
    # start.startUi()
    # level.run()
    enemy.draw(screen)
    enemy.update()
    player.draw(screen)
    player.update()
    weaponO.update()
    weaponO.draw(screen)
    pygame.display.update()
    # Locks the frame rate at 60 fps
    # not very clean code
    weaponO.direction = playerO.direction
    clock.tick(60)
| sandstone991/soup | demo.py | demo.py | py | 4,244 | python | en | code | 1 | github-code | 90 |
11941471888 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 30 13:00:40 2019
@author: hitham
"""
import os
import requests, zipfile, StringIO
def downloaddata(parentdir, datbase_link, libname):
    """Download a zip archive from *datbase_link*, extract it into
    *parentdir*, and rename the extracted entry to *libname*.
    (Python 2 code: uses the old StringIO module.)
    """
    #if not os.path.exists(parentdir):
    #    os.makedirs(parentdir)
    # stream the archive and unzip it straight from memory
    r = requests.get(datbase_link, stream=True)
    with zipfile.ZipFile(StringIO.StringIO(r.content)) as zf:
        zf.extractall(parentdir)
    # NOTE(review): this renames EVERY entry of parentdir to the same
    # destination, so with more than one entry the later renames clobber
    # or fail on the earlier ones - presumably each archive is expected
    # to extract to a single top-level directory. TODO confirm.
    for filename in os.listdir(parentdir):
        src = parentdir + '/' + filename
        dst = parentdir + '/' + libname
        os.rename(src, dst)
def toolbox():
    """Download the third-party SVM toolboxes into ../toolbox."""
    parentdir = '../toolbox'
    if not os.path.exists(parentdir):
        os.makedirs(parentdir)
    #--------------------------------
    # (local name, download link) pairs, fetched in order:
    #  - libsvm-3.24: version 3.24, released September 11, 2019
    #    (conducts some minor fixes)
    #  - libSVM-onevset: extension of libSVM to support Open Set
    #    Recognition as described in "Toward Open Set Recognition",
    #    TPAMI July 2013
    toolboxes = [
        ('libSVM-3-24',
         'https://www.csie.ntu.edu.tw/~cjlin/libsvm/oldfiles/libsvm-3.24.zip'),
        ('libSVM-onevset',
         'https://github.com/tboult/libSVM-onevset/archive/master.zip'),
    ]
    for libname, datbase_link in toolboxes:
        downloaddata(parentdir, datbase_link, libname)
# Script entry point: fetch all toolboxes when run directly.
if __name__=="__main__":
    toolbox()
| hjleed/Open-Set-Audio-Recognition-for-Multi-class-Classification-with-Rejection | Install_data_toolbox/toolBOX.py | toolBOX.py | py | 1,454 | python | en | code | 4 | github-code | 90 |
28151333871 | import OpenGL.GL as gl
import PyDelFEM2 as dfm2
import PyDelFEM2.gl.glfw
def draw_func():
    """Per-frame draw callback: enable lighting and draw the global mesh."""
    gl.glEnable(gl.GL_LIGHTING)
    # msh is the module-level mesh loaded below
    msh.draw()
# Load the bunny mesh and scale it down to fit the default view volume.
msh = dfm2.Mesh()
msh.read("../test_inputs/bunny_2k.ply")
msh.scale_xyz(0.03)

# Open a GLFW window, register the draw callback and run the event loop.
win = dfm2.gl.glfw.WindowGLFW(1.0,winsize=(400,300))
win.list_func_draw.append(draw_func)
dfm2.gl.setSomeLighting()
win.draw_loop()
| nobuyuki83/pydelfem2 | examples_py/01_openwin_glfw2.py | 01_openwin_glfw2.py | py | 348 | python | en | code | 10 | github-code | 90 |
74791737577 | import numpy as np
from tqdm import tqdm
import torch
import os
from sklearn.decomposition import PCA
import umap.umap_ as umap
import plotly.graph_objects as go
import argparse
from pathlib import Path
def adapt_hidden_embeddings(instance):
    """Normalise instance['last_hidden_embedding'] into a list of per-step
    embedding vectors, truncated at the first padding token.

    Returns the (possibly shortened) list of embedding tensors.
    """
    # if the embeddings of all the generation steps were saved in a single
    # matrix, rather than in a list, separate them into per-row vectors
    # (assumes the matrix is (num_steps, hidden_dim) - TODO confirm)
    if len(instance['last_hidden_embedding'][-1].shape) == 2:
        instance['last_hidden_embedding'] = [instance['last_hidden_embedding'][0][i,:] for i in range(instance['last_hidden_embedding'][0].shape[0])]

    # removing the paddings
    # Compare all elements to 1 (presumably 1 is the pad token id - verify
    # against the tokenizer used upstream)
    if "all_outputs_ids" in instance.keys():
        matches = instance['all_outputs_ids'][0,:].eq(1)
        # Find the first non-zero element in matches
        indices = matches.nonzero(as_tuple=True)
        # Get the first index where value is 1 (if no 1 then no "padding" and so can take all embeddings)
        filter_index = indices[0][0].item() if indices[0].numel() != 0 else len(instance['last_hidden_embedding'])
    else:
        filter_index = len(instance['last_hidden_embedding'])
    filtered_hidden_embedding = instance['last_hidden_embedding'][:filter_index]
    return filtered_hidden_embedding
def get_data_name(full_file_path):
    """Infer the dataset name embedded in a result file path.

    Raises if none of the known dataset names occurs in the path.
    """
    for dataset_name in ("squad", "NQ", "musique"):
        if dataset_name in full_file_path:
            return dataset_name
    raise Exception(f"dataset name not found in {full_file_path}")
def get_model_name(curr_indir):
    """Infer the model name embedded in an input directory path.

    Raises if none of the known model names occurs in the path.
    """
    for model_name in ("Flan-UL2", "Flan-T5-xxl", "OPT-IML"):
        if model_name in curr_indir:
            return model_name
    raise Exception(f"curr model not found in indir: {curr_indir}")
def get_response(options):
    """Collapse a list of model outputs to "unanswerable" if any output
    signals that the question cannot be answered, otherwise return the
    first output unchanged.
    """
    # phrases that mark an output as "unanswerable" when they appear
    # anywhere inside it
    substring_markers = ["unanswerable", "n/a", "idk", "i don't know", "not known", "answer not in context"]
    # phrases that only count when they are the whole (normalised)
    # output, optionally followed by a period
    exact_markers = ['nan', 'unknown', 'no answer', 'it is unknown', "none of the above", 'none of the above choices']

    for raw_option in options:
        normalized = str(raw_option).lower().strip()
        exact_hit = (normalized in exact_markers
                     or normalized in [f"{marker}." for marker in exact_markers])
        if exact_hit or any(marker in normalized for marker in substring_markers):
            return "unanswerable"
    return options[0]
def get_data(curr_indir, prompt_type, embedding_type):
    """Load the answerable / un-answerable .pt result files under
    *curr_indir* and extract their embeddings.

    Returns (unanswerable_embeddings, answerable_embeddings,
    full_pt_dicts, data_name). Assumes each directory contains exactly
    one "answerable*" and one "un-answerable*" .pt file - TODO confirm.
    """
    full_pt_dicts = dict()
    for subdir, dirs, files in os.walk(curr_indir):
        for file in files:
            # only the serialised torch result files are of interest
            if not file.endswith(".pt"):
                continue
            curr_data = torch.load(os.path.join(subdir, file))
            curr_data_name = get_data_name(os.path.join(subdir, file))
            if file.startswith("un-answerable"):
                full_pt_dicts["unanswerable"] = curr_data
                # first_hidden_embedding is stored per-instance; the last
                # hidden embeddings are per-step and need stacking
                if embedding_type == "first_hidden_embedding":
                    unanswerable_all_embeddings = [instance[embedding_type] for instance in curr_data[prompt_type]]
                else:
                    unanswerable_all_embeddings = [torch.stack(adapt_hidden_embeddings(instance)) for instance in curr_data[prompt_type]]
            elif file.startswith("answerable"):
                full_pt_dicts["answerable"] = curr_data
                if embedding_type == "first_hidden_embedding":
                    answerable_all_embeddings = [instance[embedding_type] for instance in curr_data[prompt_type]]
                else:
                    answerable_all_embeddings = [torch.stack(adapt_hidden_embeddings(instance)) for instance in curr_data[prompt_type]]
            else:
                raise Exception(f"{file} file doesn't start with \"unanswerable\" nor with \"answerable\".")
    return unanswerable_all_embeddings, answerable_all_embeddings, full_pt_dicts, curr_data_name
def create_pca_plot(data_pca, unanswerable_identifies_as_unanswerable, unanswerable_identifies_as_answerable, answerable_identified_as_unanswerable, unanswerable_embeddings, outdir):
    """Build a 3D PCA scatter of the four query groups and save it as an
    interactive HTML file at *outdir*.

    *data_pca* rows are ordered: unanswerable-identified,
    unanswerable-unidentified, answerable-unidentified,
    answerable-identified; the group list arguments are only used for
    their lengths to slice the matrix.
    """
    def _scatter(start, stop, color, name):
        # One trace per query group; [start:stop) indexes rows of data_pca
        # (stop=None means "to the end").
        points = data_pca[start:stop]
        return go.Scatter3d(
            x=points[:, 0],
            y=points[:, 1],
            z=points[:, 2],
            mode='markers',
            marker=dict(
                size=2,
                color=color,
            ),
            name=name
        )

    n_un_identified = len(unanswerable_identifies_as_unanswerable)
    n_un_missed = len(unanswerable_identifies_as_answerable)
    n_ans_missed = len(answerable_identified_as_unanswerable)
    n_un = len(unanswerable_embeddings)

    scatter1 = _scatter(0, n_un_identified,
                        'red', 'unanswerable queries (identified)')
    scatter2 = _scatter(n_un_identified, n_un_identified + n_un_missed,
                        'pink', 'unanswerable queries (unidentified)')
    scatter3 = _scatter(n_un, n_un + n_ans_missed,
                        'green', 'answerable queries (unidentified)')
    scatter4 = _scatter(n_un + n_ans_missed, None,
                        'blue', 'answerable queries (identified)')

    # ordered as scatter1, scatter2, scatter4, scatter3, so in the legend looks better
    fig = go.Figure(data=[scatter1, scatter2, scatter4, scatter3])
    fig.update_layout(
        scene=dict(
            xaxis=dict(tickfont=dict(size=10)),
            yaxis=dict(tickfont=dict(size=10)),
            zaxis=dict(tickfont=dict(size=10)),
            aspectmode='cube'
        ),
        legend=dict(
            itemclick=False,  # Disable item click
            itemdoubleclick=False,  # Disable item double click
            font=dict(
                size=12,  # Increase text size
            ),
            traceorder="normal",
            itemsizing='constant'  # Increase marker size
        )
    )

    # fig.show()
    fig.write_html(outdir)
def main(args):
    """For each input directory, load the saved embeddings, aggregate them
    per instance, split them by answerability outcome, project to 3D with
    PCA and save an interactive plot.
    """
    aggregation_type = args.aggregation_type  # "only_first_tkn"
    prompt_type = args.prompt_type  # "Hint-Prompt"
    embedding_type = args.embedding_type  # "first_hidden_embedding"
    indirs = args.indirs  # ["../responses_embeddings/k-beams/22-06-2023_12:26:12/OPT"]

    # create outdir (one nested folder per setting combination)
    outdir_path = os.path.join(args.outdir, embedding_type, aggregation_type, prompt_type)
    outdir_path_cls = Path(outdir_path)
    outdir_path_cls.mkdir(parents=True, exist_ok=True)

    for indir in tqdm(indirs):
        unanswerable_all_embeddings, answerable_all_embeddings, full_pt_dicts, curr_data_name = get_data(indir, prompt_type, embedding_type)

        # Collapse each instance's embeddings to a single vector according
        # to aggregation_type (shapes assumed (steps, hidden) or
        # (1, steps, hidden) - TODO confirm upstream format)
        if embedding_type == "first_hidden_embedding":
            unanswerable_embeddings = [elem.cpu().numpy() for elem in unanswerable_all_embeddings]
            answerable_embeddings = [elem.cpu().numpy() for elem in answerable_all_embeddings]
        elif aggregation_type == "only_first_tkn":
            # keep only the first generated token's embedding
            unanswerable_embeddings = [elem.squeeze()[0,:].cpu().numpy() if len(elem.shape)>2 else elem[0,:].cpu().numpy() for elem in unanswerable_all_embeddings]
            answerable_embeddings = [elem.squeeze()[0,:].cpu().numpy() if len(elem.shape)>2 else elem[0,:].cpu().numpy() for elem in answerable_all_embeddings]
        elif aggregation_type == "average":
            # average across generation steps
            unanswerable_embeddings = [elem.mean(dim=0).cpu().numpy() for elem in unanswerable_all_embeddings]
            answerable_embeddings = [elem.mean(dim=0).cpu().numpy() for elem in answerable_all_embeddings]
        elif aggregation_type == "aggregated":
            # treat every generation step of every instance as its own point
            unanswerable_instances = [(emb.cpu().numpy(), instance["outputs"][0]) for instance in full_pt_dicts["unanswerable"][prompt_type] for emb in adapt_hidden_embeddings(instance)]
            answerable_instances = [(emb.cpu().numpy(), instance["outputs"][0]) for instance in full_pt_dicts["answerable"][prompt_type] for emb in adapt_hidden_embeddings(instance)]

            unanswerable_embeddings = [elem[0] for elem in unanswerable_instances]
            answerable_embeddings = [elem[0] for elem in answerable_instances]
        else:
            raise Exception(f'aggregation_type can only be any of any of "average", "only_first_tkn" and "aggregated", but got {aggregation_type}')

        # Extracting Actual Text Outputs
        unanswerable_outputs = [elem["outputs"][0] for elem in full_pt_dicts["unanswerable"][prompt_type]]
        answerable_outputs = [elem["outputs"][0] for elem in full_pt_dicts["answerable"][prompt_type]]

        # separate questions into "unanswerable" replies and other
        unanswerable_identifies_as_unanswerable = [unanswerable_embeddings[i] for i,txt in enumerate(unanswerable_outputs) if get_response([txt])=="unanswerable"]
        unanswerable_identifies_as_answerable = [unanswerable_embeddings[i] for i,txt in enumerate(unanswerable_outputs) if get_response([txt])!="unanswerable"]
        answerable_identified_as_unanswerable = [answerable_embeddings[i] for i,txt in enumerate(answerable_outputs) if get_response([txt])=="unanswerable"]
        answerable_identified_as_answerable = [answerable_embeddings[i] for i,txt in enumerate(answerable_outputs) if get_response([txt])!="unanswerable"]

        # Stack all vectors (order must match the slicing in create_pca_plot)
        combined_data = np.vstack((unanswerable_identifies_as_unanswerable, unanswerable_identifies_as_answerable, answerable_identified_as_unanswerable, answerable_identified_as_answerable))

        # Initialize PCA
        pca = PCA(n_components=3)

        # Fit and transform data to 3D
        data_pca = pca.fit_transform(combined_data)

        # create and save PCA plot
        curr_model_name = get_model_name(indir)
        curr_outdir = os.path.join(outdir_path, f"{curr_model_name}_{curr_data_name}_3D.html")
        print(f"Saving PCA plot of {curr_model_name} on {curr_data_name} to: {curr_outdir}")
        create_pca_plot(data_pca, unanswerable_identifies_as_unanswerable, unanswerable_identifies_as_answerable, answerable_identified_as_unanswerable, unanswerable_embeddings, curr_outdir)
# Command-line entry point: parse options and run the plot generation.
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description="")
    argparser.add_argument('-i', '--indirs', nargs='+', type=str, required=True, help='path to data')
    argparser.add_argument('-o', '--outdir', type=str, required=True, help='path to outdir')
    argparser.add_argument('--prompt-type', type=str, default="Regular-Prompt", help='prompt type to classify ("Regular-Prompt" or "Hint-Prompt")')
    argparser.add_argument('--aggregation-type', type=str, default="only_first_tkn", help='how to aggregate all the hidden layers of all the generated tokens of a single instance (choose from "average" to average them, "union" to treat each of them as an instance, and "only_first_tkn" to only take the first token\'s hidden layers).')
    argparser.add_argument('--embedding-type', type=str, default="last_hidden_embedding", help='which layer to take: any one of "last_hidden_embedding" and "first_hidden_embedding"')
    args = argparser.parse_args()
    main(args)
27308158831 | '''
Created on Jun 18, 2015
@author: boris
'''
from numpy import concatenate, add
from gold.statistic.MagicStatFactory import MagicStatFactory
from gold.statistic.Statistic import MultipleRawDataStatistic
from gold.track.TrackFormat import TrackFormatReq
class RawOverlapCodedEventsStat(MagicStatFactory):
    '''
    Encode start and end events for multiple tracks. Needed to calculate the raw overlap for all combinations of a set of tracks.
    Because of the encoding it is limited to 33 tracks.
    '''
    # Factory placeholder: presumably MagicStatFactory dispatches to the
    # ...Unsplittable implementation below -- confirm in MagicStatFactory.
    pass
#class RawOverlapCodedEventsStatSplittable(StatisticSumResSplittable):
# pass
class RawOverlapCodedEventsStatUnsplittable(MultipleRawDataStatistic):
    def _compute(self):
        """Merge the start/end events of all child tracks into one sorted stream.

        Returns a tuple of (event positions, lengths between consecutive
        events, cumulative cover status), where bit i of the cover status
        tells whether track i covers the interval after each event.
        NOTE: Python 2 code -- uses xrange and integer '/' division below.
        """
        tvs = [x.getResult() for x in self._children]
        from numpy import array
#        tvStartsOld = [x.startsAsNumpyArray()for x in tvs]
#        tvEndsOld = [x.endsAsNumpyArray() for x in tvs]
        tvStarts = [array(x.startsAsNumpyArray(), dtype='int64') for x in tvs]
        tvEnds = [array(x.endsAsNumpyArray(), dtype='int64') for x in tvs]
        numTracks = len(tvStarts)
        assert numTracks < 34, 'Maximum supported nr. of tracks for this statistic is 33'
        # Event positions are shifted left by (numTracks+1) bits; the low bits
        # carry the event code so sorting positions also orders the codes.
        multiplier = 2**(numTracks+1)
        #assert no overlaps..
        #create arrays multiplied by 8 to use last three bits to code event type,
        #Last three bits: relative to 4 (100): +/- 1 for start/end of track1, +/- 2 for track2..
        tvCodedStarts = []
        tvCodedEnds = []
        for i in xrange(numTracks):
            # Start of track i adds 2**i to the code, end subtracts it; the
            # 2**numTracks offset keeps all codes non-negative.
            tvCodedStarts.append(tvStarts[i] * multiplier + (2**numTracks) + (2**i))
            tvCodedEnds.append(tvEnds[i] * multiplier + (2**numTracks) - (2**i))
#        t1CodedStarts = t1s * 8 +5
#        t1CodedEnds= t1e * 8 +3
#        t2CodedStarts = t2s * 8 +6
#        t2CodedEnds= t2e * 8 +2
        allSortedCodedEvents = concatenate((concatenate(tvCodedStarts), concatenate(tvCodedEnds) ))
        allSortedCodedEvents.sort()
        # Recover the signed per-track delta (+/- 2**i) for each event ...
        allEventCodes = (allSortedCodedEvents % multiplier) - (2**numTracks)
        # ... and the original genomic position (integer division in py2).
        allSortedDecodedEvents = allSortedCodedEvents / multiplier
        allEventLengths = allSortedDecodedEvents[1:] - allSortedDecodedEvents[:-1]
        #due to the coding, the last bit now has status of track1, and the second last bit status of track2
        #thus, 3 is cover by both, 2 is cover by only track2, 1 is cover by only track1, 0 is no cover
        #this works as there are no overlaps, and bits will thus not "spill over"..
        cumulativeCoverStatus = add.accumulate(allEventCodes)
        return allSortedDecodedEvents, allEventLengths, cumulativeCoverStatus
    def _getTrackFormatReq(self):
        # Requires non-dense (point/segment) track data.
        return TrackFormatReq(dense=False)
| uio-bmi/track_rand | lib/hb/gold/statistic/RawOverlapCodedEventsStat.py | RawOverlapCodedEventsStat.py | py | 2,758 | python | en | code | 1 | github-code | 90 |
17938506929 | import sys
sys.setrecursionlimit(10 ** 7)
input = sys.stdin.readline
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
n = int(input())
k = int(input())
res = 1
for _ in range(n):
if res < k:
res *= 2
else:
res += k
print(res)
if __name__ == '__main__':
resolve()
| Aasthaengg/IBMdataset | Python_codes/p03564/s361440470.py | s361440470.py | py | 339 | python | en | code | 0 | github-code | 90 |
26484924760 | import json
import os
from django.conf import settings
from apps.api.tests.base import BaseTestCase
class SearchInterestTestCase(BaseTestCase):
    """API tests for the search_interest endpoint."""

    fixtures = [
        "trend.json",
        "user.json"
    ]

    def _load_expected(self, relative_path):
        # Read a JSON fixture holding the expected response payload.
        fixture_path = os.path.join(settings.FIXTURE_DIRS[0], relative_path)
        with open(fixture_path, 'r') as fixture_file:
            return json.load(fixture_file)

    def test_search_interest_unauthorized(self):
        # An anonymous request must be rejected with 401.
        response = self.api_client.get("search_interest/")
        self.assertEqual(response.status_code, 401)

    def test_get_for_all_keywords(self):
        # Without a keyword filter, all keywords are returned.
        self.api_client.login("admin", "admin")
        response = self.api_client.get("search_interest/")
        self.assertDictEqual(self._load_expected("data/all_keywords.json"), response.json)

    def test_get_for_keyword(self):
        # A keyword query string narrows the result to that keyword.
        self.api_client.login("admin", "admin")
        response = self.api_client.get("search_interest/?keyword=blue%20bloods")
        self.assertDictEqual(self._load_expected("data/blue_bloods_search_interests.json"), response.json)
| GrigoriLab/daily_trend | apps/api/tests/test_search_interest.py | test_search_interest.py | py | 1,167 | python | en | code | 0 | github-code | 90 |
3118654017 | # This module provides the whole program with the nessary methods
# the main F bool function values counter
def bool_function(x1, x2, x3, x4):
    """Boolean function F = (x1 or x2 or x3) and (x2 or x3 or x4), as 0/1.

    Implemented arithmetically: each parenthesized sum is truthy whenever
    at least one of its inputs is non-zero, so the product is non-zero
    exactly when both clauses hold.
    """
    return 1 if (x1 + x2 + x3) * (x2 + x3 + x4) else 0
# the full error between F and Y counter
def fault_counter(F, Y):
    """Count the positions where Y disagrees with F.

    Iterates over the indices of F (so Y must be at least as long as F,
    matching the original behavior). Replaces the manual C-style counter
    loop with an idiomatic generator sum of boolean mismatches.
    """
    return sum(Y[i] != F[i] for i in range(len(F)))
# net function
def net(x, w, w0):
    """Perceptron net input: sum_i(w[i] * x[i]) + w0.

    Bug fix: the original accumulated `w[i] * x[i] + w0` per element, which
    added the bias w0 once for every input (scaling it by len(x)); the
    standard net-input definition adds the bias exactly once.
    """
    return sum(w[i] * x[i] for i in range(len(x))) + w0
| thelacker/ITIB | LAB_1/Tools.py | Tools.py | py | 519 | python | en | code | 1 | github-code | 90 |
20862512561 | import copy
import random
import sys
sys.path.append(".")
from rpg2_classdefinitions import (Player_PC, Pet_NPC, ItemBag_PC,
Spell_PC, Monster_NPC, Weapon_PC,
Armor_PC, QuestItems_NPC, Access_NPC)
import rpg2_party_management_functions as party_func
import rpg2_quest_battle as battle_func
import rpg2_quest_monster_function as mon_func
from rpg2_constants import Constants
from rpg2_constant_lists import List_Constants
L = List_Constants()
C = Constants()
#quest two is advanced goblin fighting
#fight goblins until the town is saved
def quest_two(h_p, ib_pc, s_pc, p_npc, h_w, h_a, q_i, a_i):
    """Defend the village: fight `a_i.rank` waves of super goblins.

    Parameters are the shared party/state objects used across this module
    (h_p: hero party, ib_pc: item bag, s_pc: spells, p_npc: pets,
    h_w/h_a: weapons/armor, q_i: quest items, a_i: adventurer info) --
    presumably; confirm against rpg2_classdefinitions.
    Consumes one q_i.package; on success awards an rpackage and rank-scaled fame.
    """
    print ("Those goblins are trying to invade the local village. ")
    print ("We'll need to eliminate them before they get close. ")
    # Fight with shallow copies so the real party's state is untouched.
    new_h_p = []
    for hro in h_p:
        copy_hero = copy.copy(hro)
        new_h_p.append(copy_hero)
    g_p = []
    q_i.package -= 1
    y = len(h_p)
    #at higher ranks you need to fight more goblins
    for x in range(0, a_i.rank):
        for z in range(0, y):
            mon = mon_func.super_goblin_maker()
            g_p.append(mon)
        print ("You see a band of goblins approaching. ")
        battle_func.battle_phase(new_h_p, g_p, p_npc, ib_pc, s_pc, h_w, h_a, q_i)
        # NOTE(review): removing from new_h_p while iterating it can skip the
        # element after each removal; consider iterating over a copy.
        for hero in new_h_p:
            if hero.health <= 0:
                new_h_p.remove(hero)
        if len(new_h_p) == 0:
            break
        elif len(new_h_p) > 0:
            # Surviving heroes make the next wave larger.
            y += len(new_h_p)
    if len(new_h_p) <= 0:
        print ("You ok? We managed to push the goblins back for now. ")
        print ("The fees for saving you will be taken out of your pay, by the way. ")
    elif len(new_h_p) > 0:
        print ("You were a big help, thanks. ")
        q_i.rpackage += 1
        a_i.fame += round(a_i.rank ** C.DECREASE_EXPONENT)
#quest one is goblin hunting
#fight goblins until you get the package back
def quest_one(h_p, ib_pc, s_pc, p_npc, h_w, h_a, q_i, a_i):
    """Recover the stolen package: fight growing goblin waves until a
    replacement package (rpackage) is found or the party is wiped out.

    Parameters are the shared party/state objects used across this module
    (see quest_two) -- presumably; confirm against rpg2_classdefinitions.
    Consumes one q_i.package; success grants exactly one rpackage and 1 fame.
    """
    print ("Those damn goblins stole the package. ")
    print ("They can't have gotten too far, go find it! ")
    #make a copy of the heroes party to track if they are defeated
    new_h_p = []
    for hro in h_p:
        copy_hero = copy.copy(hro)
        new_h_p.append(copy_hero)
    #make a party to fill with goblin monsters
    g_p = []
    #take away a package to start the quest
    q_i.package -= 1
    #keep track of the current rpackages that the player has
    x = q_i.rpackage
    #the goblin waves will keep increasing until you find the package
    y = len(h_p)
    #after they find another rpackage then the quest is over
    while q_i.rpackage == x and len(new_h_p) > 0:
        for z in range(0, y):
            mon = mon_func.goblin_maker()
            g_p.append(mon)
        print ("You find a pack of goblins. ")
        battle_func.battle_phase(new_h_p, g_p, p_npc, ib_pc, s_pc, h_w, h_a, q_i)
        # NOTE(review): removing from new_h_p while iterating it can skip the
        # element after each removal; consider iterating over a copy.
        for hero in new_h_p:
            if hero.health <= 0:
                new_h_p.remove(hero)
        y += 1
    #if the heroes lose then they get no reward
    if len(new_h_p) <= 0:
        print ("Damn it, how can you lose to goblins?! ")
        print ("I can't believe I hired you! ")
        q_i.rpackage = x
    elif q_i.rpackage > x:
        print ("Thanks. I was a little worried there. ")
        #make sure they only get one rpackage from the quest
        q_i.rpackage = x + 1
        #give them a fame
        a_i.fame += 1
def quest(h_p, ib_pc, s_pc, p_npc, h_w, h_a, q_i, a_i):
    """Hand the party a randomly chosen assignment, if they hold a package.

    Quest selection could later depend on guild rank and fame; for now it
    is a uniform roll over the two available goblin quests.
    """
    if q_i.package <= 0:
        print ("You don't have an assignment. ")
        return
    roll = random.randint(0, 2)
    if roll <= 1:
        quest_one(h_p, ib_pc, s_pc, p_npc, h_w, h_a, q_i, a_i)
    else:
        quest_two(h_p, ib_pc, s_pc, p_npc, h_w, h_a, q_i, a_i)
| DXing330/rpg_practice | RPG2v3/RPG2v3/RPG2subfiles/rpg2_quest_function.py | rpg2_quest_function.py | py | 4,635 | python | en | code | 0 | github-code | 90 |
18296293069 | # Original Submission At: https://atcoder.jp/contests/abc149/submissions/16823042
import sys
sys.setrecursionlimit(1000000)
x= int(input())
def prime_check(num, count):
    """Print the smallest prime >= num; `count` is the current trial divisor.

    Iterative rewrite of the original recursion: advance the divisor while
    it does not divide num; once count**2 exceeds num, num is prime and is
    printed. Finding a divisor restarts the scan at num + 1.
    """
    while True:
        if (num % count) == 0:
            num += 1
            count = 2
        elif num <= count ** 2:
            print(num)
            return
        else:
            count += 1
# Special-case 2: prime_check's divisor scan starts at 2, so for num=2 it
# would find 2 % 2 == 0 and report the next prime (3) instead.
if x==2 :
    print (2)
else:
    prime_check(x,2)
| Aasthaengg/IBMdataset | Python_codes/p02819/s532168997.py | s532168997.py | py | 391 | python | en | code | 0 | github-code | 90 |
71957206057 | import unittest
def solution(H):
    """Minimum number of rectangular blocks whose height profile is H
    (Codility StoneWall), using a stack of currently-open block heights."""
    open_heights = []
    blocks = 0
    for height in H:
        # Close every open block taller than the current height.
        while open_heights and open_heights[-1] > height:
            open_heights.pop()
        # Start a new block unless one of this exact height is still open.
        if not open_heights or open_heights[-1] != height:
            open_heights.append(height)
            blocks += 1
    return blocks
S=[]
S.append([[[8,8,5,7,9,8,7,4,8]],7])
class TestSolution(unittest.TestCase):
    """Runs solution() against every (args, expected) pair stored in S."""

    def test_solution(self):
        for case_args, expected in S:
            self.assertEqual(solution(*case_args), expected)
if __name__ == '__main__':
unittest.main() | eavaria/codility | lesson_7d.py | lesson_7d.py | py | 481 | python | en | code | 0 | github-code | 90 |
42569575236 | import pandas as pd
import sys
def add_completeness(codon, a_struct, a_errors, t_struct, t_errors, tRNA):
    """Classify a T-box row's completeness and tRNA match.

    Returns (completeness, tRNA_match) as strings: "Full" when the codon,
    antiterminator and terminator are all present and error-free, "None"
    when all three are missing/bad, "Partial" otherwise; tRNA_match is
    "True"/"False" depending on whether a tRNA sequence is present.
    """
    missing_antiterm = pd.isna(a_struct) or not pd.isna(a_errors)
    missing_term = pd.isna(t_struct) or not pd.isna(t_errors)
    if pd.isna(codon) and missing_antiterm and missing_term:
        completeness = "None"
    elif not (pd.isna(codon) or missing_antiterm or missing_term):
        completeness = "Full"
    else:
        completeness = "Partial"
    trna_flag = "False" if pd.isna(tRNA) else "True"
    return completeness, trna_flag
tboxes = pd.read_csv(sys.argv[1])
tboxes[["Completeness","tRNA_match"]] = tboxes.apply(lambda x: add_completeness(x['codon'], x['Trimmed_antiterm_struct'], x['vienna_antiterminator_errors'], x['Trimmed_term_struct'], x['new_term_errors'], x['trna_seq_top']), axis = 'columns', result_type = 'expand')
tboxes.to_csv(sys.argv[2], index = False) | mpiersonsmela/tbox | pipeline/add_completeness.py | add_completeness.py | py | 889 | python | en | code | 0 | github-code | 90 |
40236438791 | import unittest
import mock
from opencensus.trace.ext.requests import trace
class Test_requests_trace(unittest.TestCase):
    """Unit tests for the opencensus requests integration (trace module)."""

    def test_trace_integration(self):
        """trace_integration() must replace every method in
        REQUESTS_WRAP_METHODS on the requests module with the wrapper."""
        mock_wrap = mock.Mock()
        mock_requests = mock.Mock()
        wrap_result = 'wrap result'
        mock_wrap.return_value = wrap_result
        for func in trace.REQUESTS_WRAP_METHODS:
            mock_func = mock.Mock()
            mock_func.__name__ = func
            setattr(mock_requests, func, mock_func)
        patch_wrap = mock.patch(
            'opencensus.trace.ext.requests.trace.wrap_requests', mock_wrap)
        patch_requests = mock.patch(
            'opencensus.trace.ext.requests.trace.requests', mock_requests)
        with patch_wrap, patch_requests:
            trace.trace_integration()
        # Every wrapped method should now be the wrapper's return value.
        for func in trace.REQUESTS_WRAP_METHODS:
            self.assertEqual(getattr(mock_requests, func), wrap_result)

    def test_wrap_requests(self):
        """The wrapper must record url/status labels and a '[requests]<name>'
        span name on the tracer's current span."""
        mock_return = mock.Mock()
        mock_return.status_code = 200
        return_value = mock_return
        mock_func = mock.Mock()
        mock_func.__name__ = 'get'
        mock_func.return_value = return_value
        mock_tracer = MockTracer()
        patch = mock.patch(
            'opencensus.trace.ext.requests.trace.execution_context.'
            'get_opencensus_tracer',
            return_value=mock_tracer)
        wrapped = trace.wrap_requests(mock_func)
        url = 'http://localhost:8080'
        with patch:
            wrapped(url)
        expected_labels = {
            'requests/url': url,
            'requests/status_code': 200}
        expected_name = '[requests]get'
        self.assertEqual(expected_labels, mock_tracer.current_span.labels)
        self.assertEqual(expected_name, mock_tracer.current_span.name)

    def test_wrap_session_request(self):
        """Session-level wrapper: span name uses the HTTP method instead of
        the function name; labels are the same as for wrap_requests."""
        mock_return = mock.Mock()
        mock_return.status_code = 200
        return_value = mock_return
        mock_func = mock.Mock()
        mock_func.return_value = return_value
        mock_tracer = MockTracer()
        patch = mock.patch(
            'opencensus.trace.ext.requests.trace.execution_context.'
            'get_opencensus_tracer',
            return_value=mock_tracer)
        wrapped = trace.wrap_session_request(mock_func)
        url = 'http://localhost:8080'
        request_method = 'POST'
        with patch:
            wrapped(request_method, url)
        expected_labels = {
            'requests/url': url,
            'requests/status_code': 200}
        expected_name = '[requests]POST'
        self.assertEqual(expected_labels, mock_tracer.current_span.labels)
        self.assertEqual(expected_name, mock_tracer.current_span.name)
class TestTraceSession(unittest.TestCase):
    """TraceSession must subclass requests.Session and use the traced
    request method installed by wrap_session_request."""

    def test___init__(self):
        import requests
        mock_wrapped = mock.Mock()
        patch = mock.patch(
            'opencensus.trace.ext.requests.trace.wrap_session_request',
            return_value=mock_wrapped)
        with patch:
            session = trace.TraceSession()
        self.assertEqual(session.request, mock_wrapped)
        assert isinstance(session, requests.Session)
class MockTracer(object):
    """Minimal stand-in for an OpenCensus tracer used by the tests above.

    Tracks only the most recently started span, whose labels are collected
    in a plain dict for easy assertions.
    """

    def __init__(self):
        self.current_span = None

    def start_span(self):
        new_span = mock.Mock()
        new_span.labels = {}
        self.current_span = new_span
        return new_span

    def add_label_to_current_span(self, key, value):
        self.current_span.labels[key] = value

    def end_span(self):
        pass
| pombredanne/opencensus-python | trace/tests/unit/ext/requests/test_requests_trace.py | test_requests_trace.py | py | 3,522 | python | en | code | null | github-code | 90 |
31822842239 | import pygame
from Mode.Components import Component
from Mode.Components.Text import Text
class Button(Component):
    """Clickable rectangle with an optional icon and a centered text label."""

    def __init__(self, state, pos, size, label, on_click, icon=None, color=None):
        super().__init__(state, pos, size)
        self.enabled = True
        self.label = label
        self.on_click = on_click
        if color is None:
            self.border_color = self.state.colors['button_border']
        else:
            self.border_color = color
        self.icon = icon
        if self.icon is not None:
            self.icon_surface = pygame.image.load("assets/images/" + self.icon)

    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False

    def process_event(self, event):
        """Invoke on_click for a left-click inside the button; return whether
        the event was handled (always False while disabled)."""
        if not self.enabled:
            return False
        clicked = (event.type == pygame.MOUSEBUTTONDOWN
                   and event.button == 1
                   and self.is_mouse_over(event.pos))
        if clicked:
            self.on_click(self)
            return True
        return False

    def set_label(self, label):
        self.label = label

    def on(self):
        # Highlighted border while the button is active/toggled on.
        self.border_color = self.state.colors['button_border_on']

    def off(self):
        self.border_color = self.state.colors['button_border']

    def update(self):
        pass

    def render(self, surface):
        """Draw the border, optional icon, and label onto `surface`."""
        if not self.enabled:
            return False
        pygame.draw.rect(surface, self.border_color, self.get_rect(), 1)
        center_x = self.x + (self.width // 2)
        if self.icon:
            icon_x = center_x - (self.icon_surface.get_width() // 2)
            surface.blit(self.icon_surface, (icon_x, self.y))
            # With an icon, the label sits directly underneath it.
            Text(self.state, self.label, 'button',
                 midtop=(center_x, self.y + self.icon_surface.get_height()),
                 owidth=1.5,
                 ocolor="purple"
                 ).render(surface)
        else:
            Text(self.state, self.label, 'button',
                 center=(center_x, self.y + (self.height // 2)),
                 owidth=1.5,
                 ocolor="purple"
                 ).render(surface)
| Hypnopompia/PrinterController | Mode/Components/Button/Button.py | Button.py | py | 2,075 | python | en | code | 0 | github-code | 90 |
14760508567 | import pygame
import random
from settings import *
from sprites import *
from time import sleep
class Game():
    """Flappy-style game: owns the window, sprite groups, and the game loop."""

    def __init__(self):
        """Initialize pygame, the display surface, clock, and font."""
        pygame.init()
        pygame.mixer.init()
        self.screen = pygame.display.set_mode((WIDTH, HEIGHT))
        pygame.display.set_caption(TITLE)
        self.clock = pygame.time.Clock()
        self.running = True
        self.font_name = pygame.font.match_font(FONT_NAME)
    def new(self):
        """Reset score and rebuild all sprites, then start a fresh run."""
        self.score = 0
        self.all_sprites = pygame.sprite.Group()
        self.pipes = pygame.sprite.Group()
        self.clouds = pygame.sprite.Group()
        self.ground_sprite = pygame.sprite.Group()
        for cloud in CLOUDS_LIST:
            c = Background(*cloud)
            self.clouds.add(c)
        self.bird = Bird(self, BIRD_IMAGE)
        self.all_sprites.add(self.bird)
        self.ground = Background(0, HEIGHT - 40, WIDTH, 40, "images/ground.png")
        self.ground_sprite.add(self.ground)
        self.paused = False
        for pipe in PIPES_LIST:
            p = Pipe(*pipe)
            self.all_sprites.add(p)
            self.pipes.add(p)
        self.run()
    def run(self):
        # Game Loop
        self.playing = True
        self.win = False
        while self.playing:
            self.clock.tick(FPS)
            self.events()
            if not self.paused:
                self.update()
            self.draw()
    def update(self):
        """Advance one frame: collisions, scrolling, spawning, win/lose."""
        # Game Loop - Update
        self.all_sprites.update()
        # Check if bird hits a pipe
        hits = pygame.sprite.spritecollide(self.bird, self.pipes, False)
        if hits:
            self.playing = False
        # if bird reaches screen's width/2
        if self.bird.rect.x + self.bird.rect.width >= WIDTH / 2:
            # Keep the bird near mid-screen by scrolling the world left instead.
            self.bird.pos.x -= abs(self.bird.vel.x)
            for cloud in self.clouds:
                cloud.rect.x -= abs(self.bird.vel.x)
                if cloud.rect.x + cloud.rect.width < 0:
                    cloud.kill()
            for pipe in self.pipes:
                pipe.rect.x -= abs(self.bird.vel.x)
                if pipe.rect.x + pipe.rect.width < 0:
                    pipe.rect.x -= abs(self.bird.vel.x)
                    pipe.kill()
                if self.bird.pos.x >= pipe.rect.x + pipe.rect.width and pipe.active:
                    # Score one point per pipe passed; de_activate prevents recount.
                    self.score += 1
                    pipe.de_activate()
        # spawn clouds
        while len(self.clouds) < 4:
            width = random.randrange(80, 120)
            height = random.randrange(40, 80)
            cloud_pos = random.randrange(0, 400)
            cloud = Background(WIDTH,
                               (HEIGHT / 2 + CLOUD_GAP_Y - cloud_pos),
                               width, height, CLOUD_IMAGE)
            self.clouds.add(cloud)
        # spawn new pipes
        while len(self.pipes) < 6:
            for i in range(1, 14):
                # NOTE(review): range(1, 14) yields 1..13, so i == 14 is
                # impossible and the elif branch below is dead code.
                if i != 14:
                    r_high = random.randint(0, 1)
                    r_low = random.randint(2, 3)
                elif i == 14:
                    r_high = 1
                    r_low = 3
                p_high = Pipe(PIPES_LIST[3][0] + i * CONSEUTIVE_PIPE_GAP,
                              0, PIPE_WIDTH,
                              random.randrange(80, HEIGHT * 3 / 5),
                              PIPE_IMAGES_LIST[r_high])
                p_low = Pipe(PIPES_LIST[3][0] + i * CONSEUTIVE_PIPE_GAP,
                             p_high.rect.y + p_high.rect.height + PIPE_BW_GAP,
                             PIPE_WIDTH, HEIGHT * 3 / 4,
                             PIPE_IMAGES_LIST[r_low])
                self.pipes.add(p_high)
                self.all_sprites.add(p_high)
                self.pipes.add(p_low)
                self.all_sprites.add(p_low)
        # Die if fall!
        if self.bird.rect.bottom > self.ground.rect.top:
            for sprite in self.all_sprites:
                sprite.rect.y -= max(self.bird.vel.y, 10)
                if sprite.rect.y >= 0:
                    sprite.kill()
            self.playing = False
        # Winning condition
        if self.score >= WIN_SCORE:
            sleep(1)
            self.playing = False
            self.win = True
            self.running = True
    def events(self):
        """Handle quit, UP-to-fly, and P-to-pause inputs."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                if self.playing:
                    self.playing = False
                self.running = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    self.bird.fly()
                if event.key == pygame.K_p:
                    self.paused = not self.paused
    def draw(self):
        # Game Loop - Draw
        self.screen.fill(BGCOLOR)
        self.clouds.draw(self.screen)
        self.all_sprites.draw(self.screen)
        self.ground_sprite.draw(self.screen)
        self.draw_text('Score: {0}'.format(self.score), 22, WHITE, WIDTH / 2, 15)
        pygame.display.flip()
    def show_start_screen(self):
        # Game start screen
        self.screen.fill(ORANGE)
        self.draw_text(TITLE, 100, BLACK, WIDTH / 2, HEIGHT / 5)
        self.draw_text("Press UP to Fly...", 30,
                       BLUE, WIDTH / 2, HEIGHT / 2)
        self.draw_text("& SPACE + UP for Fly Boost!!",
                       30, BLUE, WIDTH / 2, HEIGHT / 2 + 50)
        self.draw_text("Press SPACE to Play!!", 30,
                       BLUE, WIDTH / 2, HEIGHT * 4 / 5)
        pygame.display.flip()
        self.wait_for_mouse_press()
    def show_game_over_screen(self):
        # Game over
        if not self.running:
            return
        self.screen.fill(RED)
        self.draw_text("GAME OVER :(", 72, BLACK, WIDTH / 2, HEIGHT / 4)
        self.draw_text("Your Score: {0}".format(
            self.score), 50, BLUE, WIDTH / 2, HEIGHT / 2)
        self.draw_text("Press SPACE to Play Again!!",
                       50, BLUE, WIDTH / 2, HEIGHT * 3 / 4)
        pygame.display.flip()
        self.wait_for_mouse_press()
    def show_win_screen(self):
        """Victory screen, shown once score reaches WIN_SCORE."""
        if not self.running:
            return
        self.screen.fill(LIGHTBLUE)
        self.draw_text("YOU WIN :D", 72, BLACK, WIDTH / 2, HEIGHT / 4)
        self.draw_text("Your Score: {0}".format(
            self.score), 50, ORANGE, WIDTH / 2, HEIGHT / 2)
        self.draw_text("Press SPACE to go to Menu Screen...",
                       50, ORANGE, WIDTH / 2, HEIGHT * 3 / 4)
        pygame.display.flip()
        self.wait_for_mouse_press()
    def wait_for_mouse_press(self):
        """Block until SPACE is pressed or the window is closed."""
        waiting = True
        while waiting:
            self.clock.tick(FPS)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    waiting = False
                    self.running = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        waiting = False
    def draw_text(self, text, size, color, x, y):
        """Render `text` with its top edge centered at (x, y)."""
        font = pygame.font.Font(self.font_name, size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (x, y)
        self.screen.blit(text_surface, text_rect)
# Top-level driver: show the menu, then run games until the player quits.
g = Game()
g.show_start_screen()
while g.running:
    g.new()
    if not g.win:
        g.show_game_over_screen()
    elif g.win:
        g.show_win_screen()
        g.show_start_screen()
pygame.quit()
| AbeerVaishnav13/Flappy-by-Abeer | Flappy.py | Flappy.py | py | 7,659 | python | en | code | 0 | github-code | 90 |
37761598283 | from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
from django.template.loader import get_template
import datetime
from .utils import render_to_pdf #created in step 4
class GeneratePdf(View):
    """Render the invoice template with sample data and return it as HTML
    (an in-browser preview; the PDF version is served by downloadPdf)."""

    def get(self, request, *args, **kwargs):
        invoice_context = {
            'today': datetime.date.today(),
            'amount': 39.99,
            'customer_name': 'Cooper Mann',
            'invoice_id': 1233434,
        }
        template = get_template('invoice.html')
        return HttpResponse(template.render(invoice_context))
class downloadPdf(View):
    """Render the download template with sample invoice data and return the
    result as an application/pdf response."""

    def get(self, request, *args, **kwargs):
        invoice_context = {
            'today': datetime.date.today(),
            'amount': 39.99,
            'customer_name': 'Cooper Mann',
            'invoice_id': 1233434,
        }
        pdf_bytes = render_to_pdf('download.html', invoice_context)
        return HttpResponse(pdf_bytes, content_type='application/pdf')
| ian-yitzhak/receipt | myapp/views.py | views.py | py | 981 | python | en | code | 0 | github-code | 90 |
25698287902 | from django.conf import settings
from rest_framework import serializers
from revibe._errors import network
from accounts.models import CustomUser
from content.models import Song
from metrics.models import *
# -----------------------------------------------------------------------------
class StreamSerializer(serializers.ModelSerializer):
    """Serializer for recording a playback Stream event.

    `song_id` is resolved to a local Song when possible; ids unknown to the
    catalog (e.g. third-party platform content) are stored as alternate_id.
    """
    # write-only
    song_id = serializers.CharField(write_only=True)
    user_id = serializers.CharField(write_only=True, required=False)
    platform = serializers.CharField(write_only=True, required=False, source='alternate_platform')
    class Meta:
        model = Stream
        fields = [
            'stream_duration',
            'is_downloaded',
            'is_saved',
            'song_id',
            'user_id',
            'lat',
            'long',
            'source',
            'platform',
        ]
    def create(self, validate_data):
        """Create a Stream, resolving song_id and attaching the opted-in user."""
        song_id = validate_data.pop('song_id')
        try:
            song = Song.objects.get(id=song_id)
            validate_data["song"] = song
        except Song.DoesNotExist as e:
            # Not a catalog song: keep the raw id so the play is still recorded.
            # raise network.BadEnvironmentError("This song_id has not yet been recorded, this is normal for non-Revibe content.")
            validate_data["alternate_id"] = song_id
        # NOTE(review): if supplied, 'user_id' remains in validate_data and is
        # passed straight to Stream.objects.create -- confirm the model accepts it.
        stream = Stream.objects.create(**validate_data)
        # Only associate the listener when they have opted in to listening data.
        user = self.context.get("request").user
        if user and user.profile.allow_listening_data:
            stream.user = user
            stream.save()
        return stream
# -----------------------------------------------------------------------------
# DEPRECATED
# Used to use AWS DynamoDB for tracking stream information
# class DynamoDBSerializer:
# def __init__(self, data=None, *args, **kwargs):
# if data == None:
# raise ValueError("Must include data when instantiating {}".format(self.__class__.__name__))
# assert hasattr(self, "Meta"), "Must implement a Meta class in a DynamoDBSerializer"
# assert hasattr(self.Meta, "model"), "Must implement a 'model' a DynamoDBSerializer.Meta"
# self.data=data
# self.validated = False
# self.errors = {}
# def is_valid(self, raise_exception=False):
# self.validate_data()
# if len(self.errors) == 0:
# self.validated = True
# return True
# elif raise_exception:
# key = next(iter(self.errors))
# value = self.errors[key]
# raise Exception("Invalid data: {} - {}".format(key, value))
# return False
# def validate_data(self, *args, **kwargs):
# for key in self.data.keys():
# if key not in self.Meta.fields:
# self.errors.update({key: "unknown field: {}".format(key)})
# for field in self.Meta.fields:
# if field not in self.data.keys():
# self.errors.update({field: "field '{}' must be included in data".format(field)})
# def save(self, *args, **kwargs):
# assert self.validated, "Must call is_valid"
# instance = self.create(self.data, *args, **kwargs)
# if not isinstance(instance, self.Meta.model):
# raise ValueError("Could not create row")
# self.instance = instance
# return instance
# def create(self, **validated_data):
# # instance = self.Meta.model(**validated_data) # don't think this will work but we'll find out
# # instance.save()
# # return instance
# raise NotImplementedError("must implement '{}.create()'".format(self.__class__.__name__))
# class StreamSerializer(DynamoDBSerializer):
# class Meta:
# model = Stream
# fields = [
# 'song_id',
# 'user_id',
# 'stream_duration',
# 'is_downloaded',
# 'is_saved',
# 'device',
# ]
# def create(self, validated_data, *args, **kwargs):
# environment = "test" if settings.DEBUG else "production"
# stream = self.Meta.model(
# song_id = validated_data['song_id'],
# user_id = validated_data['user_id'] if validated_data['user_id'] else 'opt-out',
# stream_duration = int(validated_data['stream_duration']),
# stream_percentage = validated_data['stream_percentage'],
# is_downloaded = validated_data['is_downloaded'],
# is_saved = validated_data['is_saved'],
# device = validated_data['device'],
# environment = environment
# )
# stream.save()
# return stream
| Revibe-Music/core-services | metrics/serializers/v1.py | v1.py | py | 4,598 | python | en | code | 2 | github-code | 90 |
26278504916 | '''
This file defines how to train and test the neural network.
The main function takes the following arguments:
- modes: A list containing a subset of ['train', 'test']
- epochs: Number of training epochs
- dataset_type: A string from ['torchvision', 'folder', 'custom'].
See dataset.py for more details.
- model_load_path: Load path of a saved model
- model_save_dir: Save directory for models saved during training
- save_every: Number of epochs to train before checkpoint saving
This code can be used as a skeleton for your own code.
'''
import os
import model
import dataset
import torch
def main(modes,
epochs = 1,
dataset_type = 'torchvision',
model_load_path = None,
model_save_dir = None,
save_every = 100):
'''
This beginning section is mainly for initialization of everything. Once everything
is initialized, we then define how to use the network.
'''
# Create a save directory if it doesn't already exist
if model_save_dir is not None and not os.path.exists(model_save_dir):
os.mkdir(model_save_dir)
'''
If you have access to an Nvidia GPU and CUDA, this line will use the GPU. It will
check automatically for you. For data that you want to send to the GPU, use the `to`
method, callable from the data. When we initialize the network for example, we use
the `to` function.
Common items to send to the GPU are:
- The network
- Inputs
- Outputs
- Labels
- Loss function
You do *not* send the optimizer to the GPU
'''
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Initialize datasets
train_data = test_data = None
if dataset_type == 'torchvision': train_data, test_data = dataset.dataset_torch()
elif dataset_type == 'folder': train_data, test_data = dataset.dataset_dir()
elif dataset_type == 'custom': train_data, test_data = dataset.dataset_custom()
# Initialize data loaders
train_loader, test_loader = dataset.create_dataloaders(train_data = train_data,
test_data = test_data)
'''
Initialize the model and load weights if applicable
To load a trained model, we load the `state_dict` of the model, which contains
information about the weights themselves as well as where the weights go within
the network.
'''
net = model.Example_Network().to(device)
if model_load_path is not None:
try:
net.load_state_dict(torch.load(model_load_path))
except:
net.load_state_dict(torch.load(model_load_path,
map_location = device))
# Initialize the loss function
loss_fn = model.Loss_Function().to(device)
'''
Initialize the optimizer:
lr: Learning rate
betas: Adam momentum terms
For other optimizers, visit https://pytorch.org/docs/stable/optim.html
'''
optimizer = None
if 'train' in modes:
optimizer = torch.optim.Adam(params = net.parameters(),
lr = 1e-4,
betas = (0.9, 0.999))
'''
HOW TO RUN ONE EPOCH
This function tells the network how to run an epoch based on the mode.
If the mode is 'train', then it will train the network. If the mode is
'test', then it will provide additional statistics for misclassification.
Regardless, a lot of the train/val/test code (val not done here) is similar,
so it makes sense to join them into one function.
'''
def run_epoch(mode):
# Initialize statistics
running_loss = 0
misclass = 0 if mode == 'test' else None
# Get the right data loader
loader = train_loader if mode == 'train' else test_loader
'''
Run through each batch
To get the data within a batch, all you need to do is iterate through
the loader. It will collect a batch automatically based on the parameters
used to initialize it.
'''
for data in loader:
'''
Clear the gradient for new batch, i.e. don't accumulate the gradient
from the previous batch. Instead, reset the gradient.
'''
if mode == 'train': optimizer.zero_grad()
'''
Collect the data from the batch
As you iterate, `data` will be a tuple containing the inputs and labels if
you used a torchvision dataset including ImageFolder. For custom datasets,
the `__getitem__` method determines the structure of the iterable. If we
see the `__getitem__` method of Dataset_Custom within dataset.py, we give
it the same return variables as a predefined torchvision dataset.
'''
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
'''
Pass the inputs through the network
Our network only takes in one set of image data as defined in
Example_Network's `forward` function. You can modify it to have more
inputs should your netowrk require it.
'''
outputs = net(inputs)
'''
Calculate the loss and update the running loss
The `item` function of a tensor gives a Python int, float, etc.
instead of a PyTorch tensor.
'''
loss = loss_fn(outputs, labels)
running_loss += loss.item()
'''
Update the model weights for training only
The `backward` function backpropogates to find the gradient of the
loss function. The `step` function in the optimizer then carries out
the weight update step. This is obviously only needed for training.
'''
if mode == 'train':
loss.backward()
optimizer.step()
# Count the number of misclassifications for testing only
elif mode == 'test':
'''
Extract the integer class labels
Here, our outputs and labels are of size (N,C) where N is the
size of the batch, and C is the number of classes (10). They are
one-hot vectors, so we want to compare whether or not the argmax
of each row matches between `outputs` and `labels`.
'''
outputs_class = torch.argmax(outputs, dim = 1)
labels_class = torch.argmax(labels, dim = 1)
# Accumulate misclassifications
misclass += (outputs_class != labels_class).sum().item()
return running_loss, misclass
'''
Function to save the model
To save the model, you don't save the network, but rather its `state_dict` which
contains the weights of the parameters among other things. Typically we use a
.pth or .pt extension
'''
def save_model(epoch = None):
# Get model name
model_name = 'model_epoch_{}'.format(epoch) if epoch is not None else 'model_best'
# Save the model
torch.save(net.state_dict(), os.path.join(model_save_dir, model_name + '.pth'))
print('\tSaved best model' if epoch is None else '\tCheckpoint saved')
'''
TRAINING PROCEDURE
Here, we define how to train the model. There are some preparations that need to
be done before calling the `run_epoch` function in a loop. Those steps are
described in detail below.
'''
if 'train' in modes:
print('Starting training...\n')
'''
Enable gradients to be stored
The default setting is that gradients are stored, so this line isn't necessary.
But why risk it? During testing, we turn this off since the gradients need not
be calculated.
'''
torch.set_grad_enabled(True)
'''
Allow model training
This tells our network that we intend to train it. This line of code is mainly
for batch normalization and dropout layers. It tells the network that we should
be using these layers for training.
'''
net.train()
# Initialize statistics
best_epoch = 0
best_epoch_loss = 1e9
'''
Train the model
Here, we run the training data through our network for however many epochs
we defined. Training statistics are printed to show that our model is actually
training, and can also be used to determine when to stop training. Early
stopping of training can easily be programmed if, for example, our loss
decreases by a small margin a certain number of times. This is not coded here,
and is left for you should you want an early stopping criterion of any sort.
'''
for epoch in range(1, epochs + 1):
print('Epoch {}:'.format(epoch))
# Train for one epoch
epoch_loss, _ = run_epoch(mode = 'train')
print('\tLoss = {:.8f}'.format(epoch_loss))
# Save the weights if the new model produces a lower loss
if epoch_loss < best_epoch_loss:
best_epoch_loss = epoch_loss
best_epoch = epoch
save_model()
# Checkpoint save
if epoch % save_every == 0 and model_save_dir is not None:
save_model(epoch)
# Save the last set of weights
if epoch % save_every != 0:
save_model(epoch)
print('\nTrain results: Epoch {} had the best loss of {:.8f}'.format(best_epoch,
best_epoch_loss))
'''
TESTING PROCEDURE
Like the testing procedure, there are some items to do before running `run_epoch`
over the testing data. Each step will be described in detail.
'''
if 'test' in modes:
if 'train' in modes: print('')
print('Starting testing...\n')
'''
Disable gradients from being stored
Since we are testing, we do not need to store the gradients. Gradients are
only needed when we train so the optimizer can update the network weights.
'''
torch.set_grad_enabled(False)
'''
Ignore batch norm and dropout layers (inference mode)
Here, we tell the network to ignore certain layers. For example, we do not
want to apply dropout when we test, since that is a training-specific layer.
The `eval` function does just that for us without having to define a new
testing model without dropout and batch normalization.
'''
net.eval()
# Test the network
test_loss, misclassifications = run_epoch(mode = 'test')
# Calculate the network's accuracy
accuracy = 100 * (1 - misclassifications / len(test_data))
print('Testing results:')
print('\tLoss = {:.8f}'.format(test_loss))
print('\tMisclassifications = {}/{}'.format(misclassifications, len(test_data)))
print('\tAccuracy = {:.4f}%'.format(accuracy))
if __name__ == '__main__':
main(modes = ['train', 'test'],
epochs = 10,
dataset_type = 'custom',
model_save_dir = 'Run_1',
save_every = 2) | IVPLatNU/Sample_PyTorch_Code | run_model.py | run_model.py | py | 10,028 | python | en | code | 6 | github-code | 90 |
4367169019 | import os, sys, urlparse
from inc.functions import *
from PySide.QtGui import QMainWindow
from ui.mainwindow import Ui_MainWindow
from inc.modules import themes, presets
class MainWindow(QMainWindow):
    """Top-level application window: wires the tab widget and menu actions."""

    def __init__(self):
        super(MainWindow, self).__init__()
        # Build the Designer-generated UI onto this window.
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.load_tab(0)
        # Reload the active module whenever the user switches tabs.
        self.ui.tabWidget.currentChanged.connect(self.load_tab)
        # Menu wiring.
        self.ui.actionExit.triggered.connect(sys.exit)
        self.ui.actionOptions.triggered.connect(self.optionsTriggered)

    def load_tab(self, index):
        """Lazily create the module window behind the tab at *index*, then refresh it."""
        label = self.ui.tabWidget.tabText(index)
        if label == 'Themes':
            if not hasattr(self, 'themesTab'):
                self.themesTab = themes.ThemesWindow(self.ui)
            self.themesTab.load_window()
        elif label == 'Presets':
            if not hasattr(self, 'presetsTab'):
                self.presetsTab = presets.PresetsWindow(self.ui)
            self.presetsTab.load_window()

    def optionsTriggered(self):
        """Open the configuration dialog (imported lazily, as in the original)."""
        from configuration import ConfigurationWindow
        self.configWindow = ConfigurationWindow()
        self.configWindow.show()
25244086854 | from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.priors import GammaPrior
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
from gp_models import StrictlyAdditiveKernel, ExactGPModel, RPPolyKernel, ProjectionKernel
from fitting.optimizing import train_to_convergence, mean_squared_error, learn_projections
from training_routines import create_strictly_additive_kernel, create_additive_rp_kernel
import torch
import gpytorch
from math import sqrt, pi
import numpy as np
import matplotlib.pyplot as plt
import gc
import json
import gpytorch.settings as gp_set
from gp_experiment_runner import run_experiment
device = 'cuda:7'
########## FUNCTIONS ####################
def unimodal_d_dim(x):
    """Single bump centred at the origin: exp(-||x||^2) for each row of x."""
    return torch.exp(-x.pow(2).sum(dim=1))
def bimodal_d_dim(x):
    """Two exponential bumps, centred at (+1,...,+1) and (-1,...,-1)."""
    n, d = x.shape
    ones = torch.ones(1, d).to(x)
    bump_neg = torch.exp(-torch.norm(x + ones, dim=1))
    bump_pos = torch.exp(-torch.norm(x - ones, dim=1))
    return bump_neg + bump_pos
def multimodal_d_dim(x):
    """Sum of d exponential bumps; bump i is centred at +1 in coordinate i
    and -1 in every other coordinate.

    Vectorized rewrite: the original built d separate (1, d) center tensors
    in a Python loop and accumulated d exp(-norm) terms one by one. Here the
    full (d, d) center matrix is built in one shot and all point-to-center
    distances are computed with a single cdist call — identical values,
    no per-dimension Python overhead or temporary allocations.
    """
    n, d = x.shape
    # Row i of `centers` is +1 at column i and -1 everywhere else.
    centers = (2.0 * torch.eye(d) - 1.0).to(x)
    # (n, d) Euclidean distances from every point to every center.
    dists = torch.cdist(x, centers)
    return torch.exp(-dists).sum(dim=1)
def leading_dim(x):
    """Dominant sinusoid in the first coordinate plus a weak bimodal term
    over the remaining coordinates."""
    n, d = x.shape
    sinusoid = torch.sin(pi * x[:, 0])
    return sinusoid + 0.4 * bimodal_d_dim(x[:, 1:])
def one_dim(x):
    """Sinusoid of the first coordinate only; every other dimension is ignored."""
    return torch.sin(pi * x[:, 0])
def half_relevant(x):
    """Target that depends only on the first half of the input dimensions."""
    n, d = x.shape
    relevant = x[:, : d // 2]
    return unimodal_d_dim(relevant)
def nonseparable(x):
    """Product of all coordinates -- not additively separable."""
    return torch.prod(x, dim=-1)
def additive(x):
    """Fully additive target: sum of per-coordinate sinusoids."""
    return torch.sum(torch.sin(x), dim=-1)
def non_additive(x):
    """Continuous XOR target: mixture of Gaussian bumps at +/-1.4 * e_i."""
    n, d = x.shape
    eye = torch.eye(d) * 1.4
    centers = torch.cat([eye, -eye])             # (2d, d) bump locations
    sq_dists = torch.cdist(x, centers).pow(2)    # (n, 2d) squared distances
    return torch.exp(-3 * sq_dists).sum(dim=1)
def benchmark_on_n_pts(n_pts, create_model_func, target_func, ho_x, ho_y, fit=True, repeats=3, max_iter=1000, return_model=False, verbose=0, checkpoint=True, print_freq=1, use_chol=False, **kwargs):
    """Fit and evaluate `repeats` independently sampled models on `n_pts` points.

    For each repeat: draw a fresh training set uniformly on [-2, 2]^dims,
    label it with `target_func` plus sigma=0.01 Gaussian noise, normalize
    train and hold-out data by the hold-out statistics, build a model via
    `create_model_func(data, y, **kwargs)`, optionally train it to
    convergence, and record the hold-out MSE.

    Returns (rep_mses, models, mlls); `models` and `mlls` are only populated
    when return_model is True.
    """
    dims = ho_x.shape[1]
    rep_mses = []
    models = []
    mlls = []
    for i in range(repeats):
        # Copy the hold-out tensors so normalization never edits the masters.
        test_ho_x = torch.empty_like(ho_x).copy_(ho_x)
        test_ho_y = torch.empty_like(ho_y).copy_(ho_y)
        # Fresh training data on [-2, 2]^dims with small observation noise.
        data = torch.rand(n_pts, dims)*4 - 2
        y = target_func(data) + torch.randn(n_pts)*0.01
        # Normalize by the TEST-set statistics for all methods so every
        # method/size is compared on a common scale.
        m = ho_x.mean(dim=0)
        s = ho_x.std(dim=0)
        data = (data - m) / s
        test_ho_x = (test_ho_x - m) / s
        # Same normalization for the targets.
        m = ho_y.mean()
        s = ho_y.std()
        y = (y - m) / s
        test_ho_y = (test_ho_y - m) / s
        # Build the model for this repeat.
        model = create_model_func(data, y, **kwargs)
        # Move everything to the GPU except the tiny n <= 20 runs.
        if n_pts > 20:
            test_ho_x = test_ho_x.to(device)
            test_ho_y = test_ho_y.to(device)
            model = model.to(device)
            data = data.to(device)
            y = y.to(device)
        # use_chol=True disables gpytorch's fast (CG/Lanczos) linear algebra.
        fast = not use_chol
        mll = None  # Only constructed when fitting; guards the cleanup below.
        with gp_set.fast_computations(fast, fast, fast), gp_set.max_cg_iterations(10_000):
            with gp_set.cg_tolerance(0.001), gp_set.eval_cg_tolerance(0.0005), gp_set.memory_efficient(True):
                if fit:
                    mll = ExactMarginalLogLikelihood(model.likelihood, model)
                    train_to_convergence(model, data, y, torch.optim.Adam, objective=mll, checkpoint=checkpoint,
                                         max_iter=max_iter, print_freq=print_freq, verbose=verbose)
                model.eval()
                with torch.no_grad():
                    mse = mean_squared_error(model(test_ho_x).mean, test_ho_y)
                print(i, mse)
                rep_mses.append(mse)
                if return_model:
                    models.append(model)
                    mlls.append(mll)
                else:
                    # Drop per-repeat objects promptly to free (GPU) memory.
                    # BUG FIX: `mll` is only bound when fit=True; the original
                    # `del mll` raised NameError on fit=False runs.
                    if mll is not None:
                        del mll
                    del model
                    del data
                    del y
    del ho_x
    del ho_y
    torch.cuda.empty_cache()
    gc.collect()
    return rep_mses, models, mlls
def benchmark_algo_on_func(create_model_func, target_func, dims=6, max_pts=2560, fit=True, repeats=3, start_after=0, use_chol=False, **kwargs):
    """Sweep training-set sizes for one model family on one synthetic target.

    Runs benchmark_on_n_pts at each size in a fixed doubling schedule,
    skipping sizes <= start_after and stopping once the size exceeds
    max_pts. Returns the list of mean RMSEs (one entry per size run).
    Partial results are checkpointed to a randomly named JSON file.
    """
    # Random tag so concurrent runs don't overwrite each other's progress logs.
    identifier = np.random.randint(0, 1e9)
    # NOTE(review): `file` shadows the Python 2 builtin name; harmless here.
    file = './progress_log_{:09d}.json'.format(identifier)
    print(file)
    rmses = []
    # Shared hold-out set: 4000 points uniform on [-2, 2]^dims, noiseless labels.
    ho_x = torch.rand(4000, dims)*4 - 2
    ho_y = target_func(ho_x)
    for n_pts in (10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240):
        if n_pts <= start_after:
            continue
        if n_pts > max_pts:
            break
        print('n_pts={}'.format(n_pts))
        rep_mses, _, _ = benchmark_on_n_pts(n_pts, create_model_func, target_func, ho_x, ho_y, fit=fit, repeats=repeats, use_chol=use_chol, **kwargs)
        # Mean of per-repeat RMSEs (sqrt applied before averaging).
        rmses.append(np.mean(np.sqrt(rep_mses)))
        # Checkpoint partial results after each size.
        json.dump(rmses, open(file, 'w'))
    return rmses
################## MODELS ###########################
def create_bl_model(data, y):
    """Baseline exact GP: a single scaled Matern kernel over all inputs jointly."""
    covar = ScaleKernel(MaternKernel())
    return ExactGPModel(data, y, GaussianLikelihood(), covar)
def create_rp_model(data, y, proj_ratio=1):
    """Randomly-projected GP using round(proj_ratio * d) projections with
    Matern-5/2 (nu=2.5) base kernels.

    NOTE(review): the positional arguments to RPPolyKernel (count, degree?,
    input dim, base kernel class) are defined in gp_models and not visible
    here -- confirm against that module before changing them.
    """
    n, d = data.shape
    kernel = ScaleKernel(RPPolyKernel(round(proj_ratio * d), 1, d, MaternKernel, nu=2.5, weighted=True,
                                      space_proj=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model
def create_poly_rp_model(data, y, J, k):
    """Degree-k randomly-projected polynomial kernel GP with J projections
    over RBF base kernels.

    BUG FIX: the original referenced RBFKernel, which is never imported at
    module level (only MaternKernel and ScaleKernel are), so every call
    raised NameError. It is imported locally here so the rest of the module
    is unaffected.
    """
    from gpytorch.kernels import RBFKernel
    n, d = data.shape
    kernel = ScaleKernel(RPPolyKernel(J, k, d, RBFKernel, weighted=True,
                                      space_proj=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model
def create_dpa_gp_ard_model(data, y, J):
    """GP with an additive randomly-projected RBF kernel (J projections) and
    ARD lengthscales.

    NOTE(review): the keyword semantics (prescale, proj_dist='sphere',
    mem_efficient, ...) are defined by create_additive_rp_kernel in
    training_routines and are not visible here -- verify there before editing.
    """
    n, d = data.shape
    kernel = ScaleKernel(create_additive_rp_kernel(d, J, learn_proj=False, kernel_type='RBF', space_proj=True, prescale=True, batch_kernel=False, ard=True, proj_dist='sphere', mem_efficient=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model
def create_gam_model(data, y):
    """Additive (GAM-style) exact GP: strictly additive RBF kernel, scaled."""
    n, d = data.shape
    additive_kernel = create_strictly_additive_kernel(d, False, 'RBF', memory_efficient=True)
    return ExactGPModel(data, y, GaussianLikelihood(), ScaleKernel(additive_kernel))
############# Configs #################
# Experiment: 6-D additive synthetic target, comparing the Matern baseline
# (logged under 'rbf') against the strictly additive GAM kernel, with
# Cholesky (exact) solves; this is a partial run resuming after n=600.
dims = 6
min_pts = 600
max_pts = 12000 # only partial
repeats = 15
func = additive
output_fname = 'test_synth_experiment_6d_additive_gam_partial_chol.json'
use_chol = True
# Baseline: single Matern kernel over all inputs (see create_bl_model).
rbf_rmses = benchmark_algo_on_func(create_bl_model, func, dims=dims, start_after=min_pts, max_pts=max_pts, repeats=repeats, use_chol=use_chol)
# Strictly additive (GAM-style) kernel.
gam_rmses = benchmark_algo_on_func(create_gam_model, func, dims=dims, start_after=min_pts, max_pts=max_pts, repeats=repeats, use_chol=use_chol)
# dpa_rmses = benchmark_algo_on_func(create_rp_model, func, dims=dims, max_pts=max_pts, repeats=repeats)
# dpa_ard_rmses = benchmark_algo_on_func(create_dpa_gp_ard_model, func, dims=dims, max_pts=max_pts, repeats=repeats, J=dims)
# Persist the sweep results; './run_outputs/' must already exist.
json.dump({
    'rbf': rbf_rmses,
    'gam': gam_rmses,
    # 'dpa': dpa_rmses,
    # 'dpa_ard': dpa_ard_rmses
},
    open('./run_outputs/{}'.format(output_fname), 'w'))
| idelbrid/Randomly-Projected-Additive-GPs | synthetic_test_script.py | synthetic_test_script.py | py | 7,910 | python | en | code | 25 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.