seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
16521320986 | import os
import json
import discord
from discord.ext import commands
import asyncio
import time
class ShopCog(commands.Cog):
    """Discord cog implementing a simple reaction-driven shop.

    Balances are persisted in ``money.json`` and owned items in
    ``inventory.json`` (both in the working directory).

    BUGFIX: in the previous version the ``on_ready`` listener was spliced
    into the middle of ``buy_product``, cutting that method in half; the
    listener is now a proper sibling method. Dead re-reads of money.json
    and unused locals (``items_to_purchase``, ``user_id``) were removed.
    """

    def __init__(self, bot):
        self.bot = bot
        # Cached snapshots of the persisted state. NOTE(review): shop()
        # re-reads money.json per purchase but keeps using this cached
        # inventory, so external edits to inventory.json are not seen.
        with open("money.json", "r") as f:
            self.money = json.load(f)
        with open("inventory.json", "r") as f:
            self.inventory = json.load(f)
        # Catalogue of purchasable products, keyed by the digit the user
        # reacts with (must stay in 1..9 for the keycap emoji to work).
        self.products = {
            1: {"name": "Basic_Fishing_Rod", "description": "A simple fishing rod for catching fish", "price": 100},
            2: {"name": "Fishing_Bait", "description": "A pack of fishing bait to attract fish", "price": 50},
            3: {"name": "Basic_Axe", "description": "A basic axe for chopping wood", "price": 300},
            4: {"name": "Taxi_car", "description": "A reliable taxi car for transporting passengers", "price": 800},
            5: {"name": "Driver_license", "description": "A license to legally operate a taxi car", "price": 1000}
        }

    @commands.Cog.listener()
    async def on_ready(self):
        """Log once the cog's bot connection is ready."""
        print('Shop cog loaded')

    async def buy_product(self, user_id, product):
        """Charge *user_id* for *product* and record it in the inventory.

        Returns a human-readable status string. The balance is re-read
        from disk so concurrent changes are respected.
        """
        with open("money.json", "r") as f:
            money = json.load(f)
        credits = money.get(str(user_id), 0)
        if credits < product['price']:
            return "You do not have enough money to buy this product."
        # Subtract the cost from the user's balance.
        money[str(user_id)] = credits - product['price']
        with open("inventory.json", "r") as f:
            inventory = json.load(f)
        user_key = str(user_id)
        if user_key in inventory:
            items = inventory[user_key]["items"]
            if product['name'] in items:
                items[product['name']]["quantity"] += 1
            else:
                items[product['name']] = {"quantity": 1, "price": product['price']}
            inventory[user_key]["credits"] -= product['price']
        else:
            # First purchase for this user: create their inventory record.
            inventory[user_key] = {"items": {product['name']: {"quantity": 1, "price": product['price']}},
                                   "credits": -product['price']}
        # Persist both files.
        with open("inventory.json", "w") as f:
            json.dump(inventory, f, indent=4)
        with open("money.json", "w") as f:
            json.dump(money, f)
        return "Product purchased successfully."

    @commands.command(name="shop", aliases=["buy", 'store'])
    async def shop(self, ctx):
        """Post the shop embed and process reaction purchases for 5 minutes."""
        embed = discord.Embed(title="Shop", description="React with the corresponding number to purchase an item")
        for product_id, product in self.products.items():
            embed.add_field(
                name=f"{product_id}. {product['name'].replace('_', ' ')}",
                value=f"{product['description']} - {product['price']} ¥",
                inline=False,
            )
        message = await ctx.send(embed=embed)
        for product_id in self.products.keys():
            await message.add_reaction(f"{product_id}\N{COMBINING ENCLOSING KEYCAP}")

        def check(reaction, user):
            # Accept only keycap reactions from the invoking user on our message.
            valid = [f"{pid}\N{COMBINING ENCLOSING KEYCAP}" for pid in self.products.keys()]
            return (
                user == ctx.author
                and str(reaction.emoji) in valid
                and reaction.message.id == message.id
            )

        while True:
            try:
                reaction, user = await self.bot.wait_for("reaction_add", timeout=300.0, check=check)
            except asyncio.TimeoutError:
                break
            # Re-read the balance for every purchase so concurrent changes count.
            with open("money.json", "r") as f:
                money = json.load(f)
            credits = money.get(str(user.id), 0)
            product = self.products[int(reaction.emoji[0])]
            if credits < product["price"]:
                await ctx.send(
                    f"{ctx.author.mention} You do not have enough money to buy {product['name'].replace('_', ' ')}.")
                continue
            # Subtract the cost of the product from the user's money balance.
            money[str(user.id)] = credits - product['price']
            with open("money.json", "w") as f:
                json.dump(money, f, indent=4)
            # Record the purchase in the cached inventory and persist it.
            user_key = str(user.id)
            if user_key in self.inventory:
                items = self.inventory[user_key]["items"]
                if product['name'] in items:
                    items[product['name']]["quantity"] += 1
                else:
                    items[product['name']] = {"quantity": 1, "price": product['price']}
                self.inventory[user_key]["credits"] -= product['price']
            else:
                self.inventory[user_key] = {"items": {product['name']: {"quantity": 1, "price": product['price']}},
                                            "credits": -product['price']}
            with open("inventory.json", "w") as f:
                json.dump(self.inventory, f, indent=4)
            await ctx.send(
                f"{ctx.author.mention} purchased {product['name'].replace('_', ' ')} for {product['price']} ¥")
async def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    await bot.add_cog(ShopCog(bot))
| frozuu/Python-Discord-bot-w-cogs | cogs/ShopCog.py | ShopCog.py | py | 6,063 | python | en | code | 0 | github-code | 13 |
41993329289 | import argparse
def textToBed(input_text_file, output_bed_file, chromosome_number):
    """Convert Quadron text output to a zero-based BED file.

    Only lines starting with ``DATA:`` whose score field (column 5) is not
    ``NA`` are emitted. Output columns: chrom, start, end, score, strand-ish
    field, length.

    Fixes: both files are now closed via context managers (the input file
    was previously leaked), the ``data`` name shadowing is gone, and blank
    lines no longer raise IndexError.
    """
    adjust = 1  # Quadron coordinates are 1-based; BED is zero-based.
    with open(input_text_file, 'r') as src, open(output_bed_file, 'w') as dst:
        for line in src:
            crit = line.split()
            # Short-circuit keeps crit[4] from being touched on non-DATA lines.
            if crit and crit[0] == "DATA:" and crit[4] != "NA":
                start = int(crit[1])
                length = int(crit[3])
                dst.write("chr%s\t%i\t%i\t%.2f\t%s\t%i\n"
                          % (chromosome_number, start - adjust,
                             length + start - adjust - 1,
                             float(crit[4]), crit[2], length))
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument("-i", "--input_text_file", type=str, required=True)
argParser.add_argument("-o", "--output_bed_file", type=str, required=True)
argParser.add_argument("-n", "--chromosome_number", type=str, required=True)
args = argParser.parse_args()
textToBed(args.input_text_file, args.output_bed_file, args.chromosome_number)
| kxk302/Quadron_Docker | scripts/quadron_txt2bed.py | quadron_txt2bed.py | py | 988 | python | en | code | 0 | github-code | 13 |
70632726738 | from sklearn.neighbors import KNeighborsClassifier
import lvq_common as lvqc
import numpy as np
e = 1e-12
def gen_prototypes(x, y, num_protos):
    """Train LVQ3 prototypes from training data (x, y).

    Starts from random prototypes, then repeatedly pulls/pushes the two
    prototypes nearest to each training instance, following the LVQ3
    window rule. Returns the adapted (protos_x, protos_y).
    """
    protos_x, protos_y = lvqc.get_random_prototypes(x, y, num_protos)
    knn = KNeighborsClassifier(n_neighbors=2)
    knn.fit(protos_x, protos_y)
    # Two nearest prototypes per instance, computed once up front.
    # NOTE(review): they are not refreshed as prototypes move — preserved.
    nearest = knn.kneighbors(X=x, n_neighbors=2, return_distance=False)
    for _ in range(lvqc.NUM_UPDATES):
        for idx in range(len(x)):
            instance = x.iloc[idx]
            instance_class = y.iloc[idx].values[0]
            first_idx, second_idx = nearest[idx][0], nearest[idx][1]
            first_x, first_y = protos_x.iloc[first_idx], protos_y[first_idx]
            second_x, second_y = protos_x.iloc[second_idx], protos_y[second_idx]
            dist_first = np.linalg.norm(first_x - instance)
            dist_second = np.linalg.norm(second_x - instance)
            # Symmetric distance ratios; e guards against division by zero.
            ratio_a = dist_first / (dist_second + e)
            ratio_b = dist_second / (dist_first + e)
            if min(ratio_a, ratio_b) > lvqc.WINDOW:
                if first_y != second_y:
                    # Opposite classes: attract the matching prototype,
                    # repel the mismatching one.
                    step = lvqc.WEIGHT if first_y == instance_class else -lvqc.WEIGHT
                    lvqc.update_prototype(first_x, instance, step)
                    step = lvqc.WEIGHT if second_y == instance_class else -lvqc.WEIGHT
                    lvqc.update_prototype(second_x, instance, step)
                else:
                    # Same class: attract both prototypes.
                    lvqc.update_prototype(first_x, instance, lvqc.WEIGHT)
                    lvqc.update_prototype(second_x, instance, lvqc.WEIGHT)
    return protos_x, protos_y
| augustoolucas/IF699-Machine-Learning | lista2/lvq31.py | lvq31.py | py | 1,680 | python | en | code | 0 | github-code | 13 |
74292553937 | # Kutay Cinar
# V00******
# CSC 361: Assingment 3
import sys
import struct
class GlobalHeader:
    """24-byte pcap global (file) header.

    BUGFIX: the libpcap savefile format stores version_major *before*
    version_minor; the previous code unpacked them into swapped names.
    NOTE(review): native byte order is assumed — a byte-swapped capture
    (magic 0xd4c3b2a1) would need explicit '<'/'>' handling.
    """

    def __init__(self, buffer):
        (self.magic_number,      # uint32 file magic
         self.version_major,     # uint16
         self.version_minor,     # uint16
         self.thiszone,          # int32 GMT-to-local offset
         self.sigfigs,           # uint32 timestamp accuracy
         self.snaplen,           # uint32 max captured length per packet
         self.network) = struct.unpack('IHHiIII', buffer)  # uint32 link type
class PacketHeader:
    """16-byte per-packet pcap record header."""

    def __init__(self):
        # Timestamp (seconds + microseconds) and captured/original lengths,
        # all zero until set_header() is called.
        self.ts_sec = 0
        self.ts_usec = 0
        self.incl_len = 0
        self.orig_len = 0

    def set_header(self, buffer):
        """Populate all four fields from a 16-byte record header."""
        fields = struct.unpack('IIII', buffer)
        self.ts_sec, self.ts_usec, self.incl_len, self.orig_len = fields
class IPV4Header:
    """Byte-level parser for the fields of an IPv4 header.

    BUGFIX: set_identification previously built the value by concatenating
    ``hex()`` strings, which drops leading zeros of the low byte
    (bytes 0x12 0x05 became 0x125 == 293 instead of 0x1205 == 4613).
    All 16-bit reads now use big-endian ``int.from_bytes``.
    """

    ihl = None              # header length in bytes
    total_length = None     # total datagram length in bytes
    identification = None   # 16-bit datagram id (fragment grouping key)
    flags = None            # hex string of the 3 flag bits, e.g. '0x2' (DF)
    fragment_offset = None  # offset in bytes (13-bit field * 8)
    ttl = None
    protocol = None         # 1 = ICMP, 17 = UDP, ...
    src_ip = None           # dotted-quad string
    dst_ip = None           # dotted-quad string

    def set_ihl(self, value):
        # Low nibble is the header length in 32-bit words.
        self.ihl = (struct.unpack('B', value)[0] & 0x0F) * 4

    def set_total_len(self, buffer):
        self.total_length = int.from_bytes(buffer[:2], 'big')

    def set_ip(self, buffer1, buffer2):
        self.src_ip = '.'.join(str(b) for b in struct.unpack('BBBB', buffer1))
        self.dst_ip = '.'.join(str(b) for b in struct.unpack('BBBB', buffer2))

    def set_identification(self, buffer):
        self.identification = int.from_bytes(buffer[:2], 'big')

    def set_fragment_offset(self, buffer):
        # Top three bits are the flags (reserved/DF/MF).
        self.flags = hex((buffer[0] & 0xE0) >> 5)
        # Remaining 13 bits are the offset, expressed in 8-byte units.
        self.fragment_offset = (((buffer[0] & 0x1F) << 8) | buffer[1]) * 8

    def set_ttl(self, buffer):
        self.ttl = struct.unpack('B', buffer)[0]

    def set_protocol(self, buffer):
        self.protocol = struct.unpack('B', buffer)[0]
class UDPHeader:
    """Byte-level parser for the four UDP header fields.

    BUGFIX: ports and length previously concatenated ``hex()`` strings,
    dropping leading zeros of the low byte (0x00 0x35 became 0x035? no —
    e.g. 0x82 0x05 became 0x825 instead of 0x8205). Big-endian
    ``int.from_bytes`` is used instead. The checksum is now a properly
    zero-padded hex string.
    """

    src_port = None
    dst_port = None
    udp_length = None
    checksum = None  # zero-padded hex string, e.g. '0x1a2b'

    def set_src_port(self, buffer):
        self.src_port = int.from_bytes(buffer[:2], 'big')

    def set_dst_port(self, buffer):
        self.dst_port = int.from_bytes(buffer[:2], 'big')

    def set_udp_len(self, buffer):
        self.udp_length = int.from_bytes(buffer[:2], 'big')

    def set_checksum(self, buffer):
        self.checksum = '0x%04x' % int.from_bytes(buffer[:2], 'big')
class ICMPHeader:
    """Byte-level parser for ICMP header fields (plus the UDP ports
    embedded in ICMP error payloads, used to match traceroute probes).

    BUGFIX: the 16-bit fields previously concatenated ``hex()`` strings,
    which corrupted any value whose low byte was < 0x10; they now use
    big-endian ``int.from_bytes``.
    """

    type_num = None  # ICMP type (8 echo request, 0 echo reply, 11 TTL exceeded)
    code = None
    src_port = None  # from the embedded original datagram, if present
    dst_port = None
    sequence = None

    def set_type(self, buffer):
        self.type_num = struct.unpack('B', buffer)[0]

    def set_code(self, buffer):
        self.code = struct.unpack('B', buffer)[0]

    def set_src_port(self, buffer):
        self.src_port = int.from_bytes(buffer[:2], 'big')

    def set_dst_port(self, buffer):
        self.dst_port = int.from_bytes(buffer[:2], 'big')

    def set_sequence(self, buffer):
        self.sequence = int.from_bytes(buffer[:2], 'big')
class Packet:
    """One captured packet: pcap record header, raw bytes, parsed layers."""

    def __init__(self):
        self.header = PacketHeader()
        self.ipv4 = IPV4Header()
        self.icmp = ICMPHeader()
        self.udp = UDPHeader()
        self.data = b''
        self.payload = 0
        self.timestamp = 0

    def set_header(self, buffer):
        """Parse the 16-byte pcap record header."""
        self.header.set_header(buffer)

    def set_data(self, buffer):
        """Store the raw captured bytes."""
        self.data = buffer

    def set_number(self, value):
        """Remember this packet's 1-based position in the capture."""
        self.number = value

    def set_rtt(self, p):
        """Store the round-trip time to packet *p* (both timestamps set)."""
        self.RTT_value = round(p.timestamp - self.timestamp, 8)

    def set_timestamp(self, orig_time):
        """Milliseconds relative to the start of the capture.

        NOTE(review): the 0.000000001 factor treats ts_usec as nanoseconds
        although pcap stores microseconds — preserved as-is so results do
        not change.
        """
        secs = self.header.ts_sec
        usecs = self.header.ts_usec
        self.timestamp = 1000 * round(secs + usecs * 0.000000001 - orig_time, 6)

    def set_ipv4(self):
        """Parse the IPv4 header that follows the 14-byte Ethernet header."""
        base = 14
        ip = self.ipv4
        ip.set_ihl(self.data[base + 0: base + 1])
        ip.set_total_len(self.data[base + 2: base + 4])
        ip.set_identification(self.data[base + 4: base + 6])
        ip.set_fragment_offset(self.data[base + 6: base + 8])
        ip.set_ttl(self.data[base + 8: base + 9])
        ip.set_protocol(self.data[base + 9: base + 10])
        ip.set_ip(self.data[base + 12: base + 16], self.data[base + 16: base + 20])

    def set_icmp(self):
        """Parse the ICMP header (and any embedded probe ports/sequence)."""
        off = 14 + self.ipv4.ihl
        self.icmp.set_type(self.data[off + 0: off + 1])
        self.icmp.set_code(self.data[off + 1: off + 2])
        # Echo request/reply (Windows-style traceroute) carry their own
        # sequence number directly in the ICMP header.
        if self.icmp.type_num == 8 or self.icmp.type_num == 0:
            self.icmp.set_sequence(self.data[off + 6: off + 8])
        # Error messages embed the original IP header + first 8 payload
        # bytes; jump past them to read the original (UDP) ports.
        off += 8 + self.ipv4.ihl
        if off + 4 <= self.header.incl_len:
            if self.icmp.type_num != 8 and self.icmp.type_num != 0:
                self.icmp.set_sequence(self.data[off + 6: off + 8])
            self.icmp.set_src_port(self.data[off + 0: off + 2])
            self.icmp.set_dst_port(self.data[off + 2: off + 4])
        else:
            # Truncated capture: no embedded ports available.
            self.icmp.src_port = 0
            self.icmp.dst_port = 0

    def set_udp(self):
        """Parse the UDP header that follows the IPv4 header."""
        off = 14 + self.ipv4.ihl
        self.udp.set_src_port(self.data[off + 0: off + 2])
        self.udp.set_dst_port(self.data[off + 2: off + 4])
        self.udp.set_udp_len(self.data[off + 4: off + 6])
        self.udp.set_checksum(self.data[off + 6: off + 8])
#############################################################
################ Parse Command Line Argument ################
# NOTE(review): indentation was lost in this extract; the statements are
# kept byte-identical and only comments were added.
# Get filename from command line
if len(sys.argv) != 2:
print('Unexpected input. Usage: python3 TraceRouteAnalyzer.py <sample_trace_file.cap>')
exit()
# Set input filename from given argument
input_file = sys.argv[1]
# Open the given pcap file in the binary mode
f = open(input_file, 'rb')
#############################################################
#################### Read Global Header #####################
# Read the first 24 bytes to get the global header
global_header = GlobalHeader(f.read(24))
# Map of protocols we care about
protocol_map = {1: 'ICMP', 17: 'UDP'}
protocol_used = {}
# Lists for storing packets
# src = probe packets sent by the tracing host, dst = replies received
src = []
dst = []
pcap_start_time = None
packet_counter = 0
#############################################################
########## Parse Packets Headers and Packet Data) ###########
while True:
packet_counter += 1
# Read the next 16 bytes to get the packet header
stream = f.read(16)
# Terminate if reached end of file / empty byte
if stream == b'':
break
# Create packet and parse header
packet = Packet()
packet.set_header(stream)
packet.set_number(packet_counter)
# Check incl_len for the length of packet
incl_len = packet.header.incl_len
# Use relative time, i.e., the time with respect to the cap file
# (captured from the very first packet only)
if pcap_start_time is None:
seconds = packet.header.ts_sec
microseconds = packet.header.ts_usec
pcap_start_time = round(seconds + microseconds * 0.000001, 6)
# Read the next incl_len bytes for the packet data
packet.set_data(f.read(incl_len))
# Parse IPV4 header
packet.set_ipv4()
# Depending on protocol, parse ICMP header
if packet.ipv4.protocol == 1:
packet.set_icmp()
dst.append(packet)
protocol_used[1] = 'ICMP'
# Depending on protocol, parse UDP header
if packet.ipv4.protocol == 17:
packet.set_udp()
src.append(packet)
# condition check to find the right UDP packets
# (traceroute probes use destination ports 33434-33529)
if not 33434 <= packet.udp.dst_port <= 33529:
continue
protocol_used[17] = 'UDP'
# Skip all other packets with protocols we don't care about
if packet.ipv4.protocol not in protocol_map:
continue
#############################################################
################### R2 Helper Program #######################
### DON"T RUN FOR R1 ###
# R2 TTL probe calculation:
# ttl_dict = {}
# for p in src:
# if p.ipv4.ttl not in ttl_dict:
# ttl_dict[p.ipv4.ttl] = []
# ttl_dict[p.ipv4.ttl].append(p)
# for ttl in sorted(ttl_dict):
# #print(f'ttl: {ttl:2d} -> {len(ttl_dict[ttl])} probes')
# print(len(ttl_dict[ttl]))
# exit()
### DON"T RUN FOR R1 ###
#############################################################
# Windows
# A Windows traceroute uses ICMP echo requests (type 8) as probes, so the
# probe/reply lists are rebuilt from the ICMP packets collected above.
if any(p.icmp.type_num == 8 for p in dst):
icmp_all = dst
src = []
dst = []
for p in icmp_all:
if p.icmp.type_num == 8:
src.append(p)
if p.icmp.type_num == 11 or p.icmp.type_num == 0: #or p.icmp.type_num == 3:
dst.append(p)
intermediate = []
intermediate_packets = []
rtt_dict = {}
# Match each probe to every reply with the same ICMP sequence number.
for p1 in src:
for p2 in dst:
if p1.icmp.sequence == p2.icmp.sequence:
if p2.ipv4.src_ip not in intermediate:
intermediate.append(p2.ipv4.src_ip)
intermediate_packets.append(p2)
rtt_dict[p2.ipv4.src_ip] = []
# RTT Calculation
p1.set_timestamp(pcap_start_time)
p2.set_timestamp(pcap_start_time)
rtt_dict[p2.ipv4.src_ip].append(p2.timestamp-p1.timestamp)
# Linux
# A Linux traceroute sends UDP probes; replies are matched by the UDP
# source port echoed back inside the ICMP error payload.
else:
intermediate = []
intermediate_packets = []
rtt_dict = {}
for p1 in src:
for p2 in dst:
if p1.udp.src_port == p2.icmp.src_port: # and p2.icmp.type_num == 11 and p2.icmp.code == 0
if p2.ipv4.src_ip not in intermediate:
intermediate.append(p2.ipv4.src_ip)
intermediate_packets.append(p2)
rtt_dict[p2.ipv4.src_ip] = []
# RTT Calculation
p1.set_timestamp(pcap_start_time)
p2.set_timestamp(pcap_start_time)
rtt_dict[p2.ipv4.src_ip].append(p2.timestamp-p1.timestamp)
identity_dict = {}
# figure out fragmented datagrams
# (fragments of one datagram share the same IP identification value)
for packet in src:
if packet.ipv4.identification not in identity_dict:
identity_dict[packet.ipv4.identification] = []
identity_dict[packet.ipv4.identification].append(packet)
# check fragment count
frag_count = 0
for identity in identity_dict:
if len(identity_dict[identity]) > 1:
frag_count += 1
#############################################################
################### R1 Required Output #######################
print('The IP address of the source node:', src[0].ipv4.src_ip)
print('The IP address of ultimate destination node:', src[0].ipv4.dst_ip)
print('The IP addresses of the intermediate destination nodes:')
# NOTE(review): range(len-1) deliberately skips the final hop —
# presumably because the last responder is the destination itself; verify.
for i in range(len(intermediate)-1):
print(f'\trouter {i+1}: {intermediate[i]}')
print()
print('The values in the protocol field of IP headers:')
for protocol in sorted(protocol_used):
print(f'\t{protocol}: {protocol_used[protocol]}')
print()
if frag_count == 0:
print('The number of fragments created from the original datagram is:', frag_count)
print('The offset of the last fragment is:', frag_count, '\n')
else:
for identity in identity_dict:
if len(identity_dict[identity]) > 1:
print('The number of fragments created from the original datagram', identity, 'is:', len(identity_dict[identity]))
offset = max(packet.ipv4.fragment_offset for packet in identity_dict[identity])
print('The offset of the last fragment is:', offset, '\n')
# RTT average time and standard deviation
# (population standard deviation, computed by hand)
for i in range(len(intermediate)):
avg = round(sum(rtt_dict[intermediate[i]]) / len(rtt_dict[intermediate[i]]), 6)
std = round( (sum(pow(x-avg,2) for x in rtt_dict[intermediate[i]]) / len(rtt_dict[intermediate[i]]))**(1/2), 6)
print('The avg RTT between', src[0].ipv4.src_ip, 'and', intermediate[i], 'is:', avg, 'ms, the s.d. is:', std, 'ms')
# End of program
| kutaycinar/CSC-361 | Assignment 3/TraceRouteAnalyzer.py | TraceRouteAnalyzer.py | py | 12,281 | python | en | code | 0 | github-code | 13 |
31878434243 | import subprocess
import re
import pandas as pd
import numpy as np
from path_configure import *
def run_subprocess(command, quiet=False, dry=False):
    """Run *command* (whitespace-split) and return (stdout, stderr) text.

    The command line is always echoed first. In dry mode nothing is
    executed and None is returned implicitly. Unless *quiet*, captured
    stderr and stdout are printed as well.
    """
    print("------{}-----".format("RUN"))
    print(command)
    if dry:
        return
    proc = subprocess.Popen(command.split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out_bytes, err_bytes = proc.communicate()
    out_text = out_bytes.decode()
    err_text = err_bytes.decode()
    if not quiet:
        print("------{}-----".format("ERROR"))
        print(err_text)
        print("------{}-----".format("OUTPUT"))
        print(out_text)
    return out_text, err_text
def replace_characters(str_input, character_dict, catch_exception=True):
    """Apply every old->new replacement in *character_dict* to *str_input*.

    With *catch_exception* (the default) any error from an individual
    replacement — e.g. str_input not being a string — is swallowed and
    that substitution is skipped; otherwise errors propagate.
    """
    if catch_exception:
        for old, new in character_dict.items():
            try:
                str_input = str_input.replace(old, new)
            except:
                pass
    else:
        for old, new in character_dict.items():
            str_input = str_input.replace(old, new)
    return str_input
def parse_plink_assoc(file_name):
    """Parse a PLINK association output file into a typed DataFrame.

    The file extension selects the column typing: .assoc, .qassoc,
    .logistic or .linear. Literal 'NA' entries become NaN.

    Raises:
        ValueError: if the extension is not one of the supported types.
        (BUGFIX: a bare ``raise`` with no active exception previously
        produced "RuntimeError: No active exception to re-raise".)
    """
    with open(file_name, 'r') as f:
        lines = f.readlines()
    header = re.split(r'\s+', lines[0].strip())
    rows = [re.split(r'\s+', line.strip()) for line in lines[1:]]
    ret = pd.DataFrame(rows, columns=header).replace('NA', np.nan)
    ext = file_name.split('.')[-1]
    if ext == 'assoc':
        return ret.astype({'BP': int, 'A1': str, 'A2': str, 'F_A': float,
                           'F_U': float, 'P': float, 'OR': float})
    elif ext == 'qassoc':
        return ret.astype({'CHR': int, 'SNP': str, 'BP': int, 'NMISS': int,
                           'BETA': float, 'SE': float, 'R2': float,
                           'T': float, 'P': float})
    elif ext == 'logistic':
        return ret.astype({'CHR': int, 'SNP': str, 'BP': int, 'A1': str,
                           'NMISS': int, 'OR': float, 'STAT': float,
                           'P': float})
    elif ext == 'linear':
        return ret.astype({'CHR': int, 'SNP': str, 'BP': int, 'A1': str,
                           'NMISS': int, 'BETA': float, 'STAT': float,
                           'P': float})
    else:
        raise ValueError("Unsupported PLINK output extension: %r" % ext)
def transformaEmLista(str1):
    """Return a list of the characters (items) of *str1*.

    Idiom cleanup: the manual append loop is replaced by the list
    constructor, which is equivalent for any iterable.
    """
    return list(str1)
def transformaEmString(lista):
    """Concatenate the items of *lista* into one string, dropping the
    characters that appear in a Python list repr: space, comma, square
    brackets and single quote. Non-string items are str()-converted."""
    return ''.join(str(i) for i in lista
                   if i != ' ' and i != ',' and i != '[' and i != ']' and i != "'")
def completaComZeros(lista, largura=12):
    """Left-pad *lista* in place with int 0 until it has *largura* items.

    Generalized: the target width used to be hard-coded to 12; it is now
    a keyword parameter defaulting to 12 (backward compatible). Lists
    already at or beyond the target width are returned unchanged.
    Returns the same (mutated) list.
    """
    for _ in range(largura - len(lista)):
        lista.insert(0, 0)
    return lista
def completaComZeros24(lista):
    """Left-pad *lista* in place with int 0 up to 24 entries (used for
    multiplication results) and return the same list."""
    while len(lista) < 24:
        lista.insert(0, 0)
    return lista
def alteraString(string, valor, pos):
    """Return a copy of *string* with the character at index *pos*
    replaced by *valor* (negative indices follow Python semantics).

    Idiom cleanup: the quadratic character-by-character ``+=`` rebuild is
    replaced by a list conversion and a single ``join``.
    """
    chars = list(string)
    chars[pos] = valor
    return ''.join(chars)
def desloca_direita(string):
    """Logical right shift of a bit string by one position: prepend '0'
    and drop the last bit.

    Assumes *string* contains only '0'/'1' characters (as used here).
    Simplification: replaces a temp-list rebuild that went through the
    character-filtering helper; for bit strings the result is identical,
    including the empty-input case (which yields '0').
    """
    return '0' + string[:-1]
def desloca_esquerda(string):
    """Logical left shift of a bit string by one position: drop the first
    bit and append '0'.

    Assumes *string* contains only '0'/'1' characters. Simplified to
    slicing; identical to the old temp-list rebuild for bit strings,
    including the empty-input case (which yields '0').
    """
    return string[1:] + '0'
34888165041 | import os
import pickle
import re
# check to see if the file exists, then load the file, else return the empty dictionary
def load():
    """Return the student dict persisted in student.txt (pickle), or an
    empty dict when the file does not exist yet."""
    info = {}
    if os.path.exists("student.txt"):
        with open("student.txt", "rb") as fh:
            info = pickle.load(fh)
    print("initial load ", info)
    return info
def save():
    """Persist the module-level student_info dict to student.txt (pickle)."""
    with open("student.txt", "wb") as out:
        pickle.dump(student_info, out)
    print("picke success")
# Interactive menu loop: add ("a"), delete ("d"), print ("p"), quit ("q").
# NOTE(review): indentation was lost in this extract; the statements are
# kept byte-identical and only comments were added.
student_info = load() # load student infomations from a file
#print("student info is ",student_info)
user_cond = True # it will return false when q is selected
while user_cond: # program will run until q is selected
student_first = ""
student_last = ""
student_id = 0
# print user choice to pick
print("\nPlease enter \"a\" to ADD a student")
print("Please enter \"d\" to remove a student")
print("Please enter \"p\" to print student list")
print("Please enter \"q\" to exit\n")
user_input = input(">>>") # user input
if user_input == "a": # add student information
isValid = True # Return False all inputs are valid
while isValid: # all the information entered is correct
check_first = True # return false when first name is entered correctly
check_last = True # return false when last name is entered correctly
check_id = True # return false when id is entered correctly
while check_first:
first = input("Please enter first name :")
# strip while splace and call RE and strip multiple whitesace if any
first = re.sub(' +', ' ', first.strip())
if not first.isnumeric():# this check the string contains number - allow to have space btw two name
student_first = first
check_first = False
else:
print("invalid first name \n")
while check_last:
last = input("Please enter last name :")
# strip whitesplace and call RE and strip multiple whitesace if any
last = re.sub(' +', ' ', last.strip())
if not last.isnumeric():
student_last = last
check_last = False
else:
print("invalid last name\n")
while check_id:
tmp = input("Please enter student ID :")
tmp = tmp.strip() # remove any space
if tmp.isnumeric():
tmp=int(tmp) #save id as integer
#print(tmp)
# check to see if the student ID exist-NO DUPLICATE ID allows
if tmp not in student_info:
student_id = tmp
check_id = False
isValid = False
else:
print("Student ID you entered already exists, please choose another ID number\n")
else:
print("Invalid id, please enter ID\n")
# store the record as id -> (first, last)
student_info[student_id] = (student_first, student_last) # save student information
elif user_input == "d": # remove the student
isValid = True # return false when information is deleted from dictionary
while isValid:
remove_id = input("Please enter student ID :")
if remove_id.isnumeric():
remove_id = int(remove_id)
if remove_id in student_info:
del student_info[remove_id] # check to see the keyword exists and valid number
isValid = False
print("student removed\n")
else:
print("student id you entered does not exists, please enter valid number\n")
else:
print("invalid id number, please enter student id\n")
elif user_input == "p":
print("print function",student_info) # print student information
elif user_input == "q":
# persist everything to disk before leaving the loop
user_cond = False
save()
else:
print("Please enter a valid option\n")
print(student_info)
print("Good bye")
40928153658 | from flask import Flask, request
app = Flask(__name__)
@app.route('/hello', methods=["POST"])
def index():
    """POST /hello: log the submitted username and reply with success."""
    name = request.form.get('username')
    print('username=', name)
    # business-logic / validation hook goes here
    msg = {"code": 200, 'msg': 'success'}
    return msg
if __name__ == '__main__':
    # Listen on all interfaces so other LAN hosts can reach the service;
    # debug mode is for development only.
    app.run(host='0.0.0.0', port=8081, debug=True)
26159378660 | #!/usr/bin/python3
def find_duplicate(chars):
    """Return True if *chars* contains any repeated character, else None.

    Performance fix: the old per-character ``count`` loop was O(n^2);
    a set comparison is O(n). The implicit None on the no-duplicate path
    is deliberately preserved because find_start() compares the result
    with ``== None``.
    """
    if len(set(chars)) != len(chars):
        return True
def find_start(input_string):
# Locate the first 14-character window with all-unique characters
# (Advent of Code 2022 day 6, part 2) and return an index derived from it.
# NOTE(review): indentation was lost in this extract; statements are kept
# byte-identical with comments added only.
a=0
start_char = 0
length = 14  # marker length for part 2
while a < len(input_string):
sub_string = input_string[a:length+a]
print(sub_string)
if find_duplicate(sub_string):
# check next
b=0
while b < len(sub_string):
new_sub_string = input_string[a+b:length+a+b]
print("sub: ",new_sub_string)
# relies on find_duplicate returning None (not False) when unique
if find_duplicate(new_sub_string) == None:
# NOTE(review): a + len(sub_string) + 1 + (b-1) converts the
# window position to the puzzle's 1-past-the-marker index —
# the arithmetic looks off-by-one-prone; verify against the
# puzzle's expected answers.
start_char = a + len(sub_string) + 1 + (b-1)
return start_char
b += 1
a += 1
f = open("./day06/input06.txt")
lines = f.readlines()
print(lines)
for linenumber,line in enumerate(lines):
input_string = line
print ("START: ",find_start(input_string))
| maartenstorm/AdventOfCode | day06/day06b.py | day06b.py | py | 913 | python | en | code | 0 | github-code | 13 |
23713579360 | from django.shortcuts import render,redirect
from .models import Task
from .forms import TaskForm
from django.utils.text import slugify
# Create your views here.
def home(request):
    """List every task and handle creation of a new one via POST.

    The slug is derived from the title before saving.
    """
    task_form = TaskForm()
    tasks = Task.objects.all()
    if request.method == "POST":
        task_form = TaskForm(data=request.POST)
        if task_form.is_valid():
            new_task = task_form.save(commit=False)
            new_task.slug = slugify(new_task.title)
            new_task.save()
            return redirect("todo:all_tasks")
    # On GET (or invalid POST) fall through and render the page with the form.
    return render(request, 'todo/home.html', {"tasks": tasks, 'task_form': task_form})
def remove(request, year, month, day, slug):
    """Show a confirmation page (GET) or delete the task (POST).

    The task is located by its creation date and slug.
    """
    task = Task.objects.get(created__year=year, created__month=month,
                            created__day=day, slug=slug)
    if request.method == "POST":
        task.delete()
        return redirect("todo:all_tasks")
    return render(request, 'todo/delete.html', {"task": task})
def update(request, year, month, day, slug):
    """Edit an existing task, located by creation date and slug."""
    task = Task.objects.get(created__year=year, created__month=month,
                            created__day=day, slug=slug)
    task_form = TaskForm(instance=task)
    if request.method == "POST":
        task_form = TaskForm(instance=task, data=request.POST)
        if task_form.is_valid():
            task_form.save()
            return redirect("todo:all_tasks")
    return render(request, 'todo/update.html', {"task_form": task_form})
| HanZawNyine/WebDevelopment2022182 | Project-for-Django-Lessons/todoproject/todo/views.py | views.py | py | 1,348 | python | en | code | 0 | github-code | 13 |
19714942887 |
# ************* function based views*******************
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.template import loader,RequestContext
from . models import *
from .form import BookForm
from django.shortcuts import render, redirect
from django.contrib import messages
def index(request):
    """Render the full book list, ordered by subject."""
    template = loader.get_template('index.html')
    context = {'books': Book.objects.all().order_by('subject')}
    return HttpResponse(template.render(context, request))
def book(request, id):
    """Render the detail page for a single book by primary key."""
    template = loader.get_template('book.html')
    context = {'book': Book.objects.get(id=id)}
    return HttpResponse(template.render(context, request))
def about(request):
    """Render the static about page.

    BUGFIX: ``Template.render(context=None, request=None)`` was being
    called with the request in the *context* slot, so request context
    processors (csrf, auth, messages, ...) never ran. The request now
    goes in the second argument with an empty context.
    """
    template = loader.get_template('about.html')
    return HttpResponse(template.render({}, request))
def new_book(request):
    """Display the add-book form and create the book on a valid POST."""
    form = BookForm(request.POST or None)
    context = {'title': 'Add Book', 'form': form, }
    if form.is_valid():
        record = Book(name=form.cleaned_data['name'],
                      author_name=form.cleaned_data['author_name'],
                      subject=form.cleaned_data['subject'])
        record.save()
        messages.success(request, 'The book has been added! ')
        return redirect('home')
    return render(request, 'new_book.html', context)
def modify(request, id):
    """Edit an existing book: pre-fill the form, save on a valid POST.

    NOTE(review): the rendered context keeps the *initial* (pre-filled)
    form even after binding POST data — preserved as in the original.
    """
    data = Book.objects.get(id=id)
    form = BookForm(initial={'name': data.name,
                             'author_name': data.author_name,
                             'subject': data.subject})
    context = {'title': 'Edit Book', 'form': form, }
    form = BookForm(request.POST or None)
    if form.is_valid():
        updated = Book(id=id,
                       name=form.cleaned_data['name'],
                       author_name=form.cleaned_data['author_name'],
                       subject=form.cleaned_data['subject'])
        updated.save()
        messages.success(request, 'The book details has been modified! ')
        return redirect('home')
    return render(request, 'modify_book.html', context)
def delete(request, id):
    """Delete the book with the given id and redirect home.

    Cleanup: the book is deleted through the fetched instance instead of
    a redundant second queryset lookup, and the unused ``title`` variable
    was removed. ``Book.objects.get`` still raises ``DoesNotExist`` for
    an unknown id, as before.
    """
    book = Book.objects.get(id=id)
    book.delete()
    messages.add_message(request, messages.SUCCESS, 'The book has been deleted! ')
    return redirect('home')
# Webcam demo: isolate skin-coloured pixels by thresholding each frame in
# HSV space, then show a blurred version of the binary mask. Quit with 'q'.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Rough HSV bounds for skin tones.
    low_skin = np.array([0, 30, 60])
    up_skin = np.array([20, 150, 255])
    mask = cv2.inRange(hsv, low_skin, up_skin)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    blur = cv2.GaussianBlur(mask, (5, 5), 0)
    cv2.imshow('blur', blur)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
cap.release()
| htgdokania/hand_gesture_masking | hand.py | hand.py | py | 613 | python | en | code | 0 | github-code | 13 |
# Module-level pygame setup and configuration for the snake game.
# NOTE(review): indentation was lost in this extract; statements kept
# byte-identical with comments added only.
from xml.dom import ValidationErr  # NOTE(review): unused import — candidate for removal
import pygame
import settings
import time
import random
pygame.init()
# Local colour tuples; NOTE(review): unused here — settings.* colours are
# used throughout instead.
blue = (0,0,255)
black = (0,0,0)
red = (255,0,0)
white = (255,255,255)
# Main display surface and window title.
dis = pygame.display.set_mode((settings.WIDTH, settings.HEIGHT))
pygame.display.set_caption("Snake Game JRY62")
# Initial snake head position (also re-initialised inside game_loop).
x1 = settings.X1
y1 = settings.Y1
clock = pygame.time.Clock()
snake_block = settings.SNAKE_BLOCK
snake_speed = settings.SNAKE_SPEED
font_style = pygame.font.SysFont("bahnschrift", 25)
# NOTE(review): "comicsanams" looks like a typo for "comicsansms";
# pygame falls back to a default font when the name is unknown.
score_font = pygame.font.SysFont("comicsanams", 25)
def your_score(score):
    """Render the running score in the top-left corner of the display."""
    surface = score_font.render("YOUR SCORE: " + str(score), True, settings.YELLOW)
    dis.blit(surface, [0, 0])
def our_snake(snake_block, snake_list):
    """Draw every snake segment as a filled square of side *snake_block*."""
    for segment in snake_list:
        pygame.draw.rect(dis, settings.BLACK,
                         [segment[0], segment[1], snake_block, snake_block])
def message(msg, color):
    """Render `msg` in `color` roughly one third of the way down the screen."""
    rendered = font_style.render(msg, True, color)
    dis.blit(rendered, [settings.WIDTH / 6, settings.HEIGHT / 3])
def game_loop():
    """Run one game session: input handling, movement, food, and scoring."""
    game_over = False
    game_close = False

    x1 = settings.X1
    y1 = settings.Y1
    x1_change = 0
    y1_change = 0

    snake_list = []
    length_of_snake = 1

    foodx = settings.FOODX
    foody = settings.FOODY

    while not game_over:

        # "You lost" screen: wait for quit (Q) or restart (C).
        while game_close:
            dis.fill(settings.BLUE)
            message('You lost! Press C-Play Again or Q-Quit', settings.RED)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        game_over = True
                        game_close = False
                    if event.key == pygame.K_c:
                        game_loop()

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_over = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x1_change = -snake_block
                    y1_change = 0
                elif event.key == pygame.K_RIGHT:
                    x1_change = snake_block
                    y1_change = 0
                elif event.key == pygame.K_UP:
                    x1_change = 0
                    y1_change = -snake_block
                elif event.key == pygame.K_DOWN:
                    x1_change = 0
                    y1_change = snake_block

        # Leaving the play field ends the round.
        if x1 >= settings.WIDTH or x1 < 0 or y1 >= settings.HEIGHT or y1 < 0:
            game_close = True

        x1 += x1_change
        y1 += y1_change
        dis.fill(settings.BLUE)
        pygame.draw.rect(dis, settings.GREEN, [foodx, foody, snake_block, snake_block])

        snake_Head = [x1, y1]
        snake_list.append(snake_Head)
        if len(snake_list) > length_of_snake:
            del snake_list[0]

        # Running into any body segment ends the round.
        for segment in snake_list[:-1]:
            if segment == snake_Head:
                game_close = True

        our_snake(snake_block, snake_list)
        your_score(length_of_snake - 1)
        pygame.display.update()

        if x1 == foodx and y1 == foody:
            # Bug fix: the original dropped the trailing "* 10.0", leaving the
            # food at an off-grid coordinate in [0, ~59] px that the snake's
            # exact-equality check could almost never reach.
            foodx = round(random.randrange(0, settings.WIDTH - snake_block) / 10.0) * 10.0
            foody = round(random.randrange(0, settings.HEIGHT - snake_block) / 10.0) * 10.0
            length_of_snake += 1

        clock.tick(snake_speed)

    pygame.quit()
    quit()
game_loop()
import csv
from myapp.models import KnowledgeBase # Replace 'myapp' with the name of your Django app
def import_data_from_csv(file_path):
    """Load rows from a CSV file into KnowledgeBase records.

    Expects the column order: subject_id, subject_name, topic_name, text,
    level, q1..q5, a1..a5. The first (header) row is skipped.
    """
    with open(file_path, 'r') as csv_file:
        reader = csv.reader(csv_file)
        next(reader)  # skip the header row if it exists in your CSV file
        for row in reader:
            fields = {
                'subject_id': row[0],
                'subject_name': row[1],
                'topic_name': row[2],
                'text': row[3],
                'level': row[4],
            }
            # Columns 5..14 are the five question/answer integer fields.
            for offset, name in enumerate(('q1', 'q2', 'q3', 'q4', 'q5',
                                           'a1', 'a2', 'a3', 'a4', 'a5')):
                fields[name] = int(row[5 + offset])
            KnowledgeBase.objects.create(**fields)
# Usage example
# NOTE(review): this runs at import time with a placeholder path and will fail
# unless the path is replaced with a real CSV file.
csv_file_path = 'path/to/your/csvfile.csv'
import_data_from_csv(csv_file_path)
| devdattatemgire/StressAdaptiveReading2 | portfolio/knowledegebase_init.py | knowledegebase_init.py | py | 1,007 | python | en | code | 0 | github-code | 13 |
# --------------
#Importing header files
# NOTE(review): `path` below is not defined in this file — it is presumably
# injected by the hosting platform (greyatom-style notebook); confirm.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
# Load the Play Store data and drop rows with impossible ratings (> 5).
data = pd.read_csv(path)
data =data[data['Rating'] <=5]
print(data.head())
print(data.shape)
plt.hist(data['Rating'])
#Code ends here
# --------------
# code starts here
# Missing-value summary before and after dropping incomplete rows.
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())
print(type(total_null))
print(type(percent_null))
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percent'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percent'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
sns.catplot(x='Category',y='Rating',data=data,kind='box',height=10)
plt.xticks(rotation=90)
plt.title('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
# #Importing header files
# from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# import re
# #Code starts here
# print(data['Installs'].value_counts())
# # data['Installs'].apply(re.sub(r'[,\+]', " ",data['Installs']))
# # data['Installs'] = data['Installs'].replace(",", "")
# # data['Installs'] = data['Installs'].str.apply(lambda x: re.sub(r"\+"," ",(x))
# # data['Installs'] = data['Installs'].str.apply(lambda x: re.sub(r",","",(x))
# data['Installs'] = data['Installs'].str.replace(',','')
# data['Installs'] = data['Installs'].str.replace('+','')
# print(data["Installs"].head())
# le = LabelEncoder()
# data['Installs'] = data['Installs'].apply(int)
# sns.regplot(x='Installs',y='Rating',data=data)
# plt.title('Rating vs Installs [RegPlot]')
#Code ends here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
# Clean "1,000+"-style install counts into ints, label-encode, and plot vs Rating.
print(data['Installs'].head())
data['Installs'] = data['Installs'].str.replace(',','')
data['Installs'] = data['Installs'].str.replace('+','')
data['Installs'] = data['Installs'].apply(int)
print(data['Installs'].head())
le = LabelEncoder()
data['Installs']=le.fit_transform(data['Installs'])
graph = sns.regplot(x="Installs", y="Rating" , data=data)
graph.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
import re
import seaborn as sns
# Strip "$" from prices, convert to float, and plot vs Rating.
print(data['Price'].value_counts())
# data['Price'] = data['Price'].str.replace("$","")
data['Price'] = data['Price'].apply(lambda x: re.sub(r'[$]',"",str(x)))
print(data['Price'].head())
data['Price'] = data['Price'].apply(float)
sns.regplot(x="Price", y="Rating", data=data)
plt.title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
# Keep only the first genre of multi-genre apps, then mean rating per genre.
print(data['Genres'].unique())
data['Genres'] = data['Genres'].apply(lambda x: x.split(";")[0])
gr_mean = data.groupby('Genres',as_index=False)['Rating'].mean()
print(gr_mean.describe())
gr_mean = gr_mean.sort_values(by='Rating')
print(gr_mean)
# print(gr_mean)
#Code ends here
# --------------
#Code starts here
import pandas as pd
import seaborn as sns
# Days since the most recent update across the dataset, plotted vs Rating.
data['Last Updated'] =pd.to_datetime(data['Last Updated'])
max_date = data['Last Updated'].max()
data['Last Updated Days'] = (max_date - data['Last Updated']).dt.days
sns.regplot(x="Last Updated Days", y="Rating", data=data)
plt.title('Rating vs Last Updated [RegPlot]')
#Code ends here
| Suchitra-Majumdar/ga-learner-dsmp-repo | High-Rated-Games-on-Google-Playstore/code.py | code.py | py | 3,382 | python | en | code | 0 | github-code | 13 |
# Описати клас "Банківський рахунок", атрибути якого:
#
# - ім'я облікового запису - str
# - унікальний id (uuid)
# - баланс float (чи Decimal)
# - транзакції (список)
# Методи
#
# депозит коштів
# виведення коштів
# отримати баланс
#
#
# При зміні балансу записувати в транзакції (сума, тип операції, поточна_дата)
#
# * Дод. додати та враховувати банківські комісії (1%)
from datetime import date
from uuid import UUID, uuid4
class Bank_account:
    """A bank account that records every balance change as a transaction.

    Every deposit and withdrawal is charged a 1% bank commission.
    """

    FEE_RATE = 0.01  # bank commission applied to every operation

    def __init__(
        self,
        account_name: str,
        id: UUID,
        balance: float,
        transactions: list = None,
    ):
        self.account_name = account_name
        self.id = id
        self.balance = balance
        # Give each account its own history list (never a shared mutable
        # default). This also fixes a crash when cash_withdrawal() was
        # called before any deposit() (self.transactions used to stay None).
        self.transactions = transactions if transactions is not None else []

    def _record(self, amount: float, operation: str) -> None:
        """Append one (amount, operation, date) entry to the history."""
        self.transactions.append(f"Sum: {amount}, "
                                 f"type_of_transaction: {operation}, "
                                 f"date: {date.today()}")

    def deposit(self, sum_of_deposit: float):
        """Put money on deposit (minus the 1% fee) and return the history.

        Bug fix: the old version reset self.transactions to [] on every
        deposit, wiping the account's entire transaction history.
        """
        self.balance = self.balance + sum_of_deposit - sum_of_deposit * self.FEE_RATE
        self._record(sum_of_deposit, "deposit")
        return self.transactions

    def cash_withdrawal(self, sum_of_cash: float):
        """Withdraw cash from the account (minus the 1% fee) and return the history."""
        self.balance = self.balance - sum_of_cash - sum_of_cash * self.FEE_RATE
        self._record(sum_of_cash, "cash_withdrawal")
        return self.transactions

    def get_balance(self):
        """Return the current balance."""
        return self.balance

    def get_transactions(self):
        """Return the list of recorded transactions."""
        return self.transactions
if __name__ == '__main__':
    # Quick manual smoke test: deposit, withdraw, then inspect state.
    some_acc = Bank_account("name_of_acc", uuid4(), 0.00)
    print(some_acc.deposit(300.00))
    print(some_acc.cash_withdrawal(100))
    print(some_acc.transactions)
    print(some_acc.get_balance())
    print(some_acc.get_transactions())
| KiraGol/hillel_python_basic | homework_9/bank_acc.py | bank_acc.py | py | 2,258 | python | uk | code | 0 | github-code | 13 |
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import imageio
import os
import argparse
def make_gif():
    """Assemble every frame image in --input_path into "<input_path>.gif"."""
    args = getArgs()
    path = args.input_path
    # Sort so frames are assembled in a stable, predictable order
    # (os.listdir returns entries in arbitrary order).
    filenames = sorted(os.listdir(path))
    print(filenames[0:5])
    images = []
    for filename in tqdm(filenames):
        # Bug fix: the loop variable was never interpolated into the path,
        # so every iteration tried to read the same nonexistent file.
        images.append(imageio.v2.imread(f"{path}/{filename}"))
    kargs = {"duration": args.dur, 'quantizer': 'nq'}
    imageio.mimsave(f"{args.input_path}.gif", images, **kargs)
def getArgs(argv=None):
    """Parse command-line options for the gif maker."""
    parser = argparse.ArgumentParser(description="gif-maker")
    options = [
        ("--input_path", str, "output_directory (which is created)"),
        ("--dur", float, "duration of 1 frame"),
    ]
    for flag, kind, text in options:
        parser.add_argument(flag, type=kind, help=text)
    return parser.parse_args(argv)
if __name__ == "__main__":
    make_gif()
from itertools import count
from collections import OrderedDict
from bs4 import BeautifulSoup
import requests
import urllib.request as req
def get_url():
    """Crawl Naver blog search results and return the last post URL found.

    Prompts the user for a search query and a page count, then walks that
    many result pages, collecting every post URL into post_dict.

    Returns:
        The href of the last post link seen, or None when nothing matched.
    """
    url = "https://search.naver.com/search.naver"
    hrd = {'User-Agent' : 'Mozilla/5.0', 'referer' : 'http://naver.com'}
    post_dict = OrderedDict()
    cnt = 1
    query = input("검색어를 입력해주세요: ")
    page_count = int(input("크롤링할 페이지를 입력해주세요: "))
    url1 = None
    # Bug fix: the original iterated itertools.count(1, 1), which never
    # terminates, ignored the requested page count, and made the final
    # `return` unreachable. Honor the number of pages the user asked for.
    for page in range(1, page_count + 1):
        param = {
            'where' : 'post',
            'query' : query,
            'start' : (page - 1) * 10 + 1
        }
        response = requests.get(url, params = param, headers = hrd)
        soup = BeautifulSoup(response.text, 'html.parser')
        area = soup.find("div", {"class":"blog section _blogBase _prs_blg"}).find_all("a", {"class":"url"})
        for tag in area:
            url1 = tag.get('href')
            post_dict[tag['href']] = tag.text
            cnt += 1
    return url1
def get_final_url(url):
    """Resolve a Naver blog post URL through its screenFrame/mainFrame iframes."""
    try:
        url_1 = url
        html_result = requests.get(url_1)
        soup_temp = BeautifulSoup(html_result.text, 'html.parser')
        area_temp = soup_temp.find(id='screenFrame')
        url_2 = area_temp.get('src')
    except:
        # Fallback: look for mainFrame directly on the original page.
        try:
            area_temp = soup_temp.find(id='mainFrame')
            url_3 = area_temp.get('src')
            # NOTE(review): url_4 built here is discarded — url_2 is still
            # unbound, so the next try block raises NameError and returns None.
            url_4 = "http://blog.naver.com"+url_3
        except:
            return None
    try:
        # Follow the screenFrame URL and extract the inner mainFrame source.
        html_result = requests.get(url_2)
        soup_temp = BeautifulSoup(html_result.text, 'html.parser')
        area_temp = soup_temp.find(id='mainFrame')
        url_3 = area_temp.get('src')
        url_4 = "http://blog.naver.com"+url_3
    except:
        print("error")
        return None
    return url_4
def final_url():
    """Resolve every crawled URL and join the results with newlines."""
    url1 = get_url()
    f_url = ''
    # NOTE(review): get_url() returns a single URL string, so this loop
    # iterates over its *characters*, not over a list of URLs — and
    # get_final_url() returning None makes the concatenation raise
    # TypeError. Presumably get_url() was meant to return a list; confirm.
    for i in range(len(url1)):
        f_url = f_url + '\n' + get_final_url(url1[i])
    return f_url
if __name__ == '__main__':
    # Bug fix: the original called main(), which is not defined anywhere in
    # this file (NameError on launch). Run the crawl pipeline instead.
    print(final_url())
import cv2
# Load the bundled frontal-face Haar cascade shipped with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
video = cv2.VideoCapture(0)
# address = "https://192.168.1.5:8080/video"
# video.open(address)
while True:
    # NOTE(review): `check` is never tested — if the camera read fails,
    # cvtColor below receives None and raises; consider `if not check: break`.
    check, frame = video.read()
    # frame = cv2.flip(frame ,1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces (scaleFactor=1.1, minNeighbors=5) on the grayscale frame.
    face = face_cascade.detectMultiScale(gray,1.1,5)
    for x, y, w, h in face:
        # Draw a green box around each detected face.
        img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow("ip wepcam", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
| MOHAMMED-NASSER22/PycharmProjects | funStuff/ip wepcam.py | ip wepcam.py | py | 634 | python | en | code | 0 | github-code | 13 |
"""
Startup main window
"""
import os
import platform
from distutils.dir_util import copy_tree
import uuid
from Qt import QtCore, QtWidgets, QtGui
from core import definitions
from core import resource
from core import path_maker
import utilsa
logging = utilsa.Logger('armada')
USER, WORKSPACE = ('user', 'workspace')
class LoginFlow(QtWidgets.QDialog):
"""Sets up user and/or shared data depending on type of setup process
"""
# Signal vars
enter_pressed = QtCore.Signal(str)
enter_signal_str = "returnPressed"
esc_pressed = QtCore.Signal(str)
esc_signal_str = "escPressed"
loginPressed = QtCore.Signal()
    def __init__(self, parent=None):
        """Build the login dialog: window setup, widgets, layouts, signals.

        Args:
            parent: Optional parent widget; also stored on self.parent so
                _on_google_log_in can drive parent.sw_main.
        """
        super(LoginFlow, self).__init__(parent)

        self.logger = logging.getLogger('menu.' + self.__class__.__name__)
        self.logger.info('Setup starting...')

        self.setObjectName('launcher_{0}'.format(self.__class__.__name__))

        self.parent = parent
        self.armada_root_path = definitions.ROOT_PATH

        self.setWindowIcon(resource.icon('armada_logo', 'png'))
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.installEventFilter(self)
        self.setStyleSheet(resource.style_sheet('setup'))
        self.setWindowTitle('Armada Startup')

        # GUI -----------------------------------------------
        self.frame_login = QtWidgets.QFrame()
        self.frame_login.setStyleSheet("QFrame{background: #202020;}")
        self.frame_login.setFixedSize(300, 500)

        # Logo
        self.logo_image = QtWidgets.QLabel(self)
        self.logo_image.setObjectName('MainLogo')
        self.logo_image.resize(self.logo_image.sizeHint())
        self.logo_image_pixmap = resource.pixmap('banner').scaled(
            230, 40, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        self.logo_image.setPixmap(self.logo_image_pixmap)
        self.logo_image.setAlignment(QtCore.Qt.AlignCenter)

        self.by_knufflebeast = QtWidgets.QLabel(self)
        self.by_knufflebeast.setText("""<p style="font: 12px;font-weight: normal;">by Knufflebeast</p>""")

        self.btn_log_in_google = QtWidgets.QPushButton("Log in with Google (coming soon)")
        self.btn_log_in_google.setIcon(resource.icon('google', 'png'))
        self.btn_log_in_google.setFixedHeight(40)
        self.btn_log_in_google.setDisabled(True)

        self.hline_or1 = QtWidgets.QFrame()
        self.hline_or1.setFixedHeight(1)
        self.hline_or1.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        self.hline_or1.setStyleSheet("background-color: #656565;")

        self.lbl_or = QtWidgets.QLabel("or")
        self.lbl_or.setStyleSheet("color: #656565")

        self.hline_or2 = QtWidgets.QFrame()
        self.hline_or2.setFixedHeight(1)
        self.hline_or2.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        self.hline_or2.setStyleSheet("background-color: #656565;")

        # Input
        self.lbl_email = QtWidgets.QLabel('Email address')
        self.le_email = QtWidgets.QLineEdit()
        self.le_email.setFocus()
        # Only enable log-in once the text matches this email regex (see check_le_state).
        regexp = QtCore.QRegExp("\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\\b", QtCore.Qt.CaseInsensitive)
        validator = QtGui.QRegExpValidator(regexp)
        self.le_email.setValidator(validator)

        self.hline_email = QtWidgets.QFrame()
        self.hline_email.setFixedHeight(1)
        self.hline_email.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        self.hline_email.setStyleSheet("background-color: #636363;")

        self.btn_log_in = QtWidgets.QPushButton('Log in')
        self.btn_log_in.setStyleSheet('''
            QPushButton{
                Background:#2e7a78;
                height: 30px;
                font: 12px "Roboto-Thin"
            }
            QPushButton:hover{
                Background: #369593;
            }
            QPushButton:hover:pressed{
                Background: #2a615f;
            }
            QPushButton:pressed{
                Background: #2a615f;
            }
            QPushButton:disabled{
                Background: #3b3b3b;
            }'''
        )
        self.btn_log_in.setFixedHeight(30)
        self.btn_log_in.setEnabled(False)

        # self.lbl_disclaimer = QtWidgets.QTextBrowser()
        # self.lbl_disclaimer.setReadOnly(True)
        # self.lbl_disclaimer.setText('Armada Pipeline does not store passwords or account data at this time. Your acocunt is stored locally and only used to add another degree of flexibility project')
        # self.lbl_disclaimer.setMinimumSize(100, 50)

        self.lbl_made_by = QtWidgets.QLabel('Made with')
        self.lbl_made_by.setAlignment(QtCore.Qt.AlignCenter)

        self.pix_heart_ = QtGui.QPixmap(resource.pixmap('heart'))
        self.pix_heart = self.pix_heart_.scaled(20, 20)
        self.lbl_heart = QtWidgets.QLabel()
        self.lbl_heart.setPixmap(self.pix_heart)

        self.lbl_new_york = QtWidgets.QLabel('in New York City')
        self.lbl_new_york.setAlignment(QtCore.Qt.AlignCenter)

        self.pix_city_ = QtGui.QPixmap(resource.pixmap('statue_of_liberty'))
        self.pix_city = self.pix_city_.scaled(20, 20)
        self.lbl_city = QtWidgets.QLabel()
        self.lbl_city.setPixmap(self.pix_city)

        # Layout -----------------------------
        frame_layout = QtWidgets.QHBoxLayout()
        frame_layout.addWidget(self.frame_login, 0, QtCore.Qt.AlignCenter)
        frame_layout.setAlignment(QtCore.Qt.AlignCenter)
        frame_layout.setContentsMargins(0, 0, 0, 0)
        frame_layout.setSpacing(0)

        logo_layout = QtWidgets.QVBoxLayout()
        logo_layout.addWidget(self.logo_image)
        logo_layout.addWidget(self.by_knufflebeast, 0, QtCore.Qt.AlignRight)
        logo_layout.setAlignment(QtCore.Qt.AlignTop)
        logo_layout.setContentsMargins(0, 40, 0, 40)
        logo_layout.setSpacing(0)

        google_layout = QtWidgets.QHBoxLayout()
        google_layout.addWidget(self.btn_log_in_google)
        google_layout.setAlignment(QtCore.Qt.AlignTop)
        google_layout.setContentsMargins(0, 20, 0, 20)
        google_layout.setSpacing(0)

        or_layout = QtWidgets.QHBoxLayout()
        or_layout.addWidget(self.hline_or1)
        or_layout.addWidget(self.lbl_or)
        or_layout.addWidget(self.hline_or2)
        or_layout.setContentsMargins(0, 20, 0, 20)
        or_layout.setSpacing(10)

        input_layout = QtWidgets.QVBoxLayout()
        input_layout.addWidget(self.lbl_email)
        input_layout.addSpacing(5)
        input_layout.addWidget(self.le_email)
        input_layout.addWidget(self.hline_email)
        input_layout.setAlignment(QtCore.Qt.AlignTop)
        input_layout.setContentsMargins(0, 20, 0, 20)
        input_layout.setSpacing(0)

        btn_layout = QtWidgets.QVBoxLayout()
        btn_layout.addWidget(self.btn_log_in)
        btn_layout.setAlignment(QtCore.Qt.AlignTop)
        btn_layout.setContentsMargins(0, 20, 0, 20)
        btn_layout.setSpacing(0)

        with_love_layout = QtWidgets.QHBoxLayout()
        with_love_layout.addWidget(self.lbl_made_by)
        with_love_layout.addWidget(self.lbl_heart)
        with_love_layout.addWidget(self.lbl_new_york)
        with_love_layout.addWidget(self.lbl_city)
        with_love_layout.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignHCenter)
        with_love_layout.setContentsMargins(0, 0, 0, 40)
        with_love_layout.setSpacing(5)

        contents_layout = QtWidgets.QVBoxLayout(self.frame_login)
        contents_layout.addLayout(logo_layout)
        contents_layout.addLayout(google_layout)
        contents_layout.addLayout(or_layout)
        contents_layout.addLayout(input_layout)
        contents_layout.addLayout(btn_layout)
        contents_layout.addStretch()
        contents_layout.addLayout(with_love_layout)
        contents_layout.setAlignment(QtCore.Qt.AlignTop)
        contents_layout.setContentsMargins(30, 0, 30, 0)
        contents_layout.setSpacing(0)

        # disclaimer_layout = QtWidgets.QVBoxLayout()
        # disclaimer_layout.addWidget(self.lbl_disclaimer)
        # disclaimer_layout.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignCenter)
        # disclaimer_layout.setContentsMargins(0, 20, 0, 20)
        # disclaimer_layout.setSpacing(0)

        self.main_layout = QtWidgets.QVBoxLayout()
        self.main_layout.addLayout(frame_layout)
        # self.main_layout.addLayout(disclaimer_layout)
        self.main_layout.setAlignment(QtCore.Qt.AlignCenter)
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        self.main_layout.setSpacing(0)

        self.setLayout(self.main_layout)

        # Connections -----------------------------------
        self.btn_log_in_google.clicked.connect(self._on_google_log_in)
        self.btn_log_in.clicked.connect(self._on_log_in)
        self.le_email.textChanged.connect(self.check_le_state)
def check_le_state(self, *args, **kwargs):
"""
Makes sure line edit input is an email address
"""
sender = self.sender()
validator = sender.validator()
state = validator.validate(sender.text(), 0)[0]
if state == QtGui.QValidator.Acceptable:
self.btn_log_in.setEnabled(True)
elif state == QtGui.QValidator.Intermediate:
self.btn_log_in.setEnabled(False)
else:
self.btn_log_in.setEnabled(False)
    def _on_google_log_in(self):
        """Run the Google OAuth flow, stash the token in env vars, advance the UI."""
        from google_auth_oauthlib.flow import InstalledAppFlow
        # NOTE(review): hard-coded absolute Windows path to the client secret —
        # this only works on the original developer's machine; confirm intent.
        flow = InstalledAppFlow.from_client_secrets_file(
            'W:/OneDrive/Knufflebeast/Technology/ArmadaPipeline/Google_API/client_secret.json',
            ['openid'])
        cred = flow.run_local_server()

        account_uuid = str(uuid.uuid4())

        # data = resource.json_read(definitions.USER_PATH, filename='armada_settings')
        # data['CURRENT_ACCOUNT'] = cred.token
        # print(cred.token)
        # resource.json_save(definitions.USER_PATH, filename='armada_settings', data=data)

        os.environ['ARMADA_CURRENT_ACCOUNT'] = cred.token
        os.environ['ARMADA_SETUP_ACCOUNT_UUID'] = account_uuid

        # Advance the parent's stacked widget to the next setup page.
        self.parent.sw_main.setCurrentIndex(1)
def _on_log_in(self):
account_name = self.le_email.text()
account_uuid = str(uuid.uuid4())
os.environ['ARMADA_CURRENT_ACCOUNT'] = account_name
os.environ['ARMADA_SETUP_ACCOUNT_UUID'] = account_uuid
self.loginPressed.emit()
    def keyPressEvent(self, event):
        """Return triggers log-in (when enabled); Escape is swallowed."""
        # NOTE(review): Qt ignores the return value of keyPressEvent, so the
        # True/False returns here only serve as early exits — consider
        # event.accept() / super() instead; confirm before changing.
        if event.key() == QtCore.Qt.Key_Return:
            if self.btn_log_in.isEnabled():
                self._on_log_in()
                return True
            else:
                return False
        if event.key() == QtCore.Qt.Key_Escape:
            return False
        else:
            super(LoginFlow, self).keyPressEvent(event)
| Knufflebeast/armada-pipeline | packages/startup/gui/login_flow.py | login_flow.py | py | 9,607 | python | en | code | 27 | github-code | 13 |
import heapq
from collections import Counter
class Solution:
    def repeatLimitedString(self, s: str, repeatLimit: int) -> str:
        """Build the lexicographically largest rearrangement of s in which
        no character appears more than repeatLimit times in a row.
        """
        counts = Counter(s)
        # Max-heap over distinct characters, keyed by negated code point.
        pq = [(-ord(ch), ch) for ch in counts]
        heapq.heapify(pq)
        remaining = len(pq)

        pieces = []
        while pq:
            if remaining == 1:
                # Only one distinct character left: emit as much of it as
                # the run limit allows, then stop.
                _, ch = heapq.heappop(pq)
                pieces.append(ch * min(counts[ch], repeatLimit))
                break

            _, big = heapq.heappop(pq)
            _, small = heapq.heappop(pq)
            remaining -= 2

            while counts[big] > 0 and counts[small] > 0:
                if counts[big] <= repeatLimit:
                    # The larger char fits entirely without breaking the limit.
                    pieces.append(big * counts[big])
                    counts[big] = 0
                    break
                # Emit a maximal run of the larger char, then one smaller
                # char to break the run.
                pieces.append(big * repeatLimit)
                pieces.append(small)
                counts[big] -= repeatLimit
                counts[small] -= 1

            for ch in (big, small):
                if counts[ch] > 0:
                    heapq.heappush(pq, (-ord(ch), ch))
                    remaining += 1

        return "".join(pieces)
| saurabhjain17/leetcode-coding-questions | 2182-construct-string-with-repeat-limit/2182-construct-string-with-repeat-limit.py | 2182-construct-string-with-repeat-limit.py | py | 1,093 | python | en | code | 1 | github-code | 13 |
import hashlib
import json
import os
from time import time
COIN_DIR = os.curdir + '/coins/'
def check_coin(index):
    """Verify that coin `index` stores the correct hash of coin `index - 1`.

    Returns a dict with 'coin' (the *previous* index that was hashed),
    'proof' (this coin's stored proof, -1 when unreadable), and 'result'
    ('Ok' when the stored previous_hash matches the recomputed digest,
    'Error' otherwise).
    """
    current_index = str(index)
    previous_index = str(int(index) - 1)
    # Defaults differ (0 vs "") so an unreadable pair compares as Error below.
    current_proof = -1
    current_hash = 0
    previous_hash = 0
    temp = {'coin' : '', 'result' : '', 'proof': ''}
    try:
        file_dict = json.load(open(COIN_DIR + current_index + '.json'))
        current_hash = file_dict['previous_hash']
        current_proof = file_dict['proof']
    except Exception as exception:
        print(exception)
    try:
        # Recompute the digest of the previous coin's raw file bytes.
        previous_hash = hashlib.sha256(open(COIN_DIR + previous_index + '.json', 'rb').read()).hexdigest()
    except Exception as exception:
        print(exception)
    # NOTE(review): 'coin' records the *previous* index, not the one checked — confirm intent.
    temp['coin'] = previous_index
    temp['proof'] = current_proof
    if current_hash == previous_hash:
        temp['result'] = 'Ok'
    else:
        temp['result'] = 'Error'
    return temp
def check_coins_integrity():
    """Validate the hash chain for every coin after the genesis coin.

    Returns:
        A list of per-coin result dicts as produced by check_coin().

    Bug fixes: the original called `int(get_next_coin)` without parentheses
    (TypeError), checked the constant `index` instead of the loop variable,
    and appended an undefined name `temp`.
    """
    result = []
    # get_next_coin() returns the index AFTER the newest coin, so the
    # coins to verify are 2 .. next_index-1 (coin 1 is the genesis coin).
    next_index = int(get_next_coin())
    for i in range(2, next_index):
        result.append(check_coin(i))
    return result
def hash_coin(file_name):
    """Return the SHA-256 hex digest of COIN_DIR/<file_name>[.json].

    Args:
        file_name: Coin index or file name; ".json" is appended when missing.

    Returns:
        The hex digest string, or None when the file cannot be read.
    """
    file_name = str(file_name)
    if not file_name.endswith('.json'):
        file_name += '.json'
    try:
        with open(COIN_DIR + file_name, 'rb') as file:
            return hashlib.sha256(file.read()).hexdigest()
    except OSError as exception:
        # Bug fix: the message used "!n" where a newline was intended;
        # print() already appends one, so drop it entirely.
        print('File "' + file_name + '" does not exist!', exception)
        return None
def get_next_coin():
    """Return (as a string) the index of the next coin file in COIN_DIR.

    Coin files are named "<index>.json". Returns "1" when the directory is
    empty — the original crashed with IndexError in that case.
    """
    index_list = [int(name.split('.')[0]) for name in os.listdir(COIN_DIR)]
    if not index_list:
        return '1'
    # max() avoids sorting the whole list just to take the last element.
    return str(max(index_list) + 1)
def is_valid_proof(last_proof, proof, difficulty):
    """Check whether sha256(f"{last_proof}{proof}") starts with `difficulty` zeros."""
    digest = hashlib.sha256(f'{last_proof}{proof}'.encode()).hexdigest()
    return digest.startswith('0' * difficulty)
def proof_of_work(file_name, difficulty = 1):
    """Brute-force a proof for coin `file_name` and write it back to its file.

    Reads the previous coin's proof, counts up from 0 until is_valid_proof()
    accepts, then stores the found proof and the previous coin's hash into
    the current coin's JSON file.
    """
    file_name = str(file_name)
    # Normalize to the integer coin index, with or without a ".json" suffix.
    if file_name.endswith('.json'):
        file_name = int(file_name.split('.')[0])
    else:
        file_name = int(file_name)
    # NOTE(review): json.load(open(...)) leaves the file handles unclosed —
    # a `with open(...)` block would be safer.
    last_proof = json.load(open(COIN_DIR + str(file_name - 1) + '.json'))['proof']
    proof = 0
    while is_valid_proof(last_proof, proof, difficulty) is False:
        proof += 1
    current_coin = json.load(open(COIN_DIR + str(file_name) + '.json'))
    current_coin['proof'] = proof
    current_coin['previous_hash'] = hash_coin(str(file_name - 1))
    with open(COIN_DIR + str(file_name) + '.json', 'w') as file:
        json.dump(current_coin, file, indent=4, ensure_ascii=False)
def write_coin(make_proof=False):
    """Create the next coin file, chained to the hash of the newest coin.

    Args:
        make_proof: When True, immediately run proof_of_work() on the new
            coin (which also rewrites its previous_hash and proof fields).
    """
    current_index = get_next_coin()
    previous_index = str(int(current_index) - 1)
    prev_coin_hash = hash_coin(previous_index)
    # proof starts at -1 (unproven) until proof_of_work fills it in.
    data = {
        'previous_hash' : prev_coin_hash,
        'timestamp' : time(),
        'proof' : -1,
        'index' : current_index
    }
    with open(COIN_DIR + current_index + '.json', 'w') as file:
        json.dump(data, file, indent=4, ensure_ascii=False)
    if make_proof is True:
        proof_of_work(str(current_index))
if __name__ == '__main__':
    # Smoke-check the chain on direct execution.
    print(check_coins_integrity())
| dnl2612/coin | coin.py | coin.py | py | 3,240 | python | en | code | 0 | github-code | 13 |
import random
import numpy as np
class QAgent():
    """Tabular Q-learning agent with epsilon-greedy action selection."""

    def __init__(self, actions, epsilon=0.1, alpha=0.2, gamma=0.9):
        self.q = {}            # (state, action) -> Q-value
        self.epsilon = epsilon # exploration probability
        self.alpha = alpha     # learning rate
        self.gamma = gamma     # discount factor
        self.actions = actions

    def getQ(self, state, action):
        """Return the stored Q-value for (state, action), defaulting to 0.0."""
        return self.q.get((state, action), 0.0)

    def learnQ(self, state, action, reward, value):
        """Move Q(state, action) toward `value`; seed with `reward` on first visit."""
        key = (state, action)
        previous = self.q.get(key)
        if previous is None:
            self.q[key] = reward
        else:
            self.q[key] = previous + self.alpha * (value - previous)

    def chooseAction(self, state):
        """Epsilon-greedy: explore with probability epsilon, else pick a best action."""
        if random.random() < self.epsilon:
            return random.choice(self.actions)
        q = [self.getQ(state, a) for a in self.actions]
        maxQ = max(q)
        if q.count(maxQ) > 1:
            # Break ties between equally good actions at random.
            best = [i for i in range(len(self.actions)) if q[i] == maxQ]
            i = random.choice(best)
        else:
            i = q.index(maxQ)
        return self.actions[i]

    def learn(self, state1, action1, reward, state2):
        """One Q-learning backup from the observed transition."""
        best_next = max(self.getQ(state2, a) for a in self.actions)
        self.learnQ(state1, action1, reward, reward + self.gamma * best_next)

    def run(self, task, n):
        """Interact with `task` for n steps, learning from each transition."""
        for _ in range(n):
            s = task.state
            a = self.chooseAction(s)
            new_s, r = task.executeAction(a)
            self.learn(s, a, r, new_s)

    def calculateV(self, task):
        """Return a grid of state values V(s) = max_a Q(s, a) matching task.grid."""
        v = np.zeros(task.grid.shape)
        for i in range(v.shape[0]):
            for j in range(v.shape[1]):
                v[i, j] = max(self.getQ((i, j), a) for a in self.actions)
        return v
def run_trials_for_altair(environment, agent, n, collect=True):
    """Runs N trials"""
    # NOTE(review): `run_trial`, `Maze`, and `Agent` are not defined in this
    # chunk — presumably they live elsewhere; confirm. Also `agent.Q` does
    # not match the QAgent class above, which stores its table as `self.q`.
    state_action = {}
    # init all state_actions
    for i in range(4):
        for j in range(4):
            for direction in ['down', 'right', 'up', 'left']:
                state_action[str(((i, j), direction))] = [0]
    for j in range(n):
        run_trial(environment, agent)
        all_keys = set(state_action.keys())
        # keys with new values
        for key, val in agent.Q.items():
            state_action[str(key)].append(val)
            all_keys.remove(str(key))
        # keys without new values
        for key in all_keys:
            # Carry the previous value forward so every series has length j+1.
            state_action[str(key)].append(state_action[str(key)][-1])
    environment.state = Maze.INITIAL_STATE
    import pandas as pd
    # Flatten the per-location Q-value histories into a long-format frame.
    location = []
    run = []
    q_value = []
    for loc in state_action.keys():
        for i in range(len(state_action[loc])):
            location.append(loc)
            run.append(i)
            q_value.append(state_action[loc][i])
    df = pd.DataFrame({"location":location, "run":run, "q_value":q_value})
    return df
# NOTE(review): Maze and Agent are not defined in this chunk — this
# top-level code presumably depends on definitions elsewhere; confirm.
m = Maze()
a = Agent()
df = run_trials_for_altair(m, a, 100)
import altair as alt
# Interactive slider that filters the bar chart to a single training run.
slider = alt.binding_range(min=1, max=100, step=1)
select_run = alt.selection_single(name="iteration", fields=['run'], bind=slider)
alt.data_transformers.enable('default', max_rows=None)
alt.Chart(df).mark_bar().encode(
    x='location:N',
    y=alt.Y('q_value:Q', scale=alt.Scale(domain=(0, 11))),
).add_selection(
    select_run
).transform_filter(
    select_run
)
from flask import render_template, redirect, request, session
from flask_app.config.mysqlconnection import connectToMySQL
from flask_app.models.dojo import Dojo
from flask_app import app
@app.route("/")
def index():
    """Root URL: forward straight to the dojo list."""
    return redirect("/dojos")
@app.route("/dojos")
def dojos():
    """List all dojos."""
    dojos = Dojo.get_all()
    return render_template("dojo.html", all_dojos = dojos)
@app.route("/add", methods=["POST"])
def add_dojo():
    """Create a dojo from the submitted form and return to the list."""
    data = {
        "name" : request.form["name"]
    }
    Dojo.save(data)
    return redirect("/dojos")
@app.route("/dojos/<int:id>")
def show(id):
    """Show one dojo together with its ninjas."""
    data = {
        "id":id
    }
    return render_template("show.html",dojo = Dojo.get_one_with_ninjas(data))
@app.route("/delete/<int:id>")
def delete(id):
    """Delete the dojo with the given id, then return to the dojo list."""
    data = {
        "id": id
    }
    Dojo.delete(data)
    return redirect("/")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PayUserInfoDTO import PayUserInfoDTO
from alipay.aop.api.domain.PayUserInfoDTO import PayUserInfoDTO
class FdsPayFundItemDTO(object):
    """Auto-generated Alipay SDK domain model for a single pay fund item.

    Each field is exposed as a property; payee_user_info / payer_user_info
    coerce plain dicts into PayUserInfoDTO instances on assignment.
    """

    def __init__(self):
        self._amount = None
        self._fund_biz_info = None
        self._fund_item_id = None
        self._gmt_pay = None
        self._memo = None
        self._payee_user_info = None
        self._payer_user_info = None
        self._status = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def fund_biz_info(self):
        return self._fund_biz_info

    @fund_biz_info.setter
    def fund_biz_info(self, value):
        self._fund_biz_info = value
    @property
    def fund_item_id(self):
        return self._fund_item_id

    @fund_item_id.setter
    def fund_item_id(self, value):
        self._fund_item_id = value
    @property
    def gmt_pay(self):
        return self._gmt_pay

    @gmt_pay.setter
    def gmt_pay(self, value):
        self._gmt_pay = value
    @property
    def memo(self):
        return self._memo

    @memo.setter
    def memo(self, value):
        self._memo = value
    @property
    def payee_user_info(self):
        return self._payee_user_info

    @payee_user_info.setter
    def payee_user_info(self, value):
        # Accept either a ready DTO or a raw dict from a JSON payload.
        if isinstance(value, PayUserInfoDTO):
            self._payee_user_info = value
        else:
            self._payee_user_info = PayUserInfoDTO.from_alipay_dict(value)
    @property
    def payer_user_info(self):
        return self._payer_user_info

    @payer_user_info.setter
    def payer_user_info(self, value):
        # Accept either a ready DTO or a raw dict from a JSON payload.
        if isinstance(value, PayUserInfoDTO):
            self._payer_user_info = value
        else:
            self._payer_user_info = PayUserInfoDTO.from_alipay_dict(value)
    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursing into nested DTOs when present."""
        params = dict()
        if self.amount:
            if hasattr(self.amount, 'to_alipay_dict'):
                params['amount'] = self.amount.to_alipay_dict()
            else:
                params['amount'] = self.amount
        if self.fund_biz_info:
            if hasattr(self.fund_biz_info, 'to_alipay_dict'):
                params['fund_biz_info'] = self.fund_biz_info.to_alipay_dict()
            else:
                params['fund_biz_info'] = self.fund_biz_info
        if self.fund_item_id:
            if hasattr(self.fund_item_id, 'to_alipay_dict'):
                params['fund_item_id'] = self.fund_item_id.to_alipay_dict()
            else:
                params['fund_item_id'] = self.fund_item_id
        if self.gmt_pay:
            if hasattr(self.gmt_pay, 'to_alipay_dict'):
                params['gmt_pay'] = self.gmt_pay.to_alipay_dict()
            else:
                params['gmt_pay'] = self.gmt_pay
        if self.memo:
            if hasattr(self.memo, 'to_alipay_dict'):
                params['memo'] = self.memo.to_alipay_dict()
            else:
                params['memo'] = self.memo
        if self.payee_user_info:
            if hasattr(self.payee_user_info, 'to_alipay_dict'):
                params['payee_user_info'] = self.payee_user_info.to_alipay_dict()
            else:
                params['payee_user_info'] = self.payee_user_info
        if self.payer_user_info:
            if hasattr(self.payer_user_info, 'to_alipay_dict'):
                params['payer_user_info'] = self.payer_user_info.to_alipay_dict()
            else:
                params['payer_user_info'] = self.payer_user_info
        if self.status:
            if hasattr(self.status, 'to_alipay_dict'):
                params['status'] = self.status.to_alipay_dict()
            else:
                params['status'] = self.status
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        o = FdsPayFundItemDTO()
        if 'amount' in d:
            o.amount = d['amount']
        if 'fund_biz_info' in d:
            o.fund_biz_info = d['fund_biz_info']
        if 'fund_item_id' in d:
            o.fund_item_id = d['fund_item_id']
        if 'gmt_pay' in d:
            o.gmt_pay = d['gmt_pay']
        if 'memo' in d:
            o.memo = d['memo']
        if 'payee_user_info' in d:
            o.payee_user_info = d['payee_user_info']
        if 'payer_user_info' in d:
            o.payer_user_info = d['payer_user_info']
        if 'status' in d:
            o.status = d['status']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/FdsPayFundItemDTO.py | FdsPayFundItemDTO.py | py | 4,760 | python | en | code | 241 | github-code | 13 |
import PySimpleGUI as sg
import networkx as nx
import matplotlib.pyplot as plt
import kruskal as k
def cria_graf():
    """Show the initial window asking for vertex/edge counts, then hand off to cria_input()."""
    # Global theme: dark background, gold text, styled buttons.
    sg.theme_background_color('#1C1C1C')
    sg.theme_text_color('#FFD700')
    sg.theme_button_color(('#273755', '#fad029'))
    layout = [
        [sg.Text('Árvore Geradora Mínima', background_color='#1C1C1C', font='Ubuntu', pad=(200,7) )],
        [sg.Text('Número de Vértices:', background_color='#1C1C1C', font='Ubuntu')],
        [sg.InputText(key='num_vertices')],
        [sg.Text('Número de Arestas:', background_color='#1C1C1C', font='Ubuntu')],
        [sg.InputText(key='num_arestas')],
        [sg.Button('Salvar', font='Ubuntu')]
    ]
    janela = sg.Window('Trabalho Grafo', layout)
    while True:
        evento, valores = janela.read()
        if evento == sg.WINDOW_CLOSED:
            break
        if evento == 'Salvar':
            # Read both counts, dismiss this window and move on to edge entry.
            qtd_vertices = int(valores['num_vertices'])
            qtd_arestas = int(valores['num_arestas'])
            janela.close()
            cria_input(qtd_vertices, qtd_arestas)
    janela.close()
def cria_input(num_vertices, num_arestas):
    """Collect `num_arestas` weighted edges via a form, then run Kruskal's algorithm."""
    layout = [
        [sg.Text('Aresta {}/{}'.format(1, num_arestas), key='label_edge',background_color='#1C1C1C', pad=(200,7),font='Ubuntu')],
        [sg.Text('Vértice 1:',background_color='#1C1C1C',font='Ubuntu')],
        [sg.InputText(key='v1')],
        [sg.Text('Vértice 2:',background_color='#1C1C1C',font='Ubuntu')],
        [sg.InputText(key='v2')],
        [sg.Text('Peso: ',background_color='#1C1C1C',font='Ubuntu')],
        [sg.InputText(key='peso')],
        [sg.Button('Próxima Aresta',font='Ubuntu')]
    ]
    janela = sg.Window('Entrada de Grafos', layout)
    lista_arestas = []   # accepted (v1, v2, peso) tuples
    pares_usados = []    # (v1, v2) pairs, used to reject duplicate edges
    indice = 1           # 1-based index of the edge being entered
    while True:
        evento, valores = janela.read()
        if evento == sg.WINDOW_CLOSED:
            break
        if evento == 'Próxima Aresta':
            u = int(valores['v1'])
            v = int(valores['v2'])
            w = int(valores['peso'])
            # Reject an edge that was already entered in either direction.
            if ((u, v) in pares_usados) or ((v, u) in pares_usados):
                print('ERRO')
                sg.popup('CONJUNTO DE ARESTA JÁ UTILIZADO',background_color='#1C1C1C', font='Ubuntu')
            else:
                lista_arestas.append((u, v, w))
                pares_usados.append((u, v))
                indice += 1
                if indice <= num_arestas:
                    # More edges to go: reset the form for the next one.
                    janela['label_edge'].update('Aresta {}/{}'.format(indice, num_arestas))
                    janela['v1'].update('')
                    janela['v2'].update('')
                    janela['peso'].update('')
                else:
                    janela.close()
                    k.kruskal_inicio(num_vertices, lista_arestas)
    janela.close()
cria_graf() | roberio3620/arvore-geradora-minima | main.py | main.py | py | 2,832 | python | en | code | 0 | github-code | 13 |
4697178607 | import pyautogui
from src import movearrow
import time
import clipboard
def VerifyStatus():
    """Check on screen whether the item's status is 'paused'.

    Sets the module-level globals:
      * ``status`` -- True when the paused-status image is found, else False.
      * ``itemID`` -- clipboard contents of the item id when paused, else "".
    """
    global itemID
    global status
    if pyautogui.locateOnScreen(image=".\images\pausadostatus.png"):
        status = True
        movearrow.MoveArrow(times=18, side="left")
        img = pyautogui.locateCenterOnScreen(image=".\images\id.png", confidence=0.7)
        # Move just below the located "id" label, where the value is shown.
        pyautogui.moveTo(img.x, img.y+120)
        time.sleep(2)
        # BUG FIX: the original had `pyautogui.click` without parentheses,
        # which only referenced the function and never clicked.
        pyautogui.click()
        time.sleep(1)
        pyautogui.doubleClick(interval=0.1)
        time.sleep(1)
        # Select-all + copy the id field into the clipboard.
        pyautogui.hotkey('ctrl', 'a')
        time.sleep(1)
        pyautogui.hotkey('ctrl', 'c')
        itemID = clipboard.paste()
        return
    else:
        status = False
        itemID = ""
        print('Status diferente')
        movearrow.MoveArrow(times=18, side="left")
        return
| carlynxd/automaticbacklog | src/verifystatus.py | verifystatus.py | py | 835 | python | en | code | 0 | github-code | 13 |
17174829686 | import argparse
import pandas as pd
def parse_args():
    """Define and parse this script's command-line options."""
    cli = argparse.ArgumentParser(description="use gencode annotation to get gene coordinates; add flanks of specified length with a stride of specific size")
    cli.add_argument("-gene_list")
    # Default annotation: GENCODE v32 lifted to GRCh37.
    cli.add_argument("-gencode_gtf", default="/mnt/data/annotations/gencode/GRCh37/gencode.v32lift37.annotation.gtf.gz")
    cli.add_argument("-expression")
    cli.add_argument("-outf")
    return cli.parse_args()
def main():
    """Extract TPM expression values for a list of genes from a GENCODE GTF.

    Writes a tab-separated table (Gene, GeneID, TPM) to ``args.outf``.
    Genes absent from the expression table are reported with TPM "NA".
    """
    args = parse_args()
    # Genes of interest: one symbol per line. Use a set for O(1) membership
    # tests and close the file handle (the original leaked it).
    with open(args.gene_list, 'r') as f:
        genes = set(f.read().strip().split('\n'))
    # Map Ensembl gene id (version suffix stripped) -> TPM.
    expression = pd.read_csv(args.expression, header=0, sep='\t')
    expression_dict = {
        row['gene_id'].split('.')[0]: row['TPM']
        for _, row in expression.iterrows()
    }
    # GTF: skip the 5 comment header lines and keep only 'gene' features.
    gtf = pd.read_csv(args.gencode_gtf, header=None, sep='\t', skiprows=5)
    gtf = gtf[gtf[2] == 'gene']
    with open(args.outf, 'w') as outf:
        outf.write('Gene\tGeneID\tTPM\n')
        print("loaded gtf:" + str(gtf.shape))
        for _, row in gtf.iterrows():
            keep = False
            gene_id = None
            gene_name = None
            # Column 8 holds semicolon-separated `key "value"` attributes.
            for entry in (i.strip() for i in row[8].split(';')):
                if entry.startswith('gene_id'):
                    gene_id = entry.split('"')[1].split('.')[0]
                if entry.startswith('gene_name'):
                    gene_name = entry.split('"')[1].upper()
                    if gene_name in genes:
                        keep = True
            if keep:
                # .get replaces the original bare try/except, which could
                # also mask an unset gene_id (NameError) as "NA".
                cur_expression = expression_dict.get(gene_id, "NA")
                outf.write(gene_name + '\t' + gene_id + '\t' +
                           str(cur_expression) + '\n')
# Entry point when run as a standalone script.
if __name__=="__main__":
    main()
| ENCODE-AWG/locusselect_applications | expression/get_expression_for_gene_list_ENCODE.py | get_expression_for_gene_list_ENCODE.py | py | 1,839 | python | en | code | 0 | github-code | 13 |
16393535327 | import boto
import boto.s3.connection
import os
import secret
# Credentials and endpoint details are kept out of the repo in the local
# `secret` module.
access_key = secret.access_key
secret_key = secret.secret_key
# Connect to an S3-compatible endpoint using path-style (ordinary) URLs.
conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host=secret.host,
    port=secret.port,
    is_secure=False,  # plain HTTP endpoint (set True when using SSL)
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )
bucket = conn.get_bucket('uploads')
# Fetch the object and print its contents.
# key = bucket.get_key('persistent/c/47/7e664d041f128c8aacafc1c1abf37b1704698aed14630130cad6ff4817729') # bad
key = bucket.get_key('persistent/0/00/23d07267d8df05459e914f916804409420db9cf138b64d73bccc1bbcb5500') # good
print(key.get_contents_as_string())
| MinistrBob/MyPythonTools | S3/s3get.py | s3get.py | py | 717 | python | en | code | 0 | github-code | 13 |
3725389290 | """Tools for running experiments with Garage."""
import base64
import collections
import datetime
import enum
import functools
import gc
import inspect
import json
import os
import os.path as osp
import pathlib
import pickle
import re
import subprocess
import warnings
import cloudpickle
import dateutil.tz
import dowel
from dowel import logger
import dowel_wrapper
import __main__ as main # noqa: I100
# Monotonically increasing counter used to build unique default experiment
# names within a single process.
exp_count = 0
# Timestamp (local timezone) captured once at import time; shared by every
# experiment launched from this process.
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
# Size threshold (8 MiB) used when archiving the launcher repository.
EIGHT_MEBIBYTES = 8 * 2**20
def run_experiment(method_call=None,
                   batch_tasks=None,
                   exp_prefix='experiment',
                   exp_name=None,
                   log_dir=None,
                   script='garage.experiment.experiment_wrapper',
                   python_command='python',
                   dry=False,
                   env=None,
                   variant=None,
                   force_cpu=False,
                   pre_commands=None,
                   **kwargs):
    # pylint: disable=missing-raises-doc,too-many-branches,global-statement
    """Serialize the method call and run the experiment using specified mode.

    Args:
        method_call (callable): A method call.
        batch_tasks (list[dict]): A batch of method calls.
        exp_prefix (str): Name prefix for the experiment.
        exp_name (str): Name of the experiment.
        log_dir (str): Log directory for the experiment.
        script (str): The name of the entrance point python script.
        python_command (str): Python command to run the experiment.
        dry (bool): Whether to do a dry-run, which only prints the
            commands without executing them.
        env (dict): Extra environment variables.
        variant (dict): If provided, should be a dictionary of parameters.
        force_cpu (bool): Whether to set all GPU devices invisible
            to force use CPU.
        pre_commands (str): Pre commands to run the experiment.
        kwargs (dict): Additional parameters.

    """
    warnings.warn(
        DeprecationWarning(
            'run_experiment is deprecated, and will be removed in the next '
            'release. Please use wrap_experiment instead.'))
    if method_call is None and batch_tasks is None:
        raise Exception(
            'Must provide at least either method_call or batch_tasks')
    for task in (batch_tasks or [method_call]):
        if not hasattr(task, '__call__'):
            raise ValueError('batch_tasks should be callable')
    # ensure variant exists
    if variant is None:
        variant = dict()
    # A single method_call is treated as a batch of one task.
    if batch_tasks is None:
        batch_tasks = [
            dict(kwargs,
                 pre_commands=pre_commands,
                 method_call=method_call,
                 exp_name=exp_name,
                 log_dir=log_dir,
                 env=env,
                 variant=variant)
        ]
    global exp_count
    if force_cpu:
        # Hide all CUDA devices from the child process.
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    for task in batch_tasks:
        call = task.pop('method_call')
        # The callable is cloudpickled and base64-encoded so it can be passed
        # to the wrapper script as a plain CLI argument.
        data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
        task['args_data'] = data
        exp_count += 1
        if task.get('exp_name', None) is None:
            task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,
                                                     exp_count)
        if task.get('log_dir', None) is None:
            task['log_dir'] = (
                '{log_dir}/local/{exp_prefix}/{exp_name}'.format(
                    log_dir=osp.join(os.getcwd(), 'data'),
                    exp_prefix=exp_prefix.replace('_', '-'),
                    exp_name=task['exp_name']))
        if task.get('variant', None) is not None:
            variant = task.pop('variant')
            if 'exp_name' not in variant:
                variant['exp_name'] = task['exp_name']
            # Variants are pickled/base64-encoded like the method call above.
            task['variant_data'] = base64.b64encode(
                pickle.dumps(variant)).decode('utf-8')
        elif 'variant' in task:
            del task['variant']
        task['env'] = task.get('env', dict()) or dict()
        task['env']['GARAGE_FORCE_CPU'] = str(force_cpu)
    for task in batch_tasks:
        env = task.pop('env', None)
        command = to_local_command(task,
                                   python_command=python_command,
                                   script=script)
        print(command)
        if dry:
            # Dry run: only print the command, never execute.
            return
        try:
            if env is None:
                env = dict()
            subprocess.run(command,
                           shell=True,
                           env=dict(os.environ, **env),
                           check=True)
        except Exception as e:
            print(e)
            raise
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def _shellquote(s):
"""Return a shell-escaped version of the string *s*.
Args:
s (str): String to shell quote.
Returns:
str: The shell-quoted string.
"""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
"""Return a shell-escaped version of v.
Args:
v (object): object to shell quote
Returns:
str: The shell-quoted string.
"""
if v is None:
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(
        params,
        python_command='python',
        script='garage.experiment.experiment_wrapper'):  # noqa: D103,E501
    # noqa:E501 ; pylint: disable=eval-used,missing-return-doc,missing-return-type-doc,missing-function-docstring
    # Builds the shell command that launches `script` with `params` encoded
    # as CLI flags. NOTE: mutates `params` (pops pre_commands/post_commands).
    command = python_command + ' -m ' + script
    # SECURITY NOTE: GARAGE_ENV is eval'd as a Python dict literal. Only set
    # this environment variable from trusted sources.
    garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
    for k, v in garage_env.items():
        # Prepend KEY=VALUE environment assignments to the command.
        command = '{}={} '.format(k, v) + command
    pre_commands = params.pop('pre_commands', None)
    post_commands = params.pop('post_commands', None)
    if pre_commands is not None or post_commands is not None:
        print('Not executing the pre_commands: ', pre_commands,
              ', nor post_commands: ', post_commands)
    for k, v in params.items():
        if isinstance(v, dict):
            # Nested dicts become --{key}_{subkey} flags; the special
            # '_name' subkey maps to a bare --{key} flag.
            for nk, nv in v.items():
                if str(nk) == '_name':
                    command += ' --{} {}'.format(k, _to_param_val(nv))
                else:
                    command += \
                        ' --{}_{} {}'.format(k, nk, _to_param_val(nv))
        else:
            command += ' --{} {}'.format(k, _to_param_val(v))
    return command
def _make_sequential_log_dir(log_dir):
"""Creates log_dir, appending a number if necessary.
Attempts to create the directory `log_dir`. If it already exists, appends
"_1". If that already exists, appends "_2" instead, etc.
Args:
log_dir (str): The log directory to attempt to create.
Returns:
str: The log directory actually created.
"""
i = 0
while True:
try:
if i == 0:
os.makedirs(log_dir)
else:
possible_log_dir = '{}_{}'.format(log_dir, i)
os.makedirs(possible_log_dir)
log_dir = possible_log_dir
return log_dir
except FileExistsError:
i += 1
def _make_experiment_signature(function):
"""Generate an ExperimentTemplate's signature from its function.
Checks that the first parameter is named ctxt and removes it from the
signature. Makes all other parameters keyword only.
Args:
function (callable[ExperimentContext, ...]): The wrapped function.
Returns:
inspect.Signature: The signature of the ExperimentTemplate.
Raises:
ValueError: If the wrapped function's first parameter is not 'ctxt'.
"""
func_sig = inspect.signature(function)
new_params = []
saw_first_param = False
for param in func_sig.parameters.values():
if not saw_first_param:
# Don't output it to the experiment params, since it will contain
# the context.
if param.name != 'ctxt':
raise ValueError(
'Experiment functions should have a first '
"parameter named 'ctxt' instead of {!r}".format(
param.name))
saw_first_param = True
else:
new_params.append(
inspect.Parameter(name=param.name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.default,
annotation=param.annotation))
if not saw_first_param:
raise ValueError(
'Experiment functions should have a first parameter '
"named 'ctxt', but {!r} has no parameters".format(function))
return inspect.Signature(new_params,
return_annotation=func_sig.return_annotation)
class ExperimentContext:
    """Context in which an experiment is being run.

    Currently, this class implements the same interface as SnapshotConfig, but
    it will be extended in the future.

    Args:
        snapshot_dir (str): Full path of the directory snapshots are saved to.
        snapshot_mode (str): Snapshot retention policy: "all" (keep every
            iteration), "last" (only the latest), "gap" (every
            ``snapshot_gap``-th iteration), or "none" (keep nothing).
        snapshot_gap (int): Number of iterations between saved snapshots when
            ``snapshot_mode`` is "gap".

    """

    # pylint: disable=too-few-public-methods
    def __init__(self, *, snapshot_dir, snapshot_mode, snapshot_gap):
        self.snapshot_dir = snapshot_dir
        self.snapshot_mode = snapshot_mode
        self.snapshot_gap = snapshot_gap
def get_git_commit_hash():
    """Return the current git HEAD commit hash as a string.

    Runs ``git rev-parse HEAD`` in the current working directory. If the
    command produces no output on stdout (e.g. not inside a git repo), an
    empty string is returned; git's own error text goes to stderr.
    """
    # FIX: drop the redundant function-local `import subprocess` that
    # shadowed the module-level import.
    p = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    git_commit, _ = p.communicate()
    git_commit = git_commit.strip().decode('utf-8')
    return git_commit
def save_git_diff_to_file(git_diff_file_path):
    """Write the output of ``git diff --patch HEAD`` to a file.

    Args:
        git_diff_file_path (str): Destination path for the patch text.
    """
    # FIX: the original opened the file and never closed it; the context
    # manager guarantees the handle is released even if the subprocess fails.
    # (Also drops the redundant function-local `import subprocess`.)
    with open(git_diff_file_path, 'w') as git_diff_file:
        p = subprocess.Popen(['git', 'diff', '--patch', 'HEAD'],
                             stdout=git_diff_file)
        p.wait()
class ExperimentTemplate:
    """Creates experiment log directories and runs an experiment.

    This class should only be created by calling garage.wrap_experiment.
    Generally, it's used as a decorator like this:

        @wrap_experiment(snapshot_mode='all')
        def my_experiment(ctxt, seed, lr=0.5):
            ...

        my_experiment(seed=1)

    Even though this class could be implemented as a closure in
    wrap_experiment(), it's more readable (and easier to pickle) implemented as
    a class.

    Note that the full path that will be created is
    f'{data}/local/{prefix}/{name}'.

    Args:
        function (callable or None): The experiment function to wrap.
        log_dir (str or None): The full log directory to log to. Will be
            computed from `name` if omitted.
        name (str or None): The name of this experiment template. Will be
            filled from the wrapped function's name if omitted.
        prefix (str): Directory under data/local in which to place the
            experiment directory.
        snapshot_mode (str): Policy for which snapshots to keep (or make at
            all). Can be either "all" (all iterations will be saved), "last"
            (only the last iteration will be saved), "gap" (every snapshot_gap
            iterations are saved), or "none" (do not save snapshots).
        snapshot_gap (int): Gap between snapshot iterations. Waits this number
            of iterations before taking another snapshot.
        archive_launch_repo (bool): Whether to save an archive of the
            repository containing the launcher script. This is a potentially
            expensive operation which is useful for ensuring reproducibility.
        name_parameters (str or None): Parameters to insert into the experiment
            name. Should be either None (the default), 'all' (all parameters
            will be used), or 'passed' (only passed parameters will be used).
            The used parameters will be inserted in the order they appear in
            the function definition.
        use_existing_dir (bool): If true, (re)use the directory for this
            experiment, even if it already contains data.

    """

    # pylint: disable=too-few-public-methods
    def __init__(self, *, function, log_dir, name, prefix, snapshot_mode,
                 snapshot_gap, archive_launch_repo, name_parameters,
                 use_existing_dir):
        self.function = function
        self.log_dir = log_dir
        self.name = name
        self.prefix = prefix
        self.snapshot_mode = snapshot_mode
        self.snapshot_gap = snapshot_gap
        self.archive_launch_repo = archive_launch_repo
        self.name_parameters = name_parameters
        self.use_existing_dir = use_existing_dir
        # function is None when used as @wrap_experiment(...) with arguments;
        # it gets filled in later by __call__ acting as the decorator.
        if self.function is not None:
            self._update_wrap_params()

    def _update_wrap_params(self):
        """Update self to "look like" the wrapped funciton.

        Mostly, this involves creating a function signature for the
        ExperimentTemplate that looks like the wrapped function, but with the
        first argument (ctxt) excluded, and all other arguments required to be
        keyword only.

        """
        functools.update_wrapper(self, self.function)
        self.__signature__ = _make_experiment_signature(self.function)

    @classmethod
    def _augment_name(cls, options, name, params):
        """Augment the experiment name with parameters.

        Args:
            options (dict): Options to `wrap_experiment` itself. See the
                function documentation for details.
            name (str): Name without parameter names.
            params (dict): Dictionary of parameters.

        Raises:
            ValueError: If self.name_parameters is not set to None, "passed",
                or "all".

        Returns:
            str: Returns the augmented name.

        """
        name_parameters = collections.OrderedDict()
        # 'passed': only parameters the caller actually supplied.
        if options['name_parameters'] == 'passed':
            for param in options['signature'].parameters.values():
                try:
                    name_parameters[param.name] = params[param.name]
                except KeyError:
                    pass
        # 'all': every parameter, falling back to its declared default.
        elif options['name_parameters'] == 'all':
            for param in options['signature'].parameters.values():
                name_parameters[param.name] = params.get(
                    param.name, param.default)
        elif options['name_parameters'] is not None:
            raise ValueError('wrap_experiment.name_parameters should be set '
                             'to one of None, "passed", or "all"')
        param_str = '_'.join('{}={}'.format(k, v)
                             for (k, v) in name_parameters.items())
        if param_str:
            return '{}_{}'.format(name, param_str)
        else:
            return name

    def _get_options(self, *args):
        """Get the options for wrap_experiment.

        This method combines options passed to `wrap_experiment` itself and to
        the wrapped experiment.

        Args:
            args (list[dict]): Unnamed arguments to the wrapped experiment. May
                be an empty list or a list containing a single dictionary.

        Raises:
            ValueError: If args contains more than one value, or the value is
                not a dictionary containing at most the same keys as are
                arguments to `wrap_experiment`.

        Returns:
            dict: The final options.

        """
        options = dict(name=self.name,
                       function=self.function,
                       prefix=self.prefix,
                       name_parameters=self.name_parameters,
                       log_dir=self.log_dir,
                       archive_launch_repo=self.archive_launch_repo,
                       snapshot_gap=self.snapshot_gap,
                       snapshot_mode=self.snapshot_mode,
                       use_existing_dir=self.use_existing_dir,
                       signature=self.__signature__)
        if args:
            # A single dict positional argument may override template options.
            if len(args) == 1 and isinstance(args[0], dict):
                for k in args[0]:
                    if k not in options:
                        raise ValueError('Unknown key {} in wrap_experiment '
                                         'options'.format(k))
                options.update(args[0])
            else:
                raise ValueError('garage.experiment currently only supports '
                                 'keyword arguments')
        return options

    @classmethod
    def _make_context(cls, options, **kwargs):
        """Make a context from the template information and variant args.

        Currently, all arguments should be keyword arguments.

        Args:
            options (dict): Options to `wrap_experiment` itself. See the
                function documentation for details.
            kwargs (dict): Keyword arguments for the wrapped function. Will be
                logged to `variant.json`

        Returns:
            ExperimentContext: The created experiment context.

        """
        name = options['name']
        if name is None:
            name = options['function'].__name__
        name = cls._augment_name(options, name, kwargs)
        log_dir = options['log_dir']
        if log_dir is None:
            log_dir = ('{data}/local/{prefix}/{name}'.format(
                data=os.path.join(os.getcwd(), 'data'),
                prefix=options['prefix'],
                name=name))
        if options['use_existing_dir']:
            os.makedirs(log_dir, exist_ok=True)
        else:
            # Avoid clobbering an existing run by appending _1, _2, ...
            log_dir = _make_sequential_log_dir(log_dir)
        # File layout inside the experiment directory.
        tabular_log_file = os.path.join(log_dir, 'progress.csv')
        text_log_file = os.path.join(log_dir, 'debug.log')
        variant_log_file = os.path.join(log_dir, 'variant.json')
        metadata_log_file = os.path.join(log_dir, 'metadata.json')
        tb_dir = os.path.join(log_dir, 'tb')
        tabular_log_file_eval = os.path.join(log_dir, 'progress_eval.csv')
        text_log_file_eval = os.path.join(log_dir, 'debug_eval.log')
        tb_dir_eval = os.path.join(log_dir, 'tb_eval')
        tb_dir_plot = os.path.join(log_dir, 'tb_plot')
        text_log_file_tcp = os.path.join(log_dir, 'debug_tcp.log')
        # Record the variant and launcher metadata for reproducibility.
        dump_json(variant_log_file, kwargs)
        git_root_path, metadata = get_metadata()
        dump_json(metadata_log_file, metadata)
        if git_root_path and options['archive_launch_repo']:
            make_launcher_archive(git_root_path=git_root_path, log_dir=log_dir)
        # Main (training) dowel logger outputs.
        logger.add_output(dowel.TextOutput(text_log_file))
        logger.add_output(dowel.CsvOutput(tabular_log_file))
        logger.add_output(
            dowel.TensorBoardOutput(tb_dir, x_axis='TotalEnvSteps'))
        logger.add_output(dowel.StdOutput())
        # Separate dowel instances for evaluation, plotting and TCP logs.
        dowel_eval = dowel_wrapper.get_dowel('eval')
        logger_eval = dowel_eval.logger
        logger_eval.add_output(dowel_eval.TextOutput(text_log_file_eval))
        logger_eval.add_output(dowel_eval.CsvOutput(tabular_log_file_eval))
        logger_eval.add_output(
            dowel_eval.TensorBoardOutput(tb_dir_eval, x_axis='TotalEnvSteps'))
        logger_eval.add_output(dowel_eval.StdOutput())

        dowel_plot = dowel_wrapper.get_dowel('plot')
        logger_plot = dowel_plot.logger
        logger_plot.add_output(
            dowel_plot.TensorBoardOutput(tb_dir_plot, x_axis='TotalEnvSteps'))

        dowel_tcp = dowel_wrapper.get_dowel('tcp')
        logger_tcp = dowel_tcp.logger
        logger_tcp.add_output(dowel_tcp.TextOutput(text_log_file_tcp))
        logger_tcp.add_output(dowel_tcp.StdOutput())

        logger.push_prefix('[{}] '.format(name))
        logger.log('Logging to {}'.format(log_dir))
        # Record the exact code state: HEAD commit hash plus a patch file of
        # any uncommitted changes.
        git_commit = get_git_commit_hash()
        logger.log('Git commit: {}'.format(git_commit))
        git_diff_file_path = os.path.join(log_dir, 'git_diff_{}.patch'.format(git_commit))
        save_git_diff_to_file(git_diff_file_path)

        return ExperimentContext(snapshot_dir=log_dir,
                                 snapshot_mode=options['snapshot_mode'],
                                 snapshot_gap=options['snapshot_gap'])

    def __call__(self, *args, **kwargs):
        """Wrap a function to turn it into an ExperimentTemplate.

        Note that this docstring will be overriden to match the function's
        docstring on the ExperimentTemplate once a function is passed in.

        Args:
            args (list): If no function has been set yet, must be a list
                containing a single callable. If the function has been set, may
                be a single value, a dictionary containing overrides for the
                original arguments to `wrap_experiment`.
            kwargs (dict): Arguments passed onto the wrapped function.

        Returns:
            object: The returned value of the wrapped function.

        Raises:
            ValueError: If not passed a single callable argument.

        """
        if self.function is None:
            if len(args) != 1 or len(kwargs) != 0 or not callable(args[0]):
                raise ValueError('Please apply the result of '
                                 'wrap_experiment() to a single function')
            # Apply ourselves as a decorator
            self.function = args[0]
            self._update_wrap_params()
            return self
        else:
            # Normal invocation: build the context, run the experiment, then
            # tear down logging so a later experiment starts clean.
            ctxt = self._make_context(self._get_options(*args), **kwargs)
            result = self.function(ctxt, **kwargs)
            logger.remove_all()
            logger.pop_prefix()
            gc.collect()  # See dowel issue #44
            return result
def wrap_experiment(function=None,
                    *,
                    log_dir=None,
                    prefix='experiment',
                    name=None,
                    snapshot_mode='last',
                    snapshot_gap=1,
                    archive_launch_repo=True,
                    name_parameters=None,
                    use_existing_dir=False):
    """Turn an experiment function into an ExperimentTemplate.

    The wrapped function receives an ExperimentContext as its first
    argument, carrying the log directory and snapshot configuration. The
    decorator works both bare and with keyword arguments:

        @wrap_experiment
        def my_experiment(ctxt, seed, lr=0.5):
            ...

        @wrap_experiment(snapshot_mode='all')
        def my_experiment(ctxt, seed, lr=0.5):
            ...

    All arguments must be keyword arguments.

    Args:
        function (callable or None): The experiment function to wrap.
        log_dir (str or None): Full log directory; computed from `name`
            when omitted.
        prefix (str): Directory under data/local in which to place the
            experiment directory.
        name (str or None): Experiment name; defaults to the wrapped
            function's name.
        snapshot_mode (str): Which snapshots to keep: "all" (every
            iteration), "last" (only the latest), "gap" (every
            `snapshot_gap` iterations), or "none" (no snapshots).
        snapshot_gap (int): Iterations between snapshots in "gap" mode.
        archive_launch_repo (bool): Whether to archive the launcher's git
            repository for reproducibility (potentially expensive).
        name_parameters (str or None): None (default), 'all', or 'passed'
            -- which parameters to append to the experiment name, in the
            order they appear in the function definition.
        use_existing_dir (bool): If true, (re)use the directory for this
            experiment, even if it already contains data.

    Returns:
        callable: The wrapped function.

    """
    template_options = dict(function=function,
                            log_dir=log_dir,
                            prefix=prefix,
                            name=name,
                            snapshot_mode=snapshot_mode,
                            snapshot_gap=snapshot_gap,
                            archive_launch_repo=archive_launch_repo,
                            name_parameters=name_parameters,
                            use_existing_dir=use_existing_dir)
    return ExperimentTemplate(**template_options)
def dump_json(filename, data):
    """Serialize `data` as pretty-printed JSON into `filename`.

    Missing parent directories are created. Non-JSON-native values are
    encoded via LogEncoder.

    Args:
        filename (str): Path of the file to write.
        data (dict): Data to save to file.

    """
    target = pathlib.Path(filename)
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(str(target), 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True, cls=LogEncoder)
def get_metadata():
    """Get metadata about the main script.

    The goal of this function is to capture the additional information needed
    to re-run an experiment, assuming that the launcher script that started the
    experiment is located in a clean git repository.

    Returns:
        tuple[str, dict[str, str]]:
          * Absolute path to root directory of launcher's git repo.
          * Directory containing:
            * githash (str): Hash of the git revision of the repo the
                experiment was started from. "-dirty" will be appended to this
                string if the repo has uncommitted changes. May not be present
                if the main script is not in a git repo.
            * launcher (str): Relative path to the main script from the base of
                the repo the experiment was started from. If the main script
                was not started from a git repo, this will instead be an
                absolute path to the main script.

    """
    main_file = getattr(main, '__file__', None)
    # Interactive sessions have no __main__.__file__, hence no metadata.
    if not main_file:
        return None, {}
    main_file_path = os.path.abspath(main_file)
    try:
        # NOTE: check_output returns bytes, so git_root_path is bytes below.
        git_root_path = subprocess.check_output(
            ('git', 'rev-parse', '--show-toplevel'),
            cwd=os.path.dirname(main_file_path),
            stderr=subprocess.DEVNULL)
        git_root_path = git_root_path.strip()
    except subprocess.CalledProcessError:
        # This file is always considered not to exist.
        git_root_path = ''
    # We check that the path exists since in old versions of git the above
    # rev-parse command silently exits with 0 when run outside of a git repo.
    if not os.path.exists(git_root_path):
        return None, {
            'launcher': main_file_path,
        }
    launcher_path = os.path.relpath(bytes(main_file_path, encoding='utf8'),
                                    git_root_path)
    git_hash = subprocess.check_output(('git', 'rev-parse', 'HEAD'),
                                       cwd=git_root_path)
    git_hash = git_hash.decode('utf-8').strip()
    git_status = subprocess.check_output(('git', 'status', '--short'),
                                         cwd=git_root_path)
    git_status = git_status.decode('utf-8').strip()
    # Any output from `git status --short` means uncommitted changes exist.
    if git_status != '':
        git_hash = git_hash + '-dirty'
    return git_root_path, {
        'githash': git_hash,
        'launcher': launcher_path.decode('utf-8'),
    }
def make_launcher_archive(*, git_root_path, log_dir):
    """Saves an archive of the launcher's git repo to the log directory.

    Args:
        git_root_path (str): Absolute path to git repo to archive.
        log_dir (str): Absolute path to the log directory.

    """
    # NUL-separated list of tracked plus untracked-but-not-ignored files.
    git_files = subprocess.check_output(
        ('git', 'ls-files', '--others', '--exclude-standard', '--cached',
         '-z'),
        cwd=git_root_path).strip()
    repo_size = 0
    files_to_archive = []
    for f in git_files.split(b'\0'):
        try:
            file_size = os.stat(os.path.join(git_root_path, f)).st_size
            repo_size += file_size
            # Files of 8 MiB or more are skipped entirely.
            if file_size < EIGHT_MEBIBYTES:
                files_to_archive.append(f)
        except FileNotFoundError:
            pass
    if repo_size >= EIGHT_MEBIBYTES:
        warnings.warn('Archiving a launch repo larger than 8MiB. This may be '
                      'slow. Set archive_launch_repo=False in wrap_experiment '
                      'to disable this behavior.')
    archive_path = os.path.join(log_dir, 'launch_archive.tar.xz')
    # Feed the filtered file list to tar via stdin (still NUL-separated).
    subprocess.run(('tar', '--null', '--files-from', '-', '--xz', '--create',
                    '--file', archive_path),
                   input=b'\0'.join(files_to_archive),
                   cwd=git_root_path,
                   check=True)
class LogEncoder(json.JSONEncoder):
    """Encoder to be used as cls in json.dump."""

    def default(self, o):
        """Perform JSON encoding.

        Types, enum members and other callables are encoded as single-key
        dicts holding their fully qualified names; everything else falls
        back to the base encoder (which raises TypeError).

        Args:
            o (object): Object to encode.

        Returns:
            str: Object encoded in JSON.

        """
        # pylint: disable=method-hidden
        if isinstance(o, type):
            return {'$class': '{}.{}'.format(o.__module__, o.__name__)}
        if isinstance(o, enum.Enum):
            qualified = '{}.{}.{}'.format(o.__module__,
                                          o.__class__.__name__, o.name)
            return {'$enum': qualified}
        if callable(o):
            return {'$function': '{}.{}'.format(o.__module__, o.__name__)}
        return json.JSONEncoder.default(self, o)
| jaekyeom/IBOL | garaged/src/garage/experiment/experiment.py | experiment.py | py | 30,422 | python | en | code | 28 | github-code | 13 |
20839368163 | from django.conf.urls import url
from . import views
# URL routes for the storage app; each route name mirrors its view function.
# NOTE(review): `django.conf.urls.url` is deprecated in recent Django
# versions in favor of `re_path`/`path` -- confirm the project's Django
# version before migrating.
urlpatterns = [
    url(r'^update/stock_info$', views.update_stock_info, name='update_stock_info'),
    url(r'^update/history$', views.update_history, name='update_history'),
    url(r'^update/tick_data$', views.update_tick_data, name='update_tick_data'),
    url(r'^update/fundamental$', views.update_fundamental, name='update_fundamental'),
    url(r'^info/update_time$', views.update_time, name='update_time'),
]
| flychensc/orange | storage/urls.py | urls.py | py | 471 | python | en | code | 1 | github-code | 13 |
1693497793 | __author__ = 'kwheelerj'
# Show how to implement a queue using two stacks. Analyze the running time of the queue operations.
class Queue:
    """FIFO queue implemented with two bounded LIFO stacks.

    Elements live either on ``enqueue_stack`` (arrival order, oldest at the
    bottom) or on ``dequeue_stack`` (reversed, oldest on top). Each public
    operation first moves every element onto the appropriate stack, so both
    enqueue and dequeue cost O(n) underlying stack operations (the transfer
    methods print the operation count for the exercise's analysis).
    """

    def __init__(self, length):
        # Fixed capacity shared by both underlying stacks.
        self.length = length
        self.enqueue_stack = Stack(length)
        self.dequeue_stack = Stack(length)

    def enqueue(self, value):
        """Append `value`; return False (printing a message) on overflow."""
        self.transfer_to_enqueue_stack()
        if self.enqueue_stack.push(value):
            return True
        print("Queue Overflow Error")
        return False

    def dequeue(self):
        """Remove and return the oldest value, or None if the queue is empty."""
        self.transfer_to_dequeue_stack()
        if self.is_empty():
            print("Queue is empty")
        return self.dequeue_stack.pop()

    def transfer_to_enqueue_stack(self):
        """Move every element onto enqueue_stack (restores arrival order)."""
        count = 0
        while not self.dequeue_stack.is_empty():
            value = self.dequeue_stack.pop()
            self.enqueue_stack.push(value)
            count += 1
        print('\t\tnumber of ops: {}'.format(count))

    def transfer_to_dequeue_stack(self):
        """Move every element onto dequeue_stack (oldest ends up on top)."""
        count = 0
        while not self.enqueue_stack.is_empty():
            value = self.enqueue_stack.pop()
            self.dequeue_stack.push(value)
            count += 1
        print('\t\tnumber of ops: {}'.format(count))

    def is_empty(self):
        """Return True when the queue holds no elements.

        BUG FIX: the original only checked ``dequeue_stack``, so a queue
        whose elements were currently on ``enqueue_stack`` (i.e. right
        after an enqueue) was wrongly reported as empty.
        """
        return (self.dequeue_stack.is_empty()
                and self.enqueue_stack.is_empty())

    def disp(self):
        """Print all `length` slots from oldest to newest (empty slots show None)."""
        if self.enqueue_stack.is_empty():
            for i in range(self.length):
                print(self.dequeue_stack.data[self.length - 1 - i], end=', ')
        else:
            for i in range(self.length):
                print(self.enqueue_stack.data[i], end=', ')
        print()
        print('*' * 35)
class Stack:
    """Bounded LIFO stack backed by a fixed-size list.

    ``data``, ``top`` and ``length`` are read directly by Queue.disp, so
    their names and meanings are part of this class's interface.
    """

    def __init__(self, length):
        self.length = length
        self.data = [None] * length
        self.top = -1  # index of the current top element; -1 means empty

    def push(self, x):
        """Push `x`; return False (printing a message) when the stack is full."""
        if self.top == self.length - 1:
            print("\tUnderlying Stack Overflow Error")
            return False
        self.top += 1
        self.data[self.top] = x
        return True

    def pop(self):
        """Pop and return the top element, or None (printing a message) when empty."""
        if self.is_empty():
            print("\tUnderlying Stack empty")
            return None
        # Clear the vacated slot so disp() shows None for unused positions.
        value, self.data[self.top] = self.data[self.top], None
        self.top -= 1
        return value

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return self.top < 0
if __name__ == '__main__':
    # Manual smoke test: exercise enqueue/dequeue across the capacity and
    # emptiness boundaries, printing the queue state after each step.
    q = Queue(6)
    q.disp()
    q.enqueue(1)
    q.disp()
    q.dequeue()
    q.disp()
    q.enqueue(1)
    q.disp()
    q.enqueue(2)
    q.enqueue(3)
    q.enqueue(4)
    q.enqueue(5)
    q.enqueue(6)
    q.disp()
    # Seventh enqueue overflows the capacity-6 queue.
    q.enqueue(7)
    q.disp()
    q.dequeue()
    q.disp()
    q.dequeue()
    q.dequeue()
    q.dequeue()
    q.dequeue()
    q.dequeue()
    q.disp()
    # Dequeue from an already-empty queue.
    q.dequeue()
    q.disp()
    q.enqueue(1)
    q.disp()
| kwheelerj/IntroToAlgorithms | Chapter10_ElementaryDataStructures/section_1/Exc_10.1-6.py | Exc_10.1-6.py | py | 2,243 | python | en | code | 0 | github-code | 13 |
71201508817 | from django.contrib.auth.models import Permission
from django.test import TestCase, Client
from django.urls import reverse
from accounts.models import Account
from .models import Lesson
class LessonTestCase(TestCase):
    """Access control for the lesson detail view.

    Hidden lessons must only be reachable by users holding the
    "Can see hidden lesson" permission.
    """

    def setUp(self):
        hidden_perm = Permission.objects.get(name='Can see hidden lesson')
        self.lesson1 = Lesson.objects.create(index='0', is_visible=True)
        self.lesson2 = Lesson.objects.create(index='1', is_visible=False)
        self.user1 = Account.objects.create_user('user1', is_active=True)
        self.user2 = Account.objects.create_user('user2', is_active=True)
        self.user2.user_permissions.add(hidden_perm)
        self.client = Client()

    def _detail_status(self, lesson):
        """Return the HTTP status code of the detail view for *lesson*."""
        url = reverse('lessons:detail', args=(lesson.index,))
        return self.client.get(url).status_code

    def test_user_without_perm(self):
        """A regular user may only reach visible lessons."""
        self.client.force_login(self.user1)
        self.assertEqual(self._detail_status(self.lesson1), 200)
        self.assertEqual(self._detail_status(self.lesson2), 404)

    def test_user_with_perm(self):
        """A user with the permission may also reach hidden lessons."""
        self.client.force_login(self.user2)
        self.assertEqual(self._detail_status(self.lesson1), 200)
        self.assertEqual(self._detail_status(self.lesson2), 200)
| bugulin/gymgeek-web | lessons/tests.py | tests.py | py | 1,706 | python | en | code | 0 | github-code | 13 |
35951180326 | from python import *
from python.cellfft import *
import sys, os, shutil
import subprocess
# Directory where generated assembly/binaries are written (recreated per run).
OUT_DIR='out'
# Location of the host-side C++ test harness sources.
CPP_DIR=os.path.join('test', 'twiddle')
# Target AMD GPU architecture passed to the toolchain.
ARCH="gfx908"
def run_cmd(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr = subprocess.STDOUT)
try:
(out, _) = p.communicate()
if p.returncode != 0:
print('run fail:{}'.format(" ".join(cmd)))
print('{}'.format(out.decode('utf-8')))
return False
print('{}'.format(out.decode('utf-8')), end='')
return True
except Exception as e:
print('fail to run cmd:{}'.format(" ".join(cmd)))
print('err:{}'.format(e))
return False
def emit_kernel_header(mc, kernel_name, covx):
    """Emit the assembler preamble for one kernel symbol.

    covx selects the code-object ABI: 'cov3' adds .globl/.type
    directives, 'cov2' adds the legacy .amdgpu_hsa_kernel directive.
    """
    is_cov3 = covx == 'cov3'
    mc.emit('.text')
    if is_cov3:
        mc.emit('.globl {}'.format(kernel_name))
    mc.emit('.p2align 8')
    if is_cov3:
        mc.emit('.type {},@function'.format(kernel_name))
    if covx == 'cov2':
        mc.emit('.amdgpu_hsa_kernel {}'.format(kernel_name))
    mc.emit('{}:'.format(kernel_name))
def test_fft():
    """Emit twiddle FFT kernels for several radices, assemble them, build
    the host harness against the resulting hsaco, and run fwd/bwd tests.
    """
    asm_target = os.path.join(OUT_DIR, "twiddle.s")
    emitter = mc_emit_to_file_t(asm_target)
    arch = amdgpu_arch_config_t({'arch' : amdgpu_string_to_arch(ARCH) })
    # create mc
    mc = mc_asm_printer_t(emitter, arch)
    mc_set_current(mc)
    hsa_header_t(mc).emit()
    kernel_info_list = []
    def emit_fft(n, is_fwd):
        # Emit one radix-n kernel (forward or backward butterfly).
        kernel_func = 'twiddle_fft{}_{}'.format(n, 'fwd' if is_fwd else 'bwd')
        # fft_t is callable later to emit the butterfly body itself.
        fft = fft_t(mc, ctrl_fft_t(n, 0, BUTTERFLY_DIRECTION_FORWARD if is_fwd else BUTTERFLY_DIRECTION_BACKWARD), True)
        emit_kernel_header(mc, kernel_func, 'cov3')
        def get_kernel_code():
            # Register/LDS budget for this throwaway test kernel.
            kernel_code = amdgpu_kernel_code_t({
                'enable_sgpr_kernarg_segment_ptr'   :   1,
                'enable_sgpr_workgroup_id_x'        :   1,
                'enable_vgpr_workitem_id'           :   0,
                'workgroup_group_segment_byte_size' :   0,
                'kernarg_segment_byte_size'         :   16,
                'wavefront_sgpr_count'              :   100,
                'workitem_vgpr_count'               :   100})    # this is test kernel so just let this value big enough
            return kernel_code
        def get_kernel_args():
            '''
            float *p_in;
            float *p_out;
            '''
            kas = []
            # name: {}, .size: {}, .offset: {}, .value_kind: {}, .value_type
            kas.append(amdgpu_kernel_arg_t('p_in'  , 8,  0, 'global_buffer','f32',address_space='global',is_const='true'))
            kas.append(amdgpu_kernel_arg_t('p_out' , 8,  8, 'global_buffer','f32',address_space='global',is_const='false'))
            return kas
        def get_kernel_info():
            kernel_code = get_kernel_code()
            kernel_args = get_kernel_args()
            kernel_info = amdgpu_kernel_info_t(kernel_code, kernel_func, 256, kernel_args)
            return kernel_info
        kernel_info_list.append(get_kernel_info())
        label_end = f"{kernel_func}_end"
        # Symbolic register layout for the hand-written body below.
        mc.emit(f".set s_ka, 0")
        mc.emit(f".set s_bx, 2")
        mc.emit(f".set s_in, 4")
        mc.emit(f".set s_out, 8")
        mc.emit(f".set v_tid, 0")
        mc.emit(f".set v_pt, 0 ; for simplicity, give 64 vgpr for this twiddle")
        mc.emit(f".set v_tmp, 64 ; for simplicity, give 64 vgpr for this twiddle")
        mc.emit(f"")
        # Load the in/out pointers from kernargs and build buffer descriptors.
        mc.emit(f"s_load_dwordx2 s[s_in:s_in+1], s[s_ka:s_ka+1], 0")
        mc.emit(f"s_load_dwordx2 s[s_out:s_out+1], s[s_ka:s_ka+1], 8")
        mc.emit(f"s_mov_b32 s[s_in+2], 0xffffffff")
        mc.emit(f"s_mov_b32 s[s_in+3], 0x27000")
        mc.emit(f"s_mov_b32 s[s_out+2], 0xffffffff")
        mc.emit(f"s_mov_b32 s[s_out+3], 0x27000")
        mc.emit(f"s_waitcnt lgkmcnt(0)")
        mc.emit(f"")
        # Only workgroup 0 / lane 0 does the work; everything else skips to the end.
        mc.emit(f"s_cmp_eq_u32 0, s[s_bx]")
        mc.emit(f"s_cbranch_scc0 {label_end}")
        mc.emit(v_cmpx_eq_u32(0, "v_tid"))
        mc.emit(f"v_mov_b32 v[v_tmp], 0")
        # Load 2*n floats (n complex points) into consecutive vgprs.
        for i in range(n * 2):
            # mc.emit(f"buffer_load_dword v[v_pt + {i}], v[v_tmp], s[s_in:s_in+3], 0, offen offset:0")
            # mc.emit(v_add_nc_u32("v_tmp", "v_tmp", 4))
            # mc.emit(f"buffer_load_dword v[v_pt + {i}], v[v_tmp], s[s_in:s_in+3], 0, offen offset:{i * 4}")
            mc.emit(f"global_load_dword v[v_pt + {i}], v[v_tmp], s[s_in:s_in+1] offset:{i * 4}")
        mc.emit(f"s_waitcnt vmcnt(0)")
        mc.emit(f";----------------")
        # Emit the actual FFT butterfly over the loaded registers.
        mc.emit(fft("v_pt", "v_tmp"))
        mc.emit(f";----------------")
        mc.emit(f"v_mov_b32 v[v_tmp], 0")
        # Store the transformed 2*n floats back out.
        for i in range(n * 2):
            # mc.emit(f"buffer_store_dword v[v_pt + {i}], v[v_tmp], s[s_out:s_out+3], 0, offen offset:0")
            # mc.emit(v_add_nc_u32("v_tmp", "v_tmp", 4))
            # mc.emit(f"buffer_store_dword v[v_pt + {i}], v[v_tmp], s[s_out:s_out+3], 0, offen offset:{i * 4}")
            mc.emit(f"global_store_dword v[v_tmp], v[v_pt + {i}],s[s_out:s_out+1] offset:{i * 4}")
        mc.emit(f"s_waitcnt vmcnt(0)")
        mc.emit(f"s_mov_b64 exec, -1")
        mc.emit(f"{label_end}:")
        mc.emit(f"s_endpgm")
        mc.emit(f"")
        amd_kernel_code_t(mc, get_kernel_info()).emit()
        mc.emit(f"")
        mc.emit(f"")
    # One forward and one backward kernel per radix.
    radix_list = [4, 8, 16, 32]
    for radix in radix_list:
        emit_fft(radix, True)
        emit_fft(radix, False)
    amdgpu_metadata_t(mc, kernel_info_list).emit()
    # compile device code
    ass = compile_asm_t(mc, mc.emitter.file_name)
    rtn = ass.compile()
    if not rtn:
        assert False
    disass = compile_disass_t(mc, ass.target_hsaco)
    rtn = disass.compile()
    if not rtn:
        assert False
    # compile host code
    cpp_src = os.path.join(CPP_DIR, "twiddle_test.cpp")
    target_exe = os.path.join(OUT_DIR, 'twiddle_test.exe')
    builder = compile_host_t(arch, cpp_src, target_exe)
    rtn = builder.compile(cxxflags=['-DHSACO=\"{}\"'.format(ass.target_hsaco),
                '-I{}'.format(os.path.join('test', 'common')) ])
    if not rtn:
        assert False
    # Single pass over all radices (while/break keeps the loop re-runnable by hand).
    while True:
        for radix in radix_list:
            # run this exe
            cmd = [target_exe, f"{radix}", "fwd"]
            run_cmd(cmd)
            cmd = [target_exe, f"{radix}", "bwd"]
            run_cmd(cmd)
        break
if __name__ == '__main__':
    # Start from a clean output directory on every run.
    if os.path.exists(OUT_DIR):
        shutil.rmtree(OUT_DIR)
    os.mkdir(OUT_DIR)
    test_fft()
| ROCmSoftwarePlatform/MISA | test/twiddle/twiddle_test.py | twiddle_test.py | py | 6,689 | python | en | code | 29 | github-code | 13 |
37984860572 | #!/usr/bin/env python3
from threading import Condition
import time
from dr_hardware_tests.flight_predicate import is_offboard_mode
from dr_hardware_tests.flight_helpers import enter_offboard_mode
import rospy
from dr_hardware_tests import Drone, SensorSynchronizer, SensorData, flight_helpers, sleep
from dr_hardware_tests import FlightMode
from dr_hardware_tests import is_user_ready_to_start, start_RC_failsafe
from dr_hardware_tests.Drone import Drone
from dr_hardware_tests import SetpointSender
def log(msg):
    """Send *msg* to the ROS log, prefixed with this test's name."""
    rospy.loginfo("arming test: {}".format(msg))
def is_armed(data: SensorData):
    """True when a state message has arrived and it reports the vehicle armed."""
    state = data.state
    if state:
        return state.armed
    return False
def is_disarmed(data: SensorData):
    """True when a state message has arrived and it reports the vehicle disarmed."""
    state = data.state
    if state:
        return not state.armed
    return False
def send_velocity_zero_setpoints(drone: Drone):
    """Start a SetpointSender streaming a zero-velocity setpoint.

    NOTE(review): presumably PX4 requires a setpoint stream before it will
    accept OFFBOARD mode -- confirm against enter_offboard_mode's docs.
    """
    log("creating SetpointSender")
    setpoint_sender: SetpointSender = SetpointSender(drone=drone)
    log("starting SetpointSender")
    setpoint_sender.start()
    log("done starting SetpointSender")
    # Hold position: zero velocity on all three axes.
    setpoint_sender.velocity = 0.0, 0.0, 0.0
def main():
    """Arming test: arm, enter offboard mode, trigger the RC failsafe,
    then disarm, blocking on sensor confirmation at each step."""
    drone, sensors = flight_helpers.start_drone_io()
    send_velocity_zero_setpoints(drone)
    sleep(1.75)
    log("waiting for user to start the test with the RC transmitter")
    sensors.await_condition(is_user_ready_to_start)
    log("sending arm command")
    drone.arm()
    log("waiting for sensors to indicate that we're armed")
    sensors.await_condition(is_armed, 30)
    # t is the time enter_offboard_mode took; used to budget the wait below.
    t = enter_offboard_mode(drone, sensors)
    log("starting RC failsafe trigger")
    start_RC_failsafe(sensors)
    # Stay in the test state long enough for the failsafe to act (>= 5 s,
    # or whatever remains of a ~9.5 s window after entering offboard).
    sleep(max(9.5 - t, 5))
    log("sending disarm command")
    drone.disarm()
    log("waiting for sensors to indicate that we're disarmed")
    sensors.await_condition(is_disarmed, 30)
    log("SUCCESS")
if __name__ == "__main__":
    rospy.init_node("test_arm")
    main()
    # Tear the ROS node down explicitly once the test completes.
    rospy.signal_shutdown("arming test: finished")
| DroneResponse/hardware-tests | nodes/arm.py | arm.py | py | 1,918 | python | en | code | 0 | github-code | 13 |
38089302508 | from drawer_v1 import Drawer
from vec_v1 import Vec, Vec3
from line_v1 import Line, vec_prod
from utils import read_points, prepare_points, add_prepare_args, init_tk_drawer
import argparse
import math
# Help text shown by argparse (--help).
DESCRIPTION = '''
Program to draw wurfs for contours
'''
def parse_args():
    """Parse the command line.

    Note: wurfs_method and diff_method are matched against *string* codes
    downstream (prepare_and_calc_wurfs_points / calc_diff), so their
    defaults must be strings.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=DESCRIPTION,
    )
    parser.add_argument('-f', '--files', type=str, nargs="+", required=True, help='input file name')
    parser.add_argument('-f2', '--files_other', type=str, nargs="+", help='input file name for other files to calc diff')
    parser.add_argument('-n', '--no_image', action="store_true", help='not to draw image')
    parser.add_argument('-dps', '--diff_points_share', type=float, default=0.4, help='share of points to use in diff')
    # Bug fix: these defaults were the int 1, which never matched any of the
    # string method codes and made the default invocation raise IOError.
    parser.add_argument('-wm', '--wurfs_method', default='1', help='index of wurfs_method to use')
    parser.add_argument('-dm', '--diff_method', default='1', help='index of diff method to use')
    parser.add_argument('-nl', '--normalize_length', action="store_true", default=False, help='use length normalizing')
    parser.add_argument('-ws', '--wurfs_skip', type=int, help='count of points to skip in wurfs')
    parser.add_argument('-um', '--use_metrics', type=int, default=0, help='use metrics')
    add_prepare_args(parser)
    parsed_args = parser.parse_args()
    return parsed_args
def vec_div(v1, v2):
    """Scalar ratio v1/v2 for collinear vectors.

    Uses whichever component of v1 is larger in magnitude, to avoid
    dividing by a near-zero coordinate.
    """
    use_x = abs(v1.x) >= abs(v1.y)
    if use_x:
        return v1.x / v2.x
    return v1.y / v2.y
def wurf(p1, p2, p3, p4):
    """Cross-ratio ("wurf") of four collinear points."""
    left = vec_div(p3 - p1, p3 - p2)
    right = vec_div(p4 - p1, p4 - p2)
    return left / right
def calc_wurfs(five_points):
    """Pair of projective invariants for five points: the wurf computed
    left-to-right and the one computed on the reversed point order."""
    def one_sided(pts):
        p1, p2, p3, p4, p5 = pts
        axis = Line(p1, p4)
        q1 = axis.intersect(Line(p2, p5))
        q2 = axis.intersect(Line(p3, p5))
        return wurf(p1, p4, q1, q2)
    return one_sided(five_points), one_sided(five_points[::-1])
def get_colour(index):
    """Cycle through a small fixed palette by index."""
    palette = ('blue', 'green', 'red', 'pink')
    return palette[index % len(palette)]
def calc_average(points):
    """Arithmetic mean of a non-empty sequence of 2-D vectors."""
    total = Vec(0, 0)
    for point in points:
        total = total + point
    return total / len(points)
def calc_perimeter(points, cyclic=True):
    """Total length of the polyline through *points*.

    With cyclic=True (the default) the closing edge from the last point
    back to the first is included.  Fewer than two points have length 0.
    """
    if len(points) < 2:
        return 0
    edges = zip(points, points[1:])
    length = sum(abs(b - a) for (a, b) in edges)
    if cyclic:
        length += abs(points[-1] - points[0])
    return length
def calc_complex_correlation(points1, points2):
    """Complex correlation of two polylines' edge vectors.

    Each consecutive pair of points becomes a complex edge vector
    x + iy; the result is sum(e1 * conj(e2)) over corresponding edges.
    """
    zipped = list(zip(points1, points2))
    # When both contours have the same number of points, append the first
    # pair again so the closing edges of the loops contribute as well.
    if len(points1) == len(points2):
        zipped.append((points1[0], points2[0]))
    corr = complex(0)
    for (p11, p21), (p12, p22) in zip(zipped, zipped[1:]):
        def c_diff(v1, v2):
            # Edge from v1 to v2 encoded as a complex number.
            d = v2 - v1
            return complex(d.x, d.y)
        c1 = c_diff(p11, p12)
        c2 = c_diff(p21, p22)
        corr += c1 * c2.conjugate()
    return corr
def toVec(ar):
    """Convert a length-2 sequence into a Vec."""
    assert len(ar) == 2
    x, y = ar
    return Vec(x, y)
def toVec3(ar):
    """Convert a length-3 sequence into a Vec3.

    Bug fix: previously constructed a 2-D ``Vec`` from three components,
    despite the name, the length-3 assertion, and ``Vec`` being used with
    two arguments everywhere else in this module.
    """
    assert(len(ar) == 3)
    return Vec3(ar[0], ar[1], ar[2])
def cyclic_shifts(points):
    """All cyclic rotations of *points*, starting with the original order."""
    return [points[start:] + points[:start] for start in range(len(points))]
def prepare_and_calc_wurfs_points(points, args):
    """Map a contour to a sequence of invariant points.

    The invariant family is selected by args.wurfs_method (a *string*
    code; the commented-out codes are the old numbering).  Most methods
    emit one invariant per cyclic shift of the contour.  Raises IOError
    for an unknown method code.
    """
    points = prepare_points(points, args)
    inv_points = []
    # if args.wurfs_method in ["201"]:
    if args.wurfs_method in ["12"]:
        # Projective wurf of five equally spaced contour points.
        skip = len(points) // 5
        if args.wurfs_skip:
            skip = args.wurfs_skip
        if len(points) >= 1 + 4*skip:
            for shifted in cyclic_shifts(points):
                five_points = shifted[:5*skip:skip]
                x, y = calc_wurfs(five_points)
                inv_points.append(Vec(x, y) / 2)
    # elif args.wurfs_method in ["202"]:
    elif args.wurfs_method in ["13"]:
        # Wurf built from intersections of tangent-like lines.
        skip = len(points) // 4
        if len(points) >= 1 + 3*skip:
            for shifted in cyclic_shifts(points):
                def get_line(ind):
                    # Line through point ind, directed along the chord of its neighbours.
                    next = (ind + 1)%len(shifted)
                    return Line(shifted[ind], shifted[next] - shifted[ind-1])
                try:
                    p3 = shifted[0]
                    l1 = get_line(0)
                    l2 = get_line(skip)
                    ipt1 = shifted[skip]
                    l3 = get_line(2*skip)
                    l4 = get_line(3*skip)
                    ipt2 = shifted[2*skip]
                    ipt3 = shifted[3*skip]
                    p2 = l1.intersect(l2)
                    p1 = l2.intersect(l3)
                    p5 = l3.intersect(l4)
                    p4 = l4.intersect(l1)
                    # x, y = calc_wurfs([p1,p2,p3,p4,p5])
                    x, y = calc_wurfs([p2, ipt1, ipt2, ipt3 ,p4])
                    inv_points.append(Vec(x,y) / 2)
                except:
                    # Degenerate configuration (e.g. parallel lines): skip this shift.
                    pass
    # elif args.wurfs_method in ["203"]:
    elif args.wurfs_method in ["5", "TR"]:
        # euclidian invariant, triangle lengths
        norm_len = calc_perimeter(points)
        skip = len(points) // 3
        if args.wurfs_skip:
            skip = args.wurfs_skip
        if len(points) >= 1 + 2*skip:
            for shifted in cyclic_shifts(points):
                x, y, z = shifted[:3*skip:skip]
                if args.normalize_length:
                    inv_points.append(Vec3(abs(x-y), abs(x-z), abs(y-z)) / norm_len)
                else:
                    inv_points.append(Vec3(abs(x-y), abs(x-z), abs(y-z)))
    # elif args.wurfs_method in ["204"]:
    elif args.wurfs_method in ["6", "CR1"]:
        # euclidian invarian
        skip = len(points) // 7
        if args.wurfs_skip:
            skip = args.wurfs_skip
        if len(points) >= 1 + 6*skip:
            norm_len = calc_perimeter(points)
            for shifted in cyclic_shifts(points):
                d = []
                for sk in 0.5*skip, 1*skip, 2*skip:
                    sk = int(sk)
                    # Chord across the base point, rescaled so spans are comparable.
                    d.append(abs(shifted[sk] - shifted[-sk])*skip/sk)
                if args.normalize_length:
                    inv_points.append(Vec3(d[0], d[1], d[2]) / norm_len)
                else:
                    inv_points.append(Vec3(d[0], d[1], d[2]))
    # elif args.wurfs_method in ["205"]:
    elif args.wurfs_method in ["7", "CR2"]:
        #euclidian invarian
        skip = len(points) // 4
        if args.wurfs_skip:
            skip = args.wurfs_skip
        if len(points) >= 1 + 3*skip:
            norm_len = calc_perimeter(points)
            for shifted in cyclic_shifts(points):
                d = []
                d.append(abs(shifted[0] - shifted[2*skip]))
                d.append(abs(shifted[skip] - shifted[-skip]))
                if args.normalize_length:
                    inv_points.append(Vec(d[0], d[1]) / norm_len)
                else:
                    inv_points.append(Vec(d[0], d[1]))
    # elif args.wurfs_method in ["206"]:
    elif args.wurfs_method in ["1", "NO"]:
        # place of points :)  (identity: the prepared points themselves)
        inv_points = points[:]
    # elif args.wurfs_method in ["207"]:
    elif args.wurfs_method in ["8", "CUR"]:
        # curvative and curvative derivative
        skip = len(points) // 10
        if args.wurfs_skip:
            skip = args.wurfs_skip
        if len(points) >= 1 + 4*skip:
            norm_len = calc_perimeter(points)
            for shifted in cyclic_shifts(points):
                # Discrete second difference (curvature proxy) and its change.
                curvative = (shifted[skip] - shifted[0]) - (shifted[0] - shifted[-skip])
                curvative_next = (shifted[2*skip] - shifted[skip]) - (shifted[skip] - shifted[0])
                d = []
                d.append(abs(vec_prod(curvative, (shifted[skip] - shifted[0]))))
                d.append(abs(vec_prod(curvative_next - curvative, (shifted[skip] - shifted[0])))*abs(shifted[skip] - shifted[0]))
                # inv_points.append(Vec3(d[0], d[1], d[2]))
                # inv_points.append(Vec3(d[0], d[1], d[2]) / norm_len)
                if args.normalize_length:
                    inv_points.append(Vec(d[0], d[1]) * 1000 / norm_len)
                else:
                    inv_points.append(Vec(d[0], d[1]))
    # elif args.wurfs_method in ["208"]:
    elif args.wurfs_method in ["3", "TAN"]:
        # tangent angle
        # NOTE(review): skip is computed but unused by the loop below.
        skip = 1
        if args.wurfs_skip:
            skip = args.wurfs_skip
        for p1, p2 in zip(points, points[1:] + points[:1]):
            # Unit direction of each edge (polygonal tangent).
            inv_points.append((p2-p1)/abs(p2-p1))
    # elif args.wurfs_method in ["209"]:
    elif args.wurfs_method in ["2", "MASS"]:
        # center of mass
        av = calc_average(points)
        inv_points += [av] * 5
        # following code can break on single value
        # repeating doesn't change result metrics
    # elif args.wurfs_method in ["2010"]:
    elif args.wurfs_method in ["4", "M1"]:
        # normalized average
        av = calc_average(points)
        for p in points:
            inv_points.append(p - av)
    # elif args.wurfs_method in ["2011"]:
    elif args.wurfs_method in ["10", "M1.5"]:
        # normalized average and (av_x**2) and (av_y**2)
        av = calc_average(points)
        norm_av_points = [p - av for p in points]
        def av_sq(nums):
            # Root-mean-square of a list of numbers.
            return math.sqrt(sum([n**2 for n in nums])/len(nums))
        av_x_squared = av_sq([p.x for p in norm_av_points])
        av_y_squared = av_sq([p.y for p in norm_av_points])
        for p in norm_av_points:
            inv_points.append(Vec(p.x/av_x_squared, p.y/av_y_squared))
    # elif args.wurfs_method in ["2012"]:
    elif args.wurfs_method in ["11", "M2"]:
        # normalized average and second momentum. ortomatrix invariant
        av = calc_average(points)
        norm_av_points = [p - av for p in points]
        def av_num(nums):
            return sum(nums) / len(nums)
        av_xx = av_num([p.x**2 for p in norm_av_points])
        av_xy = av_num([p.x*p.y for p in norm_av_points])
        av_yy = av_num([p.y**2 for p in norm_av_points])
        # print(av_xx, av_xy, av_yy)
        v1 = av_xx - av_yy
        v2 = av_xy * 2
        if abs(v1) > 10**-6:
            #rotate
            # Rotate onto the principal axes so the cross moment vanishes.
            two_alpha = math.atan(v2/v1)
            alpha = two_alpha/2
            s = math.sin(alpha)
            c = math.cos(alpha)
            # print(sum( (c*p.x + s*p.y)*(-s*p.x + c*p.y) for p in norm_av_points))
            norm_av_points = [Vec(c*p.x + s*p.y, -s*p.x + c*p.y) for p in norm_av_points]
        # Recompute moments (unchanged when no rotation happened) and
        # scale each axis to unit second moment.
        av_xx = av_num([p.x**2 for p in norm_av_points])
        av_xy = av_num([p.x*p.y for p in norm_av_points])
        av_yy = av_num([p.y**2 for p in norm_av_points])
        # print(av_xx, av_xy, av_yy)
        norm_av_points = [Vec(p.x/math.sqrt(av_xx), p.y/math.sqrt(av_yy)) for p in norm_av_points]
        # norm_av_points = prepare_points(norm_av_points, args)
        for p in norm_av_points:
            inv_points.append(p)
    # elif args.wurfs_method in ["2013"]:
    elif args.wurfs_method in ["9", "ACOR"]:
        # autocorr:
        diffs = [(p2 - p1) for p1, p2 in zip(points, points[1:] + points[:1])]
        diffs = [complex(d.x, d.y) for d in diffs]
        # if args.sqrt_len_optimization:
        diffs = [d/(abs(d)**0.5) for d in diffs]
        autocorrelations = []
        for diffs_shifted in cyclic_shifts(diffs):
            value = 0
            for d1, d2 in zip(diffs, diffs_shifted):
                value += d1 * d2.conjugate()
            autocorrelations.append(value)
        #norming
        coef = abs(autocorrelations[0])
        for v in autocorrelations:
            inv_points.append(Vec(v.real, v.imag)/coef)
    else:
        raise IOError("Unexpected method")
    # print(len(inv_points))
    return inv_points
def calc_and_draw_values(drawer, files, args, fill='green'):
    """Compute the invariant curve of each file, optionally plotting it.

    Returns one invariant point sequence per file.  When *drawer* is
    None nothing is drawn.
    """
    values = []
    for file_index, filename in enumerate(files):
        points = read_points(filename)
        wurfs_points = prepare_and_calc_wurfs_points(points, args)
        values.append(wurfs_points)
        if drawer is not None:
            # Draw the curve as circles connected by segments, starting
            # from the origin.
            prev_vec = Vec(0, 0)
            for vec in wurfs_points:
                drawer.draw_circle(vec, fill=fill)
                drawer.draw_line(prev_vec, vec, fill=fill)
                # drawer.draw_line(prev_vec, vec, fill=get_colour(file_index))
                prev_vec = vec
    return values
def calc_diff(wurfs1, wurfs2, args):
    """Distance between two invariant sequences, per args.diff_method
    (string codes; smaller means more similar).  Raises IOError for an
    unknown method code.
    """
    # if args.diff_method in ["301"]: # old numeration
    if args.diff_method in ["6", "AVMIN"]:
        # Mean of the smallest nearest-neighbour distances.
        distances = []
        for i in range(len(wurfs1)):
            i_distances = []
            for j in range(len(wurfs2)):
                i_distances.append(abs(wurfs1[i] - wurfs2[j]))
            distances.append(min(i_distances))
        distances.sort()
        # Only the closest diff_points_share fraction contributes.
        used_diff_count = int(len(wurfs1) * args.diff_points_share)
        distances_part = distances[:used_diff_count]
        return 1000 * (sum(distances_part) / len(distances_part))
        # return 1000 * (sum(distances_part) / len(distances_part) - sum(distances)/len(distances)/100)
    # elif args.diff_method in ["302"]:
    elif args.diff_method in ["7", "AVC"]:
        # Best pointwise distance over all cyclic alignments of wurfs2.
        dists = []
        for shifted in cyclic_shifts(wurfs2):
            dist = 0
            diff_pt_count = int(min(len(wurfs1), len(wurfs2)) * args.diff_points_share)
            # Compare symmetric prefixes/suffixes of the two sequences.
            for j in range(diff_pt_count // 2):
                dist += abs(wurfs1[j] - shifted[j])
                dist += abs(wurfs1[-j] - shifted[-j])
            dists.append(dist)
        return min(dists)
    # elif args.diff_method in ["303"]:
    elif args.diff_method in ["1", "AV"]:
        #trivial
        # Plain pointwise distance over a prefix of the paired sequences.
        zipped = list(zip(wurfs1, wurfs2))
        used_diff_count = int(len(zipped) * args.diff_points_share)
        dist = 0
        for w1, w2 in zipped[:used_diff_count]:
            dist += abs(w2 - w1)
        return dist
    # elif args.diff_method in ["304"]:
    elif args.diff_method in ["2", "DYN"]:
        # dynamic
        # not really correct with wurfs1[0] ~ wurfs2[0]
        path_dist = [[-1]*len(wurfs2) for i in range(len(wurfs1))]
        def w_dist(i, j):
            return abs(wurfs1[i] - wurfs2[j])
        # First row: start anywhere on wurfs2.
        for j in range(len(wurfs2)):
            path_dist[0][j] = w_dist(0, j)
        # First column: forced to accumulate down wurfs1.
        for i in range(1, len(wurfs1)):
            path_dist[i][0] = path_dist[i-1][0] + w_dist(i, 0)
        for i in range(1, len(wurfs1)):
            for j in range(1, len(wurfs2)):
                prev_i = path_dist[i-1][j]
                # NB: subtracting w_dist(i, j-1) lets a horizontal move
                # replace (rather than add to) the previous cell's match.
                prev_j = path_dist[i][j-1] - w_dist(i, j-1)
                path_dist[i][j] = min(prev_i, prev_j) + w_dist(i, j)
        return min(path_dist[-1])
    # elif args.diff_method in ["305"]:
    elif args.diff_method in ["3", "DYN2"]:
        # dynamic with sqrt product
        # (idea from "computable elastic distances between shapes")
        # not really correct with wurfs1[0] ~ wurfs2[0]
        path_dist = [[-1]*len(wurfs2) for i in range(len(wurfs1))]
        prevs = [[None]*len(wurfs2) for i in range(len(wurfs1))]
        def w_dist(i, j, prev_i_j):
            dist = abs(wurfs1[i] - wurfs2[j])
            # for elasctic distance article. With wm = 8 (angles)
            # print(abs(wurfs1[i] - wurfs2[j]))
            # dist = -math.sqrt(1 - (abs(wurfs1[i] - wurfs2[j]))**2/4.01)
            if prev_i_j is None:
                return dist
            else:
                prev_i, prev_j = prev_i_j
                # Penalise skipped indices on either sequence.
                return dist \
                    * math.sqrt(i - prev_i + 1) \
                    * math.sqrt(j - prev_j + 1)
        path_dist[0][0] = w_dist(0, 0, None)
        for j in range(1, len(wurfs2)):
            path_dist[0][j] = float("Inf")
        for i in range(1, len(wurfs1)):
            path_dist[i][0] = path_dist[i-1][0] + w_dist(i, 0, (i-1, 0))
            prevs[i][0] = (i-1, 0)
        for i in range(1, len(wurfs1)):
            for j in range(1, len(wurfs2)):
                prev_i = path_dist[i-1][j]
                dist_i = prev_i + w_dist(i, j, (i-1, j))
                prev_j = path_dist[i][j-1] - w_dist(i, j-1, prevs[i][j-1])
                dist_j = prev_j + w_dist(i, j, prevs[i][j-1])
                if dist_i <= prev_j:
                    path_dist[i][j] = dist_i
                    prevs[i][j] = (i-1, j)
                else:
                    path_dist[i][j] = dist_j
                    prevs[i][j] = prevs[i][j-1]
        return path_dist[-1][-1]
    # elif args.diff_method in ["306"]:
    elif args.diff_method in ["5", "COVC"]:
        # ~covariance
        # Normalised cross-correlation, maximised over cyclic shifts.
        cors = []
        for shift in range(len(wurfs2)):
            shifted_wurfs2 = wurfs2[shift:] + wurfs2[:shift]
            cors.append(abs(calc_complex_correlation(wurfs1, shifted_wurfs2)))
        ac1 = abs(calc_complex_correlation(wurfs1, wurfs1))
        ac2 = abs(calc_complex_correlation(wurfs2, wurfs2))
        m_value = max(cors) / math.sqrt(ac1*ac2)
        return 1 - m_value
    # elif args.diff_method in ["307"]:
    elif args.diff_method in ["4", "COV"]:
        # ~covariance
        # Normalised cross-correlation without the shift search.
        cors = []
        cors.append(abs(calc_complex_correlation(wurfs1, wurfs2)))
        ac1 = abs(calc_complex_correlation(wurfs1, wurfs1))
        ac2 = abs(calc_complex_correlation(wurfs2, wurfs2))
        m_value = max(cors) / math.sqrt(ac1*ac2)
        return 1 - m_value
    else:
        raise IOError("unexpected method")
def calc_metrics(diff_values, args):
    """Aggregate a diff matrix into one score, per args.use_metrics.

    diff_values[i][j] is the distance between contour i of the first set
    and contour j of the second; the diagonal holds the true matches.
    For metrics 1-3 lower is better; metric 4 appears to be a ratio
    where higher is better (each term is largest when the match distance
    is small).  Raises IOError for an unknown metric.
    """
    err = 0
    n1 = len(diff_values)
    n2 = len(diff_values[0])
    if args.use_metrics == 1:
        for i in range(min(n1, n2)):
            j = i
            val = diff_values[i][j]
            # Distances of the competing (non-matching) row/column entries.
            col_other = [diff_values[n_i][j] for n_i in range(n1) if n_i != i]
            row_other = [diff_values[i][n_j] for n_j in range(n2) if n_j != j]
            # print(col_other, row_other)
            # print(err, val)
            err += (val/min(col_other))**2
            err += (val/min(row_other))**2
            # Heavy penalty when some non-match is at least as close as the match.
            if min(col_other) <= val:
                err += 100
            if min(row_other) <= val:
                err += 100
        return err / min(n1,n2)
    elif args.use_metrics == 2:
        for i in range(min(n1, n2)):
            j = i
            val = diff_values[i][j]
            col_other = [diff_values[n_i][j] for n_i in range(n1) if n_i != i]
            row_other = [diff_values[i][n_j] for n_j in range(n2) if n_j != j]
            # print(col_other, row_other)
            # print(err, val)
            # Same ratio as metric 1 but with a fourth power and no penalty term.
            err += (val/min(col_other))**4
            err += (val/min(row_other))**4
        return err / min(n1,n2)
    if args.use_metrics == 3:
        for i in range(min(n1, n2)):
            j = i
            val = diff_values[i][j]
            col_other = [diff_values[n_i][j] for n_i in range(n1) if n_i != i]
            row_other = [diff_values[i][n_j] for n_j in range(n2) if n_j != j]
            # print(col_other, row_other)
            # print(err, val)
            # err += (val/min(col_other))**2
            # err += (val/min(row_other))**2
            # Pure mismatch rate: fraction of diagonal entries beaten by a non-match.
            if min(col_other) <= val:
                err += 1.
            if min(row_other) <= val:
                err += 1.
        return err / (2*min(n1,n2))
    if args.use_metrics == 4:
        for i in range(min(n1, n2)):
            j = i
            val = diff_values[i][j]
            col_other = [diff_values[n_i][j] for n_i in range(n1) if n_i != i]
            row_other = [diff_values[i][n_j] for n_j in range(n2) if n_j != j]
            # print(col_other, row_other)
            # print(err, val)
            err += (min(col_other)/(val + min(col_other)))
            err += (min(row_other)/(val + min(row_other)))
        return err / (2*min(n1,n2))
    else:
        raise IOError("unexpected method")
def main():
    """Entry point: compute invariants for the input contours, optionally
    draw them in a Tk window, and when a second file set is given print
    the pairwise diff matrix (or its aggregated metric)."""
    args = parse_args()
    drawer = None
    if not args.no_image:
        tk, drawer = init_tk_drawer()
    values_for_files_1 = calc_and_draw_values(drawer, args.files, args)
    if args.files_other:
        values_for_files_2 = calc_and_draw_values(drawer, args.files_other, args, fill='blue')
        n1 = len(values_for_files_1)
        n2 = len(values_for_files_2)
        diff_values = [[0]*n2 for i in range(n1)]
        for i in range(n1):
            for j in range(n2):
                diff_values[i][j] = calc_diff(values_for_files_1[i], values_for_files_2[j], args)
        if args.use_metrics == 0:
            # Print the raw matrix as a tab-separated table with 1-based headers.
            print("\t".join([""] + [str(i+1) for i in range(n2)]))
            for i in range(n1):
                s = "{i}\t".format(i=i+1)
                for j in range(n2):
                    s += str(diff_values[i][j]) + "\t"
                print(s)
        else:
            print(calc_metrics(diff_values, args))
    # for p in points[::100]:
    #     drawer.draw_circle(p)
    def zoom( event):
        print("Hello windows/macos! Not-tested scaling.")
        drawer.scale(1.1 ** event.delta, event.x, event.y)
    def zoom_in( event):
        drawer.scale(1.1, event.x, event.y)
    def zoom_out( event):
        drawer.scale(1.1 ** (-1), event.x, event.y)
    if not args.no_image:
        # Mouse-wheel zoom: <MouseWheel> on Windows/macOS, Button-4/5 on X11.
        tk.bind("<MouseWheel>", zoom)
        tk.bind("<Button-4>", zoom_in)
        tk.bind("<Button-5>", zoom_out)
        tk.mainloop()

if __name__ == "__main__":
    main()
| savfod/contours_correspondence | code/draw_wurfs.py | draw_wurfs.py | py | 21,982 | python | en | code | 0 | github-code | 13 |
21786353070 | from volux import VoluxDemo
class DemoAudio(VoluxDemo):
    """Demo that routes live audio data to CLI output via Volux modules."""

    def __init__(self, *args, **kwargs):
        super().__init__(
            demo_name="Demo Audio",
            demo_method=self.run_demo,
            alias="audio",
            requirements=["voluxaudio"],
            *args,
            **kwargs
        )

    def run_demo(self):
        """Run the demo loop until the user interrupts with Ctrl+C."""
        self._check_reqs()
        # Imported lazily so the demo can be listed without voluxaudio installed.
        from time import sleep
        import volux
        import voluxaudio
        # create Volux Operator object (hub for communication between modules)
        vlx = volux.VoluxOperator()
        vlx.add_module(voluxaudio.VoluxAudio())
        vlx.add_module(volux.VoluxCliPrint())
        # Forward audio data to the CLI module 60 times per second.
        vlx.add_connection(volux.VoluxConnection(vlx.audio, vlx.cli, 60))
        try:
            # Alternate 10 s of syncing with a 4 s pause, forever.
            while True:
                vlx.start_sync()
                sleep(10)
                vlx.stop_sync()
                print("Ctrl+C to exit demo at any time")
                sleep(4)
        except KeyboardInterrupt:
            print("exiting...")
        finally:
            vlx.stop_sync()
            exit()
| DrTexx/Volux | volux/demos/audio.py | audio.py | py | 1,077 | python | en | code | 7 | github-code | 13 |
26790084131 | class Solution:
def jump(self, nums: List[int]) -> int:
    """Minimum number of jumps needed to reach the last index of nums.

    Greedy BFS-style sweep, O(n) time and O(1) space (replaces the
    previous O(n^2) DP).  Returns -1 when the last index is unreachable,
    matching the old implementation's contract.
    """
    n = len(nums)
    jumps = 0
    cur_end = 0   # furthest index reachable with `jumps` jumps
    farthest = 0  # furthest index reachable with `jumps` + 1 jumps
    for i in range(n - 1):
        if i + nums[i] > farthest:
            farthest = i + nums[i]
        if i == cur_end:
            if farthest == cur_end:
                # Stuck: nothing seen so far can move past this index.
                return -1
            jumps += 1
            cur_end = farthest
    return jumps
| forestphilosophy/LeetCode_solutions | Interview Questions/jump_game_ii.py | jump_game_ii.py | py | 455 | python | en | code | 0 | github-code | 13 |
34512084211 | import sys
import sqlite3
from sqlite3 import Error
class Database:
    """Thin query helpers over an SQLite card database.

    NOTE(review): methods are written without ``self``/``@staticmethod``
    and are presumably invoked as ``Database.fn(...)``; kept that way
    for caller compatibility.
    """

    def create_connection(db_file):
        """Open *db_file* and return the connection, or None on failure."""
        try:
            return sqlite3.connect(db_file)
        except Error as e:
            print(e)
            return None

    def top_cards(conn):
        """All non-basic cards as (Name, Number, Card_ID), ordered by card id."""
        sql = ''' SELECT Name, Number, Colors.Card_ID
                  FROM Colors
                  INNER JOIN Cards
                  ON Colors.Card_ID=Cards.Card_ID
                  WHERE Rarity!='B'
                  ORDER BY Colors.Card_ID'''
        return conn.cursor().execute(sql).fetchall()

    def top_cards_specific(conn, color):
        """Top 20 non-basic (Name, Number) rows of one color, highest first."""
        params = (color,)
        sql = ''' SELECT Name, Number
                  FROM Colors
                  INNER JOIN Cards
                  ON Colors.Card_ID=Cards.Card_ID
                  WHERE Rarity!='B'
                  AND Color=?
                  ORDER BY Number
                  DESC LIMIT 20'''
        return conn.cursor().execute(sql, params).fetchall()
| ohnoanarrow/Senior_Thesis | src/analysis/top_cards_db.py | top_cards_db.py | py | 1,443 | python | en | code | 1 | github-code | 13 |
72337482257 |
"""
"""
# Native
import os
import time
# 3rd-Party
from flask import Flask, request, jsonify, send_from_directory
# Proprietary
app = Flask(__name__)

# Upload storage lives at the filesystem root -- the process needs write
# access to '/' (typically only inside a container). TODO confirm intended.
UPLOAD_DIRECTORY = '/files'

# Create the storage directory at import time if it does not exist yet.
if not os.path.exists(UPLOAD_DIRECTORY):
    os.makedirs(UPLOAD_DIRECTORY)
@app.route('/')
def hello():
    """Trivial health-check endpoint."""
    return "hello", 200
@app.route('/files')
def files():
    """List the names of all regular files in the upload directory as JSON."""
    names = [
        entry
        for entry in os.listdir(UPLOAD_DIRECTORY)
        if os.path.isfile(os.path.join(UPLOAD_DIRECTORY, entry))
    ]
    return jsonify(names)
@app.route('/<filename>', methods=['POST'])
def upload(filename):
    """Store the uploaded file part under *filename* in the upload directory.

    Returns 201 on success, 400 for names that could escape the directory.
    """
    # Path-traversal hardening: the original rejected only '/', which still
    # let '.' and '..' through to os.path.join.
    if '/' in filename or filename in ('.', '..'):
        return "no subdirectories directories allowed", 400
    file = request.files['file']
    path = os.path.join(UPLOAD_DIRECTORY, filename)
    file.save(path)
    return '', 201
@app.route('/<filename>', methods=['GET'])
def download(filename):
    """Serve *filename* from the upload directory as an attachment download."""
    return send_from_directory(UPLOAD_DIRECTORY, filename, as_attachment=True)
@app.route('/<filename>', methods=['DELETE'])
def delete(filename):
    """Delete *filename* from the upload directory.

    Returns 200 on success and 500 when the file cannot be removed.
    """
    path = os.path.join(UPLOAD_DIRECTORY, filename)
    try:
        os.remove(path)
    except OSError:
        # Was a bare ``except`` that swallowed every exception type;
        # narrowed to the filesystem errors os.remove can actually raise.
        return '', 500
    return '', 200
if __name__ == "__main__":
    # Flask development server only; use a real WSGI server in production.
    app.run()
| m3talstorm/flask-http-store | FHS-API/app/app.py | app.py | py | 1,355 | python | en | code | 1 | github-code | 13 |
18711583543 |
from collections import defaultdict

# class collections.defaultdict([default_factory[, ...]])
# Returns a new dict-like object.  defaultdict is a subclass of the built-in
# dict: it overrides one method (__missing__) and adds one writable instance
# attribute (default_factory).  The first positional argument, if given,
# initialises default_factory (default: None); all remaining arguments,
# including keyword arguments, are passed through to the dict constructor.
#
# __missing__(key)
# If default_factory is None, raises KeyError(key).  Otherwise calls
# default_factory() with no arguments, inserts the result under key, and
# returns it.  Exceptions raised by default_factory propagate unchanged.
# __missing__ is invoked by dict.__getitem__ when the key is absent; it is
# NOT called by other methods -- e.g. get() still returns None like a plain
# dict instead of consulting default_factory.
#
# default_factory
# The attribute __missing__ uses; initialised from the constructor's first
# argument when provided, otherwise None.  It can also be assigned later,
# as demonstrated below.
# ================================================================
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
d1 = defaultdict()
d1.default_factory = list
# Assigning default_factory afterwards is equivalent in effect to
# constructing with defaultdict(list).
for k,v in s :
    d1[k].append(v)
# "a3" is not in d1, so __missing__ calls default_factory and inserts an
# empty list under that key.
d1.__missing__("a3")
print(d1)
# ================================================================
s2 = ["b1","b2","b3"]
d2 = defaultdict(int, a1=1,a2=4)
for i in s2 :
    d2.__missing__(i)
print(d2)
# ================================================================
# default_factory can be any zero-argument callable, e.g. a lambda.
s3 = ["b1","b2","b3"]
d2 = defaultdict(lambda : 4, a1=1,a2=4)
for i in s2 :
    d2.__missing__(i)
print(d2)
# ================================================================
| russellgao/algorithm | ProgrammingLanuage/python/collecttions/collections_defaultdict.py | collections_defaultdict.py | py | 2,458 | python | zh | code | 3 | github-code | 13 |
35543126242 | #!/usr/bin/python
from helper import *
from config import *
display_header()
ticket = ''
def handle_claims_gathering_response():
global ticket
if is_ticket_in_url():
arguments = cgi.FieldStorage()
ticket = arguments['ticket'].value
# Here is my PCT token!
# Client attempts to get RPT at UMA /token endpoint, this time presenting the PCT
# display_action_name("Client attempts to get RPT at UMA /token endpoint, this time presenting the PCT")
host = is_claim_in_url()
handle_claims_gathering_response()
# Client calls API without RPT token
if not is_ticket_in_url():
(as_uri, ticket) = get_as_and_ticket(host=host)
# Get Permission access token
# (remove. this is RS->AS, performed by RS internally in call above: get_as_and_ticket)
# access_token = get_permission_access_token_fpx(as_uri)
# Client calls AS UMA /token endpoint with permission ticket and client credentials
need_info, token, redirect_url, ticket_two = get_rpt_fpx(as_uri, ticket)
# Client calls API Gateway with RPT token
if not need_info:
call_gg_rpt(host=host, rpt=token)
# No RPT for you! Go directly to Claims Gathering!
# AS returns needs_info with claims gathering URI, which the user should
# put in his browser. Link shorter would be nice if the user has to type it in.
if need_info:
display_redirect_link_fpx(redirect_url, ticket_two)
display_footer()
| aleclaws/gg-demo-fpx | index.py | index.py | py | 1,401 | python | en | code | 0 | github-code | 13 |
44464473391 | import tensorflow as tf
import time
import numpy as np
from reader import *
import os
import warnings
import metric
try:
import neptune
except ImportError:
warnings.warn('neptune module is not installed (used for logging)', ImportWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def get_angles(pos, i, d_model):
    """Angle arguments for the sinusoidal positional encoding.

    pos and i are broadcastable arrays of positions and dimension indices;
    returns pos scaled by the inverse frequency 10000^(2*(i//2)/d_model).
    """
    two_i = 2 * (i // 2)
    inv_freq = 1 / np.power(10000, two_i / np.float32(d_model))
    return pos * inv_freq
def positional_encoding(position, d_model):
    """Return the (1, position, d_model) sinusoidal positional-encoding tensor.

    Even feature indices carry sin(angle), odd indices carry cos(angle),
    following "Attention Is All You Need".
    """
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)
    # apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    # Leading batch axis so the encoding broadcasts over a batch of sequences.
    pos_encoding = angle_rads[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask):
    """Calculate the attention weights.
    q, k, v must have matching leading dimensions.
    k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type(padding or look ahead)
    but it must be broadcastable for addition.
    Args:
      q: query shape == (..., seq_len_q, depth)
      k: key shape == (..., seq_len_k, depth)
      v: value shape == (..., seq_len_v, depth_v)
      mask: Float tensor with shape broadcastable
            to (..., seq_len_q, seq_len_k). Defaults to None.
    Returns:
      output, attention_weights
    """
    matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    # scale matmul_qk
    dk = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
    # add the mask to the scaled tensor.
    # Masked positions (mask == 1) get a large negative logit so that softmax
    # drives their attention weight to ~0.
    if mask is not None:
        scaled_attention_logits += (mask * -1e9)
    # softmax is normalized on the last axis (seq_len_k) so that the scores
    # add up to 1.
    attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
# Suppress scientific notation when printing numpy arrays (debugging aid).
np.set_printoptions(suppress=True)
def create_padding_mask(seq):
    """Return a mask that is 1.0 at padding positions (token id 0), 0.0 elsewhere."""
    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # add extra dimensions to add the padding
    # to the attention logits.
    return seq[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
    """Upper-triangular mask (1.0 above the diagonal) hiding future positions."""
    mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return mask  # (seq_len, seq_len)
def create_masks(inp, tar):
    """Build the three attention masks used by the encoder/decoder for one batch.

    Returns (enc_padding_mask, combined_mask, dec_padding_mask).
    """
    # Encoder padding mask
    enc_padding_mask = create_padding_mask(inp)
    # Used in the 2nd attention block in the decoder.
    # This padding mask is used to mask the encoder outputs.
    dec_padding_mask = create_padding_mask(inp)
    # Used in the 1st attention block in the decoder.
    # It is used to pad and mask future tokens in the input received by
    # the decoder.
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    dec_target_padding_mask = create_padding_mask(tar)
    # Element-wise max merges the padding mask and the look-ahead mask.
    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
    return enc_padding_mask, combined_mask, dec_padding_mask
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v into num_heads subspaces, attend in
    each head in parallel, then concatenate and re-project."""
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        # The model dimension must split evenly across the heads.
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads
        # Learned linear projections for queries, keys and values.
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        # Output projection applied after the heads are concatenated.
        self.dense = tf.keras.layers.Dense(d_model)
    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, v, k, q, mask):
        """Run attention; note the (v, k, q) argument order.

        Returns (output, attention_weights, per-head scaled attention).
        """
        batch_size = tf.shape(q)[0]
        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)
        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)
        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)
        scaled_attention = tf.transpose(scaled_attention,
                                        perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)
        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)
        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)
        return output, attention_weights, scaled_attention
def point_wise_feed_forward_network(d_model, dff):
    """Two-layer position-wise feed-forward net: d_model -> dff (relu) -> d_model."""
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dff, activation='relu', input_shape=(None, d_model)),  # (batch_size, seq_len, dff)
        tf.keras.layers.Dense(d_model)  # (batch_size, seq_len, d_model)
    ])
def read_embeddings(reader, embeddings_file="data/glove.6B.{}d.txt", embedding_size=50):
    """
    :param reader: a dialogue dataset reader, where we will get words mapped to indices
    :param embeddings_file: file path template for GloVe embeddings; "{}" is
        filled with embedding_size
    :param embedding_size: dimensionality of the embeddings to load
    :return: (cfg.vocab_size + 1, embedding_size) matrix of GloVe vectors;
        rows for words absent from the GloVe file stay zero
    """
    vocab_to_index = {reader.vocab.decode(idx): idx for idx in range(cfg.vocab_size)}
    embedding_matrix = np.zeros((cfg.vocab_size + 1, embedding_size))
    embeddings_file = embeddings_file.format(embedding_size)
    # GloVe files are UTF-8; pin the encoding so reads don't depend on locale.
    with open(embeddings_file, encoding="utf-8") as infile:
        for line in infile:
            word, coeffs = line.split(maxsplit=1)
            if word in vocab_to_index:
                word_index = vocab_to_index[word]
                # np.fromstring (text mode) is deprecated; parse explicitly.
                embedding_matrix[word_index] = np.asarray(coeffs.split(), dtype=np.float32)
    return embedding_matrix
class EncoderLayer(tf.keras.layers.Layer):
    """One Transformer encoder block: self-attention + feed-forward, each with
    dropout and a residual connection followed by layer norm."""
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask):
        """Returns (block output, raw self-attention output)."""
        attn_output, _, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)
        return out2, attn_output
class DecoderLayer(tf.keras.layers.Layer):
    """One Transformer decoder block: masked self-attention, encoder-decoder
    attention, then feed-forward; each sub-layer has dropout + residual + norm."""
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)
        self.mha2 = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)
    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        """Returns (block output, self-attn weights, enc-dec attn weights,
        enc-dec attention output)."""
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        attn1, attn_weights_block1, _ = self.mha1(x, x, x, look_ahead_mask)  # (batch_size, target_seq_len, d_model)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)
        attn2, attn_weights_block2, scaled_attention = self.mha2(
            enc_output, enc_output, out1, padding_mask)  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(ffn_output + out2)  # (batch_size, target_seq_len, d_model)
        return out3, attn_weights_block1, attn_weights_block2, attn2
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder: token embedding + positional encoding followed by
    num_layers EncoderLayer blocks.

    If embeddings_matrix is given, the embedding table is initialised from it
    (e.g. pre-trained GloVe vectors); otherwise it is randomly initialised.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 maximum_position_encoding, rate=0.1, embeddings_matrix=None):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        if embeddings_matrix is not None:
            self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model,
                                                       embeddings_initializer=tf.keras.initializers.Constant(embeddings_matrix))
        else:
            self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding,
                                                self.d_model)
        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask):
        """Returns (encoder output, last layer's raw self-attention output)."""
        seq_len = tf.shape(x)[1]
        # adding embedding and position encoding.
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        # Scale embeddings by sqrt(d_model) as in the original Transformer paper.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x, attn = self.enc_layers[i](x, training, mask)
        return x, attn  # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder with an optional CopyNet-style copy mechanism.

    When copynet is True, call() additionally returns a generation probability
    p_gen and a per-step copy distribution over the target vocabulary built by
    scattering attention-derived scores onto the input token ids.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1, copynet=False, embeddings_matrix=None):
        super(Decoder, self).__init__()
        self.target_vocab_size = target_vocab_size
        self.copynet = copynet
        self.d_model = d_model
        self.num_layers = num_layers
        if embeddings_matrix is not None:
            self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model,
                                                       embeddings_initializer=tf.keras.initializers.Constant(embeddings_matrix))
        else:
            self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)
        if self.copynet:
            # Scores each (encoder state, decoder state) pair for copying.
            self.copy_network = tf.keras.Sequential([
                tf.keras.layers.Dense(dff, activation='relu', input_shape=(None, d_model * 2)),
                tf.keras.layers.Dense(1)])  # (batch_size, seq_len, d_model)
            # Produces p_gen in (0, 1): mixing weight between generate and copy.
            self.gen_prob = tf.keras.layers.Dense(1, activation="sigmoid")
    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask, encoder_attn, inp):
        """Returns (decoder states, attention weights dict, p_gen, copy distributions).

        When copynet is False, p_gen and the copy distributions are 0.
        """
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x, block1, block2, attn = self.dec_layers[i](x, enc_output, training,
                                                         look_ahead_mask, padding_mask)
            attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2
        if self.copynet:
            p_gen = self.gen_prob(x)
            copy_distributions = []
            # One copy distribution per decoder time step.
            for dec_token in tf.unstack(x, axis=1):
                # Pair this decoder state with every encoder position.
                to_concat = tf.tile(tf.expand_dims(dec_token, 1), [1,enc_output.shape[1], 1])
                copynet_input = tf.concat([enc_output, to_concat], axis=-1)
                copy_distribution = self.copy_network(copynet_input)
                try:
                    copy_distribution = tf.squeeze(copy_distribution, axis=1)
                except tf.errors.InvalidArgumentError:
                    copy_distribution = tf.squeeze(copy_distribution)
                copy_probs = tf.nn.softmax(copy_distribution)
                if copy_probs.shape.ndims == 1:
                    copy_probs = tf.expand_dims(copy_probs, axis=0)
                i1, i2 = tf.meshgrid(tf.range(inp.shape[0]),
                                     tf.range(inp.shape[1]), indexing="ij")
                i1 = tf.tile(i1[:, :, tf.newaxis], [1, 1, 1])
                i2 = tf.tile(i2[:, :, tf.newaxis], [1, 1, 1])
                # Create final indices
                idx = tf.stack([i1, i2, tf.expand_dims(inp, axis=2)], axis=-1)
                # Output shape
                to_shape = [inp.shape[0], inp.shape[1], self.target_vocab_size]
                # Get scattered tensor: copy probability mass is accumulated on
                # the vocabulary ids of the input tokens.
                output = tf.scatter_nd(idx, tf.expand_dims(copy_probs, axis=2), to_shape)
                copy_logits = tf.reduce_sum(output, axis=1)
                copy_distributions.append(copy_logits)
            copy_distributions = tf.stack(copy_distributions, axis=1)
            return x, attention_weights, p_gen, copy_distributions
        else:
            p_gen = 0.
            return x, attention_weights, p_gen, 0.
class Transformer(tf.keras.Model):
    """Shared-encoder Transformer with two decoders: one generating belief
    spans (bspan) and one generating system responses.

    With copynet=True the decoder outputs are mixed with a copy distribution
    using the generation probability p_gen.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
                 target_vocab_size, pe_input, pe_target, rate=0.1, copynet=False, embeddings_matrix=None):
        super(Transformer, self).__init__()
        self.copynet = copynet
        # BUG FIX: embeddings_matrix was accepted but never forwarded to the
        # encoder, so the encoder always trained embeddings from scratch even
        # when pre-trained vectors were supplied.
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate, embeddings_matrix)
        self.response_decoder = Decoder(num_layers, d_model, num_heads, dff,
                                        target_vocab_size, pe_target, rate, copynet, embeddings_matrix)
        self.bspan_decoder = Decoder(num_layers, d_model, num_heads, dff,
                                     target_vocab_size, pe_target, rate, copynet, embeddings_matrix)
        self.response_final = tf.keras.layers.Dense(target_vocab_size)
        self.bspan_final = tf.keras.layers.Dense(target_vocab_size)
    def bspan(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):
        """Encode inp and decode a belief span; returns (logits, attention weights)."""
        enc_output, enc_attn = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights, p_gen, copy_distributions = self.bspan_decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask, enc_attn, inp)
        # BUG FIX: this branch previously projected with response_final;
        # bspan_final was defined but never used.
        bspan_output = self.bspan_final(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)
        if self.copynet:
            bspan_output = p_gen * bspan_output + (1-p_gen) * copy_distributions
        return bspan_output, attention_weights
    def response(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):
        """Encode inp and decode a system response; returns (logits, attention weights)."""
        enc_output, enc_attn = self.encoder(inp, training, enc_padding_mask)  # (batch_size, inp_seq_len, d_model)
        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights, p_gen, copy_distributions = self.response_decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask, enc_attn, inp)
        response_output = self.response_final(dec_output)  # (batch_size, tar_seq_len, target_vocab_size)
        if self.copynet:
            response_output = p_gen * response_output + (1-p_gen) * copy_distributions
        return response_output, attention_weights
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Transformer learning-rate schedule: linear warmup for warmup_steps,
    then decay proportional to 1/sqrt(step), scaled by 1/sqrt(d_model)."""
    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        self.d_model = d_model
        self.d_model = tf.cast(self.d_model, tf.float32)
        self.warmup_steps = warmup_steps
    def __call__(self, step):
        # arg1 dominates after warmup; arg2 dominates during warmup.
        arg1 = tf.math.rsqrt(step)
        arg2 = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
# Per-token cross-entropy on raw logits; reduction deferred so that padding
# tokens can be masked out manually in loss_function.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')
def loss_function(real, pred):
    """Masked sparse cross-entropy: mean token loss over non-padding (id != 0) positions."""
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
def tensorize(id_lists):
    """Convert a list of variable-length id lists into a dense, zero-padded
    int32 tensor of shape (len(id_lists), max_len)."""
    # The original wrapped id_lists in an identity comprehension
    # ([x for x in id_lists]); list() says the same thing directly.
    tensorized = tf.ragged.constant(list(id_lists)).to_tensor()
    return tf.cast(tensorized, dtype=tf.int32)
# TODO change these functions so that they can take tensor input and not just list
def produce_bspan_decoder_input(previous_bspan, previous_response, user_input):
    """Build the bspan-decoder input batch.

    Each sample is: start symbol + previous bspan + previous response +
    current user utterance, padded into a dense int32 tensor.
    """
    start_symbol = [cfg.vocab_size]
    # The original looped with enumerate() but never used the counter.
    inputs = [start_symbol + prev_bspan + prev_response + user
              for prev_bspan, prev_response, user
              in zip(previous_bspan, previous_response, user_input)]
    return tensorize(inputs)
def produce_response_decoder_input(previous_bspan, previous_response, user_input, bspan, kb):
    """Build the response-decoder input batch.

    Each sample is: start symbol + previous bspan + previous response +
    user utterance + current bspan + KB/degree features.
    """
    start_symbol = [cfg.vocab_size]
    return tensorize([start_symbol + prev_b + prev_r + user + b + degree
                      for prev_b, prev_r, user, b, degree
                      in zip(previous_bspan, previous_response, user_input, bspan, kb)])
class SeqModel:
    """End-to-end dialogue model wrapping the two-decoder Transformer.

    Owns the optimizer, metrics and training/evaluation loops; vocabulary and
    data batching come from the supplied reader.
    """
    def __init__(self, vocab_size, num_layers=3, d_model=50, dff=512, num_heads=5, dropout_rate=0.1, copynet=False,
                 reader=None, warmup_steps=4000):
        # +1 everywhere to reserve an extra id used as the start symbol.
        self.vocab_size = vocab_size + 1
        input_vocab_size = vocab_size + 1
        target_vocab_size = vocab_size + 1
        self.learning_rate = CustomSchedule(d_model, warmup_steps)
        self.optimizer = tf.keras.optimizers.Adam(self.learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
        self.bspan_loss = tf.keras.metrics.Mean(name='train_loss')
        self.response_loss = tf.keras.metrics.Mean(name='train_loss')
        self.bspan_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
        self.response_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
        self.reader = reader
        # History of success-F1 scores across evaluations.
        self.f1s = []
        if reader:
            print("Reading pre-trained word embeddings with {} dimensions".format(d_model))
            embeddings_matrix = read_embeddings(reader, embedding_size=d_model)
        else:
            print("Initializing without pre-trained embeddings.")
            embeddings_matrix=None
        self.transformer = Transformer(num_layers, d_model, num_heads, dff,
                                       input_vocab_size, target_vocab_size,
                                       pe_input=input_vocab_size,
                                       pe_target=target_vocab_size,
                                       rate=dropout_rate, copynet=copynet, embeddings_matrix=embeddings_matrix)
    #@tf.function(input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32),
    #                              tf.TensorSpec(shape=(None, None), dtype=tf.int32)])
    def train_step_bspan(self, inp, tar):
        """One gradient step on the bspan decoder (teacher forcing)."""
        # Shift target: decoder sees tar[:-1], loss compares against tar[1:].
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
        with tf.GradientTape() as tape:
            predictions, _ = self.transformer.bspan(inp=inp, tar=tar_inp, training=True,
                                                    enc_padding_mask=enc_padding_mask, look_ahead_mask=combined_mask,
                                                    dec_padding_mask=dec_padding_mask)
            loss = loss_function(tar_real, predictions)
        gradients = tape.gradient(loss, self.transformer.trainable_variables)
        # Variables untouched by this branch get zero gradients instead of None.
        gradients =[grad if grad is not None else tf.zeros_like(var)
                    for grad, var in zip(gradients, self.transformer.trainable_variables)]
        self.optimizer.apply_gradients(zip(gradients, self.transformer.trainable_variables))
        self.bspan_accuracy(tar_real, predictions)
    #@tf.function(input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32),
    #                              tf.TensorSpec(shape=(None, None), dtype=tf.int32)])
    def train_step_response(self, inp, tar):
        """One gradient step on the response decoder (teacher forcing)."""
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
        with tf.GradientTape() as tape:
            predictions, _ = self.transformer.response(inp=inp, tar=tar_inp, training=True,
                                                       enc_padding_mask=enc_padding_mask, look_ahead_mask=combined_mask,
                                                       dec_padding_mask=dec_padding_mask)
            loss = loss_function(tar_real, predictions)
        gradients = tape.gradient(loss, self.transformer.trainable_variables)
        gradients =[grad if grad is not None else tf.zeros_like(var)
                    for grad, var in zip(gradients, self.transformer.trainable_variables)]
        self.optimizer.apply_gradients(zip(gradients, self.transformer.trainable_variables))
        self.response_accuracy(tar_real, predictions)
    def train_model(self, epochs=20, log=False, max_sent=1, max_turns=1):
        """Train over the reader's 'train' split, turn by turn within each dialogue."""
        constraint_eos, request_eos, response_eos = "EOS_Z1", "EOS_Z2", "EOS_M"
        for epoch in range(epochs):
            data_iterator = self.reader.mini_batch_iterator('train')
            for iter_num, dial_batch in enumerate(data_iterator):
                previous_bspan, previous_response = None, None
                for turn_num, turn_batch in enumerate(dial_batch):
                    _, _, user, response, bspan_received, u_len, m_len, degree, _ = turn_batch.values()
                    batch_size = len(user)
                    # First turn of a dialogue: seed context with EOS markers.
                    if previous_bspan is None:
                        previous_bspan = [[self.reader.vocab.encode(constraint_eos),
                                           self.reader.vocab.encode(request_eos)] for i in range(batch_size)]
                        previous_response = [[self.reader.vocab.encode(response_eos)] for i in range(batch_size)]
                    target_bspan = tensorize([[cfg.vocab_size] + x for x in bspan_received])
                    target_response = tensorize([[cfg.vocab_size] + x for x in response])
                    bspan_decoder_input = produce_bspan_decoder_input(previous_bspan, previous_response, user)
                    response_decoder_input = produce_response_decoder_input(previous_bspan, previous_response,
                                                                            user, bspan_received, degree)
                    # TODO actually save the models, keeping track of the best one
                    # training the model
                    self.train_step_bspan(bspan_decoder_input, target_bspan)
                    self.train_step_response(response_decoder_input, target_response)
                    previous_bspan = bspan_received
                    previous_response = response
            print("Completed epoch #{} of {}".format(epoch + 1, epochs))
            # if epoch >= 50 and epoch % 1 == 0:
            #     self.evaluation(verbose=True, log=log, max_sent=max_sent, max_turns=max_turns, use_metric=True, epoch=epoch)
    def auto_regress(self, input_sequence, decoder, MAX_LENGTH=128):
        """Greedy decoding with the chosen decoder ("bspan" or "response")
        until the decoder's end token or MAX_LENGTH steps."""
        assert decoder in ["bspan", "response"]
        decoder_input = [cfg.vocab_size]
        output = tf.expand_dims(decoder_input, 0)
        end_token_id = self.reader.vocab.encode("EOS_Z2") if decoder == "bspan" else self.reader.vocab.encode("EOS_M")
        for i in range(MAX_LENGTH):
            enc_padding_mask, combined_mask, dec_padding_mask = create_masks(input_sequence, output)
            if decoder == "bspan":
                predictions, attention_weights = self.transformer.bspan(input_sequence, output, False,
                                                                        enc_padding_mask, combined_mask,
                                                                        dec_padding_mask)
            else:
                predictions, attention_weights = self.transformer.response(input_sequence, output, False,
                                                                           enc_padding_mask, combined_mask,
                                                                           dec_padding_mask)
            # Only the logits for the newest position matter.
            predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
            predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
            output = tf.concat([output, predicted_id], axis=-1)
            if predicted_id == end_token_id:
                return tf.squeeze(output, axis=0), attention_weights
        return tf.squeeze(output, axis=0), attention_weights
    def evaluate(self, previous_bspan, previous_response, user, degree):
        """Decode one turn: first the bspan, then the response conditioned on it."""
        bspan_decoder_input = produce_bspan_decoder_input([previous_bspan], [previous_response], [user])
        predicted_bspan, _ = self.auto_regress(bspan_decoder_input, "bspan")
        response_decoder_input = produce_response_decoder_input([previous_bspan], [previous_response],
                                                                [user], [list(predicted_bspan.numpy())], [degree])
        predicted_response, _ = self.auto_regress(response_decoder_input, "response")
        return predicted_response
    def evaluation(self, mode="dev", verbose=False, log=False, max_sent=1, max_turns=1, use_metric=False, epoch=999):
        """Decode up to max_sent dialogues / max_turns turns from dev or test
        and optionally compute BLEU and success-F1 metrics.

        NOTE(review): previous_bspan/previous_response are not updated between
        turns here (unlike in train_model) — confirm this is intentional.
        """
        dialogue_set = self.reader.dev if mode == "dev" else self.reader.test
        predictions, targets = list(), list()
        constraint_eos, request_eos, response_eos = "EOS_Z1", "EOS_Z2", "EOS_M"
        for dialogue in dialogue_set[0:max_sent]:
            previous_bspan = [self.reader.vocab.encode(constraint_eos), self.reader.vocab.encode(request_eos)]
            previous_response = [self.reader.vocab.encode(response_eos)]
            real_turns = []
            predicted_turns = []
            for turn in dialogue[0:max_turns]:
                dial_id, turn_num, user, response, bspan, u_len, m_len, degree = turn.values()
                response, bspan = [cfg.vocab_size] + response, [cfg.vocab_size] + bspan
                predicted_response = self.evaluate(previous_bspan, previous_response, user, degree)
                predicted_decoded = self.reader.vocab.sentence_decode(predicted_response.numpy())
                real_decoded = self.reader.vocab.sentence_decode(response)
                real_turns.append(real_decoded)
                predicted_turns.append(predicted_decoded)
                if verbose:
                    print("Predicted:", predicted_decoded)
                    print("Real:", real_decoded)
                if log:
                    neptune.log_text('predicted', self.reader.vocab.sentence_decode(predicted_response.numpy()))
                    neptune.log_text('real', self.reader.vocab.sentence_decode(response))
            predictions.append(predicted_turns)
            targets.append(real_turns)
        if use_metric:
            # BLEU
            scorer = metric.BLEUScorer()
            bleu = scorer.score(zip(predictions, targets))
            # Success F1
            f1 = metric.success_f1_metric(targets, predictions)
            self.f1s.append(f1)
            if verbose:
                print("Bleu: {:.4f}%".format(bleu*100))
                print("F1: {:.4f}%".format(f1*100))
            if log:
                neptune.log_metric('bleu', epoch, bleu)
                neptune.log_metric('f1', epoch, f1)
                neptune.log_metric('f1_max', max(self.f1s))
                if mode=='test':
                    neptune.log_metric('f1_test', epoch, f1)
                    neptune.log_metric('bleu_test', epoch, bleu)
if __name__ == "__main__":
    # Smoke-run: train one epoch of the CopyNet model on the CamRest676 data.
    ds = "tsdf-camrest"
    cfg.init_handler(ds)
    cfg.dataset = ds.split('-')[-1]
    reader = CamRest676Reader()
    model = SeqModel(d_model=50, vocab_size=cfg.vocab_size, copynet=True, reader=reader)
    model.train_model(epochs=1, log=False)
| pixelneo/dialogue-transformer-e2e | implementation/tf/transformer.py | transformer.py | py | 29,573 | python | en | code | 5 | github-code | 13 |
10273618549 | from protocolo import *
MAX_SEQ = 7 # Define una constante para el número máximo de secuencia.
# Función que determina si un número se encuentra en un rango circular.
def between(a, b, c):
    """Return True if b lies in the circular interval [a, c) of sequence numbers.

    Used to decide whether an acknowledgement number falls inside the
    current sliding window.
    """
    # BUG FIX: the original grouped the comparisons as ((a <= b) < c) etc.,
    # comparing a boolean with an integer, which gave wrong answers
    # (e.g. between(1, 0, 2) evaluated to True). Use proper chained
    # comparisons for the three circular cases.
    return (a <= b < c) or (c < a <= b) or (b < c < a)
# Función que envía datos.
def send_data(frame_nr, frame_expected, buffer):
    """Build and transmit frame `frame_nr`, piggybacking an ack for the
    inbound stream, and start its retransmission timer."""
    s = Frame()  # Create a new frame.
    s.info = buffer[frame_nr]  # Load the frame with data from the buffer.
    s.seq = frame_nr  # Set the frame's sequence number.
    s.ack = (frame_expected + MAX_SEQ) % (MAX_SEQ + 1)  # Piggybacked ack: last frame received in order.
    to_physical_layer(s)  # Hand the frame to the physical layer.
    start_timer(frame_nr)  # Start the retransmission timer for this frame.
# Función principal del protocolo 5.
def protocol5():
    """Go-Back-N sliding-window protocol loop (Tanenbaum's protocol 5)."""
    next_frame_to_send = 0  # Sequence number of the next frame to send.
    ack_expected = 0  # Oldest frame not yet acknowledged.
    frame_expected = 0  # Sequence number expected on the inbound side.
    buffer = [None for _ in range(MAX_SEQ + 1)]  # Buffer of outstanding frames.
    nbuffered = 0  # Number of frames currently buffered (window occupancy).
    enable_network_layer()  # Allow the network layer to submit packets.
    # Main protocol loop.
    while True:
        event = wait_for_event(50,"go_back_n")  # Wait for an event.
        # The network layer has a packet ready to send.
        if event == EventType.NETWORK_LAYER_READY:
            buffer[next_frame_to_send] = from_network_layer()  # Fetch a packet and buffer it.
            nbuffered += 1  # One more frame outstanding.
            send_data(next_frame_to_send, frame_expected, buffer)  # Transmit the frame.
            next_frame_to_send = (next_frame_to_send + 1) % (MAX_SEQ + 1)  # Advance the upper window edge.
        # A frame has arrived.
        elif event == EventType.FRAME_ARRIVAL:
            r = from_physical_layer()  # Fetch the frame from the physical layer.
            # Frames are only accepted in order.
            if r.seq == frame_expected:
                to_network_layer(r.info)  # Deliver the payload to the network layer.
                frame_expected = (frame_expected + 1) % (MAX_SEQ + 1)  # Advance the receive window.
            # Acks are cumulative: retire every frame up to and including r.ack.
            while between(ack_expected, r.ack, next_frame_to_send):
                nbuffered -= 1  # One fewer frame outstanding.
                stop_timer(ack_expected)  # Frame arrived intact; stop its timer.
                ack_expected = (ack_expected + 1) % (MAX_SEQ + 1)  # Advance the lower window edge.
        # A damaged frame: just ignore it.
        elif event == EventType.CKSUM_ERR:
            pass
        # A timer expired: go back n and retransmit all outstanding frames.
        elif event == EventType.TIMEOUT:
            next_frame_to_send = ack_expected  # Restart with the oldest unacknowledged frame.
            for i in range(1, nbuffered + 1):  # For each outstanding frame...
                send_data(next_frame_to_send, frame_expected, buffer)  # ...resend it.
                next_frame_to_send = (next_frame_to_send + 1) % (MAX_SEQ + 1)  # Prepare the next one.
        # Throttle the network layer when the window is full.
        if nbuffered < MAX_SEQ:
            enable_network_layer()
        else:
            disable_network_layer()
| johanec/Proyecto1-Redes | backend/go_back_n.py | go_back_n.py | py | 3,999 | python | es | code | 0 | github-code | 13 |
16276353444 | import json
import numpy as np
from utils import *
def prepare_data():
    """One-hot encode the mushroom CSV (dropping class/odor/stalk-color-below-ring
    as features), shuffle the rows, and save the result to data.npy.

    Column 0 of the saved array is the class label; the remaining columns are
    the one-hot encoded features (feature names via vec.get_feature_names_out()).
    """
    vec = DictVectorizer()
    data = pd.read_csv('蘑菇分类数据集.csv')
    data_array = np.hstack([data['class'].values.reshape(-1, 1),
                            vec.fit_transform(data.drop(['class', 'odor', 'stalk-color-below-ring'], axis=1).to_dict(
                                'records')).toarray()]
                           )
    np.random.shuffle(data_array)
    np.save('data.npy', data_array)
    # BUG FIX: the function previously called exit(0) here, which terminated
    # the interpreter and made every statement after a prepare_data() call
    # (e.g. in __main__) unreachable.
def load_data():
    """Load data.npy and return (train_X, train_y, test_X, test_y).

    Training split: first 500 rows; test split: last 3000 rows.
    Column 0 holds the label, the remaining columns the features.
    """
    dataset = np.load('data.npy', allow_pickle=True)
    train, test = dataset[:500], dataset[-3000:]
    return train[:, 1:], train[:, 0], test[:, 1:], test[:, 0]
def get_classes_name():
    """Return the class labels 'e' and 'p' (presumably edible/poisonous —
    confirm against the dataset documentation)."""
    return list("ep")
def get_feature_name():
    """Return the 88 one-hot feature names, in DictVectorizer order.

    Built as "<feature>=<value>" for each feature group; the odor and
    stalk-color-below-ring features are deliberately excluded (they are
    dropped from the dataset in prepare_data).
    """
    groups = [
        ("cap-color", "bcegnpruwy"),
        ("cap-shape", "bcfksx"),
        ("cap-surface", "fgsy"),
        ("gill-color", "beghknopruwy"),
        ("habitat", "dglmpuw"),
        ("population", "acnsvy"),
        ("ring-number", "not"),
        ("ring-type", "eflnp"),
        ("spore-print-color", "bhknoruwy"),
        ("stalk-color-above-ring", "bcegnopwy"),
        ("stalk-root", "?bcer"),
        ("stalk-surface-above-ring", "fksy"),
        ("stalk-surface-below-ring", "fksy"),
        ("veil-color", "nowy"),
    ]
    return ["{}={}".format(feature, value)
            for feature, values in groups
            for value in values]
if __name__ == '__main__':
    # pass
    # Regenerate the .npy dataset from the CSV, then train/evaluate a classifier.
    prepare_data()
    data = np.load('data.npy', allow_pickle=True)
    print(data.shape)
    # print(data)
    # p = DecisionTreeClassifier(max_depth=1, random_state=42)
    # p = RandomForestClassifier(random_state=42)
    p = AdaBoostClassifier(estimator=DecisionTreeClassifier(), random_state=42)
    # Train on the first 500 rows; evaluate on the last 1000.
    p.fit(data[:500, 1:], data[:500, 0])
    print(p.score(data[-1000:, 1:], data[-1000:, 0]))
    # Same accuracy computed by hand, as a cross-check of .score().
    print(np.mean(p.predict(data[-1000:, 1:]) == data[-1000:, 0]))
| MosRat/BnuMcLab | MCExp5/dataset.py | dataset.py | py | 4,215 | python | en | code | 1 | github-code | 13 |
8208917014 | """CP1404 Practical 2 - Files"""
# 1. Write code that asks the user for their name, then opens a file called "name.txt" and writes that name to it.
name = input("What is your name: ")
out_file = open('name.txt', 'w')
print(name, file=out_file)
out_file.close()
# 2. Write code that opens "name.txt" and reads the name (as above) then prints,
# "Your name is Bob" (or whatever the name is in the file).
in_file = open('name.txt', 'r')
name = in_file.read().strip()
in_file.close()
print(f"Your name is {name}")
# 3. Write code that opens "numbers.txt", reads only the first two numbers and
# adds them together then prints the result, which should be... 59.
in_file = open('numbers.txt', 'r')
first_number = int(in_file.readline())
second_number = int(in_file.readline())
in_file.close()
print(first_number + second_number)
# Now write a fourth block of code that prints the total for all lines in
# numbers.txt or a file with any number of numbers.
in_file = open('numbers.txt', 'r')
total = 0
for line in in_file:
number = int(line)
total += number
in_file.close()
print(f"The total value for the numbers in {in_file.name} is {total}.")
| McTenshi/cp1404practicals | prac_02/files.py | files.py | py | 1,153 | python | en | code | 0 | github-code | 13 |
30583928238 | # -*- coding: utf-8 -*-
from ge.bpmc import MAX_BUSINESS_TASK_RETRIES, business
from ge.bpmc.app.injection import Contexts, Core, Factories, Services
from ge.bpmc.utilities.sqlalchemy import transaction
# Celery application instance used to register the tasks below.
app = Factories.celery_factory()
@transaction(Core.logger, Contexts.em)
def wrapped_match_procedure_images(procedure_uid):
    """Run the image-matching workflow for one procedure inside a DB transaction."""
    Services.workflow().match_procedure(procedure_uid)
@app.task(bind=True, max_retries=MAX_BUSINESS_TASK_RETRIES)
def match_procedure_images(self, procedure_uid):
    """
    Matches up to two sets of two images for a procedure.

    On failure the exception is logged and the task is retried, up to
    MAX_BUSINESS_TASK_RETRIES times.

    Keyword arguments:
    procedure_uid -- Int, Procedure UID
    """
    logger = Core.logger()
    # Lazy %-style arguments: the message is only formatted if the record is emitted.
    logger.info('Running matching for procedure %s', procedure_uid)
    try:
        wrapped_match_procedure_images(procedure_uid)
        logger.info('Procedure %s has been matched', procedure_uid)
    except Exception as e:
        logger.exception(e)
        self.retry(exc=e)
| dbenlopers/SANDBOX | misc/bpm_cloud/ge.bpmc/ge/bpmc/tasks/matching.py | matching.py | py | 1,156 | python | en | code | 0 | github-code | 13 |
42747451874 | #%%
#
# Project 1, starter code part b
#
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
# Silence TF1 deprecation/info chatter; only errors are printed.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def ffn(x, feature_size, neuron_size, weight_decay_beta, layers=3, dropout=False):
    """Feedforward network: ReLU hidden layer(s) plus a linear output unit.

    Returns the output tensor and the accumulated L2 weight penalty
    (weight_decay_beta * l2_loss summed over every weight matrix).
    Extra hidden layers are added for layers > 3; dropout (keep prob 0.8)
    is applied after each hidden layer when requested.
    """
    reg_total = 0

    def _dense(in_dim, out_dim):
        # Fresh weight/bias variables with 1/sqrt(fan-in) truncated-normal init.
        w = tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=1.0 / np.sqrt(in_dim), dtype=tf.float32), name='weights')
        b = tf.Variable(tf.zeros([out_dim]), dtype=tf.float32, name='biases')
        return w, b

    with tf.name_scope('hidden'):
        w, b = _dense(feature_size, neuron_size)
        h = tf.nn.relu(tf.matmul(x, w) + b)
        if dropout:
            h = tf.nn.dropout(h, 0.8)
        reg_total += weight_decay_beta * tf.nn.l2_loss(w)
    # range() is empty for layers <= 3, so this matches the original guard.
    for i in range(layers - 3):
        with tf.name_scope('hidden{}'.format(i)):
            w, b = _dense(neuron_size, neuron_size)
            h = tf.nn.relu(tf.matmul(h, w) + b)
            if dropout:
                h = tf.nn.dropout(h, 0.8)
            reg_total += weight_decay_beta * tf.nn.l2_loss(w)
    with tf.name_scope('linear'):
        w, b = _dense(neuron_size, 1)
        u = tf.matmul(h, w) + b
        reg_total += weight_decay_beta * tf.nn.l2_loss(w)
    return u, reg_total
def create_model(feature_size, neuron_size, weight_decay_beta, learning_rate, layers=3, dropout=False):
    """Assemble the regression graph: placeholders, network, loss and train op."""
    # Placeholders for the feature matrix and the scalar regression target.
    x = tf.placeholder(tf.float32, [None, feature_size])
    y_ = tf.placeholder(tf.float32, [None, 1])
    y, regularizer = ffn(x, feature_size, neuron_size, weight_decay_beta, layers=layers, dropout=dropout)
    # Mean of the squared error plus the accumulated L2 weight penalties.
    loss = tf.reduce_mean(tf.square(y_ - y) + regularizer)
    # Plain gradient descent with the given learning rate.
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    return y, train_op, y_, x, loss
def train_model(train_op, train_x, train_y, test_x, test_y, y, y_, x, loss, sample_X=None):
    """Train the graph with mini-batch SGD and record the error curves.

    Relies on the module-level ``epochs`` and ``batch_size`` settings.
    Returns (test_err, train_err, prediction); ``prediction`` holds the
    network outputs for ``sample_X`` (an empty list when no sample is given).
    """
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_err = []
        test_err = []
        prediction = []
        for i in range(epochs):
            # Handle in batches
            for start, end in zip(range(0, len(train_x), batch_size), range(batch_size, len(train_x), batch_size)):
                train_op.run(feed_dict={x: train_x[start:end], y_: train_y[start:end]})
            err = loss.eval(feed_dict={x: train_x, y_: train_y})
            test_err_ = loss.eval(feed_dict={x: test_x, y_: test_y})
            train_err.append(err)
            test_err.append(test_err_)
            if i % 100 == 0:
                print('iter %d: train error %g' % (i, train_err[i]))
        # BUG FIX: the original signature used a mutable default (sample_X=[])
        # and tested `sample_X != []`, which is ambiguous for numpy arrays.
        # None is now the "no sample" marker; len() works for lists and arrays.
        if sample_X is not None and len(sample_X) > 0:
            prediction = sess.run(y, feed_dict={x: sample_X})
        return test_err, train_err, prediction
def plot_rfe_loss(filename, x_headers, epochs, plot_list):
    """Plot one MSE curve per removed feature and save the figure to `filename`."""
    fig, ax = plt.subplots(figsize=[12.8, 9.6])
    for idx, curve in enumerate(plot_list):
        # Each curve corresponds to training without the idx-th feature.
        plt.plot(range(epochs), curve[-epochs:], label=f'Without {x_headers[idx]}')
    plt.xlabel(str(epochs) + ' epochs')
    plt.ylabel('Mean Square Error')
    ax.legend(loc='best')
    plt.savefig(filename)
    plt.show()
def plot_test_err_comparison(filename, epochs, error_list):
    """Plot the test-error curves of the successive RFE stages on one figure."""
    fig, ax = plt.subplots(figsize=[12.8, 9.6])
    for stage, curve in enumerate(error_list):
        plt.plot(range(epochs), curve[-epochs:], label=f'Test Error with {stage} RFE')
    plt.xlabel(str(epochs) + ' epochs')
    plt.ylabel('Mean Square Error')
    ax.legend(loc='best')
    plt.savefig(filename)
    plt.show()
def plot_acc_vs_pred(filename, prediction, Y_data):
    """Plot the predicted values against the last 50 ground-truth targets."""
    fig, ax = plt.subplots(figsize=[12.8, 9.6])
    plt.plot(range(50), prediction, label='Prediction')
    plt.plot(range(50), Y_data[-50:], label='Actual')
    plt.xlabel(str(50) + ' epochs')
    plt.ylabel('Prediction')
    ax.legend(loc='best')
    plt.savefig(filename)
    plt.show()
def plot_train_test_err(filename, epochs, train_err, test_err):
    """Plot the train (green) and test (red) error curves on one figure."""
    fig, ax = plt.subplots(figsize=[12.8, 9.6])
    plt.plot(range(epochs), train_err, color='green', label='Train Error')
    plt.plot(range(epochs), test_err, color='red', label='Test Error')
    plt.xlabel(str(epochs) + ' epochs')
    plt.ylabel('Mean Square Error')
    ax.legend(loc='best')
    plt.savefig(filename)
    plt.show()
def plot_layer_comp(filename, epochs, err_list, train_or_test):
    """Plot error curves for 3/4/5-layer nets, with and without dropout.

    ``err_list`` is laid out as consecutive [no-dropout, dropout] pairs,
    one pair per network depth, starting at 3 layers.
    """
    fig, ax = plt.subplots(figsize=[12.8, 9.6])
    for depth_idx in range(3):
        base = 2 * depth_idx
        plt.plot(range(epochs), err_list[base], label=f'{train_or_test} {depth_idx + 3}-layer net without dropout')
        plt.plot(range(epochs), err_list[base + 1], label=f'{train_or_test} {depth_idx + 3}-layer net with dropout')
    plt.xlabel(str(epochs) + ' epochs')
    plt.ylabel('Mean Square Error')
    ax.legend(loc='best')
    plt.savefig(filename)
    plt.show()
# Initial base parameters
NUM_FEATURES = 7
learning_rate = 0.01
epochs = 1000
batch_size = 8
neuron_size = 30
# NOTE(review): '10e-3' parses to 0.01 (10 * 10^-3), not 1e-3 -- confirm intended value.
weight_decay_beta = float('10e-3')
seed = 10
test_split = 0.3
np.random.seed(seed)
#read and divide data into test and train sets
# Header row is skipped; columns 1-7 are features, the last column is the target.
admit_data = np.genfromtxt('admission_predict.csv', delimiter= ',')
X_data, Y_data = admit_data[1:,1:8], admit_data[1:,-1]
Y_data = Y_data.reshape(Y_data.shape[0], 1)
# Shuffle samples with the fixed seed for reproducibility.
idx = np.arange(X_data.shape[0])
np.random.shuffle(idx)
X_data, Y_data = X_data[idx], Y_data[idx]
# experiment with small datasets
# trainX = X_data[:100]
# trainY = Y_data[:100]
trainX = X_data
trainY = Y_data
# Standardise each feature to zero mean / unit variance.
trainX = (trainX- np.mean(trainX, axis=0))/ np.std(trainX, axis=0)
# 70/30 train/test split; the last 50 rows double as a prediction sample.
test_split_num = int(len(trainX) * test_split)
train_x, test_x = trainX[test_split_num:], trainX[:test_split_num]
train_y, test_y = trainY[test_split_num:], trainY[:test_split_num]
sample_X = trainX[-50:]
#%%
# Q1
# Baseline: all 7 features, 3-layer network.
y, train_op, y_, x, loss = create_model(NUM_FEATURES, neuron_size, weight_decay_beta, learning_rate)
test_err, train_err, prediction = train_model(train_op, train_x, train_y, test_x, test_y, y, y_, x, loss, sample_X)
#%%
plot_train_test_err('plots2/part2_Q1a', epochs, train_err, test_err)
plot_acc_vs_pred('plots2/part2_Q1c', prediction, Y_data)
#%%
# Q2a
# Feature correlation matrix (serial-number column dropped) written to CSV.
df = pd.read_csv('admission_predict.csv')
df = df.iloc[:,1:]
df = df.corr()
df.to_csv('plots2/correlation_matrix.csv')
#%%
# Q3p1
# Recursive feature elimination, round 1: drop each of the 7 features in turn.
# NOTE(review): create_model is called once and each train_model call
# re-initialises the same graph variables, so runs are independent.
y, train_op, y_, x, loss = create_model(6, neuron_size, weight_decay_beta, learning_rate)
test_err_list = []
train_err_list = []
prediction_list = []
x_headers = ['GRE Score','TOEFL Score','University Rating','SOP','LOR','CGPA','Research']
for i in range(7):
    # Slice out feature i (first/last features need simple slicing, the rest need np.append).
    if i == 0:
        train_x_ = train_x[:, i+1:]
        test_x_ = test_x[:, i+1:]
        print(f'With {x_headers[i+1:]}')
    elif i == 6:
        train_x_ = train_x[:, :i]
        test_x_ = test_x[:, :i]
        print(f'With {x_headers[:i]}')
    else:
        train_x_ = np.append(train_x[:, :i], train_x[:, i+1:], axis=1)
        test_x_ = np.append(test_x[:, :i], test_x[:, i+1:], axis=1)
        print(f'With {np.append(x_headers[:i], x_headers[i+1:], axis=0)}')
    test_err, train_err, prediction = train_model(train_op, train_x_, train_y, test_x_, test_y, y, y_, x, loss)
    test_err_list.append(test_err)
    train_err_list.append(train_err)
    prediction_list.append(prediction)
#%%
# Conclusion: Remove University Ranking
plot_rfe_loss('plots2/part3_1', x_headers, epochs, train_err_list)
plot_rfe_loss('plots2/part3_2', x_headers, epochs, test_err_list)
plot_rfe_loss('plots2/part3_3', x_headers, 100, test_err_list)
#%%
# Q3p2. Remove University Ranking
# RFE round 2: with University Rating gone, drop each remaining feature in turn.
y, train_op, y_, x, loss = create_model(5, neuron_size, weight_decay_beta, learning_rate)
test_err_list = []
train_err_list = []
prediction_list = []
x_headers2 = ['GRE Score','TOEFL Score','SOP','LOR','CGPA','Research']
for i in range(6):
    # Remove University Ranking
    train_x_ = np.append(train_x[:, :2], train_x[:, 2+1:], axis=1)
    test_x_ = np.append(test_x[:, :2], test_x[:, 2+1:], axis=1)
    if i == 0:
        train_x_2 = train_x_[:, i+1:]
        test_x_2 = test_x_[:, i+1:]
        print(f'With {x_headers2[i+1:]}')
    elif i == 6:
        train_x_2 = train_x_[:, :i]
        test_x_2 = test_x_[:, :i]
        print(f'With {x_headers2[:i]}')
    else:
        train_x_2 = np.append(train_x_[:, :i], train_x_[:, i+1:], axis=1)
        test_x_2 = np.append(test_x_[:, :i], test_x_[:, i+1:], axis=1)
        print(f'With {np.append(x_headers2[:i], x_headers2[i+1:], axis=0)}')
    test_err, train_err, prediction = train_model(train_op, train_x_2, train_y, test_x_2, test_y, y, y_, x, loss)
    test_err_list.append(test_err)
    train_err_list.append(train_err)
    prediction_list.append(prediction)
#%%
# Conclusion: Remove SOP
plot_rfe_loss('plots2/part3_4', x_headers2, epochs, train_err_list)
plot_rfe_loss('plots2/part3_5', x_headers2, epochs, test_err_list)
plot_rfe_loss('plots2/part3_6', x_headers2, 100, test_err_list)
#%%
# Q3 comparison between RFE
# Compare test error with 0, 1 and 2 features eliminated.
test_err_list = []
train_err_list = []
prediction_list = []
# Before any removal
y, train_op, y_, x, loss = create_model(7, neuron_size, weight_decay_beta, learning_rate)
test_err, train_err, prediction = train_model(train_op, train_x, train_y, test_x, test_y, y, y_, x, loss)
test_err_list.append(test_err)
train_err_list.append(train_err)
prediction_list.append(prediction)
# Remove University Ranking
train_x_ = np.append(train_x[:, :2], train_x[:, 2+1:], axis=1)
test_x_ = np.append(test_x[:, :2], test_x[:, 2+1:], axis=1)
y, train_op, y_, x, loss = create_model(6, neuron_size, weight_decay_beta, learning_rate)
test_err, train_err, prediction = train_model(train_op, train_x_, train_y, test_x_, test_y, y, y_, x, loss)
test_err_list.append(test_err)
train_err_list.append(train_err)
prediction_list.append(prediction)
# Remove SOP
# (index 2 again because the previous removal already shifted the columns)
train_x_ = np.append(train_x_[:, :2], train_x_[:, 2+1:], axis=1)
test_x_ = np.append(test_x_[:, :2], test_x_[:, 2+1:], axis=1)
y, train_op, y_, x, loss = create_model(5, neuron_size, weight_decay_beta, learning_rate)
test_err, train_err, prediction = train_model(train_op, train_x_, train_y, test_x_, test_y, y, y_, x, loss)
test_err_list.append(test_err)
train_err_list.append(train_err)
prediction_list.append(prediction)
#%%
plot_test_err_comparison('plots2/part3_7', epochs, test_err_list)
plot_test_err_comparison('plots2/part3_8', 100, test_err_list)
#%%
# Q4. Neuron size 50, 4 and 5 layer network, learning rate 10e-3,
# features = ['GRE Score','TOEFL Score','LOR','CGPA','Research']
# NOTE(review): the comment above mentions neuron size 50, but neuron_size
# is still 30 at this point in the script -- confirm which was intended.
test_err_list = []
train_err_list = []
prediction_list = []
# Remove University Ranking
train_x_ = np.append(train_x[:, :2], train_x[:, 2+1:], axis=1)
test_x_ = np.append(test_x[:, :2], test_x[:, 2+1:], axis=1)
# Remove SOP
train_x_ = np.append(train_x_[:, :2], train_x_[:, 2+1:], axis=1)
test_x_ = np.append(test_x_[:, :2], test_x_[:, 2+1:], axis=1)
# Train 3/4/5-layer networks, each with and without dropout.
for i in range(3, 6):
    # No Dropouts
    y, train_op, y_, x, loss = create_model(5, neuron_size, weight_decay_beta, learning_rate, layers=i)
    test_err, train_err, prediction = train_model(train_op, train_x_, train_y, test_x_, test_y, y, y_, x, loss)
    test_err_list.append(test_err)
    train_err_list.append(train_err)
    prediction_list.append(prediction)
    # With Dropouts
    y, train_op, y_, x, loss = create_model(5, neuron_size, weight_decay_beta, learning_rate, layers=i, dropout=True)
    test_err, train_err, prediction = train_model(train_op, train_x_, train_y, test_x_, test_y, y, y_, x, loss)
    test_err_list.append(test_err)
    train_err_list.append(train_err)
    prediction_list.append(prediction)
#%%
# 3-layer net with dropout is the best
plot_layer_comp('plots2/part4_1', epochs, test_err_list, 'Test')
plot_layer_comp('plots2/part4_2', epochs, train_err_list, 'Train')
| eddylim95/CZ4042_NeuralNet_project | Assignment_1/start_project_1b.py | start_project_1b.py | py | 12,200 | python | en | code | 1 | github-code | 13 |
26781712662 | """
:Module: shopify_crawler.py
:Author:
Peter Hyl
    :Description: This module contains a web crawler and helper functions that
                  load Shopify URLs to crawl from an input CSV file, collect
                  emails, Facebook and Twitter links and the first N products,
                  then save this data to an output CSV file. Written in Python 3.6.
"""
import csv
import logging
import re
import threading
from json import JSONDecodeError
from queue import Queue
from time import time
from urllib.parse import urlunparse
from Python.basic_functions import initialize_logging
# modules to install, pip3 install requests, bs4
import requests
from bs4 import BeautifulSoup
# Number of concurrent crawler threads consuming the shared work queue.
THREAD_COUNT = 40
class Crawler(threading.Thread):
    """
    Crawler thread class that crawls sub-links["", "about", "about-us", "contact", "contact-us"]
    searching contacts, then collecting title and image source first N products ("collections/all")
    on domain urls.
    Each thread repeatedly pulls a {"url": <domain>} dict from the shared
    queue and fills it in place with the collected contacts and products.
    """
    __slots__ = ["data", "input_queue", "_urls", "_url_collections", "sess"]
    # Case-insensitive pattern matching a complete email address.
    _email_regex = re.compile("([A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4})", re.IGNORECASE)
    _email_black_list = (".png", ".jpg", ".jpeg", ".gif", "example.com") # ignore this suffix
    # Sub-pages of each domain that are scanned for contact information.
    _sub_pages = ["", "about", "about-us", "contact", "contact-us"]
    def __init__(self, input_queue, uid):
        """
        :param input_queue: queue with dictionary{"url": "domain.url"} contain domain url
        :type input_queue: Queue
        :param uid: numeric suffix used to name the thread ("Crawler_<uid>")
        """
        super().__init__(name=f"Crawler_{uid}")
        self.input_queue = input_queue
        # One requests.Session per thread so connections can be reused.
        self.sess = requests.Session()
        self.data = None              # dict currently being filled
        self._urls = None             # sub-page URLs of the current domain
        self._url_collections = None  # "<domain>/collections/all" URL
    def run(self):
        """
        Running until input queue contain any domain to process.
        Iterate the list of sub-pages and request each page, then parse it and
        collect emails, facebook and twitter pages, first N products.
        Append this data to input dict data.
        """
        logging.info("Thread %s running", self.name)
        # NOTE(review): empty()/get() is not atomic -- a thread could block in
        # get() if another consumer drained the queue in between; confirm OK.
        while not self.input_queue.empty():
            self.data = self.input_queue.get()
            logging.info("Start crawling on domain: %s", self.data["url"])
            self.data["email"] = set()
            self.data["facebook"] = set()
            self.data["twitter"] = set()
            # merge scheme, domain, path to sub-pages
            self._urls = list(map(lambda sub: urlunparse(("http", self.data["url"], sub, None, None, None)),
                                  self._sub_pages))
            # merge scheme, domain, path to collections
            self._url_collections = urlunparse(("http", self.data["url"], "collections/all", None, None, None))
            for url in self._urls:
                logging.debug("Crawling page: %s", url)
                try:
                    response = self.sess.get(url)
                except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
                    # ignore pages with errors
                    continue
                if response.status_code == 404:
                    # ignore not found pages
                    continue
                mail, facebook, twitter = self.get_contacts(response.text)
                logging.debug("Found all contacts from sub-page: %s", url)
                self.data["email"].update(mail)
                self.data["facebook"].update(facebook)
                self.data["twitter"].update(twitter)
            # Collapse the contact sets to list/str/"" before adding products.
            self._convert_to_list()
            logging.info("Collected contacts from domain: %s", self.data["url"])
            self.get_first_products()
            logging.info("Collected data from domain: %s", self.data["url"])
        self.sess.close()
        logging.info("Thread %s stopped", self.name)
    def get_contacts(self, data):
        """
        Return all emails, facebook and twitter pages from url
        :param data: data from page
        :return: emails, facebook, twitter
        """
        facebook = set()
        twitter = set()
        logging.debug("Finding emails...")
        # emails are case insensitive (item.lower)
        # NOTE(review): the blacklist suffix check runs on the original-cased
        # match, so e.g. "LOGO.PNG" would slip through -- confirm intended.
        emails = set([item.lower() for item in self._email_regex.findall(data)
                      if not item.endswith(self._email_black_list)])
        logging.debug("Finding facebook and twitter pages...")
        soup = BeautifulSoup(data, "html.parser")
        for ref in soup.find_all(href=re.compile(r"facebook.com|twitter.com")):
            link = ref.get("href")
            # Ternary used purely for its side effect of adding to a set.
            facebook.add(link) if "facebook" in link else twitter.add(link)
        return emails, facebook, twitter
    def get_first_products(self, limit=5):
        """
        Find first N(limit) products from "domain/collections/all" get title and image source
        then append it to input data.
        :param limit: number of first products who want return
        """
        products = []
        logging.info("Finding first %d products from page %s", limit, self._url_collections)
        try:
            response = self.sess.get(self._url_collections)
        except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
            # ignore pages with errors
            self._fill_empty(limit)
            return
        if response.status_code == 404:
            # ignore not found pages
            self._fill_empty(limit)
            return
        soup = BeautifulSoup(response.text, "html.parser")
        # Collect up to `limit` distinct relative product links.
        for ref in soup.find_all(["a", "href"], href=re.compile(r"/products/")):
            link = ref.get("href")
            if link.startswith("/") and not any([l for l in products if link == l]): # exact string match
                products.append(link)
            if len(products) >= limit:
                break
        logging.debug("Found first %d products from %s, collecting data...", limit, self._url_collections)
        # merge scheme, domain, path to absolute link, .json
        urls = list(map(lambda path: urlunparse(("http", self.data["url"], path + ".json", None, None, None)),
                        products))
        i = 1
        for url in urls:
            try:
                response = self.sess.get(url)
            except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
                # ignore pages with errors
                continue
            if response.status_code == 404:
                # ignore not found pages
                continue
            try:
                # Shopify serves product metadata at "<product-url>.json".
                data = response.json()
                self.data["title " + str(i)] = data["product"]["title"]
                if data["product"]["image"]:
                    self.data["image " + str(i)] = data["product"]["image"]["src"]
                else:
                    self.data["image " + str(i)] = ""
            except JSONDecodeError:
                self.data["title " + str(i)] = ""
                self.data["image " + str(i)] = ""
            i += 1
        # Pad the remaining slots so every row has exactly `limit` title/image pairs.
        while i <= limit:
            self.data["title " + str(i)] = ""
            self.data["image " + str(i)] = ""
            i += 1
        logging.info("Collected first %s products from page %s", limit, self._url_collections)
    def _fill_empty(self, count):
        """
        Fill empty data.
        Writes "" for every "title i"/"image i" slot, for i in 1..count.
        """
        for i in range(1, count + 1):
            self.data["title " + str(i)] = ""
            self.data["image " + str(i)] = ""
    def _convert_to_list(self):
        """
        Convert set of data to list or string if contains less than two items
        (empty set -> "", single item -> that item, several items -> list).
        """
        for item in ["email", "facebook", "twitter"]:
            self.data[item] = list(self.data[item])
            if self.data[item]:
                if len(self.data[item]) == 1:
                    self.data[item] = self.data[item][0]
            else:
                self.data[item] = ""
def load_stores_from_csv(input_file):
    """Load the list of store domains from a CSV file.

    :param input_file: path to a CSV file with a ``url`` column
    :return: list of dicts of the form ``{"url": <domain>}``
    """
    logging.info("Starting loading data from file: %s", input_file)
    with open(input_file, encoding="utf-8") as csv_file:
        stores = [{"url": record["url"]} for record in csv.DictReader(csv_file)]
    logging.info("Loaded data")
    return stores
def write_to_csv(data, output_file):
    """Write the collected crawl results to a CSV file.

    :param data: list of dicts; the first row's keys define the header
    :param output_file: destination CSV path
    :type data: list
    :type output_file: str
    """
    logging.info("Starting writing data into file: %s", output_file)
    with open(output_file, "w", newline="", encoding="utf-8") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=data[0].keys())
        writer.writeheader()
        writer.writerows(data)
    logging.info("Wrote data")
def main():
    """Load store URLs, crawl them concurrently and save the results.

    Output CSV row format:
    [url, email, facebook, twitter, title 1, image 1, ..., title n, image n]
    Domains are processed by THREAD_COUNT crawler threads sharing one queue.
    """
    workers = []
    input_queue = Queue()
    start = time()
    initialize_logging(log_file="./shopify_crawler.log", level="info")
    logging.info("Starting...")
    dict_stores = load_stores_from_csv("stores.csv")
    # Fill the thread-safe work queue (plain loop: a list comprehension used
    # only for its side effects was an anti-pattern).
    for store in dict_stores:
        input_queue.put(store)
    # init and start threads
    for uid in range(THREAD_COUNT):
        crawler = Crawler(input_queue, uid)
        workers.append(crawler)
        crawler.start()
    # waiting completion of data collection
    for w in workers:
        w.join()
    # The crawler threads mutated the dicts in place; dump them to CSV.
    write_to_csv(dict_stores, "output.csv")
    end = time()
    logging.info("Elapsed time (seconds) = %s", str(round(end - start, 3)))
if __name__ == "__main__":
    main()
| peterhyl/codes | Python/shopify_crawler.py | shopify_crawler.py | py | 9,845 | python | en | code | 0 | github-code | 13 |
31006637522 | import numpy as np
import argparse
import cv2
# CLI: --video selects the input file; --min-area is parsed but never used below.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# NOTE(review): if --video is omitted this passes None to VideoCapture -- confirm.
cap = cv2.VideoCapture(args["video"])
#while(cap.isOpened()):
# ret, frame = cap.read()
# cv2.imshow('frame',frame)
# cv2.waitKey(0)
while(cap.isOpened()):
    # Read and display frames until the stream ends or 'q' is pressed.
    (grabbed, frame) = cap.read()
    if not grabbed:
        break
    fps = 15
    # NOTE(review): fps, height, width, layers and gray are computed but unused.
    height , width , layers = frame.shape
    #print height, width
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame',frame)
    # cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the capture handle and close the display windows.
cap.release()
cv2.destroyAllWindows()
| eric-macdonald/opencv | play_video.py | play_video.py | py | 781 | python | en | code | 2 | github-code | 13 |
71257258899 | import openpyxl
# Open the Excel workbook (comments translated from Indonesian)
workbook = openpyxl.load_workbook("file.xlsx")
# Get the first sheet
sheet = workbook.worksheets[0]
# Print each column's first cell (the column headers)
for column in sheet.columns:
    print(column[0].value)
# Print every cell value, row by row (original comment said "first row")
for row in sheet.rows:
    for cell in row:
        print(cell.value, end=" ")
    print()
| ugunNet21/learn-python | advanced/readexcel.py | readexcel.py | py | 340 | python | en | code | 1 | github-code | 13 |
37952889918 | ###############################################################
#
# Job options file to read charge interpolation constants from
# text file and output a new pool file and sqlite file
#
#==============================================================
# WRITEDB may be pre-set in the job-options namespace (checked via dir()).
if 'WRITEDB' in dir() and WRITEDB:
    # Write mode: dump constants from text files into pool/sqlite output.
    dowrite=TRUE
    doread=FALSE
    constantssource=1
    constantsfile="WrittenConstants.txt"
    errorsfile="WrittenErrors.txt"
else:
    # Read mode (default): load constants from the conditions database.
    dowrite=FALSE
    doread=TRUE
    constantssource=2
    constantsfile="ReadConstants.txt"
    errorsfile="ReadErrors.txt"
# NOTE(review): TRUE/FALSE, ServiceMgr, ToolSvc, Service, VERBOSE, INFO and
# theApp are presumably injected by the Athena job-options environment.
if not 'MYTAG' in dir() or MYTAG=='':
    MYTAG="PixelOfflineReco-03"
if not 'MYDBTYPE' in dir() or MYDBTYPE!="PIXEL_OFL":
    #MYDBTYPE="PIXEL_OFL"
    MYDBTYPE="LOCAL"
if not 'MYRUN' in dir():
    MYRUN=0
#==============================================================
from AthenaCommon.GlobalFlags import GlobalFlags
from AthenaCommon.DetFlags import DetFlags
#GlobalFlags.DetGeo.set_ctbh8()
GlobalFlags.DetGeo.set_atlas()
#GlobalFlags.DetGeo.set_commis()
GlobalFlags.DataSource.set_geant4()
#GlobalFlags.DataSource.set_data()
#include("IOVDbSvc/CondDBSetup.py")
from IOVDbSvc.CondDB import conddb
conddb.setGlobalTag('DEFAULTCOND')
if doread:
    # Attach the calibration folder with the chosen tag and pin the run number.
    conddb.addFolder(MYDBTYPE,"/PIXEL/PixReco <tag>"+MYTAG+"</tag>")
    ServiceMgr.IOVDbSvc.forceRunNumber=MYRUN
# Just the pixel and SCT
DetFlags.detdescr.pixel_setOn()
#DetFlags.detdescr.SCT_setOn()
# Select the geometry version.
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetDescrVersion='ATLAS-CSC-02-00-00'
# Initialize geometry
from AtlasGeoModel import GeoModelInit
from AtlasGeoModel import SetGeometryVersion
# This line can be excluded and it will by default connect to SQlite file mycool.db
# IOVDbSvc.dbConnection="impl=cool;techno=sqlite;schema=mycool.db;X:OFLP200"
#include ( "DetDescrCondAthenaPool/DetDescrCondAthenaPool_joboptions.py" )
from RegistrationServices.OutputConditionsAlg import OutputConditionsAlg
# Conditions output: the constants object and the tag it is registered under.
myOCA=OutputConditionsAlg(outputFile="dummy.root")
myOCA.ObjectList = [ "DetCondCFloat#/PIXEL/PixReco"]
myOCA.IOVTagList= [MYTAG]
# Load algorithms Any algorithm that uses the tool will do
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
from PixelConditionsTools.PixelConditionsToolsConf import PixelRecoDbTestWriteRead
topSequence += PixelRecoDbTestWriteRead()
topSequence.PixelRecoDbTestWriteRead.Read = doread
topSequence.PixelRecoDbTestWriteRead.Write = dowrite
from PixelConditionsTools.PixelConditionsToolsConf import PixelRecoDbTool
ToolSvc += PixelRecoDbTool()
ToolSvc.PixelRecoDbTool.OutputLevel = VERBOSE
ToolSvc.PixelRecoDbTool.InputSource = constantssource
ToolSvc.PixelRecoDbTool.PixelChargeInterpolationDataFile = constantsfile
ToolSvc.PixelRecoDbTool.PixelClusterOnTrackErrorDataFile = errorsfile
ToolSvc.PixelRecoDbTool.DumpConstants = 1
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = INFO
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed (default is 10)
theApp.EvtMax = 1
#==============================================================
#
# End of job options file
#
###############################################################
| rushioda/PIXELVALID_athena | athena/InnerDetector/InDetConditions/PixelConditionsTools/share/PixelOfflineCalibDbInteraction.py | PixelOfflineCalibDbInteraction.py | py | 3,475 | python | en | code | 1 | github-code | 13 |
def is_palindromic(number: int) -> bool:
    """Return True when `number` reads the same backwards (e.g. 121, 9009)."""
    mirrored = "".join(reversed(str(number)))
    return int(mirrored) == number
def solution(number_of_digits: int) -> int | None:
lower_border = 10**(number_of_digits - 1)
# since the range not inclusive the last integer, I removed "+ 1"
upper_border = 10**(number_of_digits)
products: list[int] = []
for number1 in range(lower_border, upper_border):
for number2 in range(lower_border, upper_border):
product = number1 * number2
products.append(product)
products.sort(reverse=True)
for product in products:
if is_palindromic(product):
return product
return None
| Irench1k/ProjectEuler | problems/problem4/p4.py | p4.py | py | 872 | python | en | code | 0 | github-code | 13 |
26473998288 | from optimization.src.TSPOptimizerStrategy import TSPOptimizerStrategy
class TSPOptimizerClosestCityStrategy(TSPOptimizerStrategy):
    """Nearest-neighbour heuristic for the TSP.

    Starting from the origin city, repeatedly travels to the closest
    not-yet-visited city until every city has been visited.
    """
    def __init__(self, origin_city, cities):
        TSPOptimizerStrategy.__init__(self, origin_city, cities)
        # Map city name -> visited flag, initially all False.
        self.visited_cities = {city.name: False for city in self.cities}
    def optimize(self):
        """Build and return the route as a list of cities in visiting order."""
        if len(self.cities) == 0:
            return []
        route = []
        previous_city = self.origin_city
        for _ in range(len(self.cities)):
            city = self.get_closest_city_not_visited(previous_city)
            self.visited_cities[city.name] = True
            route.append(city)
            previous_city = city
        return route
    def get_closest_city_not_visited(self, city):
        """Return the unvisited city with the smallest trip time from `city`,
        or None when every city has already been visited.
        """
        closest_city = None
        # BUG FIX: the original used the magic sentinel 9999999, which breaks
        # for trip times beyond it; infinity is always larger than a real time.
        closest_city_trip_time = float("inf")
        for other_city in self.cities:
            if self.is_city_visited(other_city):
                continue
            trip_time = city.get_trip_time(other_city)
            if trip_time < closest_city_trip_time:
                closest_city = other_city
                closest_city_trip_time = trip_time
        return closest_city
    def is_city_visited(self, city):
        """Return True when `city` has already been added to the route."""
        return self.visited_cities[city.name]
| marianoo-andres/EasyTripServer | optimization/src/TSPOptimizerClosestCityStrategy.py | TSPOptimizerClosestCityStrategy.py | py | 1,378 | python | en | code | 0 | github-code | 13 |
22478702775 | '''
1. 从wiki_crop里面按一些条件筛选图片
2. 将人脸部分裁剪出来
3. 文件名包含性别和年龄
4. 放入images-<dataset-size>文件夹
'''
import scipy.io as sio
import cv2
import os
# Haar cascade used for frontal face detection (absolute local path).
face_cascade = cv2.CascadeClassifier('H:/venvs/pytorch-cpu/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
root = "./wiki_crop/"
path = "wiki.mat"
# wiki.mat holds parallel per-image metadata arrays (comments translated from Chinese).
data = sio.loadmat(root+path)["wiki"][0][0]
# birth_times = data[0][0]
shot_times = data[1][0]
file_paths = data[2][0]
gender_flags = data[3][0] # 0-female, 1-male, NaN-unknown
# person_names = data[4][0]
face_locations = data[5][0]
face_scores = data[6][0] # Inf means no face in the image, and returns the entire image
second_face_scores = data[7][0] # NaN means no second face in the image
total = len(shot_times)
cnt = 0 # number of valid samples collected so far
infact = 0 # number of records actually traversed
male = 0
female = 0
dataset_size = 1100
if not os.path.exists("./images-{}/".format(dataset_size)):
    os.mkdir("./images-{}/".format(dataset_size))
for i in range(total):
    # Show progress
    cnt += 1
    infact += 1
    if cnt > dataset_size:
        break
    print("{}/{}/{}".format(cnt, infact, total), end="\r")
    # Fetch the metadata associated with this image
    shot_time = int(shot_times[i])
    file_path = file_paths[i][0]
    gender_flag = gender_flags[i]
    face_score = face_scores[i]
    second_face_score = second_face_scores[i]
    birth_time = int(file_path.split("_")[-2].split("-")[0]) # birth year parsed from the file name
    age = shot_time - birth_time # age in full years
    # Only keep images with one usable face and a gender flag of 0 or 1
    if face_score < 0 or second_face_score < 10 or str(gender_flag) == "nan" \
            or int(gender_flag) not in (0,1):
        cnt -= 1
        continue
    # Keep the numbers of men and women balanced (half each)
    gender_flag = int(gender_flag)
    if gender_flag == 0 and female >= dataset_size//2:
        cnt -= 1
        continue
    elif gender_flag == 1 and male >= dataset_size//2:
        cnt -= 1
        continue
    # Detect the face position
    try:
        img = cv2.imread(root+file_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # NOTE(review): bare except also hides non-I/O errors; consider narrowing.
    except:
        cnt -= 1
        continue
    if len(faces) == 0:
        cnt -= 1
        continue
    # Update the male/female tallies
    gender_flag = int(gender_flag)
    if gender_flag == 0:
        female += 1
    elif gender_flag == 1:
        male += 1
    # Keep only the face region (RGB image)
    # NOTE(review): numpy images index as [row(y), col(x)] but the cascade
    # returns (x, y, w, h); img[x:x+w, y:y+h] looks transposed -- confirm.
    x, y, w, h = faces[0]
    img = img[x:x+w, y:y+h]
    cv2.imwrite("images-{}/{}-{}-{}.jpg".format(dataset_size, cnt, age, gender_flag), img)
| NICE-FUTURE/predict-gender-and-age-from-camera | data/process_wiki_data.py | process_wiki_data.py | py | 2,650 | python | en | code | 33 | github-code | 13 |
2867260268 | import os
import numpy as np
import torch
import pdb
import cleaner
import load_args
import utils
# Parse the commandline arguments
args = load_args.load_args()
# Create the config dictionary
cfg = utils.load_config(args)
# Get the class IDs for the novel set of classes
novel_class_ids = utils.get_novel_class_ids(cfg)
# Load the training and test set features
train_features, train_labels = utils.load_features(cfg, 'train')
val_features, val_labels = utils.load_features(cfg, 'val')
# Load the noisy set features, extracted from images retrieved from YFCC100M
noisy_features, noisy_labels = utils.load_noisy_features(cfg)
# Start running the few-shot experiments; one accuracy slot per episode.
all_acc = np.zeros((cfg['num_episodes'],))
for episode_id in range(cfg['num_episodes']):
    # Get the indices of clean images for this episode
    ep_indices = utils.get_splits(cfg, novel_class_ids, train_labels, episode_id)
    # Only select the features corresponding to the clean images
    ep_clean_feats = train_features[ep_indices,:]
    ep_clean_labels = train_labels[ep_indices]
    # Run the cleaner to assign relevance weights
    rel_weights = cleaner.run_cleaner(cfg, ep_clean_feats, ep_clean_labels, noisy_features, noisy_labels, faiss_gpu_id = args.faiss_gpu_id)
    # Create the prototypical classifier
    classifier, label_set = utils.get_prototypical_classifier(ep_clean_feats, ep_clean_labels, noisy_features, noisy_labels, rel_weights)
    # Classify the test images
    accuracy = utils.run_eval(classifier, label_set, novel_class_ids, val_features, val_labels)
    all_acc[episode_id] = accuracy
    print('{}, {}-shot, Episode/split:{:d}, Accuracy: {:.2f}'.format(cfg['args'].dataset, cfg['args'].kshot, episode_id, accuracy))
# 95% confidence interval: 1.96 * std / sqrt(N) over the per-episode accuracies.
std_results = all_acc.std(axis=0)
ci95_results = 1.96*std_results/np.sqrt(cfg['num_episodes'])
print('Completed {:d} episodes (splits). {}, {}-shot, Average Accuracy: {:.2f} +- {:.2f}'.format(cfg['num_episodes'], cfg['args'].dataset, cfg['args'].kshot, all_acc.mean(), ci95_results))
| google-research/noisy-fewshot-learning | run.py | run.py | py | 1,981 | python | en | code | 23 | github-code | 13 |
29604103889 | import socket
from time import sleep, time
# Robot-side TCP server: accept exactly one client (the robot) on localhost.
HOST = '127.0.0.1'
PORT = 50007
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()  # blocks here until the client connects
print('working')
def listen(): # produces encoder values for the wheels. returns right wheel value, left wheel value.
    """Block until a complete '*'-delimited frame arrives on ``conn`` and
    return (right_wheel_ticks, left_wheel_ticks) as ints."""
    buffer = ""
    # Discard bytes until a start-of-frame '*' marker is seen.
    while not buffer:
        data = conn.recv(1)
        if data == b'*':
            data = conn.recv(1)
            buffer += data.decode(encoding='utf-8')
            # Accumulate one byte at a time until the closing '*'.
            while data != b'*':
                data = conn.recv(1)
                buffer += data.decode(encoding='utf-8')
    x = buffer.split('*')
    if len(x) > 1:
        # Several frames arrived: keep the latest complete one.
        x = x[-2]
    buffer = ""
    raw_vals = [float(i) for i in x.split(',')]
    # 153 scales wheel values to encoder ticks -- presumably ticks per
    # revolution; TODO confirm against the robot firmware.
    RWV = int(raw_vals[0]*153)
    LWV = int(raw_vals[1]*153)
    return RWV, LWV # returned as a tuple!
def strait(mag,direction): #direction +1 or -1
    """Drive in a straight line for ``mag`` right-wheel encoder ticks.

    mag: distance to travel, in encoder ticks.
    direction: +1 to drive forward, -1 to drive backward.
    (Name kept as-is -- it doubles as the external command interface.)
    """
    current = listen()
    target = current[0]+mag*direction
    while True:
        current = listen()
        print(current,target)
        # Bug fix: the reverse-direction test compared the int ``direction``
        # against the string '-1', which is always False, so driving backwards
        # never engaged the motors correctly.
        if (current[0] <= target and direction == +1) or \
           (current[0] >= target and direction == -1):
            conn.send(bytes(f'{direction*-10},{direction*-10}','utf-8'))
        else:
            # Target reached: stop the motors and return.
            conn.send(bytes('0,0','utf-8'))
            return
# Main loop: repeatedly prompt the operator for a distance and drive forward.
while True:
    strait(int(input('Distance to travel: ')),+1)#int(input('enter direction: '))
| shimonfiddler/1420 | encodedbotmover.py | encodedbotmover.py | py | 1,430 | python | en | code | 0 | github-code | 13 |
31075988506 | import numpy as np
from utils import wrapToPi
def ctrl_pose(x,y,th,x_g,y_g,th_g):
    """Pose-stabilisation controller for a unicycle-model robot.

    Steers the robot from its current pose (x, y, th) towards the goal pose
    (x_g, y_g, th_g) and returns the saturated control input [V, om].
    """
    # Controller gains (k1, k2, k3) -- all must be strictly positive.
    gains = np.array([0.5, 0.5, 1.2])
    # Position error relative to the goal.
    err_x = x_g - x
    err_y = y_g - y
    # Express the error in polar coordinates (rho, alpha, delta).
    rho = np.sqrt(err_x**2 + err_y**2)
    alpha = wrapToPi(np.arctan2(err_y, err_x) - th)
    delta = wrapToPi(alpha + th - th_g)
    # Closed-loop control law (sinc(alpha/pi) handles the alpha -> 0 limit).
    V = gains[0] * rho * np.cos(alpha)
    om = gains[1] * alpha + gains[0] * np.sinc(alpha / np.pi) * np.cos(alpha) * (alpha + gains[2] * delta)
    # Saturate the commands: |V| <= 0.5, |om| <= 1.
    V = np.sign(V) * min(0.5, np.abs(V))
    om = np.sign(om) * min(1, np.abs(om))
    return np.array([V, om])
| anqif/AA274_HW1 | P3_pose_stabilization.py | P3_pose_stabilization.py | py | 742 | python | en | code | 1 | github-code | 13 |
24384537992 | import matplotlib.pyplot as plt
import sys
# Plot the training loss (column 4 of a tab-separated log) and its running
# average against iteration number (column 1).
# Usage: python loss_display.py <loss_log.txt>
# import numpy as np
# plt.rcParams['font.sas-serig']=['SimHei']  # allow CJK characters in labels
# plt.rcParams['axes.unicode_minus']=False   # render the minus sign correctly
# data=np.loadtxt('loss.txt',delimiter='\t')
# print(data)
# x = [row[0] for row in data]
# y = [row[3] for row in data]
# z = [row[4] for row in data]
# s = [row[1] for row in data]
x = []  # iteration numbers
y = []  # raw loss values
z = []  # exponential moving average of the loss
# Context manager guarantees the handle is closed (the original leaked it on error).
with open(sys.argv[1], 'r') as f:
    lines = f.readlines()
avg = -1.0  # sentinel: negative means "no sample seen yet"
for line in lines:
    fields = line.strip().split('\t')
    x.append(float(fields[0]))
    loss = float(fields[3])
    if avg < 0.0:
        # First sample seeds the moving average.
        avg = loss
    else:
        # Exponential moving average with decay 0.9.
        avg = 0.9 * avg + 0.1 * loss
    y.append(loss)
    z.append(avg)
plt.title('Loss Tendency of Palms Recognition Training')
# plt.axis([0,6,0,6])
plt.plot(x, y, color='green', label='Loss')
plt.plot(x, z, color='blue', label='Avg Loss')
plt.xlabel('Iteration times')
plt.ylabel('Loss')
plt.grid()
plt.legend()
plt.show()
# plt.savefig('scatter.png')
32456364693 | """Utils for tracking graph homophily and heterophily"""
# pylint: disable=W0611
from . import function as fn, to_bidirected
try:
import torch
except ImportError:
HAS_TORCH = False
else:
HAS_TORCH = True
__all__ = [
"node_homophily",
"edge_homophily",
"linkx_homophily",
"adjusted_homophily",
]
def check_pytorch():
    """Raise ``ModuleNotFoundError`` unless PyTorch was imported successfully."""
    if not HAS_TORCH:
        raise ModuleNotFoundError(
            "This function requires PyTorch to be the backend."
        )
def get_long_edges(graph):
    """Return the graph's (src, dst) edge tensors cast to int64."""
    endpoints = graph.edges()
    return endpoints[0].long(), endpoints[1].long()
def node_homophily(graph, y):
    r"""Node homophily (Geom-GCN, https://arxiv.org/abs/2002.05287).

    For every node, compute the fraction of its predecessors sharing its
    label, then average that fraction over all nodes.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    y : torch.Tensor
        Node labels of shape (|V|,).

    Returns
    -------
    float
        The node homophily value.
    """
    check_pytorch()
    with graph.local_scope():
        # int64 endpoints, in case the graph stores int32 edges.
        u, v = get_long_edges(graph)
        # Per-edge indicator: 1.0 where both endpoints share a label.
        graph.edata["same_class"] = (y[u] == y[v]).float()
        # Average the indicator over each node's incoming edges...
        graph.update_all(
            fn.copy_e("same_class", "m"), fn.mean("m", "same_class_deg")
        )
        # ...then average over nodes.
        return graph.ndata["same_class_deg"].mean(dim=0).item()
def edge_homophily(graph, y):
    r"""Edge homophily ratio (Zhu et al., https://arxiv.org/abs/2006.11468).

    The fraction of edges whose two endpoints carry the same class label.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    y : torch.Tensor
        Node labels of shape (|V|,).

    Returns
    -------
    float
        The edge homophily ratio.
    """
    check_pytorch()
    with graph.local_scope():
        # int64 endpoints, in case the graph stores int32 edges.
        u, v = get_long_edges(graph)
        # 1.0 per edge whose endpoints share a label, 0.0 otherwise.
        same_label = (y[u] == y[v]).float()
        return same_label.mean(dim=0).item()
def linkx_homophily(graph, y):
    r"""LINKX homophily (https://arxiv.org/abs/2110.14446).

    For each class, compare the degree-weighted fraction of same-class
    neighbours against the class's share of all nodes; sum the positive
    excesses and normalise by (C - 1).

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    y : torch.Tensor
        Node labels of shape (|V|,).

    Returns
    -------
    float
        The homophily value.
    """
    check_pytorch()
    with graph.local_scope():
        # int64 endpoints, in case the graph stores int32 edges.
        u, v = get_long_edges(graph)
        # Per-node count of in-neighbours sharing the node's label.
        graph.edata["same_class"] = (y[u] == y[v]).float()
        graph.update_all(
            fn.copy_e("same_class", "m"), fn.sum("m", "same_class_deg")
        )
        in_deg = graph.in_degrees().float()
        total_nodes = graph.num_nodes()
        # Labels are assumed to be 0..C-1.
        n_classes = y.max(dim=0).values.item() + 1
        acc = torch.tensor(0.0).to(graph.device)
        for cls_id in range(n_classes):
            mask = y == cls_id
            same_deg = graph.ndata["same_class_deg"][mask].sum()
            class_deg = in_deg[mask].sum()
            class_size = mask.sum()
            # Only positive excess over the class's node share contributes.
            acc += max(0, same_deg / class_deg - class_size / total_nodes)
        return acc.item() / (n_classes - 1)
def adjusted_homophily(graph, y):
    r"""Adjusted homophily (https://arxiv.org/abs/2209.06177).

    Edge homophily corrected for the expected number of same-class edges
    under a degree-preserving null model, making values comparable across
    datasets with different class counts, sizes and degree distributions.
    Can be negative; zero means edges are independent of labels.

    Parameters
    ----------
    graph : DGLGraph
        The graph.
    y : torch.Tensor
        Node labels of shape (|V|,).

    Returns
    -------
    float
        The adjusted homophily value.
    """
    check_pytorch()
    # The measure is defined on an undirected graph.
    graph = to_bidirected(graph.cpu()).to(y.device)
    h_edge = edge_homophily(graph, y)
    deg = graph.in_degrees().float()
    # Labels are assumed to be 0..C-1.
    n_classes = y.max().item() + 1
    # Total degree mass per class.
    class_deg = torch.zeros(n_classes).to(y.device)
    class_deg.index_add_(dim=0, index=y, source=deg)
    # Expected same-class edge fraction under the null model.
    expected = (class_deg**2).sum() / graph.num_edges() ** 2
    return ((h_edge - expected) / (1 - expected)).item()
| keli-wen/dgl | python/dgl/homophily.py | homophily.py | py | 8,309 | python | en | code | null | github-code | 13 |
class Universal:
    """Base record type; instances are registered under random addresses.

    Class attributes (shared across the whole hierarchy):
      data_type  -- human-readable name of the concrete type.
      data_types -- set of every type name that has been instantiated.
      data_keys  -- field names prompted for when no data dict is supplied.
      instances  -- address -> instance registry shared by all subclasses.
    """
    data_type = 'Universal'
    data_types = set()
    data_keys = []
    instances = {}

    def __init__(self, datas=None):
        # The payload dict is private; use get_data/set_data to access it.
        self.__payload = datas

    def __repr__(self):
        return f'{self.__payload}'

    def get_data(self, key):
        """Return the value stored under *key*."""
        return self.__payload[key]

    def set_data(self, key, data):
        """Store *data* under *key*."""
        self.__payload[key] = data

    @classmethod
    def new_instance(cls, datas=None):
        """Create, register and return a new instance of *cls*.

        A unique 9-digit address is drawn at random; when *datas* is omitted
        the user is prompted for each field listed in ``data_keys``.
        """
        from random import randint
        address = f'{randint(0, 999999999):09}'
        # Re-draw until the address is unused.
        while address in cls.instances:
            address = f'{randint(0, 999999999):09}'
        if datas is None:
            datas = {key: input(f'{key}:') for key in cls.data_keys}
        cls.instances[address] = cls(datas)
        cls.data_types.add(cls.data_type)
        return cls.instances[address]
class Manager(Universal):
    """Console front-end for browsing and creating Universal records."""

    @classmethod
    def new_type_instance(cls):
        """Prompt for a registered data type and create an instance of it."""
        print('data types : ', end='')
        for registered in Manager.data_types:
            print(f'[{registered}]', end=' ')
        print()
        chosen = input('type : ')
        # Dispatch to whichever Universal subclass matches the typed name.
        for subclass in Universal.__subclasses__():
            if chosen == subclass.data_type:
                subclass.new_instance()

    @classmethod
    def get_instance(cls, address):
        """Return the instance registered at *address*."""
        return cls.instances[address]

    @classmethod
    def get_all_instance(cls):
        """Return the full address-to-instance registry."""
        return cls.instances

    @classmethod
    def show_instance(cls):
        """Print every registered instance, one per line."""
        for address in cls.instances:
            print(cls.instances[address])

    @classmethod
    def manager(cls):
        """Show the menu once and run the chosen action; True means exit."""
        menus = {'0': {'func_name': 'new data', 'func': cls.new_type_instance},
                 '1': {'func_name': 'show data', 'func': cls.show_instance},
                 '9': {'func_name': 'exit', 'func': lambda: True}}
        for key in menus:
            print(f'{key}.{menus[key]["func_name"]}', end=' ')
        print()
        select = input('select:')
        if select in menus:
            # Only the exit action returns a truthy value.
            if menus[select]['func']():
                return True
        else:
            print('wrong input')
| JJeKJJeKeee/Python_study | Universal_v1.py | Universal_v1.py | py | 2,122 | python | en | code | 0 | github-code | 13 |
72304573139 | from entry_task.models import User, EventInfo, Event, EventLike, EventParticipation, EventComment, Image
from entry_task.helpers import event_helpers
from entry_task.exceptions import InsertError,NotFoundError
def get_event(event_id):
    """Return one event, fully populated, as a JSON-serialisable dict.

    Raises NotFoundError when no EventInfo row exists for *event_id*.
    """
    try:
        info = EventInfo.objects.get(event_id=event_id)
        # Gather the related collections for the event.
        participants = EventParticipation.objects.get_list_users(event_id)
        likes = EventLike.objects.get_list_users(event_id)
        comments = EventComment.objects.get_list_comments(event_id)
        photos = Image.objects.get_photo_srcs(event_id)
        return Event(info, photos, likes, comments, participants).as_json()
    except EventInfo.DoesNotExist:
        raise NotFoundError("Cannot find the event")
def get_list_events(page, page_size, event_type, start_date, end_date):
    """Return a page of events, filtered by type and date range, as JSON dicts."""
    queryset = EventInfo.objects.all()
    # Apply filters in sequence, then paginate the remainder.
    queryset = event_helpers.filter_list_by_type(queryset, event_type)
    queryset = event_helpers.filter_list_by_date_ranges(queryset, start_date, end_date)
    queryset = event_helpers.paginate_list(queryset, page, page_size)
    return [item.as_json() for item in queryset]
def insert_activity(event_id, user_id, type, date, content=""):
    """Record a comment, like or participation (the default) for an event.

    Raises InsertError when either the event or the user does not exist.
    (Parameter name ``type`` shadows the builtin but is part of the API.)
    """
    if (not EventInfo.objects.filter(event_id=event_id).exists()
            or not User.objects.filter(user_id=user_id).exists()):
        raise InsertError("Event or user does not exist")
    if type == "comments":
        EventComment.objects.insert_to_database(event_id, user_id, date, content)
    elif type == "likes":
        EventLike.objects.insert_to_database(event_id, user_id, date)
    else:
        # Anything else counts as a participation record.
        EventParticipation.objects.insert_to_database(event_id, user_id, date)
| hvloc15/Entry-Task | EntryTask/entry_task/services/event_services.py | event_services.py | py | 1,687 | python | en | code | 0 | github-code | 13 |
37948249668 | from AthenaCommon import Logging
from .non_blocking_stream_reader import NonBlockingStreamReader
import subprocess
## Get handle to Athena logging
logger = Logging.logging.getLogger("PowhegControl")
class ProcessManager(object):
    """! Wrapper that tracks a pool of Powheg subprocesses until all finish.
    @author James Robinson <james.robinson@cern.ch>
    """
    def __init__(self, process_list):
        """! Constructor.
        @param process_list List of processes to manage.
        """
        self.__process_list = process_list
        # Remember the starting pool size for progress messages.
        self.__n_initial = len(process_list)
    def monitor(self):
        """! Poll the managed processes, logging as each one terminates."""
        # Assign 1-based identifiers for the log messages.
        for id_number, process in enumerate(self.__process_list, start=1):
            process.id_number = id_number
        while len(self.__process_list) > 0:
            # Iterate over a copy so finished processes can be removed.
            for process in list(self.__process_list):
                if process.has_output():
                    continue
                _return_code = process.return_code
                self.__process_list.remove(process)
                if _return_code == 0:
                    logger.info("Finished process #{}: there are now {}/{} running".format(process.id_number, len(self.__process_list), self.__n_initial))
                else:
                    logger.warning("Process #{} terminated unexpectedly (return code {}): there are now {}/{} running".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))
class SingleProcessThread(object):
    """! Single executable running in a subprocess (usually PowhegBox).
    @author James Robinson <james.robinson@cern.ch>
    """
    # Which logger level each stream maps to.
    log_level = {"stdout": "info", "stderr": "error"}
    __output_prefix = " | "
    __ignore_output = []
    def __init__(self, command_list, seed_index=None, stdin=None, ignore_output=None):
        """! Constructor.
        Setup underlying process together with non-blocking readers for stdout and stderr.
        @param command_list Command that will be run (possibly with options).
        @param seed_index Which seed from pwgseeds.dat to use.
        @param stdin An open file handle providing input.
        @param ignore_output List of strings to filter out from messages.
        """
        if not isinstance(command_list, list):
            command_list = [command_list]
        # Subprocess arguments must all be strings.
        command_list = [str(x) for x in command_list]
        # Set up messages to ignore
        if ignore_output is not None:
            self.__ignore_output = ignore_output
        # Usual case, where no open file handle is provided
        if stdin is None:
            self.__process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            # Write seed to stdin
            if seed_index is not None:
                self.__output_prefix += "Process #{}: ".format(seed_index)
                # NOTE(review): writes a str to the pipe -- looks like
                # Python-2-era code; under Python 3 this would need an
                # explicit encode. Confirm which interpreter runs this.
                self.__process.stdin.write(str(seed_index))
                self.__process.stdin.close()
                with open("pwgseeds.dat", "rb") as seed_file:
                    random_seed_list = seed_file.read().splitlines()
                # seed_index is 1-based, hence the -1 when indexing the file.
                self.log("Providing random seed: {}".format(random_seed_list[seed_index - 1]))
        # Using an open file handle to provide input to stdin: remember to close this later
        else:
            self.__process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stdin=stdin, stderr=subprocess.PIPE)
        # Setup non-blocking stream readers for stdout and stderr
        self.__stdout = NonBlockingStreamReader(self.__process.stdout)
        self.__stderr = NonBlockingStreamReader(self.__process.stderr)
    def has_output(self):
        """! Write queued output and return process status."""
        # Check liveness BEFORE flushing, so the final flush still runs on the
        # call where the process is first seen to have ended.
        status = self.is_running()
        self.write_queued_output()
        return status
    def is_running(self):
        """! Check if the underlying process is running and finalise stream readers if not."""
        if self.__process.poll() is not None:  # process has ended
            # "stdout"/"stderr" resolve to the properties defined below.
            for nbsr in ("stdout", "stderr"):
                getattr(self, nbsr).finalise()
            return False
        return True
    def log(self, message, log_level="info"):
        """! Write to the logger with appropriate log-level.
        @param message The message to pass to the logger.
        @param log_level Which level to log at.
        """
        # Strip every occurrence of each ignored substring before logging.
        for word in self.__ignore_output:
            while word in message:
                message = message.replace(word, "")
        getattr(logger, log_level)("{}{}".format(self.__output_prefix, message.strip()))
    def write_queued_output(self):
        """! Pass queued output to the logger."""
        for stream in ["stdout", "stderr"]:
            # Drain the reader's queue; stop once it reports empty.
            while True:
                output, queue_size = getattr(self, stream).readline(timeout=0.1)
                if not (output is None or len(output) == 0):
                    self.log(output, self.log_level[stream])
                if queue_size == 0:
                    break
    @property
    def return_code(self):
        """! Return code of underlying process."""
        return self.__process.returncode
    @property
    def stdout(self):
        """! stdout stream from underlying process."""
        return self.__stdout
    @property
    def stderr(self):
        """! stderr stream from underlying process."""
        return self.__stderr
| rushioda/PIXELVALID_athena | athena/Generators/PowhegControl/python/utility/process_handling.py | process_handling.py | py | 5,409 | python | en | code | 1 | github-code | 13 |
2355049493 | '''
This module contains all of the !commands that the users
can call upon for execution.
'''
from functions import chat as _chat
from functions import queryAPI as _queryAPI
from functions import getXMLAttributes as _getXMLAttributes
from functions import isOp as _isOp
from functions import printv as _printv
from functions import getViewerList as _getViewerList
from functions import streamIsUp as _streamIsUp
import sys as _sys
import os as _os
import cfg as _cfg
import random as _R
import time as _T
import requests as _requests
from datetime import datetime as _datetime
import re as _re
from html import unescape as _uesc
import psycopg2 as _psycopg2
from psycopg2.extras import DictCursor as _dictCursor
import collections as _collections
import numpy as _np
# Upper bounds for !roll, keeping chat responses to a sane length.
_MAX_DICE = 10
_MAX_DSIDES = 150
# Canned chat responses, keyed by command name, then by outcome
# ("error"/"success") and situation. Success templates are str.format
# templates filled with rolls/dSides/result/result_sum.
_RESPONSES = {
    "roll": {
        "error": {
            "bad_args": "I don't know what to roll! Try specifying a "
                        "die using something like: !roll 20 or !roll 2d6",
            "too_many_dice": "Hey now, don't be rollin' more than {} of "
                             "those!".format(_MAX_DICE),
            "too_many_dsides": "Hey now, a dice with {} sides is too round "
                               "to get a good answer!".format(_MAX_DSIDES),
        },
        "success": {
            "roll": "I rolled {rolls} die with {dSides} sides "
                    "and got {result}.",
            "rolls": "I rolled {rolls} dice with {dSides} sides "
                     "and got {result}. The dice add to {result_sum}.",
        },
    },
}
def time(args):
    """Chat the current local time at Blaskatronic HQ.

    (Shadows the stdlib module name deliberately: the function name doubles
    as the chat command.)
    """
    sock = args[0]
    # TODO: migrate from time.strftime to datetime for consistency.
    stamp = _T.strftime("%I:%M %p %Z on %A, %B, %d, %Y.")
    _chat(sock, "At Blaskatronic HQ, it is currently " + stamp)
def bb(args):
    """Make the bot announce itself with a robotic BEEP BOOP."""
    _chat(args[0], "BEEP BOOP")
def wa(args):
    """Shout the channel's battle cry."""
    _chat(args[0], "WEIGH ANCHOR!!!")
def calc(args):
    """Post the 'Calculated.' meme response."""
    _chat(args[0], "Calculated. Calculated. Calculated. Calculated. Chat disabled for 1 seconds")
def dece(args):
    """Compliment the lad on a dece play."""
    _chat(args[0], "That was dece, lad!")
def discord(args):
    """Advertise the community Discord server."""
    _chat(args[0], "Chat to us on Discord at: www.discord.me/blaskatronic")
def roll(args):
    """Roll dice requested as ``!roll N`` or ``!roll CdS`` and chat the result.

    Accepts e.g. "20" (one d20), "2d6" (two d6) or "d8" (one d8). Rejects
    malformed specs, non-positive values, more than _MAX_DICE dice, or dice
    with more than _MAX_DSIDES sides.
    """
    sock = args[0]
    try:
        spec = args[2].lower()
    except IndexError:
        return _chat(sock, _RESPONSES["roll"]["error"]["bad_args"])
    # Normalise "N" into the two-part [count, sides] form.
    pieces = spec.split('d')
    if len(pieces) == 1:
        pieces = [1] + pieces
    if len(pieces) != 2:
        return _chat(sock, _RESPONSES["roll"]["error"]["bad_args"])
    try:
        count = int(pieces[0])
    except ValueError:
        if pieces[0] != '':
            return _chat(sock, _RESPONSES["roll"]["error"]["bad_args"])
        count = 1  # "d6" means a single die
    try:
        sides = int(pieces[1])
    except ValueError:
        return _chat(sock, _RESPONSES["roll"]["error"]["bad_args"])
    # Reject zero or negative dice/sides, then the size limits (in that order).
    if count <= 0 or sides <= 0:
        return _chat(sock, _RESPONSES["roll"]["error"]["bad_args"])
    if sides > _MAX_DSIDES:
        return _chat(sock, _RESPONSES["roll"]["error"]["too_many_dsides"])
    if count > _MAX_DICE:
        return _chat(sock, _RESPONSES["roll"]["error"]["too_many_dice"])
    outcomes = [_R.randint(1, sides) for _ in range(count)]
    details = {
        'rolls': count,
        'dSides': sides,
        'result': ', '.join(str(o) for o in outcomes),
    }
    # "1 dice" reads badly, so single rolls get their own template.
    if count == 1:
        return _chat(sock, _RESPONSES["roll"]["success"]["roll"].format(**details))
    details['result_sum'] = sum(outcomes)
    return _chat(sock, _RESPONSES["roll"]["success"]["rolls"].format(**details))
def buydrink(args):
    """Spend channel currency to buy drinks for one or more viewers.

    args: [sock, userName, amount?, recipient...] -- amount defaults to 1 and
    recipients default to the caller; "all" buys for everyone in chat.
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor()
    # NOTE(review): SQL built by string concatenation from chat-supplied
    # userName -- injection risk; should be a parameterised query.
    cursor.execute("SELECT points FROM Viewers WHERE name='" + userName.lower() + "';")
    currentPoints = int(cursor.fetchone()[0])
    try:
        numberOfDrinks = int(args[2])
        viewersRequested = args[3:]
        # Non-positive amounts are treated like a missing amount.
        if numberOfDrinks <= 0:
            raise IndexError
    except(IndexError, ValueError) as e:
        _chat(sock, "The bartender doesn't know how many drinks you want to buy, but begins pouring you a drink anyway.")
        numberOfDrinks = 1
        viewersRequested = args[2:]
    # Fetch the current chatter list; retry up to 10 times before giving up.
    viewerList = []
    attempts = 0
    while len(viewerList) == 0:
        viewerJSON = _getViewerList()
        viewerList = [viewerName for nameRank in [viewerJSON['chatters'][x] \
                      for x in viewerJSON['chatters'].keys()] for viewerName \
                      in nameRank]
        attempts += 1
        if attempts == 10:
            _chat(sock, "The bartender is busy serving someone else. Try again shortly!")
            return 0
    if 'all' in viewersRequested:
        viewersToBuyFor = viewerList
    else:
        # No recipients named: buy for the caller.
        if len(viewersRequested) == 0:
            viewersRequested = [userName]
        viewersToBuyFor = []
        cannotFind = []
        for viewer in viewersRequested: # Put in a .lower here?
            if viewer.lower() in viewerList:
                viewersToBuyFor.append(viewer.lower())
            else:
                cannotFind.append(viewer)
        if len(cannotFind) == 1:
            _chat(sock, "The bartender looks around but cannot see " +\
                  cannotFind[0] + "!")
        # NOTE(review): this compares against len(viewersToBuyFor); e.g. 2
        # found and 2 missing takes this branch and aborts even though two
        # recipients WERE found. len(viewersRequested)-with-empty-found may
        # have been intended -- confirm.
        elif len(cannotFind) == len(viewersToBuyFor):
            _chat(sock, "The bartender looks around but cannot see " +\
                  "any of the people you'd like to buy drinks for!")
            return 0
        elif len(cannotFind) == 2:
            _chat(sock, "The bartender looks around but cannot see " +\
                  cannotFind[0] + " or " + cannotFind[1] + "!")
        elif len(cannotFind) > 2:
            _chat(sock, "The bartender looks around but cannot see " +\
                  ", ".join(cannotFind[:-1]) + ", or " + cannotFind[-1] + "!")
        if len(viewersToBuyFor) == 0:
            return 0
    totalCost = numberOfDrinks * (len(viewersToBuyFor) * _cfg.drinksCost)
    if currentPoints < totalCost:
        _chat(sock, "Sorry, " + userName + ", but you do not have " + str(totalCost) + " " + _cfg.currencyName + "s to buy that many drinks!")
    else:
        giveMoneyString = userName + " gives " + str(totalCost) + " " +\
                          _cfg.currencyName + "s to the bartender"
        cursor.execute("UPDATE Viewers SET points=points - " + str(totalCost) + " WHERE name='" + userName.lower() + "';")
        # NOTE(review): viewersToBuyFor is always a list here, so this branch
        # is dead; the 'all' case was already expanded to viewerList above.
        if viewersToBuyFor == 'all':
            for viewer in viewerList:
                cursor.execute("UPDATE Viewers SET drinks=drinks + " + str(numberOfDrinks) + " WHERE name='" + viewer.lower() + "';")
            _chat(sock, giveMoneyString + ". Drinks for everyone!")
        else:
            # Build a readable "a, b and c" recipient list.
            viewersString = viewersToBuyFor[0]
            if len(viewersToBuyFor) > 1:
                for viewer in viewersToBuyFor[1:]:
                    if viewer == viewersToBuyFor[-1]:
                        viewersString += " and " + viewer
                    else:
                        viewersString += ", " + viewer
            # Refer to the buyer in the third person.
            viewersString = _re.sub(r'\b' + userName + r'\b', 'themselves', viewersString)
            for viewer in viewersToBuyFor:
                cursor.execute("UPDATE Viewers SET drinks=drinks + " + str(numberOfDrinks) + " WHERE name='" + viewer.lower() + "';")
            if numberOfDrinks == 1:
                drinkString = "a drink"
            else:
                drinkString = str(numberOfDrinks) + " drinks"
            _chat(sock, giveMoneyString + " to buy " + viewersString + " " + drinkString + "!")
    connection.commit()
    connection.close()
def drink(args):
    """Drink one or more of the caller's banked drinks.

    args: [sock, userName, amount?] -- amount defaults to 1, is capped at 5,
    and must not exceed the caller's banked drink count. Decrements the DB
    counter and chats the outcome.
    """
    sock = args[0]
    userName = args[1]
    try:
        numberOfDrinks = int(args[2])
        if numberOfDrinks <= 0:
            _chat(sock, userName + " takes a deep breath and decides not to drink anything.")
            return 0
    except IndexError:
        # No amount supplied: default to a single drink.
        numberOfDrinks = 1
    except ValueError:
        _chat(sock, "You can't drink that!")
        return 0
    if numberOfDrinks > 5:
        _chat(sock, "That's way too many drinks to have all at once! You'll be chundering " +
              "everywhere!")
        return 0
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    try:
        cursor = connection.cursor()
        # Parameterised query: userName comes straight from chat (untrusted).
        cursor.execute("SELECT drinks FROM Viewers WHERE name=%s;", (userName.lower(),))
        totalNumberAllowed = int(cursor.fetchone()[0])
        if totalNumberAllowed == 0:
            _chat(sock, "You don't have any drinks, " + userName + "! Maybe a kind soul will buy you one...")
            return 0
        if numberOfDrinks > totalNumberAllowed:
            if totalNumberAllowed == 1:
                allowed = "1 drink"
            else:
                allowed = str(totalNumberAllowed) + " drinks"
            # Bug fix: "allowed" already ends in "drink(s)"; the old message
            # appended another " drink", reading "1 drink drink in front of you".
            _chat(sock, "You only have " + allowed + " in front of you, " + userName + "!")
            return 0
        drinkString = userName + " takes a deep breath and then downs a drink"
        if numberOfDrinks > 1:
            drinkString += "...or " + str(numberOfDrinks) + "! It doesn't do anything yet except make you feel woozy..."
        else:
            drinkString += "! It doesn't do anything yet except make you feel woozy..."
        _chat(sock, drinkString)
        cursor.execute("UPDATE Viewers SET drinks=drinks - %s WHERE name=%s;",
                       (numberOfDrinks, userName.lower()))
        connection.commit()
    finally:
        # Close on every path; the original leaked the connection on early returns.
        connection.close()
def drinks(args):
    """Chat how many drinks the calling user currently has banked.

    args: [sock, userName].
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    try:
        cursor = connection.cursor()
        # Parameterised query: userName comes straight from chat (untrusted).
        cursor.execute("SELECT drinks FROM Viewers WHERE name=%s;", (userName.lower(),))
        numberOfDrinks = int(cursor.fetchone()[0])
        if numberOfDrinks == 0:
            _chat(sock, "You don't have any drinks, " + userName + "! Maybe a kind soul will buy you one...")
            return 0
        if numberOfDrinks == 1:
            drinkString = "1 drink"
        else:
            drinkString = str(numberOfDrinks) + " drinks"
        _chat(sock, "You have " + drinkString + ", " + userName + "!")
    finally:
        # Close on every path; the original leaked the connection on the
        # zero-drinks early return.
        connection.close()
def schedule(args):
    """Chat the weekly streaming schedule."""
    _chat(args[0], "Blaskatronic TV goes live at 2:30am UTC on Wednesdays and Fridays and 5:30pm UTC on Saturdays!")
def commands(args):
    """Alias for the !help command."""
    help(args)
def help(args):
    """Chat the list of commands available to the calling user.

    Op-only commands are hidden from non-ops. (Shadows the builtin ``help``
    on purpose -- the function name doubles as the chat command.)
    """
    sock = args[0]
    username = args[1]
    # Public module members double as the command list.
    commandsList = sorted([o for o in dir(_sys.modules[__name__])
                           if o[0] != '_'])
    if username not in _cfg.opList:
        # NOTE(review): list.remove raises ValueError if an op-only command is
        # missing from the module -- confirm the config stays in sync.
        for command in _cfg.opOnlyCommands:
            commandsList.remove(command)
    # (Removed an unused local ``commandString`` from the original.)
    _chat(sock, username + " can access the following commands: " +
          ', '.join(['!' + command for command in commandsList]) +
          '.')
def subscribe(args):
    """Chat a random non-comment line from Subscribe.txt.

    Lines beginning with '#' are treated as comments and never shown.
    """
    sock = args[0]
    fileName = './Subscribe.txt'
    with open(fileName, 'r') as subFile:
        lines = subFile.readlines()
    # Filter comment lines up front: the old while-loop re-rolled until it hit
    # a non-comment line and would spin forever if none existed (it also
    # crashed on empty lines).
    candidates = [line for line in lines if not line.startswith('#')]
    if not candidates:
        return
    # Trailing character is the newline.
    _chat(sock, _R.choice(candidates)[:-1])
def nowplaying(args):
    """Chat the title and artist of the song currently playing in VLC.

    Queries VLC's HTTP LUA status endpoint; any failure (network, auth,
    missing metadata) produces a friendly apology in chat instead.
    """
    sock = args[0]
    VLCLUAURL = "http://" + _cfg.EXTERNALIP + ":8080/requests/status.xml"
    try:
        nowPlayingData = _requests.get(VLCLUAURL, auth=('', _cfg.VLCLUAPASS))
        VLCDict = _getXMLAttributes(nowPlayingData.content)
        # Unescape HTML entities VLC embeds in the metadata.
        nowPlayingLine = _uesc(VLCDict['information']['meta']['title']) + " by " + \
            _uesc(VLCDict['information']['meta']['artist'])
        _chat(sock, "We're currently listening to the following song: " + nowPlayingLine)
        _printv(nowPlayingLine, 1)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        _chat(sock, "I can't read the now playing data right now! Sorry!")
def twitter(args):
    """Chat the configured account's latest tweet, fetched via DecAPI."""
    sock = args[0]
    handle = str(_cfg.twitterUsername)
    # Do nothing while the config still holds the placeholder value.
    if "<YOUR TWITTER USERNAME HERE>" in handle:
        return
    response = _requests.get("https://decapi.me/twitter/latest.php?name=" + handle)
    _chat(sock, "Latest tweet from " + handle + ": " + response.text)
def uptime(args):
    """Chat how long the stream has been live, per the Twitch API.

    Formats the duration as e.g. "3 hours, 2 minutes and 1 second"; chats an
    apology when the stream is offline or the API is stale.
    """
    sock = args[0]
    streamData = _queryAPI("https://api.twitch.tv/kraken/streams/" + _cfg.JOIN)
    if (streamData is None) or (not streamData['stream']):
        _chat(sock, "The stream isn't online, or the Twitch API hasn't" +
              " been updated yet!")
        return
    createdTime = _datetime.strptime(streamData['stream']['created_at'],
                                     "%Y-%m-%dT%H:%M:%SZ")
    deltaTime = _datetime.utcnow() - createdTime
    # Bug fix: the old code regex-parsed str(timedelta), which crashed once
    # the stream had been live for more than 24 hours ("1 day, 2:03:04" does
    # not match "H:M:S.micro"). Derive the components arithmetically instead;
    # days roll into the hour count.
    minutes, seconds = divmod(int(deltaTime.total_seconds()), 60)
    hours, minutes = divmod(minutes, 60)
    componentDict = _collections.OrderedDict()
    componentDict['hour'] = hours
    componentDict['minute'] = minutes
    componentDict['second'] = seconds
    # Build e.g. ["2 hours", "1 minute"], skipping zero components.
    upArray = []
    for key, value in componentDict.items():
        if value > 1:
            upArray.append(str(value) + " " + str(key) + "s")
        elif value > 0:
            upArray.append(str(value) + " " + str(key))
    uptime = ' and '.join(upArray[-2:])
    if len(upArray) == 3:
        uptime = upArray[0] + ", " + uptime
    _chat(sock, "The stream has been live for: " + uptime + "!")
def blaskoins(args):
    """Chat the calling user's current channel-currency balance.

    args: [sock, userName]. Unknown users get a welcome message instead.
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor()
    try:
        # NOTE(review): SQL built by concatenation from chat-supplied
        # userName -- injection risk; should be parameterised.
        cursor.execute("SELECT points FROM Viewers WHERE name='" + userName.lower() + "';")
        currentPoints = int(cursor.fetchone()[0])
        # NOTE(review): totalPoints is fetched but never used below.
        cursor.execute("SELECT totalpoints FROM Viewers WHERE name='" + userName.lower() + "';")
        totalPoints = int(cursor.fetchone()[0])
        # Pluralise the currency name for balances above 1.
        currencyUnits = _cfg.currencyName
        if currentPoints > 1:
            currencyUnits += "s"
        cursor.execute("SELECT multiplier FROM Viewers WHERE name='" + userName.lower() + "';")
        currentMultiplier = float(cursor.fetchone()[0])
        outputLine = userName + " currently has " + str(currentPoints) + " " + str(currencyUnits)
        # Only mention the bonus when it is meaningfully above 1.0.
        if currentMultiplier > 1.01:
            outputLine += ", with an active bonus of {:.2%}!".format(currentMultiplier - 1)
        else:
            outputLine += "!"
        _chat(sock, outputLine)
    except (IndexError, TypeError):
        # fetchone() returned None / an empty row: user not in the table yet.
        _chat(sock, "I'm sorry, " + userName + ", but I don't have any " + _cfg.currencyName +\
              " data for you yet! Please try again later (and also welcome to the stream ;)).")
    connection.close()
def rank(args):
    """Report a viewer's total watch time, current rank, and time to next rank.

    args: [socket, userName, ...]. Rank thresholds come from _cfg.ranks
    (a mapping of total-point thresholds to rank names).
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor()
    try:
        # One parameterised query instead of three string-concatenated ones
        # (the old queries were injectable via the chat-supplied viewer name).
        cursor.execute("SELECT totalpoints, rank, multiplier FROM Viewers WHERE name=%s;",
                       (userName.lower(),))
        totalPoints, currentRank, currentMultiplier = cursor.fetchone()
        totalPoints = int(totalPoints)
        currentRank = str(currentRank)
        currentMultiplier = float(currentMultiplier)
        # Find the point threshold of the first rank above the viewer's total.
        pointsForNextRank = None
        for rankPoints in sorted(_cfg.ranks.keys()):
            pointsForNextRank = rankPoints
            if totalPoints < rankPoints:
                break
        # Convert point counts to wall-clock seconds using the award cadence.
        secondsToNextRank = (pointsForNextRank - totalPoints) * int(_cfg.awardDeltaT /\
                            (_cfg.pointsToAward * currentMultiplier))
        totalSecondsSoFar = totalPoints * int(_cfg.awardDeltaT / _cfg.pointsToAward)
        totalTime = _formatDuration(totalSecondsSoFar)
        timeToNext = _formatDuration(secondsToNextRank)
        # Pick "a"/"an" depending on the rank name's first letter.
        rankMod = ' '
        if currentRank[0] in ['a', 'e', 'i', 'o', 'u']:
            rankMod = 'n '
        outputLine = userName + " has currently watched for " + totalTime +\
                     " and is a" + rankMod + str(currentRank) +\
                     " (" + timeToNext + " until next rank!)"
        _chat(sock, outputLine)
    except (IndexError, TypeError):
        # fetchone() returns None for viewers without a database row yet.
        _chat(sock, "I'm sorry, " + userName + ", but I don't have any rank" +\
              " data for you yet! Please try again later (and also welcome to the stream ;)).")
    connection.close()
def _formatDuration(totalSeconds):
    """Format a second count as e.g. '2 hours, 1 minute and 5 seconds'.

    Zero-valued components are dropped and a plural 's' is appended where
    needed, matching the formatting used elsewhere in this module.
    """
    mins, secs = divmod(totalSeconds, 60)
    hours, mins = divmod(mins, 60)
    parts = []
    for value, unit in ((int(hours), 'hour'), (int(mins), 'minute'), (int(secs), 'second')):
        if value > 1:
            parts.append(str(value) + " " + unit + "s")
        elif value > 0:
            parts.append(str(value) + " " + unit)
    duration = ' and '.join(parts[-2:])
    if len(parts) == 3:
        duration = parts[0] + ", " + duration
    return duration
def clip(args):
    """Fetch or register Twitch clips.

    Forms: !clip (random), !clip <#> (by index), !clip <NAME> (random by
    author), !clip add <SLUG> <AUTHOR> (mod-gated insert).
    """
    sock = args[0]
    additionalArgs = args[1:]
    userName = additionalArgs[0]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor(cursor_factory=_dictCursor)
    cursor.execute("SELECT * FROM Clips;")
    clipList = cursor.fetchall()
    if len(additionalArgs) == 1:
        # Just return a random clip
        clipNo = int(_R.randrange(len(clipList)))
        url = "https://clips.twitch.tv/" + clipList[clipNo]['url']
        _printv("Clip request: " + url, 4)
        _chat(sock, "Check out this awesome clip (#" + str(clipNo) + "): " + url)
    elif additionalArgs[1] == 'add':
        # NOTE(review): `userName is _isOp()` compares identity against the
        # call's return value and looks like it should be `userName in _isOp()`
        # — left unchanged pending confirmation of _isOp()'s contract.
        if userName is _isOp():
            try:
                url = additionalArgs[2]
                author = additionalArgs[3]
                if len(author) > len(url):
                    raise IndexError
            except IndexError:
                _chat(sock, "The correct syntax is !clip add <CLIP SLUG> <AUTHOR>.")
            else:
                cursor.execute("INSERT INTO Clips VALUES (%s, %s);", (url, author))
                connection.commit()
        else:
            _chat(sock, "A moderator will take a look at your clip and " +\
                  "add it to my database if they like it!")
    elif len(additionalArgs) == 2:
        try:
            clipNo = int(additionalArgs[1])
            if (clipNo > -len(clipList)) and (clipNo <= len(clipList)):
                url = "https://clips.twitch.tv/" + clipList[clipNo]['url']
                _printv("Clip request: " + url, 4)
                _chat(sock, "Here is clip #" + str(clipNo) + ": " + url)
            else:
                _chat(sock, "Valid clip #s are 0 to " + str(len(clipList) - 1) + " inclusive.")
        except ValueError:
            # Username specified instead; parameterised to avoid SQL injection.
            clipFromUser = str(additionalArgs[1])
            cursor.execute("SELECT * FROM Clips WHERE author=%s;", (clipFromUser,))
            userClips = cursor.fetchall()
            # (Removed a leftover `clipDB.search(...)` line that referenced an
            # undefined TinyDB handle and raised NameError, clobbering the
            # SQL result above.)
            if len(userClips) > 0:
                clipToShow = _R.choice(userClips)
                url = "https://clips.twitch.tv/" + clipToShow['url']
                _printv("Clip request: " + url, 4)
                _chat(sock, "Check out " + clipFromUser + "'s awesome clip (#" +\
                      str(clipToShow['id'] - 1) + "): " + url)
            else:
                _chat(sock, "Sorry, there are no clips from " + clipFromUser + " yet.")
    else:
        _chat(sock, "The correct syntax is !clip, !clip #, or !clip <NAME>.")
    connection.close()
def pay(args):
    """Transfer currency from one viewer to another: !pay <USERNAME> <AMOUNT>.

    Refuses negative amounts, amounts above the payer's balance, and
    recipients who are not currently in chat.
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor()
    try:
        # Parameterised queries throughout: both names originate in chat input.
        cursor.execute("SELECT points FROM Viewers WHERE name=%s;", (userName.lower(),))
        coinsAvailable = int(cursor.fetchone()[0])
        userToPay = args[2].lower()
        amountToPay = int(args[3])
        if amountToPay < 0:
            # A negative "payment" would drain the recipient; treat as bad syntax.
            raise IndexError
        if amountToPay > coinsAvailable:
            # The original built a tuple here ("You only have", n, name) and then
            # crashed on +=; build a plain string and stop before paying.
            errorString = "You only have " + str(coinsAvailable) + " " + _cfg.currencyName
            if coinsAvailable > 1:
                errorString += "s"
            errorString += " available, " + userName + "!"
            _chat(sock, errorString)
            return 0
        viewerJSON = _getViewerList()
        viewerList = [viewerName for nameRank in [viewerJSON['chatters'][x] \
                      for x in viewerJSON['chatters'].keys()] for viewerName \
                      in nameRank]
        if userToPay not in viewerList:
            _chat(sock, "I don't see " + userToPay + " in chat!")
            return 0
        cursor.execute("UPDATE Viewers SET points=points + %s WHERE name=%s;", (amountToPay, userToPay))
        cursor.execute("UPDATE Viewers SET points=points - %s WHERE name=%s;", (amountToPay, userName.lower()))
        payString = userName + " very kindly gives " + userToPay + " " + str(amountToPay) + " of" +\
                    " their " + _cfg.currencyName + "s"
        _chat(sock, payString + "!")
    except (IndexError, ValueError, TypeError):
        # Missing or non-numeric arguments. (The original bare except also
        # silently swallowed database errors.)
        _chat(sock, "The correct syntax: !pay <USERNAME> <AMOUNT>. There are no defaults!")
    finally:
        # Commit/close on every path, including the early returns above —
        # the original leaked the connection when the recipient wasn't in chat.
        connection.commit()
        connection.close()
def slot(args):
    """Play the slot machine: deduct the entry cost, spin, and pay out winnings.

    Usage: !slot (no extra arguments). Only playable while the stream is live.
    Payout tiers come from _cfg.slotPayout; reel symbols from _cfg.slotStops.
    """
    sock = args[0]
    userName = args[1]
    if len(args) > 2:
        # Extra arguments: silently ignore, matching the original behaviour.
        return
    streamStatus = _streamIsUp()
    if streamStatus is not None:
        if streamStatus is False:
            _chat(sock, "Sorry, " + userName + ", but you can't win anything off stream! Try using !next to see when you can next play with the slot machine!")
            return
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor()
    # Parameterised query: the viewer name comes straight from chat input.
    cursor.execute("SELECT points FROM Viewers WHERE name=%s;", (userName.lower(),))
    currentPoints = int(cursor.fetchone()[0])
    if currentPoints < _cfg.slotCost:
        # Fixed a missing space before the currency name in this message.
        _chat(sock, "Sorry, " + userName + ", but you do not have enough " + _cfg.currencyName +\
              " to play! You need at least " + str(_cfg.slotCost) + ".")
        connection.close()  # the original leaked the connection on this path
        return 0
    _chat(sock, "You insert " + str(_cfg.slotCost) + " " + _cfg.currencyName +\
          "s and pull the slot machine arm...")
    with open('./slotWin.txt', 'r') as winFile:
        winLines = winFile.readlines()
    with open('./slotLose.txt', 'r') as loseFile:
        loseLines = loseFile.readlines()
    # Spin one random stop per reel. The tier comments below assume
    # slotNReels == 4, as the original code did.
    results = [_R.choice(_cfg.slotStops) for _ in range(_cfg.slotNReels)]
    _chat(sock, "| " + " | ".join(results) + " |")
    responseLine = _R.choice(winLines)[:-1]
    nDistinct = len(set(results))
    if results == _cfg.slotJackpot:
        # Checked first: a jackpot is also "all reels match" and was previously
        # unreachable behind the len(set(results)) == 1 branch below.
        responseLine = "YOU HAVE WON THE JACKPOT!"
        payout = 0
        # TODO Add the game keys to the database
    elif nDistinct == _cfg.slotNReels:
        # None are matching
        responseLine = _R.choice(loseLines)[:-1]
        payout = _cfg.slotPayout[0]
    elif nDistinct == 3:
        # Exactly 2 are matching
        responseLine += " A pair!"
        payout = _cfg.slotPayout[2]
    elif nDistinct == 2:
        # Could be 2x2 or exactly 3 matching
        if results.count(list(set(results))[0]) == 2:
            # 2x2 are matching
            responseLine += " Two pairs!"
            payout = _cfg.slotPayout[1]
        else:
            # 3 are matching
            responseLine += " Trips!"
            payout = _cfg.slotPayout[3]
    else:
        # All 4 match (nDistinct == 1)
        responseLine += " 4-of-a-kind!"
        payout = _cfg.slotPayout[4]
    if payout == 1:
        # Fixed a missing space after "single" in the original message.
        responseLine += " A single " + _cfg.currencyName + " clatters out" +\
                        " of the machine for " + userName + "!"
    elif payout > 1:
        responseLine += " " + str(payout) + " " + _cfg.currencyName + "s clatter out" +\
                        " of the machine for " + userName + "!"
    cursor.execute("UPDATE Viewers SET points=points - %s WHERE name=%s;", (_cfg.slotCost, userName.lower()))
    cursor.execute("UPDATE Viewers SET points=points + %s WHERE name=%s;", (payout, userName.lower()))
    _printv("Username = " + userName + "," + responseLine + ", Winnings = " + str(payout), 1)
    _chat(sock, responseLine)
    connection.commit()
    connection.close()
def leaderboard(args):
    """Post the five viewers with the most total points earned to chat.

    Viewers listed in _cfg.skipViewers (bots, the broadcaster, etc.) are
    excluded from the ranking.
    """
    sock = args[0]
    userName = args[1]
    connection = _psycopg2.connect(database=_cfg.JOIN.lower(), user=_cfg.NICK.lower())
    cursor = connection.cursor(cursor_factory=_dictCursor)
    excluded = ', '.join(repr(name) for name in _cfg.skipViewers)
    cursor.execute("SELECT * FROM Viewers WHERE name NOT IN (" + excluded + ") ORDER BY totalpoints DESC LIMIT 5;")
    pieces = ["--== MOST MINUTES WATCHED ==-- "]
    for position, viewer in enumerate(cursor.fetchall(), start=1):
        pieces.append(" %1d) %15s %15s, %5d | " % (position, viewer['rank'], viewer['name'], viewer['totalpoints']))
    # Drop the trailing " | " separator before posting.
    _chat(sock, ''.join(pieces)[:-3])
    connection.close()
def top(args):
    """Alias for the !leaderboard command."""
    return leaderboard(args)
def next(args):
    """Chat the time remaining until the next scheduled stream.

    Uses _cfg.streamSchedule (rows of [weekday, hour, minute] in UTC) unless
    _cfg.streamScheduleOverride supplies a fixed announcement string.
    NOTE: this module-level name shadows the builtin ``next`` within this file.
    """
    sock = args[0]
    userName = args[1]
    if _cfg.streamScheduleOverride is not None:
        _chat(sock, _cfg.streamScheduleOverride)
        return
    # Current UTC time as [weekday, hour, minute] for element-wise deltas.
    now = list(map(int, _datetime.utcnow().strftime("%H %M").split(' ')))
    today = int(_datetime.utcnow().date().weekday())
    nowArray = _np.array([today] + now)
    timeDeltaArray = _np.array(_cfg.streamSchedule) - nowArray
    # Wrap negative components (past times) with manual borrow propagation,
    # like column-wise subtraction with carrying: 7 days, 24 hours, 60 minutes.
    modulos = [7, 24, 60]
    changed = True
    while changed == True:
        changed = False
        for (x, y), element in _np.ndenumerate(timeDeltaArray):
            if element < 0:
                timeDeltaArray[x, y] = element%modulos[y]
                # Decrement the next time level up to reflect this change
                timeDeltaArray[x, y-1] -= 1
                changed = True
    # Soonest upcoming slot = smallest day delta after normalisation.
    nextStreamTime = timeDeltaArray[timeDeltaArray[:,0].argsort()][0]
    nextStreamDict = _collections.OrderedDict()
    nextStreamDict['day'] = int(nextStreamTime[0])
    nextStreamDict['hour'] = int(nextStreamTime[1])
    nextStreamDict['minute'] = int(nextStreamTime[2])
    outputString = "The next scheduled stream starts"
    nonZeroIndices = [index for index, value in enumerate(nextStreamDict.values()) if value != 0]
    if len(nonZeroIndices) == 0:
        outputString += " right the hell now!"
    elif len(nonZeroIndices) == 1:
        # Only minutes remaining reads as "in just"; a bare hour/day as "in exactly".
        if nonZeroIndices[0] == 2:
            outputString += " in just "
        else:
            outputString += " in exactly "
    else:
        outputString += " in "
    # Format non-zero components with pluralisation, joining the last two
    # with "and" (same scheme as the other duration strings in this module).
    timeStrings = []
    for key, value in nextStreamDict.items():
        if value > 1:
            timeStrings.append(str(value) + " " + str(key) + "s")
        elif value > 0:
            timeStrings.append(str(value) + " " + str(key))
    totalTime = ' and '.join(timeStrings[-2:])
    if len(timeStrings) == 3:
        totalTime = timeStrings[0] + ", " + totalTime
    outputString += totalTime
    if _cfg.streamScheduleAdditional is not None:
        outputString += ". " + _cfg.streamScheduleAdditional
    _chat(sock, outputString)
| matty-jones/blaskbot | commands.py | commands.py | py | 29,017 | python | en | code | 3 | github-code | 13 |
14914759023 | import numpy as np
import multiprocessing as mp
import time, os
import numpy.linalg as LA
import subprocess as sp
from random import random
import Model_Miller_3 as model
def getGlobalParams():
    """Copy run parameters from the model module into this module's globals.

    Must be called once before any trajectory runs; every other routine in
    this file reads these globals directly.
    """
    global dtE, dtI, NSteps, NTraj, NStates, M, windowtype
    global adjustedgamma, NCPUS, initstate, dirName, NSkip
    dtE = model.parameters.dtE  # electronic (mapping) timestep
    dtI = model.parameters.dtI  # nuclear (ionic) timestep
    NSteps = model.parameters.NSteps
    NTraj = model.parameters.NTraj
    NStates = model.parameters.NStates
    M = model.parameters.M  # nuclear mass(es)
    windowtype = model.parameters.windowtype.lower()  # "square" or "triangle"
    adjustedgamma = model.parameters.adjustedgamma.lower()  # "yes"/"no"
    NCPUS = model.parameters.NCPUS
    initstate = model.parameters.initState
    dirName = model.parameters.dirName
    NSkip = model.parameters.NSkip  # output every NSkip steps
def initFiles(traj):
    """Create the per-trajectory output directory and open its data files.

    Returns the (initConds, density, mapping) file handles, all opened for
    writing under <dirName>/traj-<traj>/.
    """
    folder = f"{dirName}/traj-{traj}/"
    sp.call(f"mkdir -p {folder}", shell=True)
    #sp.call(f"rm {folder}/*dat", shell=True)
    handles = [open(f"{folder}/{stem}.dat", "w")
               for stem in ("initConds", "density", "mapping")]
    return tuple(handles)
def closeFiles(InitCondsFile, densityFile, mappingFile):
    """Close the three per-trajectory output file handles."""
    for handle in (InitCondsFile, densityFile, mappingFile):
        handle.close()
def initMapping(InitCondsFile):
    """Sample initial electronic actions/angles for the SQC window scheme.

    Writes the sampled initial conditions to InitCondsFile and returns
    (z, ZPE): the complex mapping variables per state and the per-state
    zero-point-energy offsets used as force weights.
    """
    # Initialize mapping variables: actions ek and phase angles per state.
    ek = np.zeros((NStates))
    angle = np.zeros((NStates))
    if (windowtype == "square"):
        # Square windows: uniform action in [0, 2*gamma] with gamma = 0.366.
        for i in range(NStates):
            ek[i] = 2*0.366*random()
            angle[i] = random() * 2 * np.pi
    elif (windowtype == "triangle"):
        # Initial state: rejection-sample from the triangular density on [0,1].
        while (True):
            ek[initstate] = random()
            if ( 1 - ek[initstate] >= random() ):
                break
        # Other states: uniform action below the remaining triangle height.
        for i in range(NStates):
            angle[i] = random() * 2 * np.pi
            if (i != initstate):
                rand = random() * ( 1 - ek[initstate] )
                ek[i] = rand
        # Shift up initial state
        ek[initstate] += 1
    ### Now we assign mapping oscillator initial conditions ###
    z = np.zeros((NStates), dtype=complex)
    ZPE = np.zeros((NStates))
    for state in range(NStates):
        # Construct zero-point energy (ZPE): fixed gamma per window type,
        # or the sampled action itself when adjustedgamma == "yes".
        if (adjustedgamma == "no"):
            ZPE[state] = (np.sqrt(3)-1)/2 * (windowtype == "square") + 1/3. * (windowtype == "triangle")
        if (adjustedgamma == "yes"):
            ZPE[state] = ek[state] - 1 * (state == initstate)
        # Construct mapping variable z = q + i p from action/angle coordinates.
        q = np.sqrt( 2 * ek[state] ) * np.cos(angle[state])
        p = -1 * np.sqrt( 2 * ek[state] ) * np.sin(angle[state])
        z[state] = q + 1j*p
    InitCondsFile.write( "ek-ZPE(Force_Weight)\t" + "\t".join(map(str,np.round(ek - ZPE,4))) + "\n" )
    InitCondsFile.write( "ZPE\t\t\t" + "\t".join(map(str,np.round(ZPE,4))) + "\n" )
    InitCondsFile.write( "ek\t\t\t" + "\t".join(map(str,np.round(ek,4))))
    return z, ZPE
def propagateMapVars(z, VMat):
    """Advance the mapping variables by one electronic timestep dtE.

    Leapfrog (velocity-Verlet-like) update: half-kick the imaginary part,
    full drift of the real part, second half-kick of the imaginary part.
    Note: np.real/np.imag return views, so *z* is also updated in place;
    callers rebind the returned array anyway.
    TODO Implement Runge-Kutta time-integration
    """
    q = np.real(z)
    p = np.imag(z)
    # First half-step of the imaginary component.
    p -= 0.5 * VMat @ q * dtE
    # Full step of the real component.
    q += VMat @ p * dtE
    # Final half-step of the imaginary component.
    p -= 0.5 * VMat @ q * dtE
    return q + 1j*p
def Force(dHel, dHel0, R, z, ZPE ):
    """
    Return force for all nuclear DOFs.
    F = F0 + Fm
    F0 = -GRAD V_0 (State-Independent)
    Fm = -GRAD V_m (State-Dependent and Traceless)
    V_m = 0.5 * SUM_(lam, u) <lam|V|u> z*_lam z'_u
    """
    # Electronic "action" matrix with the ZPE subtracted on the diagonal.
    weight = 0.5 * np.real( np.outer( z, np.conjugate(z) ) - 2 * np.diag(ZPE) )
    force = np.zeros((len(R)))
    force -= dHel0  # state-independent contribution
    for a in range(NStates):
        force -= dHel[a,a,:] * weight[a,a]
        # Off-diagonal terms counted once with a factor of 2 (Hermitian pair).
        for b in range(a+1,NStates):
            force -= 2 * dHel[a,b,:] * weight[a,b]
    return force
def VelVerF(R, P, z, ZPE): # Ionic position, ionic momentum, etc.
    """
    Routine for nuclear and electronic propagation over one nuclear step dtI.
    Nuclear Method: Velocity Verlet, with the electronic mapping variables
    sub-stepped at dtE (half of the sub-steps before the nuclear kick,
    half after). Returns the updated (R, P, z, Hel).
    """
    v = P/M
    Hel = model.Hel(R) # Electronic Structure
    dHel = model.dHel(R)
    dHel0 = model.dHel0(R)
    EStep = int(dtI/dtE)  # electronic sub-steps per nuclear step
    for t in range( int(EStep/2) ): # Half-step Mapping
        z = propagateMapVars(z, Hel) * 1
    F1 = Force(dHel, dHel0, R, z, ZPE )
    v += 0.5000 * F1 * dtI / M # Half-step velocity
    R += v * dtI # Full Step Position
    # Re-evaluate gradients at the new geometry before the second kick.
    dHel = model.dHel(R)
    dHel0 = model.dHel0(R)
    F2 = Force(dHel, dHel0, R, z, ZPE )
    v += 0.5000 * F2 * dtI / M # Half-step Velocity
    Hel = model.Hel(R) # Electronic Structure
    for t in range( int(EStep/2) ): # Half-step Mappings
        z = propagateMapVars(z, Hel) * 1
    return R, v*M, z, Hel
def window(step, z, ZPE, densityFile, mappingFile):
    """
    Construct histogram for binning the electronic action, then write the
    current densities and mapping variables to the output files.
    TODO CONSTRUCT OFF-DIAGONAL BINS FOR COHERENCE CALCULATIONS
    """
    hist = np.ones((NStates))
    ek = np.zeros((NStates))
    # Get the diagonal actions for each state
    for state in range(NStates):
        ek[state] = 0.5 * np.abs( z[state] )**2 # q^2 + p^2 = (q + ip)(q - ip)
    # A state i is counted (hist[i] stays 1) only if every state's action
    # falls inside the window pattern for population in i.
    # NOTE(review): the square-window test uses `ek[j] - (i==j)` but then
    # compares `ek[j] - 1` against 2*gamma regardless of i — confirm this
    # matches the intended SQC windowing before relying on square windows.
    for i in range(NStates):
        for j in range(NStates):
            if (windowtype.lower() == "square"):
                if ( ek[j] - (i==j) < 0.0 or ek[j] - 1 > 2*0.366 ):
                    hist[i] = 0
            if (windowtype.lower() == "triangle"):
                if ( (i == j and ek[j] < 1.0) or (i != j and ek[j] >= 1.0) ):
                    hist[i] = 0
    writeDensity(step,hist,z,densityFile,mappingFile)
    return None
def writeDensity(step,hist,z,densityFile,mappingFile):
    """Append one tab-separated row (time, then per-state values) per file.

    mappingFile gets Re/Im of each mapping variable (rounded to 4 d.p.);
    densityFile gets the integer window histogram.
    """
    simTime = step * dtI
    mapRow = [simTime]
    denRow = [simTime]
    for state in range(NStates):
        mapRow.extend([np.round(np.real(z[state]), 4),
                       np.round(np.imag(z[state]), 4)])
        denRow.append(int(hist[state]))
    mappingFile.write("\t".join(map(str, mapRow)) + "\n")
    densityFile.write("\t".join(map(str, denRow)) + "\n")
    return None
def Run_Trajectory(traj): # This is parallelized already. "Main" for each trajectory.
    """Run one full SQC trajectory and stream its output to disk.

    Initialises nuclear and electronic DOFs, then alternates windowed
    output (every NSkip steps) with velocity-Verlet propagation.
    """
    print (f"Working in traj {traj} for NSteps = {NSteps}")
    InitCondsFile, densityFile, mappingFile = initFiles(traj)
    R,P = model.initR() # Initialize nuclear DOFs
    z, ZPE = initMapping(InitCondsFile) # Initialize electronic DOFs
    Hel = model.Hel(R)
    dHij = model.dHel(R)
    for step in range(NSteps):
        print ("Step:", step)
        if ( step % NSkip == 0 ):
            window(step, z, ZPE, densityFile, mappingFile)
        R, P, z, Hel = VelVerF(R, P, z, ZPE)
    closeFiles(InitCondsFile, densityFile, mappingFile)
    return None
### Start Main Program ###
# Load the model parameters, then fan the NTraj trajectories out over a
# process pool of NCPUS workers and report the total wall time.
if ( __name__ == "__main__" ):
    getGlobalParams()
    start = time.time()
    print (f"There will be {NCPUS} cores with {NTraj} trajectories.")
    runList = np.arange(NTraj)
    with mp.Pool(processes=NCPUS) as pool:
        pool.map(Run_Trajectory, runList)
    stop = time.time()
    print (f"Total Computation Time (Hours): {(stop - start) / 3600}")
| bradenmweight/QuantumDynamicsMethodsSuite | MQC/SQC.py | SQC.py | py | 7,401 | python | en | code | 2 | github-code | 13 |
71347649618 | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Products, Distributor
@login_required
def index(request):
    """Render the product catalogue for an authenticated user."""
    context = {'products': Products.objects.all()}
    return render(request, 'products/index.html', context)
@login_required
def details(request, slug):
    """Render the detail page for one product, or 404 if the slug is unknown."""
    template_name = 'products/details.html'
    context = {'product': get_object_or_404(Products, slug=slug)}
    return render(request, template_name, context)
@login_required
def delete_product(request, slug):
    """Delete the product identified by *slug*, then show the catalogue.

    Uses get_object_or_404 (matching `details`) so an unknown slug yields a
    404 rather than an unhandled Products.DoesNotExist (500). The leftover
    debug print has been removed.
    """
    product = get_object_or_404(Products, slug=slug)
    product.delete()
    # Re-render the index with the remaining products; the original passed no
    # context, so the listing template rendered empty after a delete.
    context = {'products': Products.objects.all()}
    return render(request, 'products/index.html', context)
70102289939 | import tkinter as tk
# if you are still working under a Python 2 version,
# comment out the previous line and uncomment the following line
# import Tkinter as tk
# root = tk.Tk()
#
# w = tk.Label(root, text="Hello Tkinter!")
# w.pack()
#
# root.mainloop()
# Minimal Tkinter demo: an image label on the right, multi-line text on the left.
root = tk.Tk()
# PhotoImage must outlive the Label that uses it, hence the module-level name.
logo = tk.PhotoImage(file="logo64.gif")
# NOTE: .pack() returns None, so w1/w2 hold None, not the Label widgets.
w1 = tk.Label(root, image=logo).pack(side="right")
explanation = """At present, only GIF and PPM/PGM
formats are supported, but an interface
exists to allow additional image file
formats to be added easily."""
w2 = tk.Label(root,
              justify=tk.LEFT,
              padx = 10,
              text=explanation).pack(side="left")
# Hand control to the Tk event loop; blocks until the window is closed.
root.mainloop()
72349882258 | import logging
import threading
from articleRec import handler as articleRecHandler
from topicModeling import handler as topicModelingHandler
from mdsModel.handler import *
from datetime import datetime
from idl import *
from userPreferences import handler as upHandler
from topicFeed import handler as topicFeedHandler
from topicModeling import handler as topicModelingHandler
from multiprocessing.pool import ThreadPool, Pool
# Route this module's logs to Logtail and surface INFO and above.
handler = LogtailHandler(source_token="tvoi6AuG8ieLux2PbHqdJSVR")
logger = logging.getLogger(__name__)
logger.handlers = [handler]
logger.setLevel(logging.INFO)
def hydrateHomePageCached(hydrateHomePageRequest):
  """Build the home page from topic pages pre-rendered in the database.

  Topic selection: the user's followed topics first (if signed in), topped
  up with trending topics when fewer than 25 are followed. Each topic's
  cached page is fetched by name and the results are returned newest-first.
  Returns a HydrateHomePageResponse; on upstream error, topicPages is empty
  and `error` carries the message.
  """
  # It needs to search topic pages by the topic name
  topicList = []

  beforeGetTopicsYouFollow = datetime.now()
  # If the user is following any topics, get those topics first
  if hydrateHomePageRequest.userId != None:
    getTopicsYouFollowResponse = upHandler.get_topics_you_follow(
      GetTopicsForUserRequest(
        user_id = hydrateHomePageRequest.userId
      )
    )
    if getTopicsYouFollowResponse.error != None:
      logger.warn("error in getTopicsYouFollow")
      return HydrateHomePageResponse(
        topicPages= [],
        error=str(getTopicsYouFollowResponse.error)
      )
    topicList = [t.TopicName for t in getTopicsYouFollowResponse.topics]
    logger.info(topicList)
  afterGetTopicsYouFollow = datetime.now()
  logger.info("Time taken to getTopicsYouFollow: %s", str(afterGetTopicsYouFollow-beforeGetTopicsYouFollow))

  # Top up with currently-trending topics while the user follows fewer than 25.
  beforeGetTopics = datetime.now()
  if len(topicList) < 25:
    getTopicsResponse = topicModelingHandler.get_topics(
      GetTopicsRequest(
        num_topics=10,
        reduced = False,
      )
    )
    if getTopicsResponse.error != None:
      return HydrateHomePageResponse(
        topicPages =[],
        error=str(getTopicsResponse.error)
      )
    topicList.extend(getTopicsResponse.topic_words)
    # NOTE(review): the topic list is logged twice here.
    logger.info("The topic list is: " + str(topicList))
    logger.info(topicList)
  afterGetTopics = datetime.now()
  logger.info("Time to getTopics: %s", str(afterGetTopics-beforeGetTopics))

  # Fetch each topic's cached page; pages that fail are logged and skipped.
  topicPages = []
  for topic in topicList:
    fetchTopicPageByTopicRes = topicFeedHandler.fetchTopicPageByTopic(
      fetchTopicPageByTopicRequest=FetchTopicPageRequest(
        topic=topic,
      )
    )
    logger.info("Fetch topic page cached: ")
    logger.info(fetchTopicPageByTopicRes)
    if fetchTopicPageByTopicRes.error == None and fetchTopicPageByTopicRes.topic_page != None:
      topicPages.append(fetchTopicPageByTopicRes)
    else:
      logger.warn("Failed to hydrate topic page: " + str(fetchTopicPageByTopicRes.error))

  # Order the topic pages by date and possibly include a date header in between the pages
  sortedTopicPages = sorted(topicPages, key=lambda fetchTopicPageRes: fetchTopicPageRes.topic_page.CreatedAt, reverse=True)
  logger.info("number of topic pages: " + str(len(sortedTopicPages)))

  return HydrateHomePageResponse(
    topicPages=sortedTopicPages,
    error = None
  )
def hydrateHomePage(hydrateHomePageRequest):
  """
  The home page will consist of all the topic modals. It will first query for the top topics, then for each topic it will include the MDS, and a list of the facts for that topic and a top image. If the user is signed in then it will first query for the topics that the user has saved and surface those first. After that it will surface the rest of the top topics.

  Unlike hydrateHomePageCached, each topic page is built on the fly via
  getTopicPage, fanned out over a thread pool (one worker per topic).
  """
  topicList = []

  beforeGetTopicsYouFollow = datetime.now()
  # If the user is following any topics, get those topics first
  if hydrateHomePageRequest.userId != None:
    getTopicsYouFollowResponse = upHandler.get_topics_you_follow(
      GetTopicsForUserRequest(
        user_id = hydrateHomePageRequest.userId
      )
    )
    if getTopicsYouFollowResponse.error != None:
      logger.warn("error in getTopicsYouFollow")
      return HydrateHomePageResponse(
        topicPages= [],
        error=str(getTopicsYouFollowResponse.error)
      )
    topicList = [t.TopicName for t in getTopicsYouFollowResponse.topics]
    logger.info(topicList)
  afterGetTopicsYouFollow = datetime.now()
  logger.info("Time taken to getTopicsYouFollow: %s", str(afterGetTopicsYouFollow-beforeGetTopicsYouFollow))

  # Top up with currently-trending topics while the user follows fewer than 5.
  beforeGetTopics = datetime.now()
  if len(topicList) < 5:
    getTopicsResponse = topicModelingHandler.get_topics(
      GetTopicsRequest(
        num_topics=5,
        reduced = False,
      )
    )
    if getTopicsResponse.error != None:
      return HydrateHomePageResponse(
        topicPages =[],
        error=str(getTopicsResponse.error)
      )
    topicList.extend(getTopicsResponse.topic_words)
    logger.info("The topic list is")
    logger.info(topicList)
  afterGetTopics = datetime.now()
  logger.info("Time to getTopics: %s", str(afterGetTopics-beforeGetTopics))

  beforeParallelHydration = datetime.now()
  # Aysynchronously populate all of the topic pages to display on the home page
  # NOTE(review): ThreadPool(processes=0) raises if topicList is somehow empty
  # (e.g. get_topics returned no words) — confirm upstream guarantees.
  pool = ThreadPool(processes=len(topicList))
  getTopicPageRequests = [GetTopicPageRequest(topicName = topic) for topic in topicList]
  topicPages = pool.map(topicFeedHandler.getTopicPage, getTopicPageRequests)
  i = 0
  for topicPage in topicPages:
    logger.info("Topic page " + str(i))
    logger.info(topicPage)
    i+= 1
  pool.close()
  pool.join()
  afterParallelHydration = datetime.now()
  logger.info("Time to parallel hydrate: %s", str(afterParallelHydration-beforeParallelHydration))

  return HydrateHomePageResponse(
    topicPages=topicPages,
    error = None
  )
| aiswaryasankar/dbrief | homeFeed/handler.py | handler.py | py | 5,798 | python | en | code | 1 | github-code | 13 |
20999094983 | from sklearn.decomposition import PCA
from scipy.cluster.vq import kmeans2
import numpy as np
def calculate_pca(embeddings, dim=16):
    """Project *embeddings* onto their first *dim* principal components."""
    print("Calculating PCA")
    reducer = PCA(n_components=dim)
    reduced = reducer.fit_transform(embeddings.squeeze())
    print("PCA calculating done!")
    return reduced
def calculate_kmeans(embeddings, k):
    """Cluster *embeddings* into *k* groups with k-means.

    Parameters
    ----------
    embeddings : array-like of shape (n_samples, n_features)
    k : int, number of clusters.

    Returns
    -------
    (centroid, labels): the (k, n_features) centroid array and the
    per-sample cluster-index array.
    """
    print("KMeans processing...")
    # minit="points" seeds centroids from randomly chosen input points.
    centroid, labels = kmeans2(data=embeddings, k=k, minit="points")
    # (Removed an unused np.bincount(labels) computation from the original.)
    print("Kmeans done!")
    return centroid, labels
9838513736 | import argparse
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from environments.swingup import CartPoleSwingUp
from environments.pongwrapper import PongWrapper
plt.style.use("dark_background")
def demo_cartpole():
    """Run CartPole-v1 with random actions, rendering until the episode ends."""
    env = gym.make('CartPole-v1')
    env.reset()
    env.render()
    for step in range(2000):
        _, _, done, _ = env.step(np.random.randint(2))
        env.render()
        if done:
            print(step)
            break
        # Pace rendering at the environment's physics timestep.
        time.sleep(env.tau)
    env.close()
def demo_swingup():
    """Run the custom CartPoleSwingUp env with random actions until done."""
    env = CartPoleSwingUp()
    env.reset()
    env.render()
    for step in range(1000):
        _, _, done, _ = env.step(np.random.randint(2))
        env.render()
        if done:
            print(step)
            break
    env.close()
def demo_pong():
    """Play 60 random steps of wrapped Pong, then display the final frame."""
    env = PongWrapper(noop_max=0,
                      frame_skip=4,
                      terminal_on_life_loss=True,
                      grayscale_obs=True,
                      scale_obs=True)
    frame = env.reset()
    env.render()
    for _ in range(60):
        action = np.random.randint(2)
        frame, reward, _, _ = env.step(action)
        env.render()
        # print('\r', "reward", reward, end="")
        time.sleep(0.1)
    env.close()
    print(f"shape: {frame.shape} min = {frame.min()} max = {frame.max()}")
    plt.imshow(frame, cmap='gray')
    plt.show()
# CLI entry point: pick which demo(s) to run via flags; flags are not
# mutually exclusive, so several demos can run back to back.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cartpole", action="store_true")
    parser.add_argument("-s", "--swingup", action="store_true")
    parser.add_argument("-p", "--pong", action="store_true")
    args = parser.parse_args()
    if args.cartpole:
        demo_cartpole()
    if args.swingup:
        demo_swingup()
    if args.pong:
        demo_pong()
| amtoine/dqn | src/demo.py | demo.py | py | 1,809 | python | en | code | 0 | github-code | 13 |
1397428241 | #Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tkinter as tk
import sys
import time
import os
from PIL import Image, ImageOps
from collections import defaultdict
# Set File: path comes from the command line; peek at the first 5 lines to
# decide whether the file carries a 4-line ascii header or starts with data.
file = sys.argv[1]
with open(file) as myfile:
    head = [next(myfile) for x in range(5)]
if (head[0][0:5]) == 'ascii': #check if there's header information
    df = pd.read_csv(file, sep = ' ', header=None, skiprows = 4)
elif (head[0][0] == str(0) or head[0][0] == str(1)):
    # Headerless variant; the trailing column is dropped (presumably a
    # trailing-separator artefact — TODO confirm against the file format).
    df = pd.read_csv(file, sep = ' ', header=None)
    df = df.iloc[:, :-1]
print( 'File Dimensions:', df.shape[0], 'by', df.shape[1] )
# Keep only the numpy array; isize is the square slice edge length.
X = df.to_numpy()
del df
isize = len(X[0])
#Extract values from slider
def takeValues():
    """Read the five Tk sliders and return their current values as a tuple.

    Order: (y_top, y_bottom, x_left, x_right, z-height slice).
    """
    return tuple(slider.get() for slider in (w1, w2, w3, w4, w5))
#Show Image
def showImage():
    """Display the selected z-slice with the crop rectangle overlaid in red.

    Reads the slider state via takeValues() and the full data array X from
    module scope; prints the selection dimensions and warns if non-square.
    """
    plt.close('all')
    inp = takeValues()
    y_top,y_bot,x_left,x_right = inp[0],inp[1],inp[2],inp[3]
    islice = inp[4]
    # Each z-slice occupies isize consecutive rows of X.
    X2 = X[isize*islice:isize*islice+isize,:]
    plt.matshow(X2[0:isize, 0:isize], origin = 'lower')
    plt.gca().xaxis.tick_bottom()
    width = abs(x_right - x_left)
    length = abs(y_top - y_bot)
    print('Z Level:', '\t', islice)
    print('Width:', '\t', '\t', width)
    print('Length:', '\t', length)
    if (width != length):
        print('WARNING: NOT SQUARE')
    print('')
    plt.text(-30, isize+25, 'Width: ' + str(width))
    plt.text(-30, isize+10, 'Length:' + str(length))
    # Draw the crop rectangle; -1 keeps the lines on the last selected pixel.
    plt.hlines(y_bot, x_left, x_right-1, 'r', linewidth = 2)
    plt.hlines(y_top-1, x_left, x_right-1, 'r', linewidth = 2)
    plt.vlines(x_left, y_bot, y_top-1, 'r', linewidth = 2)
    plt.vlines(x_right-1,y_bot, y_top-1, 'r', linewidth = 2)
    plt.title('Height Level:'+str(islice))
    plt.show()
#Cut Image
def cutImage():
    """Show only the cropped region of the selected z-slice, axes hidden.

    Handles all four orderings of the slider pairs (top/bottom, left/right
    may each be given in either order).
    """
    inp = takeValues()
    y_top = inp[0]
    y_bot = inp[1]
    x_left = inp[2]
    x_right = inp[3]
    islice = inp[4]
    plt.close('all')
    X3 = X[isize*islice:isize*islice+isize,:]
    # Normalise the slice bounds so the smaller coordinate always comes first.
    if (y_top > y_bot) and (x_right > x_left):
        plt.matshow(X3[y_bot: y_top, x_left : x_right], origin = 'lower')
    if (y_top < y_bot) and (x_right > x_left):
        plt.matshow(X3[y_top: y_bot, x_left : x_right], origin = 'lower')
    elif (y_top < y_bot) and (x_right < x_left):
        plt.matshow(X3[y_top: y_bot, x_right : x_left], origin = 'lower')
    elif (y_top > y_bot) and (x_right < x_left):
        plt.matshow(X3[y_bot: y_top, x_right : x_left], origin = 'lower')
    plt.axis('off')
    plt.show()
def saveImage():
    """Save the current crop, binarise it to black/white, and write FinalImage.png.

    The viridis extremes are remapped: dark purple (68,1,84) -> black and
    bright yellow (253,231,36) -> white; a 1px black border is then added.
    The intermediate CroppedImage.png is deleted afterwards.
    """
    print('Saving')
    plt.savefig('CroppedImage.png',bbox_inches='tight', pad_inches=0)
    plt.close('all')
    img = Image.open('CroppedImage.png')
    pixels = img.load()
    by_color = defaultdict(int)
    # Colour histogram (computed but currently unused beyond this tally).
    for pixel in img.getdata():
        by_color[pixel] += 1
    for i in range(img.size[0]):
        for j in range(img.size[1]):
            if pixels[i,j][0] == 68 and pixels[i,j][1] == 1 and pixels[i,j][2] == 84:
                pixels[i,j] = (0,0,0,255)
            elif pixels[i,j][0] == 253 and pixels[i,j][1] == 231 and pixels[i,j][2] == 36:
                pixels[i,j] = (255,255,255,255)
    newimg = ImageOps.expand(img, border=1, fill='black')
    newimg.show()
    imgsize = newimg.size
    print(imgsize)
    newimg.save("FinalImage.png")
    os.remove('CroppedImage.png')
# Tkinter GUI: five sliders (crop bounds + z-height) and four action buttons.
# The `while 1: ... break` wrapper runs the loop body exactly once.
while 1:
    m = tk.Tk()
    m.title('Area Selection')
    m.configure(bg='tan')
    stop_button = tk.Button(m, width=25, command=m.destroy, bg = 'orangered', text='Stop').pack()
    w1 = tk.Scale(m, from_=1, to=isize, length=400, orient=tk.HORIZONTAL, bg= 'beige', label='Y Top')
    w1.set(isize)
    w1.pack()
    w2 = tk.Scale(m, from_=0, to=isize-1, length=400, orient=tk.HORIZONTAL, bg = 'beige', label='Y Bottom')
    w2.pack()
    w3 = tk.Scale(m, from_=0, to=isize-1, length=400, orient=tk.HORIZONTAL, bg = 'beige', label='X Left')
    w3.pack()
    w4 = tk.Scale(m, from_=1, to=isize, length=400, orient=tk.HORIZONTAL, bg = 'beige', label='X Right')
    w4.set(isize)
    w4.pack()
    w5 = tk.Scale(m, from_=0, to=isize-1, length=400, orient=tk.HORIZONTAL, bg = 'skyblue', label='Height')
    w5.set(0)
    w5.pack()
    show_button = tk.Button(m, width=15, text='Show', command=showImage, bg = 'gold').pack()
    produce_button = tk.Button(m, width=15, text='Cut', command=cutImage, bg = 'gold').pack()
    save_button = tk.Button(m, width=15, text='Save', command=saveImage, bg = 'gold').pack()
    # mainloop() blocks until the window is closed / Stop is pressed.
    m.mainloop()
    break
| RiceAllDay22/Hele-Shaw-Model | MainHeleCode.py | MainHeleCode.py | py | 4,683 | python | en | code | 0 | github-code | 13 |
# Batch-process daily VRS solution files into 2-hour averaged positions,
# convert them to local ENU displacements relative to the reference station,
# then plot the series and export it to a text file.
import os
import numpy as np
from pylab import mpl
import matplotlib.pyplot as plt
import coordinate_transformation as ct
ref = []  # reference position [X, Y, Z] parsed from the '% ref pos' header line
# 12 bins, one per 2-hour window of the day; each holds raw XYZ substrings.
data_allday = [[], [], [], [], [], [],
               [], [], [], [], [], []]
x = []
y = []
z = []
X = []
Y = []
Z = []
result_XYZ = [[], [], []]  # per-bin mean X/Y/Z
result_ENU = [[], [], []]  # per-bin ENU relative to `ref` (accumulates over files)
plot_ENU = [[], [], []]    # ENU in mm relative to the first epoch
plot_times = []            # "YYYY-MM-DD H:00:00" label per bin
file_path = 'D:\\data\\VRS\\国地信VRS\\solution_clear\\py\\'
file_list = os.listdir(file_path)
for file in file_list:
    with open(file_path + file, "r") as f:
        lines = f.readlines()
        for line in lines:
            if line[0] == '%':
                # Header line: extract the reference position from 'ref pos'
                # using fixed column slices -- assumes the solution-file
                # layout is fixed-width; TODO confirm against the format.
                if line[2:9] == 'ref pos':
                    ref.append(float(line[14:27]))
                    ref.append(float(line[30:42]))
                    ref.append(float(line[45:57]))
                continue
            # Column 71 is presumably the solution-quality flag ("1" = fixed)
            # -- confirm against the solution-file format.
            if line[71] == "1":
                # print(line.strip('\n'))
                data_index = int(line[11:13]) // 2  # hour -> 2-hour bin index
                data_allday[data_index].append(line[25: 68])
    for i in range(len(data_allday)):
        if len(data_allday[i]) == 0:
            # result_XYZ[0].append(None)
            # result_XYZ[1].append(None)
            # result_XYZ[2].append(None)
            continue
        # NOTE(review): x/y/z are never cleared between bins, so each bin's
        # mean includes all earlier bins of the same day -- confirm intended.
        for j in range(len(data_allday[i])):
            x.append(float(data_allday[i][j][0:13]))
            y.append(float(data_allday[i][j][16:28]))
            z.append(float(data_allday[i][j][31:43]))
        # threshold_x = np.mean(x)
        # threshold_y = np.mean(y)
        # threshold_z = np.mean(z)
        #
        # for k in range(len(x)):
        #     if np.abs(x[k] - threshold_x) < 0.02:
        #         X.append(x[k])
        #     if np.abs(y[k] - threshold_y) < 0.02:
        #         Y.append(y[k])
        #     if np.abs(z[k] - threshold_z) < 0.02:
        #         Z.append(z[k])
        result_XYZ[0].append(np.mean(x))
        result_XYZ[1].append(np.mean(y))
        result_XYZ[2].append(np.mean(z))
        # Bin label: date taken from the file name, plus the centre hour of
        # the 2-hour window.
        plot_time = file[4:8] + "-" + file[8:10] + "-" + file[10:12] + " " + str(i * 2 + 1) + ":00:00"
        plot_times.append(plot_time)
    # Convert each bin's mean XYZ into ENU relative to the reference position.
    for i in range(len(result_XYZ[0])):
        ENU = ct.xyz2enu([result_XYZ[0][i], result_XYZ[1][i], result_XYZ[2][i]], ref)
        result_ENU[0].append(ENU[0])
        result_ENU[1].append(ENU[1])
        result_ENU[2].append(ENU[2])
    # Reset per-file accumulators before the next day's file.
    ref = []
    data_allday = [[], [], [], [], [], [],
                   [], [], [], [], [], []]
    x = []
    y = []
    z = []
    X = []
    Y = []
    Z = []
    result_XYZ = [[], [], []]
print(plot_times)
# Convert to displacements in mm relative to the first epoch.
for i in range(len(result_ENU[0])):
    plot_ENU[0].append((result_ENU[0][i] - result_ENU[0][0]) * 1000)
    plot_ENU[1].append((result_ENU[1][i] - result_ENU[1][0]) * 1000)
    plot_ENU[2].append((result_ENU[2][i] - result_ENU[2][0]) * 1000)
plt.figure(figsize=(20, 4), dpi=400)
plt.plot(plot_times, plot_ENU[0], label="E")
plt.plot(plot_times, plot_ENU[1], label="N")
plt.plot(plot_times, plot_ENU[2], label="U")
# Set a font able to display Chinese characters
# mpl.rcParams["font.sans-serif"] = ["SimHei"]
plt.title("VRS: GHLY-HLY1")
plt.xlabel("time/h", fontsize=16)
plt.ylabel("mm", fontsize=16)
plt.ylim(-100, 100)
plt.legend(loc="best")
plt.xticks(list(plot_times)[::11])  # thin out the x-axis tick labels
plt.savefig('./result.jpg')
plt.show()
# Export the displacement series as CSV-like lines.
with open('D:\\data\\VRS\\国地信VRS\\ref\\new.txt', 'w') as f2:
    for i in range(len(plot_ENU[0])):
        f2.writelines(plot_times[i] + ',' + str(plot_ENU[0][i]) + ',' + str(plot_ENU[1][i]) + ',' + str(plot_ENU[2][i])+'\n')
| FLAGLEE/My_Python_Code | two_hour_solution.py | two_hour_solution.py | py | 3,495 | python | en | code | 0 | github-code | 13 |
31048041906 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import os
import pickle
from pprint import pprint
import structlog
import aiohttp
import arrow
import ujson
class Verisure:
    """Async client for the Verisure mobile GraphQL API (m-api01/02.verisure.com).

    Handles login (optionally via a persisted MFA "trust" cookie), token
    renewal, and a collection of GraphQL queries/mutations for one
    installation.
    """
    # Module-level structlog logger shared by all instances.
    log = structlog.get_logger(__name__)
    def __init__(self, mfa: bool, username, password, cookieFileName='~/.verisure_mfa_cookie'):
        """Store credentials and create the HTTP session.

        :param mfa: True if the account uses multi-factor auth; login() then
            loads the pickled trust cookie from ``cookieFileName``.
        :param username: Verisure account email address.
        :param password: Verisure account password.
        :param cookieFileName: path of the pickled trust-cookie file.
        """
        self._mfa = mfa
        self._username = username
        self._password = password
        self._cookieFileName = cookieFileName
        self._giid = None  # installation id, filled in by getAllInstallations()
        # Token treated as already expired, forcing a renew/login on first use.
        self.tokenExpires = arrow.now("Europe/Stockholm")
        self._applicationID = "Python"
        self._headers = {
            "Content-Type": "application/json",
            "Host": "m-api01.verisure.com",
            "Cache-Control": "no-cache",
            "APPLICATION_ID": self._applicationID
        }
        # NOTE(review): the ClientSession is created outside a running event
        # loop; aiohttp recommends creating it inside a coroutine -- confirm.
        self._session = aiohttp.ClientSession()
    async def _doSession(self, method, url, headers, data=None, params=None, auth=None):
        """Perform one HTTP request on the shared session.

        :return: the JSON-decoded body when possible, the raw body text when
            it is not valid JSON, or None when the request itself failed.
        """
        try:
            async with self._session.request(method=method, url=url, headers=headers, data=data, params=params, auth=auth) as response:
                try:
                    return await response.json()
                except:  # body was not valid JSON -- fall back to plain text
                    return await response.text()
        except aiohttp.ClientConnectorError as e:
            self.log.error("Exception in _doSession Failed to connect to host", error=e)
            pass
        except Exception as e:
            self.log.error("Exception in _doSession", error=e)
        # Any failure path falls through to None; callers must handle it.
        return None
    async def login(self):
        """Authenticate and cache the installation giid.

        With MFA enabled the persisted trust cookie is loaded first and both
        API hosts are called; without MFA the second host is only tried when
        the first raises.
        """
        self.log.info("trying login")
        _urls = ["https://m-api01.verisure.com/auth/login",
                 "https://m-api02.verisure.com/auth/login"]
        self.auth = aiohttp.BasicAuth(self._username, self._password)
        if self._mfa:
            # with mfa get the trustxxx token from saved file
            try:
                with open(os.path.expanduser(self._cookieFileName), 'rb') as f:
                    self._session.cookies = pickle.load(f)
                # session cookies set now
            except:
                self.log.error("No tokenfile found")
            # NOTE(review): `'errors' not in out` raises TypeError when
            # _doSession returned None (connection failure); also note
            # tokenExpires is not updated on this MFA path -- confirm.
            for url in _urls:
                out = await self._doSession(method="POST", url=url, headers=self._headers, auth=self.auth)
                if 'errors' not in out:
                    await self.getAllInstallations()
        else:
            try:
                out = await self._doSession(method="POST", url=_urls[0], headers=self._headers, auth=self.auth)
                if 'errors' not in out:
                    print("login ")
                    print(_urls[0])
                    self.tokenExpires = arrow.now("Europe/Stockholm").shift(seconds=out['accessTokenMaxAgeSeconds'])
                    await self.getAllInstallations()
            except:
                # First host failed (exception); retry against the second host.
                try:
                    out = await self._doSession(method="POST", url=_urls[1], headers=self._headers, auth=self.auth)
                    if 'errors' not in out:
                        print("login except ")
                        print(_urls[1])
                        self.tokenExpires = arrow.now("Europe/Stockholm").shift(seconds=out['accessTokenMaxAgeSeconds'])
                        await self.getAllInstallations()
                except Exception as e:
                    self.log.error("Exception in login", out=out, error=e)
    async def getMfaToken(self):
        """Interactively bootstrap MFA: trigger an SMS code, validate it and
        persist the resulting trust cookie to ``self._cookieFileName``.

        Must be run once from a terminal (uses input()) before MFA logins work.
        """
        self.auth = aiohttp.BasicAuth(self._username, self._password)
        # Step 1: call auth/login with username and password and get a stepUpToken in reply valid 1200 seconds i.e. 20 minutes
        await self._doSession(method="POST", url="https://m-api01.verisure.com/auth/login", headers=self._headers, auth=self.auth)
        # Step 2: call auth/mfa and Verisure will send you a SMS with a code valid for 300 seconds i.e 5 minutes
        await self._doSession(method="POST", url="https://m-api01.verisure.com/auth/mfa", headers=self._headers)
        smsToken = input("Enter code sent by SMS: ")
        tok = dict()
        tok["token"] = smsToken
        # Step 3: call auth/mfa/validate with the SMS code and get an accesstoken in reply
        await self._doSession(method="POST", url="https://m-api01.verisure.com/auth/mfa/validate", headers=self._headers, data=ujson.dumps(tok))
        # session.cookies contains stepUpCookie, vid, vs-access and vs-refresh
        # Step 4: call auth/trust and get the trust token
        await self._doSession(method="POST", url="https://m-api01.verisure.com/auth/trust", headers=self._headers)
        # session.cookies contains stepUpCookie, vid, vs-access, vs-refresh and vs-trustxxx
        # Step 5: save only trustxxx session.cookies to file
        # NOTE(review): the other cookies are set to None rather than removed
        # from the jar -- confirm pickling None values is acceptable downstream.
        self._session.cookies["vs-access"] = None
        self._session.cookies["vs-stepup"] = None
        self._session.cookies["vs-refresh"] = None
        self._session.cookies["vid"] = None
        with open(os.path.expanduser(self._cookieFileName), 'wb') as f:
            pickle.dump(self._session.cookies, f)
async def renewToken(self):
_urls = ['https://m-api01.verisure.com/auth/token',
'https://m-api02.verisure.com/auth/token']
try:
result = await self._doSession(method="POST", url=_urls[0], headers=self._headers)
self.tokenExpires = arrow.now("Europe/Stockholm").shift(seconds=result['accessTokenMaxAgeSeconds'])
except:
try:
result = await self._doSession(method="POST", url=_urls[1], headers=self._headers)
if "accessTokenMaxAgeSeconds" in result:
self.tokenExpires = arrow.now("Europe/Stockholm").shift(seconds=result['accessTokenMaxAgeSeconds'])
else:
self.log.warning("validateToken cant work without a valid tokenExpires", tokenExpires=self.tokenExpires, error=e)
await self.login()
except Exception as e:
self.log.error("Exception in renewToken", result=result, error=e)
await self.login()
async def _validateToken(self):
now = arrow.now("Europe/Stockholm")
if (self.tokenExpires - now).total_seconds() < 30:
self.log.info("renewing token")
await self.renewToken()
    async def logout(self):
        """Invalidate the session server-side and close the aiohttp session.

        Tries the first API host and falls back to the second on any error.
        """
        _urls = ['https://m-api01.verisure.com/auth/logout',
                 'https://m-api02.verisure.com/auth/logout']
        try:
            await self._doSession(method="DELETE", url=_urls[0], headers=self._headers)
            await self._session.close()
        except:
            try:
                await self._doSession(method="DELETE", url=_urls[1], headers=self._headers)
                await self._session.close()
            except Exception as e:
                self.log.error("Exception in logout", error=e)
    async def _doRequest(self, body):
        """POST a GraphQL request to the API, retrying on the second host.

        :param body: list of GraphQL operation dicts.
        :return: decoded JSON response, or {} when both hosts returned errors
            or an exception occurred.
        """
        _urls = ['https://m-api01.verisure.com/graphql',
                 'https://m-api02.verisure.com/graphql']
        try:
            # Renew the access token first if it is close to expiring.
            await self._validateToken()
            out = await self._doSession(method="POST", url=_urls[0], headers=self._headers, data=ujson.dumps(list(body)))
            if 'errors' in out:
                out2 = await self._doSession(method="POST", url=_urls[1], headers=self._headers, data=ujson.dumps(list(body)))
                if 'errors' in out2:
                    return {}
                else:
                    return out2
            else:
                return out
        except Exception as e:
            self.log.error("Exception in _doRequest", error=e)
            return {}
    async def getAllInstallations(self):
        """Fetch all installations for the account and cache a giid.

        Side effect: ``self._giid`` is set to the giid of the *last*
        installation in the response; all other queries operate on that one.

        :return: the raw GraphQL response.
        """
        _body = [{
            "operationName": "fetchAllInstallations",
            "variables": {
                "email": self._username},
            "query": "query fetchAllInstallations($email: String!){\n account(email: $email) {\n installations {\n giid\n alias\n customerType\n dealerId\n subsidiary\n pinCodeLength\n"
            "locale\n address {\n street\n city\n postalNumber\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
        response = await self._doRequest(_body)
        for d in response["data"]["account"]["installations"]:
            self._giid = d["giid"]
        return response
async def getBatteryProcessStatus(self):
_body = [{
"operationName": "batteryDevices",
"variables": {
"giid": self._giid},
"query": "query batteryDevices($giid: String!) {\n installation(giid: $giid) {\n batteryDevices {\n device {\n area\n deviceLabel\n gui {\n picture\n label\n __typename\n"
"}\n __typename\n }\n batteryCount\n recommendedToChange\n batteryTrend\n estimatedRemainingBatteryLifetime\n batteryType\n batteryHealth\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["batteryDevices"]:
name = d["device"]["area"] + "/" + d["device"]["gui"]["label"]
out[name] = dict()
out[name]["batteryHealth"] = d["batteryHealth"]
out[name]["estimatedRemainingBatteryLifetime"] = d["estimatedRemainingBatteryLifetime"]
out[name]["recommendedToChange"] = d["recommendedToChange"]
return out
async def getClimate(self):
_body = [{
"operationName": "Climate",
"variables": {
"giid": self._giid},
"query": "query Climate($giid: String!) {\n installation(giid: $giid) {\n climates {\n device {\n deviceLabel\n area\n gui {\n label\n support\n __typename\n"
"}\n __typename\n }\n humidityEnabled\n humidityTimestamp\n humidityValue\n temperatureTimestamp\n temperatureValue\n supportsThresholdSettings\n"
"thresholds {\n aboveMaxAlert\n belowMinAlert\n sensorType\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["climates"]:
name = d["device"]["area"] + "/" + d["device"]["gui"]["label"]
out[name] = dict()
out[name]["temperature"] = d["temperatureValue"]
out[name]["timestamp"] = arrow.get(d["temperatureTimestamp"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
return out
async def userTracking(self):
_body = [{
"operationName": "userTrackings",
"variables": {
"giid": self._giid},
"query": "query userTrackings($giid: String!) {\n installation(giid: $giid) {\n userTrackings {\n isCallingUser\n webAccount\n status\n xbnContactId\n currentLocationName\n"
"deviceId\n name\n initials\n currentLocationTimestamp\n deviceName\n currentLocationId\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["userTrackings"]:
name = d["name"]
out[name] = dict()
if (d["currentLocationName"] is not None):
out[name]["currentLocationName"] = d["currentLocationName"]
else:
out[name]["currentLocationName"] = "None"
if (d["currentLocationTimestamp"] is not None):
out[name]["timestamp"] = arrow.get(d["currentLocationTimestamp"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
else:
out[name]["timestamp"] = arrow.get('1970-01-01 00:00:00').format("YYYY-MM-DD HH:mm:ss")
return out
async def getAllCardConfig(self):
_body = [{
"operationName": "AllCardConfig",
"variables": {
"giid": self._giid},
"query": "query AllCardConfig($giid: String!) {\n installation(giid: $giid) {\n allCardConfig {\n cardName\n selection\n visible\n items {\n id\n visible\n"
"__typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def getVacationMode(self):
_body = [{
"operationName": "VacationMode",
"variables": {
"giid": self._giid},
"query": "query VacationMode($giid: String!) {\n installation(giid: $giid) {\n vacationMode {\n isAllowed\n turnOffPetImmunity\n fromDate\n toDate\n temporaryContactName\n"
"temporaryContactPhone\n active\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
name = response["data"]["installation"]["vacationMode"]["__typename"]
out[name] = dict()
out[name]["active"] = response["data"]["installation"]["vacationMode"]["active"]
if (response["data"]["installation"]["vacationMode"]["fromDate"] == None):
out[name]["fromDate"] = None
else:
arrow.get(response["data"]["installation"]["vacationMode"]["fromDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
if (response["data"]["installation"]["vacationMode"]["toDate"] == None):
out[name]["toDate"] = None
else:
out[name]["toDate"] = arrow.get(response["data"]["installation"]["vacationMode"]["toDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
out[name]["contactName"] = response["data"]["installation"]["vacationMode"]["temporaryContactName"]
out[name]["contactPhone"] = response["data"]["installation"]["vacationMode"]["temporaryContactPhone"]
return out
async def getCommunication(self):
_body = [{
"operationName": "communicationState",
"variables": {
"giid": self._giid},
"query": "query communicationState($giid: String!) {\n installation(giid: $giid) {\n communicationState {\n hardwareCarrierType\n result\n mediaType\n device {\n deviceLabel\n"
"area\n gui {\n label\n __typename\n }\n __typename\n }\n testDate\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["communicationState"]:
name = d["device"]["area"]
if out.get(name) == None:
out[name] = list()
part = dict()
part["result"] = d["result"]
part["hardwareCarrierType"] = d["hardwareCarrierType"]
part["mediaType"] = d["mediaType"]
part["timestamp"] = arrow.get(d["testDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
out[name].append(part)
return out
async def getEventLogCategories(self):
_body = [{
"operationName": "EventLogCategories",
"variables": {
"giid": self._giid},
"query": "query EventLogCategories($giid: String!) {\n installation(giid: $giid) {\n notificationCategoryFilter\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response["data"]["installation"]["notificationCategoryFilter"]
async def getEventLog(self, fromDate, toDate, eventCategories):
# "eventCategories":["INTRUSION","FIRE","SOS","WATER","ANIMAL","TECHNICAL","WARNING","ARM","DISARM","LOCK","UNLOCK","PICTURE","CLIMATE","CAMERA_SETTINGS","DOORWINDOW_STATE_OPENED","DOORWINDOW_STATE_CLOSED"],
_body = [{
"operationName": "EventLog",
"variables": {
"hideNotifications": True,
"offset": 0,
"pagesize": 255,
"eventCategories": eventCategories,
"giid": self._giid,
"eventContactIds": [],
"fromDate":arrow.get(fromDate).format("YYYYMMDD"),
"toDate":arrow.get(toDate).format("YYYYMMDD")},
"query":"query EventLog($giid: String!, $offset: Int!, $pagesize: Int!, $eventCategories: [String], $fromDate: String, $toDate: String, $eventContactIds: [String]) {\n installation(giid: $giid) {\n"
"eventLog(offset: $offset, pagesize: $pagesize, eventCategories: $eventCategories, eventContactIds: $eventContactIds, fromDate: $fromDate, toDate: $toDate) {\n moreDataAvailable\n"
"pagedList {\n device {\n deviceLabel\n area\n gui {\n label\n __typename\n }\n __typename\n }\n"
"arloDevice {\n name\n __typename\n }\n gatewayArea\n eventType\n eventCategory\n eventId\n eventTime\n userName\n"
"armState\n userType\n climateValue\n sensorType\n eventCount\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["eventLog"]["pagedList"]:
name = d["eventCategory"]
if out.get(name) == None:
out[name] = list()
part = dict()
part["device"] = d["device"]["area"]
part["timestamp"] = arrow.get(d["eventTime"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
if (name in ["ARM", "DISARM"]):
part["user"] = d["userName"]
part["armState"] = d["armState"]
out[name].append(part)
return out
async def getInstallation(self):
_body = [{
"operationName": "Installation",
"variables": {
"giid": self._giid},
"query": "query Installation($giid: String!) {\n installation(giid: $giid) {\n alias\n pinCodeLength\n customerType\n notificationCategoryFilter\n userNotificationCategories\n"
"doorWindowReportState\n dealerId\n isOperatorMonitorable\n removeInstallationNotAllowed\n installationNumber\n editInstallationAddressNotAllowed\n locale\n"
"editGuardInformationAllowed\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response["data"]["installation"]
async def getUsers(self):
_body = [{
"operationName": "Users",
"variables": {
"giid": self._giid},
"query": "fragment Users on User {\n profile\n accessCodeChangeInProgress\n hasDoorLockTag\n pendingInviteProfile\n relationWithInstallation\n contactId\n accessCodeSetTransactionId\n userIndex\n name\n"
"hasTag\n hasDoorLockPin\n hasDigitalSignatureKey\n email\n mobilePhoneNumber\n callOrder\n tagColor\n phoneNumber\n webAccount\n doorLockUser\n alternativePhoneNumber\n keyHolder\n"
"hasCode\n pendingInviteStatus\n xbnContactId\n userAccessTimeLimitation {\n activeOnMonday\n activeOnTuesday\n activeOnWednesday\n activeOnThursday\n activeOnFriday\n"
"activeOnSaturday\n activeOnSunday\n fromLocalDate\n toLocalDate\n toLocalTimeOfDay\n fromLocalTimeOfDay\n __typename\n }\n __typename\n}\n\nquery Users($giid: String!)"
"{\n users(giid: $giid) {\n ...Users\n notificationTypes\n notificationSettings {\n contactFilter {\n contactName\n filterContactId\n __typename\n }\n"
"notificationCategory\n notificationType\n optionFilter\n __typename\n }\n keyfob {\n device {\n deviceLabel\n area\n __typename\n }\n"
"__typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response["data"]["users"]
async def getVacationModeAndPetSetting(self):
_body = [{
"operationName": "VacationModeAndPetSettings",
"variables": {
"giid": self._giid},
"query": "query VacationModeAndPetSettings($giid: String!) {\n installation(giid: $giid) {\n vacationMode {\n isAllowed\n turnOffPetImmunity\n fromDate\n toDate\n temporaryContactName\n"
"temporaryContactPhone\n active\n __typename\n }\n petSettings {\n devices {\n area\n deviceLabel\n petSettingsActive\n __typename\n }\n"
"__typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["petSettings"]["devices"]:
name = d["area"]
out[name] = dict()
out[name]["petSettingsActive"] = d["petSettingsActive"]
name = response["data"]["installation"]["vacationMode"]["__typename"]
out[name] = dict()
out[name]["active"] = response["data"]["installation"]["vacationMode"]["active"]
if (response["data"]["installation"]["vacationMode"]["fromDate"] == None):
out[name]["toDate"] = None
else:
arrow.get(response["data"]["installation"]["vacationMode"]["fromDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
if (response["data"]["installation"]["vacationMode"]["toDate"] == None):
out[name]["toDate"] = None
else:
out[name]["toDate"] = arrow.get(response["data"]["installation"]["vacationMode"]["toDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
out[name]["contactName"] = response["data"]["installation"]["vacationMode"]["temporaryContactName"]
out[name]["contactPhone"] = response["data"]["installation"]["vacationMode"]["temporaryContactPhone"]
out[name]["turnOffPetImmunity"] = response["data"]["installation"]["vacationMode"]["turnOffPetImmunity"]
return out
async def getPetType(self):
_body = [{"operationName": "GetPetType",
"variables": {
"giid": self._giid},
"query": "query GetPetType($giid: String!) {\n installation(giid: $giid) {\n pettingSettings {\n petType\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response["data"]["installation"]["pettingSettings"]["petType"]
async def getCentralUnit(self):
_body = [{
"operationName": "centralUnits",
"variables": {
"giid": self._giid},
"query": "query centralUnits($giid: String!) {\n installation(giid: $giid) {\n centralUnits {\n macAddress {\n macAddressEthernet\n __typename\n }\n device {\n deviceLabel\n"
"area\n gui {\n label\n support\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["centralUnits"]:
name = d["device"]["area"]
out[name] = dict()
out[name]["label"] = d["device"]["gui"]["label"]
out[name]["macAddressEthernet"] = d["macAddress"]["macAddressEthernet"]
return out
async def getDevices(self):
_body = [{"operationName": "Devices",
"variables": {
"giid": self._giid},
"query": "fragment DeviceFragment on Device {\n deviceLabel\n area\n capability\n gui {\n support\n picture\n deviceGroup\n sortOrder\n label\n __typename\n }\n monitoring {\n"
"operatorMonitored\n __typename\n }\n __typename\n}\n\nquery Devices($giid: String!) {\n installation(giid: $giid) {\n devices {\n ...DeviceFragment\n canChangeEntryExit\n"
"entryExit\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = list()
for d in response["data"]["installation"]["devices"]:
label = d["gui"]["label"]
namn = d["area"]
out.append(f"{namn}/{label}")
# out[label] = {"namn": d["area"], "label": d["gui"]["label"]}
# out[name][""] = d["currentLocationName"]
# out[name]["timestamp"] = arrow.get(d["currentLocationTimestamp"]).format("YYYY-MM-DD HH:mm")
return out
async def setArmStatusAway(self, code):
_body = [{
"operationName": "armAway",
"variables": {
"giid": self._giid,
"code": code},
"query": "mutation armAway($giid: String!, $code: String!) {\n armStateArmAway(giid: $giid, code: $code)\n}\n"}]
response = await self._doRequest(_body)
return response
async def setArmStatusHome(self, code):
_body = [{
"operationName": "armHome",
"variables": {
"giid": self._giid,
"code": code},
"query": "mutation armHome($giid: String!, $code: String!) {\n armStateArmHome(giid: $giid, code: $code)\n}\n"}]
response = await self._doRequest(_body)
return response
async def getArmState(self):
_body = [{
"operationName": "ArmState",
"variables": {
"giid": self._giid},
"query": "query ArmState($giid: String!) {\n installation(giid: $giid) {\n armState {\n type\n statusType\n date\n name\n changedVia\n __typename\n }\n"
"__typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
name = response["data"]["installation"]["armState"]["__typename"]
out[name] = dict()
out[name]["statusType"] = response["data"]["installation"]["armState"]["statusType"]
out[name]["changedVia"] = response["data"]["installation"]["armState"]["changedVia"]
out[name]["timestamp"] = arrow.get(response["data"]["installation"]["armState"]["date"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
return out
async def getBroadbandStatus(self):
_body = [{
"operationName": "Broadband",
"variables": {
"giid": self._giid},
"query": "query Broadband($giid: String!) {\n installation(giid: $giid) {\n broadband {\n testDate\n isBroadbandConnected\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
name = response["data"]["installation"]["broadband"]["__typename"]
out[name] = dict()
out[name]["connected"] = response["data"]["installation"]["broadband"]["isBroadbandConnected"]
out[name]["timestamp"] = arrow.get(response["data"]["installation"]["broadband"]["testDate"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
return out
async def getCamera(self):
_body = [{"operationName": "Camera",
"variables": {
"giid": self._giid,
"all": True},
"query": "fragment CommonCameraFragment on Camera {\n device {\n deviceLabel\n area\n capability\n gui {\n label\n support\n __typename\n }\n __typename\n "
"}\n type\n latestImageCapture\n motionDetectorMode\n imageCaptureAllowedByArmstate\n accelerometerMode\n supportedBlockSettingValues\n imageCaptureAllowed\n initiallyConfigured\n "
"imageResolution\n hasMotionSupport\n totalUnseenImages\n canTakePicture\n takePictureProblems\n canStream\n streamProblems\n videoRecordSettingAllowed\n microphoneSettingAllowed\n "
"supportsFullDuplexAudio\n fullDuplexAudioProblems\n cvr {\n supported\n recording\n availablePlaylistDays\n __typename\n }\n __typename\n}\n\nquery Camera($giid: String!, $all: Boolean!)"
"{\n installation(giid: $giid) {\n cameras(allCameras: $all) {\n ...CommonCameraFragment\n canChangeEntryExit\n entryExit\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response["data"]["installation"]["cameras"]
async def getCapability(self):
_body = [{
"operationName": "Capability",
"variables": {
"giid": self._giid},
"query": "query Capability($giid: String!) {\n installation(giid: $giid) {\n capability {\n current\n gained {\n capability\n __typename\n }\n __typename\n"
"}\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def chargeSms(self):
_body = [{
"operationName": "ChargeSms",
"variables": {
"giid": self._giid},
"query": "query ChargeSms($giid: String!) {\n installation(giid: $giid) {\n chargeSms {\n chargeSmartPlugOnOff\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def disarmAlarm(self, code):
_body = [{
"operationName": "disarm",
"variables": {
"giid": self._giid,
"code": code},
"query": "mutation disarm($giid: String!, $code: String!) {\n armStateDisarm(giid: $giid, code: $code)\n}\n"}]
response = await self._doRequest(_body)
return response
async def doorLock(self, deviceLabel):
_body = [{
"operationName": "DoorLock",
"variables": {
"giid": self._giid,
"deviceLabel": deviceLabel},
"query": "mutation DoorLock($giid: String!, $deviceLabel: String!, $input: LockDoorInput!) {\n DoorLock(giid: $giid, deviceLabel: $deviceLabel, input: $input)\n}\n"}]
response = await self._doRequest(_body)
return response
async def doorUnlook(self, deviceLabel):
_body = [{
"operationName": "DoorUnlock",
"variables": {
"giid": self._giid,
"deviceLabel": deviceLabel},
"input": code,
"query": "mutation DoorUnlock($giid: String!, $deviceLabel: String!, $input: LockDoorInput!) {\n DoorUnlock(giid: $giid, deviceLabel: $deviceLabel, input: $input)\n}\n"}]
response = await self._doRequest(_body)
return response
async def getDoorWindow(self):
_body = [{
"operationName": "DoorWindow",
"variables": {
"giid": self._giid},
"query": "query DoorWindow($giid: String!) {\n installation(giid: $giid) {\n doorWindows {\n device {\n deviceLabel\n __typename\n }\n type\n area\n state\n"
"wired\n reportTime\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
out = dict()
for d in response["data"]["installation"]["doorWindows"]:
name = d["area"]
out[name] = dict()
out[name]["state"] = d["state"]
out[name]["timestamp"] = arrow.get(d["reportTime"]).to('Europe/Stockholm').format("YYYY-MM-DD HH:mm:ss")
return out
async def guardianSos(self):
_body = [{
"operationName": "GuardianSos",
"variables": {},
"query": "query GuardianSos {\n guardianSos {\n serverTime\n sos {\n fullName\n phone\n deviceId\n deviceName\n giid\n type\n username\n expireDate\n"
"warnBeforeExpireDate\n contactId\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def isGuardianActivated(self):
_body = [{
"operationName": "IsGuardianActivated",
"variables": {
"giid": self._giid,
"featureName": "GUARDIAN"},
"query": "query IsGuardianActivated($giid: String!, $featureName: String!) {\n installation(giid: $giid) {\n activatedFeature {\n isFeatureActivated(featureName: $featureName)\n __typename\n"
"}\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def permissions(self):
_body = [{
"operationName": "Permissions",
"variables": {
"giid": self._giid,
"email": self._username},
"query": "query Permissions($giid: String!, $email: String!) {\n permissions(giid: $giid, email: $email) {\n accountPermissionsHash\n name\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def pollArmState(self, transactionID, futurestate):
_body = [{
"operationName": "pollArmState",
"variables": {
"giid": self._giid,
"transactionId": transactionId,
"futureState": futureState},
"query": "query pollArmState($giid: String!, $transactionId: String, $futureState: ArmStateStatusTypes!) {\n installation(giid: $giid) {\n"
"armStateChangePollResult(transactionId: $transactionId, futureState: $futureState) {\n result\n createTime\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def pollLockState(self, transactionID, deviceLabel, futureState):
_body = [{
"operationName": "pollLockState",
"variables": {
"giid": self._giid,
"transactionId": transactionId,
"deviceLabel": deviceLabel,
"futureState": futureState},
"query": "query pollLockState($giid: String!, $transactionId: String, $deviceLabel: String!, $futureState: DoorLockState!) {\n installation(giid: $giid) {\n"
"doorLockStateChangePollResult(transactionId: $transactionId, deviceLabel: $deviceLabel, futureState: $futureState) {\n result\n createTime\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def remainingSms(self):
_body = [{
"operationName": "RemainingSms",
"variables": {
"giid": self._giid},
"query": "query RemainingSms($giid: String!) {\n installation(giid: $giid) {\n remainingSms\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def smartButton(self):
_body = [{
"operationName": "SmartButton",
"variables": {
"giid": self._giid},
"query": "query SmartButton($giid: String!) {\n installation(giid: $giid) {\n smartButton {\n entries {\n smartButtonId\n icon\n label\n color\n active\n"
"action {\n actionType\n expectedState\n target {\n ... on Installation {\n alias\n __typename\n }\n"
"... on Device {\n deviceLabel\n area\n gui {\n label\n __typename\n }\n featureStatuses(type: \"SmartPlug\")"
"{\n device {\n deviceLabel\n __typename\n }\n ... on SmartPlug {\n icon\n isHazardous\n"
"__typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n __typename\n }\n"
"__typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def smartLock(self):
_body = [{
"operationName": "SmartLock",
"variables": {
"giid": self._giid},
"query": "query SmartLock($giid: String!) {\n installation(giid: $giid) {\n smartLocks {\n lockStatus\n doorState\n lockMethod\n eventTime\n doorLockType\n secureMode\n"
"device {\n deviceLabel\n area\n __typename\n }\n user {\n name\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
async def setSmartPlug(self, deviceLabel, state):
_body = [{
"operationName": "UpdateState",
"variables": {
"giid": self._giid,
"deviceLabel": deviceLabel,
"state": state},
"query": "mutation UpdateState($giid: String!, $deviceLabel: String!, $state: Boolean!) {\n SmartPlugSetState(giid: $giid, input: [{deviceLabel: $deviceLabel, state: $state}])}"}]
response = await self._doRequest(_body)
return response
async def getSmartplugState(self, devicelabel):
_body = [{
"operationName": "SmartPlug",
"variables": {
"giid": self._giid,
"deviceLabel": deviceLabel},
"query": "query SmartPlug($giid: String!, $deviceLabel: String!) {\n installation(giid: $giid) {\n smartplugs(filter: {deviceLabels: [$deviceLabel]}) {\n device {\n deviceLabel\n area\n"
"__typename\n }\n currentState\n icon\n isHazardous\n __typename\n }\n __typename\n }\n}\n"}]
response = await self._doRequest(_body)
return response
    async def read_smartplug_state(self):
        """Return a mapping of {area name: current state} for every smart plug."""
        __body = [{
            "operationName": "SmartPlug",
            "variables": {
                "giid": self._giid},
            "query": "query SmartPlug($giid: String!) {\n installation(giid: $giid) {\n smartplugs {\n device {\n deviceLabel\n area\n __typename\n }\n currentState\n icon\n"
            "isHazardous\n __typename\n }\n __typename\n }\n}\n"}]
        response = await self._doRequest(__body)
        # Flatten the GraphQL response into {area: currentState}.
        # NOTE(review): if two plugs share the same area name, the later one wins.
        out = dict()
        for d in response["data"]["installation"]["smartplugs"]:
            name = d["device"]["area"]
            out[name] = d["currentState"]
        return out
| Soleg06/Verisure_API | verisureGrafqlAPI_async.py | verisureGrafqlAPI_async.py | py | 39,369 | python | en | code | 1 | github-code | 13 |
3721332770 | '''
Given the root of a binary tree, imagine yourself standing on the right side of it,
return the values of the nodes you can see ordered from top to bottom.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rightSideView(self, root: TreeNode) -> List[int]:
        """Return the node values visible from the right side, top to bottom."""
        self.result = {}
        self.dfs(root, 0)
        # Levels are contiguous 0..k, so iterating sorted keys preserves
        # top-to-bottom order.
        return [self.result[depth] for depth in sorted(self.result)]
    def dfs(self, root: TreeNode, level: int):
        """Right-first DFS: the first value recorded per level is the rightmost node."""
        if root is None:
            return
        self.dfs(root.right, level + 1)
        self.result.setdefault(level, root.val)
        self.dfs(root.left, level + 1)
| JaeEon-Ryu/Coding_test | LeetCode/0199_ Binary Tree Right Side View.py | 0199_ Binary Tree Right Side View.py | py | 794 | python | en | code | 1 | github-code | 13 |
46200892704 | #!/usr/bin/env python3
"""LAN monitor stock notifications handler
"""
__version__ = "3.1"
#==========================================================
#
# Chris Nelson, Copyright 2021-2023
#
# 3.1 230320 - Debug mode status dump
# 3.0 230301 - Packaged
# V2.0 221130 Dropped --once, added --service. Added on-demand summary.
# V1.4 221120 Summaries optional if SummaryDays is not defined.
# V1.3 220420 Incorporated funcs3 timevalue and retime
# V1.2 220217 Allow logging of repeat warnings when the log level is INFO or DEBUG. Catch snd_notif/snd_email fails.
# V1.1c 220101 Bug fix - clear prior events on config file reload (re-init of notification handlers)
# V1.1b 210604 Logging fix for logging fails in service mode and LoggingLevel 20
# V1.1a 210529 Notification and logging fix along with funcs3 V0.7a
# V1.1 210523 Added LogSummary switch
# V1.0 210507 New
#
# Changes pending
#
#==========================================================
import datetime
import __main__
from cjnfuncs.cjnfuncs import getcfg, snd_email, snd_notif, logging, timevalue
import lanmonitor.globvars as globvars
from lanmonitor.lanmonfuncs import next_summary_timestring, RTN_PASS, RTN_WARNING, RTN_FAIL, RTN_CRITICAL
from lanmonitor.lanmonitor import inst_dict
# Configs / Constants
HANDLER_NAME = "stock_notif"
NOTIF_SUBJ = "LAN Monitor"
class notif_class:
    """Stock notification handler.

    Tracks WARNING/FAIL/CRITICAL events reported by monitor plugins, sends
    notifications, periodically re-notifies outstanding criticals, and
    produces scheduled / on-demand status summaries.
    """
    # NOTE(review): these are class-level mutable attributes shared by all
    # instances. __init__ overwrites/clears them, and only one instance is
    # created in practice, but a second live instance would share `events`.
    events = {}
    next_summary = None
    next_renotif = None
    def __init__(self):
        logging.debug (f"Notif handler <{__name__}> initialized")
        self.next_summary = next_summary_timestring()
        if not globvars.args.service:
            self.next_summary = datetime.datetime.now() # force summary in interactive debug level logging
        self.next_renotif = datetime.datetime.now().replace(microsecond=0) # forcing int seconds keeps logging value prettier
        self.events.clear()
    def are_criticals (self):
        """ Returns True if there are any critical events in the events dictionary.
        """
        for event in self.events:
            if self.events[event]["criticality"] == RTN_CRITICAL:
                return True
        return False
    def log_event (self, dict):
        """ Handle any logging for each event status type
        Passed in dict keys:
            notif_key - Corresponds to the monitortype_key in the config file
            rslt
                RTN_PASS - Clears any prior logged WARNING / FAIL / CRITICAL events
                RTN_WARNING - Logged and included in summary, but no notification.
                RTN_FAIL - Logged & notified
                RTN_CRITICAL - Logged & notified, with periodic renotification
            message - Message text from the monitor plugin
        All notifications are disabled if config NotifList is not defined.
        """
        if dict["rslt"] == RTN_PASS:
            logging.info(dict["message"])
            # A pass clears any previously recorded event for this key.
            if dict["notif_key"] in self.events:
                del self.events[dict["notif_key"]]
                logging.warning(f" Event {dict['notif_key']} now passing. Removed from events log.")
            return
        if dict["rslt"] == RTN_WARNING: # Normally log a warning only once so as to not flood the log
            if dict["notif_key"] not in self.events or logging.getLogger().level < logging.WARNING:
                logging.warning(dict["message"])
        else: # RTN_FAIL and RTN_CRITICAL cases
            if dict["rslt"] == RTN_CRITICAL:
                # if there are no prior active criticals, then set renotif time to now + renotif value
                if self.next_renotif < datetime.datetime.now() and not self.are_criticals():
                    self.next_renotif += datetime.timedelta(seconds=timevalue(getcfg("CriticalReNotificationInterval")).seconds)
                    if globvars.args.service:
                        logging.debug(f"Next critical renotification: {self.next_renotif}")
            if globvars.args.service:
                # Notify only the first time an event appears, to avoid repeats.
                if dict["notif_key"] not in self.events:
                    if getcfg("NotifList", False):
                        try:
                            snd_notif (subj=NOTIF_SUBJ, msg=dict["message"], log=True)
                        except Exception as e:
                            logging.warning(f"snd_notif failed. Email server down?:\n {dict['message']}\n {e}")
                    else:
                        logging.warning(dict["message"])
            else: # non-service mode
                logging.warning(dict["message"])
            self.events[dict["notif_key"]] = {"message": dict["message"], "criticality": dict["rslt"]}
    def each_loop(self):
        """ Status dump enabled either by:
            Signal SIGUSR2
            Debug level logging (args verbose == 2) and non-service mode
        """
        logging.debug (f"Entering: {HANDLER_NAME}.each_loop()")
        if not globvars.sig_status and not (not globvars.args.service and globvars.args.verbose == 2):
            return
        globvars.sig_status = False
        status_log = f" {'Monitor item'.ljust(globvars.keylen)} Prior run time Next run time Last check status\n"
        for key in inst_dict:
            if key in self.events:
                status = self.events[key]['message']
            else:
                status = " OK"
            status_log += f" {key.ljust(globvars.keylen)} {inst_dict[key].prior_run} {inst_dict[key].next_run} {status}\n"
            # NOTE - prior_run vars are not defined until after first run. each_loop() isn't called until after check items have been run, so _shouldn't_ crash.
        logging.warning(f"On-demand status dump:\n{status_log}")
    def renotif(self):
        """ Periodically send a consolidated notification with all current critical events.
        if renotif time passed then
            if there are active criticals then
                send consolidated renotif message
            else
                set renotif time = now, which allows next critical to be notified immediately
        All notifications are disabled if config NotifList is not defined.
        """
        logging.debug (f"Entering: {HANDLER_NAME}.renotif()")
        if not getcfg("NotifList", False):
            return
        if (self.next_renotif < datetime.datetime.now()):
            if self.are_criticals():
                # Build one consolidated message from every active critical.
                criticals = ""
                for event in self.events:
                    if self.events[event]["criticality"] == RTN_CRITICAL:
                        criticals += f"\n {self.events[event]['message']}"
                try:
                    snd_notif (subj=NOTIF_SUBJ, msg=criticals, log=True)
                except Exception as e:
                    logging.warning(f"snd_notif failed. Email server down?:\n {criticals}\n {e}")
                self.next_renotif += datetime.timedelta(seconds=timevalue(getcfg("CriticalReNotificationInterval")).seconds)
                logging.debug(f"Next critical renotification: {self.next_renotif}")
            else:
                self.next_renotif = datetime.datetime.now().replace(microsecond=0)
    def summary(self):
        """ Periodically produce a summary and email it and print it in the log file.
        Config file params
            SummaryDays, SummaryTime - processed by lanmonfuncs.next_summary_timestring().
                Comment out SummaryDays to disable periodic summaries.
            EmailTo - Whitespace separated list of email addresses.
                Comment out EmailTo to disable emailing of summaries.
            LogSummary - Cause the summary to be printed to the log file.
        Summary debug feature: The summary will be printed when running in non-service
        mode and debug level logging.
        On-demand summary feature: In service mode, a summary may be forced by placing a
        file named "summary" in the program directory. The file will be deleted and the
        summary will be printed to the log file.
        """
        logging.debug (f"Entering: {HANDLER_NAME}.summary()")
        # NOTE(review): the local `sum` shadows the built-in sum() within this method.
        if globvars.sig_summary:
            globvars.sig_summary = False
            sum = ""
            if len(self.events) == 0:
                sum += " No current events. All is well."
            else:
                for event in self.events:
                    sum += f"{self.events[event]['message']}\n"
            logging.warning(f"On-demand summary:\n{sum}")
        if self.next_summary: # Will be None if SummaryDays is not defined.
            if (self.next_summary < datetime.datetime.now()) or not globvars.args.service:
                sum = ""
                if len(self.events) == 0:
                    sum += " No current events. All is well."
                else:
                    for event in self.events:
                        sum += f"{self.events[event]['message']}\n"
                if not globvars.args.service:
                    # Interactive/debug run: just show the summary, never email.
                    logging.debug(f"lanmonitor status summary:\n{sum}")
                    return
                if getcfg("EmailTo", False):
                    try:
                        snd_email(subj="lanmonitor status summary", body=sum, to=getcfg("EmailTo"), log=True)
                    except Exception as e:
                        logging.warning(f"snd_summary failed. Email server down?:\n {e}")
                if getcfg("LogSummary", False):
                    logging.warning(f"Summary:\n{sum}")
self.next_summary = next_summary_timestring() | cjnaz/lanmonitor | src/lanmonitor/stock_notif.py | stock_notif.py | py | 9,608 | python | en | code | 2 | github-code | 13 |
71031628817 | from flask import Flask, render_template, url_for, request
app = Flask(__name__)
import pyshorteners
import pyperclip
def shortenit(longurl):
    """Shorten *longurl* via the TinyURL service and return the short URL."""
    shortener = pyshorteners.Shortener()
    return shortener.tinyurl.short(longurl)
def convert(longurl):
    """Validate *longurl*, shorten it, copy the result to the clipboard, and return it.

    Returns a user-facing error string for URLs that contain spaces or are blank.
    """
    if ' ' in longurl:
        return "Remove spaces from URL 🥲"
    if len(longurl) == 0:
        return "URL Box is blank 😒"
    shorturl = shortenit(longurl)
    # Copy so the user can paste the short URL straight away.
    # (Removed the dead `spam = pyperclip.paste()` line, which only read the
    # clipboard and discarded the result.)
    pyperclip.copy(shorturl)
    return shorturl
@app.route('/')
# @app.route('/home')
def home():
    """Render the main page with the URL input form."""
    return render_template("index.html")
@app.route('/result', methods=['POST', 'GET'])
def result():
    """Handle the form submission: shorten the posted URL and re-render the page."""
    form_data = request.form.to_dict()
    shortened = convert(form_data["name"])
    return render_template('index.html', name=shortened)
if __name__ == "__main__":
app.run(debug=True) | priyanshuv-raw/CodeClauseInternship_URLShortner | run.py | run.py | py | 863 | python | en | code | 0 | github-code | 13 |
38320405981 | import datetime
from django.db.models import Q
from django.http import Http404
from rest_framework import status
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from rest_framework.exceptions import ValidationError
from .permissions import IsAdmin, IsAgent, IsAdminOrAgent
from .models import User
from .serializers import UserSerializer, LoginSerializer, ListUserSerializer, CreateAdminSerializer, \
ApproveAgentSerializer
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
class UserView(APIView):
    """POST: register a new user account and return a JWT for it."""
    permission_classes = (AllowAny,)
    def post(self, request):
        user_serializer = UserSerializer(data=request.data)
        if not user_serializer.is_valid():
            # NOTE(review): any validation failure is reported as a duplicate
            # account, even though other field errors are possible -- verify.
            response = {
                'success': False,
                'message': 'This account already exists',
            }
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        user_serializer.save()
        # Issue a JWT immediately so the client is logged in after signup.
        user = User.objects.get(email=request.data['email'])
        payload = jwt_payload_handler(user)
        token = jwt_encode_handler(payload)
        response = {
            'success': True,
            'message': 'User registered successfully',
            'token': token
        }
        return Response(response, status=status.HTTP_201_CREATED)
class CreateAdminView(APIView):
    """POST: allow an authenticated admin to create another admin account."""
    permission_classes = (IsAuthenticated, IsAdmin,)
    authentication_classes = (JSONWebTokenAuthentication,)
    def post(self, request):
        user_serializer = CreateAdminSerializer(data=request.data)
        if not user_serializer.is_valid():
            response = {
                'success': False,
                'message': 'This account already exists',
            }
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        # Bug fix: the validated serializer was never saved, so the success
        # response was returned without actually creating the admin account.
        user_serializer.save()
        response = {
            'success': True,
            'message': 'Admin registered successfully',
        }
        return Response(response, status=status.HTTP_201_CREATED)
class LoginView(APIView):
    """POST: authenticate credentials and return a JWT plus role flags."""
    permission_classes = (AllowAny,)
    serializer_class = LoginSerializer
    def post(self, request):
        serializer = self.serializer_class(data=request.data)
        try:
            # LoginSerializer raises ValidationError on bad credentials.
            serializer.is_valid(raise_exception=True)
            response = {
                'success': True,
                'message': 'User logged in successfully',
                'token': serializer.data['token'],
                'is_customer': serializer.data['is_customer'],
                'is_agent': serializer.data['is_agent'],
                'is_admin': serializer.data['is_admin']
            }
            return Response(response, status=status.HTTP_200_OK)
        except ValidationError as e:
            # NOTE(review): a validation failure (bad credentials) is reported
            # as 'Internal Server Error'; the f-string has no placeholders.
            response = {
                'success': False,
                'message': f'Internal Server Error'
            }
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
class ProfileView(APIView):
    """GET: return the authenticated user's own profile details."""
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
    def get(self, request):
        try:
            user = User.objects.get(email=request.user)
            profile = {
                'success': True,
                'message': 'Profile fetched',
                'email': user.email,
                'first_name': user.first_name,
                'last_name': user.last_name,
                'is_customer': user.is_customer,
                'is_agent': user.is_agent,
                'is_admin': user.is_admin,
                'last_login': user.last_login,
                'date_joined': user.date_joined,
            }
            return Response(profile, status=status.HTTP_200_OK)
        except Exception as e:
            error_body = {
                'success': False,
                'message': 'User does not exists',
                'error': str(e),
            }
            return Response(error_body, status=status.HTTP_400_BAD_REQUEST)
class ListAgentUserView(generics.ListAPIView):
    """GET: list all customer accounts; permitted for admins and agents."""
    permission_classes = (IsAuthenticated, IsAdminOrAgent,)
    authentication_classes = (JSONWebTokenAuthentication,)
    serializer_class = ListUserSerializer
    queryset = User.objects.filter(is_customer=True)
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        serializer = self.serializer_class(queryset, many=True)
        return Response(serializer.data)
class ListAdminUserView(generics.ListAPIView):
    """GET: list all customer and agent accounts; admin-only."""
    permission_classes = (IsAuthenticated, IsAdmin,)
    authentication_classes = (JSONWebTokenAuthentication,)
    serializer_class = ListUserSerializer
    queryset = User.objects.filter(Q(is_customer=True) | Q(is_agent=True))
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        serializer = self.serializer_class(queryset, many=True)
        return Response(serializer.data)
class ListApprovalsView(generics.ListAPIView):
    """GET: list agent accounts still awaiting admin approval; admin-only."""
    permission_classes = (IsAuthenticated, IsAdmin,)
    authentication_classes = (JSONWebTokenAuthentication,)
    serializer_class = ListUserSerializer
    queryset = User.objects.filter(is_agent=True, is_approved=False)
    def list(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        serializer = self.serializer_class(queryset, many=True)
        return Response(serializer.data)
class ApproveDeleteAgentView(APIView):
    """PUT: approve an agent account; DELETE: remove it. Admin-only."""
    permission_classes = (IsAuthenticated, IsAdmin,)
    authentication_classes = (JSONWebTokenAuthentication,)
    def get_object(self, pk):
        # Translate a missing user into a standard 404.
        try:
            return User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404
    def put(self, request, pk):
        """Mark the agent identified by *pk* as approved via ApproveAgentSerializer."""
        instance = self.get_object(pk)
        serializer = ApproveAgentSerializer(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            response = {
                "success": True,
                "message": f"Agent id {pk} has been approved"
            }
            return Response(response, status=status.HTTP_200_OK)
        response = {
            "success": False,
            "message": "Could not approve agent"
        }
        return Response(response, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk):
        """Delete the agent identified by *pk*."""
        instance = self.get_object(pk)
        try:
            instance.delete()
            response = {
                "success": True,
                "message": f"Agent id {pk} has been deleted"
            }
            return Response(response, status=status.HTTP_200_OK)
        except Exception as e:
            # NOTE(review): the caught exception `e` is not logged or returned.
            response = {
                "success": False,
                "message": "Could not delete agent"
            }
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
| tanmaypardeshi/Loan-Management-System | backend/user/views.py | views.py | py | 7,044 | python | en | code | 6 | github-code | 13 |
31126770234 | import pandas as pd
path = "/Users/davidaxelrod/Documents/solarcities/census.csv"
# Make a row iterator (this will go row by row)
from app import models
import csv
# Load the census CSV and create one City row per record.
# Bug fix: the file was opened in binary mode and each row passed through
# str(), which renders bytes as "b'...'" and corrupts the first and last
# fields of every record. Read the file as text and parse it with the csv
# module, which also copes with quoted fields containing commas.
# (The unused `head` flag and `escapes` string were removed.)
with open(path, "r", newline="", encoding="utf-8") as f:  # assumes UTF-8 data -- TODO confirm
    reader = csv.reader(f)
    next(reader, None)  # skip the header row
    for data in reader:
        print("{} {} {}".format(data[8], data[9], data[17]))
        models.City.objects.create(name=data[8], state=data[9], population=data[17])
| daxaxelrod/solarcities | app/census_creation.py | census_creation.py | py | 507 | python | en | code | 0 | github-code | 13 |
13377921481 | import pandas as pd
import matplotlib.pyplot as plt
# Plot likes against timestamp for the first 50 rows of the export.
frame = pd.read_csv('sherry.csv')
# Slice the first 50 rows, then reverse so the oldest point is plotted first.
likes = frame["likes"].tolist()[:50][::-1]
times = frame["time"].tolist()[:50][::-1]
plt.scatter(times, likes)
plt.xlabel('timestamp->')
plt.ylabel('likes->')
plt.title('Insta')
plt.show()
| tlylt/Social-Media-Dashboard | read_insta.py | read_insta.py | py | 301 | python | en | code | 1 | github-code | 13 |
12349416124 | import pygame, sys, os
from pygame.locals import *
from mainWindow import Window
# Attempting to recreate a Snake clone
BLACK = (0, 0, 0)  # RGB used to clear the frame each tick
window_width = 500
window_height = 500
pygame.init()
fps = 30  # frame-rate cap enforced by fpsClock.tick()
fpsClock = pygame.time.Clock()
main_surface = pygame.display.set_mode((window_width, window_height))
# Creating block
path_2_block_img = r"Assets_snake\wall.png"
block = pygame.image.load(path_2_block_img)
block_surface = block.get_rect()  # rect gives the blit position (defaults to top-left)
# Creating Snake
# Creating berry
# game loop
while True:
    # Clear the frame, then draw the wall block.
    main_surface.fill(BLACK)
    main_surface.blit(block, block_surface)
    # events
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
    fpsClock.tick(fps)
| AlbertoEngineersEverything/pygaming | Chapter_16/Snake.py | Snake.py | py | 764 | python | en | code | 0 | github-code | 13 |
18889537906 | from django.urls import path
from . import views
# Routes for the products app: category browsing, a product detail/design
# page, and admin-style add/edit/delete views keyed by product id.
urlpatterns = [
    path('categories', views.product_categories, name='product_categories'),
    path('categories/products', views.all_products, name='products'),
    path('<int:product_id>/', views.design_product, name='design_product'),
    path('add/', views.add_product, name='add_product'),
    path('edit/<int:product_id>/', views.edit_product, name='edit_product'),
    path('delete/<int:product_id>/', views.delete_product, name='delete_product'),
] | natalijabujevic0708/DesignYourCrafts | products/urls.py | urls.py | py | 507 | python | en | code | 0 | github-code | 13 |
31112195902 | #Abrir uma sequência de imagens coloridas, transformar para tom de cinza cada imagem e obtenha os momentos centrais de todas estas imagens. Imprima os resultados de cada imagem em um arquivo e na tela do prompt de comandos. Cada linha do arquivo gerado deve representar os atributos obtidos em uma imagem.
import cv2
import csv
import os
import glob
def extrair_momentos_centrais(imagens):
    """Convert each image to grayscale and collect its seven central moments.

    Returns one [mu20, mu11, mu02, mu30, mu21, mu12, mu03] row per image path.
    """
    print('[INFO] Extracting central moments.')
    total = len(imagens)
    features = []
    for idx, caminho in enumerate(imagens, start=1):
        print('[INFO] Extracting features of image {}/{}'.format(idx, total))
        gray = cv2.cvtColor(cv2.imread(caminho), cv2.COLOR_BGR2GRAY)
        m = cv2.moments(gray)
        features.append([m['mu20'], m['mu11'], m['mu02'], m['mu30'], m['mu21'], m['mu12'], m['mu03']])
        print('\n')
    return features
def salvar_resultado(extractor_name, features):
    """Print each feature vector and write them all to '<extractor_name>.csv'.

    Bug fix: the output file is now opened with newline='' as required by the
    csv module; without it, csv.writer emits an extra blank line between rows
    on Windows.
    """
    for vector in features:
        print(vector)
    with open(extractor_name + '.csv', 'w', newline='') as outfile:
        writer = csv.writer(outfile)
        writer.writerows(features)
if __name__ == '__main__':
    # Collect every .jpg in the sequence folder, extract central moments,
    # and persist them as one CSV row per image.
    pasta = 'seq_img/'
    caminho = glob.glob(os.path.join(pasta, '*.jpg'))
    features = extrair_momentos_centrais(caminho)
    salvar_resultado('momentos_centrais', features)
| VivianeSouza923/ComputerVisionPy_Lapisco | 47/questão47.py | questão47.py | py | 1,389 | python | pt | code | 0 | github-code | 13 |
19219089807 | n = int(input())
# Greedy "word math": each letter stands for a digit 0-9; maximize the sum of
# the n input words by weighting every letter with its total positional value
# (10^position summed over all occurrences), then assigning the largest
# digits (9, 8, ...) to the heaviest letters.
answer = 0
saving = {}   # letter -> accumulated positional weight
values = []
num = 9       # next (largest remaining) digit to hand out
for _ in range(n):
    word = input()
    for s in range(len(word)):
        if word[s] in saving:
            saving[word[s]] += 10 ** (len(word) - 1 - s)
        else:
            saving[word[s]] = 10 ** (len(word) - 1 - s)
# print(saving)
for i in saving.values():
    values.append(i)
values.sort(reverse = True)
#print(values)
# Assign digits 9 downward to the letters with the largest weights.
for j in values:
    answer += num * j
    num -= 1
print(answer) | Choi-Jiwon-38/WINK-algorithm-study | week 7/단어 수학.py | 단어 수학.py | py | 491 | python | en | code | 0 | github-code | 13 |
33627649681 |
from django.urls import path
from . import views
app_name = "staking"
urlpatterns = [
path("", views.IndexView, name="index"),
path("stake/", views.StakeView, name="stake"),
path("stake/metamask/", views.StakeWithMView, name="stake_metamask"),
path("stake/metamask/pay/", views.StakeWithM2View, name="stake_metamask2"),
path("my-stakes/", views.MyStakesView, name="my_stakes"),
path("make-payment/<int:staking_id>/", views.MakePaymentView, name="make_payment"),
path("confirm-payment/<int:staking_id>/", views.ConfirmPaymentView, name="confirm_payment"),
path("request-payment/<int:staking_id>/", views.RequestPaymentView, name="request_payment"),
]
| lurdray/aibra.io-version2- | stake/urls.py | urls.py | py | 668 | python | en | code | 0 | github-code | 13 |
35205361689 | from util import aoc
def look_and_say(model):
    """Apply one look-and-say step: each run of identical characters becomes
    '<run length><character>'."""
    pieces = []
    i = 0
    while i < len(model):
        # Find the end of the run starting at i.
        j = i
        while j < len(model) and model[j] == model[i]:
            j += 1
        pieces.append(str(j - i))
        pieces.append(model[i])
        i = j
    return "".join(pieces)
def part_one(model):
    """Length of the sequence after 40 look-and-say iterations."""
    current = model
    for _step in range(40):
        current = look_and_say(current)
    return len(current)
def part_two(model):
    """Length of the sequence after 50 look-and-say iterations."""
    current = model
    for _step in range(50):
        current = look_and_say(current)
    return len(current)
if __name__ == "__main__":
aoc.solve(
__file__,
None,
part_one,
part_two,
)
| barneyb/aoc-2023 | python/aoc2015/day10/elves_look_elves_say.py | elves_look_elves_say.py | py | 629 | python | en | code | 0 | github-code | 13 |
1378175911 | # Import the required libraries
from tkinter import *
from tkinter import messagebox
# Create an instance of tkinter frame or window
win = Tk()
# Set the size of the tkinter window
win.geometry("700x350")
# Bug fixes versus the original script:
#   * `cal_sub` was defined twice and the second definition multiplied, so
#     subtraction was silently shadowed by multiplication.
#   * The buttons referenced `sub_sum`, `mul_sum` and `div_sum`, which were
#     never defined, so the script crashed with NameError at startup.
def _read_operands():
    """Return the two entry values as ints, or None after showing an error."""
    try:
        return int(a.get()), int(b.get())
    except ValueError:
        messagebox.showerror("Error", "Please enter two whole numbers")
        return None
def cal_sum():
    """Show the sum of the two entered numbers."""
    nums = _read_operands()
    if nums is not None:
        label.config(text=nums[0] + nums[1])
def cal_sub():
    """Show the difference of the two entered numbers."""
    nums = _read_operands()
    if nums is not None:
        label.config(text=nums[0] - nums[1])
def cal_mul():
    """Show the product of the two entered numbers."""
    nums = _read_operands()
    if nums is not None:
        label.config(text=nums[0] * nums[1])
def cal_div():
    """Show the quotient of the two entered numbers, guarding divide-by-zero."""
    nums = _read_operands()
    if nums is None:
        return
    if nums[1] == 0:
        messagebox.showerror("Error", "Cannot divide by zero")
    else:
        label.config(text=nums[0] / nums[1])
# Build the form: two labelled entry boxes and a result label.
t1 = Label(win, text="Enter First Number", font=('Calibri 20'))
t1.pack()
a = Entry(win, font='Calibri 15', width=35)
a.pack()
t2 = Label(win, text="Enter Second Number", font=('Calibri 20'))
t2.pack()
b = Entry(win, font='Calibri 15', width=35)
b.pack()
label = Label(win, text="Total Sum : ", font=('Calibri 20'))
label.pack(pady=20)
# One button per operation, each wired to a callback that actually exists.
Button(win, text="Calculate Sum", font='Calibri 15', command=cal_sum).place(x=200, y=200)
Button(win, text="Calculate Sub", font='Calibri 15', command=cal_sub).place(x=350, y=200)
Button(win, text="Calculate mul", font='Calibri 15', command=cal_mul).place(x=200, y=270)
Button(win, text="Calculate div", font='Calibri 15', command=cal_div).place(x=350, y=270)
win.mainloop()
| Parth9780/Backend_12-SEP | Python 12_Sep/Practis/Tkinter/TTk.py | TTk.py | py | 1,784 | python | en | code | 0 | github-code | 13 |
33626996203 | from importlib import metadata
# NOTE: importing to have the types registered
import h5pyckle.interop_builtins
import h5pyckle.interop_numpy # noqa: F401
from h5pyckle.base import (
PickleGroup,
dump,
dump_sequence_to_group,
dump_to_attribute,
dump_to_group,
dumper,
load,
load_by_pattern,
load_from_attribute,
load_from_group,
load_from_type,
load_group_as_dict,
loader,
)
from h5pyckle.decorator import h5pyckable
# Package version is resolved from the installed distribution metadata.
__version__ = metadata.version("h5pyckle")
# Public API re-exported from h5pyckle.base and h5pyckle.decorator.
__all__ = (
    "PickleGroup",
    "dump",
    "dump_sequence_to_group",
    "dump_to_attribute",
    "dump_to_group",
    "dumper",
    "h5pyckable",
    "load",
    "load_by_pattern",
    "load_from_attribute",
    "load_from_group",
    "load_from_type",
    "load_group_as_dict",
    "loader",
)
| alexfikl/h5pyckle | h5pyckle/__init__.py | __init__.py | py | 815 | python | en | code | 0 | github-code | 13 |
11192471439 | from PIL import Image, ImageFilter, ImageOps
# Crop the cat's body, blur it, mirror it, then convert to grayscale.
img = Image.open('./cat.jpg')
img = img.crop((200, 125, 420, 310)).filter(ImageFilter.GaussianBlur(6))
img = ImageOps.mirror(img).convert(mode='L')
img.show() | AshuAhlawat/Python | Modules/Pillow/multiple.py | multiple.py | py | 370 | python | en | code | 1 | github-code | 13 |
20280063552 | import os
import pandas as pd
import numpy as np
import xlrd
from django.http import HttpResponse
from django.shortcuts import render
from openpyxl import load_workbook
from elucidata import models
# Create your views here.
def ques1(request):
    """POST: split an uploaded spreadsheet's rows into PC / LPC / plasmalogen
    sheets (by 'Accepted Compound ID' suffix) and return the workbook as a
    download; GET renders the upload form."""
    context_dict = {}
    if request.method == 'POST':
        try:
            my_file = models.File()
            if "file" in request.FILES:
                my_file.media = request.FILES["file"]
                name = request.FILES["file"].name  # NOTE(review): unused
                excel_file = my_file.media
                df = pd.read_excel(excel_file)
                df["Accepted Compound ID"] = df["Accepted Compound ID"].astype(str)
                # Rows ending in 'LPC'; rows ending in 'PC' but not 'LPC'; plasmalogens.
                end_with_lpc = df[df["Accepted Compound ID"].map(lambda x: x.endswith('LPC'))]
                df1 = df[df["Accepted Compound ID"].map(lambda x: x.endswith('PC'))]
                end_with_pc = df1[~df1["Accepted Compound ID"].map(lambda x: x.endswith('LPC'))]
                end_with_plasmalogen = df[df["Accepted Compound ID"].map(lambda x: x.endswith('plasmalogen'))]
                print("Accepted Compound ID : Ends with PC")
                print(end_with_pc)
                print("Accepted Compound ID : Ends with LPC")
                print(end_with_lpc)
                print("Accepted Compound ID : Ends with PLASMALOGEN")
                print(end_with_plasmalogen)
                # Write the raw data plus one sheet per category back over the upload.
                writer = pd.ExcelWriter(excel_file, engine = 'xlsxwriter')
                df.to_excel(writer, sheet_name='Raw Data')
                end_with_pc.to_excel(writer, sheet_name = 'PC')
                end_with_lpc.to_excel(writer, sheet_name= 'LPC')
                end_with_plasmalogen.to_excel(writer, sheet_name= 'Plasmalogen')
                writer.save()
                writer.close()
                my_file.save()
                fname = my_file.media.name
                path = 'media/'+fname
                print(path)
                if os.path.exists(path):
                    # NOTE(review): .xlsx is binary; opening with mode "r" (text)
                    # will likely raise UnicodeDecodeError -- should be "rb".
                    with open(path, "r") as excel:
                        data = excel.read()
                        response = HttpResponse(data,content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
                        response['Content-Disposition'] = 'attachment; filename=ques1.xlsx'
                    if os.path.isfile(path):
                        os.remove(path)
                    return response
        except Exception as e:
            # Any failure falls through to re-rendering the form.
            print(e)
    return render(
        request, "ques1.html", context_dict
    )
def ques2(request):
    """POST: add a rounded retention-time column to the uploaded spreadsheet
    and return the result as a download; GET renders the upload form."""
    context_dict = {}
    if request.method == 'POST':
        try:
            my_file = models.File()
            if "file" in request.FILES:
                my_file.media = request.FILES["file"]
                excel_file = my_file.media
                df = pd.read_excel(excel_file)
                # Round each retention time to the nearest whole minute.
                df["Retention Time Roundoff (in mins)"] = df['Retention time (min)'].apply(np.round)
                df.to_excel("./media/files/ques2.xlsx", index=False);
                path = 'media/files/ques2.xlsx'
                if os.path.exists(path):
                    # NOTE(review): .xlsx is binary; opening with mode "r" (text)
                    # will likely raise UnicodeDecodeError -- should be "rb".
                    with open(path, "r") as excel:
                        data = excel.read()
                        response = HttpResponse(data,content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
                        response['Content-Disposition'] = 'attachment; filename=ques2.xlsx'
                    if os.path.isfile(path):
                        os.remove(path)
                    return response
        except Exception as e:
            print(e)
    return render(
        request, "ques2.html", context_dict
    )
def ques3(request):
    """POST: group the uploaded spreadsheet by the rounded retention-time
    column and print the per-group non-null counts (console only); GET
    renders the form."""
    context_dict = {}
    if request.method == 'POST':
        try:
            my_file = models.File()
            if "file" in request.FILES:
                my_file.media = request.FILES["file"]
                excel_file = my_file.media
                df = pd.read_excel(excel_file)
                # NOTE(review): the result is only printed, never returned to the page.
                df1 = df.groupby('Retention Time Roundoff (in mins)').mean().count()
                print(df1)
        except Exception as e:
            print (e)
    return render(
        request, "ques3.html", context_dict
    )
def ques4(request):
    """POST: read an uploaded CSV and print the row-wise mean of each
    consecutive group of three columns starting at column index 2; GET
    renders the form."""
    context_dict = {}
    if request.method == 'POST':
        try:
            my_file = models.File()
            if "file" in request.FILES:
                my_file.media = request.FILES["file"]
                csv_file = my_file.media
                df = pd.read_csv(csv_file)
                # Bug fix: DataFrame.ix was deprecated in pandas 0.20 and removed
                # in pandas 1.0; use positional indexing with .iloc instead.
                print(pd.concat([df.iloc[:, i:i + 3].mean(axis=1) for i in range(2, len(df.columns), 3)], axis=1))
        except Exception as e:
            print(e)
    return render(
        request, "ques4.html", context_dict
    )
| saket9000/Elucidata | elucidata/views.py | views.py | py | 3,692 | python | en | code | 0 | github-code | 13 |
33586549169 |
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Bidirectional
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.layers import Reshape
from models.model import NNBaseModel
class SimpleDense(NNBaseModel):
    """Baseline text classifier: embedding -> average pooling -> two dense layers."""
    def train(self):
        """Build and fit the model on self.X_train / self.y_train (100 epochs)."""
        self.model = Sequential()
        self.model.add(Embedding(self.vocab_size, 16))
        self.model.add(GlobalAveragePooling1D())
        self.model.add(Dense(16, activation=tf.nn.relu))
        self.model.add(Dense(self.output_size, activation=tf.nn.sigmoid))
        print(self.model.summary())
        # NOTE(review): sparse_categorical_crossentropy is usually paired with a
        # softmax output rather than sigmoid -- confirm this is intentional.
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit(
            self.X_train,
            self.y_train,
            epochs=100,
            batch_size=64,
            verbose=1
        )
class BiLSTM(NNBaseModel):
    """Text classifier: frozen pretrained embeddings feeding two stacked bidirectional LSTMs."""
    def train(self):
        """Build the embedding matrix from self.word2vec, then fit the model (100 epochs)."""
        batch_size = 64
        units = 100
        # One 100-dim row per vocabulary index; words missing from word2vec stay zero.
        embedding_matrix = np.zeros((self.vocab_size, 100))
        for word, index in self.tk.word_index.items():
            # NOTE(review): assumes every tokenizer index < self.vocab_size,
            # otherwise this raises IndexError -- confirm how vocab_size is set.
            embedding_vector = self.word2vec.get(word)
            if embedding_vector is not None:
                embedding_matrix[index] = embedding_vector
        self.model = Sequential()
        self.model.add(
            Embedding(self.vocab_size, units, weights=[embedding_matrix], trainable=False)
        )
        self.model.add(Bidirectional(LSTM(units, return_sequences=True, dropout=0.2)))
        self.model.add(Bidirectional(LSTM(units, dropout=0.2)))
        self.model.add(Dense(self.output_size, activation='sigmoid'))
        print(self.model.summary())
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit(
            self.X_train,
            self.y_train,
            epochs=100,
            batch_size=batch_size,
            verbose=1
) | vahedq/rumors | models/dl.py | dl.py | py | 1,994 | python | en | code | 6 | github-code | 13 |
42148509924 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try:
long_description = open("README.md").read()
except IOError:
long_description = ""
setup(
name="openwhisk_docker_action",
version="0.1.7",
description="A class to make writing openwhisk docker actions easier to write in python",
license="MIT",
author="Joshua B. Smith",
author_email='kognate@gmail.com',
url='https://github.com/kognate/openwhisk_docker_action',
packages=find_packages(),
install_requires=[ 'flask' ],
long_description=long_description,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
]
)
| kognate/openwhisk_docker_action | setup.py | setup.py | py | 692 | python | en | code | 0 | github-code | 13 |
37197156044 | import pymongo, time
# Ad-hoc MongoDB exploration script: connects to a cluster, runs a regex
# query against the `hive1.big_t` collection and prints the first matches.
# NOTE(review): credentials are hard-coded in plain text below — move them
# to environment variables or a config file before sharing this script.
import sys
# import scrapyd_api
# from scrapyd_api import ScrapydAPI
# import scrapyd_api
# start = time.time()
# client = pymongo.MongoClient("mongodb://zhubo:zb52971552@101.132.117.61:27017/admin") # Alice
# client = pymongo.MongoClient("mongodb://zhubo:zb52971552@203.195.224.50:27017/admin") # Arthur
client = pymongo.MongoClient("mongodb://hadoop:hadoop@192.168.11.33:27017/admin") # hadoop3
# client = pymongo.MongoClient(host='129.28.67.74', port=27017)
# client = pymongo.MongoClient(host='198.181.32.215', port=27017)
# client = pymongo.MongoClient(host='101.132.117.61', port=27017)
# client.admin.authenticate('zhubo', '529715') # user authentication; users are scoped per database, so you must authenticate against the matching database
# a = client.list_database_names() # list all database names
# db = client['tutorial'] # select a database
db = client['hive1'] # select the working database
# db.create_collection('students') # create a collection
# b = db.list_collection_names() # list all collections in this database
# account = db.get_collection(db.list_collection_names()[0]) # select a collection
# account = db['51job1'] # select a collection
collection = db['big_t']
# i1 = account.find_one() # fetch a single document from the collection
# i = account.find(projection={'_id': False}).limit(10)
# end = time.time()
# print(a)
# print(account)
# print(i.next())
# for j in i:
#     print(j)
#
# filename = sys.argv[1]
#
# with open(filename, 'r', encoding='utf-8') as f:
#     for line in f.readlines():
#         info_d = {}
#         info_d.setdefault('info', line.strip())
#
#         collection.insert(info_d)
#     f.close()
# search = {'$or': [
#     {'j_cate_s': {'$in': ['python', 'java']}},
#     {'j_responsibilities': {'$regex': '.*职责.*'}}
# ]}
# Query: documents whose `info` field matches all three lookahead patterns.
search = {
    'info': {'$regex': '^(?=.*大数据)(?=.*美女)(?=.*python).*'},
}
res = collection.find(search)
count = 0
# NOTE(review): `sum` shadows the builtin sum(); it is only used by the
# commented-out averaging code below.
sum = 0
# Print the first 101 matching documents (the break fires once count > 100).
for i in res:
    print(i)
    count += 1
    if count > 100:
        break
# for i in res:
#     count += 1
#     sum += float(i['salary_range'])
#
#
# print(sum/count)
# sum = 0
# f = open('amazon.txt', 'w', encoding='UTF-8')
# for item in db['project'].find(projection={'_id': False}):
#     sum += 1
#     for key in item:
#         if item[key]:
#             f.write(item[key].strip() + '\t')
#         else:
#             f.write('数据缺失' + '\t')
#     f.write('\n')
# f.close()
# print(sum)
# print(end - start)
# db.drop_collection('QuoteItem')
client.close()
# db.drop_collection('students') # drop a collection
# client = pymongo.MongoClient(host='127.0.0.1', port=27017)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 1 14:03:09 2022
@author: jonwinkelman
"""
import pysam
import os
# BUG FIX: the module is used below as `pd.DataFrame(...)`, so it must be
# imported under the conventional pandas alias; `import pd` raises
# ModuleNotFoundError at import time.
import pandas as pd

# Example input used during development; callers pass their own path to the
# functions below.
filepath = '/Users/jonwinkelman/Dropbox/Trestle_projects/Eustaquio_lab/Epigenetics_Project/RNAseq_bam_files/BAN-1.bam'
def get_bam_pairs(filename):
    """Collect mated read pairs from a BAM file.

    Iterates the alignments in `filename` and pairs each read-2 with the most
    recently seen read-1; when their query names match, the pair is recorded.

    Parameters:
        filename (str): path to a coordinate- or name-sorted BAM file

    Returns:
        dict: query_name -> [read1, read2] (pysam.AlignedSegment objects)
    """
    # FIX: removed a stray debug print on flag 83 and close the file via a
    # context manager instead of leaking the handle.
    pair_dict = {}
    read1 = None
    read2 = None
    with pysam.AlignmentFile(filename, 'rb') as samfile:
        for read in samfile:
            # Skip reads that cannot form a usable pair.
            if not read.is_paired or read.mate_is_unmapped or read.is_duplicate:
                continue
            if read.is_read2:
                read2 = read
            else:
                read1 = read
                continue
            # Only reached right after a read-2 was stored.
            if read1 is not None and read1.query_name == read2.query_name:
                pair_dict[read1.query_name] = [read1, read2]
    return pair_dict
# Default processing parameters (module level; mirrored by the keyword
# arguments of basic_processing_fr_firststrand below).
min_length=0  # minimum template length to keep
max_length=1000  # maximum template length to keep
offset = 1  # 1-based offset from the template 3' end
pysam_arg = 'rb'  # pysam.AlignmentFile mode (read binary BAM)
reverse_stranded = False  # NOTE(review): currently unused in this module
fiveprime_end = True  # select flags 147/163 (5' end) rather than 99/83 (3' end)
def basic_processing_fr_firststrand(min_length, max_length, filepath, offset = 16,
                                    pysam_arg = 'rb', fiveprime_end = True):
    """
    Write per-contig tab-separated tables of read endpoints from a paired-end
    BAM file, plus a log file and a CSV summary of read counts.

    flags: 147 rna is on the '-' strand, is read2, fastq seq is rna seq

    Parameters:
        min_length (int): min length of the template that will be kept
            (NOTE(review): currently not applied as a filter below)
        max_length (int): max length of the template that will be kept
            (NOTE(review): currently not applied as a filter below)
        filepath (str): path to bam file aligned by bowtie2
        offset (int): number of bases back from template 3prime end, 1-based coordinates

    Some notes:
        r.seq is always + strand reference sequence, not necessarily the sequence of the read
        rna 3' refers to the template rna, not necessarily the read.
        flags 147 and 163 indicate that they are reads from the reverse sequencing reaction;
        thus the 3' of the template rna will be early in the read and of higher quality
        *offset=15 selects the 15th residue back from the 3prime end of template rna, i.e. there
        are 14 residues that are ommited from the 3' end
    """
    # FIX: the original had a stray `pass` before the docstring, which made the
    # docstring a dead expression and the rest of the body unreachable in intent.
    if fiveprime_end:
        flags = [147, 163]
    else:
        flags = [99, 83]
    if offset == 0:
        raise Exception('there is no zeroth residue to select, this is 1-based numbering')
    samfilefull = pysam.AlignmentFile(filepath, pysam_arg)
    new_filename = filepath.split('/')[-1].split('.')[0] + '_filtered'
    if not os.path.isdir('./results/analysis'):
        os.makedirs('./results/analysis')
    if not os.path.isdir('./results/analysis/raw_filtered'):
        os.makedirs('./results/analysis/raw_filtered')
    for contig in samfilefull.references:
        new_path = f'./results/analysis/raw_filtered/{contig}_{new_filename}_{offset}_offset.txt'
        if os.path.isfile(new_path):
            raise Exception(f'the file {new_path} already exists')
        print(f'Creating file for {contig}')
        samfile = samfilefull.fetch(contig=contig)
        with open(new_path, 'w') as f:
            mapped = 0
            proper_pair = 0
            f.write('polarity' + '\t' + 'sequence' + '\t' + 'rna_5prime' + '\t' + 'temp_len' + '\n')
            for i, r in enumerate(samfile):
                template_length = abs(r.tlen)
                if r.is_proper_pair:
                    proper_pair += 1
                if r.is_mapped:
                    mapped += 1
                positions = r.get_reference_positions()
                positions = [p + 1 for p in positions]  # convert to 1-based numbering
                # - strand genes
                if r.flag == flags[0]:  # r.seq is reverse complement of actual read
                    f.write('\t'.join(['-', r.seq, str(positions[-1]), str(template_length) + '\n']))
                # + strand genes
                elif r.flag == flags[1]:
                    f.write('\t'.join(['+', r.seq, str(positions[0]), str(template_length) + '\n']))
    samfilefull.close()
    source_name = filepath.split('/')[-1]
    # write log file
    # NOTE(review): this runs AFTER the contig loop, so it logs counters from
    # the last contig only, under that contig's name — confirm this is intended.
    # NOTE(review): `i` is the last 0-based index, i.e. read count - 1.
    with open(f'./results/analysis/raw_filtered/{contig}_{new_filename}_{offset}.log', 'w') as f:
        f.write(f'an offset of {offset} was added into file\n')
        f.write(f'{source_name} contained {i} total reads\n')
        f.write(f'of {i} total reads, { (proper_pair/i)*100 }% were part of a proper pair\n')
    df = pd.DataFrame()
    # BUG FIX: `offset` was missing from `data`, so zip() silently misaligned
    # every column ('offset' received the read count, 'mapped' was dropped).
    data = [source_name, offset, i, proper_pair, mapped]
    columns = ['source_name', 'offset', 'total_reads', 'proper_pair', 'mapped']
    for column, d in zip(columns, data):
        df[column] = [d]
    df.to_csv(f'./results/analysis/raw_filtered/{new_filename}_{offset}.log.csv')
    # return template_3
import bpy, subprocess,ast, re
from bpy.app.handlers import persistent
from bpy.types import Operator
import datetime
class RENTASKLIST_OT_probar_modaltimer(Operator):
    """Blender modal-timer operator.

    While `probar_active` is set on the scene's render-task settings, the
    operator keeps a window-manager timer alive and periodically redraws the
    current region so the progress-bar UI stays up to date.
    """
    bl_idname = "rentask.probar_modal_timer"
    bl_label = "Modal Timer Operator"
    # Window-manager timer handle, created in execute() and removed in modal().
    _timer = None
    def modal(self, context, event):
        sc = bpy.context.scene
        props = sc.rentask.rentask_main
        colle = sc.rentask.rentask_colle
        if not props.probar_active:
            context.window_manager.event_timer_remove(self._timer) # remove the timer once monitoring is switched off
            return {'FINISHED'}
        if event.type == 'TIMER': # fires on every timer tick (every n seconds)
            # for item in colle:
            # 	if not item.complete:
            # 		probar_update_status(item)
            if bpy.context.region:
                bpy.context.region.tag_redraw()
        return {'PASS_THROUGH'}
    def execute(self, context):
        sc = bpy.context.scene
        props = sc.rentask.rentask_main
        wm = context.window_manager
        # prefs = context.preferences.addons[__name__.partition('.')[0]].preferences
        # Set up a timer that ticks every n seconds (the interval was previously
        # taken from the add-on preferences; currently fixed at 1 second).
        # self._timer = wm.event_timer_add(prefs.probar_monitoring_interval, window=context.window)
        self._timer = wm.event_timer_add(1, window=context.window)
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}
num_oper = input().split("-")
nums = []
for i in range(len(num_oper)):
nums.append(num_oper[i].split("+"))
# print(nums)
first = 0
minus = 0
for i in range(0, len(nums)):
for j in range(len(nums[i])):
if i == 0:
first = first + int(nums[i][j])
else:
minus = minus + int(nums[i][j])
print(first - minus)
import os
from dotenv import load_dotenv
from langchain.llms import LlamaCpp
import logging
import spacy
load_dotenv()
class Configuration:
    """Process-wide singleton bundling runtime configuration.

    Holds environment-derived credentials, the LlamaCpp language model, a
    French spaCy pipeline, HTTP headers, the news category list and a
    configured logger. The first Configuration() call performs all
    initialisation; every later call returns the same object.
    """

    _instance = None

    def __new__(cls):
        # Create and initialise the shared instance exactly once.
        if cls._instance is None:
            cls._instance = super(Configuration, cls).__new__(cls)
            cls._instance.init_variables()
            cls._instance.init_model()
            cls._instance.init_misc()
            cls._instance.init_logger()
        return cls._instance

    def init_variables(self):
        """Read API keys, credentials and the model path from the environment."""
        env = os.environ.get
        self.news_api_key = env("NEWS_API_KEY")
        self.pexels_api_key = env("PEXELS_API_KEY")
        self.api_url = env("API_URL")
        self.email = env("EMAIL")
        self.password = env("PASSWORD")
        self.model = env("MODEL")

    def init_model(self):
        """Load the local LlamaCpp model and the French spaCy pipeline."""
        self.llm = LlamaCpp(
            model_path=self.model,
            temperature=0.7,
            top_p=0.95,
            n_ctx=4000,
            max_tokens=2048,
        )
        self.nlp = spacy.load("fr_core_news_sm")

    def init_misc(self):
        """Set up default request headers and the list of news categories."""
        self.headers = {
            "Referer": "http://www.google.com",
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0",
        }
        self.categories = [
            "business",
            "entertainment",
            "general",
            "health",
            "science",
            "sports",
            "technology",
        ]

    def init_logger(self):
        """Configure a DEBUG logger writing both to LOG_FILE and to the console."""
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        handlers = (
            logging.FileHandler(os.environ.get("LOG_FILE")),
            logging.StreamHandler(),
        )
        for handler in handlers:
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(fmt)
            self.logger.addHandler(handler)
config = Configuration()
import numpy as np
import pandas as pd
import scipy.ndimage as nd
from imageio import imsave as imsave2d
from timagetk.components import SpatialImage
from timagetk.io import imsave
from timagetk.algorithms.trsf import allocate_c_bal_matrix, apply_trsf, create_trsf
from timagetk.algorithms.reconstruction import pts2transfo
from timagetk.wrapping.bal_trsf import TRSF_TYPE_DICT
from timagetk.wrapping.bal_trsf import TRSF_UNIT_DICT
from tissue_nukem_3d.epidermal_maps import compute_local_2d_signal, nuclei_density_function
from sam_spaghetti.sam_sequence_loading import load_sequence_signal_images, load_sequence_signal_data, load_sequence_primordia_data, load_sequence_segmented_images
from sam_spaghetti.sam_sequence_primordia_alignment import golden_angle
from time import time as current_time
from copy import deepcopy
import logging
def sequence_aligned_signal_images(sequence_name, image_dirname, save_files=False, signal_names=None, filenames=None,microscope_orientation=-1, verbose=False, debug=False, loglevel=0):
    """Resample every signal image of a sequence into the aligned (meristem-centered) frame.

    For each time point, a rigid transform is estimated (pts2transfo) between the
    L1 nuclei positions in image coordinates and their 'aligned_*' coordinates,
    with an optional Y-axis reflection when the fitted transform is indirect,
    and then applied to each signal image of that time point.

    Parameters
    ----------
    sequence_name : str — sequence identifier used to locate images and data
    image_dirname : str — root directory of per-sequence image folders
    save_files : bool — if True, write each aligned image as .inr.gz next to its source
    signal_names : list of str or None — signals to align (defaults to all loaded ones)
    filenames : list of str or None — time-point file names (defaults to all; last 2 chars assumed to encode the hour)
    microscope_orientation : int — +1/-1 axis orientation factor applied to point coordinates
    verbose, debug, loglevel : logging controls forwarded to the loaders

    Returns
    -------
    dict : signal_name -> {filename -> aligned SpatialImage}
    """
    signal_images = load_sequence_signal_images(sequence_name, image_dirname, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    signal_data = load_sequence_signal_data(sequence_name, image_dirname, normalized=True, aligned=True, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    if signal_names is None:
        signal_names = list(signal_images.keys())
    logging.info("".join(["  " for l in range(loglevel)]) + "--> Computing aligned signal images " + str(signal_names))
    if filenames is None:
        filenames = np.sort(list(signal_images[signal_names[0]].keys()))
    if len(filenames) > 0:
        file_times = np.array([int(f[-2:]) for f in filenames])
    reflections = {}
    alignment_transformations = {}
    aligned_images = {}
    for signal_name in signal_names:
        aligned_images[signal_name] = {}
    for i_time, (time, filename) in enumerate(zip(file_times, filenames)):
        # Use only first-layer (L1) nuclei to estimate the rigid alignment.
        file_data = signal_data[filename]
        file_data = file_data[file_data['layer'] == 1]
        X = file_data['aligned_x'].values
        Y = file_data['aligned_y'].values
        Z = file_data['aligned_z'].values
        img_X = file_data['center_x'].values
        img_Y = file_data['center_y'].values
        img_Z = file_data['center_z'].values
        img_points = np.transpose([img_X, img_Y, img_Z])
        aligned_points = np.transpose([X, Y, Z])
        reference_img = signal_images[signal_names[0]][filename]
        # Target frame center: XY at the image center, Z at 1/8 of the stack depth.
        img_center = (np.array(reference_img.shape) * np.array(reference_img.voxelsize)) / 2.
        img_center[2] = reference_img.shape[2] * reference_img.voxelsize[2] / 8.
        alignment_transformation = pts2transfo(img_center + microscope_orientation * aligned_points, microscope_orientation * img_points)
        # rotation_angle is computed for inspection only; it is not used below.
        rotation_angle = ((180. * np.arctan2(alignment_transformation[1, 0], alignment_transformation[0, 0]) / np.pi) + 180) % 360 - 180
        # A negative XX*YY determinant sign marks an indirect (mirrored) transform.
        reflection = np.sign(alignment_transformation[0, 0] * alignment_transformation[1, 1]) == -1
        if reflection:
            # Re-estimate the transform after flipping the Y coordinate of the points.
            img_points = np.transpose([img_X, microscope_orientation * reference_img.shape[1] * reference_img.voxelsize[1] - img_Y, img_Z])
            alignment_transformation = pts2transfo(img_center + microscope_orientation * aligned_points, microscope_orientation * img_points)
        reflections[filename] = reflection
        alignment_transformations[filename] = alignment_transformation
        alignment_trsf = create_trsf(param_str_2='-identity', trsf_type=TRSF_TYPE_DICT['RIGID_3D'], trsf_unit=TRSF_UNIT_DICT['REAL_UNIT'])
        allocate_c_bal_matrix(alignment_trsf.mat.c_struct, alignment_transformations[filename])
        for i_signal, signal_name in enumerate(signal_names):
            start_time = current_time()
            logging.info("".join(["  " for l in range(loglevel)]) + "  --> Aligning : " + filename + " " + signal_name)
            if filename in signal_images[signal_name].keys():
                if reflections[filename]:
                    # Mirror the image along Y before applying the rigid transform.
                    reflected_image = deepcopy(signal_images[signal_name][filename])
                    reflected_image[:, :] = signal_images[signal_name][filename][:, ::-1, :]
                    aligned_images[signal_name][filename] = apply_trsf(SpatialImage(reflected_image.astype(reference_img.dtype), voxelsize=reference_img.voxelsize), alignment_trsf, param_str_2='-interpolation nearest')
                else:
                    aligned_images[signal_name][filename] = apply_trsf(SpatialImage(deepcopy(signal_images[signal_name][filename]).astype(reference_img.dtype), voxelsize=reference_img.voxelsize), alignment_trsf, param_str_2='-interpolation nearest')
                # Drop stale timagetk metadata inherited from the source image.
                if 'timagetk' in aligned_images[signal_name][filename].metadata.keys():
                    del aligned_images[signal_name][filename].metadata['timagetk']
            logging.info("".join(["  " for l in range(loglevel)]) + "  <-- Aligning : " + filename + " " + signal_name + " [" + str(current_time() - start_time) + " s]")
        if save_files:
            logging.info("".join(["  " for l in range(loglevel)]) + "--> Saving aligned signal images : " + filename + " " + str(signal_names))
            for i_signal, signal_name in enumerate(signal_names):
                image_filename = image_dirname + "/" + sequence_name + "/" + filename + "/" + filename + "_aligned_" + signal_name + ".inr.gz"
                imsave(image_filename, aligned_images[signal_name][filename])
    return aligned_images
def sequence_signal_image_slices(sequence_name, image_dirname, save_files=False, signal_names=None, filenames=None, registered=False, aligned=False, filtering=False, projection_type="L1_slice", reference_name='TagBFP', membrane_name='PI', resolution=None, r_max=120., microscope_orientation=-1, verbose=False, debug=False, loglevel=0):
    """Compute 2D projections of every signal image of a sequence.

    Two projection modes are supported:
    - "L1_slice": sample each 3D image on the curved L1 surface interpolated
      from the nuclei Z positions (one Z per XY grid point);
    - "max_intensity": take the maximum along Z (segmented images use a
      dedicated labelled projection instead).

    When `aligned` is True the sampling grid is a (2*r_max)-wide square in the
    aligned frame; otherwise it covers the image XY extent. Returns
    dict: signal_name -> {filename -> 2D array}. L1-slice results are also
    written to disk when `save_files` is True.
    """
    signal_images = load_sequence_signal_images(sequence_name, image_dirname, registered=registered, verbose=verbose, debug=debug, loglevel=loglevel+1)
    signal_data = load_sequence_signal_data(sequence_name, image_dirname, normalized=aligned or registered, aligned=aligned, verbose=verbose, debug=debug, loglevel=loglevel+1)
    if len(signal_data)==0:
        # Fall back to non-nuclei data when no nuclei tables exist.
        signal_data = load_sequence_signal_data(sequence_name, image_dirname, nuclei=False, aligned=aligned, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    segmented_images = load_sequence_segmented_images(sequence_name, image_dirname, membrane_name=membrane_name, registered=registered, verbose=verbose, debug=debug, loglevel=loglevel+1)
    if len(segmented_images)>0:
        # Segmented stacks are handled as an extra pseudo-signal "<membrane>_seg".
        signal_images[membrane_name+"_seg"] = segmented_images
    if signal_names is None:
        signal_names = list(signal_images.keys())
    logging.info("".join(["  " for l in range(loglevel)])+"--> Computing 2D signal images "+str(signal_names))
    assert reference_name in signal_names
    if filenames is None:
        filenames = np.sort(list(signal_images[reference_name].keys()))
    if len(filenames)>0:
        file_times = np.array([int(f[-2:]) for f in filenames])
    filtered_signal_images = {}
    for signal_name in signal_names:
        filtered_signal_images[signal_name] = {}
    for filename in filenames:
        for signal_name in signal_names:
            signal_img = signal_images[signal_name][filename].get_array().astype(float)
            filtered_img = signal_img
            if filtering:
                start_time = current_time()
                logging.info("".join(["  " for l in range(loglevel)])+"  --> Filtering : "+filename+" "+signal_name)
                # NOTE(review): this branch looks broken — `gaussian_filter` is not
                # imported (scipy.ndimage is bound as `nd`), `nuclei_sigma` is
                # undefined here, and `signal_img` is a plain ndarray with no
                # `.voxelsize` attribute. Confirm before enabling `filtering=True`.
                filtered_img = gaussian_filter(filtered_img,sigma=nuclei_sigma/np.array(signal_img.voxelsize),order=0)
                logging.info("".join(["  " for l in range(loglevel)])+"  <-- Filtering : "+filename+" "+signal_name+" ["+str(current_time() - start_time)+" s]")
            filtered_signal_images[signal_name][filename] = filtered_img.astype(signal_images[signal_name][filename].dtype)
    if aligned:
        aligned_images = sequence_aligned_signal_images(sequence_name, image_dirname, save_files=save_files, signal_names=signal_names,microscope_orientation=microscope_orientation, verbose=verbose, debug=debug, loglevel=loglevel+1)
    image_centers = {}
    for i_time, (time, filename) in enumerate(zip(file_times, filenames)):
        reference_img = signal_images[signal_names[0]][filename]
        # Aligned-frame origin: XY image center, Z at 1/8 of the stack depth.
        img_center = (np.array(reference_img.shape) * np.array(reference_img.voxelsize)) / 2.
        img_center[2] = reference_img.shape[2] * reference_img.voxelsize[2] / 8.
        image_centers[filename] = img_center
    slice_coords = {}
    image_slices = {}
    for signal_name in signal_names:
        image_slices[signal_name] = {}
    for i_time, (time, filename) in enumerate(zip(file_times,filenames)):
        # L1 nuclei positions drive both the Z-map and the grid extent.
        file_data = signal_data[filename]
        file_data = file_data[file_data['layer']==1]
        if aligned:
            X = file_data['aligned_x'].values
            Y = file_data['aligned_y'].values
            Z = file_data['aligned_z'].values
        elif registered:
            X = file_data['registered_x'].values
            Y = file_data['registered_y'].values
            Z = file_data['registered_z'].values
        else:
            X = file_data['center_x'].values
            Y = file_data['center_y'].values
            Z = file_data['center_z'].values
        reference_img = signal_images[reference_name][filename]
        size = np.array(reference_img.shape)
        voxelsize = microscope_orientation*np.array(reference_img.voxelsize)
        if resolution is None:
            resolution = np.abs(voxelsize)[0]
        # Build the 2D sampling grid (XY) in physical coordinates.
        if aligned:
            n_points = int(np.round((2*r_max)/resolution+1))
            xx,yy = np.meshgrid(np.linspace(-r_max,r_max,n_points),np.linspace(-r_max,r_max,n_points))
        else:
            n_points = int(np.round(((size-1)*np.abs(voxelsize))[0]/resolution+1))
            xx,yy = np.meshgrid(np.linspace(0,((size-1)*voxelsize)[0],n_points),np.linspace(0,((size-1)*voxelsize)[1],n_points))
        # print(signal_images[signal_names[0]][filename].shape, xx.shape)
        # extent = xx.max(),xx.min(),yy.min(),yy.max()
        extent = xx.min(),xx.max(),yy.max(),yy.min()
        if projection_type == "L1_slice":
            start_time = current_time()
            logging.info("".join(["  " for l in range(loglevel)])+"  --> Computing Z-map : "+filename)
            # Interpolate one Z per grid point from the nuclei cloud (the L1 surface).
            zz = compute_local_2d_signal(np.transpose([X,Y]),np.transpose([xx,yy],(1,2,0)),Z)
            if aligned:
                img_center = image_centers[filename]
                coords = (img_center+microscope_orientation*np.transpose([xx,yy,zz],(1,2,0)))/np.array(reference_img.voxelsize)
            else:
                coords = (microscope_orientation*np.transpose([xx,yy,zz],(1,2,0)))/np.array(reference_img.voxelsize)
            # Grid points falling outside the image are clipped and later zeroed.
            extra_mask = np.any(coords > (np.array(reference_img.shape) - 1),axis=-1)
            # extra_mask = np.any(coords > (np.array(reference_img.shape) - 1), axis=1).reshape(xx.shape)
            coords = np.maximum(np.minimum(coords, np.array(reference_img.shape) - 1), 0)
            coords[np.isnan(coords)]=0
            coords = coords.astype(int)
            coords = tuple(np.transpose(np.concatenate(coords)))
            slice_coords[filename] = coords
            logging.info("".join(["  " for l in range(loglevel)])+"  <-- Computing Z-map : "+filename+" ["+str(current_time() - start_time)+" s]")
            for i_signal, signal_name in enumerate(signal_names):
                start_time = current_time()
                logging.info("".join(["  " for l in range(loglevel)])+"  --> Slicing : "+filename+" "+signal_name)
                if aligned:
                    image_slices[signal_name][filename] = aligned_images[signal_name][filename][slice_coords[filename]].reshape(xx.shape).T[:,::-1]
                else:
                    image_slices[signal_name][filename] = filtered_signal_images[signal_name][filename][slice_coords[filename]].reshape(xx.shape).T[:,::-1]
                image_slices[signal_name][filename][extra_mask] = 0
                if "_seg" in signal_name:
                    # Out-of-image pixels of segmented slices get the background label (1).
                    image_slices[signal_name][filename][image_slices[signal_name][filename]==0] = 1
                logging.info("".join(["  " for l in range(loglevel)])+"  <-- Slicing : "+filename+" "+signal_name+" ["+str(current_time() - start_time)+" s]")
        elif projection_type == "max_intensity":
            if aligned:
                img_center = image_centers[filename]
                coords = (img_center + microscope_orientation*np.transpose([xx,yy,np.zeros_like(xx)],(1,2,0)))/np.array(reference_img.voxelsize)
            else:
                coords = (microscope_orientation*np.transpose([xx,yy,np.zeros_like(xx)],(1,2,0)))/np.array(reference_img.voxelsize)
            extra_mask = np.any(coords > (np.array(reference_img.shape) - 1),axis=-1)
            coords = np.maximum(np.minimum(coords, np.array(reference_img.shape) - 1), 0)
            coords[np.isnan(coords)]=0
            coords = coords.astype(int)
            coords = tuple(np.transpose(np.concatenate(coords)))
            for i_signal, signal_name in enumerate(signal_names):
                if not '_seg' in signal_name:
                    start_time = current_time()
                    logging.info("".join(["  " for l in range(loglevel)])+"  --> Projecting : "+filename+" "+signal_name)
                    # depth = (np.arange(size[2])/float(size[2]-1))[np.newaxis,np.newaxis]*np.ones_like(xx)[:,:,np.newaxis]
                    # depth = np.ones_like(depth)
                    if aligned:
                        # max_projection = (depth * (aligned_images[signal_name][filename].get_array()[coords[:2]].reshape(xx.shape + (reference_img.shape[2],)))).max(axis=2)
                        max_projection = (aligned_images[signal_name][filename].get_array()[coords[:2]].reshape(xx.shape + (reference_img.shape[2],))).max(axis=2)
                        # max_projection = np.transpose(max_projection)[::-1,::-1]
                    else:
                        # max_projection = (depth * (filtered_signal_images[signal_name][filename][coords[:2]].reshape(xx.shape + (reference_img.shape[2],)))).max(axis=2)
                        max_projection = (filtered_signal_images[signal_name][filename][coords[:2]].reshape(xx.shape + (reference_img.shape[2],))).max(axis=2)
                    max_projection[extra_mask] = 0
                    image_slices[signal_name][filename] = max_projection.T[:,::-1]
                    logging.info("".join(["  " for l in range(loglevel)])+"  <-- Projecting : "+filename+" "+signal_name+" ["+str(current_time() - start_time)+" s]")
                else:
                    start_time = current_time()
                    logging.info("".join(["  " for l in range(loglevel)])+"  --> Projecting : "+filename+" segmented " + membrane_name)
                    # Labelled stacks use the dedicated first/last-label projection.
                    projection = labelled_image_projection(filtered_signal_images[signal_name][filename],direction=microscope_orientation)
                    image_slices[signal_name][filename] = projection.T[:,::-1]
                    image_slices[signal_name][filename][image_slices[signal_name][filename]==0] = 1
                    logging.info("".join(["  " for l in range(loglevel)]) + "  <-- Projecting : " + filename + " segmented " + membrane_name + " [" + str(current_time() - start_time) + " s]")
        if save_files and projection_type in ['L1_slice']:
            logging.info("".join(["  " for l in range(loglevel)])+"--> Saving 2D signal images : "+filename+" "+str(signal_names))
            for i_signal, signal_name in enumerate(signal_names):
                image_filename = image_dirname+"/"+sequence_name+"/"+filename+"/"+filename+("_aligned_" if aligned else "_")+projection_type+"_"+signal_name+"_projection.tif"
                imsave2d(image_filename,image_slices[signal_name][filename])
    return image_slices
def labelled_image_projection(seg_img, axis=2, direction=1, background_label=1):
    """Project a 3D labelled image to 2D along one axis.

    For every column along `axis`, the deepest (direction=1) or shallowest
    (direction=-1) non-background voxel index is selected, and the label at
    that position is returned. Columns containing only background fall back
    to the last (direction=1) or first (direction=-1) index.

    Returns a 2D numpy array of labels (transposed with respect to the two
    remaining axes).
    """
    # Accept SpatialImage-like inputs transparently.
    if "get_array" in dir(seg_img):
        seg_img = seg_img.get_array()
    idx_x, idx_y, idx_z = np.mgrid[0:seg_img.shape[0], 0:seg_img.shape[1], 0:seg_img.shape[2]].astype(float)
    foreground = (seg_img != background_label)
    # Per-voxel depth index, zeroed (then NaN-ed) on background voxels.
    if axis == 0:
        yy, zz = [np.transpose(m) for m in np.meshgrid(np.arange(seg_img.shape[1]), np.arange(seg_img.shape[2]))]
        depth = idx_x * foreground
    elif axis == 1:
        xx, zz = [np.transpose(m) for m in np.meshgrid(np.arange(seg_img.shape[0]), np.arange(seg_img.shape[2]))]
        depth = idx_y * foreground
    elif axis == 2:
        xx, yy = [np.transpose(m) for m in np.meshgrid(np.arange(seg_img.shape[0]), np.arange(seg_img.shape[1]))]
        depth = idx_z * foreground
    depth[depth == 0] = np.nan
    if direction == 1:
        depth = np.nanmax(depth, axis=axis)
        depth[np.isnan(depth)] = seg_img.shape[axis] - 1
    elif direction == -1:
        depth = np.nanmin(depth, axis=axis)
        depth[np.isnan(depth)] = 0
    if axis == 0:
        xx = depth
    elif axis == 1:
        yy = depth
    elif axis == 2:
        zz = depth
    # Gather the label of each selected voxel and transpose the result.
    coords = tuple(np.transpose(np.concatenate(np.transpose([xx, yy, zz], (1, 2, 0)).astype(int))))
    return np.transpose(seg_img[coords].reshape(xx.shape))
def image_angular_slice(img, theta=0., resolution=None, extent=None, width=0.):
    """Extract a vertical (r, z) slice of a 3D image along direction `theta`.

    Parameters
    ----------
    img : image-like object exposing `shape`, `voxelsize` and `get_array()`
        (e.g. a timagetk SpatialImage)
    theta : float — slicing direction in degrees, around the image XY center
    resolution : float or None — radial sampling step (defaults to the X voxelsize)
    extent : (min, max) or None — radial range; defaults to the centered X extent
    width : float — slab half-width; when > 0 several parallel slices are
        extracted and combined with a per-pixel maximum

    Returns
    -------
    2D numpy array of shape (n_z, n_r); out-of-image samples are set to 0.
    """
    img_center = (np.array(img.shape) * np.array(img.voxelsize)) / 2.
    if resolution is None:
        resolution = img.voxelsize[0]
    if extent is None:
        image_x = np.arange(img.shape[0])*img.voxelsize[0] - img_center[0]
        extent = (np.min(image_x),np.max(image_x))
    # BUG FIX: np.linspace requires an integer sample count; the original code
    # passed a float expression, which raises a TypeError on modern NumPy.
    n_radial = int(np.round(1 + (extent[1] - extent[0]) / resolution))
    radial_distances = np.linspace(extent[0], extent[1], n_radial)
    if width>0:
        orthoradial_distances = np.linspace(-width, width, int(np.round(2 * width / resolution)))
    else:
        orthoradial_distances = np.array([0.])
    slice_images = []
    for d in orthoradial_distances:
        # Sampling line at angle theta, shifted by d in the orthoradial direction.
        radial_x = -d*np.sin(np.radians(theta)) + radial_distances*np.cos(np.radians(theta))
        radial_y = d*np.cos(np.radians(theta)) + radial_distances*np.sin(np.radians(theta))
        image_z = np.arange(img.shape[2]) * img.voxelsize[2] - img_center[2]
        xx,zz = np.meshgrid(radial_x,image_z)
        yy,zz = np.meshgrid(radial_y,image_z)
        coords = np.concatenate(np.transpose([xx, yy, zz], (1, 2, 0)))
        coords = (img_center + coords)/np.array(img.voxelsize)
        # Samples outside the image volume are clipped, then zeroed afterwards.
        extra_mask = np.any(coords>(np.array(img.shape)-1),axis=1).reshape(xx.shape)
        coords = np.maximum(np.minimum(coords,np.array(img.shape)-1),0)
        coords = tuple(np.transpose(coords.astype(int)))
        slice_img = img.get_array()[coords].reshape(xx.shape)
        slice_img[extra_mask] = 0
        slice_images += [slice_img]
    return np.max(slice_images,axis=0)
def sequence_image_primordium_slices(sequence_name, image_dirname, save_files=False, signal_names=None, filenames=None, primordia_range=range(-3,6), reference_name='TagBFP', resolution=None, r_max=120., microscope_orientation=-1, verbose=False, debug=False, loglevel=0):
    """Extract, for each primordium, a radial 2D slice from every aligned signal image.

    The angular position of each primordium is estimated from its expected
    golden-angle direction, corrected by the mean observed 'aligned_theta' of
    the primordium across all time points, and the aligned signal images are
    then sliced along that direction with image_angular_slice.

    Parameters follow sequence_aligned_signal_images; `primordia_range` lists
    the primordium indices to slice (negative = incipient primordia).

    Returns
    -------
    dict : signal_name -> {primordium -> {filename -> 2D SpatialImage}}
    """
    aligned_images = sequence_aligned_signal_images(sequence_name, image_dirname, save_files=False, signal_names=signal_names, microscope_orientation=microscope_orientation, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    primordia_data = load_sequence_primordia_data(sequence_name, image_dirname, verbose=verbose, debug=debug, loglevel=loglevel+1)
    if signal_names is None:
        signal_names = list(aligned_images.keys())
    image_slices = {}
    for signal_name in signal_names:
        image_slices[signal_name] = {}
        for primordium in primordia_range:
            image_slices[signal_name][primordium] = {}
    if filenames is None:
        filenames = np.sort(list(aligned_images[reference_name].keys()))
    if len(filenames) > 0:
        file_times = np.array([int(f[-2:]) for f in filenames])
    image_centers = {}
    for i_time, (time, filename) in enumerate(zip(file_times, filenames)):
        reference_img = aligned_images[signal_names[0]][filename]
        img_center = (np.array(reference_img.shape) * np.array(reference_img.voxelsize)) / 2.
        # img_center[2] = reference_img.shape[2] * reference_img.voxelsize[2] / 8.
        image_centers[filename] = img_center
    for i_time, (time, filename) in enumerate(zip(file_times, filenames)):
        reference_img = aligned_images[reference_name][filename]
        size = np.array(reference_img.shape)
        voxelsize = microscope_orientation * np.array(reference_img.voxelsize)
        if resolution is None:
            resolution = np.abs(voxelsize)[0]
        # NOTE(review): `img_center` here is the one left over from the last
        # iteration of the loop above, not image_centers[filename] — confirm
        # whether the per-file center was intended.
        img_z = np.arange(size[2]) * voxelsize[2] - img_center[2]
        img_r = np.arange(r_max/resolution) * resolution
        rr, zz = map(np.transpose,np.meshgrid(img_r,img_z))
        extent = rr.min(), rr.max(), zz.max(), zz.min()
        for primordium in primordia_range:
            primordium_data = pd.concat([primordia_data[f][primordia_data[f]['primordium'] == primordium] for f in filenames])
            if len(primordium_data) > 0:
                # Theoretical golden-angle direction, corrected by the mean
                # observed angular deviation of this primordium, wrapped to [-180, 180).
                primordium_theta = (primordium * golden_angle + 180) % 360 - 180
                primordium_theta = primordium_theta + np.mean(primordium_data['aligned_theta'].values - primordium_theta)
                primordium_theta = (primordium_theta + 180) % 360 - 180
                print(primordium,primordium_theta)
                for i_signal, signal_name in enumerate(signal_names):
                    start_time = current_time()
                    logging.info("".join(["  " for l in range(loglevel)]) + "  --> Slicing P"+str(primordium)+" : " + filename + " " + signal_name)
                    # The aligned frame is flipped in both X and Y relative to the
                    # slicing convention, hence the 180-degree shift.
                    # image_theta = primordium_theta # identity
                    # image_theta = -primordium_theta # flip X
                    # image_theta = 180 - primordium_theta # flip Y
                    image_theta = 180 + primordium_theta # flip X + flip Y
                    # image_theta = 90 - primordium_theta # transpose
                    # image_theta = primordium_theta - 90 # transpose + flip X
                    # image_theta = primordium_theta + 90 # transpose + flip Y
                    # Membrane/PIN signals use a zero-width slice; nuclear signals a 2-um slab.
                    slice_img = image_angular_slice(aligned_images[signal_name][filename],theta=image_theta,extent=(0,r_max),width=0. if signal_name in ['PI','PIN1'] else 2.)
                    image_slices[signal_name][primordium][filename] = SpatialImage(np.transpose(slice_img),voxelsize=(resolution,reference_img.voxelsize[2]))
                    logging.info("".join(["  " for l in range(loglevel)]) + "  <-- Slicing P"+str(primordium)+" : " + filename + " " + signal_name + " [" + str(current_time() - start_time) + " s]")
        if save_files:
            logging.info("".join(["  " for l in range(loglevel)])+"--> Saving primordium signal images : "+filename+" "+str(signal_names))
            for i_signal, signal_name in enumerate(signal_names):
                for primordium in primordia_range:
                    if filename in image_slices[signal_name][primordium].keys():
                        image_filename = image_dirname+"/"+sequence_name+"/"+filename+"/"+filename+"_P"+str(primordium)+"_"+signal_name+"_slice.tif"
                        imsave2d(image_filename,image_slices[signal_name][primordium][filename])
    return image_slices
def sequence_signal_data_primordium_slices(sequence_name, image_dirname, filenames=None, primordia_range=range(-3,6), width=2., microscope_orientation=-1, verbose=False, debug=False, loglevel=0):
    """Extract, per primordium and per time point, the cells lying in a slab
    around that primordium's radial direction in the aligned frame.

    Parameters
    ----------
    sequence_name : str
        Name of the image sequence to process.
    image_dirname : str
        Root directory containing the sequence data.
    filenames : list of str, optional
        Time-point file names; defaults to every file with signal data.
    primordia_range : range, optional
        Primordium indices to slice.
    width : float, optional
        Half-thickness of the slab kept around each primordium direction.
    microscope_orientation : int, optional
        +1/-1 factor applied to coordinates (microscope axis convention).

    Returns
    -------
    dict
        signal_data_slices[primordium][filename] -> DataFrame of the cells
        kept for that primordium at that time point.

    NOTE(review): the indentation of this function was reconstructed from a
    whitespace-mangled source; the nesting below should be confirmed against
    the original repository.
    """
    signal_images = load_sequence_signal_images(sequence_name, image_dirname, signal_names=['TagBFP'], verbose=verbose, debug=debug, loglevel=loglevel + 1)
    aligned_signal_data = load_sequence_signal_data(sequence_name, image_dirname, normalized=True, aligned=True, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    signal_data = load_sequence_signal_data(sequence_name, image_dirname, normalized=True, aligned=False, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    primordia_data = load_sequence_primordia_data(sequence_name, image_dirname, verbose=verbose, debug=debug, loglevel=loglevel + 1)
    if filenames is None:
        filenames = np.sort(list(signal_data.keys()))
    if len(filenames) > 0:
        # File names are assumed to end with a 2-digit acquisition time —
        # TODO confirm against the sequence naming convention.
        file_times = np.array([int(f[-2:]) for f in filenames])
        signal_data_slices = {}
        for primordium in primordia_range:
            signal_data_slices[primordium] = {}
        alignment_transformations = {}
        for i_time, (time, filename) in enumerate(zip(file_times, filenames)):
            reference_img = list(signal_images.values())[0][filename]
            # Estimate the transform mapping image coordinates onto the
            # aligned frame, from the layer-1 cells of the aligned data.
            file_data = aligned_signal_data[filename]
            file_data = file_data[file_data['layer'] == 1]
            img_points = file_data[['center_'+dim for dim in ['x','y','z']]].values
            aligned_points = file_data[['aligned_'+dim for dim in ['x','y','z']]].values
            alignment_transformation = pts2transfo(microscope_orientation * img_points, microscope_orientation * aligned_points)
            # Opposite signs on the XY diagonal indicate a reflection: mirror
            # the Y axis and re-estimate so the transform is a proper motion.
            reflection = np.sign(alignment_transformation[0, 0] * alignment_transformation[1, 1]) == -1
            if reflection:
                img_points[:,1] = microscope_orientation * reference_img.shape[1] * reference_img.voxelsize[1] - img_points[:,1]
                alignment_transformation = pts2transfo(microscope_orientation * img_points, microscope_orientation * aligned_points)
            alignment_transformations[filename] = alignment_transformation
            # Apply the transform to every cell (all layers) of this file.
            file_data = signal_data[filename]
            image_points = file_data[['center_'+dim for dim in ['x','y','z']]].values
            if reflection:
                image_points[:, 1] = microscope_orientation * reference_img.shape[1] * reference_img.voxelsize[1] - image_points[:, 1]
            homogeneous_points = np.concatenate([microscope_orientation * image_points,np.ones((len(file_data),1))],axis=1)
            aligned_homogeneous_points = np.einsum("...ij,...j->...i",alignment_transformation,homogeneous_points)
            # NOTE(review): these assignments write the aligned columns back
            # into the loaded signal_data DataFrame (in-place mutation) —
            # confirm this side effect is intended.
            file_data['aligned_x'] = microscope_orientation * aligned_homogeneous_points[:,0]
            file_data['aligned_y'] = microscope_orientation * aligned_homogeneous_points[:,1]
            file_data['aligned_z'] = microscope_orientation * aligned_homogeneous_points[:,2]
            # Cylindrical coordinates in the aligned frame (theta in degrees,
            # signed by the Y coordinate).
            file_data['radial_distance'] = np.linalg.norm([file_data['aligned_x'], file_data['aligned_y']], axis=0)
            file_data['aligned_theta'] = 180. / np.pi * np.sign(file_data['aligned_y']) * np.arccos(file_data['aligned_x'] / file_data['radial_distance'])
            aligned_points = file_data[['aligned_'+dim for dim in ['x','y','z']]].values
            for primordium in primordia_range:
                primordium_data = pd.concat([primordia_data[f][primordia_data[f]['primordium'] == primordium] for f in filenames])
                # Theoretical azimuth from the golden angle, wrapped to
                # (-180, 180]...
                primordium_theta = (primordium * golden_angle + 180) % 360 - 180
                if len(primordium_data) > 0:
                    # ...refined by the mean observed azimuth when this
                    # primordium was actually detected in the sequence.
                    primordium_theta = primordium_theta + np.mean(primordium_data['aligned_theta'].values - primordium_theta)
                primordium_theta = (primordium_theta + 180) % 360 - 180
                # Keep cells within `width` of the vertical plane containing
                # the primordium direction, and on the primordium side
                # (dot product with the direction above -width).
                primordium_plane_normal = np.array([-np.sin(np.radians(primordium_theta)),np.cos(np.radians(primordium_theta)),0])
                primordium_plane_dot_products = np.einsum("...ij,...j->...i",aligned_points,primordium_plane_normal)
                primordium_vector = np.array([np.cos(np.radians(primordium_theta)), np.sin(np.radians(primordium_theta)), 0])
                primordium_dot_products = np.einsum("...ij,...j->...i",aligned_points,primordium_vector)
                file_primordium_data = file_data[(np.abs(primordium_plane_dot_products)<width)&(primordium_dot_products>-width)]
                # file_primordium_data = file_data[(np.abs(primordium_plane_dot_products)<width)&(primordium_dot_products>0)]
                signal_data_slices[primordium][filename] = file_primordium_data
    # NOTE(review): if `filenames` is empty, signal_data_slices is never
    # bound and this raises NameError — confirm whether an empty dict
    # should be returned instead.
    return signal_data_slices
| elifesciences-publications/sam_spaghetti | src/sam_spaghetti/signal_image_slices.py | signal_image_slices.py | py | 29,239 | python | en | code | 0 | github-code | 13 |
71270841937 | import os
dir_path = os.path.dirname(os.path.realpath(__file__))
from collections import defaultdict
# Closing bracket matching each opening bracket.
CL = {
    '(': ')',
    '[': ']',
    '{': '}',
    '<': '>'
}
def part1(inp):
    """Return the total syntax-error score of the corrupted lines.

    A line is corrupted at the first closing bracket that does not match
    the most recent unmatched opening bracket; only that first illegal
    character of each line is scored (AoC 2021, day 10, part 1).
    """
    # Local bracket table: the function only reads it, so the original
    # `global CL` declaration was unnecessary.
    pairs = {'(': ')', '[': ']', '{': '}', '<': '>'}
    # Score awarded for the first illegal closing character of a line.
    penalties = {')': 3, ']': 57, '}': 1197, '>': 25137}
    total = 0
    for line in inp:
        stack = []
        for c in line:
            if c in pairs:
                stack.append(c)
            elif c in penalties:
                if stack and pairs[stack[-1]] == c:
                    stack.pop()
                else:
                    # First illegal character: score it and abandon the line.
                    total += penalties[c]
                    break
    return total
def part2(inp):
    """Return the middle autocompletion score of the incomplete lines.

    Corrupted lines are discarded. Each remaining line is scored over the
    closing brackets needed to complete it, innermost unmatched bracket
    first (score = score * 5 + bracket points); the median of the sorted
    scores is returned (the puzzle guarantees an odd count).
    """
    # Local tables: the function only reads the bracket mapping, so the
    # original `global CL` declaration was unnecessary.
    pairs = {'(': ')', '[': ']', '{': '}', '<': '>'}
    closers = {')', ']', '}', '>'}
    # Points per *opening* bracket still left unmatched on the stack.
    points = {'(': 1, '[': 2, '{': 3, '<': 4}
    scores = []
    for line in inp:
        stack = []
        for c in line:
            if c in pairs:
                stack.append(c)
            elif c in closers:
                if stack and pairs[stack[-1]] == c:
                    stack.pop()
                else:
                    break  # corrupted line: skip it entirely
        else:
            # Line is merely incomplete: score its completion directly
            # instead of materialising a reversed copy of the stack.
            score = 0
            for c in reversed(stack):
                score = score * 5 + points[c]
            scores.append(score)
    scores.sort()
    return scores[len(scores) // 2]
def main():
    """Read the day-10 puzzle input and print both puzzle answers."""
    with open(f'{dir_path}/../../inputs/day10/input') as f:
        # split() already returns a list; the original identity
        # list(map(lambda x: x, ...)) wrapper was a no-op.
        inp = f.read().strip().split('\n')
    print(inp)
    print(part1(inp[:]))
    print(part2(inp[:]))
| Lammatian/AdventOfCode | 2021/src/day10/main.py | main.py | py | 1,812 | python | en | code | 1 | github-code | 13 |
71645328019 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make Seed.acquisition_location optional.

    Alters the ForeignKey from Seed to seedbank.Location to allow NULL
    (null=True), so a seed can be stored without an acquisition location.
    """

    dependencies = [
        ('seedbank', '0008_auto_20141206_1245'),
    ]

    operations = [
        migrations.AlterField(
            model_name='seed',
            name='acquisition_location',
            field=models.ForeignKey(to='seedbank.Location', null=True),
            preserve_default=True,
        ),
    ]
| briandant/echo-seeds | echo_seeds/seedbank/migrations/0009_auto_20141206_1307.py | 0009_auto_20141206_1307.py | py | 471 | python | en | code | 0 | github-code | 13 |
30313101620 | import logging
import signal
import threading
from flask_babel import lazy_gettext as l_
from xivo import plugin_helpers
from .http_server import Server
from wazo_ui.helpers.destination import register_destination_form
from wazo_ui.helpers.error import (
ErrorExtractor,
ErrorTranslator,
ConfdErrorExtractor,
URL_TO_NAME_RESOURCES,
RESOURCES,
GENERIC_PATTERN_ERRORS,
SPECIFIC_PATTERN_ERRORS,
)
from wazo_ui.core.client import engine_clients
from wazo_ui.core.form import (
ApplicationDestination,
ApplicationCallBackDISADestination,
ApplicationDISADestination,
ApplicationDirectoryDestination,
ApplicationFaxToMailDestination,
ApplicationVoicemailDestination,
CustomDestination,
HangupDestination,
NoneDestination,
register_destination_form_application,
)
logger = logging.getLogger(__name__)
class Controller:
    """Top-level wazo-ui application controller.

    Wires enabled plugins, error-translation tables and destination forms
    onto the Flask server, then manages the server's lifecycle (run/stop).

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; structure should be confirmed against the original repository.
    """

    def __init__(self, config):
        self.server = Server(config)
        # Thread performing the (blocking) server shutdown; created by stop().
        self._stopping_thread = None
        # Load every enabled UI plugin, handing each one the shared config,
        # the Flask application and the engine clients.
        plugin_helpers.load(
            namespace='wazo_ui.plugins',
            names=config['enabled_plugins'],
            dependencies={
                'config': config,
                'flask': self.server.get_app(),
                'clients': engine_clients,
            },
        )
        # Register the lookup tables used to turn engine (confd) errors
        # into user-facing messages.
        ErrorExtractor.register_url_to_name_resources(URL_TO_NAME_RESOURCES)
        ErrorTranslator.register_resources(RESOURCES)
        ConfdErrorExtractor.register_generic_patterns(GENERIC_PATTERN_ERRORS)
        ConfdErrorExtractor.register_specific_patterns(SPECIFIC_PATTERN_ERRORS)
        # Register the destination forms offered by the UI; 'none' is pinned
        # to position 0 explicitly.
        register_destination_form(
            'application', l_('Application'), ApplicationDestination
        )
        register_destination_form('hangup', l_('Hangup'), HangupDestination)
        register_destination_form('custom', l_('Custom'), CustomDestination)
        register_destination_form('none', l_('None'), NoneDestination, position=0)
        # Application-specific destination sub-forms.
        register_destination_form_application(
            'callback_disa',
            l_('CallBack DISA'),
            ApplicationCallBackDISADestination,
        )
        register_destination_form_application(
            'directory',
            l_('Directory'),
            ApplicationDirectoryDestination,
        )
        register_destination_form_application(
            'disa',
            l_('DISA'),
            ApplicationDISADestination,
        )
        register_destination_form_application(
            'fax_to_mail',
            l_('Fax to Mail'),
            ApplicationFaxToMailDestination,
        )
        register_destination_form_application(
            'voicemail',
            l_('Voicemail'),
            ApplicationVoicemailDestination,
        )

    def run(self):
        """Run the HTTP server until it exits, then join any stop thread."""
        logger.info('wazo-ui starting...')
        try:
            self.server.run()
        finally:
            logger.info('wazo-ui stopping...')
            if self._stopping_thread:
                self._stopping_thread.join()

    def stop(self, reason):
        """Request an asynchronous server shutdown.

        The actual stop runs in a dedicated thread (named after `reason`)
        so this can be called from a signal handler without blocking;
        run() joins the thread before returning.
        """
        logger.warning('Stopping wazo-ui: %s', reason)
        self._stopping_thread = threading.Thread(target=self.server.stop, name=reason)
        self._stopping_thread.start()
def _signal_handler(controller, signum, frame):
controller.stop(reason=signal.Signals(signum).name)
| wazo-platform/wazo-ui | wazo_ui/controller.py | controller.py | py | 3,275 | python | en | code | 4 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.