seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18335497129 | N = int(input())
A = list(map(int, input().split()))
dct = dict(enumerate(A))
ad = sorted(dct.items(), key=lambda x:x[1])
ans = []
for i in ad:
j = i[0] + 1
ans.append(j)
a = map(str, ans)
b = ' '.join(a)
print(b) | Aasthaengg/IBMdataset | Python_codes/p02899/s145751975.py | s145751975.py | py | 221 | python | en | code | 0 | github-code | 90 |
34562093604 | import numpy as np
import unittest
from caffe2.python import core, workspace, muji, test_util
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
"testblob_gpu_" + str(id),
shape=[1, 2, 3, 4],
value=float(id + 1),
device_option=muji.OnGPU(id)
)
allreduce_function(
net, ["testblob_gpu_" + str(i)
for i in gpu_ids], "_reduced", gpu_ids
)
workspace.RunNetOnce(net)
target_value = sum(gpu_ids) + len(gpu_ids)
all_blobs = workspace.Blobs()
all_blobs.sort()
for blob in all_blobs:
print('{} {}'.format(blob, workspace.FetchBlob(blob)))
for idx in gpu_ids:
blob = workspace.FetchBlob("testblob_gpu_" + str(idx) + "_reduced")
np.testing.assert_array_equal(
blob,
target_value,
err_msg="gpu id %d of %s" % (idx, str(gpu_ids))
)
def testAllreduceFallback(self):
self.RunningAllreduceWithGPUs(
list(range(workspace.NumCudaDevices())), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
for i in range(workspace.NumCudaDevices()):
self.RunningAllreduceWithGPUs([i], muji.Allreduce)
def testAllreduceWithTwoGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce2)
else:
print('Skipping allreduce with 2 gpus. Not peer access ready.')
def testAllreduceWithFourGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4)
else:
print('Skipping allreduce with 4 gpus. Not peer access ready.')
def testAllreduceWithEightGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if (
pattern.shape[0] >= 8 and np.all(pattern[:4, :4]) and
np.all(pattern[4:, 4:])
):
self.RunningAllreduceWithGPUs(
list(range(8)), muji.Allreduce8)
else:
print('Skipping allreduce with 8 gpus. Not peer access ready.')
| facebookarchive/AICamera-Style-Transfer | app/src/main/cpp/caffe2/python/muji_test.py | muji_test.py | py | 2,632 | python | en | code | 81 | github-code | 90 |
4663994456 | from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import os
from_email = os.getenv('sg_from_email')
gift_template_id = "d-4158ee9a983f496cbd4bff994f818192"
purchase_template_id = "d-a7cd129a71744a30ac219698fb4a6ae9"
sg = SendGridAPIClient(os.getenv('sendgrid_api_key'))
def send_email(action, email, to_name=''):
if action == "gift_dorks":
send_gift_email(email, to_name)
elif action == "purchase_dorks":
send_purchase_email(email)
def send_gift_email(to_email, to_name):
message = Mail(from_email=from_email, to_emails=to_email)
# pass custom values for our HTML placeholders
message.dynamic_template_data = {
'gift_receiver': to_name,
'registration_link': 'https://hundreddorks.com',
}
message.template_id = gift_template_id
try:
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
raise e
print(e.message)
def send_purchase_email(email):
message = Mail(from_email=from_email, to_emails=email, subject="Dorks purchased!")
# pass custom values for our HTML placeholders
message.dynamic_template_data = {}
message.template_id = purchase_template_id
try:
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
raise e
print(e.message)
| FirstCoder1/Dorks | server/service/email_service.py | email_service.py | py | 1,502 | python | en | code | 0 | github-code | 90 |
40689596030 | from selenium import webdriver
from fixtures.contact import ContactHelper
from fixtures.group import GroupHelper
from fixtures.session import Session
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.driver = webdriver.Firefox()
elif browser == "chrome":
self.driver = webdriver.Chrome()
elif browser == "edge":
self.driver = webdriver.Edge()
else:
raise ValueError("Unrecognized browser %" % browser)
self.driver.maximize_window()
# self.driver.implicitly_wait(2)
self.session = Session(self)
self.groupHelper = GroupHelper(self)
self.contactHelper = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.driver.current_url
return True
except:
return False
def open_home_page(self):
self.driver.get(self.base_url)
def destroy(self):
self.driver.quit()
| shuradrozd/webProject | fixtures/application.py | application.py | py | 1,030 | python | en | code | 0 | github-code | 90 |
18325710909 |
def resolve():
n = int(input())
for i in range(1, 10):
a = n // i
if n % i == 0 and a < 10:
print('Yes')
return
print('No')
if __name__ == "__main__":
resolve()
| Aasthaengg/IBMdataset | Python_codes/p02880/s024786774.py | s024786774.py | py | 219 | python | en | code | 0 | github-code | 90 |
74340486377 |
# Method based on L. N. Trefethen,Spectral Methods in MATLAB(SIAM,2000) and http://blue.math.buffalo.edu/438/trefethen_spectral/all_py_files/
import numpy as np
import math
pi = math.pi
#It builds the Chebyshev grid and a differentiation matrix in a general domain (a, b)
def chebymatrix(Ncheb,a,b):
range_cheb = np.arange(0,Ncheb+1)
x = np.cos(pi*range_cheb/Ncheb)
t = (a+b)/2.-((a-b)/2.)*x
carray = np.hstack([2, np.ones(Ncheb-1), 2])*(-1)**np.arange(0,Ncheb+1)
X = np.tile(x,(Ncheb+1,1))
dX = X.T - X
Dp = (carray[:,np.newaxis]*(1.0/carray)[np.newaxis,:])/(dX+(np.identity(Ncheb+1)))
Dp = Dp - np.diag(Dp.sum(axis=1))
Dcheb =(2./(b-a))*Dp
return Dcheb, t
| cjoana/GREx | SPBHS/Dmatrix.py | Dmatrix.py | py | 712 | python | en | code | 1 | github-code | 90 |
22237355494 | ### 13023
def dfs(p,res):
if res >= 5:
print(1)
exit()
for a in r[p]:
if visit[a] == 0:
visit[a] = 1
dfs(a,res+1)
visit[a] = 0
n, m = map(int, input().split())
r = [[] for _ in range(n)]
for _ in range(m):
i,j = map(int, input().split())
r[i].append(j)
r[j].append(i)
visit = [0 for _ in range(n)]
for i in range(n):
visit[i] = 1
dfs(i,1)
visit[i] = 0
print(0)
### 4963
import sys
sys.setrecursionlimit(10**6)
dx = [1,1,1,0,-1,-1,-1,0]
dy = [1,0,-1,-1,-1,0,1,1]
def dfs(y,x):
for i in range(8):
if 0<=y+dy[i]<h and 0<=x+dx[i]<w and m[y+dy[i]][x+dx[i]] == 1 and visit[y+dy[i]][x+dx[i]] == 0:
visit[y+dy[i]][x+dx[i]] = 1
dfs(y+dy[i],x+dx[i])
while True:
w, h = map(int, input().split())
if w == 0:
exit()
m = []
visit = []
for _ in range(h):
m.append(list(map(int, input().split())))
visit.append([0 for _ in range(w)])
cnt = 0
for i in range(h):
for j in range(w):
if m[i][j] == 1 and visit[i][j] == 0:
visit[i][j] = 1
dfs(i,j)
cnt += 1
print(cnt)
### 24479
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.readline
def dfs(p):
global res
visit[p] = res
res+=1
for v in sorted(nums[p]):
if visit[v] == 0:
dfs(v)
n,e,r = map(int,input().split())
visit = [0 for _ in range(n+1)]
nums = [ [] for _ in range(n+1) ]
for _ in range(e):
a,b = map(int, input().split())
nums[a].append(b)
nums[b].append(a)
res = 1
dfs(r)
for a in visit[1:]:
print(a) | happysang/baekjoon_algorithm | 코딩테스트준비2023/dfs.py | dfs.py | py | 1,711 | python | en | code | 0 | github-code | 90 |
29981098775 | import sys
import time
import struct
import json
from pprint import pprint
from datetime import datetime
import os
import shutil
import fnmatch, re
import wotdecoder
# Returns the list of .extension files in path directory. Omit skip file. Can be recursive.
def custom_listfiles(path, extension, recursive, skip = None):
if recursive:
files = []
for root, subFolders, filename in os.walk(path):
for f in filename:
if f.endswith("."+extension) and f!=skip:
files.append(os.path.join(root,f))
else:
files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(path + os.path.sep + f) and f.endswith("."+extension) and f!=skip]
return files
def main():
nickname = "*"
clantag = "*"
csens = re.IGNORECASE
verbose = 4
show_errors = False
owner = False
recursive = True
full_path = False
battle_result = False
source = os.getcwd()
# Parse arguments
skip = -1
for argind, arg in enumerate(sys.argv[1:]):
if argind == skip: pass
elif arg == "-c" : csens = 0
elif arg == "-v0" : verbose = 0
elif arg == "-v1" : verbose = 1
elif arg == "-v2" : verbose = 2
elif arg == "-v3" : verbose = 3
elif arg == "-v4" : verbose = 4
elif arg == "-e" : show_errors = True
elif arg == "-o" : owner = True
elif arg == "-r" : recursive = False
elif arg == "-p" : full_path = True
elif arg == "-b" : battle_result = True
elif arg == "-i" :
if len(sys.argv) <= argind+2:
sys.exit("\nUnspecified input directory.")
source = sys.argv[argind+2]
if not os.path.exists(source):
sys.exit("\n"+source+" doesnt exist.")
skip = argind+1
elif arg in ("-h", "-?") or arg.startswith("-") :
sys.exit("findplayer scans replay files for players using nickname and/or clantag."
"\nUsage:" \
"\n\nfindplayer nickname [clantag] -c -v0..3 -e -o -r -p -b -i input_file_or_directory" \
"\n\nTry `*` for string wildcard, `?` for character wildcard." \
"\n-c Case sensitive search." \
"\n-v0 Verbose 0 = silent running, only give summary." \
"\n-v1 + list replay name, default." \
"\n-v2 + show match result, frag count." \
"\n-v3 + detailed stats." \
"\n-v4 + stats summary." \
"\n-e Show errors." \
"\n-o Include replay owner stats." \
"\n-r Turn off recursive subdirectory scan." \
"\n-p Show full patch." \
"\n-b Scan battle_results(.dat) instead of wotreplays." \
"\n-i Specify input directory. Default is current." \
"\n\nExamples:" \
"\n`*z_?l [1?3]` will match Rasz_pl[123]" \
"\n`[*]` will match any person in a clan." \
"\n`[]` will only match people without clan." \
"\n`??` will list all people with 2 letter nicknames." \
"\n`*` will match everyone.")
elif arg.startswith("[") and arg.endswith("]"): clantag = arg[1:-1]
else: nickname = arg
print ("\nLooking for nickname:", nickname, " clantag: ["+clantag+"]")
print ("Source:", source)
print ("Verbose:", verbose, "Recursive:", recursive, "Errors:", ("hide","show")[show_errors])
t1 = time.clock()
if os.path.isfile(source):
listdir = [source]
if source.endswith(".dat"):
battle_result = True
else:
listdir = custom_listfiles(source, ("wotreplay", "dat")[battle_result], recursive, "temp.wotreplay")
# Prepare regex filters
regexnickname = fnmatch.translate(nickname)
regexclantag = fnmatch.translate(clantag)
reobjnickname = re.compile(regexnickname, csens)
reobjclantag = re.compile(regexclantag, csens)
matches = 0
matches_kills = 0
matches_stats = 0
errors = 0
owner_kills = 0
owner_damage = 0
owner_spotted = 0
player_kills = 0
player_damage = 0
player_spotted = 0
for files in listdir:
while True:
# if verbose < 2:
# scan_mask = 1 #1 means try to only decode first block (binary 001)
# else:
# scan_mask = 7 #7 means decode everything (binary 111)
scan_mask = 7 #above speeds -v0 -v1 scanning x3, but it doesnt detect certain errors, defaulting to slower method
if battle_result:
chunks = ["", "", ""]
chunks[2], version = wotdecoder.battle_result(files)
chunks_bitmask = 4
processing = 4
else:
chunks, chunks_bitmask, processing, version = wotdecoder.replay(files, scan_mask)
# pprint (chunks[0])
# pprint (chunks[1])chunks[2]['arenaUniqueID']
# pprint (chunks[2])
# pprint (chunks[2]['personal']['accountDBID'])
# pprint (chunks[2]['players'][ chunks[2]['personal']['accountDBID'] ]['name'])
# pprint(chunks)
# print(datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S'))
# print(chunks[2]['common']['arenaCreateTime'])
# print( (datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime'])- datetime(1970, 1, 1, 0, 0)).total_seconds())
# print(datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').timestamp())
# xx = (datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime'])- datetime(1970, 1, 1, 0, 0)).total_seconds()
# print( datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime']))
# print( datetime.fromtimestamp(xx))
# print( mapidname[ chunks[2]['common']['arenaTypeID'] & 65535 ])
# print( chunks[0]['mapName'])
if (processing >8) or (not chunks_bitmask&5): #ignore replays with no useful data, must have at least first Json or pickle
errors += 1
if show_errors:
print ("\n\n---")
print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
print (wotdecoder.status[processing])
print ("---", end="")
break
elif processing ==6: #show error messages for recoverable errors
errors += 1
if show_errors:
print ("\n\n---")
print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
print (wotdecoder.status[processing])
print ("---", end="")
elif processing ==8: #very broken replay, only first json valid, have to disabble pickle
errors += 1
chunks_bitmask = 1
if show_errors:
print ("\n\n---")
print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
print (wotdecoder.status[processing])
print ("---", end="")
match = False
player_found = 0
owner_found = 0
owner_name = ""
owner_clan = ""
if chunks_bitmask&4:
vehicles = chunks[2]['players']
owner_name = chunks[2]['players'][ chunks[2]['personal']['accountDBID'] ]['name']
owner_found = chunks[2]['personal']['accountDBID']
elif chunks_bitmask&2:
vehicles = chunks[1][1]
owner_name = chunks[0]['playerName']
else:
vehicles = chunks[0]['vehicles']
owner_name = chunks[0]['playerName']
for player in vehicles:
check_player_name = vehicles[player]['name']
check_player_clan = vehicles[player]['clanAbbrev']
if not match and reobjnickname.match(check_player_name) and reobjclantag.match(check_player_clan):
match = True
matches += 1
player_found = player
player_name = vehicles[player]['name']
player_clan = "["+vehicles[player]['clanAbbrev']+"]"
if owner_found==0 and (vehicles[player]['name'] == owner_name): #find owner playerID
owner_found = player
owner_clan = "["+vehicles[player]['clanAbbrev']+"]"
if not match: break
if verbose >0:
print ("\n\n--------------------------------------------------------------------------------")
print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
print ("---")
print ("{0:39}{1:39}".format(player_name+player_clan, ("","| "+owner_name+owner_clan)[owner]))
if chunks_bitmask&4:
vehicle_player_found = chunks[2]['players'][player_found]['vehicleid']
vehicle_owner_found = chunks[2]['players'][owner_found]['vehicleid']
if verbose >1:
if chunks_bitmask&4: #is pickle available?
if chunks[2]['common']['finishReason']==3:
win_loss="Draw"
else:
win_loss = ("Loss","Win ")[chunks[2]['common']['winnerTeam']==chunks[2]['vehicles'][vehicle_player_found]['team']]
finishReason = "("+ wotdecoder.finishreason[ chunks[2]['common']['finishReason'] ] +")"
# print ("--- {0:4} on {1:28}{2:>40}".format(win_loss, wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][1], finishReason))
print ("--- {0:4} on {1:28}{2:>40}".format(win_loss, wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][1], finishReason))
#wotdecoder.gameplayid[ chunks[2]['common']['arenaTypeID'] >>16 ]
#wotdecoder.bonustype[ chunks[2]['common']['bonusType'] ]
elif chunks_bitmask&2: #is second Json available?
finishReason = ""
print ("--- {0:4} on {1:28}{2:15}".format(("Loss","Win ")[chunks[1][0]['isWinner']==1], chunks[0]['mapDisplayName'], finishReason))
else: #incomplete, all we can tell is tanks
if owner:
owner_string = " {0:<18}".format(chunks[0]['vehicles'][owner_found]['vehicleType'].split(":")[1])
else:
owner_string = ""
print (" {0:<18}{1:39}".format(chunks[0]['vehicles'][player_found]['vehicleType'].split(":")[1], owner_string))
if chunks_bitmask&4: #is second Json available?
if owner:
owner_string_kills = "| Kills ={0:>5}".format( chunks[2]['vehicles'][vehicle_owner_found]['kills'])
owner_string_tank = "| {0:8} in {1:<27}".format( ("Died","Survived")[chunks[2]['vehicles'][vehicle_owner_found]['health']>0], wotdecoder.tank[ chunks[2]['vehicles'][vehicle_owner_found]['typeCompDescr'] ][1])
owner_kills += chunks[2]['vehicles'][vehicle_owner_found]['kills']
else:
owner_string_kills = ""
owner_string_tank = ""
print ("{0:8} in {1:<27}{2:39}".format(("Died","Survived")[chunks[2]['vehicles'][vehicle_player_found]['health']>0], wotdecoder.tank[ chunks[2]['vehicles'][vehicle_player_found]['typeCompDescr'] ][1], owner_string_tank ))
print ("Kills ={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['kills'], "", owner_string_kills ))
player_kills += chunks[2]['vehicles'][vehicle_player_found]['kills']
matches_kills += 1
elif chunks_bitmask&2: #is second Json available?
if owner:
# print (player_found, owner_found)
# pprint (chunks[1][1])
owner_string_kills = "| Kills ={0:>5}".format( len(chunks[1][0]['killed']) )
owner_string_tank = "| {0:8} in {1:<27}".format( ("Died","Survived")[ chunks[1][1][owner_found]['isAlive']==1 ], chunks[1][1][owner_found]['vehicleType'].split(":")[1] )
owner_kills += chunks[1][2][owner_found]['frags']
else:
owner_string_kills = ""
owner_string_tank = ""
print ("{0:8} in {1:<27}{2:39}".format(("Died","Survived")[ chunks[1][1][player_found]['isAlive']==1 ], chunks[1][1][player_found]['vehicleType'].split(":")[1], owner_string_tank))
if player_found in chunks[1][2]: #WTF WG, why Y hate sanity? sometimes not all player frag counts saved :/
frags = chunks[1][2][player_found]['frags']
else:
frags = 0
print ("Kills ={0:>5}{1:26}{2:39}".format(frags, "", owner_string_kills))
player_kills += frags
matches_kills += 1
if verbose >2 and chunks_bitmask&4: #is pickle available? use it for detailed stats
player = int(player)
if owner:
if version >= 860:
chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted'] = chunks[2]['vehicles'][vehicle_owner_found]['damageAssistedTrack'] + chunks[2]['vehicles'][vehicle_owner_found]['damageAssistedRadio']
owner_string_damage = "| Damage ={0:>5}".format(chunks[2]['vehicles'][vehicle_owner_found]['damageDealt'])
owner_string_spotted = "| Spotted={0:>5}".format(chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted'])
owner_damage += chunks[2]['vehicles'][vehicle_owner_found]['damageDealt']
owner_spotted += chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted']
else:
owner_string_damage = ""
owner_string_spotted = ""
print ("Damage ={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['damageDealt'], "", owner_string_damage))
if version >= 860:
chunks[2]['vehicles'][vehicle_player_found]['damageAssisted'] = chunks[2]['vehicles'][vehicle_player_found]['damageAssistedTrack'] + chunks[2]['vehicles'][vehicle_player_found]['damageAssistedRadio']
print ("Spotted={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['damageAssisted'], "", owner_string_spotted))
player_damage += chunks[2]['vehicles'][vehicle_player_found]['damageDealt']
player_spotted += chunks[2]['vehicles'][vehicle_player_found]['damageAssisted']
matches_stats += 1
if battle_result: #we are decoding battle_result, lets more-or-less reconstruct potential replay name
# its not 'pixel' accurate, im too lazy to get tank country and underscores correct.
timestamp = datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime']).strftime('%Y%m%d_%H%M')
print ("Belongs to~", timestamp+"_"+wotdecoder.tank[ chunks[2]['vehicles'][vehicle_owner_found]['typeCompDescr'] ][1]+"_"+wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][0]+".wotreplay")
break
if matches > 0:
if verbose >3 and (matches_kills!=0 or matches_stats!=0) : # stats summary
if matches_kills==0: matches_kills =1 #lets not divide by zero today :)
if matches_stats==0: matches_stats =1
if owner:
owner_string_kills = "| Kills ={0:>9.2f}".format( owner_kills/matches_kills )
owner_string_damage = "| Damage ={0:>9.2f}".format( owner_damage/matches_stats )
owner_string_spotted = "| Spotted={0:>9.2f}".format( owner_spotted/matches_stats )
else:
owner_string_kills = ""
owner_string_damage = ""
owner_string_spotted = ""
print ("\nSummary (average):")
print ("Kills ={0:>9.2f}{1:23}{2:39}".format(player_kills/matches_kills , "", owner_string_kills))
print ("Damage ={0:>9.2f}{1:23}{2:39}".format(player_damage/matches_stats , "", owner_string_damage))
print ("Spotted={0:>9.2f}{1:23}{2:39}".format(player_spotted/matches_stats , "", owner_string_spotted))
print("\n\nFound", matches, "matches. ", end="")
else:
print("\n\nNo matches found. ", end="")
print(errors, "errors.")
t2 = time.clock()
print ("\nProcessing "+str(len(listdir))+" files took %0.3fms" % ((t2-t1)*1000))
main()
| raszpl/wotdecoder | findplayer.py | findplayer.py | py | 15,816 | python | en | code | 35 | github-code | 90 |
5759849265 | import time
import inspect
# Use as function decorator for printing the execution time of a function
# eg.
# @PrintExecutionTime
# async def on_step(self, iteration):
# ...
#
# will print
# on_step: 0.495ms
# whenever on_step is called
def PrintExecutionTime(func):
def calculate_execution_time(start):
return (time.time() - start) * 1000
def print_execution_time(func_name, time):
print(f'{func_name}: {round(time, 3)}ms')
def wrapper(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
execution_time = calculate_execution_time(start)
print_execution_time(f'{get_class_that_defined_method(func)} : {func.__name__}', execution_time)
async def async_wrapper(*args, **kwargs):
start = time.time()
await func(*args, **kwargs)
execution_time = calculate_execution_time(start)
print_execution_time(f'{get_class_that_defined_method(func)} : {func.__name__}', execution_time)
if (inspect.iscoroutinefunction(func)):
return async_wrapper
return wrapper
def get_class_that_defined_method(meth):
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(inspect.getmodule(meth),
meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls
return getattr(meth, '__objclass__', None) # handle special descriptor objects | Scottdecat/SwarmLord | bot/debug/debug_utils.py | debug_utils.py | py | 1,670 | python | en | code | 0 | github-code | 90 |
28489808662 | #!/usr/bin/python
import os
import cv2
import numpy as np
def SGBM(left, right):
kernel_size = 3
smooth_left = cv2.GaussianBlur(left, (kernel_size,kernel_size), 1.5)
smooth_right = cv2.GaussianBlur(right, (kernel_size, kernel_size), 1.5)
window_size = 9
left_matcher = cv2.StereoSGBM_create(
numDisparities=96,
blockSize=7,
P1=8*3*window_size**2,
P2=32*3*window_size**2,
disp12MaxDiff=1,
uniquenessRatio=16,
speckleRange=2,
mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(80000)
wls_filter.setSigmaColor(1.2)
disparity_left = np.int16(left_matcher.compute(smooth_left, smooth_right))
disparity_right = np.int16(right_matcher.compute(smooth_right, smooth_left) )
wls_image = wls_filter.filter(disparity_left, smooth_left, None, disparity_right)
wls_image = cv2.normalize(src=wls_image, dst=wls_image, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
wls_image = np.uint8(wls_image)
return wls_image
| ImaCVer/SGBM | SGBM.py | SGBM.py | py | 1,111 | python | en | code | 1 | github-code | 90 |
24827376192 | from helpers import alphabet_position, rotate_character
def encrypt(text, rot_key):
lister = list(rot_key)
iterate = 0
rot = 0
result = ""
addition = ""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHA_bet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for char in text:
if char in alphabet or char in ALPHA_bet:
if rot == len(rot_key):
rot = 0
iterate = alphabet_position(lister[rot])
addition = rotate_character(char, iterate)
result = result + addition
rot = rot + 1
else:
result = result + char
return result
def main():
message = input("Enter your secret message here: ")
key = input("Encrypt with: ")
crypto = encrypt(message, key)
print(crypto)
if __name__ == "__main__":
main() | p-fannon/Crypto | vigenere.py | vigenere.py | py | 840 | python | en | code | 0 | github-code | 90 |
27097400018 | from spack import *
import glob
class Vardictjava(Package):
"""VarDictJava is a variant discovery program written in Java.
It is a partial Java port of VarDict variant caller."""
homepage = "https://github.com/AstraZeneca-NGS/VarDictJava"
url = "https://github.com/AstraZeneca-NGS/VarDictJava/releases/download/v1.5.1/VarDict-1.5.1.tar"
version('1.5.1', '8c0387bcc1f7dc696b04e926c48b27e6')
version('1.4.4', '6b2d7e1e5502b875760fc9938a0fe5e0')
depends_on('java@8:', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('bin/VarDict', prefix.bin)
mkdirp(prefix.lib)
files = [x for x in glob.glob("lib/*jar")]
for f in files:
install(f, prefix.lib)
| matzke1/spack | var/spack/repos/builtin/packages/vardictjava/package.py | package.py | py | 761 | python | en | code | 2 | github-code | 90 |
16448338230 | from arl.env import BaseEnv, EnvSpaceType
import gymnasium as gym
from typing import Dict, Any, List, Tuple, Union, Optional
import numpy as np
class GymEnv(BaseEnv):
def __init__(
self, env_name: str, env_params: dict = {}, seed: Optional[int] = None
) -> None:
super().__init__(env_name, env_params, seed)
self.env = gym.make(self.env_name, **self.env_params)
self.action_dim = None
self.state_dim = None
self.get_shape()
def step(self, action: Any) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
state, reward, terminated, truncated, info = self.env.step(action)
done = False
if terminated or truncated:
done = True
return state, float(reward), done, info
def reset(self) -> Tuple[np.ndarray, Union[dict, List[dict]]]:
return self.env.reset(seed=self.seed)
def render(self) -> np.ndarray:
return self.env.render()
def close(self) -> None:
self.env.close()
def get_shape_type(self) -> Tuple[EnvSpaceType, EnvSpaceType]:
if type(self.env.action_space) == gym.spaces.Discrete:
self.action_type = EnvSpaceType.Discrete
elif type(self.env.action_space) == gym.spaces.Box:
self.action_type = EnvSpaceType.Continuous
if type(self.env.observation_space) == gym.spaces.Discrete:
self.state_type = EnvSpaceType.Discrete
elif type(self.env.observation_space) == gym.spaces.Box:
self.state_type = EnvSpaceType.Continuous
return self.state_type, self.action_type
def get_shape(self) -> Tuple[np.ndarray,np.ndarray]:
self.get_shape_type()
if self.state_dim is None:
if self.state_type.is_discrete():
# state is discrete type
self.state_dim = np.array([self.env.observation_space.n])
elif self.state_type.is_continuous():
# state is continuous type
# array
self.state_dim = self.env.observation_space.shape
if self.action_dim is None:
if self.action_type.is_discrete():
# action is discrete type
self.action_dim = np.array([self.env.action_space.n])
elif self.action_type.is_continuous():
# action is continuous type
self.action_dim = self.env.action_space.shape
return self.state_dim, self.action_dim | noobHuKai/arl | arl/env/gym_env.py | gym_env.py | py | 2,485 | python | en | code | 0 | github-code | 90 |
9914010818 | '''
# -*- coding: UTF-8 -*-
# Interstitial Error Detector
# Version 0.2, 2013-08-28
# Copyright (c) 2013 AudioVisual Preservation Solutions
# All rights reserved.
# Released under the Apache license, v. 2.0
# Created on Aug 6, 2014
# @author: Furqan Wasi <furqan@avpreserve.com>
'''
from PySide.QtCore import *
from PySide.QtGui import *
import webbrowser
from Core import SharedApp
class AboutInterstitialGUI(QDialog):
''' Class to manage the Filter to be implemented for the files with specific extensions '''
def __init__(self, parent_win):
'''
Contstructor
'''
QDialog.__init__(self, parent_win)
self.Interstitial = SharedApp.SharedApp.App
self.setWindowTitle('About Intersitial')
self.parent_win = parent_win
self.setWindowModality(Qt.WindowModal)
self.parent_win.setWindowTitle('About Intersitial' +' '+self.Interstitial.Configuration.getApplicationVersion())
self.setWindowIcon(QIcon(self.Interstitial.Configuration.getLogoSignSmall()))
self.AboutInterstitialLayout = QVBoxLayout()
self.widget = QWidget(self)
self.pgroup = QGroupBox()
self.detail_layout = QVBoxLayout()
self.pgroup.setStyleSheet(" QGroupBox { border-style: none; border: none;}")
self.close_btn = QPushButton('Close')
self.about_layout = QGroupBox()
self.heading = QTextBrowser()
self.content = QTextEdit()
self.content.installEventFilter(self)
self.heading.setReadOnly(True)
self.content.setReadOnly(False)
self.content.viewport().setCursor(Qt.PointingHandCursor)
self.main = QHBoxLayout()
def openUserGuideUrl(self):
try:
QDesktopServices.openUrl(QUrl(self.Interstitial.Configuration.getUserGuideUrl()))
except:
webbrowser.open_new_tab(self.Interstitial.Configuration.getUserGuideUrl())
pass
def destroy(self):
''' Distructor'''
del self
def ShowDialog(self):
''' Show Dialog'''
self.show()
self.exec_()
def SetLayout(self, layout):
''' Set Layout'''
self.AboutInterstitialLayout = layout
def showDescription(self):
''' Show Description'''
self.heading.setText(self.Interstitial.label['description_heading'])
self.content.setHtml(self.Interstitial.label['description_content'])
def eventFilter(self, target, event):
"""
Capturing Content Clicked Event
@param target: Event triggered by Widget Object
@param event: Event triggered
@return Boolean: weather to launch
"""
if event.type() == QEvent.RequestSoftwareInputPanel:
self.openUserGuideUrl()
return True;
return False;
def SetDesgin(self):
''' All design Management Done in Here'''
self.close_btn = QPushButton('Close')
pic = QLabel(self)
pic.setFixedSize(300,400)
'''use full ABSOLUTE path to the image, not relative'''
pic.setPixmap(QPixmap(self.Interstitial.Configuration.getLogoSignSmall()))
self.close_btn.clicked.connect(self.Cancel)
self.detail_layout.addWidget(pic)
self.pgroup.setLayout(self.detail_layout)
slay = QVBoxLayout()
if self.Interstitial.Configuration.getOsType() == 'windows':
self.heading.setFixedSize(555, 40)
self.content.setFixedSize(555, 260)
else:
self.heading.setFixedSize(570, 40)
self.content.setFixedSize(570, 260)
self.close_btn.setFixedSize(200, 30)
slay.addWidget(self.heading)
slay.addWidget(self.content)
slay.addWidget(self.close_btn)
if self.Interstitial.Configuration.getOsType() == 'windows':
self.about_layout.setFixedSize(575, 360)
else:
self.about_layout.setFixedSize(585, 360)
self.pgroup.setFixedSize(40, 360)
self.main.addWidget(self.pgroup)
self.main.addWidget(self.about_layout)
self.about_layout.setLayout(slay)
self.setLayout(self.main)
self.showDescription()
def Cancel(self):
"""
Close the Dialog Box
@return:
"""
try:
self.Interstitial = SharedApp.SharedApp.App
except:
pass
self.parent_win.setWindowTitle(self.Interstitial.messages['InterErrorDetectTitle'] + ' ' + self.Interstitial.Configuration.getApplicationVersion() )
self.destroy()
self.close()
def LaunchDialog(self):
"""
Launch Dialog
@return:
"""
self.SetDesgin()
self.ShowDialog() | WeAreAVP/interstitial | GUI/AboutInterstitialGUI.py | AboutInterstitialGUI.py | py | 4,910 | python | en | code | 9 | github-code | 90 |
7819942656 | ### util functions for parsing all the moonshot data
### matthew.robinson@postera.ai
# general imports
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from chembl_structure_pipeline import standardizer
# get parent path of file
from pathlib import Path
dir_path = Path(__file__).parent.absolute()
# all_df = pd.read_csv(dir_path / "../covid_submissions_all_info.csv")
id_df = pd.read_csv(dir_path / "../covid_moonshot_ids.csv")
cdd_df = pd.read_csv(
dir_path / "../data_for_CDD/current_vault_data/current_vault_data.csv"
)
# CID_df = pd.read_csv("https://covid.postera.ai/covid/submissions.csv")
CID_df = pd.read_csv(dir_path / "../downloaded_COVID_submissions_file.csv")
def get_CID(ik):
# if ik == "NQIQTGDJKOVBRF-UHFFFAOYSA-N":
# print("seeing JAG-UCB-52b62a6f-9 IK")
# print(list(id_df.loc[id_df["inchikey"] == ik]["canonical_CID"])[0])
short_ik = ik.split("-")[0]
if ik in list(id_df["inchikey"]):
return list(id_df.loc[id_df["inchikey"] == ik]["canonical_CID"])[0]
elif short_ik in list(id_df["short_inchikey"]):
return list(
id_df.loc[id_df["short_inchikey"] == short_ik]["canonical_CID"]
)[0] # this will pick up the first one, which is what we want when enantiopures are separated
else:
print("NOT FOUND")
return np.nan
def get_CDD_ID(external_id):
# if external_id == "JAG-UCB-52b62a6f-9":
# print("seeing JAG-UCB-52b62a6f-9")
# print(list(
# cdd_df.loc[cdd_df["external_ID"] == external_id]["CDD_name"]
# )[0])
if external_id in list(cdd_df["external_ID"]):
return list(
cdd_df.loc[cdd_df["external_ID"] == external_id]["CDD_name"]
)[0]
else:
print("NOT FOUND")
return np.nan
def get_comments(ik):
# print("seeing JAG-UCB-52b62a6f-9 IK for comments")
print(ik in list(id_df["inchikey"]))
short_ik = ik.split("-")[0]
print(short_ik in list(id_df["short_inchikey"]))
if ik in list(id_df["inchikey"]):
return ""
elif short_ik in list(id_df["short_inchikey"]):
return f"imperfect stereochemical match for {list(id_df.loc[id_df['short_inchikey']==short_ik]['canonical_CID'])[0]}"
else:
return "not found"
def strip_and_standardize_smi(smi):
try:
return Chem.MolToSmiles(
Chem.MolFromSmiles(
Chem.MolToSmiles(
standardizer.standardize_mol(
standardizer.get_parent_mol(Chem.MolFromSmiles(smi))[0]
)
)
)
)
except:
print(smi)
raise ValueError(f"failed on {smi}")
# code to retrieve new and old CIDS
new_CID_list = list(CID_df["CID"])
old_CID_list = [str(x) for x in list(CID_df["CID (old format)"])]
old_to_new_CID_dict = {}
for old_CID, new_CID in zip(old_CID_list, new_CID_list):
if "nan" in old_CID:
old_to_new_CID_dict[new_CID] = new_CID
else:
old_to_new_CID_dict[old_CID] = new_CID
new_to_old_CID_dict = {v: k for k, v in old_to_new_CID_dict.items()}
def get_new_CID_from_old(old_CID):
return old_to_new_CID_dict[old_CID]
def get_old_CID_from_new(new_CID):
return new_to_old_CID_dict[new_CID]
def get_series(smi):
series_SMARTS_dict = {
# "3-aminopyridine": "[R1][C,N;R0;!$(NC(=O)CN)]C(=O)[C,N;R0;!$(NC(=O)CN)][c]1cnccc1",
"Ugi": "[c,C:1][C](=[O])[N]([c,C,#1:2])[C]([c,C,#1:3])([c,C,#1:4])[C](=[O])[NH1][c,C:5]",
"Isatins": "O=C1Nc2ccccc2C1=O",
"3-aminopyridine-like": "[cR1,cR2]-[C,N]C(=O)[C,N]!@[R1]",
"quinolones": "NC(=O)c1cc(=O)[nH]c2ccccc12",
"piperazine-chloroacetamide": "O=C(CCl)N1CCNCC1",
"activated-ester": "O=C(Oc1cncc(Cl)c1)c1cccc2[nH]ccc12"
}
def check_if_smi_in_series(
smi, SMARTS, MW_cutoff=550, num_atoms_cutoff=70, num_rings_cutoff=10
):
mol = Chem.MolFromSmiles(smi)
MW = Chem.Descriptors.MolWt(mol)
num_heavy_atoms = mol.GetNumHeavyAtoms()
num_rings = Chem.rdMolDescriptors.CalcNumRings(mol)
patt = Chem.MolFromSmarts(SMARTS)
if (
(
len(
Chem.AddHs(Chem.MolFromSmiles(smi)).GetSubstructMatches(
patt
)
)
> 0
)
and (MW <= MW_cutoff)
and (num_heavy_atoms <= num_atoms_cutoff)
and (num_rings <= num_rings_cutoff)
):
return True
else:
return False
for series in series_SMARTS_dict:
series_SMARTS = series_SMARTS_dict[series]
if series == "3-amonipyridine-like":
if check_if_smi_in_series(
smi,
series_SMARTS,
MW_cutoff=450,
num_rings_cutoff=4,
num_atoms_cutoff=35,
):
return series
else:
if check_if_smi_in_series(smi, series_SMARTS):
return series
return None
| postera-ai/COVID_moonshot_submissions | lib/utils.py | utils.py | py | 5,136 | python | en | code | 18 | github-code | 90 |
43690412082 | import discord
from discord.ext import commands, tasks
import requests
from datetime import datetime
def get_data():
json = requests.get('https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/ncov_cases/FeatureServer'
'/2/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects'
'&outFields=*&orderByFields=Confirmed%20desc&resultOffset=0&resultRecordCount=250&cacheHint'
'=true').json()
return [country['attributes'] for country in json['features']]
def convert_to_length(text, length):
text = str(text)
print(text)
print(len(text))
if length > len(text):
text = text.rjust(length - len(text))
print(text)
return text
def make_list(data):
timestamp = int(str(data[0]['Last_Update'])[:-3])
timestamp = datetime.fromtimestamp(timestamp).strftime('%B %d, %Y %H:%M')
response = "Conavirus numbers (Updated {})\n\n".format(timestamp)
response += "%40s|%10s|%7s|%10s\n\n" % ("Country", "Confirmed", "Death", "Recovered")
for country in data:
response += "%40s|%10s|%7s|%10s\n" % (
country['Country_Region'], str(country['Confirmed']), str(country['Deaths']), str(country['Recovered']))
return response
class Coronavirus(commands.Cog):
@commands.command(name="coronavirus", aliases=['corona', 'covid', 'covid19'], pass_content=True)
async def coronavirus(self, ctx: commands.Context):
"""
Get global data on the coronavirus (via ArcGIS)
"""
data = get_data()
if data:
list = make_list(data)
temp_list = ""
for line in list.splitlines():
if len(temp_list) < 1800:
temp_list += "\n" + line
else:
await ctx.send("```" + temp_list + "```")
temp_list = ""
await ctx.send("```" + temp_list + "```")
else:
await ctx.send("Sorry, I couldn't grab the latest numbers")
@coronavirus.error
async def coronavirus_error(self, ctx, error):
print(error)
await ctx.send("Something went wrong.")
def __init__(self, bot):
self.bot = bot
print("Coronavirus ready to go!")
def setup(bot):
bot.add_cog(Coronavirus(bot))
| nwithan8/Arca | general/coronavirus.py | coronavirus.py | py | 2,356 | python | en | code | 22 | github-code | 90 |
10921173712 | #!/usr/bin/env python3
"""
Description:
This script will launch ``middle_bed_enrichment`` for every bed in a given folder
"""
import os
import subprocess
import argparse
def main(trna_launcher, folder_bed, fasterdb_bed, output):
"""
:param trna_launcher: (string) file corresponding to the tRNA launcher
:param folder_bed: (string) folder containing the bed files
:param output: (string) path where the output will be created
"""
cur_folder = os.path.realpath(os.path.dirname(__file__))
bed_files = sorted(os.listdir(folder_bed))
bed_files = [folder_bed + my_file for my_file in bed_files]
for my_bed in bed_files:
print("Working on %s" % my_bed)
name_file = os.path.basename(my_bed).split(".")[0]
output_folder = output + name_file
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
cmd = "python3 %s/middle_bed_enrichment.py --output %s --name %s --clip_bed %s --fasterdb_bed %s \
--trna_launcher %s" % (cur_folder, output_folder, name_file, my_bed, fasterdb_bed, trna_launcher)
if "SRSF3" in my_bed:
cmd += " --overlap 2"
if "SRSF1" in my_bed:
cmd += " --overlap 5"
print(cmd)
subprocess.check_call(cmd, shell=True, stderr=subprocess.STDOUT)
def launcher():
"""
function that contains a parser to launch the program
"""
# description on how to use the program
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Launch the script ``middle_bed_enrichment`` multiple time
""")
# Arguments for the parser
required_args = parser.add_argument_group("Required argument")
parser.add_argument('--output', dest='output',
help="""path where the result will be created - default : current directory""",
default=".")
required_args.add_argument('--clip_folder', dest='clip_folder',
help="The bed folder containing bed file",
required=True)
required_args.add_argument('--fasterdb_bed', dest='fasterdb_bed',
help="""the bed containing all fasterDB exons""",
required=True)
required_args.add_argument('--trna_launcher', dest='trna_launcher',
help="""file corresponding the tRNA launcher""",
required=True)
args = parser.parse_args() # parsing arguments
# Defining global parameters
if args.output[-1] != "/":
args.output += "/"
main(args.trna_launcher, args.clip_folder, args.fasterdb_bed, args.output)
if __name__ == "__main__":
launcher()
| LBMC/Fontro_Aube_2019 | clip_analysis/src/middle_bed_launcher.py | middle_bed_launcher.py | py | 2,817 | python | en | code | 0 | github-code | 90 |
40606337848 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
head = None
temp = None
add = 0
while l2 is not None or l1 is not None:
s1 = add
if l1 is not None:
s1 += (l1.val)
l1 = l1.next
if l2 is not None:
s1 += (l2.val)
l2 = l2.next
valu = s1 % 10
new = ListNode(valu)
add = s1 // 10
if temp is None:
temp = new
head = new
else:
temp.next = new
temp = temp.next
if add > 0:
newNode = ListNode(add)
temp.next = newNode
temp = temp.next
return head
def revers(self,li: ListNode):
prev = None
current = li
while current is not None:
new = current.next
current.next = prev
prev = current
current = new
li = prev
def get_count(self):
temp = self.head
count = 0
while temp:
count += 1
temp = temp.next
return count | ajaydeep300/leet | 2-add-two-numbers/2-add-two-numbers.py | 2-add-two-numbers.py | py | 1,437 | python | en | code | 0 | github-code | 90 |
13046639626 | from neural_network import *
from text_parser import Parser
from tkinter import *
import drawer as dr
"""
Klasa Menu odpowiedzialna jest za wyświetlanie i obsługę menu - wywoływanie metod sieci neuronowej i parsera.
"""
class Menu:
root = Tk()
network = None
parser = Parser()
text_entry = None
out_text_var = StringVar()
lang_tab = ["Angielski:", "Niemiecki:", "Polski:", "Czeski:", "Włoski:", "Rosyjski (tr.):"]
############################################################################################
def display_menu(self):
self.network = NeuralNetwork(676, len(self.lang_tab))
np.set_printoptions(formatter={"float": lambda x: "{0:0.6f}".format(x)}, threshold=np.inf)
self.root.geometry("640x480")
self.root.title("Detektor języków")
b_width = 32
option1 = Button(self.root, width = b_width, text ="Uczenie sieci", command = self.option1_callback)
option2 = Button(self.root, width = b_width, text ="Detekcja języka wpisanego tekstu", command = self.option2_callback)
option3 = Button(self.root, width = b_width, text ="Pokaż wagi", command = self.option3_callback)
option_exit = Button(self.root, width = b_width, text ="Wyjdź", command = self.option_exit_callback)
self.text_entry = Entry(self.root, width = b_width * 2)
self.text_entry.insert(0, "Miejsce na tekst w obsługiwanym języku")
out_text = Label(self.root, textvariable = self.out_text_var, justify = LEFT)
option1.pack()
self.text_entry.pack()
option2.pack()
option3.pack()
option_exit.pack()
out_text.pack()
self.root.mainloop()
############################################################################################
def option1_callback(self):
self.parser.show_info = True
print(self.lang_tab[0])
eng_data = self.parser.parse_file("TrainingTexts/english.txt")
print(self.lang_tab[1])
ger_data = self.parser.parse_file("TrainingTexts/german.txt")
print(self.lang_tab[2])
pol_data = self.parser.parse_file("TrainingTexts/polish.txt")
print(self.lang_tab[3])
cze_data = self.parser.parse_file("TrainingTexts/czech.txt")
print(self.lang_tab[4])
ita_data = self.parser.parse_file("TrainingTexts/italian.txt")
print(self.lang_tab[5])
rus_data = self.parser.parse_file("TrainingTexts/russian.txt")
self.parser.show_info = False
train_inputs = np.array([eng_data, ger_data, pol_data, cze_data, ita_data, rus_data])
train_outputs = np.identity(len(self.lang_tab))
train_iterations = 80000
print("Iteracje uczenia: " + str(train_iterations))
self.network.train(train_inputs, train_outputs, train_iterations)
self.parser.save_weights(self.network.weights)
print("\nWagi zostały zapisane")
############################################################################################
def option2_callback(self):
self.network.weights = self.parser.load_weights()
test_data = np.array( [self.parser.parse_string(self.text_entry.get())] )
result = self.network.propagation(test_data[0])
out_str = "\nWynik detekcji:\n\n"
for i in range(len(self.lang_tab)):
out_str = out_str + "{: >15}".format(self.lang_tab[i]) + "\t" + "{0:.2%}".format(result[i]) + "\n"
self.out_text_var.set(out_str)
print(out_str)
dr.draw_plot(self.lang_tab, result)
############################################################################################
def option3_callback(self):
print("\nWagi sieci:")
print(self.network.weights)
############################################################################################
def option_exit_callback(self):
exit() | KowalDrzo/LanguageDetector | gui.py | gui.py | py | 4,022 | python | en | code | 0 | github-code | 90 |
10527452022 | # © 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>
# © 2014 initOS GmbH & Co. KG <http://www.initos.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models, api
from odoo.exceptions import Warning as UserError
class HrPublicHolidaysLine(models.Model):
_name = 'hr.holidays.public.line'
_description = 'Public Holidays Lines'
_order = "date, name desc"
name = fields.Char(
'Name',
required=True,
)
date = fields.Date(
'Date',
required=True
)
year_id = fields.Many2one(
'hr.holidays.public',
'Calendar Year',
required=True,
)
variable = fields.Boolean('Date may change')
state_ids = fields.Many2many(
'res.country.state',
'hr_holiday_public_state_rel',
'line_id',
'state_id',
'Related States'
)
@api.multi
@api.constrains('date', 'state_ids')
def _check_date_state(self):
for s in self:
if fields.Date.from_string(s.date).year != s.year_id.year:
raise UserError(
'Dates of holidays should be the same year '
'as the calendar year they are being assigned to'
)
if s.state_ids:
domain = [('date', '=', s.date),
('year_id', '=', s.year_id.id),
('state_ids', '!=', False),
('id', '!=', s.id)]
holidays = s.search(domain)
for holiday in holidays:
if s.state_ids & holiday.state_ids:
raise UserError('You can\'t create duplicate public '
'holiday per date %s and one of the '
'country states.' % s.date)
domain = [('date', '=', s.date),
('year_id', '=', s.year_id.id),
('state_ids', '=', False)]
if s.search_count(domain) > 1:
raise UserError('You can\'t create duplicate public holiday '
'per date %s.' % s.date)
return True
| JoryWeb/illuminati | poi_hr_public_holidays/models/hr_public_holidays_line.py | hr_public_holidays_line.py | py | 2,221 | python | en | code | 1 | github-code | 90 |
10680771102 | import numpy as np
import matplotlib.pyplot as plt
import src.util.utl as utl
def nat_spline_interpolation(x: np.ndarray, y: np.ndarray, x_int: np.ndarray) \
-> np.ndarray:
'''
natürliche kubische Spline Interpolation für n+1 Stützpunkte
Parameters:
x: Zeilenvektor mit x der Stützpunkte, länge = n + 1
y: Zeilenvektor mit y der Stützpunkte, länge = n + 1
x_int: Die x für die die Interpolation berechnet werden soll
Returns:
y_int: Die für x_int interpolierten y
'''
assert len(x.shape) == 1
assert x.shape == y.shape
n = len(x) - 1
assert n >= 2
x, y, x_int = x.astype(np.float64), y.astype(
np.float64), x_int.astype(np.float64)
print('natürliche kubische Spline Interpolation')
a = y[:-1]
print('Koeffizienten a_i aus y_i:')
utl.np_pprint(a)
h = x[1:] - x[:-1]
c = np.zeros_like(x)
if n >= 2:
A = np.diag(2 * (h[:-1] + h[1:])) + \
np.diag(h[1:-1], -1) + np.diag(h[1:-1], 1)
print('A-Matrix für die c_i:')
utl.np_pprint(A)
z = 3 * (y[2:] - y[1:-1]) / h[1:] - \
3 * (y[1:-1] - y[0:-2]) / h[:-1]
print('z-Vektor für die c_i:')
utl.np_pprint(z)
c[1:-1] = np.linalg.solve(A, z)
print('Berechnete Koeffizienten c_i aus Ac = z:')
utl.np_pprint(c)
b = (y[1:] - y[:-1]) / h[:] \
- h[:] / 3 * (c[1:] + 2 * c[:-1])
print('Berechnete Koeffizienten b_i:')
utl.np_pprint(b)
d = (c[1:] - c[:-1]) / (3*h[:])
print('Berechnete Koeffizienten d_i:')
utl.np_pprint(d)
yy = np.zeros_like(x_int)
# x werte mit Funktion des korrekten Intervalls interpolieren
# n+1 Stützpunkte -> n Intervalle (der reihe nach)
for k in range(n):
idx = np.where(np.logical_and(x_int >= x[k], x_int <= x[k+1]))
dx = x_int[idx] - x[k]
yy[idx] = a[k] + b[k] * dx + c[k] * dx**2 + d[k] * dx**3
return yy
def lagrange_interpolation(x: np.ndarray, y: np.ndarray, x_int: np.ndarray) \
-> np.ndarray:
'''
Lagrange Interpolation für ein Polynom vom Grad n
Parameters:
x: Zeilenvektor mit x der Stützpunkte, länge = n + 1
y: Zeilenvektor mit y der Stützpunkte, länge = n + 1
x_int: Die x für die die Interpolation berechnet werden soll
Returns:
y_int: Die für x_int interpolierten y
'''
utl.assert_is_vec(x)
utl.assert_eq_shape(x, y)
utl.assert_is_vec(x_int)
x = x.astype(np.float64)
y = y.astype(np.float64)
x_int = x_int.astype(np.float64)
y_int = np.zeros_like(x_int)
# n + 1 Stützpunkte!
n = len(x) - 1
for i in range(n+1):
li = np.ones_like(x_int)
for j in range(n+1):
if i == j: continue
li *= (x_int - x[j]) / (x[i] - x[j])
y_int += li * y[i]
return y_int
def np_polyval_fit_scaling_bsp():
'''
Beispiel aus Serie 4 Aufgabe 3 b)
zur Interpolation mittels numpy's polyfit/polyval
'''
x = np.array([1981, 1984, 1989, 1993, 1997, 2000, 2001, 2003, 2004, 2010], dtype=np.float64)
y = np.array([0.5, 8.2, 15, 22.9, 36.6, 51, 56.3, 61.8, 65, 76.7], dtype=np.float64)
assert len(x) == len(y)
'''
Hier sind die Daten noch zusätzlich Skaliert (x - mean(x))
Dadurch ist das Problem besser konditioniert
Die Kurve an sich hat eine grössere Varianz,
bei den einzelnen Stützpunkten dafür ist die Kurve exakt,
was bei a) nicht der Fall ist
'''
# n + 1 Stützpunkte
n = len(x) - 1
x_nrm = x - np.mean(x)
# DIE REIHENFOLGE der returned Koeffizienten ist beginnend
# mit dem für den höchsten Exponent x^n danach absteigend
coeff = np.polyfit(x_nrm, y, n)
x_int = np.arange(1975, 2020.1, step=0.1)
y_int = np.polyval(coeff, x_int - np.mean(x))
plt.plot(x, y, label='original')
plt.plot(x_int, y_int, label='interpolated')
plt.xlim(1975, 2020)
plt.ylim(-100, 250)
plt.legend()
plt.grid()
plt.show()
import unittest
class InterpolationTest(unittest.TestCase):
def test_lagrange_int_S4_A1(self):
x = np.array([0, 2_500, 5_000, 10_000], dtype=np.float64)
y = np.array([1_013, 747, 540, 226], dtype=np.float64)
x_gesucht = np.array([3_750], dtype=np.float64)
y_int = lagrange_interpolation(x, y, x_gesucht)
actual = y_int[0]
self.assertAlmostEqual(actual, 637.328125)
def test_spline_int_S5_A2(self):
x = np.array([4., 6, 8, 10])
y = np.array([6., 3, 9, 0])
_ = nat_spline_interpolation(x, y, np.array([]))
def test_spline_int_S5_A3(self):
x = np.array([1_900, 1_910, 1_920, 1_930, 1_940, 1_950,
1_960, 1_970, 1_980, 1_990, 2_000, 2_010])
y = np.array([75.995, 91.972, 105.711, 123.203, 131.669, 150.697,
179.323, 203.212, 226.505, 249.633, 281.422, 308.745])
y_int = nat_spline_interpolation(x, y, x)
self.assertTrue(np.allclose(y_int, y))
# x_int = np.linspace(x[0], x[-1], num=100_000)
# y_int = nat_spline_interpolation(x, y, x_int)
# plt.plot(x, y, 'bo', label='Messpunkte')
# plt.plot(x_int, y_int, 'r', label='Spline Interpoliert')
# plt.legend()
# plt.grid()
# plt.show()
| merlinio2000/seppi | src/hm2/interpolation.py | interpolation.py | py | 5,404 | python | de | code | 0 | github-code | 90 |
15836449468 | from SharedInterfaces.RegistryAPI import *
from SharedInterfaces.ProvenanceAPI import *
from tests.helpers.general_helpers import *
from tests.helpers.datastore_helpers import *
from tests.helpers.prov_helpers import *
from tests.helpers.registry_helpers import *
from tests.helpers.link_helpers import *
from resources.example_models import *
from tests.config import config, Tokens
from tests.helpers.fixtures import *
from tests.helpers.prov_api_helpers import *
def test_provenance_workflow(dataset_io_fixture: Tuple[str, str], linked_person_fixture: ItemPerson, organisation_fixture: ItemOrganisation) -> None:
# prov test that will create the requirements needed for a model run record and register it
# Procedure:
# create the simple entities required (person, organisation)
# register custom dataset templates for input and output datasets
# register simple model
# register model run workflow tempalte using references to pre registered entities
# create and register the model run object using references to pre registered entitites
person = linked_person_fixture
organisation = organisation_fixture
write_token = Tokens.user1
# register custom dataset templates (input and output)
input_deferred_resource_key = "key1"
input_template = create_item_from_domain_info_successfully(
item_subtype=ItemSubType.DATASET_TEMPLATE,
token=write_token(),
domain_info=DatasetTemplateDomainInfo(
description="A template for integration Test input dataset",
display_name="Integration test input template",
defined_resources=[
DefinedResource(
usage_type=ResourceUsageType.GENERAL_DATA,
description="Used for connectivities",
path="forcing/",
)
],
deferred_resources=[
DeferredResource(
usage_type=ResourceUsageType.GENERAL_DATA,
description="Used for connectivities",
key=input_deferred_resource_key,
)
]
)
)
cleanup_items.append((input_template.item_subtype, input_template.id))
# cleanup create activity
cleanup_create_activity_from_item_base(
item=input_template,
get_token=Tokens.user1
)
output_deferred_resource_key = "Key2"
output_template = create_item_from_domain_info_successfully(
item_subtype=ItemSubType.DATASET_TEMPLATE,
token=write_token(),
domain_info=DatasetTemplateDomainInfo(
description="A template for integration Test output dataset",
display_name="Integration test output template",
defined_resources=[
DefinedResource(
usage_type=ResourceUsageType.GENERAL_DATA,
description="Used for connectivities",
path="forcing/",
)
],
deferred_resources=[
DeferredResource(
usage_type=ResourceUsageType.GENERAL_DATA,
description="Used for connectivities",
key=output_deferred_resource_key,
)
]
)
)
cleanup_items.append((output_template.item_subtype, output_template.id))
# cleanup create activity
cleanup_create_activity_from_item_base(
item=output_template,
get_token=Tokens.user1
)
# regiter the model used in the model run
model = create_item_successfully(
item_subtype=ItemSubType.MODEL, token=write_token())
cleanup_items.append((model.item_subtype, model.id))
# cleanup create activity
cleanup_create_activity_from_item_base(
item=model,
get_token=Tokens.user1
)
# create and register model run workflow template
required_annotation_key = "annotation_key1"
optional_annotation_key = "annotation_key2"
mrwt_domain_info = ModelRunWorkflowTemplateDomainInfo(
display_name="IntegrationTestMRWT",
software_id=model.id, # model is software
software_version="v1.17",
input_templates=[TemplateResource(
template_id=input_template.id, optional=False)],
output_templates=[TemplateResource(
template_id=output_template.id, optional=False)],
annotations=WorkflowTemplateAnnotations(
required=[required_annotation_key],
optional=[optional_annotation_key]
)
)
mrwt = create_item_from_domain_info_successfully(
item_subtype=ItemSubType.MODEL_RUN_WORKFLOW_TEMPLATE, token=write_token(), domain_info=mrwt_domain_info)
cleanup_items.append((mrwt.item_subtype, mrwt.id))
# cleanup create activity
cleanup_create_activity_from_item_base(
item=mrwt,
get_token=Tokens.user1
)
# create model run to register
model_run_record = ModelRunRecord(
workflow_template_id=mrwt.id,
inputs=[TemplatedDataset(
dataset_template_id=input_template.id,
dataset_id=dataset_io_fixture[0],
dataset_type=DatasetType.DATA_STORE,
resources={
input_deferred_resource_key: '/path/to/resource.csv'
}
)],
outputs=[TemplatedDataset(
dataset_template_id=output_template.id,
dataset_id=dataset_io_fixture[1],
dataset_type=DatasetType.DATA_STORE,
resources={
output_deferred_resource_key: '/path/to/resource.csv'
}
)],
associations=AssociationInfo(
modeller_id=person.id,
requesting_organisation_id=organisation.id
),
display_name="Integration test fake model run display name",
start_time=(datetime.now().timestamp()),
end_time=(datetime.now().timestamp()),
description="Integration test fake model run",
annotations={
required_annotation_key: 'somevalue',
optional_annotation_key: 'some other optional value'
}
)
# register model run
response_model_run_record = register_modelrun_from_record_info_successfully(
get_token=write_token, model_run_record=model_run_record)
model_run_id = response_model_run_record.id
cleanup_items.append((ItemSubType.MODEL_RUN, model_run_id))
# create model run to register including a linked study
study = create_item_successfully(
item_subtype=ItemSubType.STUDY, token=write_token())
cleanup_items.append((study.item_subtype, study.id))
model_run_record.study_id = study.id
# register model run
response_model_run_record = register_modelrun_from_record_info_successfully(
get_token=write_token, model_run_record=model_run_record)
model_run_id = response_model_run_record.id
cleanup_items.append((ItemSubType.MODEL_RUN, model_run_id))
# - check the prov graph lineage is appropriate
# The lineage should have
activity_upstream_query = successful_basic_prov_query(
start=model_run_id,
direction=Direction.UPSTREAM,
depth=1,
token=Tokens.user1()
)
# model run -wasInformedBy-> study
assert_non_empty_graph_property(
prop=GraphProperty(
type="wasInformedBy",
source=model_run_id,
target=study.id
),
lineage_response=activity_upstream_query
)
# ensure invalid study id results in failure
model_run_record.study_id = '1234'
# register model run
failed, possible_model_run_record = register_modelrun_from_record_info_failed(
get_token=write_token, model_run_record=model_run_record, expected_code=400)
if not failed:
assert possible_model_run_record
model_run_id = possible_model_run_record.id
cleanup_items.append((ItemSubType.MODEL_RUN, model_run_id))
assert False, f"Model run registration with invalid study should have failed, but did not."
def test_create_and_update_history(dataset_io_fixture: Tuple[str, str], linked_person_fixture: ItemPerson, organisation_fixture: ItemOrganisation) -> None:
write_token = Tokens.user1
person = linked_person_fixture
organisation = organisation_fixture
# Data store API update workflow
# ==============================
# use one of the provided datasets for testing
id = dataset_io_fixture[0]
# fetch
raw_item = raw_fetch_item_successfully(
item_subtype=ItemSubType.DATASET, id=id, token=write_token())
item = DatasetFetchResponse.parse_obj(raw_item).item
assert item
assert isinstance(item, ItemBase)
original_item = item
# check history
history = item.history
# check length
assert len(
history) == 1, f"Should have single item in history but had {len(history)}."
# check item is parsable as type and is equal
domain_info_model = DatasetDomainInfo
entry = domain_info_model.parse_obj(history[0].item)
check_equal_models(entry, domain_info_model.parse_obj(item))
# check basic properties of the history item
history_item = history[0]
# timestamp is reasonable? - NA as this item was created previously
# check_current_with_buffer(history_item.timestamp)
# make sure username is correct
assert history_item.username == config.SYSTEM_WRITE_USERNAME
# check reason is not empty
assert history_item.reason != ""
# grant write
# --------------
auth_config = get_auth_config(
id=id, item_subtype=ItemSubType.DATASET, token=write_token())
auth_config.general.append("metadata-write")
put_auth_config(id=id, auth_payload=py_to_dict(auth_config),
item_subtype=ItemSubType.DATASET, token=write_token())
# update (user 2)
# --------------
write_token = Tokens.user2
existing_metadata = entry.collection_format
existing_metadata.dataset_info.name += "-updated"
reason = "test reason"
update_metadata_sucessfully(
dataset_id=id,
updated_metadata=py_to_dict(existing_metadata),
token=write_token(),
reason=reason
)
# check history
# --------------
raw_item = raw_fetch_item_successfully(
item_subtype=ItemSubType.DATASET, id=id, token=write_token())
item = DatasetFetchResponse.parse_obj(raw_item).item
assert item
assert isinstance(item, ItemBase)
# check history
history = item.history
# check length
assert len(
history) == 2, f"Should have two items in history but had {len(history)}."
# check item is parsable as type and is equal
domain_info_model = DatasetDomainInfo
first_entry = domain_info_model.parse_obj(history[0].item)
check_equal_models(first_entry, domain_info_model.parse_obj(item))
# check basic properties of the history items
history_item = history[0]
# timestamp is reasonable?
check_current_with_buffer(history_item.timestamp)
# make sure username is correct
assert history_item.username == config.SYSTEM_WRITE_USERNAME_2
# check reason is not empty
assert history_item.reason == reason
# check basic properties of the history items
history_item = history[1]
# make sure username is correct (should be original username)
assert history_item.username == config.SYSTEM_WRITE_USERNAME
# check reason is not empty
assert history_item.reason != ""
# perform reversion to v1
# -----------------------
# identify id and contents from history
history_id = history[1].id
# revert to id
revert_dataset_successfully(
dataset_id=id,
reason="integration tests",
history_id=history_id,
token=write_token()
)
# fetch
raw_item = raw_fetch_item_successfully(
item_subtype=ItemSubType.DATASET, id=id, token=write_token())
item = DatasetFetchResponse.parse_obj(raw_item).item
assert item
assert isinstance(item, ItemDataset)
# check contents after update
history = item.history
# check len is > + 1
assert len(
history) == 3, f"length of history should be three (two versions + update) but was {len(history)}."
# also check that the new item has the correct original contents
check_equal_models(
DatasetDomainInfo.parse_obj(original_item), DatasetDomainInfo.parse_obj(py_to_dict(history[0].item)))
# Direct registry API update workflow
# ===================================
# use user 1 initially
write_token = Tokens.user1
# create Model
item = ModelCreateResponse.parse_obj(raw_create_item_successfully(
item_subtype=ItemSubType.MODEL, token=write_token())).created_item
assert item
id = item.id
# make sure model is cleaned up
cleanup_items.append((ItemSubType.MODEL, id))
# check history
history = item.history
# check length
assert len(
history) == 1, f"Should have single item in history but had {len(history)}."
# check item is parsable as type and is equal
domain_info_model = ModelDomainInfo
entry = domain_info_model.parse_obj(history[0].item)
check_equal_models(entry, domain_info_model.parse_obj(item))
# check basic properties of the history item
history_item = history[0]
# timestamp is reasonable?
check_current_with_buffer(history_item.timestamp)
# make sure username is correct
assert history_item.username == config.SYSTEM_WRITE_USERNAME
# check reason is not empty
assert history_item.reason != ""
# Clean up Create Activity
cleanup_create_activity_from_item_base(item=item, get_token=write_token)
# grant write
# --------------
auth_config = get_auth_config(
id=id, item_subtype=ItemSubType.MODEL, token=write_token())
auth_config.general.append("metadata-write")
put_auth_config(id=id, auth_payload=py_to_dict(auth_config),
item_subtype=ItemSubType.MODEL, token=write_token())
# update (user 2)
# --------------
write_token = Tokens.user2
existing_metadata = entry
existing_metadata.display_name += "-updated"
reason = "test reason"
resp = update_item(
id=id,
updated_domain_info=existing_metadata,
item_subtype=ItemSubType.MODEL,
token=write_token(),
reason=reason
)
assert resp.status_code == 200, f"Non 200 code: {resp.status_code}, reason: {resp.text}"
# check history
# --------------
raw_item = raw_fetch_item_successfully(
item_subtype=ItemSubType.MODEL, id=id, token=write_token())
item = ModelFetchResponse.parse_obj(raw_item).item
assert item
assert isinstance(item, ItemBase)
# check history
history = item.history
# check length
assert len(
history) == 2, f"Should have two items in history but had {len(history)}."
first_entry = domain_info_model.parse_obj(history[0].item)
check_equal_models(first_entry, domain_info_model.parse_obj(item))
# check basic properties of the history items
history_item = history[0]
# timestamp is reasonable?
check_current_with_buffer(history_item.timestamp)
# make sure username is correct
assert history_item.username == config.SYSTEM_WRITE_USERNAME_2
# check reason is not empty
assert history_item.reason == reason
# check basic properties of the history items
history_item = history[1]
# make sure username is correct (should be original username)
assert history_item.username == config.SYSTEM_WRITE_USERNAME
# check reason is not empty
assert history_item.reason != ""
# perform reversion to v1
# -----------------------
# identify id and contents from history
history_id = history[1].id
# revert to id
revert_item_successfully(
item_subtype=ItemSubType.MODEL,
id=item.id,
history_id=history_id,
token=write_token()
)
# fetch
raw_item = raw_fetch_item_successfully(
item_subtype=ItemSubType.MODEL, id=id, token=write_token())
item = ModelFetchResponse.parse_obj(raw_item).item
assert item
assert isinstance(item, ItemModel)
# check contents after update
history = item.history
# check len is > + 1
assert len(
history) == 3, f"length of history should be three (two versions + update) but was {len(history)}."
# also check that the new item has the correct original contents
check_equal_models(
domain_info_model.parse_obj(item), ModelDomainInfo.parse_obj(py_to_dict(history[0].item)))
| provena/provena | tests/integration/tests/workflows/test_workflows.py | test_workflows.py | py | 16,675 | python | en | code | 3 | github-code | 90 |
44158407599 | # -*- coding: utf-8 -*-
import os
import base64
import time
from datetime import timedelta
from proj.celery import celery_app
from messaging.sms import SmsSubmit
from django.utils import timezone
from email.utils import formataddr
from celery.utils.log import get_task_logger
import asterisk.manager
from django.conf import settings
from .models import SMS, Template
from .utils import increase_send_sms, send_mail
__author__ = 'AlexStarov'
logger = get_task_logger(__name__)
PROJECT_PATH = os.path.abspath(os.path.dirname(__name__), )
path = lambda base: os.path.abspath(
os.path.join(
PROJECT_PATH, base
).replace('\\', '/')
)
def decorate(func):
start = time.time()
print('print: Декорируем ext1 %s(*args, **kwargs): | Start: %s' % (func.__name__, start, ), )
logger.info('logger: Декорируем ext1 %s... | Start: %s' % (func.__name__, start, ), )
def wrapped(*args, **kwargs):
start_int = time.time()
print('print: Декорируем int2 %s(*args, **kwargs): | Start: %s' % (func.__name__, start_int,), )
logger.info('logger: Декорируем int2 %s... | Start: %s' % (func.__name__, start_int,), )
print('print: Вызываем обёрнутую функцию с аргументами: *args и **kwargs ', )
logger.info('logger: Вызываем обёрнутую функцию с аргументами: *args и **kwargs ', )
result = func(*args, **kwargs)
stop_int = time.time()
print('print: выполнено! | Stop: %s | Running time: %s' % (stop_int, stop_int - start_int,), )
logger.info('logger: выполнено! | Stop: %s | Running time: %s' % (stop_int, stop_int - start_int,), )
return result
stop = time.time()
print('print: выполнено! | Stop: %s | Running time: %s' % (stop, stop - start, ), )
logger.info('logger: выполнено! | Stop: %s | Running time: %s' % (stop, stop - start, ), )
return wrapped
@celery_app.task(name='sms_ussd.tasks.send_sms', )
@decorate
def send_sms(*args, **kwargs):
sms_pk = kwargs.get('sms_pk')
try:
sms_inst = SMS.objects.get(pk=sms_pk, is_send=False, )
except SMS.DoesNotExist:
return False
manager = asterisk.manager.Manager()
# connect to the manager
try:
manager.connect(settings.ASTERISK_HOST)
manager.login(*settings.ASTERISK_AUTH)
# get a status report
response = manager.status()
print('print: response: ', response)
logger.info('logger: response: %s' % response)
# Success
number = '+380{code}{phone}'\
.format(
code=sms_inst.to_code,
phone=sms_inst.to_phone,
)
sms_to_pdu = SmsSubmit(number=number, text=sms_inst.message, )
sms_to_pdu.request_status = True
sms_to_pdu.validity = timedelta(days=2)
sms_list = sms_to_pdu.to_pdu()
# last_loop = len(sms_list) - 1
for i, pdu_sms in enumerate(sms_list):
time.sleep(0.5)
response = manager.command('dongle pdu {device} {pdu}'
.format(
device='Vodafone1',
pdu=pdu_sms.pdu,
),
)
print('print: response.data: ', response.data)
logger.info('logger: response.data: %s' % response.data)
# [Vodafone1] SMS queued for send with id 0x7f98c8004420\n--END COMMAND--\r\n
sended_sms = increase_send_sms()
print('print: sended SMS: ', sended_sms)
logger.info('logger: sended SMS: %s' % sended_sms)
# if i != last_loop:
# time.sleep(1.5)
time.sleep(0.5)
manager.logoff()
except asterisk.manager.ManagerSocketException as e:
print("Error connecting to the manager: %s" % e, )
except asterisk.manager.ManagerAuthException as e:
print("Error logging in to the manager: %s" % e, )
except asterisk.manager.ManagerException as e:
print("Error: %s" % e, )
finally:
# remember to clean up
try:
manager.close()
except Exception as e:
print('print: sms_ussd/task.py: e: ', e)
logger.info('logger: sms_ussd/task.py: e: %s' % e)
sms_inst.task_id = None
sms_inst.is_send = True
sms_inst.send_at = timezone.now()
sms_inst.save(skip_super_save=True, )
return True, timezone.now(), '__name__: {0}'.format(str(__name__))
@celery_app.task(name='sms_ussd.tasks.send_received_sms', )
@decorate
def send_received_sms(*args, **kwargs):
try:
smses = SMS.objects.filter(direction=1, is_send=False, )
except SMS.DoesNotExist:
return False
logger.info(len(smses), )
send_sms_successful = True
for sms in smses:
sms.message = base64.b64decode(sms.message_b64).decode('utf8')
subject = 'Направение SMS: {direction} | от аббонента: {from_phone_char} | к аббоненту: {to_phone_char} '\
'| дата и время получения сообщения: {received_at}'\
.format(
direction=SMS.DIRECTION[sms.direction-1][1],
from_phone_char=sms.from_phone_char,
to_phone_char=sms.to_phone_char,
received_at=sms.received_at,
)
message = 'Направление: {direction}\nОт аббонента: {from_phone_char}\nАббоненту: {to_phone_char}\n'\
'Дата и Время Получения: {received_at}\nСообщение:\n{message}'\
.format(
direction=SMS.DIRECTION[sms.direction-1][1],
from_phone_char=sms.from_phone_char,
to_phone_char=sms.to_phone_char,
received_at=sms.received_at,
message=sms.message,
)
message_kwargs = {
'from_email': formataddr(('Телефонная станция Asterisk Keksik', 'site@keksik.com.ua', ), ),
'to': [formataddr(('Менеджер магазина Keksik', 'site@keksik.com.ua', ), ), ],
'subject': subject,
'body': message,
}
if send_mail(**message_kwargs):
sms.sim_id = 255016140761290
sms.task_id = None
sms.is_send = True
sms.send_at = timezone.now()
sms.save(skip_super_save=True, )
else:
send_sms_successful = False
if send_sms_successful:
return True, timezone.now(), '__name__: {0}'.format(str(__name__))
else:
return False, timezone.now(), '__name__: {0}'.format(str(__name__))
@celery_app.task(name='sms_ussd.task.send_template_sms')
@decorate
def send_template_sms(*args, **kwargs):
phone = kwargs.pop('sms_to_phone_char', False, )
if not phone:
return False
phone = phone.replace(' ', '').strip('+') \
.replace('(', '').replace(')', '').replace('-', '') \
.lstrip('380').lstrip('38').lstrip('80').lstrip('0')
try:
int_phone = int(phone[2:])
int_code = int(phone[:2])
except ValueError:
return False
template_name = kwargs.pop('sms_template_name', False, )
try:
template = Template.objects.get(name=template_name, )
except Template.DoesNotExist:
return False
template_dict = {}
for key, value in kwargs.items():
if key.startswith('sms_'):
template_dict.update({key.lstrip('sms_'): value})
message = template.template.format(**template_dict)
sms_inst = SMS(template=template,
direction=2,
task_id=None,
sim_id=255016140761290,
is_send=True,
message=message,
to_phone_char=phone,
to_code=int_code,
to_phone=int_phone,
send_at=timezone.now(),
)
manager = asterisk.manager.Manager()
# connect to the manager
try:
manager.connect(settings.ASTERISK_HOST)
manager.login(*settings.ASTERISK_AUTH)
# get a status report
response = manager.status()
print('response: ', response)
number = '+380{code}{phone}'\
.format(
code=sms_inst.to_code,
phone=sms_inst.to_phone,
)
sms_to_pdu = SmsSubmit(number=number, text=sms_inst.message, )
sms_to_pdu.request_status = False
sms_to_pdu.validity = timedelta(days=2)
sms_list = sms_to_pdu.to_pdu()
# last_loop = len(sms_list) - 1
for i, pdu_sms in enumerate(sms_list):
time.sleep(0.5)
response = manager.command('dongle pdu {device} {pdu}'
.format(
device='Vodafone1',
pdu=pdu_sms.pdu,
),
)
print('print: response.data: ', response.data)
logger.info('logger: response.data: %s' % response.data)
# [Vodafone1] SMS queued for send with id 0x7f98c8004420\n--END COMMAND--\r\n
sended_sms = increase_send_sms()
print('print: sended SMS: ', sended_sms)
logger.info('logger: sended SMS: %s' % sended_sms)
# if i != last_loop:
# time.sleep(1.5)
time.sleep(0.5)
manager.logoff()
except asterisk.manager.ManagerSocketException as e:
print("Error connecting to the manager: %s" % e, )
except asterisk.manager.ManagerAuthException as e:
print("Error logging in to the manager: %s" % e, )
except asterisk.manager.ManagerException as e:
print("Error: %s" % e, )
finally:
# remember to clean up
try:
manager.close()
except Exception as e:
print('sms_ussd/tasks.py: e: ', e, )
sms_inst.save(skip_super_save=True, )
return True, timezone.now(), '__name__: {0}'.format(str(__name__))
| denispan1993/vitaliy | applications/sms_ussd/tasks.py | tasks.py | py | 10,370 | python | en | code | 0 | github-code | 90 |
45858587209 | import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np # to create the lambda grid
import pandas as pd
from sklearn import linear_model
from sklearn.linear_model import Lasso # for lasso regression only
# #############################################################################
col_list = ["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"]
X = pd.read_csv("data.csv", usecols = col_list)
X_scaled = X - np.mean(X) # rescale all features to have mean zero
X_scaled = X_scaled / np.std(X_scaled) # normalize all features to var = 1
target = ["Y"]
y = pd.read_csv("data.csv", usecols = target)
y = np.array(y)
y = np.reshape(y, (38,))
X_np = np.array(X_scaled)
# #############################################################################
lambdas = np.arange (0, 20.1, 0.1) # 0, 0.1, 0.2, ..., 20
loo_err_avg = [] # initialise the Leave-one-out error array used to average the LOO for all n for all lambda
loo_counter = 0 # index for the loo_err_avg arrary above
lambda_optimal = 0
loo_err_avg_optimal = 0
for l in lambdas:
loo_err = [] # initialise the Leave-one-out error used to calculate n errors of a given lambda.
for n in list(range(0,38)): # index 0, ..., 37
X_train = X_np # copy X_np into X_train
X_train = np.delete(X_train, n, 0) # every row except n
X_test = X_np[n] # row n
y_train = y
y_train = np.delete(y_train, n, 0)
y_test = y[n]
# lasso regression on the train set
lasso_weight = []
lasso = Lasso(alpha = l, fit_intercept = True)
lasso.fit(X_train, y_train)
lasso_weight.append(lasso.coef_)
# test the lasso on the test set (find the error and save into loo_err)
y_pred = lasso.predict(X_test.reshape(1,-1))
loo_err.append(np.sum((y_test - y_pred)**2))
loo_err_avg.append(np.mean(loo_err))
if (min(loo_err_avg) == loo_err_avg[loo_counter]):
lambda_optimal = l
loo_err_avg_optimal = loo_err_avg[loo_counter]
loo_counter = loo_counter + 1
# #############################################################################
plt.plot(lambdas, loo_err_avg)
plt.title('Leave-One-Out Error as lambda grows (lasso)')
plt.ylabel('LOO Error')
plt.xlabel('lambda')
print("The best lambda value = ", lambda_optimal)
print("The LOO Error at lambda ", lambda_optimal, " is ", loo_err_avg_optimal)
| alexlee2000/LASSO_and_Ridge_Regression | Part6.py | Part6.py | py | 2,420 | python | en | code | 1 | github-code | 90 |
37712882382 | import numpy as np
import pandas as pd
from sklearn import linear_model
from scipy import signal
import argparse
#==============================================================================
# COMMAND LINE ARGUMENTS
# Create parser object
cl_parser= argparse.ArgumentParser(
description="Filter data and compute doubling time."
)
# ARGUMENTS
# Path to data folder
cl_parser.add_argument(
"--data_path", action="store", default="data/",
help="Path to data folder"
)
# Collect command-line arguments
cl_options= cl_parser.parse_args()
# Create Linear Regression Model
reg= linear_model.LinearRegression(fit_intercept= True)
def get_doubling_rate_via_regression(in_array):
""" Approximate the doubling time using linear regression.
3 datapoints are used to approximate the number of days
it takes for the number of infected people to double at each point.
Parameters:
----------
in_array: List/ numpy Array
input data
Returns:
-------
doubling_time: double
"""
# Assert output vector is 3 datapoints long
assert len(in_array)==3
y= np.array(in_array)
# Calculate slope using central difference
X= np.arange(-1,2).reshape(-1,1)
# Fit data
reg.fit(X,y)
intercept= reg.intercept_
slope= reg.coef_
return intercept/slope
def rolling_regression(df_input, col="confirmed"):
""" Roll over entries to approximate the doubling time using linear regression.
Parameters:
----------
df_input: pandas DataFrame
input data
col: string
key to column which holds data entries
Returns:
-------
result: pandas Series
"""
days_back= 3
result= df_input[col].rolling(
window=days_back,
min_periods=days_back
).apply(get_doubling_rate_via_regression, raw=False)
return result
def savgol_filter(df_input, col='confirmed', window=5):
""" Filter data using savgol filter.
Parameters:
----------
df_input: pandas DataFrame
input data
col: string
key to column which holds data entries
Returns:
-------
df_result: pandas DataFrame
df_input with additional column with name col+"_filtered"
"""
window=5
degree=1
df_result=df_input
filter_in= df_input[col].fillna(0)
result= signal.savgol_filter(
np.array(filter_in), window, degree
)
df_result[col+ "_filtered"]= result
return df_result
def calc_filtered_data(df_input, filter_on='confirmed'):
""" Filter data using savgol filter and return merged dataframe
Parameters:
----------
df_input: pandas DataFrame
input data
filter_on: string
key to column which holds data entries on which to filter
Returns:
-------
df_out: pandas DataFrame
df_input with additional column with name filter_on+"_filtered"
"""
# Assertion
must_contain= set(['state', 'country', filter_on])
assert must_contain.issubset(set(df_input.columns))
pd_filt_res= df_input.groupby(['state','country']).apply(savgol_filter, filter_on).reset_index()
df_out= pd.merge(df_input, pd_filt_res[['index', filter_on+'_filtered']], on=['index'], how='left')
return df_out
def calc_doubling_rate(df_input, double_on='confirmed'):
""" Calculate doubling rate using linear regression and return merged dataframe
Parameters:
----------
df_input: pandas DataFrame
input data
double_on: string
key to column which holds data entries
Returns:
-------
df_out: pandas DataFrame
df_input with additional column with name double_on+"_filtered"
"""
# Assertion
must_contain= set(['state', 'country', double_on])
assert must_contain.issubset(set(df_input.columns))
pd_doub_res= df_input.groupby(['state','country']).apply(rolling_regression, double_on).reset_index()
pd_doub_res= pd_doub_res.rename(columns={'level_2': 'index', double_on: double_on+"_DR"})
df_out= pd.merge(df_input, pd_doub_res[['index', double_on+'_DR']], on=['index'], how='left')
return df_out
if __name__ == "__main__":
# Test data
test_data= np.array([2,4,6])
# Expected result= 2
result= get_doubling_rate_via_regression(test_data)
assert(int(result[0]) == 2)
pd_JH_rel= pd.read_csv(
cl_options.data_path + 'processed/COVID_relational_full.csv',
sep=';', parse_dates=[0]
)
pd_JH_rel= pd_JH_rel.sort_values('date', ascending=True).reset_index(drop=True)
pd_JH_rel= pd_JH_rel.reset_index()
pd_res= calc_filtered_data(pd_JH_rel, filter_on='confirmed')
pd_res= calc_doubling_rate(pd_res, double_on='confirmed')
pd_res= calc_doubling_rate(pd_res, double_on='confirmed_filtered')
# Cleanup confirmed_filtered_DR
DR_mask= pd_res['confirmed']>100
pd_res['confirmed_filtered_DR']= pd_res['confirmed_filtered_DR'].where(DR_mask, other=np.NaN)
# Save
pd_res.to_csv(cl_options.data_path + 'processed/COVID_final_set.csv', sep=';', index=False)
| Faaizz/covid_19_analysis | src/features/build_features.py | build_features.py | py | 5,120 | python | en | code | 0 | github-code | 90 |
17941648849 | import sys
import math
from collections import defaultdict
sys.setrecursionlimit(10**7)
def input():
return sys.stdin.readline()[:-1]
mod = 10**9 + 7
def I(): return int(input())
def LI(): return list(map(int, input().split()))
def LIR(row,col):
if row <= 0:
return [[] for _ in range(col)]
elif col == 1:
return [I() for _ in range(row)]
else:
read_all = [LI() for _ in range(row)]
return map(list, zip(*read_all))
#################
s = list(input())
ans = 0
left = 0
right = len(s)-1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
elif s[left] == 'x':
ans += 1
left += 1
elif s[right] == 'x':
ans += 1
right -= 1
else:
print(-1)
exit()
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03569/s253004695.py | s253004695.py | py | 801 | python | en | code | 0 | github-code | 90 |
73061944936 | from misc import dp, bot
from aiogram.types import Message
from aiogram.types.message import ContentType
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
import logging
from .states import AdminState, ShowSearch, cancel_keyboard, admin_keyboard
from .menu import show_search
from user import get_all_users
@dp.message_handler(content_types = ContentType.ANY, state=AdminState.wait_message_text)
async def admin_send_message(message: Message, state: FSMContext):
"""
Отправить сообщение всем пользователям.
Parameters
----------
message : Message
Сообщение
state : FSMContext
Состояние админа
"""
if message.text == 'Отмена':
await AdminState.wait_admin_action.set()
await message.answer('Выбери действие',
reply_markup=admin_keyboard)
else:
users = get_all_users()
photo = message.photo
video = message.video
text = message.text
for user in users:
if photo:
await bot.send_photo(user, photo[-1].file_id)
if video:
await bot.send_video(user, video.file_id)
if text:
await bot.send_message(user, text)
@dp.message_handler(state=AdminState.wait_admin_action)
async def admin_menu(message: Message, state: FSMContext):
"""
Действия в меню для админа.
Parameters
----------
message : Message
Сообщение
state : FSMContext
Состояние админа
"""
import pdb; pdb.set_trace()
if message.text == _('Send a message to everyone'):
await AdminState.wait_message_text.set()
await message.answer('Пришли сообщение, которое нужно всем разослать, если хочешь отменить, то нажми отмена.',
reply_markup=cancel_keyboard)
if message.text == _('Text search'):
await ShowSearch.waiting_for_search_text.set()
| katustrica/bot_twitt | handlers/admin.py | admin.py | py | 2,153 | python | ru | code | 0 | github-code | 90 |
28407545687 | class Solution(object):
def minPathSum(self, grid):
n,m = len(grid), len(grid[0])
# f[i][j] - minimal cost to get to the i-th field
f = [[0 for _ in range(m)] for _ in range(n )]
f[0][0] = grid[0][0]
for i in range(1,n):
f[i][0] = f[i-1][0] + grid[i][0]
for j in range(1,m):
f[0][j] = f[0][j-1] + grid[0][j]
for i in range(1, n):
for j in range(1, m):
f[i][j] = min(f[i-1][j],f[i][j-1]) + grid[i][j]
return f[-1][-1] | psp515/LeetCode | 64-minimum-path-sum/64-minimum-path-sum.py | 64-minimum-path-sum.py | py | 585 | python | en | code | 1 | github-code | 90 |
29154139127 | from datetime import datetime
def get_days_from_today(date):
list = []
date_1 = date.split("-")
date_now = datetime.now()
for i in date_1:
i = int(i)
list.append(i)
date_2 = datetime(year=list[0], month=list[1], day=list[2])
result = date_now - date_2
return result.days
print(get_days_from_today('2020-10-09')) | LeadShadow/hw8-autocheck | 1ex.py | 1ex.py | py | 359 | python | en | code | 0 | github-code | 90 |
3939023320 | def read_input(path):
instructions = []
with open(path) as f:
for line in f:
tmp = line.split()
instructions.append((tmp[0], int(tmp[1])))
return instructions
def calc_position(instructions):
x = 0
y = 0
for operation, distance in instructions:
if operation == "forward":
x += distance
elif operation == "down":
y += distance
else:
y -= distance
return x, y
def calc_position_v2(instructions):
x = 0
y = 0
aim = 0
for operation, distance in instructions:
if operation == "forward":
x += distance
y += distance * aim
elif operation == "down":
aim += distance
else:
aim -= distance
return x, y
instructions = read_input("2021/inputs/day02.txt")
x, y = calc_position(instructions)
print(f"Horizontal position is: {x} and depth is {y}. Their product is {x*y}")
x, y = calc_position_v2(instructions)
print(
"According to part 2 logic: Horizontal position is:"
f"{x} and depth is {y}. Their product is {x*y}"
)
| 95ep/AoC | y2021/day02.py | day02.py | py | 1,134 | python | en | code | 0 | github-code | 90 |
5417748598 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 16:35:51 2021
@author: jsy18
"""
# vary the probability of flipping the bit
import numpy as np
import matplotlib.pyplot as plt
from errcorrect_sigma2 import QR,rng, bit_flip_code
import scipy as sp
from scipy.optimize import curve_fit
#%%
#noiseParamList=[0.005,0.01,0.02,0.03,0.05,0.1,0.2,0.3,0.4,0.5]
noiseParamList=[0.3]
#noiseParamList=np.arange(0.01,0.5,0.02)
flipparamlist=np.arange(0.01,0.5,0.02)
xdata=[]
ydata1_std=[]
ydata2_std=[]
ydata1_bitflip=[]
ydata2_bitflip=[]
ydata1_errcor=[]
ydata2_errcor=[]
iteration = 100000
for n in noiseParamList:
for tprob in flipparamlist:
qr1 = QR(inp=0,noiseParam=n)
for i in range(0,iteration):
qr1.QRrun1()
# print('qr1.thetalist1',qr1.thetalist1)
# std procedure
qr2_std = QR(inp=qr1.thetalist1,noiseParam=n)
qr2_std.QRrun2()
countup1_std=0
countup2_std=0
for i in qr1.result1:
if i==0:
countup1_std+=1
for i in qr2_std.result2:
if i==0:
countup2_std+=1
xdata.append(tprob)
ydata1_std.append(100*countup1_std/len(qr1.result1))
ydata2_std.append(100*countup2_std/len(qr2_std.result2))
#bit flip
b = bit_flip_code(inptheta=qr1.thetalist1)
b.bit_flip(tt=tprob)
# gg1 = b.inptheta1
# print(gg1)
qr2_bitflip = QR(inp=b.inptheta1,noiseParam=n)
qr2_bitflip.QRrun2()
# countup1_bitflip=0
countup2_bitflip=0
# for i in qr1.result1:
# if i==0:
# countup1_bitflip+=1
for i in qr2_bitflip.result2:
if i==0:
countup2_bitflip+=1
# ydata1_bitflip.append(100*countup1/len(qr1.result1))
ydata2_bitflip.append(100*countup2_bitflip/len(qr2_bitflip.result2))
#error correction
b.error_correct()
qr2_errcor = QR(inp=b.inptheta1,noiseParam=n)
qr2_errcor.QRrun2()
countup1_errcor=0
countup2_errcor=0
# for i in qr1.result1:
# if i==0:
# countup1_bitflip+=1
for i in qr2_errcor.result2:
if i==0:
countup2_errcor+=1
# ydata1_errcor.append(100*countup1/len(qr1.result1))
ydata2_errcor.append(100*countup2_errcor/len(qr2_errcor.result2))
print(n)
print(tprob)
def writetofile_errcorrect(filename='QRerr.1.1e5.txt'): #save p(k) and k
myfile=open(filename,'x')
for i in range(0,len(xdata)):
myfile.write(str(xdata[i]) + " " + str(ydata2_errcor[i]) + '\n') #
print('i',i)
# print(max(self.deg_dist))
myfile.close()
#writetofile(filename='QRerr.2.1e5.txt')
#%%
xplist = np.arange(0,0.5,0.01)
def linear(m,x,c):
return m * x + c
def no_2or3(plist):
return (3*(plist**2)*(1-plist)+plist**3)*100
popt,pcov = sp.optimize.curve_fit(linear,xdata, ydata2_bitflip)
plt.figure(2)
#plt.plot(xdata,ydata2_std,'x',linestyle='',marker='.',label='normal')
plt.plot(xdata,ydata2_bitflip,linestyle='',marker='.',label='bit flip')
plt.plot(xdata,ydata2_errcor,linestyle='',marker='.',label='err cor')
plt.plot(xplist,linear(popt[0],xplist,popt[1]),linestyle=':',label='Theoretical Prediction')
plt.plot(xplist, no_2or3(xplist),linestyle=':',label='Theoretical Prediction')
plt.legend()
plt.xlabel('Bit Flip parameter')
plt.ylabel('percentage of |0> state')
#plt.title('|0> occurrence after 2 noisy pi/2 rotations in y with error correction')
plt.show()
fig,(ax1, ax2)=plt.subplots(2)
#plt.plot(xdata,ydata2_std,'x',linestyle='',marker='.',label='normal')
ax1.plot(xdata,ydata2_bitflip,linestyle='',marker='.',label='bit flip')
ax2.plot(xdata,ydata2_errcor,linestyle='',marker='.',label='err cor')
ax1.legend()
ax2.legend()
plt.xlabel('Bit Flip parameter')
plt.ylabel('percentage of |0> state')
#plt.title('|0> occurrence after 2 noisy pi/2 rotations in y with error correction')
plt.show() | JieSing/BScproject | vary_bitflip.py | vary_bitflip.py | py | 4,259 | python | en | code | 0 | github-code | 90 |
8798311869 | import bs4
import json
import parse
import argparse
import requests
import pandas as pd
import alive_progress as ap
from os import path
def get_mod_gitlinks(path: str):
links = []
with open(path) as f:
for line in f.readlines():
matches = parse.findall("{:s}github.com/{}{:s}v{:d}.{:d}.{:d}",line)
for result in matches:
links.append((0,f"github.com/{result.fixed[1]}"))
return links
def get_npm_links(path: str):
links = []
with open(path) as f:
packageJson = json.load(f)
if "dependencies" not in packageJson:
print("No `dependencies` field found in package json")
exit()
dependencies = packageJson["dependencies"]
for d in dependencies:
links.append((1,f"npmjs.com/package/{d}"))
return links
return links
return []
def get_license_from_github(link: str):
try:
licenseType = "Unknown"
html = requests.get(f"https://{link}")
soup = bs4.BeautifulSoup(html.text,'html.parser')
license = soup.select('h3:-soup-contains("License") + div.mt-2 > a')
if len(license) > 0:
licenseType = license[0].get_text()
if "View" in licenseType:
licenseType = "Unknown"
return licenseType.strip('"').strip()
except Exception as err:
print(err)
return "Error"
def get_license_from_npm(link: str):
try:
licenseType = "Unknown"
html = requests.get(f"https://{link}")
soup = bs4.BeautifulSoup(html.text,'html.parser')
license = soup.select('h3:-soup-contains("License") + p')
if len(license) > 0:
licenseType = license[0].get_text()
return licenseType.strip('"').strip()
except Exception as err:
print(err)
return "Error"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape config files for license info')
parser.add_argument('--file',dest='file',type=str,help='file to be parsed')
parser.add_argument('--repo',dest='repo',type=str,help='repo that should be added to the sheet',default="")
parser.add_argument('--lang',dest='lang',type=str,help='programming language constant',default="Go")
parser.add_argument('--side',dest='side',type=str,help='ServerSide or Distributed',default='Server-Side')
parser.add_argument('--output',dest='output',type=str,help='output csv file',default="./o.csv")
parser.add_argument('--used',dest='used',type=str,help='source or binary inclusion',default='Binary')
parser.add_argument('--link',dest='link',type=str,help='how is the package linked into program',default='Static')
args = parser.parse_args()
if not path.isfile(args.file):
print(f"'{args.file}' does not exist")
exit()
links = []
if args.file.endswith(".mod"):
links = get_mod_gitlinks(args.file)
elif args.file.endswith(".json"):
links = get_npm_links(args.file)
else:
print("Unsupported file type")
exit()
df = pd.DataFrame(columns=['repo','Package Name','Used as source or binary','License type','Server-Side or Distributed','Modified','Link Type','Program Lang.'])
with ap.alive_bar(len(links)) as bar:
for link in links:
row = [
args.repo,
link[1],
args.used,
"Unknown",
args.side,
"No",
args.link,
args.lang
]
licenseType = "Unknown"
if link[0] == 0:
licenseType = get_license_from_github(link[1])
elif link[0] == 1:
licenseType = get_license_from_npm(link[1])
row[3] = licenseType
df.loc[len(df.index)] = row
bar()
print(f"Saving output file @ {args.output}")
df.to_csv(args.output,encoding='utf-8',index=False)
| DeveloperChaseLewis/scripts | gitscrape.py | gitscrape.py | py | 4,147 | python | en | code | 0 | github-code | 90 |
21694077720 | def file_to_list(filename):
fin = open(filename, "rt", encoding="utf-8")
names = fin.readlines()
fin.close()
return names
def order_name(names):
return names.sort()
def list_to_file(names):
messages = {"total": "Total of {} names"}
fout = open("41_out.txt", "wt", encoding="utf-8")
print("\n" + messages["total"].format(len(names)))
print("-" * 17)
for name in names:
print(name, file=fout, end="")
fout.close()
def read_file(filename):
content = ""
fin = open(filename, "rt", encoding="utf-8")
for line in fin:
content += line
fin.close()
return content
def main():
names = file_to_list("../data/41.txt")
order_name(names)
list_to_file(names)
print(read_file("41_out.txt"))
main()
| jbaltop/57_Challenges | part7/41.py | 41.py | py | 790 | python | en | code | 29 | github-code | 90 |
5291662838 | from __future__ import annotations
import logging
import pathlib
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Sequence, Tuple, Union
from composer.profiler.json_trace_handler import JSONTraceHandler
from composer.profiler.marker import Marker
from composer.profiler.profiler_action import ProfilerAction
from composer.profiler.system_profiler import SystemProfiler
from composer.profiler.torch_profiler import TorchProfiler
from composer.profiler.trace_handler import TraceHandler
from composer.utils import ensure_tuple, parse_uri
if TYPE_CHECKING:
from composer.core import Callback, State
__all__ = ['Profiler']
log = logging.getLogger(__name__)
class Profiler:
"""Composer Profiler.
See the :doc:`Profiling Guide </trainer/performance_tutorials/profiling>` for additional information.
Args:
schedule ((State) -> ProfilerAction): The profiling scheduling function.
It takes the training state and returns a :class:`.ProfilerAction`.
For convenience, Composer includes a :meth:`~composer.profiler.cyclic_schedule.cyclic_schedule` helper.
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
original_profiler_init = Profiler.__init__
def new_profiler_init(self, dummy_ellipsis=None, **kwargs):
if 'trace_handlers' not in kwargs:
kwargs['trace_handlers'] = []
kwargs['torch_prof_memory_filename'] = None
original_profiler_init(self, **kwargs)
Profiler.__init__ = new_profiler_init
.. testcode::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(
...,
schedule=cyclic_schedule(
skip_first=1,
wait=0,
warmup=1,
active=4,
repeat=1,
),
torch_prof_memory_filename=None,
)
trace_handlers (TraceHandler | Sequence[TraceHandler]): Trace handlers which record and
save profiling data to traces. Additionally supports full object store paths.
sys_prof_cpu (bool, optional): Whether to record cpu statistics. (default: ``True``).
sys_prof_memory (bool, optional): Whether to record memory statistics. (default: ``False``).
sys_prof_disk (bool, optional): Whether to record disk statistics. (default: ``False``).
sys_prof_net (bool, optional): Whether to record network statistics. (default: ``False``).
sys_prof_stats_thread_interval_seconds (float, optional): Interval to record stats, in seconds.
(default: ``0.5``).
torch_prof_folder (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_filename (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_remote_file_name (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
Additionally supports full object store paths e.g: s3://bucket/path/to/file.
torch_prof_memory_filename (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_memory_remote_file_name (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
Additionally supports full object store paths e.g: s3://bucket/path/to/file.
torch_prof_overwrite (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_use_gzip (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_record_shapes (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_profile_memory (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_with_stack (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_with_flops (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_num_traces_to_keep (int, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
"""
def __init__(
self,
schedule: Callable[[State], ProfilerAction],
trace_handlers: List[TraceHandler],
sys_prof_cpu: bool = True,
sys_prof_memory: bool = False,
sys_prof_disk: bool = False,
sys_prof_net: bool = False,
sys_prof_stats_thread_interval_seconds: float = 0.5,
torch_prof_folder: str = '{run_name}/torch_traces',
torch_prof_filename: str = 'rank{rank}.{batch}.pt.trace.json',
torch_prof_remote_file_name: Optional[str] = '{run_name}/torch_traces/rank{rank}.{batch}.pt.trace.json',
torch_prof_memory_filename: Optional[str] = 'rank{rank}.{batch}.pt.memory_trace.html',
torch_prof_memory_remote_file_name: Optional[
str] = '{run_name}/torch_memory_traces/rank{rank}.{batch}.pt.memory_trace.html',
torch_prof_overwrite: bool = False,
torch_prof_use_gzip: bool = False,
torch_prof_record_shapes: bool = False,
torch_prof_profile_memory: bool = True,
torch_prof_with_stack: bool = False,
torch_prof_with_flops: bool = True,
torch_prof_num_traces_to_keep: int = -1,
) -> None:
self._names_to_markers: Dict[str, Marker] = {}
self._trace_handlers = list(ensure_tuple(trace_handlers))
self.schedule = schedule
self.state = None
self._callbacks: List[Callback] = []
self.remote_filenames: List[str] = []
# First, add each remote file name to self.remote_filenames to create RemoteUploaderDownloader logger in trainer. [s3://bucket/path/to/file]
# Then modify remote file name to be a local path to pass into torch_profiler and system_profiler. e.g: path/to/file
if torch_prof_remote_file_name:
self.remote_filenames.append(torch_prof_remote_file_name)
_, _, torch_prof_remote_file_name = parse_uri(torch_prof_remote_file_name)
if torch_prof_memory_remote_file_name:
self.remote_filenames.append(torch_prof_memory_remote_file_name)
_, _, torch_prof_memory_remote_file_name = parse_uri(torch_prof_memory_remote_file_name)
for handler in self._trace_handlers:
if isinstance(handler, JSONTraceHandler):
if handler.remote_file_name:
self.remote_filenames.append(handler.remote_file_name)
_, _, handler.remote_file_name = parse_uri(handler.remote_file_name)
if handler.merged_trace_remote_file_name:
self.remote_filenames.append(handler.merged_trace_remote_file_name)
_, _, handler.merged_trace_remote_file_name = parse_uri(handler.merged_trace_remote_file_name)
if sys_prof_cpu or sys_prof_memory or sys_prof_disk or sys_prof_net:
self._callbacks.append(
SystemProfiler(profile_cpu=sys_prof_cpu,
profile_memory=sys_prof_memory,
profile_disk=sys_prof_disk,
profile_net=sys_prof_net,
stats_thread_interval_seconds=sys_prof_stats_thread_interval_seconds))
if torch_prof_memory_filename is not None:
if not (torch_prof_with_stack and torch_prof_record_shapes and torch_prof_profile_memory):
raise ValueError(
f'torch_prof_memory_filename is set. Generating the memory timeline graph requires all the three flags torch_prof_with_stack, torch_prof_record_shapes, and torch_prof_profile_memory to be true. Got torch_prof_with_stack={torch_prof_with_stack}, torch_prof_record_shapes={torch_prof_record_shapes}, torch_prof_profile_memory={torch_prof_profile_memory}'
)
log.info(
f'Memory profiling is enabled and uses {torch_prof_memory_filename} as the filename to generate the memory timeline graph. To disable the memory timeline graph generation, explicitly set torch_prof_memory_filename to None.'
)
else:
log.info(f'torch_prof_memory_filename is explicitly set to None. Memory timeline will not be be generated.')
if torch_prof_record_shapes or torch_prof_profile_memory or torch_prof_with_stack or torch_prof_with_flops:
self._callbacks.append(
TorchProfiler(filename=torch_prof_filename,
folder=torch_prof_folder,
remote_file_name=torch_prof_remote_file_name,
memory_filename=torch_prof_memory_filename,
memory_remote_file_name=torch_prof_memory_remote_file_name,
num_traces_to_keep=torch_prof_num_traces_to_keep,
overwrite=torch_prof_overwrite,
record_shapes=torch_prof_record_shapes,
profile_memory=torch_prof_profile_memory,
use_gzip=torch_prof_use_gzip,
with_stack=torch_prof_with_stack,
with_flops=torch_prof_with_flops))
def bind_to_state(
self,
state: State,
):
"""Bind the profiler to the ``state``.
.. note::
The :class:`.Trainer` automatically invokes this method.
Args:
state (State): The training state.
"""
self.state = state
self.state.callbacks.extend(self._callbacks)
self.state.callbacks.extend(self._trace_handlers)
@property
def trace_handlers(self):
"""Profiler trace handlers."""
return self._trace_handlers
@trace_handlers.setter
def trace_handlers(self, trace_handlers: Union[TraceHandler, Sequence[TraceHandler]]):
"""Profiler trace handlers."""
self._trace_handlers[:] = ensure_tuple(trace_handlers)
def record_chrome_json_trace_file(self, filepath: Union[str, pathlib.Path]):
"""Record trace events in Chrome JSON format in the trace handlers.
See `this document <https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview>`_
for more information about Chrome JSON format.
.. note::
For custom profiling, it is recommended to use :meth:`marker` instead of manually creating a Chrome JSON
trace file. By default, the Composer Profiler will automatically saving :class:`.Marker` events in Chrome
JSON format.
This method exists for external profilers that natively record events in Chrome JSON format (such as the
:class:`~composer.profiler.torch_profiler.TorchProfiler`). These profilers can use this method to route
their profiling traces to the Composer profiler :attr:`~trace_handlers` so events from both the Composer
Profiler and external profilers are recorded in the same trace file.
"""
for recorder in self.trace_handlers:
recorder.process_chrome_json_trace_file(pathlib.Path(filepath))
def marker(
self,
name: str,
actions: Sequence[ProfilerAction] = (ProfilerAction.WARMUP, ProfilerAction.ACTIVE,
ProfilerAction.ACTIVE_AND_SAVE),
record_instant_on_start: bool = False,
record_instant_on_finish: bool = False,
categories: Union[List[str], Tuple[str, ...]] = (),
) -> Marker:
"""Create and get an instance of a :class:`.Marker`.
If a :class:`.Marker` with the specified ``name`` does not already exist, it will be created.
Otherwise, the existing instance will be returned.
.. note::
:meth:`.Profiler.marker()` should be used to construct markers. :class:`.Marker` **should not** be
instantiated directly by the user.
For example:
.. testsetup:: composer.profiler.profiler.Profiler.marker
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[], torch_prof_memory_filename=None)
profiler.bind_to_state(state)
state.profiler = profiler
.. doctest:: composer.profiler.profiler.Profiler.marker
>>> marker = profiler.marker("foo")
>>> marker
<composer.profiler.marker.Marker object at ...>
Please see :meth:`.Marker.start()` and :meth:`.Marker.finish()` for usage on creating markers to measure duration events,
:meth:`.Marker.instant()` for usage on creating markers to mark instant events and :meth:`.Marker.counter()` for usage on
creating markers for counting.
Args:
name (str): The name for the :class:`.Marker`.
actions (Sequence[ProfilerAction], optional): :class:`.ProfilerAction` states to record on.
Defaults to (:attr:`~.ProfilerAction.WARMUP`, :attr:`~.ProfilerAction.ACTIVE`,
:attr:`~.ProfilerAction.ACTIVE_AND_SAVE`).
record_instant_on_start (bool, optional): Whether to record an instant event whenever the marker is started.
Defaults to ``False``.
record_instant_on_finish (bool, optional): Whether to record an instant event whenever the marker is finished.
Defaults to ``False``.
categories (Union[List[str], Tuple[str, ...]], optional): Categories for this marker. Defaults to ``None``.
Returns:
Marker: Marker instance.
"""
if self.state is None:
raise RuntimeError('Profiler.bind_to_state() must be invoked before the Profiler can be used.')
if name not in self._names_to_markers:
def should_record(state: State) -> bool:
return self.schedule(state) in actions
self._names_to_markers[name] = Marker(
state=self.state,
trace_handlers=self.trace_handlers,
name=name,
should_record=should_record,
record_instant_on_start=record_instant_on_start,
record_instant_on_finish=record_instant_on_finish,
categories=categories,
)
self._names_to_markers[name].categories = categories
return self._names_to_markers[name]
| mosaicml/composer | composer/profiler/profiler.py | profiler.py | py | 14,764 | python | en | code | 4,712 | github-code | 90 |
70361217897 | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
# creating a new user model by inheriting AbstractUser model and changing username to email
# Also updating a few extra fields like phone, gender and session token
class CustomUser(AbstractUser):
name = models.CharField(max_length=50, default='Anonymous')
email = models.EmailField(max_length=254, unique=True)
username = None
# username will be governed by email instead of default username value
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
phone = models.CharField(max_length=20, blank=True, null=True)
gender = models.CharField(max_length=10, blank=True, null=True)
session_token = models.CharField(max_length=10, default=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True) | thej123/ecom | ecom/api/user/models.py | models.py | py | 888 | python | en | code | 1 | github-code | 90 |
23722324267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
问题描述:
给定一个数组和一个目标值,在数组中找出三个数,使它们的和为目标值(two_sum的升级问题)
https://leetcode-cn.com/problems/3sum/
示例:
输入[-1,0,1,2,-1,-4] 0 输出[[-1,0,1], [-1,-1,2]]
说明:
输出为数组元素的值,而不是元素的下标,不能包含重复的三个数的组合
"""
def three_sum(arr, target):
"""
算法思路:
排序后从头到尾遍历数组元素,如取第一个元素,则在剩下的数组元素中取另外两个元素,使两个元素的和为目标值减去第一个元素的值
取两个元素使用双指针的方法,一个在头,一个在尾,当头尾两个元素的和超过需要补充的值,则尾指针向前移(减少头尾指针和)
当头尾两个元素的和小于需要补充的值,则头指针向后移(增大头尾指针和),
当头尾两个元素的和等于需要补充的值,则和第一个元素构成一个符合要求的三元组。
由于不能包含重复的三元组,遍历数组元素及头尾指针移动时,需跳过重复元素。
时间复杂度:n^2
"""
arr.sort()
result = list()
for i in range(len(arr)):
if i > 0 and arr[i] == arr[i-1]: # 跳过重复出现的值
continue
j = i+1
k = len(arr) - 1
while j < k:
if arr[j] + arr[k] > target - arr[i]:
k = k-1
elif arr[j] + arr[k] < target - arr[i]:
j = j+1
else:
result.append([arr[i], arr[j], arr[k]])
j = j+1
k = k-1
while j < k and arr[j] == arr[j-1]: # 跳过重复出现的值
j = j+1
while j < k and arr[k] == arr[k+1]: # 跳过重复出现的值
k = k-1
return result
if __name__ == '__main__':
arr = [-1,0,1,2,-1,-4]
target = 0
print(three_sum(arr, target))
arr = [-4,0,1,2,2,2,2,4]
print(three_sum(arr, target))
| sharevong/algothrim | three_sum.py | three_sum.py | py | 2,096 | python | zh | code | 0 | github-code | 90 |
16214561095 | # methods to work with the Google Spreadsheet
# tutorial: https://youtube.com/watch?v=aruInGd-m40
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
from config import your_email
# Connect to Google
# Scope: Enable access to specific links
scope = ['https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive"]
credentials = ServiceAccountCredentials.from_json_keyfile_name("gs_credentials.json", scope)
client = gspread.authorize(credentials)
# Create a blank spreadsheet (Note: We're using a service account, so this spreadsheet is visible only to this account)
# sheet = client.create("albion-market-prices-hunter")
# To access newly created spreadsheet from Google Sheets with your own Google account you must share it with your email
# Sharing a Spreadsheet
# sheet.share(your_email, perm_type='user', role='writer') # your_email from config.py
# initial dataframe
def create_dataframe():
df = pd.DataFrame(columns=[
'Item',
'Ench lvl',
'Caerleon price',
'BlackMarket price',
'From city',
'From city price',
'Spread Caerleon',
'Spread BlackMarket',
])
return df
# upload data to the spreadsheet
def sheet_updater(albion_data):
# Open the spreadsheet
sheet = client.open("albion-market-prices-hunter").sheet1
# read incomming data with pandas
# df = pd.DataFrame(albion_data)
# sort data by spread Caerleon
albion_data.sort_values(by=["Spread Caerleon"], inplace=True, ascending=False)
# export df to a sheet
sheet.update([albion_data.columns.values.tolist()] + albion_data.values.tolist())
def city_formatter(item_prices):
"""
Format city names by model
:param item_price: dict with city-price pairs
:return: dict with formatted city-price pairs
"""
formatted_city_price = {}
# TMP for testing
# with open('item_prices.json') as f:
# item_prices = json.load(f)
if 'Caerleon' in item_prices:
formatted_city_price['Caerleon'] = item_prices['Caerleon']
else:
formatted_city_price['Caerleon'] = None
if 'Black Market' in item_prices:
formatted_city_price['Black Market'] = item_prices['Black Market']
else:
formatted_city_price['Black Market'] = None
if 'Bridgewatch' in item_prices:
formatted_city_price['Bridgewatch'] = item_prices['Bridgewatch']
else:
formatted_city_price['Bridgewatch'] = None
if 'Fort Sterling' in item_prices:
formatted_city_price['Fort Sterling'] = item_prices['Fort Sterling']
else:
formatted_city_price['Fort Sterling'] = None
if 'Lymhurst' in item_prices:
formatted_city_price['Lymhurst'] = item_prices['Lymhurst']
else:
formatted_city_price['Lymhurst'] = None
if 'Thetford' in item_prices:
formatted_city_price['Thetford'] = item_prices['Thetford']
else:
formatted_city_price['Thetford'] = None
if 'Martlock' in item_prices:
formatted_city_price['Martlock'] = item_prices['Martlock']
else:
formatted_city_price['Martlock'] = None
# print(formatted_city_price)
return formatted_city_price
# make dataframe row with index and correct price positions
def format_prices(item_title, item_prices):
"""
dataframe row with index and correct price positions
:param item_title: item name
:param item_prices: list with sorted prices by model
:return: list with formatted prices
"""
formatted_prices = []
formatted_prices.append(item_title)
for city, price in item_prices.items():
formatted_prices.append(price)
return formatted_prices
# save data to csv
def csv_saver(albion_data):
df = pd.DataFrame(albion_data)
df.to_csv("albion_data.csv")
def item_data_formatter(item_data):
"""
Format item data by model
:param item_data:
:return: formatted item data
"""
formatted_item_data = []
# TODO filter critical values, like if spread more that 200% of item price
for city in item_data[2]:
if item_data[2].get(city) is not None:
try:
formatted_item_data.append([
item_data[0],
item_data[1],
item_data[2].get('Caerleon'),
item_data[2].get('Black Market'),
city,
item_data[2].get(city),
item_data[2].get('Caerleon')*0.92 - item_data[2].get(city),
item_data[2].get('Black Market')*0.92 - item_data[2].get(city),
])
except TypeError:
# print("TypeError: ", item_data[0], city, item_data[2].get(city))
pass
# print(formatted_item_data[2:])
return formatted_item_data[2:]
def df_append(item_data, item_calc):
"""
Append new data to the dataframe
:param item_data: current dataframe
:param item_calc: list with calculated data
:return: None
"""
new_row = pd.DataFrame({
'Item': item_calc[0],
'Ench lvl': item_calc[1],
'Caerleon price': item_calc[2],
'BlackMarket price': item_calc[3],
'From city': item_calc[4],
'From city price': item_calc[5],
'Spread Caerleon': item_calc[6],
'Spread BlackMarket': item_calc[7],},
index=[0])
item_data = pd.concat([new_row, item_data.loc[:]]).reset_index(drop=True)
return item_data
# test data
# item_data_formatter((
# "Adept's Cleric Robe",
# 1,
# {
# 'Caerleon': 4679,
# 'Black Market': 9993,
# 'Bridgewatch': 6689,
# 'Fort Sterling': 4402,
# 'Lymhurst': None,
# 'Thetford': None,
# 'Martlock': None
# }
# )) | Trionyx/albion_price_parser | data_handler.py | data_handler.py | py | 5,851 | python | en | code | 0 | github-code | 90 |
16623080786 | # https://adventofcode.com/2022/day/4
import pathlib
import time
script_path = pathlib.Path(__file__).parent
input = script_path / "input.txt" # 524 // 798
input_test = script_path / "test.txt" # 2 //
def parse(puzzle_input):
"""Parse input"""
with open(puzzle_input, "r") as file:
data = file.read().split('\n') # Read file make list by splitting on new line \n
data = [tuple(d.split(",")) for d in data]
data = [[tuple(sec.split("-")) for sec in pair] for pair in data]
data = [[tuple(map(int,sec)) for sec in pair] for pair in data]
return data
def check_sections_fully_overlap(a,b):
checks = []
if a[0] <= b[0] and a[1] >= b[1]:
checks.append(True)
if b[0] <= a[0] and b[1] >= a[1]:
checks.append(True)
return any(checks)
def check_sections_for_overlap(a,b):
checks = []
if a[0] <= b[0] and a[1] >= b[0]:
checks.append(True)
if a[0] <= b[0] and a[1] >= b[1]:
checks.append(True)
if b[0] <= a[0] and b[1] >= a[0]:
checks.append(True)
if b[0] <= a[0] and b[1] >= a[1]:
checks.append(True)
return any(checks)
def part1(data):
"""Solve part 1"""
overlaps = []
for pair in data:
section1, section2 = pair
ans = check_sections_fully_overlap(section1,section2)
if ans:
overlaps.append(pair)
return len(overlaps)
def part2(data):
"""Solve part 2"""
overlaps = []
for pair in data:
section1, section2 = pair
ans = check_sections_for_overlap(section1,section2)
if ans:
overlaps.append(pair)
return len(overlaps)
def solve(puzzle_input):
"""Solve the puzzle for the given input"""
times = []
data = parse(puzzle_input)
times.append(time.perf_counter())
solution1 = part1(data)
times.append(time.perf_counter())
solution2 = part2(data)
times.append(time.perf_counter())
return solution1, solution2, times
def runTest(test_file):
data = parse(test_file)
test_solution1 = part1(data)
test_solution2 = part2(data)
return test_solution1, test_solution2
def runAllTests():
print("Tests")
a, b = runTest(input_test)
print(f"Test1. Part1: {a} Part 2: {b}")
if __name__ == "__main__":
runAllTests()
solutions = solve(input)
print("\nAOC")
print(f"Solution 1: {str(solutions[0])} in {solutions[2][1]-solutions[2][0]:.4f}s")
print(f"Solution 2: {str(solutions[1])} in {solutions[2][2]-solutions[2][1]:.4f}s")
print(f"\nExecution total: {solutions[2][-1]-solutions[2][0]:.4f} seconds")
| TragicMayhem/advent_of_code | aoc_2022/day04/aoc2022d04.py | aoc2022d04.py | py | 2,765 | python | en | code | 0 | github-code | 90 |
18429238249 | import sys
N=int(input())
b=list(map(int,input().split()))
ans=[]
for i in range(N):
for j in range(N-1-i,-1,-1):
if b[j]==j+1:
ans.append(b.pop(j))
break
elif j==0:
print('-1')
sys.exit()
else:
continue
for i in range(N-1,-1,-1):
print(ans[i]) | Aasthaengg/IBMdataset | Python_codes/p03089/s318452993.py | s318452993.py | py | 338 | python | en | code | 0 | github-code | 90 |
34039951158 | import os
import json
import cv2
import numpy as np
#########################################################################################
# GLOBAL VARIABLES
# Total amount of keypoints presented in the new OpenPose model
KEYPOINTS_TOTAL = 25.0
# A keypoint is considered as a valid one if its score is greater than this value
SCORE_TRIGGER = 0.6
# Percentage of valid keypoints an object must have to be considered as a human skeleton
VALID_KEYPOINTS_TRIGGER = 0.4
# Openpose Mapping values
KEYPOINT_COLORS = {
"0":(195,1,68),
"1":(206,37,9),
"2":(186,60,1),
"3":(169,115,1),
"4":(151,153,1),
"5":(143,213,4),
"6":(143,213,4),
"7":(143,213,4),
"8":(206,37,9),
"9":(143,213,4),
"10":(3,196,134),
"11":(0,228,227),
"12":(0,98,154),
"13":(1,51,158),
"14":(1,51,158),
"15":(225,1,155),
"16":(101,0,152),
"17":(148,1,154),
"18":(60,0,199),
"19":(1,51,158),
"20":(1,51,158),
"21":(1,51,158),
"22":(0,228,227),
"23":(0,228,227),
"24":(0,228,227),
}
KEYPOINTS_MAPPING = {
"0":[{"id": 15, "color": (138,1,91)},{"id": 16, "color": (100,1,150)},{"id": 1, "color": (153,1,52)}],
"1":[{"id": 0, "color": (153,1,52)},{"id": 2, "color": (154,50,1)},{"id": 5, "color": (94,145,0)},{"id": 8, "color": (152,0,0)}],
"2":[{"id": 1, "color": ((152,50,0))},{"id": 3, "color": (154,102,0)}],
"3":[{"id": 2, "color": (154,102,0)},{"id": 4, "color": (153,155,1)}],
"4":[{"id": 3, "color": (153,155,1)}],
"5":[{"id": 1, "color": (94,145,0)},{"id": 6, "color": (51,152,1)}],
"6":[{"id": 5, "color": (51,152,1)},{"id": 7, "color": (0,153,0)}],
"7":[{"id": 6, "color": (0,153,0)}],
"8":[{"id": 1, "color": (152,0,0)},{"id": 9, "color": (1,153,52)},{"id": 12, "color": (0,101,153)}],
"9":[{"id": 8, "color": (1,153,52)},{"id": 10, "color": (0,152,101)}],
"10":[{"id": 9, "color": (0,152,101)},{"id": 11, "color": (0,153,153)}],
"11":[{"id": 10, "color": (0,153,153)},{"id": 22, "color": (8,149,153)},{"id": 24, "color": (8,149,153)}],
"12":[{"id": 8, "color": (0,101,153)},{"id": 13, "color": (0,49,144)}],
"13":[{"id": 12, "color": (0,49,144)},{"id": 14, "color": (0,0,152)}],
"14":[{"id": 13, "color": (0,0,152)},{"id": 19, "color": (0,0,152)},{"id": 21, "color": (0,0,152)}],
"15":[{"id": 0, "color": (138,1,91)},{"id": 17, "color": (155,1,155)}],
"16":[{"id": 0, "color": (100,1,150)},{"id": 18, "color": (50,1,152)}],
"17":[{"id": 15, "color": (155,1,155)}],
"18":[{"id": 16, "color": (50,1,152)}],
"19":[{"id": 14, "color": (0,0,152)},{"id": 20, "color": (1,0,140)}],
"20":[{"id": 19, "color": (1,0,140)}],
"21":[{"id": 14, "color": (0,0,152)}],
"22":[{"id": 23, "color": (8,149,153)},{"id": 11, "color": (8,149,153)}],
"23":[{"id": 22, "color": (8,149,153)}],
"24":[{"id": 11, "color": (8,149,153)}]
}
#########################################################################################
# AUX FUNCTIONS
def read_json(json_path):
with open(json_path) as f:
return json.load(f)
def read_frames(video_path):
input_frames = []
cap = cv2.VideoCapture(video_path)
while(cap.isOpened()):
# Read frame
ret, frame = cap.read()
if isinstance(frame, np.ndarray):
# Store frame in array
input_frames.append(frame)
else:
break
cap.release()
return input_frames
def write_video(video_path, frames):
out = cv2.VideoWriter(video_path,
cv2.VideoWriter_fourcc(*'MJPG'),
10, (1288,728))
for frame in frames:
out.write(frame)
out.release()
def draw_keypoints(input_frames, json_object):
plotted_frames = []
for frame, frame_annotation in zip(input_frames, json_object["annotations"]):
plotted_image = np.copy(frame)
for frame_object in frame_annotation["objects"]:
plotted_ids = []
keypoints_dict = {}
for keypoint in frame_object["keypoints"]:
x = keypoint["position"]["x"]
y = keypoint["position"]["y"]
keypoint_id = keypoint["id"]
keypoints_dict[keypoint_id]={
"x":int(x),
"y":int(y)
}
plotted_ids.append(keypoint_id)
plotted_image = cv2.circle(plotted_image, (int(x),int(y)), radius=8, color=KEYPOINT_COLORS[keypoint_id], thickness=-1)
plotted_paths = []
for plotted_id in plotted_ids:
for mapping in KEYPOINTS_MAPPING[plotted_id]:
if str(mapping["id"]) in plotted_ids:
if "{plotted}-{mapped}".format(plotted=plotted_id,mapped=mapping["id"]) not in plotted_paths and "{mapped}-{plotted}".format(plotted=plotted_id,mapped=mapping["id"]) not in plotted_paths:
x1 = keypoints_dict[plotted_id]["x"]
y1 = keypoints_dict[plotted_id]["y"]
x2 = keypoints_dict[str(mapping["id"])]["x"]
y2 = keypoints_dict[str(mapping["id"])]["y"]
plotted_image = cv2.line(plotted_image, (x1, y1), (x2, y2), mapping["color"], thickness=2)
plotted_paths.append("{plotted}-{mapped}".format(plotted=plotted_id,mapped=mapping["id"]))
plotted_frames.append(plotted_image)
return plotted_frames
def draw_keypoints_on_video(video_path, json_object):
print("")
print("Reading frames...")
input_frames = read_frames(video_path)
print("Done! {} frames have been read.".format(len(input_frames)))
print("")
print("Drawing keypoints on input frames...")
plotted_frames = draw_keypoints(input_frames, json_object)
print("")
print("Writing output video file...")
write_video("output_video.avi", plotted_frames)
#########################################################################################
# MAIN
def main():
json_filename = "p002g15c03"
json_path = "./{}.json".format(json_filename)
video_path = "./{}.mp4".format(json_filename)
json_obj = read_json(json_path)
for annotation in json_obj["annotations"]:
frame_resolution = annotation["resolution"]
frame_id = annotation["frame_id"]
filtered_objects = []
for object_item in annotation["objects"]:
valid_keypoints = []
for keypoint in object_item["keypoints"]:
keypoint_id = keypoint["id"]
keypoint_score = keypoint["score"]
keypoint_position = keypoint["position"]
if keypoint_score > SCORE_TRIGGER:
valid_keypoints.append(keypoint)
if float(len(valid_keypoints))/KEYPOINTS_TOTAL > VALID_KEYPOINTS_TRIGGER:
filtered_objects.append({
"label":"0",
"id":"0",
"score":"0.0",
"keypoints":valid_keypoints
})
annotation["objects"] = filtered_objects
# Generate video with filtered keypoints plotted on it.
draw_keypoints_on_video(video_path, json_obj)
# Serializing json
json_serialized = json.dumps(json_obj)
# Writing to sample.json
with open("{}-filtered.json".format(json_filename), "w") as outfile:
outfile.write(json_serialized)
return
#########################################################################################
# ENTRYPOINT
main() | gsbiel/python-stuff | filtro_confiabilidade.py | filtro_confiabilidade.py | py | 7,588 | python | en | code | 0 | github-code | 90 |
6936466111 | from lxml import etree
from . import node
import re
class Stage(object):
XMLNS = "http://tail-f.com/ns/config/1.0"
XML = "{%s}" % XMLNS
XMLNSMAP = {None : XMLNS}
NCSNS = "http://tail-f.com/ns/ncs"
NCS = "{%s}" % NCSNS
NCSNSMAP = {None : NCSNS}
name_instance = 0
def __init__(self, schema):
self.sdict = {}
self.DEV = "{%s}" % schema.namespace
self.DEVNSMAP = {None : schema.namespace}
def add_leaf(self, leaf, value=None):
# Empty type?
if value == "<empty-false>":
return
# Provide sample?
if value is None:
value = leaf.get_sample()
assert value is not None
# Enter sequence number
value = value.replace("%d", str(Stage.name_instance + 1))
path = leaf.path
if "{" in path:
while re.match(".*?{([^}]+)}.*?({\\1}).*", path):
path = re.sub("(.*?{([^}]+)}.*?)({\\2})(.*)", "\\1\\4", path)
if path.startswith("/{"):
path = "/" + path[path.index("}")+1:]
# Keys have to be ordered so put the key index in the path to
# make the correct order when sorted
if leaf.is_key():
key_index = None
# Check which key
stmt = leaf.stmt
while stmt.parent is not None:
key = node._stmt_get_value(stmt, "key")
if key is not None:
key_index = key.split(" ").index(leaf.get_arg())
break
stmt = stmt.parent
else:
assert False
path = ("/%c".join(path.rsplit("/", 1))) % (int(key_index) + 1)
self.sdict[path] = value
def save(self, dev, fname):
self._xml(dev)
f = open(fname, "w")
f.write(etree.tostring(self.root, pretty_print=True))
f.close()
self.sdict = {}
Stage.name_instance = (Stage.name_instance + 1) % 9
def flush(self, dev):
self.save(dev, "drned-work/drned-commit.xml")
dev.load("drned-work/drned-commit.xml")
def _xml(self, dev):
root = etree.Element(Stage.XML + "config",
nsmap=Stage.XMLNSMAP)
devices = etree.SubElement(root, Stage.NCS + "devices",
nsmap=Stage.NCSNSMAP)
device = etree.SubElement(devices, 'device')
name = etree.SubElement(device, 'name')
name.text = dev.name
config = etree.SubElement(device, 'config')
xml_map = {}
for s in sorted(self.sdict):
sxml = "".join([c for c in s if ord(c) >= ord(" ")])
elems = sxml.split("/")[1:]
for i,elem in enumerate(elems):
path = "/".join(elems[:i+1])
if path in xml_map:
e = xml_map[path]
elif i == 0:
e = etree.SubElement(config, self.DEV + elem,
nsmap=self.DEVNSMAP)
else:
e = etree.SubElement(e, elem)
xml_map[path] = e
text = self.sdict[s]
if not text.startswith("<empty-"):
e.text = text
self.etree = etree
self.root = root
| NSO-developer/drned-xmnr | drned/drned/stage.py | stage.py | py | 3,271 | python | en | code | 6 | github-code | 90 |
3974792169 |
def word_count(str):
counts = dict()
word = str,split('')
for word in words:
if word in counts:
counts[word] =+ 1
else:
return count
word_count('the quick brown fox jumps over the lazy dog.')
| priyankang/Debbug | bas_ek_galti.py | bas_ek_galti.py | py | 248 | python | en | code | 0 | github-code | 90 |
24107171808 | class Solution():
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: None Do not return anything, modify matrix in-place instead.
"""
n=len(matrix)
for i in range(n):
for j in range(i):
matrix[i][j],matrix[j][i]=matrix[j][i],matrix[i][j]
for i in range(n):
matrix[i].reverse()
return matrix
#Test Case
matrix = [[1,2,3],[4,5,6],[7,8,9]]
ans=Solution()
print(ans.rotate(matrix))
# print(len(matrix[0]))
# print(matrix[0]) | Snobin/CompetitiveCoding | rotateimage.py | rotateimage.py | py | 545 | python | en | code | 2 | github-code | 90 |
2206663160 | import pandas as pd
import pdfkit
import os
import subprocess
import sys
pdflocation = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop') + '\\App\\PDFFiles'
def exceltopdf(input, output):
filename = input.split('\\')[-1].split('.')[0] + '.pdf'
df = pd.read_excel(input)#input
df.to_html("input.html")#to html
path_wkhtmltopdf = r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
pdfkit.from_file("input.html", output + '\\' + filename, configuration=config)#to pdf
if (os.path.isfile(pdflocation) == True):
if __name__ == "__main__":
exceltopdf(sys.argv[1], pdflocation)
else:
subprocess.call('mkdir ' + pdflocation, shell=True)
print('hello')
if __name__ == "__main__":
exceltopdf(sys.argv[1], pdflocation)
| narasimha193/pdf_generator | py/exceltopdf.py | exceltopdf.py | py | 869 | python | en | code | 0 | github-code | 90 |
17984987439 | from collections import Counter
S = list(input())
abc = [chr(ord('a') + i) for i in range(26)]
ans = 100000
for s in abc:
result = S
count = 0
while len(set(result)) > 1:
count += 1
tmp = ["dd"] * (len(result) - 1)
for i in range(len(result)-1):
if result[i] == s or result[i+1] == s:
tmp[i] = s
else:
tmp[i] = result[i]
result = tmp
ans = min(ans, count)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03687/s936990406.py | s936990406.py | py | 472 | python | en | code | 0 | github-code | 90 |
17160935530 | from dataclasses import dataclass, asdict
import os
from amplitude_experiment import Experiment, User, LocalEvaluationConfig
class CustomError(Exception):
pass
@dataclass
class UserProperties:
org_id: str = None
org_name: str = None
username: str = None
email: str = None
plan: str = None
hub_region: str = None
user_status: str = None
subscription_type: str = None
infra_provider: str = None
template_id: str = None
class FeatureFlag:
def __init__(self, ):
debug = bool(os.environ.get("LOCAL_EVALUATION_CONFIG_DEBUG")) or True
server_url = os.environ.get("LOCAL_EVALUATION_CONFIG_SERVER_URL") or "https://api.lambdatest.com"
flag_config_polling_interval_millis = (int(os.environ.get(
"LOCAL_EVALUATION_CONFIG_POLL_INTERVAL")) or 120) * 1000
flag_config_poller_request_timeout_millis = (int(os.environ.get(
"LOCAL_EVALUATION_CONFIG_POLLER_REQUEST_TIMEOUT")) or 10) * 1000
deploymentKey = os.environ.get("LOCAL_EVALUATION_DEPLOYMENT_KEY") or "server-jAqqJaX3l8PgNiJpcv9j20ywPzANQQFh"
config = LocalEvaluationConfig(debug, server_url, flag_config_polling_interval_millis,
flag_config_poller_request_timeout_millis)
self.experiment = Experiment.initialize_local(deploymentKey, config)
self.experiment.start()
def fetch(self, flagName, user):
if not isinstance(user, UserProperties):
raise CustomError("invalid userProperties object has passed")
expUser = User(user_properties=asdict(user))
variants = self.experiment.evaluate(expUser, [flagName])
return variants
def GetFeatureFlagString(self, flagName, user):
try:
data = self.fetch(flagName, user)
if data is not None and data.get(flagName) is not None:
return data.get(flagName).value
else:
return ""
except CustomError as e:
print("An error occurred:", str(e))
raise e
def GetFeatureFlagBool(self, flagName, user):
try:
data = self.fetch(flagName, user)
if data is not None:
return bool(data.get(flagName).value)
else:
return False
except CustomError as e:
print("An error occurred:", str(e))
raise e
def GetFeatureFlagPayload(self, flagName, user):
try:
data = self.fetch(flagName, user)
if data is not None:
return data.get(flagName)
else:
return dict()
except CustomError as e:
print("An error occurred:", str(e))
raise e
| LambdaTest/lambda-featureflag-python-sdk | localEvaluation.py | localEvaluation.py | py | 2,745 | python | en | code | 0 | github-code | 90 |
18310587729 | import sys
# sys.setrecursionlimit(100000)
def input():
return sys.stdin.readline().strip()
def input_int():
return int(input())
def input_int_list():
return [int(i) for i in input().split()]
def main():
n = input_int()
A = input_int_list()
MOD = 10**9 + 7
cnt = 1
x, y, z = 0, 0, 0
for a in A:
tmp = 0
is_used = False # x,y,zのどこかに配った。
if a == x:
tmp += 1
if not is_used:
x += 1
is_used = True
if a == y:
tmp += 1
if not is_used:
y += 1
is_used = True
if a == z:
tmp += 1
if not is_used:
z += 1
is_used = True
cnt *= tmp
cnt = cnt % MOD
print(cnt)
return
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p02845/s253205788.py | s253205788.py | py | 891 | python | en | code | 0 | github-code | 90 |
16762188600 | import dataclasses
from typing import Collection, Iterable, List, Type
from unittest import TestCase
from harmony import OnePair, HarmonyMode, TwoPairs
from .game import PlayerCards, CommunityCards
from .winner import winner
class TestWinner(TestCase):
@dataclasses.dataclass
class TestData:
players: Collection[Iterable[str]]
community: Iterable[str]
expected_winners: List[int]
expected_harmony: Type[HarmonyMode]
def test_winner_one_pair(self):
cases = (
TestWinner.TestData(
[
("4S", "TC"),
("4D", "2H"),
],
(
"4D", "JH", "AS", "9S", "7C",
),
[0],
OnePair,
),
)
for c in cases:
self.run_test_data(c)
def test_winner_two_pairs(self):
cases = (
TestWinner.TestData(
[
("4S", "TC"),
("4D", "2H"),
],
(
"4D", "JH", "AS", "TS", "2C",
),
[0],
TwoPairs,
),
TestWinner.TestData(
[
("4S", "2S"),
("4C", "2H"),
],
(
"4D", "JH", "AS", "TS", "2C",
),
[0, 1],
TwoPairs,
),
)
for c in cases:
self.run_test_data(c)
def run_test_data(self, data: TestData):
players = []
for p in data.players:
cards = []
for c in p:
cards.append((c[0], c[1]))
players.append(
PlayerCards(*cards)
)
community_cards = []
for c in data.community:
community_cards.append((c[0], c[1]))
w = winner(community_cards, players)
self.assertEqual(len(data.expected_winners), len(w))
for ww in w:
self.assertIsInstance(ww[1], data.expected_harmony)
for expected in data.expected_winners:
expected_player = players[expected]
self.assertIn(expected_player, map(lambda ww: ww[0], w))
| ehsundar/foldem | judge/test_winner.py | test_winner.py | py | 2,310 | python | en | code | 0 | github-code | 90 |
73241970537 | '''
- 30분 고민하고 1시간 30분 구현
- 시간 복잡도 생각 안함, N이 100이하여서 구현만 하면 맞을 것이라고 생각함
- 가장 중요한 점은 어항을 어떻게 저장할 것인가 -> 행렬을 회전하고 붙이려면 어느 형태가 편할까에 대한 고민을 함
- 그래서 백준에 나온 그림 기준 아래와 같이 리스트에 저장
3 5 [[3, 3],
3 14 9 11 8 -> [14, 5],
[9],
[11],
[8]]
- 그럼 회전할 때 [[3, 3], 만 뽑아서 90도 회전하고 [[9], + [[14, 3], 이런식으로 붙이면 됨
[14, 5]] [11], + [5, 3]]
[8]]
- 물고기 이동에서 중요한 건 모든 구역에서 동시에 발생하는 것 -> 이동량을 저장할 행렬하나 선언해줘서 저장해놓고 나중에 기존의 어항에 더해주면 됨
'''
import copy
def rotation_90(h, w):
if len(fishbowl) - w < h: # 오른쪽에 바닥이 없는 경우
return None
temp = fishbowl[:w]
new_bowl = fishbowl[w:]
rotate_bowl = [[0] * w for _ in range(h)] # 회전 후 저장할 공간
# 90도 회전
for i in range(w):
for j in range(h):
rotate_bowl[j][w - 1 - i] = temp[i][j]
# 회전 후 바닥에 있는 어항과 합체 (한 번에 요소들을 합치기 위해 extend사용)
for i in range(h):
new_bowl[i].extend(rotate_bowl[i])
return new_bowl
def rotation_180():
h = len(flatten_fishbowl)
new_bowl = flatten_fishbowl[h // 2:]
for i in range(h // 2):
new_bowl[i].extend(flatten_fishbowl[h//2 - i - 1][::-1])
return new_bowl
def fish_move(fishbowl):
# 1 ≤ 각 어항에 들어있는 물고기의 수 ≤ 10,000
n = len(fishbowl)
max_fish = 1
min_fish = 10000
flatten_fishbowl = []
# 물고기 이동량 저장 + or -
diff_bowl = [[0] * len(fishbowl[i]) for i in range(n)]
# 오른쪽과 아래만 비교하면서 이동 값 기록
for i in range(n):
for j in range(len(fishbowl[i])):
if i < n - 1 and len(fishbowl[i+1]) > j: # 행렬의 형태가 [[1, 2, 3], [1, 2, 3], [1], [1]] 이런 경우가 존재하기 때문에
d = abs(fishbowl[i][j] - fishbowl[i+1][j]) // 5
if d > 0:
if fishbowl[i][j] > fishbowl[i+1][j]:
diff_bowl[i][j] -= d
diff_bowl[i+1][j] += d
else:
diff_bowl[i][j] += d
diff_bowl[i + 1][j] -= d
if j < len(fishbowl[i]) - 1:
d = abs(fishbowl[i][j] - fishbowl[i][j + 1]) // 5
if d > 0:
if fishbowl[i][j] > fishbowl[i][j + 1]:
diff_bowl[i][j] -= d
diff_bowl[i][j + 1] += d
else:
diff_bowl[i][j] += d
diff_bowl[i][j + 1] -= d
# 기존의 어항에 물고기 이동량 반영
for j in range(len(fishbowl[i])):
fishbowl[i][j] += diff_bowl[i][j]
if max_fish < fishbowl[i][j]:
max_fish = fishbowl[i][j]
elif min_fish > fishbowl[i][j]:
min_fish = fishbowl[i][j]
flatten_fishbowl.append([fishbowl[i][j]])
return flatten_fishbowl, min_fish, max_fish
# 입력 받기
N, K = map(int, input().split())
temp = list(map(int, input().split()))
answer = 1
while True:
min_value = min(temp)
# 최소값에 1씩 더함
for i in range(N):
if temp[i] == min_value:
temp[i] += 1
# 행렬의 형태를 [[5], [2], [3] .... [8]]
fishbowl = [[temp[i]] for i in range(N)]
h, w = 1, 1 # 공중부양 하는 행렬의 형태 높이, 길이
while True:
new_bowl = rotation_90(h, w) # 90도 회전 후 어항을 쌓음
if not new_bowl: # 오른쪽에 있는 어항의 아래에 바닥에 있는 어항이 있을때까지 반복
break
fishbowl = copy.deepcopy(new_bowl)
# 공중 부양해야 하는 행렬의 형태가 (1, 1) (2, 1) (2, 2) (3, 2) ... 이렇게 변함
if h == w:
h += 1
else:
w += 1
flatten_fishbowl, _, _ = fish_move(fishbowl) # 물고기 이동
for _ in range(2): # 반반 작업 두 번
flatten_fishbowl = rotation_180() # 180도 돌리고 쌓기
flatten_fishbowl, min_fish, max_fish = fish_move(flatten_fishbowl) # 물고기 움직이고, 최대 최소값 구하기
if max_fish >= min_fish and max_fish - min_fish <= K: # 최대 - 최소 가 K 이하면 종료
print(answer)
exit()
answer += 1
temp = [i[0] for i in flatten_fishbowl] # 처음 입력 받을 때와 동일하게 변환 | kyeong8/CodingTestStudy | twowindragon/bj23191.py | bj23191.py | py | 5,114 | python | ko | code | 0 | github-code | 90 |
4174000041 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FinanceFeed'
db.create_table('newsconnector_financefeed', (
('rssfeed_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['newsconnector.RssFeed'], unique=True, primary_key=True)),
))
db.send_create_signal('newsconnector', ['FinanceFeed'])
# Adding model 'EntertainmentFeed'
db.create_table('newsconnector_entertainmentfeed', (
('rssfeed_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['newsconnector.RssFeed'], unique=True, primary_key=True)),
))
db.send_create_signal('newsconnector', ['EntertainmentFeed'])
# Adding model 'SportsFeed'
db.create_table('newsconnector_sportsfeed', (
('rssfeed_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['newsconnector.RssFeed'], unique=True, primary_key=True)),
))
db.send_create_signal('newsconnector', ['SportsFeed'])
# Adding model 'NewsFeed'
db.create_table('newsconnector_newsfeed', (
('rssfeed_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['newsconnector.RssFeed'], unique=True, primary_key=True)),
))
db.send_create_signal('newsconnector', ['NewsFeed'])
def backwards(self, orm):
# Deleting model 'FinanceFeed'
db.delete_table('newsconnector_financefeed')
# Deleting model 'EntertainmentFeed'
db.delete_table('newsconnector_entertainmentfeed')
# Deleting model 'SportsFeed'
db.delete_table('newsconnector_sportsfeed')
# Deleting model 'NewsFeed'
db.delete_table('newsconnector_newsfeed')
models = {
'newsconnector.article': {
'Meta': {'object_name': 'Article'},
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'source': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.TextField', [], {})
},
'newsconnector.entertainmentarticle': {
'Meta': {'object_name': 'EntertainmentArticle', '_ormbases': ['newsconnector.Article']},
'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Article']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.entertainmentfeed': {
'Meta': {'object_name': 'EntertainmentFeed', '_ormbases': ['newsconnector.RssFeed']},
'rssfeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.RssFeed']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.entertainmentkeyword': {
'Meta': {'object_name': 'EntertainmentKeyword', '_ormbases': ['newsconnector.Keyword']},
'keyword_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Keyword']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.financearticle': {
'Meta': {'object_name': 'FinanceArticle', '_ormbases': ['newsconnector.Article']},
'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Article']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.financefeed': {
'Meta': {'object_name': 'FinanceFeed', '_ormbases': ['newsconnector.RssFeed']},
'rssfeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.RssFeed']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.financekeyword': {
'Meta': {'object_name': 'FinanceKeyword', '_ormbases': ['newsconnector.Keyword']},
'keyword_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Keyword']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.keyword': {
'Meta': {'object_name': 'Keyword'},
'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 18, 14, 54, 52, 645224)', 'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.TextField', [], {})
},
'newsconnector.newsarticle': {
'Meta': {'object_name': 'NewsArticle', '_ormbases': ['newsconnector.Article']},
'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Article']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.newsfeed': {
'Meta': {'object_name': 'NewsFeed', '_ormbases': ['newsconnector.RssFeed']},
'rssfeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.RssFeed']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.newskeyword': {
'Meta': {'object_name': 'NewsKeyword', '_ormbases': ['newsconnector.Keyword']},
'keyword_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Keyword']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.rssfeed': {
'Meta': {'object_name': 'RssFeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
},
'newsconnector.sportsarticle': {
'Meta': {'object_name': 'SportsArticle', '_ormbases': ['newsconnector.Article']},
'article_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Article']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.sportsfeed': {
'Meta': {'object_name': 'SportsFeed', '_ormbases': ['newsconnector.RssFeed']},
'rssfeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.RssFeed']", 'unique': 'True', 'primary_key': 'True'})
},
'newsconnector.sportskeyword': {
'Meta': {'object_name': 'SportsKeyword', '_ormbases': ['newsconnector.Keyword']},
'keyword_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['newsconnector.Keyword']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['newsconnector']
| miltontony/newsconnector | newsconnector/migrations/0002_auto__add_financefeed__add_entertainmentfeed__add_sportsfeed__add_news.py | 0002_auto__add_financefeed__add_entertainmentfeed__add_sportsfeed__add_news.py | py | 7,319 | python | en | code | 1 | github-code | 90 |
73827937898 | from typing import Optional, Callable, Dict, Tuple, List
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CocoDetection
def default_collate_fn(samples):
fetched_data = defaultdict(list)
for sample in samples:
for key, val in sample.items():
if isinstance(val, np.ndarray):
val = torch.from_numpy(val)
elif key == "target":
val = (
torch.from_numpy(val[0]),
torch.from_numpy(val[1]),
torch.from_numpy(val[2]),
)
fetched_data[key].append(val)
fetched_data["image"] = torch.stack(fetched_data["image"], dim=0).float().permute(0, 3, 1, 2)
if "target" in fetched_data:
cls_targets, offset_targets, shape_targets = zip(*fetched_data["target"])
fetched_data["target"] = (
torch.stack(cls_targets, dim=0),
torch.stack(offset_targets, dim=0),
torch.stack(shape_targets, dim=0),
)
return fetched_data
class Coco(CocoDetection):
def __init__(self, root: str, annFile: str, transforms: Optional[Callable] = None, target_generator = None) -> None:
super().__init__(root, annFile)
self._transforms = transforms
self._obj_id_mappings = {
i: self.coco.cats[cat_id]["name"] for i, cat_id in enumerate(self.coco.cats.keys())
}
self._rev_obj_id_mappings = {
cat_name: i for i, cat_name in self._obj_id_mappings.items()
}
self._target_generator = target_generator
@property
def target_generator(self):
return self._target_generator
@target_generator.setter
def target_generator(self, target_generator):
self._target_generator = target_generator
@property
def num_classes(self) -> int:
return len(self._obj_id_mappings.keys())
@property
def labels(self) -> List[str]:
return list(sorted(self._rev_obj_id_mappings.keys()))
def __getitem__(self, index: int) -> Tuple[np.ndarray, Dict]:
image, target = super().__getitem__(index)
image = np.array(image, dtype=np.uint8)
bboxes = list()
labels = list()
for t in target:
x, y, w, h = t["bbox"]
if min(w, h) <= 0:
# skip target if width or height is 0
continue
bboxes.append(
[float(x), float(y), float(x + w), float(y + h)]
)
labels.append(
self.coco.cats[t["category_id"]]["name"]
)
data = dict(
image=image,
bboxes=bboxes,
labels=labels
)
if self._transforms:
data = self._transforms(**data)
data["label_ids"] = [self.label2id(label) for label in data["labels"]]
if self._target_generator:
data["target"] = self._target_generator.build_targets(
*data["image"].shape[:2],
np.array(data["bboxes"], dtype=np.float32),
np.array(data["label_ids"], dtype=np.int32),
)
return data
def id2label(self, idx: int) -> str:
return self._obj_id_mappings[idx]
def label2id(self, label: str) -> int:
return self._rev_obj_id_mappings[label]
def get_dataloader(
self,
batch_size: int = 1,
num_workers: int = 0,
collate_fn = default_collate_fn,
shuffle: bool = False,
**kwargs,
) -> DataLoader:
torch.utils
return DataLoader(
self,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=collate_fn,
shuffle=shuffle,
**kwargs,
)
| borhanMorphy/object-as-points | centernet/dataset/coco.py | coco.py | py | 3,863 | python | en | code | 2 | github-code | 90 |
11609469546 | import gzip
from fastai.text import *
def build_lm(data_path, model_name):
with gzip.open(data_path, "rt", encoding="UTF-8") as fin:
data = fin.readlines()
n_data = len(data)
print(f"load {n_data} texts")
data_lm = TextLMDataBunch.from_tokens("", trn_tok=data, trn_lbls=[0]*n_data,
val_tok=[[]], val_lbls=[0])
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5, pretrained=False)
learn.fit_one_cycle(1, 1e-2)
learn.save(model_name) | seantyh/GWA2019 | scripts/build_lm.py | build_lm.py | py | 512 | python | en | code | 0 | github-code | 90 |
43486663487 | import gspread
import numpy as np
# define data, and change list to array
x = [3,21,22,34,54,34,55,67,89,99]
x = np.array(x)
y = [2,22,24,65,79,82,55,130,150,199]
y = np.array(y)
def model(a,b,x):
return a*x + b
def loss_function(a,b,x,y):
num = len(x)
prediction = model(a,b,x)
return (0.5 / num) * (np.square(prediction - y)).sum()
def optimize(a,b,x,y):
num = len(x)
prediction = model(a,b,x)
da = (1.0 / num) * ((prediction -y)*x).sum()
db = (1.0 / num) * ((prediction -y).sum())
a = a - Lr*da
b = b - Lr*db
return a,b
def iterate(a,b,x,y,times):
for i in range(times):
a,b = optimize(a,b,x,y)
return a,b
gc = gspread.service_account(filename="unitypythonsheets-7664ce31a9fc.json")
sh = gc.open("unitysheets")
def Send(i: int, a,b,loss):
sh.sheet1.update("A" + str(i), str(a))
sh.sheet1.update("B" + str(i), str(b))
sh.sheet1.update("C" + str(i), str(loss))
print(a,b,loss)
a = np.random.rand(1)
b = np.random.rand(1)
Lr = 0.000001
a,b = iterate(a,b,x,y,1)
prediction = model(a,b,x)
loss = loss_function(a,b,x,y)
Send(1,a,b,loss)
a,b = iterate(a,b,x,y,10)
prediction = model(a,b,x)
loss = loss_function(a,b,x,y)
Send(2,a,b,loss)
a,b = iterate(a,b,x,y,100)
prediction = model(a,b,x)
loss = loss_function(a,b,x,y)
Send(3,a,b,loss)
a,b = iterate(a,b,x,y,1000)
prediction = model(a,b,x)
loss = loss_function(a,b,x,y)
Send(4,a,b,loss) | VenchasS/DA-in-GameDev-lab2 | task2.py | task2.py | py | 1,483 | python | en | code | 0 | github-code | 90 |
29061573383 | """Useful functions for matrix transformations"""
import cv2
import numpy as np
def order_points(pts):
"""
Helper function for four_point_transform.
Check pyimagesearch blog for an explanation on the matter
"""
# Order: top-left, top-right, bottom-right and top-left
rect = np.zeros((4, 2), dtype=np.float32)
# top-left will have smallest sum, while bottom-right
# will have the largest one
_sum = pts.sum(axis=1)
rect[0] = pts[np.argmin(_sum)]
rect[2] = pts[np.argmax(_sum)]
# top-right will have smallest difference, while
# bottom-left will have the largest one
_diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(_diff)]
rect[3] = pts[np.argmax(_diff)]
return rect
def four_point_transform(img, pts):
"""Returns 'bird view' of image"""
rect = order_points(pts)
tl, tr, br, bl = rect
# width of new image will be the max difference between
# bottom-right - bottom-left or top-right - top-left
widthA = np.linalg.norm(br - bl)
widthB = np.linalg.norm(tr - tl)
width = int(round(max(widthA, widthB)))
# Same goes for height
heightA = np.linalg.norm(tr - br)
heightB = np.linalg.norm(tl - bl)
height = int(round(max(heightA, heightB)))
# construct destination for 'birds eye view'
dst = np.array([
[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]],
dtype=np.float32)
# compute perspective transform and apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(img, M, (width, height))
return warped
def resize(img, new_width):
"""Resizes image to new_width while maintaining its ratio"""
height, width = img.shape[:2]
ratio = height / width
return cv2.resize(img, (new_width, int(ratio * new_width)))
| tempdata73/tic-tac-toe | utils/imutils.py | imutils.py | py | 1,822 | python | en | code | 10 | github-code | 90 |
2461885141 | class Solution(object):
def cellsInRange(self, s):
"""
:type s: str
:rtype: List[str]
"""
start_stop = s.split(":")
start, stop = start_stop[0], start_stop[1]
start_num = int(start[1:])
end_num = int(stop[1:])
start_col = (start[:1])
end_col = (stop[:1])
res = []
for i in xrange(ord(start_col), ord(end_col) + 1):
crn_col = chr(i)
for j in xrange(start_num,end_num+1):
res.append(crn_col + str(j))
return res | petrosDemetrakopoulos/Leetcode | code/Python/2194-CellsInARangeOnAnExcelSheet.py | 2194-CellsInARangeOnAnExcelSheet.py | py | 557 | python | en | code | 0 | github-code | 90 |
30893094852 | import ctypes
import datetime
import decimal
import sys
from peewee import ImproperlyConfigured
from peewee import sqlite3
from playhouse.sqlite_ext import *
sqlite3_lib_version = sqlite3.sqlite_version_info
# Peewee assumes that the `pysqlite2` module was compiled against the
# BerkeleyDB SQLite libraries.
try:
from pysqlite2 import dbapi2 as berkeleydb
except ImportError:
import sqlite3 as berkeleydb
berkeleydb.register_adapter(decimal.Decimal, str)
berkeleydb.register_adapter(datetime.date, str)
berkeleydb.register_adapter(datetime.time, str)
class BerkeleyDatabase(SqliteExtDatabase):
def __init__(self, database, pragmas=None, cache_size=None, page_size=None,
multiversion=None, *args, **kwargs):
super(BerkeleyDatabase, self).__init__(
database, pragmas=pragmas, *args, **kwargs)
if multiversion:
self._pragmas.append(('multiversion', 'on'))
if page_size:
self._pragmas.append(('page_size', page_size))
if cache_size:
self._pragmas.append(('cache_size', cache_size))
def _connect(self, database, **kwargs):
if not PYSQLITE_BERKELEYDB:
message = ('Your Python SQLite driver (%s) does not appear to '
'have been compiled against the BerkeleyDB SQLite '
'library.' % berkeleydb)
if LIBSQLITE_BERKELEYDB:
message += (' However, the libsqlite on your system is the '
'BerkeleyDB implementation. Try recompiling '
'pysqlite.')
else:
message += (' Additionally, the libsqlite on your system '
'does not appear to be the BerkeleyDB '
'implementation.')
raise ImproperlyConfigured(message)
conn = berkeleydb.connect(database, **kwargs)
conn.isolation_level = None
self._add_conn_hooks(conn)
return conn
def _set_pragmas(self, conn):
# `multiversion` is weird. It checks first whether another connection
# from the BTree cache is available, and then switches to that, which
# may have the handle of the DB_Env. If that happens, then we get
# an error stating that you cannot set `multiversion` despite the
# fact we have not done any operations and it's a brand new conn.
if self._pragmas:
cursor = conn.cursor()
for pragma, value in self._pragmas:
if pragma == 'multiversion':
try:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
except berkeleydb.OperationalError:
pass
else:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
cursor.close()
@classmethod
def check_pysqlite(cls):
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
import sqlite3
conn = sqlite3.connect(':memory:')
try:
results = conn.execute('PRAGMA compile_options;').fetchall()
finally:
conn.close()
for option, in results:
if option == 'BERKELEY_DB':
return True
return False
@classmethod
def check_libsqlite(cls):
# Checking compile options is not supported.
if sys.platform.startswith('win'):
library = 'libsqlite3.dll'
elif sys.platform == 'darwin':
library = 'libsqlite3.dylib'
else:
library = 'libsqlite3.so'
try:
libsqlite = ctypes.CDLL(library)
except OSError:
return False
return libsqlite.sqlite3_compileoption_used('BERKELEY_DB') == 1
if sqlite3_lib_version < (3, 6, 23):
# Checking compile flags is not supported in older SQLite versions.
PYSQLITE_BERKELEYDB = False
LIBSQLITE_BERKELEYDB = False
else:
PYSQLITE_BERKELEYDB = BerkeleyDatabase.check_pysqlite()
LIBSQLITE_BERKELEYDB = BerkeleyDatabase.check_libsqlite()
| theotherp/nzbhydra | libs/playhouse/berkeleydb.py | berkeleydb.py | py | 4,138 | python | en | code | 559 | github-code | 90 |
20850894532 | """
Given a number n, find length of the longest consecutive 1s in its binary representation.
Examples :
Input : n = 14
Output : 3
The binary representation of 14 is 1110.
The idea is based on the concept that if we AND a bit sequence with a shifted version of itself, we’re effectively
removing the trailing 1 from every sequence of consecutive 1s.So the operation x = (x & (x << 1)) reduces length of
every sequence of 1s by one in binary representation of x. If we keep doing this operation in a loop, we end up with
x = 0. The number of iterations required to reach 0 is actually length of the longest consecutive sequence of 1s.
"""
def count_max_consecutive_1s(no):
count = 0
while no != 0:
no = no & (no << 1)
count += 1
return count
print(count_max_consecutive_1s(14))
| Harishkumar18/data_structures | cracking_the_coding_interview/bit_manipulation/count_consecutive_1s.py | count_consecutive_1s.py | py | 814 | python | en | code | 1 | github-code | 90 |
35816924725 | #!/usr/bin/env python3
'''
Library for 74HC595 shiftregister
Based on similar script for raspberry pi https://github.com/mignev/shiftpi
'''
import RPi.GPIO as GPIO
from time import sleep
class SH74HC595:
# Define pins
_DATA_pin = 40 # pin 14 (DS) on the 75HC595 GPA0
_LATCH_pin = 38 # pin 12 (STCP) on the 75HC595 LATCH GPA1
_CLOCK_pin = 36 # pin 11 (SHCP) on the 75HC595 CLOCK GPA2
# Define MODES
ALL = -1
HIGH = 1
LOW = 0
def __init__(self):
self.gpio = GPIO
self.gpio.setmode(GPIO.BOARD)
self.gpio.setup(SH74HC595._DATA_pin, GPIO.OUT)
self.gpio.setup(SH74HC595._LATCH_pin, GPIO.OUT)
self.gpio.setup(SH74HC595._CLOCK_pin, GPIO.OUT)
# is used to store states of all pins
self._registers = list()
self._number_of_shiftregisters = 1
def digital_write(self, pin, mode):
'''
Allows the user to set the state of a pin on the shift register
'''
if pin == self.ALL:
self.set_all(mode)
else:
if len(self_registers) == 0:
self.set_all(self.LOW)
self._set_pin(pin, mode)
self._execute()
def get_num_pins(self):
return self._number_of_shiftregisters * 8
def set_all(self, mode, execute=True):
num_pins = self.get_num_pins()
for pin in range(0, num_pins):
self._set_pin(pin, mode)
if execute:
self._execute()
return self._registers
def _set_pin(self, pin, mode):
try:
self._registers[pin] = mode
except IndexError:
self._registers.insert(pin, mode)
def _execute(self):
num_pins = self.get_num_pins()
self.mcpi2c.output(SH74HC595._LATCH_pin, GPIO.LOW)
for pin in range(num_pins - 1, -1, -1):
self.mcpi2c.output(SH74HC595._CLOCK_pin, GPIO.LOW)
pin_mode = self._registers[pin]
self.mcpi2c.output(SH74HC595._DATA_pin, pin_mode)
self.mcpi2c.output(SH74HC595._CLOCK_pin, GPIO.HIGH)
self.mcpi2c.output(SH74HC595._LATCH_pin, GPIO.HIGH)
def shift_one(self, input_val):
self.gpio.output(SH74HC595._CLOCK_pin, GPIO.LOW)
if input_val == 1:
self.gpio.output(SH74HC595._DATA_pin, GPIO.HIGH)
else:
self.gpio.output(SH74HC595._DATA_pin, GPIO.LOW)
self.gpio.output(SH74HC595._CLOCK_pin, GPIO.HIGH)
self.gpio.output(SH74HC595._DATA_pin, GPIO.LOW)
def write_out(self):
self.gpio.output(SH74HC595._LATCH_pin, GPIO.HIGH)
sleep(0.04)
self.gpio.output(SH74HC595._LATCH_pin, GPIO.LOW)
def write_char(self, char_to_shift):
for x in range(0, 7):
self.shift_one((char_to_shift >> x) % 2)
self.write_out()
if __name__ == "__main__":
test = SH74HC595()
count = 0
try:
while True:
list = [0x3F, 0x06, 0x5B, 0x4F, 0x66, 0x6D, 0x7D, 0x07, 0x7F, 0x6F]
value = ~ list[count % 10]
for x in range(7, -1, -1):
test.shift_one((value >> x) % 2)
test.write_out()
sleep(0.75)
count += 1
print(count)
except KeyboardInterrupt:
pass
GPIO.cleanup()
| Brent-rb/University | master/networking-and-interfacing-iot-platforms/practica/2/3.1-shift-gpio/main.py | main.py | py | 3,290 | python | en | code | 0 | github-code | 90 |
24107139528 | class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def mergeTwoLists(l1, l2):
if l1==None:
return l2
if l2==None:
return l1
if l1.val<=l2.val:
l1.next=mergeTwoLists(l1.next,l2)
return l1
else:
l2.next=mergeTwoLists(l1,l2.next)
return l2
# # Test Case 1: Merging two empty lists should result in an empty list
# l1 = None
# l2 = None
# result = mergeTwoLists(l1, l2)
# # Expected Output: None
# print(result)
# # Test Case 2: Merging an empty list with a non-empty list should return the non-empty list
# l1 = None
# l2 = ListNode(1)
# result = mergeTwoLists(l1, l2)
# # Expected Output: 1 -> None
# while result:
# print(result.val, end=" -> ")
# result = result.next
# Test Case 3: Merging two sorted lists
l1 = ListNode(1, ListNode(3, ListNode(5)))
l2 = ListNode(4, ListNode(6, ListNode(7)))
result = mergeTwoLists(l1, l2)
# Expected Output: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> None
while result:
print(result.val, end=" -> ")
result = result.next
# Test Case 4: Merging two sorted lists of different lengths
# l1 = ListNode(1, ListNode(3, ListNode(5, ListNode(7))))
# l2 = ListNode(2, ListNode(4, ListNode(6)))
# result = mergeTwoLists(l1, l2)
# # Expected Output: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> None
# while result:
# print(result.val, end=" -> ")
# result = result.next
# print(l2.val)
| Snobin/CompetitiveCoding | mergetwosortedlists(2).py | mergetwosortedlists(2).py | py | 1,451 | python | en | code | 2 | github-code | 90 |
26062575924 | import sys
import os
import json
import logging
from coffee_machine import CoffeeMachine
def file_sanity_check():
file_data = None
if len(sys.argv) > 1:
file_path = sys.argv[1]
if os.path.exists(file_path):
with open(file_path) as file_ptr:
file_data = json.load(file_ptr)
logging.info('Loaded file : {}'.format(file_path))
else:
print("{} not found".format(file_path))
logging.error('File : {}, not found'.format(file_path))
return file_data
def process_beverage_requests(num_of_machine, total_ingredients, list_of_beverages, machine_data):
try:
coffee_machines = CoffeeMachine(num_of_machine)
coffee_machines.initialize_inventory(total_ingredients)
logging.info("Initiated {} coffee machines".format(total_ingredients))
for bvg_name in list_of_beverages:
bvg_ingredients_data = machine_data['beverages'][bvg_name]
was_beverage_made = coffee_machines.request_beverage(bvg_name, bvg_ingredients_data)
if was_beverage_made:
logging.info("{} was prepared successfully".format(bvg_name))
else:
logging.warning("{} was NOT prepared".format(bvg_name))
except Exception as error1:
print("Exception occurred while preparing beverages")
print(error1)
def file_input():
file_data = file_sanity_check()
if file_data:
machine_data = file_data["machine"]
num_of_machine = machine_data['outlets']['count_n']
total_ingredients = machine_data['total_items_quantity']
list_of_beverages = machine_data['beverages']
process_beverage_requests(num_of_machine, total_ingredients, list_of_beverages, machine_data)
def main():
file_input()
if __name__ == "__main__":
main()
| hakimkartik/CoffeeMachine | main.py | main.py | py | 1,856 | python | en | code | 0 | github-code | 90 |
529515492 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
:mod:`graphical_maze` module
:author: Coignion Tristan, Tayebi Ajwad, Becquembois Logan
:date: 15/11/2018
This module provides function which help display the maze from the Maze module in a window
Uses:
- maze.py
- square.py (Dependancy)
- tkinter
"""
from tkinter import * #pylint: disable=W0614
from maze import * #pylint: disable=W0614
from random import choice
CAN_WIDTH = 800
CAN_HEIGHT = 800
BG_COLOR = 'black'
GRID_COLOR = 'medium blue'
GOOD_CELL_COLOR = "yellow"
BAD_CELL_COLOR = "crimson"
CIRCLE_SCALE = 0.6
RECTANGLE_SCALE = 0.8
def draw_circle(canvas, event):
"""
Draws a circle of ray 5 at the location of `event` on the `canvas`
"""
ray = 5
x, y = event.x, event.y
canvas.create_oval(x - ray, y - ray,
x + ray, y + ray,
fill = 'red')
canvas.update()
def draw_grid(canvas, width, height, can_width=CAN_WIDTH, can_height=CAN_HEIGHT):
"""
Draws a grid on the `canvas`. The dimensions of the grid are `width` and `height`.
The dimensions of the canvas are `can_width` and `can_height` and are by default
`CAN_WIDTH` and `CAN_HEIGHT`
"""
DX = can_width // width # Width of a square
DY = can_height // height
for y in range(height):
for x in range(width):
canvas.create_line(x * DX, y * DY,
(x + 1) * DX, y * DY,
fill=GRID_COLOR, width=1)
canvas.create_line(x * DX, y * DY,
x * DX, (y + 1) * DY,
fill=GRID_COLOR, width=1)
canvas.create_line(0, height * DY - 1, width * DX - 1, height * DY - 1,
fill=GRID_COLOR, width=1)
canvas.create_line(width * DX - 1, 0, width * DX - 1, height * DY - 1,
fill=GRID_COLOR, width=1)
def random_word(filename):
"""
returns a random word taken from a file `filename`
:param filename: (str) the words have to be separated by backspaces
:return: (str) a word
"""
with open(filename, 'r') as stream:
lines = stream.readlines()
return choice(lines).rstrip('\n')
def remove_wall(canvas, x, y, side, width, height, can_width=CAN_WIDTH, can_height=CAN_HEIGHT):
"""
removes a wall from a side of a cell on the canvas
:param canvas: (Canvas)
:param x, y: (int) the coordinates of the cell
:side: (str) the side we want to remove, must be "Left" or "Top"
:param width: (int) the width of the maze
:param height: (int) the height of the maze
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:side-effect: removes a line from the canvas
:return: None
:UC: 0<=x<=width-1, 0<=y<=height-1
"""
DX = can_width // width # This is the width of a square
DY = can_height // height # This is the height of a square
if side == "Left":
canvas.create_line(x * DX, y * DY, (x) * DX, (y + 1) * DY, fill=BG_COLOR, width=1)
if side == "Top":
canvas.create_line(x * DX, y * DY, (x+1) * DX, y * DY, fill=BG_COLOR, width=1)
def setup_wall(canvas, maze, can_width=CAN_WIDTH, can_height=CAN_HEIGHT):
"""
removes all the walls of the graphical maze according to the ones on the maze object
:param canvas: (Canvas)
:param maze: (Maze)
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:side effect: removes lines from the canvas
:return: None
:UC: None
"""
height = maze.get_height()
width = maze.get_width()
for y in range(height):
for x in range(width):
cell = maze.get_square(x, y)
if not cell.has_left_rampart():
remove_wall(canvas, x, y, "Left", width, height, can_width, can_height)
if not cell.has_top_rampart():
remove_wall(canvas, x, y, "Top", width, height, can_width, can_height)
def set_circle(canvas, width, height, x, y, can_width=CAN_WIDTH, can_height=CAN_HEIGHT, fill_color = GOOD_CELL_COLOR, scale=CIRCLE_SCALE):
"""
draws a circle on the cell of coordinates (x,y)
:param canvas: (Canvas)
:param x,y: (int) the coordinates of the cell
:param width: (int) the width of the maze
:param height: (int) the height of the maze
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:param fill_color: (str) [default = GOOD_CELL_COLOR] the color of the circle
:param scale: (int) [default = CIRCLE_SCALE] the scale of the circle
:side-effect: draws a circle
:return: None
:UC: 0<=x<=width-1, 0<=y<=height-1 0<= scale <= 1
"""
DX = can_width // width
DY = can_height // height
scale = scale/2 + 0.5
canvas.create_oval(DX*(x+scale), DY*(y+scale),
DX*(x+1-scale), DY*(y+1-scale),
fill = fill_color)
def remove_circle(canvas, width, height, x, y, can_width=CAN_WIDTH, can_height=CAN_HEIGHT, fill_color=BG_COLOR, scale=CIRCLE_SCALE):
"""
Removes a circle of the canvas by making its color the same as the background's
:param canvas: (Canvas)
:param x,y: (int) the coordinates of the cell
:param width: (int) the width of the maze
:param height: (int) the height of the maze
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:param fill_color: (str) [default = BG_COLOR] the color of the circle
:param scale: (int) [default = CIRCLE_SCALE] the scale of the circle
:side-effect: erase a circle
:return: None
:UC: 0<=x<=width-1, 0<=y<=height-1 0<= scale <= 1
"""
set_circle(canvas, width, height, x, y, can_width=can_width, can_height=can_height, fill_color=fill_color, scale=scale)
def set_bad_cell(canvas, width, height, x, y, can_width=CAN_WIDTH, can_height=CAN_HEIGHT, fill_color=BAD_CELL_COLOR, scale=RECTANGLE_SCALE):
"""
Draws a cell as a cell which doesn't lead to the exit
:param canvas: (Canvas)
:param x,y: (int) the coordinates of the cell
:param width: (int) the width of the maze
:param height: (int) the height of the maze
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:param fill_color: (str) [default = BAD_CELL_COLOR] the color of the cell
:param scale: (int) [default = RECTANGLE_SCALE] the scale of the square
:side-effect: Draws a square on the cell
:return: None
:UC: 0<=x<=width-1, 0<=y<=height-1 0<= scale <= 1
"""
scale = scale/2 + 0.5
DX = can_width // width # This is the width of a square
DY = can_height // height # This is the height of a square
canvas.create_rectangle(DX*(x+scale), DY*(y+scale),
DX*(x+1-scale), DY*(y+1-scale),
fill = fill_color)
def remove_bad_cell(canvas, width, height, x, y, can_width=CAN_WIDTH, can_height=CAN_HEIGHT, fill_color=BG_COLOR, scale=RECTANGLE_SCALE):
"""
Erase a cell as a cell which doesn't lead to the exit
:param canvas: (Canvas)
:param x,y: (int) the coordinates of the cell
:param width: (int) the width of the maze
:param height: (int) the height of the maze
:param can_width: (int) the width of the canvas
:param can_height: (int) the height of the canvas
:param fill_color: (str) [default = BG_COLOR] the color of the cell
:param scale: (int) [default = RECTANGLE_SCALE] the scale of the square
:side-effect: Draws a square on the cell
:return: None
:UC: 0<=x<=width-1, 0<=y<=height-1 0<= scale <= 1
"""
set_bad_cell(canvas, width, height, x, y, can_width=can_width, can_height=can_height, fill_color=fill_color, scale=scale)
def create_canvas(win, adjusted_can_width, adjusted_can_height):
"""
Creates and returns a canvas with a scrolling bar
:param win: (Window) A tkinter window parent to the canvas
:param adjusted_can_width: (int) the width of the canvas
:param adjusted_can_height: (int) the height of the canvas
"""
can = Canvas(win, bg=BG_COLOR, width=adjusted_can_width, height=adjusted_can_height)
can.bind('<Button-1>',
lambda event: draw_circle(can, event))
defilY = Scrollbar(win, orient="vertical", command=can.yview)
defilY.pack(side="right")
defilX = Scrollbar(win, orient="horizontal", command=can.xview)
defilX.pack(side="bottom")
can["yscrollcommand"] = defilY.set
can["xscrollcommand"] = defilX.set
can.pack(fill="both", expand=True) # Allows the canvas to be handled as grid and columns
return can | Saauan/Maze | src/graphical_maze.py | graphical_maze.py | py | 8,762 | python | en | code | 0 | github-code | 90 |
34731504427 | #!/usr/bin/env python3
""" Defines `train_transformer` """
import tensorflow.compat.v2 as tf
Dataset = __import__('3-dataset').Dataset
create_masks = __import__('4-create_masks').create_masks
Transformer = __import__('5-transformer').Transformer
def train_transformer(N, dm, h, hidden, max_len, batch_size, epochs):
"""
Creates and trains a transformer model for machine translation of
Portuguese to English.
N: The number of blocks in the encoder and decoder.
dm: The dimensionality of the model.
h: The number of heads.
hidden: The number of hidden units in the fully connected layers.
max_len: The maximum number of tokens per sequence.
batch_size: The batch size for training.
epochs: The number of epochs to train for.
Returns: The trained model.
"""
# Create the dataset
dataset = Dataset(batch_size, max_len)
# Instantiate a Transformer model
transformer = Transformer(
N,
dm,
h,
hidden,
dataset.tokenizer_pt.vocab_size + 2,
dataset.tokenizer_en.vocab_size + 2,
max_len,
max_len,
)
# Custom optimizations
class TransformerLRS(tf.keras.optimizers.schedules.LearningRateSchedule):
""" Custom learning rate schedule """
def __init__(self, warmup_steps=4000):
""" Initializes the TransformerLRS """
self.warmup_steps = warmup_steps
def __call__(self, step):
""" Calculates the learning rate at `step`. """
learning_rate = (
dm ** -0.5 *
tf.math.minimum(step ** -0.5, step * self.warmup_steps ** -1.5)
)
return learning_rate
optimizer = tf.keras.optimizers.Adam(
learning_rate=TransformerLRS(),
beta_1=0.9,
beta_2=0.98,
epsilon=1e-9,
)
# Define the loss function
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
""" Calculates the loss of a prediction. """
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
def accuracy_function(real, pred):
""" Calculates the accuracy of the model. """
accuracies = tf.equal(real, tf.argmax(pred, axis=2))
mask = tf.math.logical_not(tf.math.equal(real, 0))
accuracies = tf.math.logical_and(mask, accuracies)
accuracies = tf.cast(accuracies, dtype=tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
# Custom training procedure
def train_step(inputs, targets):
""" Trains the model on a single batch. """
tar_inp = targets[:, :-1]
tar_real = targets[:, 1:]
encoder_mask, look_ahead_mask, decoder_mask = create_masks(
inputs, tar_inp)
with tf.GradientTape() as tape:
predictions = transformer(
inputs,
tar_inp,
True,
encoder_mask,
look_ahead_mask,
decoder_mask,
)
loss = loss_function(tar_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(
zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(accuracy_function(tar_real, predictions))
# Train
for epoch in range(epochs):
train_loss.reset_states()
train_accuracy.reset_states()
for (batch_number, (inputs, targets)) in enumerate(dataset.data_train):
train_step(inputs, targets)
if batch_number % 50 == 0:
print(
'Epoch {}, batch {}: loss {} accuracy {}'.format(
epoch, batch_number, train_loss.result(),
train_accuracy.result()
)
)
print(
'Epoch {}: loss {} accuracy {}'.format(
epoch, train_loss.result(), train_accuracy.result())
)
return transformer
| keysmusician/holbertonschool-machine_learning | supervised_learning/0x12-transformer_apps/5-train.py | 5-train.py | py | 4,428 | python | en | code | 1 | github-code | 90 |
2510262102 | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import pyspark.sql.types as T
spark = SparkSession.builder.master("local[*]").getOrCreate()
# Create dataframes
# 1. from raw source_data sources - files ( spark.read)
df = spark.read.format("json").load("source_data/flight-source_data/json/2015-summary.json")
# temporary view for query with SQL
# temporary view for query with SQL
df.createOrReplaceTempView("dfTable")
# 2. We can also create DataFrames on the fly by taking a set of rows and converting them to a DataFrame.
# ( schema, rows, spark.createDataFrame)
from pyspark.sql import Row
from pyspark.sql.types import StructField, StructType, StringType, LongType, IntegerType
myManualSchema = StructType([
StructField("some", StringType(), True),
StructField("col", StringType(), True),
StructField("names", IntegerType(), False)
])
from pyspark.sql import Row
myRow1 = Row("Hello", None, 1)
print(type(myRow1)) # <class 'pyspark.sql.types.Row'>
print(myRow1[0]) #Hello
myRow2 = Row("Bye", "Baby", 17)
myRow3 = Row("Good morning", "Dear", 23)
myRow4 = Row("Good evening", None, 5)
myDf = spark.createDataFrame([myRow1, myRow2, myRow3, myRow4], myManualSchema)
myDf.show()
| VladyslavPodrazhanskyi/learn_spark | code/my_practice/4.Creating_dataframes.py | 4.Creating_dataframes.py | py | 1,228 | python | en | code | 0 | github-code | 90 |
11597579671 | # @file tweeting_ucrocontroller.c
# @author Gregório da Luz
# @date January 2021
# @brief file to tweet through microcontroller
import serial
import tweepy
#Here you put the key, secret, token and, token secret from your Twitter Developer account
key = "x90redHO7n2gRHn1IpSc8Vcor"
secret = "CmxFBjpo6uuqFhCGi6NRFAo2U7fdZx3dUmLkRx5Z8lnEa27Dwb"
access_token = "1352355518086582272-cf3da0erLLD3RvVzUjCM6lCcjmPMcD"
access_token_secret = "1k1kPXPmilplIHXPvKUis9cZso17j8IA63WhWk4kuDUlQ"
#In this line we use serial to open the serial connection between the board and the laptop
ser = serial.Serial('COM5', 9600,timeout=1)
tweets_posted = 0
#This is the function used to login in to your Twitter account
def OAuth():
try:
auth = tweepy.OAuthHandler(key, secret)
auth.set_access_token(access_token, access_token_secret)
return auth
except Exception as e:
return None
oauth = OAuth()
api = tweepy.API(oauth)
#Here we keep track of tweets posted so far, once it reaches the limit 5, we stop stop the program
while(tweet_posted < 5):
tweet = ser.readline()
if(tweet !=b''):
api.update_status(tweet)
tweets_posted +=1
| gregorio1212/tweet-machine | Python/tweeting_ucontroller.py | tweeting_ucontroller.py | py | 1,180 | python | en | code | 0 | github-code | 90 |
23221680228 | import time
import numpy as np
from lib.hands.hands import Hands, MediapipeHands
from lib.hands.detector import HandDetModel
from lib.hands.pose import PoseLandmark
from lib.utils.draw import (
draw_point,
draw_rectangle,
draw_rotated_rect,
draw_text,
copy_past_roi,
Draw3dLandmarks,
draw_gesture,
)
from lib.utils.gesture import recognize_gesture
from lib.utils.utils import smooth_pts, coord_to_box
class HandTracker(object):
def __init__(
self,
frame_size=None,
capability=1,
threshold=0.5,
pipe_mode=0,
is_draw3d=False,
roi_mode=0,
):
if frame_size is None:
self.priori_box = [
(300, 200),
(700, 500),
] # get a priori box with kinect or hand detector
else:
h, w = frame_size[0], frame_size[1]
self.priori_box = [(0.25 * w, 0.25 * h), (0.75 * w, 0.75 * h)]
self.hand_boxes = None
self.pts_buffer = [None]
self.landmark_thres = threshold
self.pipe_mode = pipe_mode
self.roi_mode = roi_mode
self._init_models(is_draw3d, frame_size, capability)
def _init_models(self, is_draw3d, frame_size, capability):
if is_draw3d:
self.draw3der = Draw3dLandmarks(frame_size)
else:
self.draw3der = None
if self.roi_mode == 0:
self.detector = HandDetModel() # hand detector
elif self.roi_mode == 1:
self.detector = PoseLandmark() # pose landmark
else:
pass # self.roi_mode == 2, pre-defined roi
self.name = "TFLite-Full" if capability > 0 else "TFLite-Lite"
if self.pipe_mode == 0:
self.hand_model = Hands(capability) # using our original pipeline logic
else:
self.hand_model = MediapipeHands(capability) # using mediapipe's rotated rectangled roi logic
def __call__(self, img_bgr):
img_show = img_bgr.copy()
if (self.roi_mode != 2) and (self.hand_boxes is None):
priori_box = self.detector(img_bgr)
if len(priori_box) > 0:
self.hand_boxes = priori_box.copy()
else:
self.hand_boxes = []
elif self.hand_boxes is None:
self.hand_boxes = [
self.priori_box.copy(),
]
start = time.time()
(
pose_preds,
handness,
righthand_props,
roi_boxes,
rects,
world_landmarks,
) = self.hand_model.run_with_boxes(img_bgr, self.hand_boxes)
end = time.time()
print(f"Landmark time: {(end - start) * 1000:.2f} ms. - {self.name}")
hand_boxes_tmp = []
pts_bufffer_tmp = []
for (coords, is_hand, righthand_prop, coords_last, hand_box, roi_box, rect, world_landmark,) in zip(
pose_preds,
handness,
righthand_props,
self.pts_buffer,
self.hand_boxes,
roi_boxes,
rects,
world_landmarks,
):
if is_hand > self.landmark_thres:
if coords_last is not None:
coords = smooth_pts(coords_last, coords, hand_box)
box = coord_to_box(coords)
hand_boxes_tmp.append(box)
pts_bufffer_tmp.append(coords)
img_show = draw_text(img_show, is_hand, righthand_prop, rect)
img_show = draw_point(img_show, coords)
img_show = copy_past_roi(img_show, self.hand_model.img_roi_bgr)
if self.draw3der is not None: # concat world-landmarks on right side of the img
img_show = self.draw3der(img_show, world_landmark)
if self.pipe_mode == 0:
img_show = draw_rectangle(img_show, roi_box)
else: # mediapipe's rotated roi pipeline
img_show = draw_rotated_rect(img_show, self.hand_model.rect_roi_coords)
# draw gesture label
img_show = draw_gesture(
img_show,
coords,
recognize_gesture(self.hand_model.unprojected_world_landmarks),
)
else:
if self.draw3der is not None:
pad_img = 255 * np.ones((img_bgr.shape[0], img_bgr.shape[0], 3), dtype=np.uint8)
img_show = np.hstack([img_show, pad_img])
if self.roi_mode == 2:
img_show = draw_rectangle(img_show, self.priori_box) # draw initial roi_box
self.hand_boxes = hand_boxes_tmp.copy()
self.pts_buffer = pts_bufffer_tmp.copy()
if len(self.hand_boxes) == 0 or len(self.pts_buffer) == 0:
# self.hand_boxes = [self.priori_box.copy(), ]
self.hand_boxes = None
self.pts_buffer = [
None,
]
self.hand_model.clear_history()
return img_show
| Daming-TF/Mediapipe-hands | lib/hands/hand_tracker.py | hand_tracker.py | py | 5,087 | python | en | code | 3 | github-code | 90 |
18470742259 | import sys
input = sys.stdin.readline
def main():
S = input().rstrip()
ans = 0
n_white = 0
for s in S[::-1]:
if s == "W":
n_white += 1
else:
ans += n_white
print(ans)
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p03200/s714533700.py | s714533700.py | py | 272 | python | en | code | 0 | github-code | 90 |
40564911848 | from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
class KhatavahiBookServiceSetting(Document):
pass
def setup_custom_fields():
custom_fields = {
"Item": [
dict(fieldname='booking_item',
label='Booking Item',
fieldtype='Check',
insert_after='disabled',
print_hide=1),
dict(fieldname='service_item',
label='Service Item',
fieldtype='Link',
insert_after='booking_item',
options='Item',
depends_on='eval:doc.booking_item',
read_only=0, print_hide=1),
dict(fieldname='is_service_item',
label='Is Service Item',
fieldtype='Check',
insert_after='service_item',
options='Item',
depends_on='eval:!doc.booking_item',
read_only=0, print_hide=1)
],
"Sales Order": [
dict(fieldname='book_service',
label='Book Service',
fieldtype='Link',
insert_after='customer_name',
read_only=0,
options='Book Service'
),
]
}
create_custom_fields(custom_fields)
frappe.msgprint("Custom Field Updated!")
| Khatavahi-BI-Solutions/bookingapp | bookingapp/booking_service_app/doctype/khatavahi_book_service_setting/khatavahi_book_service_setting.py | khatavahi_book_service_setting.py | py | 1,482 | python | en | code | 26 | github-code | 90 |
11995234376 | #!"./venv/Scripts/python.exe"
import cv2
import numpy as np
import os
from scipy import ndimage
cv2_base_dir = os.path.dirname(os.path.abspath(cv2.__file__))
haar_model = os.path.join(cv2_base_dir, 'data/haarcascade_frontalface_default.xml')
print(" ")
print(haar_model)
blue = (255,0,0)
red = (0,0,255)
green = (0,255,00)
faceCascade = cv2.CascadeClassifier(haar_model)
# imgFile = "/home/jan/programming/python/opencv/Lenna.png"
imgFile = "people.jpg"
img = cv2.imread(imgFile)
#rotation angle in degree
# img = ndimage.rotate(img, 45)
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), blue, 2)
cv2.imshow('Gray', imgGray)
cv2.imshow('Result', img)
cv2.waitKey()
| jakem68/Python-OpenCV | tutorial/09_faceDetection.py | 09_faceDetection.py | py | 793 | python | en | code | 0 | github-code | 90 |
73397745256 | #
# @lc app=leetcode.cn id=46 lang=python3
#
# [46] 全排列
#
# https://leetcode-cn.com/problems/permutations/description/
#
# algorithms
# Medium (65.30%)
# Total Accepted: 13.9K
# Total Submissions: 21.3K
# Testcase Example: '[1,2,3]'
#
# 给定一个没有重复数字的序列,返回其所有可能的全排列。
#
# 示例:
#
# 输入: [1,2,3]
# 输出:
# [
# [1,2,3],
# [1,3,2],
# [2,1,3],
# [2,3,1],
# [3,1,2],
# [3,2,1]
# ]
#
#
class Solution:
"""
2019/05/03
"""
nums = None
results = None
def permute(self, nums: List[int]) -> List[List[int]]:
self.nums = nums
self.results = []
self.gen([])
return self.results
def gen(self, com, count=1):
if count > len(self.nums):
self.results.append(com)
return
for n in self.nums:
if n in com:
continue
if count > 1:
if count == len(com):
com.pop(-1)
com.append(n)
else:
com = [n]
self.gen(com[::], count + 1)
class Solution1:
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
from itertools import permutations
return [list(t) for t in permutations(nums, len(nums))]
| elfgzp/Leetcode | 46.permutations.py | 46.permutations.py | py | 1,373 | python | en | code | 1 | github-code | 90 |
13328505893 | import math
from tkinter import *
from random import randint, shuffle, sample
from time import sleep
root = Tk()
root.title("Sorting Algorithms Visualiser")
sortType = StringVar()
menuText = StringVar()
colourOptions = ["Red", "Green", "Blue", "Monochrome", "Random"]
#Initialises the necessary functions based on entered parameters into the form.
def go():
global sortType
myText.delete("1.0",END)
#Creates a random set of data based on selected parameters.
data = []
quantity = int(myScaleQuantity.get())
rangeMax = int(myScaleRangeMax.get())
for i in range(quantity):
data.append(randint(0,rangeMax))
#Runs the appropriate function based on which radio button for types of sort has been selected
if sortType.get() == "Bubble":
bubble_sort(data,rangeMax)
elif sortType.get() == "Bogo":
bogo_sort(data,rangeMax)
elif sortType.get() == "Cocktail":
cocktail_shaker_sort(data,rangeMax)
#Takes in a value and a max value and returns a hex colour code of corresponding intensity. e.g. 23/100 = 23% brightness on RGB.
def get_colour(value,rangeMax):
global menuText
activeColour = menuText.get()
hexIntensity = str(hex(int(math.floor(float(value)/float(rangeMax)*255)))[2:])
while len(hexIntensity) < 2:
hexIntensity = "0" + hexIntensity
if activeColour == "Red":
return "#" + hexIntensity + "0000"
elif activeColour == "Green":
return "#00" + hexIntensity + "00"
elif activeColour == "Blue":
return "#0000" + hexIntensity
elif activeColour == "Monochrome":
return "#" + hexIntensity + hexIntensity + hexIntensity
else:
return "#" + "".join(sample("0123456789ABCDEF",6))
#Bubble sort function, takes in a set of data and the maximum value this data can be (used for some calculations regarding geometry)
def bubble_sort(data,rangeMax):
iterations = 1
#This simply inverts the speed selection (e.g. speed 100 leads to a sleep of 0, speed 1 leads to a sleep of 1 second, speed 50 leads to a sleep of 0.5 second)
speed = (100-myScaleSpeed.get()) / 100
sorted = False
while sorted == False:
changeMade = False
#log to the screen the current state of the data array
myText.delete("1.0",END)
myText.insert(END,"Iteration " + str(iterations) + ": " + str(data)+"\n")
for i in range(0,len(data)-1):
if data[i] > data[i+1]:
buffer = data[i]
data[i] = data[i+1]
data[i+1] = buffer
changeMade = True
iterations += 1
plot_boxes(data,rangeMax)
sleep(speed)
if changeMade == False:
sorted = True
myText.insert(END, "Sort completed after " + str(iterations) + " iterations.")
#Bubble sort function, takes in a set of data and the maximum value this data can be (used for some calculations regarding geometry)
def cocktail_shaker_sort(data,rangeMax):
iterations = 1
#This simply inverts the speed selection (e.g. speed 100 leads to a sleep of 0, speed 1 leads to a sleep of 1 second, speed 50 leads to a sleep of 0.5 second)
speed = (100-myScaleSpeed.get()) / 100
sorted = False
while sorted == False:
changeMade = False
#log to the screen the current state of the data array
myText.delete("1.0",END)
myText.insert(END,"Iteration " + str(iterations) + ": " + str(data)+"\n")
#first parse over data
for i in range(0,len(data)-1):
if data[i] > data[i+1]:
buffer = data[i]
data[i] = data[i+1]
data[i+1] = buffer
changeMade = True
iterations += 1
plot_boxes(data,rangeMax)
sleep(speed)
myText.delete("1.0",END)
myText.insert(END,"Iteration " + str(iterations) + ": " + str(data)+"\n")
#return parse over data
for i in range(len(data)-1,0,-1):
if data[i] < data[i-1]:
buffer = data[i]
data[i] = data[i-1]
data[i-1] = buffer
changeMade = True
iterations += 1
plot_boxes(data,rangeMax)
sleep(speed)
if changeMade == False:
sorted = True
myText.insert(END, "Sort completed after " + str(iterations) + " iterations.")
#Randomly re-orders the numbers over and over until they are placed in order by chance.
def bogo_sort(data,rangeMax):
iterations = 1
#This simply inverts the speed selection (e.g. speed 100 leads to a sleep of 0, speed 1 leads to a sleep of 1 second, speed 50 leads to a sleep of 0.5 second)
speed = (100-myScaleSpeed.get()) / 100
sorted = False
while sorted == False:
plot_boxes(data,rangeMax)
changeNeeded = False
#log to the screen the current state of the data array
myText.delete("1.0",END)
myText.insert(END,"Iteration " + str(iterations) + ": " + str(data)+"\n")
for i in range(0,len(data)-1):
if data[i] > data[i+1]:
changeNeeded = True
if changeNeeded == False:
sorted = True
else:
iterations += 1
sleep(speed)
shuffle(data)
myText.insert(END, "Sort completed after " + str(iterations) + " iterations.")
#Takes in a list of numbers as well as the maximum value each number can be. Plots these as points on the canvas relative to the parameters selected on the form.
def plot_boxes(data,rangeMax):
myCanvas.delete("all")
quantity = len(data)
rectangleWidth = 800/quantity
for i in range(quantity):
#TLX: i*canvas width/quantity of data items // TLY: canvas height - canvas height/max data value*current data value
#BRX: i*canvas width/quantity of data items+quantity of data items // BRY: height of canvas
myCanvas.create_rectangle(i*rectangleWidth,400-400/rangeMax*data[i],i*rectangleWidth+rectangleWidth,400, fill=get_colour(data[i],rangeMax))
myCanvas.update()
#Takes in a list of numbers as well as the maximum value each number can be. Plots these as points on the canvas relative to the parameters selected on the form.
def plot_boxes(data,rangeMax):
myCanvas.delete("all")
quantity = len(data)
rectangleWidth = 800/quantity
for i in range(quantity):
#TLX: i*canvas width/quantity of data items // TLY: canvas height - canvas height/max data value*current data value
#BRX: i*canvas width/quantity of data items+quantity of data items // BRY: height of canvas
myCanvas.create_rectangle(i*rectangleWidth,400-400/rangeMax*data[i],i*rectangleWidth+rectangleWidth,400, width=0, fill=get_colour(data[i],rangeMax))
myCanvas.update()
#Declaration of form objects
myCanvas = Canvas(root, width=800, height=400)
myRadioBubble = Radiobutton(root, text='Bubble', variable=sortType, value="Bubble")
myRadioCocktail = Radiobutton(root, text='Cocktail Shaker', variable=sortType, value="Cocktail")
myRadioInsertion = Radiobutton(root, text="Insertion", variable=sortType, value="Insertion")
myRadioBogo = Radiobutton(root, text='Bogo', variable=sortType, value="Bogo")
myLabelQuantity = Label(root, text="Data points:")
myScaleQuantity = Scale(root, from_=3, to=1000, orient=HORIZONTAL)
myLabelRangeMax = Label(root, text="Max value:")
myScaleRangeMax = Scale(root, from_=5, to=100, orient=HORIZONTAL)
myLabelSpeed = Label(root, text="Speed:")
myScaleSpeed = Scale(root, from_=0, to=100, orient=HORIZONTAL)
myButton = Button(root, text="Go!", command=go)
myDropdownColours = OptionMenu(root , menuText, *colourOptions)
myText = Text(root, height=8)
#Default values set to form objects
sortType.set("Bubble")
menuText.set("Red")
myScaleQuantity.set(25)
myScaleRangeMax.set(100)
myScaleSpeed.set(50)
#Form object 'packing' and layout.
myCanvas.grid(row=0, columnspan=4)
myRadioBubble.grid(row=1, column=0)
myRadioCocktail.grid(row=1, column=1)
myRadioInsertion.grid(row=2, column=0)
myRadioBogo.grid(row=2, column=1)
myText.grid(rowspan=10, row=1, column=2)
myLabelQuantity.grid(row=3,column=0)
myScaleQuantity.grid(row=3,column=1)
myLabelRangeMax.grid(row=4,column=0)
myScaleRangeMax.grid(row=4,column=1)
myLabelSpeed.grid(row=5,column=0)
myScaleSpeed.grid(row=5,column=1)
myDropdownColours.grid(row=6, column=0, columnspan=2, pady=10)
myButton.grid(row=6, column=2, columnspan=2, pady=10)
root.mainloop() | jjdshrimpton/PythonJunk | Sorting Visualiser2.py | Sorting Visualiser2.py | py | 8,472 | python | en | code | 0 | github-code | 90 |
23858064439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import os
import os.path
import cv2
import sys
fname = sys.argv[1]
vc = cv2.VideoCapture(fname)
n = -1
rval = True
if not vc.isOpened():
print("Unable to open", fname, file=sys.stderr)
while rval:
rval, frame = vc.read()
n += 1
midframe = n//2
vc.release()
print('{} has {} frames, midframe = {}'.format(fname, n, midframe))
vc = cv2.VideoCapture(fname)
frame = None
for i in range(midframe):
rval, frame = vc.read()
base_path = os.path.split(os.path.dirname(fname))[0]
name_base = os.path.splitext(os.path.basename(fname))[0]
f1, f2 = [int(x) for x in name_base.split('-')]
assert f2-f1+1 == n
image_fname = str(f1+i) + ' (' + name_base + ').jpg'
out_fname = os.path.join(base_path, 'images', 'midframe', image_fname)
if os.path.isfile(out_fname):
print("WARNING: file exists:", out_fname, file=sys.stderr)
else:
cv2.imwrite(out_fname,frame)
print('Wrote', out_fname, '...')
vc.release()
| mvsjober/pair-annotate | scripts/midframe.py | midframe.py | py | 1,051 | python | en | code | 2 | github-code | 90 |
10828506062 | import os
import cv2
import shutil
from pycocotools.coco import COCO
def copy_some_imgs(json, class_name, path, to_path):
annFile = json
coco = COCO(annFile)
catIds = coco.getCatIds(catNms=[class_name])
imgIds = coco.getImgIds(catIds=catIds)
imgs = coco.loadImgs(ids=imgIds)
AnnIds = coco.getAnnIds(catIds=catIds)
Anns = coco.loadAnns(ids=AnnIds)
for i, img in enumerate(imgs):
filename = img['file_name']
shutil.copy(os.path.join(path, filename), os.path.join(to_path, filename))
if i%20==0:
print("i-th %d img saved" % i)
jsonpath = '/zhuxuhan/mscoco2014/annotations/instances_val2014.json'
imgpath = '/zhuxuhan/mscoco2014/val2014'
class_name = 'person'
save_path = '/zhuxuhan/14val_person'
copy_some_imgs(jsonpath, class_name, imgpath, save_path)
| ZHUXUHAN/Python-Tools | coco_img_copy.py | coco_img_copy.py | py | 822 | python | en | code | 1 | github-code | 90 |
26050686125 | import logging
from rest_framework import generics, status
from rest_framework.response import Response
from .models import ArticlesModel
from .serializers import ArticleSerializer
logger = logging.getLogger(__name__)
class ArticleListCreateAPIView(generics.ListCreateAPIView):
'''
Allowed methods: POST and LIST
POST: Creates a new Articles
LIST: Returns list of Articles
'''
queryset = ArticlesModel.objects.all()
serializer_class = ArticleSerializer
#? Create a new Article
def post(self, request, *args, **kwargs):
serializer = ArticleSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
serializer.save()
except Exception as ex:
logger.error(str(ex))
return Response({'detail': str(ex)},
status=status.HTTP_400_BAD_REQUEST)
response = {'detail': 'Article Created Successfully'}
logger.info(response)
return Response(response, status=status.HTTP_201_CREATED)
class ArticleUpdateRetriveDeleteAPIView(generics.GenericAPIView):
'''
Allowed methods: Patch
GET: Article by ID
PATCH: Update an Article
DELETE: Delete an Article
'''
queryset = ArticlesModel.objects.all()
serializer_class = ArticleSerializer
lookup_field = 'pk'
#? get single Article
def get(self, request, *args, **kwargs):
article = self.get_object()
serializer = ArticleSerializer(article)
return Response(serializer.data)
#? Update a Course
def patch(self, request, *args, **kwargs):
article = self.get_object()
serializer = ArticleSerializer(article, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
try:
serializer.save()
except Exception as ex:
logger.error(str(ex))
return Response({'detail': str(ex)},
status=status.HTTP_400_BAD_REQUEST)
response = {'detail': 'Article Updated Successfully'}
logger.info(response)
return Response(response, status=status.HTTP_201_CREATED)
#? Delete an Article
def delete(self, request, *args, **kwargs):
article = self.get_object()
try:
article.delete()
except Exception as ex:
logger.error(str(ex))
return Response({'detail': str(ex)},
status=status.HTTP_400_BAD_REQUEST)
response = {'detail': 'Article Deleted Successfully'}
logger.info(response)
return Response(response, status=status.HTTP_201_CREATED)
| preitychib/AnimoAPI | articles/views.py | views.py | py | 2,719 | python | en | code | 0 | github-code | 90 |
70510731177 | import weakref
import math
import numpy as np
import py_trees
import shapely
import carla
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.timer import GameTime
from srunner.scenariomanager.traffic_events import TrafficEvent, TrafficEventType
class Criterion(py_trees.behaviour.Behaviour):
"""
Base class for all criteria used to evaluate a scenario for success/failure
Important parameters (PUBLIC):
- name: Name of the criterion
- expected_value_success: Result in case of success
(e.g. max_speed, zero collisions, ...)
- expected_value_acceptable: Result that does not mean a failure,
but is not good enough for a success
- actual_value: Actual result after running the scenario
- test_status: Used to access the result of the criterion
- optional: Indicates if a criterion is optional (not used for overall analysis)
"""
def __init__(self,
name,
actor,
expected_value_success,
expected_value_acceptable=None,
optional=False,
terminate_on_failure=False):
super(Criterion, self).__init__(name)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._terminate_on_failure = terminate_on_failure
self.name = name
self.actor = actor
self.test_status = "INIT"
self.expected_value_success = expected_value_success
self.expected_value_acceptable = expected_value_acceptable
self.actual_value = 0
self.optional = optional
self.list_traffic_events = []
def setup(self, unused_timeout=15):
self.logger.debug("%s.setup()" % (self.__class__.__name__))
return True
def initialise(self):
self.logger.debug("%s.initialise()" % (self.__class__.__name__))
def terminate(self, new_status):
if (self.test_status == "RUNNING") or (self.test_status == "INIT"):
self.test_status = "SUCCESS"
self.logger.debug("%s.terminate()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
class MaxVelocityTest(Criterion):
"""
This class contains an atomic test for maximum velocity.
Important parameters:
- actor: CARLA actor to be used for this test
- max_velocity_allowed: maximum allowed velocity in m/s
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self, actor, max_velocity_allowed, optional=False, name="CheckMaximumVelocity"):
"""
Setup actor and maximum allowed velovity
"""
super(MaxVelocityTest, self).__init__(name, actor, max_velocity_allowed, None, optional)
def update(self):
"""
Check velocity
"""
new_status = py_trees.common.Status.RUNNING
if self.actor is None:
return new_status
velocity = CarlaDataProvider.get_velocity(self.actor)
self.actual_value = max(velocity, self.actual_value)
if velocity > self.expected_value_success:
self.test_status = "FAILURE"
else:
self.test_status = "SUCCESS"
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
class DrivenDistanceTest(Criterion):
"""
This class contains an atomic test to check the driven distance
Important parameters:
- actor: CARLA actor to be used for this test
- distance_success: If the actor's driven distance is more than this value (in meters),
the test result is SUCCESS
- distance_acceptable: If the actor's driven distance is more than this value (in meters),
the test result is ACCEPTABLE
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self,
actor,
distance_success,
distance_acceptable=None,
optional=False,
name="CheckDrivenDistance"):
"""
Setup actor
"""
super(DrivenDistanceTest, self).__init__(name, actor, distance_success, distance_acceptable, optional)
self._last_location = None
def initialise(self):
self._last_location = CarlaDataProvider.get_location(self.actor)
super(DrivenDistanceTest, self).initialise()
def update(self):
"""
Check distance
"""
new_status = py_trees.common.Status.RUNNING
if self.actor is None:
return new_status
location = CarlaDataProvider.get_location(self.actor)
if location is None:
return new_status
if self._last_location is None:
self._last_location = location
return new_status
self.actual_value += location.distance(self._last_location)
self._last_location = location
if self.actual_value > self.expected_value_success:
self.test_status = "SUCCESS"
elif (self.expected_value_acceptable is not None and
self.actual_value > self.expected_value_acceptable):
self.test_status = "ACCEPTABLE"
else:
self.test_status = "RUNNING"
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def terminate(self, new_status):
"""
Set final status
"""
if self.test_status != "SUCCESS":
self.test_status = "FAILURE"
super(DrivenDistanceTest, self).terminate(new_status)
class AverageVelocityTest(Criterion):
"""
This class contains an atomic test for average velocity.
Important parameters:
- actor: CARLA actor to be used for this test
- avg_velocity_success: If the actor's average velocity is more than this value (in m/s),
the test result is SUCCESS
- avg_velocity_acceptable: If the actor's average velocity is more than this value (in m/s),
the test result is ACCEPTABLE
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self,
actor,
avg_velocity_success,
avg_velocity_acceptable=None,
optional=False,
name="CheckAverageVelocity"):
"""
Setup actor and average velovity expected
"""
super(AverageVelocityTest, self).__init__(name, actor,
avg_velocity_success,
avg_velocity_acceptable,
optional)
self._last_location = None
self._distance = 0.0
def initialise(self):
self._last_location = CarlaDataProvider.get_location(self.actor)
super(AverageVelocityTest, self).initialise()
def update(self):
"""
Check velocity
"""
new_status = py_trees.common.Status.RUNNING
if self.actor is None:
return new_status
location = CarlaDataProvider.get_location(self.actor)
if location is None:
return new_status
if self._last_location is None:
self._last_location = location
return new_status
self._distance += location.distance(self._last_location)
self._last_location = location
elapsed_time = GameTime.get_time()
if elapsed_time > 0.0:
self.actual_value = self._distance / elapsed_time
if self.actual_value > self.expected_value_success:
self.test_status = "SUCCESS"
elif (self.expected_value_acceptable is not None and
self.actual_value > self.expected_value_acceptable):
self.test_status = "ACCEPTABLE"
else:
self.test_status = "RUNNING"
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def terminate(self, new_status):
"""
Set final status
"""
if self.test_status == "RUNNING":
self.test_status = "FAILURE"
super(AverageVelocityTest, self).terminate(new_status)
class CollisionTest(Criterion):
"""
This class contains an atomic test for collisions.
Important parameters:
- actor: CARLA actor to be used for this test
- terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self, actor, optional=False, name="CheckCollisions", terminate_on_failure=False):
"""
Construction with sensor setup
"""
super(CollisionTest, self).__init__(name, actor, 0, None, optional, terminate_on_failure)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
world = self.actor.get_world()
blueprint = world.get_blueprint_library().find('sensor.other.collision')
self._collision_sensor = world.spawn_actor(blueprint, carla.Transform(), attach_to=self.actor)
self._collision_sensor.listen(lambda event: self._count_collisions(weakref.ref(self), event))
def update(self):
"""
Check collision count
"""
new_status = py_trees.common.Status.RUNNING
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def terminate(self, new_status):
"""
Cleanup sensor
"""
if self._collision_sensor is not None:
self._collision_sensor.destroy()
self._collision_sensor = None
super(CollisionTest, self).terminate(new_status)
@staticmethod
def _count_collisions(weak_self, event):
"""
Callback to update collision count
"""
self = weak_self()
if not self:
return
registered = False
actor_type = None
self.test_status = "FAILURE"
self.actual_value += 1
if 'static' in event.other_actor.type_id and 'sidewalk' not in event.other_actor.type_id:
actor_type = TrafficEventType.COLLISION_STATIC
elif 'vehicle' in event.other_actor.type_id:
for traffic_event in self.list_traffic_events:
if traffic_event.get_type() == TrafficEventType.COLLISION_VEHICLE \
and traffic_event.get_dict()['id'] == event.other_actor.id: # pylint: disable=bad-indentation
registered = True # pylint: disable=bad-indentation
actor_type = TrafficEventType.COLLISION_VEHICLE
elif 'walker' in event.other_actor.type_id:
for traffic_event in self.list_traffic_events:
if traffic_event.get_type() == TrafficEventType.COLLISION_PEDESTRIAN \
and traffic_event.get_dict()['id'] == event.other_actor.id:
registered = True
actor_type = TrafficEventType.COLLISION_PEDESTRIAN
if not registered:
collision_event = TrafficEvent(event_type=actor_type)
collision_event.set_dict({'type': event.other_actor.type_id, 'id': event.other_actor.id})
collision_event.set_message("Agent collided against object with type={} and id={}".format(
event.other_actor.type_id, event.other_actor.id))
self.list_traffic_events.append(collision_event)
class KeepLaneTest(Criterion):
"""
This class contains an atomic test for keeping lane.
Important parameters:
- actor: CARLA actor to be used for this test
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self, actor, optional=False, name="CheckKeepLane"):
"""
Construction with sensor setup
"""
super(KeepLaneTest, self).__init__(name, actor, 0, None, optional)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
world = self.actor.get_world()
blueprint = world.get_blueprint_library().find('sensor.other.lane_invasion')
self._lane_sensor = world.spawn_actor(blueprint, carla.Transform(), attach_to=self.actor)
self._lane_sensor.listen(lambda event: self._count_lane_invasion(weakref.ref(self), event))
def update(self):
"""
Check lane invasion count
"""
new_status = py_trees.common.Status.RUNNING
if self.actual_value > 0:
self.test_status = "FAILURE"
else:
self.test_status = "SUCCESS"
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def terminate(self, new_status):
"""
Cleanup sensor
"""
if self._lane_sensor is not None:
self._lane_sensor.destroy()
self._lane_sensor = None
super(KeepLaneTest, self).terminate(new_status)
@staticmethod
def _count_lane_invasion(weak_self, event):
"""
Callback to update lane invasion count
"""
self = weak_self()
if not self:
return
self.actual_value += 1
class ReachedRegionTest(Criterion):
"""
This class contains the reached region test
The test is a success if the actor reaches a specified region
Important parameters:
- actor: CARLA actor to be used for this test
- min_x, max_x, min_y, max_y: Bounding box of the checked region
"""
def __init__(self, actor, min_x, max_x, min_y, max_y, name="ReachedRegionTest"):
"""
Setup trigger region (rectangle provided by
[min_x,min_y] and [max_x,max_y]
"""
super(ReachedRegionTest, self).__init__(name, actor, 0)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._min_x = min_x
self._max_x = max_x
self._min_y = min_y
self._max_y = max_y
def update(self):
"""
Check if the actor location is within trigger region
"""
new_status = py_trees.common.Status.RUNNING
location = CarlaDataProvider.get_location(self._actor)
if location is None:
return new_status
in_region = False
if self.test_status != "SUCCESS":
in_region = (location.x > self._min_x and location.x < self._max_x) and (
location.y > self._min_y and location.y < self._max_y)
if in_region:
self.test_status = "SUCCESS"
else:
self.test_status = "RUNNING"
if self.test_status == "SUCCESS":
new_status = py_trees.common.Status.SUCCESS
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
class OnSidewalkTest(Criterion):
"""
This class contains an atomic test to detect sidewalk invasions.
Important parameters:
- actor: CARLA actor to be used for this test
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
def __init__(self, actor, optional=False, name="OnSidewalkTest"):
"""
Construction with sensor setup
"""
super(OnSidewalkTest, self).__init__(name, actor, 0, None, optional)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._map = CarlaDataProvider.get_map()
self._onsidewalk_active = False
self.positive_shift = shapely.geometry.LineString([(0, 0), (0.0, 1.2)])
self.negative_shift = shapely.geometry.LineString([(0, 0), (0.0, -1.2)])
def update(self):
"""
Check lane invasion count
"""
new_status = py_trees.common.Status.RUNNING
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
current_transform = self._actor.get_transform()
current_location = current_transform.location
current_yaw = current_transform.rotation.yaw
rot_x = shapely.affinity.rotate(self.positive_shift, angle=current_yaw, origin=shapely.geometry.Point(0, 0))
rot_nx = shapely.affinity.rotate(self.negative_shift, angle=current_yaw, origin=shapely.geometry.Point(0, 0))
sample_point_right = current_location + carla.Location(x=rot_x.coords[1][0], y=rot_x.coords[1][1])
sample_point_left = current_location + carla.Location(x=rot_nx.coords[1][0], y=rot_nx.coords[1][1])
closest_waypoint_right = self._map.get_waypoint(sample_point_right, lane_type=carla.LaneType.Any)
closest_waypoint_left = self._map.get_waypoint(sample_point_left, lane_type=carla.LaneType.Any)
if closest_waypoint_right and closest_waypoint_left \
and closest_waypoint_right.lane_type != carla.LaneType.Sidewalk \
and closest_waypoint_left.lane_type != carla.LaneType.Sidewalk:
# we are not on a sidewalk
self._onsidewalk_active = False
else:
if not self._onsidewalk_active:
onsidewalk_event = TrafficEvent(event_type=TrafficEventType.ON_SIDEWALK_INFRACTION)
onsidewalk_event.set_message('Agent invaded the sidewalk')
onsidewalk_event.set_dict({'x': current_location.x, 'y': current_location.y})
self.list_traffic_events.append(onsidewalk_event)
self.test_status = "FAILURE"
self.actual_value += 1
self._onsidewalk_active = True
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
class WrongLaneTest(Criterion):
"""
This class contains an atomic test to detect invasions to wrong direction lanes.
Important parameters:
- actor: CARLA actor to be used for this test
- optional [optional]: If True, the result is not considered for an overall pass/fail result
"""
MAX_ALLOWED_ANGLE = 140.0
def __init__(self, actor, optional=False, name="WrongLaneTest"):
"""
Construction with sensor setup
"""
super(WrongLaneTest, self).__init__(name, actor, 0, None, optional)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._world = self.actor.get_world()
self._actor = actor
self._map = CarlaDataProvider.get_map()
self._infractions = 0
self._last_lane_id = None
self._last_road_id = None
blueprint = self._world.get_blueprint_library().find('sensor.other.lane_invasion')
self._lane_sensor = self._world.spawn_actor(blueprint, carla.Transform(), attach_to=self.actor)
self._lane_sensor.listen(lambda event: self._lane_change(weakref.ref(self), event))
def update(self):
"""
Check lane invasion count
"""
new_status = py_trees.common.Status.RUNNING
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def terminate(self, new_status):
"""
Cleanup sensor
"""
if self._lane_sensor is not None:
self._lane_sensor.destroy()
self._lane_sensor = None
super(WrongLaneTest, self).terminate(new_status)
@staticmethod
def _lane_change(weak_self, event):
"""
Callback to update lane invasion count
"""
# pylint: disable=protected-access
self = weak_self()
if not self:
return
# check the lane direction
lane_waypoint = self._map.get_waypoint(self._actor.get_location())
current_lane_id = lane_waypoint.lane_id
current_road_id = lane_waypoint.road_id
if not (self._last_road_id == current_road_id and self._last_lane_id == current_lane_id):
next_waypoint = lane_waypoint.next(2.0)[0]
if not next_waypoint:
return
vector_wp = np.array([next_waypoint.transform.location.x - lane_waypoint.transform.location.x,
next_waypoint.transform.location.y - lane_waypoint.transform.location.y])
vector_actor = np.array([math.cos(math.radians(self._actor.get_transform().rotation.yaw)),
math.sin(math.radians(self._actor.get_transform().rotation.yaw))])
ang = math.degrees(
math.acos(np.clip(np.dot(vector_actor, vector_wp) / (np.linalg.norm(vector_wp)), -1.0, 1.0)))
if ang > self.MAX_ALLOWED_ANGLE:
self.test_status = "FAILURE"
# is there a difference of orientation greater than MAX_ALLOWED_ANGLE deg with respect of the lane
# direction?
self._infractions += 1
self.actual_value += 1
wrong_way_event = TrafficEvent(event_type=TrafficEventType.WRONG_WAY_INFRACTION)
wrong_way_event.set_message('Agent invaded a lane in opposite direction: road_id={}, lane_id={}'.format(
current_road_id, current_lane_id))
wrong_way_event.set_dict({'road_id': current_road_id, 'lane_id': current_lane_id})
self.list_traffic_events.append(wrong_way_event)
# remember the current lane and road
self._last_lane_id = current_lane_id
self._last_road_id = current_road_id
class InRadiusRegionTest(Criterion):
"""
The test is a success if the actor is within a given radius of a specified region
Important parameters:
- actor: CARLA actor to be used for this test
- x, y, radius: Position (x,y) and radius (in meters) used to get the checked region
"""
def __init__(self, actor, x, y, radius, name="InRadiusRegionTest"):
"""
"""
super(InRadiusRegionTest, self).__init__(name, actor, 0)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._x = x # pylint: disable=invalid-name
self._y = y # pylint: disable=invalid-name
self._radius = radius
def update(self):
"""
Check if the actor location is within trigger region
"""
new_status = py_trees.common.Status.RUNNING
location = CarlaDataProvider.get_location(self._actor)
if location is None:
return new_status
if self.test_status != "SUCCESS":
in_radius = math.sqrt(((location.x - self._x)**2) + ((location.y - self._y)**2)) < self._radius
if in_radius:
route_completion_event = TrafficEvent(event_type=TrafficEventType.ROUTE_COMPLETED)
route_completion_event.set_message("Destination was successfully reached")
self.list_traffic_events.append(route_completion_event)
self.test_status = "SUCCESS"
else:
self.test_status = "RUNNING"
if self.test_status == "SUCCESS":
new_status = py_trees.common.Status.SUCCESS
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
class InRouteTest(Criterion):
"""
The test is a success if the actor is never outside route
Important parameters:
- actor: CARLA actor to be used for this test
- radius: Allowed radius around the route (meters)
- route: Route to be checked
- offroad_max: Maximum allowed distance the actor can deviate from the route, when not driving on a road (meters)
- terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test
"""
DISTANCE_THRESHOLD = 15.0 # meters
WINDOWS_SIZE = 3
def __init__(self, actor, radius, route, offroad_max, name="InRouteTest", terminate_on_failure=False):
"""
"""
super(InRouteTest, self).__init__(name, actor, 0, terminate_on_failure=terminate_on_failure)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._route = route
self._wsize = self.WINDOWS_SIZE
self._waypoints, _ = zip(*self._route)
self._route_length = len(self._route)
self._current_index = 0
def update(self):
"""
Check if the actor location is within trigger region
"""
new_status = py_trees.common.Status.RUNNING
location = CarlaDataProvider.get_location(self._actor)
if location is None:
return new_status
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
elif self.test_status == "RUNNING" or self.test_status == "INIT":
# are we too far away from the route waypoints (i.e., off route)?
off_route = True
shortest_distance = float('inf')
for index in range(max(0, self._current_index - self._wsize),
min(self._current_index + self._wsize + 1, self._route_length)):
# look for the distance to the current waipoint + windows_size
ref_waypoint = self._waypoints[index]
distance = math.sqrt(((location.x - ref_waypoint.x) ** 2) + ((location.y - ref_waypoint.y) ** 2))
if distance < self.DISTANCE_THRESHOLD \
and distance <= shortest_distance \
and index >= self._current_index:
shortest_distance = distance
self._current_index = index
off_route = False
if off_route:
route_deviation_event = TrafficEvent(event_type=TrafficEventType.ROUTE_DEVIATION)
route_deviation_event.set_message("Agent deviated from the route at (x={}, y={}, z={})".format(
location.x, location.y, location.z))
route_deviation_event.set_dict({'x': location.x, 'y': location.y, 'z': location.z})
self.list_traffic_events.append(route_deviation_event)
self.test_status = "FAILURE"
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
class RouteCompletionTest(Criterion):
"""
Check at which stage of the route is the actor at each tick
Important parameters:
- actor: CARLA actor to be used for this test
- route: Route to be checked
- terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test
"""
DISTANCE_THRESHOLD = 15.0 # meters
WINDOWS_SIZE = 2
def __init__(self, actor, route, name="RouteCompletionTest", terminate_on_failure=False):
"""
"""
super(RouteCompletionTest, self).__init__(name, actor, 100, terminate_on_failure=terminate_on_failure)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._route = route
self._wsize = self.WINDOWS_SIZE
self._current_index = 0
self._route_length = len(self._route)
self._waypoints, _ = zip(*self._route)
self.target = self._waypoints[-1]
self._accum_meters = []
prev_wp = self._waypoints[0]
for i, wp in enumerate(self._waypoints):
d = wp.distance(prev_wp)
if i > 0:
accum = self._accum_meters[i - 1]
else:
accum = 0
self._accum_meters.append(d + accum)
prev_wp = wp
self._traffic_event = TrafficEvent(event_type=TrafficEventType.ROUTE_COMPLETION)
self.list_traffic_events.append(self._traffic_event)
self._percentage_route_completed = 0.0
def update(self):
"""
Check if the actor location is within trigger region
"""
new_status = py_trees.common.Status.RUNNING
location = CarlaDataProvider.get_location(self._actor)
if location is None:
return new_status
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
elif self.test_status == "RUNNING" or self.test_status == "INIT":
for index in range(self._current_index, min(self._current_index + self._wsize + 1, self._route_length)):
# look for the distance to the current waipoint + windows_size
ref_waypoint = self._waypoints[index]
distance = math.sqrt(((location.x - ref_waypoint.x) ** 2) + ((location.y - ref_waypoint.y) ** 2))
if distance < self.DISTANCE_THRESHOLD:
# good! segment completed!
self._current_index = index
self._percentage_route_completed = 100.0 * float(self._accum_meters[self._current_index]) \
/ float(self._accum_meters[-1])
self._traffic_event.set_dict({'route_completed': self._percentage_route_completed})
self._traffic_event.set_message(
"Agent has completed > {:.2f}% of the route".format(self._percentage_route_completed))
if self._percentage_route_completed > 99.0 and location.distance(self.target) < self.DISTANCE_THRESHOLD:
route_completion_event = TrafficEvent(event_type=TrafficEventType.ROUTE_COMPLETED)
route_completion_event.set_message("Destination was successfully reached")
self.list_traffic_events.append(route_completion_event)
self.test_status = "SUCCESS"
elif self.test_status == "SUCCESS":
new_status = py_trees.common.Status.SUCCESS
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
self.actual_value = self._percentage_route_completed
return new_status
def terminate(self, new_status):
"""
Set test status to failure if not successful and terminate
"""
if self.test_status == "INIT":
self.test_status = "FAILURE"
super(RouteCompletionTest, self).terminate(new_status)
class RunningRedLightTest(Criterion):
"""
Check if an actor is running a red light
Important parameters:
- actor: CARLA actor to be used for this test
- terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test
"""
DISTANCE_LIGHT = 15 # m
def __init__(self, actor, name="RunningRedLightTest", terminate_on_failure=False):
"""
Init
"""
super(RunningRedLightTest, self).__init__(name, actor, 0, terminate_on_failure=terminate_on_failure)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._world = actor.get_world()
self._map = CarlaDataProvider.get_map()
self._list_traffic_lights = []
self._last_red_light_id = None
self.debug = False
all_actors = self._world.get_actors()
for _actor in all_actors:
if 'traffic_light' in _actor.type_id:
center, area = self.get_traffic_light_area(_actor)
waypoints = []
for pt in area:
waypoints.append(self._map.get_waypoint(pt))
self._list_traffic_lights.append((_actor, center, area, waypoints))
# pylint: disable=no-self-use
def is_vehicle_crossing_line(self, seg1, seg2):
"""
check if vehicle crosses a line segment
"""
line1 = shapely.geometry.LineString([(seg1[0].x, seg1[0].y), (seg1[1].x, seg1[1].y)])
line2 = shapely.geometry.LineString([(seg2[0].x, seg2[0].y), (seg2[1].x, seg2[1].y)])
inter = line1.intersection(line2)
return not inter.is_empty
def update(self):
"""
Check if the actor is running a red light
"""
new_status = py_trees.common.Status.RUNNING
location = self._actor.get_transform().location
if location is None:
return new_status
ego_waypoint = self._map.get_waypoint(location)
tail_pt0 = self.rotate_point(carla.Vector3D(-1.0, 0.0, location.z), self._actor.get_transform().rotation.yaw)
tail_pt0 = location + carla.Location(tail_pt0)
tail_pt1 = self.rotate_point(carla.Vector3D(-4.0, 0.0, location.z), self._actor.get_transform().rotation.yaw)
tail_pt1 = location + carla.Location(tail_pt1)
for traffic_light, center, area, waypoints in self._list_traffic_lights:
if self.debug:
z = 2.1
if traffic_light.state == carla.TrafficLightState.Red:
color = carla.Color(255, 0, 0)
elif traffic_light.state == carla.TrafficLightState.Green:
color = carla.Color(0, 255, 0)
else:
color = carla.Color(255, 255, 255)
self._world.debug.draw_point(center + carla.Location(z=z), size=0.2, color=color, life_time=0.01)
for pt in area:
self._world.debug.draw_point(pt + carla.Location(z=z), size=0.1, color=color, life_time=0.01)
for wp in waypoints:
text = "{}.{}".format(wp.road_id, wp.lane_id)
self._world.debug.draw_string(
wp.transform.location, text, draw_shadow=False, color=color, life_time=0.01)
# logic
center_loc = carla.Location(center)
if self._last_red_light_id and self._last_red_light_id == traffic_light.id:
continue
if center_loc.distance(location) > self.DISTANCE_LIGHT:
continue
if traffic_light.state != carla.TrafficLightState.Red:
continue
for wp in waypoints:
if ego_waypoint.road_id == wp.road_id and ego_waypoint.lane_id == wp.lane_id:
# this light is red and is affecting our lane!
# is the vehicle traversing the stop line?
if self.is_vehicle_crossing_line((tail_pt0, tail_pt1), (area[0], area[-1])):
self.test_status = "FAILURE"
self.actual_value += 1
location = traffic_light.get_transform().location
red_light_event = TrafficEvent(event_type=TrafficEventType.TRAFFIC_LIGHT_INFRACTION)
red_light_event.set_message("Agent ran a red light {} at (x={}, y={}, x={})".format(
traffic_light.id,
location.x,
location.y,
location.z))
red_light_event.set_dict({'id': traffic_light.id, 'x': location.x,
'y': location.y, 'z': location.z})
self.list_traffic_events.append(red_light_event)
self._last_red_light_id = traffic_light.id
break
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
def rotate_point(self, point, angle):
"""
rotate a given point by a given angle
"""
x_ = math.cos(math.radians(angle)) * point.x - math.sin(math.radians(angle)) * point.y
y_ = math.sin(math.radians(angle)) * point.x - math.cos(math.radians(angle)) * point.y
return carla.Vector3D(x_, y_, point.z)
def get_traffic_light_area(self, traffic_light):
"""
get area of a given traffic light
"""
base_transform = traffic_light.get_transform()
base_rot = base_transform.rotation.yaw
area_loc = base_transform.transform(traffic_light.trigger_volume.location)
wpx = self._map.get_waypoint(area_loc)
while not wpx.is_intersection:
next_wp = wpx.next(1.0)[0]
if next_wp:
wpx = next_wp
else:
break
wpx_location = wpx.transform.location
area_ext = traffic_light.trigger_volume.extent
area = []
# why the 0.9 you may ask?... because the triggerboxes are set manually and sometimes they
# cross to adjacent lanes by accident
x_values = np.arange(-area_ext.x * 0.9, area_ext.x * 0.9, 1.0)
for x in x_values:
point = self.rotate_point(carla.Vector3D(x, 0, area_ext.z), base_rot)
area.append(wpx_location + carla.Location(x=point.x, y=point.y))
return area_loc, area
class RunningStopTest(Criterion):
"""
Check if an actor is running a stop sign
Important parameters:
- actor: CARLA actor to be used for this test
- terminate_on_failure [optional]: If True, the complete scenario will terminate upon failure of this test
"""
PROXIMITY_THRESHOLD = 50.0 # meters
SPEED_THRESHOLD = 0.1
WAYPOINT_STEP = 1.0 # meters
def __init__(self, actor, name="RunningStopTest", terminate_on_failure=False):
"""
"""
super(RunningStopTest, self).__init__(name, actor, 0, terminate_on_failure=terminate_on_failure)
self.logger.debug("%s.__init__()" % (self.__class__.__name__))
self._actor = actor
self._world = CarlaDataProvider.get_world()
self._map = CarlaDataProvider.get_map()
self._list_stop_signs = []
self._target_stop_sign = None
self._stop_completed = False
all_actors = self._world.get_actors()
for _actor in all_actors:
if 'traffic.stop' in _actor.type_id:
self._list_stop_signs.append(_actor)
@staticmethod
def point_inside_boundingbox(point, bb_center, bb_extent):
"""
X
:param point:
:param bb_center:
:param bb_extent:
:return:
"""
# pylint: disable=invalid-name
A = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y - bb_extent.y)
B = carla.Vector2D(bb_center.x + bb_extent.x, bb_center.y - bb_extent.y)
D = carla.Vector2D(bb_center.x - bb_extent.x, bb_center.y + bb_extent.y)
M = carla.Vector2D(point.x, point.y)
AB = B - A
AD = D - A
AM = M - A
am_ab = AM.x * AB.x + AM.y * AB.y
ab_ab = AB.x * AB.x + AB.y * AB.y
am_ad = AM.x * AD.x + AM.y * AD.y
ad_ad = AD.x * AD.x + AD.y * AD.y
return am_ab > 0 and am_ab < ab_ab and am_ad > 0 and am_ad < ad_ad
def is_actor_affected_by_stop(self, actor, stop, multi_step=20):
"""
Check if the given actor is affected by the stop
"""
affected = False
# first we run a fast coarse test
current_location = actor.get_location()
stop_location = stop.get_transform().location
if stop_location.distance(current_location) > self.PROXIMITY_THRESHOLD:
return affected
# print("Affected by stop!")
stop_t = stop.get_transform()
transformed_tv = stop_t.transform(stop.trigger_volume.location)
# slower and accurate test based on waypoint's horizon and geometric test
list_locations = [current_location]
waypoint = self._map.get_waypoint(current_location)
for _ in range(multi_step):
if waypoint:
waypoint = waypoint.next(self.WAYPOINT_STEP)[0]
if not waypoint:
break
list_locations.append(waypoint.transform.location)
for actor_location in list_locations:
if self.point_inside_boundingbox(actor_location, transformed_tv, stop.trigger_volume.extent):
affected = True
return affected
def _scan_for_stop_sign(self):
target_stop_sign = None
for stop_sign in self._list_stop_signs:
if self.is_actor_affected_by_stop(self._actor, stop_sign):
# this stop sign is affecting the vehicle
target_stop_sign = stop_sign
break
return target_stop_sign
def update(self):
"""
Check if the actor is running a red light
"""
new_status = py_trees.common.Status.RUNNING
location = self._actor.get_location()
if location is None:
return new_status
if not self._target_stop_sign:
# scan for stop signs
self._target_stop_sign = self._scan_for_stop_sign()
else:
# we were in the middle of dealing with a stop sign
if not self.is_actor_affected_by_stop(self._actor, self._target_stop_sign):
# is the vehicle out of the influence of this stop sign now?
if not self._stop_completed:
# did we stop?
self.test_status = "FAILURE"
stop_location = self._target_stop_sign.get_transform().location
running_stop_event = TrafficEvent(event_type=TrafficEventType.STOP_INFRACTION)
running_stop_event.set_message("Agent ran a stop {} at (x={}, y={}, x={})".format(
self._target_stop_sign.id,
stop_location.x,
stop_location.y,
stop_location.z))
running_stop_event.set_dict({'id': self._target_stop_sign.id,
'x': stop_location.x,
'y': stop_location.y,
'z': stop_location.z})
self.list_traffic_events.append(running_stop_event)
# reset state
self._target_stop_sign = None
self._stop_completed = False
if self._target_stop_sign:
# we are already dealing with a target stop sign
#
# did the ego-vehicle stop?
current_speed = CarlaDataProvider.get_velocity(self._actor)
if current_speed < self.SPEED_THRESHOLD:
self._stop_completed = True
if self._terminate_on_failure and (self.test_status == "FAILURE"):
new_status = py_trees.common.Status.FAILURE
self.logger.debug("%s.update()[%s->%s]" % (self.__class__.__name__, self.status, new_status))
return new_status
| yixiao1/Action-Based-Representation-Learning | scenario_runner/srunner/scenariomanager/scenarioatomics/atomic_criteria.py | atomic_criteria.py | py | 44,084 | python | en | code | 13 | github-code | 90 |
4124049511 | #!/usr/bin/env python3
""" This is a utility module to read PGM image files that represent a map of the environment.
It can handle ascii (P2) and binary (P5) PGM file formats.
The image data is converted to a numpy array.
"""
import numpy as np
def read_line(pgm_file):
""" Read a line from the pgm file.
Comment lines (#) are skipped and trailing spaces removed.
"""
while True:
line = pgm_file.readline()
if not isinstance(line, str):
line = line.decode("utf-8")
if not line.lstrip().startswith('#'):
return line.rstrip()
def check_file_type(pgm_filename):
""" Check if the pgm file data is ascii (P2) or binary (P5) encoded
Return the type as a string.
"""
try:
with open(pgm_filename, 'r') as pgm_file:
data_type = read_line(pgm_file)
if data_type != 'P2' and data_type != 'P5':
raise ValueError("PGM file type must be P2 or P5")
return data_type
except UnicodeDecodeError:
# File is binary
with open(pgm_filename, 'rb') as pgm_file:
data_type = read_line(pgm_file)
if data_type != 'P5':
raise ValueError("Found binary file which is NOT P5")
return data_type
def read_ascii_data(pgm_file, data):
""" Read the P2 data and fill the numpy array """
for y in range(data.shape[1]):
for x, val in enumerate(read_line(pgm_file).split()):
val = int(val)
# Invert y coordinate
y_inv = data.shape[1] - y - 1
data[x, y_inv] = val
def read_binary_data(pgm_file, data):
""" Read the P5 data and fill the numpy array """
for y in range(data.shape[1]):
for x in range(data.shape[0]):
val = ord(pgm_file.read(1))
# Invert y coordinate
y_inv = data.shape[1] - y - 1
data[x, y_inv] = val
def read_pgm(pgm_filename):
""" Read a pgm file and return the data as a numpy array """
# First, check if file data is ascii (Pw) or binary (P5) encoded
data_type = check_file_type(pgm_filename)
file_read_options = 'r' if data_type == 'P2' else 'rb'
print("PGM data type: {}".format(data_type))
with open(pgm_filename, file_read_options) as pgm_file:
# Skip first line (data type)
read_line(pgm_file)
# Read data size and depth
(width, height) = [int(i) for i in read_line(pgm_file).split()]
depth = int(read_line(pgm_file))
# TODO: For now only 8bit files are supported.
# The type of the np.array should change depending on the depth
assert depth == 255
print("width: {}".format(width))
print("height: {}".format(height))
print("depth: {}".format(depth))
# Read image data
data = np.empty((width, height), np.uint8)
if data_type == 'P2':
read_ascii_data(pgm_file, data)
else:
read_binary_data(pgm_file, data)
return data
def main(args):
from matplotlib import pyplot as plt
data = read_pgm(args.map_file)
print(data.shape)
print(data)
plt.imshow(data)
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='PGM module test')
parser.add_argument('map_file', metavar='map_file', type=str,
help='Map pgm file')
args = parser.parse_args()
main(args)
| Butakus/landmark_placement_optimization | lpo/pgm.py | pgm.py | py | 3,488 | python | en | code | 1 | github-code | 90 |
1497915572 | """
Created on Sat Sep 19 10:18:34 2020
@author: Camilo
"""
import matplotlib.pyplot as plot
import numpy as np
def canicas(mat,vect,clicks):
res=[]
k=0
while k != clicks:
vect = mat*vect
k+=1
for i in range(len(vect)):
res = res + [int(vect[i])]
for i in range(len(res)):
if res[i]==1:
res[i]= ["True"]
else:
res[i]=["False"]
return res
def clasicoproba(mat,vec,clicks):
k=0
res = []
while k != clicks:
vec = mat*vec
k+=1
for i in range(len(vec)):
res = res + [[float(vec[i])]]
return res
def multiplerendija(mat,clicks):
k=0
mat1 = mat[:]
while k!= clicks:
for k in range(clicks):
mat = mat*mat1
k+=1
row, column = len(mat), len(mat[0])
for i in range(row):
nRow = []
for j in range(column):
nRow.append([(modulo(mat[i][j]) ** 2), 0])
mat[i] = nRow
return mat
def modulo (num):
ans = (num[0] ** 2 + num[1] ** 2) ** (1/2)
return ans
def grafico(vector):
data =len(vector)
x = np.array([x for x in range(data)])
y = np.array([round(vector[x][0] * 100, 2) for x in range(data)])
plot.bar(x, y, color='g', align='center')
plot.title('Probabilidad del vector')
plot.show()
| camiloarchila/clasico_a_lo_cuantico | clasicoalocuantico.py | clasicoalocuantico.py | py | 1,423 | python | en | code | 0 | github-code | 90 |
25067316274 | import os
import random
def get_dirs_and_files(path):
dir_list = [directory for directory in os.listdir(path) if os.path.isdir(path + './' + directory)]
file_list = [directory for directory in os.listdir(path) if not os.path.isdir(path + './' + directory)]
return dir_list, file_list
def classify_pic(path):
# To be implemented by Diego: Replace with ML model
if "dog" in path:
return 0.5 + random.random() / 2
return random.random() / 2
def process_dir(path):
dir_list, file_list = get_dirs_and_files(path)
cat_list = []
dog_list = []
# Your code goes here
# traverse through directory and files
for root, dirs, files in os.walk(path):
# populate directory list
for i in dirs:
dir_list.append(i)
# populate files list
for i in files:
#ignore files if they aren't Jpeg
if i.endswith('jpg'):
file_list.append(i)
# populate dog and cat lists
for i in range(len(file_list)):
# if picture includes dog
if classify_pic(file_list[i]) >= 0.5:
dog_list.append(file_list[i])
# if picture includes cat
else:
cat_list.append(file_list[i])
# test to find out if dog/cat lists are correct
print(dog_list)
print(cat_list)
return cat_list, dog_list
def main():
start_path = './' # current directory
process_dir(start_path)
main()
| eamenier/CS2302Lab1OptionA | Main.py | Main.py | py | 1,525 | python | en | code | 0 | github-code | 90 |
40389706915 | # Time: O(nlogn)
# 随机的在数组中选择一个数key, 小于等于key的数统一放到key的左边, 大于key的数统一放到key的右边。
# 对左右两个部分,分别递归的调用快速排序的过程。
# 快速排序——划分过程(Partition过程): 即找到一个数后,小于等于它的数如何放到它的左边,大于它的数如何放到它的右边。
# 1、令划分值放在整个数组最后的位置
# 2、设计一个小于等于区间,初始长度为0,放在 整个数组的左边
# 3、从左到右遍历所有元素,
# 如果当前元素m大于划分值,继续遍历下一个元素;
# 如果当前元素m小于等于划分值,将当前元素m和小于等于区间(在整个数组的左边)的下一个数进行交换。
# 令小于等于区间向右回一个位置(包含住刚刚那个元素m)。
# ...
# 4、当遍历完所有元素,直到最后那个数(划分值)的时候,将划分值与小于等于区间的下一个元素交换。
# 这就是一个完整的划分过程,时间复杂度为 O(n)
class Solution(object):
def qSort(self, strs):
self.quickSort(strs, 0, len(strs)-1)
return strs
def quickSort(self, strs, low, high):
if low < high:
# 随机的在数组中选择一个数key, 小于等于key的数统一放到key的左边, 大于key的数统一放到key的右边。
# 划分后 key的位置n:
n = self.partition(strs, low, high)
# 以key值划分好后的字符串 对key的左右两个部分,分别递归调用快排过程:
self.quickSort(strs, low, n-1)
self.quickSort(strs, n+1, high)
# 划分过程
def partition(self, strs, low, high):
# 选第一个元素为Key值
key = strs[low]
# 从右到左遍历所有元素
while low < high:
# strs[high]比Key大或者等于key, 位置不变, high往前移一位
while low < high and key <= strs[high]:
high -= 1
# strs[high]比key小, 将strs[high]放到最左边的位置。
strs[low] = strs[high]
# strs[low]比Key小或者等于key, 位置不变, low往后移一位
while low < high and strs[low] <= key:
low += 1
# strs[low]比Key大, 将strs[low]放到最右边的位置。
strs[high] = strs[low]
# 当遍历完所有元素,直到最后那个数(划分值)的时候,将划分值与小于等于区间的下一个元素交换。
# 最后的Low (小于key的都放到了左边 low右移 大于Key的都放到了右边) low所在位置为key应该放的位置
strs[low] = key
return low # 返回Key的位置
if __name__ == '__main__':
solution = Solution()
strs = [54, 35, 48, 36, 27, 12, 44, 44, 8, 14, 26, 17, 28]
print(solution.qSort(strs))
| yuanswife/LeetCode | src/Sort/O(nlogn)_快速排序.py | O(nlogn)_快速排序.py | py | 2,905 | python | zh | code | 0 | github-code | 90 |
7994526347 | from datetime import datetime as dt
from datetime import date, timedelta
import numpy as np
import pandas as pd
from readlog import readlogline
import sys
import re
import subprocess
from scapy.all import *
import math
import pickle
def main(re_name):
apps = ['snort', 'suricata', 'Lastline', 'pa3220', 'ddi4100', 'sourcefire']
base_dir = './%s/' % (re_name)
replay_log = base_dir + '%s.log' % (re_name)
outses = base_dir + '%sses.csv' % (re_name)
#outgroup = base_dir + '%sgroup.csv' % (re_name)
#c_outgroup = base_dir + '%scgroup.csv' % (re_name)
outgroup_pkl = base_dir + '%sgroup.pkl' % (re_name)
c_outgroup_pkl = base_dir + '%scgroup.pkl' % (re_name)
ori_pcap = base_dir + '%s.pcap' % (re_name[5:])
re_pcap = base_dir + '%s.pcap' % (re_name)
binary_pkl = base_dir + '%sbinary.pkl' % (re_name)
multi_pkl = base_dir + '%smulti.pkl' % (re_name)
binary_csv = base_dir + '%sbinary.csv' % (re_name)
multi_csv = base_dir + '%smulti.csv' % (re_name)
alert_df = mk_alert_df(outgroup_pkl, c_outgroup_pkl);
binary_df, multi_df = convert2bypkt(alert_df, re_pcap)
save_df(binary_df, multi_df, binary_pkl, multi_pkl, binary_csv, multi_csv)
def save_df(binary_df, multi_df, binary_pkl, multi_pkl, binary_csv, multi_csv):
with open(binary_pkl, 'wb') as f:
pickle.dump(binary_df, f)
with open(multi_pkl, 'wb') as f:
pickle.dump(multi_df, f)
binary_df.to_csv(binary_csv)
multi_df.to_csv(multi_csv)
return 0
def convert2bypkt(df, re_pcap):
binary_list = []
multi_list = []
cnt = 0
with PcapReader(re_pcap) as cap:
for pkt in cap:
cnt += 1
if cnt % 10000 == 0:
print(cnt)
timestamp = dt.fromtimestamp(float(pkt.time))
target_df = df[df['timestamps'].apply(lambda x: timestamp in x)]
if len(target_df) == 1:
binary_list.append([cnt, 1])
cat = target_df['cat']
multi_list.append([cnt, cat])
elif len(target_df) > 1:
binary_list.append([cnt, 1])
cat = target_df['cat'].iloc[0]
multi_list.append([cnt, cat])
#print('error')
#print(target_df)
else:
binary_list.append([cnt, 0])
multi_list.append([cnt, 0])
binary_df = pd.DataFrame(binary_list, columns=['frame', 'label'])
multi_df = pd.DataFrame(multi_list, columns=['frame', 'label'])
return binary_df, multi_df
def mk_alert_df(outgroup_pkl, c_outgroup_pkl):
with open(outgroup_pkl, 'rb') as f:
oss_df = pickle.load(f)
with open(c_outgroup_pkl, 'rb') as f:
reco_df = pickle.load(f)
#alert_df = pd.read_csv(outses, dtype={'timestamps':'object', 'ids':'object', 'id':'str', 'msg':'str',\
# 'classification':'str', 'priority':'object', 'protocol':'str', 'src':'str',\
# 'spt':'str', 'dst':'str', 'dpt':'str', 'app':'object', 'index':'object'})
#oss_df = pd.read_csv(outgroup, dtype={'src':'str', 'spt':'str', 'dst':'str', 'dpt':'str'})
#reco_df = pd.read_csv(c_outgroup, dtype={'src':'str', 'spt':'str', 'dst':'str', 'dpt':'str'})
alert_df = pd.concat([oss_df, reco_df], axis=0)
alert_df['cat'] = alert_df['cat'].apply(lambda x: tuple(x))
alert_df['ids'] = alert_df['ids'].apply(lambda x: tuple(x))
alert_df['index'] = alert_df['index'].apply(lambda x: tuple(x))
alert_df['date'] = alert_df['date'].apply(lambda x: tuple(x))
alert_df['lev'] = alert_df['lev'].apply(lambda x: tuple(x))
alert_df['app'] = alert_df['app'].apply(lambda x: tuple(x))
print(alert_df)
alert_df = grouping(alert_df)
return alert_df
def grouping(df):
groupdf = df.groupby('timestamps', as_index=False).agg(\
{'ids': 'first',
'index': list,
'date': list,
'cat': set,
'lev': list,
'src': 'first',
'spt': 'first',
'dst': 'first',
'dpt': 'first',
'app': set})
return groupdf
if __name__ == '__main__':
#start = dt(2021, 11, 15, 2, 50, 16)
#end = dt(2021, 11, 15, 11, 47, 0)
#re_name = '1115-2018Wed'
re_name = sys.argv[1]
#year = sys.argv[2]
sys.exit(main(re_name))
| suuri-kyudai/Generating-Dataset-for-NIDS | mk_by_packet.py | mk_by_packet.py | py | 4,361 | python | en | code | 3 | github-code | 90 |
72905503657 | import flask
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True
# Create some test data for our catalog in the form of a list of dictionaries.
banks = {
123456789:
{'id': 123456789,
'Bank': 'NBP S.A.',
'Osoba': 'Aleksander Kociumaka',
'Numer': '8748374233',
'chalenge': '6789',
'danger': False,
},
123456790: {'id': 123456790,
'Bank': 'Bardzo OK Bank S.A.',
'Osoba': 'Albert Blaztowitz',
'Numer': '8748374240',
'chalenge': '6770',
'danger': False,
},
}
@app.route('/', methods=['GET'])
def home():
return '''<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>'''
# A route to return all of the available entries in our catalog.
@app.route('/api/v1/tel/<int:telefon>', methods=['GET'])
def api_tel(telefon):
out = banks.get(telefon, {'id':telefon, 'no_calls':"true", 'danger':True})
return jsonify(out)
app.run() | zadadam/AssecoHacakthon | AppApi/server.py | server.py | py | 990 | python | en | code | 0 | github-code | 90 |
18011946619 | from functools import lru_cache
@lru_cache
def comb(n, k):
if k == 0:
return 1
elif n == k:
return 1
else:
return comb(n-1, k) + comb(n-1, k-1)
N, A, B = map(int, input().split())
vs = sorted(map(int, input().split()), reverse = True)
print(sum(vs[:A]) / A)
v_replaceable = vs[A]
n = vs.count(v_replaceable)
a = A - vs.index(v_replaceable)
b = min(n, B - vs.index(v_replaceable))
if vs[0] == v_replaceable:
#n個からa~b個を選ぶ選び方が答え。
print(sum(comb(n, t) for t in range(a, b+1)))
else:
#n個からa個を選ぶ選び方が答え。
print(comb(n, a)) | Aasthaengg/IBMdataset | Python_codes/p03776/s360145241.py | s360145241.py | py | 627 | python | en | code | 0 | github-code | 90 |
10214817532 | # https://www.hackerrank.com/contests/smart-interviews/challenges/si-path-in-a-matrix/copy-from/1321037246
'''Given a matrix, find the number of ways to reach from the top-left cell to the right-bottom cell. At any step, from the current cell (i,j) you can either move to (i+1,j) or (i,j+1) or (i+1, j+1). Please note that certain cells are forbidden and cannot be used.
Input Format
First line of input contains T - number of test cases. First line of each test case contains N, M - size of the matrix and B - number of forbidden cells. Its followed by B lines each containing a pair (i,j) - index of the forbidden cell.
Constraints
20 points
1 <= N, M <= 10
80 points
1 <= N, M <= 100
General Constraints
1 <= T <= 500
0 <= i < N
0 <= j < M
Output Format
For each test case, print the number of ways, separated by newline. Since the output can be very large, print output % 1000000007
Sample Input 0
5
5 2 1
2 0
7 3 1
1 0
6 3 1
5 2
2 9 1
0 1
5 6 2
0 1
1 0
Sample Output 0
4
24
0
2
129
'''
__author__ = "sheetansh"
def getWays(arr):
if(arr[0][0] == 0):
return 0
c = 1
dp = [[0 for x in range(len(arr[0]))] for x in range(len(arr))]
dp[0][0] = 1
for i in range(1, len(arr[0])):
if (arr[0][i] == 0):
break
dp[0][i] = 1
for i in range(1, len(arr)):
if (arr[i][0] == 0):
break
dp[i][0] = 1
for r in range(1, len(arr)):
for c in range(1, len(arr[0])):
if(arr[r][c] != 0):
dp[r][c] = (dp[r-1][c-1]+dp[r-1][c]+dp[r][c-1])%1000000007
return dp[len(arr)-1][len(arr[0])-1]
for _ in range(int(input())):
r,c,b = list(map(int, input().split()))
arr = [[1 for x in range(c)] for x in range(r)]
for _ in range(b):
i, j = list(map(int, input().split()))
arr[i][j] = 0
print(getWays(arr)) | SheetanshKumar/smart-interviews-problems | Path in a Matrix.py | Path in a Matrix.py | py | 1,854 | python | en | code | 6 | github-code | 90 |
37248827870 | import sys
f = open(sys.argv[1])
data = f.read().strip().split(',')
data = [int(d) for d in data]
def calculate(nums, n):
i = 0
prev = nums[-1]
numbers = dict()
numbers[0] = list()
while i < n:
if i < len(nums):
numbers[nums[i]] = [i]
i += 1
else:
if len(numbers[prev]) == 1:
numbers[0].append(i)
prev = 0
else:
prev = numbers[prev][-1] - numbers[prev][-2]
if prev in numbers:
numbers[prev].append(i)
else:
numbers[prev] = [i]
i += 1
return prev
print(f'Part 1: {calculate(data, 2020)}')
print(f'Part 2: {calculate(data, 30000000)}')
| hmludwig/aoc2020 | src/day15.py | day15.py | py | 760 | python | en | code | 0 | github-code | 90 |
34840005574 | import numpy as np
def standardize_image(image):
image -= np.min(image)
image /= np.std(image)
return image
def ensemble_expand(image):
ensemble = np.zeros((8,) + image.shape)
ensemble[0] = image
ensemble[1] = np.fliplr(image)
ensemble[2] = np.flipud(image)
ensemble[3] = np.rot90(image)
ensemble[4] = np.fliplr(np.flipud(image))
ensemble[5] = np.fliplr(np.rot90(image))
ensemble[6] = np.fliplr(np.flipud(np.rot90(image)))
ensemble[7] = np.flipud(np.rot90(image))
return ensemble
def ensemble_reduce(ensemble):
ensemble[1] = np.fliplr(ensemble[1])
ensemble[2] = np.flipud(ensemble[2])
ensemble[3] = np.rot90(ensemble[3], k=3)
ensemble[4] = np.flipud(np.fliplr(ensemble[4]))
ensemble[5] = np.rot90(np.fliplr(ensemble[5]), k=3)
ensemble[6] = np.rot90(np.flipud(np.fliplr(ensemble[6])), k=3)
ensemble[7] = np.rot90(np.flipud(ensemble[7]), k=3)
return np.sum(ensemble, axis=0) / 8.
| jacobjma/nionswift-deep-learning | nionswift_plugin/nionswift_structure_recognition/utils.py | utils.py | py | 971 | python | en | code | 0 | github-code | 90 |
70904616937 | import sys; sys.setrecursionlimit(10**6); input = sys.stdin.readline
ans = {}
def find_giga(r):
global len_gd
if len(graph[r]) == 2:
r, d = graph[r][0]
len_gd += d
return find_giga(r)
else:
return r
def dfs(x, d, sum):
global len_gi
sum = max(sum, sum + d)
if len_gi < sum:
len_gi = sum
visited[x] = True
for g in graph[x]:
if not visited[g[0]]:
dfs(g[0], g[1], sum)
if __name__ == "__main__":
n, r = map(int, input().split())
graph = [[] for _ in range(n + 1)]
visited = [[False] for _ in range(n + 1)]
for _ in range(n - 1):
a, b, d = map(int, input().split())
graph[a].append((b, d))
graph[b].append((a, b))
# 기둥 , 기가
len_gd = 0
giga = find_giga(r)
# 가장 긴 가지 찾기
len_gi = 0
dfs(giga, 0, 0)
print(len_gd, len_gi) | dohun31/algorithm | 2021/week_06/210811/20924.py | 20924.py | py | 905 | python | en | code | 1 | github-code | 90 |
72143629418 | from __future__ import print_function, unicode_literals
import json
import pytest
from gratipay.testing import Harness
from aspen import Response
class Tests(Harness):
def hit_members_json(self, method='GET', auth_as=None):
response = self.client.GET('/~Enterprise/members/index.json', auth_as=auth_as)
return json.loads(response.body)
@pytest.mark.xfail(reason='migrating to Teams; see #3399')
def test_team_has_members(self):
team = self.make_participant('Enterprise', number='plural', claimed_time='now')
team.add_member(self.make_participant('alice', claimed_time='now'))
team.add_member(self.make_participant('bob', claimed_time='now'))
team.add_member(self.make_participant('carl', claimed_time='now'))
actual = [x['username'] for x in self.hit_members_json()]
assert actual == ['carl', 'bob', 'alice', 'Enterprise']
@pytest.mark.xfail(reason='migrating to Teams; see #3399')
def test_team_admin_can_get_bare_bones_list(self):
self.make_participant('Enterprise', number='plural', claimed_time='now')
actual = [x['username'] for x in self.hit_members_json(auth_as='Enterprise')]
assert actual == ['Enterprise']
@pytest.mark.xfail(reason='migrating to Teams; see #3399')
def test_anon_cant_get_bare_bones_list(self):
self.make_participant('Enterprise', number='plural', claimed_time='now')
assert pytest.raises(Response, self.hit_members_json).value.code == 404
@pytest.mark.xfail(reason='migrating to Teams; see #3399')
def test_non_admin_cant_get_bare_bones_list(self):
self.make_participant('Enterprise', number='plural', claimed_time='now')
self.make_participant('alice', claimed_time='now')
assert pytest.raises(Response, self.hit_members_json, auth_as='alice').value.code == 404
| gratipay/gratipay.com | tests/py/test_members_json.py | test_members_json.py | py | 1,864 | python | en | code | 1,121 | github-code | 90 |
25255793822 | import sys
from collections import deque
T = int(sys.stdin.readline())
dx = [-2, -2, -1, -1, 1, 1, 2, 2]
dy = [1, -1, 2, -2, 2, -2, 1, -1]
def bfs(matrix, destination_x, destination_y, q):
while q:
x, y = q.popleft()
if x == destination_x and y == destination_y:
return
for i in range(8):
new_x = x + dx[i]
new_y = y + dy[i]
if (0<=new_x<len(matrix) and 0<=new_y<len(matrix)) and matrix[new_x][new_y] == 0:
matrix[new_x][new_y] = matrix[x][y] + 1
q.append((new_x, new_y))
for t in range(T):
l = int(sys.stdin.readline())
matrix = [[0]*l for i in range(l)]
current_x, current_y = map(int, sys.stdin.readline().split())
destination_x, destination_y = map(int, sys.stdin.readline().split())
if current_x == destination_x and current_y == destination_y:
print(0)
continue
q = deque()
q.append((current_x, current_y))
bfs(matrix, destination_x, destination_y, q)
print(matrix[destination_x][destination_y])
| choinara0/Algorithm | Baekjoon/Graph Algorithm/7562번 - 나이트의 이동/7562번 - 나이트의 이동.py | 7562번 - 나이트의 이동.py | py | 1,070 | python | en | code | 0 | github-code | 90 |
13468517790 | import os
import string
from contextlib import contextmanager
from pcg import PcgEngine
engine = PcgEngine()
alpha = string.ascii_letters
alpha_numeric = string.ascii_letters + string.digits
@contextmanager
def ctx_open(path: str, flags: int, mode: int = None):
if mode is None:
fd = os.open(path, flags)
else:
fd = os.open(path, flags, mode)
try:
yield fd
finally:
os.close(fd)
def random_id(length: int = 16):
result = engine.choice(alpha)
for _ in range(length - 1):
result += engine.choice(alpha_numeric)
return result
| Miravalier/CodeShare | src/utils.py | utils.py | py | 598 | python | en | code | 0 | github-code | 90 |
14154048063 | from collections import defaultdict
day = 2
def algo1(data):
twos = 0
threes = 0
for word in data:
freq = defaultdict(int)
for letter in word:
freq[letter] += 1
if 2 in freq.values():
twos += 1
if 3 in freq.values():
threes += 1
return twos * threes
def algo2(data):
for i, id1 in enumerate(data):
for id2 in data[i+1:]:
same_chars = []
for letter1, letter2 in zip(id1, id2):
if letter1 == letter2:
same_chars.append(letter1)
if len(id2)-len(same_chars) == 1:
return ''.join(same_chars)
if __name__ == "__main__":
test1_input = [
"abcdef",
"bababc",
"abbcde",
"abcccd",
"aabcdd",
"abcdee",
"ababab",
]
test1_answer = 12
if algo1(test1_input) == test1_answer:
print("First Question Test Passed")
else:
print("First Question Test FAILED")
test2_input = [
"abcde",
"fghij",
"klmno",
"pqrst",
"fguij",
"axcye",
"wvxyz",
]
test2_answer = "fgij"
if algo2(test2_input) == test2_answer:
print("Second Question Test Passed")
else:
print("Second Question Test FAILED")
with open(f"{day}.txt", encoding='utf-8', errors='ignore') as f:
input_data = [line.rstrip() for line in f]
print("Answer 1: ", algo1(input_data))
print("Answer 2: ", algo2(input_data))
| Surye/aoc2018.py | 2.py | 2.py | py | 1,543 | python | en | code | 0 | github-code | 90 |
40861287970 | import itertools
import unittest
from functools import partial
from typing import List, Type, Dict, Tuple, Callable, Union, Iterable
import torch
from pshape import pshape
from torch import Tensor
from torch.profiler import profile, ProfilerActivity
from torch_pconv import PConv2d
from pconv_guilin import PConvGuilin
from pconv_rfr import PConvRFR
from conv_config import ConvConfig
PConvLike = torch.nn.Module
class TestPConv(unittest.TestCase):
pconv_classes = [
PConvGuilin,
PConvRFR,
# This forces numerical error to be the same as other implementations, but makes the computation a bit slower
partial(PConv2d, legacy_behaviour=True),
]
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def test_output_shapes(self):
b, c, h = 16, 3, 256
image, mask = self.mkinput(b=b, c=c, h=h)
configs = [
ConvConfig(3, 64, 5, padding=2, stride=2),
ConvConfig(64, 64, 5, padding=1),
ConvConfig(64, 64, 3, padding=4),
ConvConfig(64, 64, 7, padding=5),
ConvConfig(64, 32, 3, padding=2),
]
expected_heights = (128, 126, 132, 136, 138,)
self.assertEqual(len(configs), len(expected_heights))
outputs_imgs, outputs_masks = image, mask
for expected_height, config in zip(expected_heights, configs):
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
for clazz in self.pconv_classes:
img, mask = outputs_imgs[clazz], outputs_masks[clazz]
self.assertTupleEqual(tuple(img.shape), (b, config.out_channels, expected_height, expected_height))
self.assertTupleEqual(tuple(mask.shape), (b, expected_height, expected_height))
def test_output_dtype(self):
b, c, h = 16, 3, 256
image, mask = self.mkinput(b=b, c=c, h=h)
configs = [
ConvConfig(3, 64, 5, padding=2, stride=2),
ConvConfig(64, 64, 5, padding=1),
ConvConfig(64, 64, 3, padding=4),
ConvConfig(64, 64, 7, padding=5),
ConvConfig(64, 32, 3, padding=2),
]
expected_heights = (128, 126, 132, 136, 138,)
self.assertEqual(len(configs), len(expected_heights))
outputs_imgs, outputs_masks = image, mask
for expected_height, config in zip(expected_heights, configs):
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
for clazz in self.pconv_classes:
img, mask = outputs_imgs[clazz], outputs_masks[clazz]
assert img.dtype == torch.float32
assert mask.dtype == torch.float32
def test_input_shape(self):
config = next(iter(self.realistic_config()))
# We have to call each class distinctively
pconv_calls = [clazz(**config.dict).to(self.device) for clazz in self.pconv_classes]
# Good dtypes
image = torch.rand(10, 3, 256, 256, dtype=torch.float32).to(self.device)
mask = (torch.rand(10, 256, 256) > 0.5).to(torch.float32).to(self.device)
try:
for pconv_call in pconv_calls:
pconv_call(image, mask)
except TypeError as e:
self.fail(str(e))
image = (torch.rand(10, 256, 256) * 255).to(torch.float32).to(self.device) # Bad shape, channels missing
mask = (torch.rand(10, 256, 256) > 0.5).to(torch.float32).to(self.device)
for pconv_call in pconv_calls:
self.assertRaises(TypeError, pconv_call, image, mask)
image = torch.rand(10, 3, 256, 256).to(torch.float32).to(self.device)
mask = (torch.rand(10, 3, 256, 256) > 0.5).to(torch.float32).to(self.device) # Bad shape, channels present
for pconv_call in pconv_calls:
self.assertRaises(TypeError, pconv_call, image, mask)
def test_input_dtype(self):
config = next(iter(self.realistic_config()))
# We have to call each class distinctively
pconv_calls = [clazz(**config.dict).to(self.device) for clazz in self.pconv_classes]
# Good dtypes
image = torch.rand(10, 3, 256, 256, dtype=torch.float32).to(self.device)
mask = (torch.rand(10, 256, 256) > 0.5).to(torch.float32).to(self.device)
try:
for pconv_call in pconv_calls:
pconv_call(image, mask)
except TypeError as e:
self.fail(str(e))
image = (torch.rand(10, 3, 256, 256) * 255).to(torch.uint8).to(self.device) # Bad dtype
mask = (torch.rand(10, 256, 256) > 0.5).to(torch.float32).to(self.device)
for pconv_call in pconv_calls:
self.assertRaises(TypeError, pconv_call, image, mask)
image = (torch.rand(10, 3, 256, 256) * 255).to(torch.float32).to(self.device)
mask = (torch.rand(10, 256, 256) > 0.5).to(self.device) # Bad Dtype
for pconv_call in pconv_calls:
self.assertRaises(TypeError, pconv_call, image, mask)
def test_mask_values_binary(self):
"""The mask is a float tensor because the convolution doesn't operate on boolean tensors, however,
its values are still 0.0 (False) OR 1.0 (True). The masks should NEVER have 0.34 or anything in
between those two values.
Technical explanation for why:
masks are passed to the convolution with ones kernel, at that point, their values can be any integer
since the convolution will sum ones together, so no float value can be created here.
Then, we run torch.clip(mask, 0, 1). At this point, any integer value >= 1 becomes 1, leaving only 0 and 1s.
Rince and repeat at next iteration."""
image, mask = self.realistic_input()
configs = self.realistic_config()
outputs_imgs, outputs_masks = image, mask
for config in configs:
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
for mask in outputs_masks.values():
assert ((mask == 1.0) | (
mask == 0.0)).all(), "All mask values should remain either 1.0 or 0.0, nothing in between."
def test_dilation(self):
image, mask = self.realistic_input()
configs = self.realistic_config()
# Enable bias on every PConv
for i, c in enumerate(configs):
c.dilation = max(1, i % 4)
outputs_imgs, outputs_masks = image, mask
for config in configs:
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
self.compare(outputs_imgs, self.allclose)
self.compare(outputs_masks, self.allclose)
def test_bias(self):
"""This test is very sensitive to numerical errors.
On my setup, this test passes when ran on GPU, but fails when ran on CPU. The most likely reason is that
the CUDA backend's way to add the bias in the convolution differs from the Intel MKL way to add the bias,
resulting in different numerical errors.
Just inspect the min/mean/max values and see if they differ significantly, and if they don't then ignore this
test failing, or send me a PR to fix it."""
image, mask = self.realistic_input()
configs = self.realistic_config()
# Enable bias on every PConv
for c in configs:
c.bias = True
outputs_imgs, outputs_masks = image, mask
for config in configs:
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
self.compare(outputs_imgs, self.allclose)
self.compare(outputs_masks, self.allclose)
def test_backpropagation(self):
"""Does a 3 step forward pass, and then attempts to backpropagate the resulting image
to see if the gradient can be computed and wasn't lost along the way."""
image, mask = self.realistic_input()
configs = self.realistic_config()
outputs_imgs, outputs_masks = image, mask
for config in configs:
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
for clazz in self.pconv_classes:
try:
outputs_imgs[clazz].sum().backward()
except RuntimeError:
self.fail(f"Could not compute the gradient for {clazz.__name__}")
def test_memory_complexity(self):
device = torch.device('cpu')
image, mask = self.realistic_input(c=64, d=device)
config = ConvConfig(64, 128, 9, stride=1, padding=3, bias=True)
pconv_calls = [clazz(**config.dict).to(device) for clazz in self.pconv_classes]
tolerance = 0.1 # 10 %
max_mem_use = {
PConvGuilin: 6_084_757_512, # 5.67 GiB
PConvRFR: 6_084_758_024, # 5.67 GiB
PConv2d: 2_405_797_640, # 2.24 GiB
}
for pconv_call in pconv_calls:
with profile(activities=[ProfilerActivity.CPU],
profile_memory=True, record_shapes=True, with_stack=True) as prof:
# Don't forget to run grad computation as well, since that eats a lot of memory too
out_im, _ = pconv_call(image, mask)
out_im.sum().backward()
# Stealing the total memory stat from the profiler
total_mem = abs(
list(filter(lambda fe: fe.key == "[memory]", list(prof.key_averages())))[0].cpu_memory_usage)
# Printing how much mem used in total
# print(f"{pconv_call.__class__.__name__} used {self.format_bytes(total_mem)} ({total_mem})")
max_mem = (max_mem_use[pconv_call.__class__] * (1 + tolerance))
assert total_mem < max_mem, f"{pconv_call.__class__.__name__} used {self.format_bytes(total_mem)}" \
f" which is more than {self.format_bytes(max_mem)}"
def test_iterated_equality(self):
"""
Tests that even when iterating:
1- The output images have the same values (do not diverge due to error accumulation for example)
2- The output masks have the same values
3- The outputted masks are just repeated along the channel dimension
"""
image, mask = self.realistic_input()
configs = self.realistic_config()
outputs_imgs, outputs_masks = image, mask
for config in configs:
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config=config)(outputs_imgs,
outputs_masks)
self.compare(outputs_imgs, self.allclose)
self.compare(outputs_masks, self.allclose)
def test_equality(self):
config = ConvConfig(in_channels=3, out_channels=64, kernel_size=5)
image, mask = self.mkinput(b=16, h=256, c=config.in_channels)
outputs_imgs, outputs_masks = self.run_pconvs(self.pconv_classes, config)(
image, mask)
self.compare(outputs_imgs, self.allclose)
self.compare(outputs_masks, self.allclose)
@classmethod
def realistic_input(cls, b=16, c=3, h=256, d=None) -> Tuple[Tensor, Tensor]:
# 16 images, each of 3 channels and of height/width 256 pixels
return cls.mkinput(b=b, c=c, h=h, d=cls.device if d is None else d)
@classmethod
def realistic_config(cls) -> Iterable[ConvConfig]:
# These are the partial convs used in https://github.com/jingyuanli001/RFR-Inpainting
# All have bias=False because in practice they're always followed by a BatchNorm2d anyway
return (
ConvConfig(3, 64, 7, stride=2, padding=3, bias=False),
ConvConfig(64, 64, 7, stride=1, padding=3, bias=False),
ConvConfig(64, 64, 7, stride=1, padding=3, bias=False),
ConvConfig(64, 64, 7, stride=1, padding=3, bias=False),
ConvConfig(64, 32, 3, stride=1, padding=1, bias=False),
)
@classmethod
def mkinput(cls, b, c, h, d=None) -> Tuple[Tensor, Tensor]:
if d is None:
d = cls.device
image = torch.rand(b, c, h, h).float().to(d)
mask = (torch.rand(b, h, h) > 0.5).float().to(d)
return image, mask
@staticmethod
def compare(values: Dict[Type[PConvLike], Tensor],
comparator: Callable[[Tensor, Tensor], bool]):
for (clazz1, out1), (clazz2, out2) in itertools.combinations(values.items(), 2):
eq = comparator(out1, out2)
if not eq:
pshape(out1, out2, heading=True)
assert eq, f"{clazz1.__name__ if hasattr(clazz1, '__name__') else 'class1'}'s doesn't match {clazz2.__name__ if hasattr(clazz2, '__name__') else 'class2'}'s output"
@classmethod
def run_pconvs(cls, pconvs: List[Type[PConvLike]], config: ConvConfig) -> Callable[
[Union[Dict[Type[PConvLike], Tensor], Tensor],
Union[Dict[Type[PConvLike], Tensor], Tensor]], Tuple[
Dict[Type[PConvLike], Tensor], Dict[Type[PConvLike], Tensor]]]:
"""Returns a closure that :
Initialise each PConvLike class with the provided config,
set their weights and biases to be equal, and run each of them onto the
input(s) images/masks. Then saves the output in a dict that match the class to
the output. Returns that dict.
The closure can be called with either a specific input per class, or one input
which will be shared among every class.
This method's signature is admittedly a bit unwieldy...
:param pconvs: the list of PConvLike classes to run
:param config: the ConvConfig to use for those classes
:return: The returned closure takes either two tensors, or two dict of tensors
where keys are the corresponding PConv classes which to call it on
"""
def inner(imgs: Union[Dict[Type[PConvLike], Tensor], Tensor],
masks: Union[Dict[Type[PConvLike], Tensor], Tensor]) -> \
Tuple[
Dict[Type[PConvLike], Tensor], Dict[Type[PConvLike], Tensor]]:
if not isinstance(imgs, dict):
imgs = {clazz: imgs for clazz in pconvs}
if not isinstance(masks, dict):
masks = {clazz: masks for clazz in pconvs}
outputs_imgs = dict()
outputs_masks = dict()
w = None
b = None
for clazz in pconvs:
# noinspection PyArgumentList
pconv = clazz(**config.dict).to(cls.device)
if config.bias:
if b is None:
b = pconv.get_bias()
else:
pconv.set_bias(b.clone())
if w is None:
w = pconv.get_weight()
else:
pconv.set_weight(w.clone())
out_img, out_mask = pconv(imgs[clazz].clone(), masks[clazz].clone())
outputs_imgs[clazz] = out_img
outputs_masks[clazz] = out_mask
return outputs_imgs, outputs_masks
return inner
@classmethod
def channelwise_allclose(cls, x):
close = True
for channel1, channel2 in itertools.combinations(x.transpose(0, 1), 2):
close &= cls.allclose(channel1, channel2)
return close
@classmethod
def channelwise_almost_eq(cls, x):
close = True
for channel1, channel2 in itertools.combinations(x.transpose(0, 1), 2):
close &= cls.almost_eq(channel1, channel2)
return close
@staticmethod
def almost_eq(x, y):
return torch.allclose(x, y, rtol=0, atol=2e-3)
@staticmethod
def allclose(x, y):
return torch.allclose(x, y, rtol=1e-5, atol=1e-8)
@staticmethod
def format_bytes(size):
# 2**10 = 1024
power = 2 ** 10
n = 0
power_labels = {0: '', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}
while abs(size) > power:
size /= power
n += 1
suffix = power_labels[n] + 'iB'
return f"{size:.2f} {suffix}"
if __name__ == "__main__":
unittest.main()
| DesignStripe/torch_pconv | tests/test_pconv.py | test_pconv.py | py | 17,005 | python | en | code | 4 | github-code | 90 |
39735665543 | import sys
input = sys.stdin.readline
n, m = map(int, input().split())
n_score = list(map(int, input().split()))
max_score = 0
max_person = 100000
for _ in range(m):
test = list(map(str, input().split()))
score = 0
test[0] = int(test[0])
for j,k in enumerate(test[1:]):
if k == 'O':
score += n_score[j]
if score > max_score:
max_score = score
max_person = test[0]
elif score == max_score:
if max_person > test[0]:
max_person = test[0]
print(max_person,max_score)
| lyong4432/BOJ.practice | #15702.py | #15702.py | py | 548 | python | en | code | 0 | github-code | 90 |
73332712938 | # goorm / 기타 / 피타고라스 문제
# https://level.goorm.io/exam/43279/%ED%94%BC%ED%83%80%EA%B3%A0%EB%9D%BC%EC%8A%A4-%EB%AC%B8%EC%A0%9C/quiz/1
def find():
for c in range(1, 1000):
for a in range(1, 1000-c):
b = 1000 - a - c
if a**2 + b**2 == c**2:
print(a*b*c)
return
find() | devwithpug/Algorithm_Study | python/goorm/기타/goorm_43279.py | goorm_43279.py | py | 363 | python | en | code | 0 | github-code | 90 |
28560555508 | """
Кобзарь О.С. Хабибуллин Р.А.
Модуль для построения графиков через plotly
"""
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from plotly.offline import plot, iplot
import re
def create_plotly_trace(data_x, data_y, namexy, chosen_mode='lines', use_gl = True, swap_xy = False):
"""
Создание одного trace по данным
:param data_x: данные для оси x
:param data_y: данные для оси y
:param namexy: название для trace
:param chosen_mode: настройка отображения 'lines', 'markers'
:return: один trace
"""
if swap_xy:
data_x, data_y = data_y, data_x
hovertemplate = namexy + ": %{x}<extra></extra>"
else:
hovertemplate = namexy + ": %{y}<extra></extra>"
if use_gl == True:
one_trace = go.Scattergl(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
else:
one_trace = go.Scatter(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
return one_trace
def plot_func(data, plot_title_str, filename_str, reversed_y=False, iplot_option=False, x_name=None, y_name=None,
annotation = None):
"""
Итоговая функция для построения графиков
:param reversed_y:
:param data: созданный список из trace
:param plot_title_str: название графика
:param filename_str: названия html файлика
:return: None
"""
if reversed_y:
layout = dict(title=plot_title_str, yaxis=dict(autorange='reversed'), hovermode='x')
else:
layout = dict(title=plot_title_str)
if annotation != None:
layout['annotations'] = [
dict(
x=annotation['x'],
y=annotation['y'],
xref="x",
yref="y",
text=annotation['text'],
showarrow=True,
font=dict(
family="Courier New, monospace",
size=17,
color="#ffffff"
),
bordercolor="#c7c7c7",
borderwidth=2,
borderpad=4,
arrowsize=10
,
bgcolor="#0e0700",
opacity=0.8
)]
if x_name != None:
layout['xaxis_title'] = x_name
if y_name != None:
layout['yaxis_title'] = y_name
fig = dict(data=data, layout=layout)
if iplot_option:
iplot(fig, filename=filename_str)
else:
plot(fig, filename=filename_str)
def plot_subplots(data_traces, filename_str, two_equal_subplots=False, auto_open = True):
"""
Построение нескольких графиков
:param data_traces: подготовленный список trace
:param filename_str: имя файла .html
:param two_equal_subplots: если True график будет разделен на 2 одинаковых друг по другом, если False - все в колонку
:return: None
"""
if two_equal_subplots:
items_in_one_subplot = int(len(data_traces))
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.02)
for i in range(items_in_one_subplot):
fig.append_trace(data_traces[i], row=1, col=1)
fig.append_trace(data_traces[i], row=2, col=1)
else:
fig = make_subplots(rows=len(data_traces), cols=1, shared_xaxes=True, vertical_spacing=0.02)
for i in range(len(data_traces)):
fig.append_trace(data_traces[i], row=i + 1, col=1)
fig.layout.hovermode = 'x'
plot(fig, filename=filename_str, auto_open=auto_open)
def create_traces_list_for_all_columms(data_frame, chosen_mode='lines', use_gl=True, swap_xy=False, traces_names=None):
"""
Создание списка из trace для данного DataFrame для передачи их в data и последующего строительства графика.
:param data_frame: подготовленный Pandas DataFrame с названиями колонок и обработанным индексом
:param chosen_mode: выбор отображения 'lines', 'markers' и т.п.
:return: trace_list для data
"""
trace_list = []
columns_name_list = data_frame.columns
if traces_names != None and len(traces_names) == len(columns_name_list):
for i, j in zip(columns_name_list, traces_names):
column_name = i
this_series = data_frame[column_name].dropna()
this_trace = create_plotly_trace(this_series.index, this_series, j, chosen_mode, use_gl, swap_xy)
trace_list.append(this_trace)
else:
for i in columns_name_list:
column_name = i
this_series = data_frame[column_name].dropna()
this_trace = create_plotly_trace(this_series.index, this_series, column_name, chosen_mode, use_gl, swap_xy)
trace_list.append(this_trace)
return trace_list
def connect_traces(traces1, trace2):
"""
Создание единого списка trace из двух. Удобно при построении графиков из разных DataFrame
:param traces1: первый список с trace
:param trace2: второй список с trace
:return: объединенный вариант
"""
connected_traces = []
for i in traces1:
connected_traces.append(i)
for j in trace2:
connected_traces.append(j)
return connected_traces
def find_by_patterns(patterns, list_to_search):
res = [x for x in list_to_search if re.search(patterns[0], x)]
if len(patterns) >1:
for i in patterns[1:]:
res = [x for x in res if re.search(i, x)]
return res
def plot_specific_columns(result_df, columns_to_plot=None, swap_xy=True, reversed_y=True, iplot_option=True,
plot_name='this_plot', x_name=None, y_name=None, traces_names=None, annotation=None):
"""
Функция для быстрого построения графиков, только для определенных колонок DataFrame
:param result_df:
:param columns_to_plot:
:param swap_xy:
:param reversed_y:
:param iplot_option:
:param plot_name:
:return:
"""
if columns_to_plot == None:
columns_to_plot = result_df.columns
result_df_to_plot = result_df[columns_to_plot]
all_traces = create_traces_list_for_all_columms(result_df_to_plot, 'lines+markers', swap_xy=swap_xy, traces_names=traces_names)
plot_func(all_traces, plot_name, f'{plot_name}.html', reversed_y=reversed_y, iplot_option= iplot_option, x_name=x_name, y_name=y_name,
annotation=annotation)
def filtr_by_antipatterns(init_list: list, antipatterns: list, print_all: bool = True):
"""
Фильтрация списка параметров по антипаттернам, удаления нежелательных элементов типа string
:param print_all: опция - выводить все удаленные совпадения по антипаттерну
:param init_list:
:param antipatterns:
:return:
"""
new_list = init_list.copy()
droped_values = []
for j in antipatterns:
new_list = [i for i in new_list if j not in i ]
for i in init_list:
if i not in new_list:
droped_values.append(i)
if print_all:
print(f"Удаленные совпадения по антипаттерну: {droped_values}")
return new_list
def create_columns_to_plot(result_df, group_patterns, antipatterns=[], print_all=False):
if type(group_patterns[0]) == str:
columns_to_plot = find_by_patterns(group_patterns, result_df.columns)
else:
columns_to_plot = []
for i in group_patterns:
this_column_to_plot = find_by_patterns(i, result_df.columns)
columns_to_plot += this_column_to_plot
if print_all:
print(f"Найденные совпадения: {columns_to_plot}")
if len(antipatterns) > 0:
columns_to_plot = filtr_by_antipatterns(columns_to_plot, antipatterns, print_all=print_all)
return columns_to_plot
def plot_by_patterns(result_df, group_patterns, antipatterns=[],
swap_xy=True, reversed_y=True, iplot_option=True, plot_name='this_plot', print_all=True, x_name=None, y_name=None, traces_names = None, annotation = None):
"""
Функция для построения графиков с учетом групп паттернов (в каждой группе должны выполняться все условия)
и антипаттернов для выбора колонок для отображения
:param print_all: опция - выводить все найденные совпадения и удаленные антипаттерны
:param result_df:
:param group_patterns:
:param antipatterns:
:return:
"""
columns_to_plot = create_columns_to_plot(result_df, group_patterns, antipatterns, print_all)
plot_specific_columns(result_df, columns_to_plot, swap_xy=swap_xy, reversed_y=reversed_y,
iplot_option=iplot_option, plot_name=plot_name, x_name=x_name, y_name=y_name, traces_names=traces_names,
annotation=annotation)
def create_banches_from_pattern(df, banches_with_patterns: dict):
banches = []
for i,j in banches_with_patterns.items():
columns_to_plot = create_columns_to_plot(df, j[0], j[1], print_all=False)
one_banch = {i: columns_to_plot}
banches.append(one_banch)
return banches
def create_report_html(df, all_banches, filename, shared_xaxes=True,
shared_yaxes=False,
cols=1, one_plot_height=450,
verical_spacing=0.01, title_text='Распределение параметров',
swap_xy=False, reversed_y=False):
"""
Создание шаблонизированного и удобного набора графиков
:param df:
:param all_banches:
:param filename:
:return:
"""
subplot_amount = len(all_banches)
subplot_titles = []
for z in all_banches:
subplot_titles.append(list(z.keys())[0])
if cols == 1:
rows = subplot_amount
else:
rows = subplot_amount // cols
if subplot_amount % cols != 0:
rows += 1
fig = make_subplots(
rows=rows, cols=cols, shared_xaxes=shared_xaxes,
shared_yaxes=shared_yaxes,
vertical_spacing=verical_spacing,
subplot_titles=subplot_titles
)
for i in range(subplot_amount):
this_df = df[all_banches[i][subplot_titles[i]]]
this_banch_trace = create_traces_list_for_all_columms(this_df, chosen_mode='lines+markers', use_gl=True,
swap_xy=swap_xy)
for j in this_banch_trace:
if cols == 1:
this_row = i+1
this_col = 1
else:
this_row = i // cols + 1
this_col = i % cols + 1
fig.add_trace(j, row=this_row, col=this_col)
fig.layout.hovermode = 'x'
fig.layout.height = one_plot_height * rows
fig.update_layout(
title_text=title_text)
if reversed_y:
fig.update_yaxes(autorange="reversed")
plot(fig, filename=filename)
| unifloc/unifloc_py | uniflocpy/uTools/plotly_workflow.py | plotly_workflow.py | py | 11,948 | python | ru | code | 13 | github-code | 90 |
21985372364 | '''
Given a sorted array of integers A(0 based index) of size N, find the starting and ending position of a given integar B in array A.
Your algorithm’s runtime complexity must be in the order of O(log n).
Return an array of size 2, such that first element = starting position of B in A and second element = ending position of B in A, if B is not found in A return [-1, -1].
Input Format
The first argument given is the integer array A.
The second argument given is the integer B.
Output Format
Return an array of size 2, such that first element = starting position of B in A and second element = ending position of B in A, if B is not found in A return [-1, -1].
Constraints
1 <= N <= 10^6
1 <= A[i], B <= 10^9
For Example
Input 1:
A = [5, 7, 7, 8, 8, 10]
B = 8
Output 1:
[3, 4]
Explanation 1:
First occurence of 8 in A is at index 3
Second occurence of 8 in A is at index 4
ans = [3, 4]
Input 2:
A = [5, 17, 100, 111]
B = 3
Output 2:
[-1, -1]
'''
'''
Easy enough!
'''
def binarySearch(A, B, n):
low = 0
high = n-1
while(low < high):
mid = low + high
mid = int(mid/2)
if(A[mid] == B):
return mid
if(B > A[mid]):
low = mid + 1
else:
high = mid - 1
return low
def solve(A, B):
result = []
n = len(A)
# All Equal
if(A[0] == A[n-1] and A[0] == B):
return [0, n-1]
if(n == 1):
if(A[0] == B):
return [0, 0]
else:
return [-1, -1]
indexValue = binarySearch(A, B, n)
if(A[indexValue] != B):
return [-1, -1]
else:
temp = indexValue
start = -1
end = -1
while( temp < n and A[temp] == B):
temp += 1
end = temp - 1
temp = indexValue - 1
while(temp >= 0 and A[temp] == B):
temp -=1
start = temp + 1
result = [start, end]
return result
A = [5, 17, 100, 111]
B = 3
result = solve(A, B)
print("Result: ")
print(result) | prashik856/cpp | InterviewBit/BinarySearch/2.SimpleBinarySearch/5.SearchForARange.py | 5.SearchForARange.py | py | 2,051 | python | en | code | 0 | github-code | 90 |
22553685973 | from matplotlib import pyplot as plt
import csv
import math
import numpy as np
#maks to 153.832040129247
#min to 43.2528741577922
def addVectors(a,b):
x = a[0] + b[0]
y = a[1] + b[1]
z = a[2] + b[2]
return [x,y,z]
def float2rgb(height,maksimum,min):
blue=0.0
green = 1.0 - (height-min)/(maksimum-min)
red = (height-min)/(maksimum-min)
return [red,green,blue]
def rgb2hsv(rgb):
#r, g, b = r/255.0, g/255.0, b/255.0
r = rgb[0]
g = rgb[1]
b = rgb[2]
mx = max(r, g, b)
mn = min(r, g, b)
df = mx-mn
if mx == mn:
h = 0
elif mx == r:
h = (60 * ((g-b)/df) + 360) % 360
elif mx == g:
h = (60 * ((b-r)/df) + 120) % 360
elif mx == b:
h = (60 * ((r-g)/df) + 240) % 360
if mx == 0:
s = 0
else:
s = df/mx
v = mx
return [h, s, v]
def hsv2rgb(hsv):
h = float(hsv[0])
s = float(hsv[1])
v = float(hsv[2])
h60 = h / 60.0
h60f = math.floor(h60)
hi = int(h60f) % 6
f = h60 - h60f
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
r, g, b = 0, 0, 0
if hi == 0: r, g, b = v, t, p
elif hi == 1: r, g, b = q, v, p
elif hi == 2: r, g, b = p, v, t
elif hi == 3: r, g, b = p, q, v
elif hi == 4: r, g, b = t, p, v
elif hi == 5: r, g, b = v, p, q
return [r, g, b]
def rgb2hsv2rgb(rgbArr,kosinus):
HSVarr = rgb2hsv(rgbArr)
if kosinus>0.0:
HSVarr[2] = kos * 4.5
HSVarr[1] = 1.0 - 1.2*kos
else:
HSVarr[2] = abs(kos)
HSVarr[1] = 1.0 - abs(kos)
RGBarr2 = hsv2rgb(HSVarr)
return RGBarr2
def cosinus(sunVec,pointVec):
skalarny = sunVec[0]*pointVec[0]+sunVec[1]*pointVec[1]+sunVec[2]*pointVec[2]
sunVecLen = math.sqrt(sunVec[0]**2+sunVec[1]**2+sunVec[2]**2)
pointVecLen = math.sqrt(pointVec[0]**2+pointVec[1]**2+pointVec[2]**2)
kosinus = skalarny/(sunVecLen*pointVecLen)
return kosinus
def sun2pixelVec(sunVec,pixelVec):
# x = pixelVec[0]-sunVec[0]
# y = pixelVec[1]-sunVec[1]
# z = pixelVec[2]-sunVec[2]
x = sunVec[0]-pixelVec[0]
y = sunVec[1]-pixelVec[1]
z = sunVec[2]-pixelVec[2]
vecLen = math.sqrt(x**2+y**2+z**2)
x = x/vecLen
y = y/vecLen
z = z/vecLen
return [x,y,z]
def normal(a,b):
ax = a[0]
ay = a[1]
az = a[2]
bx = b[0]
by = b[1]
bz = b[2]
# wspolrzedne wektora normalnego
x = ay*bz - az*by
y = az*bx - ax*bz
z = ax*by - ay*bx
# normalizacja wektora normalnego
# normalVecLen = math.sqrt(x**2+y**2+z**2)
# x = x/normalVecLen
# y = y / normalVecLen
# z = z / normalVecLen
return [x,y,z]
with open ('big.dem','r') as csvfile:
dane = []
plots = csv.reader(csvfile, delimiter=' ')
row_num = 0
tablica_wektorow = []
y = 0 #w ktorym wierszu aktualnie jestem
for wiersz in plots:
x = 0 #w ktorym elemencie w wierszu jestem
wiersz_wektorow = []
if row_num>0:
wiersz.pop(500)
kolorki = [float2rgb(float(i),153.832040129247,43.2528741577922) for i in wiersz]
for element in wiersz:
wiersz_wektorow.append([x*7537/100,y*7537/100,float(element)]) # x y z(height)
x = x + 1
dane.append(kolorki)
tablica_wektorow.append(wiersz_wektorow)
y = y + 1
row_num = row_num + 1
tablica_normalnych = []
wiersz_normalnych = []
for y in range(500):
wiersz_normalnych = []
for x in range(500):
if y==0 or x==0 or x==499 or y==499:
wiersz_normalnych.append([1.0,1.0,1.0])
else:
#aktualnie liczony punkt
actualVec = tablica_wektorow[y][x]
#lewo i gora (gora x lewo )
nearbyVec1 = tablica_wektorow[y][x - 1] #lewo
nearbyVec2 = tablica_wektorow[y - 1][x] #gora
a1 = [nearbyVec1[0] - actualVec[0],nearbyVec1[1] - actualVec[1],nearbyVec1[2] - actualVec[2]] #lewo
b1 = [nearbyVec2[0] - actualVec[0],nearbyVec2[1] - actualVec[1],nearbyVec2[2] - actualVec[2]] #gora
#prawo i w dol ( dol x prawo)
nearbyVec3 = tablica_wektorow[y + 1][x] #dol
nearbyVec4 = tablica_wektorow[y][x + 1] #prawo
a2 = [nearbyVec3[0] - actualVec[0],nearbyVec3[1] - actualVec[1],nearbyVec3[2] - actualVec[2]] #dol
b2 = [nearbyVec4[0] - actualVec[0],nearbyVec4[1] - actualVec[1],nearbyVec4[2] - actualVec[2]] #prawo
#wyliczenie normalnych i ich wypadkowej
normal1 = normal(a1,b1)
normal2 = normal(b2,a2)
normalna = addVectors(normal1,normal2) #wypadkowa normalnych
#normalizacja
normalnaLen = math.sqrt(normalna[0] ** 2 + normalna[1] ** 2 + normalna[2] ** 2)
normalna[0] = normalna[0] / normalnaLen
normalna[1] = normalna[1] / normalnaLen
normalna[2] = normalna[2] / normalnaLen
wiersz_normalnych.append(normalna)
tablica_normalnych.append(wiersz_normalnych)
x = 0
y = 0
tablica_koncowa = []
wektor_slonca = [-40000.0,15000.0,10000.0]
for y in range(500):
wiersz_koncowy = []
for x in range(500):
rgArr = dane[y][x]
normalny = tablica_normalnych[y][x]
kos = cosinus(sun2pixelVec(wektor_slonca,tablica_wektorow[y][x]),normalny)
wiersz_koncowy.append(rgb2hsv2rgb(rgArr,kos))
tablica_koncowa.append(wiersz_koncowy)
plt.tick_params(top=True, right=True, direction='in')
plt.imshow(tablica_koncowa)
plt.show()
| KarolCee/Elevation-Map-Shader | map.py | map.py | py | 5,667 | python | pl | code | 0 | github-code | 90 |
18141895139 | #!usr/bin/env python3
import sys
def main():
r, c = [int(row_col) for row_col in sys.stdin.readline().split()]
sheet = [
[int(row_num) for row_num in sys.stdin.readline().split()]
for row in range(r)
]
sheet.append([0 for col in range(c)])
for row in range(len(sheet)-1):
for col in range(len(sheet[0])):
sheet[-1][col] += sheet[row][col]
for row in sheet:
row.append(sum(row))
print(*row)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02413/s257358553.py | s257358553.py | py | 535 | python | en | code | 0 | github-code | 90 |
34882545079 |
def find_vowel(word):
vowels = "aieou"
for i, letter in enumerate(word):
for vowel in vowels:
if letter in vowels:
return i
def capitalize(word, flag):
if flag:
return word[0].upper() + word[1:]
return word
def igpay(sentence):
words = sentence.split()
pigged_words = []
punctuation = "'\".,:;?!"
for i, word in enumerate(words):
word_punctuation = ""
upper_flag = False
if word[0] == word[0].upper():
upper_flag = True
word = word.lower()
for char in word:
if char in punctuation:
word_punctuation = char
word = word.replace(char, "")
first_vowel_index = find_vowel(word)
if first_vowel_index == None:
pigged_words.append(capitalize(word, upper_flag) + "ay")
else:
word_slice = word[0:first_vowel_index]
if word_slice != "":
word = capitalize(word[first_vowel_index:len(word)], upper_flag) + word_slice + "ay" + word_punctuation
pigged_words.append(word)
else:
word = capitalize(word, upper_flag) + "way" + word_punctuation
pigged_words.append(word)
return " ".join(pigged_words)
def main():
print(igpay('Synthesis has a lot of leading consonants. Rhythm has no vowels? By. '))
if __name__ == "__main__":
main()
| ilikepegasi/CSCI1133 | labs/lab08/pigLatin.py | pigLatin.py | py | 1,439 | python | en | code | 0 | github-code | 90 |
73323047657 | #For more information about this, watch this video: https://www.youtube.com/watch?v=2hfoX51f6sg
import math
import os
from svg.path import * #Thank you for using complex numbers as points
from p5 import *
def save_frame(filename,char="#"): #Sorta make a copy of saveFrame() since p5 doesn't have one
global num_frames
try:
num_frames
except NameError:
num_frames=1
name,ext=os.path.splitext(filename)
num_char=name.count(char)
frame=str(num_frames)
name=list(name)
iter_char=0
sub=1
if len(frame)>=num_char:
sub=0
for i in range(len(name)):
if name[i]==char:
if iter_char<num_char-len(frame):
name[i]="0"
else:
name[i]=frame[iter_char-len(frame)-sub]
iter_char+=1
name=''.join(name)
filename=name+ext
num_frames+=1
pyglet.image.get_buffer_manager().get_color_buffer().save(filename)
def integrate(func,start,end,dx=0.01):
i=start
area=0
while i<=end:
area+=func(i)*dx
i+=dx
return area
def get_coeffs(p,start,end): #Get the Fourier coefficients along with their index with a Path object as p
coeffs=[]
i=start
while i<=end:
c=(1/(2*math.pi))*integrate(lambda x:p.point(x/(2*math.pi))*Epicycle.Cycle(-i)(x),0,2*math.pi)
coeffs.append((i,c))
i+=1
return coeffs
class Epicycle: #I made this before I was drawing the circles, but I left it in to simplify the code a bit
class Cycle:
def __init__(self,speed,rad=1): #Note that rad can be a complex number
self.rad=rad
self.speed=speed
def __call__(self,x):
return self.rad*math.e**(1j*self.speed*x)
def __init__(self,cyc): #cyc must be a list/tuple of lists and/or tuples that have the format of (speed,radius)
self.cycles=[]
for i in range(len(cyc)):
if type(cyc[i])!=tuple and type(cyc[i])!=list:
raise TypeError("Input must be a list or tuple of tuples and/or lists")
f=cyc[i]
self.cycles.append(Epicycle.Cycle(f[0],f[1]))
def __call__(self,x):
total=0
for f in self.cycles:
total+=f(x)
return total
def path_from_file(filename): #Won't get whole .svg file, just the first path it sees
with open(filename) as f:
shape=f.read()
shape=shape.split("<g")[1].split("<path")[1].split(' d="')[1].split('"')[0]
shape=parse_path(shape)
return shape
def translate_path(p,trans): #Move all the points in a Path over by some complex number trans
trans_p=Path()
for s in p:
if type(s)==Line:
trans_p.append(Line(s.start+trans,s.end+trans))
elif type(s)==CubicBezier:
trans_p.append(CubicBezier(s.start+trans,s.control1+trans,s.control2+trans,s.end+trans))
elif type(s)==QuadraticBezier:
trans_p.append(QuadraticBezier(s.start+trans,s.control+trans,s.end+trans))
elif type(s)==Arc:
trans_p.append(Arc(s.start+trans,s.radius,s.rotation,s.arc,s.sweep,s.end+trans))
trans_p.closed=p.closed
return trans_p
if not os.path.exists("frames"): #If a "frames" folder doesn't exist, make it
os.mkdir("frames")
shape=path_from_file("test.svg")
print("Getting average coordinate...")
avg_coord=integrate(lambda x:shape.point(x),0,1) #Use the fact that the average point of a function f on the interval [a,b] is (1/(b-a))*integral(f,a,b)
print("Done")
shape=translate_path(shape,-avg_coord) #Move all the points in shape so that the center is the average coordinate
print("Getting coefficients...")
coeffs=get_coeffs(shape,-50,50)
print("Done")
cycles=[(coeffs[x][0],coeffs[x][1]) for x in range(len(coeffs))] #Package the coefficients into an input for an Epicycle
cycles.sort(key=lambda x:1/abs(x[1])) #Sort the cycles from largest to smallest radius to look better
epi=Epicycle(cycles)
t=0
points=[]
def setup():
size(600,600)
def draw():
global t,points
if t>2*math.pi: #Stop drawing once the interval is over
return
background(0)
translate(width/2,height/2)
before=0
c=before
for cyc in epi.cycles: #Basically do what Epicycle.__call__ does but draw ellipses for a cool visual
stroke(255)
no_fill()
ellipse((c.real,c.imag),abs(cyc.rad*2),abs(cyc.rad*2))
c=before+cyc(t)
before=c
points.append(c)
stroke(255,0,128)
for i in range(1,len(points)): #Draw lines between all the previous points; obviously takes longer the more time that has passed
now=points[i]
old=points[i-1]
line((now.real,now.imag),(old.real,old.imag))
t+=.01
save_frame("frames/###.png") #Because I only half-implemented saveFrame(), the first two frames don't have anything in them
run()
| friedkeenan/Epicycles | Epicycles.py | Epicycles.py | py | 4,954 | python | en | code | 11 | github-code | 90 |
18552593579 | n = int(input())
a = [0]
a.extend(list(map(int,input().split())))
a.append(0)
cost = []
for i in range(n):
cost.append(abs(a[i+1]-a[i]))
cost.append(abs(a[-2]))
s_cost = sum(cost)
for i in range(n):
print(s_cost - cost[i]- cost[i+1] + abs(a[i+2]-a[i])) | Aasthaengg/IBMdataset | Python_codes/p03401/s151409513.py | s151409513.py | py | 259 | python | en | code | 0 | github-code | 90 |
41218302157 | # coding=utf-8
# 网页图片爬取
import urllib.request
import urllib
import re
def gethtml(url):
page = urllib.request.urlopen(url)
html1 = page.read()
return html1
def getimage(site):
reg = 'src="(.+?\.jpg)" alt='
imglist = re.findall(reg, site)
print(len(imglist))
x = 0
for imgurl in imglist:
urllib.request.urlretrieve(imgurl, '%s.jpg' % x)
x += 1
def getswf():
i = 1
while i < 10:
urllib.request.urlretrieve("http://tbm.alicdn.com/YlI1t0Q14T5TG33lNgp/mBLaXnwXWpm7pWJgsSo@@ld-0000"
+ str(i) + ".ts", str(i) + '.ts')
i += 1
if __name__ == "__main__":
# html = gethtml('http://pic.yxdown.com/list/0_0_1.html')
# print(html.decode('UTF-8'))
# #
# print(getimage(html.decode('UTF-8')))
getswf()
| crystal0913/AI | crawler/crawler.py | crawler.py | py | 833 | python | en | code | 1 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.