seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
33385097559 | import threading
import uuid
import json
import webbrowser
import requests
from api_clients import thangs_login_client
from time import sleep
from login_token_cache import set_token, get_bearer_json_file_location, get_api_token
class ThangsLoginService:
    """Drives the browser-based Thangs login flow and caches the bearer token."""

    __GRANT_CHECK_INTERVAL_SECONDS = 0.5  # 500 milliseconds
    __MAX_ATTEMPTS = 600  # 5 minutes worth

    def __init__(self):
        self.__login_client = thangs_login_client.ThangsLoginClient()

    def login_user(self, cancellation_event: threading.Event = None) -> None:
        """Open the browser auth page and poll until an access grant is issued.

        Clears any previously cached token first, then polls every
        __GRANT_CHECK_INTERVAL_SECONDS until success, a 401, cancellation,
        or __MAX_ATTEMPTS polls have elapsed.
        """
        # Bug fix: the original wrote `if get_api_token:`, which tests the
        # imported function object (always truthy) rather than calling it,
        # so the cache was cleared unconditionally. Call it to check whether
        # a token is actually cached.
        if get_api_token():
            set_token(None)
        challenge_id = uuid.uuid4()
        webbrowser.open(self.__login_client.get_browser_authenticate_url(challenge_id))
        attempts = 0
        while attempts < ThangsLoginService.__MAX_ATTEMPTS and not (cancellation_event and cancellation_event.is_set()):
            try:
                sleep(self.__GRANT_CHECK_INTERVAL_SECONDS)
                response = self.__login_client.check_access_grant(challenge_id, attempts)
                if not response:
                    attempts += 1
                    continue
                print("Successful Login")
                bearer_token = response['TOKEN']
                bearer = {
                    'bearer': bearer_token,
                }
                # Persist the token so later runs can reuse it without logging in.
                with open(get_bearer_json_file_location(), 'w') as json_file:
                    json.dump(bearer, json_file)
                set_token(bearer_token)
                return
            except requests.HTTPError as e:
                if e.response.status_code == 401:
                    print("Unsuccessful Login, 401 returned")
                    return
                raise
        print("Unsuccessful Login, max attempts exceeded")
| physna/thangs-blender-addon | services/thangs_login_service.py | thangs_login_service.py | py | 1,762 | python | en | code | 30 | github-code | 90 |
31085359910 | import numpy as np
import h5py
import os
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
import torch
from utils import *
from model_convtrans import *
torch.cuda.empty_cache()
# NOTE(review): several imports below duplicate the ones above or are unused leftovers.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
from torch import sigmoid, log, sub, neg, mul, add
devices = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cuda = torch.cuda.is_available()
# Fix RNG seeds so the evaluation is reproducible.
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Load the whole pickled model object (not just a state dict).
model = torch.load('output/m_conv_ls_3_ff_400_dm_320_N_2_a_1e-06_b_100_init')
#model.load_state_dict(torch.load('output/m_conv_ls_3_ff_400_dm_320_N_2_a_1e-06_b_100_init'))
train_loader, valid_loader, test_loader, class_weights = load_data(100,
                                                                   is_onehot=True,
                                                                   is_shuffle=True,
                                                                   get_class_weights=False,
                                                                   data_dir='dataset/')
# Collect model scores and ground-truth labels over the whole test set.
# NOTE(review): batches are moved to CUDA unconditionally even though
# `devices` above allows a CPU fallback — confirm a GPU is required here.
all_score = []
all_y = []
for batch_x, batch_y in test_loader:
    batch_x = batch_x.cuda()
    batch_y = batch_y.cuda()
    score = model.predict(batch_x)
    all_score.append(score.data.cpu())
    all_y.append(batch_y.cpu())
yhat = np.concatenate(all_score)
y_test = np.concatenate(all_y)
# Per-class ROC AUC over the 57 label columns, skipping classes with no positives.
auc_s = []
for i in range(57):
    if not np.count_nonzero(y_test[:,i]) == 0:
        tmp = roc_auc_score(y_test[:, i], yhat[:, i])
        #auc_s.append(round(tmp, 4))
        auc_s.append(tmp)
# Per-class precision-recall AUC for the same classes.
prauc = []
avgpr = []
for i in range(57):
    if not np.count_nonzero(y_test[:,i]) == 0:
        precision, recall, thresholds = precision_recall_curve(y_test[:, i], yhat[:, i])
        tmp1 = auc(recall, precision)
        #avgpr.append(average_precision_score(y_test[:, i], yhat[:, i]))
        prauc.append(tmp1)
prauc_m = np.mean(prauc)
auc_m = np.mean(auc_s)
# Persist the per-class curves and print the macro averages.
np.save('results/roc.npy', auc_s)
np.save('results/pr.npy', prauc)
print(auc_m)
print(prauc_m)
| RunzeSu/DeepCAT | results.py | results.py | py | 2,189 | python | en | code | 0 | github-code | 90 |
71866708777 | Import('env', 'lib')
# SCons build script for the corrective-ranking tools.
# Clone the inherited environment and link against the local static library
# plus the shared library handed in by the parent SConscript.
env = env.Clone(
    LIBS=['corrective', lib.name],
    LIBPATH=['..', '.'],
    RPATH=lib.dir.abspath
)
# Declare the headers as SCons File nodes.
# NOTE(review): the returned node list is discarded — confirm this call is
# needed (e.g. for source distribution) or a leftover.
File([
    'Candidates.h',
    'Complex.h',
    'Conjunction.h',
    'Disjunction.h',
    'Foci.h',
    'Outcome.h',
    'Predicate.h',
    'Predicates.h',
    'RunSet.h',
    'RunSuite.h',
    'allFailures.h',
    'main.h',
    'zoom.h',
])
# Static library shared by all of the ranking variants below.
env.StaticLibrary('corrective', [
    'Complex.cc',
    'Candidates.cc',
    'Conjunction.cc',
    'Disjunction.cc',
    'Outcome.cc',
    'Predicate.cc',
    'RunSet.cc',
    'RunSuite.cc',
    'allFailures.cc',
    'main.cc',
    'zoom.cc',
])
# One program per ranking strategy; each is built by default.
for variant in ['exact-complete', 'approximate-complete', 'adaptive-sampled']:
    Default(env.Program('%s.cc' % variant))
| liblit/cbiexp | src/corrective-ranking/SConscript | SConscript | 775 | python | en | code | 5 | github-code | 90 | |
18259353989 | import collections
import sys
# Maintain the string in a deque and make "reverse" a lazy O(1) flag flip;
# characters are appended to whichever physical end matches the logical end.
S = collections.deque(input())
Q = int(input())
flipped = False
for _ in range(Q):
    query = next(sys.stdin).split()
    if int(query[0]) == 1:
        # Type 1: reverse the whole string (just toggle the orientation).
        flipped = not flipped
    else:
        # Type 2: add character `ch` at logical end `side` (0 = front, 1 = back).
        side, ch = int(query[1]) - 1, query[2]
        if side == flipped:
            S.appendleft(ch)
        else:
            S.append(ch)
print(''.join(reversed(S) if flipped else S))
| Aasthaengg/IBMdataset | Python_codes/p02756/s798998770.py | s798998770.py | py | 346 | python | en | code | 0 | github-code | 90 |
18314593139 | import sys
from collections import defaultdict
#+++++
def main():
    """Count index pairs (i, j) with j - i < k whose prefix sums of (a-1)
    are congruent mod k (ABC 146 E style sliding-window counting)."""
    n, k = map(int, input().split())
    values = list(map(int, input().split()))
    # prefixes[i] = (sum of the first i terms of (a-1)) mod k
    prefixes = [0] * (n + 1)
    running = 0
    for idx, value in enumerate(values):
        running += value - 1
        prefixes[idx + 1] = running % k
    total = 0
    seen = defaultdict(int)
    for j in range(n + 1):
        # Drop the prefix that fell out of the length-k window.
        if j - k >= 0:
            seen[prefixes[j - k]] -= 1
        total += seen[prefixes[j]]
        seen[prefixes[j]] += 1
    print(total)
#+++++
# True only when running under iOS Pythonista (see the __main__ guard below);
# gates the pa() debug-print helper.
isTest=False
def pa(v):
    # Debug print helper: emits only in local test mode.
    if isTest:
        print(v)
if __name__ == "__main__":
    # On iOS (Pythonista) read the test input from a file and enable debug output.
    if sys.platform =='ios':
        sys.stdin=open('inputFile.txt')
        isTest=True
    else:
        pass
        #input = sys.stdin.readline
    ret = main()
    # main() prints its own answer and returns None; this handles a future
    # variant that returns the result instead.
    if ret is not None:
        print(ret)
24178575167 | import logging
import tensorflow as tf
import sys
from graphs.builder import create_models, load_models
from statistical.ae_losses import expected_loglikelihood_with_lower_bound
from utils.reporting.logging import log_message
def create_graph(name, variables_params, restore=None):
    """Build (or restore) the model variables and return a zero-argument
    accessor that maps each variable name (e.g. 'inference', 'generative')
    to its created model."""
    names = [spec['name'] for spec in variables_params]
    models = create_variables(variables_params=variables_params, model_name=name, restore=restore)

    def get_variables():
        return dict(zip(names, models))

    return get_variables
def create_losses():
    """Map each model output head to its reconstruction loss function."""
    return {'x_logits': cross_entropy}
def create_trans_losses(input_fn=None, output_fn=None):
    """Like create_losses, but lets callers transform the tensors first.

    input_fn, if given, is applied to the ground-truth batch; output_fn,
    if given, is applied to the predicted logits before the loss runs.
    """
    def cross_entropy_fn(x_true, x_logits):
        if input_fn:
            x_true = input_fn(x_true)
        # Bug fixes vs. the original: it (1) re-tested input_fn instead of
        # output_fn, (2) transformed x_true instead of x_logits, and
        # (3) returned a call to itself, recursing forever. Delegate to the
        # module-level cross_entropy loss instead.
        if output_fn:
            x_logits = output_fn(x_logits)
        return cross_entropy(x_true, x_logits)
    return {'x_logits': cross_entropy_fn}
def cross_entropy(inputs, x_logits):
    """Reconstruction loss: negative expected log-likelihood of `inputs`
    under the decoder logits, averaged over the batch (lower is better)."""
    reconstruction_loss = expected_loglikelihood_with_lower_bound(x_true=inputs, x_logits=x_logits)
    Px_xreconst = tf.reduce_mean(-reconstruction_loss)
    return -Px_xreconst
def create_variables(variables_params, model_name, restore=None):
    """Restore the named models from `restore` when possible; otherwise
    build them fresh from `variables_params`."""
    names = [spec['name'] for spec in variables_params]
    loaded = None
    if restore:
        try:
            loaded = load_models(restore, [model_name + '_' + name for name in names])
        except Exception as err:
            print(str(err))
            print()
            log_message('Faild to restore old models !', logging.ERROR)
    # Fall back to freshly created models when nothing could be restored.
    return loaded or create_models(variables_params)
def encode_fn(**kwargs):
    """Run the inference network on the given inputs.

    Accepts either the raw batch or a dict holding it under 'inference';
    returns {'z_latents': <encoder output>}.
    """
    model = kwargs['model']
    batch = kwargs['inference_inputs']
    if 'inference' in batch:
        batch = batch['inference']
    latents = model('inference', [batch])
    return {'z_latents': latents}
def decode_fn(model, latents, output_shape, apply_sigmoid=False):
    """Run the generative network and reshape its output to the data shape.

    Returns raw logits by default; with apply_sigmoid=True returns
    per-pixel probabilities instead.
    """
    logits = model('generative', [latents['generative_inputs']])
    target_shape = [-1] + [*output_shape]
    if not apply_sigmoid:
        return tf.reshape(tensor=logits, shape=target_shape, name='x_logits')
    return tf.reshape(tensor=tf.sigmoid(logits), shape=target_shape, name='x_probablities')
def generate_sample(model, input_shape, latents_shape, epsilon=None):
    """Decode a latent sample (drawn from N(0, 1) unless supplied) into
    pixel probabilities of shape input_shape."""
    latent = epsilon if epsilon is not None else tf.random.normal(shape=latents_shape)
    return decode_fn(model=model,
                     latents={'generative_inputs': latent},
                     output_shape=input_shape,
                     apply_sigmoid=True)
return generated | kkahloots/Generative_Models | graphs/basics/AE_graph.py | AE_graph.py | py | 2,758 | python | en | code | 1 | github-code | 90 |
27613968784 | import turtle
# Animated screensaver: four heart images orbit on a black background.
# Requires "heart 02.gif" to exist next to this script.
screen = turtle.Screen()
screen.setup(500,500)
screen.tracer(0)
screen.addshape("heart 02.gif") # register the image with the screen as a shape
don = turtle.Turtle()
# NOTE(review): turtle.clone() clones the module-level anonymous turtle —
# despite the names, screen1..3 are turtles, not screens; confirm intended.
screen1=turtle.clone()
screen2=turtle.clone()
screen3=turtle.clone()
don.speed(200)
turtle.bgcolor("black")
don.shape("heart 02.gif")
screen1.shape("heart 02.gif")# now set the turtle's shape to it
screen2.shape("heart 02.gif")
screen3.shape("heart 02.gif")
# Position the four hearts before the animation starts.
don.penup()
don.goto(0,180)
screen1.penup()
screen1.goto(0,0)
screen2.penup()
screen2.goto(270,-180)
screen3.penup()
screen3.goto(360,-0)
# Animation loop: two hearts circle one way, two the other; tracer(0)
# means nothing is drawn until screen.update() each frame.
while True :
    screen.update()
    don.right(1.11)
    don.forward(1.11)
    screen1.left(1.11)
    screen1.backward(1.11)
    screen2.left(1.11)
    screen2.backward(1.11)
    screen3.right(1.11)
    screen3.forward(1.11)
    don.right(10)
7733001929 | from cmath import log
from pycardano import *
import json
import sys
from dataclasses import dataclass, field
from typing import Dict, List
# Shorthand truthiness flags used throughout this script.
t = True
f = False
# Master switch for verbose dev logging (Methods.l is a no-op when False).
dev = f
class Methods:
    """Small helper toolbox: dev logging, fee sizing, and fatal exit."""

    def l(self, l, d, s):
        """Pretty, informative logging of labelled data; no-op unless dev mode."""
        if not dev:
            return
        dtype = type(d)
        try:
            rendered = json.dumps(d, indent=2, sort_keys=t)
        except:
            rendered = d
        if s:
            # Section banner before the payload.
            print(f"\n\n\n{'─' * 25}\n{l}: {dtype}\n{'─' * 15}\n")
        if type(rendered) in (list, set):
            for idx, item in enumerate(rendered):
                print(f"{idx}:", item, "\n")
        else:
            print(rendered)

    def calc_fee(self, tx):
        """Approximate fee driver: UTF-8 byte length of the tx's string form."""
        return len(str(tx).encode("utf-8"))

    def e(self, m):
        """Print the message and abort the whole script with it."""
        print(m)
        sys.exit(m)
m = Methods()
try:
    # argv: [0] signing/verification keys JSON, [1] tx request JSON,
    # [2] BlockFrost project id.
    example = []
    args = sys.argv[1:] or example
    if len(args) <= 2:
        m.e("Arguments have no data.")
    if len(args) > 2:
        secret = args[0]
        data = args[1]
        bf = args[2]
    jsonsecret = json.loads(secret)
    jsondata = json.loads(data)
    if not "payment" in jsonsecret or not "stake" in jsonsecret:
        m.e("Unable to load keys")
    pkey = jsonsecret["payment"]["signing"]["cborHex"]
    vkey = jsonsecret["payment"]["verification"]["cborHex"]
    network = Network.MAINNET
    context = BlockFrostChainContext(bf, network)
    if context.api.health().is_healthy != True:
        m.e("Problem connecting with BlockFrost.")
    sk = PaymentSigningKey.from_cbor(pkey)
    vk = PaymentVerificationKey.from_signing_key(sk)
    return_address = Address.from_primitive(jsondata["address"])
    utxos_from_bf = context.utxos(str(return_address))
    # Mutable build state: amounts are in lovelace; token keys are policy ids.
    state = {
        "minada": 2000000,
        "minfee": int(jsondata["txfee"]) or 300000,
        "sendall": f,  # TODO put option on front end
        "submit": jsondata["submit"],
        "required": {"lovelace": int(jsondata["txfee"]) or 300000},  # default with minfee
        "total_in": {},
        "return": {},
        "tokens": {},
        "tx_inputs": [],
        "tx_outputs": [],
    }
    s = state
    d = jsondata
    # iterate through tx output requests
    # Iterate through available utxos, set inputs, set total_in
    for utxo in utxos_from_bf:
        m.l("utxo", utxo, f)
        tx_id = str(utxo.input.transaction_id)
        coin = utxo.output.amount.coin
        index = utxo.input.index
        # Set input
        s["tx_inputs"].append(TransactionInput.from_primitive([tx_id, index]))
        # set total_in
        if not "lovelace" in s["total_in"]:
            s["total_in"]["lovelace"] = coin
        else:
            s["total_in"]["lovelace"] += coin
        # Accumulate any native assets carried by this UTxO.
        if len(utxo.output.amount.multi_asset.keys()) > 0:
            for token_key, token in utxo.output.amount.multi_asset.items():
                name = list(token.keys())[0].to_primitive().hex()
                amount = token[list(token.keys())[0]]
                policy_id = token_key.to_primitive().hex()
                s["tokens"][policy_id] = {"name": bytes.fromhex(name)}
                if not policy_id in s["total_in"]:
                    s["total_in"][policy_id] = amount
                else:
                    s["total_in"][policy_id] += amount
    m.l("tokens", s["tokens"], f)
    for output in d["outputs"]:
        lovelace = 0
        multi = {}
        # Set required and prep tx output
        for token in output["tokens"]:
            token_policy = token["unit"][0:56]
            m.l("output token", token, f)
            # set required
            if not token_policy in s["required"]:
                s["required"][token_policy] = int(token["quantity"])
            else:
                s["required"][token_policy] += int(token["quantity"])
            # prepare output
            if token["unit"] == "lovelace":
                lovelace = int(token["quantity"])
            else:
                multi[bytes.fromhex(token_policy)] = {
                    s["tokens"][token_policy]["name"]: int(token["quantity"])
                }
        # Create tx output
        s["tx_outputs"].append(
            TransactionOutput(
                Address.decode(output["address"]),
                Value.from_primitive(
                    [lovelace, multi] if len(multi.keys()) > 0 else [lovelace]
                ),
            )
        )
    # Find difference of total_in from required
    for token, qty in s["total_in"].items():
        if token in s["required"]:
            s["return"][token] = qty - s["required"][token]
        else:
            s["return"][token] = qty
    # Create return (change) output from difference
    return_multi = {}
    return_lovelace = 0
    for token, qty in s["return"].items():
        if token == "lovelace":
            return_lovelace += qty
        else:
            return_multi[bytes.fromhex(token)] = {s["tokens"][token]["name"]: qty}
    # Only emit the change output when it clears the min-ADA threshold.
    if return_lovelace > s["minada"]:
        s["tx_outputs"].append(
            TransactionOutput(
                return_address,
                Value.from_primitive(
                    [return_lovelace, return_multi]
                    if len(return_multi.keys()) > 0
                    else [return_lovelace]
                ),
            )
        )
    for log in ["required", "total_in", "tx_inputs", "tx_outputs", "tokens"]:
        m.l(log, s[log], f)
    # Metadata
    # NOTE(review): auxiliary_data is built but never attached to the tx body
    # below — confirm whether CIP-25 metadata should be included in the tx.
    if "metadata" in jsondata:
        auxiliary_data = AuxiliaryData(
            AlonzoMetadata(metadata=Metadata({721: jsondata["metadata"]}))
        )
    # Create Raw Tx
    # Subtract minfee
    # total_fee_subtracted = 0
    # for output in s['tx_outputs']:
    # m.l('tx_output', output, t)
    # if output.amount.coin - s['minfee'] >= s['minada'] - total_fee_subtracted:
    # output.amount.coin -= s['minfee']
    # break
    # else:
    # total_fee_subtracted += output.amount.coin - s['minada']
    # output.amount.coin = s['minada']
    # if total_fee_subtracted >= s['minfee']:
    # output.amount.coin += total_fee_subtracted - s['minfee']
    # break
    # m.l('total_fee_subtracted', total_fee_subtracted, t)
    # m.l('tx_outputs', s['tx_outputs'], t)
    tx_body = TransactionBody(
        inputs=s["tx_inputs"], outputs=s["tx_outputs"], fee=s["minfee"]
    )
    # Sign tx and get deterministic tx ID
    signature = sk.sign(tx_body.hash())
    vk_witnesses = [VerificationKeyWitness(vk, signature)]
    signed_tx = Transaction(tx_body, TransactionWitnessSet(vkey_witnesses=vk_witnesses))
    tx_id = str(signed_tx.id)
    m.l('tokens', s['tokens'], t)
    m.l('inputs', s['tx_inputs'], t)
    m.l('outputs', s['tx_outputs'], t)
    # Submit or return signed_tx
    if s["submit"] == "true":
        context.submit_tx(signed_tx.to_cbor())
        print(tx_id)
    else:
        # print(builder._fee)
        if not dev:
            print(json.dumps([tx_id, signed_tx.to_cbor()]))
    # todo return proper errors
    # test metadata and check tx fee cost increase
    # add minting to this script
except:
    # Surface the error (and exit non-zero via m.e) for the calling process.
    error = sys.exc_info()[1]
    print(error)
    m.e([sys.exc_info()[0].__name__, error])
| 34r7h/cardano-python-js | python/tx.py | tx.py | py | 7,227 | python | en | code | 0 | github-code | 90 |
34130574570 | """Open directory of current file in explorer
Usage:
1. Store in $sublime/Packages/User
2. Map to hotkey (e.g. F11)
3. Press hotkey to open explorer
Tested under Windows 7, 8 and Ubuntu 12.01.
"""
import os
import platform
import subprocess
import sublime_plugin
# Per-OS command that opens a directory in the native file browser.
# Bug fix: on macOS (Darwin) the launcher command is `open`; `start` is a
# Windows cmd.exe builtin and does not exist on macOS.
mapping = {'Windows': 'explorer',
           'Darwin': 'open',
           'Linux': 'nautilus'}
# None on unrecognized platforms; the command below would then fail.
explorer = mapping.get(platform.system())
class OpenInExplorerCommand(sublime_plugin.TextCommand):
    """Sublime Text command: save the active file, then reveal its folder in
    the platform's file browser."""

    def run(self, edit):
        self.view.run_command('save')
        folder = os.path.dirname(self.view.file_name())
        subprocess.Popen([explorer, folder])
| mottosso/sublime | plugins/open_in_explorer.py | open_in_explorer.py | py | 685 | python | en | code | 0 | github-code | 90 |
def to_fizzbuzz(number):
    """Return the FizzBuzz word for `number`, or the number as a string."""
    by_three = number % 3 == 0
    by_five = number % 5 == 0
    if by_three and by_five:
        return 'FizzBuzz'
    if by_three:
        return 'Fizz'
    if by_five:
        return 'Buzz'
    return str(number)
def main():
    """Read N from stdin and print the sum of the values 1..N whose FizzBuzz
    form is numeric (i.e. divisible by neither 3 nor 5).

    Performance fix: the original materialized a million-entry FizzBuzz list
    regardless of N and then re-parsed the strings with isdigit(); summing
    directly gives identical output in O(N) time and O(1) extra space (and
    no longer silently caps N at 10**6 - 1).
    """
    N = int(input())
    total = sum(i for i in range(1, N + 1) if i % 3 != 0 and i % 5 != 0)
    print(total)


if __name__ == "__main__":
    # Guarded entry point so importing this module has no side effects.
    main()
32275633958 | from random import randint
from time import sleep
from os import system
def Upup(Up):
    """Score the rank directly above a run's top card.

    Higher scores mean laying that card is more attractive (frees more of
    the hand); 'X' marks "nothing above the king" and scores 0.
    """
    rank_scores = {'X': 0, 'K': 6, 'Q': 5, 'J': 4, '10': 3, '9': 2, '8': 1}
    return rank_scores.get(Up)
def Downdown(Down):
    """Score the rank directly below a run's bottom card (0 = no card).

    Ranks 1..6 score 7 - rank, so a low ace (rank 1) is the most
    attractive lay and a 6 the least.
    """
    if Down == 0:
        return 0
    if 1 <= Down <= 6:
        return 7 - Down
    return None
def endChoice():
    """Ask the player whether to keep going after someone empties their hand.

    Returns (playing, gameMode): (False, 'over') to stop, or
    (True, 'toTheEnd') to play on until all 52 cards are down.
    """
    while True:
        answer = input('Do you wish to continue? (y/n) ')
        if answer in ('n', 'N', 'no', 'No'):
            return (False, 'over')
        if answer in ('y', 'Y', 'yes', 'Yes'):
            return (True, 'toTheEnd')
        print('Incorrect input')
#Takes remaining cards and creates a hand with numebr of cards in hand given to it by main program
def MakeHand(Cards, cardsPerHand):
from random import randint
Hand = []
for i in range (cardsPerHand):
num = randint(0,(len(Cards)-1))
Hand.append(Cards[num])
del Cards[num]
return(Hand)
def listPrint(lista):
    """Print the items comma-separated on one line; print nothing if empty."""
    if lista:
        print(', '.join(str(item) for item in lista))
def FindCardsPerHand():
    """Ask how many computer opponents to play and size the deal.

    Returns (cards per computer hand, total seats including the player).
    Re-prompts until one of 2-6 is entered; a non-numeric entry still
    raises ValueError, matching the original behaviour.
    """
    print('How many people would you like to play against?')
    hand_sizes = {2: (17, 3), 3: (13, 4), 4: (10, 5), 5: (8, 6), 6: (7, 7)}
    while True:
        Players = int(input('2, 3, 4, 5 or 6? '))
        if Players in hand_sizes:
            return hand_sizes[Players]
def LayCard(card, hand, Diamonds, Clubs, Hearts, Spades):
    """Lay `card` on its suit pile and remove it from `hand`.

    Sevens and anything above (8, 9, 10, J, Q, K) go on the back of the
    pile; ace through 6 go on the front, so each pile stays sorted as a
    run around the seven. Returns (hand, Diamonds, Clubs, Hearts, Spades).
    """
    piles = {'D': Diamonds, 'C': Clubs, 'H': Hearts, 'S': Spades}
    pile = piles[card[0]]
    rank = card[1:]
    if rank in ('7', 'J', 'Q', 'K'):
        pile.append(card)
    elif int(card[1]) > 7 or (card[1] == '1' and card[-1] == '0'):
        # 8, 9 or 10 extends the high end of the run.
        pile.append(card)
    elif int(card[1]) < 7 and int(card[-1]) != 0:
        # Ace up to 6 extends the low end.
        pile.insert(0, card)
    hand.remove(card)
    return (hand, Diamonds, Clubs, Hearts, Spades)
def CalculateAbove(Card):
    """Return the rank string one above the top card of a pile.

    Only Card's last character is inspected, so 'D10' (last char '0')
    maps to 'J'; past the king the sentinel 'X' means "no card above".
    """
    successor = {'0': 'J', 'J': 'Q', 'Q': 'K', 'K': 'X'}
    last = Card[-1]
    if last in successor:
        return successor[last]
    return str(int(last) + 1)
def FindCard(CardNeeded, CtHand):
    """Return True when CardNeeded is present in the hand CtHand.

    Idiom fix: the original set an unused `found` flag and hand-rolled an
    index loop; the membership operator does the same check directly.
    """
    return CardNeeded in CtHand
def FindaLay7(CtHand, Diamonds, Clubs, Hearts, Spades):
    """With no run-extending moves available, open a new suit with a seven.

    Tries spades, then clubs, then hearts (diamonds always open the game);
    lays the first seven held for a still-empty pile, otherwise knocks.
    """
    for pile, seven_card in ((Spades, 'S7'), (Clubs, 'C7'), (Hearts, 'H7')):
        if len(pile) == 0 and FindCard(seven_card, CtHand):
            LayCard(seven_card, CtHand, Diamonds, Clubs, Hearts, Spades)
            print(f'This computer lays {seven_card}')
            return
    print('This computer knocks')
def FindDiamondOptions(CtHand, Options, Diamonds):
    """Count the playable diamond moves for this hand.

    Checks whether the hand holds the card just below the pile's bottom
    card or just above its top card, bumping Options for each.
    Returns (Options, HighDiamond, LowDiamond).
    """
    low_needed = 'D' + str(int(Diamonds[0][1]) - 1)
    LowDiamond = FindCard(low_needed, CtHand)
    if LowDiamond:
        Options += 1
    HighDiamond = False
    high_needed = 'D' + str(CalculateAbove(Diamonds[-1]))
    if high_needed != 'DX':
        HighDiamond = FindCard(high_needed, CtHand)
        if HighDiamond:
            Options += 1
    return (Options, HighDiamond, LowDiamond)
def FindHeartOptions(CtHand, Options, Hearts):
    """Count the playable heart moves for this hand.

    Mirrors FindDiamondOptions for the hearts pile.
    Returns (Options, HighHeart, LowHeart).
    """
    low_needed = 'H' + str(int(Hearts[0][1]) - 1)
    LowHeart = FindCard(low_needed, CtHand)
    if LowHeart:
        Options += 1
    HighHeart = False
    high_needed = 'H' + str(CalculateAbove(Hearts[-1]))
    if high_needed != 'HX':
        HighHeart = FindCard(high_needed, CtHand)
        if HighHeart:
            Options += 1
    return (Options, HighHeart, LowHeart)
def FindClubOptions(CtHand, Options, Clubs):
    """Count the playable club moves for this hand.

    Bug fix: the third parameter was named `Hearts`, so the body fell back
    to the module-level `Clubs` global and silently ignored its argument
    (it only worked because callers happened to pass that same global).
    It now uses the clubs pile it is given; callers already pass `Clubs`
    positionally, so the interface is unchanged.

    Returns (Options, HighClub, LowClub).
    """
    LowClub = False
    HighClub = False
    ClubsLowCard = Clubs[0]
    ClubsLowNum = int(ClubsLowCard[1]) - 1
    CardNeeded = 'C' + str(ClubsLowNum)
    HaveCard = FindCard(CardNeeded, CtHand)
    if HaveCard == True:
        LowClub = True
        Options += 1
    ClubsHighCard = Clubs[-1]
    ClubsHighNum = CalculateAbove(ClubsHighCard)
    CardNeeded = 'C' + str(ClubsHighNum)
    if CardNeeded != 'CX':
        HaveCard = FindCard(CardNeeded, CtHand)
        if HaveCard == True:
            Options += 1
            HighClub = True
    return (Options, HighClub, LowClub)
def FindSpadeOptions(CtHand, Options, Spades):
    """Count the playable spade moves for this hand.

    Mirrors FindDiamondOptions for the spades pile.
    Returns (Options, HighSpade, LowSpade).
    """
    low_needed = 'S' + str(int(Spades[0][1]) - 1)
    LowSpade = FindCard(low_needed, CtHand)
    if LowSpade:
        Options += 1
    HighSpade = False
    high_needed = 'S' + str(CalculateAbove(Spades[-1]))
    if high_needed != 'SX':
        HighSpade = FindCard(high_needed, CtHand)
        if HighSpade:
            Options += 1
    return (Options, HighSpade, LowSpade)
def Strategy(Diamonds, Clubs, Hearts, Spades, Hand):
    """Computer turn: score every legal run-extending move, then play the
    highest-scoring one; with no options, try to open a suit with a seven
    (or knock). Returns the updated (Hand, Diamonds, Clubs, Hearts, Spades)."""
    Options = 0
    # For each suit, find whether the hand can extend the run up and/or
    # down, and score each possible lay (Upup/Downdown: lower cards freed
    # score higher).
    if len(Diamonds) > 0:
        Options, highDiamond, lowDiamond = FindDiamondOptions(Hand, Options, Diamonds)
        if highDiamond == True:
            DUp = CalculateAbove(Diamonds[-1])
            DUp = Upup(DUp)
        else:
            DUp = 0
        if lowDiamond == True:
            temp = Diamonds[0]
            DDown = int(temp[1]) - 1
            DDown = Downdown(DDown)
        else:
            DDown = 0
    else:
        lowDiamond = False
        DUp = 0
        DDown = 0
        highDiamond = False
    if len(Hearts) > 0:
        Options, highHeart, lowHeart = FindHeartOptions(Hand, Options, Hearts)
        if highHeart == True:
            HUp = CalculateAbove(Hearts[-1])
            HUp = Upup(HUp)
        else:
            HUp = 0
        if lowHeart == True:
            temp = Hearts[0]
            HDown = int(temp[1]) - 1
            HDown = Downdown(HDown)
        else:
            HDown = 0
    else:
        lowHeart = False
        HUp = 0
        HDown = 0
        highHeart = False
    if len(Clubs) > 0:
        Options, highClub, lowClub = FindClubOptions(Hand, Options, Clubs)
        if highClub == True:
            CUp = CalculateAbove(Clubs[-1])
            CUp = Upup(CUp)
        else:
            CUp = 0
        if lowClub == True:
            temp = Clubs[0]
            CDown = int(temp[1]) - 1
            CDown = Downdown(CDown)
        else:
            CDown = 0
    else:
        highClub = False
        CUp = 0
        CDown = 0
        lowClub = False
    if len(Spades) > 0:
        Options, highSpade, lowSpade = FindSpadeOptions(Hand, Options, Spades)
        if highSpade == True:
            SUp = CalculateAbove(Spades[-1])
            SUp = Upup(SUp)
        else:
            SUp = 0
        if lowSpade == True:
            temp = Spades[0]
            SDown = int(temp[1]) - 1
            SDown = Downdown(SDown)
        else:
            SDown = 0
    else:
        lowSpade = False
        SUp = 0
        SDown = 0
        highSpade = False
    # Simulate thinking time.
    sleep(randint(1, 3))
    if Options == 0:
        FindaLay7(Hand, Diamonds, Clubs, Hearts, Spades)
    else:
        # Index meaning: 0/1 = diamonds up/down, 2/3 = hearts, 4/5 = spades,
        # 6/7 = clubs. Pick the first highest-scoring move.
        StrategyList = []
        StrategyList.append(DUp)
        StrategyList.append(DDown)
        StrategyList.append(HUp)
        StrategyList.append(HDown)
        StrategyList.append(SUp)
        StrategyList.append(SDown)
        StrategyList.append(CUp)
        StrategyList.append(CDown)
        Max = 0
        MaxOption = 0
        for i in range(0, 8):
            if StrategyList[i] > Max:
                Max = StrategyList[i]
                MaxOption = i
        if MaxOption == 0:
            temp = CalculateAbove(Diamonds[-1])
            card = 'D' + temp
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 1:
            temp = Diamonds[0]
            below = int(temp[1]) - 1
            card = 'D' + str(below)
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 2:
            temp = CalculateAbove(Hearts[-1])
            card = 'H' + temp
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 3:
            temp = Hearts[0]
            below = int(temp[1]) - 1
            card = 'H' + str(below)
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 4:
            temp = CalculateAbove(Spades[-1])
            card = 'S' + temp
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 5:
            temp = Spades[0]
            below = int(temp[1]) - 1
            card = 'S' + str(below)
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 6:
            temp = CalculateAbove(Clubs[-1])
            card = 'C' + temp
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
        if MaxOption == 7:
            temp = Clubs[0]
            below = int(temp[1]) - 1
            card = 'C' + str(below)
            Hand, Diamonds, Clubs, Hearts, Spades = LayCard(card, Hand, Diamonds, Clubs, Hearts, Spades)
            print('They layed', card)
    return (Hand, Diamonds, Clubs, Hearts, Spades)
# Set Up Hands
Hearts = []
Diamonds = []
Clubs = []
Spades = []
# Full 52-card deck: suit letter + rank (1 = ace, then 2..10, J, Q, K).
Cards = ['D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10', 'DJ', 'DQ', 'DK', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'HJ', 'HQ', 'HK', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'CJ', 'CQ', 'CK', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'SJ', 'SQ', 'SK']
cardsPerHand, noPlayers = FindCardsPerHand()
Ct1Hand = MakeHand(Cards, cardsPerHand)
Ct2Hand = MakeHand(Cards, cardsPerHand)
Ct3Hand = []
Ct4Hand = []
Ct5Hand = []
Ct6Hand = []
# Deal the remaining computer hands. Where 52 doesn't divide evenly,
# cardsPerHand is bumped so the later hands (and the player) absorb the
# extra cards and the whole deck is dealt out.
if cardsPerHand == 13 or cardsPerHand == 10 or cardsPerHand == 7:
    Ct3Hand = MakeHand(Cards, cardsPerHand)
if cardsPerHand == 10:
    cardsPerHand = 11
    Ct4Hand = MakeHand(Cards, cardsPerHand)
if cardsPerHand == 8:
    cardsPerHand = 9
    Ct3Hand = MakeHand(Cards, cardsPerHand)
    Ct4Hand = MakeHand(Cards, cardsPerHand)
    Ct5Hand = MakeHand(Cards, cardsPerHand)
if cardsPerHand == 7:
    Ct4Hand = MakeHand(Cards, cardsPerHand)
    cardsPerHand = 8
    Ct5Hand = MakeHand(Cards, cardsPerHand)
    Ct6Hand = MakeHand(Cards, cardsPerHand)
# Whatever is left in the deck becomes the human player's hand.
PlayerHand = Cards
#Hands complete
#PlayerHand, Ct1,2,3,4,5,6Hand - based on number of players input by user
oneGo = 0
twoGo = 0
threeGo = 0
fourGo = 0
fiveGo = 0
sixGo = 0
D7Player = FindCard('D7', PlayerHand)
if D7Player == True:
print ('You have the seven of diamonds!')
playerPlacement = 1
oneGo = 2
twoGo = 3
if noPlayers>3:
threeGo = 4
if noPlayers>4:
fourGo = 5
if noPlayers>5:
fiveGo = 6
if noPlayers>6:
sixGo = 7
else:
#Determines when the user will take their go
playerPlacement = randint(2, noPlayers)
print ('Your go wil be number ', playerPlacement)
oneSevenD = FindCard('D7', Ct1Hand)
twoSevenD = FindCard('D7', Ct2Hand)
if oneSevenD == True:
oneGo = 1
twoGo = 2
if noPlayers>3:
threeGo = 3
if noPlayers>4:
fourGo = 4
if noPlayers>5:
fiveGo = 5
if noPlayers>6:
sixGo =6
elif twoSevenD == True:
oneGo = 2
twoGo = 1
if noPlayers>3:
threeGo = 3
if noPlayers>4:
fourGo = 4
if noPlayers>5:
fiveGo = 5
if noPlayers>6:
sixGo = 6
else:
threeSevenD = FindCard('D7', Ct3Hand)
if threeSevenD == True:
threeGo = 1
oneGo = 2
twoGo = 3
if noPlayers>4:
fourGo =4
if noPlayers>5:
fiveGo = 5
if noPlayers>6:
sixGo = 6
else:
fourSevenD = FindCard('D7', Ct4Hand)
if fourSevenD == True:
fourGo = 1
oneGo = 2
twoGo = 3
threeGo = 4
if noPlayers>5:
fiveGo = 5
if noPlayers>6:
sixGo = 6
else:
fiveSevenD = FindCard('D7', Ct5Hand)
if fiveSevenD == True:
fiveGo = 1
oneGo = 2
twoGo = 3
threeGo = 4
fourGo = 5
if noPlayers>6:
sixGo = 6
else:
sixGo = 1
oneGo = 2
twoGo = 3
threeGo = 4
fourGo = 5
fiveGo = 6
if playerPlacement == oneGo:
oneGo = noPlayers
elif playerPlacement == twoGo:
twoGo = noPlayers
elif playerPlacement == threeGo:
threeGo = noPlayers
elif playerPlacement == fourGo:
fourGo = noPlayers
elif playerPlacement == fiveGo:
fiveGo = noPlayers
elif playerPlacement == sixGo:
sixGo = noPlayers
input('Press enter to play ')
system('clear')
gameMode = 'Playing'
playing = True
count = 1
# Main game loop: each pass of the for-loop is one seat's turn; i is the
# turn number within the round.
while playing == True:
    for i in range(1, (noPlayers + 1)):
        if playing == True:
            # Show the table state and the player's hand before every turn.
            print('Round ', count)
            listPrint(Diamonds)
            listPrint(Hearts)
            listPrint(Clubs)
            listPrint(Spades)
            print('')
            print('Your hand: ')
            listPrint(PlayerHand)
            print('')
            sleep(0.5)
            # x stays True only when the seat whose turn it is still has cards.
            x = True
            if gameMode == 'toTheEnd':
                # Playing to the end: stop once all 52 cards are down.
                if len(Diamonds) == 13 and len(Hearts) == 13 and len(Clubs) == 13 and len(Spades) == 13:
                    x = False
                    playing = False
            if oneGo == i and len(Ct1Hand) == 0 and playing == True:
                x = False
                print('Player one has no cards left')
                input('Press enter ')
                system('clear')
            if twoGo == i and len(Ct2Hand) == 0 and playing == True:
                x = False
                print('Player two has no cards left')
                input('Press enter ')
                system('clear')
            if threeGo == i and len(Ct3Hand) == 0 and playing == True:
                x = False
                print('Player three has no cards left')
                input('Press enter ')
                system('clear')
            if fourGo == i and len(Ct4Hand) == 0 and playing == True:
                x = False
                print('Player four has no cards left')
                input('Press enter ')
                system('clear')
            if fiveGo == i and len(Ct5Hand) == 0 and playing == True:
                x = False
                print('Player five has no cards left')
                input('Press enter ')
                system('clear')
            if sixGo == i and len(Ct6Hand) == 0 and playing == True:
                print('Player six has no cards left')
                input('Press enter ')
                system('clear')
                x = False
            if playerPlacement == i and len(PlayerHand) == 0 and playing == True:
                x = False
                print('You have no cards left!')
                input('Press enter ')
                system('clear')
            if x == True and gameMode != 'over':
                sleep(0.5)
                # Dispatch the turn to whichever seat owns slot i. On the
                # very first turn the opener must lay the seven of diamonds.
                if oneGo == i:
                    print('It\'s player one\'s go!')
                    if count == 1 and oneGo == 1:
                        LayCard('D7', Ct1Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct1Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct1Hand)
                elif threeGo == i:
                    print('It\'s player three\'s go!')
                    if count == 1 and threeGo == 1:
                        LayCard('D7', Ct3Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct3Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct3Hand)
                elif twoGo == i:
                    print('It\'s player two\'s go!')
                    if count == 1 and twoGo == 1:
                        LayCard('D7', Ct2Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct2Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct2Hand)
                elif fourGo == i:
                    print('It\'s player four\'s go!')
                    if count == 1 and fourGo == 1:
                        LayCard('D7', Ct4Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct4Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct4Hand)
                elif fiveGo == i:
                    # NOTE(review): says "four" but this is player five — copy-paste slip.
                    print('It\'s player four\'s go!')
                    if count == 1 and fiveGo == 1:
                        LayCard('D7', Ct5Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct5Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct5Hand)
                elif sixGo == i:
                    # NOTE(review): says "four" but this is player six — copy-paste slip.
                    print('It\'s player four\'s go!')
                    if count == 1 and sixGo == 1:
                        LayCard('D7', Ct6Hand, Diamonds, Clubs, Hearts, Spades)
                        print('They layed D7')
                    else:
                        Ct6Hand, Diamonds, Clubs, Hearts, Spades = Strategy(Diamonds, Clubs, Hearts, Spades, Ct6Hand)
                elif playerPlacement == i:
                    # Human turn: prompt until a legal card (or a knock 'k')
                    # is entered.
                    print('Your go!')
                    sleep(1)
                    print('\n')
                    cardInHand = False
                    CanLay = False
                    sevenp = False
                    if count == 1 and playerPlacement == 1:
                        print('You lay D7')
                        LayCard('D7', PlayerHand, Diamonds, Clubs, Hearts, Spades)
                        cardInHand = True
                        CanLay = True
                        sevenp = True
                        input('Press enter ')
                    while cardInHand == False or CanLay == False:
                        CanLay = False
                        cardInHand = False
                        chosenCard = input('Please enter the card you wish to lay: (type \'k\' if you cannot lay) ')
                        if chosenCard != 'k':
                            cardInHand = FindCard(chosenCard, PlayerHand)
                        if cardInHand == False and chosenCard != 'k':
                            print('You don\'t seem to have this card - enter again')
                        if cardInHand == True:
                            # A card is legal if it is a seven, or one above
                            # the top / one below the bottom of its suit run.
                            if chosenCard[1] == '7':
                                CanLay = True
                            elif chosenCard[0] == 'D' and len(Diamonds) > 0:
                                a = CalculateAbove(Diamonds[-1])
                                low = Diamonds[0]
                                b = int(low[1]) - 1
                                # '10' is compared by its final character.
                                if a == '10':
                                    a = '0'
                                if a == chosenCard[-1]:
                                    CanLay = True
                                elif str(b) == str(chosenCard[1]):
                                    CanLay = True
                            elif chosenCard[0] == 'C' and len(Clubs) > 0:
                                c = CalculateAbove(Clubs[-1])
                                low = Clubs[0]
                                d = int(low[1]) - 1
                                if c == '10':
                                    c = '0'
                                if c == chosenCard[-1]:
                                    CanLay = True
                                elif str(d) == str(chosenCard[1]):
                                    CanLay = True
                            elif chosenCard[0] == 'H' and len(Hearts) > 0:
                                e = CalculateAbove(Hearts[-1])
                                low = Hearts[0]
                                f = int(low[1]) - 1
                                if e == '10':
                                    e = '0'
                                if e == chosenCard[-1]:
                                    CanLay = True
                                elif str(f) == str(chosenCard[1]):
                                    CanLay = True
                            elif chosenCard[0] == 'S' and len(Spades) > 0:
                                g = CalculateAbove(Spades[-1])
                                low = Spades[0]
                                h = int(low[1]) - 1
                                if g == '10':
                                    g = '0'
                                if g == chosenCard[-1]:
                                    CanLay = True
                                elif str(h) == str(chosenCard[1]):
                                    CanLay = True
                        if CanLay == False and chosenCard != 'k':
                            print('It doesn\'t look like you can lay that card, try again')
                        if chosenCard == 'k':
                            CanLay = True
                            cardInHand = True
                    if sevenp == False:
                        if chosenCard != 'k':
                            LayCard(chosenCard, PlayerHand, Diamonds, Clubs, Hearts, Spades)
            #Game over
            # After each turn, check every hand; setting i past noPlayers
            # makes the "Press enter" pause below skip once the game ends.
            if len(PlayerHand) == 0 and gameMode == 'Playing':
                sleep(3)
                system('clear')
                sleep(1)
                print('You win!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct1Hand) == 0 and gameMode == 'Playing':
                sleep(3)
                system('clear')
                sleep(1)
                print('Player one wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct2Hand) == 0 and gameMode == 'Playing':
                sleep(3)
                system('clear')
                sleep(1)
                print('Player two wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct3Hand) == 0 and gameMode == 'Playing' and threeGo != 0:
                sleep(3)
                system('clear')
                sleep(1)
                print('Player three wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct4Hand) == 0 and gameMode == 'Playing' and fourGo != 0:
                sleep(3)
                system('clear')
                sleep(1)
                print('Player four wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct5Hand) == 0 and gameMode == 'Playing' and fiveGo != 0:
                sleep(3)
                system('clear')
                sleep(1)
                print('Player five wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if len(Ct6Hand) == 0 and gameMode == 'Playing' and sixGo != 0:
                sleep(3)
                system('clear')
                sleep(1)
                print('Player six wins!')
                sleep(1)
                playing, gameMode = endChoice()
                if gameMode == 'over':
                    i = noPlayers + 2
            if playerPlacement != i:
                input('Press enter ')
                system('clear')
            count = count + 1
sleep(1)
system('clear')
print('Good game!')
| Castlestar4/Sevens | theGame.py | theGame.py | py | 23,464 | python | en | code | 0 | github-code | 90 |
# Selection sort (AOJ-style): read N and a list of N integers, sort in place,
# print the sorted list on one line, then print the number of swaps performed.
N = int(input())
A = [int(A) for A in input().split()]
cnt = 0  # number of element swaps (the judge's expected answer)
for i in range(N):
    # Find the index of the minimum element in the unsorted suffix A[i:].
    A_min = i
    for j in range(i, N):
        if A[A_min] > A[j]:
            A_min = j
    # Swap only when the minimum is not already in place, so cnt counts
    # real swaps rather than loop iterations.
    if i != A_min:
        t = A[i]
        A[i] = A[A_min]
        A[A_min] = t
        cnt += 1
# Print the sorted values space-separated, with no trailing space after the last.
for i in range(N):
    if i == N - 1:
        print(A[i])
    else:
        print("{}".format(A[i]), end=' ')
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p02260/s990677395.py | s990677395.py | py | 392 | python | en | code | 0 | github-code | 90 |
43548579142 | import pygame
from pygame.locals import *
import os
import sys
from RadioButton import RadioGroup
from CheckBox import CheckBox
from startMenu import Start
class Options(object):
    """In-game options screen: resolution radio buttons and audio mute checkboxes.

    Draws itself onto *screen* every frame inside run() and applies changes
    immediately (resolution switch, music stop) when widgets are clicked.
    """
    def __init__(self, screen, infoScreen, font_op, y_offset):
        """Store display handles and build the initial widget set.

        screen     -- the pygame display surface to draw on
        infoScreen -- pygame display Info object (used to detect fullscreen-capable modes)
        font_op    -- factory callable (size, name) -> pygame Font
        y_offset   -- vertical offset applied to all widget positions
        """
        self.screen = screen
        self.infoScreen = infoScreen
        self.font_op = font_op
        self.y_offset = y_offset
        self.res_opt = 0  # index into screen_modes of the selected resolution
        self.soundeffectsOff = False
        self.musicOff = False
        self.load_images()
        self.load_buttons(self.res_opt, self.soundeffectsOff, self.musicOff)
        # Available window sizes; last entry (4:3) is currently unsupported (see commented lines below).
        self.screen_modes = [(960, 540), (1280, 720), (1600, 900), (1920, 1080), (960, 720)]
    def load_buttons(self, res_opt, soundeffectsOn, musicOn):
        """Recreate all widgets and restore their checked state.

        Called every frame by run() because widget positions depend on the
        current screen size, which can change when the resolution is switched.
        NOTE(review): the soundeffectsOn/musicOn parameters are ignored; the
        method reads self.soundeffectsOff / self.musicOff instead — confirm.
        """
        # RADIO BUTTON GROUP
        self.resolution_radio_buttons = RadioGroup(self.screen)
        self.resolution_radio_buttons.newButton(self.screen.get_width() / 2 - 80, self.screen.get_height() / 2 - 54 + self.y_offset, 5)
        self.resolution_radio_buttons.newButton(self.screen.get_width() / 2 - 80, self.screen.get_height() / 2 - 34 + self.y_offset, 5)
        self.resolution_radio_buttons.newButton(self.screen.get_width() / 2 - 80, self.screen.get_height() / 2 - 14 + self.y_offset, 5)
        self.resolution_radio_buttons.newButton(self.screen.get_width() / 2 + 20, self.screen.get_height() / 2 - 54 + self.y_offset, 5)
        #self.resolution_radio_buttons.newButton(self.screen.get_width() / 2 + 20, self.screen.get_height() / 2 - 34 + self.y_offset, 5) #4:3 not supported yet
        # CHECKBOXES
        self.checkbox_soundeffects = CheckBox(self.screen, self.screen.get_width() / 2 - 80, self.screen.get_height() / 2 + 58 + self.y_offset, 10)
        self.checkbox_music = CheckBox(self.screen, self.screen.get_width() / 2 - 80, self.screen.get_height()/2 + 78 + self.y_offset, 10)
        # FOR RELOADING BUTTONS
        # set the checked resolution box
        self.resolution_radio_buttons.setCurrent(res_opt)
        # if mute sound effects box should be checked check them, other wise leave unchecked
        if self.soundeffectsOff == True:
            if self.checkbox_soundeffects.getChecked() == False:
                self.checkbox_soundeffects.setChecked(self.screen.get_width() / 2 - 79, self.screen.get_height() / 2 + 59 + self.y_offset)
        else:
            if self.checkbox_soundeffects.getChecked() == True:
                self.checkbox_soundeffects.setChecked(self.screen.get_width() / 2 - 79, self.screen.get_height() / 2 + 59 + self.y_offset)
        # if mute music box should be checked check them, other wise leave unchecked
        if self.musicOff == True:
            if self.checkbox_music.getChecked() == False:
                self.checkbox_music.setChecked(self.screen.get_width() / 2 - 79, self.screen.get_height()/2 + 79 + self.y_offset)
        else:
            if self.checkbox_music.getChecked() == True:
                self.checkbox_music.setChecked(self.screen.get_width() / 2 - 79, self.screen.get_height()/2 + 79 + self.y_offset)
    def load_images(self):
        """Load the menu background image from the img/ directory."""
        self.img_menu_bg = pygame.image.load(os.path.join("img", "menu_bg4.png")).convert()
    def buttonClick(self):
        """Handle one mouse click; return False when the exit 'X' was hit, else True."""
        mouseX, mouseY = pygame.mouse.get_pos()
        # Exit button hit-box (top-right 'X' of the options window).
        if mouseX > self.screen.get_width() / 2 + 178 \
                and mouseX < self.screen.get_width() / 2 + 198 \
                and mouseY > self.screen.get_height() / 2 - 123 + self.y_offset and mouseY < self.screen.get_height() / 2 - 103 + self.y_offset:
            return False
        # Resolution change: go fullscreen when the chosen mode matches the
        # physical display size, windowed otherwise.
        if self.resolution_radio_buttons.checkButton(mouseX, mouseY):
            self.res_opt = self.resolution_radio_buttons.getCurrent()
            if self.infoScreen.current_h == self.screen_modes[self.resolution_radio_buttons.getCurrent()][1]\
                    and self.infoScreen.current_w == self.screen_modes[self.resolution_radio_buttons.getCurrent()][0]:
                self.screen = pygame.display.set_mode(self.screen_modes[self.resolution_radio_buttons.getCurrent()], pygame.FULLSCREEN)
            else:
                self.screen = pygame.display.set_mode(self.screen_modes[self.resolution_radio_buttons.getCurrent()])
        # Toggle the mute checkboxes and mirror their state on self.
        self.checkbox_soundeffects.setChecked(mouseX, mouseY)
        self.soundeffectsOff = self.checkbox_soundeffects.getChecked()
        self.checkbox_music.setChecked(mouseX, mouseY)
        self.musicOff = self.checkbox_music.getChecked()
        if self.checkbox_music.getChecked() == True:
            pygame.mixer.Sound(os.path.join('sound','start_menu.wav')).stop()
        return True
    def run(self):
        """Main loop of the options screen; blocks until exit, returns "start"."""
        options_exit = False
        while not options_exit:
            # background image
            self.screen.blit(self.img_menu_bg, (0, 0))
            self.screen.blit(pygame.transform.scale(self.img_menu_bg,(self.screen.get_width(), int(self.screen.get_height()-(2*self.y_offset)))),(0,self.y_offset))
            # rectangle window w/ exit button
            self.text_exit_options = self.font_op(22, "helvetica").render("X", True, (255, 0, 0))
            pygame.draw.rect(self.screen, (126, 51, 58), Rect((self.screen.get_width() / 2 - 200, self.screen.get_height() / 2 - 125 + self.y_offset), (400, 250 + self.y_offset)))
            pygame.draw.rect(self.screen, (255, 255, 255), Rect((self.screen.get_width() / 2 + 178, self.screen.get_height() / 2 - 123 + self.y_offset), (self.text_exit_options.get_width() + 6, 20)))
            self.screen.blit(self.text_exit_options, (self.screen.get_width() / 2 + 182, self.screen.get_height() / 2 - 127 + self.y_offset))
            pygame.draw.rect(self.screen, (94, 0, 9), Rect((self.screen.get_width() / 2 - 200, self.screen.get_height() / 2 - 125 + self.y_offset), (400, 250 + self.y_offset)), 2)
            # header
            header = self.font_op(50, "berlin").render("Game Options", True, (255, 255, 255))
            self.screen.blit(header, (self.screen.get_width() / 2 - 0.5 * header.get_width(), 4+self.y_offset))
            # video options
            video_options = self.font_op(20, "berlin").render("Video options", True, (255, 255, 255))
            self.screen.blit(video_options, (self.screen.get_width() / 2 - 0.5 * video_options.get_width(), self.screen.get_height() / 2 - 110 + self.y_offset))
            screen_resolution = self.font_op(12, "berlin").render("Screen Resolution", True, (255, 255, 255))
            self.screen.blit(screen_resolution, (self.screen.get_width() / 2 - 0.5 * screen_resolution.get_width(), self.screen.get_height() / 2 - 80 + self.y_offset))
            self.resoultion_text_1 = self.font_op(10, "berlin").render("960x540", True, (255, 255, 255))
            self.resoultion_text_2 = self.font_op(10, "berlin").render("1280x720", True, (255, 255, 255))
            self.resoultion_text_3 = self.font_op(10, "berlin").render("1600x900", True, (255, 255, 255))
            self.resoultion_text_4 = self.font_op(10, "berlin").render("1920x1080", True, (255, 255, 255))
            self.resoultion_text_5 = self.font_op(10, "berlin").render("960x720", True, (255, 255, 255))
            self.screen.blit(self.resoultion_text_1, (self.screen.get_width() / 2 - 70, self.screen.get_height() / 2 - 60 + self.y_offset))
            self.screen.blit(self.resoultion_text_2, (self.screen.get_width() / 2 - 70, self.screen.get_height() / 2 - 40 + self.y_offset))
            self.screen.blit(self.resoultion_text_3, (self.screen.get_width() / 2 - 70, self.screen.get_height() / 2 - 20 + self.y_offset))
            self.screen.blit(self.resoultion_text_4, (self.screen.get_width() / 2 + 30, self.screen.get_height() / 2 - 60 + self.y_offset))
            #self.screen.blit(self.resoultion_text_5, (self.screen.get_width() / 2 + 30, self.screen.get_height() / 2 - 40 + self.y_offset)) #4:3 not supported yet
            self.resolution_radio_buttons.draw()
            # audio option
            audio_options = self.font_op(20, "berlin").render("Audio options", True, (255, 255, 255))
            self.screen.blit(audio_options, (self.screen.get_width() / 2 - 0.5 * audio_options.get_width(), self.screen.get_height() / 2 + 30 + self.y_offset))
            self.text_sound_effect_option = self.font_op(12, "berlin").render("Mute sound effects", True, (255, 255, 255))
            self.text_music_option = self.font_op(12, "berlin").render("Mute music", True, (255, 255, 255))
            self.screen.blit(self.text_sound_effect_option, (self.screen.get_width() / 2 - 60, self.screen.get_height()/2 + 55 + self.y_offset))
            self.screen.blit(self.text_music_option, (self.screen.get_width() / 2 - 60, self.screen.get_height()/2 + 75 + self.y_offset))
            self.checkbox_soundeffects.draw()
            self.checkbox_music.draw()
            # player option
            # player_options = self.font_op(12, "berlin").render("Player options", True, (255, 255, 255))
            # self.screen.blit(player_options, (self.screen.get_width() / 2 - 0.5 * player_options.get_width(), self.screen.get_height()/2 + 100 + self.y_offset))
            for event in pygame.event.get():
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        options_exit = True
                        break
                elif event.type == MOUSEBUTTONDOWN: # Perform action on click
                    if not self.buttonClick():
                        options_exit = True
                        break
                elif event.type == QUIT:
                    pygame.quit()
                    sys.exit()
            # Rebuild widgets every frame so positions track any resolution change.
            self.load_buttons(self.res_opt, self.soundeffectsOff, self.musicOff)
            pygame.display.update()
        return "start"
| pmcoxson/CSC450GROUP1 | optionsMenu.py | optionsMenu.py | py | 9,666 | python | en | code | null | github-code | 90 |
43005035055 | from discord.ext import commands
from discord.ext.commands.errors import CommandNotFound
class CommandsEvents(commands.Cog):
    """Cog that logs command usage and reports command errors to the channel."""
    def __init__(self, bot):
        # Keep a reference to the bot; its .logger is used in the listeners below.
        self.bot = bot
    @commands.Cog.listener("on_command")
    async def on_command_event(self, ctx):
        """Log every successfully invoked command with its author."""
        self.bot.logger.info(f'{ctx.author} used command "{ctx.command}"')
    @commands.Cog.listener("on_command_error")
    async def on_command_error_event(self, ctx, error: commands.errors.CommandInvokeError):
        """Notify the channel on command failure; unknown commands are ignored.

        Re-raises the original exception so it still reaches the default
        error logging / traceback machinery.
        """
        if isinstance(error, CommandNotFound):
            return
        else:
            await ctx.send("This command has errored.")
            raise error.original
async def setup(bot):
    """discord.py extension entry point: register the cog on the bot."""
    await bot.add_cog(CommandsEvents(bot))
| Sly0511/MineMineNoMiBot | modules/events/commands.py | commands.py | py | 715 | python | en | code | 0 | github-code | 90 |
7808760394 | # -*- coding: utf-8 -*-
__author__ = 'Anders Mølmen Høst'
__email__ = 'anderhos@nmbu.no'
"""
From
Book: A Primer on Scientific Programming with Python
Author: Hans Petter Langtangen
Edition: 5th Edition
Year: 2016
Exercise 2.20: Explore what zero can be on a computer
"""
"""
1: Storing 1.0 as a value for eps
2: Initializing the while loop iterating as long as
the condition is not true.
3: Inside the while loop. Print a number of dots followed by the eps
4: Dividing eps by two and storing the new eps value
5: After termination of the while loop printing the
final value of eps.
"""
# Halve eps until adding it to 1.0 no longer changes the result: the loop
# stops once eps falls below (half of) the machine epsilon for Python floats.
eps = 1.0
while 1.0 != 1.0 + eps:
    print("...............", eps)
    eps = eps/2.0
# The first eps for which 1.0 + eps == 1.0 (rounds back to 1.0 in binary64).
print("final eps:", eps)
# Python interprets 10**(-16) as zero | anderhos/Python-exercises | src/machine_zero.py | machine_zero.py | py | 749 | python | en | code | 0 | github-code | 90 |
20616259355 | import logging
import os
import sqlite3
from datetime import date, datetime
logger = logging.getLogger("xivstrategy.data_store")
class DataStore:
    """SQLite-backed store recording one row per sent email (buy/sell counts)."""
    def __init__(self):
        """Open (or create) data/mydb next to this file and ensure the emails table exists."""
        databaseFilepath = os.path.dirname(os.path.realpath(__file__)) + '/' + 'data/mydb'
        # PARSE_DECLTYPES/PARSE_COLNAMES let sqlite3 convert TIMESTAMP columns
        # back into datetime objects (see the "[timestamp]" alias in fetch_entries).
        self.db = sqlite3.connect(databaseFilepath, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
        self.db.row_factory = sqlite3.Row
        cursor = self.db.cursor()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS emails (id INTEGER PRIMARY KEY, created_at TIMESTAMP, open_positions INTEGER, closed_positions INTEGER)
        ''')
        self.db.commit()
    def _destroy(self):
        """Drop the emails table (test/maintenance helper; store unusable afterwards)."""
        cursor = self.db.cursor()
        cursor.execute('''DROP TABLE emails''')
        self.db.commit()
    def add_entry(self, openPositions=None, closedPositions=None):
        """Insert a new email row stamped with the current local time."""
        cursor = self.db.cursor()
        now = datetime.now()
        cursor.execute('''INSERT INTO emails (created_at, open_positions, closed_positions)
            VALUES(?,?,?)''', (now, openPositions, closedPositions))
        logger.info('Email inserted with date: %s, buys: %s, sells: %s' % (now, openPositions, closedPositions))
        self.db.commit()
    def fetch_entries(self, startDate=None):
        """Return EmailEntry objects for all rows created after *startDate*.

        NOTE(review): startDate=None compares NULL in SQL and matches nothing —
        confirm callers always pass a date.
        """
        cursor = self.db.cursor()
        # The "[timestamp]" column alias triggers PARSE_COLNAMES conversion,
        # which is why the value is read positionally as row[1] below.
        cursor.execute('''SELECT id, created_at as "[timestamp]", open_positions, closed_positions
            FROM emails
            WHERE emails.created_at > ?''', (startDate,))
        matches = []
        for row in cursor:
            entry = EmailEntry(row['id'], row[1], row['open_positions'], row['closed_positions'])
            matches.append(entry)
        return matches
    def close(self):
        """Close the underlying database connection."""
        self.db.close()
class EmailEntry:
    """Value object mirroring one row of the ``emails`` table."""

    def __init__(self, id, createdAt, openPositions, closedPositions):
        # Plain attribute holders for the four column values.
        self.id = id
        self.createdAt = createdAt
        self.openPositions = openPositions
        self.closedPositions = closedPositions

    def description(self):
        """Return a one-line, human-readable summary of this entry."""
        return (
            f"id: {self.id}, createdAt: {self.createdAt}, "
            f"openPositions: {self.openPositions}, closedPositions: {self.closedPositions}"
        )
| ShadoFlameX/XIV-Strategy | data_store.py | data_store.py | py | 2,193 | python | en | code | 0 | github-code | 90 |
# Two dice with n and m faces: count how often each sum i+j occurs,
# then print every sum that occurs with the maximum frequency, ascending.
n, m = map(int, input().split())
nums = [0] * (n + m + 1)  # nums[s] = number of (i, j) pairs with i + j == s
ans = []
for i in range(1, n + 1):
    for j in range(1, m + 1):
        nums[i + j] += 1
max_val = max(nums)
for i, v in enumerate(nums):
    if v == max_val:
        ans.append(str(i))
print(' '.join(ans))
| ambosing/PlayGround | Python/Problem Solving/ETC_algorithm_problem/2-5-2 polygon.py | 2-5-2 polygon.py | py | 267 | python | en | code | 0 | github-code | 90 |
69799471016 | """Generic functions for projects in google colab.
This file contains functions to load/write files in various formats.
Covered formats:
- json
- jsonl
- txt
"""
from typing import List, Dict, Any, Union
import matplotlib.pyplot as plt
import json
import os
def load_json_file(
    filepath: str
) -> List[Any]:
    """Read *filepath* and return its parsed JSON content.

    *arguments*
        *filepath* path to the JSON file to read
    """
    with open(filepath, 'r', encoding='utf8') as handle:
        return json.load(handle)
def write_json_file(
    filepath: str,
    input_dict: Dict[str, Any],
    overwrite: bool = False
) -> None:
    """Write a dictionary into a json file.

    *arguments*
        *filepath* path to save the file into
        *input_dict* dictionary to be saved in the json file
        *overwrite* whether to force overwriting a file.
            Default is False so you don't delete an existing file.

    Raises FileExistsError when the target exists and *overwrite* is False.
    """
    # ``assert`` is stripped under ``python -O``; raise a real exception so the
    # overwrite guard cannot silently disappear in optimized runs.
    if not overwrite and os.path.exists(filepath):
        raise FileExistsError(f"refusing to overwrite existing file: {filepath}")
    with open(filepath, 'w', encoding='utf8') as writer:
        json.dump(input_dict, writer, indent=4, ensure_ascii=False)
def load_jsonl_file(
    filepath: str
) -> List[Dict[str, Any]]:
    """Read a JSON-lines file and return one parsed object per line.

    *arguments*
        *filepath* path to the file
    """
    with open(filepath, "r", encoding='utf8') as handle:
        return [json.loads(raw.strip()) for raw in handle.readlines()]
def load_jsonl_to_generator(
    filepath: str
) -> List[Dict[str, Any]]:
    """Lazily yield one parsed JSON object per line of a JSON-lines file.

    Streams the file line by line instead of calling ``readlines()`` first,
    so arbitrarily large files can be processed with constant memory —
    which is the whole point of the generator variant.

    *arguments*
        *filepath* path to the file
    """
    with open(filepath, "r", encoding='utf8') as reader:
        for line in reader:
            yield json.loads(line.strip())
def write_jsonl_file(
    filepath: str,
    input_list: List[Any],
    mode: str = 'a+',
    overwrite: bool = False
) -> None:
    """Write a list into a jsonl file, one JSON document per line.

    *arguments*
        *filepath* path to save the file into
        *input_list* list to be saved in the jsonl file; items must be JSON-serializable
        *mode* file mode passed to open() (default 'a+': append)
        *overwrite* when True the existing file is removed first;
            when False new items are appended to an existing file.
    """
    # Only remove the file when it actually exists — the previous bare
    # ``except: pass`` swallowed every error (including permission problems).
    if overwrite and os.path.exists(filepath):
        os.remove(filepath)
    with open(filepath, mode, encoding='utf8') as writer:
        for line in input_list:
            writer.write(json.dumps(line) + '\n')
def load_txt_file(
    filepath: str
) -> List[str]:
    """Read a text file and return its lines (newline characters included).

    *arguments*
        *filepath* path to the file
    """
    with open(filepath, 'r', encoding='utf8') as handle:
        return handle.readlines()
def write_txt_file(
    filepath: str,
    input_list: List[str],
    mode: str = 'a+',
    overwrite: bool = False
) -> None:
    """Write a list of strings into a text file, one per line.

    *arguments*
        *filepath* path to save the file into
        *input_list* list of strings to write
        *mode* file mode passed to open() (default 'a+': append)
        *overwrite* when True the existing file is removed first;
            when False new lines are appended to an existing file.
    """
    # Only remove the file when it actually exists — the previous bare
    # ``except: pass`` swallowed every error (including permission problems).
    if overwrite and os.path.exists(filepath):
        os.remove(filepath)
    with open(filepath, mode, encoding='utf8') as writer:
        for line in input_list:
            writer.write(line + '\n')
def create_line_plot(
    data: List[Union[float, int]],
    title: str ='',
    xlabel: str ='',
    ylabel: str='',
    legend_label: str ='',
    color: str ='blue'
) -> None:
    """ Create a line plot from a list of numbers.
    *arguments*
        *data* the data points to be plotted (x is the index, y the value)
        *title* the title for the plot
        *xlabel* the label of the x axis
        *ylabel* the label of the y axis
        *legend_label* the label of the legend
        *color* the line color
    """
    plt.figure(figsize=(10, 6))
    plt.plot(data, label=legend_label, color=color)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.legend()
    plt.grid(True)
    # plt.show() typically blocks until the window is closed (backend-dependent).
    plt.show()
| LucaDeGrandis/utils | scripts/generic_functions.py | generic_functions.py | py | 4,159 | python | en | code | 0 | github-code | 90 |
35729983925 | from pymongo import MongoClient
import datetime
# PyMongo tutorial script: connect to a local mongod with default settings,
# insert one document into test.posts, and print the generated ObjectId.
client = MongoClient()
db = client.test
posts = db.posts
post = {"author": "Mike",
        "text": "My first blog post!",
        "tags": ["mongodb", "python", "pymongo"],
        "date": datetime.datetime.now()}
# insert_one returns an InsertOneResult carrying the new document's _id.
return_post = posts.insert_one(post)
print(return_post.inserted_id)
# The collection is created lazily, so it only appears here after the insert.
print(db.list_collection_names())
| Jacob-xyb/Web_Note | 02_Web数据库/001_MongoDB/003_PyMongo/01_Tutorial/005_Inserting_a_Document.py | 005_Inserting_a_Document.py | py | 367 | python | en | code | 0 | github-code | 90 |
19838591038 | import torch
def encode(t: torch.Tensor, dim: int = 10):
    """Apply a sinusoidal positional encoding to *t*.

    Every scalar of ``t`` is expanded into ``2 * dim`` features:
    ``sin(2**k * pi * t)`` and ``cos(2**k * pi * t)`` for k in [0, dim),
    interleaved as (sin, cos) pairs along the new last axis, which is then
    flattened together with the original last axis.

    Args:
        t: tensor to encode
        dim: number of frequency bands

    Returns:
        encoded tensor
    """
    last_axis = t.dim()
    # Frequencies 2**k * pi, built on CPU and moved to t's device for the matmul.
    frequencies = (2 ** torch.arange(0, dim)) * torch.pi
    scaled = t.unsqueeze(last_axis) @ frequencies.unsqueeze(0).to(t.device)
    interleaved = torch.stack((torch.sin(scaled), torch.cos(scaled)), -1)
    return interleaved.flatten(last_axis - 1, -1)
| ShreyanshDarshan/NeuralPixels | positional_encoding.py | positional_encoding.py | py | 709 | python | en | code | 0 | github-code | 90 |
def solution(files, loss):
    """Simulate round-robin file transfer with periodic packet loss.

    files -- list of file sizes; file ids are their 1-based positions
    loss  -- every loss-th tick transfers nothing (the ``cnt % loss`` check)
    Returns the file ids in the order they finish transferring.

    NOTE(review): the print() calls are leftover debug output — the return
    value is the actual answer.
    """
    answer = []
    files_dict = {}  # remaining size per file id
    temp = {}        # progress accumulated this round, applied at round end
    for i, v in enumerate(files):
        files_dict[i + 1] = v
        temp[i + 1] = 0
    filecnt = 0   # ticks spent on the current file within this round
    index = 1     # id of the file currently receiving ticks
    cnt = -1      # global tick counter, used for the loss cadence
    keyindex = 0  # position of `index` within tempkeys
    tempkeys = list(temp.keys())
    while temp:
        cnt = cnt + 1
        filecnt = filecnt + 1
        print(files_dict, temp)
        # Current file's tick budget exhausted: advance to the next file,
        # or finish the round if this was the last unfinished file.
        if filecnt > files_dict[index]:
            filecnt = 1
            if index == tempkeys[-1]:
                keyindex = 0
                cnt = cnt % loss
                # End of round: commit this round's progress and collect
                # files that have fully transferred.
                for j in tempkeys:
                    files_dict[j] = files_dict[j] - temp[j]
                    temp[j] = 0
                    if files_dict[j] == 0:
                        answer.append(j)
                        del temp[j]
                tempkeys = list(temp.keys())
                if len(temp) <= 0:
                    break
                index = tempkeys[keyindex]
            else:
                tempkeys = list(temp.keys())
                keyindex = keyindex + 1
                index = tempkeys[keyindex]
            print(files_dict, temp, index)
        # Every loss-th tick is dropped; otherwise the current file progresses.
        if cnt % loss == loss - 1:
            pass
        else:
            temp[index] = temp[index] + 1
    print(answer)
    return answer
# 0000 00000 00 000 0000
# 1101 10110 11 011 0110
# 1111 11110 11 111 1110
# 1111 11111 11 111 1111
print(solution([4, 5, 2, 3, 4], 3) == [3, 1, 4, 2, 5])
print(solution([3, 2, 3, 4], 2) == [1, 2, 4, 3])
print(solution([5, 5, 5, 5, 5], 31) == [1, 2, 3, 4, 5])
| JisungKim94/CodingTest | Programmers/SW인증시험.py | SW인증시험.py | py | 1,511 | python | en | code | 0 | github-code | 90 |
34776187866 | from dataclasses import dataclass
from datetime import date
from typing import Dict, Generator, List, Tuple
from or_shifty.person import Person
from or_shifty.shift import Shift
Idx = Tuple[int, int, int, int]
PersonShift = int
@dataclass(frozen=True)
class IndexEntry:
    """One cell of the 4-D assignment space, pairing an index tuple with its meaning."""
    # idx is (person_idx, person_shift_idx, day_idx, day_shift_idx).
    idx: Idx
    person: Person
    person_shift: PersonShift
    day: date
    day_shift: Shift
@dataclass(frozen=True)
class Indexer:
    """Bidirectional mapping between (person, person-shift, day, shift) and 4-D indices.

    Built once via build(); get() resolves an index tuple, iter() enumerates
    entries matching optional filters in sorted index order.
    """
    _person_indices: Dict[Person, int]
    _day_indices: Dict[date, int]
    _day_shift_indices: Dict[Tuple[date, Shift], int]
    _index_entries: Dict[Idx, IndexEntry]
    @classmethod
    def build(
        cls,
        people: List[Person],
        max_shifts_per_person: int,
        shifts_per_day: Dict[date, List[Shift]],
    ):
        """Enumerate every (person, person-shift, day, day-shift) combination.

        Days are indexed in chronological order; shifts within a day are
        indexed by shift name, so the ordering is deterministic.
        """
        _person_indices = {}
        _day_indices = {}
        _day_shift_indices = {}
        _index_entries = {}
        for person_idx, person in enumerate(people):
            _person_indices[person] = person_idx
            for person_shift_idx in range(max_shifts_per_person):
                for day_idx, day in enumerate(sorted(shifts_per_day.keys())):
                    _day_indices[day] = day_idx
                    for shift_idx, shift in enumerate(
                        sorted(shifts_per_day[day], key=lambda s: s.name)
                    ):
                        _day_shift_indices[(day, shift)] = shift_idx
                        idx = (
                            person_idx,
                            person_shift_idx,
                            day_idx,
                            shift_idx,
                        )
                        _index_entries[idx] = IndexEntry(
                            idx, person, person_shift_idx, day, shift,
                        )
        return cls(
            _person_indices=_person_indices,
            _day_indices=_day_indices,
            _day_shift_indices=_day_shift_indices,
            _index_entries=_index_entries,
        )
    def get(self, index: Idx) -> IndexEntry:
        """Resolve one 4-D index tuple to its IndexEntry (KeyError if absent)."""
        return self._index_entries[index]
    def iter(
        self,
        person_filter: Person = None,
        person_shift_filter: PersonShift = None,
        day_filter: date = None,
        day_shift_filter: Shift = None,
    ) -> Generator[IndexEntry, None, None]:
        """Return all indices that match the given filters

        This implementation is not efficient as it sorts and traverses all the indices every
        time. This should be fine for normal use as the cost of solving for the rota should be
        the bottleneck as the indices grow, not this.
        """
        if day_shift_filter is not None:
            assert (
                day_filter is not None
            ), "day_shift_filter can only be used together with day_filter"
        # Predicate over a raw index tuple; each filter narrows one dimension.
        def _filter(idx_):
            (person_idx, person_shift_idx, day_idx, day_shift_idx) = idx_
            if (
                person_filter is not None
                and self._person_indices[person_filter] != person_idx
            ):
                return False
            if (
                person_shift_filter is not None
                and person_shift_filter != person_shift_idx
            ):
                return False
            if day_filter is not None:
                if self._day_indices[day_filter] != day_idx:
                    return False
                if (
                    day_shift_filter is not None
                    and self._day_shift_indices[(day_filter, day_shift_filter)]
                    != day_shift_idx
                ):
                    return False
            return True
        # Sorted traversal keeps the yield order deterministic.
        for idx in sorted(self._index_entries.keys()):
            if _filter(idx):
                yield self._index_entries[idx]
| Dalamar42/or-shifty | or_shifty/indexer.py | indexer.py | py | 3,757 | python | en | code | 11 | github-code | 90 |
24623030501 | import os
import time
import asyncio
from typing import Dict, Mapping, Iterable, Optional, Any, Union, cast
import aioredis
import aioredis.util
import redis
from dragonchain import logger
_log = logger.get_logger()
REDIS_ENDPOINT = os.environ["REDIS_ENDPOINT"]
LRU_REDIS_ENDPOINT = os.environ["LRU_REDIS_ENDPOINT"]
REDIS_PORT = int(os.environ["REDIS_PORT"]) or 6379
redis_client = cast(redis.Redis, None)
redis_client_lru = cast(redis.Redis, None)
async_redis_client = cast(aioredis.Redis, None)
def _set_redis_client_if_necessary() -> None:
    """Lazily create the module-level persistent redis client on first use."""
    global redis_client
    if redis_client is None:
        redis_client = _initialize_redis(host=REDIS_ENDPOINT, port=REDIS_PORT)
def _set_redis_client_lru_if_necessary() -> None:
    """Lazily create the module-level LRU-cache redis client on first use."""
    global redis_client_lru
    if redis_client_lru is None:
        redis_client_lru = _initialize_redis(host=LRU_REDIS_ENDPOINT, port=REDIS_PORT)
async def _set_redis_client_async_if_necessary() -> None:
    """Lazily create the module-level async redis client on first use."""
    global async_redis_client
    if async_redis_client is None:
        async_redis_client = await _initialize_async_redis(host=REDIS_ENDPOINT, port=REDIS_PORT)
def _decode_response(response: Any, decode: bool) -> Any:
if decode and isinstance(response, bytes):
return response.decode("utf-8")
return response
def _decode_dict_response(response: Mapping[Any, Any], decode: bool) -> Any:
if decode is True:
new_response = {}
for key, value in response.items():
if isinstance(key, bytes):
key = key.decode("utf-8")
if isinstance(value, bytes):
value = value.decode("utf-8")
new_response[key] = value
return new_response
return response
def _decode_list_response(response: Iterable[Any], decode: bool) -> Any:
if decode is True:
new_response = []
for val in response:
if isinstance(val, bytes):
val = val.decode("utf-8")
new_response.append(val)
return new_response
return response
def _decode_tuple_response(response: Iterable[Any], decode: bool) -> Any:
if decode is True:
new_list = _decode_list_response(response, True)
return tuple(new_list)
return response
def _decode_set_response(response: Iterable[Any], decode: bool) -> Any:
if decode is True:
new_response = set()
for val in response:
if isinstance(val, bytes):
val = val.decode("utf-8")
new_response.add(val)
return new_response
return response
def _initialize_redis(host: str, port: int, wait_time: int = 30) -> redis.Redis:
    """Initialize a redis, but ensure that the redis is up and connectable, otherwise throw an error

    Args:
        host: host of the redis to initialize a connection
        port: port of the redis to initialize a connection
        wait_time: number of seconds to wait with a failed connection before throwing a RuntimeException

    Returns:
        Redis-py (https://redis-py.readthedocs.io/en/latest/) client that is connected and available
    """
    expire_time = time.time() + wait_time
    _log.debug(f"Attempting to connect to redis at {host}:{port}")
    client = redis.Redis(host=host, port=port)
    sleep_time = 1  # Number of seconds to wait after a failure to connect before retrying
    while time.time() < expire_time:
        try:
            if client.ping():
                _log.debug(f"Successfully connected with redis at {host}:{port}")
                return client  # Connected to a working redis, return now
        except Exception:  # nosec (We want to retry for truly any exception)
            pass
        time.sleep(sleep_time)
    # Retry window exhausted without a successful PING.
    raise RuntimeError(f"Unable to initialize and connect to the redis at {host}:{port}")
async def _initialize_async_redis(host: str, port: int, wait_time: int = 30) -> aioredis.Redis:
    """Initiailize an aioredis, but ensure that the redis is up and connectable, otherwise throw an error

    Args:
        host: host of the redis to initialize a connection
        port: port of the redis to initialize a connection
        wait_time: number of seconds to wait with a failed connection before throwing a RuntimeException

    Returns:
        aioredis (https://aioredis.readthedocs.io/en/latest/) client (with a connection pool) that is connected and available
    """
    expire_time = time.time() + wait_time
    _log.debug(f"Attempting to connect to redis at {host}:{port}")
    sleep_time = 1  # Number of seconds to wait after a failure to connect before retrying
    while time.time() < expire_time:
        try:
            # Unlike the sync variant, the pool itself is (re)created each attempt.
            client = await aioredis.create_redis_pool((host, port))
            if await client.ping():
                _log.debug(f"Successfully connected with redis at {host}:{port}")
                return client  # Connected to a working redis, return now
        except Exception:  # nosec (We want to retry for truly any exception)
            pass
        await asyncio.sleep(sleep_time)
    raise RuntimeError(f"Unable to initialize and connect to the redis at {host}:{port}")
# ASYNC REDIS
async def z_range_by_score_async(
    key: str, min_num: int, max_num: int, withscores: bool = False, offset: Optional[int] = None, count: Optional[int] = None, *, decode: bool = True
) -> list:
    """Async ZRANGEBYSCORE; utf8-decodes results unless decode=False."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.zrangebyscore(
        key, min_num, max_num, withscores=withscores, offset=offset, count=count, encoding="utf8" if decode else aioredis.util._NOTSET
    )
async def srem_async(key: str, value: str) -> int:
    """Async SREM: remove *value* from the set at *key*."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.srem(key, value)
async def get_async(key: str, *, decode: bool = True) -> Optional[str]:
    """Async GET; utf8-decodes the value unless decode=False."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.get(key, encoding="utf8" if decode else aioredis.util._NOTSET)
async def set_async(key: str, value: str, *, expire: int = 0, pexpire: int = 0, exist: Optional[bool] = None) -> bool:
    """Async SET with optional expiry (seconds/milliseconds) and NX/XX behavior."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.set(key, value, expire=expire, pexpire=pexpire, exist=exist)
async def zadd_async(key: str, score: int, member: str, exist: Optional[bool] = None) -> int:
    """Async ZADD: add *member* with *score* to the sorted set at *key*."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.zadd(key, score, member, exist=exist)
async def smembers_async(key: str, *, decode: bool = True) -> set:
    """Async SMEMBERS, returned as a set; utf8-decodes unless decode=False."""
    await _set_redis_client_async_if_necessary()
    return set(await async_redis_client.smembers(key, encoding="utf8" if decode else aioredis.util._NOTSET))
async def multi_exec_async() -> aioredis.commands.transaction.MultiExec:
    """Return a MULTI/EXEC transaction builder bound to the async client."""
    await _set_redis_client_async_if_necessary()
    return async_redis_client.multi_exec()
async def hgetall_async(key: str, *, decode: bool = True) -> dict:
    """Async HGETALL; utf8-decodes fields/values unless decode=False."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.hgetall(key, encoding="utf8" if decode else aioredis.util._NOTSET)
async def rpush_async(key: str, value: str, *values: str) -> int:
    """Async RPUSH: append one or more values to the list at *key*."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.rpush(key, value, *values)
async def delete_async(key: str, *keys: str) -> int:
    """Async DEL for one or more keys; returns the number deleted."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.delete(key, *keys)
async def brpop_async(key: str, *keys: str, timeout: int = 0, decode: bool = True) -> list:
    """Async BRPOP with optional timeout (0 blocks indefinitely)."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.brpop(key, *keys, timeout=timeout, encoding="utf8" if decode else aioredis.util._NOTSET)
async def hset_async(key: str, field: str, value: str) -> Optional[int]:
    """Async HSET: set *field* of the hash at *key*."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.hset(key, field, value)
async def hdel_async(key: str, field: str, *fields: str) -> int:
    """Async HDEL: remove one or more fields from the hash at *key*."""
    await _set_redis_client_async_if_necessary()
    return await async_redis_client.hdel(key, field, *fields)
# LRU REDIS
def _cache_key(key: str, service_name: str) -> str:
return f"{service_name}:{key}"
def cache_put(key: str, value: Union[str, bytes], cache_expire: Optional[int] = None, service_name: str = "storage") -> bool:
    """Store *value* in the LRU cache under a service-namespaced key; returns success."""
    _set_redis_client_lru_if_necessary()
    # ex has 'or None' here because 0 for cache expire must be set as none
    return redis_client_lru.set(_cache_key(key, service_name), value, ex=(cache_expire or None)) or False
def cache_get(key: str, service_name: str = "storage") -> Optional[bytes]:
    """Fetch a cached value (raw bytes), or None when absent/evicted."""
    _set_redis_client_lru_if_necessary()
    return redis_client_lru.get(_cache_key(key, service_name))
def cache_delete(key: str, service_name: str = "storage") -> int:
    """Remove one cached key; returns the number of keys deleted (0 or 1)."""
    _set_redis_client_lru_if_necessary()
    return redis_client_lru.delete(_cache_key(key, service_name))
def cache_flush() -> bool:
    """Flush the entire LRU cache redis (all services' entries)."""
    _set_redis_client_lru_if_necessary()
    return redis_client_lru.flushall()
# PESISTENT REDIS
def hdel_sync(name: str, *keys: str) -> int:
    """Delete hash field(s); returns the number of fields removed."""
    _set_redis_client_if_necessary()
    return redis_client.hdel(name, *keys)
def lpush_sync(name: str, *values: str) -> int:
    """Push value(s) onto the head of a list; returns the new list length."""
    _set_redis_client_if_necessary()
    return redis_client.lpush(name, *values)
def sadd_sync(name: str, *values: str) -> int:
    """Add member(s) to a set; returns the number of members newly added."""
    _set_redis_client_if_necessary()
    return redis_client.sadd(name, *values)
def sismember_sync(name: str, value: str) -> bool:
    """Return True if value is a member of the set."""
    _set_redis_client_if_necessary()
    return redis_client.sismember(name, value)
def rpush_sync(name: str, *values: str) -> int:
    """Push value(s) onto the tail of a list; returns the new list length."""
    _set_redis_client_if_necessary()
    return redis_client.rpush(name, *values)
def delete_sync(*names: str) -> int:
    """Delete key(s); returns the number of keys removed."""
    _set_redis_client_if_necessary()
    return redis_client.delete(*names)
def hset_sync(name: str, key: str, value: str) -> int:
    """Set a hash field; returns 1 if the field is new, 0 if overwritten."""
    _set_redis_client_if_necessary()
    return redis_client.hset(name, key, value)
def brpop_sync(keys: str, timeout: int = 0, decode: bool = True) -> Optional[tuple]:
    """Perform a blocking pop against redis list(s)
    Args:
        keys: Can be a single key (bytes, string, int, etc), or an array of keys to wait on
        timeout: Number of seconds to wait before 'timing out' and returning None. If 0, it will block indefinitely (default)
    Returns:
        None when no element could be popped and the timeout expired. This is only possible when timeout is not 0
        A tuple with the first element being the key where the element was popped, and the second element being the value of the popped element.
    """
    _set_redis_client_if_necessary()
    response = redis_client.brpop(keys, timeout=timeout)
    if not response:
        return None
    return _decode_tuple_response(response, decode)
def brpoplpush_sync(pop_key: str, push_key: str, timeout: int = 0, decode: bool = True) -> Optional[str]:
    """Perform a blocking pop against redis list(s)
    Args:
        pop_key: Can be a single key (bytes, string, int, etc), or an array of keys to wait on popping from
        push_key: key to push currently processing items to
        timeout: Number of seconds to wait before 'timing out' and returning None. If 0, it will block indefinitely (default)
    Returns:
        None when no element could be popped and the timeout expired. This is only possible when timeout is not 0
        The element that was moved between the lists
    """
    _set_redis_client_if_necessary()
    response = redis_client.brpoplpush(pop_key, push_key, timeout)
    if response is None:
        return None
    return _decode_response(response, decode)
def get_sync(name: str, decode: bool = True) -> Optional[str]:
    """Get a key's value, or None if it does not exist."""
    _set_redis_client_if_necessary()
    response = redis_client.get(name)
    return _decode_response(response, decode)
def lindex_sync(name: str, index: int, decode: bool = True) -> Optional[str]:
    """Get the list element at index (negative indexes count from the tail)."""
    _set_redis_client_if_necessary()
    response = redis_client.lindex(name, index)
    return _decode_response(response, decode)
def set_sync(key: str, value: str, ex: Optional[int] = None) -> bool:
    """Set a key, optionally with a TTL (seconds); returns True on success."""
    _set_redis_client_if_necessary()
    return redis_client.set(key, value, ex=ex) or False
def ltrim_sync(key: str, start: int, end: int) -> bool:
    """Trim a list so only the elements in [start, end] remain."""
    _set_redis_client_if_necessary()
    return redis_client.ltrim(key, start, end)
def hget_sync(name: str, key: str, decode: bool = True) -> Optional[str]:
    """Get a single hash field's value, or None if missing."""
    _set_redis_client_if_necessary()
    response = redis_client.hget(name, key)
    return _decode_response(response, decode)
def smembers_sync(name: str, decode: bool = True) -> set:
    """Return all members of a set."""
    _set_redis_client_if_necessary()
    response = redis_client.smembers(name)
    return _decode_set_response(response, decode)
def srem_sync(name: str, *values: str) -> int:
    """Remove member(s) from a set; returns the number actually removed."""
    _set_redis_client_if_necessary()
    return redis_client.srem(name, *values)
def lrange_sync(name: str, start: int, end: int, decode: bool = True) -> list:
    """Return the list slice [start, end] (inclusive, redis semantics)."""
    _set_redis_client_if_necessary()
    response = redis_client.lrange(name, start, end)
    return _decode_list_response(response, decode)
def pipeline_sync(transaction: bool = True) -> redis.client.Pipeline:
    """Create a pipeline for batching commands (MULTI/EXEC when transaction)."""
    _set_redis_client_if_necessary()
    return redis_client.pipeline(transaction=transaction)
def llen_sync(name: str) -> int:
    """Return the length of a list (0 when the key does not exist)."""
    _set_redis_client_if_necessary()
    return redis_client.llen(name)
def rpoplpush_sync(src: str, dst: str, decode: bool = True) -> Optional[str]:
    """Atomically pop the tail of src and push it onto the head of dst."""
    _set_redis_client_if_necessary()
    response = redis_client.rpoplpush(src, dst)
    return _decode_response(response, decode)
def lpop_sync(name: str, decode: bool = True) -> Optional[str]:
    """Pop and return the head of a list, or None when empty."""
    _set_redis_client_if_necessary()
    response = redis_client.lpop(name)
    return _decode_response(response, decode)
def hgetall_sync(name: str, decode: bool = True) -> dict:
    """Return the whole hash as a field -> value dict."""
    _set_redis_client_if_necessary()
    response = redis_client.hgetall(name)
    return _decode_dict_response(response, decode)
def hexists_sync(name: str, key: str) -> bool:
    """Return True if the hash contains the given field."""
    _set_redis_client_if_necessary()
    return redis_client.hexists(name, key)
def zadd_sync(name: str, mapping: Dict[str, int], nx: bool = False, xx: bool = False, ch: bool = False, incr: bool = False) -> int:
    """Add member -> score pairs to a sorted set (see redis ZADD flags)."""
    _set_redis_client_if_necessary()
    return redis_client.zadd(name, mapping, nx=nx, xx=xx, ch=ch, incr=incr)  # noqa: T484
| dragonchain/dragonchain | dragonchain/lib/database/redis.py | redis.py | py | 14,227 | python | en | code | 701 | github-code | 90 |
9587444970 | import torch
import torch.nn as nn
import numpy as np
from utils.utils import AverageMeter, ProgressMeter
from torch.utils.tensorboard import SummaryWriter
# Module-level TensorBoard writer shared by every epoch of this process.
writer = SummaryWriter()
# Cross-entropy used for both the classification and the position heads.
xent = nn.CrossEntropyLoss()
def simclr_train(train_loader, model, criterion, optimizer, epoch, pos_dataloader):
    """
    Train one epoch according to the scheme from SimCLR
    https://arxiv.org/abs/2002.05709

    Combines the contrastive SimCLR loss with a classification-head loss and a
    relative-position prediction loss, then logs the epoch means to TensorBoard
    via the module-level `writer`.
    """
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader),
                             [losses],
                             prefix="Epoch: [{}]".format(epoch))
    losses_cls = []
    losses_simclr = []
    losses_total = []
    losses_pos = []
    model.train()
    pos_data_iter = iter(pos_dataloader)
    for i, batch in enumerate(train_loader):
        # Cycle the position dataloader when it runs out. Catch only
        # StopIteration (the original bare `except:` hid real errors) and use
        # the builtin next() instead of the removed py2-style .next() method.
        try:
            input1, input2, pos_gt = next(pos_data_iter)
        except StopIteration:
            pos_data_iter = iter(pos_dataloader)
            input1, input2, pos_gt = next(pos_data_iter)
        input1, input2 = input1.cuda(non_blocking=True), input2.cuda(non_blocking=True)
        pos_gt = pos_gt.cuda(non_blocking=True)
        images = batch['image']
        images_augmented = batch['image_augmented']
        b, c, h, w = images.size()
        # Interleave the two views: (b, 2, c, h, w) -> (2b, c, h, w).
        input_ = torch.cat([images.unsqueeze(1), images_augmented.unsqueeze(1)], dim=1)
        input_ = input_.view(-1, c, h, w)
        input_ = input_.cuda(non_blocking=True)
        labels = batch['target'].cuda(non_blocking=True)
        output, logits, pred_pos = model(input_, input1, input2)
        loss_pos = xent(pred_pos, pos_gt)
        output = output.view(b, 2, -1)
        labels = labels.repeat(2)  # one label per view
        labels = labels.cuda(non_blocking=True)
        loss_cls = xent(logits, labels)
        loss = criterion(output)
        total_loss = loss + loss_cls + loss_pos
        losses_cls.append(loss_cls.detach().cpu().numpy())
        losses_simclr.append(loss.detach().cpu().numpy())
        # BUG FIX: the position loss was computed but never recorded, so the
        # epoch mean below averaged an empty list (NaN in TensorBoard).
        losses_pos.append(loss_pos.detach().cpu().numpy())
        losses_total.append(total_loss.detach().cpu().numpy())
        losses.update(total_loss.item())
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        if i % 25 == 0:
            progress.display(i)
    loss_mean_simclr = np.array(losses_simclr).mean()
    loss_mean_clr = np.array(losses_cls).mean()
    loss_mean_total = np.array(losses_total).mean()
    loss_mean_pos = np.array(losses_pos).mean()
    writer.add_scalar('Loss/cls', loss_mean_clr, epoch)
    writer.add_scalar('Loss/simclr', loss_mean_simclr, epoch)
    writer.add_scalar('Loss/pos', loss_mean_pos, epoch)
    writer.add_scalar('Loss/total', loss_mean_total, epoch)
| tianyu0207/CCD | colon_local/utils/train_utils.py | train_utils.py | py | 2,707 | python | en | code | 41 | github-code | 90 |
71832941096 | from tkinter import *
from pathlib import Path
from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage
# Resolve asset paths relative to this script's directory so the app can be
# launched from any working directory.
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")
def relative_to_assets(path: str) -> Path:
    """Return the absolute Path of an asset file bundled with the app."""
    return ASSETS_PATH.joinpath(path)
# Fixed-size main window for the converter launcher.
window = Tk()
window.geometry("568x382")
window.configure(bg="#FFFFFF")
def imgtopdf():
    """Close the launcher and open the image-to-PDF GUI (module i2p)."""
    window.destroy()
    import i2p
def p2w():
    """Close the launcher and run the pdf2docx GUI as a subprocess."""
    window.destroy()
    import os
    # use pip install pdf2docx
    stream = os.popen('pdf2docx gui')
    output = stream.read()
    # NOTE(review): the bare `output` expression below is a no-op; the read
    # only serves to block until the subprocess finishes.
    output
def wordtopdf():
    """Close the launcher and open the Word-to-PDF GUI (module w2p)."""
    window.destroy()
    import w2p
# --- Static layout: a full-window canvas with the welcome text ------------
canvas = Canvas(
    window,
    bg="#FFFFFF",
    height=382,
    width=568,
    bd=0,
    highlightthickness=0,
    relief="ridge"
)
canvas.place(x=0, y=0)
canvas.create_rectangle(
    0.0,
    0.0,
    568.0,
    382.0,
    fill="#FFFFFF",
    outline="")
canvas.create_text(
    82.0,
    140.0,
    anchor="nw",
    text=" Each of these buttons will take you to a different GUI for operations.",
    fill="#000000",
    font=("Poppins Regular", 12 * -1)
)
canvas.create_text(
    83.0,
    122.0,
    anchor="nw",
    text="Please click any of the conversion method buttons provided below.",
    fill="#000000",
    font=("Poppins Regular", 12 * -1)
)
canvas.create_text(
    199.0,
    31.0,
    anchor="nw",
    text="Welcome!",
    fill="#000000",
    font=("Sen Bold", 36 * -1)
)
canvas.create_text(
    118.0,
    89.0,
    anchor="nw",
    text="This is a converter application developed using python.",
    fill="#000000",
    font=("Poppins Regular", 12 * -1)
)
# --- Buttons. The PhotoImage objects are kept in module-level names so the
# --- images are not garbage-collected while the window is alive.
# Word -> PDF
button_image_1 = PhotoImage(
    file=relative_to_assets("button_1.png"))
button_1 = Button(
    image=button_image_1,
    borderwidth=0,
    highlightthickness=0,
    command=wordtopdf,
    relief="flat"
)
button_1.place(
    x=36.0,
    y=200.0,
    width=124.0,
    height=29.0
)
# Exit button
button_image_2 = PhotoImage(
    file=relative_to_assets("button_2.png"))
button_2 = Button(
    image=button_image_2,
    borderwidth=0,
    highlightthickness=0,
    command=window.destroy,
    relief="flat"
)
button_2.place(
    x=249.0,
    y=306.0,
    width=69.0,
    height=29.0
)
# PDF -> Word
button_image_3 = PhotoImage(
    file=relative_to_assets("button_3.png"))
button_3 = Button(
    image=button_image_3,
    borderwidth=0,
    highlightthickness=0,
    command=p2w,
    relief="flat"
)
button_3.place(
    x=222.0,
    y=200.0,
    width=124.0,
    height=29.0
)
# Image -> PDF
button_image_4 = PhotoImage(
    file=relative_to_assets("button_4.png"))
button_4 = Button(
    image=button_image_4,
    borderwidth=0,
    highlightthickness=0,
    command=imgtopdf,
    relief="flat"
)
button_4.place(
    x=408.0,
    y=200.0,
    width=124.0,
    height=29.0
)
window.resizable(False, False)
window.mainloop()
| guneetsura/converterapppy | index.py | index.py | py | 2,781 | python | en | code | 0 | github-code | 90 |
27704172558 | import flask
import time
import socket
import subprocess
# Hostname and IP are resolved once at import time and reported in every
# response, which lets a load balancer demo show which instance answered.
h_name = socket.gethostname()
IP_address = socket.gethostbyname(h_name)
app = flask.Flask(__name__)
@app.route('/')
def index():
    """Report the current time, the serving host/port and the server uptime."""
    now = time.strftime("%H:%M:%S")
    remote_port = str(flask.request.environ.get('REMOTE_PORT'))
    # Uptime comes from the system `uptime` command; keep only the first
    # comma-separated field (the "up ..." portion).
    raw_uptime = subprocess.check_output(['uptime']).decode('utf-8')
    uptime_info = raw_uptime.split(',')[0].strip()
    return (f"{now} This Service is from the Host: {h_name} "
            f"({IP_address}):{remote_port}.\n"
            f"Server Uptime: {uptime_info}.\n")
if __name__ == '__main__':
    # Listen on all interfaces so the service is reachable inside containers.
    app.run(host='0.0.0.0', port=8080)
| Parth-Works/Automation-with-Ansible | application2.py | application2.py | py | 764 | python | en | code | 0 | github-code | 90 |
73501447657 | import sys
# NOTE(review): shadows the builtin `input` for faster competitive-programming reads.
input = sys.stdin.readline
# n rows, m columns of a 0/1 grid read as one digit string per row.
n, m = map(int, input().rstrip().split())
graph = [[] for _ in range(n)]
for i in range(n) :
    instr = input().rstrip()
    for c in instr:
        graph[i].append(int(c))
# Right / down / left / up offsets shared by the DFS below.
dx = [1, 0, -1, 0]
dy = [0, 1, 0 ,-1]
def dfs(n, m, graph, stack) :
    """Flood-fill every 0-cell reachable from the seeds on `stack`, marking them 2.

    Args:
        n: number of rows; m: number of columns.
        graph: 2-D grid of ints indexed graph[y][x]; 0 = empty, filled cells become 2.
        stack: list of (x, y) seed coordinates; consumed (emptied) in place.
    """
    # Direction vectors are local so the traversal no longer depends on the
    # module-level dx/dy globals (self-contained and independently testable).
    dxs = (1, 0, -1, 0)
    dys = (0, 1, 0, -1)
    while stack :
        x, y = stack.pop()
        if graph[y][x] == 2:
            continue  # already filled via another path
        graph[y][x] = 2
        # Push unvisited 0-neighbours directly; the original also re-pushed the
        # current cell once per neighbour, churning the stack needlessly.
        for ox, oy in zip(dxs, dys):
            nx, ny = x + ox, y + oy
            if 0 <= nx < m and 0 <= ny < n and graph[ny][nx] == 0:
                stack.append((nx, ny))
def solution(n, m, graph) :
    """Count connected regions of 0-cells (the 'ice tray' holes).

    Mutates `graph`: every visited 0-cell is marked 2 by dfs().
    """
    pending = []
    regions = 0
    for row in range(n):
        for col in range(m):
            if graph[row][col] == 0:
                # New, unvisited region: seed the DFS from this cell.
                pending.append((col, row))
                regions += 1
                dfs(n, m, graph, pending)
    return regions
print(solution(n, m, graph))
# Debug output: show the grid after flood-fill (visited cells marked 2).
for row in graph :
    print(row)
| Err0rCode7/algorithm | baekjoon/bfs_dfs/음료수얼려먹기.py | 음료수얼려먹기.py | py | 792 | python | en | code | 0 | github-code | 90 |
74330171176 | import pandas as pd
def preprocess(data_file, labels_file=None):
    """Load and clean a DengAI feature CSV, returning per-city frames.

    Args:
        data_file: path or buffer of the feature CSV, indexed by its first
            three columns (city, year, weekofyear).
        labels_file: optional path/buffer of the labels CSV with the same index.

    Returns:
        (sj, iq, sj_label, iq_label): feature DataFrames for the 'sj' and 'iq'
        cities and their label frames (labels are None when labels_file is
        not given).
    """
    features = ['reanalysis_specific_humidity_g_per_kg',
                'reanalysis_dew_point_temp_k',
                'station_avg_temp_c', 'precipitation_amt_mm', 'week_start_date']
    df = pd.read_csv(data_file, index_col=[0, 1, 2])
    # 5-week moving averages smooth the noisy weekly measurements.
    # NOTE(review): the rolling window runs over the whole frame, so a few
    # rows leak across the sj/iq boundary — confirm this is intended.
    df['station_avg_temp_c_mv_avg'] = df['station_avg_temp_c'].rolling(window=5).mean()
    df['precipitation_amt_mm_mv_avg'] = df['precipitation_amt_mm'].rolling(window=5).mean()
    features.append('station_avg_temp_c_mv_avg')
    features.append('precipitation_amt_mm_mv_avg')
    # Forward-fill gaps, then fall back to column means for any leading NaNs.
    # df.ffill() replaces the deprecated fillna(method='ffill'); numeric_only
    # keeps DataFrame.mean() from raising on the string date column (pandas 2).
    df = df.ffill()
    df = df.fillna(df.mean(numeric_only=True))
    df['week_start_date'] = pd.to_datetime(df['week_start_date'])
    # One-hot quarter-of-year indicators. Timestamp exposes .month directly;
    # the original called the removed Timestamp.to_datetime(), which raises
    # AttributeError on modern pandas. `q=i` pins the loop variable.
    for i in range(1, 5):
        df['quarter_' + str(i)] = df['week_start_date'].apply(
            lambda date, q=i: 1 if ((q - 1) * 3 < date.month <= q * 3) else 0)
        features.append('quarter_' + str(i))
    df = df.drop(['week_start_date'], axis=1)
    features.remove('week_start_date')
    df = df[features]
    sj_label = None
    iq_label = None
    if labels_file:
        labels = pd.read_csv(labels_file, index_col=[0, 1, 2]).loc[df.index]
        sj_label = pd.DataFrame(labels.loc['sj'])
        iq_label = pd.DataFrame(labels.loc['iq'])
    sj = pd.DataFrame(df.loc['sj'])
    iq = pd.DataFrame(df.loc['iq'])
    return sj, iq, sj_label, iq_label
| HimashiNethinikaRodrigo/DengiAI | code/model_2_preprocess.py | model_2_preprocess.py | py | 1,444 | python | en | code | 0 | github-code | 90 |
41921443877 | import win32com.client
import datetime as dt
import tkinter as tk
from tkinter import filedialog, simpledialog, messagebox
import os
import sys
# Hidden root window so the dialog boxes below can be shown standalone.
app_window = tk.Tk()
app_window.withdraw()
senders_count = 1
# Map of known asset-owner email domains -> company name, loaded from CSV.
senders = {}
if os.path.isfile("DBYD_asset_owners.csv"):
    with open("DBYD_asset_owners.csv") as f:
        for l in f:
            email_add, company = l.strip().split(",")
            senders[email_add] = company
else:
    messagebox.showerror("No Asset Owner List", "The .CSV file with the list of common DBYD asset owners could not be "
                                                "found.\n Please ensure the DBYD_asset_owners.csv file is in the same "
                                                "folder as this executable file.")
# ask for input DBYD search number
DBYD = simpledialog.askinteger("DBYD Request Number", "Please enter the DBYD Job No.", minvalue=0)
if not DBYD:
    messagebox.showerror('No DBYD Request number', 'No DBYD request number was entered.\nScript aborting')
    sys.exit()
DBYD = str(DBYD)
# ask for output folder
save_path = filedialog.askdirectory(title="Select folder to save DBYD files")
if not save_path:
    messagebox.showerror('No save location', 'No folder location was entered to store DBYD plans.\nScript aborting')
    sys.exit()
# Connect to the local Outlook profile and restrict to the last 28 days.
outlook = win32com.client.Dispatch("Outlook.Application").GetNameSpace("MAPI")
lastmonth = (dt.datetime.now() - dt.timedelta(days=28)).strftime('%d/%m/%Y %H:%M %p')
inbox = outlook.GetDefaultFolder(6)
messages = inbox.Items
messages = messages.Restrict(f"[ReceivedTime] >= '{lastmonth}'")
DBYD_msg = []
files_saved = False
asset_own_count = 1
# loop through each message, check if DBYD number in email title
# (j is the 1-based position for the COM Items collection)
j=0
for message in messages:
    j += 1
    if not ((DBYD in message.Subject) or (DBYD in message.Body)):
        continue
    attachs = message.Attachments
    # Use the sender's email domain to identify the asset owner company.
    sender = message.SenderEmailAddress.split("@")[1]
    asset_own = ""
    if sender in senders:
        asset_own = senders[sender]
    else:
        # Unknown sender: number the owners so filenames stay distinct.
        asset_own = "ASSET_OWNER_" + str(asset_own_count)
        asset_own_count += 1
    for i in attachs:
        files_saved = True
        i.SaveAsFile(save_path + "/" + asset_own + "_" + i.FileName)
    DBYD_msg.append(j)
# Delete from the highest index down so earlier deletions don't shift
# the positions of the messages still to be removed.
DBYD_msg.sort(reverse=True)
if messagebox.askyesno("Delete emails", "Would you like the emails deleted from the inbox"):
    for k in DBYD_msg:
        messages.Item(k).Delete()
if not files_saved:
    messagebox.showerror('No DBYD Files Found', 'No DBYD Files found amongst the emails.\nScript Finished')
| luke-twigg/scripts | DBYD Email sorter/Code/DBYD_email_sorter.py | DBYD_email_sorter.py | py | 2,633 | python | en | code | 0 | github-code | 90 |
def parser(result):
    """Parse iperf-style text output into per-interval records.

    Each record has keys 'Interval', 'Transfer' (MBytes) and 'Bitrate'
    (Mbits/sec); lines without a positive transfer value are skipped.
    """
    records = []
    for raw_line in result.split('\n'):
        tokens = raw_line.split(' ')
        interval = ''
        transfer = 0
        bitrate = 0
        for idx, token in enumerate(tokens):
            if token == 'sec':
                # e.g. "[ 5]" "0.0-1.0" "sec": strip the 2-char bracket prefix.
                interval = tokens[idx - 2][2:] + tokens[idx - 1]
            elif token == 'MBytes':
                transfer = float(tokens[idx - 1])
            elif token == 'Mbits/sec':
                bitrate = float(tokens[idx - 1])
        if transfer > 0:
            records.append({
                'Interval': interval,
                'Transfer': transfer,
                'Bitrate': bitrate,
            })
    return records
| Anton-G-11/QA-Testing | lab7/pars.py | pars.py | py | 731 | python | en | code | 0 | github-code | 90 |
24823725865 | import re
import time
import urllib2
from lxml import etree
from database import PrognosDB
from conditions import Locations
class CubanWeather(object):
    """Fetch the Cuban met service RSS forecast and store it in the local DB.

    NOTE(review): Python 2 code (urllib2); porting to py3 requires
    urllib.request and bytes handling in fetch_weather.
    """
    def __init__(self):
        # Parsed forecast for the most recently fetched location.
        self.weather_data = {}
        self.location = Locations()
        self.db = PrognosDB()
        self.db.create_connection()
        self.db.create_table()
        # Current date components, captured once at construction time.
        self.year = time.strftime("%Y")
        self.month = time.strftime("%m")
        self.day = time.strftime("%d")
    def proxy_authenticate(self, host, port, user, passwd):
        """Install a global urllib2 opener that authenticates via an HTTP proxy."""
        # Build "http://user:passwd@host:port" for the proxy handler.
        cmd = ''.join([
            'http://',
            '{u}'.format(u=user),
            ':',
            '{p}'.format(p=passwd),
            '@',
            '{0}:{1}'.format(host, port)])
        proxy = urllib2.ProxyHandler({'http': cmd})
        auth = urllib2.HTTPBasicAuthHandler()
        opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
    def fetch_weather(self, location):
        """Download the RSS feed, extract the forecast cells for `location`,
        persist a 5-day window to the DB and return today's summary dict.

        NOTE(review): assumes the feed item contains at least 20 <td> cells
        (5 days x 4 values) — an unexpected layout raises IndexError.
        """
        title = u''
        values = []
        conn = urllib2.urlopen(u'http://www.met.inf.cu/asp/genesis.asp?TB0=RSSFEED')
        t_data = conn.read()
        conn.close()
        t_root = etree.fromstring(t_data)
        item = t_root.findall('channel/title')
        for item in t_root.xpath('/rss/channel/item'):
            if item.xpath(u"./title/text()")[0] == location:
                title = item.xpath(u"./title/text()")[0]
                description = item.xpath("./description/text()")[0]
                # Strip the HTML table markup, keeping only the cell text.
                dataCrop = re.findall(r'<td>\W*?.*?</td>', description)
                for data in dataCrop:
                    values.append(re.sub("<.*?>", "", data))
        # Map the feed's display title back to the internal location key.
        title = next((k for k, v in self.location.locations.items() if v == title), None)
        self.weather_data['location'] = title
        self.weather_data['current_month_day'] = values[0]
        self.weather_data['current_day_temp'] = values[1]
        self.weather_data['current_night_temp'] = values[2]
        self.weather_data['current_day_weather'] = values[3]
        # Five rows: (year, month, day, location, day temp, night temp, summary).
        store_data = [
            (int(self.year), int(self.month), int(values[0]), title.encode('utf-8'), int(values[1]), int(values[2]), str(values[3])),
            (int(self.year), int(self.month), int(values[4]), title.encode('utf-8'), int(values[5]), int(values[6]), str(values[7])),
            (int(self.year), int(self.month), int(values[8]), title.encode('utf-8'), int(values[9]), int(values[10]), str(values[11])),
            (int(self.year), int(self.month), int(values[12]), title.encode('utf-8'), int(values[13]), int(values[14]), str(values[15])),
            (int(self.year), int(self.month), int(values[16]), title.encode('utf-8'), int(values[17]), int(values[18]), str(values[19])),
        ]
        self.db.insert_query(store_data)
        return self.weather_data
| codeshard/prognos | prognos/weather.py | weather.py | py | 2,808 | python | en | code | 14 | github-code | 90 |
19511643056 | import sys # exit()
import time # sleep()
import pygame
from ball import Ball
from paddle import Paddle
from wall import Wall
# Window dimensions and the shared text colour.
width = 640
height = 480
white_color = (255, 255, 255)
pygame.init()
def game_over():
    """Show the 'Game over' banner for 3 seconds, then quit the program."""
    banner_font = pygame.font.SysFont('Arial', 72)
    banner = banner_font.render('Game over :(', True, white_color)
    banner_rect = banner.get_rect()
    banner_rect.center = [width / 2, height / 2]
    screen.blit(banner, banner_rect)
    pygame.display.flip()
    time.sleep(3)
    sys.exit()
def show_score():
    """Draw the 'SCORE' label and the zero-padded score in the top-left corner."""
    value_font = pygame.font.SysFont('Consolas', 20)
    value_surface = value_font.render(str(score).zfill(5), True, white_color)
    value_rect = value_surface.get_rect()
    value_rect.topleft = [32, 30]
    label_font = pygame.font.SysFont('Consolas', 25)
    label_surface = label_font.render('SCORE', True, white_color)
    label_rect = label_surface.get_rect()
    label_rect.topleft = [10, 10]
    screen.blit(value_surface, value_rect)
    screen.blit(label_surface, label_rect)
def show_lives():
    """Draw the remaining-lives counter and a heart icon in the top-right corner."""
    counter_font = pygame.font.SysFont('Consolas', 25)
    counter = counter_font.render(str(lives).zfill(2), True, white_color)
    counter_rect = counter.get_rect()
    counter_rect.topleft = [width - 60, 15]
    heart_icon = pygame.image.load('images/heart.png')
    heart_icon_rect = heart_icon.get_rect()
    heart_icon_rect.topright = [width - 10, 10]
    screen.blit(counter, counter_rect)
    screen.blit(heart_icon, heart_icon_rect)
# Window, clock and the three game sprites/groups.
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Breakout')
clock = pygame.time.Clock()
pygame.key.set_repeat(30)
ball = Ball(width, height)
paddle = Paddle(width, height)
wall = Wall(65, width)
score = 0
lives = 3
# While waiting, the ball rides on the paddle until SPACE launches it.
waiting = True
# Main game loop, capped at 60 FPS.
while True:
    clock.tick(60)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            paddle.update(event)
            # SPACE launches the ball toward the nearer side of the screen.
            if waiting and event.key == pygame.K_SPACE:
                waiting = False
                if ball.rect.centerx < width / 2:
                    ball.speed = [3, -3]
                else:
                    ball.speed = [-3, -3]
    if waiting:
        # Keep the ball glued to the paddle until launch.
        ball.rect.midbottom = paddle.rect.midtop
    else:
        ball.update()
    if pygame.sprite.collide_rect(ball, paddle):
        ball.speed[1] = -ball.speed[1]
    collided_list = pygame.sprite.spritecollide(ball, wall, False)
    if collided_list:
        brick = collided_list[0]
        cx = ball.rect.centerx
        # Side hit bounces horizontally; top/bottom hit bounces vertically.
        if cx < brick.rect.left or cx > brick.rect.right:
            ball.speed[0] = -ball.speed[0]
        else:
            ball.speed[1] = -ball.speed[1]
        wall.remove(brick)
        score += 10
    # Ball fell below the window: lose a life and re-arm on the paddle.
    if ball.rect.top > height:
        lives -= 1
        waiting = True
    # NOTE(review): the background is reloaded from disk every frame —
    # loading it once before the loop would be cheaper.
    image = pygame.image.load('images/bg.png')
    screen.blit(image, image.get_rect())
    show_score()
    show_lives()
    screen.blit(ball.image, ball.rect)
    screen.blit(paddle.image, paddle.rect)
    wall.draw(screen)
    pygame.display.flip()
    if lives <= 0:
        game_over()
| gertoska/breakout | game.py | game.py | py | 3,040 | python | en | code | 0 | github-code | 90 |
26045932822 | from functools import wraps
import json
def json_io(func):
    """Decorator: decode an optional JSON-string payload before the call and
    JSON-encode (pretty-printed) the wrapped handler's return value."""
    @wraps(func)
    def wrapper(self, url, payload=None):
        decoded = payload
        if isinstance(decoded, str):
            decoded = json.loads(decoded)
        return json.dumps(func(self, url, decoded), indent=2)
    return wrapper
class User(object):
    """A participant in the IOU ledger.

    `records` maps counterparty name -> net amount: positive means the
    counterparty owes this user, negative means this user owes them.
    """
    def __init__(self, name, owed_by=None, owes=None, **kwargs):
        self.name = name
        self.records = {}
        for debtor, amount in (owed_by or {}).items():
            self.loan(debtor, amount)
        for creditor, amount in (owes or {}).items():
            self.borrow(creditor, amount)
    def borrow(self, lender, amount):
        """Record that this user now owes `lender` an extra `amount`."""
        self.records[lender] = self.records.get(lender, 0) - amount
    def loan(self, borrower, amount):
        """Record that `borrower` now owes this user an extra `amount`."""
        self.records[borrower] = self.records.get(borrower, 0) + amount
    @property
    def owes(self):
        """Counterparties this user owes, as positive amounts."""
        return {name: -net for name, net in self.records.items() if net < 0}
    @property
    def owed_by(self):
        """Counterparties that owe this user, as positive amounts."""
        return {name: net for name, net in self.records.items() if net > 0}
    @property
    def balance(self):
        """Net position across all counterparties."""
        return sum(self.records.values())
    @property
    def __dict__(self):
        # Serialised shape expected by the REST API payloads.
        return {
            'name': self.name,
            'owes': self.owes,
            'owed_by': self.owed_by,
            'balance': self.balance
        }
class RestAPI(object):
    """Minimal in-memory REST-style API over the User ledger."""
    def __init__(self, database=None):
        db = database or {}
        self.users = {}
        for record in db.get('users', []):
            self.users[record['name']] = User(**record)
    @json_io
    def get(self, url, payload=None):
        """GET /users -> every user, or only those named in payload['users']."""
        if url == '/users':
            wanted = None if payload is None else payload['users']
            listing = [
                user.__dict__
                for name, user in sorted(self.users.items())
                if wanted is None or name in wanted
            ]
            return {'users': listing}
    @json_io
    def post(self, url, payload):
        """POST /add creates a user; POST /iou records a loan between two users."""
        if url == '/add':
            new_user = User(payload['user'])
            self.users[new_user.name] = new_user
            return new_user.__dict__
        if url == '/iou':
            lender = self.users[payload['lender']]
            borrower = self.users[payload['borrower']]
            amount = payload['amount']
            lender.loan(borrower.name, amount)
            borrower.borrow(lender.name, amount)
            affected = sorted(
                [lender.__dict__, borrower.__dict__],
                key=lambda u: u['name']
            )
            return {'users': affected}
| JCArya/Exercism-in-Python | rest_api.py | rest_api.py | py | 2,388 | python | en | code | 1 | github-code | 90 |
6576390697 | from typing import List
'''
def fourSumCount(nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
count = 0
for i in range(len(nums1)):
for j in range(len(nums2)):
for k in range(len(nums3)):
for l in range(len(nums4)):
if nums1[i]+ nums2[j]+nums3[k]+nums4[l] == 0:
count += 1
return count
'''
def fourSumCount(nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
    """Count tuples (i, j, k, l) with nums1[i] + nums2[j] + nums3[k] + nums4[l] == 0.

    Meet-in-the-middle: tally every pairwise sum of the first two lists, then
    look up the negated pairwise sums of the last two. O(n^2) time and space
    versus the brute-force O(n^4) left commented out above.
    """
    pair_sums = {}
    for x in nums1:
        for y in nums2:
            s = x + y
            # dict.get replaces the original's separate `in ab.keys()`
            # membership test followed by an insert (double lookup).
            pair_sums[s] = pair_sums.get(s, 0) + 1
    count = 0
    for m in nums3:
        for n in nums4:
            count += pair_sums.get(-(m + n), 0)
    return count
print(fourSumCount([-1,-1], [-1,1], [-1,1], [1,-1]))
| MuskanMall/leetcode | HashMaps/4sum.py | 4sum.py | py | 913 | python | en | code | 0 | github-code | 90 |
3350830610 | # Creating an automated Parking lott
import warnings
import datetime
import math
import dateparser
warnings.filterwarnings(action="ignore")
class Two_wheeler:
    """A parked two-wheeler: records its plate and the moment it entered."""
    def __init__(self, plate):
        """
        :rtype: object
        """
        # Entry timestamp, captured when the vehicle is registered.
        self.enterTime = datetime.datetime.now()
        self.plate = plate
    def display(self):
        """Print the vehicle's number plate."""
        print(self.plate)
class Four_wheeler:
    """A parked four-wheeler: records its plate and the moment it entered."""
    def __init__(self, plate):
        # Entry timestamp, captured when the vehicle is registered.
        self.enterTime = datetime.datetime.now()
        self.plate = plate
    def display(self):
        """Print the vehicle's number plate."""
        print(self.plate)
class Membership:
    """Annual membership record; expiry is always one year after sign-up.

    NOTE(review): Subscription_Registration_date and Subscription_Expiry_date
    are accepted for call compatibility but ignored — both dates are derived
    from today's date.
    """
    def __init__(self, name, Subscription_type, Subscription_Registration_date, Subscription_Expiry_date):
        self.Name = name
        self.Subscription_type = Subscription_type
        self.Registration_date = datetime.date.today()
        one_year = datetime.timedelta(days=365)
        self.Subscription_Expiry_date = self.Registration_date + one_year
def print_bill():
    """Compute, print and return the parking fee from module-level session state.

    Reads globals: type (1 = two-wheeler, 2 = four-wheeler), total_hours,
    service (1 = valet taken, 2 = declined), a (member? y/n) and c
    (enrolled? y/n). Valet members / new enrollees pay 35/-, others 80/-.
    """
    if type == 1:
        rate = 15 * total_hours + 5
        print(f'Rate : {rate}/- ')
    elif type == 2:
        base = 50 * total_hours - 10
        if service == 1:
            # valet_parking() guarantees a is 'y' or 'n' at this point.
            surcharge = 35 if (a.lower() == 'y' or c.lower() == 'y') else 80
            rate = base + surcharge
            print(f'Rate : {rate}/- ')
        elif service == 2:
            rate = base
            print(f'Rate : {rate}/- ')
    return rate
service = ""
a = ""
b =""
c =""
def valet_parking():
    """Interactive valet-parking flow for four-wheelers.

    Prompts for service choice and membership status, optionally enrolling a
    new member, and stores the answers in the module globals service/a/b/c
    that print_bill() later reads. Exits the program on invalid input.
    """
    global service
    global a
    global b
    global c
    print("Valet Parking is available for cars")
    print('To avail the service press 1')
    print('To reject the service press 2')
    service = int(input())
    if service == 1:
        a = input("Are you a member : Y / N ")
        if a.lower() == 'y':
            b = input("Choose Membership type : Classic or Premium ")
            if b.lower() not in ('premium', 'classic'):
                print("Invalid entry")
                exit()
            print(f"Membership type : {b}")
            print("Total Bill for Valet Parking is 35/-")
        elif a.lower() == 'n':
            print("Membership option available")
            c = input("Enroll in membership? : Y / N \n")
            if c.lower() == 'y':
                name = input("Enter your name: ") #########________registration code starts here
                while not name:
                    name = input("Enter a name: ")
                choice = input('Choose the type of Membership : Classic or Premium ')
                if choice.lower() not in ('premium', 'classic'):
                    print("Invalid entry")
                    exit()
                # NOTE(review): Membership ignores the two date arguments and
                # derives both dates itself.
                cus_2 = Membership(name, choice, datetime.date.today,
                                   datetime.date.today() + datetime.timedelta(days=365))
                print(f"Name: {cus_2.Name}")
                print(f"Membership Type: {choice}")
                print(f"Registration date: {cus_2.Registration_date}")
                print(f"Expiry date: {cus_2.Subscription_Expiry_date}\n")
                #print("Total bill for Valet Parking is 35/-\n")
                #print('********Thank you visit again*********')
                return
            elif c.lower() == 'n':
                print("Total bill for Valet Parking is 80/-\n")
                # print('********Thank you visit again*********')
                return
            else:
                print("Invalid entry")
                exit()
    elif service == 2:
        print('Try next time :)')
        return
# Capacity gate: refuse entry when the lot is (nearly) full.
total_two_wheelers = int(input("Enter the total number of two wheelers parked- "))
total_four_wheelers = int(input("Enter the total number of four wheelers parked- "))
if total_two_wheelers == 500 or total_two_wheelers > 475:
    print("Sorry for inconvinience, you cannot park your two wheeler now!\n")
    exit()
if total_four_wheelers == 100 or total_four_wheelers > 90:
    print("Sorry for inconvenience, you cannot park your four wheeler vehicle now!\n")
    exit()
print("You are warmly welcome:)\n")
go = "yes"
total_hours = 0
total_time = 0
while go == "yes":
print("Enter the type of vehicle")
print("Press 1 for Two_wheeler")
print("Press 2 for Four_wheeler")
type = int(input("Enter the type of the vehicle to be parked-"))
if type == 1:
print("Enter the Plate Number: ")
new = input()
tw = Two_wheeler(new)
print("Time of Entry: ", str(tw.enterTime))
leave_time = dateparser.parse(input('Enter the leaving time: '), settings={'PREFER_DATES_FROM': 'future'})
print(leave_time)
leave_time_diff = leave_time - tw.enterTime
print(leave_time_diff)
print(f"time difference = {leave_time_diff} ")
total_time = leave_time - tw.enterTime
total_hours = math.ceil(total_time.total_seconds() / 3600) # here? yes where is total hours missing from
elif type == 2:
print("Enter the Plate Number")
new = input()
fw: Four_wheeler = Four_wheeler(new)
# valet
valet_parking()
print("Time of Entry: ", str(fw.enterTime))
leave_time = dateparser.parse(input('Enter the leaving time: '), settings={'PREFER_DATES_FROM': 'future'})
print(leave_time)
leave_time_diff = leave_time - tw.enterTime
print(leave_time_diff)
print(f"time difference = {leave_time_diff} ")
total_time = leave_time - fw.enterTime
total_hours = math.ceil(total_time.total_seconds() / 3600)
else:
print("Entry Restricted!")
exit()
Rs = print_bill()
print("Type yes to enter another entry, else E to exit: ")
go = input()
print("*****Displaying_Details*****\n")
if type == 1:
try:
print("Two-wheeler number plate: ", tw.plate)
print("Time of Entry: ", str(tw.enterTime))
except NameError:
print("No four-wheeler entered")
elif type == 2:
try:
print("Four-wheeler number plate: ", fw.plate)
print("Time of Entry: ", str(fw.enterTime))
print("Total bill for Valet Parking is 35/-\n")
except NameError:
print("No two-wheeler entered")
| kumar-rohan-412/Valet-Parking | Valet_Parking_Final.py | Valet_Parking_Final.py | py | 6,361 | python | en | code | 0 | github-code | 90 |
17999120540 | from kivy.app import App
from kivy.config import Config
from kivy.core.audio import SoundLoader
from widgets import Pong
from telas import TelaJogo, TelaMenu, TelaVencedor1, TelaVencedor2
from kivy.uix.screenmanager import ScreenManager
# Load the app's configuration file (window size, etc.).
Config.read("Tutoriais_Kivy_KivyMD/Jogo_pong/config.ini")
# Screen manager shared between the app and the Pong widget.
screen_manager = ScreenManager()
class PongApp(App):
    """Kivy application: wires the background music, the Pong widget and the screens."""
    def build(self):
        # Start the background music as soon as the app is built (if it loads).
        bg_music = SoundLoader.load(
            'Tutoriais_Kivy_KivyMD/Jogo_pong/audio/bg-music.mp3')
        if bg_music:
            bg_music.play()
        # The game widget drives screen transitions through the shared manager.
        pong_widget = Pong(screen_manager=screen_manager)
        game_screen = TelaJogo(name="jogo")
        game_screen.add_widget(pong_widget)
        # Register every screen; the first one added ("menu") is shown first.
        screen_manager.add_widget(TelaMenu(name='menu'))
        screen_manager.add_widget(game_screen)
        screen_manager.add_widget(TelaVencedor1(name='vencedor_1'))
        screen_manager.add_widget(TelaVencedor2(name='vencedor_2'))
        return screen_manager
if __name__ == '__main__':
    # Entry point: start the Kivy event loop.
    PongApp().run()
| LivioAlvarenga/Tutoriais_Kivy_KivyMD | Jogo_pong/main.py | main.py | py | 1,295 | python | pt | code | 1 | github-code | 90 |
4668986942 | # Name: Huan-Yun Chen
# soundex.py template
# CSCI 4140
import sys
import nltk
import re
# Define any global helper strings at this point
# Define tuple list and mapping code to create dictionary for consonant
# transformations at this point
# function that takes token, transforms it into its new form, and returns it
# YOU WRITE THE FUNCTION
def wordmap(token) :
    """Return the 4-character Soundex code for *token* (e.g. 'Robert' -> 'R163')."""
    token = token.lower()
    first_letter = token[:1]
    # Step 1: keep the first letter; drop every later 'h' and 'w'.
    stripped = first_letter + ''.join(
        ch for ch in token[1:] if ch not in ('h', 'w'))
    # Step 2: map consonants (including the first letter) to Soundex digits;
    # vowels and any other characters pass through unchanged.
    digit_map = str.maketrans({
        'b': '1', 'f': '1', 'p': '1', 'v': '1',
        'c': '2', 'g': '2', 'j': '2', 'k': '2',
        'q': '2', 's': '2', 'x': '2', 'z': '2',
        'd': '3', 't': '3',
        'l': '4',
        'm': '5', 'n': '5',
        'r': '6',
    })
    digits = stripped.translate(digit_map)
    # Step 3: collapse runs of identical adjacent symbols to a single one.
    collapsed = ''
    for ch in digits:
        if not collapsed.endswith(ch):
            collapsed += ch
    # Step 4: keep the first symbol; drop vowels and 'y' from the rest.
    vowels = set('aeiouy')
    pruned = collapsed[:1] + ''.join(
        ch for ch in collapsed[1:] if ch not in vowels)
    # Step 5: uppercase the leading symbol; if it was coded to a digit,
    # restore the word's original first letter instead.
    if pruned[:1] in set('123456'):
        head = first_letter.upper()
    else:
        head = pruned[:1].upper()
    code = head + pruned[1:]
    # Step 6: zero-pad and truncate to exactly four characters.
    return (code + '000')[:4]
# Driver code for the program
# sys.argv[1] should be the name of the input file
# sys.argv[0] will be the name of this file
# Tokenize each input line with NLTK and print the Soundex code per token.
for line in open(sys.argv[1]).readlines():
    text = nltk.word_tokenize(line.lower())
    for token in text:
        #print(token)
        print (wordmap(token),end=' ')
    print() # This prints new line at the end of processing a line
| oliver0616/my_work2 | Natural Language Processing/NLTK/Assignment/Assignment3/hw3_Chen.py | hw3_Chen.py | py | 2,282 | python | en | code | 1 | github-code | 90 |
74025857578 | import json
import os
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse, path, include
from rest_framework.test import APITestCase, URLPatternsTestCase
from apps.order_app.models import Order
from core import settings
class OrderTests(APITestCase, URLPatternsTestCase):
urlpatterns = [
path('api/user/', include('apps.user_app.urls')), # User Application
path('api/order/', include('apps.order_app.urls')), # Order Application
]
def setUp(self):
self.username = "+77013926883"
self.password = "helloworld123@"
self.first_name = "Hello"
self.last_name = "World"
self.date_of_birth = "2002-04-03"
self.token = self.receive_token()
self.files = []
filenames = [
os.path.join(settings.BASE_DIR, 'media/orders/hearts.jpg'),
os.path.join(settings.BASE_DIR, 'media/orders/soyle.png')
]
for filename in filenames:
f = open(filename, mode='rb')
fp = SimpleUploadedFile(name=filename, content=f.read(), content_type="image/jpg; image/png")
self.files.append(fp)
def api_authentication(self):
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + self.token)
def receive_token(self):
response = self.client.post(
reverse("user_register"),
{
"username": self.username,
"password": self.password, "confirm_password": self.password,
"first_name": self.first_name, "last_name": self.last_name,
"date_of_birth": self.date_of_birth
}
)
self.assertEqual(201, response.status_code)
response = self.client.post(
reverse("user_login"),
{"username": self.username, "password": self.password}
)
self.assertEqual(200, response.status_code)
return json.loads(response.content).get('access_token')
def test_create_order(self):
self.api_authentication()
response = self.client.post(
reverse("add-order"),
{"images": self.files}
)
self.assertEqual(201, response.status_code)
def test_create_no_user_order(self):
response = self.client.post(
reverse("add-order"),
{"images": self.files}
)
self.assertEqual(201, response.status_code)
def test_get_order(self):
response = self.client.post(
reverse("add-order"),
{"images": self.files}
)
self.assertEqual(201, response.status_code)
order_id = int(json.loads(response.content).get("id"))
response = self.client.get(
reverse('get-order'),
{"order_id": order_id}
)
self.assertEqual(200, response.status_code)
def test_get_order_error(self):
response = self.client.get(
reverse("get-order"),
{"order_id": 1}
)
self.assertEqual(404, response.status_code)
def test_get_orders(self):
response = self.client.get(
reverse("get-orders")
)
self.assertEqual(200, response.status_code)
self.assertEqual(len(json.loads(response.content)), Order.objects.count())
| kaidenvlr/onedev-tz | apps/order_app/tests.py | tests.py | py | 3,311 | python | en | code | 0 | github-code | 90 |
27693240474 | from basic.collection.queue_ import Queue
from basic.collection.tree import Node
def preorder_serialize(head=None):
if not head:
return '#'
return str(head.val) + ',' + str(preorder_serialize(head.left)) + \
',' + str(preorder_serialize(head.right))
def inorder_serialize(head=None, results=[]):
pass
serialize_method = {}
serialize_method['preorder'] = preorder_serialize
def serialize(head, kind='preorder'):
return serialize_method[kind](head)
def deserialize(sequence, kind='preorder'):
list = sequence.split(',')
return deserialize_method[kind](list)
def preorder_deserialize(list):
if len(list) <= 0:
return None
root = None
val = list.pop(0)
if val != '#':
root = Node(int(val))
root.left = preorder_deserialize(list)
root.right = preorder_deserialize(list)
return root
deserialize_method = {}
deserialize_method['preorder'] = preorder_deserialize
if __name__ == '__main__':
"""
0
1 2
None 4 5 None
pre-order: 0 1 3 4 2 5 6
in-order: 3 1 4 0 5 2 6
post-order:3 4 1 5 6 2 0
"""
nodes = [Node(i) for i in range(7)]
head = nodes[0]
nodes[0].left = nodes[1]
nodes[0].right = nodes[2]
nodes[1].left = None
nodes[1].right = nodes[4]
nodes[2].left = nodes[5]
nodes[2].right = None
sequence = serialize(head, kind='preorder')
print('Level traversal: ', sequence)
k = deserialize(sequence, kind='preorder')
sequence = serialize(k, kind='preorder')
print('Level traversal: ', sequence)
| xyzacademic/LeetCode | basic/BinaryTree/preorder_serialize.py | preorder_serialize.py | py | 1,619 | python | en | code | 0 | github-code | 90 |
22242138316 | import json
import time
from classes.functions import Functions
class Trudy:
def __init__(self, neo):
self.neo = neo
self.functions = Functions()
def Trudy(self, username):
self.functions.createTaskData('Trudy', username)
if time.time() - float(self.functions.lastRun('Trudy', username)) >= 86400:
resp = self.neo.get('trudys_surprise.phtml?delevent=yes', 'https://www.jellyneo.net/?go=dailies')
if self.functions.contains(resp.text, '&slt=1'):
result = self.functions.getBetween(resp.text, 'phtml?id=', '" name="')
resp = self.neo.get('trudydaily/slotgame.phtml?id=%s' % result, resp.url)
results = self.functions.getBetween(resp.text, '\'key\': \'', '\'};')
resp = self.neo.post('trudydaily/ajax/claimprize.php', {'action': 'getslotstate', 'key': results}, 'http://www.neopets.com/trudydaily/slotgame.phtml?id=%s' % result)
resp = self.neo.post('trudydaily/ajax/claimprize.php', {'action': 'beginroll'}, resp.url)
self.neo.post('trudydaily/ajax/claimprize.php', {'action': 'prizeclaimed'}, resp.url)
self.functions.log('Trudy\'s Surprise: Done')
self.functions.updateLastRun('Trudy', username)
else:
self.functions.log('Trudy\'s Surprise: Already done today')
| MajinClraik/Multi-Tool | classes/trudy.py | trudy.py | py | 1,384 | python | en | code | 0 | github-code | 90 |
32658800096 | # -*— coding:utf-8 -*-
import asyncio
import json
import aioamqp
from anquant.tasks import LoopTask, SingleTask
from anquant.utils import logger
from anquant.utils.locker import async_method_locker
class Event:
def __init__(self, name=None, exchange=None, queue=None, routing_key=None, pre_fetch_count=1, data=None):
self._name = name
self._exchange = exchange
self._queue = queue
self._routing_key = routing_key
self._pre_fetch_count = pre_fetch_count
self._data = data
self._callback = None
@property
def name(self):
return self._name
@property
def exchange(self):
return self._exchange
@property
def queue(self):
return self._queue
@property
def routing_key(self):
return self._routing_key
@property
def prefetch_count(self):
return self._pre_fetch_count
@property
def data(self):
return self._data
def dumps(self):
d = {
"n": self.name,
"d": self.data
}
return json.dumps(d)
def loads(self, b):
d = json.loads(b)
self._name = d.get("n")
self._data = d.get("d")
return d
def parse(self):
raise NotImplemented
def subscribe(self, callback, multi=False):
from anquant import quant
self._callback = callback
SingleTask.run(quant.event_engine.subscribe, self, self.callback, multi)
def publish(self):
from anquant import quant
SingleTask.run(quant.event_engine.publish, self)
async def callback(self, exchange, routing_key, body):
self._exchange = exchange
self._routing_key = routing_key
self.loads(body)
await self._callback(self.parse())
def __str__(self):
info = "EVENT: name={n}, exchange={e}, queue={q}, routing_key={r}, data={d}".format(
e=self.exchange, q=self.queue, r=self.routing_key, n=self.name, d=self.data)
return info
def __repr__(self):
return str(self)
class EventEngine:
def __init__(self, rabbitmq_config):
self._host = rabbitmq_config.get("host", "127.0.0.1")
self._port = rabbitmq_config.get("port", 5672)
self._username = rabbitmq_config.get("username", "guest")
self._password = rabbitmq_config.get("password", "guest")
self._protocol = None
self._channel = None
self._connected = False
self._subscribers = []
self._event_handler = {}
LoopTask.register(self._check_connect, 10)
asyncio.get_event_loop().run_until_complete(self.connect())
async def _check_connect(self, *args, **kwargs):
"""
check connect
:param interval:
:return:
"""
if self._connected and self._channel and self._channel.is_open:
logger.debug("check server connection OK", caller=self)
return
logger.error("connection lose! start reconnect ...", caller=self)
self._connected = False
self._protocol = None
self._channel = None
self._event_handler = {}
SingleTask.run(self.connect, reconnect=True)
async def _check_subscriber(self, *args, **kwargs):
pass
async def connect(self, reconnect=False):
"""
build tcp connection
:param reconnect:
:return:
"""
logger.info("host:", self._host, "port:", self._port, caller=self)
if self._connected:
return
# connect
try:
transport, protocol = await aioamqp.connect(host=self._host, port=self._port,
login=self._username, password=self._password)
except Exception as e:
logger.error("connect error:", e, caller=self)
return
finally:
if self._connected:
return
channel = await protocol.channel()
self._protocol = protocol
self._channel = channel
self._connected = True
logger.info("rabbitmq initialize success!", caller=self)
# default exchange
exchanges = ["Orderbook", "Trade", "Kline", "Ticker", ]
for name in exchanges:
await self._channel.exchange_declare(exchange_name=name, type_name="topic")
logger.info("create default exchanges success!", caller=self)
if reconnect:
self._bind_consume()
else:
asyncio.get_event_loop().call_later(5, self._bind_consume)
def _bind_consume(self):
async def do_them():
for event, callback, multi in self._subscribers:
await self._initialize(event, callback, multi)
SingleTask.run(do_them)
async def _initialize(self, event: Event, callback=None, multi=False):
"""
create/bind exchange message queue
:param event:
:param callback:
:param multi:
:return:
"""
if event.queue:
await self._channel.queue_declare(queue_name=event.queue)
queue_name = event.queue
else:
result = await self._channel.queue_declare(exclusive=True)
queue_name = result["queue"]
await self._channel.queue_bind(queue_name=queue_name, exchange_name=event.exchange,
routing_key=event.routing_key)
await self._channel.basic_qos(prefetch_count=event.prefetch_count)
if callback:
if multi:
await self._channel.basic_consusme(callback=callback, queue_name=queue_name, no_ack=True)
logger.info("multi message queue:", queue_name, "callback:", callback, caller=self)
else:
await self._channel.basic_consume(self._on_consume_event_msg, queue_name=queue_name)
logger.info("queue:", queue_name, caller=self)
self._add_event_handler(event, callback)
async def _on_consume_event_msg(self, channel, body, envelope, properties):
"""
receive subscribe message
:param channel:
:param body:
:param envelope:
:param properties:
:return:
"""
logger.debug("exchange:", envelope.exchange_name, "routing_key:", envelope.routing_key,
"body:", body, caller=self)
try:
key = "{exchange}:{routing_key}".format(exchange=envelope.exchange_name, routing_key=envelope.routing_key)
funcs = self._event_handler[key]
for func in funcs:
SingleTask.run(func, envelope.exchange_name, envelope.routing_key, body)
except:
logger.error("event handle error! body:", body, caller=self)
return
finally:
await self._channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
def _add_event_handler(self, event: Event, callback):
"""
add event handler function
:param event:
:param callback:
:return:
"""
key = "{exchange}:{routing_key}".format(exchange=event.exchange, routing_key=event.routing_key)
if key in self._event_handler:
self._event_handler[key].append(callback)
else:
self._event_handler[key] = [callback]
logger.info("event handlers:", self._event_handler, caller=self)
@async_method_locker("EventEngine.subscribe")
async def subscribe(self, event: Event, callback=None, multi=False):
"""
register event
:param event:
:param callback:
:param multi:
:return:
"""
logger.info("NAME:", event.name, "EXCHANGE:", event.exchange, "QUEUE:", event.queue,
"ROUTING_KEY:", event.routing_key, caller=self)
self._subscribers.append((event, callback, multi))
async def publish(self, event):
"""
publish event
:param event:
:return:
"""
if not self._connected:
logger.warn("rabbitmq not ready right now!", caller=self)
return
data = event.dumps()
logger.debug("exchange_name", event.exchange, "routing_key", event.routing_key, "data", data)
await self._channel.basic_publish(payload=bytes(data, encoding="utf8"), exchange_name=event.exchange,
routing_key=event.routing_key)
| Ansore/anquant | anquant/event_engine.py | event_engine.py | py | 8,462 | python | en | code | 0 | github-code | 90 |
33168638638 | """
@author: yigit.yildirim@boun.edu.tr
"""
from tkinter import *
from world import World
# Window-related stuff begins
# initializing root
root = Tk()
root.title("Contagion")
root.resizable(False, False)
# initializing canvas
canvas = Canvas(root, width=1000, height=500)
canvas.pack()
root.update_idletasks()
# Window-related stuff ends
world = World(canvas)
world.initialize(70, 40)
# call when resizing
# world.update_points()
world.run()
root.mainloop()
| yildirimyigit/ds-p4ai | oop/contagion/contagion.py | contagion.py | py | 468 | python | en | code | 0 | github-code | 90 |
18297632689 | n,m=map(int,input().split())
a=list(map(int,input().split()))
for i in range(n):a[i]*=-1
a.sort()
from bisect import bisect_left,bisect_right
def check(mid):
mm=0
for i in range(n):
if -(a[i]+a[0])<mid:break
mm+=bisect_right(a,-(mid+a[i]))
return mm
ok=0
ng=10**10+7
while ng!=ok+1:
mid=(ok+ng)//2
if check(mid)>=m:ok=mid
else:ng=mid
b=[0]
for i in a:b.append(b[-1]+i)
ans=0
for i in range(n):
if -(a[i]+a[0])<ok:break
ind=bisect_right(a,-(ok+a[i]))
ans+=a[i]*ind
ans+=b[ind]
print(-(ans+(check(ok)-m)*ok)) | Aasthaengg/IBMdataset | Python_codes/p02821/s208865293.py | s208865293.py | py | 534 | python | en | code | 0 | github-code | 90 |
35612990530 | import sys
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from ship import Ship
from bullet import Bullet
from alien import Alien
from button import Button
from scoreboard import Scoreboard
class AlienInvasion:
"""Overall class to manage game assets and behavior"""
def __init__(self):
"""Initialize the game and create game ressources"""
pygame.init()
self.settings = Settings()
if self.settings.fullscreen_flag:
self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
self.settings.screen_height = self.screen.get_rect().height
self.settings.screen_width = self.screen.get_rect().width
else:
self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))
pygame.display.set_caption("Alien Invasion")
# Create an instance to score game statistics and create a scoreboard
self.stats = GameStats(self)
self.sb = Scoreboard(self)
# Instantiate bullets
self.bullets = pygame.sprite.Group()
# Instantiate a ship
self.ship = Ship(self)
# Instantiate aliens
self.aliens = pygame.sprite.Group()
# Produce a fleet of aliens
self._create_fleet()
# Make the Play button
self.play_button = Button(self, "Versuch antreten")
def run_game(self):
"""Start the main loop for the game"""
while True:
self._check_events()
if self.stats.game_active:
self.ship.update()
self._update_bullets()
self._update_aliens()
self._update_screen()
# helper methods are only supposed to be called by another method to refactor the code properly
def _check_events(self):
"""watch for keyboard and mouse events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type == pygame.KEYUP:
self._check_keyup_events(event)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
self._check_play_button(mouse_pos)
def _check_play_button(self, mouse_pos):
"""Start a new game when the player hits Play"""
button_clicked = self.play_button.rect.collidepoint(mouse_pos)
if button_clicked and not self.stats.game_active:
self._start_game()
def _start_game(self):
"""Resets the statistics, hides the mouse and starts the game"""
# Reset the game settings
self.settings.initialize_dynamic_settings()
# Reset the game statistics
self.stats.reset_stats()
self.stats.game_active = True
self.sb.prep_score()
self.sb.prep_level()
self.sb.prep_ships()
# Hide the mouse cursor
pygame.mouse.set_visible(False)
# Get rid of any remaining aliens and bullets
self.aliens.empty()
self.bullets.empty()
# Create a new fleet and center the ship
self._create_fleet()
self.ship.center_ship()
def _check_keydown_events(self, event):
"""A helper method to refactor the keydown presses"""
if event.key == pygame.K_RIGHT:
self.ship.moving_right = True
elif event.key == pygame.K_LEFT:
self.ship.moving_left = True
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_SPACE and self.stats.game_active: # here I added the flag so you cant shoot befor the game starts
self._fire_bullet()
elif event.key == pygame.K_p and not self.stats.game_active:
self._start_game()
def _check_keyup_events(self, event):
"""A helper method to refactor key releases"""
if event.key == pygame.K_RIGHT:
self.ship.moving_right = False
elif event.key == pygame.K_LEFT:
self.ship.moving_left = False
def _fire_bullet(self):
"""Create a new bullet and add it to the bullets group"""
# we cereate a new bullet only if the maximum amount of bullets wasn't reached
if len(self.bullets) < self.settings.bullets_allowed:
new_bullet = Bullet(self)
self.bullets.add(new_bullet)
def _update_bullets(self):
"""Update the position of bullets and get rid of old ones"""
# update the bullet positions
self.bullets.update()
# Getting rid of the bullets that leave the screen by looping over a copy of the original group (list)
for bullet in self.bullets.copy():
if bullet.rect.bottom <= 0:
self.bullets.remove(bullet)
self._check_bullet_alien_collisions()
def _check_bullet_alien_collisions(self):
"""Respond to bullet-alien collisions"""
# check for bullets that hit aliens and delete bullet and alien
collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)
# The True statements tell pygame to delete both members that collided
# collisions is dict with the bullet as a key and every alien it hit as a list as the value
if collisions:
for aliens in collisions.values():
self.stats.score += self.settings.alien_points * len(aliens)
self.sb.prep_score()
self.sb.check_high_score()
if not self.aliens:
# Destroy existing bullets and create a new fleet
self.bullets.empty()
self._create_fleet()
self.settings.increase_speed()
# Increase level
self.stats.level += 1
self.sb.prep_level()
def _update_aliens(self):
"""Updates the alien positions in the fleet by checking if it hit an edge"""
self._check_fleet_edges()
self.aliens.update()
# Look for alien-ship collisions
if pygame.sprite.spritecollideany(self.ship, self.aliens):
self._ship_hit()
# Look for aliens hitting the bottom of the screen
self._check_aliens_bottom()
def _update_screen(self):
"""Redraw the screen for every pass through the loop"""
self.screen.blit(self.settings.bg_image, (0, 0))
self.ship.blitme()
for bullet in self.bullets.sprites():
bullet.draw_bullet()
self.aliens.draw(self.screen)
# Draw the score informaton
self.sb.show_score()
# Draw the play button if the game is inactive
if not self.stats.game_active:
self.play_button.draw_button()
# Make the most recently drawn screen visible.
pygame.display.flip()
def _create_fleet(self):
"""Creates a fleet of aliens"""
# Create an alien and find the number of aliens in a row
# Spacing between each alien is equal to one alien width
alien = Alien(self)
alien_width, alien_height = alien.rect.size
available_space_x = self.settings.screen_width - (2 * alien_width)
number_aliens_x = available_space_x // (2 * alien_width)
# Determine the number of rows of aliens that fit on the screen.
ship_height = self.ship.rect.height
available_space_y = self.settings.screen_height - 4 * alien_height - ship_height
number_rows = available_space_y // (2 * alien_height)
# Create a full fleet of aliens
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
self._create_alien(alien_number, row_number)
def _create_alien(self, alien_number, row_number):
"""Create an alien and place it in the row with appropriate spacing"""
alien = Alien(self)
alien_width, alien_height = alien.rect.size
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number + 50
self.aliens.add(alien)
def _check_fleet_edges(self):
"""Respond appropriately if any aliens have reached an edge."""
for alien in self.aliens.sprites():
if alien.check_edges():
self._change_fleet_direction()
break
def _change_fleet_direction(self):
"""Drop the entire fleet and change its direction"""
for alien in self.aliens.sprites():
alien.rect.y += self.settings.fleet_drop_speed
self.settings.fleet_direction *= -1
# there was an interesting bug here where it changed the direction
# everytime in the loop which meant that they would drop all instantly
def _ship_hit(self):
"""Respond to the ship being hit by an alien"""
if self.stats.ships_left > 0:
# Decrement ships_left and update scoreboard
self.stats.ships_left -= 1
self.sb.prep_ships()
# Get rid of any remaining aliens and bullets
self.aliens.empty()
self.bullets.empty()
# Create a new fleet and center the ship
self._create_fleet()
self.ship.center_ship()
# Pause after a hit
sleep(1.0)
else:
self.stats.game_active = False
pygame.mouse.set_visible(True)
def _check_aliens_bottom(self):
"""Check if any alien has reached the bottom"""
screen_rect = self.screen.get_rect()
for alien in self.aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
# Treat this the same as if the ship got hit
self._ship_hit()
break
if __name__ == "__main__":
# make a game instance and run the game.
ai = AlienInvasion()
ai.run_game()
| quantenmagier/Carls_studies-electric_boogalo | main.py | main.py | py | 10,016 | python | en | code | 2 | github-code | 90 |
17874371797 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
import datetime
from.models import Product, Sell, Order
from .forms import FormInventoryAdd, FormSellCustomer, FormSellOrder
from django.contrib.auth import logout
# Create your views here.
@login_required(login_url='home')
def db_home(request):
sells = Sell.objects.all()
dates, totals = sell_30(sells)
orders = Order.objects.all()
categories, counts = product_types(orders)
return render(request, "td_database/dashboard.html", {'dates': dates,
'totals': totals,
'categories': categories,
'counts': counts})
def sell_30(sells):
today = str(datetime.datetime.now()).split(' ')[0]
current_year, current_month, current_date = today.split('-')
transactions = {}
for sell in sells:
if sell.total is not None:
date = str(sell.date).split(' ')[0]
year, month, date = date.split('-')
if month == current_month and year == current_year:
g_date = f'{year}-{month}-{date}'
g_total = sell.total if sell.total is not None else 0
if g_date not in transactions:
transactions[g_date] = g_total
else:
transactions[g_date] += g_total
dates = list(transactions.keys())
totals = list(transactions.values())
return [dates, totals]
def product_types(orders):
type_counts = {}
for order in orders:
product_category = order.product.category
if product_category not in type_counts:
type_counts[product_category] = 1
else:
type_counts[product_category] += 1
categories = list(type_counts.keys())
counts = list(type_counts.values())
return categories, counts
@login_required(login_url='home')
def db_addrecord(response):
if response.method == "POST":
data = response.POST
sellObj = Sell.objects.get(id=data['id'])
sellObj.cusName = data['cusName']
sellObj.cusContact = data['cusContact']
sellObj.cusAddress = data['cusAddress']
sellObj.cusEmail = data['cusEmail']
sellObj.save()
formSell = FormSellCustomer(initial={'cusName' : sellObj.cusName,
'cusContact': sellObj.cusContact,
'cusAddress': sellObj.cusAddress,
'cusEmail': sellObj.cusEmail})
formOrder = FormSellOrder()
formSell.fields["id"].initial = sellObj.id
formSell.fields["date"].initial = str(sellObj.date).split('.')[0]
if 'btn_search' in data:
products = Product.objects.all()
if products.filter(name=data['product']):
product = Product.objects.get(name=data['product'])
if int(data['order_quantity']) <= product.quantity:
formOrder = FormSellOrder(initial={'product': product.name,
'order_quantity': data['order_quantity'],
'price_buy': product.priceBuy,
'price_sell': product.priceBuy})
print(formOrder)
if 'btn_update' in data:
products = Product.objects.all()
if products.filter(name=data['product']):
product = Product.objects.get(name=data['product'])
orderObj = Order(product=product,
sell=sellObj,
order_quantity=data['order_quantity'],
price_buy=product.priceBuy,
price_sell=data['price_sell'],
price_total=int(data['order_quantity'])*int(data['price_sell']))
product.quantity -= int(data['order_quantity'])
product.save()
orderObj.save()
else:
sellObj = Sell()
sellObj.save()
formSell = FormSellCustomer()
formOrder = FormSellOrder()
formSell.fields["id"].initial = sellObj.id
formSell.fields["date"].initial = str(sellObj.date).split('.')[0]
sellObj.total = calculate_total_cost(sellObj.order_set.all())
sellObj.save()
formSell.fields["total"].initial = sellObj.total
sells = Sell.objects.all()
products = Product.objects.all()
orders = sellObj.order_set.all()
return render(response, "td_database/addrecord.html", {'formSell': formSell,
'formOrder': formOrder,
'orders': orders,
'products': products,
'sells': sells})
def calculate_total_cost(orders):
total = 0
for order in orders:
total += order.price_total
return total
@login_required(login_url='home')
def db_records(response):
clear_null_sells()
sells = Sell.objects.all()
return render(response, "td_database/records.html", {'sells': sells})
@login_required(login_url='home')
def db_inventory(response):
if response.method == "POST":
form = FormInventoryAdd(response.POST)
if form.is_valid():
exData = form.cleaned_data
try:
product = Product.objects.get(name=exData['name'])
pID = product.id
product = Product(id=pID,
name=exData['name'],
category=exData['category'],
unit=exData['unit'],
quantity=exData['quantity'] + product.quantity,
priceBuy=exData['priceBuy'])
product.save()
except:
product = Product(name=exData['name'],
category=exData['category'],
unit=exData['unit'],
quantity=exData['quantity'],
priceBuy=exData['priceBuy'])
product.save()
form = FormInventoryAdd()
products = Product.objects.all()
return render(response, "td_database/inventory.html", {'products' : products,
'form' : form})
def db_logout(response):
logout(response)
return redirect('home')
def clear_null_sells():
sells = Sell.objects.all()
for sell in sells:
if sell.total == None or sell.total == 0:
sell.delete()
| Rono-Koushique/Inventory-records-manager | techdorbesh/database/views.py | views.py | py | 7,001 | python | en | code | 1 | github-code | 90 |
18224647606 | from twisted.internet.defer import Deferred
from crow2.events.hook import Hook
from crow2.events.yielding import yielding
from crow2.util import AttrDict
def test_simple():
"""
"Simple" test of yielding
"""
hook1 = Hook()
hook2 = Hook()
@hook1
@yielding
def handler(event):
"yielding handler"
while "derp" in event:
event = yield event.derp
event.was_called = True
assert handler #shut up, pylint
# when we pass nothing in, it doesn't see "derp" in event and so just returns
assert "was_called" in hook1.fire()
# nothing is registered to hook2 at the moment
assert not "was_called" in hook2.fire()
# when we pass hook2 into hook1's handler, it yields it, and so hook1's event never gets modified
assert not "was_called" in hook1.fire(derp=hook2)
# now, since hook2 was yielded from hook1's handler, when we fire hook2
# with no arguments it's event gets modified
assert "was_called" in hook2.fire()
# but if we call hook2 again, nothing happens, since the yield handler finished
assert not "was_called" in hook2.fire()
# now we pass back and forth just to be sure it works
assert not "was_called" in hook1.fire(derp=hook2)
assert not "was_called" in hook2.fire(derp=hook1)
assert not "was_called" in hook1.fire(derp=hook2)
# aaand, call without arguments. are you still there, handler?
assert "was_called" in hook2.fire()
# if we got here, yep!
def test_partial_adaptation():
hook1 = Hook()
hook2 = Hook()
@hook2
def first(event):
event.before_called = True
@hook2
def third(event):
assert event.handler_called
event.after_called = True
@hook1
@yielding
def handler(event):
event.handler_called = True
event = yield hook2(after=first, before=third)
assert event.before_called
event.handler_called = True
first_event = hook1.fire()
assert first_event.handler_called
second_event = hook2.fire()
assert second_event.after_called
def test_deferred_adaptation():
hook1 = Hook()
@hook1
@yielding
def handler(event):
event.handler_called = True
deferred_result = yield event.deferred
deferred_result.handler_called = True
event = hook1.fire(deferred=Deferred())
newevent = AttrDict()
event.deferred.callback(newevent)
assert newevent.handler_called
| lahwran/crow2 | crow2/events/test/test_yielding.py | test_yielding.py | py | 2,479 | python | en | code | 1 | github-code | 90 |
17521032380 | import sys
import os
sys.path.append(os.path.abspath('.'))
from hotword_detection import mfcc
import numpy as np
def test_mfcc_first_coefficient():
temp = mfcc.MFCC()
no_of_test = 100
input_matrix = np.random.random_sample((no_of_test,200))
def first_coeff_greater_than_0(x):
assert(x>0)
for i in range(no_of_test):
input_frame = input_matrix[i]
coeffs = temp.compute_mfcc(input_frame, include_dc=True)
yield first_coeff_greater_than_0, coeffs[0]
def test_hz2mel_mel2hz():
temp = mfcc.MFCC()
no_of_test = 100
input_freq = np.random.random_sample(no_of_test)
def isEqual(pred_hz, actual_hz):
assert(np.abs(pred_hz-actual_hz) < 0.001)
for i in range(no_of_test):
pred_hz = temp.mel2hz(temp.hz2mel(input_freq[i]))
yield isEqual, pred_hz, input_freq[i]
if __name__=="__main__":
test_mfcc_first_coefficient()
test_hz2mel_mel2hz()
| sakethgsharma/HotWordDetection | hotword_detection/test/test_mfcc.py | test_mfcc.py | py | 995 | python | en | code | 27 | github-code | 90 |
34872508690 | import gzip
import io
import os
from pathlib import Path
import subprocess
import sys
import tarfile
import textwrap
import time
import zipfile
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas as pd
import pandas._testing as tm
import pandas.io.common as icom
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
if compression_only == "tar":
compression_only = {"method": "tar", "mode": "w:gz"}
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
with icom.get_handle(
path,
"w:gz" if compression_only == "tar" else "w",
compression=compression_only,
) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=None) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
    "write_method, write_kwargs, read_method",
    [
        ("to_csv", {"index": False}, pd.read_csv),
        ("to_json", {}, pd.read_json),
        ("to_pickle", {}, pd.read_pickle),
    ],
)
def test_dataframe_compression_defaults_to_infer(
    write_method, write_kwargs, read_method, compression_only, compression_to_extension
):
    """DataFrame writers default to compression="infer": writing to a path
    whose extension implies a compression scheme must produce a file that
    round-trips when read back with that scheme made explicit."""
    # GH22004
    input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"])
    extension = compression_to_extension[compression_only]
    with tm.ensure_clean("compressed" + extension) as path:
        getattr(input, write_method)(path, **write_kwargs)
        output = read_method(path, compression=compression_only)
    tm.assert_frame_equal(output, input)
@pytest.mark.parametrize(
    "write_method,write_kwargs,read_method,read_kwargs",
    [
        ("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}),
        ("to_json", {}, pd.read_json, {"typ": "series"}),
        ("to_pickle", {}, pd.read_pickle, {}),
    ],
)
def test_series_compression_defaults_to_infer(
    write_method,
    write_kwargs,
    read_method,
    read_kwargs,
    compression_only,
    compression_to_extension,
):
    """Series writers default to compression="infer": the file written to a
    path with a compression extension must round-trip when read back with
    that scheme made explicit."""
    # GH22004
    input = pd.Series([0, 5, -2, 10], name="X")
    extension = compression_to_extension[compression_only]
    with tm.ensure_clean("compressed" + extension) as path:
        getattr(input, write_method)(path, **write_kwargs)
        # emulate the removed read_csv `squeeze` keyword by reading a
        # DataFrame and squeezing the columns axis afterwards
        if "squeeze" in read_kwargs:
            kwargs = read_kwargs.copy()
            del kwargs["squeeze"]
            output = read_method(path, compression=compression_only, **kwargs).squeeze(
                "columns"
            )
        else:
            output = read_method(path, compression=compression_only, **read_kwargs)
    # check_names=False: presumably not every format round-trips the Series
    # name — confirm per reader
    tm.assert_series_equal(output, input, check_names=False)
def test_compression_warning(compression_only):
# Assert that passing a file object to to_csv while explicitly specifying a
# compression protocol triggers a RuntimeWarning, as per GH21227.
df = pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=compression_only) as handles:
with tm.assert_produces_warning(RuntimeWarning):
df.to_csv(handles.handle, compression=compression_only)
def test_compression_binary(compression_only):
"""
Binary file handles support compression.
GH22555
"""
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD"), dtype=object),
index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
)
# with a file
with tm.ensure_clean() as path:
with open(path, mode="wb") as file:
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(path, index_col=0, compression=compression_only)
)
# with BytesIO
file = io.BytesIO()
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(file, index_col=0, compression=compression_only)
)
def test_gzip_reproducibility_file_name():
    """
    Gzip should create reproducible archives with mtime.

    Note: Archives created with different filenames will still be different!

    GH 28103
    """
    frame = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    options = {"method": "gzip", "mtime": 1}

    # writing the same frame twice (with wall-clock time advancing between
    # the writes) must yield byte-identical archives when mtime is pinned
    with tm.ensure_clean() as path:
        path = Path(path)
        frame.to_csv(path, compression=options)
        time.sleep(0.1)
        first = path.read_bytes()
        frame.to_csv(path, compression=options)
        assert first == path.read_bytes()
def test_gzip_reproducibility_file_object():
    """
    Gzip should create reproducible archives with mtime.

    GH 28103
    """
    frame = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    options = {"method": "gzip", "mtime": 1}

    def render() -> bytes:
        # write the frame to a fresh in-memory buffer and return its bytes
        buffer = io.BytesIO()
        frame.to_csv(buffer, compression=options, mode="wb")
        return buffer.getvalue()

    first = render()
    time.sleep(0.1)  # let wall-clock time advance between the two writes
    assert first == render()
@pytest.mark.single_cpu
def test_with_missing_lzma():
    """Tests if import pandas works when lzma is not present."""
    # https://github.com/pandas-dev/pandas/issues/27575
    # Run in a fresh interpreter so the stubbed-out lzma module cannot leak
    # into (or out of) the current test process.
    code = textwrap.dedent(
        """\
        import sys
        sys.modules['lzma'] = None
        import pandas
        """
    )
    subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
@pytest.mark.single_cpu
def test_with_missing_lzma_runtime():
    """Tests if RuntimeError is hit when calling lzma without
    having the module available.
    """
    # executed in a subprocess so sys.modules of this process is untouched;
    # check_output raises CalledProcessError if the inner pytest.raises fails
    code = textwrap.dedent(
        """
        import sys
        import pytest
        sys.modules['lzma'] = None
        import pandas as pd
        df = pd.DataFrame()
        with pytest.raises(RuntimeError, match='lzma module'):
            df.to_csv('foo.csv', compression='xz')
        """
    )
    subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_gzip_compression_level(obj, method):
    """A dict compression argument must forward ``compresslevel`` to gzip:
    the fastest level (1) produces a larger file than the default."""
    # GH33196
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression="gzip")
        compressed_size_default = os.path.getsize(path)
        getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1})
        compressed_size_fast = os.path.getsize(path)
        assert compressed_size_default < compressed_size_fast
@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_xz_compression_level_read(obj, method):
    """A dict compression argument must forward ``preset`` to xz: preset 1
    produces a larger file than the default, and the result stays readable."""
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression="xz")
        compressed_size_default = os.path.getsize(path)
        getattr(obj, method)(path, compression={"method": "xz", "preset": 1})
        compressed_size_fast = os.path.getsize(path)
        assert compressed_size_default < compressed_size_fast
        # reading back the preset-1 file must not raise
        if method == "to_csv":
            pd.read_csv(path, compression="xz")
@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_bzip_compression_level(obj, method):
    """GH33196 bzip needs file size > 100k to show a size difference between
    compression levels, so here we just check if the call works when
    compression is passed as a dict.
    """
    # smoke test only: must simply not raise
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1})
@pytest.mark.parametrize(
    "suffix,archive",
    [
        (".zip", zipfile.ZipFile),
        (".tar", tarfile.TarFile),
    ],
)
def test_empty_archive_zip(suffix, archive):
    """Reading an archive with no members must raise a clear ValueError.

    NOTE: despite the ``_zip`` suffix in the name, this covers both ZIP and
    TAR archives via the parametrization above.
    """
    with tm.ensure_clean(filename=suffix) as path:
        with archive(path, "w"):
            pass
        with pytest.raises(ValueError, match="Zero files found"):
            pd.read_csv(path)
def test_ambiguous_archive_zip():
    """A ZIP archive containing more than one member is ambiguous and must be
    rejected with a clear ValueError."""
    with tm.ensure_clean(filename=".zip") as path:
        with zipfile.ZipFile(path, "w") as file:
            file.writestr("a.csv", "foo,bar")
            file.writestr("b.csv", "foo,bar")
        with pytest.raises(ValueError, match="Multiple files found in ZIP file"):
            pd.read_csv(path)
def test_ambiguous_archive_tar(tmp_path):
    """A TAR archive containing more than one member is ambiguous and must be
    rejected with a clear ValueError."""
    csvAPath = tmp_path / "a.csv"
    with open(csvAPath, "w", encoding="utf-8") as a:
        a.write("foo,bar\n")
    csvBPath = tmp_path / "b.csv"
    with open(csvBPath, "w", encoding="utf-8") as b:
        b.write("foo,bar\n")

    tarpath = tmp_path / "archive.tar"
    with tarfile.TarFile(tarpath, "w") as tar:
        tar.add(csvAPath, "a.csv")
        tar.add(csvBPath, "b.csv")

    with pytest.raises(ValueError, match="Multiple files found in TAR archive"):
        pd.read_csv(tarpath)
def test_tar_gz_to_different_filename():
    """tar+gzip compression requested via a dict must be applied even when
    the target filename carries an unrelated extension (".foo")."""
    with tm.ensure_clean(filename=".foo") as file:
        pd.DataFrame(
            [["1", "2"]],
            columns=["foo", "bar"],
        ).to_csv(file, compression={"method": "tar", "mode": "w:gz"}, index=False)
        # unwrap manually: gzip layer first, then the tar archive inside it
        with gzip.open(file) as uncompressed:
            with tarfile.TarFile(fileobj=uncompressed) as archive:
                members = archive.getmembers()
                assert len(members) == 1
                content = archive.extractfile(members[0]).read().decode("utf8")

                if is_platform_windows():
                    expected = "foo,bar\r\n1,2\r\n"
                else:
                    expected = "foo,bar\n1,2\n"

                assert content == expected
def test_tar_no_error_on_close():
    """Closing a _BytesTarFile to which nothing was written must not raise."""
    with io.BytesIO() as buffer:
        with icom._BytesTarFile(fileobj=buffer, mode="w"):
            pass
| pandas-dev/pandas | pandas/tests/io/test_compression.py | test_compression.py | py | 12,343 | python | en | code | 40,398 | github-code | 90 |
25209840398 | from io import StringIO
import sys
import streamlit as st
from krr_system import (
TimeDomainDescription,
Fluent,
Scenario,
)
# to be deleted when TimeDomainDescription.description() will return value instead of print
class Capturing(list):
    """Context manager that captures everything printed to stdout.

    While the ``with`` block runs, ``sys.stdout`` is redirected into an
    in-memory buffer; on exit the captured text is split into lines and
    stored in the list itself, so ``with Capturing() as out:`` yields the
    printed lines as ``out``.
    """

    def __enter__(self):
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *args):
        captured = self._stringio.getvalue()
        self.extend(captured.splitlines())
        del self._stringio  # free up some memory
        sys.stdout = self._stdout
file_fluents = open("variables/fluents.txt", "r")
list_of_fluents = file_fluents.read()
file_actions = open("variables/actions.txt", "r")
list_of_actions = file_actions.read()
file_statements = open("variables/statements.txt", "r")
list_of_statements = file_statements.read()
file_initial_states = open("variables/initial_states.txt", "r")
list_of_initial_states = file_initial_states.read()
file_observations = open("variables/observations.txt", "r")
list_of_observations = file_observations.read()
file_action_occurences = open("variables/action_occurences.txt", "r")
list_of_action_occurences = file_action_occurences.read()
file_technical_vars = open("variables/technical_variables.txt", "r")
list_of_technical_vars = file_technical_vars.read()
file_fluents.close()
file_actions.close()
file_statements.close()
file_initial_states.close()
file_observations.close()
file_action_occurences.close()
file_technical_vars.close()
st.title("Knowledge Representation and Reasoning")
st.header("Project D: Actions with Duration")
reset_button = st.button(label="Reset environment")
st.header("Domain Description")
if reset_button:
list_of_fluents = ""
list_of_actions = ""
list_of_statements = ""
list_of_initial_states = ""
list_of_observations = ""
list_of_action_occurences = ""
list_of_technical_vars = "1;1"
file_fluents = open("variables/fluents.txt", "w")
file_actions = open("variables/actions.txt", "w")
file_statements = open("variables/statements.txt", "w")
file_initial_states = open("variables/initial_states.txt", "w")
file_observations = open("variables/observations.txt", "w")
file_action_occurences = open("variables/action_occurences.txt", "w")
file_technical_vars = open("variables/technical_variables.txt", "w")
file_fluents.write("")
file_actions.write("")
file_statements.write("")
file_initial_states.write("")
file_observations.write("")
file_action_occurences.write("")
file_technical_vars.write("1;1")
file_fluents.close()
file_actions.close()
file_statements.close()
file_initial_states.close()
file_observations.close()
file_action_occurences.close()
file_technical_vars.close()
# fluents input
col1, col2 = st.columns([5, 1])
with col1:
fluent_input = st.text_input(label="Input fluent", max_chars=100)
with col2:
st.text("")
fluent_button = st.button(label="Submit fluent")
if fluent_button:
file_fluents = open("variables/fluents.txt", "a")
if len(list_of_fluents) == 0:
file_fluents.write(fluent_input)
list_of_fluents += fluent_input
else:
list_of_fluents_splitted = list_of_fluents.split(",")
if fluent_input not in list_of_fluents_splitted:
file_fluents.write("," + fluent_input)
list_of_fluents += "," + fluent_input
file_fluents.close()
# actions input
col1, col2, col3 = st.columns([3, 2, 1])
with col1:
action_input = st.text_input(label="Input action", max_chars=100)
with col2:
duration_input = st.number_input(label="Input duration", min_value=1, step=1)
with col3:
st.text("")
action_button = st.button(label="Submit action")
if action_button:
action_couple = f"{action_input};{duration_input}"
file_actions = open("variables/actions.txt", "a")
if len(list_of_actions) == 0:
file_actions.write(action_couple)
list_of_actions += action_couple
else:
list_of_actions_splitted = list_of_actions.split(",")
if action_couple not in list_of_actions_splitted:
file_actions.write("," + action_couple)
list_of_actions += "," + action_couple
file_actions.close()
# duration modification
col1, col2, col3 = st.columns([3, 2, 1])
actions = list_of_actions.split(",")
if len(list_of_actions) == 0:
action_names = []
action_durations_values = []
else:
action_names = [action.split(";")[0] for action in actions]
action_durations_values = [action.split(";")[1] for action in actions]
with col1:
duration_action = st.selectbox("Choose action to modify duration", action_names)
with col2:
duration = st.number_input(label="Modify duration", min_value=1, step=1)
with col3:
duration_button = st.text("")
if len(action_names) > 0:
duration_button = st.button(label="Submit duration")
else:
duration_button = st.button(label="Submit duration", disabled=True)
if duration_button:
file_actions = open("variables/actions.txt", "w")
action_index = action_names.index(duration_action)
action_durations_values[action_index] = str(duration)
new_durations = [
f"{action_names[i]};{action_durations_values[i]}"
for i in range(len(action_names))
]
list_of_actions = ",".join(new_durations)
file_actions.write(list_of_actions)
file_actions.close()
# statement input
condition_values, fluent_values = list_of_technical_vars.split(";")
condition_values = int(condition_values)
fluent_values = int(fluent_values)
st.subheader("Statements")
col1, col2, col3, col4 = st.columns([4, 3, 3, 2])
with col1:
statement_action = st.selectbox(
"Choose action", [action.split(";")[0] for action in list_of_actions.split(",")]
)
statement_type = st.radio(
"Choose type of statement", ("causes", "releases", "impossible")
)
with col2:
if statement_type != "impossible":
statement_fluent_button = st.button(label="Add new fluent")
statement_fluent_button_remove = st.button(label="Remove fluent")
if statement_fluent_button:
if fluent_values > 4:
st.write("!!! Too many values !!!")
else:
fluent_values += 1
elif statement_fluent_button_remove and fluent_values > 0:
fluent_values -= 1
# 1st statement
if fluent_values > 0:
statement_fluent_1 = st.selectbox(
"Choose fluent 1st", list_of_fluents.split(",")
)
statement_fluent_false_1 = st.checkbox("False", key="fluent_false_1")
statement_fluent_state_1 = "False" if statement_fluent_false_1 else "True"
# 2nd statement
if fluent_values > 1:
statement_fluent_2 = st.selectbox(
"Choose fluent 2nd", list_of_fluents.split(",")
)
statement_fluent_false_2 = st.checkbox("False", key="fluent_false_2")
statement_fluent_state_2 = "False" if statement_fluent_false_2 else "True"
# 3rd statement
if fluent_values > 2:
statement_fluent_3 = st.selectbox(
"Choose fluent 3rd", list_of_fluents.split(",")
)
statement_fluent_false_3 = st.checkbox("False", key="fluent_false_3")
statement_fluent_state_3 = "False" if statement_fluent_false_3 else "True"
# 4th statement
if fluent_values > 3:
statement_fluent_4 = st.selectbox(
"Choose fluent 4th", list_of_fluents.split(",")
)
statement_fluent_false_4 = st.checkbox("False", key="fluent_false_4")
statement_fluent_state_4 = "False" if statement_fluent_false_4 else "True"
# 5th statement
if fluent_values > 4:
statement_fluent_5 = st.selectbox(
"Choose fluent 5th", list_of_fluents.split(",")
)
statement_fluent_false_5 = st.checkbox("False", key="fluent_false_5")
statement_fluent_state_5 = "False" if statement_fluent_false_5 else "True"
with col3:
statement_condition_button = st.button(label="Add new condition")
statement_condition_button_remove = st.button(label="Remove condition")
if statement_condition_button:
if condition_values > 4:
st.write("!!! Too many conditions !!!")
else:
condition_values += 1
elif statement_condition_button_remove and condition_values > 0:
condition_values -= 1
# 1st statement condition
if condition_values > 0:
statement_condition_1 = st.selectbox(
"Choose condition 1st", list_of_fluents.split(",")
)
statement_condition_false_1 = st.checkbox("False", key="condition_false_1")
statement_condition_state_1 = "False" if statement_condition_false_1 else "True"
# 2nd statement condition
if condition_values > 1:
statement_condition_2 = st.selectbox(
"Choose condition 2nd", list_of_fluents.split(",")
)
statement_condition_false_2 = st.checkbox("False", key="condition_false_2")
statement_condition_state_2 = "False" if statement_condition_false_2 else "True"
# 3rd statement condition
if condition_values > 2:
statement_condition_3 = st.selectbox(
"Choose condition 3rd", list_of_fluents.split(",")
)
statement_condition_false_3 = st.checkbox("False", key="condition_false_3")
statement_condition_state_3 = "False" if statement_condition_false_3 else "True"
# 4th statement condition
if condition_values > 3:
statement_condition_4 = st.selectbox(
"Choose condition 4th", list_of_fluents.split(",")
)
statement_condition_false_4 = st.checkbox("False", key="condition_false_4")
statement_condition_state_4 = "False" if statement_condition_false_4 else "True"
# 5th statement condition
if condition_values > 4:
statement_condition_5 = st.selectbox(
"Choose condition 5th", list_of_fluents.split(",")
)
statement_condition_false_5 = st.checkbox("False", key="condition_false_5")
statement_condition_state_5 = "False" if statement_condition_false_5 else "True"
with col4:
submit_button = st.text("")
submit_button = st.button(label="Submit statement")
file_technical_vars = open("variables/technical_variables.txt", "w")
file_technical_vars.write(f"{condition_values};{fluent_values}")
file_technical_vars.close()
if submit_button:
statement_conditions = ""
if condition_values > 0:
statement_conditions += f"{statement_condition_1}#{statement_condition_state_1}"
if condition_values > 1:
statement_conditions += (
f":{statement_condition_2}#{statement_condition_state_2}"
)
if condition_values > 2:
statement_conditions += (
f":{statement_condition_3}#{statement_condition_state_3}"
)
if condition_values > 3:
statement_conditions += (
f":{statement_condition_4}#{statement_condition_state_4}"
)
if condition_values > 4:
statement_conditions += (
f":{statement_condition_5}#{statement_condition_state_5}"
)
if statement_type != "impossible":
statement_fluents = ""
if fluent_values > 0:
statement_fluents += f"{statement_fluent_1}#{statement_fluent_state_1}"
if fluent_values > 1:
statement_fluents += f":{statement_fluent_2}#{statement_fluent_state_2}"
if fluent_values > 2:
statement_fluents += f":{statement_fluent_3}#{statement_fluent_state_3}"
if fluent_values > 3:
statement_fluents += f":{statement_fluent_4}#{statement_fluent_state_4}"
if fluent_values > 4:
statement_fluents += f":{statement_fluent_5}#{statement_fluent_state_5}"
statement_quartet = f"{statement_action};{statement_type};{statement_fluents};{statement_conditions}"
elif statement_type == "impossible":
statement_quartet = (
f"{statement_action};{statement_type};;{statement_conditions}"
)
# else:
# statement_quartet = f"{statement_action};{statement_type};{statement_fluent};{statement_fluent_state};;;"
file_statements = open("variables/statements.txt", "a")
if len(list_of_statements) == 0:
file_statements.write(statement_quartet)
list_of_statements += statement_quartet
else:
list_of_statements_splitted = list_of_statements.split(",")
if statement_quartet not in list_of_statements_splitted:
file_statements.write("," + statement_quartet)
list_of_statements += "," + statement_quartet
st.write(list_of_statements)
file_statements.close()
st.write(f"{statement_quartet}")
st.write(list_of_statements.split(","))
# initial condition input
st.subheader("Initial condition")
col1, col2 = st.columns([5, 1])
with col1:
initial_state_fluent = st.selectbox(
key="initial_state_fluent",
label="Choose fluent",
options=list_of_fluents.split(","),
)
initial_state_fluent_false = st.checkbox(
key="initial_state_fluent_false", label="False"
)
initial_state_fluent_value = "False" if initial_state_fluent_false else "True"
with col2:
st.write("")
initial_state = st.button(label="Submit inital state")
if initial_state:
initial_state_couple = f"{initial_state_fluent};{initial_state_fluent_value}"
file_initial_states = open("variables/initial_states.txt", "a")
if len(list_of_initial_states) == 0:
file_initial_states.write(initial_state_couple)
list_of_initial_states += initial_state_couple
else:
list_of_initial_states_splitted = list_of_initial_states.split(",")
if initial_state_fluent not in [
initial.split(";")[0] for initial in list_of_initial_states_splitted
]:
file_initial_states.write("," + initial_state_couple)
list_of_initial_states += "," + initial_state_couple
file_initial_states.close()
st.header("Scenario")
# observations input
st.subheader("Observations")
col1, col2, col3 = st.columns([3, 2, 1])
with col1:
observation_fluent = st.selectbox(
key="observation_fluent",
label="Choose fluent",
options=list_of_fluents.split(","),
)
observation_fluent_false = st.checkbox(
key="observation_fluent_false", label="False"
)
observation_fluent_value = "False" if observation_fluent_false else "True"
with col2:
observation_fluent_time = st.number_input(
key="observation_fluent_time", label="Choose observation time", min_value=1
)
with col3:
st.write("")
observation = st.button(label="Submit observation")
if observation:
observation_couple = (
f"{observation_fluent};{observation_fluent_value};{observation_fluent_time}"
)
file_observations = open("variables/observations.txt", "a")
if len(list_of_observations) == 0:
file_observations.write(observation_couple)
list_of_observations += observation_couple
else:
list_of_observations_splitted = list_of_observations.split(",")
existing_observations = [
[obs.split(";")[0], obs.split(";")[2]]
for obs in list_of_observations_splitted
]
duplicate_observation = [observation_fluent, str(observation_fluent_time)]
if duplicate_observation not in existing_observations:
file_observations.write("," + observation_couple)
list_of_observations += "," + observation_couple
file_observations.close()
# action occurences input
st.subheader("Action occurences")
col1, col2, col3 = st.columns([3, 2, 1])
with col1:
action_occurence = st.selectbox(
key="action_occurence",
label="Choose action occurence",
options=[action.split(";")[0] for action in list_of_actions.split(",")],
)
with col2:
action_occurence_time = st.number_input(
key="action_occurence_time", label="Choose occurence time", min_value=1
)
with col3:
st.write("")
action_occurence_button = st.button(label="Submit action occurence")
if action_occurence_button:
action_occurence_couple = f"{action_occurence};{action_occurence_time}"
file_action_occurences = open("variables/action_occurences.txt", "a")
st.write()
if len(list_of_action_occurences) == 0:
file_action_occurences.write(action_occurence_couple)
list_of_action_occurences += action_occurence_couple
else:
list_of_action_occurence_times = [
ac.split(";")[1] for ac in list_of_action_occurences.split(",")
]
if action_occurence_time not in list_of_action_occurence_times:
file_action_occurences.write("," + action_occurence_couple)
list_of_action_occurences += "," + action_occurence_couple
# model preparation
calculate_button = st.button("Calculate model")
def scenario_calculation():
    """Build a TimeDomainDescription from the module-level state strings and
    return ``(Scenario, description)``.

    Reads the globals ``list_of_initial_states``, ``list_of_actions``,
    ``list_of_statements``, ``list_of_observations`` and
    ``list_of_action_occurences`` — comma-separated records using ``;`` as
    the field separator and ``:``/``#`` as sub-separators (as written by the
    UI code above).
    """
    m = TimeDomainDescription()
    # initial fluent values: "name;True|False" records
    if len(list_of_initial_states) > 0:
        for initial_state in list_of_initial_states.split(","):
            state = initial_state.split(";")
            m.initially(**{state[0]: state[1] == "True"})
    # action durations: "action;duration" records
    if len(list_of_actions) > 0:
        for duration in list_of_actions.split(","):
            state = duration.split(";")
            m.duration(state[0], int(state[1]))
    # effect statements: "action;type;fluent#bool:...;condition#bool:..." records
    if len(list_of_statements) > 0:
        for statement in list_of_statements.split(","):
            stmnt = statement.split(";")
            if stmnt[1] == "causes" or stmnt[1] == "releases":
                statement_fluents_model = stmnt[2].split(":")
                fluent_values_model = []
                for i in statement_fluents_model:
                    fluent_values_model.append(
                        Fluent(**{i.split("#")[0]: i.split("#")[1] == "True"})
                    )
                statement_conditions_model = stmnt[3].split(":")
                condition_values_model = []
                try:
                    for i in statement_conditions_model:
                        condition_values_model.append(
                            Fluent(**{i.split("#")[0]: i.split("#")[1] == "True"})
                        )
                except:
                    # NOTE(review): bare except silently drops malformed
                    # condition records — consider catching IndexError only
                    pass
                if stmnt[1] == "causes":
                    m.causes(
                        stmnt[0],
                        fluent_values_model,
                        conditions=condition_values_model,
                    )
                elif stmnt[1] == "releases":
                    # NOTE(review): the parsed conditions are not forwarded
                    # here — confirm whether releases should take conditions
                    m.releases(stmnt[0], fluent_values_model)
            elif stmnt[1] == "impossible":
                # NOTE(review): for "impossible" records, stmnt[3] is never
                # parsed; condition_values_model holds the value from a
                # previous loop iteration (or is unbound on the first one) —
                # verify this branch
                m.impossible(stmnt[0], conditions=condition_values_model)
    OBS = ()
    if len(list_of_observations) > 0:
        OBS_list = []
        for observation in list_of_observations.split(","):
            obs = observation.split(";")
            OBS_list.append(Fluent(**{obs[0]: obs[1] == "True"}))
        # NOTE(review): only the time of the LAST observation (obs[2]) is
        # kept for the whole list — confirm all observations share one time
        OBS = (OBS_list, int(obs[2]))
    ACS = ()
    if len(list_of_action_occurences) > 0:
        ACS_list = []
        for action in list_of_action_occurences.split(","):
            acs = action.split(";")
            ACS_list.append((acs[0], int(acs[1])))
        ACS = tuple(ACS_list)
    # with Capturing() as output:
    #     m.description()
    return Scenario(domain=m, observations=OBS, action_occurances=ACS), m.description()
if calculate_button:
s, output = scenario_calculation()
try:
s_result = s.is_consistent(verbose=True)
st.text(output)
st.write(f"Is consistent: {s_result}")
except Exception as e:
st.write(f"Your mistake: {e}")
# action query
st.subheader("Action query")
col1, col2, col3 = st.columns([3, 2, 1])
with col1:
action_query = st.selectbox(
key="action_query",
label="Choose action",
options=[action.split(";")[0] for action in list_of_actions.split(",")],
)
with col2:
action_query_time = st.number_input(
key="action_query_time", label="Choose time", min_value=1
)
with col3:
st.write("")
action_query_button = st.button(
key="action_query_button", label="Calculate action query"
)
if action_query_button:
    # scenario_calculation() returns a (Scenario, description) tuple (see the
    # unpacking at the "Calculate model" button above).  The original code
    # assigned the whole tuple to `s`, so `s.is_consistent` always raised
    # AttributeError, which the except below mis-reported as a user mistake.
    s, description = scenario_calculation()
    try:
        with Capturing() as output:
            s_result = s.is_consistent(verbose=True)
            if s_result:
                a_result = s.does_action_perform(action_query, action_query_time)
            else:
                a_result = False
        st.write(output)
        st.write(f"Does action perform: {a_result}")
    except Exception as e:
        st.write(f"Your mistake: {e}")
# condition query
st.subheader("Condition query")
col1, col2, col3 = st.columns([3, 2, 1])
with col1:
condition_query = st.selectbox(
key="condition_query",
label="Choose condition",
options=list_of_fluents.split(","),
)
condition_query_false = st.checkbox(key="condition_query_false", label="False")
condition_query_value = "False" if condition_query_false else "True"
with col2:
condition_query_time = st.number_input(
key="condition_query_time", label="Choose time", min_value=1
)
with col3:
st.write("")
condition_query_button = st.button(
key="condition_query_button", label="Calculate condition query"
)
if condition_query_button:
    # scenario_calculation() returns a (Scenario, description) tuple; it must
    # be unpacked before use.  The original assigned the tuple to `s`, so
    # `s.is_consistent` raised AttributeError and the query never ran.
    s, description = scenario_calculation()
    try:
        with Capturing() as output:
            s_result = s.is_consistent()
            if s_result:
                q_result = s.check_if_condition_hold(Fluent(**{condition_query: condition_query_value == "True"}), condition_query_time, verbose=True)
            else:
                q_result = False
        st.write(output)
        if q_result is None:
            st.write("Condition possible, but unnecessary")
        elif q_result:
            st.write("Condition necessary")
        else:
            st.write("Condition impossible")
    except Exception as e:
        st.write(f"Your mistake: {e}")
# sidebar with current values
with st.sidebar:
st.subheader("Current example")
if len(list_of_fluents) == 0:
st.text("--- no fluents inserted ---")
else:
st.text("Fluents")
for fluent in list_of_fluents.split(","):
st.text("- " + fluent)
if len(list_of_actions) == 0:
st.text("--- no actions inserted ---")
else:
action_durations = list_of_actions.split(",")
action_names = [
action_duration.split(";")[0] for action_duration in action_durations
]
action_durations_values = [
action_duration.split(";")[1] for action_duration in action_durations
]
st.text("Actions (duration)")
for action in action_names:
st.text(
"- "
+ action
+ " ("
+ action_durations_values[action_names.index(action)]
+ ")"
)
if len(list_of_statements) == 0:
st.text("--- no statements inserted ---")
else:
st.text("Statements")
for statement in list_of_statements.split(","):
stmnt = statement.split(";")
statement_fluents_description = (
stmnt[2].replace("#", " = ").replace(":", " AND ")
)
statement_condtions_description = (
stmnt[3].replace("#", " = ").replace(":", " AND ")
)
if stmnt[1] != "impossible":
st.text(
f"ACTION {stmnt[0]} {stmnt[1]} FLUENT {statement_fluents_description} GIVEN THAT {statement_condtions_description}"
)
else:
st.write(stmnt)
st.text(
f"ACTION {stmnt[0]} {stmnt[1]} GIVEN THAT {statement_condtions_description}"
)
action_durations = list_of_actions.split(",")
if len(list_of_initial_states) == 0:
st.text("--- no initial values inserted ---")
else:
st.text("Initial state")
for initial_state in list_of_initial_states.split(","):
state = initial_state.split(";")
st.text(f"- {state[0]}={state[1]}")
if len(list_of_observations) == 0:
st.text("--- no observations inserted ---")
else:
st.text("Observations")
for observation in list_of_observations.split(","):
obs = observation.split(";")
st.text(f"- {obs[0]}={obs[1]} ({obs[2]})")
if len(list_of_action_occurences) == 0:
st.text("--- no action occurences inserted ---")
else:
st.text("Action occurences")
for action_occurence in list_of_action_occurences.split(","):
act = action_occurence.split(";")
st.text(f"- {act[0]}={act[1]}")
| GiveMeMoreData/krr_system | app.py | app.py | py | 24,950 | python | en | code | 0 | github-code | 90 |
import sys
import os

# Directory to scan is given as the first command-line argument.
folder = sys.argv[1]

extension_count = {}
longest = ""
shortest = ""
second_c = ""

# Walk the whole tree once, aggregating statistics over every file found.
# (The original reset longest/shortest on each directory iteration, so only
# the files of the last directory visited were actually considered, and it
# crashed with IndexError on empty directories and 1-character filenames.)
for _, _, files in os.walk(folder):
    for file in files:
        # longest / shortest file name seen so far
        if longest == "" or len(file) > len(longest):
            longest = file
        if shortest == "" or len(file) < len(shortest):
            shortest = file
        # first file whose second letter is "c" (guarded for short names)
        if second_c == "" and len(file) > 1 and file[1] == "c":
            second_c = file
        # count files per extension (text after the last dot)
        extension = file.split(".")[-1]
        extension_count[extension] = extension_count.get(extension, 0) + 1

# Sort extensions by count descending, tie-broken on the extension's second
# character as in the original (guarded for 1-character extensions).
sorted_extensions = sorted(
    extension_count.items(),
    key=lambda item: (item[1], item[0][1] if len(item[0]) > 1 else ""),
    reverse=True,
)
print(sorted_extensions)

# Write the report; "with" guarantees the file is closed even on error.
try:
    with open("output.txt", "w") as f:
        f.write(f"Longest: {longest}\n")
        f.write(f"Shortest: {shortest}\n")
        f.write(f"Second c: {second_c}\n")
        f.write(f"Extensions: {sorted_extensions}\n")
except OSError:
    print("Error opening the file")
| nicolaee2/python-lab | main/4.py | 4.py | py | 1,275 | python | en | code | 0 | github-code | 90 |
from pycipher import Caesar
# !pip install pycipher


def cipher(message, offset):
    """Encrypt ``message`` with a Caesar cipher shifted by ``offset`` and
    print both a caption and the ciphertext."""
    encrypted_message = Caesar(key=offset).encipher(message)
    print(f"{message} encrypted with the Caesar cipher with an offset of {offset}: ")
    print(encrypted_message)


if __name__ == "__main__":
    # Prompt only when run as a script; the original called input() at module
    # level, which made the file impossible to import without blocking.
    my_message = input("Enter message to encrypt: ")
    my_offset = int(input("Enter the offset: "))
    cipher(my_message, my_offset)
18344452139 | # LRRRRRRL, LRL は同じ(不幸な人は3人)
# L, R で分割してやると一回の操作でLRL -> LLL -> で不幸な人が2人へる LR の時は LLで1人減る
n, k = list(map(int, input().split(' ')))
s = input()
# LLRRRLL -> 1,1,1に変換して合体させていく
old = ''
converted = []
for c in s:
if old != c:
converted.append(1)
old = c
temp = max(len(converted) - 2*k, 1) # 必ず一人は犠牲になる
print(n - temp) | Aasthaengg/IBMdataset | Python_codes/p02918/s244071513.py | s244071513.py | py | 460 | python | ja | code | 0 | github-code | 90 |
10885959002 | import streamlit as st
import pandas as pd
import numpy as np
from streamlit_folium import folium_static
from utils import (
get_folium_map,
add_dc_markers,
get_nearsest_dc,
get_initial_map,
get_initial_map_opt,
)
from config import dc_lat_lng, dc_colors, capacity_grid_options, dc_capacity
from st_aggrid import (
AgGrid,
GridOptionsBuilder,
GridUpdateMode,
JsCode,
ColumnsAutoSizeMode,
)
from ui import header_ui, sidebar_ui
st.set_page_config(layout="wide")
# Function
# Read the optimized data
def load_optimized_df():
opt_df = pd.read_csv("data/optimized_data.csv")
st.session_state["optimal_df"] = opt_df
return opt_df
# Header part
header_ui()
# Read the Distance Matrix Dataframe
input_df = pd.read_csv("data/distance_matrix.csv")
cur_df = get_nearsest_dc(input_df)
cur_capacity = cur_df.groupby(["DC"])["Demand"].sum().reset_index()
# Create Initial map based on distance
us_map = get_folium_map()
us_map = add_dc_markers(dc_lat_lng, dc_colors, us_map)
if "optimal_df" not in st.session_state:
us_map = get_initial_map(cur_df, us_map)
else:
opt_df = st.session_state["optimal_df"]
# st.write(opt_df.type)
us_map = get_initial_map_opt(opt_df, us_map)
folium_static(us_map, width=900)
if st.session_state.get("optimal_df", None) is not None:
sel_cols = [
"city",
"curr_DC",
"DC",
"user_input_dc",
"Demand",
"Cost Deviation",
"lat",
"lng",
]
opt_df = st.session_state["optimal_df"]
# opt_df = opt_df[sel_cols]
gd2 = GridOptionsBuilder.from_dataframe(opt_df)
gd2.configure_default_column(hide=True, editable=False)
gd2.configure_column(field="city", header_name="Zone Name", hide=False)
gd2.configure_column(field="curr_DC", header_name="Optimal DC", hide=False)
gd2.configure_column(field="DC", header_name="Recommended DC", hide=False)
gd2.configure_column(
field="Demand", header_name="Total Orders", hide=False, editable=True
)
gd2.configure_column(
field="Cost Deviation", header_name="Cost Deviation", hide=False
)
cs = JsCode(
"""
function(params){
if (params.data.user_input_dc != params.data.DC) {
return {
'backgroundColor' : '#FFCCCB'
}
}
};
"""
)
editor_params = JsCode(
"""function(params) {
var selectedCountry = params.data.country;
if (['Excel', 'Vina', 'Argonia', 'Arion'].includes(params.data.city)) {
return {
values: [
"Fresno",
"SLC",
"Olathe",
"Macon"
]
};
} else {
return {
values: [
"Fresno",
"SLC",
"Olathe",
"Indy",
"Hamburg",
"Macon",
"Charlotte",
]
};
}
}"""
)
gd2.configure_column(
field="user_input_dc",
header_name="User Input DC",
editable=True,
cellStyle=cs,
hide=False,
cellEditor="agSelectCellEditor",
cellEditorParams=editor_params,
)
grid_options2 = gd2.build()
# section to add 3 metrics (dummy values)
col1, col2, col3 = st.columns(3)
col1.metric("Total Route Changes", 30)
col2.metric("Optimal Cost(Based on Distance)", "$ 1,555")
col3.metric("Recommended Cost(After Optimization)", "$ 1,380", "11%", delta_color="normal")
_, opt_col, _ = st.columns([1, 8, 1])
with opt_col:
# st.subheader("Optimal DC", )
st.markdown(
"<h2 style='text-align: center; color: black;'>Optimal DC Routes</h2>",
unsafe_allow_html=True,
)
st.header("")
optimal_df2 = AgGrid(
opt_df,
grid_options2,
height=500,
allow_unsafe_jscode=True,
fit_columns_on_grid_load=True,
theme="alpine",
columns_auto_size_mode=ColumnsAutoSizeMode.FIT_CONTENTS
# custome_css=custom_css,
)["data"]
optimal_df2.loc[(st.session_state["optimal_df"]['user_input_dc'] != optimal_df2['user_input_dc']), 'Cost Deviation'] = 100
optimal_df2["Color"] = optimal_df2["user_input_dc"].map(dc_colors)
if not st.session_state["optimal_df"].equals(optimal_df2):
st.session_state["optimal_df"] = optimal_df2
st.experimental_rerun()
# Sidebar
with st.sidebar:
sidebar_ui()
tt = pd.DataFrame(dc_capacity)
if "optimal_df" not in st.session_state:
tt = pd.merge(tt, cur_capacity)
else:
opt_df = st.session_state["optimal_df"]
cur_capacity = opt_df.groupby(["user_input_dc"])["Demand"].sum().reset_index()
cur_capacity.columns = ["DC", "Demand"]
tt = pd.merge(tt, cur_capacity)
jscode = JsCode(
"""
function(params) {
if (params.data.Demand > params.data.Capacity) {
return {
'color': 'black',
'backgroundColor': '#FFCCCB'
}
}
};
"""
)
capacity_grid_options["getRowStyle"] = jscode
grid_return = AgGrid(
tt,
capacity_grid_options,
fit_columns_on_grid_load=True,
allow_unsafe_jscode=True,
)
st.sidebar.button(label="Optimize", on_click=load_optimized_df)
st.sidebar.file_uploader("Upload Custom Demand File", type=["csv", "excel"])
| chakra1166/s2p-route-opt | main.py | main.py | py | 5,542 | python | en | code | 0 | github-code | 90 |
42985852816 | from math import *
t=int(input())
def solve(n):
sum=0.0
if n %2==1:
for i in range(1,n+1,2):
sum += (1.0/i)
else:
for i in range(2,n+1,2):
sum += (1.0/i)
# lamf tròn đến 6 chữ số phần thập phân
print('{:.6f}'.format(sum))
while t >0:
n=int(input())
solve(n)
t-=1 | nguyenkien0703/python_ptit | PY01036.py | PY01036.py | py | 350 | python | vi | code | 0 | github-code | 90 |
2438899006 | import json
import handler.TransformClickHouseData as TCH
class AppLambdaDelegate:
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def exec(self):
event = self.event
event = json.loads(event.get("Records")[0].get("body"))
records = event["Records"]
for record in records:
print('EventID: ' + record['eventID'])
print('EventName: ' + record['eventName'])
print(record)
eventName = record["eventName"].lower()
if eventName == "insert":
new_image = record["dynamodb"]["NewImage"]
jobCat = new_image["jobCat"]["S"]
TCH.run(eventName, jobCat, new_image)
else:
continue
| PharbersDeveloper/phlambda | deprecated/phtransformschema/src/delegate/AppLambdaDelegate.py | AppLambdaDelegate.py | py | 804 | python | en | code | 0 | github-code | 90 |
19646907102 | from aiokafka import AIOKafkaConsumer
import asyncio
from pydantic import BaseSettings
try:
from dotenv import load_dotenv
except ImportError:
pass
else:
load_dotenv()
class Settings(BaseSettings):
KAFKA_HOST: str
KAFKA_PORT: int
settings = Settings()
async def consume():
consumer = AIOKafkaConsumer(
'my_topic',
bootstrap_servers=f'{settings.KAFKA_HOST}:{settings.KAFKA_PORT}',
group_id="my-group"
)
# Get cluster layout and join group `my-group`
await consumer.start()
try:
# Consume messages
async for msg in consumer:
print("consumed: ", msg.topic, msg.partition, msg.offset,
msg.key, msg.value, msg.timestamp)
finally:
# Will leave consumer group; perform autocommit if enabled.
await consumer.stop()
if __name__ == '__main__':
asyncio.run(consume()) | Vitaliy3000/python_advanced_tutorial | Asynchrony/consumer.py | consumer.py | py | 900 | python | en | code | 0 | github-code | 90 |
9037630408 | import serial
def validHeartRateValue(heart_rate):
if heart_rate < 90 or heart_rate > 150:
return True
return False
def validSpo2Value(spo2):
if spo2 < 100 and spo2 > 40:
return True
return False
def getMaxValues(heart_rate_values, spo2_values):
max_heart_rate = max(heart_rate_values)
max_spo2_value = max(spo2_values)
return max_heart_rate, max_spo2_value
def splitValues(values):
heart_rate, spo2 = values.split('/')
spo2 = spo2.split('\r')
return heart_rate, spo2[0]
def pickValues():
count = 0
heart_rate_values = []
spo2_values = []
while count < 25:
while serial_setting.inWaiting():
values = serial_setting.readline().decode()
print(values)
if values not in incorrectValues():
heart_rate, spo2 = splitValues(values)
heart_rate_values.append(float(heart_rate))
spo2_values.append(int(spo2))
count += 1
return heart_rate_values, spo2_values
def incorrectValues():
return ["ets Jun 8 2016 00:22:57\r\n", "\r\n", "rst:0x1 (POWERON_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)\r\n", "configsip: 0, SPIWP:0xee\r\n", "clk_drv:0x00,q_drv:0x00,d_drv:0x00,cs0_drv:0x00,hd_drv:0x00,wp_drv:0x00\r\n", "clk_drv:0x00,q_drv:0x00,d_drv:0x00,cs0_drv:0x00,hd_drv:0x00,wp_drv:0x00\r\n", "mode:DIO, clock div:1\r\n", "load:0x3fff0018,len:4\r\n", "load:0x3fff001c,len:1216\r\n", "ho 0 tail 12 room 4\r\n", "load:0x40078000,len:10944\r\n", "load:0x40080400,len:6388\r\n", "entry 0x400806b4\r\n", "Beat!\r\n"]
serial_setting = serial.Serial('COM6', 115200) | GabrielRodriguesDeveloper/Pulse-Oximeter-Telegram-Bot-Python | values.py | values.py | py | 1,647 | python | en | code | 0 | github-code | 90 |
25908702552 | # File: lab7.py
# Author: Edward Hanson
# Date: 10/13/2015
# Section: 18
# E-mail: ehanson1@umbc.edu
# Description:
def main():
items = ["shoes", "socks", "hat", "belt", "blouse", "dress", "tie"]
prices = [ 54.99, 7.11, 6.49, 22.58, 21.73, 38.99, 14.83]
LISTLENGTH = 7
cash = 100.00
response = ""
print("Welcome to our online store!")
while response != 0:
print("You have $ " + str(cash) + " in funds available.")
for element in range(0, LISTLENGTH):
print(str(element+1) + " - " + items[element] + " \t$ " + str(prices[element]))
response = int(input("Please choose an item number to purchase, or '0' to quit: "))
for option in range(1, LISTLENGTH + 1):
if response == option:
cash = cash - prices[option - 1]
if cash < 0:
print("Sorry, but you are unable to afford that item\n")
cash = cash + prices[option - 1]
else:
print("Thank you for purchasing: "+ items[option - 1]+"\n")
print("Thank you for visiting our store!")
main()
| Rudedaisy/CMSC-201 | Labs/lab7/lab7.py | lab7.py | py | 1,158 | python | en | code | 0 | github-code | 90 |
18476081909 | n=int(input())
lim_keta=len(str(n))
def dfs(keta,num):
if int(num)<=n and len(set(num))==3:
global cnt
cnt+=1
if keta==lim_keta:
return
for i in '753':
dfs(keta+1,num+i)
cnt=0
for i in '753':
dfs(1,i)
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p03212/s316989574.py | s316989574.py | py | 241 | python | en | code | 0 | github-code | 90 |
18151059969 | from collections import deque
N, M = [int(x) for x in input().split()]
route = {int(x): [] for x in range(N)}
for _ in range(M):
a, b = [int(x) - 1 for x in input().split()]
route[a].append(b)
route[b].append(a)
queue = deque()
queue.append(0)
all_town = set([int(x) for x in range(N)])
result = 0
while queue:
pos = queue.popleft()
all_town.discard(pos)
for next_pos in route[pos]:
if next_pos in all_town:
queue.append(next_pos)
if not queue:
if all_town:
queue.append(all_town.pop())
result += 1
print(result)
| Aasthaengg/IBMdataset | Python_codes/p02536/s564784660.py | s564784660.py | py | 601 | python | en | code | 0 | github-code | 90 |
23641088961 | from django.urls import reverse
from django.views.generic import FormView
from barriers.forms.statuses import BarrierChangeStatusForm
from .mixins import BarrierMixin
class BarrierChangeStatus(BarrierMixin, FormView):
template_name = "barriers/edit/status/change.html"
form_class = BarrierChangeStatusForm
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
form = context_data["form"]
context_data.update(
{
"barrier": self.barrier,
"valid_status_values": [
choice[0] for choice in form.fields["status"].choices
],
}
)
return context_data
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["token"] = self.request.session.get("sso_token")
kwargs["barrier"] = self.barrier
return kwargs
def form_valid(self, form):
form.save()
return super().form_valid(form)
def get_success_url(self):
return reverse(
"barriers:barrier_detail",
kwargs={"barrier_id": self.kwargs.get("barrier_id")},
)
| uktrade/market-access-python-frontend | barriers/views/statuses.py | statuses.py | py | 1,191 | python | en | code | 5 | github-code | 90 |
6045320215 | from gillespie_simple import *
from pathlib import Path
# Concentrations
conc_init_prey = 4 # M
conc_init_hunter = 10 # M
# Fake volume
vol = 10**(-21) # L
# Initial counts
no_init_prey = int(conc_init_prey * vol * AVOGADRO)
no_init_hunter = int(conc_init_hunter * vol * AVOGADRO)
print("Initial no prey: %d" % no_init_prey)
print("Initial no hunter: %d" % no_init_hunter)
# Reaction
rxn1 = Rxn("1",1.0,["P"],["P","P"])
rxn2 = Rxn("2",0.1,["H"],[])
conc_based_rate = 0.1
count_based_rate = conc_based_rate / (vol * AVOGADRO)
rxn3 = Rxn("3",count_based_rate,["H","P"],["H","H"])
# Initial counts
counts = Counts()
counts.set_count("H",no_init_hunter)
counts.set_count("P",no_init_prey)
# Run
counts_hist = run_gillespie(
rxn_list=[rxn1,rxn2,rxn3],
counts=counts,
dt_st_every=0.1,
t_max=100,
verbose=False
)
# Write
Path("data").mkdir(parents=True, exist_ok=True)
counts_hist.write_count_hist("P", "data/P.txt")
counts_hist.write_count_hist("H", "data/H.txt")
| smrfeld/gillespie-simple-python | examples/lotka_volterra/main.py | main.py | py | 991 | python | en | code | 0 | github-code | 90 |
33235269858 | # %%
import os
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import KernelPCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import accuracy_score, mean_absolute_error, roc_auc_score
from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor
from scipy.stats import uniform, randint
from sklearn.model_selection import RandomizedSearchCV
##############
# Set working directory
##############
# %%
os.chdir('S:/Python/projects/semi_supervised')
##############
# Define helpers
##############
# %%
def create_data(type, nrow, ncol, seed):
if type == "classification":
X, y = make_classification(n_samples = nrow, n_features = ncol, n_informative = ncol, n_redundant = 0, random_state=seed)
else:
X, y = make_regression(n_samples = nrow, n_features = ncol, n_informative = ncol, random_state=seed)
ID = np.arange(0, X.shape[0], 1)
colnames = ['ID', 'Y'] + ["X" + str(i) for i in np.arange(0, X.shape[1])]
DF = pd.DataFrame(data = np.concatenate((ID.reshape(ID.shape[0], 1), y.reshape(y.shape[0], 1), X), axis = 1), columns = colnames)
return DF
def extract_XY(DF, prop, type):
DF_known = DF.sample(frac = prop)
DF_unknown = DF.loc[~DF.ID.isin(DF_known.ID)]
X_known = DF_known[["X" + str(i) for i in np.arange(0, DF_known.shape[1] - 2)]].to_numpy()
Y_known = DF_known[['Y']].to_numpy()
Y_known = Y_known.reshape((Y_known.shape[0],))
X_unknown = DF_unknown[["X" + str(i) for i in np.arange(0, DF_known.shape[1] - 2)]].to_numpy()
Y_unknown = DF_unknown[['Y']].to_numpy()
Y_unknown = Y_unknown.reshape((Y_unknown.shape[0],))
if type == "classification":
Y_known = Y_known.astype('int32')
Y_unknown = Y_unknown.astype('int32')
return X_known, Y_known, X_unknown, Y_unknown
def standardize(X_known, X_unknown):
X_both = np.concatenate((X_known, X_unknown), axis = 0)
scaler = StandardScaler()
scaler.fit(X_both)
X_known = scaler.transform(X_known)
X_unknown = scaler.transform(X_unknown)
return X_known, X_unknown, scaler
def create_projection_obj(X_known, X_unknown, kernel):
X_both = np.concatenate((X_known, X_unknown), axis = 0)
PCA = KernelPCA(n_components = 3, kernel = kernel, remove_zero_eig=True)
PCA.fit(X = X_both)
return PCA
def project(X_known, X_unknown, PCA):
X_known_project = PCA.transform(X_known)
X_unknown_project = PCA.transform(X_unknown)
return X_known_project, X_unknown_project
def impute_y(X_known, Y_known, X_unknown, type):
if type == "classification":
knn = KNeighborsClassifier(n_neighbors = 5)
else:
knn = KNeighborsRegressor(n_neighbors = 5)
knn.fit(X = X_known, y = Y_known)
Y_impute = knn.predict(X_unknown)
return Y_impute
def calc_match_metric(Y_unknown, Y_impute, type):
if type == "classification":
metric = accuracy_score(Y_unknown, Y_impute)
else:
metric = mean_absolute_error(Y_unknown, Y_impute)
return metric
def train_model(X_train, Y_train, type):
# run search and pick best fit
params = {'learning_rate': uniform(.005, .015), 'max_iter': randint(100, 200), 'max_depth': randint(1, 14)}
if type == "classification":
model = HistGradientBoostingClassifier(early_stopping=True)
CV = RandomizedSearchCV(estimator = model, param_distributions = params, scoring = 'roc_auc')
else:
model = HistGradientBoostingRegressor(early_stopping=True)
CV = RandomizedSearchCV(estimator = model, param_distributions = params, scoring = 'neg_median_absolute_error')
CV.fit(X_train, Y_train)
model = CV.best_estimator_
return model
def calc_model_metric(model_known, scaler_known, model_both, scaler_unknown, proj, type, nrow, ncol):
# make unseen data
if type == "classification":
X_unseen, Y_unseen = make_classification(n_samples = nrow, n_features = ncol, n_informative = ncol, n_redundant = 0)
else:
X_unseen, Y_unseen = make_regression(n_samples = nrow, n_features = ncol, n_informative = ncol)
if type == "classification":
# known approach
X_unseen_stanard = scaler_known.transform(X_unseen)
preds_known = model_known.predict_proba(X_unseen_stanard)[:, 1]
# semi supervised
X_unseen = scaler_unknown.transform(X_unseen)
X_unseen_proj = proj.transform(X_unseen)
preds_both = model_both.predict_proba(X_unseen_proj)[:, 1]
AUC_known = roc_auc_score(Y_unseen, preds_known)
AUC_impute = roc_auc_score(Y_unseen, preds_both)
metric = AUC_impute - AUC_known
else:
# known approach
X_unseen_stanard = scaler_known.transform(X_unseen)
preds_known = model_known.predict(X_unseen_stanard)
# semi supervised
X_unseen = scaler_unknown.transform(X_unseen)
X_unseen_proj = proj.transform(X_unseen)
preds_both = model_both.predict(X_unseen_proj)
MAE_known = mean_absolute_error(Y_unseen, preds_known)
MAE_impute = mean_absolute_error(Y_unseen, preds_both)
metric = MAE_impute - MAE_known
return metric
##############
# Run simulation
##############
# %%
np.random.seed(42)
pieces = []
seed = 0
for type in ["classification", "regression"]:
for prop in np.arange(.05, 1, .05):
print(prop)
for kernel in ['linear', 'poly', 'rbf', 'sigmoid', 'cosine']:
for b in np.arange(0, 5):
seed += 1
DF = create_data(type, 50000, 10, seed)
X_known, Y_known, X_unknown, Y_unknown = extract_XY(DF, prop, type)
# just known data
scaler_known = StandardScaler()
scaler_known.fit(X_known)
X_known = scaler_known.transform(X_known)
model_known = train_model(X_known, Y_known, type)
# semi supervised (imputing labels)
DF = create_data(type, 50000, 10, seed)
X_known, Y_known, X_unknown, Y_unknown = extract_XY(DF, prop, type)
X_known, X_unknown, scaler_unknown = standardize(X_known, X_unknown)
proj = create_projection_obj(X_known, X_unknown, kernel)
X_known, X_unknown = project(X_known, X_unknown, proj)
Y_impute = impute_y(X_known, Y_known, X_unknown, type)
X_both = np.concatenate((X_known, X_unknown), axis = 0)
Y_both = np.concatenate((Y_known, Y_impute), axis = 0)
model_both = train_model(X_both, Y_both, type)
matchMetric = calc_match_metric(Y_unknown, Y_impute, type)
modelMetric = calc_model_metric(model_known, scaler_known, model_both, scaler_unknown, proj, type, 20000, 10)
piece = {'type':[type], 'prop':[prop], 'kernel':[kernel], 'b':[b], 'matchMetric':[matchMetric], 'modelMetric':[modelMetric]}
piece = pd.DataFrame(piece)
pieces.append(piece)
result = pd.concat(pieces)
result.head()
# %%
result.to_csv(path_or_buf = 'data/result.csv', index=False)
# %%
| gmcmacran/semi_supervised | code/run sim.py | run sim.py | py | 7,350 | python | en | code | 0 | github-code | 90 |
29066735651 | """
from src.glcm_old import horizontal_glcm, vertical_glcm, diagonal_glcm
g = horizontal_glcm("cropped/lighthouse/lighthouse_crop14.png")
h = vertical_glcm("cropped/lighthouse/lighthouse_crop14.png")
m = diagonal_glcm("cropped/lighthouse/lighthouse_crop14.png")
for i in range(10):
s = ""
for j in range(10):
s += str(g[i][j]) + " "
print(s)
print("---------------------------")
for i in range(10):
s = ""
for j in range(10):
s += str(h[i][j]) + " "
print(s)
print("---------------------------")
for i in range(10):
s = ""
for j in range(10):
s += str(m[i][j]) + " "
print(s)
"""
import json
from src.glcm import *
collected = []
for i in range(0, 36):
collected.append(get_glcm_all_props("cropped/lighthouse/lighthouse_crop" + str(i) + ".png"))
file = open("cropped/lighthouse/lighthouse.json", "w")
file.write(json.dumps(collected))
file.close()
# Note:
# serializing numpy array
# https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
# print(collected)
| Black3800/chasjp | src/test_glcm.py | test_glcm.py | py | 1,064 | python | en | code | 0 | github-code | 90 |
27925944311 | from .plugin import Plugin
class Monitor(Plugin):
def __init__(self, running_average=True, epoch_average=True, smoothing=0.7,
precision=None, number_format=None, unit=''):
if precision is None:
precision = 4
if number_format is None:
number_format = '.{}f'.format(precision)
number_format = ':' + number_format
super(Monitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
self.smoothing = smoothing
self.with_running_average = running_average
self.with_epoch_average = epoch_average
self.log_format = number_format
self.log_unit = unit
self.log_epoch_fields = None
self.log_iter_fields = ['{last' + number_format + '}' + unit]
if self.with_running_average:
self.log_iter_fields += [' ({running_avg' + number_format + '}' + unit + ')']
if self.with_epoch_average:
self.log_epoch_fields = ['{epoch_mean' + number_format + '}' + unit]
def register(self, trainer):
self.trainer = trainer
stats = self.trainer.stats.setdefault(self.stat_name, {})
stats['log_format'] = self.log_format
stats['log_unit'] = self.log_unit
stats['log_iter_fields'] = self.log_iter_fields
if self.with_epoch_average:
stats['log_epoch_fields'] = self.log_epoch_fields
if self.with_epoch_average:
stats['epoch_stats'] = (0, 0)
def iteration(self, *args):
stats = self.trainer.stats.setdefault(self.stat_name, {})
stats['last'] = self._get_value(*args)
if self.with_epoch_average:
stats['epoch_stats'] = tuple(sum(t) for t in
zip(stats['epoch_stats'], (stats['last'], 1)))
if self.with_running_average:
previous_avg = stats.get('running_avg', 0)
stats['running_avg'] = previous_avg * self.smoothing + \
stats['last'] * (1 - self.smoothing)
def epoch(self, idx):
stats = self.trainer.stats.setdefault(self.stat_name, {})
if self.with_epoch_average:
epoch_stats = stats['epoch_stats']
stats['epoch_mean'] = epoch_stats[0] / epoch_stats[1]
stats['epoch_stats'] = (0, 0)
| sibozhang/Text2Video | venv_vid2vid/lib/python3.7/site-packages/torch/utils/trainer/plugins/monitor.py | monitor.py | py | 2,292 | python | en | code | 381 | github-code | 90 |
21815216487 | from dataclasses import dataclass
@dataclass(order=True, frozen=True)
class Size:
width: int = 800
height: int = 680
max_content_width = width - 20
@dataclass(order=True, frozen=True)
class Position:
screen_x: int = int(Size().width / 2)
screen_y: int = int(Size().height / 2 - 41)
screen_hidden_x: int = int(Size().width * 2)
@dataclass(order=True, frozen=True)
class Color:
purple: str = "#b294bb"
red: str = "#bc473b"
dark_red: str = "#89332a"
black_red: str = "#4b1713"
red_brown: str = "#b26947"
orange: str = "#ab7149"
light_orange: str = "#de935f"
yellow: str = "#f0c674"
blue: str = "#81a2be"
cyan: str = "#82b3ac"
green: str = "#9fa65d"
green_brown: str = "#a59256"
grey: str = "#4c4c4c"
light_grey: str = "#727272"
silver_grey: str = "#8a8a8a"
default_fg: str = "#bdbdbd"
default_bg: str = "#1a1b1b"
default_bg_dark: str = "#111111"
light_black: str = "#0e0e0e"
black: str = "#151515"
dark_black: str = "#000000"
@dataclass(order=True, frozen=True)
class Font:
cas6b: str = "Cascadia 6 bold"
cas8: str = "Cascadia 8"
cas8i: str = "Cascadia 8 italic"
cas8b: str = "Cascadia 8 bold"
cas10b: str = "Cascadia 10 bold"
cas11: str = "Cascadia 11"
cas20b: str = "Cascadia 20 bold"
size = Size()
position = Position()
font = Font()
color = Color()
DEFAULT_LABEL_CONFIG = dict(bg=color.default_bg, fg=color.default_fg, font=font.cas8b)
DEFAULT_LABEL_GRID = dict(row=0, column=0, sticky="w", padx=2, pady=2)
DEFAULT_BTN_TOGGLE_GRID = dict(row=0, column=2, pady=2)
| vagabondHustler/subsearch | src/subsearch/gui/resources/config.py | config.py | py | 1,612 | python | en | code | 27 | github-code | 90 |
42618431013 | from typing import Dict, Any
#The strip() method removes any leading (spaces at the beginning) and trailing (spaces at the end) characters (space is the default leading character to remove) biografy: https://www.w3schools.com/python/ref_string_strip.asp
#Por eso lo usaremos para eliminar los espacios :)
class AnalizadorEventos:
def __init__(self, nombre_archivo: str):
self.nombre_archivo = nombre_archivo
def procesar_eventos(self) -> Dict[str, Any]:
eventos_por_tipo = {}
eventos_por_servidor = {}
total_eventos = 0
with open(self.nombre_archivo, "r", encoding = "ISO-8859-1") as file:
for linea in file:
if "Tipo de evento" in linea:
tipo_evento = linea.strip().split(": ")[1]
eventos_por_tipo[tipo_evento] = eventos_por_tipo.get(tipo_evento, 0) + 1
total_eventos += 1
elif "Servidor" in linea:
nombre_servidor = linea.strip().split(": ")[1]
eventos_por_servidor[nombre_servidor] = eventos_por_servidor.get(nombre_servidor, 0) + 1
total_eventos += 1
estadisticas = {
"total_eventos": total_eventos,
"eventos_por_tipo": eventos_por_tipo,
"eventos_por_servidor": eventos_por_servidor
}
return estadisticas
A = AnalizadorEventos("eventos.txt")
A.procesar_eventos()
| Dlassoc/Ejercicio_Eventos | Ejercicio.py | Ejercicio.py | py | 1,312 | python | pt | code | 0 | github-code | 90 |
1018302015 | class Conversion:
def convertntegertoromannumeral(self, integerinput):
numerals = {
1: "I",
2: "II",
3: "III",
4: "IV",
5: "V",
6: "VI",
7: "VII",
8: "VIII",
9: "IX",
10: "X"
}
if integerinput not in numerals:
result = "Integer out of range"
else:
result = numerals[integerinput]
return result
convert = Conversion()
print("Please enter a number between 1 and 10: ")
integer = int(input())
romNumeral = convert.convertntegertoromannumeral(integer)
print(romNumeral)
| MorphX2/convertIntegerToRomanNumeral | convertIntegerToRomanNumeral.py | convertIntegerToRomanNumeral.py | py | 657 | python | en | code | 0 | github-code | 90 |
25263707802 | from common import *
from functools import reduce
def draw(t=0, **kwargs):
s=np.array((2048,2048))
y,x=meshgrid_euclidean(s)
pts=[]
for (r,n,o) in ((0,1,0),(0.2+0.12*math.sin(t*0.03),3,0.001*t+math.pi/6),(0.4+0.3*math.sin(0.34+0.0174*t),6,math.sin(0.4+0.0042*t)*math.pi)):
for a in range(n):
pts.append([getattr(math,f)(o+a*math.pi*2/n)*r+0.5 for f in ['cos','sin']])
r=[np.sqrt((x-p[1]*s[1])**2+(y-p[0]*s[0])**2) for p in pts]
r=reduce(np.minimum, r[1:], r[0])
im=np.sin(r*math.pi/(40+10*math.sin(0.43586+0.006342*t)))>0
return im
if __name__ == '__main__':
for t in range(10000):
print('rendering frame %08d...'%t)
im=draw(t)
imsave(im,'video2-%08d.png'%t)
| fferri/geometric_patterns | video2.py | video2.py | py | 741 | python | en | code | 9 | github-code | 90 |
17960876969 | from collections import defaultdict
a = input()
len_a = len(a)
ans = 1 + len_a*(len_a-1)//2
d = defaultdict(int)
for ai in a:
d[ai] += 1
for key,val in d.items():
ans -= val*(val-1)//2
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03618/s100969303.py | s100969303.py | py | 207 | python | en | code | 0 | github-code | 90 |
14393998840 | # Libs
import machine
import utime
import neopixel
import gc
# region Color
'''
Color
Aufabe:
Save RGB Color
Func GetRGB | None | list[R:int, G:int, B:int]
Func SetRGB | Write RGB | None
'''
class Color():
    """Mutable RGB triple describing the color of one LED block."""

    def __init__(self, r, g, b):
        # Channel values stored as plain public attributes (0-255 expected).
        self.R = r
        self.G = g
        self.B = b

    def GetRGB(self) -> list:
        """Return the current channels as a list [R, G, B]."""
        return [self.R, self.G, self.B]

    def SetRGB(self, RGBList) -> None:
        """Overwrite all three channels from an [R, G, B] sequence."""
        self.R, self.G, self.B = RGBList[0], RGBList[1], RGBList[2]

    def SetRGB1(self, r, g, b):
        """Overwrite all three channels from separate arguments."""
        self.R, self.G, self.B = r, g, b
# endregion
# region OnLineCode
def Millis() -> int:  # static Func
    """Return the current millisecond tick counter (MicroPython utime)."""
    return utime.ticks_ms()
def Sleep(ms) -> None:  # static Func
    """Block for *ms* milliseconds (MicroPython utime)."""
    utime.sleep_ms(ms)
# endregion
# region GUI_Enums
'''
__MenuPosiEnum__
Aufgabe:
Ein Enum type für Menu / UI um zu wissen wo grade das Menu ist
'''
class __MenuPosiEnum__(object):
    """Enum-like holder for the current menu/UI cursor position.

    The values are fixed constants, so they are declared once at class
    level instead of being re-assigned on every instantiation (the old
    ``__init__`` rebuilt them per instance). Instance attribute lookup
    still resolves them, so ``MenuPosiEnum.Off`` etc. keep working.
    """
    Null = 0
    Off = 1
    Option = 2
    SetColor1 = 3
    SetColor2 = 4
    SetColorOff = 5
    SetBlockTimeDown = 6
    SetOverBlockTimeDown = 7
    SetOverBlockHoldTime = 8
    SetColor1_R = 9
    SetColor1_G = 10
    SetColor1_B = 11
    SetColor2_R = 12
    SetColor2_G = 13
    SetColor2_B = 14
    SetColorOff_R = 15
    SetColorOff_G = 16
    SetColorOff_B = 17
'''
__MenuColorTypeEnum__
Augabe:
Ein Enum type für Menu / UI um zuwissen welche RGB Farbe geendert werden soll.
'''
class __MenuColorTypeEnum__(object):
    """Enum-like holder naming which RGB channel is being edited.

    Constants are class-level (idiomatic for fixed values) instead of
    being re-assigned in ``__init__`` per instance; instance access
    such as ``MenuColorTypeEnum.R`` is unchanged.
    """
    Null = 0
    R = 1
    G = 2
    B = 3
'''
__MenuSaveEnum__
Ein Enum type für Menu / UI um zuwissen was mit den returnWert zuthuhen ist.
'''
class __MenuSaveEnum__(object):
    """Enum-like holder telling the caller what to do with a menu result.

    Constants are class-level (idiomatic for fixed values) instead of
    being re-assigned in ``__init__`` per instance; instance access
    such as ``MenuSaveEnum.Save`` is unchanged.
    """
    Null = 0
    Save = 1
    NotSave = 2
# Shared singleton instances; the rest of the program reads the enum
# values through these (e.g. MenuPosiEnum.Off).
MenuPosiEnum = __MenuPosiEnum__()
MenuColorTypeEnum = __MenuColorTypeEnum__()
MenuSaveEnum = __MenuSaveEnum__()
# endregion
# region LedWriter
'''
LED
Aufgabe:
Write LED Pixel
    Func SetNewColor | Change old RGB Colors to new RGB Colors and Write | None
'''
class LED():
    """Drives the NeoPixel strip behind the analyzer display.

    The strip is organized as `size` bars; each bar has `high` blocks and
    every block is lit by two physical LEDs (one in the "up" column, one
    mirrored in the "down" column of the same bar).
    """

    def __init__(self, size, high, dataPinAsInt):
        # BarLedList[bar][block] == ((leftLedIndex, rightLedIndex), Color)
        self.BarLedList = []
        self.Size = size
        self.High = high
        num = 0
        for s in range(size):
            left = []
            right = []
            leftRight = []
            for i in range(high):
                left.append(num)
                num += 1
            for i in range(high):
                right.append(num)
                num += 1
            # The second column is wired in the opposite direction, so
            # mirror it: index 0 then means "bottom block" on both sides.
            right.reverse()
            for pair in zip(left, right):
                leftRight.append((pair, Color(0, 0, 0)))
            self.BarLedList.append(leftRight)
        if num != 0:
            self.Neo = neopixel.NeoPixel(machine.Pin(dataPinAsInt), num + 1)
        else:
            # No LEDs configured: keep the attribute defined so that
            # SetNewColor() can bail out instead of raising AttributeError.
            self.Neo = None

    def SetNewColor(self, eqbarListColor):
        """Write new block colors, only touching pixels that changed.

        eqbarListColor mirrors BarLedList: one list per bar, holding one
        Color per block.

        Bug fixes vs. the original: iterate over the bar's blocks (the
        old code did `range(len(barListNum))` on an int), unpack the
        ((left, right), Color) tuple correctly (the old code indexed
        `[2]`, which is out of range), and use the `Neo` attribute that
        __init__ actually creates (old code referenced `self.neo`).
        """
        if self.Neo is None:
            return
        for barNum in range(len(eqbarListColor)):
            for blockNum in range(len(eqbarListColor[barNum])):
                (leftIdx, rightIdx), current = self.BarLedList[barNum][blockNum]
                rgbNew = eqbarListColor[barNum][blockNum].GetRGB()
                if current.GetRGB() != rgbNew:
                    current.SetRGB(rgbNew)
                    self.Neo[leftIdx] = (rgbNew[0], rgbNew[1], rgbNew[2])
                    self.Neo[rightIdx] = (rgbNew[0], rgbNew[1], rgbNew[2])
        self.Neo.write()
        return
# endregion
# region ProgrammParas
'''
ProgrammParas
Aufgabe: Load Save Get Put => base Paras
Func Load | | None
Func SaveParas | | None
Func WriteDefault | | None
Func PrintParas | | None
'''
class ProgrammParas():
    """Holds the user-configurable settings and persists them to flash.

    Settings live in the plain-text file ``userParas``, one integer per
    line, in the fixed order produced by ``__ParasAsText__()``.
    """

    def __init__(self, _self=False, default=False):
        # _self=True  : fill this instance with the hard-coded defaults.
        # default=True: (re)write the settings file with the defaults.
        # ProgrammParas(): load the file, creating it first if missing.
        self.pathToFile = "userParas"
        if _self == True:
            self.size = 8                        # Breite des Audioanalyzer
            self.high = 10                       # Höhe des Audioanalyzer
            self.colorList = [Color(0, 0, 255),  # Block Color1
                              Color(255, 0, 0),  # Block Color2
                              Color(0, 0, 0)     # Block ColorOff
                              ]
            self.blockFallTime = 200             # Fall des Block pro Block in ms
            self.overBlockFallTime = 250         # Fall des OverBlock pro Block in ms
            self.overBlockHoldTime = 500         # Wartezeit bis Fall für OverBlock gilt
            self.dataPinAsInt = 21               # DATA Pin für LED
        if default == True:
            self.WriteDefault()
        else:
            try:
                # Probe whether the settings file exists and is readable.
                reader = open(self.pathToFile, "r")
                reader.close()
                self.Load()
                return
            # Was a bare `except:`; only catch a missing/corrupt file, not
            # arbitrary programming errors.
            except (OSError, ValueError, IndexError):
                # File missing or broken: recreate it with the defaults,
                # then load those back in.
                ProgrammParas(_self=True, default=True)
                self.Load()
                return

    def __ParasAsText__(self) -> str:
        # Serialize all settings in the fixed on-disk order, one per line.
        # Shared by SaveParas() and PrintParas() (they used to duplicate
        # this 15-line text build).
        values = [self.size, self.high,
                  self.colorList[0].R, self.colorList[0].G, self.colorList[0].B,
                  self.colorList[1].R, self.colorList[1].G, self.colorList[1].B,
                  self.colorList[2].R, self.colorList[2].G, self.colorList[2].B,
                  self.blockFallTime, self.overBlockFallTime,
                  self.overBlockHoldTime, self.dataPinAsInt]
        return "\n".join(str(v) for v in values)

    def Load(self) -> None:
        """Read the settings file back into the instance attributes."""
        reader = open(self.pathToFile, "r")
        textList = [int(line) for line in reader]
        reader.close()  # the original leaked this file handle
        self.size = textList[0]
        self.high = textList[1]
        self.colorList = [Color(textList[2], textList[3], textList[4]),    # Block Color1
                          Color(textList[5], textList[6], textList[7]),    # Block Color2
                          Color(textList[8], textList[9], textList[10])    # Block ColorOff
                          ]
        self.blockFallTime = textList[11]
        self.overBlockFallTime = textList[12]
        self.overBlockHoldTime = textList[13]
        self.dataPinAsInt = textList[14]

    def SaveParas(self) -> None:
        """Write the current settings to the settings file."""
        writer = open(self.pathToFile, "w")
        writer.write(self.__ParasAsText__())
        writer.close()
        return

    def WriteDefault(self) -> None:
        """Overwrite the settings file with the factory defaults."""
        writer = open(self.pathToFile, "w")
        text = "8\n10\n0\n0\n255\n255\n0\n0\n0\n0\n0\n200\n250\n500\n21"
        writer.write(text)
        writer.close()
        return

    def PrintParas(self) -> None:
        """Dump the current settings to the console (debug helper)."""
        print(self.__ParasAsText__())
# endregion
# region GUI
"""
Button
Aufagabe:
Button With Correction
Func IFButtonDown | if Button Press | bool
private Func __ReadPin__ | if Button Press | bool
"""
class Button:
def __init__(self, delay, pin):
self.DelayMs = delay
self.DelayAktive = False
self.DelayEnd = 0
self.Pin = machine.Pin(pin, machine.Pin.IN)
def __ReadPin__(self) -> bool:
val = self.Pin.value()
if val == 0: return False
return True
def IFButtonDown(self) -> bool:
if self.DelayAktive:
if self.DelayEnd < Millis():
self.DelayAktive = False
return self.__ReadPin__()
else:
return False
elif self.__ReadPin__():
self.DelayAktive = True
self.DelayEnd = self.DelayMs + Millis()
return True
'''
MenuPressButton
Func UpdateButtons | Update Status of Buttons| None
Func OnButtonIsPressUpdate | Update Status of Buttons And return If One Button Press | None
Func OnButtonIsPress | return If One Button Press | bool
Func WaitOneButtonPress | wait If One Button Press | bool
'''
class MenuPressButton:
def __init__(self, delayMs, pinLeft, pinRight, pinUp, pinDown, pinOnOff):
self.ButtonLeft = Button(delayMs, pinLeft)
self.ButtonRight = Button(delayMs, pinRight)
self.ButtonUp = Button(delayMs, pinUp)
self.ButtonDown = Button(delayMs, pinDown)
self.ButtonOnOff = Button(delayMs, pinOnOff)
self.Up = False
self.Down = False
self.Right = False
self.Left = False
self.OnOff = False
def UpdateButtons(self) -> None:
self.Left = self.ButtonLeft.IFButtonDown()
self.Right = self.ButtonRight.IFButtonDown()
self.Up = self.ButtonUp.IFButtonDown()
self.Down = self.ButtonDown()
self.OnOff = self.ButtonOnOff()
def OnButtonIsPressUpdate(self) -> bool:
self.UpdateButtons()
return self.OnButtonIsPress()
def OnButtonIsPress(self) -> bool:
if self.Up or self.Down or self.Right or self.Left or self.OnOff():
return True
return False
def WaitOneButtonPress(self) -> bool:
while self.OnButtonIsPressUpdate() == False: pass
return True
'''
Menu
Aufabe:
Hier sind die Func um das UI zu erstellen oder Update zu erstellen.
Func private __SetClearList__ | Saubert das Display mit ColorOff | None
Func private __GetClearList__ | Create Liste mit ColorOff | list[Color]
Func private __GetListClearBar__ | Mit oldColorList und Saubert nur eine Bar mit ColorOff | list[Color]
Func private __SetUI_To_Option__ | Set UI to Option | None
Func private __SetUI_To_Option_UpdateUserPosi__ | Update UI as Option | None
Func private __SetUI_To_SetColor1__ | Set UI to SetColor1 | None
Func private __SetUI_To_SetColor1_UpdateUserPosi__ | Update UI as SetColor1 | None
Func private __SetUI_To_SetColor1_ChangeRGB__ | Set UI to SetColor1_ChangeRGB | None
Func private __SetUI_To_SetColor1_ChangeRGB__UpdateUserPosi__ | Update UI as SetColor1_ChangeRGB | None
Func private __SetUI_To_SetColor2__ | Set UI to SetColor1 | None
Func private __SetUI_To_SetColor2_UpdateUserPosi__ | Update UI as SetColor1 | None
Func private __SetUI_To_SetColor2_ChangeRGB__ | Set UI to SetColor1_ChangeRGB | None
Func private __SetUI_To_SetColor2_ChangeRGB__UpdateUserPosi__ | Update UI as SetColor1_ChangeRGB | None
Func private __SetUI_To_SetColorOff__ | Set UI to SetColor1 | None
Func private __SetUI_To_SetColorOff_UpdateUserPosi__ | Update UI as SetColor1 | None
Func private __SetUI_To_SetColorOff_ChangeRGB__ | Set UI to SetColor1_ChangeRGB | None
Func private __SetUI_To_SetColorOff_ChangeRGB__UpdateUserPosi__ | Update UI as SetColor1_ChangeRGB | None
'''
class Menu():
__aktiveSelect__ = Color(255, 255, 255)
__oldSelect__ = Color(0, 255, 0)
__CanSelect__ = Color(0, 200, 200)
__Bold__ = Color(255, 0, 0)
def __SetClearList__(self, ledWriter: LED) -> None:
ledWriter.SetNewColor(self.__GetClearList__(ledWriter))
''' Color 1'''
def __GetClearList__(self, ledWriter: LED) -> list:
returnWert = ledWriter.BarLedList
for i_bar in range(len(returnWert)):
for i_block in range(len(i_bar)):
returnWert[i_bar][i_block][2].SetRGB1(0, 0, 0)
return returnWert
def __GetListClearBar__(self, barNum, ledWriter: LED) -> list:
returnWert = ledWriter.BarLedList
for i in range(len(returnWert[0])):
returnWert[barNum][i][2] = Color(0, 0, 0)
return returnWert
def __SetUI_To_Option__(self, UserPosi, ledWriter: LED, colorList) -> None:
self.__SetClearList__(ledWriter)
newColor = ledWriter.BarLedList
newColor[0][0][2] = self.__Bold__
newColor[0][-1][2] = self.__Bold__
for i in range(1, (6)): # Size of Options
newColor[0][i][2] = self.__CanSelect__
newColor[0][0][UserPosi] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
def __SetUI_To_Option_UpdateUserPosi__(self, UserPosi, oldUserPosi, ledWriter: LED, colorList) -> None:
newColor = ledWriter.BarLedList
newColor[0][oldUserPosi][2] = self.__CanSelect__
newColor[0][UserPosi][2] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
def __SetUI_To_SetColor1__(self, UserPosi, ledWriter: LED) -> None:
newColor = ledWriter.BarLedList
newColor[0][UserPosi][2] = self.__oldSelect__
newColor[1][0][2] = self.__Bold__
newColor[1][-1][2] = self.__Bold__
newColor[1][1][2] = Color(130, 0, 0) # R
newColor[1][2][2] = Color(0, 130, 0) # G
newColor[1][3][2] = Color(0, 0, 130) # B
newColor[1][UserPosi][2] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
def __SetUI_To_SetColor1_UpdateUserPosi__(self, UserPosi, oldUserPosi, ledWriter: LED) -> None:
newColor = ledWriter.BarLedList
newColor[1][1][2] = Color(130, 0, 0) # R
newColor[1][2][2] = Color(130, 0, 0) # G
newColor[1][3][2] = Color(130, 0, 0) # B
newColor[1][UserPosi][2] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
def __SetUI_To_SetColor1_ChangeRGB__(self, UserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
newColor = ledWriter.BarLedList
newColor[2][0][2] = self.__Bold__
newColor[2][-1][2] = self.__Bold__
if typeRGB == MenuColorTypeEnum.R:
newColor[2][1][2] = Color(32, 0, 0)
newColor[2][2][2] = Color(64, 0, 0)
newColor[2][3][2] = Color(96, 0, 0)
newColor[2][4][2] = Color(128, 0, 0)
newColor[2][5][2] = Color(160, 0, 0)
newColor[2][6][2] = Color(224, 0, 0)
newColor[2][7][2] = Color(255, 0, 0)
elif typeRGB == MenuColorTypeEnum.G:
newColor[2][1][2] = Color(0, 32, 0)
newColor[2][2][2] = Color(0, 64, 0)
newColor[2][3][2] = Color(0, 96, 0)
newColor[2][4][2] = Color(0, 128, 0)
newColor[2][5][2] = Color(0, 160, 0)
newColor[2][6][2] = Color(0, 224, 0)
newColor[2][7][2] = Color(0, 255, 0)
elif typeRGB == MenuColorTypeEnum.B:
newColor[2][1][2] = Color(0, 0, 32)
newColor[2][2][2] = Color(0, 0, 64)
newColor[2][3][2] = Color(0, 0, 96)
newColor[2][4][2] = Color(0, 0, 128)
newColor[2][5][2] = Color(0, 0, 160)
newColor[2][6][2] = Color(0, 0, 224)
newColor[2][7][2] = Color(0, 0, 255)
newColor[1][UserPosi][2] = self.__oldSelect__
newColor[2][UserPosi][2] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
def __SetUI_To_SetColor1_ChangeRGB__UpdateUserPosi__(self, UserPosi, oldUserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
newColor = ledWriter.BarLedList
if typeRGB == MenuColorTypeEnum.R:
newColor[2][1][2] = Color(32, 0, 0)
newColor[2][2][2] = Color(64, 0, 0)
newColor[2][3][2] = Color(96, 0, 0)
newColor[2][4][2] = Color(128, 0, 0)
newColor[2][5][2] = Color(160, 0, 0)
newColor[2][6][2] = Color(224, 0, 0)
newColor[2][7][2] = Color(255, 0, 0)
elif typeRGB == MenuColorTypeEnum.G:
newColor[2][1][2] = Color(0, 32, 0)
newColor[2][2][2] = Color(0, 64, 0)
newColor[2][3][2] = Color(0, 96, 0)
newColor[2][4][2] = Color(0, 128, 0)
newColor[2][5][2] = Color(0, 160, 0)
newColor[2][6][2] = Color(0, 224, 0)
newColor[2][7][2] = Color(0, 255, 0)
elif typeRGB == MenuColorTypeEnum.B:
newColor[2][1][2] = Color(0, 0, 32)
newColor[2][2][2] = Color(0, 0, 64)
newColor[2][3][2] = Color(0, 0, 96)
newColor[2][4][2] = Color(0, 0, 128)
newColor[2][5][2] = Color(0, 0, 160)
newColor[2][6][2] = Color(0, 0, 224)
newColor[2][7][2] = Color(0, 0, 255)
newColor[2][UserPosi][2] = self.__aktiveSelect__
ledWriter.SetNewColor(newColor)
''' Color 2 '''
def __SetUI_To_SetColor2__(self, UserPosi, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1__(UserPosi, ledWriter)
def __SetUI_To_SetColor2_UpdateUserPosi__(self, UserPosi, oldUserPosi, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_UpdateUserPosi__(UserPosi, oldUserPosi, ledWriter)
def __SetUI_To_SetColor2_ChangeRGB__(self, UserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_ChangeRGB__(UserPosi, typeRGB, ledWriter)
def __SetUI_To_SetColor2_ChangeRGB__UpdateUserPosi__(self, UserPosi, oldUserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_ChangeRGB__UpdateUserPosi__(UserPosi, oldUserPosi, typeRGB, ledWriter)
''' Color On off '''
def __SetUI_To_SetColorOff__(self, UserPosi, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1__(UserPosi, ledWriter)
def __SetUI_To_SetColorOff_UpdateUserPosi__(self, UserPosi, oldUserPosi, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_UpdateUserPosi__(UserPosi, oldUserPosi, ledWriter)
def __SetUI_To_SetColorOff_ChangeRGB__(self, UserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_ChangeRGB__(UserPosi, typeRGB, ledWriter)
def __SetUI_To_SetColorOff_ChangeRGB__UpdateUserPosi__(self, UserPosi, oldUserPosi, typeRGB: MenuColorTypeEnum, ledWriter: LED) -> None:
self.__SetUI_To_SetColor1_ChangeRGB__UpdateUserPosi__(UserPosi, oldUserPosi, typeRGB, ledWriter)
'''
UI + Vererbung von Menu
Aufgabe:
    Ist das UI für den User, um Einstellungen zu ändern und zu speichern.
Func + value
Update     | Update Menu                                        | None
MenuAktive | True if the user is currently in the menu          | bool
LoopUpdate | loop Update and break once the user leaves the menu | None
'''
class UI(Menu):
def __init__(self, buttons: MenuPressButton, ledWriter:LED, menuParas: ProgrammParas):
self.ledWriter: LED = ledWriter
self.MenuParas = ProgrammParas
self.Tasten = buttons
self.MenuStatus = MenuPosiEnum.Off
self.UserPosi_UP = 1
self.UserPosi_RIGH = 0
self.MenuSchicht = 0
self.MenuOptions = 0
def Update(self, ledWriter: LED) -> None:
self.Tasten.UpdateButtons()
if self.MenuStatus == MenuPosiEnum.Null: # MenuStatus is Null
print("MenuStatus is Null")
return
if self.MenuStatus == MenuPosiEnum.Null:
self.Tasten.UpdateButtons()
if self.Tasten.OnButtonIsPress():
self.MenuStatus == MenuPosiEnum.Option
if self.MenuStatus == MenuPosiEnum.Option: self.__StatusOption__()
elif self.MenuStatus == MenuPosiEnum.SetColor1: self.SetColor()
elif self.MenuStatus == MenuPosiEnum.SetColor2: self.SetColor()
elif self.MenuStatus == MenuPosiEnum.SetColorOff: self.SetColor()
def __StatusOption__(self) -> None:
self.__SetUI_To_Option__(ledWriter=self.ledWriter)
self.UserPosi_RIGH = 0
self.UserPosi_UP = 1
maxHigh = 6
minHigh = 1
oldUserPosi = 1
while True:
oldUserPosi = self.UserPosi_UP
self.Tasten.OnButtonIsPressUpdate()
if self.Tasten.Left:
self.MenuStatus = MenuPosiEnum.Null
return
elif self.Tasten.Up:
if maxHigh == self.UserPosi_UP: pass
elif minHigh == self.UserPosi_UP: pass
else:
self.UserPosi_UP += 1
elif self.Tasten.Down:
if maxHigh == self.UserPosi_UP: pass
elif minHigh == self.UserPosi_UP: pass
else:
self.UserPosi_UP += -1
self.Tasten.OnButtonIsPressUpdate(self, self.UserPosi_UP, oldUserPosi, self.ledWriter,
self.MenuParas.colorList)
if self.Tasten.Right:
if self.UserPosi_UP == 1: self.MenuStatus = MenuPosiEnum.SetColor1
elif self.UserPosi_UP == 2: self.MenuStatus = MenuPosiEnum.SetColor2
elif self.UserPosi_UP == 3: self.MenuStatus = MenuPosiEnum.SetColorOff
elif self.UserPosi_UP == 4: self.MenuStatus = MenuPosiEnum.SetBlockTimeDown
elif self.UserPosi_UP == 5: self.MenuStatus = MenuPosiEnum.SetOverBlockTimeDown
elif self.UserPosi_UP == 6: self.MenuStatus = MenuPosiEnum.SetOverBlockHoldTime
return
# kommt noch
def __SetColorR_orG_orB__(self, ColorType: MenuColorTypeEnum, listPosi: int):
pass
def SetColorR_orG_orB(self, ColorType: MenuColorTypeEnum, oldColor: Color) -> (MenuSaveEnum, int):
listPosi = 0
colorDessList = [] # Liste | UserPosi
colorDessList.append(32) # 0 > 1
colorDessList.append(64) # 1 > 2
colorDessList.append(96) # 2 > 3
colorDessList.append(128) # 3 > 4
colorDessList.append(160) # 4 > 5
colorDessList.append(192) # 5 > 6
colorDessList.append(224) # 6 > 7
colorDessList.append(255) # 7 > 8
if ColorType == MenuColorTypeEnum.R: pass
if ColorType == MenuColorTypeEnum.G: pass
if ColorType == MenuColorTypeEnum.B: # Ser Color for B
# Set UserPosi to Aktive Color
for num in range(len(colorDessList)):
if oldColor.R <= colorDessList[num]:
listPosi = num
self.UserPosiUp = num + 1
break
# Set UI to ColorType
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
while True:
buttons = self.GetUsetInput()
if buttons.OnOff:
self.IfPressOnOffButton()
elif buttons.Left:
return (MenuSaveEnum.NotSave, colorDessList[0])
elif buttons.Right:
return (MenuSaveEnum.Save, colorDessList[listPosi])
elif buttons.Up:
if self.UserPosiUp == 8:
self.UserPosiUp = 1
listPosi = 0
else:
self.UserPosiUp += 1
listPosi += 1
elif buttons.Down:
if self.UserPosiUp == 1:
self.UserPosiUp += -1
listPosi += -1
# Update UI
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
elif ColorType == MenuColorTypeEnum.G:
if True: # Set UserPosi AND Set colorListPosi
for num in range(len(colorDessList)):
if oldColor.G <= colorDessList[num]:
listPosi = num
self.UserPosiUp = num + 1
break
# Set UI to ColorType
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
while True:
buttons = self.GetUsetInput()
if buttons.OnOff:
self.IfPressOnOffButton()
elif buttons.Left:
return (MenuSaveEnum.NotSave, colorDessList[0])
elif buttons.Right:
return (MenuSaveEnum.Save, colorDessList[listPosi])
elif buttons.Up:
if self.UserPosiUp == 8:
self.UserPosiUp = 1
listPosi = 0
else:
self.UserPosiUp += 1
listPosi += 1
elif buttons.Down:
if self.UserPosiUp == 1:
self.UserPosiUp += -1
listPosi += -1
# Update UI
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
elif ColorType == MenuColorTypeEnum.B:
if True: # Set UserPosi AND Set colorListPosi
for num in range(len(colorDessList)):
if oldColor.B <= colorDessList[num]:
listPosi = num
self.UserPosiUp = num + 1
break
# Set UI to ColorType
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
while True:
buttons = self.GetUsetInput()
if buttons.OnOff:
self.IfPressOnOffButton()
elif buttons.Left:
return (MenuSaveEnum.NotSave, colorDessList[0])
elif buttons.Right:
return (MenuSaveEnum.Save, colorDessList[listPosi])
elif buttons.Up:
if self.UserPosiUp == 8:
self.UserPosiUp = 1
listPosi = 0
else:
self.UserPosiUp += 1
listPosi += 1
elif buttons.Down:
if self.UserPosiUp == 1:
self.UserPosiUp += -1
listPosi += -1
# Update UI
if True: self.__SetColorR_orG_orB__(ColorType, listPosi)
def SetColor(self) -> None:
returnUserPosiIFLeft = self.UserPosi_UP
self.UserPosiUp = 1
while True:
self.Tasten.OnButtonIsPressUpdate()
# MenuStatus => off
if self.Tasten.OnOff:
self.MenuStatus = MenuPosiEnum.Off
return
# not Save
elif self.Tasten.Left:
self.MenuStatus = MenuPosiEnum.Option
return
# Save
# Change color R
# Change color G
# Change color B
elif buttons.Right:
if self.UserPosiUp == 4: # Save
returnWertSave = MenuSaveEnum.Save
return
elif self.UserPosiUp == 3: # Set R
buff = self.SetColorR_orG_orB(MenuColorTypeEnum.R)
if buff[0] == MenuSaveEnum.Save:
newColor.R = buff[1]
else:
pass
elif self.UserPosiUp == 2: # Set G
buff = self.SetColorR_orG_orB(MenuColorTypeEnum.G)
if buff[0] == MenuSaveEnum.Save:
newColor.G = buff[1]
else:
pass
elif self.UserPosiUp == 1: # Set B
buff = self.SetColorR_orG_orB(MenuColorTypeEnum.B)
if buff[0] == MenuSaveEnum.Save:
newColor.B = buff[1]
else:
pass
# Go Up
elif buttons.Up:
if self.UserPosiUp == 4:
pass # Is Max Up
else:
self.UserPosiUp += 1
# Go Down
elif buttons.Down:
if self.UserPosiUp == 1:
pass # Is Max Down
else:
self.UserPosiUp += -1
def LoopUpdate(self, ledWriter: LED) -> None:
pass
# endregion
# region main
'''
Class InputAudio
Aufgabe:
Read ADC Pin and write to a List
Func Update | Update die HightList für die HzBender | None
Func UpdateReturn | Update die HightList für die HzBender | list[int]
PrintAudioHigh | Printet die Liste AudioHightList | None
GetHighList | Return HighList | list[int]
'''
class InputAudio():
def __init__(self, high, size, hzADCPinMachine):
self.high = high
self.hzADCPinList = hzADCPinMachine
self.HighList = []
for i in range(size): self.HighList.append(0)
def Update(self) -> None:
for i in range(len(self.hzADCPinList)):
self.HighList[i] = int(self.high / 4000 * self.hzADCPinList[i].read())
def UpdateReturn(self) -> list:
self.Update()
return self.HighList
def PrintAudioHigh(self):
for i in range(len(self.HighList)):
print(i, "=>", self.HighList[i].read())
def GetHighList(self) -> list:
return self.HighList
'''
Block
Aufgabe:
Save 3 Colors for 1 LED Grup
Func SetAktive | Set Block to Aktive | None
Func SetOff | Set Block to Off | None
Func ChanceColor | Chance Color Mode | None
'''
class Block():
def __init__(self, color1, color2, colorOff):
self.Color1 = color1 # Color 1 is the main Color
self.Color2 = color2 # Color 2 ist der Block der langsam fehlt
self.ColorOff = colorOff # ColorOff ist sind die Blocke die aus sind
self.AktiveColor = colorOff # Welche Frabe grade Aktive ist
self.Status = False # Ob der Block Aktive ist oder nicht
def SetAktive(self) -> None: self.Status = True
def SetOff(self) -> None: self.Status = False
def ChanceColor(self, colorInt) -> None:
if colorInt == 1: self.AktiveColor = self.Color1
elif colorInt == 2: self.AktiveColor = self.Color2
elif colorInt == 3: self.AktiveColor = self.ColorOff
'''
Bar
Aufgabe:
Save Block and Chance the Color
Func FirstStart | Set Blocks Aktive and Chance the Color to Off | None
Func Update | Chance Colors Mode from Blocks | None
'''
class Bar():
def __init__(self, high, colorList, blockFallTime, overBlockFallTime, overBlockHoldTime):
self.BlockList = []
for i in range(high):
self.BlockList.append(Block(colorList[0], colorList[1], colorList[2]))
self.BlockFallTime = blockFallTime
self.OverBlockFallTime = overBlockFallTime
self.OverBlockHoldTime = overBlockHoldTime
self.BlockNextDown_Ms = 0
self.OverBlockNextDown_Ms = 0
self.OverBlockHoldAktive = False
self.OverBlockHoldEnd_Ms = 0
self.MaxHigh = high + 1
self.PosiBlock = 0
self.PosiOverBlock = 1
def FirstStart(self) -> None:
for i in self.BlockList: # Geht alle Block durch
i.SetAktive() # Set all Block to Aktive
i.ChanceColor(3) # Chance the Color to ColorOff
def Update(self, audioHigh) -> None:
# ------------------------------------------------ Block
# If audioHigh Higher then self.PosiBlock
if self.PosiBlock <= audioHigh: # Wenn die audioHigh Higher ist als self.PosiBlock
self.BlockNextDown_Ms = Millis() + self.BlockFallTime # Set new time self.BlockNextDown_Ms
if self.MaxHigh <= audioHigh: # If audio to High as self.MaxHigh
self.PosiBlock = self.MaxHigh - 1
else: self.PosiBlock = audioHigh
# If self.BlockNextDown_Ms lower as Millis()
# Then self.PosiBlock foll
elif self.BlockNextDown_Ms < Millis():
self.BlockNextDown_Ms = Millis() + self.BlockFallTime
if self.PosiBlock <= 1:
self.PosiBlock = 0
else:
self.PosiBlock += -1
# ------------------------------------------------ OverBlock
# If self.Posi higher then self.PosiOverBlock
# Then Set self.PosiOverBlock Higher
if self.PosiBlock + 1 >= self.PosiOverBlock:
self.PosiOverBlock = self.PosiBlock + 1
if self.PosiOverBlock != 1:
self.OverBlockHoldAktive = True
self.OverBlockHoldEnd_Ms = Millis() + self.OverBlockHoldTime
# If self.OverBlockHoldAktive Aktive
# Then If
elif self.OverBlockHoldAktive:
if self.OverBlockHoldEnd_Ms < Millis():
self.OverBlockHoldAktive = False
self.OverBlockNextDown_Ms = Millis() + self.OverBlockFallTime
if self.PosiOverBlock <= 2:
self.PosiOverBlock = 1
else:
self.PosiOverBlock += -1
elif self.OverBlockNextDown_Ms < Millis():
self.OverBlockNextDown_Ms = Millis() + self.OverBlockFallTime
if self.PosiOverBlock <= 2:
self.PosiOverBlock = 1
else:
self.PosiOverBlock += -1
return
'''
EQ
Aufgabe:
Die Zusammenfassung der Classen in einer und Update alle Classen
Func FirstStart | Set all Class To FirstStart | None
Func Update | Update all Class | None
Func PrintEQ | Print all Bars | None
Func LoopUpdate | Loop Update Func | None
'''
class EQ():
def __init__(self, machineADCPinList, dataPinAsInt, buttons: MenuPressButton):
self.LedWriter = LED(self.MainParas.size, self.MainParas.high, dataPinAsInt)
self.MainParas = ProgrammParas()
self.Ui = UI(buttons, self.LedWriter, self.MainParas) # UI
self.BarList = []
self.Audio = InputAudio(self.MainParas.high + 1, self.MainParas.size, machineADCPinList)
self.High = self.MainParas.high
for i in range(self.MainParas.size):
self.BarList.append(Bar(self.MainParas.high, self.MainParas.colorList,
self.MainParas.blockFallTime, self.MainParas.overBlockFallTime,
self.MainParas.overBlockHoldTime))
def FirstStart(self) -> None:
for i in range(len(self.BarList)):
self.BarList[i].FirstStart()
def Update(self) -> None:
self.Audio.Update()
for i in range(len(self.BarList)):
self.BarList[i].Update(self.Audio.HighList[i])
def PrintEQ(self) -> None:
text = ""
for bar in self.BarList:
for textPosiHigh in range(self.High):
if 0 == textPosiHigh:
text += "#"
elif bar.PosiBlock > textPosiHigh:
text += "#"
elif bar.PosiOverBlock == textPosiHigh:
text += "|"
else:
text += " "
text += "\n"
print(text)
def LoopUpdate(self) -> None:
while True:
try:
self.Update()
gc.collect()
except:
pass
# ------------------------------------------------------ ClassS End
# Enable automatic garbage collection.
gc.enable()
# Pins for IN Hz AS Int
hzID = [32,
33,
34,
35,
36,
37,
38,
39
]
# Pins for IN Hz AS machine.ADC
__hzADCPin__ = []
for i in hzID: __hzADCPin__.append(machine.ADC(machine.Pin(i)))
# Pin DATA_LED OUT
dataPinAsInt = 22
# Buttons
buttons = MenuPressButton(delayMs=300,
pinLeft=1,
pinRight=2,
pinUp=3,
pinDown=3,
pinOnOff=4)
eq = EQ(machineADCPinList=__hzADCPin__, dataPinAsInt=dataPinAsInt, buttons=buttons)
eq.FirstStart()
eq.PrintEQ()
# Run a garbage collection.
gc.collect()
eq.Update()
print("---")
eq.BarList[1].PosiOverBlock = 0
eq.BarList[1].PosiBlock = 7
# ---------------------------
# endregion
| chaosmac1/projektsoundvisual | main.py | main.py | py | 36,493 | python | en | code | 0 | github-code | 90 |
18038709429 | s = input()
n = len(s)
i = n
while i > 0:
if i >= 7 and s[i-7:i] == 'dreamer':
i -= 7
elif i >= 5 and s[i-5:i] == 'dream':
i -= 5
elif i >= 6 and s[i-6:i] == 'eraser':
i -= 6
elif i >= 5 and s[i-5:i] == 'erase':
i -= 5
else:
print('NO')
exit()
print('YES') | Aasthaengg/IBMdataset | Python_codes/p03854/s459456800.py | s459456800.py | py | 325 | python | en | code | 0 | github-code | 90 |
39227476197 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 23 09:02:21 2015
@author: adelpret
q.shape"""
import matplotlib.pyplot as plt
import numpy as np
import plot_utils as plut
from hrp2_motors_parameters import k_d, k_p, k_tau, k_v
FOLDER_ID = 5
EST_DELAY = 40
""" delay introduced by the estimation in number of samples """
JOINT_ID = np.array([1, 2, 3, 4, 5])
# k_v = np.array([0.006332, 0.007, 0.006561, 0.006928, 0.006])
ZERO_VEL_THR = 0.0001
dt = 0.001
PLOT_TRACKING_ERROR = True
PLOT_TRAJECTORY_TRACKING = False
K_6 = 0.0
if FOLDER_ID == 1:
data_folder = "../results/20150324_151451_stairs_pos/"
elif FOLDER_ID == 2:
data_folder = "../results/20150325_170301_stairs_tc_gain_100/"
K_6 = 1.0
elif FOLDER_ID == 3:
data_folder = "../results/20150325_175521_stairs_tc_gain_50/"
K_6 = 0.5
elif FOLDER_ID == 4:
data_folder = "../results/20150325_181352_stairs_tc_gain_25/"
K_6 = 0.25
elif FOLDER_ID == 5:
data_folder = "../results/20150325_183800_stairs_tc_gain_10/"
K_6 = 0.1
plut.SAVE_FIGURES = True
plut.FIGURE_PATH = data_folder
SHOW_LEGEND = False
SHOW_PLOTS = False
DATA_FILE_NAME = "data.npz"
TEXT_DATA_FILE_NAME = "data.txt"
file_name_qDes = "dg_jtc-jointsPositionsDesired.dat"
file_name_enc = "dg_HRP2LAAS-robotState.dat"
file_name_qRef = "dg_jtg-q.dat"
file_name_dqRef = "dg_jtg-dq.dat"
file_name_ddqRef = "dg_jtg-ddq.dat"
file_name_dq = "dg_estimator-jointsVelocities.dat"
file_name_ddq = "dg_estimator-jointsAccelerations.dat"
file_name_tau = "dg_estimator-jointsTorques.dat"
file_name_delta_q_ff = "dg_jtc-deltaQ_ff.dat"
file_name_delta_q_fb = "dg_jtc-deltaQ_fb.dat"
file_name_delta_q_friction = "dg_jtc-deltaQ_friction.dat"
""" Load data from file """
try:
data = np.load(data_folder + DATA_FILE_NAME)
# q = data['q'];
enc = data["enc"]
qRef = data["qRef"]
dqRef = data["dqRef"]
dq = data["dq"]
tau = data["tau"]
qDes = data["qDes"]
if "delta_q_ff" in data.keys():
delta_q_ff = data["delta_q_ff"]
delta_q_fb = data["delta_q_fb"]
else:
delta_q_ff = np.zeros(enc.shape)
delta_q_fb = np.zeros(enc.shape)
N = len(enc[:, 0])
except (IOError, KeyError):
print("Gonna read text files...")
# q = np.loadtxt(data_folder+file_name_q);
enc = np.loadtxt(data_folder + file_name_enc)
qRef = np.loadtxt(data_folder + file_name_qRef)
dqRef = np.loadtxt(data_folder + file_name_dqRef)
dq = np.loadtxt(data_folder + file_name_dq)
tau = np.loadtxt(data_folder + file_name_tau)
qDes = np.loadtxt(data_folder + file_name_qDes)
delta_q_ff = np.loadtxt(data_folder + file_name_delta_q_ff)
delta_q_fb = np.loadtxt(data_folder + file_name_delta_q_fb)
# check that signals have same length
n_enc = len(enc[:, 0])
n_qRef = len(qRef[:, 0])
n_dqRef = len(dqRef[:, 0])
n_dq = len(dq[:, 0])
n_tau = len(tau[:, 0])
n_qDes = len(qDes[:, 0])
n_delta_q_ff = len(delta_q_ff[:, 0])
n_delta_q_fb = len(delta_q_fb[:, 0])
N = np.min([n_enc, n_qRef, n_dq, n_tau, n_qDes, n_delta_q_ff, n_delta_q_fb])
if n_enc != N:
print("Gonna reduce size of encoder signal from %d to %d" % (n_enc, N))
enc = enc[:N, :]
if n_qRef != N:
print("Gonna reduce size of qRef signal from %d to %d" % (n_qRef, N))
qRef = qRef[:N, :]
if n_dqRef != N:
print("Gonna reduce size of dqRef signal from %d to %d" % (n_dqRef, N))
dqRef = dqRef[:N, :]
if n_dq != N:
print("Gonna reduce size of dq signal from %d to %d" % (n_dq, N))
dq = dq[:N, :]
if n_tau != N:
print("Gonna reduce size of tau signal from %d to %d" % (n_tau, N))
tau = tau[:N, :]
if n_qDes != N:
print("Gonna reduce size of qDes signal from %d to %d" % (n_qDes, N))
qDes = qDes[:N, :]
if n_delta_q_ff != N:
print("Gonna reduce size of delta_q_ff signal from %d to %d" % (n_delta_q_ff, N))
delta_q_ff = delta_q_ff[:N, :]
if n_delta_q_fb != N:
print("Gonna reduce size of delta_q_fb signal from %d to %d" % (n_delta_q_fb, N))
delta_q_fb = delta_q_fb[:N, :]
# synchronize qDes with other signals
N = N - EST_DELAY
for i in range(N):
if np.linalg.norm(dqRef[i, 1:]) > ZERO_VEL_THR:
print("First sample with non-zero reference velocity is %d" % i)
FIRST_SAMPLE = i - 100
break
for i in range(N):
if np.linalg.norm(dqRef[-i, 1:]) > ZERO_VEL_THR:
print("Last sample with non-zero reference velocity is %d" % (N - i))
LAST_SAMPLE = N - i + 100
break
# plt.figure(); plt.plot(np.linalg.norm(dqRef[:,1:])); plt.title('dqRef');
N = LAST_SAMPLE - FIRST_SAMPLE
dq = dq[FIRST_SAMPLE + EST_DELAY - 1 : LAST_SAMPLE + EST_DELAY - 1 :, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
tau = tau[FIRST_SAMPLE + EST_DELAY - 1 : LAST_SAMPLE + EST_DELAY - 1 :, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
enc = enc[FIRST_SAMPLE:LAST_SAMPLE, 6 + 1 + JOINT_ID].reshape(N, len(JOINT_ID))
qRef = qRef[FIRST_SAMPLE:LAST_SAMPLE, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
dqRef = dqRef[FIRST_SAMPLE:LAST_SAMPLE, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
qDes = qDes[FIRST_SAMPLE:LAST_SAMPLE, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
delta_q_ff = delta_q_ff[FIRST_SAMPLE:LAST_SAMPLE, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
delta_q_fb = delta_q_fb[FIRST_SAMPLE:LAST_SAMPLE, 1 + JOINT_ID].reshape(N, len(JOINT_ID))
np.savez(
data_folder + DATA_FILE_NAME,
dq=dq,
tau=tau,
qDes=qDes,
enc=enc,
qRef=qRef,
dqRef=dqRef,
delta_q_ff=delta_q_ff,
delta_q_fb=delta_q_fb,
)
""" Save data as text file for loading it in matlab """
# np.savetxt(data_folder+TEXT_DATA_FILE_NAME, (dq,tau,qDes,enc,qRef));
""" Plot data """
time = np.arange(0, N * dt, dt)
for i in range(len(JOINT_ID)):
print(
"Max position tracking error for joint %d: %f" % (JOINT_ID[i], np.max(np.abs(enc[:, i] - qRef[:, i])))
)
print(
"Avg Squared position tracking error for joint %d: %f"
% (JOINT_ID[i], np.linalg.norm(enc[:, i] - qRef[:, i]) / N)
)
if PLOT_TRACKING_ERROR:
plt.figure()
plt.plot(time, 1e3 * (enc[:, i] - qRef[:, i]), rasterized=True)
plt.xlabel("Time [s]")
plt.ylabel(r"$q_j-q_j^d$ [$10^3$ rad]")
if JOINT_ID[i] == 3:
plt.ylim([-65, 20])
else:
plt.ylim([-20, 20])
title = "Joint " + str(JOINT_ID[i]) + " pos track error"
plut.saveCurrentFigure(title)
plt.title(title)
if PLOT_TRAJECTORY_TRACKING:
plt.figure()
plt.plot(time, enc[:, i])
plt.plot(time, qRef[:, i], "r--", rasterized=True)
plt.xlabel("Time [s]")
plt.ylabel(r"$q_j$ [rad]")
if SHOW_LEGEND:
leg = plt.legend([r"$q_j$", r"$q_j^d$"])
leg.get_frame().set_alpha(plut.LEGEND_ALPHA)
title = "Joint " + str(JOINT_ID[i]) + " pos track"
plut.saveCurrentFigure(title)
plt.title(title)
j = JOINT_ID[i]
# compute delta_q_friction from velocity estimation
delta_q_friction = k_v[j] * dq[:, i]
# compute delta_q_fb from other components of delta_q (there was a bug in the c++ code computing delta_q_fb)
delta_q_fb[:, i] = qDes[:, i] - enc[:, i] - delta_q_ff[:, i] - delta_q_friction
delta_q_fb_pos = K_6 * (qRef[:, i] - enc[:, i])
delta_q_fb_vel = k_tau[j] * (1 + k_p[j]) * k_d[j] * (dqRef[:, i] - dq[:, i])
delta_q_fb_force = delta_q_fb[:, i] - delta_q_fb_pos - delta_q_fb_vel
plt.figure()
plt.plot(time, 1e3 * delta_q_ff[:, i], rasterized=True)
plt.plot(time, 1e3 * delta_q_friction, rasterized=True)
plt.plot(time, 1e3 * delta_q_fb_force, rasterized=True)
plt.plot(time, 1e3 * delta_q_fb_pos, rasterized=True)
# plt.plot(time, 1e3*delta_q_fb_vel);
plt.plot(time, 1e3 * (qDes[:, i] - enc[:, i]), "--", rasterized=True)
plt.xlabel("Time [s]")
plt.ylabel(r"$\Delta_q$ [$10^3$ rad]")
if SHOW_LEGEND:
leg = plt.legend(
["ff torque", "ff friction", "fb force", "fb pos", "total"],
loc="upper left",
)
leg.get_frame().set_alpha(plut.LEGEND_ALPHA)
title = "Joint " + str(JOINT_ID[i]) + " delta_q components"
plut.saveCurrentFigure(title)
plt.title(title)
# delta_q_friction = k_v[i]*dq[:,i];
# plt.figure(); plt.plot(time, delta_q_ff[:,i]);
# plt.plot(time, delta_q_fb[:,i]);
# plt.plot(time, delta_q_friction);
# plt.plot(time, qDes[:,i]-enc[:,i]);
# plt.xlabel('Time [s]');
# plt.ylabel('Delta_q [rad]');
# leg = plt.legend(['feedforward', 'feedback', 'friction', 'total']);
# leg.get_frame().set_alpha(plut.LEGEND_ALPHA);
# title = 'Joint '+str(JOINT_ID[i])+' delta_q components';
# plut.saveCurrentFigure(title);
# plt.title(title);
if SHOW_PLOTS:
plt.show()
| stack-of-tasks/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/pos_ctrl/compress_stairs_data.py | compress_stairs_data.py | py | 8,970 | python | en | code | 8 | github-code | 90 |
72208054378 | """
编写一个算法来判断一个数 n 是不是快乐数。
“快乐数”定义为:对于一个正整数,每一次将该数替换为它每个位置上的数字的平方和,然后重复这个过程直到这个数变为 1,也可能是 无限循环 但始终变不到 1。如果 可以变为 1,那么这个数就是快乐数。
如果 n 是快乐数就返回 True ;不是,则返回 False 。
示例:
输入:19
输出:true
解释:
12 + 92 = 82
82 + 22 = 68
62 + 82 = 100
12 + 02 + 02 = 1
"""
class Solution:
def isHappy(self, n: int) -> bool:
def get_next(num):
total_sum = 0
while num:
num, digit = divmod(num, 10)
total_sum += digit ** 2
return total_sum
# 快指针和慢指针,如果循环,就会相遇,快走两步,慢走一步
slow_runner = n
fast_runner = get_next(n)
while fast_runner != 1 and slow_runner != fast_runner:
slow_runner = get_next(slow_runner)
fast_runner = get_next(get_next(fast_runner))
return fast_runner == 1
if __name__ == '__main__':
n = 19
sol = Solution()
result = sol.isHappy(n)
print(result)
| Asunqingwen/LeetCode | 每日一题/快乐数.py | 快乐数.py | py | 1,231 | python | zh | code | 0 | github-code | 90 |
73873941417 | # Standard Library
import random
import traceback
# 3rd Party
import docker
import pybreaker
import requests
from flask import current_app
# Fastlane
from fastlane.worker.errors import NoAvailableHostsError
class DockerPool:
    """Pool of Docker clients partitioned into "farms" by task-id regex.

    ``docker_hosts`` is an iterable of ``(regex, hosts, max_running)``
    tuples, where each host is an ``"address:port"`` string and ``regex``
    selects which task ids may run on that farm (``None`` matches all).
    """
    def __init__(self, docker_hosts):
        self.docker_hosts = docker_hosts
        # regex -> maximum concurrently running jobs for that farm
        self.max_running = {}
        # ordered list of (regex, [(host, port, client), ...]) tuples
        self.clients_per_regex = []
        # "host:port" -> (host, port, client) lookup for direct addressing
        self.clients = {}
        self.__init_clients()
    def __init_clients(self):
        # Build one DockerClient per configured address and index it both
        # per-farm (clients_per_regex) and globally (clients).
        for regex, docker_hosts, max_running in self.docker_hosts:
            client_list = []
            clients = (regex, client_list)
            self.clients_per_regex.append(clients)
            self.max_running[regex] = max_running
            for address in docker_hosts:
                host, port = address.split(":")
                docker_client = docker.DockerClient(base_url=address)
                self.clients[address] = (host, int(port), docker_client)
                client_list.append((host, int(port), docker_client))
    @staticmethod
    def refresh_circuits(executor, clients, blacklisted_hosts, logger):
        """Probe each non-blacklisted host with ``docker ps`` through its
        circuit breaker so the breaker state reflects current reachability."""
        def docker_ps(client):
            client.containers.list(sparse=False)
        for host, port, client in clients:
            if f"{host}:{port}" in blacklisted_hosts:
                continue
            try:
                logger.debug("Refreshing host...", host=host, port=port)
                circuit = executor.get_circuit(f"{host}:{port}")
                circuit.call(docker_ps, client)
            except (requests.exceptions.ConnectionError, pybreaker.CircuitBreakerError):
                # An unreachable/open-circuit host is logged and skipped;
                # the breaker keeps it excluded until it recovers.
                error = traceback.format_exc()
                logger.error("Failed to refresh host.", error=error)
    def get_client(self, executor, task_id, host=None, port=None, blacklist=None):
        """Return a ``(host, port, client)`` tuple for ``task_id``.

        When ``host``/``port`` are given, that exact client is returned
        (or ``(host, port, None)`` if unknown). Otherwise a random healthy,
        non-blacklisted client is picked from the first matching farm.
        Raises ``NoAvailableHostsError`` when no farm can serve the task.
        """
        logger = current_app.logger.bind(
            task_id=task_id, host=host, port=port, blacklist=blacklist
        )
        if host is not None and port is not None:
            logger.debug("Custom host returned.")
            docker_client = self.clients.get(f"{host}:{port}")
            if docker_client is None:
                return host, port, None
            return docker_client
        if blacklist is None:
            blacklist = set()
        for regex, clients in self.clients_per_regex:
            logger.debug("Trying to retrieve docker client...", regex=regex)
            if regex is not None and not regex.match(task_id):
                logger.debug("Task ID does not match regex.", regex=regex)
                continue
            DockerPool.refresh_circuits(executor, clients, blacklist, logger)
            # Keep only hosts that are neither blacklisted nor tripped.
            filtered = [
                (host, port, client)
                for (host, port, client) in clients
                if f"{host}:{port}" not in blacklist
                and executor.get_circuit(f"{host}:{port}").current_state == "closed"
            ]
            if not filtered:
                logger.debug(
                    "No non-blacklisted and closed circuit clients found for farm.",
                    regex=regex,
                )
                continue
            logger.info(
                "Returning random choice out of the remaining clients.",
                clients=[f"{host}:{port}" for (host, port, client) in filtered],
            )
            # random.choice is fine here: load-spreading, not security.
            host, port, client = random.choice(filtered) # nosec
            return host, int(port), client
        msg = f"Failed to find a docker host for task id {task_id}."
        logger.error(msg)
        raise NoAvailableHostsError(msg)
| fastlane-queue/fastlane | fastlane/worker/docker/pool.py | pool.py | py | 3,559 | python | en | code | 49 | github-code | 90 |
18289081179 | from collections import deque
# Read an H x W grid of '.'/'#' cells and print the longest shortest path
# between any pair of open cells (a BFS is started from every cell).
H, W = map(int, input().split())
maze = [list(input()) for i in range(H)]
# Four cardinal neighbour offsets.
XY = [(-1, 0), (1, 0), (0, 1), (0, -1)]
t_max = 0
for i in range(H):
    for j in range(W):
        step = [[0]*W for _ in range(H)]
        visited = [[False]*W for _ in range(H)]
        queue = deque([(i, j)])
        while queue:
            x, y = queue.popleft()
            if maze[x][y] == "#":
                continue
            if visited[x][y]:
                continue
            for dx, dy in XY:
                nx = x + dx
                ny = y + dy
                if 0 <= nx <= H-1 and 0 <= ny <= W-1:
                    # NOTE(review): step[nx][ny] is overwritten even when the
                    # neighbour is a wall or already visited, so the recorded
                    # value can exceed the true BFS distance by one; the
                    # final "-1" below appears to compensate for exactly
                    # that — confirm before refactoring this loop.
                    step[nx][ny] = step[x][y] + 1
                    queue.append((nx, ny))
                    t_max = max(t_max, step[nx][ny])
            visited[x][y] = True
print(t_max-1)
| Aasthaengg/IBMdataset | Python_codes/p02803/s610727198.py | s610727198.py | py | 827 | python | en | code | 0 | github-code | 90 |
9974014026 | # -*- coding: utf-8 -*-
# Auther : jianlong
from animal.cat import cat
from animal.dog.dog import dog
import time
from test1 import test1
from test.second import second_test
from test.alist import alit
def upper(str):
    """Return *str* upper-cased; on any failure print a notice and the
    exception, then return an empty string."""
    try:
        return str.upper()
    except Exception as err:
        print("程序出错了")
        print(err)
        return ''
# Demo calls: exercise upper() with a non-string (prints the error path).
result = upper(1)
print(result)
second_test()
cat()
dog()
# NOTE(review): the return value of time.time() is discarded here.
time.time()
print(result)
result = test1.test()
print(result)
alit()
| jianlongIT/python_test | pythonlearn/test2.py | test2.py | py | 499 | python | en | code | 0 | github-code | 90 |
73521622697 | from django.contrib.auth import get_user_model
from djoser.views import UserViewSet
from rest_framework import mixins, status, viewsets
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from users.models import Subscribe
from users.serializers import FollowCreateSerializer, FollowerSerializer
User = get_user_model()
# Djoser user viewset with page-number pagination enabled.
# (Comment instead of a docstring: DRF renders class docstrings in the
# browsable API, so a docstring would change visible behaviour.)
class CustomUserViewSet(UserViewSet):
    pagination_class = PageNumberPagination
# Read-only listing of the current user's subscriptions.
class FollowViewSet(mixins.ListModelMixin,
                    viewsets.GenericViewSet):
    serializer_class = FollowerSerializer
    def get_queryset(self):
        # Only the authenticated user's own subscriptions are listed.
        return self.request.user.subscriptions.all()
# Create/delete a subscription of the current user to another user.
# (Comments instead of docstrings: DRF renders docstrings in the
# browsable API, so docstrings would change visible behaviour.)
class SubscribeViewSet(viewsets.GenericViewSet,
                       mixins.CreateModelMixin, mixins.DestroyModelMixin):
    serializer_class = FollowCreateSerializer

    def get_queryset(self):
        return get_object_or_404(
            User, id=self.kwargs.get('user_id')
        )

    # Subscribe request.user to the author with id=user_id.
    # Returns 400 for self-subscription or duplicates, else 201.
    def create(self, request, user_id):
        user = request.user
        author = get_object_or_404(User, id=user_id)
        if user == author:
            return Response(
                'Нельзя подписаться на себя',
                status=status.HTTP_400_BAD_REQUEST
            )
        if Subscribe.objects.filter(user=user, author=author).exists():
            return Response(
                'Такая подписка уже существует',
                status=status.HTTP_400_BAD_REQUEST
            )
        follow = Subscribe.objects.create(user=user, author=author)
        serializer = FollowerSerializer(
            follow.author, context={'request': request})
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    # Unsubscribe request.user from the author with id=user_id.
    # Returns 400 with an error message when no subscription exists, else 204.
    def delete(self, request, user_id, format=None):
        author = get_object_or_404(
            User, id=user_id
        )
        # Bug fix: the old code wrapped get_object_or_404 in
        # "except NameError", but a missing subscription raises Http404,
        # so the intended 400-with-message branch was dead code and the
        # client got a bare 404 instead. Use an explicit existence check.
        subscribe = Subscribe.objects.filter(
            user=request.user, author=author
        ).first()
        if subscribe is None:
            msg = f'Автор {author} отсутствут в Ваших подписках.'
            return Response(
                {'errors': msg}, status=status.HTTP_400_BAD_REQUEST
            )
        subscribe.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| OlegZhigulin/foodgram-project-react | backend/foodgram/users/views.py | views.py | py | 2,426 | python | en | code | 0 | github-code | 90 |
32661312631 | import json
import pickle
import logging
from collections import defaultdict
from typing import Any, Dict, List, Iterable, Text
from overrides import overrides
import torch
from allennlp.data.fields import (
MetadataField,
TextField,
IndexField,
ListField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("p3_jsonl")
class P3ClusterReader(DatasetReader):
    """Reads P3-style JSONL files where each line has an ``input``, a
    ``target`` and optionally ``answer_choices``, and converts them into
    AllenNLP ``Instance`` objects for multiple-choice scoring."""
    def __init__(
        self,
        model_name: str = "google/t5-xl-lm-adapt",
        max_query_length: int = 512,
        max_answer_length: int = 256,
        return_original_input: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(
            manual_distributed_sharding=True,
            manual_multiprocess_sharding=True,
            **kwargs,
        )
        self.return_original_input = return_original_input
        self._transformer_model_name = model_name
        self._tokenizer = PretrainedTransformerTokenizer(model_name)
        self._token_indexers = {
            "tokens": PretrainedTransformerIndexer(model_name)
        }
        self._max_query_length = max_query_length
        self._max_answer_length = max_answer_length
        # Populated per-read with skip/keep/truncation counters.
        self._stats = None

    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        for instance in self.shard_iterable(self.__read(file_path)):
            yield instance

    def __read(self, file_path: str) -> Iterable[Instance]:
        """Yield one Instance per usable JSONL line and log skip stats."""
        self._stats = defaultdict(int)
        logger.info(f"Reading data from {file_path}")
        # Bug fix: the file handle used to be leaked
        # (``for line in open(file_path)``); use a context manager.
        with open(file_path) as data_file:
            for line in data_file:
                instance_data = json.loads(line)
                if "target" not in instance_data or "input" not in instance_data:
                    self._stats["Instances without inputs or targets (skipped)"] += 1
                    continue
                if not isinstance(instance_data["target"], str):
                    self._stats["Instances whose targets are not strings (skipped)"] += 1
                    continue
                target = instance_data["target"].strip()
                if "answer_choices" not in instance_data:
                    self._stats["Instances without answer options (kept)"] += 1
                    answer_options = [target]
                else:
                    answer_options = [c.strip() for c in instance_data["answer_choices"]]
                    if target not in answer_options:
                        answer_options.append(target)
                        self._stats["Instances with targets not in answer choices (kept)"] += 1
                yield self.text_to_instance(
                    instance_data["input"],
                    target,
                    answer_options
                )
        logger.info("Dataset stats:")
        for key, value in self._stats.items():
            logger.info(f"\t{key}: {value}")

    def text_to_instance(
        self,  # type: ignore  # pylint: disable=arguments-differ
        input_text: str,
        target: str,
        options: List[str],
        metadata: Dict[str, Any] = None,
    ) -> Instance:
        """Build an Instance with the tokenized prompt, the tokenized
        answer options and the index of the correct option."""
        fields = {}
        tokenized_input = self._tokenizer.tokenize(input_text)
        if len(tokenized_input) > self._max_query_length:
            self._stats["Truncated inputs"] += 1
            tokenized_input = tokenized_input[:self._max_query_length]
        input_field = TextField(tokenized_input)
        fields["prompt_and_input"] = input_field
        if self.return_original_input:
            fields['pretokenized_input'] = input_text
        answer_option_fields = []
        for option in options:
            tokenized_option = self._tokenizer.tokenize(option)
            if len(tokenized_option) > self._max_answer_length:
                self._stats["Truncated options"] += 1
                tokenized_option = tokenized_option[:self._max_answer_length]
            answer_option_fields.append(TextField(tokenized_option))
        options_list_field = ListField(answer_option_fields)
        fields["answer_options"] = options_list_field
        answer_index = None
        # NOTE(review): ``target in option`` is a *substring* test, so a
        # short target can match the wrong option first — confirm this is
        # intentional before changing it to equality.
        for i, option in enumerate(options):
            if target in option:
                answer_index = i
                break
        fields["correct_answer_index"] = IndexField(answer_index, options_list_field)
        if metadata is not None:
            fields["metadata"] = MetadataField(metadata)
        return Instance(fields)

    @overrides
    def apply_token_indexers(self, instance: Instance) -> None:
        instance.fields["prompt_and_input"].token_indexers = self._token_indexers
        for field in instance.fields["answer_options"].field_list:
            field.token_indexers = self._token_indexers
| allenai/data-efficient-finetuning | attribution/p3_jsonl_reader.py | p3_jsonl_reader.py | py | 4,866 | python | en | code | 27 | github-code | 90 |
28839287921 | #!/usr/bin/env python3
import sys
import urllib.parse
# Clean a pasted YouTube copyright/track report (file given as argv[1]):
# drop boilerplate lines, then emit "Title - Artist" lines plus a
# URL-encoded search query for each entry.
infn = sys.argv[1]
data = open(infn).readlines()
# Lines containing any of these substrings are boilerplate and dropped.
grep_v_in_l = [
    'Copyright owners',
    'Ad revenue',
    'Content found during',
    'some territories',
    'cannot be monetized',
    'Copyright owner',
    'On behalf',
    'GmbH',
    'LLC',
    'Content used',
    'Claim type',
    'Impact on the video',
    'Actions',
    'Blocked in all',
    'or monetized',
    '/',
]
# Lines exactly equal to these are dropped as well.
grep_v_eq_l = [
    '1.00\n'
]
out_lines = []
accum = []
for line in data:
    skip = False
    for grep in grep_v_in_l:
        if grep in line:
            skip = True
            break
    if skip:
        continue
    for grep in grep_v_eq_l:
        if line == grep:
            skip = True
            break
    if skip:
        continue
    # Normalize the en-dash separator to ' => ', which also marks the
    # last line of one report entry.
    line = line.replace(' – ', ' => ')
    accum.append(line)
    if ' => ' in line:
        # NOTE(review): entries are assumed to span exactly 3 or 4 kept
        # lines; the picked indices presumably select title and artist —
        # confirm against a real report.
        if len(accum) == 3:
            out_line = accum[1].strip() + ' - ' + accum[0].strip()
        elif len(accum) == 4:
            out_line = accum[2].strip() + ' - ' + accum[1].strip()
        else:
            print(len(accum), accum)
            raise IndexError
        out_lines.append(out_line + '\n')
        out_lines.append(accum[-1])
        out_lines.append('URL ' + urllib.parse.quote(out_line) + '\n')
        out_lines.append('\n')
        accum = []
print(''.join(out_lines))
| npinto/dotfiles | utils/.utils/yt-tracklist-clean.py | yt-tracklist-clean.py | py | 1,370 | python | en | code | 10 | github-code | 90 |
8583862455 | ## @package app.wh_freq_app
from app.app import App
from ui.wh_freq_app.main_window import MainWindow
## Handles startup for the WH Question app.
class WhFreqApp(App):
    ## Constructor
    #  Registers the app with its id, type (GUI) and toolbar icon path.
    # @param self
    def __init__(self):
        super(WhFreqApp, self).__init__(
            'wh_freq_app',
            App.APP_TYPES.GUI,
            'icons/open_icon_library-standard/icons/png/64x64/actions/edit-find-5.png'
        )
    ## See superclass description.
    #  Launches the app's main window.
    def start(self):
        MainWindow()
| babylanguagelab/bll_app | wayne/app/wh_freq_app.py | wh_freq_app.py | py | 509 | python | en | code | 0 | github-code | 90 |
36318187395 | import sys
import optparse
from optparse import Option, BadOptionError
from trigger.cli import cmdoptions
from trigger.cli.cmdparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter, parse_opts
from trigger.cli.utils import get_prog, get_userinput_boolean,is_true
from trigger.log import Logger
from trigger.trigger_logic import FakeTrigger
# Base class for CLI sub-commands. Subclasses set name/usage/summary and
# override run(). (Comment instead of a class docstring on subclasses:
# the parser's `description` is taken from self.__doc__.)
class Command(object):
    name = None
    usage = None
    hidden = None
    summary = ""

    def __init__(self):
        self.parser_kw = {
            'usage': self.usage,
            'prog': '%s %s' % (get_prog(), self.name),
            'formatter': UpdatingDefaultsHelpFormatter(),
            'add_help_option': False,
            'name': self.name,
            'description': self.__doc__,
        }
        self.parser = ConfigOptionParser(**self.parser_kw)
        # Commands should add options to this option group
        optgroup_name = '%s Options' % self.name.capitalize()
        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
        # Add the general options
        gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
        self.parser.add_option_group(gen_opts)
        # set logger
        self.logger = Logger()
        self.logger.add_consumers(
            (Logger.VERBOSE_DEBUG, sys.stdout),
        )

    def parse_args(self, args):
        # factored out for testability
        return self.parser.parse_args(args)

    def run(self, args):
        """Execute the command; subclasses must override this method."""
        # Bug fix: the body used to be the bare expression ``NotImplemented``,
        # which evaluates to the singleton and silently does nothing.
        # Misuse now fails loudly (execute() turns it into exit status 1).
        raise NotImplementedError(
            '%s must override run()' % type(self).__name__
        )

    def execute(self, args=None):
        """Run the command, reporting any error on stderr and exiting 1."""
        try:
            self.run(args)
        except Exception:
            sys.stderr.write("ERROR: %s \n" % str(sys.exc_info()[1]))
            sys.exit(1)
        except KeyboardInterrupt:
            sys.exit(1)
# Demo command requiring a --start-from option before running "action_one".
class ActionOneCommand(Command):
    name = "action_one"
    usage = """%prog """
    summary = "action_one command"

    def __init__(self):
        super(ActionOneCommand, self).__init__()
        self.parser.add_option(Option(
            '--start-from',
            dest='start_from',
            action='store',
            default=None,
            help="the fake option of action_one"
        ))

    def run(self, args):
        """Parse ``args`` and run the fake "action_one" processing step."""
        try:
            options, _ = self.parse_args(args)
        except BadOptionError:
            sys.stderr.write("ERROR: %s \n" % str(sys.exc_info()[1]))
            return
        if not options.start_from:
            # Bug fix: the format string used a bogus "\s" escape where a
            # newline was intended (every sibling error write ends "\n").
            sys.stderr.write("ERROR: %s \n" % "please provide the start_from parameter")
        else:
            sys.stdout.write("start_from = {}\n".format(options.start_from))
            fake_obj = FakeTrigger()
            fake_obj.process("action_one")
# Command that asks for confirmation before processing an environment.
class ActionTwoCommand(Command):
    name = "action_two"
    usage = """%prog """
    summary = "action_two command"

    def __init__(self):
        super(ActionTwoCommand, self).__init__()
        self.parser.add_option(Option(
            '--envname',
            dest='envname',
            action='store',
            help="the name of environment"
        ))

    def run(self, args):
        """Parse ``args``, confirm with the user, then run "action_two"."""
        try:
            parsed, _ = self.parse_args(args)
            prompt = "Do you really want to process this %s [y/n]: " % parsed.envname
            answer = get_userinput_boolean(prompt)
            if is_true(answer):
                trigger = FakeTrigger()
                trigger.process("action_two")
                sys.stdout.write(
                    "the action on [%s] has been processed \n" % parsed.envname)
        except BadOptionError:
            sys.stderr.write("ERROR: %s" % str(sys.exc_info()[1]))
            return
# Top-level "show" command that dispatches to lazily-defined "env" and
# "image" subcommands.
class ShowCommand(Command):
    name = "show"
    usage = """%prog """
    summary = "show the information for the matrix"
    def __init__(self):
        super(ShowCommand, self).__init__()
    def get_prog(self):
        # e.g. "<prog> show"; used as the subcommand parser's prog name.
        return "%s %s" % (get_prog(), self.name)
    def run(self, args):
        # define the subclass here
        # Subcommands inherit from ShowCommand (via self.__class__) so
        # they reuse its parser setup; they are created on each run().
        class EnvCommand(self.__class__):
            name = "show env"
            subcommand = "env"
            usage = """%prog"""
            summary = "show the information for the matrix"
            def __init__(self):
                super(EnvCommand, self).__init__()
                self.parser.add_option(Option(
                    '--envname',
                    dest='envname',
                    action='store',
                    default=None,
                    help="the name of environment"
                ))
            def run(self, args):
                # Echo the requested environment name (blank line if unset).
                try:
                    options, _ = self.parse_args(args)
                    if options.envname:
                        sys.stdout.write(options.envname)
                        sys.stdout.write("\n")
                    else:
                        sys.stdout.write("\n")
                except BadOptionError:
                    sys.stderr.write("ERROR: %s" % str(sys.exc_info()[1]))
                    return
        class ImageCommand(self.__class__):
            name = "show image"
            subcommand = "image"
            usage = """%prog"""
            summary = "show the information for the matrix"
            def __init__(self):
                super(ImageCommand, self).__init__()
            def run(self, args):
                sys.stdout.write("show images")
                sys.stdout.write("\n")
        sub_commands = {
            EnvCommand.subcommand: EnvCommand,
            ImageCommand.subcommand: ImageCommand
        }
        parser_kw = {
            'usage': '\n%prog <command> [options]',
            'add_help_option': False,
            'formatter': UpdatingDefaultsHelpFormatter(),
            'name': 'global',
            'prog': self.get_prog(),
        }
        # Parse off the subcommand name, then delegate the rest of argv.
        sub_command, args_else = parse_opts(args, get_commands_summary(sub_commands), parser_kw,
                                            cmdoptions.general_group)
        sub_commands[sub_command]().run(args_else)
# Registry mapping command-line names to their Command classes.
COMMANDS = {
    ActionOneCommand.name: ActionOneCommand,
    ActionTwoCommand.name: ActionTwoCommand,
    ShowCommand.name: ShowCommand,
}
def get_commands_summary(commands):
    """Yield sorted (command name, command summary) tuples.

    ``commands`` maps command names to Command classes exposing a
    ``summary`` attribute.
    """
    # Bug fix: the docstring promised sorted output, but items were
    # yielded in plain dict order; sort by command name to match it.
    for name, command_class in sorted(commands.items()):
        yield (name, command_class.summary)
| weiwongfaye/python_cli_template | trigger/cli/commands.py | commands.py | py | 6,453 | python | en | code | 0 | github-code | 90 |
1699089750 | """
high level Api
"""
import asyncio
import datetime as dt
import logging
from itertools import product
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import pandas as pd
from .enums import ErrorBehaviour
from .enums import SecurityIdType
from .errors import BloombergErrors
from .handlers import RequestHandler
from .handlers import SubscriptionHandler
from .instruments_requests import CurveLookupRequest
from .instruments_requests import GovernmentLookupRequest
from .instruments_requests import SecurityLookupRequest
from .requests import FieldSearchRequest
from .requests import HistoricalDataRequest
from .requests import ReferenceDataRequest
from .requests import Subscription
from .utils import log
from .utils.misc import split_into_chunks
# pylint: disable=ungrouped-imports
try:
import blpapi
except ImportError:
from async_blp.utils import env_test as blpapi
LOGGER = log.get_logger()
class AsyncBloomberg:
    """
    Async wrapper of blpapi.

    Large requests are split into chunks of at most
    ``max_securities_per_request`` securities by ``max_fields_per_request``
    fields and spread over up to ``max_sessions`` Bloomberg sessions.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 host: str = '127.0.0.1',
                 port: int = 8194,
                 log_level: int = logging.WARNING,
                 loop: asyncio.AbstractEventLoop = None,
                 error_behaviour: ErrorBehaviour = ErrorBehaviour.IGNORE,
                 max_sessions: int = 5,
                 max_securities_per_request: int = 100,
                 max_fields_per_request: int = 50,
                 ):
        try:
            self._loop = loop or asyncio.get_running_loop()
        except RuntimeError:
            raise RuntimeError('Please run AsyncBloomberg inside asyncio '
                               'loop or explicitly provide one')

        self._max_fields_per_request = max_fields_per_request
        self._max_securities_per_request = max_securities_per_request
        self._max_sessions = max_sessions
        self._error_behaviour = error_behaviour

        self._session_options = blpapi.SessionOptions()
        self._session_options.setServerHost(host)
        self._session_options.setServerPort(port)

        # Request handlers are created lazily, one Bloomberg session each.
        self._request_handlers: List[RequestHandler] = []
        self._subscription_handler: Optional[SubscriptionHandler] = None

        log.set_logger(log_level)

    async def stop(self):
        """
        Stop all started sessions. If you try to use `AsyncBloomberg` after
        calling this method, it will attempt to open new sessions.
        If you stop session before receiving full response from Bloomberg,
        you may lose some of the data.
        This method waits for all handlers to successfully
        stop their sessions.
        """
        for handler in self._request_handlers:
            handler.stop_session()

        if self._subscription_handler:
            self._subscription_handler.stop_session()
            all_events = [self._subscription_handler.session_stopped.wait()]
        else:  # pragma: no cover
            all_events = []

        all_events.extend(handler.session_stopped.wait()
                          for handler in self._request_handlers)

        await asyncio.gather(*all_events)

    async def get_reference_data(
            self,
            securities: List[str],
            fields: List[str],
            security_id_type: Optional[SecurityIdType] = None,
            overrides=None,
            ) -> Tuple[pd.DataFrame, BloombergErrors]:
        """
        Return reference data from Bloomberg as a (DataFrame, errors)
        pair; the DataFrame is indexed by security with one column per field.
        """
        chunks = self._split_requests(securities, fields)
        request_tasks = []
        for security_chunk, fields_chunk in chunks:
            handler = self._choose_handler()
            request = ReferenceDataRequest(security_chunk,
                                           fields_chunk,
                                           security_id_type,
                                           overrides,
                                           self._error_behaviour,
                                           self._loop)
            request_tasks.append(asyncio.create_task(request.process()))
            asyncio.create_task(handler.send_requests([request]))

        requests_result = await asyncio.gather(*request_tasks)

        result_df = pd.DataFrame(index=securities, columns=fields)
        errors = BloombergErrors()
        for data, error in requests_result:
            result_df.loc[data.index, data.columns] = data
            errors += error
        return result_df, errors

    async def search_fields(self,
                            query: str,
                            overrides=None,
                            ) -> pd.DataFrame:
        """
        Return the field-search results for ``query`` as a DataFrame.
        """
        request = FieldSearchRequest(query,
                                     overrides,
                                     self._error_behaviour,
                                     self._loop)
        handler = self._choose_handler()
        asyncio.create_task(handler.send_requests([request]))
        requests_result = await request.process()
        data, _ = requests_result
        return data

    async def get_historical_data(
            self,
            securities: List[str],
            fields: List[str],
            start_date: dt.date,
            end_date: dt.date,
            security_id_type: Optional[SecurityIdType] = None,
            overrides=None,
            ) -> Tuple[pd.DataFrame, BloombergErrors]:
        """
        Return historical data from Bloomberg as a (DataFrame, errors)
        pair; the DataFrame has a (date, security) MultiIndex.
        """
        chunks = self._split_requests(securities, fields)
        tasks = []
        for security_chunk, fields_chunk in chunks:
            handler = self._choose_handler()
            request = HistoricalDataRequest(security_chunk,
                                            fields_chunk,
                                            start_date,
                                            end_date,
                                            security_id_type,
                                            overrides,
                                            self._error_behaviour,
                                            self._loop)
            tasks.append(asyncio.create_task(request.process()))
            asyncio.create_task(handler.send_requests([request]))

        requests_result = await asyncio.gather(*tasks)

        all_dates = pd.date_range(start_date, end_date)
        index = pd.MultiIndex.from_product([all_dates, securities],
                                           names=['date', 'security'])
        result_df = pd.DataFrame(index=index,
                                 columns=fields)
        errors = BloombergErrors()
        for data, error in requests_result:
            result_df.loc[data.index, data.columns] = data
            # Bug fix: this used to add a fresh empty ``BloombergErrors()``
            # instead of the per-chunk ``error``, silently dropping every
            # historical-request error (cf. ``get_reference_data``).
            errors += error
        return result_df, errors

    async def subscribe(
            self,
            securities: List[str],
            fields: List[str],
            security_id_type: Optional[SecurityIdType] = None,
            overrides=None,
            ) -> None:
        """
        Subscribe to receive periodical updates from Bloomberg.
        """
        subscription = Subscription(securities,
                                    fields,
                                    security_id_type,
                                    overrides,
                                    self._error_behaviour,
                                    self._loop)
        # The (single) subscription handler is created on first use.
        if self._subscription_handler is None:
            self._subscription_handler = SubscriptionHandler(
                self._session_options,
                self._loop)
        await self._subscription_handler.subscribe([subscription])

    async def read_subscriptions(self) -> pd.DataFrame:
        """
        Receive all currently available subscription data.
        Raises RuntimeError when called before any subscription was made.
        """
        if self._subscription_handler is None:
            raise RuntimeError('You have to subscribe before reading '
                               'subscription data')
        return await self._subscription_handler.read_subscribers()

    async def security_lookup(self,
                              query: str,
                              options: Dict[str, str] = None,
                              max_results: int = 10):
        """Run a //instruments security lookup for ``query``."""
        options = options or {}
        handler = self._choose_handler()
        request = SecurityLookupRequest(query, max_results, options,
                                        self._error_behaviour, self._loop)
        task = asyncio.create_task(request.process())
        asyncio.create_task(handler.send_requests([request]))
        return await task

    async def curve_lookup(self,
                           query: str,
                           options: Dict[str, str] = None,
                           max_results: int = 10):
        """Run a //instruments curve lookup for ``query``."""
        options = options or {}
        handler = self._choose_handler()
        request = CurveLookupRequest(query, max_results, options,
                                     self._error_behaviour, self._loop)
        task = asyncio.create_task(request.process())
        asyncio.create_task(handler.send_requests([request]))
        return await task

    async def government_lookup(self,
                                query: str,
                                options: Dict[str, str] = None,
                                max_results: int = 10):
        """Run a //instruments government lookup for ``query``."""
        options = options or {}
        handler = self._choose_handler()
        request = GovernmentLookupRequest(query, max_results, options,
                                          self._error_behaviour, self._loop)
        task = asyncio.create_task(request.process())
        asyncio.create_task(handler.send_requests([request]))
        return await task

    def _choose_handler(self) -> RequestHandler:
        """
        Return the most suitable handler to handle new request using
        the following rules:
        1) If there are free handlers (with no current requests),
        return one of them
        2) If new handler can be created (`max_sessions` is not reached),
        return new handler
        3) Otherwise, return the handler with the smallest load
        """
        free_handlers = [handler
                         for handler in self._request_handlers
                         if not handler.current_load]
        if free_handlers:
            return free_handlers[0]

        if len(self._request_handlers) < self._max_sessions:
            handler = RequestHandler(self._session_options, self._loop)
            self._request_handlers.append(handler)
            return handler

        return min([handler for handler in self._request_handlers],
                   key=lambda handler: handler.current_load)

    def _split_requests(self,
                        securities: List[str],
                        fields: List[str]):
        """Yield every (security_chunk, fields_chunk) pair to request."""
        securities_chunks = split_into_chunks(securities,
                                              self._max_securities_per_request)
        fields_chunks = split_into_chunks(fields,
                                          self._max_fields_per_request)
        return product(securities_chunks, fields_chunks)
| rockscie/async_blp | async_blp/async_blp.py | async_blp.py | py | 11,272 | python | en | code | 13 | github-code | 90 |
42039518730 | """
Given a directed acyclic graph, with n vertices numbered from 0 to n-1, and an array edges where edges[i] = [fromi, toi] represents a directed edge from node fromi to node toi.
Find the smallest set of vertices from which all nodes in the graph are reachable. It's guaranteed that a unique solution exists.
Notice that you can return the vertices in any order.
Example 1:
Input: n = 6, edges = [[0,1],[0,2],[2,5],[3,4],[4,2]]
Output: [0,3]
Explanation: It's not possible to reach all the nodes from a single vertex. From 0 we can reach [0,1,2,5]. From 3 we can reach [3,4,2,5]. So we output [0,3].
https://leetcode.com/problems/minimum-number-of-vertices-to-reach-all-nodes/
"""
class Solution:
    def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
        """Return the vertices of the DAG that have in-degree zero.

        Every node with an incoming edge is reachable from some other
        node, so the unique minimal starting set is exactly the nodes
        that never appear as an edge target.
        """
        targets = {dst for _, dst in edges}
        return {vertex for vertex in range(n) if vertex not in targets}
| nilay-gpt/LeetCode-Solutions | graphs/min_nodes_to_visit_all.py | min_nodes_to_visit_all.py | py | 1,064 | python | en | code | 2 | github-code | 90 |
13680901281 | import numpy as np
import csv
def get_train_data_for_class(train_X, train_Y, class_label):
    """Return copies of (X, Y) with Y binarized for one-vs-rest training.

    Labels equal to ``class_label`` become 1; every other label becomes 0.
    The inputs are not modified.
    """
    features = np.copy(train_X)
    labels = np.where(np.copy(train_Y) == class_label, 1, 0)
    return features, labels
def import_data():
    """Load training features and labels from the v2 CSV files.

    Returns (X, Y); the feature file's header row is skipped.
    NOTE(review): ``np.float128`` is unavailable on some platforms
    (e.g. Windows builds of NumPy) — confirm the target environment.
    """
    X = np.genfromtxt("train_X_lg_v2.csv",dtype=np.float128,delimiter = ',',skip_header=1)
    Y = np.genfromtxt("train_Y_lg_v2.csv",dtype=np.float128,delimiter = ",")
    return X,Y
def sigmoid(Z):
    """Element-wise logistic function 1 / (1 + e^(-Z))."""
    exp_neg = np.exp(-Z)
    return 1 / (1 + exp_neg)
def compute_gradient_of_cost_function(X, Y, W, b):
    """Return gradients (dW, db) of the logistic log-loss at (W, b).

    dW has the same shape as W; db is a scalar.
    """
    m = len(X)
    # Forward pass: sigmoid of the linear scores (helper inlined).
    activations = 1 / (1 + np.exp(-(np.dot(X, W) + b)))
    residual = activations - Y
    grad_W = (1/m * np.dot(residual.T, X)).T
    grad_b = 1/m * np.sum(residual)
    return grad_W, grad_b
def compute_cost(X, Y, W, b):
    """Return the cross-entropy cost of logistic regression at (W, b).

    Probabilities that are exactly 0 or 1 are nudged to avoid log(0).
    """
    m = len(X)
    # Forward pass: sigmoid of the linear scores (helper inlined).
    probs = 1 / (1 + np.exp(-(np.dot(X, W) + b)))
    probs = np.where(probs == 1, 0.99999, probs)
    probs = np.where(probs == 0, 0.00001, probs)
    log_loss = Y * np.log(probs) + (1 - Y) * np.log(1 - probs)
    return -1/m * np.sum(log_loss)
def optimize_weights_using_gradient_descent(X, Y, W, b, num_iterations, learning_rate):
    """Run batch gradient descent for ``num_iterations`` steps.

    Returns the optimized (W, b). Progress is printed every 50000
    iterations.
    """
    # Improvements: removed a large block of commented-out dead code
    # (an abandoned convergence-threshold loop), and the cost — which was
    # only ever used for the progress print — is now computed only on
    # the 1-in-50000 reporting iterations instead of every step.
    for i in range(1, num_iterations + 1):
        dw, db = compute_gradient_of_cost_function(X, Y, W, b)
        W = W - (learning_rate * dw)
        b = b - (learning_rate * db)
        if i % 50000 == 0:
            print(i, compute_cost(X, Y, W, b))
    return W, b
def train_model(X, Y):
    """Train four one-vs-rest logistic-regression classifiers.

    Returns an array of shape (4, n_features + 1); column 0 of each row
    holds the bias term.
    """
    learning_rate = [0.008, 0.0079, 0.00008, 0.000001]
    set_of_w = []
    for i in range(4):
        # Bug fix: the loop used to rebind X and Y themselves
        # ("X, Y = get_train_data_for_class(X, Y, i)"), so from the second
        # class onward training ran on the previous class's 0/1 labels
        # instead of the original multi-class labels.
        class_X, class_Y = get_train_data_for_class(X, Y, i)
        class_Y = class_Y.reshape(len(class_X), 1)
        W = np.zeros((class_X.shape[1], 1))
        b = 0.0
        W, b = optimize_weights_using_gradient_descent(
            class_X, class_Y, W, b, 200000, learning_rate[i])
        # Prepend the bias so each stored row is [b, w1, ..., wn].
        W = np.insert(W, 0, b, axis=0)
        set_of_w.append(W.T[0])
    return np.array(set_of_w)
def save_model(w, weights_file_name):
    """Write the weight matrix ``w`` to ``weights_file_name`` as CSV rows."""
    with open(weights_file_name, 'w') as weights_file:
        csv.writer(weights_file).writerows(w)
if __name__ == "__main__":
    # Train the one-vs-rest models on the CSV data and persist the weights.
    X,Y = import_data()
    W = train_model(X,Y)
    save_model(W,"WEIGHT_FILE.csv")
| shubh-cmd/ML-algorithm | logistic_regression/train.py | train.py | py | 2,539 | python | en | code | 0 | github-code | 90 |
28890045538 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 13:38:27 2018
@author: eo
"""
import cv2
import numpy as np
from functools import partial
class FrameLab:
    def __init__(self, name="Unnamed Frame Processor"):
        """Create an empty frame-processing pipeline.

        ``name`` is used only for plotting/feedback messages. The actual
        processing sequence is recorded later via startSequence()/
        endSequence().
        """
        # Name this object to use for plotting/feedback
        self._name = name
        # Variables used for keeping track of image dimensions/changes
        self._historyWHC = []
        self._crop_points = ()
        # Storage for masking/background differences
        self._bgImage = None
        self._resources = {}
        # Allocate variables for implementing a framestack
        self._pointerList = []
        self._stackList = []
        self._stackSizeList = []
        # Variables used to manage function sequencing
        self._setSequence = False
        self._functionSequence = []
        self._funcIndex = 0
        # Variables used to handle temporal subsampling
        self._countUpdates = np.int64(0)
        self._requestBreak = False
        self._requestHoldFrame = False
        self._holdFrame = None
        self._changed = False
        self._timeResourcesList = []
        # Variables used to store processed frames
        self._intermediates = []
        self._seqDictionary = {}
# .................................................................................................................
def __str__(self):
return self._name
# .................................................................................................................
def __repr__(self):
print("")
outStringList = []
outStringList.append(" ".join(["FrameLab sequence:", self._name]))
for eachFunc in self._functionSequence:
if type(eachFunc) is partial:
funcString = "".join([" ", eachFunc.func.__name__, "()"])
else:
funcString = "".join([" ", eachFunc.__name__, "()"])
outStringList.append(funcString)
return "\n".join(outStringList)
# -----------------------------------------------------------------------------------------------------------------
#%% Sequence Utilities
# .................................................................................................................
    def startSequence(self, setInput=None):
        """Begin recording a new function sequence.

        Clears any previously recorded sequence and stored intermediates.
        ``setInput`` is forwarded to ``self._setInput`` (defined elsewhere
        in this class) — presumably declaring the expected input
        dimensions; confirm there.
        """
        # Quick sanity check to avoid weirdness from calling start multiple times
        if self._setSequence:
            print("")
            print(self._name)
            print("Error, a sequence has already been started!")
            print("")
            raise SyntaxError
        # If a sequence was already created, give a warning about clearing the function list
        if len(self._functionSequence) > 0:
            print("")
            print(self._name)
            print("WARNING:")
            print("  A function sequence already exists and will be cleared!")
        # Setup expected input dimensions
        self._setInput(setInput)
        # Clear any existing functions and set flag
        self._funcIndex = 0
        self._functionSequence = []
        self._intermediates = []
        self._setSequence = True
# .................................................................................................................
    def endSequence(self, storeIntermediates=False):
        """Finish recording and bind the concrete update() implementation.

        When ``storeIntermediates`` is True, every stage's output is kept
        in ``self._intermediates`` (index 0 = input, last = output).
        NOTE(review): ``FrameLab._quit`` is defined elsewhere in this
        class (not shown here) — presumably it aborts the program.
        """
        # Quick sanity check! Don't allow sequencing without a function sequence
        if len(self._functionSequence) == 0:
            print("")
            print(self._name)
            print("Can't end sequence because there is no function sequence!")
            print("")
            FrameLab._quit()
        # Make sure a start command was given before calling this function
        if not self._setSequence:
            print("")
            print(self._name)
            print("No sequence to end!")
            print("Must call .startSequence() and processing functions before calling .endSequence()")
            print("")
            FrameLab._quit()
        # End sequencing
        self._setSequence = False
        # Enable storage of intermediate frames
        if storeIntermediates:
            # Allocate space for intermediate frame storage
            self._intermediates = [None] * (1 + len(self._functionSequence)) # Add 1 to store output
            self.update = self._updateWithIntermediates
            return
        # If no special settings, replace update function with no-storage version
        self.update = self._updateWithNoStorage
# .................................................................................................................
def update(self, inFrame):
# The endSequence() function will select an appropriate update() function
# In case a user doesn't set endSequence, display the warning below
print("")
print(self._name)
print("Need use .endSequence() function before using update!")
print("")
raise SyntaxError
# .................................................................................................................
    def _updateWithNoStorage(self, inFrame):
        """Run inFrame through the recorded function sequence; return the result.

        Installed as .update() by endSequence() when intermediates are not
        stored. Sequence functions may set self._requestBreak to abort the
        loop early (marks output as unchanged) or self._requestHoldFrame to
        snapshot the current output into self._holdFrame.
        """
        # Assume that the output will change on this update (this may be altered by a break later)
        self._changed = True
        # Repeatedly apply each function in the func. sequence on the result from the previous function
        prevFrame = inFrame.copy()
        for idx, eachFunction in enumerate(self._functionSequence):
            self._funcIndex = idx  # Allows for debugging
            prevFrame = eachFunction(prevFrame)
            # Allow functions in the function list to break the update loop (dangerous!)
            if self._requestBreak:
                self._changed = False
                break
        # Store a copy of the output if requested by one of the sequence functions
        if self._requestHoldFrame:
            self._holdFrame = prevFrame.copy()
            self._requestHoldFrame = False
        # Update processing count
        self._countUpdates += 1
        return prevFrame
# .................................................................................................................
    def _updateWithIntermediates(self, inFrame):
        """Run inFrame through the function sequence, storing every intermediate.

        Installed as .update() by endSequence(storeIntermediates=True). The
        input is kept at self._intermediates[0] and the result of function k
        at self._intermediates[k + 1]. Honors the same _requestBreak /
        _requestHoldFrame flags as _updateWithNoStorage (note: unlike that
        version, the hold-frame check here happens inside the loop).
        """
        # Assume that the output will change on this update (this may be altered by a break later)
        self._changed = True
        # Store input frame in the first list index (which has been allocated by endSequence function)
        self._intermediates[0] = inFrame.copy()
        for idx, eachFunction in enumerate(self._functionSequence):
            nextIndex = 1 + idx
            self._funcIndex = idx
            self._intermediates[nextIndex] = eachFunction(self._intermediates[idx]).copy()
            # A sequence function may abort the loop early; mark output unchanged
            if self._requestBreak:
                self._changed = False
                break
            # Snapshot the newest result when a sequence function asks for it
            if self._requestHoldFrame:
                self._holdFrame = self._intermediates[nextIndex].copy()
                self._requestHoldFrame = False
        self._countUpdates += 1
        return self._intermediates[nextIndex].copy()
# -----------------------------------------------------------------------------------------------------------------
#%% Input utilities
# .................................................................................................................
    def _setInput(self, setInput):
        """Establish expected input dimensions from a variety of input types.

        Accepts a cv2.VideoCapture, another FrameLab object, an example image
        (numpy array), a (width, height[, channels]) tuple/list (channels
        defaults to 3), or None to leave dimensions unset. Any other type
        terminates the program.
        """
        # Dispatch on exact type of the example input
        if type(setInput) is cv2.VideoCapture:
            self._setInputFromVideoCapture(setInput)
            return
        if type(setInput) is FrameLab:
            self._setInputFromOtherFrameLab(setInput)
            return
        if type(setInput) is np.ndarray:
            self._setInputFromImage(setInput)
            return
        if type(setInput) in [tuple, list]:
            inWidth = setInput[0]
            inHeight = setInput[1]
            inChannels = setInput[2] if len(setInput) > 2 else 3  # Assume a 3-channel image if not specified
            self._recordDimensions(inWidth, inHeight, inChannels)
            return
        if setInput is None:
            # Not gonna set input size... hope for the best!
            self._recordDimensions(None, None, None)
            return
        print("")
        print(self._name)
        print("Unrecognized input!")
        print("")
        FrameLab._quit()
# .................................................................................................................
def _setInputFromOtherFrameLab(self, framelabObj):
otherWHC = framelabObj.getDimensions()
if None in otherWHC or len(otherWHC) == 0:
self._recordDimensions(None, None, None)
print("")
print(self._name)
print("Error setting input based on other FrameLab object!")
print(framelabObj._name, "does not have valid dimensions set (most likely it's processing does not require storage)")
print("Explicit dimensions may need to be provided if another error occurs.")
print("")
else:
self._recordDimensions(otherWHC)
# .................................................................................................................
def _setInputFromImage(self, inFrame):
inDimensions = inFrame.shape
inWidth = inDimensions[1]
inHeight = inDimensions[0]
inChannels = inDimensions[2] if len(inDimensions) > 2 else 1
self._recordDimensions(inWidth, inHeight, inChannels)
# .................................................................................................................
def _setInputFromVideoCapture(self, videoCaptureObject):
# OpenCV enumerator values
vc_width = 3
vc_height = 4
# Set dimension tracker variables based on video frames
vidWidth = int(videoCaptureObject.get(vc_width))
vidHeight = int(videoCaptureObject.get(vc_height))
self._recordDimensions(vidWidth, vidHeight, 3)
# .................................................................................................................
    def appendFrameLab(self, framelabObj):
        """(Not implemented) Append another FrameLab object's processing to this one."""
        raise NotImplementedError
# -----------------------------------------------------------------------------------------------------------------
#%% Sequence functions
# .................................................................................................................
    def customFunction(self, inputFunction, **kwargs):
        """Sequence step: add an arbitrary frame-processing function.

        inputFunction must accept a frame as its first argument; all keyword
        arguments are bound via functools.partial. A dummy frame is passed
        through the function once to discover its effect on frame dimensions,
        so the function must be side-effect-safe on a zero frame.
        Raises AttributeError when input dimensions are unknown; re-raises any
        exception from the probe call.
        """
        # Custom functions require known input dimensions (for the dummy-frame probe below)
        inWidth, inHeight, inChannels = self.getDimensions()
        if None in [inWidth, inHeight, inChannels]:
            print("")
            print(self._name)
            print("Error running custom function:", inputFunction.__name__)
            print("Must supply input frame dimensions prior to using custom functions")
            print("Use startSequence(setInput) to specific input dimensions!")
            raise AttributeError
        # Bind all keyword arguments to the input function
        customFunc = partial(inputFunction, **kwargs)
        # Figure out what this custom function does to image dimensions by passing a dummy frame through it
        dummyFrame = np.zeros((inHeight, inWidth, inChannels), dtype=np.uint8)
        try:
            outFrame = customFunc(dummyFrame)
        except Exception as e:
            print("")
            print(self._name)
            print("Error running custom function:", customFunc.func.__name__)
            print("Tried inputting frame of dimensions:")
            print("WHC:", " x ".join([str(inWidth), str(inHeight), str(inChannels)]))
            print("")
            raise e
        # Update dimension record to account for any resizing this function performs
        outDimensions = outFrame.shape
        outWidth = outDimensions[1]
        outHeight = outDimensions[0]
        outChannels = outDimensions[2] if len(outDimensions) > 2 else 1
        self._recordDimensions(outWidth, outHeight, outChannels)
        # Some feedback
        print("")
        print("Custom function:", customFunc.func.__name__)
        print("  Input size (WHC): ", " x ".join([str(inWidth), str(inHeight), str(inChannels)]))
        print("  Output size (WHC):", " x ".join([str(outWidth), str(outHeight), str(outChannels)]))
        return self._seqReturn(customFunc)
# .................................................................................................................
def grayscale(self):
# Record the change from (presumably) 3 channel BGR to to a single grayscale channel
width, height, channels = self.getDimensions()
self._recordDimensions(width, height, 1)
# OpenCV: cv2.cvtColor(src, code)
grayFunc = partial(cv2.cvtColor, code=cv2.COLOR_BGR2GRAY)
return self._seqReturn(grayFunc)
# .................................................................................................................
def resize(self, dimensionsWH=None, scale_factorXY=None):
# Sanity check
if (dimensionsWH is None) and (scale_factorXY is None):
print("")
print(self._name)
print("Must set dimensions or scaling parameter when resizing!")
print("")
raise AttributeError
# Set width/height using scaling values (if provided)
if scale_factorXY is not None:
lastWidth, lastHeight, lastChannels = self.getDimensions()
scaledWidth = np.int(np.round(lastWidth*scale_factorXY[0]))
scaledHeight = np.int(np.round(lastHeight*scale_factorXY[1]))
# OpenCV: cv2.resize(src, fx, fy)
resizeFunc = partial(cv2.resize, dsize=None, fx=scale_factorXY[0], fy=scale_factorXY[1])
# Set width/height directly (if provided, overrides scaling if present)
if dimensionsWH is not None:
scaledWidth = dimensionsWH[0]
scaledHeight = dimensionsWH[1]
# Check if the resize is not needed
last_width, last_height, last_channels = self.getDimensions()
if last_width == scaledWidth and last_height == scaledHeight:
print("")
print(self._name)
print("No resizing performed, since input already matches target dimensions!")
# Define a pass-through function and return it
def no_resize(inFrame): return inFrame
resizeFunc = no_resize
else:
# OpenCV: cv2.resize(src, dsize)
resizeFunc = partial(cv2.resize, dsize=(scaledWidth, scaledHeight))
# Record the change in image dimensions
self._recordDimensions(scaledWidth, scaledHeight)
return self._seqReturn(resizeFunc)
# .................................................................................................................
    def mask(self, maskImage):
        """Sequence step: bitwise-AND each frame with maskImage.

        The mask is converted/resized to match the current frame dimensions
        and re-thresholded at 200 (resizing can blur binary edges).
        Raises AttributeError (via _error_out) when maskImage is None.
        """
        # Warn if no mask is supplied
        if maskImage is None:
            self._error_out(AttributeError, "No mask supplied!")
        # Make sure the mask image is correctly sized for bitwise_and operation
        maskImage = self._matchToSelfDimensions(maskImage, "mask", "masking")
        # Resizing can mess up mask images, so re-threshold the image
        maskImage = cv2.threshold(maskImage, 200, 255, cv2.THRESH_BINARY)[1]
        # OpenCV: cv2.bitwise_and(src1, src2, mask=optional)
        # Using RGB mask as src2 seems much faster than using the optional mask input!
        maskFunc = partial(cv2.bitwise_and, src2=maskImage)
        return self._seqReturn(maskFunc)
# .................................................................................................................
    def crop(self, cropImage):
        """Sequence step: crop frames to the bounding box of a binary crop image.

        cropImage is matched to the current frame dimensions and
        re-thresholded at 200, then its bounding rectangle is turned into
        fixed crop indices, stored as self._crop_points = (y1, y2, x1, x2).
        Raises AttributeError (via _error_out) when cropImage is not an
        ndarray.
        """
        # Warn if the input image is bad
        if type(cropImage) is not (np.ndarray):
            self._error_out(AttributeError, "No crop mask supplied!")
        # Make sure the crop mask is correctly sized for bitwise_anding
        cropImage = self._matchToSelfDimensions(cropImage, "crop image", "cropping")
        # Resizing can mess up binary images, so re-threshold the image
        cropImage = cv2.threshold(cropImage, 200, 255, cv2.THRESH_BINARY)[1]
        # Get cropping point indices
        _, crop_points = self.cropped_mask(cropImage)
        self._crop_points = crop_points
        # Create function for cropping the input image (rows then columns)
        def crop_(inFrame, cropping_points):
            return inFrame[cropping_points[0]:cropping_points[1], cropping_points[2]:cropping_points[3]]
        # Store cropping image with crop points
        crop_func = partial(crop_, cropping_points=crop_points)
        # Record the change in image dimensions
        self._recordDimensions((crop_points[3] - crop_points[2]), (crop_points[1] - crop_points[0]))
        return self._seqReturn(crop_func)
# .................................................................................................................
    def cropAndMask(self, cropMaskImage):
        """Sequence step: crop frames to the mask's bounding box, then mask them.

        Combines crop() and mask() using a single binary image: frames are
        cropped to the image's bounding rectangle (stored as
        self._crop_points) and AND-ed with the cropped mask.
        Raises AttributeError (via _error_out) when cropMaskImage is not an
        ndarray.
        """
        # Warn if the input image is bad
        if type(cropMaskImage) is not (np.ndarray):
            self._error_out(AttributeError, "No crop mask supplied!")
        # Make sure the cropmask is correctly sized for bitwise_anding
        cropMaskImage = self._matchToSelfDimensions(cropMaskImage, "cropmask image", "cropping")
        # Resizing can mess up binary images, so re-threshold the image
        cropMaskImage = cv2.threshold(cropMaskImage, 200, 255, cv2.THRESH_BINARY)[1]
        # Get cropping point indices and mask
        cropped_input_mask, crop_points = self.cropped_mask(cropMaskImage)
        self._crop_points = crop_points
        # Create function for cropping and applying the input cropmask
        def cropAndMask_(inFrame, cropping_points, mask_image):
            # Crop the incoming frame
            cropped_image = inFrame[cropping_points[0]:cropping_points[1], cropping_points[2]:cropping_points[3]]
            # OpenCV: cv2.bitwise_and(src1, src2)
            return cv2.bitwise_and(src1 = cropped_image, src2 = cropped_image, mask = mask_image)
        # Store cropping image with crop points
        cropmask_func = partial(cropAndMask_, cropping_points=crop_points, mask_image=cropped_input_mask)
        # Record the change in image dimensions
        self._recordDimensions((crop_points[3] - crop_points[2]), (crop_points[1] - crop_points[0]))
        return self._seqReturn(cropmask_func)
# .................................................................................................................
def blur(self, kernelSize=(3,3), kernelSigma=(0,0)):
# OpenCV: cv2.GaussianBlur(src, ksize, sigmaX, sigmaY)
blurFunc = partial(cv2.GaussianBlur, ksize=kernelSize, sigmaX=kernelSigma[0], sigmaY=kernelSigma[1])
return self._seqReturn(blurFunc)
# .................................................................................................................
    def diffWithBG(self, bgImage):
        """Sequence step: absolute-difference each frame against a background.

        The background is matched to current frame dimensions and copied into
        self._bgImage; replacing self._bgImage later changes the background
        used by subsequent updates.
        Raises AttributeError when bgImage is None.
        """
        # Warn if no background image is supplied
        if bgImage is None:
            print("")
            print(self._name)
            print("No background image supplied!")
            print("")
            raise AttributeError
        # Make sure the background image is correctly sized for background subtraction operation
        bgImage = self._matchToSelfDimensions(bgImage, "background", "background subtraction")
        # Copy background to internal storage
        self._bgImage = bgImage.copy()
        # Use internal function so that internal self._bgImage can be used in difference (and updated dynamically)
        def diffWithBG_(inFrame):
            # OpenCV: cv2.absdiff(src1, src2)
            return cv2.absdiff(inFrame, self._bgImage)
        return self._seqReturn(diffWithBG_)
# .................................................................................................................
    def diffWithSelf(self, backwardStep=1):
        """Sequence step: absolute-difference each frame against one stored backwardStep updates earlier (frames are held in an internal ring buffer)."""
        # Update minimum stack sizing requirements, since self difference requires at least 2 frames
        minSize = 1 + abs(backwardStep)
        stackIndex, stackSize = self._buildNewStack(minSize)
        # Function for getting an absolute difference
        def diffWithSelf_(inFrame, stackIndex=0, backStep=1):
            # Add inFrame to the stack before performing difference
            self._addToStack(inFrame, stackIndex)
            # OpenCV: cv2.absdiff(src1, src2)
            return cv2.absdiff(inFrame, self._returnStackFrame(stackIndex=stackIndex, relIndex=backStep))
        selfDiffFunc = partial(diffWithSelf_, stackIndex=stackIndex, backStep=backwardStep)
        return self._seqReturn(selfDiffFunc)
# .................................................................................................................
def diffWith(self, storage_name, initial_frame=None):
# Check that we aren't already using the storage name
if storage_name in self._resources:
print("")
print("Error: {}".format(self._name))
print(" Storage name", storage_name, "already in use!")
print("")
raise AttributeError
# Store initial frame if provided
self._resources[storage_name] = None if initial_frame is None else initial_frame.copy()
def diffWith_(inFrame, resource_key):
# Get the resource in case it was changed
diff_frame = self._resources[resource_key]
# If no resource is present, just pass back the incoming frame (i.e. no difference)
if diff_frame is None:
return inFrame
# OpenCV: cv2.absdiff(src1, src2)
return cv2.absdiff(inFrame, diff_frame)
diffFunc = partial(diffWith_, resource_key=storage_name)
return self._seqReturn(diffFunc)
# .................................................................................................................
    def morphology(self, kernelSize=(3,3), kernelShape=cv2.MORPH_RECT, morphKernel=None, operation=cv2.MORPH_CLOSE):
        """Sequence step: apply a morphological operation (default: closing with a 3x3 rectangular kernel). Pass morphKernel to supply a pre-built kernel, which overrides kernelSize/kernelShape."""
        # Generate the morphological kernel if it isn't supplied
        if morphKernel is None:
            morphKernel = cv2.getStructuringElement(kernelShape, kernelSize)
        # OpenCV: cv2.morphologyEx(src, op, kernel)
        morphFunc = partial(cv2.morphologyEx, op=operation, kernel=morphKernel)
        return self._seqReturn(morphFunc)
# .................................................................................................................
def threshold(self, thresholdLevel=127):
# Function for getting the 1-index return argument (frame data) from the OpenCV function
def threshold_(inFrame, threshVal):
# OpenCV: cv2.threshold(src, thresh, maxval, type)
return cv2.threshold(inFrame, thresh=threshVal, maxval=255, type=cv2.THRESH_BINARY)[1]
threshFunc = partial(threshold_, threshVal=thresholdLevel)
return self._seqReturn(threshFunc)
# .................................................................................................................
    def andWithSelf(self, backwardStep=1):
        """Sequence step: bitwise-AND each frame with one stored backwardStep updates earlier (frames are held in an internal ring buffer)."""
        # Set minimum stack size requirement to perform ANDing across previous frames
        minSize = 1 + abs(backwardStep)
        stackIndex, _ = self._buildNewStack(minSize)
        # Function for ANDing together two frames in a framestack
        def andWithSelf_(inFrame, stackIndex=0, backStep=1):
            # Add inFrame to the stack before ANDing frames
            self._addToStack(inFrame, stackIndex)
            # OpenCV: cv2.bitwise_and(src1, src2)
            return cv2.bitwise_and(inFrame, self._returnStackFrame(stackIndex=stackIndex, relIndex=backStep))
        selfAndFunc = partial(andWithSelf_, stackIndex=stackIndex, backStep=backwardStep)
        return self._seqReturn(selfAndFunc)
# .................................................................................................................
def andWith(self, storage_name, initial_frame=None):
# Check that we aren't already using the storage name
if storage_name in self._resources:
print("")
print("Error: {}".format(self._name))
print(" Storage name", storage_name, "already in use!")
print("")
raise AttributeError
# Store initial frame if provided
self._resources[storage_name] = None if initial_frame is None else initial_frame.copy()
def andWith_(self, inFrame, resource_key):
# Get the resource in case it was changed
and_frame = self._resources[resource_key]
# If no resource is present, just pass back the incoming frame (i.e. no ANDing)
if and_frame is None:
return inFrame
# OpenCV: cv2.bitwise_and(src1, src2)
return cv2.bitwise_and(inFrame, and_frame)
# OpenCV: cv2.bitwise_and(src1, src2)
andFunc = partial(andWith_, resource_key=storage_name)
return self._seqReturn(andFunc)
# .................................................................................................................
    def orWithSelf(self, backwardStep=1):
        """Sequence step: bitwise-OR each frame with one stored backwardStep updates earlier (frames are held in an internal ring buffer)."""
        # Set minimum stack size requirement to perform ORing across previous frames
        minSize = 1 + abs(backwardStep)
        stackIndex, _ = self._buildNewStack(minSize)
        # Function for ORing together two frames in a framestack
        def orWithSelf_(inFrame, stackIndex=0, backStep=1):
            # Add inFrame to the stack before ORing frames
            self._addToStack(inFrame, stackIndex)
            # OpenCV: cv2.bitwise_or(src1, src2)
            return cv2.bitwise_or(inFrame, self._returnStackFrame(stackIndex=stackIndex, relIndex=backStep))
        selfAndFunc = partial(orWithSelf_, stackIndex=stackIndex, backStep=backwardStep)
        return self._seqReturn(selfAndFunc)
# .................................................................................................................
def orWith(self, storage_name, initial_frame=None):
# Check that we aren't already using the storage name
if storage_name in self._resources:
print("")
print("Error: {}".format(self._name))
print(" Storage name", storage_name, "already in use!")
print("")
raise AttributeError
# Store initial frame if provided
self._resources[storage_name] = None if initial_frame is None else initial_frame.copy()
def orWith_(self, inFrame, resource_key):
# Get the resource in case it was changed
or_frame = self._resources[resource_key]
# If no resource is present, just pass back the incoming frame (i.e. no ORing)
if or_frame is None:
return inFrame
# OpenCV: cv2.bitwise_or(src1, src2)
return cv2.bitwise_or(inFrame, or_frame)
# OpenCV: cv2.bitwise_or(src1, src2)
orFunc = partial(orWith_, resource_key=storage_name)
return self._seqReturn(orFunc)
# .................................................................................................................
    def invert(self):
        """Sequence step: bitwise-NOT each frame (inverts all pixel values)."""
        return self._seqReturn(cv2.bitwise_not)
# .................................................................................................................
    def backSum(self, numToSum):
        """Sequence step: sum the most recent numToSum frames, clipped to 255.

        Frames are accumulated in uint16 to avoid overflow during the sum,
        then clipped and cast back to uint8.
        """
        # Set minimum stack size requirement to sum enough frames
        minSize = abs(numToSum)
        stackIndex, _ = self._buildNewStack(minSize)
        def backSum_(inFrame, listIndex, framesToSum):
            # Add inFrame to the stack before performing summation
            self._addToStack(inFrame, listIndex)
            # Get convenient variables
            stackSize = self._stackSizeList[listIndex]
            startPoint = self._pointerList[listIndex]
            endPoint = self._wrapPointer(startPoint, 1 - framesToSum, stackSize)
            # Figure out which stack indices to include in the summation
            indexingVector = np.arange(stackSize)
            if endPoint > startPoint:
                # Selection wraps around the end of the ring buffer
                selectionVector = np.logical_or(indexingVector <= startPoint, indexingVector >= endPoint)
            else:
                # endPoint < startPoint
                selectionVector = np.logical_and(indexingVector >= endPoint, indexingVector <= startPoint)
            sumFrame = np.sum(self._stackList[listIndex][selectionVector], axis=0, dtype=np.uint16)
            return np.uint8(np.clip(sumFrame, 0, 255))
        sumFunc = partial(backSum_, listIndex=stackIndex, framesToSum=numToSum)
        return self._seqReturn(sumFunc)
# .................................................................................................................
    def temporalSubsampleByFrames(self, sampleEveryNFrames):
        """Sequence step: only process every Nth update.

        On subsample updates the frame passes through (and is requested to be
        held in self._holdFrame); on all other updates the step requests a
        break of the update loop and returns the previously held frame.
        Raises AttributeError when sampleEveryNFrames < 1; non-integer values
        are truncated to int with a warning.
        """
        # Quick sanity check
        if sampleEveryNFrames < 1:
            print("")
            print(self._name)
            print("Error, temporal subsampling needs a frame jump of at least 1!")
            print("")
            raise AttributeError
        # Warning about requiring integer sampling indices (non-integer sampling could be implemented however...)
        if type(sampleEveryNFrames) is not int:
            print("")
            print(self._name)
            print("Warning! Temporal subsampling requires integer inputs!")
            print("Got:", sampleEveryNFrames)
            print("Converted to:", int(sampleEveryNFrames))
            sampleEveryNFrames = int(sampleEveryNFrames)
        def storeSubsamples(inFrame, sampleRate):#, listIndex):
            # Only record/update frames on subsample indices. Otherwise, pass previous frame through
            sampleCycleIndex = (self._countUpdates % sampleRate)
            subsampleUpdate = (sampleCycleIndex == 0)
            if subsampleUpdate:
                #self._addToStack(inFrame, listIndex)
                self._requestHoldFrame = True
                self._requestBreak = False
                return inFrame.copy()
            # No subsample update, so just pass the previously stored frame and request a break to the update loop!
            self._requestBreak = True
            return self._holdFrame
        subsampleFunc = partial(storeSubsamples, sampleRate=sampleEveryNFrames)#, listIndex=stackIndex)
        return self._seqReturn(subsampleFunc)
# .................................................................................................................
    def temporalSubsample(self, timedelta=None, hours=None, minutes=None, seconds=None):
        """Sequence step: only process updates separated by a wall-clock interval.

        Supply either a datetime.timedelta or hours/minutes/seconds values
        (which are combined into one). The first update always processes; in
        between intervals the step requests a break of the update loop and
        returns the previously held frame.
        Raises TypeError when no argument is given or timedelta has the wrong
        type. Note: uses naive local time via datetime.now().
        """
        import datetime as dt
        # Quick sanity check, don't let all keyword arguments be empty
        if (timedelta, hours, minutes, seconds) == (None, None, None, None):
            print("")
            print("Must supply at least 1 keyword argument to temporalSubsample() function!")
            print("")
            raise TypeError
        # If a timedelta value isn't supplied, build a timedelta out of individual time components
        if timedelta is None:
            hours = 0 if hours is None else hours
            minutes = 0 if minutes is None else minutes
            seconds = 0 if seconds is None else seconds
            timedelta = dt.timedelta(hours=hours, minutes=minutes, seconds=seconds)
        # Make sure timedelta is valid
        if type(timedelta) is not dt.timedelta:
            print("")
            print("Input must be a datetime.timedelta object!")
            print("")
            raise TypeError
        # Initialize next update time as now, to force immediate update
        timeIndex = len(self._timeResourcesList)
        self._timeResourcesList.append(dt.datetime.now())
        def downsample(inFrame, timestep, timeResourceIndex):
            # Check if we need to perform an update
            currentTime = dt.datetime.now()
            nextUpdateTime = self._timeResourcesList[timeResourceIndex]
            if currentTime >= nextUpdateTime:
                # Store result on the processing after this iteration (in holdFrame)
                self._requestHoldFrame = True
                self._requestBreak = False
                # Update the record of when to process next update
                nextUpdateTime = currentTime + timestep
                self._timeResourcesList[timeResourceIndex] = nextUpdateTime
                return inFrame.copy()
            # No subsampling update, so just pass previously stored frame and request a break to update loop!
            self._requestBreak = True
            return self._holdFrame
        subsampleFunc = partial(downsample, timestep=timedelta, timeResourceIndex=timeIndex)
        return self._seqReturn(subsampleFunc)
# .................................................................................................................
def norm(self, order=np.inf):
# Update channel size, since norm-ing will reduce to 1 channel
lastWidth, lastHeight, lastChannels = self.getDimensions()
self._recordDimensions(lastWidth, lastHeight, 1)
# Handle incorrect input (single channel) by passing the incoming frame through
if lastChannels == 1:
print("")
print(self._name)
print("Warning:")
print("norm() operation cannot be applied to an input with 1 channel!")
print("Skipping norm operation...")
def noNorm(inFrame):
return inFrame
return self._seqReturn(noNorm)
def norm_(inFrame, inOrder):
# Numpy: np.linalg.norm(x, ord, axis)
return np.uint8(np.linalg.norm(inFrame, ord=inOrder, axis=2))
normFunc = partial(norm_, inOrder=order)
return self._seqReturn(normFunc)
# .................................................................................................................
def outputAs(self, outputKeyName):
def outputAs_(inFrame, keyName):
self._seqDictionary[keyName] = inFrame.copy()
return inFrame
saveFunc = partial(outputAs_, keyName=outputKeyName)
return self._seqReturn(saveFunc)
# .................................................................................................................
def replaceResource(self, storage_name, updated_value):
# Make sure we have an existing storage spot
if storage_name not in self._resources:
print("")
print("Error: {}".format(self._name))
print(" Storage name", storage_name, "does not exist!")
print("")
raise AttributeError
# Assuming everything went well, so store the new value. May run into 'copying' issues though...
self._resources[storage_name] = updated_value
# .................................................................................................................
def _seqReturn(self, funcRef):
if self._setSequence:
self._functionSequence.append(funcRef)
return len(self._functionSequence)
else:
return funcRef
# -----------------------------------------------------------------------------------------------------------------
#%% Stack functions
# .................................................................................................................
    def _buildNewStack(self, stackSize):
        """Allocate a uint8 ring buffer of stackSize frames at current dimensions.

        Returns (stackIndex, stackSize), where stackIndex identifies the new
        stack in self._stackList / self._stackSizeList / self._pointerList.
        Terminates the program when frame dimensions are unset/invalid.
        """
        # Set frame and stack dimensions (using lastWidth/lastHeight/lastChannel buffers)
        frameDimensions = list(self._efficientDimensions())
        stackDimensions = [stackSize] + frameDimensions
        try:
            # Allocate space for the new (empty) stack
            emptyFrame = np.zeros(frameDimensions, dtype=np.uint8)
            newStack = np.full(stackDimensions, emptyFrame, dtype=np.uint8)
        except ValueError:
            print("")
            print(self._name)
            print("Error building storage!")
            print("Input size is likely set incorrectly.")
            print("An example input can be set using .startSequence() function.")
            print("For example:")
            print("  .startSequence(exampleImage)")
            print("  .startSequence(VideoCaptureObject)")
            print("  .startSequence(OtherFrameLabObject)")
            FrameLab._quit()
        # Add stack, sizing info and initial pointer to the appropriate lists
        stackIndex = len(self._stackList)
        self._stackSizeList.append(stackSize)
        self._stackList.append(newStack)
        self._pointerList.append(0)
        return stackIndex, stackSize
# .................................................................................................................
    # Function for adding frames to a framestack
    def _addToStack(self, inFrame, stackIndex, copyFrame=True):
        """Advance the ring pointer of stack stackIndex and store inFrame there.

        copyFrame: store a copy (default) or the frame itself by reference.
        Terminates the program on a frame-shape mismatch.
        """
        # Update the stack pointer
        pointer = self._advancePointer(stackIndex)
        try:
            # Store the new frame at the updated pointer value
            self._stackList[stackIndex][pointer] = inFrame.copy() if copyFrame else inFrame
        except ValueError:
            # Errors can occur when frame shapes don't match up to expectations
            print("")
            print(self._name)
            print("Error storing frame data!")
            print("  Trying to copy image of shape:", inFrame.shape)
            print("  into frame stack with shape:", self._stackList[stackIndex].shape[1:])
            print("  Probably need to use/adjust setInput() FrameLab function to use proper image dimensions!")
            FrameLab._quit()
# .................................................................................................................
def _advancePointer(self, listIndex):
# Get some convenience variables
stackSize = self._stackSizeList[listIndex]
currPointer = self._pointerList[listIndex]
# Calculate new pointer and replace old value
newPointer = (currPointer + 1) % stackSize
self._pointerList[listIndex] = newPointer
return newPointer
# .................................................................................................................
    def _returnStackFrame(self, stackIndex, relIndex=0):
        """Return the frame stored relIndex steps behind the newest entry of stack stackIndex (relIndex=0 gives the most recently stored frame)."""
        # Get convenience variables
        stackSize = self._stackSizeList[stackIndex]
        currPointer = self._pointerList[stackIndex]
        frameIndex = FrameLab._wrapPointer(currPointer, -relIndex, stackSize)
        return self._stackList[stackIndex][frameIndex]
# -----------------------------------------------------------------------------------------------------------------
#%% Helper functions
# .................................................................................................................
    def getDimensions(self):
        """Return the most recently recorded (width, height, channels) tuple; entries may be None when dimensions were never established."""
        return self._historyWHC[-1]
# .................................................................................................................
    def _recordDimensions(self, *args):
        """Append a new (width, height, channels) entry to self._historyWHC.

        Accepts separate width, height[, channels] arguments, a single
        (width, height[, channels]) tuple/list, or another FrameLab object.
        Omitted values fall back to the previously recorded entry (or None
        when there is no history yet).
        Raises TypeError on an unsupported argument count.
        """
        # Get length for convenience
        argLength = len(args)
        # Initialize outputs (fall back to the previous record when values are omitted)
        prevDimensions = self._historyWHC[-1] if len(self._historyWHC) > 0 else (None, None, None)
        newWidth, newHeight, newChannels = prevDimensions
        # Give an error if we get the wrong number of inputs
        if argLength not in (1,2,3):
            print("")
            print("Error recording dimensions!")
            print("Input must be either (channel inputs are optional):")
            print("  - width, height, channels")
            print("  - (width, height, channels)")
            print("  - [width, height, channels]")
            print("  - dimensions from another FrameLab object")
            print("")
            raise TypeError
        # For separated inputs
        if argLength > 1:
            newWidth = args[0]
            newHeight = args[1]
            newChannels = args[2] if argLength > 2 else newChannels
        # For the case of single-entry inputs
        if argLength == 1:
            # For the case of tuples or lists
            argVal = args[0]
            if type(argVal) in [tuple, list]:
                newWidth = argVal[0]
                newHeight = argVal[1]
                newChannels = argVal[2] if len(argVal) > 2 else newChannels
            # For the case of inputing another FrameLab object
            if type(argVal) is FrameLab:
                newWidth, newHeight, newChannels = argVal.getDimensions()
        # If all went well, we'll record the new dimensions in the internal history variable
        self._historyWHC.append((newWidth, newHeight, newChannels))
# .................................................................................................................
def _efficientDimensions(self):
lastWidth, lastHeight, lastChannels = self.getDimensions()
dimensions = (lastHeight, lastWidth, lastChannels) if lastChannels > 1 else (lastHeight, lastWidth)
return dimensions
# .................................................................................................................
    def _matchToSelfDimensions(self, inImage, imageName="image", operationName="processing"):
        """Return inImage converted and resized to match current frame dimensions.

        Converts color <-> grayscale as needed and resizes when the shapes
        differ; prints warnings when a conversion or resize occurs. imageName
        and operationName are only used in the warning text.
        """
        # Get current image size to compare to incoming image
        lastWidth, lastHeight, lastChannels = self.getDimensions()
        # Figure out if input/self are color images
        selfIsColor = (lastChannels > 1)
        selfIsGrayscale = not selfIsColor
        inputIsColor = FrameLab.isColor(inImage)
        inputIsGrayscale = not inputIsColor
        # Convert input image to grayscale if the previous 'self' frames are grayscale
        if inputIsColor and selfIsGrayscale:
            inImage = FrameLab.toGray(inImage)
        # Convert grayscale image to BGR if the incoming frames are BGR
        if selfIsColor and inputIsGrayscale:
            print("")
            print("WARNING:")
            print("  Grayscale ", imageName, " converted to BGR image to perform ", operationName, "!", sep="")
            inImage = FrameLab.toBGR(inImage)
        # Match mask size to the image size
        inputDimensions = inImage.shape
        selfDimensions = self._efficientDimensions()
        if inputDimensions != selfDimensions:
            print("")
            print("WARNING:")
            print("  ", imageName.capitalize(), "has been resized to match frame processing!")
            # cv2.resize expects (width, height); _efficientDimensions gives (height, width[, channels])
            dsizeFormat = (selfDimensions[1], selfDimensions[0])
            inImage = cv2.resize(inImage, dsize=dsizeFormat)
        return inImage
# .................................................................................................................
    def retrieveOutput(self, keyName):
        """Fetch a frame previously saved by an outputAs() sequence step."""
        return self._seqDictionary[keyName]
# .................................................................................................................
def _error_out(self, exception_type, exception_message=""):
out_msg = self._name if exception_message == "" else (exception_message + "\n({})".format(self._name))
raise exception_type(out_msg)
# .................................................................................................................
@staticmethod
def _wrapPointer(pointer, indexShift, stackSize):
return (pointer + indexShift) % stackSize
# .................................................................................................................
    @staticmethod
    def _quit():
        """Debug helper: print this helper's own file/line, then stop the script.

        Note: getframeinfo(currentframe()) reports the location *inside* this
        function, not the caller's. Under Spyder, quit() does not reliably end
        a run, so SystemExit is raised instead when a SPYDER* env var exists.
        """
        import os
        from inspect import currentframe, getframeinfo
        frameinfo = getframeinfo(currentframe())
        print("")
        print("File:", frameinfo.filename)
        print("Line:", frameinfo.lineno)
        print("")
        if any('SPYDER' in name for name in os.environ): raise SystemExit()  # Crash to stop that menacing spyder IDE
        quit()  # Works nicely everywhere else!
# .................................................................................................................
    @staticmethod
    def toGray(inFrame):
        """Convert a BGR frame to single-channel grayscale."""
        return cv2.cvtColor(inFrame, cv2.COLOR_BGR2GRAY)
# .................................................................................................................
    @staticmethod
    def toBGR(inFrame):
        """Convert a single-channel grayscale frame to 3-channel BGR (gray replicated)."""
        return cv2.cvtColor(inFrame, cv2.COLOR_GRAY2BGR)
# .................................................................................................................
@staticmethod
def isColor(npImage):
return (len(npImage.shape) > 2)
# .................................................................................................................
@staticmethod
def cropped_mask(mask_image):
# Convert to a single channel if needed
if len(mask_image.shape) > 2:
mask_image = cv2.cvtColor(mask_image, cv2.COLOR_BGR2GRAY)
# Find extent of mask so we can use co-ordinates for cropping
_, cropContour, _ = cv2.findContours(mask_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cropX, cropY, cropWidth, cropHeight = cv2.boundingRect(cropContour[0])
# Create some convenience variables for the crop indexing
cropX1, cropX2 = cropX, cropX + cropWidth
cropY1, cropY2 = cropY, cropY + cropHeight
crop_points = (cropY1, cropY2, cropX1, cropX2)
# First crop the input mask
return mask_image[cropY1:cropY2, cropX1:cropX2], crop_points
# .................................................................................................................
def crop_offsets(self):
return self._crop_points[2], self._crop_points[0]
# .................................................................................................................
    def changed(self):
        """Return the internal _changed flag (set by processing steps outside
        this excerpt — presumably marks that the last update altered output)."""
        return self._changed
# .................................................................................................................
def replaceBG(self, newBg):
# Warning if new background doesn't match old background
if newBg.shape != self._bgImage.shape:
print("")
print("New background dimensions do not match original background!")
print("Old:", self._bgImage.shape)
print("New:", newBg.shape)
self._bgImage = newBg.copy()
# -----------------------------------------------------------------------------------------------------------------
#%% Debugging functions
# .................................................................................................................
    def state(self):
        """Print a debug summary: full update-cycle count and the function the
        sequence last stopped at (name and index)."""
        numUpdates = self._countUpdates
        lastFuncIndex = self._funcIndex
        lastFunction = self._functionSequence[lastFuncIndex]
        # functools.partial objects hide the wrapped function's __name__ behind .func
        if type(lastFunction) is partial:
            lastFunctionName = lastFunction.func.__name__
        else:
            lastFunctionName = lastFunction.__name__
        print("")
        print("Number of full update cycles:", numUpdates)
        print("Stopped at function:", lastFunctionName, "()")
        print("(Function index ", lastFuncIndex, ")", sep="")
# .................................................................................................................
    def displaySequence(self, numToDisplay=None):
        """Show the stored intermediate frames, one OpenCV window per step.

        numToDisplay > 0 shows the first N steps; numToDisplay < 0 shows the
        last N; None shows all. Requires endSequence(storeIntermediates=True).
        Windows must be closed with cv2.destroyAllWindows().
        """
        # Warning and quit if storage isn't enabled
        if len(self._intermediates) == 0:
            print("")
            print("No intermediate frames were stored!")
            print("Need to enable storage to display intermediate frames using:")
            print("")
            print("  .endSequence(storeIntermediates=True)")
            return
        # Set maximum indices
        seqStart = 0
        seqEnd = len(self._functionSequence)
        # Initialize display to show all images
        startVal = seqStart
        stopVal = seqEnd
        # Allow for more specific frame display selections
        if numToDisplay is not None:
            # CHECK FOR LIST INPUTS TO SELECT SPECIFIC FRAMES!
            # Allow positive/negative indexing to display frames
            if numToDisplay > 0:
                startVal = seqStart
                stopVal = min(numToDisplay, seqEnd)
            else:
                stopVal = seqEnd
                startVal = max(seqStart, seqEnd + numToDisplay)
        # Show input frame (intermediates[0] is the raw input; step k lives at index k+1)
        cv2.imshow("Input", self._intermediates[0])
        cv2.waitKey(200)
        # Display intermediate frames
        for idx in range(startVal, stopVal):
            eachFunc = self._functionSequence[idx]
            funcName = eachFunc.func.__name__ if type(eachFunc) is partial else eachFunc.__name__
            intermIndex = 1 + idx
            cv2.imshow(str(intermIndex) + " - " + funcName, self._intermediates[intermIndex])
            cv2.waitKey(200)
        # Can't q/esc quit out of these windows, so better give the user some help...
        print("Use closeall() or cv2.destroyAllWindows() to close images.")
# .................................................................................................................
def collage(self, dimensionsWH=(1280, 720), maxCols=4):
# Warning and quit if storage isn't enabled
numIntermediates = len(self._intermediates)
if numIntermediates <= 0:
print("")
print("No intermediate frames were stored!")
print("Need to enable storage to display intermediate frames using:")
print("")
print(" .endSequence(storeIntermediates=True)")
return np.zeros((50,50,3), dtype=np.uint8)
# Figure out how many rows and columns to have in the collage
numCols = min(maxCols, numIntermediates)
numRows = int(1 + np.floor((numIntermediates-1)/numCols))
# Set the frame size for each piece of the collage
maxWidth = int(np.floor(dimensionsWH[0] / numCols))
maxHeight = int(np.floor(dimensionsWH[1] / numRows))
# Set the fill color for the border
borderFill = (50, 50, 50)
# Get scaled copies of each intermediate frame to use in the collage
collageImages = []
for idx, eachInterFrame in enumerate(self._intermediates):
# Get the frame data and it's dimensions
interHeight, interWidth = eachInterFrame.shape[0:2]
# Resize the frame to fit the collage
scaleVal = min(maxWidth/interWidth, maxHeight/interHeight, 1)
scaledFrame = cv2.resize(eachInterFrame, dsize=None, fx=scaleVal, fy=scaleVal)
# Figure out the size of the borders on the left/right of the image (if any)
widthBorder = maxWidth - scaledFrame.shape[1]
leftBorder = int(widthBorder/2)
rightBorder = widthBorder - leftBorder
# Figure out the size of the borders on the top/bottom of the image (if any)
heightBorder = maxHeight - scaledFrame.shape[0]
topBorder = int(heightBorder/2)
botBorder = heightBorder - topBorder
# Convert to BGR if needed
numChannels = scaledFrame.shape[2] if len(scaledFrame.shape) > 2 else 1
if numChannels < 3:
scaledFrame = FrameLab.toBGR(scaledFrame)
# Place borders around each image so that all scaled frames have the same height/width
collageFrame = cv2.copyMakeBorder(scaledFrame,
topBorder,
botBorder,
leftBorder,
rightBorder,
borderType=cv2.BORDER_CONSTANT,
value=borderFill)
# Print function name in top left corner of the images
if idx == 0:
funcName = "0: Input"
else:
eachFunc = self._functionSequence[max(0, idx-1)]
funcName = str(idx) + ": " + eachFunc.func.__name__ if type(eachFunc) is partial else eachFunc.__name__
cv2.putText(collageFrame, funcName, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),1)
#cv2.putText(img, text, pos, font, scale, col, thic)
# Finally, draw a rectangle around image border to visually separate collage images
cv2.rectangle(collageFrame, (-10,-10), (maxWidth-1, maxHeight-1), (0,0,0), 1)
# Add frame to the collage set
collageImages.append(collageFrame)
# Add blank frames if images don't completely fill out collage
blankFrame = np.full((maxHeight, maxWidth, 3), borderFill, dtype=np.uint8)
numBlanks = numCols*numRows - numIntermediates
for idx in range(numBlanks):
collageImages.append(blankFrame.copy())
# Stack frames, row-by-row
rowImages = [np.hstack(collageImages[(rowIdx*numCols):((1+rowIdx)*numCols)]) for rowIdx in range(numRows)]
# Create final collage image by vertically stacking row images
collageOut = np.vstack(rowImages)
return collageOut
# .................................................................................................................
def blame(self, iterations=100, exampleFrame=None):
# Set up functions for getting the process timing
tickFreq = cv2.getTickFrequency()
calculate_time_ms = lambda t1, t2: 1000*(t2 - t1)/tickFreq
print("")
print("-------------------------------------------------------------------")
print("Blame report:", self._name)
print("-------------------------------------------------------------------")
# Generate an example frame if one isn't given
if exampleFrame is None:
print("")
print("Example frame not supplied. Using an image of random colors.")
print("This may affect processing times!")
inWidth, inHeight, inChannels = self._historyWHC[0] # Get input dimensions
exampleFrame = np.random.randint(0, 256, (inHeight, inWidth, inChannels), dtype=np.uint8)
# Allocate storage for timing
procTimes = np.zeros((len(self._functionSequence), iterations), dtype=np.float)
startTime = 0
endTime = 0
# Some feedback
print("")
print("Beginning frame processor timing ({:.0f} iterations)...".format(iterations))
for k in range(iterations):
# Repeatedly apply each function in the func. sequence on the result from the previous function
prevFrame = exampleFrame.copy()
for idx, eachFunction in enumerate(self._functionSequence):
# Start process timer
startTime = cv2.getTickCount()
prevFrame = eachFunction(prevFrame)
# Stop process timer and add to accumulator
endTime = cv2.getTickCount()
procTimes[idx, k] += calculate_time_ms(startTime, endTime)
# Allow functions in the function list to break the update loop
if self._requestBreak: break # Will leave zero times for all following functions!
# Get processing time stats
avgProcTimes = np.mean(procTimes, axis=1)
stDevs = np.std(procTimes, axis=1)
timesAsPercents = 100*avgProcTimes/np.sum(avgProcTimes)
# Get totals stats
totalTimes = np.sum(procTimes, axis=0)
totalTimeAvg = np.mean(totalTimes) # Should be identical to sum of proc. averages
totalTimeStDev = np.std(totalTimes) # Should be very different from sum of proc. stDevs (n-th root?)
totalAsPercent = 100*totalTimeAvg/np.sum(avgProcTimes) # Sanity check, should be 100%
# Some useful info before printing...
funcNames = [ef.func.__name__ if type(ef) is partial else ef.__name__ for ef in self._functionSequence]
longestName = max([len(eachName) for eachName in funcNames])
# Print out average processing times
print("")
print("Total run time (ms):", "{:.3f}".format(np.sum(totalTimes)))
print("Average processing times based on", iterations, "iterations (ms)")
for idx, eachFunc in enumerate(self._functionSequence):
funcName = funcNames[idx]
timeString = "{:.3f} +/- {:.3f} ({:.1f}%)".format(avgProcTimes[idx], stDevs[idx], timesAsPercents[idx])
print(" ", funcName.ljust(longestName), ": ", timeString, sep="")
totalString = "{:.3f} +/- {:.3f} ({:.1f}%)".format(totalTimeAvg, totalTimeStDev, totalAsPercent)
print(" ", "TOTAL".ljust(longestName), ": ", totalString, sep="")
# Bit of a warning
print("")
print("Note:")
print("These timing numbers do not account for the time needed")
print("to copy frames through the processing sequence or the")
print("python for loop. Actual process timing will be longer!")
print("")
print("-------------------------------------------------------------------")
print("-------------------------------------------------------------------")
return totalTimeAvg, totalTimeStDev
# .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Variable_FrameLab(FrameLab):
    def __init__(self, name="Unnamed Variable Frame Processor"):
        """Create a FrameLab whose processing steps read tunable Control values at run time."""
        super().__init__(name)
# .................................................................................................................
    def resize(self, dimensionsWH_control=None, scaleXY_control=None):
        """Append a resize step driven by Controls.

        dimensionsWH_control supplies target (width, height) (an int is used
        for both) and takes precedence; scaleXY_control supplies x/y scale
        factors (a float is used for both). When both controls hold None the
        frame passes through unchanged.
        """
        # WARNING:
        #   This function can mess up any proceeding functions expecting specific input frame dimensions!
        #   This can be fixed in the future, but for now be careful!
        # Convert inputs to Control objects if needed
        dimensionsWH_control = self._convert_to_Control(dimensionsWH_control)
        scaleXY_control = self._convert_to_Control(scaleXY_control)
        def resize_(inFrame, dimWH_control, facXY_control):
            # Get control settings for convenience
            dimWH = dimWH_control.value
            facXY = facXY_control.value
            # If both controls are none, just return the input frame un-modified
            if (dimWH is None) and (facXY is None):
                return inFrame
            # Return dimension based resizing if provided
            if dimWH is not None:
                dimWH = (dimWH, dimWH) if type(dimWH) is int else dimWH
                return cv2.resize(inFrame, dsize=dimWH)
            # Return scale factor based resizing if provided
            if facXY is not None:
                facXY = (facXY, facXY) if type(facXY) is float else facXY
                return cv2.resize(inFrame, dsize=None, fx=facXY[0], fy=facXY[1])
            # NOTE(review): unreachable — every combination of None/not-None
            # already returned above; kept as a defensive fallback.
            return inFrame
        resizeFunc = partial(resize_, dimWH_control=dimensionsWH_control, facXY_control=scaleXY_control)
        return self._seqReturn(resizeFunc)
# .................................................................................................................
    def blur(self,
             kernelSize_control=(3,3),
             kernelSigma_control=(0,0)):
        """Append a Gaussian blur step whose kernel size and sigma come from Controls.

        Kernels with every side below 2 disable blurring (pass-through).
        """
        # Convert inputs to Control objects if needed
        kernelSize_control = self._convert_to_Control(kernelSize_control)
        kernelSigma_control = self._convert_to_Control(kernelSigma_control)
        # Create a function that can access controls before applying blur
        def variable_blur(inFrame, ksize_control, kSigma_control):
            # Get control values
            blurSize = ksize_control.value
            sigmaSize = kSigma_control.value
            # Disable blurring if the kernel is too small
            if max(blurSize) < 2:
                return inFrame
            # OpenCV: cv2.GaussianBlur(src, ksize, sigmaX, sigmaY)
            return cv2.GaussianBlur(inFrame, ksize=blurSize, sigmaX=sigmaSize[0], sigmaY=sigmaSize[1])
        # Build output function
        blurFunc = partial(variable_blur,
                           ksize_control=kernelSize_control,
                           kSigma_control=kernelSigma_control)
        return self._seqReturn(blurFunc)
# .................................................................................................................
    def morphology(self,
                   kernelSize_control=(3,3),
                   kernelShape_control=cv2.MORPH_RECT,
                   operation_control=cv2.MORPH_CLOSE):
        """Append a morphological step (erode/dilate/open/close...) driven by Controls.

        The structuring element is rebuilt from the shape/size controls each
        frame; kernels with every side below 2 disable the step (pass-through).
        """
        # Convert inputs to Control objects if needed
        kernelSize_control = self._convert_to_Control(kernelSize_control)
        kernelShape_control = self._convert_to_Control(kernelShape_control)
        operation_control = self._convert_to_Control(operation_control)
        # Inner parameters intentionally shadow the outer names; partial() below
        # binds the converted Control objects explicitly.
        def variable_morphology(inFrame, kernelSize_control, kernelShape_control, operation_control):
            # Get the parameters needed to create the kernel
            kernelShape = kernelShape_control.value
            kernelSize = kernelSize_control.value
            # Disable morphology if the kernel is too small
            if max(kernelSize) < 2:
                return inFrame
            # Generate the morphological kernel based on control values
            new_kernel = cv2.getStructuringElement(shape = kernelShape,
                                                   ksize = kernelSize)
            # OpenCV: cv2.morphologyEx(src, op, kernel)
            return cv2.morphologyEx(inFrame, op = operation_control.value, kernel = new_kernel)
        # Build output function including controls
        morphFunc = partial(variable_morphology,
                            kernelSize_control = kernelSize_control,
                            kernelShape_control = kernelShape_control,
                            operation_control = operation_control)
        return self._seqReturn(morphFunc)
# .................................................................................................................
def threshold(self, thresholdLevel_control=127):
# Convert inputs to Control objects if needed
thresholdLevel_control = self._convert_to_Control(thresholdLevel_control)
# Function for getting the 1-index return argument (frame data) from the OpenCV function
def variable_threshold(inFrame, threshCtrl):
# Get thresholding value
new_thresh = thresholdLevel_control.value
# Disable thresholding if the threshold is too small
if new_thresh < 1:
return inFrame
# OpenCV: cv2.threshold(src, thresh, maxval, type)
return cv2.threshold(inFrame, thresh=new_thresh, maxval=255, type=cv2.THRESH_BINARY)[1]
threshFunc = partial(variable_threshold, threshCtrl=thresholdLevel_control)
return self._seqReturn(threshFunc)
# .................................................................................................................
    def diffWithSelf(self, backwardStep_control=1):
        """Append an absolute-difference step against a frame *backwardStep* updates ago.

        A frame stack deep enough for the control's maximum step is allocated
        up front; steps below 1 disable the difference (pass-through).
        """
        # Convert inputs to Control objects if needed
        backwardStep_control = self._convert_to_Control(backwardStep_control)
        # Figure out how deep the stack needs to be (use the bound max if one exists)
        maxStep = backwardStep_control.max_value
        maxStep = backwardStep_control.value if maxStep is None else maxStep
        # Update minimum stack sizing requirements, since self difference requires at least 2 frames
        minSize = 1 + maxStep
        stackIndex, _ = self._buildNewStack(minSize)
        # Function for getting an absolute difference
        def variable_diffWithSelf(inFrame, stackIndex, backStep_control):
            #def getDiff(inFrame, stackIndex=0, backStep=1):
            # Add inFrame to the stack before performing difference
            self._addToStack(inFrame, stackIndex)
            # Get the backward step control value
            back_step = backStep_control.value
            # Disable backward step if the step is too low
            if back_step < 1:
                return inFrame
            # OpenCV: cv2.absdiff(src1, src2)
            return cv2.absdiff(inFrame, self._returnStackFrame(stackIndex=stackIndex,
                                                              relIndex=back_step))
        selfDiffFunc = partial(variable_diffWithSelf, stackIndex=stackIndex, backStep_control=backwardStep_control)
        return self._seqReturn(selfDiffFunc)
# .................................................................................................................
    def backSum(self, numToSum_control=1):
        """Append a step that sums the current frame with the previous N frames.

        Frames are held in a ring buffer; the selection below picks the buffer
        slots between the write pointer and the pointer N slots back (wrapping
        around the end of the buffer when needed). The per-pixel sum is
        accumulated in uint16 and clipped back to uint8. Values below 1
        disable the summation (pass-through).
        """
        # Convert inputs to Control objects if needed
        numToSum_control = self._convert_to_Control(numToSum_control)
        # Figure out how deep the stack needs to be (use the bound max if one exists)
        maxSize = numToSum_control.max_value
        maxSize = numToSum_control.value if maxSize is None else maxSize
        # Set minimum stack size requirement to sum enough frames
        minSize = 1 + maxSize
        stackIndex, _ = self._buildNewStack(minSize)
        def variable_backSum(inFrame, listIndex, framesToSum_control):
            # Add inFrame to the stack before performing summation
            self._addToStack(inFrame, listIndex)
            # Get the backward sum index from the controls
            backward_index = framesToSum_control.value
            # Disable backward sum if the index is too low
            if backward_index < 1:
                return inFrame
            # Get convenient variables
            stackSize = self._stackSizeList[listIndex]
            startPoint = self._pointerList[listIndex]
            endPoint = self._wrapPointer(startPoint, 0 - backward_index, stackSize)
            # Figure out which stack indices to include in the summation
            indexingVector = np.arange(stackSize)
            if endPoint > startPoint:
                # Wrapped window: take the two segments at either end of the buffer
                selectionVector = np.logical_or(indexingVector <= startPoint, indexingVector >= endPoint)
            else:
                # endPoint < startPoint: contiguous window
                selectionVector = np.logical_and(indexingVector >= endPoint, indexingVector <= startPoint)
            sumFrame = np.sum(self._stackList[listIndex][selectionVector], axis=0, dtype=np.uint16)
            return np.uint8(np.clip(sumFrame, 0, 255))
        sumFunc = partial(variable_backSum, listIndex=stackIndex, framesToSum_control=numToSum_control)
        return self._seqReturn(sumFunc)
# .................................................................................................................
    def mask(self, maskImage_control):
        """Append a bitwise-AND masking step whose mask image comes from a Control.

        When the mask's size disagrees with the incoming frame, it is resized,
        re-thresholded (resizing smears binary masks), and written back into
        the control so the fix only happens once.
        """
        # Convert inputs to Control objects if needed
        maskImage_control = self._convert_to_Control(maskImage_control)
        def variable_mask(inFrame, mask_control):
            # First check if the mask needs to be resized
            mask_image = mask_control.value
            maskWH = mask_image.shape[0:2][::-1]
            imageWH = inFrame.shape[0:2][::-1]
            # Resize if the mask doesn't match the incoming image dimensions
            if np.any(maskWH != imageWH):
                print("")
                print("Warning: {}".format(self._name))
                print("  Mask size is mismatched with incoming image!")
                print("  Incoming size: {} x {}".format(*imageWH))
                print("  Mask size: {} x {}".format(*maskWH))
                print("  Mask will be resized!")
                # Resizing can mess up mask images, so make sure to re-threshold the image
                mask_image = cv2.resize(mask_image, dsize=imageWH)
                mask_image = cv2.threshold(mask_image, 200, 255, cv2.THRESH_BINARY)[1]
                # Persist the corrected mask so the resize is a one-time cost
                mask_control.update(mask_image)
            # OpenCV: cv2.bitwise_and(src1, src2)
            return cv2.bitwise_and(src1=inFrame, src2=mask_image)
        maskFunc = partial(variable_mask, mask_control=maskImage_control)
        return self._seqReturn(maskFunc)
# .................................................................................................................
@staticmethod
def _convert_to_Control(input_data, feedback=False):
# Convert incoming data to a control variable
if type(input_data) != Variable_FrameLab.Control:
# Provide feedback, if enabled
if feedback:
print("")
print("Input data is type ({}), converting to Control object!".format(type(input_data)))
return Variable_FrameLab.Control(name = "unnamed",
value = input_data)
return input_data
# .................................................................................................................
# .................................................................................................................
    class Control:
        """A named, optionally-bounded tunable value read by Variable_FrameLab steps.

        Wraps a value together with optional min/max clamping, a transform
        applied on every update(), and a transform applied when report()ing.
        """
        def __init__(self, name="", *,
                     value=None, min_value=None, max_value=None,
                     update_func=None,
                     report_func = None,
                     initial_input=None):
            # Store handy values
            self._name = name
            self._value = value
            self._min_value = min_value
            self._max_value = max_value
            # NOTE: keeps the raw (pre-update_func) input; trackbar_config() reports it
            self._initial_value = initial_input
            # Store custom update function (if provided) — set before the initial
            # update below so initial_input passes through it
            self._update_func = lambda x: x
            if update_func is not None:
                self.change_update_function(update_func)
            # Store custom reporting function (if provided)
            self._report_func = lambda x: x
            if report_func is not None:
                self.change_report_function(report_func)
            # If an input is provided, pass it through the update function and store it as the initial value
            if initial_input is not None:
                self.update(initial_input)
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        @property
        def name(self):
            """The control's display name (also used as its trackbar label)."""
            return self._name
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        @property
        def value(self):
            """The current (already-transformed) control value."""
            return self._value
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def report(self):
            """Return the current value passed through the reporting transform."""
            return self._report_func(self._value)
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def update(self, new_value):
            """Clamp *new_value* to [min, max] (when bounds exist), transform, and store it."""
            # Handle input bounding, if needed
            new_value = new_value if self._max_value is None else min(new_value, self._max_value)
            new_value = new_value if self._min_value is None else max(new_value, self._min_value)
            # Update internal value
            self._value = self._update_func(new_value)
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def update_direct(self, new_value):
            """Store *new_value* verbatim, bypassing bounds and the update transform."""
            # Update internal value directly (no update function)
            self._value = new_value
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def update_from_window_trackbar(self, window_ref):
            """Pull a new value from a window trackbar named after this control.

            Returns True when the trackbar value changed (and was applied).
            NOTE(review): relies on window_ref.readTrackbar(name) returning
            (changed, value) — defined outside this excerpt; confirm contract.
            """
            # Assume the trackbar was set up with trackbar config
            val_changed, new_val = window_ref.readTrackbar(self._name)
            if val_changed:
                self.update(new_val)
            return val_changed
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def change_update_function(self, new_function):
            """Replace the transform applied to values passed to update()."""
            self._update_func = new_function
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def change_report_function(self, new_function):
            """Replace the transform applied by report()."""
            self._report_func = new_function
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        def trackbar_config(self):
            """Return the dict used to create a trackbar for this control
            (bar name, raw initial input, and maximum value)."""
            trackbar_config = {"bar_name": self._name,
                               "start_value": self._initial_value,
                               "max_value": self._max_value}
            return trackbar_config
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        @property
        def min_value(self):
            """Lower clamp bound, or None when unbounded."""
            return self._min_value
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        @property
        def max_value(self):
            """Upper clamp bound, or None when unbounded."""
            return self._max_value
        # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
        @property
        def min_max_values(self):
            """Convenience (min, max) tuple of the clamp bounds."""
            return self._min_value, self._max_value
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# ---------------------------------------------------------------------------------------------------------------------
#%% Testing
if __name__ == "__main__":
    # Interactive smoke test: builds a deliberately over-long processing
    # sequence, times it with blame(), then runs it live on a webcam feed.
    closeall = cv2.destroyAllWindows
    videoObj = cv2.VideoCapture(0)#"/home/eo/Desktop/PythonData/Shared/videos/dtb2.avi")
    _, bgImg = videoObj.read()
    # Not a practical processor. Just for example
    gg = FrameLab("Glow Tar")
    gg.startSequence(videoObj)
    gg.resize((500,500))
    gg.blur((5,5))
    gg.blur((11,11))
    gg.blur((21,21))
    gg.outputAs("Dummy1")
    gg.grayscale()
    #gg.diffWithBG(bgImg)
    gg.diffWithSelf(2)
    gg.outputAs("Dummy2")
    #gg.norm() # Much slower than grayscale
    gg.backSum(1)
    gg.backSum(2)
    gg.backSum(3)
    gg.backSum(4)
    gg.backSum(5)
    #gg.backSum(6)
    #gg.outputAs("Dummy3")
    #gg.backSum(7)
    gg.morphology((5,5))
    gg.andWithSelf(3)
    gg.andWithSelf(2)
    gg.andWithSelf(1)
    gg.endSequence(storeIntermediates=True)
    # Blame example
    gg.blame(150)#, exampleFrame=bgImg)
    # Create window with control bars
    def blankFunc(*args, **kwargs): return
    # NOTE(review): cv2.namedWindow returns None, so dispWin is always None/unused
    dispWin = cv2.namedWindow("Display")
    cv2.createTrackbar("blurSize", "Display", 5, 25, blankFunc)
    frameCount = 0
    while videoObj.isOpened():
        recFrame, inFrame = videoObj.read()
        if not recFrame:
            print("No more frames!")
            break
        frameCount += 1
        procFrame = gg.update(inFrame)
        cv2.imshow("Dummy 1", gg.retrieveOutput("Dummy1"))
        cv2.imshow("Dummy 2", gg.retrieveOutput("Dummy2"))
        cv2.imshow("Collage", gg.collage(dimensionsWH=(1280,720), maxCols=7))
        cv2.imshow("Output Frame", procFrame)
        # Get key press values
        keyPress = cv2.waitKey(1) & 0xFF
        if (keyPress == ord('q')) | (keyPress == 27): # q or Esc key to close window
            print("")
            print("Key pressed to stop!")
            break
    # Clean up this window
    videoObj.release()
    cv2.destroyAllWindows()
# ---------------------------------------------------------------------------------------------------------------------
#%% Scrap
| EricPacefactory/eolib | legacy/framelab_legacy.py | framelab_legacy.py | py | 81,129 | python | en | code | 0 | github-code | 90 |
21396394329 | # -*- coding: utf-8 -*-
#2017/10/27 CRP_assignment.py : plot the number of tables and seated customers in a Chinese Restaurant Process (CRP)
import random as rm
import matplotlib.pyplot as plt
from collections import Counter
def chinese_restaurant_process(num_customers, alpha):
    """Simulate a Chinese Restaurant Process and return per-customer table numbers.

    With i customers already seated, the next customer opens a new table with
    probability alpha / (alpha + i); otherwise they join an existing table
    chosen proportionally to its occupancy (implemented by picking a
    uniformly-random already-seated customer and joining their table).

    Returns a list of 1-based table numbers, one entry per customer
    (empty for num_customers <= 0).
    """
    if num_customers <= 0:
        return []
    table_assignments = [1]  # first customer sits at table 1
    next_open_table = 2  # index of the next empty table
    # Now generate table assignments for the rest of the customers.
    for i in range(1, num_customers):
        # BUGFIX: the new-table probability is alpha / (alpha + i) with i
        # customers already seated. The old denominator (alpha + i - 1) made
        # the second customer open a new table with probability 1.
        if rm.random() < float(alpha) / (alpha + i):
            # Customer sits at a new table.
            table_assignments.append(next_open_table)
            next_open_table += 1
        else:
            # Customer sits at an existing table, weighting each table by the
            # number of customers already sitting there.
            which_table = table_assignments[rm.randint(0, len(table_assignments) - 1)]
            table_assignments.append(which_table)
    return table_assignments
def main():
    """Prompt for CRP parameters on stdin, run the simulation, and save a
    histogram of per-table customer counts to CRP_assignment.png."""
    # Read num_customers and alpha from stdin
    print("input number of customers")
    num_customers = int(input())
    print("input alpha (>=1, int)")
    alpha = int(input())
    result = chinese_restaurant_process(num_customers, alpha)
    # Guard: max()/most_common() below crash on an empty simulation
    if not result:
        print("number of table: 0")
        return
    # Report the number of tables used
    print("number of table:", max(result))
    # Plot the result as a histogram (one bin per table) and save it
    plt.hist(result, rwidth=0.5, color="orange", bins=max(result))
    result_count = Counter(result)
    plt.xlim([0, max(result)])
    plt.ylim([0, result_count.most_common()[0][1] + 10])
    plt.savefig("CRP_assignment.png")

if __name__ == '__main__':
    main()
| KanaOzaki/Nonparametric | CRP_assignment.py | CRP_assignment.py | py | 1,651 | python | en | code | 0 | github-code | 90 |
25525407138 | from flask import request
from app import app
from app.db import postgre
from app.utils import wrappers, logger
logger = logger.Logger(__name__)
def sendFeedback():
    """Extract 'message' from the request JSON and persist it via postgre.

    Any failure to read the field (missing key, non-JSON body) yields an
    error payload instead of raising.
    """
    logger.info("API Handler feedback/send")
    _missing = object()  # sentinel: distinguishes "absent" from a literal None message
    message = _missing
    try:
        message = request.json['message']
    except Exception:
        pass
    if message is _missing:
        return {
            'status': 'error',
            'message': 'Message required'
        }
    return postgre.feedback.saveFeedback(message)
@app.route('/api/feedback/send', methods = ['POST'])
@wrappers.nullable()
def sendFeedbackWeb():
    """POST /api/feedback/send — thin route wrapper delegating to sendFeedback()."""
    return sendFeedback()
| codingjerk/ztd.blunders-web | app/api/feedback/send.py | send.py | py | 567 | python | en | code | 0 | github-code | 90 |
17950103520 | import requests
import json
from codes import *
def geoCodeLocation(inputString):
    """Geocode a free-text location via the Google Geocoding API.

    Returns the first result's {'lat': ..., 'lng': ...} dict, or None when
    the API reports any status other than 'OK'.
    """
    query = inputString.replace(" ", "+")
    url = 'https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s' % (query, google_api_key)
    result = requests.get(url).json()
    if result['status'] != 'OK':
        return None
    return result['results'][0]['geometry']['location']
def findARestaurant(mealType, location):
    """Find a restaurant serving *mealType* near *location* via Foursquare.

    Returns a dict {'name', 'address', 'image'} on success, None when
    Foursquare has no venues, or — inconsistently — an error *string* when
    Google can't geocode the location. NOTE(review): callers must handle all
    three return shapes.
    """
    loc = geoCodeLocation(location)
    if not loc:
        return 'No Google data for %s in %s'%(mealType, location)
    url="""https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&v=20130815&ll=%s,%s&query=%s"""\
    %(CLIENT_ID,CLIENT_SECRET,loc['lat'], loc['lng'], mealType)
    result = requests.get(url).json()
    try:
        # IndexError here means the venues list was empty
        venue = result['response']['venues'][0]
        restaurant_name = venue['name']
        restaurant_address = venue['location']['formattedAddress']
    except IndexError:
        return None
    venue_id = venue['id']
    url = ('https://api.foursquare.com/v2/venues/%s/photos?client_id=%s&v=20150603&client_secret=%s' % (
        (venue_id, CLIENT_ID, CLIENT_SECRET)))
    result = (requests.get(url)).json()
    if result['response']['photos']['items']:
        image_ref = result['response']['photos']['items']
        # First truthy photo entry; the 300x300 size is spliced between the
        # prefix/suffix halves of the Foursquare photo URL
        img = next((s for s in image_ref if s), None)
        img_url = (img and img['prefix']+"300x300"+img['suffix']) or None
    else:
        # Fall back to a stock burger image when the venue has no photos
        img_url = 'http://pixabay.com/get/8926af5eb597ca51ca4c/1433440765/cheeseburger-34314_1280.png?direct'
    restaurantInfo = {'name': restaurant_name, 'address': restaurant_address, 'image': img_url}
    return restaurantInfo
| ivandodo/lesson2 | geocode.py | geocode.py | py | 1,710 | python | en | code | 0 | github-code | 90 |
73501449577 | import sys
input = sys.stdin.readline
# Floyd-Warshall all-pairs shortest paths (stdin: "n m", then m edge lines).
# Unlike Dijkstra (which repeatedly extends from the closest directly-connected
# node), Floyd-Warshall relaxes paths *through* each candidate middle node:
# for every k, dist(a, b) = min(dist(a, b), dist(a, k) + dist(k, b)).
# Trying every (k, a, b) triple makes the algorithm O(n^3).
# The 2-D table starts at INF everywhere except dist(a, a) = 0; after loading
# the edges, the triple loop relaxes every pair through every middle node.
INF = int(1e9)
n, m = map(int, input().rstrip().split())
graph = [[INF for _ in range(n + 1)] for _ in range(n + 1)]
for y in range(1, n + 1) :
    for x in range(1, n + 1) :
        if x == y :
            graph[y][x] = 0
for _ in range(m) :
    node, to_node, value = map(int, input().split())
    # NOTE(review): edges are stored transposed (graph[to_node][node]) and a
    # duplicate edge overwrites instead of keeping the minimum weight —
    # confirm both against the problem statement this was written for.
    graph[to_node][node] = value
for k in range(1, n + 1) :
    for a in range(1, n + 1) :
        for b in range(1, n + 1) :
            graph[a][b] = min(graph[a][b], graph[a][k] + graph[k][b])
for a in range(1, n + 1) :
    for b in range(1, n + 1) :
        if graph[a][b] == 1e9 :
            # Still holding the sentinel => unreachable pair
            print("INFINITY", end = " ")
        else :
            print(graph[a][b], end = " ")
    print()
| Err0rCode7/algorithm | baekjoon/dijkstra_graph/floyd-warshall.py | floyd-warshall.py | py | 1,565 | python | ko | code | 0 | github-code | 90 |
14289069923 | #!/usr/bin/env python3
import subprocess
import logging

# Configure the root logger once. basicConfig already attaches a FileHandler
# writing to railway_logs.txt; the original code then added a *second*
# FileHandler for the same path, so every record appeared twice in the file.
logging.basicConfig(level=logging.INFO, filename='railway_logs.txt', filemode='w',
                    format='%(asctime)s - %(levelname)s - %(message)s')

# Named logger: records propagate to the root (file) handler, and this extra
# StreamHandler echoes them to the console as well.
logger = logging.getLogger('RailwayLogger')
logger.addHandler(logging.StreamHandler())

# Command to stream logs from the Railway app deployment.
command = "railway logs -d"

# Start the CLI and stream its stdout line by line into the logger.
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, universal_newlines=True)
for log in process.stdout:
    logger.info(log.strip())

# Tidy up: close the pipe, wait for the CLI to exit, flush/close handlers.
process.stdout.close()
process.wait()
logging.shutdown()
| eastcoreesolis/railwaystreamlogs | test.py | test.py | py | 948 | python | en | code | 0 | github-code | 90 |
11173899370 | from django.conf import settings
from mongoengine import MultipleObjectsReturned, DoesNotExist
from dms.models import UserProfile
__author__ = 'asseym'
def get_profile(phone):
    """Return the UserProfile matching *phone*, or None (see _mobile_user)."""
    return _mobile_user(phone)
def _mobile_user(phone):
    """Look up the UserProfile for *phone*.

    Numbers longer than settings.NUMBER_OF_CHARS_IN_PHONE_NUMBER are matched
    on their trailing digits, so differing country prefixes still resolve.
    Returns the first match when several profiles share a suffix, or None
    when nothing matches or *phone* is None.
    """
    char_index = settings.NUMBER_OF_CHARS_IN_PHONE_NUMBER
    try:
        # Build the filter once instead of duplicating the branch logic in
        # every except clause, as the original code did.
        if len(phone) > char_index:
            query = {'phone__endswith': phone[-char_index:]}
        else:
            query = {'phone': phone}
        return UserProfile.objects.get(**query)
    except MultipleObjectsReturned:
        return UserProfile.objects(**query).first()
    except DoesNotExist:
        return None
    except TypeError:
        # len(None) raises TypeError when no phone number was supplied.
        return None
def get_user_district_locations(user):
    """Return the ids (as strings) of the user's district and its children."""
    profile = UserProfile.objects(user=user).first()
    if not (profile and profile.location):
        return []
    location = profile.location
    district = location if location.type == 'district' else location.parent
    if not district:
        return []
    child_ids = [str(child.id) for child in district.children()]
    return [str(district.id)] + child_ids
def get_user_district_coordinates(user):
    """Return the lat/long of the user's district, or [] when unknown."""
    profile = UserProfile.objects(user=user).first()
    if not (profile and profile.location):
        return []
    location = profile.location
    district = location if location.type == 'district' else location.parent
    return district.latlong if district else []
| unicefuganda/necoc | dms/utils/user_profile_utils.py | user_profile_utils.py | py | 1,903 | python | en | code | 1 | github-code | 90 |
72017890857 | # -*- coding: utf-8 -*-
"""Server for the Raspi Webapp
"""
import sys
import os
import signal
import json as j
import time
from sanic import Sanic
from sanic.response import json
from sanic.response import file
import src.raspi.webapp.mw_adapter_server as mw_adapter_server
from src.raspi.lib import zmq_ack
from src.raspi.config import config as cfg
from src.raspi.lib import heartbeat as hb
import src.raspi.lib.log as log
logger = log.getLogger("SoulTrain.webapp.server")
app = Sanic()
app.name = "PrenTeam28WebApp"
app.static('/static', os.path.join(os.path.dirname(__file__), 'static'))
middlewareData = None
hb_last_sent = 0.0
is_simulation_mode = True
# SIGINT handler (when pressing Ctrl+C)
def signal_int_handler(sig, frame):
    """Exit the server cleanly when SIGINT (Ctrl+C) is received."""
    print("Ctrl+C Pressed. Exit...")
    sys.exit(0)
# Routes
@app.route('/')
async def index(request):
    """Serve the single-page UI: return index.html for GET /."""
    return await file(os.path.join(os.path.dirname(__file__), "index.html"))
@app.route('/favicon.ico')
async def favicon(request):
    """Serve the site icon for GET /favicon.ico."""
    return await file(os.path.join(os.path.dirname(__file__), "favicon.ico"))
@app.route('/api')
async def api(request):
    """Return the latest middleware snapshot as JSON under /api."""
    global middlewareData
    if middlewareData is None:
        # No middleware poll has completed yet. The original code only
        # defaulted `direction` and hit a NameError on every other field;
        # report the whole snapshot as undefined instead.
        return json({key: 'undefined' for key in (
            'phase', 'phaseMessage', 'speed', 'distance', 'xAcceleration',
            'yAcceleration', 'zAcceleration', 'direction', 'number', 'cube',
            'crane', 'heartBeatLineDetector', 'heartBeatNumberDetector',
            'heartBeatMovement', 'heartBeatAcoustic', 'heartBeatControlFlow')})
    data = middlewareData
    # Translate middleware key names into the camelCase fields the UI polls.
    return json({
        'phase': str(data['phase']),
        'phaseMessage': str(data['phase_message']),
        'speed': str(data['speed']),
        'distance': str(data['distance']),
        'xAcceleration': str(data['x_acceleration']),
        'yAcceleration': str(data['y_acceleration']),
        'zAcceleration': str(data['z_acceleration']),
        'direction': str(data['direction']),
        'number': str(data['number']),
        'cube': str(data['cube']),
        'crane': str(data['crane']),
        'heartBeatLineDetector': str(data['linedetector']),
        'heartBeatNumberDetector': str(data['numberdetector']),
        'heartBeatMovement': str(data['movement']),
        'heartBeatAcoustic': str(data['acoustic']),
        'heartBeatControlFlow': str(data['controlflow'])
    })
@app.route('/sound/<sound_nr>')
async def play_sound(request, sound_nr):
    """Trigger acoustic sample *sound_nr* on the middleware."""
    mw_adapter_server.send_acoustic_cmd(int(sound_nr))
    return json({'received': True})
@app.route('/speed/<speed>')
async def send_speed(request, speed):
    """Send a new target speed; clear the ack flag until movement confirms."""
    global middlewareData
    middlewareData['speed_ack'] = False
    mw_adapter_server.send_move_cmd(int(speed))
    return json({'received': True})
@app.route('/crane/<state>')
async def send_crane_cmd(request, state):
    """Raise (state == 1) or lower the crane; clear the pending ack flag."""
    global middlewareData
    middlewareData['crane_ack'] = False
    raise_crane = int(state) == 1
    if raise_crane:
        mw_adapter_server.send_phase(cfg.PHASE_GRAB_CUBE)
    else:
        mw_adapter_server.reset_tiny()
    mw_adapter_server.send_crane_cmd(1 if raise_crane else 0)
    return json({'received': True})
class Payload(object):
    """Deserialise a JSON object string straight into attribute access.

    NOTE(review): assigning to __dict__ exposes every top-level JSON key as
    an attribute; the /controlflow handler relies on `command` and `phases`
    being present in the request body.
    """
    def __init__(self, json_string):
        self.__dict__ = j.loads(json_string)
@app.post('/controlflow')
async def send_controlflow_cmd(request):
    """Forward a control-flow command (JSON body with `command` and `phases`)."""
    json_string = request.body.decode('utf-8')
    p = Payload(json_string)
    if 'start' in p.command:
        # Starting a fresh run: reset the hardware and cached state first.
        mw_adapter_server.reset_tiny()
        mw_adapter_server.clear_states() #clear states when starting controlflow
    mw_adapter_server.send_sys_cmd(p.command, dict(p.phases))
    return json({'received': True})
@app.route('/mode/<state>')
async def set_mode(request, state):
    """Switch between simulation mode (state == 1) and control-flow mode."""
    global is_simulation_mode
    simulate = int(state) == 1
    if simulate:
        logger.info("Switched to Simulation Mode!")
        # otherwise tiny will not send this state
        mw_adapter_server.send_phase(cfg.PHASE_FIND_CUBE)
    else:
        logger.info("Switched to ControlFlow Mode!")
    is_simulation_mode = simulate
    return json({'received': True})
@app.route('/resettiny')
async def reset_tiny(request):
    """Reset the tiny controller via the middleware adapter."""
    mw_adapter_server.reset_tiny()
    return json({'received': True})
# Middleware handling
async def periodic_middleware_task(app):
    """Poll the middleware once, then re-schedule itself via app.add_task.

    NOTE(review): there is no awaited sleep here — the task re-queues itself
    every event-loop pass and only the heartbeat below is rate limited by
    cfg.HB_INTERVAL; confirm the resulting poll rate is intended.
    """
    global middlewareData
    global hb_last_sent
    ''' periodic task for handling middleware '''
    # Send a heartbeat at most once per HB_INTERVAL (config value is in ms).
    if (hb_last_sent+(float(cfg.HB_INTERVAL)/1000)) < time.time():
        hb_last_sent = time.time()
        mw_adapter_server.send_hb()
    # Refresh the shared snapshot served by the /api route.
    middlewareData = mw_adapter_server.get_data()
    # Latch the crane acknowledge: translate the one-shot middleware flag
    # into the 'crane_ack' field the UI polls, then clear the raw flag.
    if zmq_ack.ACK_RECV_CRANE_CMD in middlewareData.keys():
        if middlewareData[zmq_ack.ACK_RECV_CRANE_CMD] is True:
            logger.info("received crane cmd ack from movement")
            middlewareData['crane_ack'] = True
            middlewareData[zmq_ack.ACK_RECV_CRANE_CMD] = False
    # Same latching for the speed/move acknowledge.
    if zmq_ack.ACK_RECV_MOVE_CMD in middlewareData.keys():
        if middlewareData[zmq_ack.ACK_RECV_MOVE_CMD] is True:
            logger.info("received move cmd ack from movement")
            middlewareData['speed_ack'] = True
            middlewareData[zmq_ack.ACK_RECV_MOVE_CMD] = False
    # Schedule the next poll.
    app.add_task(periodic_middleware_task(app))
if __name__ == '__main__':
    # Initialise middleware state, install the Ctrl+C handler, start the
    # polling task, then run the web server (blocking).
    mw_adapter_server.clear_states() #set default values
    signal.signal(signal.SIGINT, signal_int_handler)
    app.add_task(periodic_middleware_task(app))
    app.run(host='0.0.0.0', port=2828, debug=False, access_log=False)
| Inux/pren | src/raspi/webapp/server.py | server.py | py | 6,047 | python | en | code | 1 | github-code | 90 |
17853233852 | #Escriba un programa que pida un número de jugadores y tire un dado para cada jugador.
import random  # import the random library
# Ask how many players there are; each player gets one six-sided die roll.
cant_jugadores = int(input("Ingrese la cantidad de jugadores: "))
if (cant_jugadores <= 0) :
    print ("Cantidad de jugadores errónea")
else :
    for i in range(cant_jugadores) :
        print (f"Jugador {i+1}: {random.randrange(1,7)}")
        # randrange(1, 7) yields a random value between 1 and 6 inclusive
| EmiSchonhals1/Mi-ruta-de-aprendizaje-de-Python | xx MiniJuegos xx/1_dado_x_jugador.py | 1_dado_x_jugador.py | py | 432 | python | es | code | 0 | github-code | 90 |
19199194178 | import matplotlib.pyplot as plt
## Ploting Rotas and Polo
def plot_polo_rotas(rotas_geo, polo_geo, title=''):
    """Draw the routes (red) with the polo layer (green) on one shared axes."""
    axes = rotas_geo.plot(edgecolor='k', color='red', alpha=1)
    polo_geo.plot(alpha=0.5, color='green', ax=axes)
    plt.title(title)
    plt.show()
def plot_only_rotas(rotas_geo, title=''):
rotas_geo.plot(alpha=0.5, edgecolor='k', cmap='tab10')
plt.title(title)
plt.show() | rodrigoelemesmo/posicionador | posicionador/utils.py | utils.py | py | 414 | python | en | code | 0 | github-code | 90 |
3311454235 | import numpy as np
import matplotlib.pyplot as plt
# Behaviour of kron
# >>> np.kron([1,10,100], [5,6,7])
# array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
# >>> np.kron([5,6,7], [1,10,100])
# array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
def codeSignal(inVal, chipLen):
    """Encode the integer inVal as a bipolar (+/-1) chip sequence.

    The binary digits of inVal map to -1/+1, are left-padded with -1 chips
    up to chipLen, and each chip is repeated bitWidth times.

    Raises:
        ValueError: if inVal needs more than chipLen binary digits.
    """
    # Each chip is stretched to this many output samples.
    bitWidth = 10
    # Binary digits of inVal ('bits', rather than shadowing the builtin str).
    bits = format(inVal, 'b')
    dblVal = np.zeros(len(bits))
    for i in range(len(bits)):
        dblVal[i] = float(bits[i])
    dblVal -= 0.5 # center on [-0.5,0.5]
    dblVal *= 2.0 # center on [-1,1]
    # Left-pad with -1 chips so the signal is exactly chipLen chips long.
    if(len(dblVal) > chipLen):
        raise ValueError("length of signal > chipLen")
    padLen = chipLen - len(dblVal)
    dblVal = np.pad(dblVal, (padLen,0), 'constant', constant_values=(-1,0))
    # Stretch: repeat each chip bitWidth times (Kronecker with a ones vector).
    out = np.kron(dblVal, np.ones(bitWidth))
return out | e2choy/dsp | code_signal.py | code_signal.py | py | 942 | python | en | code | 0 | github-code | 90 |
14497391676 | import numpy as np
from neuron import h
class ArtificialCell:
    """Spike source: a VecStim that replays *event_times* through a NetCon.

    Keeping the Vector, VecStim and NetCon as attributes keeps all three
    referenced (and thus alive) for the lifetime of the instance — compare
    the function variant below, which returns only the NetCon.
    """
    def __init__(self, event_times):
        # Convert event times into nrn vector
        self.nrn_eventvec = h.Vector()
        self.nrn_eventvec.from_python(event_times)
        # load eventvec into VecStim object
        self.nrn_vecstim = h.VecStim()
        self.nrn_vecstim.play(self.nrn_eventvec)
        # create the cell and artificial NetCon
        self.nrn_netcon = h.NetCon(self.nrn_vecstim, None)
def artificial_cell(event_times):
    """Function counterpart of ArtificialCell that returns only the NetCon.

    NOTE(review): the local Vector and VecStim are not returned, so only the
    NetCon keeps them reachable from Python — presumably the lifetime
    behaviour this repro script is probing; confirm against NEURON's
    reference handling.
    """
    # Convert event times into nrn vector
    nrn_eventvec = h.Vector()
    nrn_eventvec.from_python(event_times)
    # load eventvec into VecStim object
    nrn_vecstim = h.VecStim()
    nrn_vecstim.play(nrn_eventvec)
    # create the cell and artificial NetCon
    nrn_netcon = h.NetCon(nrn_vecstim, None)
    return nrn_netcon
def demo(is_class=False, is_list=False):
    """Register 20 artificial cells with a ParallelContext.

    is_class: build ArtificialCell instances (keep all NEURON objects
        referenced) instead of bare NetCons from artificial_cell().
    is_list: additionally store each cell/NetCon in self.feeds so a
        reference survives past the loop iteration.
    """
    print('Begin demo')
    # create parallel context
    pc = h.ParallelContext()
    rank = int(pc.id())
    class OuterClass:
        def __init__(self):
            self.feeds = list()
            for gid in range(20):
                pc.set_gid2node(gid, rank)
                spike_times = np.random.rand(20)
                if is_class:
                    feed = ArtificialCell(spike_times)
                    if is_list:
                        self.feeds.append(feed)
                        pc.cell(gid, self.feeds[-1].nrn_netcon)
                    else:
                        pc.cell(gid, feed.nrn_netcon)
                else:
                    nrn_netcon = artificial_cell(spike_times)
                    if is_list:
                        self.feeds.append(nrn_netcon)
                        pc.cell(gid, self.feeds[-1])
                    else:
                        pc.cell(gid, nrn_netcon)
                print(f'gid={gid}, is_class={is_class}, is_list={is_list}')
    # Instantiate (and immediately drop) the container; only referenced
    # NEURON objects survive to this point.
    OuterClass()
    pc.gid_clear()
    pc.done()
    print('Completed: demo')
# Run the demo in all four configurations.
################################################
# Only this combination (class instances kept in a list) is expected to work:
# every NEURON object stays referenced for the whole loop.
demo(is_class=True, is_list=True)
# none of these work — presumably the unreferenced Vector/VecStim objects
# get collected (NOTE(review): confirm against the associated bug report)
demo(is_class=False, is_list=True)
demo(is_class=True, is_list=False)
demo(is_class=False, is_list=False)
| jasmainak/netcon_bug | debug.py | debug.py | py | 2,195 | python | en | code | 0 | github-code | 90 |
40946614929 | """
Machine shop example
Covers:
- Interrupts
- Resources: PreemptiveResource
Scenario:
A workshop has *n* identical machines. A stream of jobs (enough to
keep the machines busy) arrives. Each machine breaks down
periodically. Repairs are carried out by one repairman. The repairman
has other, less important tasks to perform, too. Broken machines
preempt these tasks. The repairman continues them when he is done
with the machine repair. The workshop works continuously.
"""
import random
import simpy
# RANDOM_SEED = 42
PT_MEAN = 10.0 # Avg. processing time in minutes
PT_SIGMA = 2.0 # Sigma of processing time
MTTF = 300.0 # Mean time to failure in minutes
BREAK_MEAN = 1 / MTTF # Param. for expovariate distribution
REPAIR_TIME = 30.0 # Time it takes to repair a machine in minutes
JOB_DURATION = 30.0 # Duration of other jobs in minutes
NUM_MACHINES = 10 # Number of machines in the machine shop
WEEKS = 1 # Simulation time in weeks
SIM_TIME = WEEKS * 7 * 24 * 60 # Simulation time in minutes
class Machine(object):
    """A machine produces parts and may get broken every now and then.

    If it breaks, it requests a *repairman* and continues production after
    it is repaired.

    A machine has a *name* and a number of *parts_made* thus far.
    """
    def __init__(self, env, name, repairman):
        self.env = env
        self.name = name
        self.parts_made = 0
        self.broken = False
        # Keep a handle on the working process so break_machine can
        # interrupt it (the original discarded it, so self.process never
        # existed and no machine could be interrupted).
        self.process = env.process(self.working(repairman))
        env.process(self.break_machine())

    def break_machine(self):
        """Break the machine every now and then."""
        while True:
            yield self.env.timeout(random.expovariate(BREAK_MEAN))
            if not self.broken:
                # Only interrupt while it is working; the original inverted
                # check (`if self.broken`) meant the interrupt never fired.
                self.process.interrupt()

    def working(self, repairman):
        """Produce parts until the simulation ends.

        While making a part the machine may break several times; each break
        preempts the repairman's other job, and production resumes with the
        remaining processing time once repaired.
        """
        while True:
            part_made_in = random.randrange(1, 10)
            while part_made_in:
                try:
                    # Working on the part
                    start = self.env.now
                    yield self.env.timeout(part_made_in)
                    part_made_in = 0  # Set to 0 to exit while loop.
                except simpy.Interrupt:
                    self.broken = True
                    part_made_in -= self.env.now - start  # How much time left?
                    # Request a repairman. This will preempt its "other_job".
                    with repairman.request(priority=1) as req:
                        yield req
                        yield self.env.timeout(REPAIR_TIME)
                    self.broken = False
            # Part is done.
            self.parts_made += 1
def other_jobs(env, repairman):
    """The repairman's other (unimportant) job, resumed after each repair."""
    while True:
        # Start a new job
        part_made_in = JOB_DURATION
        while part_made_in:
            # Retry the job until it is done.
            # Its priority is lower than that of machine repairs, so a
            # broken machine preempts it.
            with repairman.request(priority=2) as req:
                yield req
                try:
                    start = env.now
                    yield env.timeout(part_made_in)
                    part_made_in = 0
                except simpy.Interrupt:
                    part_made_in -= env.now - start
# Set up the simulation: one repairman shared (preemptively) by all machines.
env = simpy.Environment()
repairman = simpy.PreemptiveResource(env, capacity=1)
machines = [Machine(env, 'Machine %d' % i, repairman)
            for i in range(NUM_MACHINES)]
env.process(other_jobs(env, repairman))
env.run(until=SIM_TIME)
# Analysis/results
print('Machine shop results after %s weeks' % WEEKS)
for machine in machines:
    print('%s made %d parts.' % (machine.name, machine.parts_made))
print(f"Average parts made per machine: {sum(machine.parts_made for machine in machines) / NUM_MACHINES}")
# plot the results
num_of_machines = [ machine.parts_made for machine in machines]
print(num_of_machines)
import matplotlib.pyplot as plt
plt.plot(num_of_machines, label="parts made", color="red", marker="o", linestyle="solid", linewidth=2, markersize=5)
plt.xlabel("Machines")
plt.ylabel("Parts made")
plt.xticks(range(len(num_of_machines)), [machine.name for machine in machines])
plt.legend()
# rotate x labels
plt.xticks(rotation=90)
plt.ylim(min(num_of_machines), max(num_of_machines) + 1)
plt.show()
# NOTE(review): tight_layout() after show() has no effect on the already
# rendered figure — it should run before show(); left unchanged here.
plt.tight_layout()
# plt.savefig("machine_shop.png") | adarshanand67/CS415-Modelling-and-Simulations | simpy docs/machine_shop copy.py | machine_shop copy.py | py | 4,477 | python | en | code | 0 | github-code | 90 |
18463640659 | import numpy as np
s = np.array(list(input()))
t = np.array(list(input()))
# dp[i][j] = length of the LCS of s[:i] and t[:j] (extra zero row/column).
dp = np.zeros((len(s)+1, len(t)+1), dtype=int)
# equal[i, j] is True where s[i] == t[j]; lets each DP row be vectorised.
equal = s[:, None] == t[None, :]
for i in range(len(s)):
    # Match transition: extend the diagonal wherever the characters agree,
    # against the "skip a character of s" alternative.
    dp[i+1, 1:] = np.maximum(dp[i, :-1]+equal[i], dp[i, 1:])
    # "Skip a character of t" transition as a running maximum along the row.
    dp[i+1] = np.maximum.accumulate(dp[i+1])
# Walk back from dp[len(s)][len(t)] to reconstruct one LCS.
i = len(s)
j = len(t)
ans = []
while i>0 and j>0:
    if s[i-1] == t[j-1]:
        ans.append(s[i-1])
        i -= 1
        j -= 1
    elif dp[i][j] == dp[i-1][j]:
        i -= 1
    else :
        j -= 1
print(''.join(ans[::-1]))
| Aasthaengg/IBMdataset | Python_codes/p03165/s940347275.py | s940347275.py | py | 522 | python | en | code | 0 | github-code | 90 |
1849049256 | import sys
input = sys.stdin.readline
# Memo table for Pascal's rule; dp[n][k] caches C(n, k), 0 meaning "unset".
dp = [[0] * 30 for _ in range(30)]
def binomial(n, k):
    """Return the binomial coefficient C(n, k) for 0 <= k <= n < 30."""
    if k == 0 or k == n:
        return 1
    if dp[n][k] == 0:
        dp[n][k] = binomial(n - 1, k - 1) + binomial(n - 1, k)
    return dp[n][k]
T = int(input())
for _ in range(T):
N,M = map(int, input().split())
print(binomial(M,N)) | GANGESHOTTEOK/yaman-algorithm | 07_DP/AN/BOJ1010.py | BOJ1010.py | py | 359 | python | en | code | 2 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.