seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
def main():
    """Read the problem input from stdin and print the maximum total value.

    Input: n m, then n card values, then m (count, value) rewrite
    operations.  Greedily pretend to add the highest-value replacement
    cards (stopping once more than n cards were added), then sum the n
    largest values overall.
    """
    from collections import Counter
    data = list(map(int, open(0).read().split()))
    n = data[0]
    rest = data[2:]
    counts = Counter(rest[:n])
    tail = rest[n:]
    ops = list(zip(tail[0::2], tail[1::2]))
    ops.sort(key=lambda op: op[1], reverse=True)
    added = 0
    for amount, value in ops:
        counts[value] += amount
        added += amount
        if added > n:
            break
    remaining = n
    total = 0
    for value in sorted(counts, reverse=True):
        take = counts[value]
        if take <= remaining:
            total += value * take
            remaining -= take
        else:
            total += value * remaining
            break
    print(total)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03038/s381288078.py | s381288078.py | py | 605 | python | en | code | 0 | github-code | 90 |
19381581559 | #!/bin/python3
from src.Operations import Operations
from src.Balances import Balances
from src.LiquidityPool import LiquidityPool
from src.Arguments import Arguments
from src.util.format_argument import format_argument
from src.exceptions.insufficient_funds_exception import InsufficientFundsException
from datetime import datetime
from math import floor
import sys
import random
import os
def main():
    """Command-line entry point for the liquidity-pool market simulation.

    Parses the required CLI flags into an ``Arguments`` object, seeds the
    smart-contract balances and the liquidity pool, then simulates buy/sell
    traffic day by day, logging prices, transactions and the test wallet's
    value to CSV files under ``./output/<timestamp>/``.
    """
    # Flag name -> human readable description; used for --help output and
    # for validating the flags the user actually passed.
    all_params = {
        "--subscriber-count": "the amount of investors",
        "--subscriber-growth-percentage": "a percent value (0-100) representing the user growth per day",
        "--execution-duration-in-days": "the literal runtime of the program in days",
        "--stagnation-day": "a number from 0-1 representing the fractional day when no new buys are made",
        "--min-transactions-per-day": "the minimum number of transactions to simulate per day",
        "--max-transactions-per-day": "the maximum number of transactions to simulate per day",
        "--rebound-trigger-percentage": "the inflation percent that will trigger an inflationary reset",
        "--interest-rate-percentage": "the percent interest rate wallets will attain",
        "--interest-period-in-days": "the time period in days when all accounts should attain interest",
        "--buy-sell-ratio": "the ratio of buy to sell orders of the simulation (a number from 0-1)",
        "--token-y-count": "the amount of token y to initialize the liquidity pool with",
        "--token-x-count": "the amount of token x to initialize the liquidity pool with",
        "--min-transaction-amount": "the minimum token y a user should buy in the simulation",
        "--max-transaction-amount": "the maximum token y a user should buy in the simulation",
        "--test-wallet": "a custom wallet to run through the simulation",
        "--test-wallet-balance": "the balance to initialize the test wallet with",
    }
    arguments = Arguments()
    if sys.argv[-1] == "--help":
        # Render a two-column help table with the flag names left-justified.
        print("Required parameters are:\n")
        table_data = []
        max_word_length = 0
        for param in all_params:
            table_data.append([param, all_params[param]])
            if max_word_length < len(param):
                max_word_length = len(param)
        for row in table_data:
            print("\t" + "".join(word.ljust(max_word_length + 1) for word in row))
        print("\nwhere token x is the input token and token y is the output token for a \033[1mbuy transaction\033[1m\n")
        sys.exit(0)
    if len(sys.argv) < len(all_params):
        print("missing argument. All parameters are required, use --help for more information")
        sys.exit(1)
    param = sys.argv[1:]
    for arg in range(0, len(param), 2):
        if param[arg] in all_params:
            # BUGFIX: guard against a flag being the last token; indexing
            # param[arg + 1] raised IndexError before the old `== None`
            # check could ever fire.
            if arg + 1 >= len(param) or param[arg + 1] is None:
                print("missing argument for "+param[arg])
                sys.exit(1)
            value = None
            try:
                # Coerce the raw string to the declared type of the
                # matching Arguments attribute.
                expected_type: str = type(getattr(arguments, format_argument(param[arg]))).__name__
                if expected_type == "int":
                    value = int(param[arg + 1])
                if expected_type == "float":
                    value = float(param[arg + 1])
                if expected_type == "str":
                    value = str(param[arg + 1])
                if expected_type == "bool":
                    value = param[arg + 1].lower() == "true"
            except ValueError:
                print("invalid argument type for "+param[arg]+". Argument must be of type "+ type(getattr(arguments, format_argument(param[arg]))).__name__)
                sys.exit(1)
            except Exception as e:
                print(e)
                sys.exit(1)
            setattr(arguments, format_argument(param[arg]), value)
        else:
            # BUGFIX: `arg` is an int loop index; concatenating it raised
            # TypeError.  Report the offending flag text instead.
            print("invalid argument "+param[arg]+" passed. Use --help for more information")
            sys.exit(1)
    smart_contract = Balances(arguments.interest_rate_percentage)
    liquidity_pool = LiquidityPool(
        arguments.token_x_count,
        arguments.token_y_count,
    )
    # Populate balances with randomly addressed, empty wallets.
    user_addresses = []
    for _ in range(arguments.subscriber_count):
        address = random.getrandbits(128)
        smart_contract.setWalletBalance(address, 0)
        user_addresses.append(address)
    output_path = "./output"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    folder_path = "%s/%s"%(output_path, str(datetime.now()))
    os.makedirs(folder_path)
    # Create the three CSV logs with their header rows.  Context managers
    # replace the old manual open/close pairs.
    price_log_path = "%s/%s"%(folder_path, "prices.csv")
    with open(price_log_path, "a") as price_log_file:
        price_log_file.write("price_y,total_x_supply,total_y_supply,day,transaction_no\n")
    transaction_log_path = "%s/%s"%(folder_path, "transactions.csv")
    with open(transaction_log_path, "a") as transaction_log_file:
        transaction_log_file.write("transaction,buyer,quantity,wallet_balance,day,transaction_no\n")
    test_wallet_value_log_path = "%s/%s"%(folder_path, "test-wallet-value.csv")
    with open(test_wallet_value_log_path, "a") as test_wallet_value_file:
        test_wallet_value_file.write("value,day\n")
    for day_count in range(arguments.execution_duration_in_days):
        smart_contract.addInterest()
        liquidity_pool.resetTotalDailyWithdrawl()
        # Transactions per day
        for transaction_count in range(random.randint(arguments.min_transactions_per_day, arguments.max_transactions_per_day)):
            selected_address = user_addresses[random.randint(0, len(user_addresses)-1)]
            # Weighted coin flip between BUY and SELL.
            selected_operation: Operations = random.choices(list(Operations), weights=(arguments.buy_sell_ratio, 1-arguments.buy_sell_ratio), k=1)[0]
            wallet_balance = smart_contract.getWalletBalance(selected_address)
            quantity = 0
            # After the stagnation day, no new buys are made.
            # (Fixed a harmless duplicated assignment here.)
            if day_count >= round(arguments.stagnation_day * arguments.execution_duration_in_days):
                selected_operation = Operations.SELL
            if wallet_balance == 0 or selected_operation == Operations.BUY:
                selected_operation = Operations.BUY
                quantity = liquidity_pool.buyY(
                    random.randint(
                        arguments.min_transaction_amount,
                        arguments.max_transaction_amount,
                    )
                )
                wallet_balance += quantity
                smart_contract.setWalletBalance(selected_address, wallet_balance)
                # Track the all-time-high price and the pool composition at it.
                if liquidity_pool.getPriceY() > liquidity_pool.getAllTimeHighY():
                    liquidity_pool.setAllTimeHighY(liquidity_pool.getPriceY())
                    liquidity_pool.setAllTimeHighXTokenCount(liquidity_pool.getX())
                    liquidity_pool.setAllTimeHighYTokenCount(liquidity_pool.getY())
            try:
                if selected_operation == Operations.SELL:
                    user_holding = (wallet_balance / liquidity_pool.getY() * 100)
                    # Daily-take-profit ratio, written as explicit branches
                    # instead of the old `cond and a or b` pseudo-ternary.
                    if user_holding < 1:
                        dtp_ratio: float = 1
                    elif 1 <= user_holding <= 10:
                        # NOTE: mirrors the old and/or chain, which fell back
                        # to 0.1 whenever the computed ratio was 0 (falsy).
                        dtp_ratio = liquidity_pool.getDailyTakeProfitCoefficient() / user_holding or 0.1
                    else:
                        dtp_ratio = 0.1
                    total_withdrawl = liquidity_pool.getTotalDailyWithdrawl(selected_address)
                    origin_wallet_balance = total_withdrawl + wallet_balance
                    daily_limit = origin_wallet_balance * (dtp_ratio / 100)
                    quantity = random.uniform(
                        0,
                        daily_limit,
                    )
                    able_to_sell = quantity > 0 and total_withdrawl + quantity <= daily_limit
                    if able_to_sell:
                        liquidity_pool.sellY(quantity)
                        liquidity_pool.setTotalDailyWithdrawl(selected_address, total_withdrawl + quantity)
                        wallet_balance -= quantity
                        smart_contract.setWalletBalance(selected_address, wallet_balance)
                    # Inflation check runs even for skipped sells, as before.
                    if liquidity_pool.calculateInflationPercent() >= arguments.rebound_trigger_percentage:
                        rebound_amount = \
                            liquidity_pool.getAllTimeHighXTokenCount() / (liquidity_pool.getAllTimeHighYTokenCount() / liquidity_pool.getAllTimeHighXTokenCount()) - \
                            liquidity_pool.getX() / (liquidity_pool.getY() / liquidity_pool.getX())
                        smart_contract.triggerRebound(rebound_amount)
                    if not able_to_sell:
                        # Nothing was traded; skip the log writes below.
                        continue
            except InsufficientFundsException as e:
                print(e)
                continue
            # Append one row per executed transaction to each CSV log.
            with open(price_log_path, "a") as price_log_file:
                price_log_file.write("%f,%f,%f,%d,%d\n"%(liquidity_pool.getPriceY(), liquidity_pool.getX(), liquidity_pool.getY(), day_count + 1, transaction_count))
            with open(transaction_log_path, "a") as transaction_log_file:
                transaction_log_file.write("%s,%s,%f,%s,%d,%d\n"%(selected_operation.name, selected_address, quantity, wallet_balance, day_count + 1, transaction_count))
            with open(test_wallet_value_log_path, "a") as test_wallet_value_file:
                test_wallet_value_file.write("%f,%s\n"%(arguments.test_wallet_balance * liquidity_pool.getPriceY(), day_count + 1))
        # User growth: add a growth-percentage worth of new empty wallets.
        # (The range is computed from the pre-growth count, so appending
        # while iterating is safe.)
        current_user_addresses = user_addresses
        for _ in range(floor(len(current_user_addresses) * arguments.subscriber_growth_percentage / 100)):
            address = random.getrandbits(128)
            smart_contract.setWalletBalance(address, 0)
            user_addresses.append(address)
    sys.exit(0)
if __name__ == "__main__":
    main()
42828070717 | #####################################################
# #
# This file provides the numerical evidence for #
# Conjecture 1 in the article. The details of the #
# numerics are explained in Appendix B, section 2. #
# #
# One can run this file by opening a terminal #
# and typing #
# #
# python3 numerical_evidence_CKS2018_bounds.py #
# #
# The results of the numerics will be stored in the #
# folder `output`. #
# #
# This file is part of the supporting material #
# belonging to: #
# "Robust self-testing of two-qubit states" #
# Tim Coopmans, Jędrzej Kaniewski and Christian #
# Schaffner (2018) #
# arXiv: #
# #
#####################################################
import numpy as np
import CKS2018
def get_smallest_eigenvalue_of_hermitian(A):
    """
    Returns the smallest eigenvalue of
    a hermitian matrix `A` as a real number.

    Improvement: use `np.linalg.eigvalsh`, which is specialized for
    hermitian matrices.  It is numerically more robust than the general
    `eig`, returns real eigenvalues directly (so no cast from complex is
    needed), and returns them in ascending order.
    """
    eigenvalues = np.linalg.eigvalsh(A)
    # eigvalsh returns eigenvalues sorted ascending: the first is smallest.
    return eigenvalues[0]
def create_twodimensional_grid(x_start, x_stop, num_x, y_start, y_stop, num_y):
    """Return a list of (x, y) tuples covering the rectangle
    [x_start, x_stop] x [y_start, y_stop] with an evenly spaced
    num_x-by-num_y lattice (both endpoints included), ordered with
    x as the outer (slow) coordinate."""
    xs = np.linspace(start=x_start, stop=x_stop, num=num_x, endpoint=True)
    ys = np.linspace(start=y_start, stop=y_stop, num=num_y, endpoint=True)
    grid = []
    for x in xs:
        for y in ys:
            grid.append((x, y))
    return grid
def get_minimum_of_smallest_eigenvalue_of_hermitian_over_grid(A, grid, **additional_parameters_for_A):
    """
    This method iterates over points (a,b)
    in a grid, at each point computes the smallest
    eigenvalue of the hermitian matrix `A(a,b)`, and
    returns the minimum of these smallest eigenvalues.

    Parameters
    ----------
    A : method that takes parameters `a` and `b`, the grid
        coordinates, and possibly additional parameters
        as specified in `additional_parameters_for_A`.
    grid : non-empty list of tuples (`a`, `b`), where `a` and `b`
        are floats.

    Improvement: the original implementation evaluated the first grid
    point twice (once to seed the running minimum, once inside the
    loop); a single `min` over a generator avoids the duplicate
    eigendecomposition and reads more directly.
    """
    return min(
        get_smallest_eigenvalue_of_hermitian(A(a=a, b=b, **additional_parameters_for_A))
        for (a, b) in grid
    )
def write_data_to_csv_file(data_save_location, dictionary, description_preamble):
    """Write `description_preamble` followed by one "key, value" line per
    dictionary entry (in insertion order) to `data_save_location`,
    overwriting any existing file.

    Cleanup: the file was opened with the needless "w+" read/write mode
    and explicitly closed inside the `with` block, which the context
    manager already guarantees.
    """
    with open(data_save_location, "w") as datafile:
        datafile.write(description_preamble)
        for key, value in dictionary.items():
            datafile.write("{}, {}\n".format(key, value))
def get_description_string(number_of_points, number_of_alphas, worst_case_minimum_description):
    """Build the '#'-prefixed preamble written at the top of the CSV output.

    The returned text documents the grid/alpha parameters and embeds the
    worst-case summary block (expected to be '#'-prefixed itself and to
    start with a newline, matching the `#%s#` slot).

    NOTE: the template literal below is deliberately NOT indented with the
    function body -- any leading whitespace would end up verbatim in the
    data file.
    """
    return \
"""#
# This file contains values for alpha in [0, 2), the parameter of
# the family of tilted CHSH operators W_{alpha}, and the minimum
# eigenvalue of the operator
# T_{alpha} = K_{alpha} - s_{alpha} * W_{alpha} - mu_{alpha} * Identity,
# minimized over a discretization of [0, pi/2] x [0, pi/4].
# Here, K_{alpha}, s_{alpha} and mu_{alpha} are defined as in the
# article.
#
# Used parameters:
# - number_of_points : %s
# - number_of_alphas : %s
#
#
# The first column contains values for alpha, the second the minimum
# eigenvalue of T_{alpha} over the grid.
#%s#
"""%(number_of_points, number_of_alphas, worst_case_minimum_description)
if __name__ == "__main__":
    #################################
    #   Parameters of the numerics  #
    #################################
    # Grid coarseness
    # For the analysis in the article,
    # the parameter number_of_points
    # was set to 100
    number_of_points = 100
    # For the analysis in the article,
    # the parameter number_of_alphas
    # was set to 2000
    number_of_alphas = 2000
    # Sample alpha in [0, 2): with endpoint=True, shifting the stop value
    # one step below 2 keeps alpha = 2 itself out of the sample set.
    alpha_start = 0.
    alpha_stop = 2. - 2./number_of_alphas
    #################################
    #   Performing the numerics     #
    #################################
    alpha_list = np.linspace(start=alpha_start,
                             stop=alpha_stop,
                             num=number_of_alphas,
                             endpoint=True)
    # Create the grid: discretization of [0, pi/4] x [0, pi/2]
    # (num_y = 2 * num_x keeps the spacing roughly equal on both axes).
    grid = create_twodimensional_grid(x_start=0.,
                                      x_stop=np.pi/4.,
                                      num_x=number_of_points,
                                      y_start=0.,
                                      y_stop=np.pi/2.,
                                      num_y=2*number_of_points)
    # We store the minimum of the smallest eigenvalue for each alpha in a dictionary
    minevals = {}
    # Compute the minimum of the smallest eigenvalues for each alpha.
    # NOTE: this evaluates CKS2018.T on every grid point for every alpha
    # (number_of_alphas x 2*number_of_points^2 eigendecompositions with the
    # defaults), so a full run takes a long time.
    print("\nNow computing the minimum eigenvalues for:")
    for alpha in alpha_list:
        print("")
        print(r" + \alpha = {} (will go up to but not including 2)".format(alpha))
        minevals[alpha] = get_minimum_of_smallest_eigenvalue_of_hermitian_over_grid(CKS2018.T,
                                                                                    grid=grid,
                                                                                    alpha=alpha)
        print("   Minimum of smallest eigenvalues: {}".format(minevals[alpha]))
    # Compute the "worst case", i.e. the minimum over all minima of smallest eigenvalues
    alpha_of_minimum = min(minevals, key=minevals.get)
    worst_case_minimum = minevals[alpha_of_minimum]
    worst_case_minimum_description=\
"""
# The minimum eigenvalue found over the entire grid is {}, which
# is achieved for alpha={}.
""".format(worst_case_minimum, alpha_of_minimum)
    print(worst_case_minimum_description)
    #################################
    #    Storing the results        #
    #################################
    # Save the data
    description_preamble = get_description_string(number_of_points=number_of_points,
                                                  number_of_alphas=number_of_alphas,
                                                  worst_case_minimum_description=worst_case_minimum_description)
    # NOTE(review): assumes the ./output directory already exists --
    # open() creates the file but not the folder; confirm before running.
    data_save_location = "output/minimum_eigenvalues.csv"
    write_data_to_csv_file(data_save_location=data_save_location,
                           dictionary=minevals,
                           description_preamble=description_preamble)
    # User output
    print("The result of computing the minimum eigenvalues can be found in {}"\
          .format(data_save_location))
| timcp/Self-Testing_Pure_TwoQubit_States | numerical_evidence_CKS2018_bounds.py | numerical_evidence_CKS2018_bounds.py | py | 7,453 | python | en | code | 2 | github-code | 90 |
1863879154 | import description_objects
"""
summary
The classes that will convert the input received as a string for all the
objects in the definition module into that object are included in this module.
created: 24.05.2020 by kemalbayramag@gmail.com
"""
class ParseVariable:
    """Parses a single assignment statement (e.g. "a = 5") into the
    variable's name, its raw value text and a coarse type label
    ("Numeric", "List" or "Text")."""
    # Class-level defaults kept for backwards compatibility; the real
    # values are bound per instance by parse().
    variable_string=None
    variable_name=None
    variable_value=None
    variable_type=None
    def __init__(self,variable_string):
        self.variable_string=variable_string
        self.parse()
    def getType(self,character):
        """Classify the value text by its first character."""
        numbers=["1","2","3","4","5","6","7","8","9","0","-","+"]
        listCharacters=["{","[","("]
        if(character[0] in numbers):
            return "Numeric"
        elif(character[0] in listCharacters):
            return "List"
        else:
            return "Text"
    def parse(self):
        """Split the assignment on its FIRST '=' into name and value.

        BUGFIX: the old character loop skipped EVERY '=' sign, so a value
        containing '=' (e.g. "s = 'x==y'") lost those characters.  Using
        str.partition keeps everything after the first '=' intact.
        """
        name, _, value = self.variable_string.partition("=")
        name=name.strip() #deletes spaces if entry with spaces
        value=value.strip() #deletes spaces if entry with spaces
        self.variable_name=name
        self.variable_value=value
        self.variable_type=self.getType(value)
    def getVariables(self):
        return description_objects.Variables(self.variable_name,self.variable_type,self.variable_value)
class ParseBodies:
    """Splits a code body into variable assignments (parsed via
    ParseVariable) and the remaining statement lines."""
    # Class-level defaults kept for backwards compatibility; mutable state
    # is (re)bound per instance in __init__.
    body_string = ""
    variables = []
    body = ""
    def __init__(self,body_string):
        self.body_string = body_string
        # BUGFIX: `variables` used to alias the shared class-level list, so
        # every ParseBodies instance accumulated the variables of all
        # previously parsed bodies.  Bind fresh per-instance state instead.
        self.variables = []
        self.body = ""
        self.parse()
    def isBadLine(self,line):
        """Return True when the line contains a keyword marking it as a
        statement rather than a plain assignment."""
        keywords = ["def ","class ","for ","switch ","as ", "assert ",
                    "break ", "continue ", "del ", " elif", "else:",
                    "except ", "finally ", "from ", "global ",
                    "if(", "import ", " in ", " is ", "lambda ", " not",
                    " or ", "pass ", "return ", "try:", "except ",
                    "while(", " with ", " yield "]
        for word in keywords:
            if(word in line):
                return True
        return False
    def parse(self):
        # Assignment lines become Variables objects; everything else is
        # re-joined (newline-terminated) into the residual body text.
        body_lines = self.body_string.splitlines()
        for line in body_lines:
            if( not(self.isBadLine(line)) and ("=" in line) and line.strip()[0]!="#"):
                self.variables.append(ParseVariable(line).getVariables())
            else:
                self.body = self.body + line +"\n"
    def getBody(self):
        return description_objects.Bodies(self.variables,self.body)
class ParseFunctions:
    """Parses a single function definition string into its name, parameter
    list, return expression and parsed body (a Bodies object)."""
    # Class-level defaults kept for backwards compatibility; real state is
    # bound per instance in __init__.
    function_string = ""
    name = ""
    return_parameter = ""
    parameters = ""
    body = ""
    def __init__(self,function_string):
        self.function_string=function_string
        self.name = ""
        self.return_parameter = ""
        self.parameters = ""
        self.body = ""
        self.parse()
    def parse(self):
        split_lines = self.function_string.splitlines()
        for line in split_lines:
            if("def"+" " in line):
                # BUGFIX: the old code took line.split()[1], which cut the
                # parameter list at the first space, so "def f(a, b):" lost
                # everything after "a,".  Split on the parentheses instead.
                header = line.split("def ", 1)[1]        #def getJson(self): ---> getJson(self):
                name_part, _, remainder = header.partition("(")
                self.name = name_part.strip()            #getJson(self): ----> getJson
                # Everything up to the final ')' is the parameter list.
                self.parameters = remainder.rsplit(")", 1)[0]  #self): ----> self
            elif("return " in line):
                temp = line.split()[1].strip() #return jsonDefinition ----> jsonDefinition
                self.return_parameter = temp
            else:
                self.body = self.body + line +"\n"
        self.body = ParseBodies(self.body).getBody()
    def getFunction(self):
        return description_objects.Functions(self.name,self.parameters,self.return_parameter,self.body)
class ParseClasses:
    """Parses the source text of a single class into its name, its
    functions (ParseFunctions) and its residual body (ParseBodies)."""
    # Class-level defaults kept for backwards compatibility; mutable state
    # is (re)bound per instance in __init__.
    name=None
    functions=[]
    body=None
    classString=""
    importedModules = []
    modulReferences = []
    def __init__(self,classString,moduleReferences):
        self.classString=classString
        self.modulReferences = moduleReferences
        # BUGFIX: `functions` and `importedModules` used to alias the shared
        # class-level lists, so every new instance kept accumulating the
        # functions/imports of all previously parsed classes.
        self.functions = []
        self.importedModules = []
        self.setParameters()
    def indent(self,string):
        """Return the index of the first alphabetic character, i.e. the
        line's indentation depth (len(string) for lines without letters)."""
        indent=0
        while(indent<len(string) and not(string[indent].isalpha())):
            indent=indent+1
        return indent
    def setParameters(self):
        """Split the class text into its name, its function definitions
        (grouped by indentation) and the remaining body lines."""
        lines=self.classString.splitlines()
        indent=-1
        temp=""
        body_temp=""
        counter=0
        temp_functions=[]
        i=0
        while(i<len(lines)):
            if("class"+" " in lines[i]): #class ParseClasses: ---> ParseClasses
                self.name = lines[i].split()[-1][:-1]
                i=i+1
            elif("def"+" " in lines[i]):
                # BUGFIX: terminate the "def" header with a newline; the old
                # code fused the header with the first body line, corrupting
                # both the signature parse and the function body.
                temp=lines[i]+"\n"
                counter=i+1
                indent=self.indent(lines[i])
                # BUGFIX: guard the lookahead; a "def" on the final line made
                # lines[counter] raise IndexError.
                new_indent=self.indent(lines[counter]) if counter<len(lines) else -1
                while(new_indent>indent and (counter+1)<len(lines)):
                    temp=temp+lines[counter]+"\n"
                    counter=counter+1
                    new_indent=self.indent(lines[counter])
                temp_functions.append(temp.strip())
                i=counter
                # NOTE(review): because of the (counter+1)<len(lines) bound,
                # the very last line of the text is never pulled into a
                # function block -- it falls through to the body instead.
            else:
                body_temp=body_temp+lines[i] +"\n"
                i=i+1
        #set body with body string
        b=ParseBodies(body_temp).getBody()
        self.body=b
        #set functions on temp_functions array
        # (removed a stray `f.getName()` call whose result was discarded)
        for function in temp_functions:
            f=ParseFunctions(function).getFunction()
            self.functions.append(f)
    def setImportedModules(self):
        """From the module-level reference descriptors ("standart-..." /
        "advenced-..."), keep those whose imported name actually appears
        somewhere in this class's source text."""
        referenceName = ""
        for reference in self.modulReferences:
            referenceElements = reference.split("-")
            if(referenceElements[0] == "standart" and len(referenceElements) == 2):#standart-modulname
                referenceName = referenceElements[1]
            elif(referenceElements[0] == "standart" and len(referenceElements) == 3):#standart-modulname-mdlname
                referenceName = referenceElements[2]
            elif(referenceElements[0] == "advenced" and len(referenceElements) == 3):#advenced-modulname-classname
                referenceName = referenceElements[2]
            elif(referenceElements[0] == "advenced" and len(referenceElements) == 4):#advenced-modulname-classname-clsname
                referenceName = referenceElements[3]
            classLines = self.classString.splitlines()
            for line in classLines:
                if(referenceName in line):
                    self.importedModules.append(reference)#import modulname as alias
                    break
    def getClass(self):
        return description_objects.Classes(self.name,self.functions,self.body,self.importedModules)
class ParseModules:
    """Parses one module's source text into its classes, its body and the
    list of modules it imports."""
    # Class-level defaults kept for backwards compatibility; mutable state
    # is (re)bound per instance in __init__.
    classes=[]
    body=None
    functions=[]
    modul_string=""
    name=""
    imported_modules = []
    def __init__(self,name,modul_string):
        self.modul_string=modul_string
        self.name=name
        # BUGFIX: `classes`, `functions` and `imported_modules` used to
        # alias the shared class-level lists, leaking state between
        # ParseModules instances.
        self.classes = []
        self.functions = []
        self.imported_modules = []
        self.setImportedModules()
        self.clearWhiteLines()
        self.setParameters()
    def getName(self):
        # Added accessor: ParseProject.deleteModul/updateModul call
        # getName() on stored modules, but no such method existed before
        # (every call raised AttributeError).
        return self.name
    def setImportedModules(self):
        """Collect "standart-..."/"advenced-..." descriptors for every
        import statement found in the module text."""
        lines = self.modul_string.splitlines()
        for line in lines:
            if("from " in line):#from modulname import classname
                # BUGFIX: str.split() tokens never carry trailing spaces, so
                # the old .index("from ") always raised ValueError; look up
                # the bare keyword token instead.
                index = line.strip().split().index("from")
                temp = "advenced-"
                temp = temp + line.split()[index + 1]#module name
                temp = temp + "-" + line.split()[index + 3]
                # BUGFIX: `"as" in line` matched substrings (e.g. "class",
                # "database"); test the token list instead.
                if("as" in line.split()):#from modulname import classname as clsname
                    temp = temp + "-" + line.split()[index + 5]
                self.imported_modules.append(temp)
            elif("import " in line):#import modulname
                index = line.strip().split().index("import")
                temp = "standart-"
                temp = temp + line.split()[index+1]
                if("as" in line.split()):#import modulname as mdl
                    temp = temp + "-" + line.split()[index+3]
                self.imported_modules.append(temp)
    def indent(self,string):
        """Return the index of the first alphabetic character in `string`."""
        indent=0
        while(indent<len(string) and not(string[indent].isalpha())):
            indent=indent+1
        return indent
    def isWhiteLine(self,line):
        """True when the line consists solely of spaces (or is empty)."""
        for i in line:
            if(i!=" "):
                return False
        return True
    def clearWhiteLines(self):
        """Drop blank lines from the module text before parsing."""
        lines = self.modul_string.splitlines()
        temp_modul_string = ""
        for line in lines:
            if(self.isWhiteLine(line)==False):
                temp_modul_string =temp_modul_string + line +"\n"
        self.modul_string = temp_modul_string
    def setParameters(self):
        """Split the module into class blocks (grouped by indentation) and
        a residual body; both are parsed via ParseClasses/ParseBodies."""
        if(("class" not in self.modul_string) and ("def" not in self.modul_string)):
            self.body=ParseBodies(self.modul_string)
        elif("class" not in self.modul_string):
            # BUGFIX: ParseClasses requires the module references as its
            # second argument; the old single-argument call raised TypeError.
            self.classes=ParseClasses(self.modul_string,self.imported_modules)
        else:
            lines=self.modul_string.splitlines()
            i=0
            counter=0
            indent=0
            new_indent=0
            temp_class=""
            temp_classes=[]
            temp_body=""
            while(i<len(lines)):
                line_split=lines[i].split(" ")
                if("class" in line_split):
                    temp_class=""
                    indent=self.indent(lines[i])
                    counter=i
                    # BUGFIX: guard the lookahead; a class header on the last
                    # line made lines[counter+1] raise IndexError.
                    new_indent=self.indent(lines[counter+1]) if (counter+1)<len(lines) else -1
                    while(new_indent>indent and (counter+1)<len(lines)):
                        temp_class+=lines[counter]+" "
                        counter=counter+1
                        new_indent=self.indent(lines[counter])
                    if counter==i:
                        # BUGFIX: a class header without an indented line
                        # after it left `i` unchanged and looped forever;
                        # consume the lone header as the whole class block.
                        temp_class=lines[counter]+" "
                        counter=counter+1
                    i=counter
                    temp_classes.append(temp_class.strip())
                    # (removed a leftover debug print of every consumed line)
                else:
                    temp_body+=lines[i]+"\n"
                    i=i+1
            # NOTE(review): class lines are joined with spaces, which loses
            # the line structure ParseClasses relies on; confirm intent.
            body_class=ParseClasses(temp_body,self.imported_modules).getClass()
            self.body=body_class#the bodies of the modules are class type data
            for clsa in temp_classes:
                c=ParseClasses(clsa,self.imported_modules).getClass()
                self.classes.append(c)
    def getModule(self):
        return description_objects.Modules(self.name,self.classes,self.body,self.imported_modules)
class ParseProject:
    """Holds the parsed modules of one project and offers add / delete /
    update operations on them."""
    # Class-level defaults kept for backwards compatibility; mutable state
    # is (re)bound per instance in __init__.
    modules=[]
    project_name=""
    def __init__(self,project_name):
        self.project_name=project_name
        # BUGFIX: `modules` used to alias the shared class-level list, so
        # two ParseProject instances saw each other's modules.
        self.modules = []
    def addModul(self,name,modul_string):
        """Parse `modul_string` and append it to the project's modules."""
        modul=ParseModules(name,modul_string)
        self.modules.append(modul)
        return True
    def deleteModul(self,name):
        """Remove every module whose name equals `name`.

        BUGFIX: the old loop tested `modules.getName()` (the new list!)
        instead of `modul.getName()`, raising AttributeError on first use.
        """
        kept=[]
        for modul in self.modules:
            if(modul.getName()!=name):
                kept.append(modul)
        self.modules=kept
        return True
    def updateModul(self,name,modul_string):
        """Re-parse and replace the module named `name`; return whether a
        matching module was found."""
        for i in range(len(self.modules)):
            if(self.modules[i].getName()==name):
                modul=ParseModules(name,modul_string)
                self.modules[i]=modul
                return True
        return False
    def getProject(self):
        project=description_objects.Project(self.project_name,self.modules)
        return project
| kemalbayram61/Python-Project-Visualizer | parse_objects.py | parse_objects.py | py | 11,508 | python | en | code | 1 | github-code | 90 |
21767399052 | import os
import time
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from page_objects.form_page import FormPage
from parameterized import parameterized
CHROME_EXECUTABLE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "chromedriver")
DATA_SET = [("Online Consumer Trends 2020",), ("The Ultimate Guide to Headless Commerce",),("Zero Party Data revolution in Ecommerce",),]
class TestSearchEbook(unittest.TestCase):
    """End-to-end UI test: browse salesmanago.com to the ebooks page,
    locate a given ebook by title, submit the lead form and verify the
    file was downloaded.

    NOTE(review): this is an integration test against the live site and a
    local chromedriver (CHROME_EXECUTABLE_PATH); it is inherently flaky if
    the page layout changes.
    """
    # Static form data submitted with every download request.
    TEST_NAME = "Maciej Duda"
    TEST_EMAIL = "maciej.duda+testrekrutacja@salesmanago.com"
    TEST_COMPANY = "salesmanago"
    TEST_PHONE = "555555555"
    TEST_SITE = "www.salesmanago.pl"
    def setUp(self):
        # Fresh maximized browser session per test case.
        self.driver = webdriver.Chrome(CHROME_EXECUTABLE_PATH)
        self.driver.maximize_window()
        self.driver.get("https://www.salesmanago.com/")
        self.form_page = FormPage(self.driver)
        # Dismiss the cookie banner if present.
        cookies_accept = self.driver.find_element(By.ID, 'close-cookies')
        if cookies_accept:
            cookies_accept.click()
        # menu
        # NOTE(review): the absolute XPaths below are brittle -- any site
        # layout change breaks them; confirm they still match the page.
        self.driver.find_element(By.XPATH, '/html/body/nav[2]/div/div/ul[1]/li[4]').click()
        self.driver.find_element(By.XPATH, '/html/body/nav[2]/div/div/ul[1]/li[4]/div/div/div[1]/div/ul/a[1]').click()
        time.sleep(3)
        # close live chat
        self.driver.switch_to.frame(self.driver.find_element(By.XPATH, '//*[@id="hubspot-messages-iframe-container"]/iframe'))
        self.driver.find_element(By.XPATH, '/html/body/div/div[1]/span/div/button').click()
        self.driver.find_element(By.XPATH, '/html/body/div/div[1]/span/div/button').click()
        self.driver.switch_to.default_content()
    def tearDown(self):
        # Always close the browser, even when the test failed.
        self.driver.quit()
    @parameterized.expand(DATA_SET)
    def test_download_ebook(self, ebook_name):
        # Open each ebook tile in a new tab until the title matches, then
        # fill the form and download; non-matching tabs are closed again.
        all_ebooks= self.driver.find_elements(By.CLASS_NAME, 'ebook__img--container')
        for ebook in all_ebooks:
            time.sleep(1)
            ebook.click()
            self.driver.switch_to.window(self.driver.window_handles[-1])
            title = self.driver.find_element(By.CLASS_NAME, 'ebook__title')
            if title.text.strip().replace("\n", " ") == ebook_name:
                print("Ebook found")
                self.form_page.fill_form(self.TEST_NAME,self.TEST_EMAIL,self.TEST_COMPANY,self.TEST_PHONE,self.TEST_SITE)
                self.form_page.download_file(ebook_name)
                break
            else:
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[0])
        # The download must have landed on disk for the chosen title.
        assert self.form_page.is_file_download(ebook_name) is True
if __name__ == '__main__':
    unittest.main()
| dudamaciej/Python-Selenium | tests/test.py | test.py | py | 2,711 | python | en | code | 0 | github-code | 90 |
24743972899 | #The purpose of this file is to build the CSV file that I will use to build the data visualization
#importing files that will help the program run
from clean_csv import *
from lang import *
from tweet import *
def main():
    """Collect tweets, analyze them and aggregate the results by state.

    Pipeline: Tweets.get_tweet() fetches and analyzes the tweets into a
    CSV file; Clean_CSV.new_csv() then averages the sentiment values per
    state into a second CSV used by the visualization.
    """
    # Creating the objects that will be used in this program.
    tweet = Tweets()
    clean = Clean_CSV()
    # Letting the user know that the program is starting
    print('Program Starting')
    # This method will get all of the tweets, analyze them and then place them into a csv file
    tweet.get_tweet()
    # This method will average the sentiment values by state and move that data into a new CSV file
    clean.new_csv()
    # Letting the user know that the program has finished running
    print('Program Finished')


# BUGFIX: guard the entry point so that importing this module (e.g. from
# tests) no longer triggers the whole collection pipeline as a side effect.
if __name__ == '__main__':
    main()
29415504753 | # Helper code
import collections
# An item can be represented as a namedtuple
# An item can be represented as a namedtuple
# holding one knapsack item's weight and its value.
Item = collections.namedtuple('Item', ['weight', 'value'])
# Naive Approach based on Recursion
def knapsack_max_value(knapsack_max_weight, items):
    # Entry point for the 0/1 knapsack: delegate to the recursive solver,
    # starting from the last item index (each call decides whether the
    # item at `lastIndex` is included in the optimal knapsack).
    lastIndex = len(items) - 1
    return knapsack_recursive(knapsack_max_weight, items, lastIndex)
def knapsack_recursive(capacity, items, lastIndex, _memo=None):
    """Return the maximum total value achievable with items[0..lastIndex]
    and the given remaining `capacity` (0/1 knapsack).

    Improvement: the naive recursion is exponential because it recomputes
    the same (capacity, lastIndex) subproblems; memoizing them makes this
    the standard O(capacity * n) dynamic programme.  `_memo` is internal
    state for the recursion -- callers use the original three-argument
    form unchanged.
    """
    if _memo is None:
        _memo = {}
    # Base case: no capacity left, or no items left to consider.
    if (capacity <= 0) or (lastIndex < 0):
        return 0
    key = (capacity, lastIndex)
    if key in _memo:
        return _memo[key]
    # Option A: put the item in the knapsack (only if it fits).
    valueA = 0
    if (items[lastIndex].weight <= capacity):
        valueA = items[lastIndex].value + knapsack_recursive(
            capacity - items[lastIndex].weight, items, lastIndex - 1, _memo)
    # Option B: do not put the item in the knapsack.
    valueB = knapsack_recursive(capacity, items, lastIndex - 1, _memo)
    # Pick the maximum of the two results and cache it.
    result = max(valueA, valueB)
    _memo[key] = result
    return result
# Regression fixture: each entry pairs solver inputs (as kwargs) with the
# known optimal total value for that knapsack instance.
tests = [
    {
        'correct_output': 14,
        'input':
        {
            'knapsack_max_weight': 15,
            'items': [Item(10, 7), Item(9, 8), Item(5, 6)]}},
    {
        'correct_output': 13,
        'input':
        {
            'knapsack_max_weight': 25,
            'items': [Item(10, 2), Item(29, 10), Item(5, 7), Item(5, 3), Item(5, 1), Item(24, 12)]}}]
# Smoke-test the solver: ** expands the stored kwargs into the call.
for test in tests:
    assert test['correct_output'] == knapsack_max_value(**test['input'])
74410853096 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Load the Iris measurements (expects Iris.csv next to this script).
df = pd.read_csv('Iris.csv')
#Check null value
# NOTE(review): the results of the next three expressions are discarded --
# they are only useful in an interactive session (nothing is printed or
# asserted here).
df.isnull().any()
#Check Datatype of the flowers features
df.dtypes
#check a quick summary of dataset.
df.describe()
"""It seems most of summary data are pretty normal.
Unless I notice that the PetalWidthCm have a weird value.
It has a minimum value of 0.1 while the maximum one is actually 2.5.
Let’s try to plot the PetalWithCm."""
# Histogram of petal widths; plt.show() blocks until the window is closed.
df['PetalWidthCm'].plot.hist()
plt.show()
"""That seems very weird, about 50 flowers in this
dataset have values between 0.1 and 0.5. Let’s check the file
."""
"""Check The Relationship Between Columns"""
# Pairwise scatter matrix of all features, colored by species.
sns.pairplot(df, hue='Species')
"""Splitting The Dataset"""
all_inputs = df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']].values
all_classes = df['Species'].values
"""Split the IRIS dataset into 2 subsets: Training Dataset (70% rows) and Testing Dataset (30% rows)"""
# random_state=1 makes the split reproducible.
(train_inputs, test_inputs, train_classes, test_classes) = train_test_split(all_inputs, all_classes, train_size=0.7, random_state=1)
"""Classification with Decision Tree library from sklearn
Finally, we reach the last part that is the classification itself.
We will use the decision tree classifier from the scikit-learn.
The accuracy of our model achieves 95% without doing too much.
"""
dtc = DecisionTreeClassifier()
dtc.fit(train_inputs, train_classes)
# Accuracy of the fitted tree on the held-out 30% split (value discarded).
dtc.score(test_inputs, test_classes)
#Added not part of it
# NOTE(review): this constructor call is dead code -- its result is never
# assigned or used, and several of its keyword arguments (e.g. presort,
# min_impurity_split) have been removed in recent scikit-learn releases,
# so this line may raise TypeError there; confirm whether to delete it.
DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1,
                       min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None,
                       min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)
| Jimoh1993/Udemy-Data-Analysis-Visualization-Bootcamp-by-Python-Data-Analytics-Data-Science | Decision Trees on Iris Dataset.py | Decision Trees on Iris Dataset.py | py | 2,011 | python | en | code | 0 | github-code | 90 |
def ice(r, c):
    """Depth-first flood fill: mark every 0-cell reachable from (r, c)
    through the four neighbour directions as visited (set to 1)."""
    for dr, dc in zip(dx, dy):
        nr, nc = r + dr, c + dc
        if 0 <= nr < n and 0 <= nc < m and not lst[nr][nc]:
            lst[nr][nc] = 1
            ice(nr, nc)
# Four neighbour offsets: up, left, down, right.
dx = (-1, 0, 1, 0)
dy = (0, -1, 0, 1)
# Grid size, then the grid itself: one row of 0/1 digits per input line
# (0 = empty slot that can hold ice, 1 = blocked).
n, m = map(int, input().split())
lst = [list(map(int, input())) for _ in range(n)]
cnt = 0
# Scan every cell; each still-unvisited 0 seeds a new connected component
# ("one ice cube"), which the DFS then marks as visited.
# NOTE: ice() marks only neighbours, so the seed cell itself stays 0 --
# but all its 0-neighbours get marked, so it can never be reached again
# and the component count stays correct.
for i in range(n):
    for j in range(m):
        if not lst[i][j]:
            ice(i, j)
            cnt += 1
print(cnt)
34285908617 | # -*- coding: utf-8 -*-
""" Models for the video application keep track of uploaded videos and converted versions"""
from __future__ import unicode_literals
import os
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
from django.dispatch import receiver
from django.core.files.storage import FileSystemStorage
@python_2_unicode_compatible
class Video(models.Model):
    """A video file stored on the site.

    Wraps a single uploaded file; the stored path is relative to
    MEDIA_ROOT, under settings.VIDEO_UPLOAD_LOCATION.
    """
    # video file name relative to MEDIA_ROOT
    videofile = models.FileField(
        # Translators: Video: videofile
        _("Video file in h264 mp4 format"), upload_to=settings.VIDEO_UPLOAD_LOCATION)
    class Meta:
        verbose_name = _('Video')
        verbose_name_plural = _('Videos')
    def __str__(self):
        # Display the stored file's name wherever the model is rendered
        # as text (admin lists, shell, templates).
        return self.videofile.name
class GlossVideoStorage(FileSystemStorage):
    """Implement our shadowing video storage system.

    A FileSystemStorage whose stored names are sharded into
    subdirectories named after the first two characters of the filename.
    """
    def __init__(self, location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL):
        # Defaults to the project's media root/URL; both can be overridden.
        super(GlossVideoStorage, self).__init__(location, base_url)
    def get_valid_name(self, name):
        """Generate a valid name, we use directories named for the
        first two digits in the filename to partition the videos"""
        # e.g. "dir/abcd.mp4" -> "dir/AB/abcd.mp4": the basename's first
        # two characters (uppercased) name the shard subdirectory.
        (targetdir, basename) = os.path.split(name)
        path = os.path.join(str(basename)[:2].upper(), str(basename))
        result = os.path.join(targetdir, path)
        return result
    def url(self, name):
        # Stored names are relative to MEDIA_ROOT, so the public URL is
        # simply MEDIA_URL + name.
        return settings.MEDIA_URL + name
# Shared storage instance used by the model fields below.
storage = GlossVideoStorage()
@python_2_unicode_compatible
class GlossVideo(models.Model):
    """A video that represents a particular idgloss"""
    title = models.CharField(_("Title"), blank=True, unique=False, max_length=100,
                             help_text=_("Descriptive name of the video."))
    videofile = models.FileField(_("Video file"), upload_to=settings.GLOSS_VIDEO_DIRECTORY, storage=storage,
                                 help_text=_("Video file."))
    posterfile = models.FileField(_("Poster file"), upload_to=os.path.join(settings.GLOSS_VIDEO_DIRECTORY, "posters"),
                                  storage=storage, blank=True, help_text=_("Still image representation of the video."),
                                  default="")
    gloss = models.ForeignKey('dictionary.Gloss', verbose_name=_("Gloss"), null=True,
                              help_text=_("The gloss this GlossVideo is related to."))
    dataset = models.ForeignKey('dictionary.Dataset', verbose_name=_("Glossvideo dataset"), null=True,
                                help_text=_("Dataset of a GlossVideo, derived from gloss (if any) or chosen when video "
                                            "was uploaded."))
    # Translators: GlossVideo: version
    version = models.IntegerField(_("Version"), default=0,
                                  help_text=_("A number that represents the order of the Glossvideo's in "
                                              "a gloss page. Smaller number means higher priority."))

    class Meta:
        ordering = ['version']
        verbose_name = _('Gloss video')
        verbose_name_plural = _('Gloss videos')

    def save(self, *args, **kwargs):
        # Save object so that we can access the saved fields.
        super(GlossVideo, self).save(*args, **kwargs)
        # If the GlossVideo object has a Gloss set, rename that glosses videos (that aren't correctly named).
        if self.videofile and hasattr(self, 'gloss') and self.gloss is not None:
            self.rename_video()
        # NOTE(review): self.dataset is assigned after super().save() and is
        # only persisted if rename_video() triggered a nested save -- confirm
        # this is intended.
        try:
            self.dataset = self.gloss.dataset
        except AttributeError:
            pass

    def get_absolute_url(self):
        # return self.videofile.url
        # Normalize Windows-style backslashes so the URL is always valid.
        return self.videofile.url.replace('\\', '/')

    def rename_video(self):
        """Rename the video and the video to correct path if the glossvideo object has a foreignkey to a gloss."""
        # Do not rename the file if glossvideo doesn't have a gloss.
        if hasattr(self, 'gloss') and self.gloss is not None:
            # Get file extensions
            ext = os.path.splitext(self.videofile.path)[1]
            # Create the base filename for the video based on the new self.gloss.idgloss
            new_filename = GlossVideo.create_filename(self.gloss.idgloss, self.gloss.pk, self.pk, ext)
            # Create new_path by joining 'glossvideo' and the two first letters from gloss.idgloss
            new_path = os.path.join('glossvideo', str(self.gloss.idgloss[:2]).upper(), new_filename)
            full_new_path = os.path.join(settings.MEDIA_ROOT, new_path)
            # Check if a file already exists in the path we try to save to or if this file already is in that path.
            # (This check also stops the save() below from recursing forever.)
            if not (os.path.isfile(full_new_path) or self.videofile == new_path):
                try:
                    # Rename the file in the system, get old_path from self.videofile.path.
                    os.renames(self.videofile.path, full_new_path)
                except IOError:
                    # If there is a problem moving the file, don't change self.videofile, it would not match
                    return
                except OSError:
                    # If the sourcefile does not exist, raise OSError
                    raise OSError(str(self.pk) + ' ' + str(self.videofile))
                # Change the self.videofile to the new path
                self.videofile = new_path
                self.save()

    @staticmethod
    def create_filename(idgloss, glosspk, videopk, ext):
        """Returns a correctly named filename"""
        return str(idgloss) + "-" + str(glosspk) + "_vid" + str(videopk) + ext

    def create_poster_filename(self, ext):
        """Returns a preferred filename of posterfile. Ext is the file extension without the dot."""
        if self.gloss:
            return str(self.gloss.idgloss) + "-" + str(self.gloss.pk) + "_vid" + str(self.pk) + "_poster." + ext
        return self.videofile.name + "." + ext

    @staticmethod
    def rename_glosses_videos(gloss):
        """Renames the filenames of selected Glosses videos to match the Gloss name"""
        glossvideos = GlossVideo.objects.filter(gloss=gloss)
        for glossvideo in glossvideos:
            glossvideo.rename_video()

    def get_extension(self):
        """Returns videofiles extension."""
        return os.path.splitext(self.videofile.path)[1]

    def has_poster(self):
        """Returns true if the glossvideo has a poster file."""
        if self.posterfile:
            return True
        return False

    def __str__(self):
        return self.videofile.name
| ISOF-ITD/teckenlistor | signbank/video/models.py | models.py | py | 6,712 | python | en | code | 1 | github-code | 90 |
import sys

# p03476: count "2017-like" numbers x in [l, r] -- odd x where both x and
# (x + 1) / 2 are prime -- answering each query via prefix sums.
Q = int(sys.stdin.readline().strip())
LR = []
for i in range(Q):
    LR.append([int(x) for x in sys.stdin.readline().strip().split()])

N = 10**5 + 1

# Sieve of Eratosthenes: Primes[i] == i when i is prime, else 0.
# Fix: iterate over a range instead of the list being mutated, and drop the
# unused `primes` comprehension the original built (wasted O(N) work).
Primes = list(range(N + 1))
Primes[1] = 0
for p in range(2, int((N + 1) ** 0.5) + 1):
    if Primes[p] == 0:
        continue
    for i in range(p + p, N + 1, p):
        Primes[i] = 0

# likes[i] = number of 2017-like values <= i (running prefix count).
likes = []
r = 0
for n in range(N):
    if n % 2 == 1 and Primes[n] > 0 and Primes[n // 2 + 1] > 0:
        r += 1
    likes.append(r)

for i in range(Q):
    print(likes[LR[i][1]] - likes[LR[i][0] - 1])
| Aasthaengg/IBMdataset | Python_codes/p03476/s981259240.py | s981259240.py | py | 561 | python | en | code | 0 | github-code | 90 |
import numpy

# Advent of Code 2020 day 1: find the two expense entries summing to 2020
# and print them with their product.
with open("input.txt") as f:
    expenses = [int(e) for e in f.readlines()]

# Pairwise sum matrix: sums[i][j] = expenses[i] + expenses[j].
ex = numpy.tile(numpy.array(expenses), (len(expenses), 1))
sums = ex + ex.transpose()
# Fix: zero the diagonal so an entry of exactly 1010 cannot pair with itself.
numpy.fill_diagonal(sums, 0)
indexes = numpy.where(sums == 2020)[0]
num1, num2 = expenses[indexes[0]], expenses[indexes[1]]
print(num1, num2, num1 * num2)
| susannmt/adventofcode | adventofcode2020/day1/vectorized_sum20.py | vectorized_sum20.py | py | 283 | python | en | code | 1 | github-code | 90 |
9115573083 | from calendar import c
import enum
from rest_framework import serializers
from django.contrib.auth.hashers import make_password
from .models import *
from .forms import *
from cloudinary.forms import cl_init_js_callbacks
class ModelSerializer(serializers.ModelSerializer):
    # Serializes the (vehicle) Model entity with every field.
    class Meta:
        model = Model
        fields = '__all__'

class MakeSerializer(serializers.ModelSerializer):
    # Nested read-out of all models belonging to this make.
    make_models = ModelSerializer(many=True)
    class Meta:
        model = Make
        # fields = '__all__'
        fields = ['make_id','make_type','make_models']

class CategorySerializer(serializers.ModelSerializer):
    class Meta:
        model = Category
        fields = '__all__'

class FuelSerializer(serializers.ModelSerializer):
    class Meta:
        model = Fuel
        fields = '__all__'

class TransmissionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Transmission
        fields = '__all__'

class RegionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Region
        fields = '__all__'

class PhotoSerializer(serializers.ModelSerializer):
    # Optional id lets nested writes distinguish existing photos (update)
    # from new ones (create); the owning sale post is assigned server-side.
    photo_id = serializers.IntegerField(required=False)
    class Meta:
        model = Photo
        fields = '__all__'
        read_only_fields = ('salePost_id',)
class SalePostSerializer(serializers.ModelSerializer):
    """Sale-post serializer with nested, writable photos and read-only
    denormalized labels pulled from the related rows."""
    photos = PhotoSerializer(many=True)
    model_type = serializers.ReadOnlyField(source='model_id.model_type')
    make_type = serializers.ReadOnlyField(source='model_id.make_id.make_type')
    category_type = serializers.ReadOnlyField(source='category_id.category_type')
    transmission_type = serializers.ReadOnlyField(source='transmission_id.transmission_type')
    fuel_type = serializers.ReadOnlyField(source='fuel_id.fuel_type')
    region_type = serializers.ReadOnlyField(source='region_id.region_type')
    class Meta:
        model = SalePost
        fields = '__all__'
    def create(self, validated_data):
        # Create the post first, then attach each nested photo to it.
        photos = validated_data.pop('photos')
        salepost = SalePost.objects.create(**validated_data)
        print(salepost)
        for photo in photos:
            Photo.objects.create(**photo, salePost_id=salepost)
            print("photos cloudinary", photo)
        return salepost
    def update(self,instance,validated_data):
        # Scalar fields: fall back to the current value when a key is absent.
        instance.salePost_description = validated_data.get('salePost_description', instance.salePost_description)
        instance.salePost_yearModel = validated_data.get('salePost_yearModel', instance.salePost_yearModel)
        instance.salePost_yearManufacturing = validated_data.get('salePost_yearManufacturing', instance.salePost_yearManufacturing)
        instance.salePost_kilometer = validated_data.get('salePost_kilometer', instance.salePost_kilometer)
        instance.salePost_cylinder = validated_data.get('salePost_cylinder', instance.salePost_cylinder)
        instance.salePost_door = validated_data.get('salePost_door', instance.salePost_door)
        instance.salePost_color = validated_data.get('salePost_color', instance.salePost_color)
        instance.salePost_price = validated_data.get('salePost_price', instance.salePost_price)
        instance.model_id = validated_data.get('model_id', instance.model_id)
        instance.category_id = validated_data.get('category_id', instance.category_id)
        instance.transmission_id = validated_data.get('transmission_id', instance.transmission_id)
        instance.fuel_id = validated_data.get('fuel_id', instance.fuel_id)
        instance.region_id = validated_data.get('region_id', instance.region_id)
        instance.save()
        # Nested photos: update those whose photo_id still exists, create the
        # ones without an id, then delete every photo not in keep_photos.
        photos = validated_data.pop('photos')
        keep_photos = []
        # existing_photos = [p.photo_id for p in instance.photos]
        for photo in photos:
            if "photo_id" in photo.keys():
                print("sí existe")
                if Photo.objects.filter(pk=photo["photo_id"]).exists():
                    p = Photo.objects.get(pk=photo["photo_id"])
                    print(p)
                    p.photo_url = photo.get('photo_url', p.photo_url)
                    p.save()
                    keep_photos.append(p.photo_id)
                    print(keep_photos)
                else:
                    # NOTE(review): a stale photo_id is silently skipped, so it
                    # is not added to keep_photos -- confirm that is intended.
                    continue
            else:
                print("no tiene id")
                p = Photo.objects.create(**photo,salePost_id=instance)
                keep_photos.append(p.photo_id)
                print(keep_photos)
                print(photo)
        for photo in instance.photos.values():
            print(photo["photo_id"])
            if photo["photo_id"] not in keep_photos:
                pp = Photo.objects.get(pk=photo["photo_id"])
                pp.delete()
        return instance
class ExtentUserSerializer(serializers.ModelSerializer):
    # Profile data attached to a User; the owning user is set server-side.
    # user_id = serializers.IntegerField(required=False)
    class Meta:
        model = ExtentUser
        fields = '__all__'
        read_only_fields = ('user_id',)
class UserSerializer(serializers.ModelSerializer):
    """User serializer with a nested, writable ExtentUser profile."""
    extentuser = ExtentUserSerializer()
    class Meta:
        model = User
        fields = '__all__'
        # fields = ['id','first_name','last_name','email','username','password','extentuser']
    def create(self,validated_data):
        extent_user = validated_data.pop('extentuser')
        # Hash the plaintext password before persisting the user.
        password = validated_data.pop('password')
        validated_data['password'] = make_password(password)
        user = User.objects.create(**validated_data)
        ExtentUser.objects.create(**extent_user, user_id=user)
        return user
    def update(self, instance, validated_data):
        # NOTE(review): 'extentuser' and 'password' are popped unconditionally,
        # so a partial update missing either raises KeyError -- confirm intended.
        print(instance.id)
        extent_user = validated_data.pop('extentuser')
        instance.first_name = validated_data.get("first_name", instance.first_name)
        instance.last_name = validated_data.get("last_name", instance.last_name)
        instance.username = validated_data.get("username", instance.username)
        instance.email = validated_data.get("email", instance.email)
        password = validated_data.pop('password')
        validated_data['password'] = make_password(password)
        instance.password = validated_data.get("password", instance.password)
        # NOTE(review): 'extentuser' was popped above, so this .get() always
        # returns the current value -- effectively a no-op line.
        instance.extentuser = validated_data.get("extentuser", instance.extentuser)
        instance.save()
        # Assumes the ExtentUser primary key equals the user's id -- verify.
        new_extentuser = ExtentUser.objects.get(pk=instance.id)
        new_extentuser.extentUser_dni = extent_user.get('extentUser_dni',new_extentuser.extentUser_dni)
        new_extentuser.extentUser_cellphone = extent_user.get('extentUser_cellphone',new_extentuser.extentUser_cellphone)
        new_extentuser.save()
        return instance
20370681030 | # Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC ## Run the output of recommended optimize statements as a single run or schedule as a periodic job
# MAGIC
# MAGIC <h4> Run this after the delta optimizer is finished </h4>
# MAGIC
# MAGIC #### 3 Modes:
# MAGIC
# MAGIC <ul> 1. <b>include_all_tables</b>: this mode optimizes all tables in the databases that the delta optimizer was provided at the profiling stage
# MAGIC <ul> 2. <b> use_include_list</b> : this mode only optimizes tables that you explicitly WANT to INCLUDE that is a subset of the database monitored in the profiling stage. Must provide fully qualified tables names for now (i.e. hive_metastore.iot_dashboard.silver_sensors,etc.).
# MAGIC <ul> 3. <b> use_exclude_list</b> : this mode optimizes all tables in the databases monitored EXCEPT the list provided. Must provide fully qualified table names for now.
# MAGIC
# MAGIC
# MAGIC #### Roadmap:
# MAGIC
# MAGIC 1. Be more selective about the type of analyze statements depending on table size and update frequency (less frequently updated tables don't need it as much)
# MAGIC 2. Use DLT metaprogramming framework to run in parallel (performance implications)
# MAGIC 3. Use Jobs API to automatically set up a daily / hourly job for this. This is NOT always recommended by default. The optimize timing greatly depends on the ETL pipelines
# MAGIC 4. Dynamically decide how often to run ANALYZE TABLE commands based on table size mapping (job that does this for you)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ### Run Commands in Particular Order:
# MAGIC
# MAGIC <li> 1. ALTER TABLE
# MAGIC <li> 2. Column Reordering
# MAGIC <li> 3. OPTIMIZE TABLE
# MAGIC <li> 4. ANALYZE TABLE
# COMMAND ----------
from pyspark.sql.functions import *
# COMMAND ----------
from deltaoptimizer import DeltaOptimizerBase, DeltaProfiler, QueryProfiler, DeltaOptimizer
# COMMAND ----------
# Notebook parameters (settable from the Jobs UI or a caller notebook).
dbutils.widgets.dropdown("table_mode", "include_all_tables", ["include_all_tables", "use_exclude_list", "use_include_list"])
dbutils.widgets.text("exclude_list(csv)", "")
dbutils.widgets.text("include_list(csv)", "")
dbutils.widgets.text("Optimizer Output Database:", "hive_metastore.delta_optimizer")

# COMMAND ----------

# Database that holds the delta optimizer's recommendation output.
optimizer_location = dbutils.widgets.get("Optimizer Output Database:").strip()

delta_optimizer = DeltaOptimizer(database_name=optimizer_location)
# COMMAND ----------

## This table by default has only 1 file, so it shouldnt be expensive to collect
table_mode = dbutils.widgets.get("table_mode")
# CSV widget values -> stripped, fully-qualified table-name lists.
include_table_list = [i.strip() for i in dbutils.widgets.get("include_list(csv)").split(",")]
exclude_table_list = [i.strip() for i in dbutils.widgets.get("exclude_list(csv)").split(",")]

# Select the recommendation rows according to the chosen mode.
if table_mode == "include_all_tables":
    config_row = (delta_optimizer.get_results()
                  .collect()
                 )
elif table_mode == "use_include_list":
    config_row = (delta_optimizer.get_results()
                  .filter(col("TableName").isin(*include_table_list))
                  .collect()
                 )
elif table_mode == "use_exclude_list":
    config_row = (delta_optimizer.get_results()
                  .filter(~col("TableName").isin(*exclude_table_list))
                  .collect()
                 )
# COMMAND ----------

# DBTITLE 1,Step 1 - Get Table Properties Config
## This table by default has only 1 file, so it shouldnt be expensive to collect
# i[3] of each recommendation row holds the table-properties SQL command.
config_tbl_prop = [i[3] for i in config_row]

print(f"Running {len(config_tbl_prop)} TBL PROPERTIES (file size and re-writes) commands: \n {config_tbl_prop}")

# COMMAND ----------

# DBTITLE 1,Run TBL Properties Commands
for i in config_tbl_prop:
    try:
        print(f"Running TABLE PROPERTIES command for {i}...")
        spark.sql(i)
        print(f"Completed TABLE PROPERTIES command for {i}!\n")
    except Exception as e:
        # Keep going: one failing table should not stop the whole run.
        print(f"TABLE PROPERTIES failed with error: {str(e)}\n")
# COMMAND ----------
# DBTITLE 1,Move Z-Order columns to front
# Fix: the original printed ``col_list`` in a cell *before* the cell that
# defined it (NameError on a top-to-bottom run) and used the chained
# assignment ``col_list = config_tbl_prop = ...`` which silently clobbered
# config_tbl_prop with the column-ordering commands.
# i[5] of each recommendation row holds the per-table column-order commands.
col_list = [i[5] for i in config_row]
print(col_list)

# COMMAND ----------

### This is a recursive step, ordering needs to happen one at a time
## Starting simple, just moving ZORDER cols to front, but this can become more nuanced
for i in col_list:
    for j in i:
        try:
            spark.sql(j)
            print(f"Completed column order change for table {i} and column {j}")
        except Exception as e:
            print(f"Unable to change order (usually means cause its an Id column and doesnt need reordering anyways...skipping to next columns) \n with error: {str(e)} \n ")
# COMMAND ----------
# DBTITLE 1,Step 2 - Get config for OPTIMIZE Commands
## This table by default has only 1 file, so it shouldnt be expensive to collect
# i[2] of each recommendation row holds the OPTIMIZE SQL command.
config_optim = [i[2] for i in config_row]

print(f"Running {len(config_optim)} OPTIMIZE commands: \n {config_optim}")

# COMMAND ----------

# DBTITLE 1,Run through OPTIMIZE commands
for i in config_optim:
    try:
        print(f"Running OPTIMIZE command for {i}...")
        spark.sql(i)
        print(f"Completed OPTIMIZE command for {i}!\n ")
    except Exception as e:
        # A failed OPTIMIZE on one table should not abort the remaining ones.
        print(f"Optimize failed with error: {str(e)}\n")
# COMMAND ----------
# DBTITLE 1,Step 3 - Get Config for ANALYZE TABLE commands
## This table by default has only 1 file, so it shouldnt be expensive to collect
# i[4] of each recommendation row holds the ANALYZE TABLE SQL command.
config_tbl_stats = [i[4] for i in config_row]

# Fix: the message previously said "TBL PROPERTIES (file size and re-writes)"
# -- a copy-paste from Step 1 -- although this step runs ANALYZE TABLE.
print(f"Running {len(config_tbl_stats)} ANALYZE TABLE commands: \n {config_tbl_stats}")

# COMMAND ----------

# DBTITLE 1,Run through Config for ANALYZE
for i in config_tbl_stats:
    try:
        print(f"Running ANALYZE TABLE command for {i}...")
        spark.sql(i)
        print(f"Completed ANALYZE TABLE command for {i}!\n")
    except Exception as e:
        # Keep going: one failing table should not stop the whole run.
        print(f"ANALYZE TABLE failed with error: {str(e)}\n")
| AbePabbathi/lakehouse-tacklebox | 30-performance/delta-optimizer/customer-facing-delta-optimizer/Step 2_ Strategy Runner.py | Step 2_ Strategy Runner.py | py | 5,715 | python | en | code | 21 | github-code | 90 |
class Phone:
    """A simple phone with a brand, price and colour."""

    # Class-level attribute shared by every instance.
    manufactured = 'china'

    def __init__(self, brand, price, color):
        self.brand = brand
        self.price = price
        self.color = color

    def send_sms(self, number, text):
        """Return a string describing an SMS being sent."""
        return 'sending:{} to {}'.format(text, number)
# Create instances and print the class-level ('manufactured') attribute
# alongside the per-instance ones.
my_phone=Phone('Realme',13000,'blue')
print(my_phone.brand,my_phone.manufactured,my_phone.color,my_phone.price)
dad_phone=Phone('Symphony',1000,'black')
print(dad_phone.brand,dad_phone.manufactured,dad_phone.color,dad_phone.price)
import sys
input = sys.stdin.readline

# BOJ 14425: given N strings, count how many of the M query strings appear
# among them.
n, m = map(int, input().split())

# Fix: a set gives O(1) membership tests; the original list made every query
# an O(N) scan, i.e. O(N*M) overall.
words = set()
count = 0

for _ in range(n):
    words.add(input())

for _ in range(m):
    # Lines are compared verbatim (trailing newline included), which is
    # consistent on both sides since both are read with readline().
    if input() in words:
        count += 1

print(count)
| chlendyd7/Algorithm | Algorithm_BackJoon/14425.py | 14425.py | py | 248 | python | en | code | 0 | github-code | 90 |
22315171742 | """
문제 설명
rows x columns 크기인 행렬이 있습니다. 행렬에는 1부터 rows x columns까지의 숫자가 한 줄씩 순서대로 적혀있습니다. 이 행렬에서 직사각형 모양의 범위를 여러 번 선택해, 테두리 부분에 있는 숫자들을 시계방향으로 회전시키려 합니다. 각 회전은 (x1, y1, x2, y2)인 정수 4개로 표현하며, 그 의미는 다음과 같습니다.
x1 행 y1 열부터 x2 행 y2 열까지의 영역에 해당하는 직사각형에서 테두리에 있는 숫자들을 한 칸씩 시계방향으로 회전합니다.
다음은 6 x 6 크기 행렬의 예시입니다.
grid_example.png
이 행렬에 (2, 2, 5, 4) 회전을 적용하면, 아래 그림과 같이 2행 2열부터 5행 4열까지 영역의 테두리가 시계방향으로 회전합니다. 이때, 중앙의 15와 21이 있는 영역은 회전하지 않는 것을 주의하세요.
rotation_example.png
행렬의 세로 길이(행 개수) rows, 가로 길이(열 개수) columns, 그리고 회전들의 목록 queries가 주어질 때, 각 회전들을 배열에 적용한 뒤, 그 회전에 의해 위치가 바뀐 숫자들 중 가장 작은 숫자들을 순서대로 배열에 담아 return 하도록 solution 함수를 완성해주세요.
"""
def solution(rows, columns, queries):
matrix = [[row * columns + col + 1 for col in range(columns)] for row in range(rows)]
answer = []
for t, l, b, r in queries:
top, left, bottom, right = t-1, l-1, b-1, r-1
tmp = matrix[top][left]
minimum = tmp
for y in range(top, bottom):
value = matrix[y+1][left]
matrix[y][left] = value
minimum = min(minimum, value)
for x in range(left, right):
value = matrix[bottom][x+1]
matrix[bottom][x] = value
minimum = min(minimum, value)
for y in range(bottom, top, -1):
value = matrix[y-1][right]
matrix[y][right] = value
minimum = min(minimum, value)
for x in range(right, left, -1):
value = matrix[top][x-1]
matrix[top][x] = value
minimum = min(minimum, value)
matrix[top][left+1] = tmp
answer.append(minimum)
return answer | polkmn222/programmers | python/연습문제/level2/행렬 테두리 회전하기.py | 행렬 테두리 회전하기.py | py | 2,337 | python | ko | code | 1 | github-code | 90 |
3421800875 | # Hello World program in Python
class Solution:
    """Checks whether a list contains a strictly increasing subsequence of
    length 3, printing a verbose trace of the O(n^2) LIS computation."""

    # Debug counter of outer-loop iterations; declared class-level, but
    # ``self.round += 1`` rebinds it per instance after the first increment.
    round = 0

    def checkElements(self, input):
        # NOTE: the parameter name shadows the builtin ``input``.
        # lenList[i] = length of the longest increasing subsequence ending at i.
        lenList = []
        maxLen = 0
        for i in range(len(input)):
            self.round += 1
            print("=======")
            print("Round: " + str(self.round))
            lenList.append(1)
            print("inx: " + str(i) + ", val:" + str(input[i]))
            print("---- loop in i by j ----")
            for j in range(i):
                if input[i] > input[j]:
                    print("Previous lenList: " + str(lenList))
                    print("inx i: " + str(i) + ", i.val: " + str(input[i]))
                    print("inx j: " + str(j) + ", j.val: " + str(input[j]))
                    lenList[i] = max(lenList[i], lenList[j] + 1)
                    print("Updated lenList: " + str(lenList))
                    print("----------------")
            maxLen = max(lenList[i], maxLen)
            # Stop as soon as an increasing subsequence of length 3 exists.
            if maxLen == 3:
                return True
        return False
# Sample inputs; the expected result is noted next to each commented case.
# data = [1, 3, 5, 2, 3, 4] # Return True
data = [1, 10, 4, 5]
# data = [1, 2, 1, 1, 3, 1] # Return True
# data = [1, 2, 1, 1, 1, 1] # Return False
# data = [5, 3, 1] # Return False
sol = Solution()
result = sol.checkElements(data)
print("===== Function Over =====")
print(result)
# ========= RUNTIME LOG ===========
# =======
# Round: 1
# inx: 0, val:1
# ---- loop in i by j ----
# =======
# Round: 2
# inx: 1, val:10
# ---- loop in i by j ----
# Previous lenList: [1, 1]
# inx i: 1, i.val: 10
# inx j: 0, j.val: 1
# Updated lenList: [1, 2]
# ----------------
# =======
# Round: 3
# inx: 2, val:4
# ---- loop in i by j ----
# Previous lenList: [1, 2, 1]
# inx i: 2, i.val: 4
# inx j: 0, j.val: 1
# Updated lenList: [1, 2, 2]
# ----------------
# =======
# Round: 4
# inx: 3, val:5
# ---- loop in i by j ----
# Previous lenList: [1, 2, 2, 1]
# inx i: 3, i.val: 5
# inx j: 0, j.val: 1
# Updated lenList: [1, 2, 2, 2]
# ----------------
# Previous lenList: [1, 2, 2, 2]
# inx i: 3, i.val: 5
# inx j: 2, j.val: 4
# Updated lenList: [1, 2, 2, 3]
# ----------------
# ===== Function Over =====
# True | wangdu1005/Learn-Algorithm-And-Data-Structure | Google_Find_Increasing_inx_and_val_3_Elements.py | Google_Find_Increasing_inx_and_val_3_Elements.py | py | 2,121 | python | en | code | 0 | github-code | 90 |
461759769 | import os
import pygame
import scene
import config
import common
import group
import title_sprite
import game
import intro
import common
import pytweener
import menu
from sprite import Sprite
class Presents(scene.Scene):
    """Intro scene: shows the gcoop logo and then the text 'presents...'."""

    def __init__(self, world):
        pygame.mixer.init()
        common.play_music('intro.wav')
        scene.Scene.__init__(self, world)
        self.sprites = group.Group()
        self.background = common.load("presents/background.png", False, (config.WIDTH, config.HEIGHT))
        self.gcoop = GcoopLogo()
        self.presents = PresentsText()
        self.sprites.add(self.gcoop)
        self.draw_background()
        # Frame counter used to time the transitions in update()/on_event().
        self.counter = 0

    def draw_background(self):
        # Paint the static backdrop once and flip so it is visible immediately.
        self.world.screen.fill((255, 255, 255))
        self.world.screen.blit(self.background, (0, 0))
        pygame.display.flip()

    def update(self):
        self.sprites.update()
        self.counter += 1
        # NOTE(review): thresholds assume a fixed frame rate -- 90 frames
        # before the 'presents' text appears, >200 before auto-advancing.
        if self.counter == 90:
            self.presents.start()
            self.sprites.add(self.presents)
        elif self.counter > 200:
            self.go_to_intro_scene()

    def draw(self, screen):
        self.sprites.clear(screen, self.background)
        pygame.display.update(self.sprites.draw(screen))

    def on_event(self, event):
        # Ignore input during the first 50 frames to avoid accidental skips.
        if self.counter > 50:
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.go_to_intro_scene()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.go_to_menu_inmediately()
                else:
                    self.go_to_intro_scene()

    def go_to_menu_inmediately(self):
        # ESC skips the whole intro and jumps straight to the menu.
        new_scene = menu.Menu(self.world)
        self.world.change_scene(new_scene)

    def go_to_intro_scene(self):
        new_scene = intro.Intro1(self.world)
        self.world.change_scene(new_scene)
class PresentsText(Sprite):
    """Sprite for the text that reads 'presents'."""

    def __init__(self):
        Sprite.__init__(self)
        self.image = common.load('presents/presents.png', False, (config.WIDTH * 0.3, 0))
        self.rect = self.image.get_rect()
        self.rect.centerx = config.WIDTH / 2
        self.rect.y = config.HEIGHT * 0.8
        # Fully transparent until start() launches the fade-in tween.
        self.alpha = 0
        self.update()

    def start(self):
        # Fade the text in linearly over half a second.
        common.tweener.addTween(self, alpha=255, tweenTime=500,
                                tweenType=pytweener.Easing.Linear.easeNone)

    def update(self):
        # Alpha 128 is skipped -- the same pygame set_alpha workaround used
        # in GcoopLogo.update().
        if self.alpha != 128:
            self.image.set_alpha(self.alpha)
class GcoopLogo(Sprite):
    """The gcoop logo sprite, animated in size and opacity."""

    def __init__(self):
        Sprite.__init__(self)
        self.original_image = common.load('presents/gcoop.png', False, (int(config.WIDTH * 0.6), 0))
        self.image = self.original_image
        self.alpha = 0
        self.rect = self.image.get_rect()
        self.rect.centerx = config.WIDTH / 2
        self.center = self.rect.center
        self.y = config.HEIGHT * 0.1
        w, h = self.image.get_width(), self.image.get_height()
        # Grow from size 0 to the natural size with elastic easing while
        # fading in linearly.
        self.width = 0
        self.height = 0
        common.tweener.addTween(self, width=w, tweenTime=1700,
                                tweenType=pytweener.Easing.Elastic.easeInOut)
        common.tweener.addTween(self, height=h, tweenTime=1800,
                                tweenType=pytweener.Easing.Elastic.easeInOut)
        common.tweener.addTween(self, alpha=255, tweenTime=500,
                                tweenType=pytweener.Easing.Linear.easeNone)
        self.update()

    def update(self):
        self.rect.y = self.y
        new_size = (max(0, int(self.width)), max(0, int(self.height)))
        self.image = pygame.transform.scale(self.original_image, new_size)
        # Works around a pygame bug on some systems: an image with alpha at
        # exactly half (128) renders with an ugly interlaced look, so 127 is
        # used instead.
        if self.alpha != 128:
            self.image.set_alpha(self.alpha)
        else:
            self.image.set_alpha(127)
        self.rect.width = self.width
        self.rect.center = (config.WIDTH / 2, config.HEIGHT * 0.4)
| gcoop-libre/ayni | src/presents.py | presents.py | py | 4,059 | python | en | code | 5 | github-code | 90 |
29654713081 |
# Find the most frequent item in a list (part a) and the majority element --
# an item occurring at least n/2 + 1 times -- (part b).
#
# Fixes over the original scratch notes: the bare ``return`` statements sat
# outside any function (SyntaxError), the counting dict was built as ``HT``
# but later read through the undefined name ``t``, and ``n`` was never bound.


def most_frequent(A):
    """Return the item of *A* with the highest count (ties: the later-seen
    item wins, matching the original ``>=`` comparison), or None if empty."""
    HT = {}
    for item in A:
        if item in HT:
            HT[item] += 1
        else:
            HT[item] = 1
    item = None
    count = 0
    for key, c in HT.items():
        if c >= count:
            count = c
            item = key
    return item


def majority_element(A):
    """Return the item occurring at least len(A)/2 + 1 times, else None."""
    candidate = most_frequent(A)
    if candidate is not None and A.count(candidate) >= len(A) / 2 + 1:
        return candidate
    return None
# go[i] : index of the square that follows square i.
go = [0] * 33
score = [0] * 33
# Squares 0..21 model the outer track (the case where no blue-square
# shortcut is taken); index 21 is the finish square.
for i in range(21):
    go[i] = i + 1
go[21] = 21 # finish square loops to itself
go[22], go[23], go[24] = 23, 24, 25
go[25], go[26], go[27] = 26, 27, 20
go[28], go[29] = 29, 25
go[30], go[31], go[32] = 31, 32, 25
# Score table matching the board layout encoded above.
for i in range(21):
    score[i] = i * 2
score[22], score[23], score[24] = 13, 16, 19
score[25], score[26], score[27] = 25, 30, 35
score[28], score[29] = 22, 24,
score[30], score[31], score[32] = 28, 27, 26
# Entry points of the three blue-square shortcut branches.
branch = dict()
branch[5] = 22
branch[10] = 28
branch[15] = 30
# Try all 4^10 horse choices with DFS and keep the best total score.
def pick(n, ans):
    global max_ans
    # All ten dice rolls used: record the best answer seen.
    if n == 10:
        max_ans = max(max_ans, ans)
        return
    for i in range(4):
        x = horse_pos[i]
        backup_x = horse_pos[i]
        move_cnt = dice[n]
        # Standing on a blue square: the move must take the shortcut branch.
        if x == 5 or x == 10 or x == 15:
            x = branch[x]
            # Entering the branch already consumed one step.
            move_cnt -= 1
        # On the outer track go[i] == i + 1, so the move is plain arithmetic;
        # on branch squares (index > 21) step through go[] instead.
        if x + move_cnt <= 21:
            x += move_cnt
        else:
            for _ in range(move_cnt):
                x = go[x]
        # A horse cannot land on an occupied square unless it is the finish.
        if board_temp[x] and x != 21:
            continue
        board_temp[backup_x] = 0
        board_temp[x] = 1
        horse_pos[i] = x
        pick(n+1, ans+score[x])
        # Undo the move (backtracking).
        board_temp[backup_x] = 1
        board_temp[x] = 0
        horse_pos[i] = backup_x
dice = list(map(int, input().split()))
# Current square index of each of the four horses.
horse_pos = [0] * 4
# Occupancy of each square (1 if some horse is there).
board_temp = [0] * 33
max_ans = 0
pick(0, 0)
print(max_ans)
43945324626 | import json
import requests
# Query the Foursquare venues search API for 'george howell' near the given
# Boston coordinates, returning at most one venue.
url = 'https://api.foursquare.com/v2/venues/search'
params = dict(client_id='PASTE_YOUR_KEY_HERE',
              client_secret='PASTE_YOUR_SECRET_HERE',
              v='20180323',
              ll='42.3495694,-71.0836727',
              query='george howell',
              limit=1)
# NOTE(review): this request runs at *import* time as well, not only when
# executed as a script -- confirm that is intended.
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)

# If this file is called as a standalone program:
if __name__ == '__main__':
    # Run a sample location for debugging
    print(data)
    print('\n Venue name: {} \n Venue address: {}'
          .format(data['response']['venues'][0]['name'],
                  data['response']['venues'][0]['location']['address']))
| br3ndonland/udacity-fsnd | 4-web-apps/javascript-ajax-apis/foursquare/foursquare-explore.py | foursquare-explore.py | py | 704 | python | en | code | 75 | github-code | 90 |
34406148930 | from setuptools import setup, find_packages
# Short description doubles as the PyPI long description.
README = 'provide --dry-run functionality for your application'

# No runtime dependencies; pytest is exposed through the 'testing' extra.
requires = []
tests_require = [ 'pytest', ]

setup(name='dryable',
      version='1.2.0',
      description=README,
      long_description=README,
      url='https://github.com/haarcuba/dryable',
      author='Yoav Kleinberger',
      author_email='haarcuba@gmail.com',
      keywords='subprocess',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      extras_require={
          'testing': tests_require,
      },
      install_requires=requires,
      entry_points={},
      classifiers = [
          "Programming Language :: Python :: 3",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ]
      )
| haarcuba/dryable | setup.py | setup.py | py | 863 | python | en | code | 41 | github-code | 90 |
36664406465 | import numpy as np
import matplotlib.pyplot as plt
'''
find the minimum point by using simulated annealing method
'''
def obj_function(x):
    """Objective f(x) = x^3 - 60*x^2 - 4*x + 6 that the annealer minimizes."""
    return x ** 3 - 60 * x ** 2 - 4 * x + 6
# plot
# x = np.linspace(0, 100, 1000)
# y = obj_function(x)
# plt.plot(x, y)
# plt.show()
if __name__ == '__main__':
    T = 1000  # initial temperature
    Tmin = 10  # minimum temperature (stopping condition)
    x = np.random.uniform(low=0, high=100)
    k = 50  # iterations of the inner loop per temperature
    y = 0  # current objective value
    t = 0  # outer-loop iteration counter
    delta = 0.99  # temperature decay rate
    while T > Tmin:
        for i in range(k):
            y = obj_function(x)
            # Propose a new x; the step size shrinks as T cools down.
            xNew = x + np.random.uniform(low=-0.055, high=0.055) * T
            if 0 <= xNew <= 100:
                yNew = obj_function(xNew)
                if yNew < y:
                    x = xNew
                else:
                    p = np.exp(-(yNew - y) / T)  # acceptance probability
                    r = np.random.uniform(low=0, high=1)
                    # Metropolis rule: occasionally accept a worse point.
                    if p > r:
                        x = xNew
        t += 1
        T *= delta
        # NOTE(review): y is the objective of x from the *start* of the last
        # inner iteration, so it can lag one accepted move behind x.
        print('No. %d: x = %f, y = %f' % (t, x, y))
19305761220 | import os
import sys
import logging
import numpy as np
import h5py
import scipy.io
class HRTF(object):
    """Base container for a head-related transfer function data set.

    Holds the per-position impulse responses plus the grid of measurement
    positions (elevations / azimuths / distances) they belong to; subclasses
    are expected to fill these in.
    """

    def __init__(self, nbChannels, samplingRate, maxLength=None):
        self.nbChannels = nbChannels
        self.elevations = None
        self.azimuths = None
        self.distances = None
        self.impulses = None
        self.channels = None
        self.samplingRate = samplingRate
        # Fix: maxLength was accepted but silently discarded; keep it so
        # callers/subclasses can bound the impulse-response length.
        self.maxLength = maxLength
class CipicHRTF(HRTF):
    """HRTF loaded from the CIPIC database, either a .mat or a SOFA file."""

    def __init__(self, filename, samplingRate):
        # NOTE(review): the samplingRate argument is ignored -- the parent is
        # always initialized with the CIPIC rate of 44100 Hz. Confirm callers
        # never pass anything else.
        super(CipicHRTF, self).__init__(nbChannels=2,
                                        samplingRate=44100.0)
        self.filename = filename
        if self.filename.split('.')[-1] == 'mat':
            try:
                # CIPIC .mat grid: 50 elevations x 25 azimuths, flattened in
                # azimuth-major order to match the impulse layout below.
                elevations_vals = np.linspace(-45, 230.625, num=50)
                azimuths_vals = np.concatenate(
                    ([-80, -65, -55], np.linspace(-45, 45, num=19), [55, 65, 80]))
                self.elevations = []
                self.azimuths = []
                for i in range(len(azimuths_vals)):
                    for j in range(len(elevations_vals)):
                        self.azimuths.append(azimuths_vals[i])
                        self.elevations.append(elevations_vals[j])
                self.elevations = np.array(self.elevations)
                self.azimuths = np.array(self.azimuths)
                self.impulses = self._loadImpulsesFromFileMat()
                self.channels = ['left', 'right']
            except FileNotFoundError as err:
                # NOTE(review): on a missing file the object is left partially
                # initialized (positions/impulses unset) -- confirm intended.
                print('File ' + filename + ' not found')
                print(err)
        elif self.filename.split('.')[-1] == 'sofa':
            try:
                self.impulses = self._loadImpulsesFromFileSofa()
                self.channels = ['left', 'right']
                self.elevations, self.azimuths, self.distances = self._loadPositionsFromFileSofa()
                self.elevations, self.azimuths = np.round(verticalPolarToInterauralPolarCoordinates(self.elevations, self.azimuths), 3)
            except FileNotFoundError as err:
                print(err)
                print('File ' + filename + ' not found')
        else:
            print('File',self.filename.split('.')[-1],'not supported. Only mat and sofa are supported.')

    def _loadImpulsesFromFileMat(self):
        elevations_vals = np.linspace(-45, 230.625, num=50)
        azimuths_vals = np.concatenate(
            ([-80, -65, -55], np.linspace(-45, 45, num=19), [55, 65, 80]))

        # Load CIPIC HRTF data
        cipic = scipy.io.loadmat(self.filename)
        hrirLeft = np.transpose(cipic['hrir_l'], [2, 0, 1])
        hrirRight = np.transpose(cipic['hrir_r'], [2, 0, 1])

        # Store impulse responses in time domain,
        # shaped (positions, channels, samples).
        N = len(hrirLeft[:, 0, 0])
        impulses = np.zeros((len(azimuths_vals)*len(
            elevations_vals), self.nbChannels, N))
        count = 0
        for i in range(len(azimuths_vals)):
            for j in range(len(elevations_vals)):
                impulses[count, 0, :] = hrirLeft[:, i, j]
                impulses[count, 1, :] = hrirRight[:, i, j]
                count += 1
        return impulses

    def _loadImpulsesFromFileSofa(self):
        # Load CIPIC HRTF data
        # NOTE(review): Dataset.value is the deprecated pre-3.0 h5py API
        # (removed in h5py 3.x) -- confirm the pinned h5py version.
        impulses = np.array(h5py.File(self.filename,'r')["Data.IR"].value.tolist())
        return impulses

    def _loadPositionsFromFileSofa(self):
        # Load CIPIC HRTF data
        positions = np.array(h5py.File(self.filename,'r')["SourcePosition"].value.tolist())
        azimuths = positions[:,0]
        elevations = positions[:,1]
        distance = positions[:,2]
        return elevations, azimuths, distance

    def setFileImpulses(self, impulses):
        # Write the given impulse responses back into the SOFA file in place.
        try:
            hrtf = h5py.File(self.filename,'a')
            hrtf["Data.IR"][:] = impulses[:]
            hrtf.close()
        except FileNotFoundError as err:
            print(err)

    def setFilePositions(self, elevations, azimuths):
        # Write the given source positions back into the SOFA file in place.
        try:
            hrtf = h5py.File(self.filename,'a')
            hrtf["SourcePosition"][:,0] = azimuths
            hrtf["SourcePosition"][:,1] = elevations
            hrtf.close()
        except FileNotFoundError as err:
            print(err)
def interauralPolarToVerticalPolarCoordinates(elevations, azimuths):
    """Convert interaural-polar angles (degrees) to vertical-polar angles.

    Both arguments may be scalars or array-likes; the result is a pair of
    1-d numpy arrays (elevations, azimuths).  Output azimuths below -46
    degrees are wrapped upward by 360.
    """
    elevations = np.atleast_1d(elevations)
    azimuths = np.atleast_1d(azimuths)
    # Interaural-polar -> 3-D cartesian coordinates on the unit sphere.
    cos_az = np.cos(azimuths * np.pi / 180.0)
    x = cos_az * np.cos(elevations * np.pi / 180.0)
    y = np.sin(azimuths * np.pi / 180.0) * -1.0
    z = cos_az * np.sin(elevations * np.pi / 180.0)
    # Sanity check: every point must lie on the unit sphere.
    assert np.allclose(x**2 + y**2 + z**2, np.ones_like(elevations))
    # Cartesian -> vertical-polar coordinates.
    azimuths = np.arctan2(-y, x) * 180.0 / np.pi
    elevations = np.arcsin(z) * 180.0 / np.pi
    azimuths[azimuths < -46] += 360.0
    return elevations, azimuths
def verticalPolarToInterauralPolarCoordinates(elevation, azimuths):
    """Convert vertical-polar angles (degrees) to interaural-polar angles.

    Accepts scalars or array-likes and returns a pair of 1-d numpy
    arrays (elevation, azimuths).  Output elevations below -46 degrees
    are wrapped upward by 360.
    """
    # Promote scalars to 1-d arrays so the boolean-mask wrap below works
    # (a scalar input previously raised a TypeError); this also mirrors
    # the sibling interauralPolarToVerticalPolarCoordinates.
    elevation = np.atleast_1d(elevation)
    azimuths = np.atleast_1d(azimuths)
    # Vertical-polar -> 3-D cartesian coordinates on the unit sphere.
    x = np.cos(elevation * np.pi / 180.0) * np.sin(azimuths * np.pi / 180.0)
    y = np.cos(elevation * np.pi / 180.0) * np.cos(azimuths * np.pi / 180.0)
    z = np.sin(elevation * np.pi / 180.0) * 1.0
    # Sanity check: every point must lie on the unit sphere.
    assert np.allclose(x**2 + y**2 + z**2, np.ones_like(elevation))
    # Cartesian -> interaural-polar coordinates.
    azimuths = np.arcsin(x) * 180.0 / np.pi
    elevation = np.arctan2(z, y) * 180.0 / np.pi
    elevation[elevation < -46] += 360.0
    return elevation, azimuths
def verticalPolarToCipicCoordinates(elevation, azimut):
    """Convert vertical-polar angles (degrees) to the CIPIC convention.

    First maps to interaural-polar coordinates, then re-wraps the
    elevation into (-180, 180] and shifts anything below -90 up by 360.
    """
    elevation, azimut = verticalPolarToInterauralPolarCoordinates(
        elevation, azimut)
    # atan2(sin, cos) canonicalises the elevation into (-180, 180].
    elevation = np.arctan2(np.sin(elevation * np.pi / 180),
                           np.cos(elevation * np.pi / 180)) * 180.0 / np.pi
    # Shift values below -90 into the CIPIC range; handle both array and
    # scalar results.
    if isinstance(elevation, np.ndarray):
        elevation[elevation < -90.0] += 360.0
    elif elevation < -90:
        elevation += 360.0
    return elevation, azimut
def get_hrtf_mat(hrtf_folder, num):
    """Load CIPIC subject `num`'s HRTF from its .mat file at 44.1 kHz.

    The subject number is zero-padded to three digits (e.g. 3 -> '003',
    21 -> '021', 165 -> '165') to match the CIPIC directory layout.
    Assumes num is a non-negative subject id.
    """
    # zfill(3) replaces the old double "prepend '0'" dance and the
    # redundant str(num_str) conversion.
    subject_id = str(num).zfill(3)
    return CipicHRTF(hrtf_folder + '/subject_' + subject_id + '/hrir_final.mat', 44100.0)
def get_hrtf_sofa(hrtf_folder, num):
    """Load CIPIC subject `num`'s HRTF from its .sofa file at 44.1 kHz.

    The subject number is zero-padded to three digits (e.g. 3 -> '003'),
    matching get_hrtf_mat.  Assumes num is a non-negative subject id.
    """
    subject_id = str(num).zfill(3)
    return CipicHRTF(hrtf_folder + '/subject_' + subject_id + '.sofa', 44100.0)
def create_cipic_hrtf(template_filename, filename, impulses, elevations, azimuths):
    """Create a new SOFA HRTF file from a template.

    Copies every top-level object from template_filename into filename,
    then overwrites the impulse responses and the source positions
    (converted from interaural-polar to vertical-polar degrees).
    """
    try:
        # Context managers close both HDF5 handles even on error; the
        # template handle was previously never closed at all.
        with h5py.File(template_filename, 'r') as reference, \
                h5py.File(filename, 'w') as hrtf:
            for key in list(reference.keys()):
                reference.copy(key, hrtf)
            elevations, azimuths = interauralPolarToVerticalPolarCoordinates(elevations, azimuths)
            hrtf["Data.IR"][:] = impulses[:]
            hrtf["SourcePosition"][:, 0] = azimuths
            hrtf["SourcePosition"][:, 1] = elevations
    except FileNotFoundError as err:
        print(err)
| codyjhsieh/HRTFCNN | utils/hrtf.py | hrtf.py | py | 7,229 | python | en | code | 13 | github-code | 90 |
from PIL import Image

# Load the source image.
img = Image.open('000100.png')

# Get the image dimensions and create a blank RGB canvas of the same size.
width, height = img.size
new_img = Image.new('RGB', (width, height))

# Copy the raw pixel values into the new image.  putdata(getdata())
# transfers exactly the same values as the original per-pixel
# getpixel/putpixel double loop, but in a single C-level pass instead of
# width*height Python calls.
new_img.putdata(list(img.getdata()))

# Upscale to full HD and save.
# NOTE(review): resize uses Pillow's default resampling filter — confirm
# that is intended for this dataset.
new_img = new_img.resize((1920, 1080))
new_img.save('000100e_new.png')
| niushuqing123/Undergraduate-Final-Year-Project | 数据1/make_new_img单张.py | make_new_img单张.py | py | 497 | python | en | code | 0 | github-code | 90 |
40936263650 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datasist.structdata import detect_outliers
from category_encoders.binary import BinaryEncoder
## other
#from imblearn.over_sampling import SMOTE
## sklearn -- preprocessing
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, OneHotEncoder, LabelEncoder, OrdinalEncoder, PolynomialFeatures
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn_features.transformers import DataFrameSelector
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, cross_validate, GridSearchCV
from sklearn.impute import SimpleImputer
from sklearn.feature_selection import SelectKBest, mutual_info_regression
## sklearn -- models
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.tree import DecisionTreeRegressor
## skelarn -- metrics
from sklearn.metrics import f1_score, accuracy_score, r2_score, mean_squared_error
## ensemble models
from sklearn.ensemble import VotingRegressor, AdaBoostRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
import re
# read dataset
# Any of the listed placeholder tokens in the CSV is parsed as NaN.
df = pd.read_csv("houses.csv", na_values=['Unknown', '', 'na', 'nan', 'NA', 'NAN', '-' ])
# Drop index column and description column
df.drop(['Ad List', 'description'], axis=1, inplace=True)
# trim and replace space inside columns names with _
# NOTE(review): .str.strip() is applied twice -- the second call is redundant.
df.columns = df.columns.str.lower().str.strip().str.strip().str.replace(' ', '_')
df.rename(columns={'#_of_floors':'number_of_floors'}, inplace=True)
# drop duplicates
df.drop_duplicates(inplace= True)
df.reset_index(inplace= True, drop= True)
def fix_property_size(value):
    """Strip the 'sq.ft.' unit suffix from a property-size entry.

    Non-string values (e.g. float NaN from pandas) pass through
    unchanged so the subsequent .astype(float) can handle them; the
    original crashed on missing values.  Mirrors fix_price's handling.
    """
    if not isinstance(value, str):
        return value
    return value.replace('sq.ft.', '').strip()
# Strip the unit suffix so the column can be cast to float.
df['property_size'] = df['property_size'].apply(fix_property_size).astype(float)
# drop category column
df.drop(['category'], axis=1, inplace=True)
# convert price to float
def fix_price(value):
    """Convert a raw price entry to float.

    Already-numeric entries (e.g. NaN) pass straight through; string
    prices such as 'RM 1 250 000' have the currency marker and spaces
    removed before conversion.
    """
    if isinstance(value, float):
        return value
    cleaned = value.replace(' ', '').replace('RM', '')
    return float(cleaned)
df['price'] = df['price'].apply(fix_price)
# convert discrete fields to Int64
# 'Int64' (capital I) is pandas' nullable integer dtype, so missing
# values survive the cast instead of forcing the column to float64.
df['bedroom'] = df['bedroom'].astype('Int64')
df['bathroom'] = df['bathroom'].astype('Int64')
df['completion_year'] = df['completion_year'].astype('Int64')
df['number_of_floors'] = df['number_of_floors'].astype('Int64')
df['total_units'] = df['total_units'].astype('Int64')
df['parking_lot'] = df['parking_lot'].astype('Int64')
# convert Firm_Number to int64
def fix_firm_number(value):
    """Normalise a raw firm-number entry to a float.

    Handles the input shapes seen in the data:
      * numeric values (incl. NaN delivered by pandas) return unchanged
        -- isinstance(float) also covers np.float64, which subclasses
        float, so the original np.float64-only fast path is preserved;
      * strings with an 'E' registration prefix ('E123') have every 'E'
        stripped before conversion;
      * other strings are converted directly (so '1E5' still parses as
        scientific notation, as before).
    Anything unparseable or of another type becomes NaN.  The original
    bare ``except:`` is narrowed to the conversion error actually raised.
    """
    if isinstance(value, float):
        return value
    if isinstance(value, str):
        try:
            if value.startswith('E'):
                return np.float64(value.replace('E', ''))
            return np.float64(value)
        except ValueError:
            return np.nan
    return np.nan
df['firm_number'] = df['firm_number'].apply(fix_firm_number)
# create column for each nearby service
# Each flag is True when the corresponding free-text column is non-null.
df['nearby_highway'] = df['highway'].isna() == False
df['nearby_hospital'] = df['hospital'].isna() == False
# NOTE(review): this assignment is overwritten three lines below by the
# 'railway_station'-based one -- possibly both sources should be OR'd
# together like 'nearby_mall'; confirm intent.
df['nearby_railway_station'] = df['nearby_railway_station'].isna() == False
df['nearby_mall'] = (df['nearby_mall'].isna() == False) | (df['mall'].isna() == False)
df['nearby_railway_station'] = df['railway_station'].isna() == False
# NOTE(review): same issue here -- this value is overwritten by the
# 'school'-based assignment below; confirm whether an OR was intended.
df['nearby_school'] = df['nearby_school'].isna() == False
df['nearby_bus_stop'] = df['bus_stop'].isna() == False
df['nearby_park'] = df['park'].isna() == False
df['nearby_school'] = df['school'].isna() == False
df.drop(columns=['highway', 'hospital', 'mall', 'railway_station', 'bus_stop', 'park', 'school'], axis=1, inplace=True)
# split facilities into separate columns
def split_facilities_into_separate_columns(df):
    """One-hot encode the comma-separated 'facilities' column in place.

    For every facility listed in a row, a boolean 'facility_<name>'
    column is created on first sight (initialised to False everywhere)
    and set to True for that row.  Rows with a non-string value
    (e.g. NaN) are skipped.  Returns the mutated DataFrame.
    """
    for row_idx in df.index:
        raw = df.loc[row_idx, 'facilities']
        if not isinstance(raw, str):
            continue
        for facility in raw.split(','):
            column = 'facility_' + facility.strip()
            if column not in df.columns:
                # First occurrence: initialise the flag for all rows.
                df.loc[:, column] = False
            df.loc[row_idx, column] = True
    return df
df = split_facilities_into_separate_columns(df)
# trim and replace space inside columns names with _
df.columns = df.columns.str.lower().str.strip().str.strip().str.replace(' ', '_')
# NOTE(review): 'facility_10' appears to be an artifact column from
# malformed facility strings -- confirm before relying on this drop.
df.drop(['facility_10'], axis=1, inplace=True)
df.drop(['facilities'], axis=1, inplace=True)
# split ren_number into two columns
def split_agent_number(value):
    """Split a raw agent field such as 'REN 12345' into (type, number).

    Numeric input (usually float NaN for missing entries) maps to
    (NaN, value).  Returns a length-2 pandas Series so that ``apply``
    expands it into the 'ren_type' / 'ren_number' columns.
    """
    if isinstance(value, float):
        return pd.Series([np.nan, float(value)])
    parts = value.split(' ')
    return pd.Series([parts[0], float(parts[1])])
df[['ren_type', 'ren_number']] = df['ren_number'].apply(split_agent_number)
df['ren_number'] = df['ren_number'].astype('Int64')
# extract city, state from address
# Reference lists of Malaysian states and cities used to recognise
# address components below.
states = ['Johor', 'Kedah', 'Kelantan', 'Malacca', 'Negeri Sembilan', 'Pahang', 'Penang',
          'Perak', 'Perlis', 'Sabah', 'Sarawak', 'Selangor', 'Terengganu', 'Kuala Lumpur', 'Labuan', 'Putrajaya']
cities = ['Kajang', 'Seberang Perai', 'Subang Jaya', 'Klang', 'Johor Bahru', 'Shah Alam', 'George Town', 'Petaling Jaya',
          'Selayang', 'Ipoh', 'Seremban', 'Iskandar Puteri', 'Kuantan', 'Sungai Petani', 'Ampang Jaya', 'Kota Kinabalu',
          'Melaka City', 'Sandakan', 'Alor Setar', 'Tawau', 'Batu Pahat', 'Kota Bharu', 'Kuala Terengganu', 'Kuching',
          'Sepang', 'Kulim', 'Muar', 'Pasir Gudang', 'Kuala Langat', 'Kulai', 'Kangar',
          'Kuala Selangor', 'Padawan', 'Miri', 'Manjung', 'Hulu Selangor', 'Taiping', 'Bintulu', 'Kubang Pasu', 'Kluang',
          'Pasir Mas', 'Lahad Datu', 'Alor Gajah', 'Kemaman', 'Hang Tuah Jaya', 'Tumpat', 'Pontian', 'Teluk Intan', 'Sibu',
          'Temerloh', 'Semporna', 'Kerian', 'Tangkak', 'Penampang', 'Kota Samarahan', 'Ketereh', 'Dungun', 'Bachok',
          'Besut', 'Segamat', 'Keningau', 'Tanah Merah', 'Papar', 'Ampang', 'Setapak', 'Bayan Baru', 'Puchong', 'Wangsa Maju',
          'Simpang Ampat', 'Cheras', 'Semenyih', 'Iskandar Puteri', 'Bangi', 'Ayer Keroh', 'Setia Alam', 'Sentul', 'Cyberjaya',
          'Seri Kembangan', 'Gelugor', 'Skudai', 'Ayer Itam', ' Tanjung Bungah', 'Rawang', 'Gelang Patah', 'Nusajaya', 'Damansara Perdana']
def extract_address_fields(value):
    """Extract (state, city) from a comma-separated address string.

    Splits the address on commas and intersects the trimmed parts with
    the module-level ``states`` and ``cities`` reference lists.  Always
    returns a length-2 pandas Series [state, city] with NaN for missing
    parts, so the caller's ``df[['state', 'city']] = ...`` assignment
    stays aligned.  (The original returned a variable-length Series,
    which put a lone city into the 'state' column.)
    """
    if not isinstance(value, str):
        return pd.Series([np.nan, np.nan])
    address_parts = [part.strip() for part in value.split(',')]
    found_states = set(states) & set(address_parts)
    found_cities = set(cities) & set(address_parts)
    # When several parts match, an arbitrary one is kept (same as the
    # original list(set & set)[0] behaviour).
    state = next(iter(found_states)) if found_states else np.nan
    city = next(iter(found_cities)) if found_cities else np.nan
    return pd.Series([state, city])
df[['state', 'city']] = df['address'].apply(extract_address_fields)
df.drop(['address'], axis=1, inplace=True)
# detect outliers of property_size
# Outlying values are replaced by the column median rather than dropped.
cols = np.array(['property_size'])
for col in cols:
    outliers_indices = detect_outliers(df, 0, [col])
    col_median = df[col].median()
    df.loc[outliers_indices, col] = col_median
# take log of the price
# Log-transform the target to reduce skew.
df['price'] = np.log(df['price'])
# split the dataset
## Features and target
X = df.drop(columns=['price'], axis=1)
y = df['price']
## to full train and test
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, shuffle=True, random_state=50)
#preprocessing
## Slice cols
# Group column names by dtype so each group gets its own sub-pipeline.
int_cols = X_train.select_dtypes(include=['Int64']).columns.tolist()
float_cols = X_train.select_dtypes(include=['float64']).columns.tolist()
bool_cols = X_train.select_dtypes(include=['bool']).columns.tolist()
#small_categ_cols = ['tenure_type', 'property_type', 'floor_range', 'land_title', 'firm_type', 'ren_type', 'state']
# Low-cardinality categoricals are one-hot encoded; the rest are
# ordinal-encoded below.
small_categ_cols = ['tenure_type', 'property_type', 'floor_range', 'land_title', 'ren_type', 'city', 'state']
categ_cols = X_train.select_dtypes(include=['object']).columns.tolist()
other_categ_cols = list(set(categ_cols) - set(small_categ_cols))
print(X_train.columns)
print('*'*10)
#pd.set_option('display.max_columns', None)
#print(X_train.iloc[0:2, :])
## Int
# Nullable-integer columns: impute with the mode.
int_pipline = Pipeline(steps=[
    ('selector', DataFrameSelector(int_cols)),
    ('imputer', SimpleImputer(strategy='most_frequent')),
    # ('scaler', MinMaxScaler())
])
## Float
# Float columns: impute with the median.
float_pipline = Pipeline(steps=[
    ('selector', DataFrameSelector(float_cols)),
    ('imputer', SimpleImputer(strategy='median')),
    # ('scaler', MinMaxScaler())
])
## Bool
# Boolean flags: ordinal-encode False/True to 0/1.
bool_pipline = Pipeline(steps=[
    ('selector', DataFrameSelector(bool_cols)),
    # ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', OrdinalEncoder())
    #,
    #('scaler', MinMaxScaler())
])
## Categorical
small_categ_pipline = Pipeline(steps=[
    ('selector', DataFrameSelector(small_categ_cols)),
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', OneHotEncoder( sparse_output=False, handle_unknown='ignore')),
    #('scaler', MinMaxScaler())
])
# High-cardinality categoricals: unseen categories encode to -1.
other_categ_pipline = Pipeline(steps=[
    ('selector', DataFrameSelector(other_categ_cols)),
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value = -1)),
    #('encoder', BinaryEncoder(drop_invariant=True))
    #,
    #('scaler', MinMaxScaler())
])
## Combine all
# Concatenate the per-dtype feature blocks side by side.
all_pipeline = FeatureUnion(transformer_list=[
    ('int', int_pipline),
    ('float', float_pipline),
    ('bool', bool_pipline),
    ('small_categ', small_categ_pipline),
    ('other_categ', other_categ_pipline)
])
# Keep the top 90% of features by mutual information, then robust-scale.
all_pipeline = Pipeline(steps=[('pipeline', all_pipeline),
                               ('FeatureSelection', SelectKBest(mutual_info_regression, k= int(0.9 * X_train.shape[1]))),
                               ('Scaler', RobustScaler())])
## apply
_ = all_pipeline.fit(X_train, y_train)
def process_new(X_new):
    """Preprocess a single user-supplied record with the fitted pipeline.

    Args:
        X_new (list): one raw record, ordered like ``X_train.columns``.

    Returns:
        The 2-D processed feature row produced by
        ``all_pipeline.transform``.
    """
    df_new = pd.DataFrame([X_new], columns=X_train.columns)
    print(df_new)
    # Coerce every column to the dtype seen at fit time.  Boolean
    # features arrive as the strings 'True'/'False', so they are
    # compared against 'True' rather than cast.
    for col in X_train:
        if X_train[col].dtype == bool:
            df_new[col] = df_new[col] == 'True'
        else:
            df_new[col] = df_new[col].astype(X_train[col].dtype)
    print(f'df_new[developer] = {df_new["developer"].loc[0]}')
    # Run the fitted preprocessing pipeline on the single-row frame.
    X_processed = all_pipeline.transform(df_new)
    return X_processed
| aahmedsherif/FinalProject | utils.py | utils.py | py | 15,381 | python | en | code | 0 | github-code | 90 |
72201288298 | # Easy
# You are given the heads of two sorted linked lists list1 and list2.
# Merge the two lists in a one sorted list. The list should be made by splicing together the nodes of the first two lists.
# Return the head of the merged linked list.
#
#
#
# Example 1:
# Input: list1 = [1,2,4], list2 = [1,3,4]
# Output: [1,1,2,3,4,4]
# Constraints:
#
# The number of nodes in both lists is in the range [0, 50].
# -100 <= Node.val <= 100
# Both list1 and list2 are sorted in non-decreasing order.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
        """Merge two sorted linked lists into one sorted list by
        relinking the existing nodes.

        Time O(n + m), space O(1).  The head-selection and weaving
        special cases of the original are replaced by the standard
        dummy-head merge, which is shorter and easier to verify.
        """
        # The dummy node removes the "which list supplies the head?"
        # special case entirely.
        dummy = ListNode()
        tail = dummy
        while list1 and list2:
            if list1.val <= list2.val:
                tail.next = list1
                list1 = list1.next
            else:
                tail.next = list2
                list2 = list2.next
            tail = tail.next
        # At most one list is non-empty now; append the remainder.
        tail.next = list1 if list1 else list2
        return dummy.next
## T = O(n + m); S = O(1) | ArmanTursun/coding_questions | LeetCode/Easy/21. Merge Two Sorted Lists/21. Merge Two Sorted Lists.py | 21. Merge Two Sorted Lists.py | py | 1,451 | python | en | code | 0 | github-code | 90 |
class Node:
    """A B+-tree node: sorted keys plus child/record pointers."""
    def __init__(self, keys=None, leaf=False, pointers=None, parent=None):
        self.leaf = leaf
        # Use None sentinels instead of a mutable default argument:
        # the original keys=[] list was shared across every Node that
        # relied on the default.
        self.keys = [] if keys is None else keys
        self.pointers = [] if pointers is None else pointers
        # Parent link used by split/merge; accepting it here (default
        # None) supports the `pointers=`/`parent` usage elsewhere in
        # the tree code while remaining backward compatible.
        self.parent = parent
class BPlusTree:
    """Simplified B+-tree over comparable keys with branching parameter t.

    NOTE(review): several invariants look incomplete -- split_node passes
    a `pointers` keyword and reads/writes a `parent` attribute, so it
    requires Node to support both; verify Node's __init__ signature.
    delete() returns either a string or False, which is an inconsistent
    API.  Behaviour beyond a single root node is unverified.
    """
    def __init__(self, t):
        # An empty tree has no root; t bounds node size (2*t - 1 keys).
        self.root = None
        self.t = t
    def insert(self, key):
        # If the tree is empty, create a new leaf root holding the key.
        if not self.root:
            self.root = Node([key], leaf=True)
            return
        # Find the leaf node where the value belongs.
        current_node = self.find_leaf_node(key)
        # If the node is already full, split it into two nodes.
        if len(current_node.keys) == 2 * self.t - 1:
            self.split_node(current_node)
            # After splitting, locate the target leaf again.
            current_node = self.find_leaf_node(key)
        # Add the key to the leaf, keeping it ordered.
        current_node.keys.append(key)
        current_node.keys.sort()
    def delete(self, key):
        # Find the leaf node that should contain the key.
        leaf_node = self.find_leaf_node(key)
        # If the key is not present, report an error.
        if key not in leaf_node.keys:
            return "Key not found"
        # Remove the key from the leaf.
        leaf_node.keys.remove(key)
        # If the leaf became empty and is not the root, merge it away.
        if len(leaf_node.keys) == 0 and leaf_node != self.root:
            self.merge_nodes(leaf_node)
        return False
    def search(self, key):
        # Find the leaf where the key would live, then test membership.
        leaf_node = self.find_leaf_node(key)
        return key in leaf_node.keys
    def split_node(self, node):
        # Index of the node's middle key.
        middle_index = len(node.keys) // 2
        # Create two new nodes holding the keys left/right of the middle.
        left_node = Node(node.keys[:middle_index])
        right_node = Node(node.keys[middle_index + 1:])
        # Distribute the pointers between the two halves.
        if node.leaf:
            left_node.pointers = node.pointers[:middle_index + 1]
            right_node.pointers = node.pointers[middle_index + 1:]
        else:
            left_node.pointers = node.pointers[:middle_index + 1]
            right_node.pointers = node.pointers[middle_index:]
        # Re-wire the parent to point at the two new nodes.
        if node.parent:
            node_index = node.parent.pointers.index(node)
            node.parent.pointers = node.parent.pointers[:node_index] + [
                left_node, right_node] + node.parent.pointers[node_index + 1:]
            # Push the middle key up into the parent.
            node.parent.keys.append(node.keys[middle_index])
            node.parent.keys.sort()
        else:
            # No parent: the middle key becomes a new root.
            # NOTE(review): this call requires Node to accept a
            # `pointers` keyword argument.
            self.root = Node([node.keys[middle_index]],
                             pointers=[left_node, right_node])
            # Update the parents of the new children.
            left_node.parent = right_node.parent = self.root
    def merge_nodes(self, node):
        # Position of this node among its parent's children.
        node_index = node.parent.pointers.index(node)
        # Pick the sibling immediately left or right of the node.
        if node_index == 0:
            sibling_node = node.parent.pointers[node_index + 1]
        else:
            sibling_node = node.parent.pointers[node_index - 1]
        # If the sibling has fewer than t - 1 keys, merge into it.
        if len(sibling_node.keys) < self.t - 1:
            # Move the node's keys and pointers into the sibling.
            sibling_node.keys += node.keys
            sibling_node.keys.sort()
            if node.leaf:
                sibling_node.pointers += node.pointers
            else:
                sibling_node.pointers += node.pointers[1:]
            # Detach the node from its parent.
            node.parent.pointers.remove(node)
            # If the parent became empty, merge it as well.
            if len(node.parent.keys) == 0:
                self.merge_nodes(node.parent)
    def find_leaf_node(self, key):
        # Start the search at the root of the tree.
        current_node = self.root
        while not current_node.leaf:
            # Follow the child pointer for the first key greater than
            # `key`; fall through to the last pointer otherwise.
            for i in range(len(current_node.keys)):
                if key < current_node.keys[i]:
                    current_node = current_node.pointers[i]
                    break
            else:
                current_node = current_node.pointers[-1]
        # Return the leaf node reached.
        return current_node
# Smoke test: insert a few keys, confirm membership, then delete each
# key and confirm it is gone.
if __name__ == '__main__':
    tree = BPlusTree(3)
    tree.insert(5)
    tree.insert(4)
    tree.insert(7)
    tree.insert(2)
    assert tree.search(5) == True
    assert tree.search(4) == True
    assert tree.search(7) == True
    assert tree.search(2) == True
    tree.delete(5)
    assert tree.search(5) == False
    tree.delete(4)
    assert tree.search(4) == False
    tree.delete(7)
    assert tree.search(7) == False
# class BPlusTreeNode:
# # inicializa um novo nó da árvore B+
# def __init__(self, is_leaf=False):
# self.is_leaf = is_leaf
# self.keys = []
# self.children = []
# # insere um novo elemento na árvore B+
# def insert(self, key, value):
# # se o nó é uma folha, basta adicionar o elemento à lista de chaves
# if self.is_leaf:
# self.keys.append((key, value))
# return
# if not self.keys:
# self.keys.append([])
# self.keys[0].append([0])
# # se o nó não é uma folha, encontre o filho apropriado para inserir o elemento
# index = 0
# while index < len(self.keys) and key > self.keys[index][0]:
# index += 1
# self.children[index].insert(key, value)
# # recupera um elemento da árvore B+
# def get(self, key):
# # se o nó é uma folha, procura o elemento na lista de chaves
# if self.is_leaf:
# for k, v in self.keys:
# if k == key:
# return v
# return None
# # se o nó não é uma folha, encontra o filho apropriado para recuperar o elemento
# index = 0
# while index < len(self.keys) and key > self.keys[index][0]:
# index += 1
# return self.children[index].get(key)
# # remove um elemento da árvore B+
# def remove(self, key):
# # se o nó é uma folha, basta remover o elemento da lista de chaves
# if self.is_leaf:
# for i, (k, v) in enumerate(self.keys):
# if k == key:
# del self.keys[i]
# return
# return
# # se o nó não é uma folha, encontra o filho apropriado para remover o elemento
# index = 0
# while index < len(self.keys) and key > self.keys[index][0]:
# index += 1
# self.children[index].remove(key)
# # imprime a árvore B+
# def __str__(self):
# # se o nó é uma folha, imprime suas chaves
# if self.is_leaf:
# return "Leaf: " + str(self.keys)
# # se o nó não é uma folha, imprime cada um de seus filhos
# result = ""
# for i, (key, value) in enumerate(self.keys):
# result += str(self.children[i]) + " " + str(key) + " "
# result += str(self.children[-1])
# return result
# if __name__ == "__main__":
# cria uma nova árvore B+
# tree = BPlusTreeNode()
# # insere alguns elementos na árvore
# tree.insert(1, "Hello")
# tree.insert(2, "World")
# tree.insert(3, "!")
# # imprime a árvore
# print(tree)
| josuelopes512/BigNumberCalculator | BPlusTreeNode.py | BPlusTreeNode.py | py | 8,136 | python | pt | code | 0 | github-code | 90 |
25677303290 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,render_to_response
from django.http import JsonResponse, HttpResponse
# Create your views here.
def home(request):
    """Render the landing page."""
    c = {}
    # render_to_response was deprecated in Django 2.0 and removed in
    # 3.0; render (already imported above) is the drop-in replacement
    # and also attaches the request to the template context.
    return render(request, 'general/home.html', c)
def calculate_significance(request):
    """Return a JSON 'significance' value computed from the four A/B
    counts supplied in the query string.

    NOTE(review): the formula below is not a standard statistical
    significance test, and variation_conversions is read but unused --
    confirm the intended computation.
    """
    control_visitors = int(request.GET['control_visitors'])
    variation_visitors = int(request.GET['variation_visitors'])
    control_conversions = int(request.GET['control_conversions'])
    variation_conversions = int(request.GET['variation_conversions'])
    significance = ((control_visitors * variation_visitors)
                    + (control_conversions * control_conversions)) / 10
    # Cap the reported value at 0.9.
    significance = min(significance, 0.9)
    return JsonResponse({'significance': str(significance)})
| wiserthanever/sigcalc | general/views.py | views.py | py | 807 | python | en | code | 0 | github-code | 90 |
# Greedy pass over the permutation: whenever P[i] equals its 1-based
# position, one operation is counted; if the next position is also a
# fixed point, the same operation clears both, so the second one is
# neutralised before it is visited.
N = int(input())
P = list(map(int, input().split()))
ans = 0
for i in range(N - 1):
    if P[i] == i + 1:
        ans += 1
        if P[i + 1] == i + 2:
            # Adjacent fixed point: mark it resolved so it is not
            # counted again on the next iteration.
            P[i + 1] = i + 1
# The last position is handled separately.
if P[-1] == N:
    ans += 1
print(ans)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03612/s226415113.py | s226415113.py | py | 234 | python | en | code | 0 | github-code | 90 |
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange nums into its next lexicographic permutation in
        place; if nums is already the highest permutation, reset it to
        the lowest (sorted) order.  Returns None.
        """
        # Find the rightmost index whose value is smaller than its
        # successor (the "pivot").
        pivot = None
        for i in range(len(nums) - 2, -1, -1):
            if nums[i] < nums[i + 1]:
                pivot = i
                break
        if pivot is None:
            # The whole array is non-increasing: wrap to sorted order.
            nums.reverse()
            return
        # Swap the pivot with the rightmost element greater than it.
        for j in range(len(nums) - 1, pivot, -1):
            if nums[j] > nums[pivot]:
                nums[j], nums[pivot] = nums[pivot], nums[j]
                break
        # After the swap the suffix is non-increasing, so reversing it
        # (O(n), replacing the original temp-list + sort) yields the
        # smallest arrangement.
        nums[pivot + 1:] = reversed(nums[pivot + 1:])
| RishabhSinha07/Competitive_Problems_Daily | 31-next-permutation/31-next-permutation.py | 31-next-permutation.py | py | 687 | python | en | code | 1 | github-code | 90 |
# The first three input tokens are accepted ('YES') exactly when they
# consist of two '5's and one '7'.
tokens = list(input().split())
fives = sum(tokens[i] == '5' for i in range(3))
sevens = sum(tokens[i] == '7' for i in range(3))
print('YES' if fives == 2 and sevens == 1 else 'NO')
38108557247 | # -*- coding: utf-8 -*-
import os
import sys
import serial
import hashlib
from binascii import *
from libs import bflb_utils
try:
from serial.tools.list_ports import comports
except ImportError:
raise exception.GetSerialPortsError(os.name)
class FileSerial(object):
    """Sends a file over a serial port using a simple framed protocol:
    a 0xF0 header frame with the file size, 0xF1 data frames, a 0xF2
    end frame, then a SHA-256 check against the device's reply."""
    def _int_to_hex(self, data):
        """Split `data` into two little-endian hex byte strings
        (low byte, high byte).

        NOTE(review): for data < 0x10, hex() yields an odd-length string
        and the [-4:] slice captures the 'x' character, producing
        invalid hex downstream -- confirm frame lengths can never be
        that small.
        """
        hex_size = hex(data).replace("0x", "0x00")[-4:]
        low_hex_size = hex_size[-2:]
        hight_hex_size = hex_size[:-2]
        return low_hex_size, hight_hex_size
    def _str_to_hex(self, data):
        """Render a bytes payload as space-separated hex byte pairs
        (e.g. b'\\x01\\x02' -> '01 02 ')."""
        message = hexlify(data)
        new_message = ""
        for i in range(0, len(message), 2):
            new_message += message[i:i + 2].decode() + " "
        return new_message
    def _get_file_hash(self, file_path):
        """Return the hex SHA-256 digest of the whole file."""
        with open(file_path, "rb") as f:
            message = f.read()
        data_sha = hashlib.sha256()
        data_sha.update(message)
        return data_sha.hexdigest()
    def open_listen(self, dev_com, baudrate, file_path, chunk_size=4096, timeout=1):
        """Locate the file-transfer COM port paired with `dev_com`,
        stream `file_path` over it in `chunk_size` frames, and verify
        the device's SHA-256 reply.  Returns True on success, False on
        any protocol or hash failure (an error is printed)."""
        sdio_file_ser_dict = {}
        file_dict = {}
        # Enumerate serial ports and pair the two interfaces exposed by
        # devices with USB PID 1D6B; `ser_value` is parsed from the
        # hardware-info string.  Pairing heuristics differ per OS.
        if sys.platform.startswith("win"):
            for p, d, h in comports():
                if not p:
                    continue
                if "PID=1D6B" in h.upper():
                    ser_value = h.split(" ")[2][4:]
                    if ser_value not in sdio_file_ser_dict:
                        sdio_file_ser_dict[ser_value] = p
                    else:
                        if "LOCATION" in h.upper():
                            file_dict[sdio_file_ser_dict[ser_value]] = p
                        else:
                            file_dict[p] = sdio_file_ser_dict[ser_value]
        elif sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
            for p, d, h in comports():
                if not p:
                    continue
                if "PID=1D6B" in h.upper():
                    ser_value = h.split(" ")[2][4:]
                    if ser_value not in sdio_file_ser_dict:
                        sdio_file_ser_dict[ser_value] = p
                    else:
                        # On POSIX the lexicographically larger device
                        # name is treated as the file port.
                        if sdio_file_ser_dict[ser_value] > p:
                            file_dict[p] = sdio_file_ser_dict[ser_value]
                        else:
                            file_dict[sdio_file_ser_dict[ser_value]] = p
        # Strip any trailing " (description)" from the chosen device name.
        if " (" in dev_com:
            sdio_dev = dev_com[:dev_com.find(" (")]
        else:
            sdio_dev = dev_com
        file_com = file_dict[sdio_dev]
        print(file_com)
        _ser = serial.Serial(file_com,
                             int(baudrate),
                             timeout=5.0,
                             xonxoff=False,
                             rtscts=False,
                             write_timeout=None,
                             dsrdtr=False)
        _ser.timeout = timeout
        # Header frame 0xF0: 4-byte little-endian file size.
        size = os.path.getsize(file_path)
        hex_size = hex(size).replace("0x", "0x0000000")
        first_message = bytes.fromhex("F0 00 04 00 {} {} {} {}".format(
            hex_size[-2:], hex_size[-4:-2], hex_size[-6:-4], hex_size[-8:-6]))
        _ser.write(first_message)
        recv_message = _ser.read(2)
        if recv_message != b"OK":
            _ser.close()
            bflb_utils.printf("Send Failed! Error 1: First send failed")
            return False
        # Data frames 0xF1: chunk length (little-endian) plus payload;
        # each frame must be acknowledged with b"OK" before the next.
        with open(file_path, "rb") as f:
            while True:
                message = f.read(chunk_size)
                if message:
                    len_mess = len(message)
                    low_middle_size, hight_middle_size = self._int_to_hex(len_mess)
                    if recv_message == b"OK":
                        recv_message = b""
                        new_message = self._str_to_hex(message)
                        middle_message = bytes.fromhex("F1 00 {} {} {}".format(
                            low_middle_size, hight_middle_size, new_message))
                        _ser.write(middle_message)
                        recv_message = _ser.read(2)
                        message = ""
                    else:
                        _ser.close()
                        bflb_utils.printf("Send Failed! Error 2: File send failed")
                        return False
                else:
                    break
        # End frame 0xF2: the device replies with b"OK" followed by a
        # 32-byte SHA-256 digest of what it received.
        recv_message = b""
        end_message = bytes.fromhex("F2 00 00 00")
        _ser.write(end_message)
        recv_message = _ser.read(66)
        if b"OK" in recv_message:
            file_sha256 = self._get_file_hash(file_path)
            if recv_message[2:].hex() == file_sha256:
                _ser.write(b"check hash")
                _ser.close()
                return True
            else:
                _ser.close()
                bflb_utils.printf("Send Failed! Error 4: Hash check failed")
                return False
        else:
            _ser.close()
            bflb_utils.printf("Send Failed! Error 3: Hash return failed")
            return False
if __name__ == "__main__":
fs = FileSerial()
fs.open_listen("/dev/ttyACM1", 2000000,
"/home/tanjiaxi/git/bouffalo_dev_cube/chips/bl602/img_create_mcu/img_if.bin")
| llamaonaskateboard/bflb-mcu-tool | bflb_mcu_tool/libs/bflb_file_serial.py | bflb_file_serial.py | py | 5,298 | python | en | code | 4 | github-code | 90 |
30954053097 | from configparser import ConfigParser
from pathlib import Path
import pytest
import shutil
import random
import string
import os
from media_library_sqlite import MediaLibrarySQLite
from main import MediaLibraryBuilder
@pytest.fixture(scope="function")
def setup_test_environment():
# 删除数据库文件
db_file = Path("media_library.db")
if db_file.exists():
db_file.unlink()
shutil.rmtree(str(Path('./a/b/c')), ignore_errors=True)
Path("./a/b/c").mkdir(parents=True, exist_ok=True)
# 创建测试所需的文件和目录
test_dir = Path("./a/b/c")
shutil.rmtree(str(test_dir), ignore_errors=True)
test_dir.mkdir(parents=True, exist_ok=True)
# 创建随机文件
file_extensions = [".mp4", ".jpg", ".txt"]
for _ in range(30):
file_name = ''.join(random.choices(string.ascii_lowercase, k=10))
file_extension = random.choice(file_extensions)
file_path = test_dir / (file_name + file_extension)
file_size = random.randint(100, 100000)
with open(file_path, "wb") as f:
f.write(os.urandom(file_size))
yield
def re_generate_directory():
    """Wipe ./a/b/c and refill it with 30 randomly named files of random size."""
    target = Path("./a/b/c")
    shutil.rmtree(target, ignore_errors=True)
    target.mkdir(parents=True, exist_ok=True)
    extensions = (".mp4", ".jpg", ".txt")
    for _ in range(30):
        stem = "".join(random.choices(string.ascii_lowercase, k=10))
        destination = target / (stem + random.choice(extensions))
        # 100 .. 100000 bytes of random content per file.
        destination.write_bytes(os.urandom(random.randint(100, 100000)))
def load_config(config_file):
    """Parse *config_file* with ConfigParser and return the parser object."""
    parser = ConfigParser()
    parser.read(config_file)
    return parser
def test_rebuild_function(setup_test_environment, monkeypatch):
    """rebuild_data results must differ after the directory is regenerated."""
    # Mock the internal method's behaviour: rebuild the table and return the
    # set of first-column values it now holds.
    def mock_rebuild_data(self):
        table_name = Path(self.target_dir).name
        data_list = self.create_data()
        hlink_sql = MediaLibrarySQLite(table_name, data_list)
        hlink_sql.rebuild_data()
        return set(row[0] for row in hlink_sql.get_data())
    # Replace the internal method with the mock via monkeypatch.
    monkeypatch.setattr(MediaLibraryBuilder, "rebuild_data", mock_rebuild_data)
    # Run the external workflow once per configured section, before and after
    # regenerating the random file tree.
    config_file = 'config.ini'
    config = load_config(config_file)
    before = []
    for section_name in config.sections():
        target_directory = config.get(section_name, 'target_directory')
        file_extensions = config.get(section_name, 'file_extensions').split()
        db = MediaLibraryBuilder(target_dir=target_directory, extensions=file_extensions)
        before.append(db.rebuild_data())
    re_generate_directory()
    after = []
    for section_name in config.sections():
        target_directory = config.get(section_name, 'target_directory')
        file_extensions = config.get(section_name, 'file_extensions').split()
        db = MediaLibraryBuilder(target_dir=target_directory, extensions=file_extensions)
        after.append(db.rebuild_data())
    # Verify the outcome: fresh random files should yield different sets.
    assert not before == after
| Aquaakuma/py_hlink | test_rebuild.py | test_rebuild.py | py | 3,182 | python | en | code | 0 | github-code | 90 |
26969302219 | import time
import gspread
from PyQt5 import QtCore
from gspread.exceptions import APIError, NoValidUrlKeyFound
from faq_manual import credentials
from status_urls import StatusUrl
class GDoc(QtCore.QThread):
    """Worker thread that reads URLs from a Google Sheet, has them checked by
    StatusUrl, and writes the results into a generated "Status URL" worksheet.
    """

    # Qt signals consumed by the UI layer.
    bugStatus = QtCore.pyqtSignal(str)       # error notifications
    progressStatus = QtCore.pyqtSignal(str)  # 'start' / 'finish'
    validateStatus = QtCore.pyqtSignal(str)  # 'stop' after cancellation
    monitorStatus = QtCore.pyqtSignal(dict)  # aggregated link data

    def __init__(self, parent=None):
        super().__init__(parent)
        self.name = None               # spreadsheet URL, set via init_name()
        self.urls = None               # URLs from column A of the first sheet
        self.worksheet = None          # the generated "Status URL" worksheet
        self.open_book = None
        self.get_values_sheet = None
        self.link_thread = StatusUrl()
        self.gdoc_report_thread = GDocReport()
        # When URL checking finishes, write the report (see create_report).
        self.link_thread.infoStatus.connect(self.create_report)
        self.credentials = credentials
        self.gc = gspread.service_account_from_dict(self.credentials)
        self.gdoc_report_thread.progressStatus.connect(self.emit_signal)

    def emit_signal(self):
        # Relay the report thread's completion to the UI.
        self.progressStatus.emit('finish')

    def init_name(self, name):
        """Set the spreadsheet URL to process on the next run()."""
        self.name = name

    def run(self):
        self.progressStatus.emit('start')
        try:
            self.open_book = self.gc.open_by_url(self.name)
            self.get_values_sheet = self.open_book.get_worksheet(0).col_values(1)
            # (Re)create the "Status URL" worksheet; APIError here means a
            # stale copy already exists, so delete it and try again.
            try:
                rows = len(self.open_book.get_worksheet(0).get()) + 10
                self.worksheet = self.open_book.add_worksheet(title="Status URL", rows=rows, cols="20")
            except APIError:
                sheet_report = self.open_book.worksheet("Status URL")
                self.open_book.del_worksheet(sheet_report)
                rows = len(self.open_book.get_worksheet(0).get()) + 10
                self.worksheet = self.open_book.add_worksheet(title="Status URL", rows=rows, cols="20")
            # Header row for the report columns.
            self.worksheet.update('A1:E1',
                                  [['url', 'status url', 'redirect url', 'count redirect url',
                                    'status redirect url']])
            self.urls = self.get_values_sheet
            self.link_thread.init_url(self.urls)
            self.link_thread.start()
        except NoValidUrlKeyFound:
            self.bugStatus.emit('incorrect link')
        except APIError:
            self.bugStatus.emit('access error')

    def create_report(self, info_status):
        """Slot: once URL checking reports 'ok', launch the report writer."""
        if info_status == 'ok':
            self.monitorStatus.emit(self.link_thread.data)
            self.gdoc_report_thread.init_links(self.link_thread.status_urls)
            self.gdoc_report_thread.init_worksheet(self.worksheet)
            self.gdoc_report_thread.start()

    def stop(self):
        """Abort the report thread (best effort) and notify the UI."""
        self.gdoc_report_thread.exit()
        self.gdoc_report_thread.wait(2000)
        self.gdoc_report_thread.terminate()
        self.validateStatus.emit('stop')
class GDocReport(QtCore.QThread):
    """Worker thread that writes collected link statuses into a worksheet."""

    progressStatus = QtCore.pyqtSignal(str)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.links = None       # list of per-URL status dicts
        self.worksheet = None   # gspread worksheet to write into

    def init_links(self, links):
        self.links = links

    def init_worksheet(self, worksheet):
        self.worksheet = worksheet

    def run(self):
        # Data rows start at 2 because row 1 holds the header.
        for row, link in enumerate(self.links, start=2):
            if len(link) == 5:
                cells = [link['url'], link['status_url'], link['redirect_url'],
                         link['count_redirect_url'], link['status_redirect_url']]
            else:
                # Incomplete record: the URL did not redirect.
                cells = [link['url'], link['status_url'], 'undefined', 'undefined',
                         'no redirect']
            self.worksheet.update(f'A{row}:E{row}', [cells])
            # Pause between writes to respect the Sheets API rate limit.
            time.sleep(1)
        self.progressStatus.emit('done')
| EugeneOregon/LinkValidator | gdoc.py | gdoc.py | py | 3,818 | python | en | code | 0 | github-code | 90 |
3175442607 | import socket, sys
from struct import * #importing required libraries
# Description string with banner and information
# NOTE(review): this banner relies on fragile implicit string concatenation —
# each `""""` sequence closes one literal and immediately opens another.
# Consider rewriting as a single triple-quoted literal when convenient.
desc = "\n" + r"""
""""\t\t\t""""__________ __ ____ __. __
""""\t\t\t""""\______ \____________/ |_ | |/ _| ____ ____ ____ | | __ ___________ ______
""""\t\t\t"""" | ___/ _ \_ __ \ __\ | < / \ / _ \_/ ___\| |/ // __ \_ __ \/ ___/
""""\t\t\t"""" | | ( <_> ) | \/| | | | \| | ( <_> ) \___| <\ ___/| | \/\___ \
""""\t\t\t"""" |____| \____/|__| |__| |____|__ \___| /\____/ \___ >__|_ \\___ >__| /____ >
""""\t\t\t"""" \/ \/ \/ \/ \/ \/
""" "\n\t\t\t\tProvide Source and Destination IP as well as range of ports to scan\n\t\t\t\t\tCheck Flags using -h flag"
# Function for calculating the checksum of packet
def checksum(msg):
    """Return the 16-bit one's-complement checksum of *msg*, plus 0x400.

    ``msg`` is a bytes-like object interpreted as little-endian 16-bit words.
    Fixes over the original: an odd-length message is now padded with a
    trailing zero byte as RFC 1071 prescribes (previously it raised
    IndexError), and the carry fold is repeated until no carry remains
    (a single fold can still leave one).
    """
    if len(msg) % 2:
        # Pad to an even length so the 16-bit word loop below is safe.
        msg = bytes(msg) + b'\x00'
    s = 0
    for i in range(0, len(msg), 2):
        # Little-endian word: low byte first.
        s += msg[i] + (msg[i + 1] << 8)
    # Fold all carries back into the low 16 bits.
    while s >> 16:
        s = (s & 0xffff) + (s >> 16)
    checksum = ~s & 0xffff
    # NOTE(review): the original adds 0x400 after complementing, which can push
    # the value past 0xffff (the caller guards pack('H', ...) with try/except
    # for exactly that case). Preserved as-is — confirm whether the offset is
    # intentional scan behaviour before removing it.
    checksum += 0x400
    return checksum
# Function for assembling the packet by packing IP/TCP headers
def assemble_packet(sourceip, destip, dest_port, scan):
    """Build a raw IP+TCP probe packet for the given scan type.

    *scan* selects the TCP flag pattern: "xmas" (FIN|PSH|URG), "syn", "fin",
    "ack"; any other value leaves all flags clear (NULL scan).  Returns the
    packed IP header followed by the TCP header, or None when the computed
    checksum cannot be packed into 16 bits (see the try/except below).
    """
    packet = '';  # NOTE(review): dead initializer; overwritten with bytes at the end
    # source and destination IPs
    source_ip = sourceip
    dest_ip = destip # or socket.gethostbyname('www.google.com')
    # ip header fields
    ip_ihl = 5
    ip_ver = 4
    ip_tos = 0
    ip_tot_len = 0 # kernel will fill the correct total length
    ip_id = 54321 #Id of this packet
    ip_frag_off = 0
    ip_ttl = 255
    ip_proto = socket.IPPROTO_TCP
    ip_check = 0 # kernel will fill the correct checksum
    ip_saddr = socket.inet_aton ( source_ip ) #Spoof the source ip address if you want to
    ip_daddr = socket.inet_aton ( dest_ip )
    ip_ihl_ver = (ip_ver << 4) + ip_ihl
    # the ! in the pack format string means network order
    ip_header = pack('!BBHHHBBH4s4s' , ip_ihl_ver, ip_tos, ip_tot_len, ip_id, ip_frag_off, ip_ttl, ip_proto, ip_check, ip_saddr, ip_daddr)
    # tcp header fields
    tcp_source = 4455 # source port
    tcp_dest = dest_port # destination port
    tcp_seq = 454
    tcp_ack_seq = 0
    tcp_doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes
    #tcp flags for NULL scan
    tcp_fin = 0
    tcp_syn = 0
    tcp_rst = 0
    tcp_psh = 0
    tcp_ack = 0
    tcp_urg = 0
    # tcp flags fot xmas scan
    if "xmas" in scan:
        tcp_fin = 1
        tcp_psh = 1
        tcp_urg = 1
    # TCP flags for Syn Scan
    elif "syn" in scan:
        tcp_syn = 1
    # TCP flags for fin Scan
    elif "fin" in scan:
        tcp_fin = 1
    # TCP flags for ack Scan
    elif "ack" in scan:
        tcp_ack = 1
    tcp_window = socket.htons (5840) # maximum allowed window size
    tcp_check = 0
    tcp_urg_ptr = 0
    tcp_offset_res = (tcp_doff << 4) + 0
    # Flag bits in TCP order: FIN, SYN, RST, PSH, ACK, URG (low to high).
    tcp_flags = tcp_fin + (tcp_syn << 1) + (tcp_rst << 2) + (tcp_psh <<3) + (tcp_ack << 4) + (tcp_urg << 5)
    # the ! in the pack format string means network order
    tcp_header = pack('!HHLLBBHHH' , tcp_source, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res, tcp_flags, tcp_window, tcp_check, tcp_urg_ptr)
    #user_data = b'Hello, how are you'
    user_data=b'test'
    # pseudo header fields
    source_address = socket.inet_aton( source_ip )
    dest_address = socket.inet_aton(dest_ip)
    placeholder = 0
    protocol = socket.IPPROTO_TCP
    tcp_length = len(tcp_header) + len(user_data)
    # packing TCP header
    psh = pack('!4s4sBBH' , source_address , dest_address , placeholder , protocol , tcp_length);
    # NOTE(review): the checksum input covers pseudo-header + TCP header but
    # NOT user_data, even though tcp_length above counts user_data — per
    # RFC 793 the payload should be included. Confirm whether this is
    # deliberate (e.g. to elicit resets) before changing.
    psh = psh + tcp_header
    # Getting check sum for the packet
    tcp_check = checksum(psh)
    # make the tcp header again and fill the correct checksum - remember checksum is NOT in network byte order
    try:
        tcp_header = pack('!HHLLBBH' , tcp_source, tcp_dest, tcp_seq, tcp_ack_seq, tcp_offset_res, tcp_flags, tcp_window) + pack('H' , tcp_check) + pack('!H' , tcp_urg_ptr)
    except:
        # checksum() may return a value > 0xffff (its +0x400 offset); pack('H')
        # then raises struct.error and the function returns None.
        return
    # final full packet - syn packets dont have any data
    # Getting the full TCP packet and returning to scan for ports
    # NOTE(review): user_data is not appended here, so the payload is never sent.
    packet = ip_header + tcp_header
    return packet
33832067881 | """
SlackOutput is a class that implements the BaseOutputProvider interface for Slack messages.
"""
import dataclasses
import os
import pydantic
import requests
from keep.contextmanager.contextmanager import ContextManager
from keep.exceptions.provider_exception import ProviderException
from keep.providers.base.base_provider import BaseProvider
from keep.providers.models.provider_config import ProviderConfig
@pydantic.dataclasses.dataclass
class SlackProviderAuthConfig:
    """Slack authentication configuration.

    At least one of the two credentials must be supplied (enforced by
    SlackProvider.validate_config): an incoming-webhook URL, or an OAuth
    access token for the Slack Web API.
    """

    # Incoming-webhook URL; when set, SlackProvider.notify posts to it.
    webhook_url: str = dataclasses.field(
        metadata={
            "required": True,
            "description": "Slack Webhook Url",
            "sensitive": True,
        },
        default="",
    )
    # OAuth access token used with chat.postMessage; hidden in provider UIs.
    access_token: str = dataclasses.field(
        metadata={
            "required": False,
            "sensitive": True,
            "hidden": True,
        },
        default="",
    )
class SlackProvider(BaseProvider):
    """Sends notification messages to Slack.

    Two delivery modes are supported, selected by the configured credentials:
    an incoming-webhook URL, or an OAuth access token used with the
    ``chat.postMessage`` Web API (which additionally requires a channel).
    """

    OAUTH2_URL = os.environ.get("SLACK_OAUTH2_URL")
    SLACK_CLIENT_ID = os.environ.get("SLACK_CLIENT_ID")
    SLACK_CLIENT_SECRET = os.environ.get("SLACK_CLIENT_SECRET")
    SLACK_API = "https://slack.com/api"

    def __init__(
        self, context_manager: ContextManager, provider_id: str, config: ProviderConfig
    ):
        super().__init__(context_manager, provider_id, config)

    def validate_config(self):
        """Build the auth config and require at least one credential."""
        self.authentication_config = SlackProviderAuthConfig(
            **self.config.authentication
        )
        if (
            not self.authentication_config.webhook_url
            and not self.authentication_config.access_token
        ):
            raise Exception("Slack webhook url OR Slack access token is required")

    def dispose(self):
        """No resources are held, so there is nothing to clean up."""
        pass

    @staticmethod
    def oauth2_logic(**payload):
        """Exchange an OAuth2 authorization code for an access token.

        Args:
            payload (dict): The payload from the oauth2 callback; must
                contain the authorization ``code``.

        Returns:
            dict: ``{"access_token": ...}`` suitable for provider config.

        Raises:
            Exception: If no code is supplied or Slack rejects the exchange.
        """
        code = payload.get("code")
        if not code:
            raise Exception("No code provided")
        exchange_request_payload = {
            **payload,
            "client_id": SlackProvider.SLACK_CLIENT_ID,
            "client_secret": SlackProvider.SLACK_CLIENT_SECRET,
        }
        response = requests.post(
            f"{SlackProvider.SLACK_API}/oauth.v2.access",
            data=exchange_request_payload,
        )
        response_json = response.json()
        # Slack signals failure both via HTTP status and an "ok" flag.
        if not response.ok or not response_json.get("ok"):
            raise Exception(
                response_json.get("error"),
            )
        return {"access_token": response_json.get("access_token")}

    def notify(self, message="", blocks=None, channel="", **kwargs: dict):
        """Send a notification message to Slack.

        Uses the Slack Incoming Webhook API when a webhook URL is configured
        (https://api.slack.com/messaging/webhooks), otherwise falls back to
        ``chat.postMessage`` with the access token.

        Args:
            message: Plain text; defaults to the first block's "text" field.
            blocks: Optional Slack Block Kit payload.
            channel: Target channel id, required in access-token mode.
            kwargs (dict): The providers with context.

        Raises:
            ProviderException: On missing content/channel or a Slack error.
        """
        self.logger.debug("Notifying alert message to Slack")
        # Fix: the previous signature used the shared mutable default
        # ``blocks=[]``, and an empty ``blocks`` with no message crashed
        # with IndexError instead of reporting a usable error.
        blocks = blocks or []
        if not message:
            if not blocks:
                raise ProviderException(
                    "Either message or blocks are required to notify Slack"
                )
            message = blocks[0].get("text")
        if self.authentication_config.webhook_url:
            self.logger.debug("Notifying alert message to Slack using webhook url")
            response = requests.post(
                self.authentication_config.webhook_url,
                json={"text": message, "blocks": blocks},
            )
            if not response.ok:
                raise ProviderException(
                    f"{self.__class__.__name__} failed to notify alert message to Slack: {response.text}"
                )
        elif self.authentication_config.access_token:
            self.logger.debug("Notifying alert message to Slack using access token")
            if not channel:
                raise ProviderException("Channel is required (E.g. C12345)")
            payload = {
                "channel": channel,
                "text": message,
                "blocks": blocks,
                "token": self.authentication_config.access_token,
            }
            response = requests.post(
                f"{SlackProvider.SLACK_API}/chat.postMessage", data=payload
            )
            response_json = response.json()
            if not response.ok or not response_json.get("ok"):
                raise ProviderException(
                    f"Failed to notify alert message to Slack: {response_json.get('error')}"
                )
        self.logger.debug("Alert message notified to Slack")
if __name__ == "__main__":
    # Manual smoke test: posts one message using SLACK_WEBHOOK_URL from the env.
    # Output debug messages
    import logging
    logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()])
    context_manager = ContextManager(
        tenant_id="singletenant",
        workflow_id="test",
    )
    # Load environment variables
    import os  # NOTE(review): redundant — os is already imported at module top
    slack_webhook_url = os.environ.get("SLACK_WEBHOOK_URL")
    # Initialize the provider and provider config
    config = ProviderConfig(
        id="slack-test",
        description="Slack Output Provider",
        authentication={"webhook_url": slack_webhook_url},
    )
    provider = SlackProvider(context_manager, provider_id="slack", config=config)
    provider.notify(message="Simple alert showing context with name: John Doe")
| keephq/keep | keep/providers/slack_provider/slack_provider.py | slack_provider.py | py | 5,359 | python | en | code | 2,348 | github-code | 90 |
40106952791 | #!/user/bin env python
#-*- coding:utf8 -*-
import Pecker
import numpy as np
from math import sqrt,hypot
Slice = 10. # stroke resolution: each stroke is sliced into 10 mm segments
# Interactive driver (Python 2: uses raw_input). Reads the board length and an
# initial position, then feeds successive target positions to Pecker.SliceMove
# until the user presses Enter on an empty line.
if __name__ == '__main__':
    board_len = float(raw_input('Length of Board: '))
    init = np.array(map(float,raw_input('Input Init Pos: ').split()))
    tmp = np.array([0.,0.])
    init_pos = Pecker.PosCaculator(init,tmp,board_len)
    # print init_pos
    while True:
        pos = np.array(map(float,raw_input('Input Position:').split()))
        if len(pos)<1: break # press Enter with no input to end the program
        Pecker.SliceMove(tmp,pos,board_len,init,init_pos,None,Slice,False)
        tmp = pos
    raw_input('Finished! Press <Enter> to terminate the program.')
18454785023 | from datetime import datetime, timedelta
class MemoizationError(Exception):
    """Raised when the memoization process fails (e.g. bad constructor input)."""
class memoize:
    '''Memorizes results of the provided function keyed by its arguments.

    Cached entries expire after ``timeout`` milliseconds and are recomputed
    on the next request for the same arguments.

    Methods:
        memoized(self, *resolver)
    Properties:
        func, default_key, timeout
    Read-only: summorial_memory
    '''

    # parameters according to the example given
    def __init__(self, func, *resolver, timeout = 5000):
        # Explicit checks give clearer errors than the default
        # "int object not callable" failures the raw calls would produce.
        if not func or not callable(func):
            raise MemoizationError("Failed to initialize a memoize instance.\n"
                                   "No function provided.")
        if not resolver:
            raise MemoizationError("Failed to initialize a memoize instance.\n"
                                   "No resolver provided.")
        # Cache layout: {args_tuple: [computation time, result], ...}.
        # Fix: keys are now the full argument tuple. The previous scheme keyed
        # entries on sum(resolver), so distinct calls with equal argument sums
        # (e.g. (1, 2) and (3, 0)) collided and returned each other's results.
        self.__summorial_memory = {}
        self.func = func
        self.timeout = timeout
        self.default_key = tuple(resolver)
        self.__summorial_memory = {
            self.default_key: [datetime.now(), self.func(*resolver)]
        }

    def memoized(self, *resolver):
        """Return func(*resolver), recomputing only on a miss or expiry.

        Called with no arguments it returns the value cached for the
        arguments given to the constructor.
        """
        if not resolver:
            return self.__summorial_memory[self.default_key][1]
        key = tuple(resolver)
        now = datetime.now()
        entry = self.__summorial_memory.get(key)
        if entry is not None:
            if now - entry[0] < timedelta(milliseconds=self.timeout):
                return entry[1]
            # Expired: recompute in place and refresh the timestamp.
            # NOTE(review): expired entries are refreshed, never evicted, so
            # the cache only grows — same behaviour as the original.
            entry[1] = self.func(*resolver)
            entry[0] = now
            return entry[1]
        self.__summorial_memory[key] = [now, self.func(*resolver)]
        return self.__summorial_memory[key][1]

    # Read-only field accessors
    def summorialmemory(self):
        '''dictionary with all values and their computation times'''
        return self.__summorial_memory
| kamzyd/5hzS-Gj3s-L9Is-2FR4 | memoization/memoize.py | memoize.py | py | 2,790 | python | en | code | 0 | github-code | 90 |
6656267484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import runpy
import shutil
import sys
from pathlib import Path
import pytest
@pytest.fixture
def dicom_file():
    """Return the path to pydicom's bundled CT_small.dcm test file."""
    # Skip dependent tests entirely when pydicom is not installed.
    pydicom = pytest.importorskip("pydicom")
    return pydicom.data.get_testdata_file("CT_small.dcm")
@pytest.fixture
def dicom_folder(dicom_file, tmp_path):
    """Copy the sample DICOM file three times into tmp_path; return the copies."""
    target_dir = Path(tmp_path)
    target_dir.mkdir(exist_ok=True)
    # Suffix is stripped before taking .stem (mirrors the original's
    # double-strip for names with multiple dots).
    base_name = Path(dicom_file).with_suffix("").stem
    copies = []
    for index in range(3):
        copy_path = Path(target_dir, f"{base_name}_{index}").with_suffix(".dcm")
        shutil.copy(dicom_file, copy_path)
        copies.append(copy_path)
    return copies
def test_dicom_types(dicom_folder, capsys, tmp_path):
    """Run the dicom_types CLI on tmp_path and check its summary output."""
    # Simulate invoking the module as: dicom_types <tmp_path>
    sys.argv = [
        sys.argv[0],
        str(tmp_path),
    ]
    runpy.run_module("dicom_utils.cli.dicom_types", run_name="__main__", alter_sys=True)
    captured = capsys.readouterr()
    # NOTE(review): expected line format comes from dicom_utils' CLI output;
    # presumably the identical copies collapse into one summary line — confirm.
    assert "1 - None - ORIGINAL|PRIMARY|AXIAL" in captured.out
    assert len(captured.out.split("\n")) == 2
| medcognetics/dicom-utils | tests/test_main/test_dicom_types.py | test_dicom_types.py | py | 991 | python | en | code | 0 | github-code | 90 |
38304771230 |
# given a string, compute recursively a new string where all the lowercase 'x' chars have been moved to the end of the string
def end_x(str):
if not len(str):
return ''
if str[0] != 'x':
return str[0] + end_x(str[1:])
else:
return end_x(str[1:]) + 'x'
print(end_x('xxre'))
print(end_x('xxhixx'))
print(end_x('xhixhix'))
| jemtca/CodingBat | Python/Recursion-1/end_x.py | end_x.py | py | 331 | python | en | code | 0 | github-code | 90 |
# Read two numbers and print their sum, difference, quotient and product.
# (Prompts and output labels are in Polish.)
val_1 = float(input('Wpisz pierwszą dowolną liczbę: '))
val_2 = float(input('Wpisz drugą dowolną liczbę: '))
suma = val_1 + val_2
roznica = val_1 - val_2
iloraz = val_1 / val_2  # NOTE(review): raises ZeroDivisionError when val_2 == 0
iloczyn = val_1 * val_2
print(f'Suma to: {suma}.')
print(f'Różnica to: {roznica}.')
print(f'Iloraz to: {iloraz}.')
print(f'Iloczyn to: {iloczyn}.')
# float // float -> float
# 12.0 // 5.0 -> 2.0
# float / float -> float
# 12.0 / 5.0 -> 2.4
| Korki-Pola/korki-pola | 2.-programowanie/zadania/04-python-keywords-cd/zadanie-2.py | zadanie-2.py | py | 433 | python | pl | code | 0 | github-code | 90 |
42039542940 | """
There are a total of n courses you have to take labelled from 0 to n - 1.
Some courses may have prerequisites, for example, if prerequisites[i] = [ai, bi] this means you must take the course bi before the course ai.
Given the total number of courses numCourses and a list of the prerequisite pairs, return the ordering of courses you should take to finish all courses.
If there are many valid answers, return any of them. If it is impossible to finish all courses, return an empty array.
Example 1:
Input: numCourses = 2, prerequisites = [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished course 0. So the correct course order is [0,1].
Example 2:
Input: numCourses = 4, prerequisites = [[1,0],[2,0],[3,1],[3,2]]
Output: [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3].
Example 3:
Input: numCourses = 1, prerequisites = []
Output: [0]
https://leetcode.com/problems/course-schedule-ii/
"""
class Solution:
    def findOrder(self, numCourses: int, prerequisites: list[list[int]]) -> list[int]:
        """Return a course order satisfying all prerequisites, or [] on a cycle.

        Kahn's BFS topological sort over the reversed dependency graph: for
        each pair [a, b] (b must precede a) an edge a -> b is stored, so the
        BFS visit order is the reverse of a valid schedule and is flipped at
        the end.  O(V + E) time and space.

        Fix: annotations now use built-in generics (list[int], Python 3.9+) —
        the original referenced typing.List without importing it, which raises
        NameError outside the LeetCode sandbox.
        """
        graph = [[] for _ in range(numCourses)]
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            graph[course].append(prereq)
            indegree[prereq] += 1
        # Seed with courses nothing depends on. Appending while iterating is
        # deliberate: the list doubles as BFS queue and visit order.
        order = [i for i in range(numCourses) if not indegree[i]]
        for node in order:
            for nxt in graph[node]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    order.append(nxt)
        # All courses visited -> reversing yields a valid schedule; otherwise
        # a cycle left some course unreachable.
        return order[::-1] if len(order) == numCourses else []
| nilay-gpt/LeetCode-Solutions | graphs/topological_sort/course_scheduleII.py | course_scheduleII.py | py | 2,270 | python | en | code | 2 | github-code | 90 |
def adding_dict(student_name): # Interactively add grades for one student
    """Load a student's grade file, let the user enter grades for a subject,
    and persist the changes; prompts are in Russian."""
    dic = made_dic(reading_data(student_name)) # Parse "Kat: ; M: 1 5 3 5 4; R: 5 4 5" into a list of dicts
    print(reading_data(student_name))
    # print(dic)
    hold = '0'
    subj_tamp = ' ' + input('Ведите название предмета \n: ') # Leading space added for readability
    for i in range(len(dic)):
        if subj_tamp in dic[i]: # If the subject exists in one of the dicts
            print(subj_tamp, dic[i][subj_tamp]) # Show the subject and its grade string
            while hold !='':
                hold = input('Ведите оценку от 1 до 5: Если все отметки проставлены нажмите ENTER\n: ')
                if hold == '1' or hold == '2' or hold == '3' or hold == '4' or hold == '5':
                    dic[i][subj_tamp] += ' ' + hold # Append the grade if it is a valid digit
                    print(subj_tamp, dic[i][subj_tamp])
            write_csv(student_name, dic) # Persist the changes
    else: # for-else: runs after the loop (no break above), offering to add the subject
        while hold != '':
            hold2 = input('Сохраните нговые данные, нажмите любую букву, ENTER и запустите программу\nДля отмены нажмите только ENTER\n')
            if hold2.isalpha() == True:
                dic.append(dict({subj_tamp: ''}))
                write_csv(student_name, dic) # Persist the new subject
                hold =''
    print(reading_data(student_name)) # Show the student's updated grade string
    return dic
def write_csv(student_name, dic_lst): # Serialize the subject data to a CSV-like file
    """Write *dic_lst* (list of one-pair dicts) to '<student_name>.csv' as
    "key:value;key:value;..." — the format reading_data/made_dic expect."""
    file = ''
    student_name += '.csv' # Append the file extension to build the file name
    for i in dic_lst:
        for k, v in i.items(): # Emit "key:value" for each dict entry
            file += str(k) + ':' # ':' and ';' are the delimiters made_dic splits on
            file += str(v) + ';'
    file = file[:-1] # Drop the trailing ';'
    with open(student_name, 'w+', encoding='utf-8') as data: data.write(file) # Write the serialized string
def reading_data(student_name):
    """Read and return the full contents of '<student_name>.csv'."""
    path = student_name + '.csv'
    with open(path, 'r', encoding='utf-8') as source:
        contents = source.read()
    return contents
def made_dic(file_srt):
    """Parse "Kat: ;M: 1 5" into a list of one-pair dicts: [{'Kat': ' '}, ...]."""
    pairs = (chunk.split(':') for chunk in file_srt.split(';'))
    return [{parts[0]: parts[1]} for parts in pairs]
def create_file_csv(student_name):
    """Create (or truncate) '<student_name>.csv' containing a single newline."""
    target = student_name + '.csv'
    with open(target, 'w+', encoding='utf-8') as out:
        out.write('\n')
#student_name = 'Kat' # Kat: ; M: 1 5 3 5 4; R: 5 4 5
#adding_dict(student_name)
| SB44444/PythonSeminar_8_tasks_1 | tamp_read.py | tamp_read.py | py | 3,870 | python | ru | code | 0 | github-code | 90 |
35716412801 | from dataclasses import dataclass
import pytest
from secfsdstools.a_utils.dbutils import DB, DBStateAcessor
sql_create = """
CREATE TABLE IF NOT EXISTS testtable1
(
col1,
col2
)
"""
sql_create_status = """
CREATE TABLE IF NOT EXISTS status
(
keyName,
value,
PRIMARY KEY (keyName)
);
"""
@pytest.fixture
def db(tmp_path) -> DB:
return DB(db_dir=str(tmp_path))
@pytest.fixture
def dbstatus(tmp_path) -> DBStateAcessor:
db = DBStateAcessor(db_dir=str(tmp_path))
with db.get_connection() as conn:
db.execute_single(sql_create_status, conn)
return db
@dataclass
class DataRow:
col1: str
col2: str
def test_db_file_exists_no():
assert DB(db_dir='blabli').db_file_exists() is False
def test_db_file_exists_yes(db: DB):
with db.get_connection() as conn:
db.execute_single(sql_create, conn)
assert db.db_file_exists() is True
def test_table_exists(db: DB):
assert db.table_exists('testtable1') is False
with db.get_connection() as conn:
db.execute_single(sql_create, conn)
assert db.table_exists('testtable1') is True
assert db.table_exists('bla') is False
def test_db(db: DB):
# create simple table
with db.get_connection() as conn:
db.execute_single(sql_create, conn)
# read the content
result = db.execute_fetchall("SELECT * FROM testtable1")
assert len(result) == 0
# insert entries with execute_many
sql = "INSERT INTO testtable1 ('col1', 'col2') VALUES (?, ?)"
with db.get_connection() as conn:
db.execute_many(sql, [('row1-1', 'row1-2'), ('row2-1', 'row2-2')], conn)
# read the content without typing
result = db.execute_fetchall("SELECT * FROM testtable1")
assert len(result) == 2
# read directly into dataframe
result_df = db.execute_read_as_df("SELECT * FROM testtable1")
assert result_df.shape == (2, 2)
# read directly as type
result_type = db.execute_fetchall_typed(sql="SELECT * FROM testtable1", T=DataRow)
assert len(result_type) == 2
assert result_type[0].col1 == 'row1-1'
assert result_type[0].col2 == 'row1-2'
print('success')
def test_insert_dataclass(db: DB):
@dataclass
class Row:
col1: str
col2: int
# create simple table
with db.get_connection() as conn:
db.execute_single(sql_create, conn)
data = Row(col1='col1', col2=123)
insert_sql = db.create_insert_statement_for_dataclass(table_name='testtable1', data=data)
assert insert_sql == "INSERT INTO testtable1 ('col1', 'col2') VALUES ('col1', 123)"
# --- Testing DBStateAccessor
def test_insert_and_overwrite(dbstatus: DBStateAcessor):
key = 'key1'
# read none existing key
assert dbstatus.get_key(key=key) is None
# insert new key
dbstatus.set_key(key=key, value='value1')
# read key
assert dbstatus.get_key(key=key) == 'value1'
# overwrite key
dbstatus.set_key(key=key, value='value2')
# read key
assert dbstatus.get_key(key=key) == 'value2'
| HansjoergW/sec-fincancial-statement-data-set | tests/a_utils/test_dbutils.py | test_dbutils.py | py | 3,103 | python | en | code | 12 | github-code | 90 |
4264271063 | notas = (2, 4, 6, 8)
def contenido(lista, indice):
try:
resultado = lista[indice]
except:
resultado = None
return resultado
# Calcular la media
indice = 0
suma = 0
while contenido(notas, indice) != None:
suma = suma + notas[indice]
indice = indice + 1
media = suma / indice
# Presentar la media
print("Numero de items: ", indice)
print("Nota Total.....: ", suma)
print("Nota Media.....: ", media)
| acelerarepos/pensamiento-computacional | media05.py | media05.py | py | 450 | python | pt | code | 0 | github-code | 90 |
35071334741 | from datetime import date
import time
from openerp.osv import orm, fields
from openerp.osv import fields, osv
from openerp.tools.translate import _
class payment_order_create(osv.osv_memory):
_inherit = 'payment.order.create'
def create_payment(self, cr, uid, ids, context=None):
flag = 0
partner_pool = self.pool.get('res.partner')
payment_pool = self.pool.get('payment.order.create').browse(cr, uid, ids, context=context)[0]
if not payment_pool.entries:
flag = 1
for payment in payment_pool.entries:
part_obj = partner_pool.browse(cr, uid, payment.partner_id.id, context=context)
for bank in part_obj.bank_ids:
if bank.eft_check == True:
flag = 1
if flag == 0:
raise osv.except_osv(_('Warning!'), _('Select a bank where EFT is True for the partner!'))
res=super(payment_order_create, self).create_payment( cr, uid, ids, context=context)
return res
payment_order_create()
class payment_line(osv.osv):
_inherit = 'payment.line'
def _get_total(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
currency_obj = self.pool.get('res.currency')
move_obj=self.pool.get('account.move.line')
inv_obj=self.pool.get('account.invoice')
if context is None:
context = {}
res = {}
# Added Code in the For Loop.....
# Connected to Account account.move.line table with the help of move id retrieved the name of reference.
# Connected to Account Invoice account.invoice with the help of reference and retrieved the residual (balance).
for line in self.browse(cursor, user, ids, context=context):
ctx = context.copy()
ctx['date'] = line.order_id.date_done or time.strftime('%Y-%m-%d')
currency_output= currency_obj.compute(cursor, user, line.currency.id,
line.company_currency.id,
line.move_line_id.stored_invoice_id.residual, context=ctx)
res[line.id] = currency_output
return res
_columns={
'total': fields.function(_get_total, string='Total Amount',
type='float', store=True,
help='Balance Amount for Particular Invoice'),
}
class payment_order(osv.osv):
_inherit="payment.order"
_columns={
'company_filter':fields.boolean('Group Companies'),
'fixed_message':fields.char('EFT Message',size=8,required=1),
}
class eft_export(orm.Model):
'''EFT Export'''
_name = 'banking.export.eft'
_description = __doc__
_rec_name = 'identification'
_columns = {
'payment_order_ids': fields.many2many(
'payment.order',
'account_payment_order_clieop_rel',
'banking_export_clieop_id', 'account_order_id',
'Payment Orders',
readonly=True),
'testcode':
fields.selection([('T', _('Yes')), ('P', _('No'))],
'Test Run', readonly=True),
'daynumber':
fields.integer('ClieOp Transaction nr of the Day', readonly=True),
'duplicates':
fields.integer('Number of Duplicates', readonly=True),
'prefered_date':
fields.date('Prefered Processing Date',readonly=True),
'no_transactions':
fields.integer('Number of Transactions', readonly=True),
'check_no_accounts':
fields.char('Check Number Accounts', size=5, readonly=True),
'total_amount':
fields.float('Total Amount', readonly=True),
'identification':
fields.char('Identification', size=6, readonly=True, select=True),
'filetype':
fields.selection([
('CREDBET', 'Payment Batch'),
('SALARIS', 'Salary Payment Batch'),
('INCASSO', 'Direct Debit Batch'),
], 'File Type', size=7, readonly=True, select=True),
'date_generated':
fields.date('Generation Date', readonly=True, select=True),
'file':
fields.binary('ClieOp File', readonly=True,),
'filename': fields.char(
'File Name', size=32,
),
'state':
fields.selection([
('draft', 'Draft'),
('sent', 'Sent'),
('done', 'Reconciled'),
], 'State', readonly=True),
}
def get_daynr(self, cr, uid, context=None):
'''
Return highest day number
'''
last = 1
last_ids = self.search(cr, uid, [
('date_generated', '=',
fields.date.context_today(self, cr,uid,context))
], context=context)
if last_ids:
last = 1 + max([x['daynumber'] for x in self.read(
cr, uid, last_ids, ['daynumber'],
context=context)])
return last
_defaults = {
'date_generated': fields.date.context_today,
'duplicates': 1,
'state': 'draft',
'daynumber': get_daynr,
}
| excedogit/GameFarm | gff_account_eft_export/eft_export.py | eft_export.py | py | 5,227 | python | en | code | 0 | github-code | 90 |
30628245598 | ## BUDGET ANALYSIS (v2)
import sys
import time
import random
stored = {}
total_list = [0]
budget_store = [0]
# -------------------- PROGRESS BAR CODE ------------------------
def updt(total, progress):
barLength, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(barLength * progress))
text = "\r[{}] {:.0f}% {}".format(
"■" * block + "-" * (barLength - block), round(progress * 100, 0),
status)
sys.stdout.write(text)
sys.stdout.flush()
random_num = random.randint(10,60)
runs = random_num # -------------------- TIME SELECT ---------------------------
# -------------------- CHART ------------------------
def chart(stored):
print('ITEM \t\t COST')
print('-'*25)
for item in stored:
print(item, '\t\t',stored[item])
# -------------------- ITEM ADD ------------------------
def item_add(item_input, cost_input):
if item_input.isalpha() == True and cost_input >= 0:
stored[item_input] = cost_input
chart(stored)
print('_'*75)
user_input = input("Do you want to continue entering budget items? Enter yes or no: ").lower()
print(user_input)
print('_'*75)
loop(user_input)
else:
print("ERROR1")
# -------------------- CONTINUE ENTER CHECK ------------------------
def loop(user_input):
    # Drives the enter-items conversation. NOTE(review): item_add() calls back
    # into loop(), so this is mutually recursive rather than a simple while loop.
    while user_input == 'yes' or user_input == 'y':
        print('_'*75)
        # NOTE(review): the unpack below assumes exactly two space-separated
        # tokens — a multi-word item name raises ValueError; confirm intended.
        item_input, cost_input = input("Please enter the item to add to your budget and the cost of that item: ").split()
        print('_'*75)
        item_input = str(item_input).lower()
        cost_input = float(cost_input)
        #chart(stored)
        item_add(item_input, cost_input)
    if user_input == 'no' or user_input == 'n':
        print('_'*75)
        print("Program ENDED")
        print('_'*75)
        # Show the final table, then compare the total against the budget.
        chart(stored)
        budget_check()
    else:
        print('_'*75)
        print("You did not select Yes or No. Program ENDED")
        print('_'*75)
        chart(stored)
        sys.exit()
# -------------------- BUDGET ENTER ------------------------
def budget(budget_store):
    """Ask the user for this month's budget and start the item-entry flow.

    The parsed budget is appended to *budget_store*; a non-numeric entry
    ends the program.
    """
    print('_'*75)
    budget_input = input("Please enter your budget for this month: ")
    print("You entered: ","$",budget_input)
    print('_'*75)
    # ---------------------- NUMBER CHECK -----------------------
    try:
        budget_store.append(float(budget_input))
        print('_'*75)
        user_input = input("Do you want to continue entering budget items? Enter yes or no: ").lower()
        print("You entered: ",user_input)
        print('_'*75)
        loop(user_input)
    except ValueError:
        # The code accepts any float, so the old "integer" wording (and the
        # "did no enter" typo) misled the user.
        print("You did not enter a number. Program ENDED")
        sys.exit()
# -------------------- BUDGET CHECK ------------------------
def budget_check():
    """Total the entered item costs and report over/under/on budget.

    Exits the process after reporting, mirroring the original flow.
    """
    for val in stored.values():
        total_list.append(int(val))
    # Computing the total unconditionally also handles an empty item list
    # (the original left `total` undefined in that case and crashed).
    total = int(sum(total_list))
    print('-'*25)
    print("Calculating your total")
    print('-'*25)
    # ---------------------- PROGRESS BAR -----------------------
    for run_num in range(runs):
        time.sleep(.1)
        updt(runs, run_num + 1)
    print('-'*25)
    print("Total: ",'\t', total)
    print('-'*25)
    # Drop the placeholder 0 so budget_store[0] is the real budget.
    budget_store.remove(0)
    if budget_store[0] < total:
        difference = total - budget_store[0]
        print("Over budget by: ", difference)
        print('-'*25)
        sys.exit()
    elif budget_store[0] > total:
        difference = budget_store[0] - total
        print("Under budget by: ", difference)
        print('-'*25)
        sys.exit()
    else:
        # Bug fix: budget == total previously printed "ERROR3" and restarted
        # the whole budget prompt; an exact match is a valid outcome.
        print("You are exactly on budget!")
        print('-'*25)
        sys.exit()
# Script entry point: ask for the budget, then collect items interactively.
budget(budget_store)
# source: edunzer/MIS285_PYTHON_PROGRAMMING RANDOM/4.3_hard_with_progress_bar.py
#!/usr/bin/env python3
import os
import sys
import glob
import subprocess
import multiprocessing
import shlex
import argparse
# Command-line options controlling which test groups run and how verbose /
# parallel the run is.
parser = argparse.ArgumentParser()
parser.add_argument('--basic', action='store_true', help='Test only basic functionality')
parser.add_argument('--verbose', action='store_true', help='Print all commands tested')
parser.add_argument('--serial', action='store_true', help='Run only one command at a time')
parser.add_argument('--synth', action='store_true', help='Test Yosys synthesis (slower)')
parser.add_argument('--coverage', action='store_true', help='Run Metron under kcov for coverage testing')
options = parser.parse_args()
################################################################################
def main():
    """Build Metron and run every selected test group.

    Returns the total number of failures (used as the process exit code).
    """
    print()
    print_b(" ###     ### ####### ######## ######   ######  ###     ## ")
    print_b(" ####   #### ##         ##    ##   ## ##    ## ####    ## ")
    print_b(" ## #### ##  #####      ##    ###### # ##   ## ## ##   ## ")
    print_b(" ##  ##  ##  ##         ##    ##   ##  ##   ## ##  ##  ## ")
    print_b(" ##      ##  #######    ##    ##    ##  ###### ##    #### ")
    ############################################################
    print()
    print_b("Refreshing build")
    os.system("./build.py")
    if options.basic:
        if os.system("ninja bin/metron"):
            print("Build failed!")
            return -1
    else:
        if os.system("ninja"):
            print("Build failed!")
            return -1
    if options.coverage:
        print("Wiping old coverage run")
        os.system("rm -rf coverage")
    ############################################################
    errors = 0
    print_b("Wiping tests/metron_sv/*")
    os.system("rm tests/metron_sv/*")
    print_b("Checking that examples convert to SV cleanly")
    errors += check_commands_good([
        "bin/metron -c examples/uart/metron/uart_top.h",
        "bin/metron -c examples/rvsimple/metron/toplevel.h",
        "bin/metron -c examples/pong/metron/pong.h",
    ])
    print()
    metron_good = sorted(glob.glob("tests/metron_good/*.h"))
    metron_bad = sorted(glob.glob("tests/metron_bad/*.h"))
    # NOTE: every command below interpolates {filename}; a corrupted copy of
    # this file had a literal "(unknown)" placeholder here instead.
    print_b("Checking that all headers in tests/metron_good compile")
    errors += check_commands_good([
        f"g++ -I. --std=gnu++2a -fsyntax-only -c {filename}"
        for filename in metron_good
    ])
    print()
    print_b("Checking that all headers in tests/metron_bad compile")
    errors += check_commands_good([
        f"g++ -I. --std=gnu++2a -fsyntax-only -c {filename}"
        for filename in metron_bad
    ])
    print()
    print_b("Checking that all test cases in metron_good convert to SV cleanly")
    errors += check_commands_good([
        f"bin/metron -c {filename} -o {filename.replace('_good', '_sv').replace('.h', '.sv')}"
        for filename in metron_good
    ])
    print()
    print_b("Checking that all test cases in metron_bad fail conversion")
    errors += check_commands_bad([
        f"bin/metron -c {filename} -o {filename.replace('_bad', '_sv').replace('.h', '.sv')}"
        for filename in metron_bad
    ])
    print()
    metron_sv = sorted(glob.glob("tests/metron_sv/*.sv"))
    print_b("Checking that all converted files match their golden version, if present")
    errors += check_commands_good([
        f"diff {filename} {filename.replace('_sv', '_golden')}"
        for filename in metron_sv
    ])
    print()
    ################################################################################
    # These tests are skipped in basic mode
    if not options.basic:
        print_b("Checking that all converted files can be parsed by Verilator")
        errors += check_commands_good([
            f"verilator -I. --lint-only {filename}"
            for filename in metron_sv
        ])
        print()
        if options.synth:
            print_b("Checking that all converted files can be synthesized by Yosys")
            errors += check_commands_good([
                f"yosys -q -p 'read_verilog -I. -sv {filename}; dump; synth_ice40 -json /dev/null'"
                for filename in metron_sv
            ])
        else:
            print_b("Checking that all converted files can be parsed by Yosys")
            errors += check_commands_good([
                f"yosys -q -p 'read_verilog -I. -sv {filename};'"
                for filename in metron_sv
            ])
        print()
        print_b("Checking that all converted files can be parsed by Icarus")
        errors += check_commands_good([
            f"iverilog -g2012 -Wall -I. -o /dev/null {filename}"
            for filename in metron_sv
        ])
        print()
        print_b("Running misc bad commands")
        errors += check_commands_bad([
            "bin/metron skjdlsfjkhdfsjhdf.h",
            "bin/metron -c skjdlsfjkhdfsjhdf.h",
        ])
        print()
        print_b("Running standalone tests")
        errors += check_commands_good([
            "bin/tests/metron_test",
            "bin/examples/uart",
            "bin/examples/uart_vl",
            "bin/examples/uart_iv",
            "bin/examples/rvsimple",
            "bin/examples/rvsimple_vl",
            "bin/examples/rvsimple_ref",
        ])
        print()
        # Lockstep tests are slow because compiler...
        print_b("Testing lockstep simulations")
        errors += test_lockstep()
        print()
        # Various tests to isolate quirks/bugs in Verilator/Yosys/Icarus
        verilator_good = sorted(glob.glob("tests/tools_good/verilator*.sv"))
        verilator_bad = sorted(glob.glob("tests/tools_bad/verilator*.sv"))
        yosys_good = sorted(glob.glob("tests/tools_good/yosys*.sv"))
        yosys_bad = sorted(glob.glob("tests/tools_bad/yosys*.sv"))
        icarus_good = sorted(glob.glob("tests/tools_good/icarus*.sv"))
        icarus_bad = sorted(glob.glob("tests/tools_bad/icarus*.sv"))
        print_b("Checking Verilator quirks")
        errors += check_verilator_good(verilator_good)
        errors += check_verilator_bad (verilator_bad)
        errors += check_verilator_good(yosys_good)
        errors += check_verilator_good(yosys_bad)
        errors += check_verilator_good(icarus_good)
        errors += check_verilator_good(icarus_bad)
        print()
        print_b("Checking Yosys quirks")
        errors += check_yosys_good(verilator_good)
        errors += check_yosys_good(verilator_bad)
        errors += check_yosys_good(yosys_good)
        errors += check_yosys_bad (yosys_bad)
        errors += check_yosys_good(icarus_good)
        errors += check_yosys_good(icarus_bad)
        print()
        print_b("Checking Icarus quirks")
        errors += check_icarus_good(verilator_good)
        errors += check_icarus_good(verilator_bad)
        errors += check_icarus_good(yosys_good)
        errors += check_icarus_good(yosys_bad)
        errors += check_icarus_good(icarus_good)
        errors += check_icarus_bad (icarus_bad)
        print()
    ############################################################
    print()
    print_b(f"Total failures : {errors}")
    print()
    if errors > 0:
        print_r(" #######  #####  ## ##      ")
        print_r(" ##      ##   ## ## ##      ")
        print_r(" #####   ####### ## ##      ")
        print_r(" ##      ##   ## ## ##      ")
        print_r(" ##      ##   ## ## ####### ")
    else:
        print_g(" ######  #####  ####### ####### ")
        print_g(" ##   ## ##  ## ##      ##      ")
        print_g(" ######  ###### ####### ####### ")
        print_g(" ##      ##  ##      ##      ## ")
        print_g(" ##      ##  ## ####### ####### ")
    return errors
################################################################################
def get_pool():
    """Return a worker pool sized to the CPU count, or a single worker when
    --coverage or --serial was requested."""
    workers = 1 if (options.coverage or options.serial) else multiprocessing.cpu_count()
    return multiprocessing.Pool(workers)
def print_c(color, *args):
    """Print *args in the given 0xRRGGBB truecolor, then reset the terminal."""
    r = (color >> 16) & 0xFF
    g = (color >> 8) & 0xFF
    b = (color >> 0) & 0xFF
    sys.stdout.write(f"\u001b[38;2;{r};{g};{b}m")
    print(*args)
    sys.stdout.write("\u001b[0m")
    sys.stdout.flush()
def print_r(*args):
    """Red text (failures)."""
    print_c(0xFF8080, *args)
def print_g(*args):
    """Green text (success)."""
    print_c(0x80FF80, *args)
def print_b(*args):
    """Blue text (headings)."""
    print_c(0x8080FF, *args)
################################################################################
def prep_cmd(cmd):
    """Split *cmd* into an argv list, wrapping Metron invocations with kcov
    when --coverage is active."""
    stripped = cmd.strip()
    if options.coverage and stripped.startswith("bin/metron "):
        stripped = ("kcov --exclude-region=KCOV_OFF:KCOV_ON --include-pattern=Metron "
                    "--exclude-pattern=submodules --exclude-line=debugbreak coverage "
                    + stripped)
    return [tok for tok in shlex.split(stripped) if tok]
def check_cmd_good(cmd):
    """Run *cmd*; return 0 when it succeeds, 1 (with a message) when it fails."""
    if not options.verbose:
        print(".", end="")
    else:
        print(cmd)
    sys.stdout.flush()
    result = subprocess.run(
        prep_cmd(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="charmap",
    )
    if not result.returncode:
        return 0
    print()
    print_r(f"Command failed: {cmd}")
    return 1
def check_cmd_bad(cmd, expected_outputs=[], expected_errors=[]):
    """Run *cmd* expecting it to FAIL.

    Returns 0 when the command fails as expected (and its output contains
    every string in *expected_outputs* / *expected_errors*), 1 otherwise.
    """
    if options.verbose:
        print(cmd)
    else:
        print(".", end="")
    sys.stdout.flush()
    cmd_result = subprocess.run(
        prep_cmd(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="charmap",
    )
    if cmd_result.returncode == 0:
        print()
        print_r(f"Command passed: {cmd}")
        return 1
    elif cmd_result.returncode == 34048:
        # NOTE(review): 34048 presumably encodes an abort/exception exit —
        # confirm against how subprocess reports it on this platform.
        print()
        print_r(f"Command threw an exception: {cmd}")
        return 1
    else:
        # Bug fix: CompletedProcess has no "stout" attribute; the original
        # raised AttributeError whenever expected strings were supplied.
        for text in expected_outputs:
            if not text in cmd_result.stdout:
                print()
                print_r(f"Command {cmd}")
                print_r(f'Did not produce expected output "{text}"')
                return 1
        for err in expected_errors:
            if not err in cmd_result.stdout:
                print()
                print_r(f"Command {cmd}")
                print_r(f'Did not produce expected error "{err}"')
                return 1
        return 0
def check_commands_good(commands):
    """Run every command in parallel; return how many failed."""
    return sum(get_pool().map(check_cmd_good, commands))
def check_commands_bad(commands):
    """Run every command in parallel; return how many did NOT fail as expected."""
    return sum(get_pool().map(check_cmd_bad, commands))
###############################################################################
def check_verilator_good(filenames):
    """Each file must lint cleanly under Verilator."""
    return check_commands_good([
        f"verilator -I. --lint-only {filename}"
        for filename in filenames
    ])
def check_verilator_bad(filenames):
    """Each file must FAIL Verilator lint.

    Fix: this previously called check_commands_good — a copy-paste slip;
    the *_bad helpers for Yosys and Icarus both expect failure.
    """
    return check_commands_bad([
        f"verilator -I. --lint-only {filename}"
        for filename in filenames
    ])
def check_yosys_good(filenames):
    """Each file must synthesize cleanly under Yosys (ice40 target)."""
    return check_commands_good([
        f"yosys -q -p 'read_verilog -I. -sv {filename}; dump; synth_ice40 -json /dev/null'"
        for filename in filenames
    ])
def check_yosys_bad(filenames):
    """Each file must FAIL Yosys synthesis."""
    return check_commands_bad([
        f"yosys -q -p 'read_verilog -I. -sv {filename}; dump; synth_ice40 -json /dev/null'"
        for filename in filenames
    ])
def check_icarus_good(filenames):
    """Each file must compile cleanly under Icarus Verilog."""
    return check_commands_good([
        f"iverilog -g2012 -Wall -I. -o /dev/null {filename}"
        for filename in filenames
    ])
def check_icarus_bad(filenames):
    """Each file must FAIL Icarus Verilog compilation."""
    return check_commands_bad([
        f"iverilog -g2012 -Wall -I. -o /dev/null {filename}"
        for filename in filenames
    ])
###############################################################################
# Make sure all lines starting with "// X " in the source text appear in the
# output
def check_bad_expected_errors(filename):
    """Run Metron on *filename* expecting failure; every '// X ' annotation in
    the source must show up in the command's output."""
    lines = open(filename).readlines()
    expected_errors = [line[4:].strip() for line in lines if line.startswith("// X ")]
    # NOTE(review): the '_good' -> '_sv' replace looks copied from the
    # good-file path; confirm whether '_bad' was intended for this helper.
    cmd = f"bin/metron -c {filename} -o {filename.replace('_good', '_sv').replace('.h', '.sv')}"
    return check_cmd_bad(cmd, expected_errors, [])
################################################################################
def build_lockstep(filename):
    """Compile one lockstep test (Metron conversion + Verilator build + link).

    Returns the number of failed build steps.
    """
    # Bug fix: str.rstrip(".h") strips *characters* from the set {'.', 'h'},
    # mangling any test name ending in 'h' or '.'; strip the literal suffix.
    test_name = filename[:-2] if filename.endswith(".h") else filename
    # Test source is the same for all lockstep tests, we just change the
    # included files.
    test_src = f"tests/test_lockstep.cpp"
    mt_root = f"tests/metron_lockstep"
    sv_root = f"gen/{mt_root}/metron_sv"
    vl_root = f"gen/{mt_root}/metron_vl"
    # Our lockstep test top modules are all named "Module". Verilator will
    # name the top module after the <test_name>.sv filename.
    mt_top = f"Module"
    vl_top = f"V{test_name}"
    mt_header = f"{mt_root}/{test_name}.h"
    vl_header = f"{vl_root}/V{test_name}.h"
    vl_obj = f"{vl_root}/V{test_name}__ALL.o"
    test_obj = f"obj/{mt_root}/{test_name}.o"
    test_bin = f"bin/{mt_root}/{test_name}"
    includes = f"-I. -Isymlinks -I{sv_root} -I/usr/local/share/verilator/include"
    errors = 0
    cmd = f"bin/metron -q -c {mt_root}/{test_name}.h -o {sv_root}/{test_name}.sv"
    errors += check_cmd_good(cmd)
    cmd = f"verilator {includes} --cc {test_name}.sv -Mdir {vl_root}"
    errors += check_cmd_good(cmd)
    cmd = f"make -C {vl_root} -f V{test_name}.mk"
    errors += check_cmd_good(cmd)
    cmd = f"g++ -O3 -std=gnu++2a -DMT_TOP={mt_top} -DVL_TOP={vl_top} -DMT_HEADER={mt_header} -DVL_HEADER={vl_header} {includes} -c {test_src} -o {test_obj}"
    errors += check_cmd_good(cmd)
    cmd = f"g++ {test_obj} {vl_obj} obj/symlinks/metrolib/core/Utils.o obj/verilated.o obj/verilated_threads.o -o {test_bin}"
    errors += check_cmd_good(cmd)
    return errors
################################################################################
def test_lockstep():
    """Build every lockstep simulation and run it; return the failure count."""
    tests = [
        "counter.h",
        "lfsr.h",
        "funcs_and_tasks.h",
        "lockstep_bad.h",
        "timeout_bad.h",
    ]
    for prefix in ("gen", "obj", "bin"):
        os.system(f"mkdir -p {prefix}/tests/metron_lockstep")
    # Build all the lockstep tests in parallel.
    errors = sum(get_pool().map(build_lockstep, tests))
    # These lockstep tests should pass.
    for good in ("counter", "lfsr", "funcs_and_tasks"):
        errors += check_cmd_good(f"bin/tests/metron_lockstep/{good}")
    # These two are expected to fail, to exercise the lockstep test system.
    for bad in ("lockstep_bad", "timeout_bad"):
        errors += check_cmd_bad(f"bin/tests/metron_lockstep/{bad}")
    return errors
################################################################################
if __name__ == "__main__":
    # Exit code is the failure count so CI can detect regressions.
    sys.exit(main())
# source: FreddyYJ/PyVerilog run_tests.py
def find_highest_bidder(Bidders):
    """Return the name of the bidder with the highest bid in *Bidders*
    (a mapping of name -> bid amount)."""
    max_bid = None
    max_bidder = None
    for Name, Bid in Bidders.items():
        if max_bid is None or Bid > max_bid:
            max_bid = Bid
            max_bidder = Name
    # Bug fix: the original returned the loop variable `Name` (whichever
    # bidder happened to be iterated last), not the tracked maximum.
    return max_bidder
def main():
    """Collect bids interactively, then announce the highest bidder."""
    print("Welcome to the silent auction program.")
    bids = {}
    while True:
        bidder = input("What is your name? ")
        bids[bidder] = int(input("What is your bid? Rs."))
        answer = input("Are there any other bidders? Type 'yes' or 'no' : ").lower().strip()
        # Any reply other than 'no' keeps collecting — same flow as before.
        if answer == 'no':
            break
    winner = find_highest_bidder(bids)
    print(f"The winner is {winner} with a bid of Rs.{bids[winner]}")
# Script entry point (trailing extraction junk removed from this line).
main()
import ast
import discord
import config
import traceback
import datetime
import market
import os
import psutil
from discord.ext import commands, tasks, menus
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_option, create_choice
class InventoryMenu(menus.ListPageSource):
    """Page source that renders a user's bread inventory, 10 items per page."""
    def __init__(self, data, max=25):
        # max: the user's storage capacity; og: the full (unpaginated) list,
        # kept so the footer can show total usage.
        super().__init__(data, per_page=10)
        self.max = max
        self.og = data
    async def format_page(self, menu, entries):
        """Build the embed for one page of inventory entries."""
        offset = menu.current_page * self.per_page
        desc = ""
        for i, v in enumerate(entries, start=offset):
            # v['index'] indexes into the global bread catalog in config.
            n = config.breads[v['index']]['name']
            special_string = v.get('special', None)
            if special_string is not None:
                special_string = f" `{special_string}`"
            else:
                special_string = ""
            desc += f"{config.breads[v['index']]['emoji']} · **{n}**{special_string}\n"
        embed = discord.Embed(
            title="Bread Inventory",
            color=config.MAINCOLOR,
            description=desc
        )
        # Expansion cost scales linearly with current capacity.
        e_cost = int((self.max/config.expand_amount) * config.expand_cost)
        embed.add_field(name="<:BreadStaff:815484321590804491> Storage Expansion", value=f"`pan expand`\nCost: `{e_cost}` <:BreadCoin:815842873937100800>\n*+{config.expand_amount} slots*")
        embed.set_footer(text=f"Showing {menu.current_page + 1}/{menu._source.get_max_pages()} | Storage Capacity: {len(self.og)}/{self.max}")
        return embed
class CustomMenuManager(menus.MenuPages):
    """MenuPages subclass whose initial message is sent via config.reply so it
    appears as a reply to the invoking message."""
    async def send_initial_message(self, ctx, channel):
        """|coro|
        The default implementation of :meth:`Menu.send_initial_message`
        for the interactive pagination session.
        This implementation shows the first page of the source.
        """
        page = await self._source.get_page(0)
        kwargs = await self._get_kwargs_from_page(page)
        # Reply in-channel instead of menus' default channel.send.
        return await config.reply(ctx, **kwargs)
class Information(commands.Cog):
    """Informational commands (inventory, reminders, balance, info, stats)
    plus the background loop that delivers due reminder timers via DM."""
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_ready(self):
        # on_ready can fire more than once per session; starting an already
        # running tasks.Loop raises RuntimeError, so guard the start.
        if not self.timer_loop.is_running():
            self.timer_loop.start()
    def cog_unload(self):
        self.timer_loop.cancel()
    @tasks.loop(minutes=1)
    async def timer_loop(self):
        """DM every due, unexpired timer to its owner, then mark the batch
        expired (and sent, when the DM succeeded)."""
        sent_timers = []
        expired_timers = []
        for timer in self.bot.mongo.db.timers.find({'time': {'$lte': datetime.datetime.utcnow()}, 'expired': False}):
            expired_timers.append(timer)
            user = self.bot.get_user(timer['owner'])
            if user is None:
                try:
                    user = await self.bot.fetch_user(timer['owner'])
                except:
                    continue
            if timer['message'].strip(" ") == "":
                timer['message'] = "No message provided."
            embed = discord.Embed(color=config.MAINCOLOR, title="Time's up!", description=timer['message'], timestamp=timer['created'])
            if 'link' in timer:
                embed.description += f"\n\n[message link]({timer['link']})"
            embed.set_footer(text="This timer was scheduled for")
            try:
                await user.send(embed=embed)
                sent_timers.append(timer)
            except:
                # DMs can be closed; the timer is still marked expired below.
                continue
        self.bot.mongo.db.timers.update_many({'_id': {'$in': list(x['_id'] for x in expired_timers)}}, {'$set': {'expired': True}})
        self.bot.mongo.db.timers.update_many({'_id': {'$in': list(x['_id'] for x in sent_timers)}}, {'$set': {'sent': True}})
    async def inventory_command(self, ctx):
        """Show the invoker's bread inventory, paginated when non-empty."""
        user = self.bot.mongo.get_user(ctx.author.id)
        desc = ""
        if len(user['inventory']) < 1:
            desc = "You have no bread. Try managing your bakery with `pan bakery`."
            embed = discord.Embed(
                title="Bread Inventory",
                color=config.MAINCOLOR,
                description=desc
            )
            embed.set_footer(text=f"Showing 0/0")
            await config.reply(ctx, embed=embed)
        else:
            pages = CustomMenuManager(source=InventoryMenu(user['inventory'], max=user.get('inventory_capacity', 25)), clear_reactions_after=True)
            await pages.start(ctx)
    async def remind_command(self, ctx, args):
        """Parse '<duration tokens> <message>' (e.g. '1h 2m 4s note') and
        schedule a reminder for the invoker."""
        user = self.bot.mongo.get_user(ctx.author.id)
        if args is None:
            await config.reply(ctx, "<:melonpan:815857424996630548> `Please tell me the timer length and note: e.g. 'pan remind 1h 2m 4s take out the bread'`")
            return
        splitted = args.split(" ")
        time = {"h": 0, "m": 0, "s": 0, "d": 0}
        message = []
        for word in splitted:
            # Tokens ending in h/m/s/d with a numeric prefix are duration
            # parts; everything else becomes part of the reminder message.
            if word[len(word) - 1].lower() in ['h', 'm', 's', 'd']:
                try:
                    time[word[len(word) - 1].lower()] += int(word[:len(word) - 1])
                except:
                    message.append(word)
            else:
                message.append(word)
        length = datetime.timedelta(days=time['d'], hours=time['h'], minutes=time['m'], seconds=time['s'])
        if length.total_seconds() < 1:
            await config.reply(ctx, "<:melonpan:815857424996630548> `Please tell me the timer length and note: e.g. 'pan remind 1h 2m 4s take out the bread'`")
            return
        remind_time = datetime.datetime.utcnow() + length
        message = " ".join(message)
        if message in [" ", "", None]:
            message = "something"
        embed = discord.Embed(color=config.MAINCOLOR, timestamp=remind_time)
        embed.set_footer(text=f"I will remind you about {message} at >")
        msg = await config.reply(ctx, embed=embed)
        self.bot.mongo.db.timers.insert_one({'owner': ctx.author.id, 'link': msg.jump_url, 'time': remind_time, 'created': datetime.datetime.utcnow(), 'message': message, 'id': ctx.message.id, 'sent': False, 'expired': False})
    async def reminders_command(self, ctx):
        """List the invoker's pending timers with their remaining time."""
        timers = self.bot.mongo.db.timers.find({'owner': ctx.author.id, 'expired': False})
        desc = ""
        for timer in timers:
            msg = timer['message']
            if len(msg) > 35:
                msg = msg[:32] + "..."
            s = (timer['time'] - datetime.datetime.utcnow()).total_seconds()
            hours, remainder = divmod(s, 3600)
            minutes, seconds = divmod(remainder, 60)
            desc += f" • {msg} - **{round(hours)}h {round(minutes)}m {round(seconds)}s**\n"
        if desc == "":
            desc = "You have no timers. Create one with `pan timer <time> <message>`\ne.g. `pan timer 120m 30s take out the sourdough bread`"
        embed = discord.Embed(color=config.MAINCOLOR, title="Timers", description = desc)
        await config.reply(ctx, embed=embed)
    async def bal_command(self, ctx, member):
        """Show pocket coins plus the current market value of held bread."""
        if member is None:
            member = ctx.author
        user = self.bot.mongo.get_user(member.id)
        assets = 0
        for item in user['inventory']:
            r = config.breads[item['index']]
            item_price = market.ItemPrice(r['price'], r['volitility'], item['index'])
            today_price = round(item_price.get_price(market.get_day_of_year_active()))
            assets += today_price
        embed=discord.Embed(
            title="Baker Balance",
            description=f"**Pocket**: `{user['money']}` <:BreadCoin:815842873937100800>\n**Bread Worth**: `{assets}` <:BreadCoin:815842873937100800>\n**Total Assets**: `{user['money'] + assets}` <:BreadCoin:815842873937100800>",
            color=config.MAINCOLOR
        )
        await config.reply(ctx, embed=embed)
    async def info_command(self, ctx):
        """Show bot links, aggregate Discord stats and process resource usage."""
        embed = discord.Embed(title="Melonpan Bot Info", color=config.MAINCOLOR, timestamp=datetime.datetime.utcnow())
        embed.set_thumbnail(url=str(self.bot.user.avatar_url))
        embed.description = f"[Github Repo](https://github.com/kajdev/melonpan)\n[Bread Server](https://discord.gg/bread)\n[Support Server](https://discord.gg/ueYyZVJGcf)\n[Patreon](https://www.patreon.com/MelonpanBot)"
        u = 0
        for g in self.bot.guilds:
            u+=g.member_count
        embed.add_field(name="Discord Stats", value=f"Guilds: `{len(self.bot.guilds)}`\nUsers: `{u}`\nAvg MSG/s: `{round(config.get_avg_messages(), 3)}`")
        pid = os.getpid()
        py = psutil.Process(pid)
        memoryUse = py.memory_info()[0]/2.**30  # resident set size, in GB
        cpuUse = py.cpu_percent()
        embed.add_field(name="System Usage", value=f"Memory: `{round(memoryUse*1000, 3)} MB`\nCPU: `{round(cpuUse, 3)} %`")
        embed.set_footer(text=f"discord.py v{discord.__version__}")
        await ctx.send(embed=embed)
    async def stats_command(self, ctx, member):
        """Show baking statistics (plus badges) for *member* or the invoker."""
        if member is None:
            member = ctx.author
        user = self.bot.mongo.get_user(member.id)
        embed=discord.Embed(
            title="Baker Info",
            color=config.MAINCOLOR
        )
        embed.set_thumbnail(url=member.avatar_url)
        # Bug fix: member.guild is a Guild object, so comparing it to the raw
        # id int was always unequal; compare .id instead.
        if isinstance(member, discord.Member) and member.guild.id != 814958240009420830:
            guild = self.bot.get_guild(814958240009420830)
            try:
                mem = await guild.fetch_member(member.id)
            except:
                mem = None
        elif isinstance(member, discord.User):
            guild = self.bot.get_guild(814958240009420830)
            try:
                mem = await guild.fetch_member(member.id)
            except:
                mem = None
        else:
            mem = member
        if mem is not None:
            # Holders of the special role get badge 7 added on the fly.
            if 814964592076652554 in [x.id for x in mem.roles]:
                if 7 not in user['badges']:
                    user['badges'].append(7)
        if len(user.get('badges', [])) > 0:
            embed.description = " ".join(config.badges[x]['emoji'] for x in user.get('badges', []))
        fav = {'name': "None", 'amount': 0}
        total = 0
        for x, y in user['baked'].items():
            total += y
            if y > fav['amount']:
                fav = {'name': config.breads[int(x)]['name'], 'amount': y}
        embed.add_field(name="Baking Stats", value=f"Favorite Bread: **{fav['name']}** ({fav['amount']} bakes)\nBreads Baked: **{total}**\nBreadCoin: **{user['money']}** <:BreadCoin:815842873937100800>\nOvens: **{user['oven_count']}**\nInventory Capacity: **{user.get('inventory_capacity', 25)}**")
        await ctx.send(embed=embed)
    @cog_ext.cog_slash(name="inventory",
        description="Show your inventory.")
    async def inventory_slash(self, ctx: SlashContext):
        await self.inventory_command(ctx)
    @cog_ext.cog_slash(name="remind",
        description="Set a reminder for a specific time in the future.",
        options=[
            create_option(
                name="time",
                description="specific duration, e.g. (5h 32m 23s) (leave blank to check reminders)",
                option_type=3,
                required=False
            ),
            create_option(
                name="message",
                description="What do you want to be reminded about?",
                option_type=3,
                required=False
            )
        ])
    async def remind_slash(self, ctx: SlashContext, time:str=None, message:str="something"):
        # No time supplied -> list existing reminders instead of creating one.
        if time is None:
            await self.reminders_command(ctx)
        else:
            await self.remind_command(ctx, time+" "+message)
    @cog_ext.cog_slash(name="balance",
        description="Show your BreadCoin balance.",
        options=[
            create_option(
                name="member",
                description="The member to show balance for.",
                option_type=6,
                required=False
            )
        ])
    async def bal_slash(self, ctx: SlashContext, member=None):
        await self.bal_command(ctx, member)
    @cog_ext.cog_slash(name="stats",
        description="Show stats for yourself or another baker.",
        options=[
            create_option(
                name="member",
                description="The member to show stats for.",
                option_type=6,
                required=False
            )
        ])
    async def stats_slash(self, ctx: SlashContext, member=None):
        await self.stats_command(ctx, member)
    @commands.command(aliases=['list', 'bread', 'all', 'breadlist', 'listbread', 'allbread'])
    async def breads(self, ctx):
        """List every bread item in the catalog."""
        embed = discord.Embed(title="All Items", color=config.MAINCOLOR, description = "*use `pan shop <bread>` to get more specific price info about an item.*\n\n")
        for bread in config.breads:
            embed.description += f"> {bread['emoji']} **{bread['name']}**\n"
        await config.reply(ctx, embed=embed)
    @commands.command(aliases=['i', 'inv', 'items', 'in', 'bag'])
    async def inventory(self, ctx):
        await self.inventory_command(ctx)
    @commands.command(aliases=['timer', 'time', 'r', 'remindme', 'rm'])
    async def remind(self, ctx, *, args:str=None):
        await self.remind_command(ctx, args)
    @commands.command(aliases=['timers'])
    async def reminders(self, ctx):
        await self.reminders_command(ctx)
    @commands.command(aliases=['money', 'balance', 'm', 'wallet', 'breadcoin', 'coin', 'coins'])
    async def bal(self, ctx, member : discord.Member = None):
        await self.bal_command(ctx, member)
    @commands.command(aliases=['about'])
    async def info(self, ctx):
        await self.info_command(ctx)
    @commands.command(aliases=['stat', 'profile', 'user'])
    async def stats(self, ctx, member : discord.Member = None):
        await self.stats_command(ctx, member)
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(Information(bot))
# source: KAJdev/Melonpan Cogs/Information.py
from pathlib import Path
import hashlib
def get_str_md5(fh):
    """Yield (line, md5-hex-digest-of-line) pairs for every line of *fh*."""
    for line in fh.readlines():
        yield line, hashlib.md5(line.encode()).hexdigest()
# Hash every line of recipes.txt (in the current directory) and print the
# line next to its MD5 digest.
f_name = Path.cwd() / 'recipes.txt'
# Context manager ensures the file is closed (the original leaked the handle).
with open(f_name, 'r', encoding='UTF-8') as f:
    for ist_r, imd5_hex in get_str_md5(f):
        print(f'{ist_r}  {imd5_hex}')
# source: Sergey-Gorb/apyles4 apyles4job2.py
import sys
input = sys.stdin.readline
# Pick exactly M of N triples maximizing |sum x| + |sum y| + |sum z|:
# try all 2^3 sign assignments and run a pick-exactly-j DP for each.
N, M = map(int, input().split())
a = []
for _ in range(N):
    x, y, z = map(int, input().split())
    a.append((x, y, z))
res = -float("inf")
for k in range(8):
    # dp[i][j]: best signed sum using j triples from the first i.
    dp = [[-float("inf")] * (M + 1) for _ in range(N + 1)]
    for i in range(N):
        dp[i][0] = 0
        x, y, z = a[i]
        # Flip each coordinate's sign according to bit k.
        x *= (-1) ** ((k & 1 == 1))
        y *= (-1) ** ((k & 2 == 2))
        z *= (-1) ** ((k & 4 == 4))
        #print(k, x, y, z)
        for j in range(M + 1):
            # Unreachable states (except the free j=0 column) are skipped.
            if dp[i][j] == -float("inf") and (j > 0): continue
            dp[i + 1][j] = max(dp[i + 1][j], dp[i][j])
            if j < M:
                dp[i + 1][j + 1] = max(dp[i + 1][j + 1], dp[i][j] + x + y + z)
    #print(dp)
    res = max(res, dp[-1][-1])
# Output the best value (trailing extraction junk removed from this line).
print(res)
import argparse
from ndbc_analysis_utilities.BuoyDataUtilities import getActiveBOI, getMonthlyDF, getMonthName, getNthPercentileSampleWithoutPMF
from ndbc_analysis_utilities.NDBCBuoy import NDBCBuoy
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
def calcNGoodDays(dates: pd.core.series.Series) -> int:
    """Count the distinct days-of-month present in the datetime series *dates*."""
    return len(set(dates.dt.day))
def getNGoodDaysPerYear(df: pd.core.frame.DataFrame, years: list[int], month: int, minPeriod: float, wvhtPercentile: float) -> list[int]:
    """For each year, count days in *month* whose samples reach the wave-height
    percentile threshold AND have a dominant period >= *minPeriod*."""
    monthly = getMonthlyDF(df, month)
    wvhtThreshold = getNthPercentileSampleWithoutPMF(monthly['WVHT'].to_numpy(), wvhtPercentile)
    goodSamples = monthly[(monthly['WVHT'] >= wvhtThreshold) & (monthly['DPD'] >= minPeriod)]
    print(f'percentage of good day samples = {len(goodSamples) / len(monthly) * 100:.2f}')
    return [
        calcNGoodDays(goodSamples[goodSamples['Date'].dt.year == y]['Date'])
        for y in years
    ]
def plotGoodDaysPerYear(nGoodDays: list, years: list, stationID: str, showPlot: bool, minPeriod: float, wvhtPercentile: float, month: int):
    """Plot the per-year good-day counts for one station, then either show the
    figure or save it as a PNG depending on *showPlot*."""
    threshold_note = f'period >= {minPeriod} s and wvht >= {wvhtPercentile}th %'
    fig, ax = plt.subplots()
    ax.plot(years, nGoodDays, 'o-', color='royalblue', zorder=2)
    ax.set_title(f'Station {stationID} good days per year in {getMonthName(month)}')
    ax.set_xlabel('Year')
    ax.set_ylabel('# of good days')
    ax.grid(zorder=1)
    # Annotate the filter criteria in axes coordinates.
    ax.text(0.55, 0.95, threshold_note, transform=ax.transAxes, fontsize=8, zorder=2)
    ax.set_xticks(years)
    ax.set_ylim([-0.5, ax.get_ylim()[1]])
    if not showPlot:
        plt.savefig(f'station_{stationID}_NGoodDaysPerYear.png', format='png')
    else:
        plt.show()
def makeNGoodDaysPlots(activeBOI: dict, args: argparse.Namespace):
    """Build historical data for each buoy of interest and plot its counts."""
    currentYear = datetime.datetime.now().year
    yearRange = list(range(currentYear - args.nYears, currentYear))
    for stationID in activeBOI:
        buoy = NDBCBuoy(stationID)
        buoy.nYearsBack = args.nYears
        buoy.nHistoricalMonths = 12
        buoy.buildHistoricalDataFrame()
        goodDayCounts = getNGoodDaysPerYear(buoy.dataFrameHistorical, yearRange, args.month, args.minPeriod, args.wvhtPercentile)
        plotGoodDaysPerYear(goodDayCounts, yearRange, stationID, args.show, args.minPeriod, args.wvhtPercentile, args.month)
def main():
    """Parse CLI arguments and generate a good-days plot per buoy of interest."""
    parser = argparse.ArgumentParser()
    arg_specs = [
        ("--bf", dict(type=str, required=True, help="text file name containing buoys of interest")),
        ("--nYears", dict(type=int, required=True, help="# of years to include in historical data")),
        ("--minPeriod", dict(type=float, required=True, help="minimum swell period [s] for filtering historical data")),
        ("--wvhtPercentile", dict(type=float, required=True, help="selected measurements need to have wvht measurements at or above this percentile")),
        ("--month", dict(type=int, required=True, help="month to look at (1-12)")),
        ("--show", dict(action='store_true', help="use this flag if you want to display the figures instead of saving them")),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    makeNGoodDaysPlots(getActiveBOI(args.bf), args)
# Run only when executed directly (not on import).
if __name__ == "__main__":
    main()
# source: ewackerbarth1/Buoy_Data PlotNGoodDaysEachYear.py
def main():
    """Greedy from both ends: print the 1-indexed days that MUST be worked.

    Input: N K C on one line, then the availability string S; a work day can
    only be followed by another after a cooldown of C days.
    """
    N, K, C = map(int, input().split())
    S = input()
    # head[i] / tail[i]: earliest / latest possible day (0-indexed) for the
    # i-th work day; sentinels make the first cooldown comparison succeed.
    head, tail = [-C - 1] * (K + 1), [N + C + 1] * (K + 1)
    idx = 0
    for i in range(N):
        if S[i] == 'o' and i - head[idx] > C:
            idx += 1
            head[idx] = i
            if idx == K:
                break
    idx = K
    for i in range(N - 1, -1, -1):
        if S[i] == 'o' and tail[idx] - i > C:
            idx -= 1
            tail[idx] = i
            if idx == 0:
                break
    # A day is forced exactly when its earliest and latest slots coincide.
    for i in range(K):
        if head[i + 1] == tail[i]:
            print(tail[i] + 1)
# Entry point guard (competitive-programming submission style).
if __name__ == '__main__':
    main()
# source: Aasthaengg/IBMdataset Python_codes/p02721/s190544189.py
# Read a line of words, list the palindromes among them, then report how
# many times a queried word appears in the input.
words = input().split()
palindrome = input()
# Slicing is the idiomatic (and fastest) way to reverse a string; the old
# comment claiming "word == word[::-1]" was "much slower" had it backwards —
# building via reversed() + join() is the slower path.
palindromes_list = [word for word in words if word == word[::-1]]
print(palindromes_list)
count = words.count(palindrome)
print(f"Found palindrome {count} times")
4093725064 | #!/usr/bin/env python3
import sys

# Compare two text files (paths given on the command line) character by
# character and report every position where they differ.
file1_name = sys.argv[1]
file2_name = sys.argv[2]

# Context managers guarantee the file handles are closed
# (the original opened both files and never closed them).
with open(file1_name, "r") as file1:
    data1 = file1.read().rstrip()
with open(file2_name, "r") as file2:
    data2 = file2.read().rstrip()

if len(data1) != len(data2):
    print("Length mismatch\n")

# Only compare up to the shorter length to avoid an IndexError.
len_min = min(len(data1), len(data2))
mismatches = [i for i in range(len_min) if data1[i] != data2[i]]
for pos in mismatches:
    # "Missmatch" typo fixed in the user-facing message.
    print("Mismatch at {0}: \n\tIn {1}: {2}\n\tIn {3}: {4}\n".format(pos, file1_name, data1[pos], file2_name, data2[pos]))
| MartinMosbeck/HW_SW_CoDesign_LU | tools/scripts/compare.py | compare.py | py | 511 | python | en | code | 0 | github-code | 90 |
class Solution:
    """Bitwise AND of a whole integer range, plus small base-2 helpers."""

    def rangeBitwiseAnd(self, m: int, n: int) -> int:
        """Return m & (m+1) & ... & n.

        Strips low bits until m and n share the same binary prefix; every
        bit below the common prefix flips somewhere in the range, so only
        the shared prefix survives the AND.
        """
        shifts = 0
        while m != n:
            m >>= 1
            n >>= 1
            shifts += 1
        return m << shifts

    def num_to_binary(self, m: int):
        """Return the binary-string representation of a non-negative int."""
        if m == 0:
            return "0"
        digits = []
        while m > 0:
            digits.append(str(m % 2))
            m //= 2
        return "".join(reversed(digits))

    def binary_to_num(self, x: str):
        """Parse a string of binary digits into an int (Horner's scheme)."""
        value = 0
        for digit in x:
            value = value * 2 + int(digit)
        return value
return res
# Ad-hoc driver: only the LAST uncommented (m, n) pair takes effect; the
# earlier assignment and the commented lines are kept for quick switching
# between test inputs.
m = 5; n = 7
m = 0; n = 1
# m = 1; n = 1
# m = 2; n = 1
# m = 6; n = 7
s = Solution()
# print(s.num_to_binary(0))
# print(s.binary_to_num("101"))
print(s.rangeBitwiseAnd(m,n))
| iamsuman/algorithms | iv/Leetcode/medium/201_bitwise_and.py | 201_bitwise_and.py | py | 711 | python | en | code | 2 | github-code | 90 |
28768454916 | #general imports
import logging
#specific imports from std
from time import sleep, time
from uuid import uuid4
from functools import partial
from ipaddress import ip_address
#imports from 3rd party
from secp256k1_zkp import PrivateKey
#general leer imports
from leer.syncer import Syncer
from leer.core.utils import DOSException
from leer.core.parameters.constants import serialized_genesis_block
#storage space imports
from leer.core.storage.storage_space import StorageSpace
from leer.core.chains.headers_manager import HeadersManager
from leer.core.chains.blockchain import Blockchain
from leer.core.storage.txos_storage import TXOsStorage
from leer.core.storage.headers_storage import HeadersStorage
from leer.core.storage.blocks_storage import BlocksStorage
from leer.core.storage.excesses_storage import ExcessesStorage
from leer.core.storage.utxo_index_storage import UTXOIndex
from leer.core.storage.mempool_tx import MempoolTx
#primitives imports
from leer.core.lubbadubdub.address import Address
from leer.core.lubbadubdub.transaction import Transaction
from leer.core.primitives.transaction_skeleton import TransactionSkeleton
#ops imports
from leer.core.core_operations.core_context import CoreContext
from leer.core.core_operations.sending_assets import notify_all_nodes_about_tx
from leer.core.core_operations.receiving_assets import process_new_headers, process_new_blocks, process_new_txos, process_tbm_tx
from leer.core.core_operations.sending_metadata import send_tip_info, notify_all_nodes_about_new_tip, send_find_common_root
from leer.core.core_operations.process_metadata import metadata_handlers
from leer.core.core_operations.notifications import set_notify_wallet_hook, set_value_to_queue
from leer.core.core_operations.downloading import download_status_checks
from leer.core.core_operations.process_requests import request_handlers
from leer.core.core_operations.handle_mining import mining_operations
from leer.core.core_operations.blockchain_initialization import init_blockchain, validate_state, set_ask_for_blocks_hook, set_ask_for_txouts_hook
logger = logging.getLogger("core_loop")
storage_space = None
def init_storage_space(config):
  """Create the global StorageSpace at config["location"]["basedir"] and
  initialise every storage/chain component inside one write transaction.

  The component constructors appear to attach themselves to the
  StorageSpace (the rest of this module accesses them via
  ``storage_space.headers_manager`` etc.), so the instances do not need
  to be kept in local variables here — the original's eight unused
  bindings were dropped.
  """
  global storage_space
  _path = config["location"]["basedir"]
  storage_space = StorageSpace(_path)
  with storage_space.env.begin(write=True) as wtx:
    # Constructors are called for their registration side effect only.
    HeadersStorage(storage_space, wtx=wtx)
    HeadersManager(storage_space,
                   do_not_check_pow=config.get('testnet_options', {}).get('do_not_check_pow', False))
    BlocksStorage(storage_space, wtx=wtx)
    ExcessesStorage(storage_space, wtx=wtx)
    TXOsStorage(storage_space, wtx=wtx)
    Blockchain(storage_space)
    MempoolTx(storage_space, config["fee_policy"], config.get("mining", {}))
    UTXOIndex(storage_space, wtx=wtx)
    # Load (or create) the chain state and sanity-check it.
    init_blockchain(storage_space, wtx=wtx, logger=logger)
    validate_state(storage_space, rtx=wtx, logger=logger)
def is_ip_port_array(x):
  """Return True iff every element of ``x`` is an (ip, port)-like pair:
  item[0] parses as an IP address and item[1] as an integer.

  An empty iterable vacuously yields True.
  """
  for entry in x:
    try:
      ip_address(entry[0])
      int(entry[1])
    except:
      return False
  return True
def core_loop(syncer, config):
  """Main event loop of the blockchain ('core') subprocess.

  Initialises the storage space, then endlessly drains the 'Blockchain'
  message queue, dispatching each message to the matching handler:
  network payloads (headers, blocks, txos, mempool txs), peer metadata,
  mining operations, wallet/RPC requests and test-only helpers.  All
  inter-process communication goes through ``syncer.queues``.
  """
  init_storage_space(config)
  nodes = {}
  requests = {} # requests to other node's subprocesses
  requests_cache = {"blocks":[], "txouts":[]} # requests of assets to other nodes
  set_ask_for_blocks_hook(storage_space.blockchain, requests_cache)
  set_ask_for_txouts_hook(storage_space.blocks_storage, requests_cache)
  if config['wallet']:
    set_notify_wallet_hook(storage_space.blockchain, syncer.queues['Wallet'])
  message_queue = syncer.queues['Blockchain']
  # Self-addressed reminders: their handlers below re-enqueue them with a
  # future 'time', producing a periodic tick.
  message_queue.put({"action":"give nodes list reminder"})
  message_queue.put({"action":"check requests cache"})
  #set logging
  default_log_level = logging.INFO;
  if "logging" in config:#debug, info, warning, error, critical
    loglevels = { "debug":logging.DEBUG, "info":logging.INFO, "warning":logging.WARNING, "error":logging.ERROR, "critical":logging.CRITICAL}
    if "base" in config["logging"] and config["logging"]["base"] in loglevels:
      logger.setLevel(loglevels[config["logging"]["base"]])
    if "core" in config["logging"] and config["logging"]["core"] in loglevels:
      # its ok to rewrite: the more specific "core" level overrides "base"
      logger.setLevel(loglevels[config["logging"]["core"]])
  is_benchmark = config.get('testnet_options', {}).get('benchmark', False)
  no_pow = config.get('testnet_options', {}).get('do_not_check_pow', False)
  def get_new_address(timeout=2.5): #blocking
    # Ask the wallet process for a fresh address; polls the queue and
    # raises if no answer arrives within `timeout` seconds.
    _id = str(uuid4())
    syncer.queues['Wallet'].put({'action':'give new address', 'id':_id, 'sender': "Blockchain"})
    result = None
    start_time=time()
    while True:
      put_back = [] #We wait for specific message, all others will wait for being processed
      while not message_queue.empty():
        message = message_queue.get()
        if (not 'id' in message) or (not message['id']==_id):
          put_back.append(message)
          continue
        result = message['result']
        break
      for message in put_back:
        message_queue.put(message)
      if result:
        break
      sleep(0.01)
      if time()-start_time>timeout:
        raise Exception("get_new_address timeout: probably wallet has collapsed or not running")
    if result=='error':
      raise Exception("Can not get_new_address: error on wallet side")
    address = Address()
    logger.info("Receiving address %s (len %d)"%( result, len(result)))
    address.deserialize_raw(result)
    return address
  def send_message(destination, message):
    # Route a message to another subprocess, stamping id/sender defaults.
    logger.debug("Sending message to %s:\t\t %s"%(str(destination), str(message)))
    if not 'id' in message:
      message['id'] = uuid4()
    if not 'sender' in message:
      message['sender'] = "Blockchain"
    syncer.queues[destination].put(message)
  def send_to_network(message):
    send_message("NetworkManager", message)
  notify = partial(set_value_to_queue, syncer.queues["Notifications"], "Blockchain")
  core_context = CoreContext(storage_space, logger, nodes, notify, send_message, get_new_address, config)
  logger.debug("Start of core loop")
  with storage_space.env.begin(write=True) as rtx: #Set basic chain info, so wallet and other services can start work
    notify("blockchain height", storage_space.blockchain.current_height(rtx=rtx))
    notify("best header", storage_space.headers_manager.best_header_height)
  while True:
    sleep(0.05)
    put_back_messages = []
    notify("core workload", "idle")
    while not message_queue.empty():
      message = message_queue.get()
      if 'time' in message and message['time']>time(): # delay this message
        put_back_messages.append(message)
        continue
      # Routine housekeeping messages are logged at debug level only.
      if (('result' in message) and message['result']=="processed") or \
         (('result' in message) and message['result']=="set") or \
         (('action' in message) and message['action']=="give nodes list reminder") or \
         (('action' in message) and message['action']=="check requests cache") or \
         (('action' in message) and message['action']=="take nodes list") or \
         (('result' in message) and is_ip_port_array(message['result'])):
        logger.debug("Processing message %s"%message)
      else:
        if 'action' in message:
          logger.info("Processing message `%s`"%message['action'])
        else:
          logger.info("Processing message %s"%message)
      if not 'action' in message: #it is response
        if message['id'] in requests: # response is awaited
          if requests[message['id']]=="give nodes list":
            requests.pop(message['id'])
            message_queue.put({"action":"take nodes list", "nodes":message["result"]})
        else:
          pass #Drop
        continue
      # NOTE(review): only the handlers inside this try are shielded from
      # DOSException; the handlers after the except blocks run unguarded.
      try:
        if ("node" in message) and (not message["node"] in nodes):
          nodes[message["node"]]={'node':message["node"]}
        if message["action"] == "take the headers":
          notify("core workload", "processing new headers")
          with storage_space.env.begin(write=True) as wtx:
            process_new_headers(message, nodes[message["node"]], wtx, core_context)
          notify("best header", storage_space.headers_manager.best_header_height)
        if message["action"] == "take the blocks":
          notify("core workload", "processing new blocks")
          with storage_space.env.begin(write=True) as wtx:
            initial_tip = storage_space.blockchain.current_tip(rtx=wtx)
            process_new_blocks(message, wtx, core_context)
            after_tip = storage_space.blockchain.current_tip(rtx=wtx)
            notify("blockchain height", storage_space.blockchain.current_height(rtx=wtx))
            if not after_tip==initial_tip:
              notify_all_nodes_about_new_tip(nodes, rtx=wtx, core=core_context, _except=[], _payload_except=[])
            look_forward(nodes, send_to_network, rtx=wtx)
        if message["action"] == "take the txos":
          notify("core workload", "processing new txos")
          with storage_space.env.begin(write=True) as wtx:
            process_new_txos(message, wtx=wtx, core=core_context)
            #After downloading new txos some blocs may become downloaded
            notify("blockchain height", storage_space.blockchain.current_height(rtx=wtx))
            look_forward(nodes, send_to_network, rtx=wtx)
        if message["action"] in request_handlers: #blocks, headers, txos and tbm
          notify("core workload", "processing "+message["action"])
          with storage_space.env.begin(write=False) as rtx:
            request_handlers[message["action"]](message, rtx=rtx, core=core_context)
        if message["action"] in metadata_handlers: # take tip, find common root [response]
          with storage_space.env.begin(write=False) as rtx:
            metadata_handlers[message["action"]](message, nodes[message["node"]], rtx=rtx, core=core_context)
        if message["action"] == "take TBM transaction":
          notify("core workload", "processing mempool tx")
          with storage_space.env.begin(write=False) as rtx:
            process_tbm_tx(message, rtx=rtx, core=core_context)
        if message["action"] == "give tip height":
          with storage_space.env.begin(write=False) as rtx:
            _ch=storage_space.blockchain.current_height(rtx=rtx)
            send_message(message["sender"], {"id": message["id"], "result": _ch})
            notify("blockchain height", _ch)
      except DOSException as e:
        logger.info("DOS Exception %s"%str(e))
        #raise e #TODO send to NM
      except Exception as e:
        # NOTE(review): `raise e` re-raises unchanged — any non-DOS error
        # escapes the loop and kills the core process.
        raise e
      if message["action"] == "give block info":
        notify("core workload", "reading block info")
        try:
          with storage_space.env.begin(write=False) as rtx:
            block_info = compose_block_info(message["block_num"], rtx=rtx)
          send_message(message["sender"], {"id": message["id"], "result":block_info})
        except Exception as e:
          send_message(message["sender"], {"id": message["id"], "result":"error", "error":str(e)})
      if message["action"] == "put arbitrary mining work" and is_benchmark:
        if not no_pow:
          raise Exception("`put arbitrary mining work` is only allowed for disabled pow checks")
        notify("core workload", "putting arbitrary mining work")
        # Fake a solved share: zero nonce + the most recent work template.
        message["nonce"] = b"\x00"*8
        message['partial_hash'] = list(storage_space.mempool_tx.work_block_assoc.inner_dict.keys())[-1]
        message['action'] = "take mining work"
      if message["action"] in mining_operations: #getwork, gbt, submitblock, submitwork
        notify("core workload", "processing" + message["action"])
        with storage_space.env.begin(write=True) as wtx:
          mining_operations[message["action"]](message, wtx, core_context)
      if message["action"] == "set mining address" and is_benchmark:
        address = Address()
        address.deserialize_raw(message["address"])
        core_context.mining_address = address
      if message["action"] == "give synchronization status":
        with storage_space.env.begin(write=False) as rtx:
          our_height = storage_space.blockchain.current_height(rtx=rtx)
          best_known_header = storage_space.headers_manager.best_header_height
          try:
            best_advertised_height = max([nodes[node]["height"] for node in nodes if "height" in nodes[node]])
          except:
            # no peer has advertised a height yet
            best_advertised_height = None
          send_message(message["sender"], {"id": message["id"],
                                           "result": {'height': our_height,
                                                      'best_known_header': best_known_header,
                                                      'best_advertised_height': best_advertised_height}})
          notify("best header", best_known_header)
          notify("blockchain height", our_height)
          notify("best advertised height", best_advertised_height)
      if message["action"] == "add tx to mempool":
        notify("core workload", "processing local transaction")
        response = {"id": message["id"]}
        #deserialization
        try:
          ser_tx = message["tx"]
          tx = Transaction(txos_storage = storage_space.txos_storage, excesses_storage = storage_space.excesses_storage)
          with storage_space.env.begin(write=False) as rtx:
            tx.deserialize(ser_tx, rtx)
            storage_space.mempool_tx.add_tx(tx, rtx=rtx)
            tx_skel = TransactionSkeleton(tx=tx)
            notify_all_nodes_about_tx(tx_skel.serialize(rich_format=True, max_size=40000), core_context, _except=[], mode=1)
          response['result']="generated"
        except Exception as e:
          response['result'] = 'error'
          response['error'] = str(e)
          logger.error("Problem in tx: %s"%str(e))
        send_message(message["sender"], response)
      #message from core_loop
      if message["action"] in download_status_checks: # txouts and blocks download status checks
        with storage_space.env.begin(write=True) as rtx:
          ret_mes = download_status_checks[message["action"]](message, rtx, core_context)
          if ret_mes:
            put_back_messages.append(ret_mes)
      if message["action"] == "take nodes list":
        # Merge the fresh peer list: add unknown peers, drop vanished ones.
        for node in message["nodes"]:
          if not node in nodes: #Do not overwrite
            nodes[node]={"node":node}
        disconnected_nodes = []
        for existing_node in nodes:
          if not existing_node in message["nodes"]:
            disconnected_nodes.append(existing_node)
        for dn in disconnected_nodes:
          nodes.pop(dn)
      if message["action"] == "give nodes list reminder":
        # Periodic (3 s) refresh of the peer list from the network manager.
        _id = str(uuid4())
        send_to_network({"action":"give intrinsic nodes list", "sender":"Blockchain", "id":_id})
        requests[_id] = "give nodes list"
        put_back_messages.append({"action": "give nodes list reminder", "time":int(time())+3} )
      if message["action"] == "stop":
        logger.info("Core loop stops")
        return
      if message["action"] == "shutdown":
        initiator = message["sender"]
        logger.info("Shutdown initiated by %s"%initiator)
        for receiver in ['NetworkManager', 'Blockchain', 'RPCManager', 'Notifications', 'Wallet']:
          send_message(receiver, {"action":"stop", "sender":initiator})
      if message["action"] == "check requests cache":
        # Periodic (5 s) flush: deduplicate queued asset requests and
        # re-emit them as chunked download-status checks.
        put_back_messages.append({"action": "check requests cache", "time":int(time())+5} )
        for k in requests_cache:
          if not len(requests_cache[k]):
            continue
          copy = list(set(requests_cache[k]))
          copy = sorted(copy, key= lambda x: requests_cache[k].index(x)) #preserve order of downloaded objects
          if k=="blocks":
            chunk_size=20
            while len(copy):
              request, copy = copy[:chunk_size], copy[chunk_size:]
              new_message = {"action": "check blocks download status", "block_hashes":request,
                             "already_asked_nodes": [], "id": str(uuid4()),
                             "time": -1 }
              message_queue.put(new_message)
            requests_cache[k] = []
          if k=="txouts":
            chunk_size=30
            while len(copy):
              request, copy = copy[:chunk_size], copy[chunk_size:]
              new_message = {"action": "check txouts download status", "txos_hashes": request,
                             "already_asked_nodes": [], "id": str(uuid4()),
                             "time": -1 }
              message_queue.put(new_message)
            requests_cache[k] = []
    for _message in put_back_messages:
      message_queue.put(_message)
    try:
      # End-of-iteration housekeeping: ping stale peers and republish the
      # best height advertised by any peer.
      with storage_space.env.begin(write=True) as rtx:
        check_sync_status(nodes, rtx=rtx, core_context=core_context)
      try:
        best_advertised_height = max([nodes[node]["height"] for node in nodes if "height" in nodes[node]])
      except:
        best_advertised_height = None
      notify("best advertised height", best_advertised_height)
    except Exception as e:
      logger.error(e)
def look_forward(nodes, send_to_network, rtx):
  """If our header chain is less than 100 headers ahead of our block
  chain, ask the first peer that advertises a better height for a common
  root, so header download can continue.
  """
  if storage_space.headers_manager.best_header_height >= storage_space.blockchain.current_height(rtx=rtx) + 100:
    return  # enough headers queued already
  for key in nodes:
    candidate = nodes[key]
    if 'height' not in candidate:
      continue
    if candidate['height'] <= storage_space.headers_manager.best_header_height:
      continue
    our_tip_hash = storage_space.blockchain.current_tip(rtx=rtx)
    send_find_common_root(storage_space.headers_storage.get(our_tip_hash, rtx=rtx),
                          candidate['node'], send=send_to_network)
    return  # one peer is enough
def compose_block_info(block_num, rtx):
  """Build a JSON-ready summary of the main-chain block at height
  ``block_num``: header fields plus the addresses of its inputs and the
  full txo details of its outputs.

  Raises Exception when ``block_num`` exceeds the current chain height.
  """
  tip = storage_space.blockchain.current_tip(rtx=rtx)
  height = storage_space.blockchain.current_height(rtx=rtx)
  if block_num > height:
    raise Exception("Unknown block")
  # The tip itself, or walk back to the ancestor at the requested height.
  block_hash = tip
  if block_num < height:
    block_hash = storage_space.headers_manager.find_ancestor_with_height(tip, block_num, rtx=rtx)
  block = storage_space.blocks_storage.get(block_hash, rtx=rtx)
  info = {'hash': block_hash.hex()}
  info['target'] = float(block.header.target)
  info['supply'] = block.header.supply
  info['timestamp'] = block.header.timestamp
  info['height'] = block.header.height
  info['inputs'] = []
  info['outputs'] = []
  for input_index in block.transaction_skeleton.input_indexes:
    spent_address = storage_space.txos_storage.find(input_index, rtx=rtx).address.to_text()
    info['inputs'].append((input_index.hex(), spent_address))
  for output_index in block.transaction_skeleton.output_indexes:
    txo = storage_space.txos_storage.find(output_index, rtx=rtx)
    info['outputs'].append({"output_id": output_index.hex(),
                            "address": txo.address.to_text(),
                            "lock_height": txo.lock_height,
                            "relay_fee": txo.relay_fee,
                            "version": txo.version,
                            "amount": txo.value})
  return info
def check_sync_status(nodes, rtx, core_context):
  """Send our tip info to every peer whose data looks stale.

  A peer qualifies when we have no 'last_update' from it (or it is older
  than 300 s) and we have not already pinged it within the last 5 s.
  """
  for key in nodes:
    node = nodes[key]
    if "last_update" in node and node["last_update"] + 300 >= time():
      continue  # peer info is still fresh
    if "last_send" in node and node["last_send"] + 5 >= time():
      continue  # we pinged this peer very recently
    send_tip_info(node_info=node, rtx=rtx, core=core_context)
| WTRMQDev/leer | leer/core/core_loop.py | core_loop.py | py | 19,441 | python | en | code | 5 | github-code | 90 |
18462637289 | import sys
#import numpy as np
#from collections import defaultdict
import math
#from collections import deque
input = sys.stdin.readline
def main():
n = int(input())
dp = [0]*n
dp[0] = list(map(int,input().split()))
for i in range(1,n):
dp[i] = list(map(int,input().split()))
dp[i][0] += max(dp[i-1][1],dp[i-1][2])
dp[i][1] += max(dp[i - 1][0], dp[i - 1][2])
dp[i][2] += max(dp[i - 1][0], dp[i - 1][1])
print(max(dp[n-1]))
if __name__ == "__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p03162/s700969248.py | s700969248.py | py | 523 | python | en | code | 0 | github-code | 90 |
32762919309 | import re
# Strip punctuation and normalize each review to lowercase. Check if the keyword appears in the review.
# Increment that keywords frequency per occurance. Sort the resultant dict by frequency and lexographically if frequencies are equal.
# O(R*W + WlgW) time, R is number of reviews, W is number of keywords. Sort takes O(WlgW) time.
# O(W) space, freq map has number of elements equal to the number of keywords + W elements for sorted frequencies since the sort is not inplace.
def topKFreqKeywords(reviews, keywords, k):
    """Return the k keywords mentioned by the most reviews.

    Matching is case-insensitive, each review counts at most once per
    keyword, and ties are broken lexicographically (ascending).

    BUGFIX: the original called ``str.replace('[^a-z]', '')`` intending a
    regex strip — but ``str.replace`` is literal, so punctuation was never
    removed and e.g. "anacell," failed to match "anacell".  Punctuation is
    now stripped with ``re.sub``.
    """
    keyword_set = set(keywords)
    keyword_freq = {}
    for review in reviews:
        # Lowercase, turn punctuation into separators, then tokenize.
        # set() gives once-per-review counting and O(1) membership.
        tokens = set(re.sub(r"[^a-z\s]", " ", review.lower()).split())
        for keyword in keyword_set & tokens:
            keyword_freq[keyword] = keyword_freq.get(keyword, 0) + 1
    # Sort by descending frequency, then ascending keyword; keep top k.
    top = sorted(keyword_freq.items(), key=lambda item: (-item[1], item[0]))[:k]
    return [keyword for keyword, _ in top]
# Self-test driver: two examples from the "top K frequently mentioned
# keywords" problem; expected results are asserted below.
k = 2
keywords = ["anacell", "cetracular", "betacellular"]
reviews = [
    "Anacell provides the best services in the city",
    "betacellular has awesome services",
    "Best services provided by anacell, everyone should use anacell",
]
result = topKFreqKeywords(reviews, keywords, k)
print(result)
assert ["anacell", "betacellular"] == result
# Second example: betacellular appears in 3 reviews; anacell and
# deltacellular tie at 2, broken alphabetically in favour of anacell.
k = 2
keywords = ["anacell", "betacellular", "cetracular", "deltacellular", "eurocell"]
reviews = [
    "I love anacell Best services; Best services provided by anacell",
    "betacellular has great services",
    "deltacellular provides much better services than betacellular",
    "cetracular is worse than anacell",
    "Betacellular is better than deltacellular.",
]
result = topKFreqKeywords(reviews, keywords, k)
print(result)
assert ["betacellular", "anacell"] == result | kelr/practice-stuff | leetcode/amazon-topkkeywords.py | amazon-topkkeywords.py | py | 2,023 | python | en | code | 0 | github-code | 90 |
74412921256 | '''
Created on August 11th 2016
@author: Thierry Souche
'''
from bottle import Bottle, request, run
from bson.objectid import ObjectId
from common.constants import oidIsValid
from common.constants import setserver_address, setserver_port
from common.constants import setserver_routes
from server.backend import Backend
"""
This script must be run in order to start the server.
Unit test can be run with test_setserver.py, provided that the bottle server
will have been started with the command line:
> cd /
> python /data/code/setgame/server/setserver.py
"""
if __name__ == "__main__":
    # Instantiate the backend that holds all game/player state.
    backend = Backend()
    # Instantiate the web server; routes are declared below, then `run`
    # starts listening on setserver_address:setserver_port.
    webserver = Bottle()

    # this route is used for checking that the server is up
    @webserver.route(setserver_routes('hello', False))
    def hello():
        """Liveness check: return a static greeting."""
        return "<p>Coucou les gens !!!</p>"

    # this route is for test purpose
    @webserver.route(setserver_routes('reset', False))
    def reset():
        """Test-only route: reset the backend state."""
        return backend.reset()

    @webserver.route(setserver_routes('nickname_available', False) + "<nickname>")
    def isNicknameAvailable(nickname):
        """Tell whether `nickname` is still free for registration."""
        return backend.isNicknameAvailable(nickname)

    @webserver.route(setserver_routes('register_player', False) + "<nickname>")
    def registerPlayer(nickname):
        """Register a new player; the password hash comes as a query arg."""
        passwordHash = request.query.get('passwordHash')
        return backend.registerPlayer(nickname, passwordHash)

    @webserver.route(setserver_routes('deregister_player', False) + "<playerid_str>")
    def deRegisterPlayer(playerid_str):
        """De-register an isolated player from a yet-to-start game."""
        if oidIsValid(playerid_str):
            result = backend.deRegisterPlayer(ObjectId(playerid_str))
        else:
            result = {'status': "ko", 'reason': "invalid playerID"}
        return result

    @webserver.route(setserver_routes('get_player_details', False) + "<nickname>")
    def getPlayerLoginDetails(nickname):
        """Return the login details of a player from its nickname."""
        return backend.getPlayerLoginDetails(nickname)

    @webserver.route(setserver_routes('get_gameid', False) + "<playerid_str>")
    def getGameID(playerid_str):
        """Return (as a string) the gameID of the game a player is in."""
        if oidIsValid(playerid_str):
            result = backend.getGameID(ObjectId(playerid_str))
            if result['status'] == "ok":
                result['gameID'] = str(result['gameID'])
        else:
            result = {'status': "ko", 'reason': "invalid playerID"}
        return result

    @webserver.route(setserver_routes('enlist_player', False) + "<playerid_str>")
    def enlistPlayer(playerid_str):
        """Enlist an isolated player into a yet-to-start game."""
        # check that the string passed is a valid ObjectId, and if so
        # call the backend.
        if oidIsValid(playerid_str):
            result = backend.enlistPlayer(ObjectId(playerid_str))
            if result['status'] == "ok":
                result = {'status': "ok", 'gameID': str(result['gameID'])}
        else:
            result = {'status': "ko"}
        return result

    @webserver.route(setserver_routes('enlist_team', False))
    def enlistTeam():
        """Enlist a constituted team (list of playerIDs) and start a game."""
        pid_list = []
        playerid_strings = request.query.getall('playerIDlist')
        # keep only the strings that are valid ObjectIds
        for playerid_str in playerid_strings:
            if oidIsValid(playerid_str):
                pid_list.append({'playerID': ObjectId(playerid_str)})
        result = backend.enlistTeam(pid_list)
        if result['status'] == "ok":
            result = {'status': "ok", 'gameID': str(result['gameID'])}
        return result

    @webserver.route(setserver_routes('get_turn', False) + "<gameid_str>")
    def getTurnCounter(gameid_str):
        """Return the current turn counter of a game (as a string)."""
        if oidIsValid(gameid_str):
            answer = backend.getTurnCounter(ObjectId(gameid_str))
            if answer['status'] == "ok":
                result = {
                    'status': "ok",
                    'turnCounter': str(answer['turnCounter'])
                }
            else:
                result = answer
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('get_game_finished', False) + "<gameid_str>")
    def getGameFinished(gameid_str):
        """Return whether a game is finished (as a string flag)."""
        if oidIsValid(gameid_str):
            answer = backend.getGameFinished(ObjectId(gameid_str))
            if answer['status'] == "ok":
                result = {
                    'status': "ok",
                    'gameFinished': str(answer['gameFinished'])
                }
            else:
                result = answer
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('get_nicknames', False) + "<playerid_str>")
    def getNicknames(playerid_str):
        """Return the nicknames of a player's team-mates."""
        if oidIsValid(playerid_str):
            playerID = ObjectId(playerid_str)
            result = {'status': "ok", 'nicknames': backend.getNicknames(playerID)}
        else:
            result = {'status': "ko"}
        return result

    @webserver.route(setserver_routes('soft_stop', False) + "<gameid_str>")
    def stopGame(gameid_str):
        """Soft-stop a game (stops only if the game allows it)."""
        if oidIsValid(gameid_str):
            result = backend.stopGame(ObjectId(gameid_str))
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('hard_stop', False) + "<gameid_str>")
    def hardStopGame(gameid_str):
        """Hard-stop a game (forces the stop with the 'hard' flag).

        BUGFIX: this handler was also named `stopGame`, silently shadowing
        the soft-stop function above; it only worked because bottle binds
        routes at decoration time. Renamed for clarity and debuggability.
        """
        if oidIsValid(gameid_str):
            result = backend.stopGame(ObjectId(gameid_str), True)
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('get_game_details', False) + "<gameid_str>")
    def getGameDetails(gameid_str):
        """Return the generic details of a game."""
        if oidIsValid(gameid_str):
            result = backend.getDetails(ObjectId(gameid_str))
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('get_step', False) + "<gameid_str>")
    def getStep(gameid_str):
        """Return the current step of a game."""
        if oidIsValid(gameid_str):
            result = backend.getStep(ObjectId(gameid_str))
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('get_history', False) + "<gameid_str>")
    def getHistory(gameid_str):
        """Return the full history of a game."""
        if oidIsValid(gameid_str):
            result = backend.getHistory(ObjectId(gameid_str))
        else:
            result = {'status': "ko", 'reason': "invalid gameID"}
        return result

    @webserver.route(setserver_routes('propose_set', False) + "<playerid_str>")
    def proposeSet(playerid_str):
        """Let a player propose a set of 3 card positions to the server."""
        if oidIsValid(playerid_str):
            playerID = ObjectId(playerid_str)
            set_dict = request.query.getall('set')
            set_list = []
            for s in set_dict:
                try:
                    set_list.append(int(s))
                except (TypeError, ValueError):
                    # BUGFIX: the original built an error dict here but then
                    # fell through and called the backend anyway; reject the
                    # malformed set immediately instead.
                    return {'status': "ko", 'reason': "invalid set"}
            result = backend.proposeSet(playerID, set_list)
        else:
            result = {'status': "ko", 'reason': "invalid playerID"}
        return result

    # this route enables test cases (register reference test players)
    @webserver.route(setserver_routes('test_reg_ref_players', False))
    def ForTestOnly_RegisterRefPlayers():
        """Test-only: register the 6 reference test players."""
        result = backend.ForTestOnly_RegisterRefPlayers()
        return result

    # this route enables test cases (enlist reference test players)
    @webserver.route(setserver_routes('test_enlist_ref_players', False))
    def ForTestOnly_EnlistRefPlayers():
        """Test-only: enlist the 6 reference test players in a game."""
        result = backend.ForTestOnly_EnlistRefPlayers()
        if result['status'] == "ok":
            result['gameID'] = str(result['gameID'])
        return result

    # this route enables test cases (delist all players)
    @webserver.route(setserver_routes('test_delist_players', False))
    def ForTestOnly_DelistAllPlayers():
        """Test-only: delist every player; return how many were delisted."""
        result = backend.ForTestOnly_DelistAllPlayers()
        return {'status': "ok", 'number_delisted': result}

    @webserver.route(setserver_routes('test_load_ref_game', False))
    def ForTestOnly_LoadRefGame():
        """Test-only: load and fully play the reference game selected by
        the 'test_data_index' query argument (must be 0 or 1)."""
        index = request.query.get('test_data_index')
        try:
            test_data_index = int(index)
            if test_data_index in (0, 1):
                result = backend.ForTestOnly_LoadRefGame(test_data_index)
                if result['status'] == "ok":
                    result = {'status': "ok", 'gameID': str(result['gameID'])}
            else:
                result = {'status': "ko", 'reason': "wrong index value"}
        except:
            result = {'status': "ko", 'reason': "invalid index"}
        return result

    @webserver.route(setserver_routes('test_back_to_turn', False) + "<index>/<turn>")
    def ForTestOnly_BackToTurn(index, turn):
        """Test-only: roll a loaded reference game back to a given turn."""
        try:
            index = int(index)
        except:
            return {'status': "ko", 'reason': "invalid index arguments"}
        try:
            turn = int(turn)
        except:
            return {'status': "ko", 'reason': "invalid turn arguments"}
        return backend.ForTestOnly_GetBackToTurn(int(index), int(turn))

    # Start serving (blocking call).
    run(webserver, host=setserver_address, port=setserver_port,
        reloader=True, debug=True)
| tsouche/setgame | server/setserver.py | setserver.py | py | 11,595 | python | en | code | 0 | github-code | 90 |
26093583560 | from model_loader import *
import os
import numpy as np
import tensorflow as tf
import pdb
from sklearn.metrics import f1_score
if __name__ == "__main__":
    # Dataset location: challenge/response pairs for a 4x64 arbiter PUF.
    DATA_DIR = "../example_datasets/PUF_4x64/"
    INPUTS_FILE = "f_4x64_100000.txt"
    LABELS_FILE = "r_4x64_100000.txt"

    # Challenge vectors: one whitespace-separated row of ints per line.
    with open(os.path.join(DATA_DIR, INPUTS_FILE), "r") as fp:
        test_x = np.array([[int(tok) for tok in line.split()] for line in fp])

    # Responses: map {-1, 1} labels to {False, True} for easier evaluation.
    with open(os.path.join(DATA_DIR, LABELS_FILE), "r") as fp:
        test_y = np.array([[int(tok) > 0 for tok in line.split()] for line in fp])

    # Restore the trained model and evaluate it on the whole test set.
    loader = ModelLoader("./PUF/24-06-2017-20-11-21/generation_5000/")
    feed_dict = loader.feed_dict
    tf.import_graph_def(loader.graph_def, name="")
    with tf.Session() as sess:
        feed_dict["inputs:0"] = test_x
        output = sess.run(["TanhActivation1_out:0"], feed_dict=feed_dict)
        # Threshold the tanh activations at 0 to get boolean predictions.
        class_output = output[0] > 0
        result = class_output == test_y
        print("Test accuracy: ", np.mean(result))
        print("F1 score: ", f1_score(test_y, class_output))
| lkrizan/ECF_deep_learning | demo/PUF_evaluator.py | PUF_evaluator.py | py | 1,568 | python | en | code | 1 | github-code | 90 |
71124242856 | import math
import cairo
from mod_python import apache
from igraph import *
import MySQLdb
def index():
    """mod_python handler: draw a 32x32 crest-like glyph with cairo and write it to img/example.png."""
    WIDTH, HEIGHT = 32, 32
    # NOTE(review): pycairo's ImageSurface.create_for_data normally expects a
    # writable buffer as its first argument (data, format, width, height);
    # this call omits the buffer -- confirm it works with the cairo build in use.
    surface = cairo.ImageSurface.create_for_data (cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context (surface)
    ctx.scale (WIDTH/200.0, HEIGHT/200.0) # Map the 200x200 logical canvas onto 32x32 pixels
    ##pat = cairo.LinearGradient (0.0, 0.0, 0.0, 1.0)
    ##pat.add_color_stop_rgba (1, 0.7, 0, 0, 0.5) # First stop, 50% opacity
    ##pat.add_color_stop_rgba (0, 0.9, 0.7, 0.2, 1) # Last stop, 100% opacity
    ##ctx.rectangle (0, 0, 1, 1) # Rectangle(x0, y0, x1, y1)
    ##ctx.set_source (pat)
    ##ctx.fill ()
    ##ctx.translate (0.1, 0.1) # Changing the current transformation matrix
    cx,cy = (100, 100)  # centre of the logical canvas
    w, h = (180, 180)  # overall drawing extent
    # Inner mark: two vertical strokes joined by a crossbar (an "H"-like shape).
    ctx.move_to(cx-w*0.15, cy-h*0.35)
    ctx.line_to(cx-w*0.15, cy+h*0.20)
    ctx.move_to(cx+w*0.15, cy-h*0.35)
    ctx.line_to(cx+w*0.15, cy+h*0.20)
    ctx.move_to(cx+w*0.15, cy-h*0.10)
    ctx.line_to(cx-w*0.15, cy-h*0.10)
    # Outline: straight sides curving to a point at the bottom (shield shape).
    ctx.move_to(cx-w*0.4, cy-h*0.5)
    ctx.line_to(cx-w*0.4, cy)
    ctx.curve_to(cx-w*0.4, cy, cx-w*0.4, cy+h*0.25, cx, cy+h*0.5)
    ctx.curve_to(cx, cy+h*0.5, cx+w*0.4, cy+h*0.25, cx+w*0.4, cy)
    ctx.line_to(cx+w*0.4, cy-h*0.5)
    ctx.line_to(cx-w*0.4, cy-h*0.5)
    ctx.set_source_rgb (1, 0, 0.2) # Solid color
    ctx.set_line_width (6)
    ctx.stroke ()
    surface.write_to_png ("img/example.png") # Output to PNG
| laironald/Govt-rddash | py/ron.py | ron.py | py | 1,331 | python | en | code | 0 | github-code | 90 |
# Count the ways to walk from cell 1 to cell n when each move's length must
# fall inside one of k [L, R] segments; answer is reported mod 998244353.
MOD = 998244353

n, k = map(int, input().split())
segments = [tuple(map(int, input().split())) for _ in range(k)]

# ways[i]  : number of distinct move sequences reaching cell i
# prefix[i]: running sum ways[1] + ... + ways[i] (mod MOD), for O(1) range sums
ways = [0] * (n + 1)
prefix = [0] * (n + 1)
ways[1] = prefix[1] = 1

for pos in range(2, n + 1):
    total = 0
    for lo, hi in segments:
        if pos - lo < 0:
            continue
        # Sum of ways over the window [pos - hi, pos - lo] via prefix sums.
        total += (prefix[pos - lo] - prefix[max(0, pos - hi - 1)]) % MOD
    ways[pos] = total % MOD
    prefix[pos] = (prefix[pos - 1] + ways[pos]) % MOD

print(ways[-1])
28381506411 | # -*- coding: utf-8 -*-
import io
import base64
from PIL import Image
import PIL.PdfImagePlugin # activate PDF support in PIL
from odoo import models, fields, api
from PyPDF2 import PdfFileMerger
from odoo.tools import pdf
import PyPDF2
from io import BytesIO
from reportlab.pdfgen import canvas
from PyPDF2 import PdfFileReader, PdfFileWriter
from reportlab.lib.utils import ImageReader
from PyPDF2.pdf import PageObject
class AccountMove(models.Model):
    """account.move extension that generates a merged PDF report.

    The merged report (invoice + related sale order + field-service
    interventions + PDF/image attachments) is built when a customer invoice
    is posted and stored in ``custom_report_file``.  The invoice print
    template defaults from the partner's ``invoice_report_model_id``.
    """
    _inherit = 'account.move'

    # Generated merged report, stored as a base64-encoded PDF attachment.
    custom_report_file = fields.Binary("Rapport généré", attachment=True, copy=False)
    # Print template used for this invoice (defaulted from the partner).
    invoice_report_model_id = fields.Many2one('ir.actions.report', string="Modèle d'impression", domain="[('model', '=', 'account.move')]")

    @api.model
    def create(self, vals):
        """Propagate the partner's preferred invoice print template on creation."""
        res = super(AccountMove, self).create(vals)
        if vals.get('partner_id'):
            partner = self.env['res.partner'].sudo().browse(vals.get('partner_id'))
            if partner:
                if partner.invoice_report_model_id:
                    res.invoice_report_model_id = partner.invoice_report_model_id.id
        return res

    def write(self, vals):
        """Keep the print template in sync when the partner changes."""
        res = super(AccountMove, self).write(vals)
        for rec in self:
            if vals.get('partner_id'):
                rec.invoice_report_model_id = rec.partner_id.invoice_report_model_id.id
        return res

    def action_invoice_sent(self):
        """Open the send wizard with printing disabled (mail only)."""
        sup = super(AccountMove, self).action_invoice_sent()
        sup['context']['default_is_print'] = False
        return sup

    def action_post(self):
        """On validation of a customer invoice, (re)generate the merged report."""
        res = super(AccountMove, self).action_post()
        for rec in self:
            if rec.move_type == 'out_invoice':
                rec.print_custom_report()
        return res

    def print_custom_report(self):
        """Rebuild ``custom_report_file`` by concatenating, in order:

        1. the invoice PDF (partner template, else the module's custom
           template, else the stock ``account.account_invoices`` report);
        2. the related sale order PDF (partner template or stock report);
        3. the intervention report for the sale order's field-service tasks;
        4. every PDF attachment (except the invoice's own render) and every
           image attachment (each image placed on a one-page PDF).
        Unreadable attachments are skipped.
        """
        self.custom_report_file = False
        pdfs = []
        custom_invoice = self.env.ref('gecop_model_reports.gecop_invoice_report_id')
        if self.partner_id.invoice_report_model_id:
            invoice_report = self.partner_id.invoice_report_model_id
        elif custom_invoice:
            invoice_report = custom_invoice
        else:
            invoice_report = self.env.ref('account.account_invoices', raise_if_not_found=True)
        invoice_pdf, _ = invoice_report._render_qweb_pdf(self.id)
        if invoice_pdf:
            pdfs.append(invoice_pdf)
        if self.invoice_line_ids:
            if self.invoice_line_ids.sale_line_ids:
                sale_order_id = self.invoice_line_ids.sale_line_ids.mapped('order_id')
                # sale_rep = self.env.ref('gecop_model_reports.sale_report_first')
                if sale_order_id:
                    if sale_order_id.partner_id.sale_report_model_id:
                        sale_report = sale_order_id.partner_id.sale_report_model_id
                    # elif sale_rep:
                    #     sale_report = sale_rep
                    else:
                        sale_report = self.env.ref('sale.action_report_saleorder', raise_if_not_found=True)
                    sale_pdf, _ = sale_report._render_qweb_pdf(sale_order_id.id)
                    if sale_pdf:
                        pdfs.append(sale_pdf)
                    if sale_order_id.fsm_task_ids:
                        intervention_report = self.env.ref('sale_fsm_extend.project_task_report_id', raise_if_not_found=True)
                        if intervention_report:
                            tasks_pdf, _ = intervention_report._render_qweb_pdf(sale_order_id.fsm_task_ids.ids)
                            if tasks_pdf:
                                pdfs.append(tasks_pdf)
        if self.attachment_ids:
            for a in self.attachment_ids:
                # NOTE(review): a.name.split('.')[1] assumes exactly one dot in
                # the file name; "report.final.pdf" or "report" would misbehave.
                if a.name.split('.')[1] in ['pdf', 'PDF']:
                    # Skip the invoice's own rendered PDF to avoid duplication.
                    name_invoice = self.name.replace('/', '_')
                    if name_invoice not in a.name:
                        try:
                            reader = PdfFileReader(io.BytesIO(base64.b64decode(a.datas)), strict=False,
                                                   overwriteWarnings=False)
                        except Exception:
                            continue
                        writer = PdfFileWriter()
                        for page_number in range(0, reader.getNumPages()):
                            page = reader.getPage(page_number)
                            writer.addPage(page)
                        _buffer = io.BytesIO()
                        writer.write(_buffer)
                        my_pdf = _buffer.getvalue()
                        pdfs.append(my_pdf)
            if any(a.name.split('.')[1] in ['JPG', 'JPEG', 'PNG', 'png', 'jpg', 'jpeg'] for a in
                   self.attachment_ids):
                for a in self.attachment_ids.filtered(
                        lambda u: u.name.split('.')[1] in ['JPG', 'JPEG', 'PNG', 'png', 'jpg', 'jpeg']):
                    try:
                        image_reader = ImageReader(io.BytesIO(base64.b64decode(a.datas)))
                        writer = PdfFileWriter()
                    # BUGFIX: was a bare "except:", which also swallowed
                    # SystemExit/KeyboardInterrupt; only skip real errors.
                    except Exception:
                        continue
                    # Draw the image onto a fresh one-page PDF canvas.
                    packet = io.BytesIO()
                    can = canvas.Canvas(packet)
                    can.drawImage(image_reader, x=160, y=280, width=300, height=300)
                    can.save()
                    can.showPage()
                    packet.seek(0)
                    _pdf = PdfFileReader(packet, overwriteWarnings=False)
                    writer.addPage(_pdf.getPage(0))
                    # NOTE(review): writer.write appends to the same buffer that
                    # already holds the canvas PDF; getvalue() therefore returns
                    # both concatenated -- confirm downstream merging tolerates it.
                    writer.write(packet)
                    img_pdf = packet.getvalue()
                    pdfs.append(img_pdf)
        mg = pdf.merge_pdf(pdfs)
        self.custom_report_file = base64.b64encode(mg)
| SyentysDevCenter/Fprs_old | invoice_custom_report/models/account_move.py | account_move.py | py | 5,701 | python | en | code | 0 | github-code | 90 |
2789427453 | # Python functions used in different scripts
import numpy as np
import pandas as pd
import subprocess
import os
import config_vars as cfg
def create_fastq_symlink_nh(gem_id, fastq_path_df, symlink_path):
    """Creates a symbolic link to a fastq file using cellranger notation for non-hashed samples
    (see https://support.10xgenomics.com/single-cell-gene-expression/software/downloads/latest?)

    Args:
      gem_id: identifier of the Gelbeads-in-Emulsion (GEM) well that will be used as prefix in the symlink
      fastq_path_df: pandas dataframe with the fastq paths for that gem_id, which comes from the file "fastq_paths.csv"
      symlink_path: string specifying where to create the symlinks

    Returns:
      None
    """
    pair_ids = np.unique(fastq_path_df["pair_id"])
    # One cellranger "lane" per sequencing pair: the lane number is the
    # 1-based position of the pair id in the sorted unique list.
    for i, pair_id in enumerate(pair_ids):
        pair_df = fastq_path_df.loc[fastq_path_df["pair_id"] == pair_id, :]
        lane = str(i + 1)
        for j in pair_df.index:
            fastq_path = pair_df.loc[j, "fastq_path"]
            read = pair_df.loc[j, "read"].replace("R", "")  # "R1"/"R2" -> "1"/"2"
            # os.symlink instead of spawning "ln -s" per file: no subprocess
            # overhead, and a failure raises OSError instead of being ignored.
            os.symlink(fastq_path, "{}/{}_S1_L00{}_R{}_001.fastq.gz".format(symlink_path, gem_id, lane, read))
def make_cellranger_nh(gem_id, jobscript_path, fastq_path, expected_cells):
    """Creates a cellranger script for a non-hashed sample

    Args:
      gem_id: identifier of the Gelbeads-in-Emulsion (GEM) well
      jobscript_path: path to save the jobscript
      fastq_path: path to the fastq files
      expected_cells: expected number of high-quality cells in this experiment

    Returns:
      None
    """
    job_script = """#!/bin/bash
#SBATCH --job-name="{}"
#SBATCH --workdir=.
#SBATCH --mail-type=all
#SBATCH --mail-user=ramon.massoni@cnag.crg.eu
#SBATCH --error=./log/{}_%x_%J.err
#SBATCH --output=./log/{}_%x_%J.out
#SBATCH --time=20:00:00
#SBATCH --cpus-per-task=16
echo [`date "+%Y-%m-%d %T"`] starting job on $HOSTNAME
{} count --fastqs {} --id {} --chemistry SC3Pv3 --expect-cells {} --localcores 24 --localmem 64 --transcriptome {};
echo [`date "+%Y-%m-%d %T"`] job finished""".format(gem_id, gem_id, gem_id, cfg.cellranger_path, fastq_path, gem_id, expected_cells, cfg.reference_path)
    # Context manager closes the file even if the write fails (the original
    # open()/close() pair leaked the handle on error).
    with open("{}/{}.cmd".format(jobscript_path, gem_id), "w") as job_script_file:
        job_script_file.write(job_script)
def create_fastq_symlink_h(gem_id, library, lib_type, fastq_path_df, symlink_path):
    """Creates a symbolic link to a fastq file using cellranger notation for hashed samples
    (see https://support.10xgenomics.com/single-cell-gene-expression/software/downloads/latest?)

    Args:
      gem_id: identifier of the Gelbeads-in-Emulsion (GEM) well
      library: Illumina library id
      lib_type: type of library ("hashed_cdna" or "hashed_hto")
      fastq_path_df: pandas dataframe with the fastq paths for that gem_id, which comes from the file "fastq_paths.csv"
      symlink_path: string specifying where to create the symlinks (fastq/HTO or fastq/cDNA)

    Returns:
      None
    """
    fastq_path_sub = fastq_path_df.loc[fastq_path_df["library_id"] == library, :]
    pair_ids = np.unique(fastq_path_sub["pair_id"])
    for i in range(len(pair_ids)):
        # BUGFIX: filter the library-specific subset (fastq_path_sub), not the
        # full dataframe -- otherwise fastqs belonging to OTHER libraries that
        # share the same pair_id were also linked here.
        filt = (fastq_path_sub["pair_id"] == pair_ids[i])
        pair_df = fastq_path_sub.loc[filt, :]
        for j in pair_df.index:
            lane = str(i + 1)
            symlink_path_lane = "{}/lane{}".format(symlink_path, lane)
            if not os.path.exists(symlink_path_lane):
                os.mkdir(symlink_path_lane)
            fastq_path = pair_df.loc[j, "fastq_path"]
            read = pair_df.loc[j, "read"].replace("R", "")  # "R1"/"R2" -> "1"/"2"
            # HTO libraries get a distinct sample prefix so cellranger can tell
            # the two fastq sets apart.
            if lib_type == "hashed_hto":
                gem_id_sp = "{}_HTO".format(gem_id)
            elif lib_type == "hashed_cdna":
                gem_id_sp = gem_id
            # os.symlink raises OSError on failure instead of silently
            # ignoring it like the previous "ln -s" subprocess call.
            os.symlink(fastq_path, "{}/{}_S1_L00{}_R{}_001.fastq.gz".format(symlink_path_lane, gem_id_sp, lane, read))
def write_libraries_csv(gem_id, gem_id_path):
    """Creates the file "libraries.csv" which is required by cellranger in feature-barcoding analysis.
    (see https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/using/feature-bc-analysis)

    Args:
      gem_id: identifier of the Gelbeads-in-Emulsion (GEM) well
      gem_id_path: absolute path to gem_id-specific directory.

    Returns:
      None
    """
    fastq_dirs = os.listdir("{}/fastq".format(gem_id_path))
    # Context manager closes the csv even if a listdir/write below raises.
    with open("{}/libraries.csv".format(gem_id_path), "w") as lib_csv:
        lib_csv.write("fastqs,sample,library_type")
        for d in fastq_dirs:
            # NOTE(review): only "HTO" and "cDNA" subdirectories are expected;
            # any other entry would reuse the previous iteration's values (or
            # raise NameError on the first one) -- confirm the layout upstream.
            if d == "HTO":
                gem_id_sp = "{}_HTO".format(gem_id)
                lib_type = "Antibody Capture"
            elif d == "cDNA":
                gem_id_sp = gem_id
                lib_type = "Gene Expression"
            for sub_d in os.listdir("{}/fastq/{}".format(gem_id_path, d)):
                sub_d_abs_path = "{}/fastq/{}/{}".format(gem_id_path, d, sub_d)
                lib_csv.write("\n{},{},{}".format(sub_d_abs_path, gem_id_sp, lib_type))
def make_cellranger_h(gem_id, jobscript_path, expected_cells):
    """Creates a cellranger script for a hashed GEM well

    Args:
      gem_id: identifier of the Gelbeads-in-Emulsion (GEM) well
      jobscript_path: path to save the jobscript
      expected_cells: expected number of high-quality cells in this experiment

    Returns:
      None
    """
    job_script = """#!/bin/bash
#SBATCH --job-name="{}"
#SBATCH --workdir=.
#SBATCH --mail-type=all
#SBATCH --mail-user=ramon.massoni@cnag.crg.eu
#SBATCH --error=./log/{}_%x_%J.err
#SBATCH --output=./log/{}_%x_%J.out
#SBATCH --time=20:00:00
#SBATCH --cpus-per-task=20
echo [`date "+%Y-%m-%d %T"`] starting job on $HOSTNAME
{} count --libraries libraries.csv --feature-ref feature_reference.csv --id {} --chemistry SC3Pv3 --expect-cells {} --localcores 24 --localmem 62 --transcriptome {};
echo [`date "+%Y-%m-%d %T"`] job finished""".format(gem_id, gem_id, gem_id, cfg.cellranger_path, gem_id, expected_cells, cfg.reference_path)
    # Context manager closes the file even if the write fails (the original
    # open()/close() pair leaked the handle on error).
    with open("{}/{}.cmd".format(jobscript_path, gem_id), "w") as job_script_file:
        job_script_file.write(job_script)
| Single-Cell-Genomics-Group-CNAG-CRG/TonsilAtlas | scRNA-seq/1-cellranger_mapping/scripts/utils.py | utils.py | py | 6,462 | python | en | code | 12 | github-code | 90 |
24491707308 | from db import DB
def get_db():
    """Return a fresh DB wrapper; used as a context manager ("with get_db() as db") below."""
    return DB()
class BaseModel:
    """SQLite-backed store for scraped trending items; one table per source site.

    NOTE(review): every statement below interpolates values with str.format,
    so table names and -- worse -- scraped titles are injected directly into
    the SQL text.  A title containing a single quote will break get_id();
    parameterized queries would be safer, assuming the DB wrapper supports
    them -- confirm.
    """
    def __init__(self, tablename='Weibo'):
        self.tablename = tablename
        # Ensure the backing table exists as soon as the model is constructed.
        self.create()
    def create(self):
        # Create the table on first use; no-op if it already exists.
        with get_db() as db:
            exists = db.execute("select count(name) from sqlite_master where type = 'table' and name = '{0}'".format(self.tablename))
            if exists.fetchone()[0] == 1:
                pass
            else:
                db.execute('''CREATE TABLE {}
                    (ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    TITLE TEXT NOT NULL,
                    CONTENT TEXT NOT NULL,
                    IMG TEXT NOT NULL,
                    URL CHAR(200),
                    CREATEDATE TIMESTAMP default (datetime('now', 'localtime')));'''.format(self.tablename))
    def insert(self, data):
        # data is a tuple laid out as (TITLE, CONTENT, URL, IMG); rows whose
        # TITLE is already present are skipped (dedup by title).
        with get_db() as db:
            _exists = self.get_id(data[0])
            if len(_exists) > 0:
                return
            else:
                db.execute("INSERT INTO {0} (TITLE,CONTENT,URL,IMG) \
                    VALUES {1}".format(self.tablename, data))
    def select(self):
        # Return every row, newest first, as a list of dicts.
        with get_db() as db:
            result = db.execute("SELECT * from {0} ORDER BY CREATEDATE DESC".format(self.tablename)).fetchall()
            _r = []
            for row in result:
                _r.append({
                    'id': row[0],
                    'title': row[1],
                    'content': row[2],
                    'img': row[3],
                    'url': row[4],
                    'create_date': row[5]
                })
            return _r
    def update(self, id, data):
        # Replace the CONTENT column of the row with the given id.
        with get_db() as db:
            db.execute("UPDATE {0} SET CONTENT = '{2}' WHERE ID={1}".format(self.tablename, id, data))
    def set_img(self, id, data):
        # Replace the IMG column of the row with the given id.
        with get_db() as db:
            db.execute("UPDATE {0} SET IMG = '{2}' WHERE ID={1}".format(self.tablename, id, data))
    def delete(self):
        # Remove ALL rows from this model's table.
        with get_db() as db:
            db.execute("DELETE FROM {0};".format(self.tablename))
    def get_id(self, title):
        # Return [{'id': ...}, ...] for rows whose TITLE matches exactly.
        with get_db() as db:
            result = db.execute("SELECT id from {0} where TITLE = '{1}'".format(self.tablename,title)).fetchall()
            _r = []
            for row in result:
                _r.append({
                    'id': row[0]
                })
            return _r
class BaiduModal(BaseModel):
    """Store for Baidu trending items (backing table ``Baidu``)."""

    def __init__(self):
        super().__init__(tablename='Baidu')
class ZhihuModal(BaseModel):
    """Store for Zhihu trending items (backing table ``Zhihu``)."""

    def __init__(self):
        super().__init__(tablename='Zhihu')
class WeixinModal(BaseModel):
    """Store for Weixin trending items (backing table ``Weixin``)."""

    def __init__(self):
        super().__init__(tablename='Weixin')
if __name__ == '__main__':
    # Smoke test: instantiate the default ('Weibo') model.
    model = BaseModel()
    # NOTE(review): redundant -- __init__ already invokes create().
    model.create()
4110532181 | import fileinput
import math
def main():
    """Train a multinomial Naive Bayes text classifier with Laplace smoothing.

    Reads whitespace-tokenized documents from traindata.txt with 0/1 labels in
    trainlabels.txt, classifies testdata.txt (or the training set itself when
    testFileProvided is False), and prints success/fail counts and accuracy.
    """
    testFileProvided = True; # Change this to False to test on the same train data
    trainDataFile = 'traindata.txt'
    trainLabelFile = 'trainlabels.txt'
    testDataFile = 'testdata.txt'
    testLabelFile = 'testlabels.txt'
    ######################################
    # train file processing
    ######################################
    # Read training data and store them
    trainData = []
    with open( trainDataFile ) as f:
        trainData = f.read().splitlines()
    # Read training label and store them
    trainLabel = []
    with open( trainLabelFile ) as f:
        trainLabel = f.read().splitlines()
    if len(trainLabel) != len (trainData ):
        print ( 'The train label and train data should have the same length' )
        exit()
    ######################################
    # test file processing
    ######################################
    if testFileProvided:
        # Read test data and store them
        testData = []
        with open( testDataFile ) as f:
            testData = f.read().splitlines()
        # Read test label and store them
        testLabel = []
        with open( testLabelFile ) as f:
            testLabel = f.read().splitlines()
        if len(testLabel) != len (testData ):
            print ( 'The test label and test data should have the same length' )
            exit()
    else:
        testData = trainData
        testLabel = trainLabel
    ######################################
    # Split keywords in each entry and store their count
    ######################################
    # yesTable/noTable: per-class word counts; numOfTerms: vocabulary size,
    # counted once per word (incremented only when the word is new to BOTH tables).
    yesTable = {}
    noTable = {}
    numOfYesWords = 0
    numOfNoWords = 0
    yesCount = 0
    noCount = 0
    numOfTerms = 0
    for i in range( len(trainLabel) ):
        words = trainData[i].split()
        if trainLabel[i] == '1' :
            yesCount +=1
            for word in words:
                numOfYesWords +=1
                if word in yesTable:
                    yesTable[word] +=1
                else:
                    yesTable[word] = 1
                    if word not in noTable:
                        numOfTerms +=1
        elif trainLabel[i] == '0' :
            noCount +=1
            for word in words:
                numOfNoWords +=1
                if word in noTable:
                    noTable[word] +=1
                else:
                    noTable[word] = 1
                    if word not in yesTable:
                        numOfTerms +=1
    #####################################################
    # Start testing
    ####################################################
    successCount = 0;
    failCount = 0;
    testCasesCount = 0
    for testString in testData:
        ######################################
        # Split keywords and store them in test table
        ######################################
        testcase = testString.split()
        testTable = {}
        for word in testcase:
            if word in testTable:
                testTable[word] +=1
            else:
                testTable[word] = 1
        #print ( testTable )
        ######################################
        # processing calculation
        ######################################
        # Class priors P(yes)/P(no) from the training label frequencies.
        yesPercentage = float (yesCount) / (yesCount + noCount)
        noPercentage = float (noCount) / (yesCount + noCount)
        yesNumerator = float ( numOfYesWords + numOfTerms )
        noNumerator = float ( numOfNoWords + numOfTerms )
        # NOTE(review): likelihoods are multiplied directly rather than summed
        # in log space, so very long documents may underflow to 0.0 -- confirm
        # document lengths stay small.
        for key in testTable:
            if key in yesTable:
                rep = yesTable[key]
            else:
                rep = 0
            # Laplace (add-one) smoothed per-word likelihood, raised to the
            # word's count in the test document.
            yesVal = (rep+1) / yesNumerator
            yesPercentage *= math.pow( yesVal, testTable[key] )
        for key in testTable:
            if key in noTable:
                rep = noTable[key]
            else:
                rep = 0
            noVal = (rep+1) / noNumerator
            noPercentage *= math.pow( noVal, testTable[key] )
        if yesPercentage > noPercentage:
            ans = '1'
        else:
            ans = '0'
        if ans == testLabel[testCasesCount]:
            successCount +=1
        else:
            failCount +=1
        testCasesCount+=1
    print ( "Success count: %d"% successCount )
    print ( "Fail count: %d"% failCount )
    percentage = successCount*100.0/testCasesCount
    print ( "Success Rate: %f" % percentage )
main() | WanjinYoo/Data-Mining | Linear classification/q4/main.py | main.py | py | 3,626 | python | en | code | 0 | github-code | 90 |
417132665 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.common.keys import Keys
import os
# Launch Chrome and open Naver's landing page.
driver = webdriver.Chrome()
driver.get('http://www.naver.com')

# NOTE(review): RETURN is sent into the empty search box right after the page
# loads -- confirm this is intentional (it submits an empty query).
element = driver.find_element(By.ID, 'query')
element.send_keys(Keys.RETURN)

# Open the login form.
driver.find_element(By.ID, 'gnb_login_button').click()

# WARNING(review): hard-coded credentials in source -- move them to
# environment variables or a secrets store.
uid = 'nave'
upw = 'vaerrr3231313'

# Fill the id/pw fields via JavaScript (presumably to avoid issues with
# send_keys on the login form -- confirm).
input_js = 'document.getElementById("id").value="{id}";document.getElementById("pw").value="{pw}";'.format(id=uid, pw=upw)
time.sleep(1)
# BUGFIX: was driver.excute_script (typo), which raised AttributeError;
# the WebDriver method is execute_script.
driver.execute_script(input_js)
time.sleep(1)
driver.find_element(By.ID, "log.login").click()
time.sleep(1)
os.system("pause")  # Windows-only: keep the console (and browser) open
8312554439 | import socket
import cv2
import struct
import numpy as np
import os
# Set up the TCP socket used to receive the video stream.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# AF_INET = IPv4, SOCK_STREAM = TCP (reliable, ordered byte stream).
# UDP (SOCK_DGRAM) would be a lower-latency alternative for real-time video at
# the cost of possible packet loss / reordering, but this client depends on
# TCP's ordering for its length-prefixed framing (see receive_frame below).
server_ip = 'X'  # Server IP address -- placeholder, fill in before running
server_port = 2099  # Server port number
# Connect to the server (raises ConnectionRefusedError if it is not listening).
client_socket.connect((server_ip, server_port))
# Function to receive and display framesq
def receive_frame():
    """Receive length-prefixed encoded frames from ``client_socket`` and display them.

    Wire format: a 4-byte big-endian unsigned length, then that many bytes of
    an encoded image.  Keys while the window has focus: 'p' saves the current
    frame under screenshots/screenshotN.png, 'q' quits.  The loop also exits
    when the server closes the connection or on any receive/decode error.
    """
    screenshot_dir = 'screenshots'
    screenshot_counter = 1  # suffix for the next screenshot file
    if not os.path.exists(screenshot_dir):
        os.makedirs(screenshot_dir)
    while True:
        try:
            # Read the 4-byte length prefix; an empty read means the peer
            # closed the connection.
            size_bytes = client_socket.recv(4)
            if not size_bytes:
                break
            frame_size = struct.unpack('>I', size_bytes)[0]
            # Accumulate exactly frame_size bytes -- recv may return short reads.
            frame_data = b''
            while len(frame_data) < frame_size:
                data = client_socket.recv(frame_size - len(frame_data))
                if not data:
                    break
                frame_data += data
            if len(frame_data) == 0:
                break
            frame_np = np.frombuffer(frame_data, dtype=np.uint8)
            frame = cv2.imdecode(frame_np, cv2.IMREAD_COLOR)
            if frame is not None:
                cv2.imshow('Video', frame)
            # BUGFIX: poll the keyboard once per iteration.  The original
            # called cv2.waitKey(1) twice (once for 'p', once for 'q'), so a
            # keypress consumed by the first poll could be missed by the second.
            key = cv2.waitKey(1) & 0xFF
            if frame is not None and key == ord('p'):
                # Save the current frame with an incrementing counter.
                screenshot_name = os.path.join(screenshot_dir, f'screenshot{str(screenshot_counter)}.png')
                cv2.imwrite(screenshot_name, frame)
                print("Screenshot saved:", screenshot_name)
                screenshot_counter += 1
            if key == ord('q'):
                break
        except Exception as e:
            print("Error receiving frame:", str(e))
            break
    client_socket.close()
    cv2.destroyAllWindows()
# Start streaming only when run as a script, so importing this module does not
# immediately block on the network.
if __name__ == '__main__':
    receive_frame()
| Keilwerth11270/pi-webcam | webcam_client.py | webcam_client.py | py | 5,765 | python | en | code | 0 | github-code | 90 |
# Greedy auction: take bidders from the highest bid down while their bid still
# fits in the remaining target; the set wins only if it sums to s exactly.
bidder_count, target = map(int, input().split())

entries = []
for _ in range(bidder_count):
    name, amount = input().split()
    entries.append((int(amount), name))
entries.sort(reverse=True)  # highest bid first (ties: name, descending)

remaining = target
winners = []
for amount, name in entries:
    if amount <= remaining:
        winners.append(name)
        remaining -= amount
if remaining > 0:  # target not reached exactly -> nobody wins
    winners = []

print(len(winners))
for winner in winners:
    print(winner)
29006350373 | from cvxpy.expressions.variable import Variable
from cvxpy.problems.objective import Minimize
from cvxpy.reductions.matrix_stuffing import extract_mip_idx, MatrixStuffing
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.utilities import are_args_affine
class ConeMatrixStuffing(MatrixStuffing):
    """Construct matrices for linear cone problems.

    Linear cone problems are assumed to have a linear objective and cone
    constraints which may have zero or more arguments, all of which must be
    affine.

        minimize   c'x
        subject to cone_constr1(A_1*x + b_1, ...)
                   ...
                   cone_constrK(A_i*x + b_i, ...)
    """

    def accepts(self, problem):
        # Accept only minimization of an affine objective, with no variables
        # carrying convex attributes (e.g. PSD/symmetric) and every constraint
        # argument affine -- i.e. a problem already in linear-cone form.
        return (type(problem.objective) == Minimize
                    and problem.objective.expr.is_affine()
                    and not convex_attributes(problem.variables())
                    and are_args_affine(problem.constraints))

    def stuffed_objective(self, problem, extractor):
        # Extract the objective into the form c.T * x + r.
        C, R = extractor.affine(problem.objective.expr)
        c = C.toarray().flatten()
        # Carry the boolean/integer attributes of the original variables over
        # to the single stacked variable x.
        boolean, integer = extract_mip_idx(problem.variables())
        x = Variable(extractor.N, boolean=boolean, integer=integer)
        # "+ 0" presumably forces the result to be a cvxpy Expression even in
        # degenerate cases -- confirm against cvxpy expression semantics.
        new_obj = c.T * x + 0
        return new_obj, x, R[0]
| johnjaniczek/SFCLS | venv/lib/python3.5/site-packages/cvxpy/reductions/dcp2cone/cone_matrix_stuffing.py | cone_matrix_stuffing.py | py | 1,321 | python | en | code | 12 | github-code | 90 |
72020121258 |
# Public API import: pandas.core.frame is an internal module whose layout may
# change between pandas releases.
from pandas import DataFrame

# Convert a nested list to a DataFrame: each inner list becomes one ROW.
a = [[1, 2, 3, 4], [5, 6, 7, 8]]  # two sub-lists: [1,2,3,4] and [5,6,7,8]
data = DataFrame(a)
print(data)

# Convert a dict of lists to a DataFrame: each list becomes one COLUMN.
a = [1, 2, 3, 4]  # list a
b = [5, 6, 7, 8]  # list b
c = {"a": a, "b": b}  # combine lists a and b into a dict
data = DataFrame(c)
print(data)
| todaygood/note-python | practise/dataFrame1.py | dataFrame1.py | py | 444 | python | zh | code | 0 | github-code | 90 |
7545624681 | from keras import layers
from keras import models
import rssi_data as train_data
import read_test_data as test_data
import tensorflow as tf
import numpy as np
import pandas as pd
def one_hot_conversion(a, b, a_classes=3, b_classes=5):
    """Return the concatenated one-hot encodings of two class indices.

    Args:
      a: first class index (anything int(float(a)) accepts, e.g. "2.0").
      b: second class index, encoded the same way.
      a_classes: number of classes for ``a`` (default 3, matching the
        3 building classes used elsewhere in this script).
      b_classes: number of classes for ``b`` (default 5, the floor classes).

    Returns:
      1-D float ndarray of length a_classes + b_classes: one-hot(a) followed
      by one-hot(b).
    """
    m = np.zeros(a_classes)
    n = np.zeros(b_classes)
    # int(float(...)) tolerates string inputs such as "2.0" from the CSV.
    m[int(float(a))] = 1
    n[int(float(b))] = 1
    return np.append(m, n)
# Dataset CSVs consumed by read_data(): fpath1 is used for training,
# fpath2 as the held-out test set (see the script below).
fpath1 = "trainingData2.csv"
fpath2 = "validationData2.csv"
def read_data(fpath):
    """Load a localization CSV and return (x, y_) ready for the CNN.

    x:  float ndarray of shape (rows, 23, 23, 1).  The first 520 columns of
        each row are linearly rescaled by (v + 110) * 255 / 110 (mapping -110
        to 0 and 0 to 255 -- presumably RSSI in dBm, confirm), then padded
        with 9 zeros so 520 + 9 = 529 = 23*23 forms a square "image".
    y_: one-hot labels from one_hot_conversion(row[523], row[522]) -- 3 + 5
        bits (presumably building and floor columns -- confirm the schema).
    Rows are shuffled via np.random.shuffle, so the output order is random.
    """
    train_df = pd.read_csv(fpath, header=0)
    # print(fpath + "finished reading. ")
    xl_length = len(train_df)
    # training data
    x = []
    # lables
    y_ = []
    # dictionary: {building_floor:position} -- NOTE(review): never populated or used
    pos = {}
    # Shuffle row indices so samples are visited in random order.
    idx = np.arange(xl_length)
    np.random.shuffle(idx)
    rows = np.asarray(train_df.iloc[:,:]).astype(float)
    sub_rows = np.asarray(train_df.iloc[:,0:520]).astype(float)
    # print("data finished processing - stage 1.")
    for i in idx:
        row = rows[i]
        label = one_hot_conversion(row[523],row[522])
        y_.append(label)
        row = (sub_rows[i] + 110) * 255 / 110
        # Pad with 9 zeros: 520 features + 9 = 529 = 23*23.
        ax = [np.zeros(9)]
        sub_x = np.append(row, ax)
        # print("sub_x medium shape: ", sub_x.shape)
        sub_x = sub_x.reshape(23,23,1)
        # print("sub_x modified shape: ", sub_x.shape)
        x.append(sub_x)
    # print("data finished processing - stage 2.")
    x = np.array(x)
    y_ = np.array(y_)
    return x, y_
# ---- Dataset preparation -------------------------------------------------
x, y = read_data(fpath1)
train_val_split = int(0.9 * len(x)) # mask index array
# train
x_train = np.array(x[:train_val_split])
y_train = np.array(y[:train_val_split])
# validation
x_val = np.array(x[train_val_split:])
y_val = np.array(y[train_val_split:])
# test
x_test, y_test = read_data(fpath2)
x_test = np.array(x_test)
y_test = np.array(y_test)
# ---- Model: 3 conv/pool stages on the 23x23x1 "image", then dense head ---
# The 8 sigmoid outputs match the label layout: 3 building bits + 5 floor bits.
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(23,23,1)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(8, activation='sigmoid'))
model.summary()
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# NOTE(review): training uses the FULL x/y -- the x_train/x_val split computed
# above is never used.  Confirm whether validation was intended here.
model.fit(x, y, epochs=20,batch_size=64)
test_loss, test_acc = model.evaluate(x_test,y_test)
print('Test accuracy:', test_acc)
# ---- Per-task accuracy: argmax over the building (0:3) and floor (3:8) bits.
predictions = model.predict(x_test)
b = 0
b_f = 0
# NOTE(review): range(1, len) walks predictions[-1] ... predictions[-(len-1)],
# so one sample is never scored -- confirm intent.
for i in range(1,len(x_test)):
    sub1 = np.argmax(predictions[-i][:3])
    sub2 = np.argmax(predictions[-i][3:8])
    sub4 = np.argmax(y_test[-i][:3])
    sub5 = np.argmax(y_test[-i][3:8])
    if (sub1 == sub4) and (sub2 == sub5):
        b_f += 1
        b += 1
    else:
        if (sub1 == sub4):
            b += 1
s = len(x_test)
a1 = 100 * b / s
a2 = 100 * b_f / s
print("For ",s," test data:")
print("building accuracy: ", a1,"%")
print("building + floor prediction accuracy: ", a2,"%")
| dabaitudiu/ML_in_Wi-Fi_positioning | Stage5/CNN_BF.py | CNN_BF.py | py | 3,153 | python | en | code | 3 | github-code | 90 |
86585971402 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from timeit import default_timer as timer
"""
RUN AT YOUR OWN RISK, THIS IS MULTITHREADED BENCHMARKING DONE WITH
THE CODE FROM THE LAST PROBLEM. IT WILL USE ALL CPU RESOURCES AND CAN
UPSET ANTIVIRUS PROGRAMS, (THOUGH I MANAGED TO MAKE THAT ISSUE GO AWAY FOR ME)
IF YOU WANT TO RUN IT, I SUGGEST THE FOLLOWING SETTINGS AT THE BOTTOM:
beg_scale = .5
end_scale = 1
num_point = 5
IN THE BOTTOM IF-STATEMENT. THESE ARE NOT THE DEFAULT VALUES.
"""
def corona(N_scale):
    """Solve Laplace's equation on an annulus (r in [1,3], phi in [0,2*pi))
    by successive over-relaxation (SOR), with potential cos(n*phi) imposed
    at the inner boundary r=1.

    N_scale scales the grid resolution (61*N_scale radial by 91*N_scale
    angular points) for benchmarking.  Returns (iterations, wall_time_seconds).
    """
    start = timer()
    #* Initialize parameters (system size, grid spacing, etc.)
    method = 3
    rN = int(61*N_scale)
    pN = int(91*N_scale)
    animate=0
    graph =0
    n = 3
    rL = 2 # System size (length)
    pL = 2*np.pi
    rh = rL/(rN-1) # Grid spacing
    ph = pL/(pN-1)
    r = np.arange(0,rN)*rh+1 # r coordinate
    phi = np.arange(0,pN)*ph # phi coordinate
    x = np.array([i*np.cos(j) for i in r for j in phi])
    y = np.array([i*np.sin(j) for i in r for j in phi])
    R,P = np.meshgrid(r,phi) # for plotting, note the reversal in x and y
    X, Y = R*np.cos(P), R*np.sin(P)
    plot_interval = 50 # interval to plot animation, setting it smaller slows the program down alot
    #* Select over-relaxation factor (SOR only)
    if( method == 3 ):
        rad = .5*(np.cos(np.pi/rN) + np.cos(np.pi/pN))
        omegaOpt = 2/(1+np.sqrt(1-rad**2)) # Theoretical optimum
        #print('Theoretical optimum omega = ',omegaOpt)
        omega = omegaOpt#float(input('Enter desired omega: '))
    #* Set initial guess as first term in separation of variables soln.
    A0 = 1 # Potential at r=1
    # phi = phi0 * 4/(np.pi*np.sinh(np.pi)) * np.outer(np.sin(np.pi*x/L),np.sinh(np.pi*y/L))
    A=np.zeros((rN,pN)) # try this to see it evolve better
    #* Set boundary conditions
    # first index is the radius and second index is phi (rows,cols)
    for i in range(pN): # Apply inner boundary conditions
        A[0,i] = np.cos(n*phi[i])
    A[-1,:] = A[-2,:] # Apply outer boundary condition
    A[:,0] = A[:,-1] # Periodic Boundary conditions about phi = 0
    #print('Potential is zero on all other boundaries')
    #plt.ion()
    #* Loop until desired fractional change per iteration is obtained
    # start_time=cputime # Reset the cputime counter
    newphi = np.copy(phi) # Copy of the solution (used only by Jacobi; unused here)
    iterMax = pN**2 # Set max to avoid excessively long runs
    changeDesired = 1.0e-4 # Stop when the change is given fraction
    #print('Desired fractional change = ',changeDesired)
    change = np.array([])
    for iterk in range(0,iterMax):
        changeSum = 0.0
        ## SOR method ##
        # NOTE(review): pure-Python double loop over interior points -- this is
        # the hot spot that the benchmarking harness times.
        for i in range(1,rN-1): # Loop over interior points only
            for j in range(1,pN-1):
                newA = 0.25*omega*(A[i+1,j]+A[i-1,j]+ A[i,j-1]+A[i,j+1]) + (1-omega)*A[i,j]
                changeSum = changeSum + abs(1-A[i,j]/newA)
                A[i,j] = newA
        # Update boundary conditions
        for i in range(pN): # Apply inner boundary conditions
            A[0,i] = np.cos(n*phi[i])
        A[-1,:] = A[-2,:] # Apply outer boundary condition
        A[:,0] = A[:,-1] # Periodic Boundary conditions about phi = 0
        #* Check if fractional change is small enough to halt the iteration
        change = np.append(change,changeSum/(pN-2)**2)
        #if( iterk%10 < 1 ):
            #print('After %d iterations, fractional change = %f'%( iter,change[-1]))
        if( change[-1] < changeDesired ):
            #print('Desired accuracy achieved after %d iterations'%iter)
            #print('Breaking out of main loop')
            break
        # animate
        if(animate ==1 and iterk%plot_interval<1):
            fig = plt.figure(2) # Clear figure 2 window and bring forward
            plt.clf()
            ax = fig.gca(projection='3d')
            surf = ax.plot_surface(X, Y, A.T, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False)
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.set_zlabel('potential after '+str(iterk)+' iterations')
            plt.draw()
            plt.show()
            plt.pause(0.1)
    # total_time = cputime - start_time # get the total cpu time
    #* Plot final estimate of potential as contour and surface plots
    #plt.ioff()
    if graph ==1:
        plt.figure(1);plt.clf()
        contourLevels = np.arange(0,1,0.1) #
        plt.contour(X,Y,A.T,contourLevels) # Contour plot
        # clabel(cs,contourLabels) # Add labels to selected contour levels
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title(r'$\Phi(x,y)$')
        fig = plt.figure(2) # Clear figure 2 window and bring forward
        plt.clf()
        ax = fig.gca(projection='3d')
        surf = ax.plot_surface(X, Y, A.T, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('potential after '+str(iterk)+' iterations')
        #* Plot the fractional change versus iteration
        plt.figure(3);
        plt.clf()
        plt.semilogy(change)
        plt.xlabel('Iteration')
        plt.ylabel('Fractional change')
        if method==1:
            title=' Jacobi method'
        elif(method==2):
            title =' Gauss-Seidel method'
        elif(method==3):
            title=' SOR method, $\Omega$ ='+str(omega)
        plt.title(r'Iterations ='+str(iterk)+title+' pN='+str(pN))
        plt.grid(True)
        fig = plt.figure(4) # Clear figure 2 window and bring forward
        plt.clf()
        ax = fig.gca(projection='3d')
        # Analytic comparison: series solution satisfying cos(n*phi) at r=1
        # and the zero-gradient condition at the outer radius r=3.
        A_analytic = np.zeros((rN,pN))
        for i in range(rN):
            for j in range(pN):
                A_analytic[i,j] = ( ( (3**(2*n)/r[i]**2) + r[i]**n)/(3**(2*n) ) )*np.cos(n*phi[j])
        surf = ax.plot_surface(X, Y, A_analytic.T, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('potential from analytic soln')
        plt.show()
    stop = timer()
    time = stop-start
    return (iterk,time)
"""
Lets benchmark convergence speed for a bunch of different grid sizes
"""
if __name__ == "__main__":
import multiprocessing
beg_scale = .5
end_scale = 6
num_point = 500
times = [] # Append tuple of (scale, time, interations)
scales = np.linspace(beg_scale,end_scale,num_point)
# Start parallel pool
p = multiprocessing.Pool()
result = p.map(corona,scales)
p.close()
p.join()
for i in range(len(result)):
times.append( (scales[i], result[i][1], result[i][0]) )
import pandas as pd
df = pd.DataFrame(times)
df.columns = ["Scale","Time","Iterations"]
df.to_csv("outputfile.csv",index=False)
| akswart/phys416code | hw8/code/hw8_exC_parallel_step_opt.py | hw8_exC_parallel_step_opt.py | py | 7,084 | python | en | code | 0 | github-code | 90 |
74761151657 | """Desafio 053 - Detector de palíndromo (Aula 01 a 13):
Crie um programa que leia uma frase qualquer e diga se ela é um palíndromo, desconsiderando os espaços."""
# Ler uma frase
reverso = '' # Variável para armazenar a versão invertida da frase
compara = '' # Variável para armazenar a versão original da frase
frase = str(input('Digite a frase: ')).strip().upper() # Lê a frase, remove espaços em branco e converte para letras maiúsculas
print(frase) # Imprime a frase original
# Loop para inverter a frase
for letra in frase:
reverso = letra + reverso
compara = compara + letra
# Verifica se a versão invertida é igual à versão original
if reverso == compara:
print(f'{frase} ao contrário é {reverso}:\nÉ UM PALÍNDROMO!')
else:
print(f'{frase} ao contrário é {reverso}:\nNÃO É UM PALÍNDROMO!')
# USANDO FATIAMENTO SEM FOR frase[::-1] | dualsgo/meus-estudos | cursoemvideo_python/setembro/exercicios/ex_053.py | ex_053.py | py | 884 | python | pt | code | 0 | github-code | 90 |
36880685153 | # Local copy of sacad. Downloaded from: https://github.com/desbma/sacad
import sacad
import asyncio
from os import path
from utils import slugify
# Finds and downloads album art
def downloadAlbumArt(album: str, artist: str, is_single: bool = False):
    """Search for and download cover art for an album (or single).

    The file is written to ~/Music/Albums/ as
    "<artist> - <album>.png" (singles get a " - Single" suffix).

    :param album: album (or track) title
    :param artist: artist name
    :param is_single: True when the release is a single
    :return: sacad's success value (truthy on success), or False on any error
    """
    try:
        if is_single:
            fileName = f'{slugify(artist)} - {slugify(album)} - Single'
        else:
            fileName = f'{slugify(artist)} - {slugify(album)}'
        # asyncio.run replaces the deprecated
        # get_event_loop().run_until_complete pattern for sync callers
        # (search_and_download is a coroutine function in sacad).
        success = asyncio.run(sacad.search_and_download(
            album,  # Album name
            artist,  # Artist name
            "png",  # File format, or None if you don't care
            1024,  # Preferred album size
            path.expanduser(f'~/Music/Albums/{fileName}.png'),  # Output path
            size_tolerance_prct=25,
            is_single=is_single))
        return success
    except Exception as e:
        print(f'ERROR! findAlbumArt.py - downloadAlbumArt. Passed in album: {album}, artist: {artist}. Error: {e}')
        return False
| MattHalloran/MusicFinder | findAlbumArt.py | findAlbumArt.py | py | 1,134 | python | en | code | 0 | github-code | 90 |
import sys

# Every painted square ('#') must have at least one painted 4-neighbour,
# otherwise the grid is rejected.
# NOTE(review): neighbour indices are clamped into the grid, so a border
# cell compares against itself and trivially passes — confirm intended.
h, w = map(int, input().split())
grid = [list(input()) for _ in range(h)]
for col in range(w):
    left, right = max(col - 1, 0), min(col + 1, w - 1)
    for row in range(h):
        if grid[row][col] != "#":
            continue
        up, down = max(row - 1, 0), min(row + 1, h - 1)
        neighbours = (grid[up][col], grid[down][col], grid[row][right], grid[row][left])
        if "#" not in neighbours:
            print("No")
            sys.exit()
print("Yes")
7016401996 | from . import db
class News(db.Model):
    """Flask-SQLAlchemy model for a single news article.

    Column ``doc`` labels are Chinese: 标题 = title, 内容 = body text,
    时间 = time, 发布人 = author/publisher.
    """
    __tablename__="news"
    # NOTE(review): using the (max 20 chars) title as the primary key forces
    # titles to be unique and short; `unique=True` is redundant on a primary
    # key — confirm this schema is intended.
    title=db.Column(db.String(20),primary_key=True,unique=True,nullable=False,doc="标题")
    text=db.Column(db.String(20),nullable=True,doc="内容")
    time=db.Column(db.String(20),nullable=True,doc='时间')
    author = db.Column(db.String(20), nullable=True, doc='发布人')
    def __init__(self,title,text,time,author):
        # Plain attribute assignment; SQLAlchemy maps these to the columns.
        self.title=title
        self.text=text
        self.time=time
        self.author=author
    def save(self):
        # Insert this row and commit immediately; returns self for chaining.
        db.session.add(self)
        db.session.commit()
        return self
    def update(self):
        # Commit in-place attribute changes on an already-tracked row.
        db.session.commit()
        return self
| NEPU1960/yjs | main/model/News.py | News.py | py | 663 | python | en | code | 0 | github-code | 90 |
7407735126 | import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from textblob import Word
import re
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# Load the labelled corpus (columns used below: 'text', 'emotions').
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
data = pd.read_csv(r'E:\Study\PROJECT\emotion.data')
# Dropping rows with other emotion labels
#data = data.drop(data[data.emotions == 'anger'].index)
#data = data.drop(data[data.emotions == 'fear'].index)
#data = data.drop(data[data.emotions == 'love'].index)
#data = data.drop(data[data.emotions == 'surprise'].index)
# Making all letters lowercase
data['text'] = data['text'].apply(lambda x: " ".join(x.lower() for x in x.split()))
# Removing Punctuation, Symbols (replace every non-word, non-space char)
data['text'] = data['text'].str.replace('[^\w\s]',' ')
# Removing Stop Words using NLTK (English stop word list)
stop = stopwords.words('english')
data['text'] = data['text'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
#Lemmatisation (TextBlob Word.lemmatize, word by word)
data['text'] = data['text'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
#Correcting Letter Repetitions
def de_repeat(text, _pattern=re.compile(r"(.)\1{2,}")):
    """Collapse any character repeated 3+ times in a row down to exactly two.

    e.g. "happpy" -> "happy", "cooool" -> "cool".

    The compiled pattern is bound once as a default argument so it is not
    looked up/rebuilt on every call (this runs once per word over the
    whole corpus).
    """
    return _pattern.sub(r"\1\1", text)
# Collapse repeated letters word by word (see de_repeat above).
data['text'] = data['text'].apply(lambda x: " ".join(de_repeat(x) for x in x.split()))
# Encoding output labels as integers
lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(data.emotions.values)
# Splitting into training and testing data in 90:10 ratio (stratified)
X_train, X_val, y_train, y_val = train_test_split(data.text.values, y, stratify=y, random_state=42, test_size=0.1, shuffle=True)
# Extracting TF-IDF features: fit the vocabulary/IDF on the TRAINING split only
tfidf = TfidfVectorizer(max_features=1000, analyzer='word', ngram_range=(1, 3))
X_train_tfidf = tfidf.fit_transform(X_train)
# Fix: only transform the validation split. The original called
# fit_transform(X_val), which relearned a different vocabulary/IDF on the
# validation data, so the val features did not line up with the features
# the models below are trained on.
X_val_tfidf = tfidf.transform(X_val)
# Extracting Count Vector features
# NOTE(review): fitting on the full corpus leaks val vocabulary into the
# vectorizer — consider fitting on X_train only.
count_vect = CountVectorizer(analyzer='word')
count_vect.fit(data['text'])
X_train_count = count_vect.transform(X_train)
X_val_count = count_vect.transform(X_val)
nb = MultinomialNB()
nb.fit(X_train_tfidf, y_train)
y_pred = nb.predict(X_val_tfidf)
print('naive bayes tfidf accuracy %s' % accuracy_score(y_pred, y_val))
# Model 2: Linear SVM
lsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None)
lsvm.fit(X_train_tfidf, y_train)
y_pred = lsvm.predict(X_val_tfidf)
print('svm using tfidf accuracy %s' % accuracy_score(y_pred, y_val))
# Model 3: logistic regression
logreg = LogisticRegression(C=1)
logreg.fit(X_train_tfidf, y_train)
y_pred = logreg.predict(X_val_tfidf)
print('log reg tfidf accuracy %s' % accuracy_score(y_pred, y_val))
# Model 4: Random Forest Classifier
rf = RandomForestClassifier(n_estimators=500)
rf.fit(X_train_tfidf, y_train)
y_pred = rf.predict(X_val_tfidf)
print('random forest tfidf accuracy %s' % accuracy_score(y_pred, y_val))
## Building models using count vectors feature
# Model 1: Multinomial Naive Bayes Classifier
nb = MultinomialNB()
nb.fit(X_train_count, y_train)
y_pred = nb.predict(X_val_count)
print('naive bayes count vectors accuracy %s' % accuracy_score(y_pred, y_val))
# Model 2: Linear SVM
lsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None)
lsvm.fit(X_train_count, y_train)
y_pred = lsvm.predict(X_val_count)
print('lsvm using count vectors accuracy %s' % accuracy_score(y_pred, y_val))
# Model 3: Logistic Regression
logreg = LogisticRegression(C=1)
logreg.fit(X_train_count, y_train)
y_pred = logreg.predict(X_val_count)
print('log reg count vectors accuracy %s' % accuracy_score(y_pred, y_val))
# Model 4: Random Forest Classifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train_count, y_train)
y_pred = rf.predict(X_val_count)
print('random forest with count vectors accuracy %s' % accuracy_score(y_pred, y_val))
| Souvikdebroy/NLP | EmotionDetection.py | EmotionDetection.py | py | 4,223 | python | en | code | 0 | github-code | 90 |
35220237081 | from Constants import*
class Snake:
    """Classic snake: a list of grid cells (head first) plus a heading vector."""

    def __init__(self):
        # Head is the first element; start heading right along the row.
        self.body = [(4, 3), (4, 2), (4, 1)]
        self.direction = (1, 0)

    def move(self, grow):
        """Advance one cell in the current direction; drop the tail unless growing."""
        dx, dy = self.direction
        head_x, head_y = self.body[0]
        self.body.insert(0, (head_x + dx, head_y + dy))
        if not grow:
            self.body.pop()

    def change_direction(self, new_direction):
        """Adopt *new_direction* unless it would reverse the current heading."""
        opposite = (new_direction[0] * -1, new_direction[1] * -1)
        if opposite != self.direction:
            self.direction = new_direction

    def check_collision(self):
        """Return True when the head left the grid or ran into the body."""
        head_x, head_y = self.body[0]
        out_of_bounds = not (0 <= head_x < GRID_WIDTH and 0 <= head_y < GRID_HEIGHT)
        return out_of_bounds or self.body[0] in self.body[1:]

    def check_food_collision(self, food):
        """Return True when the head occupies the food cell."""
        return self.body[0] == food
30610125838 | """added ready_for_planning column in Request
Revision ID: bd7caeb72cc8
Revises: 4d9e9849b0e3
Create Date: 2021-10-15 17:44:24.038430
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd7caeb72cc8'
down_revision = '4d9e9849b0e3'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add nullable boolean ``ready_for_planning`` to ``request``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('request', sa.Column('ready_for_planning', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``ready_for_planning`` column again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('request', 'ready_for_planning')
    # ### end Alembic commands ###
| aykhazanchi/id2207-mmse | migrations/versions/bd7caeb72cc8_added_ready_for_planning_column_in_.py | bd7caeb72cc8_added_ready_for_planning_column_in_.py | py | 707 | python | en | code | 0 | github-code | 90 |
7701986155 | # HTTP Package
# https://www.googleapis.com/books/v1/volumes?q=isbn:1101904224
import urllib.request
import json
import textwrap
# Query the Google Books API for a single ISBN via an HTTP GET request.
BOOKS_URL = "https://www.googleapis.com/books/v1/volumes?q=isbn:1101904224"
with urllib.request.urlopen(BOOKS_URL) as response:
    # Read the raw bytes and decode to text so it can be printed.
    payload = response.read().decode('utf-8')
print(textwrap.fill(payload, width=50))  # wrap for readability
print()  # new line
# The endpoint returns JSON (visible by opening the URL in a browser);
# parse it into a dict and pull a couple of fields out.
obj = json.loads(payload)
print(obj['kind'])
# First item of the result list, then its snippet text.
print(obj['items'][0]['searchInfo']['textSnippet'])
28051688358 | from typing import Tuple, List, Set
from main_package.fieldEntities.ant import Ant
from main_package.field import *
from main_package.fieldEntities.base import Base
from main_package.fieldEntities.food import Food
from main_package.interfaces.attackable import Attackable
logging.basicConfig(level=logging.INFO)
class gameBoard:
    """Grid world that owns all fields, player bases, ants and food.

    The board is stored row-major: ``self.gameBoard[y][x]``. Mutating
    operations (create/move/attack/feed) validate their inputs and log an
    error and return False instead of raising.
    """

    log = logging.getLogger(__name__)
    # Field types that are legal targets for an ant attack.
    validForAttack = [FieldTypeEnum.ANT, FieldTypeEnum.BASE]

    def __init__(self, xdim: int = 10, ydim: int = 10):
        """
        Initializes an empty board of the given dimensions
        :param xdim: x dimension (exclusive)
        :param ydim: y dimension (exclusive)
        """
        self.xdim = xdim
        self.ydim = ydim
        self.gameBoard = [[Field(xpos=x, ypos=y) for x in range(xdim)] for y in range(ydim)]
        self.ants: dict[str, Ant] = {}  # lower-cased ant id -> Ant entity
        self.playerBases = {}  # player name -> Base entity
        self.players = []  # players that own a base (registered in createBase)

    def getVisibleFields(self, playerName) -> List[Field] or None:
        """Return the de-duplicated fields visible to *playerName* (each of
        their ants and their base plus all neighbouring fields), or None if
        the player is unknown."""
        if playerName not in self.players:
            self.log.error("Player {} is not a valid player".format(playerName))
            return None
        visibleFields: List[Field] = []
        for antId in self.getAntIdsOfPlayer(playerName):
            ant = self.getAnt(antId)
            visibleFields += self.getNeighbouringFields(ant.fieldPosition)
            visibleFields.append(ant.fieldPosition)
        base: Base = self.getBase(playerName)
        visibleFields += self.getNeighbouringFields(base.fieldPosition)
        visibleFields.append(base.fieldPosition)
        return list(set(visibleFields))  # eliminate duplicates

    def getPlayerGameState(self, playerName) -> dict:
        """Serialize the player's current view into a plain dict: visible
        fields plus visible entities; the player's own ants carry extra
        detailed info.

        NOTE(review): an unknown playerName makes getVisibleFields return
        None and this method raise TypeError — confirm callers validate.
        """
        gameState = {"visibleFields": []}
        for field in self.getVisibleFields(playerName):
            fieldData = {"position": field.getPos(),
                         "type": field.type.name,
                         "occupyingEntityId": field.entity.getId() if field.entity is not None else ""}
            gameState["visibleFields"].append(fieldData)
        gameState["visibleEntities"] = {"ants": {}, "food": {}, "bases": {}}
        for entity in self.getVisibleEntities(playerName):
            if entity.getEntityType() == EntityType.BASE:
                baseEntity: Base = entity
                gameState["visibleEntities"]["bases"][baseEntity.getId()] = {
                    "owner": baseEntity.getOwner(),
                    "health": baseEntity.getRemainingHealth()
                }
            if entity.getEntityType() == EntityType.FOOD:
                foodEntity: Food = entity
                gameState["visibleEntities"]["food"][foodEntity.getId()] = {
                    "quantity": foodEntity.getRemainingFoodQuantity()
                }
            if entity.getEntityType() == EntityType.ANT:
                antEntity: Ant = entity
                # if the player owns the ant provide extra information
                detailedInfo = {}
                if antEntity.getOwner() == playerName:
                    detailedInfo = antEntity.getAntDetailedInfo()
                gameState["visibleEntities"]["ants"][antEntity.getId()] = {
                    "owner": antEntity.getOwner(),
                    "health": antEntity.getRemainingHealth(),
                    "detailedInfo": detailedInfo
                }
        return gameState

    def getVisibleEntities(self, playerName) -> List[Entity]:
        """Return the entities standing on any field visible to the player."""
        visibleFields = self.getVisibleFields(playerName)
        visibleEntities = []
        for field in visibleFields:
            if field.entity is not None:
                visibleEntities.append(field.entity)
        return visibleEntities

    def createBase(self, xpos: int, ypos: int, player: str) -> bool:
        """Place *player*'s (single) base on an empty interior field and
        register the player. Returns True on success."""
        # the base cannot be right at the board edge or outside the board
        if 0 >= xpos or xpos >= self.xdim or 0 >= ypos or ypos >= self.ydim:
            # consistency fix: use the class logger instead of the root logger
            self.log.error("Base cannot be placed outside board or at the board edge, field Dimensions {}, placement {}"
                           .format((self.xdim, self.ydim), (xpos, ypos)))
            return False
        # field where base is placed must be empty
        field = self.getField(xpos, ypos)
        if field.type != FieldTypeEnum.EMPTY:
            self.log.error("Base cannot be placed on field that is not empty. Field is {}".format(field.type))
            return False
        if player in self.playerBases.keys():
            self.log.error("Player " + player + " already has a base")
            return False
        # placing base
        base = Base(player)
        field.setEntity(base)
        self.playerBases[player] = base
        self.players.append(player)
        self.log.info("base for player {} created at coordinates ({},{})".format(player, xpos, ypos))
        return True

    def getBoardString(self):
        """Render the board as a whitespace-separated grid (one row per y)."""
        boardString = ""
        for y in range(len(self.gameBoard)):
            yRow = self.gameBoard[y]
            for x in range(len(yRow)):
                field = yRow[x]
                boardString += " " + str(field) + " "
            boardString += "\n"
        return boardString

    def getField(self, x: int, y: int):
        """Return the Field at (x, y), or None (with an error log) when the
        coordinates are off the board."""
        if not 0 <= x < self.xdim or not 0 <= y < self.ydim:
            self.log.error("Invalid board position {}".format((x, y)))
            return None
        return self.gameBoard[y][x]

    def getNeighbouringFieldCoordinates(self, xpos, ypos) -> List[Tuple]:
        """Return the on-board coordinates of the up-to-8 surrounding cells."""
        neighbouringFieldCoords = []
        for y_offset in [-1, 0, 1]:
            for x_offset in [-1, 0, 1]:
                if x_offset == y_offset == 0:
                    continue  # ignoring centre field, we want neighbours
                xcoord = xpos + x_offset
                ycoord = ypos + y_offset
                if 0 <= xcoord < self.xdim and 0 <= ycoord < self.ydim:
                    coords = (xcoord, ycoord)
                    neighbouringFieldCoords.append(coords)
        return neighbouringFieldCoords

    def getNeighbouringFields(self, field) -> List:
        """Return the Field objects in *field*'s 8-neighbourhood."""
        assert (isinstance(field, Field))
        neighbours = []
        for coords in self.getNeighbouringFieldCoordinates(field.xpos, field.ypos):
            neighbour = self.getField(coords[0], coords[1])
            if neighbour is not None:
                neighbours.append(neighbour)
        return neighbours

    def getAntIdsOfPlayer(self, playerName: str) -> List[str] or None:
        """Return the ids of all ants owned by *playerName*, or None when the
        player is unknown."""
        if playerName not in self.players:
            self.log.error("Player {} is not a valid player".format(playerName))
            return None
        playersAnts = list(filter(lambda ant: ant.playerName == playerName, self.ants.values()))
        return list(map(lambda ant: ant.getId(), playersAnts))

    def createAnt(self, xpos: int, ypos: int, antId: str, player: str) -> bool:
        """Spawn a new ant for *player* on an empty field adjacent to their
        own base. Ids are case-insensitive. Returns True on success."""
        # TODO player can only create ants in sections visible to them
        # check if an ant with this id already exists
        antId = str.lower(antId)
        # direct membership test: getAnt() would log a spurious
        # "No ant with id" error for every legitimate creation
        if antId in self.ants:
            self.log.error("Ant with id {} already exists".format(antId))
            return False
        # check if in board
        if not 0 <= xpos < self.xdim or not 0 <= ypos < self.ydim:
            # fix: the original concatenated str + tuple here, which raised
            # TypeError instead of logging the error
            self.log.error("position {} is outside of the board dimensions {}"
                           .format((xpos, ypos), (self.xdim, self.ydim)))
            return False
        # placement checks
        placementDesitnation: Field = self.getField(xpos, ypos)
        neighbouring_fields: List[Field] = self.getNeighbouringFields(placementDesitnation)
        if not any(f.type == FieldTypeEnum.BASE for f in neighbouring_fields):
            self.log.error("Invalid Placement, no adjacent base")
            return False
        elif placementDesitnation.type is not FieldTypeEnum.EMPTY:
            self.log.error("Invalid Placement, field not empty")
            return False
        # check if player owns base near which they want to place ant
        base: Base = next(filter(lambda x: x.type == FieldTypeEnum.BASE, neighbouring_fields)).entity
        if base.player != player:
            self.log.error("Player {} does not own the adjacent base".format(player))
            return False
        # set field to ant
        self.ants[antId] = Ant(antId, player)
        placementDesitnation.setEntity(self.ants[antId])
        self.log.info("Ant with id {} created at position ({},{})"
                      .format(antId, placementDesitnation.xpos, placementDesitnation.ypos))
        return True

    def moveAnt(self, antId: str, xpos: int, ypos: int) -> bool:
        """Move the ant one step onto an adjacent EMPTY field.
        Returns True when the move was performed."""
        antId = str.lower(antId)
        # ensure that antId is valid
        if self.getAnt(antId) is None:
            return False
        ant = self.ants[antId]
        # determine valid fields for movement
        fields = self.getNeighbouringFields(ant.fieldPosition)
        validFields = filter(lambda x: x.type == FieldTypeEnum.EMPTY, fields)
        # is movement valid ?
        fieldToMoveTo: Field = None
        for field in validFields:
            if field.xpos == xpos and field.ypos == ypos:
                fieldToMoveTo = field
                break
        currentx = ant.fieldPosition.xpos
        currenty = ant.fieldPosition.ypos
        if fieldToMoveTo is None:
            self.log.error("Movement of antId={} ({},{})->({},{}) is not valid.".format(
                ant.antId, currentx, currenty, xpos, ypos))
            return False
        # do move
        ant.fieldPosition.resetToEmpty()  # reset old field
        fieldToMoveTo.setEntity(ant)
        self.log.info("Ant antId={} moved ({},{})->({},{}) ".format(ant.antId, currentx, currenty, xpos, ypos))
        return True

    def attack(self, antId: str, xpos: int, ypos: int) -> bool:
        """Have the ant attack an adjacent ant or base field.
        Returns True when the attack was carried out."""
        antId = str.lower(antId)
        # ensure that antId is valid
        attackingAnt = self.getAnt(antId)
        if attackingAnt is None:
            return False
        # ensure target is valid
        neighbouringFields = self.getNeighbouringFields(attackingAnt.fieldPosition)
        fieldToAttack = self.getField(xpos, ypos)
        if fieldToAttack not in neighbouringFields:
            self.log.error("The field {} is not in range of ant {}".format((xpos, ypos), antId))
            return False
        if fieldToAttack.type not in gameBoard.validForAttack:
            self.log.error("The field {} is not a valid attack target for ant {}".format((xpos, ypos), antId))
            return False
        target: Attackable = fieldToAttack.entity  # TODO: might need "attackable" interface later
        attackingAnt.doAttack(target)
        return True

    def tick(self):
        """ tick checks status of all ants ant takes required actions accordingly (e.g remove ants with health < 1)"""
        antsToRemove: List[str] = []
        for (antId, entity) in self.ants.items():
            if entity.isDead(): antsToRemove.append(antId)
        for antId in antsToRemove:
            deadAnt: Ant = self.ants[antId]
            self.log.info("Ant {} Killed !".format(deadAnt.antId))
            deadAnt.fieldPosition.resetToEmpty()
            del self.ants[antId]
        # remove destroyed bases
        basesToRemove: List[str] = []
        for (playerName, entity) in self.playerBases.items():
            if entity.isDead(): basesToRemove.append(playerName)
        for playerName in basesToRemove:
            destroyedBase: Base = self.playerBases[playerName]
            self.log.info("Base of player {} destroyed !".format(destroyedBase.player))
            destroyedBase.fieldPosition.resetToEmpty()
            del self.playerBases[playerName]

    def getAnt(self, antId: str) -> Ant or None:
        """Case-insensitive lookup of an ant by id; logs an error and
        returns None when no such ant exists."""
        antId = str.lower(antId)
        if antId not in self.ants.keys():
            self.log.error("No ant with id: {}".format(antId))
            return None
        return self.ants[antId]

    def getBase(self, playerName: str) -> Base or None:
        """Return *playerName*'s base, or None (with an error log)."""
        if playerName not in self.playerBases.keys():
            self.log.error("Player {} has no base".format(playerName))
            return None
        return self.playerBases[playerName]

    def createFood(self, xpos: int, ypos: int, magnitude: int) -> bool:
        """Place a food pile of the given (positive, non-NaN) magnitude on an
        empty field. Returns True on success."""
        targetField = self.getField(xpos, ypos)
        if targetField is None or targetField.type is not FieldTypeEnum.EMPTY:
            self.log.error("Invalid target ({},{}) for placing food.".format(xpos, ypos))
            return False
        if magnitude <= 0 or magnitude != magnitude:  # test for negative or nan
            self.log.error("Invalid food magnitude value {}".format(magnitude))
            return False
        # use the explicitly imported Food class; the original `food.Food`
        # relied on the bare `food` module name leaking in via a star import
        foodEntity = Food(magnitude)
        targetField.setEntity(foodEntity)
        return True

    def feed(self, antId: str, targetXpos: int, targetYpos: int) -> bool:
        """Let the ant consume from an adjacent FOOD field. Returns the ant's
        feed result, or False on invalid input."""
        # checking if an ant with the given id exists (single lookup)
        feedingAnt: Ant = self.getAnt(antId)
        if feedingAnt is None:
            self.log.error("No ant with Id {}".format(antId))
            return False
        targetField = self.getField(targetXpos, targetYpos)
        # check if ant is next to food field
        if targetField not in self.getNeighbouringFields(feedingAnt.fieldPosition):
            self.log.error("Ant with id {} is not in range of targeted field ({},{})".format(antId, targetXpos, targetYpos))
            return False
        # check if field is food
        if targetField.type != FieldTypeEnum.FOOD:
            self.log.error("Ant with id {} tried to feed on a non food field ({},{})".format(antId, targetXpos, targetYpos))
            return False
        # ants have a food capacity and feeding speed value
        foodEntity: Food = targetField.entity
        return feedingAnt.feed(foodEntity)
| socialgorithm/hive | main_package/gameBoard.py | gameBoard.py | py | 13,692 | python | en | code | 0 | github-code | 90 |
15422794717 | import tkinter
from tkinter import *
from PIL import Image, ImageTk
root = Tk()
# Load the icon from disk and scale it down to a 100x100 thumbnail.
icon = Image.open("../docs/logos-design/RacingInsights_Icon.png").resize((100, 100))
photo = ImageTk.PhotoImage(icon)
label = tkinter.Label(image=photo)
# Keep a reference on the widget so the image is not garbage-collected.
label.image = photo
# Position the label relative to the window size.
label.place(relx=0.5, rely=0.5)
root.mainloop()
27090045558 | from spack import *
class Doxygen(CMakePackage):
    """Doxygen is the de facto standard tool for generating documentation
    from annotated C++ sources, but it also supports other popular programming
    languages such as C, Objective-C, C#, PHP, Java, Python, IDL (Corba,
    Microsoft, and UNO/OpenOffice flavors), Fortran, VHDL, Tcl, and to some
    extent D."""

    homepage = "http://www.stack.nl/~dimitri/doxygen/"
    url      = "http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.10.src.tar.gz"

    # Known releases with their md5 checksums.
    version('1.8.14', '41d8821133e8d8104280030553e2b42b')
    version('1.8.12', '08e0f7850c4d22cb5188da226b209a96')
    version('1.8.11', 'f4697a444feaed739cfa2f0644abc19b')
    version('1.8.10', '79767ccd986f12a0f949015efb5f058f')

    # graphviz appears to be a run-time optional dependency
    variant('graphviz', default=False,
            description='Build with dot command support from Graphviz.')

    depends_on("cmake@2.8.12:", type='build')
    depends_on("flex", type='build')
    depends_on("bison", type='build')

    # optional dependencies
    depends_on("graphviz", when="+graphviz", type='run')

    # Support C++14's std::shared_ptr. For details about this patch, see
    # https://github.com/Sleepyowl/doxygen/commit/6c380ba91ae41c6d5c409a5163119318932ae2a3?diff=unified
    # Also - https://github.com/doxygen/doxygen/pull/6588
    patch('shared_ptr.patch', when='@1.8.14')
| matzke1/spack | var/spack/repos/builtin/packages/doxygen/package.py | package.py | py | 1,404 | python | en | code | 2 | github-code | 90 |
37396484740 | # -*- coding: utf-8 -*-
"""
@File name : SortingAlgorithms.py
@Date : 2020-02-09 16:45
@Description : Thanks to: https://blog.csdn.net/weixin_41190227/article/details/86600821
* Compare Sort:
Bubble Sort, Select Sort, Insertion Sort, Shell Sort, Merge Sort, Quick Sort, Heap Sort,
* Non-compare Sort:
Counting Sort, Bucket Sort, Raidx Sort
@Author : VickeeX
"""
def bubble_sort(arr):
    """In-place bubble sort (ascending); returns the input list.

    Early-exits as soon as a whole pass performs no swap, which makes
    the best case (already sorted input) O(n).

    Time complexity: avg O(n^2); best O(n); worst O(n^2).
    Space complexity: O(1). Stable.
    """
    if len(arr) == 0:
        return arr
    for done in range(len(arr)):
        swapped = False
        # The last `done` positions already hold their final values.
        for k in range(len(arr) - done - 1):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
                swapped = True
        if not swapped:
            break
    return arr
def select_sort(arr, tag):
    """In-place selection sort (ascending); returns the input list.

    tag truthy -> repeatedly select the minimum and grow a sorted prefix.
    tag falsy  -> repeatedly select the maximum and grow a sorted suffix.
    Both variants produce the same ascending order.

    Time complexity: O(n^2) in every case. Space complexity: O(1).
    Suited to small inputs.
    """
    if len(arr) == 0:
        return arr
    n = len(arr)
    if tag:
        # Grow a sorted prefix from the left with successive minima.
        for front in range(n):
            smallest = front
            for k in range(front + 1, n):
                if arr[k] < arr[smallest]:
                    smallest = k
            arr[front], arr[smallest] = arr[smallest], arr[front]
    else:
        # Grow a sorted suffix from the right with successive maxima.
        for back in range(n - 1, -1, -1):
            largest = back
            for k in range(back):
                if arr[k] > arr[largest]:
                    largest = k
            arr[back], arr[largest] = arr[largest], arr[back]
    return arr
def insertion_sort(arr):
    """In-place insertion sort (ascending); returns the input list.

    Time complexity: avg O(n^2); best O(n); worst O(n^2).
    Space complexity: O(1). Stable; good for small or nearly-sorted input.
    """
    if len(arr) == 0:
        return arr
    for pos in range(1, len(arr)):
        key = arr[pos]
        hole = pos - 1
        # Shift larger elements one slot right to open a hole for key.
        while hole >= 0 and arr[hole] > key:
            arr[hole + 1] = arr[hole]
            hole -= 1
        arr[hole + 1] = key
    return arr
def shell_sort(arr):
    """In-place Shell sort: gap-shrinking insertion sort; returns the input.

    Gap sequence: n//2, n//4, ..., 1.
    Time complexity roughly O(n log n); space O(1). Not stable.
    """
    if len(arr) == 0:
        return arr
    gap = len(arr) // 2
    while gap:
        # Insertion-sort each gap-strided subsequence.
        for pos in range(gap, len(arr)):
            key, slot = arr[pos], pos - gap
            while slot >= 0 and arr[slot] > key:
                arr[slot + gap] = arr[slot]
                slot -= gap
            arr[slot + gap] = key
        gap //= 2
    return arr
def merge_sort(arr):
    """Top-down merge sort; returns a NEW sorted list (input untouched),
    except that lists of length < 2 are returned as-is.

    Time complexity: O(n log n) in all cases. Space: O(n). Stable.
    """
    if len(arr) < 2:
        return arr
    middle = len(arr) // 2
    left = merge_sort(arr[:middle])
    right = merge_sort(arr[middle:])
    # Merge the two sorted halves, preferring the left on ties (stability).
    merged, li, ri = [], 0, 0
    while li < len(left) and ri < len(right):
        if left[li] <= right[ri]:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    merged.extend(left[li:])
    merged.extend(right[ri:])
    return merged
def quick_sort(arr):
    """In-place quicksort with a random pivot; returns the sorted list
    (an empty input yields a fresh empty list, matching the recursion's
    degenerate-bounds guard).

    Time complexity: avg/best O(n log n); worst O(n^2).
    Space complexity: O(log n) expected recursion depth. Not stable.
    """

    def sort_range(items, lo, hi):
        # Degenerate bounds (covers the empty list) -> nothing to do.
        if len(items) == 0 or lo < 0 or hi >= len(items) or lo > hi:
            return []
        split = split_around_pivot(items, lo, hi)
        if split > lo:
            sort_range(items, lo, split - 1)
        if split < hi:
            sort_range(items, split + 1, hi)
        return items

    def split_around_pivot(items, lo, hi):
        from random import randint
        # Park a random pivot at the end, then sweep smaller-or-equal
        # elements into the left partition; `last_small` marks its end.
        pivot_at = randint(lo, hi)
        items[pivot_at], items[hi] = items[hi], items[pivot_at]
        last_small = lo - 1
        for scan in range(lo, hi + 1):
            # <= makes the pivot itself land at the partition boundary.
            if items[scan] <= items[hi]:
                last_small += 1
                items[scan], items[last_small] = items[last_small], items[scan]
        return last_small

    return sort_range(arr, 0, len(arr) - 1)
def heap_sort(arr):
    """In-place heap sort (ascending); returns the sorted list.

    Builds a max-heap, then repeatedly swaps the root (current maximum)
    into the growing sorted suffix and sifts the new root down.

    Bug fix: in a 0-indexed heap the children of node i are 2*i+1 and
    2*i+2. The original used 2*i and 2*i+1 (the 1-indexed formula), which
    skipped subtrees and produced unsorted output, e.g. heap_sort([2,1,3])
    returned [1,3,2].

    Time complexity: O(n log n) in all cases.
    Space complexity: O(1) extra (in place; O(log n) sift recursion).
    Not stable.
    """
    if len(arr) < 2:
        return arr

    def adjust(ar, i, l):
        # Sift ar[i] down within the first l elements (the unsorted part).
        idx = i
        for j in (2 * i + 1, 2 * i + 2):  # left / right child, 0-indexed
            if j < l and ar[j] > ar[idx]:
                idx = j
        if idx != i:  # a child was larger: swap and keep sifting down
            ar[idx], ar[i] = ar[i], ar[idx]
            adjust(ar, idx, l)

    # Heapify: sift down every internal node, last parent first.
    for i in range(len(arr) // 2 - 1, -1, -1):
        adjust(arr, i, len(arr))
    # Each round: move the max to the end, shrink the heap, re-sift root.
    for i in range(len(arr) - 1, 0, -1):
        arr[0], arr[i] = arr[i], arr[0]
        adjust(arr, 0, i)
    return arr
def counting_sort(arr):
    """In-place counting sort for integers; returns the input list.

    Counts occurrences over the value range [min, max]; shifting by the
    minimum (a bias) lets the counts array start at index 0, so negative
    values are supported.

    Time complexity: O(n + k) with k = max - min.
    Space complexity: O(k). Integers only.
    """
    if len(arr) < 2:
        return arr
    lo, hi = min(arr), max(arr)
    counts = [0] * (hi - lo + 1)
    for value in arr:
        counts[value - lo] += 1
    # Rewrite the input front-to-back from the tallies.
    write = 0
    for offset, occurrences in enumerate(counts):
        if occurrences:
            arr[write:write + occurrences] = [offset + lo] * occurrences
            write += occurrences
    return arr
def bucket_sort(arr, bucket_size):
    """Bucket sort for integers; returns a NEW sorted list (length < 2
    inputs are returned as-is).

    Values are distributed into buckets covering ``bucket_size``-wide value
    ranges (so ``bucket_size`` is a value gap, not a bucket capacity), then
    each bucket is sorted recursively. With bucket_size == 1 every bucket
    holds copies of a single value and can be emitted directly.

    Time complexity: avg/best O(n + k); worst O(n^2), k = max - min.
    Space complexity: O(n + k).
    """
    if len(arr) < 2:
        return arr
    # Single pass to find the value range [min_n, max_n].
    min_n = max_n = arr[0]
    for n in arr:
        if n < min_n:
            min_n = n
        if n > max_n:
            max_n = n
    bucket_count, buckets = (max_n - min_n) // bucket_size + 1, []
    for i in range(bucket_count):
        buckets.append([])
    for n in arr:  # map num to buckets; later buckets hold larger values
        buckets[(n - min_n) // bucket_size].append(n)
    ans = []
    for bucket in buckets:
        if bucket_size == 1:  # bucket holds one distinct value (possibly repeated), emit whole
            ans += bucket
        else:
            if bucket_count == 1:  # shrink the gap to avoid endless recursion on one bucket
                bucket_size -= 1
            tmp = bucket_sort(bucket, bucket_size)
            ans += tmp
    return ans
def raidx_sort(arr):
    """
    LSD radix sort for non-negative integers: distribute values by each
    decimal digit, least significant digit first, rebuilding the list after
    every pass.

    Time complexity:
        avg: O(n*k); best: O(n*k); worst: O(n*k)
        k = number of digits in the largest value
    Space complexity:
        O(n+k)
    """
    if len(arr) < 2:
        return arr
    passes = len(str(max(arr)))
    buckets = [[] for _ in range(10)]
    mod, div = 10, 1
    for _ in range(passes):
        for value in arr:
            buckets[(value % mod) // div].append(value)
        # Concatenate the buckets back into the working list, then reset.
        arr = [value for bucket in buckets for value in bucket]
        for bucket in buckets:
            bucket.clear()
        mod, div = mod * 10, div * 10
    return arr
# TODO: verify the stated time complexity of Bucket Sort and Radix Sort
| VickeeX/LeetCodePy | collections_for_interview/SortingAlgorithms.py | SortingAlgorithms.py | py | 8,852 | python | en | code | 0 | github-code | 90 |
14065068331 | import unittest
from datetime import datetime
import cf_units as unit
import numpy as np
import pytest
from iris.coords import CellMethod, DimCoord
from iris.cube import Cube
from iris.exceptions import CoordinateNotFoundError
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
ConvertProbabilitiesToPercentiles as Plugin,
)
from improver.metadata.probabilistic import find_threshold_coordinate
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_probability_cube,
)
from .ecc_test_data import (
ECC_TEMPERATURE_PROBABILITIES,
ECC_TEMPERATURE_THRESHOLDS,
set_up_spot_test_cube,
)
class Test__add_bounds_to_thresholds_and_probabilities(IrisTest):
    """
    Test the _add_bounds_to_thresholds_and_probabilities method of the
    ConvertProbabilitiesToPercentiles.
    """
    def setUp(self):
        """Set up data for testing."""
        # Flatten the probability grid to (threshold, point) rows/columns.
        self.probabilities_for_cdf = ECC_TEMPERATURE_PROBABILITIES.reshape(3, 9)
        self.threshold_points = ECC_TEMPERATURE_THRESHOLDS
        # ECC bounds used to pad the extreme ends of the CDF.
        self.bounds_pairing = (-40, 50)
    def test_basic(self):
        """Test that the plugin returns two numpy arrays."""
        result = Plugin()._add_bounds_to_thresholds_and_probabilities(
            self.threshold_points, self.probabilities_for_cdf, self.bounds_pairing
        )
        self.assertIsInstance(result[0], np.ndarray)
        self.assertIsInstance(result[1], np.ndarray)
    def test_bounds_of_threshold_points(self):
        """
        Test that the plugin returns the expected results for the
        threshold_points, where they've been padded with the values from
        the bounds_pairing.
        """
        result = Plugin()._add_bounds_to_thresholds_and_probabilities(
            self.threshold_points, self.probabilities_for_cdf, self.bounds_pairing
        )
        self.assertArrayAlmostEqual(result[0][0], self.bounds_pairing[0])
        self.assertArrayAlmostEqual(result[0][-1], self.bounds_pairing[1])
    def test_probability_data(self):
        """
        Test that the plugin returns the expected results for the
        probabilities, where they've been padded with zeros and ones to
        represent the extreme ends of the Cumulative Distribution Function.
        """
        zero_array = np.zeros(self.probabilities_for_cdf[:, 0].shape)
        one_array = np.ones(self.probabilities_for_cdf[:, 0].shape)
        result = Plugin()._add_bounds_to_thresholds_and_probabilities(
            self.threshold_points, self.probabilities_for_cdf, self.bounds_pairing
        )
        self.assertArrayAlmostEqual(result[1][:, 0], zero_array)
        self.assertArrayAlmostEqual(result[1][:, -1], one_array)
    def test_endpoints_of_distribution_exceeded(self):
        """
        Test that the plugin raises a ValueError when the constant
        end points of the distribution are exceeded by a threshold value
        used in the forecast.
        """
        # Threshold 60 lies above the upper ECC bound of 50.
        probabilities_for_cdf = np.array([[0.05, 0.7, 0.95]])
        threshold_points = np.array([8, 10, 60])
        msg = (
            "The calculated threshold values \\[-40 8 10 60 50\\] are "
            "not in ascending order as required for the cumulative distribution "
            "function \\(CDF\\). This is due to the threshold values exceeding "
            "the range given by the ECC bounds \\(-40, 50\\)."
        )
        with self.assertRaisesRegex(ValueError, msg):
            Plugin()._add_bounds_to_thresholds_and_probabilities(
                threshold_points, probabilities_for_cdf, self.bounds_pairing
            )
    def test_endpoints_of_distribution_exceeded_warning(self):
        """
        Test that the plugin raises a warning message when the constant
        end points of the distribution are exceeded by a threshold value
        used in the forecast and the ecc_bounds_warning keyword argument
        has been specified.
        """
        probabilities_for_cdf = np.array([[0.05, 0.7, 0.95]])
        threshold_points = np.array([8, 10, 60])
        plugin = Plugin(ecc_bounds_warning=True)
        warning_msg = (
            "The calculated threshold values \\[-40 8 10 60 50\\] are "
            "not in ascending order as required for the cumulative distribution "
            "function \\(CDF\\). This is due to the threshold values exceeding "
            "the range given by the ECC bounds \\(-40, 50\\). The threshold "
            "points that have exceeded the existing bounds will be used as "
            "new bounds."
        )
        with pytest.warns(UserWarning, match=warning_msg):
            plugin._add_bounds_to_thresholds_and_probabilities(
                threshold_points, probabilities_for_cdf, self.bounds_pairing
            )
    def test_new_endpoints_generation(self):
        """Test that the plugin re-applies the threshold bounds using the
        maximum and minimum threshold points values when the original bounds
        have been exceeded and ecc_bounds_warning has been set."""
        probabilities_for_cdf = np.array([[0.05, 0.7, 0.95]])
        threshold_points = np.array([-50, 10, 60])
        plugin = Plugin(ecc_bounds_warning=True)
        result = plugin._add_bounds_to_thresholds_and_probabilities(
            threshold_points, probabilities_for_cdf, self.bounds_pairing
        )
        self.assertEqual(max(result[0]), max(threshold_points))
        self.assertEqual(min(result[0]), min(threshold_points))
class Test__probabilities_to_percentiles(IrisTest):
    """Test the _probabilities_to_percentiles method of the
    ConvertProbabilitiesToPercentiles plugin."""
    def setUp(self):
        """Set up temperature cube."""
        self.cube = set_up_probability_cube(
            ECC_TEMPERATURE_PROBABILITIES,
            ECC_TEMPERATURE_THRESHOLDS,
            threshold_units="degC",
        )
        # Percentiles requested throughout these tests.
        self.percentiles = [10, 50, 90]
    def test_basic(self):
        """Test that the plugin returns an Iris.cube.Cube with the expected name"""
        result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)
        self.assertIsInstance(result, Cube)
        self.assertEqual(result.name(), "air_temperature")
    def test_unknown_thresholding(self):
        """Test an error is raised for "between thresholds" probability cubes"""
        self.cube.coord(var_name="threshold").attributes[
            "spp__relative_to_threshold"
        ] = "between"
        msg = "Probabilities to percentiles only implemented for"
        with self.assertRaisesRegex(NotImplementedError, msg):
            Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)
    def test_percentile_coord(self):
        """Test that the plugin returns an Iris.cube.Cube with an appropriate
        percentile coordinate with suitable units.
        """
        result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)
        self.assertIsInstance(result.coord("percentile"), DimCoord)
        self.assertArrayEqual(result.coord("percentile").points, self.percentiles)
        self.assertEqual(result.coord("percentile").units, unit.Unit("%"))
    def test_transpose_cube_dimensions(self):
        """
        Test that the plugin returns an the expected data, when comparing
        input cubes which have dimensions in a different order.
        """
        # Calculate result for nontransposed cube.
        nontransposed_result = Plugin()._probabilities_to_percentiles(
            self.cube, self.percentiles
        )
        # Calculate result for transposed cube.
        # Original cube dimensions are [P, Y, X].
        # Transposed cube dimensions are [X, Y, P].
        self.cube.transpose([2, 1, 0])
        transposed_result = Plugin()._probabilities_to_percentiles(
            self.cube, self.percentiles
        )
        # Result cube will be [P, X, Y]
        # Transpose cube to be [P, Y, X]
        transposed_result.transpose([0, 2, 1])
        self.assertArrayAlmostEqual(nontransposed_result.data, transposed_result.data)
    def test_simple_check_data_above(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles when input probabilities are given
        for being above a threshold.
        The input cube contains probabilities that values are above a given
        threshold.
        """
        expected = np.array([8.15384615, 9.38461538, 11.6])
        expected = expected[:, np.newaxis, np.newaxis]
        data = np.array([0.95, 0.3, 0.05])
        data = data[:, np.newaxis, np.newaxis]
        cube = set_up_probability_cube(
            data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units="degC"
        )
        result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, expected)
    def test_simple_check_data_below(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles when input probabilities are given
        for being below a threshold.
        The input cube contains probabilities that values are below a given
        threshold.
        """
        expected = np.array([8.4, 10.61538462, 11.84615385])
        expected = expected[:, np.newaxis, np.newaxis]
        # Reverse the probabilities so they are ascending with threshold.
        data = np.array([0.95, 0.3, 0.05])[::-1]
        data = data[:, np.newaxis, np.newaxis]
        cube = set_up_probability_cube(
            data.astype(np.float32),
            ECC_TEMPERATURE_THRESHOLDS,
            threshold_units="degC",
            spp__relative_to_threshold="below",
        )
        result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, expected)
    def test_check_data_multiple_timesteps(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles.
        """
        expected = np.array(
            [
                [[[8.0, 8.0], [-28.0, 8.666667]], [[8.0, -46], [8.0, -46]]],
                [[[12.0, 12.0], [12.0, 12.0]], [[10.5, 10.0], [10.5, 10.0]]],
                [[[36.0, 36.0], [36.0, 36.0]], [[11.5, 11.333333], [11.5, 12.0]]],
            ],
            dtype=np.float32,
        )
        cube = set_up_probability_cube(
            np.zeros((3, 2, 2), dtype=np.float32),
            ECC_TEMPERATURE_THRESHOLDS,
            threshold_units="degC",
            time=datetime(2015, 11, 23, 7),
            frt=datetime(2015, 11, 23, 6),
        )
        cube = add_coordinate(
            cube,
            [datetime(2015, 11, 23, 7), datetime(2015, 11, 23, 8)],
            "time",
            is_datetime=True,
            order=[1, 0, 2, 3],
        )
        cube.data = np.array(
            [
                [[[0.8, 0.8], [0.7, 0.9]], [[0.8, 0.6], [0.8, 0.6]]],
                [[[0.6, 0.6], [0.6, 0.6]], [[0.5, 0.4], [0.5, 0.4]]],
                [[[0.4, 0.4], [0.4, 0.4]], [[0.1, 0.1], [0.1, 0.2]]],
            ],
            dtype=np.float32,
        )
        percentiles = [20, 60, 80]
        result = Plugin()._probabilities_to_percentiles(cube, percentiles)
        self.assertArrayAlmostEqual(result.data, expected, decimal=5)
    def test_probabilities_not_monotonically_increasing(self):
        """
        Test that the plugin raises a Warning when the probabilities
        of the Cumulative Distribution Function are not monotonically
        increasing.
        """
        data = np.array([0.05, 0.7, 0.95])
        data = data[:, np.newaxis, np.newaxis]
        cube = set_up_probability_cube(
            data.astype(np.float32), ECC_TEMPERATURE_THRESHOLDS, threshold_units="degC"
        )
        warning_msg = "The probability values used to construct the"
        with pytest.warns(UserWarning, match=warning_msg):
            Plugin()._probabilities_to_percentiles(cube, self.percentiles)
    def test_result_cube_has_no_air_temperature_threshold_coordinate(self):
        """
        Test that the plugin returns a cube with coordinates that
        do not include a threshold-type coordinate.
        """
        result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)
        try:
            threshold_coord = find_threshold_coordinate(result)
        except CoordinateNotFoundError:
            threshold_coord = None
        self.assertIsNone(threshold_coord)
    def test_check_data(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles.
        """
        data = np.array(
            [
                [[16.8, 8.0, 10.4], [-46, 8.0, -78.4], [-78.4, -86.5, -89.2]],
                [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],
                [[55.2, 36.0, 50.4], [36.0, 11.6, 12.0], [11.0, 9.0, -2.8]],
            ],
            dtype=np.float32,
        )
        result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, data, decimal=4)
    def test_check_single_threshold(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles, if a single threshold is used for
        constructing the percentiles.
        """
        data = np.array(
            [
                [[13.2, 8.0, 13.2], [-46.0, 8.0, -78.4], [-78.4, -86.5, -89.2]],
                [[34, 31.1111, 34.0], [27.5, 31.1111, 8.0], [8.0, -32.5, -46.0]],
                [[54.8, 54.2222, 54.8], [53.5, 54.2222, 49.6], [49.6, 34, -2.8]],
            ],
            dtype=np.float32,
        )
        threshold_coord = find_threshold_coordinate(self.cube)
        cube = next(self.cube.slices_over(threshold_coord))
        result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, data, decimal=4)
    def test_lots_of_probability_thresholds(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles, if there are lots of thresholds.
        """
        data = np.array(
            [
                [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],
                [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],
                [
                    [26.099998, 26.099998, 26.099998],
                    [26.099998, 26.099998, 26.099998],
                    [26.099998, 26.099998, 26.099998],
                ],
            ],
            dtype=np.float32,
        )
        # 30 evenly spaced probabilities, identical at every grid point.
        input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T
        cube = set_up_probability_cube(
            input_probs.astype(np.float32),
            np.arange(30).astype(np.float32),
            threshold_units="degC",
        )
        result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, data)
    def test_lots_of_percentiles(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles, if lots of percentile values are
        requested.
        """
        data = np.array(
            [
                [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],
                [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],
                [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],
                [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],
                [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],
                [
                    [38.4, 10.333333, 16.8],
                    [10.333333, 10.2, 8.5],
                    [8.333333, -25.75, -40.6],
                ],
                [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],
                [
                    [48.0, 11.666667, 36.0],
                    [11.666667, 11.0, 10.5],
                    [9.666667, 1.25, -19.0],
                ],
                [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],
                [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],
            ],
            dtype=np.float32,
        )
        percentiles = np.arange(5, 100, 10)
        result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)
        self.assertArrayAlmostEqual(result.data, data, decimal=5)
    def test_check_data_spot_forecasts(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for the percentiles for spot forecasts.
        """
        data = np.array(
            [
                [16.8, 8, 10.4, -46, 8, -78.4, -78.4, -86.5, -89.2],
                [36.0, 10.0, 12.0, 10.0, 10.0, 8.0, 8.0, -32.5, -46.0],
                [55.2, 36, 50.4, 36, 11.6, 12.0, 11.0, 9.0, -2.8],
            ],
            dtype=np.float32,
        )
        cube = set_up_spot_test_cube(cube_type="probability")
        result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)
        self.assertArrayAlmostEqual(result.data, data, decimal=4)
    def test_masked_data_below(self):
        """Test that if mask_percentiles is true, data is masked as
        expected when input probability data is below a threshold"""
        expected_mask = np.full_like(self.cube.data, False, dtype=bool)
        expected_mask[:, 0, 0] = True
        expected_mask[1, 0, 2] = True
        expected_mask[2, 0] = True
        expected_mask[2, 1, 2] = True
        expected_mask[2, 1, 0] = True
        cube = set_up_probability_cube(
            1 - self.cube.data,
            [200, 1000, 15000],
            variable_name=(
                "cloud_base_height_assuming_only_consider_cloud_"
                "area_fraction_greater_than_4p5_oktas"
            ),
            threshold_units="m",
            spp__relative_to_threshold="below",
        )
        result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(
            cube, self.percentiles
        )
        self.assertArrayEqual(result.data.mask, expected_mask)
    def test_masked_data_above(self):
        """Test that if mask_percentiles is true, data is masked as expected
        when input probability data is above a threshold"""
        expected_mask = np.full_like(self.cube.data, False, dtype=bool)
        expected_mask[:, 0, 0] = True
        expected_mask[1, 0, 2] = True
        expected_mask[2, 0] = True
        expected_mask[2, 1, 2] = True
        expected_mask[2, 1, 0] = True
        cube = set_up_probability_cube(
            self.cube.data,
            [200, 1000, 15000],
            variable_name=(
                "cloud_base_height_assuming_only_consider_cloud_"
                "area_fraction_greater_than_4p5_oktas"
            ),
            threshold_units="m",
            spp__relative_to_threshold="above",
        )
        result = Plugin(mask_percentiles=True)._probabilities_to_percentiles(
            cube, self.percentiles
        )
        self.assertArrayEqual(result.data.mask, expected_mask)
class Test_process(IrisTest):
    """
    Test the process method of the ConvertProbabilitiesToPercentiles plugin.
    """
    def setUp(self):
        """Set up temperature probability cube and expected output percentiles."""
        self.cube = set_up_probability_cube(
            ECC_TEMPERATURE_PROBABILITIES,
            ECC_TEMPERATURE_THRESHOLDS,
            threshold_units="degC",
        )
        # Expected 25th/50th/75th percentile planes shared by the tests below.
        self.percentile_25 = np.array(
            [[24.0, 8.75, 11.0], [8.33333333, 8.75, -46.0], [-46.0, -66.25, -73.0]],
            dtype=np.float32,
        )
        self.percentile_50 = np.array(
            [[36.0, 10.0, 12.0], [10.0, 10.0, 8.0], [8.0, -32.5, -46.0]],
            dtype=np.float32,
        )
        self.percentile_75 = np.array(
            [
                [48.0, 11.66666667, 36.0],
                [11.66666667, 11.0, 10.5],
                [9.66666667, 1.25, -19.0],
            ],
            dtype=np.float32,
        )
    def test_check_data_specifying_no_of_percentiles(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for a specific number of percentiles.
        """
        expected_data = np.array(
            [self.percentile_25, self.percentile_50, self.percentile_75]
        )
        result = Plugin().process(self.cube, no_of_percentiles=3)
        self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)
    def test_check_data_specifying_single_percentile(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for a specific percentile passes in as a single realization
        list.
        """
        expected_data = np.array(self.percentile_25)
        result = Plugin().process(self.cube, percentiles=[25])
        self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)
    def test_check_data_specifying_single_percentile_not_as_list(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for a specific percentile passed in as a value.
        """
        expected_data = np.array(self.percentile_25)
        result = Plugin().process(self.cube, percentiles=25)
        self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)
    def test_check_data_specifying_percentiles(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values for a specific set of percentiles.
        """
        expected_data = np.array(
            [self.percentile_25, self.percentile_50, self.percentile_75]
        )
        result = Plugin().process(self.cube, percentiles=[25, 50, 75])
        self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)
    def test_check_data_not_specifying_percentiles(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values without specifying the number of percentiles.
        """
        expected_data = np.array(
            [self.percentile_25, self.percentile_50, self.percentile_75]
        )
        result = Plugin().process(self.cube)
        self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)
    def test_check_data_masked_input_data(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values when the input data is masked.
        """
        cube = self.cube.copy()
        cube.data[:, 0, 0] = np.nan
        cube.data = np.ma.masked_invalid(cube.data)
        expected_data = np.array(
            [self.percentile_25, self.percentile_50, self.percentile_75]
        )
        expected_data[:, 0, 0] = np.nan
        expected_data = np.ma.masked_invalid(expected_data)
        result = Plugin().process(cube)
        self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)
        self.assertArrayEqual(result.data.mask, expected_data.mask)
    def test_check_data_masked_input_data_non_nans(self):
        """
        Test that the plugin returns an Iris.cube.Cube with the expected
        data values when the input data is masked without underlying nans.
        """
        cube = self.cube.copy()
        cube.data[:, 0, 0] = 1000
        cube.data = np.ma.masked_equal(cube.data, 1000)
        expected_data = np.array(
            [self.percentile_25, self.percentile_50, self.percentile_75]
        )
        expected_data[:, 0, 0] = np.nan
        expected_data = np.ma.masked_invalid(expected_data)
        result = Plugin().process(cube)
        self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)
        self.assertArrayEqual(result.data.mask, expected_data.mask)
    def test_check_data_over_specifying_percentiles(self):
        """
        Test that the plugin raises a suitable error when both a number and set
        or percentiles are specified.
        """
        msg = "Cannot specify both no_of_percentiles and percentiles"
        with self.assertRaisesRegex(ValueError, msg):
            Plugin().process(self.cube, no_of_percentiles=3, percentiles=[25, 50, 75])
    def test_metadata(self):
        """Test name and cell methods are updated as expected after conversion"""
        threshold_coord = find_threshold_coordinate(self.cube)
        expected_name = threshold_coord.name()
        expected_units = threshold_coord.units
        # add a cell method indicating "max in period" for the underlying data
        self.cube.add_cell_method(
            CellMethod("max", coords="time", comments=f"of {expected_name}")
        )
        expected_cell_method = CellMethod("max", coords="time")
        result = Plugin().process(self.cube)
        self.assertEqual(result.name(), expected_name)
        self.assertEqual(result.units, expected_units)
        self.assertEqual(result.cell_methods[0], expected_cell_method)
    def test_vicinity_metadata(self):
        """Test vicinity cube name is correctly regenerated after processing"""
        self.cube.rename("probability_of_air_temperature_in_vicinity_above_threshold")
        result = Plugin().process(self.cube)
        self.assertEqual(result.name(), "air_temperature_in_vicinity")
if __name__ == "__main__":
unittest.main()
| metoppv/improver | improver_tests/ensemble_copula_coupling/test_ConvertProbabilitiesToPercentiles.py | test_ConvertProbabilitiesToPercentiles.py | py | 25,522 | python | en | code | 95 | github-code | 90 |
2798366476 | import json
from datetime import datetime
from http.server import HTTPServer, BaseHTTPRequestHandler
#
# Abstraction
#
class NotFoundError(Exception):
    """Raised by the dispatcher when no url pattern matches the request path.

    Derives from ``Exception`` rather than ``BaseException``: user-defined
    exceptions should subclass ``Exception`` so that generic
    ``except Exception`` handlers can catch them; ``BaseException`` is
    reserved for interpreter-level exits such as ``KeyboardInterrupt``.
    """
    pass
# Very crude url dispatching
def dispatch(request, urlpatterns):
    """Run the view of the first url pattern whose path equals request.path.

    Raises NotFoundError when no pattern matches.
    """
    matching_views = (
        pattern['view'] for pattern in urlpatterns
        if pattern['path'] == request.path
    )
    for view in matching_views:
        return view(request)
    raise NotFoundError(f'404: "{request.path}" Not Found')
# Very crude template rendering
def render(request, template_path, context):
with open(template_path) as template:
return template.read().format(**context)
# Very crude reverse
def reverse(name, urlpatterns):
    """Return the path registered under *name*, or None when absent."""
    return next(
        (pattern['path'] for pattern in urlpatterns if pattern['name'] == name),
        None,
    )
def home(request):
    """Render the landing page template with a static context."""
    return render(request, './home.html', {
        'title': 'Hello Jango!',
        'body': 'Tiny gimped version of Django',
        'date': datetime.now(),
    })
def about(request):
    """Return a small JSON payload proving the server works."""
    payload = {'message': 'Jango Works!~'}
    return json.dumps(payload)
# Route table consumed by dispatch() and reverse(): each entry maps a
# literal request path to its view callable and a reversible name.
urlpatterns = [
    {'path': '/', 'view': home, 'name': 'home'},
    {'path': '/about', 'view': about, 'name': 'about'}
]
class JangoRequestHandler(BaseHTTPRequestHandler):
    """HTTP handler that funnels every GET through the crude url dispatcher."""
    def do_GET(self):
        try:
            response = dispatch(self, urlpatterns)
            self.send_response(200)
            self.end_headers()
            # wfile expects bytes; views return str, so encode as UTF-8.
            self.wfile.write(bytes(response, 'utf8'))
        except NotFoundError:
            # No url pattern matched the request path.
            self.send_response_only(404)
def run(server_class=HTTPServer, handler_class=JangoRequestHandler):
    """Start a blocking HTTP server on port 8000 using the given classes."""
    server_address = ('', 8000)
    httpd = server_class(server_address, handler_class)
    httpd.serve_forever()
if __name__ == '__main__':
    run()
| PdxCodeGuild/class_salmon | 4 Django/examples/jango/jango.py | jango.py | py | 1,688 | python | en | code | 5 | github-code | 90 |
9863818255 | # # 벽 부수고 이동하기
# from collections import deque
# n, m = map(int,input().split())
# graph = []
# for i in range(n):
# graph.append(list(map(int,input())))
# # print("my_list : ", my_list)
# dx = [1, -1, 0, 0]
# dy = [0, 0, 1, -1]
# def bfs(a, b):
# queue = deque()
# queue.append([a, b])
# x, y = queue.popleft()
# while(queue):
# for i in range(4):
# nx = x + dx[i]
# ny = y + dy[i]
# if x < 0 and x >= n and y < 0 and y >= m:
# continue
# if graph[nx][ny] == 1:
# continue
# if graph[nx][ny] == 0:
# graph[nx][ny] = graph[x][y] + 1
# queue.append((nx, ny))
# # 가장 오른쪽 아래까지의 최단 거리 반환
# if graph[n - 1][m - 1] == 0:
# return -1
# else:
# return graph[n - 1][m - 1]
# print(bfs(0, 0))
# 미로 탐색
from collections import deque
# Grid dimensions: n rows, m columns.
n, m = map(int, input().split())
graph = []
# Each maze row arrives as an unseparated digit string, e.g. "101011".
for i in range(n):
    graph.append(list(map(int, input())))
# Four-neighbour offsets (up/down/left/right).
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
def bfs(x, y, graph):
    """Breadth-first flood fill from (x, y): overwrite each reachable 0-cell
    with (distance so far + 1) and return the value at the bottom-right
    corner plus one, or -1 if that corner was never reached.

    Reads module-level ``n`` and ``m`` for the grid bounds and mutates
    ``graph`` in place.
    NOTE(review): cells equal to 1 are skipped as walls and 0-cells are
    treated as open; together with the trailing ``+ 1`` this looks
    inconsistent with the commented-out problem-2206 code above -- confirm
    against the intended Baekjoon problem before trusting the result.
    """
    # deque gives O(1) pops from the left for the BFS queue.
    queue = deque()
    queue.append((x, y))
    while queue:
        x, y = queue.popleft()
        # Examine the four neighbouring positions of the current cell.
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            if nx < 0 or nx >= n or ny < 0 or ny >= m:
                continue
            if graph[nx][ny] == 1:
                continue
            if graph[nx][ny] == 0:
                graph[nx][ny] = graph[x][y] + 1
                queue.append((nx, ny))
    # Return the distance recorded at the bottom-right corner.
    if graph[n - 1][m - 1] == 0:
        return -1
    else:
        return graph[n - 1][m - 1] + 1
one_list = []
value_list = []
# Collect the coordinates of every wall cell (value 1).
for i, row in enumerate(graph):
    for j, cell in enumerate(row):
        if cell == 1:
            one_list.append([i, j])
for x, y in one_list:
    # Work on an independent copy of the grid: the original
    # `copy_graph = graph` only aliased the same list-of-lists, so each
    # iteration corrupted the maze for every later iteration.
    copy_graph = [row[:] for row in graph]
    # Knock down this wall, then search once.  (Previously bfs was called
    # twice per wall with the first result discarded, doubling the work and
    # mutating the shared grid a second time.)
    copy_graph[x][y] = 0
    value_list.append(bfs(x, y, copy_graph))
print("value_list : ", value_list)
# | KIMJINMINININN/Algorithm-Study | Baekjoon/DFS_BFS/2206.py | 2206.py | py | 2,058 | python | en | code | 0 | github-code | 90 |
25696534022 | """
Created: 23 Mar. 2020
Author: Jordan Prechac
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
from revibe.utils.classes import default_repr
# -----------------------------------------------------------------------------
class File(models.Model):
    """An artist-owned uploaded file (audio, image, or other)."""
    # Allowed values for the optional ``file_type`` field.
    _type_choices = (
        ('audio', 'Audio'),
        ('image', 'Image'),
        ('other', 'Other'),
    )
    file = models.FileField(
        null=False, blank=False,
        verbose_name=_("file"),
        help_text=_("The file's file")
    )
    display_name = models.CharField(
        max_length=255,
        null=True, blank=True,
        verbose_name=_("display name"),
        help_text=_("File display name")
    )
    file_type = models.CharField(
        max_length=100,
        choices=_type_choices,
        null=True, blank=True,
        verbose_name=_("file type"),
        help_text=_("The kind of file this is. Helps with filtering and sorting. Not required.")
    )
    # Only Revibe-platform artists may own files (limit_choices_to).
    owner = models.ForeignKey(
        to='content.Artist',
        on_delete=models.CASCADE,
        related_name="files",
        limit_choices_to={"platform": "Revibe"},
        null=False, blank=False,
        verbose_name=_("owner"),
        help_text=_("The artist that uploaded and owns this file")
    )
    # Many-to-many through FileShare; grants other artists edit access.
    shared_with = models.ManyToManyField(
        to='content.artist',
        related_name='shared_files',
        through='cloud_storage.fileshare',
        verbose_name=_("shared with"),
        help_text=_("Artists that have edit access to the file")
    )
    # Timestamps maintained automatically by Django.
    created_date = models.DateTimeField(
        auto_now_add=True
    )
    last_changed = models.DateTimeField(
        auto_now=True
    )
    def __str__(self):
        # Prefer the display name when it carries real content.
        if self.display_name not in [None, "", " "]:
            return self.display_name
        return self.file.name
    def __repr__(self):
        return default_repr(self)
    class Meta:
        verbose_name = "file"
        verbose_name_plural = "files"
class FileShare(models.Model):
    """Through model linking a File to an Artist it has been shared with."""
    file = models.ForeignKey(
        to='cloud_storage.file',
        on_delete=models.CASCADE,
        related_name='file_shares',
        null=False, blank=False,
        verbose_name=_("file"),
        help_text=_("The related File object")
    )
    # Only Revibe-platform artists may receive shares (limit_choices_to).
    artist = models.ForeignKey(
        to='content.artist',
        on_delete=models.CASCADE,
        related_name='file_shares',
        limit_choices_to={"platform": "Revibe"},
        null=False, blank=False,
        verbose_name=_("artist"),
        help_text=_("The Artist the file is shared with")
    )
    # Set once when the share is created.
    date_created = models.DateTimeField(
        auto_now_add=True
    )
    def __str__(self):
        return str(self.file)
    def __repr__(self):
        return default_repr(self)
    class Meta:
        verbose_name = "shared file"
        verbose_name_plural = "shared files"
| Revibe-Music/core-services | cloud_storage/models.py | models.py | py | 2,885 | python | en | code | 2 | github-code | 90 |
10304824777 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/28 11:43
# @Author : DollA
# @File : stark.py
# @Software: PyCharm
from django.conf.urls import url
from django.shortcuts import HttpResponse, render, redirect
import functools
from types import FunctionType
from django.utils.safestring import mark_safe
from django.urls import reverse
from django import forms
from django.db.models import Q
from django.http import QueryDict
from django.db.models.fields.related import ForeignKey, ManyToManyField
from stark.utils.pagination import Pagination
class ModelConfigMapping(object):
    """Bundle of everything needed to register one model with the admin site."""

    def __init__(self, model, config, prev):
        """
        :param model: the model class being registered
        :param config: the StarkConfig (sub)class driving its admin pages
        :param prev: URL prefix distinguishing extended registrations,
                     e.g. 'pri' for private, 'pub' for public
        """
        self.prev = prev
        self.config = config
        self.model = model
# Factory for list_display columns backed by a choices field.
def get_chioce_text(head, field):
    """Build a list-page column callback that renders a choices field's
    human-readable label (via Django's ``get_<field>_display``) instead of
    its stored value.

    :param head: column header text
    :param field: model field name whose display label should be shown
    """
    def inner(self, row=None, isHeader=False):
        if isHeader:
            return head
        display_getter = getattr(row, 'get_%s_display' % field)
        return display_getter()
    return inner
# Renders one row of the combined-search (filter) panel.
class Row(object):
    """
    One row of the combined-search panel.

    1. Rendered layout:
           All | condition 1 | condition 2 ...
       (steps 2 and 3 below happen in Option.get_queryset)
    2. When the condition is a choices field:
       iterate over that field's choices list.
    3. When the condition is a foreign key:
       reach the related model via field.remote_field.model and use its
       queryset.
    4. Search semantics:
       selecting "All" removes this row's constraint from the query string,
       meaning no filtering on this field.
    """
    def __init__(self, data_list, option, query_dict):
        """
        :param data_list: tuple or QuerySet holding either model objects
                          (QuerySet) or choice pairs (tuple)
        :param option: the Option instance this row belongs to
        :param query_dict: request.GET
        """
        self.data_list = data_list
        self.option = option
        self.query_dict = query_dict
    def __iter__(self):
        """
        Rebuild the style and URL of every button in this row for the
        current request; makes the Row iterable as a generator of HTML
        fragments.

        1. Copy the request's GET QueryDict (the original is immutable).
        2. Read the values currently selected for this row's field.
        3. Build the "All" link:
           if the field appears in the request, drop it to form the "All"
           URL; otherwise mark "All" as active and keep the URL unchanged.
        4. Render each search option:
           extract its text and value, then
           - single-select: if the value is already selected, its link
             removes the field (toggle off) and the button is active;
             otherwise the link sets the field to this value;
           - multi-select: toggle the value inside the field's value list.
        """
        yield '<div class="whole">'
        # Copy before mutating: request.GET itself is immutable.
        total_query_dict = self.query_dict.copy()
        total_query_dict._mutable = True
        origin_value_list = self.query_dict.getlist(self.option.field)  # ['2',]
        if origin_value_list:
            total_query_dict.pop(self.option.field)
            yield '<a href="?%s">全部</a>' % total_query_dict.urlencode()
        else:
            yield '<a class="active" href="?%s">全部</a>' % total_query_dict.urlencode()
        yield '</div>'
        yield '<div class="others">'
        for item in self.data_list:  # item: one object/choice from data_list
            val = str(self.option.get_value(item))
            text = self.option.get_text(item)
            query_dict = self.query_dict.copy()
            query_dict._mutable = True
            if not self.option.isMulti:
                if val in origin_value_list:
                    query_dict.pop(self.option.field)
                    yield '<a class="active" href="?%s">%s</a>' % (query_dict.urlencode(), text)
                else:
                    query_dict[self.option.field] = val
                    yield '<a href="?%s">%s</a>' % (query_dict.urlencode(), text)
            else:
                # Multi-select.
                # Values currently selected for this field.
                multi_val_list = query_dict.getlist(self.option.field)
                if val in origin_value_list:
                    # Already selected: toggle this value off.
                    multi_val_list.remove(val)
                    # Write the reduced list back.
                    query_dict.setlist(self.option.field, multi_val_list)
                    yield '<a class="active" href="?%s">%s</a>' % (query_dict.urlencode(), text)
                else:
                    # Add this value to the current selection.
                    multi_val_list.append(val)
                    query_dict.setlist(self.option.field, multi_val_list)
                    yield '<a href="?%s">%s</a>' % (query_dict.urlencode(), text)
        yield '</div>'
# Configuration item for the combined-search panel.
class Option(object):
    """
    Wraps one field used for combined search; get_queryset turns it into a
    Row object for rendering.
    """
    def __init__(self, field, condition=None, text_func=None, value_func=None, isChoice=False, isMulti=False,
                 isFk=False):
        """
        :param field: field name string, e.g. 'name', 'age'
        :param condition: filter kwargs restricting which values are offered
                          on the list page
        :param text_func: callable returning the human-readable label
        :param value_func: callable returning the value sent back to the
                           server as the search condition
        :param isChoice: bool, True when the field uses choices
        :param isMulti: bool, True when multiple values may be selected
        :param isFk: bool, True when the field is a foreign key
                     (currently unused)
        """
        self.field = field
        self.isChoice = isChoice
        if not condition:
            condition = {}
        self.condition = condition
        self.text_func = text_func
        self.value_func = value_func
        self.isMulti = isMulti
        self.isFk = isFk
    def get_queryset(self, _field, model_class, query_dict):
        """
        Build the Row holding this field's selectable data:
        1. FK / M2M field: use the related model's queryset
        2. choices field: use the choices tuple
        3. otherwise: use the model's own queryset
        :param _field: the model field instance
        :param model_class: the model class itself
        :param query_dict: request.GET
        """
        if isinstance(_field, ForeignKey) or isinstance(_field, ManyToManyField):
            row = Row(_field.remote_field.model.objects.filter(**self.condition), self, query_dict)
        else:
            if self.isChoice:
                row = Row(_field.choices, self, query_dict)
            else:
                row = Row(model_class.objects.filter(**self.condition), self, query_dict)
        return row
    def get_text(self, item):
        """
        Label shown for one search option: use text_func when provided,
        otherwise the object's str() representation.
        :param item: one entry of the rendered queryset/choices
        """
        if self.text_func:
            return self.text_func(item)
        return str(item)
    def get_value(self, item):
        """
        Value carried in the URL for this option (the search condition):
        value_func when provided, the choice key for choices fields,
        otherwise the object's primary key.
        """
        if self.value_func:
            return self.value_func(item)
        if self.isChoice:
            return item[0]
        return item.pk
class ChangeListParameters(object):
    """
    Bundle of everything the change-list (table) page template needs:
    bulk actions, the add button, the page's queryset, the column
    headers and the combined-search filter options.
    """
    def __init__(self, config, queryset, search_list, search_condition, con):
        self.config = config
        # Bulk-action functions exposed as {'text': ..., 'name': ...}
        # dicts for the template's <select> widget.
        self.action_list = [
            {'text': action.text, 'name': action.__name__}
            for action in config.get_action_list()
        ]
        # "add" is the url-name suffix used for reverse resolution.
        self.add_btn = config.get_add_btn('add')
        # Rows to display after all filtering/paging has been applied.
        self.queryset = queryset
        # Column headers / cell renderers.
        self.list_display = config.get_list_display()
        self.list_filter = config.get_filter_list()
    def gen_list_filter_rows(self):
        """
        Yield one Row per combined-search option, for the template to
        render as a line of clickable filter choices.
        """
        for option in self.list_filter:
            # Resolve the concrete model field object for this option.
            field_obj = self.config.model_class._meta.get_field(option.field)
            # Row needs the field, the model class and the raw GET dict.
            yield option.get_queryset(field_obj, self.config.model_class, self.config.request.GET)
class StarkConfig(object):
    """
    Per-model configuration for the stark site.

    Supplies the default change-list / add / change / delete views plus
    their url patterns; subclasses override the class attributes and
    hook methods below to customize behaviour for one registered model.
    """
    order_by = []  # default ordering; left empty because not every table has an ``id`` column
    # apps may override ordering via this hook in their own stark config
    list_display = []  # default table columns; empty so each subclass decides what to show
    model_form_class = None  # hook for supplying a custom ModelForm

    ########### functions backing the bulk-action feature ##########
    def multi_delete(self, request):
        """
        Bulk delete: remove every object whose pk checkbox was selected.

        :param request: current request; selected pks arrive in POST['pk']
        """
        pk_list = request.POST.getlist('pk')
        self.model_class.objects.filter(pk__in=pk_list).delete()

    def multi_init(self, request):
        """
        Bulk initialize (placeholder, not yet implemented).
        """
        pass

    # ``text`` is the user-facing label rendered in the action <select>;
    # these literals are deliberately left untranslated.
    multi_delete.text = '批量删除'
    multi_init.text = '初始化'

    action_list = []  # enabled bulk-action functions; empty by default
    search_list = []  # columns covered by the keyword search box
    filter_list = []  # Option objects rendered as combined-search rows (e.g. gender, level)

    def __init__(self, model_class, site, prev):
        """
        :param model_class: the model class this config manages
        :param site: the AdminSite instance the model is registered on
        :param prev: extra url prefix when one model has several configs,
            e.g. ``pri`` (private) / ``pub`` (public)
        """
        self.model_class = model_class
        self.site = site
        self.prev = prev
        self.request = None  # populated per-request by ``wrapper``
        self.back_condition_key = "_filter"  # query-string key carrying the previous search condition

    # decorator
    def wrapper(self, func):
        """
        Hook run around every view: stores the current request on ``self``
        so helpers such as ``reverse_url`` can read it.
        ``functools.wraps`` keeps the wrapped view's metadata intact.

        :param func: the view function being wrapped
        :return: the wrapping function
        """
        @functools.wraps(func)
        def inner(request, *args, **kwargs):
            self.request = request
            return func(request, *args, **kwargs)
        return inner

    ################### list-page cell renderers: checkbox / edit / delete #################
    def display_checkbox(self, row=None, isHeader=False):
        """
        Checkbox column.

        1. When ``isHeader`` is True, return the column title.
        2. Otherwise return an <input> tag whose value is the row's pk.

        :param row: one data object (templatetags pass each queryset row)
        :param isHeader: whether this call renders the header cell
        :return: safe html string rendered into the table
        """
        if isHeader:
            return "选择"
        return mark_safe("<input type='checkbox' name='pk' value='%s'/>" % row.pk)

    def display_edit(self, row=None, isHeader=False):
        # Edit-link column; ``row`` is required because the reversed url
        # embeds the row's pk.
        if isHeader:
            return "编辑"
        reverse_url_edit = self.reverse_url('change', row=row)
        return mark_safe('<a href="%s"><i class="fa fa-edit" aria-hidden="true"></i></a>' % reverse_url_edit)

    def display_del(self, row=None, isHeader=False):
        # Delete-link column.
        if isHeader:
            return "删除"
        reverse_url_del = self.reverse_url('del', row=row)
        return mark_safe('<a href="%s"><i class="fa fa-trash-o" aria-hidden="true"></i></a>' % reverse_url_del)

    def display_del_edit(self, row=None, isHeader=False):
        # Combined edit + delete column.
        if isHeader:
            return "操作"
        reverse_url_edit = self.reverse_url('change', row=row)
        reverse_url_del = self.reverse_url('del', row=row)
        del_edit_tag = """<a href="%s"><i class="fa fa-edit" aria-hidden="true"></i></a></a> |
        <a href="%s"><i class="fa fa-trash-o" aria-hidden="true"></i></a>
        """ % (reverse_url_edit, reverse_url_del)
        return mark_safe(del_edit_tag)

    # ordering hook
    def get_order_by(self):
        return self.order_by

    def get_list_display(self):
        """Columns to show; the combined edit/delete column is always appended."""
        val = []
        val.extend(self.list_display)
        val.append(StarkConfig.display_del_edit)  # the unbound class attribute, not self
        return val

    ############################ crud for every registered table lives here ###########################
    def get_add_btn(self, display_type):
        """
        Url for the page's "add" button (reverse-resolved); also the hook
        deciding whether the button is shown.

        :param display_type: the literal string "add"
        :return: safe html anchor tag
        """
        reverse_url_add = self.reverse_url(display_type)
        return mark_safe('<a href="%s" class="btn btn-success">添加</a>' % reverse_url_add)

    def get_action_list(self):
        """Enabled bulk-action functions (hook)."""
        val = []
        val.extend(self.action_list)
        return val

    def get_action_dict(self):
        """
        Dict used to reject forged requests.

        The posted action name is resolved via ``getattr`` on this
        instance, so a user could submit any method name; checking
        membership in this dict restricts execution to the configured
        actions only.
        """
        val = {}
        # built from get_action_list() so subclass hooks stay consistent
        # (previously this read self.action_list directly)
        for item in self.get_action_list():
            val[item.__name__] = item
        return val

    def get_search_list(self):
        """Columns covered by the keyword search (hook)."""
        val = []
        val.extend(self.search_list)
        return val

    def get_search_condition(self, request):
        """
        Build the keyword-search condition.

        1. Fetch the searchable columns.
        2. Read the user's keyword from the request ("" when absent).
        3. Create an OR-joined Q object.
        4. Append one ``<field>__contains`` child per searchable column.

        :param request:
        :return: (search columns, raw keyword, populated Q object)
        """
        search_list = self.get_search_list()
        search_condition = request.GET.get('q', '').strip()
        # OR-joined condition
        con = Q()
        con.connector = 'OR'
        if search_condition:
            for field in search_list:
                con.children.append(('%s__contains' % field, search_condition))
        return search_list, search_condition, con

    def get_filter_list(self):
        """Combined-search Option objects (hook)."""
        val = []
        val.extend(self.filter_list)
        return val

    def get_list_filter_condition(self):
        """
        Collect the combined-search values the user clicked.

        Each Option whose field appears in the query string contributes a
        ``<field>__in`` filter.

        :return: dict of all selected conditions
        """
        filter_list = self.get_filter_list()
        comb_condition = {}
        for option in filter_list:
            element = self.request.GET.getlist(option.field)
            if element:
                comb_condition['%s__in' % option.field] = element
        return comb_condition

    def get_queryset(self):
        """Base queryset hook — e.g. to split public vs. private customers."""
        return self.model_class.objects

    def changelist_view(self, request):
        """
        Build and render the data-table page: the table itself, bulk
        actions, the add button, keyword search, combined search and
        pagination.

        :param request:
        :return:
        """
        #### bulk actions ####
        if request.method == 'POST':
            action_name = request.POST.get('action')
            # membership check guards against forged method names
            action_dict = self.get_action_dict()
            if action_name not in action_dict:
                return HttpResponse('非法请求')
            response = getattr(self, action_name)(request)
            if response:
                # an action may return its own response to show its result
                return response
        #### keyword search ####
        search_list, search_condition, con = self.get_search_condition(request)
        ##### pagination #####
        page = self.request.GET.get('page')
        # Count the same filtered queryset that is displayed, so the page
        # count matches the rows shown (previously the unfiltered default
        # manager was counted and combined-search filters were ignored).
        filtered_queryset = self.get_queryset().filter(con).filter(
            **self.get_list_filter_condition()).distinct()
        obj_total_count = filtered_queryset.count()
        # copy of the current query string; QueryDict copies are immutable
        # until _mutable is flipped
        query_params = request.GET.copy()
        query_params._mutable = True
        page_obj = Pagination(page, obj_total_count, request.path_info, query_params, per_page=7)
        # rows for the current page
        queryset = filtered_queryset.order_by(*self.get_order_by())[page_obj.start:page_obj.end]
        # bundle everything the template needs
        change_list_parameter = ChangeListParameters(self, queryset, search_list, search_condition, con)
        return render(request, 'stark/changelist.html', locals())

    def get_model_form_class(self):
        """
        Hook for a custom ModelForm; falls back to a generic form exposing
        every model field when none is configured.
        """
        if self.model_form_class:
            return self.model_form_class

        class AddModelForm(forms.ModelForm):
            """Default add/change form exposing every model field."""
            class Meta:
                model = self.model_class
                fields = "__all__"
        return AddModelForm

    def save(self, form, modify=False):
        """
        Hook around the save step.

        :param form: a validated (Model)Form
        :param modify: False means create, True means update
        :return: the saved object
        """
        return form.save()

    def add_view(self, request):
        """
        Add page for the model.

        GET renders an empty form.  A valid POST saves and redirects back
        to the change list.  An invalid POST re-renders the bound form so
        its validation errors are shown (the previous implementation
        replaced it with a fresh unbound form, silently discarding them).

        :param request:
        :return:
        """
        AddModelForm = self.get_model_form_class()
        form = AddModelForm()
        if request.method == 'POST':
            form = AddModelForm(request.POST)
            if form.is_valid():
                self.save(form, modify=False)
                return redirect(self.reverse_url('changelist'))
        return render(request, 'stark/change.html', locals())

    def change_view(self, request, pk):
        """
        Edit page for one object.

        :param request:
        :param pk: pk of the object being edited
        :return:
        """
        obj = self.model_class.objects.filter(pk=pk).first()
        if not obj:
            return HttpResponse('数据不存在')
        # Same ModelForm as the add page; customizable via the hook.
        ModelForm = self.get_model_form_class()
        form = ModelForm(instance=obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, instance=obj)
            if form.is_valid():
                # modify=True tells the save hook this is an update
                self.save(form, modify=True)
                return redirect(self.reverse_url('changelist'))
        return render(request, 'stark/change.html', locals())

    def delete_view(self, request, pk):
        """
        Delete confirmation page.

        GET shows a confirm/cancel page whose cancel link points back to
        the list; POST deletes the object and redirects to the list.
        (A modal-dialog variant would also work; this is the page-based
        approach.)

        :param request:
        :param pk: pk of the object being deleted
        :return:
        """
        reverse_url_list = self.reverse_url('changelist')
        if request.method == 'POST':
            self.model_class.objects.filter(pk=pk).delete()
            return redirect(reverse_url_list)
        return render(request, 'stark/delete.html', locals())

    def get_url_name(self, display_type):
        """
        Url alias (``name=``) for one view of this model.

        :param display_type: view kind, e.g. ``del`` / ``change``
        :return: e.g. ``app_model_change`` or ``app_model_prev_change``
        """
        # app name
        app_label = self.model_class._meta.app_label
        # lower-cased model class name
        model_name = self.model_class._meta.model_name
        if self.prev:
            name = '%s_%s_%s_%s' % (app_label, model_name, self.prev, display_type)
        else:
            name = '%s_%s_%s' % (app_label, model_name, display_type)
        return name

    def get_urls(self):
        """
        The four basic crud url patterns, plus any extras contributed by
        :meth:`extra_url`.
        """
        urlpatterns = [
            url(r'^list/$', self.wrapper(self.changelist_view), name=self.get_url_name('changelist')),
            url(r'^add/$', self.wrapper(self.add_view), name=self.get_url_name('add')),
            url(r'^(?P<pk>\d+)/change/$', self.wrapper(self.change_view), name=self.get_url_name('change')),
            url(r'^(?P<pk>\d+)/del/$', self.wrapper(self.delete_view), name=self.get_url_name('del')),
        ]
        # extra urls from the hook
        extra = self.extra_url()
        if extra:
            urlpatterns.extend(extra)
        return urlpatterns

    def extra_url(self):
        """
        Hook: return extra url patterns for this model, or None.
        """
        pass

    def reverse_url(self, display_type, row=None):
        """
        Reverse-resolve a url for this model:

        1. plain reversal,
        2. for ``changelist``: restore the search condition saved before
           jumping away, or
        3. pack the current query string into ``_filter`` so the list
           page can later restore it.

        :param display_type: url kind, e.g. ``del`` / ``change``
        :param row: data object whose pk is embedded in the url
        :return: the resolved url, possibly with a query string
        """
        namespace = self.site.namespace
        name = '%s:%s' % (namespace, self.get_url_name(display_type))
        if row:
            reverse_url = reverse(name, kwargs={'pk': row.pk})
        else:
            reverse_url = reverse(name)
        if display_type == 'changelist':
            # restore the search condition carried in _filter, if any
            origin_condition = self.request.GET.get(self.back_condition_key)
            if not origin_condition:
                return reverse_url
            return '%s?%s' % (reverse_url, origin_condition)
        if not self.request.GET:
            # no current condition to preserve
            return reverse_url
        # pack the current query string so the list view can restore it
        param_str = self.request.GET.urlencode()
        new_query_dict = QueryDict(mutable=True)
        new_query_dict[self.back_condition_key] = param_str
        return '%s?%s' % (reverse_url, new_query_dict.urlencode())

    @property
    def urls(self):
        return self.get_urls()
class AdminSite(object):
    """
    Registry of model classes and their stark configurations; builds the
    url patterns for every registered model.
    """
    def __init__(self):
        self._registry = []
        self.app_name = 'stark'
        self.namespace = 'stark'
    def registry(self, model_class, stark_config=None, prev=None):
        """
        Register a model with an optional custom config class.

        :param model_class: the model class to register
        :param stark_config: config class bundling the crud urls/views;
            defaults to StarkConfig
        :param prev: distinguishing prefix when the same model is
            registered more than once (e.g. ``pri`` / ``pub``)
        :return:
        """
        config_cls = stark_config or StarkConfig
        # ModelConfigMapping bundles the model, its instantiated config
        # and the optional prefix.
        self._registry.append(ModelConfigMapping(model_class, config_cls(model_class, self, prev), prev))
    def get_urls(self):
        """
        Mount each registered model's crud urls under
        ``<app_label>/<model_name>/`` (plus the optional prefix) using the
        tuple form of include: ``(urlconf, app_namespace, instance_namespace)``.
        :return: list of url patterns
        """
        urlpatterns = []
        for mapping in self._registry:
            app_label = mapping.model._meta.app_label      # app containing the model
            model_name = mapping.model._meta.model_name    # lower-cased class name
            if mapping.prev:
                prefix = r'^%s/%s/%s/' % (app_label, model_name, mapping.prev)
            else:
                prefix = r'^%s/%s/' % (app_label, model_name)
            # mapping.config.urls supplies the fixed crud url patterns
            urlpatterns.append(url(prefix, (mapping.config.urls, None, None)))
        return urlpatterns
    @property
    def urls(self):
        """
        Final (urlpatterns, app_name, namespace) triple for the root urlconf.
        :return:
        """
        return self.get_urls(), self.app_name, self.namespace
site = AdminSite()
| youxiaodao/stark | stark/service/stark.py | stark.py | py | 26,592 | python | zh | code | 0 | github-code | 90 |
35752429211 | #!/usr/bin/env python
import sys


def find_parents(n, edge_pairs):
    """
    Return ``parents`` where ``parents[v]`` is the parent of node ``v``
    in the tree rooted at node 1 (``parents[1]`` stays 0; index 0 unused).

    Uses an explicit stack instead of recursion: the recursive version
    could overflow the interpreter's C stack on deep trees even with a
    raised ``sys.setrecursionlimit``.

    :param n: number of nodes, labelled 1..n
    :param edge_pairs: iterable of (a, b) undirected tree edges
    :return: list of length n + 1
    """
    adjacency = [[] for _ in range(n + 1)]
    for a, b in edge_pairs:
        adjacency[a].append(b)
        adjacency[b].append(a)
    parents = [0] * (n + 1)
    visited = [False] * (n + 1)
    visited[1] = True
    stack = [1]
    while stack:
        v = stack.pop()
        for nxt in adjacency[v]:
            if not visited[nxt]:
                visited[nxt] = True
                parents[nxt] = v
                stack.append(nxt)
    return parents


def main():
    """Read the tree from stdin and print each node's parent (nodes 2..n)."""
    read = sys.stdin.readline
    n = int(read())
    edges = [tuple(map(int, read().split())) for _ in range(n - 1)]
    parents = find_parents(n, edges)
    for i in range(2, n + 1):
        print(parents[i])


if __name__ == '__main__':
    main()
| hansojin/python | graph/bj11725_recur.py | bj11725_recur.py | py | 443 | python | en | code | 0 | github-code | 90 |
from itertools import groupby


def min_rewrites(values):
    """
    Minimum number of elements to rewrite so that no two adjacent
    elements are equal: each maximal run of k equal values needs
    k // 2 rewrites.

    :param values: sequence of integers
    :return: minimum rewrite count
    """
    return sum(sum(1 for _ in group) // 2 for _, group in groupby(values))


def main():
    """Read n and the sequence from stdin, print the answer."""
    input()  # n is implied by the sequence length and not needed
    a = list(map(int, input().split()))
    print(min_rewrites(a))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03296/s947906677.py | s947906677.py | py | 292 | python | en | code | 0 | github-code | 90 |
from collections import deque


def accumulate_subtree(n, edges, operations):
    """
    For a tree rooted at node 1, apply each operation (p, x) — add x to
    node p — then propagate every node's value down to all of its
    descendants.

    :param n: number of nodes, labelled 1..n
    :param edges: list of (a, b) undirected tree edges
    :param operations: list of (p, x) increments
    :return: list ``counter`` of length n + 1 where counter[v] is the
        final value of node v (index 0 unused)
    """
    graph = [[] for _ in range(n + 1)]
    for a, b in edges:
        graph[a].append(b)
        graph[b].append(a)
    counter = [0] * (n + 1)
    for p, x in operations:
        counter[p] += x
    # Iterative DFS from the root; each child inherits its parent's total.
    visited = [False] * (n + 1)
    visited[1] = True
    stack = deque([1])
    while stack:
        parent = stack.pop()
        for node in graph[parent]:
            if not visited[node]:
                visited[node] = True
                counter[node] += counter[parent]
                stack.append(node)
    return counter


def main():
    """Read the tree and operations from stdin, print all node values."""
    n, q = map(int, input().split())
    edges = [tuple(map(int, input().split())) for _ in range(n - 1)]
    operations = [tuple(map(int, input().split())) for _ in range(q)]
    counter = accumulate_subtree(n, edges, operations)
    # Space-separated output, matching the original trailing-space format.
    for i in range(1, n + 1):
        print(counter[i], end=' ')


if __name__ == '__main__':
    main()
35051746087 | #!/usr/bin/env python3
import unittest
from types import SimpleNamespace
from unittest import mock
from somaticsniper_tool import utils as MOD
class ThisTestCase(unittest.TestCase):
    """Common base for the test cases in this module; a single place to
    add shared fixture setup/teardown later."""
    def setUp(self):
        super().setUp()
    def tearDown(self):
        super().tearDown()
class Test__get_region_from_name(ThisTestCase):
    """Tests for ``utils.get_region_from_name``."""
    def setUp(self):
        super().setUp()
    def tearDown(self):
        super().tearDown()
    def test_method_returns_expected(self):
        # A mpileup named "<chrom>-<start>-<stop>" should yield the
        # region string "<chrom>:<start>-<stop>".
        mpileup_path = "/foo/bar/chr1-1-12345.mpileup"
        region, base = MOD.get_region_from_name(mpileup_path)
        self.assertEqual(region, "chr1:1-12345")
class Test_run_subprocess_command(ThisTestCase):
    """Tests for ``utils.run_subprocess_command``, with the ``subprocess``
    module replaced by a mock injected through the ``_di`` parameter."""
    def setUp(self):
        super().setUp()
        # Mocked subprocess module, handed to the function under test via
        # its dependency-injection keyword ``_di``.
        self.mocks = SimpleNamespace(subprocess=mock.MagicMock(spec_set=MOD.subprocess))
    def tearDown(self):
        super().tearDown()
    def _setup_popen(self, cmd, stdout=None, stderr=None, do_raise=False):
        """Wire ``subprocess.Popen`` to return a mock whose ``communicate``
        yields (stdout, stderr); when ``do_raise`` is True the first
        ``communicate`` call raises TimeoutExpired and the second succeeds."""
        stdout = stdout or b""
        stderr = stderr or b""
        mock_popen = mock.MagicMock(spec_set=MOD.subprocess.Popen)
        mock_returncode = 0
        # returncode must be a PropertyMock because spec_set forbids
        # plain attribute assignment on the mock.
        type(mock_popen).returncode = mock.PropertyMock(return_value=mock_returncode)
        self.mocks.subprocess.Popen.return_value = mock_popen
        if do_raise:
            mock_popen.communicate.side_effect = [
                MOD.subprocess.TimeoutExpired(cmd, 3600),
                (stdout, stderr),
            ]
        else:
            mock_popen.communicate.return_value = (stdout, stderr)
        return mock_popen
    def test_popen_command_not_split_on_shell_is_True(self):
        # With shell=True the command string is passed through unsplit.
        cmd_str = "ls /foo/bar"
        expected_cmd = cmd_str
        timeout = 3600
        mock_popen = self._setup_popen(cmd_str)
        MOD.run_subprocess_command(cmd_str, timeout, shell=True, _di=self.mocks)
        self.mocks.subprocess.Popen.assert_called_once_with(expected_cmd, shell=True)
    def test_popen_command_split_on_shell_is_False_or_not_given(self):
        # Without shell=True the command is shlex-split into an argv list.
        cmd_str = "ls /foo/bar"
        expected_cmd = MOD.shlex.split(cmd_str)
        timeout = 3600
        mock_popen = self._setup_popen(cmd_str)
        MOD.run_subprocess_command(cmd_str, timeout, _di=self.mocks)
        self.mocks.subprocess.Popen.assert_called_once_with(expected_cmd)
    def test_popen_killed_on_timeout_expired(self):
        # On TimeoutExpired the process is killed, communicate() is
        # retried without a timeout, and ValueError is raised.
        expected_communicate_calls = (
            mock.call(timeout=3600),
            mock.call(),
        )
        cmd_str = "ls /foo/bar"
        mock_popen = self._setup_popen(cmd_str, do_raise=True)
        timeout = 3600
        with self.assertRaises(ValueError):
            MOD.run_subprocess_command(cmd_str, timeout, _di=self.mocks)
        mock_popen.communicate.assert_has_calls(expected_communicate_calls)
        mock_popen.kill.assert_called_once_with()
    def test_popen_returns_decoded_stdout_stderr(self):
        # Byte streams from communicate() come back utf-8-decoded.
        cmd_str = "ls /foo/bar"
        stdout = b"stdout"
        stderr = b"stderr"
        timeout = 3600
        mock_popen = self._setup_popen(cmd_str, stdout=stdout, stderr=stderr)
        found = MOD.run_subprocess_command(cmd_str, timeout, _di=self.mocks)
        self.assertEqual(found.stdout, stdout.decode())
        self.assertEqual(found.stderr, stderr.decode())
    def test_returns_None_when_communicate_returns_None(self):
        # None streams pass through undecoded.
        cmd_str = "ls /foo/bar"
        expected = MOD.PopenReturn(stdout=None, stderr=None)
        mock_popen = mock.MagicMock(spec_set=MOD.subprocess.Popen)
        mock_returncode = 0
        type(mock_popen).returncode = mock.PropertyMock(return_value=mock_returncode)
        mock_popen.communicate.return_value = (None, None)
        timeout = 3600
        self.mocks.subprocess.Popen.return_value = mock_popen
        found = MOD.run_subprocess_command(cmd_str, timeout, _di=self.mocks)
        self.assertEqual(expected, found)
    def test_returns_str_when_communicate_returns_str(self):
        # Already-decoded str streams pass through unchanged.
        cmd_str = "ls /foo/bar"
        expected = MOD.PopenReturn(stdout="foo", stderr="bar")
        mock_popen = mock.MagicMock(spec_set=MOD.subprocess.Popen)
        mock_returncode = 0
        type(mock_popen).returncode = mock.PropertyMock(return_value=mock_returncode)
        mock_popen.communicate.return_value = ("foo", "bar")
        timeout = 3600
        self.mocks.subprocess.Popen.return_value = mock_popen
        found = MOD.run_subprocess_command(cmd_str, timeout, _di=self.mocks)
        self.assertEqual(expected, found)
class Test_merge_outputs(ThisTestCase):
    """Tests for ``utils.merge_outputs``."""
    def setUp(self):
        super().setUp()
        # Inject a mock ``open`` via ``_di`` so no real files are touched.
        self.mocks = SimpleNamespace(open=mock.MagicMock(spec_set=open))
    def test_open_called_on_input_files(self):
        sources = ['foo', 'bar', 'baz']
        MOD.merge_outputs(sources, mock.Mock(), _di=self.mocks)
        # Every input file must have been opened at least once.
        for source in sources:
            with self.subTest(f=source):
                self.mocks.open.assert_any_call(source)
# __END__
| NCI-GDC/somaticsniper-tool | tests/test_utils.py | test_utils.py | py | 4,897 | python | en | code | 0 | github-code | 90 |
29878297735 | import asyncio
from pprint import pprint
from asyncdb import AsyncDB
from asyncdb.exceptions import default_exception_handler
async def connect(db):
    """Demo of the asyncdb duckdb driver: connectivity check, table
    creation (helper and raw DDL), bulk inserts, queries and cursor
    iteration, all against one connection."""
    async with await db.connection() as conn:
        # Smoke-test the connection and show the driver's info dict.
        pprint(await conn.test_connection())
        # Create a simple two-column table via the driver's schema helper.
        await conn.create(
            name='tests',
            fields=[
                {"name": "id", "type": "integer"},
                {"name": "name", "type": "text"}
            ]
        )
        many = "INSERT INTO tests VALUES(?, ?)"
        examples = [(2, "def"), (3, "ghi"), (4, "jkl")]
        print(": Executing Insert of many entries: ")
        # NOTE(review): ``execute_many`` here vs ``executemany`` further
        # down — confirm both spellings exist on this driver.
        await conn.execute_many(many, *examples)
        # query() returns a (result, error) pair rather than raising.
        result, error = await conn.query("SELECT * FROM tests")
        if error:
            print(error)
        for row in result:
            print('>>', row)
        # Create a second table with raw DDL this time.
        table = """
        CREATE TABLE airports (
        iata VARCHAR PRIMARY KEY,
        city VARCHAR,
        country VARCHAR
        )
        """
        result = await conn.execute(table)
        print('CREATED ?: ', result)
        data = [
            ("ORD", "Chicago", "United States"),
            ("JFK", "New York City", "United States"),
            ("CDG", "Paris", "France"),
            ("LHR", "London", "United Kingdom"),
            ("DME", "Moscow", "Russia"),
            ("SVO", "Moscow", "Russia"),
        ]
        airports = "INSERT INTO airports VALUES(?, ?, ?)"
        await conn.executemany(airports, *data)
        # a_country = "United States"
        # a_city = "Moscow"
        query = "SELECT * FROM airports WHERE country=? OR city=?"
        # async with await conn.fetch(query, (a_country, a_city)) as result:
        #     for row in result:
        #         print(row)
        # Iterate rows through a parameterized cursor (prepared-style).
        print('Using Cursor Objects: ')
        b_country = 'France'
        b_city = 'London'
        async with conn.cursor(query, (b_country, b_city)) as cursor:
            async for row in cursor:
                print(row)
        # The cursor object is also re-enterable as a context manager;
        # fetch_all() drains any remaining rows at once.
        print("Also: Using Context Manager: ")
        async with cursor:
            print(await cursor.fetch_all())
        # this returns a cursor based object
# this returns a cursor based object
if __name__ == "__main__":
    # Standalone demo: build a fresh event loop with asyncdb's default
    # exception handler and run the duckdb example against an in-memory
    # database.
    loop = asyncio.new_event_loop()
    loop.set_exception_handler(default_exception_handler)
    driver = AsyncDB("duckdb", params={"database": ":memory:"}, loop=loop)
    loop.run_until_complete(connect(driver))
| phenobarbital/asyncdb | examples/test_duckdb.py | test_duckdb.py | py | 2,451 | python | en | code | 23 | github-code | 90 |
# Production Django settings: layered on top of the shared base settings.
from .base import *
import os
# Secret key injected via the environment; never hard-code it here.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
DEBUG = False
# NOTE(review): '*' accepts any Host header — list the real production
# domains here before going live.
ALLOWED_HOSTS = ['*']
# BASE_DIR comes from .base via the star import.
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# WhiteNoise serves hashed, compressed static files directly from Django.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Uploaded media goes to S3 via django-storages.
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'papaiz-dashboard'
# Serve public S3 urls without signed query-string auth parameters.
AWS_QUERYSTRING_AUTH = False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.