seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
class Budget:
    """A named budget backed by a dict of balances persisted in budgetsaves.txt."""

    def __init__(self, a):
        """Load all saved budgets and select (or create) the budget named `a`."""
        import ast  # stdlib literal parser; safe replacement for eval()
        with open("budgetsaves.txt") as f:
            raw = f.read().strip()
        # ast.literal_eval only accepts Python literals; the original eval()
        # would execute arbitrary code from the save file.
        b = dict(ast.literal_eval(raw))
        if a not in b:
            # Original code did `if b[a]:` which raised KeyError for a budget
            # that had never been saved; new budgets start at zero.
            b[a] = 0
        self.a = b[a]        # cached balance of this budget
        self.dict = b        # all budgets, name -> balance
        self.budget = a      # this budget's name

    def Funds_depo(self):
        """Deposit an amount read from stdin into this budget."""
        c = int(input('How much do you want to deposit?'))
        d = c + int(self.a)
        # Original printed `z + ...` with `z` undefined (NameError).
        print(self.budget + ' budget balance is', d)
        self.a = d                      # keep the cached balance in sync
        self.dict[self.budget] = d      # original wrote self.dict[a]: undefined 'a'

    def Funds_withd(self):
        """Withdraw an amount read from stdin from this budget."""
        c = int(input('How much do you want to withdraw?'))
        d = int(self.a) - c
        print(self.budget, 'budget balance is', d)
        self.a = d
        self.dict[self.budget] = d      # original wrote self.dict[a]: undefined 'a'

    def balance(self):
        """Print the total across all budgets and this budget's own balance."""
        total = sum(self.dict.values())
        print('Total budget is', total)
        print(self.budget, 'budget is', self.a)

    def Funds_trans(self):
        """Transfer an amount between two budgets named on stdin."""
        x = input('Transfer from?')
        y = input('Transfer to?')
        z = int(input('How much to transfer'))
        if self.dict[x] > z:            # original compared against undefined 'Z'
            self.dict[x] -= z
            self.dict[y] += z
            print('Transaction successful')
        else:
            print('Cannot transfer amount')
| Code-Tony/classPractice | bugetApp.py | bugetApp.py | py | 1,348 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
from setuptools import setup

# Read the long description with a context manager so the handle is closed
# (the original leaked both file handles via bare open()).
with open("README.md") as f:
    long_description = f.read()

# Extract __version__ from the package source without importing the package.
version = None
with open('neuroml/__init__.py') as f:
    for line in f:
        if line.startswith("__version__"):
            # line looks like: __version__ = "x.y.z" -> strip the quotes
            version = line.split("=")[1].strip()[1:-1]
            break

setup(
    data_files=[("", ['neuroml/test/Purk2M9s.nml'])],
    name="libNeuroML",
    version=version,
    packages=['neuroml', 'neuroml.test', 'neuroml.nml', 'neuroml.hdf5'],
    package_data={'neuroml.test': ['*.nml'], 'neuroml.nml': ['*.xsd']},
    author="libNeuroML authors and contributors",
    author_email="vellamike@gmail.com, p.gleeson@gmail.com",
    description="A Python library for working with NeuroML descriptions of neuronal models",
    long_description=long_description,
    install_requires=['lxml'],
    license="BSD",
    url="http://libneuroml.readthedocs.org/en/latest/",
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering']
)
| usama57/libNeuroML | setup.py | setup.py | py | 1,193 | python | en | code | null | github-code | 13 |
7716068765 | """
%pip install -U numpy
%pip install -U pandas
%pip install -U requests
%pip install -U bs4
%pip install -U selenium
%pip install -U matplotlib
%pip install -U seaborn
%pip install -U plotly
%pip install -U scikit-learn
%pip install -U python-dateutil
%pip install -U lxml
%pip install -U colorama
%pip install -U datetime
%pip install selenium
"""
# %%
import sys
import os
import datetime
import shutil as shu
from colorama import Style as st
from colorama import Fore
from colorama import Back as bk
start_time = datetime.datetime.now()  # wall-clock start of the whole run
w, h = shu.get_terminal_size()        # terminal width/height used for banner padding
def printSeparator():
    """Print a terminal-wide green horizontal rule, then reset colour to white."""
    rule = '-' * w
    print(Fore.GREEN + rule + Fore.WHITE)
def logStep(msg):
    """Print a timestamped step banner, dash-padded to the terminal width."""
    # 27 = timestamp length (26) plus the separating space
    pad = w - len(msg) - 27
    banner = (Fore.WHITE + str(datetime.datetime.now()) + " " +
              Fore.YELLOW + msg + Fore.WHITE + "-" * pad)
    print(banner)
    sys.stdout.flush()
def printDFinfo(name,dfName):
    """Dump a DataFrame's metadata, per-column row counts and head, framed by separators."""
    printSeparator()
    print('Name: ',name)
    printSeparator()
    print(dfName.info())
    printSeparator()
    print(f'Row Count :{Fore.RED}')
    print(dfName.count(),Fore.WHITE)
    printSeparator()
    print(dfName.head())
    printSeparator()
# %%
import warnings as warn
import numpy as np
import pandas as pd
import requests
import datetime
from bs4 import BeautifulSoup
from dateutil.relativedelta import *
from selenium import webdriver
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import classification_report, precision_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
# Silence the noisy warning categories emitted by pandas/sklearn during the run.
warn.filterwarnings("ignore", category=Warning)
warn.filterwarnings("ignore", category=DeprecationWarning)
warn.filterwarnings("ignore", category=FutureWarning)
warn.filterwarnings("ignore", category=UserWarning)
# %%
start_time00 = datetime.datetime.now()  # also used for the total elapsed time at the end
logStep("ENVIRONMENT PREPARATION")
# NOTE(review): os.uname() and os.getlogin() are POSIX-only; this report will
# raise on Windows -- confirm the intended target platforms.
print(F"Copyright : {sys.copyright}")
print(F"OS Platform : {sys.platform}")
print(F"OS Name : {os.name}")
print(F"OS HOME : {os.environ.get('HOME')}")
print(F"OS uName : {os.uname().sysname}")
print(F"OS NodeName : {os.uname().nodename}")
print(F"OS Release : {os.uname().release}")
print(F"OS Release Ver : {os.uname().version}")
print(F"OS Machine : {os.uname().machine}")
print(F"Process ID : {os.getpid()}")
print(F"Parent Process : {os.getppid()}")
print(F"OS User : {os.getlogin()}")
print(F"OS User ID : {os.getuid()}")
print(F"OS Group ID : {os.getgid()}")
print(F"OS Effective ID : {os.geteuid()}")
print(F"OS Effective GID : {os.getegid()}")
print(F"Current dir : {os.getcwd()}")
print(F"Python version : {sys.version}")
print(F"Version info : {sys.version_info}")
print(F"Python API Ver : {sys.api_version}")
print(F"Executable : {sys.executable}")
print(F"Spark UI : http://localhost:4040")
print(F"Spark submit : {sys.argv[0]}")
print(F"Hadoop Home : {os.environ.get('HADOOP_HOME')}")
print(F"Java Home : {os.environ.get('JAVA_HOME')}")
print(F"Current Working Directory : {os.getcwd()}")
logStep("DONE");
end_time = datetime.datetime.now()
step00_elapsed_time = end_time - start_time00
logStep(F"ELAPSED TIME: {step00_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Race DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Race DF")
# Column accumulators: one row per race, filled from the Ergast REST API.
races = {'season' : [], 'round' : [], 'circuit_id': [], 'lat' : [],
         'long' : [], 'country': [], 'date' : [], 'url' : []}
url = 'https://ergast.com/api/f1/{}.json'
for year in list(range(2022,2023)):
    r = requests.get(url.format(year))
    json = r.json()
    # Each field appends independently; a missing key appends None so every
    # column list keeps the same length and the DataFrame stays rectangular.
    for item in json['MRData']['RaceTable']['Races']:
        try: races['season'].append(int(item['season']))
        except Exception as e: races['season'].append(None)
        try: races['round'].append(int(item['round']))
        except Exception as e: races['round'].append(None)
        try: races['circuit_id'].append(item['Circuit']['circuitId'])
        except Exception as e: races['circuit_id'].append(None)
        try: races['lat'].append(float(item['Circuit']['Location']['lat']))
        except Exception as e: races['lat'].append(None)
        try: races['long'].append(float(item['Circuit']['Location']['long']))
        except Exception as e: races['long'].append(None)
        try: races['country'].append(item['Circuit']['Location']['country'])
        except Exception as e: races['country'].append(None)
        try: races['date'].append(item['date'])
        except Exception as e: races['date'].append(None)
        try: races['url'].append(item['url'])
        except Exception as e: races['url'].append(None)
races = pd.DataFrame(races)
printDFinfo('races',races)
logStep("DONE");
end_time = datetime.datetime.now()
step01_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step01_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Rounds DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Rounds DF")
# Pair each season with its list of round numbers: rounds = [[season, [round, ...]], ...]
rounds = []
for year in np.array(races.season.unique()):
    season_rounds = list(races[races.season == year]['round'])
    rounds.append([year, season_rounds])
    print(year, season_rounds, len(season_rounds))
logStep("DONE")
end_time = datetime.datetime.now()
step02_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step02_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Race Results DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Results DF")
# Column accumulators: one row per driver per race, from the Ergast results endpoint.
results = {'season' : [], 'round' : [], 'circuit_id' : [], 'driver' : [],
           'date_of_birth': [], 'nationality': [], 'constructor' : [], 'grid' : [],
           'time' : [], 'status' : [], 'points' : [], 'podium' : []}
url = 'http://ergast.com/api/f1/{}/{}/results.json'
for n in list(range(len(rounds))):
    for i in rounds[n][1]:
        r = requests.get(url.format(rounds[n][0], i))
        json = r.json()
        # Races[0] is the single race matching this (season, round) query.
        for item in json['MRData']['RaceTable']['Races'][0]['Results']:
            try: results['season'].append(int(json['MRData']['RaceTable']['Races'][0]['season']))
            except Exception as e: results['season'].append(None)
            try: results['round'].append(int(json['MRData']['RaceTable']['Races'][0]['round']))
            except Exception as e: results['round'].append(None)
            try: results['circuit_id'].append(json['MRData']['RaceTable']['Races'][0]['Circuit']['circuitId'])
            except Exception as e: results['circuit_id'].append(None)
            try: results['driver'].append(item['Driver']['driverId'])
            except Exception as e: results['driver'].append(None)
            try: results['date_of_birth'].append(item['Driver']['dateOfBirth'])
            except Exception as e: results['date_of_birth'].append(None)
            try: results['nationality'].append(item['Driver']['nationality'])
            except Exception as e: results['nationality'].append(None)
            try: results['constructor'].append(item['Constructor']['constructorId'])
            except Exception as e: results['constructor'].append(None)
            try: results['grid'].append(int(item['grid']))
            except Exception as e: results['grid'].append(None)
            try: results['time'].append(int(item['Time']['millis']))
            except Exception as e: results['time'].append(None)
            try: results['status'].append(item['status'])
            except Exception as e: results['status'].append(None)
            try: results['points'].append(int(item['points']))
            except Exception as e: results['points'].append(None)
            try: results['podium'].append(int(item['position']))
            except Exception as e: results['podium'].append(None)
results = pd.DataFrame(results)
printDFinfo('results',results)
logStep("DONE");
end_time = datetime.datetime.now()
step03_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step03_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Driver Standings DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Driver Standings DF")
# Column accumulators for per-round driver championship standings.
driver_standings = {'season' : [], 'round' : [],
                    'driver' : [], 'driver_points' : [],
                    'driver_wins': [], 'driver_standings_pos': []}
url = 'https://ergast.com/api/f1/{}/{}/driverStandings.json'
def lookup (df, team, points):
    """Replace each row's `points` value with the previous round's value.

    For every (season, team, round) row, find the same team's `points` value
    from the previous round of the same season.  The value recorded *after*
    the race is kept as `{points}_after_race`, and `{points}` becomes the
    value going *into* the race (0 for the first round of a season).

    Parameters
    ----------
    df     : DataFrame with 'season', 'round', `team` and `points` columns.
    team   : name of the entity column ('driver' or 'constructor').
    points : name of the numeric column to shift back by one round.

    Returns a new DataFrame.  Unlike the original, the caller's frame is not
    mutated: the temporary lookup columns no longer leak into `df`.
    """
    df = df.copy()  # work on a copy so the temp columns stay private
    # Composite join keys: current round and the round before it.
    df['lookup1'] = df.season.astype(str) + df[team] + df['round'].astype(str)
    df['lookup2'] = df.season.astype(str) + df[team] + (df['round']-1).astype(str)
    new_df = df.merge(df[['lookup1', points]], how = 'left',
                      left_on='lookup2', right_on='lookup1')
    new_df.drop(['lookup1_x', 'lookup2', 'lookup1_y'], axis = 1, inplace = True)
    new_df.rename(columns={f'{points}_x': f'{points}_after_race',
                           f'{points}_y': points}, inplace = True)
    # Plain assignment instead of chained-assignment inplace fillna, which is
    # deprecated in modern pandas.
    new_df[points] = new_df[points].fillna(0)
    return new_df
for n in list(range(len(rounds))):
    for i in rounds[n][1]:
        r = requests.get(url.format(rounds[n][0], i))
        json = r.json()
        # NOTE(review): StandingsLists[0] raises IndexError if the API returns
        # an empty list for a round; not guarded here -- confirm acceptable.
        for item in json['MRData']['StandingsTable']['StandingsLists'][0]['DriverStandings']:
            try: driver_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season']))
            except Exception as e: driver_standings['season'].append(None)
            try: driver_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round']))
            except Exception as e: driver_standings['round'].append(None)
            try: driver_standings['driver'].append(item['Driver']['driverId'])
            except Exception as e: driver_standings['driver'].append(None)
            try: driver_standings['driver_points'].append(int(item['points']))
            except Exception as e: driver_standings['driver_points'].append(None)
            try: driver_standings['driver_wins'].append(int(item['wins']))
            except Exception as e: driver_standings['driver_wins'].append(None)
            try: driver_standings['driver_standings_pos'].append(int(item['position']))
            except Exception as e: driver_standings['driver_standings_pos'].append(None)
driver_standings = pd.DataFrame(driver_standings)
# Shift points/wins/position back one round so each row holds the values a
# driver had going INTO the race, then drop the post-race copies.
driver_standings = lookup(driver_standings, 'driver', 'driver_points')
driver_standings = lookup(driver_standings, 'driver', 'driver_wins')
driver_standings = lookup(driver_standings, 'driver', 'driver_standings_pos')
driver_standings.drop(['driver_points_after_race', 'driver_wins_after_race', 'driver_standings_pos_after_race'], axis = 1, inplace = True)
printDFinfo('driver_standings',driver_standings)
logStep("DONE");
end_time = datetime.datetime.now()
step04_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step04_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Constructor DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Constructor DF")
constructor_rounds = rounds  # NOTE: alias, not a copy -- same list object as `rounds`
# Column accumulators for per-round constructor championship standings.
constructor_standings = {'season' : [], 'round' : [],
                         'constructor' : [], 'constructor_points' : [],
                         'constructor_wins': [], 'constructor_standings_pos': []}
url = 'https://ergast.com/api/f1/{}/{}/constructorStandings.json'
for n in list(range(len(constructor_rounds))):
    for i in constructor_rounds[n][1]:
        r = requests.get(url.format(constructor_rounds[n][0], i))
        json = r.json()
        for item in json['MRData']['StandingsTable']['StandingsLists'][0]['ConstructorStandings']:
            try: constructor_standings['season'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['season']))
            except Exception as e: constructor_standings['season'].append(None)
            try: constructor_standings['round'].append(int(json['MRData']['StandingsTable']['StandingsLists'][0]['round']))
            except Exception as e: constructor_standings['round'].append(None)
            try: constructor_standings['constructor'].append(item['Constructor']['constructorId'])
            except Exception as e: constructor_standings['constructor'].append(None)
            try: constructor_standings['constructor_points'].append(int(item['points']))
            except Exception as e: constructor_standings['constructor_points'].append(None)
            try: constructor_standings['constructor_wins'].append(int(item['wins']))
            except Exception as e: constructor_standings['constructor_wins'].append(None)
            try: constructor_standings['constructor_standings_pos'].append(int(item['position']))
            except Exception as e: constructor_standings['constructor_standings_pos'].append(None)
constructor_standings = pd.DataFrame(constructor_standings)
# As with drivers: shift standings back one round, keep only the pre-race values.
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_points')
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_wins')
constructor_standings = lookup(constructor_standings, 'constructor', 'constructor_standings_pos')
constructor_standings.drop(['constructor_points_after_race','constructor_wins_after_race','constructor_standings_pos_after_race'],axis=1,inplace=True)
printDFinfo('Constructor_Standings',constructor_standings)
logStep("DONE");
end_time = datetime.datetime.now()
step05_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step05_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Qualifying DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Qualifying DF")
qualifying_results = pd.DataFrame()
for year in list(range(2022,2023)):
    url = 'https://www.formula1.com/en/results.html/{}/races.html'
    r = requests.get(url.format(year))
    soup = BeautifulSoup(r.text, 'html.parser')
    # find links to all circuits for a certain year
    year_links = []
    for page in soup.find_all('a', attrs = {'class':"resultsarchive-filter-item-link FilterTrigger"}):
        link = page.get('href')
        if f'/en/results.html/{year}/races/' in link:
            year_links.append(link)
    # for each circuit, switch to the starting grid page and read table
    year_df = pd.DataFrame()
    new_url = 'https://www.formula1.com{}'
    for n, link in list(enumerate(year_links)):
        link = link.replace('race-result.html', 'starting-grid.html')
        df = pd.read_html(new_url.format(link))
        df = df[0]  # first table on the page is the starting grid
        df['season'] = year
        df['round'] = n+1  # round number inferred from link order on the page
        for col in df:
            if 'Unnamed' in col:  # drop the unnamed filler columns read_html produces
                df.drop(col, axis = 1, inplace = True)
        year_df = pd.concat([year_df, df])
    # concatenate all tables from all years
    qualifying_results = pd.concat([qualifying_results, year_df])
qualifying_results.rename(columns = {'Pos': 'grid', 'Driver': 'driver_name', 'Car': 'car', 'Time': 'qualifying_time'}, inplace = True)
qualifying_results.drop('No', axis = 1, inplace = True)
# NOTE(review): qualifying_time is overwritten with the grid position here --
# presumably a placeholder because the scraped lap times are strings; confirm intent.
qualifying_results.qualifying_time = qualifying_results.grid
printDFinfo('Qualifying_Results',qualifying_results)
logStep("DONE");
end_time = datetime.datetime.now()
step06_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step06_elapsed_time} seconds")
# %% [markdown]
# Build required data structures - Weather DF
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Weather DF")
weather = races.iloc[:,[0,1,2]]  # season, round, circuit_id
info = []
# read wikipedia tables: probe the first four tables on each race's wiki page
# for a 'Weather' row; fall back to the Italian page via Selenium if none match.
for link in races.url:
    try:
        df = pd.read_html(link)[0]
        if 'Weather' in list(df.iloc[:,0]):
            n = list(df.iloc[:,0]).index('Weather')
            info.append(df.iloc[n,1])
        else:
            df = pd.read_html(link)[1]
            if 'Weather' in list(df.iloc[:,0]):
                n = list(df.iloc[:,0]).index('Weather')
                info.append(df.iloc[n,1])
            else:
                df = pd.read_html(link)[2]
                if 'Weather' in list(df.iloc[:,0]):
                    n = list(df.iloc[:,0]).index('Weather')
                    info.append(df.iloc[n,1])
                else:
                    df = pd.read_html(link)[3]
                    if 'Weather' in list(df.iloc[:,0]):
                        n = list(df.iloc[:,0]).index('Weather')
                        info.append(df.iloc[n,1])
                    else:
                        # NOTE(review): find_element_by_* is the Selenium 3 API,
                        # removed in Selenium 4 -- confirm the pinned version.
                        driver = webdriver.Chrome()
                        driver.get(link)
                        # click language button
                        button = driver.find_element_by_link_text('Italiano')
                        button.click()
                        # find weather in italian with selenium
                        clima = driver.find_element_by_xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr[9]/td').text
                        info.append(clima)
    except Exception as e:
        info.append('not found')
# append column with weather information to dataframe
weather['weather'] = info
# set up a dictionary to convert weather information into keywords
# (English and Italian terms, matching the two wiki sources above)
weather_dict = {'weather_warm' : ['soleggiato', 'clear' ,
                                  'warm' , 'hot' ,
                                  'sunny' , 'fine' ,
                                  'mild' , 'sereno' ],
                'weather_cold' : ['cold' , 'fresh' ,
                                  'chilly' , 'cool' ],
                'weather_dry' : ['dry' , 'asciutto' ],
                'weather_wet' : ['showers' , 'wet' ,
                                 'rain' , 'pioggia' ,
                                 'damp' , 'thunderstorms',
                                 'rainy' ],
                'weather_cloudy': ['overcast' , 'nuvoloso' ,
                                   'clouds' , 'cloudy' ,
                                   'grey' , 'coperto']}
# map new df according to weather dictionary: one 0/1 indicator column per category
weather_df = pd.DataFrame(columns = weather_dict.keys())
for col in weather_df:
    weather_df[col] = weather['weather'].map(lambda x: 1 if any(i in weather_dict[col] for i in x.lower().split()) else 0)
weather_info = pd.concat([weather, weather_df], axis = 1)
printDFinfo('weather_info',weather_info)
logStep("DONE");
end_time = datetime.datetime.now()
step07_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step07_elapsed_time} seconds")
# %% [markdown]
# Merge all DFs into one single DF to be used by ML training and prediction
# %%
start_time = datetime.datetime.now()
logStep("Build required data structures - Merge DFs")
# Join all sources into one modelling frame, keyed on (season, round) plus the
# appropriate entity column; identifier columns not needed downstream are dropped.
df1 = pd.merge(races, weather, how='inner', on=['season', 'round', 'circuit_id']).drop(['lat', 'long','country','weather'],axis = 1)
df2 = pd.merge(df1, results, how='inner', on=['season', 'round', 'circuit_id']).drop(['points', 'status', 'time'],axis = 1)
df3 = pd.merge(df2, driver_standings, how='left', on=['season', 'round', 'driver'])
df4 = pd.merge(df3, constructor_standings, how='left', on=['season', 'round', 'constructor'])
# NOTE(review): qualifying is joined on grid position (not driver), relying on
# the earlier qualifying_time = grid substitution -- confirm this is intended.
final_df = pd.merge(df4, qualifying_results, how='inner', on=['season', 'round', 'grid']).drop(['driver_name', 'car'],axis = 1)
final_df['date'] = pd.to_datetime(final_df.date)
final_df['date_of_birth'] = pd.to_datetime(final_df.date_of_birth)
# Driver age in whole years on race day.
final_df['driver_age'] = final_df.apply(lambda x: relativedelta(x['date'], x['date_of_birth']).years, axis=1)
final_df.drop(['date', 'date_of_birth'], axis = 1, inplace = True)
for col in ['driver_points', 'driver_wins', 'driver_standings_pos', 'constructor_points', 'constructor_wins' , 'constructor_standings_pos']:
    final_df[col].fillna(0, inplace = True)
    final_df[col] = final_df[col].map(lambda x: int(x))
final_df.dropna(inplace = True )
final_df = final_df[final_df['qualifying_time'] != 0]
final_df.sort_values(['season', 'round', 'grid'], inplace = True)
# Convert qualifying_time to a cumulative gap within each (season, round).
final_df['qualifying_time_diff'] = final_df.groupby(['season', 'round']).qualifying_time.diff()
final_df['qualifying_time'] = final_df.groupby(['season','round']).qualifying_time_diff.cumsum().fillna(0)
final_df.drop('qualifying_time_diff', axis = 1, inplace = True)
final_df.reset_index(inplace = True, drop = True)
printDFinfo('final_df',final_df)
logStep("DONE");
end_time = datetime.datetime.now()
step08_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step08_elapsed_time} seconds")
# %% [markdown]
# Prepare score functions - Regression and Classification
# %%
start_time = datetime.datetime.now()
logStep("Prepare score functions - Regression and Classification")
scaler = StandardScaler()  # fitted later on X_train; reused by the score_* functions below
def score_regression(model):
    """Average per-round precision of picking the race winner with a regressor.

    Relies on module globals: `df` (the modelling frame, with `podium` already
    mapped to 1 = winner / 0 = other) and `scaler` (fitted on the training
    split).  For each round, the driver with the best (lowest) predicted value
    is flagged as the predicted winner and scored against the actual winner.

    Returns (model_score, prediction_df); prediction_df is the frame of the
    LAST round iterated only.
    """
    score = 0
    for circuit in df['round'].unique():
        test = df[(df['round'] == circuit)]
        X_test = test.drop(['driver', 'podium'], axis = 1)
        y_test = test.podium
        # Reuse the scaler fitted on the training data (no refit here).
        X_test = pd.DataFrame(scaler.transform(X_test), columns = X_test.columns)
        prediction_df = pd.DataFrame(model.predict(X_test), columns = ['results'])
        prediction_df['podium'] = y_test.reset_index(drop = True)
        prediction_df['actual'] = prediction_df.podium.map(lambda x: 1 if x == 1 else 0)
        prediction_df.sort_values('results', ascending = True, inplace = True)
        prediction_df.reset_index(inplace = True, drop = True)
        # After sorting, index 0 holds the best prediction -> flag as winner.
        prediction_df['predicted'] = prediction_df.index
        prediction_df['predicted'] = prediction_df.predicted.map(lambda x: 1 if x == 0 else 0)
        score += precision_score(prediction_df.actual, prediction_df.predicted)
    # NOTE(review): dividing by the max round number assumes rounds form the
    # contiguous set 1..N; with gaps this mis-weights the average -- confirm.
    model_score = score / df['round'].unique().max()
    return model_score, prediction_df
def score_classification(model):
    """Average per-round precision of picking the race winner with a classifier.

    Same contract as score_regression (module globals `df` and `scaler`), but
    ranks drivers by the classifier's probability of class 1 instead of a raw
    regression output.  Returns (model_score, prediction_df) where
    prediction_df is the frame of the LAST round iterated only.
    """
    score = 0
    for circuit in df['round'].unique():
        test = df[(df['round'] == circuit)]
        X_test = test.drop(['driver', 'podium'], axis = 1)
        y_test = test.podium
        X_test = pd.DataFrame(scaler.transform(X_test), columns = X_test.columns)
        prediction_df = pd.DataFrame(model.predict_proba(X_test), columns = ['proba_0', 'proba_1'])
        prediction_df['actual'] = y_test.reset_index(drop = True)
        prediction_df.sort_values('proba_1', ascending = False, inplace = True)
        prediction_df.reset_index(inplace = True, drop = True)
        # Highest proba_1 (index 0 after sorting) becomes the predicted winner.
        prediction_df['predicted'] = prediction_df.index
        prediction_df['predicted'] = prediction_df.predicted.map(lambda x: 1 if x == 0 else 0)
        score += precision_score(prediction_df.actual, prediction_df.predicted)
    # NOTE(review): same max-round averaging assumption as score_regression.
    model_score = score / df['round'].unique().max()
    return model_score, prediction_df
logStep("DONE");
end_time = datetime.datetime.now()
step09_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step09_elapsed_time} seconds")
# %% [markdown]
# Scaling
# %%
start_time = datetime.datetime.now()
logStep("Scaling and Splitting")
df = final_df.copy()
df.reset_index(inplace = True, drop = True)
# Drop identifier/categorical columns that are not fed to the models.
df = df.drop(['circuit_id'] , axis = 1)
df = df.drop(['url'] , axis = 1)
df = df.drop(['nationality'], axis = 1)
df = df.drop(['constructor'], axis = 1)
# Binary target: 1 = race winner, 0 = everyone else.
df.podium = df.podium.map(lambda x: 1 if x == 1 else 0)
train = df[df.season <2023]
X_train = train.drop(['driver', 'podium'], axis = 1)
y_train = train.podium
# Fit the shared scaler here; score_* functions reuse it via transform().
X_train = pd.DataFrame(scaler.fit_transform(X_train), columns = X_train.columns)
logStep("DONE");
end_time = datetime.datetime.now()
step10_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step10_elapsed_time} seconds")
# %% [markdown]
# Linear Regression
# %%
start_time = datetime.datetime.now()
logStep("Linear Regression")
# Shared across all model sections: one row per (model, hyper-params) combination.
comparison_dict = {'model': [], 'params': [], 'score': []}
params={'fit_intercept': [True, False]}
for fit_intercept in params['fit_intercept']:
    model_params = (fit_intercept)
    model = LinearRegression(fit_intercept = fit_intercept)
    model.fit(X_train, y_train)
    model_score, model_prediction = score_regression(model)
    comparison_dict['model'].append('linear_regression')
    comparison_dict['params'].append(model_params)
    comparison_dict['score'].append(model_score)
print(pd.DataFrame(comparison_dict).groupby('model')['score'].max())
# NOTE: `model` below is the LAST model from the grid loop, not the best one.
predictions = model.predict(X_train)
target_names = df.driver.unique()
newpred = np.zeros(len(predictions))
for i in range(len(predictions)):
    newpred[i] = predictions[i]
p = np.nan_to_num(newpred)
# Scale regression outputs by 100 and truncate to ints to use them as labels.
j = 0
for i in p:
    i = i * 100
    i = int(i)
    p[j] = i
    j = j + 1
y = np.nan_to_num(y_train)
# NOTE(review): feeding truncated regression outputs into classification_report
# with driver names as target_names looks questionable -- confirm intent.
print(classification_report(y, p, target_names = target_names,labels=np.unique(p)))
logStep("DONE");
end_time = datetime.datetime.now()
step11_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step11_elapsed_time} seconds")
# %% [markdown]
# Random Forest Regressor
# %%
start_time = datetime.datetime.now()
logStep("Random Forest Regressor")
# Hyper-parameter grid (exhaustive nested loops, no CV).
params={'criterion': ['friedman_mse'],
        'max_features': [0.8, 1, None],
        'max_depth': [None]}
for criterion in params['criterion']:
    for max_features in params['max_features']:
        for max_depth in params['max_depth']:
            model_params = (criterion, max_features, max_depth)
            model = RandomForestRegressor(criterion = criterion,
                                          max_features = max_features,
                                          max_depth = max_depth,
                                          random_state = 1)
            model.fit(X_train, y_train)
            model_score, prediction_df = score_regression(model)
            comparison_dict['model'].append('random_forest_regressor')
            comparison_dict['params'].append(model_params)
            comparison_dict['score'].append(model_score)
print(pd.DataFrame(comparison_dict).groupby('model')['score'].max())
# NOTE: `model` below is the LAST grid combination, not the best scorer.
predictions = model.predict(X_train)
target_names = df.driver.unique()
newpred = np.zeros(len(predictions))
for i in range(len(predictions)):
    newpred[i] = predictions[i]
p = np.nan_to_num(newpred)
j = 0
for i in p:
    i = i * 100
    i = int(i)
    p[j] = i
    j = j + 1
y = np.nan_to_num(y_train)
print(classification_report(y, p, target_names = target_names,labels=np.unique(p)))
logStep("DONE");
end_time = datetime.datetime.now()
step12_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step12_elapsed_time} seconds")
# %% [markdown]
# Logistic Regression
# %%
start_time = datetime.datetime.now()
logStep("Logistic Regression")
# Grid over penalty/solver/C; note some (penalty, solver) pairs are invalid in
# sklearn and will raise inside the loop -- warnings are globally suppressed.
params={'penalty': ['l1', 'l2'],
        'solver' : ['saga', 'liblinear'],
        'C' : np.logspace(-3,1,20)}
for penalty in params['penalty']:
    for solver in params['solver']:
        for c in params['C']:
            model_params = (penalty, solver, c)
            model = LogisticRegression(penalty = penalty, solver = solver, C = c, max_iter = 10000)
            model.fit(X_train, y_train)
            model_score, model_prediction = score_classification(model)
            comparison_dict['model'].append('logistic_regression')
            comparison_dict['params'].append(model_params)
            comparison_dict['score'].append(model_score)
print(pd.DataFrame(comparison_dict).groupby('model')['score'].max())
# NOTE: `model` below is the LAST grid combination, not the best scorer.
predictions = model.predict(X_train)
target_names = df.driver.unique()
# Print the predictions
newpred = np.zeros(len(predictions))
for i in range(len(predictions)):
    newpred[i] = predictions[i]
p = np.nan_to_num(newpred)
j = 0
for i in p:
    i = i * 100
    i = int(i)
    p[j] = i
    j = j + 1
y = np.nan_to_num(y_train)
print(classification_report(y, p, target_names = target_names,labels=np.unique(p)))
logStep("DONE");
end_time = datetime.datetime.now()
step13_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step13_elapsed_time} seconds")
# %% [markdown]
# Random Forest Classifier
# %%
start_time = datetime.datetime.now()
logStep("Random Forest Classifier")
# Hyper-parameter grid (exhaustive nested loops, no CV).
params = {
    'criterion' : ['gini', 'entropy'],
    'max_features': [None],
    'max_depth' : [None]
    }
for criterion in params['criterion']:
    for max_features in params['max_features']:
        for max_depth in params['max_depth']:
            model_params = (criterion, max_features, max_depth)
            model = RandomForestClassifier(criterion = criterion, max_features = max_features, max_depth = max_depth)
            model.fit(X_train, y_train)
            model_score, model_prediction = score_classification(model)
            comparison_dict['model'].append('random_forest_classifier')
            comparison_dict['params'].append(model_params)
            comparison_dict['score'].append(model_score)
print(pd.DataFrame(comparison_dict).groupby('model')['score'].max())
# NOTE: `model` below is the LAST grid combination; also no random_state here,
# unlike the regressor section, so this report is not reproducible.
predictions = model.predict(X_train)
target_names = df.driver.unique()
newpred = np.zeros(len(predictions))
for i in range(len(predictions)):
    newpred[i] = predictions[i]
p = np.nan_to_num(newpred)
j = 0
for i in p:
    i = i * 100
    i = int(i)
    p[j] = i
    j = j + 1
y = np.nan_to_num(y_train)
print(classification_report(y, p, target_names = target_names,labels=np.unique(p)))
logStep("DONE");
end_time = datetime.datetime.now()
step14_elapsed_time = end_time - start_time
logStep(F"ELAPSED TIME: {step14_elapsed_time} seconds")
# %% [markdown]
# Neural Network
# %%
start_time = datetime.datetime.now()
logStep("Neural Network Classifier")
# 'solver' is a scalar here (not iterated); the constructor hard-codes 'adam'.
params={'hidden_layer_sizes': [(60,20,40,5), (50,25,50,10)],
        'activation' : ['tanh', 'relu'],
        'solver' : 'adam',
        'alpha' : np.logspace(-4,2,20)}
for hidden_layer_sizes in params['hidden_layer_sizes']:
    for activation in params['activation']:
        for alpha in params['alpha']:
            model_params = (hidden_layer_sizes, activation, 'adam', alpha )
            model = MLPClassifier(hidden_layer_sizes = hidden_layer_sizes, activation = activation, solver = 'adam', alpha = alpha, random_state = 1)
            model.fit(X_train, y_train)
            model_score, model_prediction = score_classification(model)
            comparison_dict['model'].append('neural_network_classifier')
            comparison_dict['params'].append(model_params)
            comparison_dict['score'].append(model_score)
print(pd.DataFrame(comparison_dict).groupby('model')['score'].max())
# NOTE: `model` below is the LAST grid combination, not the best scorer.
predictions = model.predict(X_train)
target_names = df.driver.unique()
newpred = np.zeros(len(predictions))
for i in range(len(predictions)):
    newpred[i] = predictions[i]
p = np.nan_to_num(newpred)
j = 0
for i in p:
    i = i * 100
    i = int(i)
    p[j] = i
    j = j + 1
y = np.nan_to_num(y_train)
print(classification_report(y, p, target_names = target_names,labels=np.unique(p)))
logStep("DONE");
end_time = datetime.datetime.now()
step15_elapsed_time = end_time - start_time
end_time_tt = datetime.datetime.now()
steptt_elapsed_time = end_time_tt - start_time00
logStep(F"ELAPSED TIME: {step15_elapsed_time} seconds")
logStep(F"TOT ELA TIME: {steptt_elapsed_time} seconds")
| joseeneas/RB_Module_24_Project_04 | Notebooks/ML_Complex_API.PY | ML_Complex_API.PY | py | 32,862 | python | en | code | 0 | github-code | 13 |
def reverse(num):
    """Return `num` (a non-negative int) with its decimal digits reversed."""
    flipped = 0
    while num > 0:
        # divmod keeps exact integer arithmetic.  The original used
        # int(num / 10), which goes through a float and silently corrupts
        # integers above 2**53 -- the 50-iteration reverse-and-add search in
        # Lychrel() produces numbers well past that limit.
        num, digit = divmod(num, 10)
        flipped = flipped * 10 + digit
    return flipped
def Lychrel(num):
    """True if `num` fails to reach a palindrome within 50 reverse-and-add steps."""
    num += reverse(num)  # first reverse-and-add iteration
    for _ in range(50):
        flipped = reverse(num)
        if flipped == num:
            return False  # palindrome reached -> not a Lychrel candidate
        num += flipped
    return True
# Project Euler 55: count the Lychrel candidates below 10001.
total = sum(1 for candidate in range(1, 10001) if Lychrel(candidate))
print(total)
# Answer: 249 | menduhkesici/Project_Euler | Problem 55 - Lychrel numbers.py | Problem 55 - Lychrel numbers.py | py | 481 | python | en | code | 0 | github-code | 13 |
12210329673 | import Setup
import os
import sys
from os import environ as environment
from typing import List
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from lib.Common import get_team
from replays.Replay import Replay, Team
from analysis.Replay import get_ptbase_tslice, get_ptbase_tslice_side
from replays.Ward import Ward, WardType
from dotenv import load_dotenv
import matplotlib.pyplot as plt
# DB Setups
session = Setup.get_fullDB()            # full replay DB session (project helper)
team_session = Setup.get_team_session() # team metadata DB session
load_dotenv(dotenv_path="../setup.env")
# teamName = "Royal Never Give Up"
teamName = "Hippomaniacs"
team = get_team(team_session, teamName)
# Royal never give up test case for bad pos
# r_query = team.get_replays(session).filter(Replay.replayID == 4857623860)
r_query = team.get_replays(session).filter(Replay.replayID == 4901403209)
d_query = team.get_replays(session)
#d_query = team.get_replays(session).filter(Replay.replayID == 4901517396)
# Observer wards placed between -2 min (pre-horn) and 20 min of game time.
# NOTE(review): the radiant query uses the side-aware helper while the dire
# query uses the plain one -- presumably intentional; confirm.
wards_radiant = get_ptbase_tslice_side(session, r_query, team=team,
                                       Type=Ward,
                                       side=Team.RADIANT,
                                       start=-2*60, end=20*60)
wards_radiant = wards_radiant.filter(Ward.ward_type == WardType.OBSERVER)
wards_dire, _ = get_ptbase_tslice(session, d_query, team=team,
                                  Type=Ward,
                                  start=-2*60, end=20*60)
wards_dire = wards_dire.filter(Ward.ward_type == WardType.OBSERVER)
from analysis.ward_vis import build_ward_table
data = build_ward_table(wards_radiant, session, team_session, team)
ddata = build_ward_table(wards_dire, session, team_session, team)
from analysis.ward_vis import plot_full_text, plot_num_table, plot_eye_scatter, plot_drafts, plot_drafts_above
# Full-text ward plot for the radiant data set (saved by the code below
# only when the corresponding savefig lines are uncommented).
fig, ax = plt.subplots(figsize=(10, 13))
plot_full_text(data, ax)
# plot_drafts(r_query, ax, r_name=teamName)
# fig.savefig("r_full_text.png", bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_full_text(ddata, ax)
# plot_drafts(d_query, ax, d_name=teamName)
# fig.savefig("d_full_text.png", bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_num_table(data, ax)
# plot_drafts(r_query, ax, r_name=teamName)
# fig.savefig("r_table.png", bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_num_table(ddata, ax)
# plot_drafts(d_query, ax, d_name=teamName)
# fig.savefig("d_table.png", bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_eye_scatter(data, ax)
# plot_drafts(r_query, ax, r_name=teamName)
# fig.savefig("eye_large.png", bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_eye_scatter(data, ax, size=(12, 9))
# plot_drafts(r_query, ax, r_name=teamName)
# fig.savefig("eye_small.png", bbox_inches='tight')
# https://stackoverflow.com/questions/19306510/determine-matplotlib-axis-size-in-pixels
def get_ax_size(ax_in, fig_in):
    """Return the (width, height) of an axes in display pixels.

    The window extent is converted to inches via the figure's DPI scale
    transform, then multiplied back by the DPI.
    """
    inches = ax_in.get_window_extent().transformed(
        fig_in.dpi_scale_trans.inverted())
    return inches.width * fig_in.dpi, inches.height * fig_in.dpi
fig, ax = plt.subplots(figsize=(10, 13))
# Axes size in pixels, needed by plot_drafts_above for layout.
width, height = get_ax_size(ax, fig)
extras = plot_eye_scatter(data, ax, size=(18, 14))
drafts = plot_drafts_above(r_query, ax, width, r_name=teamName)
#fig.savefig("r_eye_med.png", bbox_inches='tight')
# Pass the draft/scatter artists explicitly so the tight bounding box
# accounts for artists drawn outside the axes.
fig.savefig("r_eye_med.png", bbox_extra_artists=(*drafts, *extras), bbox_inches='tight')
# fig, ax = plt.subplots(figsize=(10, 13))
# plot_eye_scatter(ddata, ax, size=(18, 14))
# plot_drafts(d_query, ax, d_name=teamName)
# fig.savefig("d_eye_med.png", bbox_inches='tight') | DrCognito/StaticAnalysis | src/StaticAnalysis/tests/ward_newvis2.py | ward_newvis2.py | py | 3,673 | python | en | code | 0 | github-code | 13 |
20434221886 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
class Grid:
    """Simple grid-world environment for tabular RL experiments.

    Positions follow array convention: i indexes rows top-to-bottom and
    j indexes columns left-to-right; the state is the (i, j) pair.
    """

    def __init__(self, rows, columns, start):
        self.rows = rows
        self.columns = columns
        # Current location is tracked through instance variables i and j.
        self.i = start[0]
        self.j = start[1]

    def set(self, rewards, actions):
        """Attach the reward and action tables.

        rewards: dict mapping (i, j) state -> numerical reward.
        actions: dict mapping (i, j) state -> tuple of legal moves;
                 states absent from this dict are terminal (walls are
                 simply never reachable because no action leads to them).
        """
        self.rewards = rewards
        self.actions = actions

    def set_state(self, s):
        """Force the current position to state s (e.g. for policy evaluation)."""
        self.i = s[0]
        self.j = s[1]

    def current_state(self, s=None):
        """Return the current (i, j) position.

        The unused *s* parameter is kept with a default for backward
        compatibility; previously it was required, which made
        undo_move()'s no-argument call raise a TypeError.
        """
        return (self.i, self.j)

    def is_terminal(self, s):
        """True iff no actions are defined for state s."""
        return s not in self.actions

    def move(self, action):
        """Apply a legal action and return the reward of the new state.

        Illegal actions are ignored and None is returned, matching the
        original behaviour.
        """
        if action in self.actions[(self.i, self.j)]:
            if action == "U":
                self.i -= 1
            if action == "D":
                self.i += 1
            if action == "R":
                self.j += 1
            if action == "L":
                self.j -= 1
            return self.rewards.get((self.i, self.j), 0)

    def undo_move(self, action):
        """Reverse a previously applied action."""
        if action == "U":
            self.i += 1
        if action == "D":
            self.i -= 1
        if action == "R":
            self.j -= 1
        if action == "L":
            self.j += 1
        # We must land back on a known state; fail loudly otherwise.
        assert(self.current_state() in self.all_states())

    def game_over(self):
        """True when the current state is terminal (has no actions)."""
        return (self.i, self.j) not in self.actions

    def all_states(self):
        """Every state from which an action can be taken or a reward received."""
        return set(self.actions.keys() | self.rewards.keys())
#Defines the grid and the movement in the grid including rewards and actions
def standard_grid():
    """Build the classic 3x4 grid world.

    Layout (x = wall, s = start, numbers = terminal rewards):
        .  .  .  +1
        .  x  .  -1
        s  .  .   .
    Start state is (2, 0); terminal rewards sit at (0, 3) and (1, 3).
    """
    grid = Grid(3, 4, (2, 0))
    rewards = {(0, 3): 1, (1, 3): -1}
    actions = {
        (0, 0): ('D', 'R'),
        (0, 1): ('L', 'R'),
        (0, 2): ('L', 'D', 'R'),
        (1, 0): ('U', 'D'),
        (1, 2): ('U', 'D', 'R'),
        (2, 0): ('U', 'R'),
        (2, 1): ('L', 'R'),
        (2, 2): ('L', 'R', 'U'),
        (2, 3): ('L', 'U'),
    }
    grid.set(rewards, actions)
    return grid
#Shows the penalisation for each move so that we can achieve the target efficiently
def negative_grid(step_cost=-0.1):
    """Standard grid where every non-terminal state also costs *step_cost*.

    The per-step penalty encourages policies that reach a terminal state
    quickly.  The original referenced an undefined name ``step_cost``
    (a NameError at call time); it is now a parameter with a conventional
    default of -0.1, which keeps the no-argument call signature working.
    """
    g = standard_grid()
    g.rewards.update({
        (0, 0): step_cost,
        (0, 1): step_cost,
        (0, 2): step_cost,
        (1, 0): step_cost,
        (1, 2): step_cost,
        (2, 0): step_cost,
        (2, 1): step_cost,
        (2, 2): step_cost,
        (2, 3): step_cost,
    })
    return g
def play_game(agent, env):
    """Placeholder for an episode loop between an agent and a Grid env (not implemented)."""
    pass
# In[ ]:
| aashya/Reinforcement-Learning | Grid_world.py | Grid_world.py | py | 3,775 | python | en | code | 0 | github-code | 13 |
4511030690 | #
# @lc app=leetcode.cn id=765 lang=python
#
# [765] 情侣牵手
#
# https://leetcode-cn.com/problems/couples-holding-hands/description/
#
# algorithms
# Hard (59.81%)
# Likes: 238
# Dislikes: 0
# Total Accepted: 24.1K
# Total Submissions: 36K
# Testcase Example: '[0,2,1,3]'
#
# N 对情侣坐在连续排列的 2N 个座位上,想要牵到对方的手。 计算最少交换座位的次数,以便每对情侣可以并肩坐在一起。
# 一次交换可选择任意两人,让他们站起来交换座位。
#
# 人和座位用 0 到 2N-1 的整数表示,情侣们按顺序编号,第一对是 (0, 1),第二对是 (2, 3),以此类推,最后一对是 (2N-2,
# 2N-1)。
#
# 这些情侣的初始座位 row[i] 是由最初始坐在第 i 个座位上的人决定的。
#
# 示例 1:
#
#
# 输入: row = [0, 2, 1, 3]
# 输出: 1
# 解释: 我们只需要交换row[1]和row[2]的位置即可。
#
#
# 示例 2:
#
#
# 输入: row = [3, 2, 0, 1]
# 输出: 0
# 解释: 无需交换座位,所有的情侣都已经可以手牵手了。
#
#
# 说明:
#
#
# len(row) 是偶数且数值在 [4, 60]范围内。
# 可以保证row 是序列 0...len(row)-1 的一个全排列。
#
#
#
# @lc code=start
class UF(object):
    """Union-find over seats, where seats 2k and 2k+1 start out joined.

    ``uc`` counts successful merges, which for the couples problem equals
    the minimum number of swaps required.
    """

    def __init__(self, N):
        self.F = [0] * N
        for i in range(0, N, 2):
            self.F[i] = i
            self.F[i + 1] = i
        # Number of merges performed.
        self.uc = 0

    def Find(self, x):
        # Path compression: point every node on the path straight at the root.
        if self.F[x] != x:
            self.F[x] = self.Find(self.F[x])
        return self.F[x]

    def Union(self, x, y):
        root_x = self.Find(x)
        root_y = self.Find(y)
        if root_x != root_y:
            self.F[root_x] = root_y
            self.uc += 1
class Solution(object):
    def minSwapsCouples(self, A):
        """Minimum swaps so that each couple (2k, 2k+1) sits side by side.

        Union the two people on every adjacent seat pair; each merge of
        two distinct couple groups corresponds to exactly one swap.
        """
        if not A:
            return 0
        uf = UF(len(A))
        for seat in range(0, len(A), 2):
            uf.Union(A[seat], A[seat + 1])
        return uf.uc
# @lc code=end
| lagoueduCol/Algorithm-Dryad | 07.UF/765.情侣牵手.py | 765.情侣牵手.py | py | 2,038 | python | zh | code | 134 | github-code | 13 |
class Solution:
    def findRelativeRanks(self, score: List[int]) -> List[str]:
        """Return each athlete's rank label; the top three get medal names.

        Scores are guaranteed unique by the problem, so a score -> label
        mapping is safe.  (The original class line was corrupted by a
        fused dataset sequence id.)
        """
        medals = ('Gold Medal', 'Silver Medal', 'Bronze Medal')
        ordered = sorted(score, reverse=True)
        rank = {s: medals[i] if i < 3 else str(i + 1)
                for i, s in enumerate(ordered)}
        return [rank[s] for s in score]
return [rank[s] for s in score] | SkadiArtemis/VSCODE_Project | leetcode_tasks/0506_relative_ranks.py | 0506_relative_ranks.py | py | 372 | python | en | code | 0 | github-code | 13 |
28942856080 | # Shane Hagan
# CodeChef Burgers Problem
# Date: 06/17/2022
# Read the number of test cases.
# NOTE(review): raw_input() makes this Python 2 source; under Python 3 it
# would need input() instead.  Left as-is to preserve the py2 semantics.
testCases = int(raw_input())
for i in range(testCases):
    # Read integers x, y
    (x, y) = map(int, raw_input().split(' '))
    # print the min number of either burgers or buns
    print(min(x,y))
# ran successfully with the test cases and outputs:
'''
Input
4
2 2
2 3
3 2
23 17
Output
2
2
2
17
'''
| HaganShane/Side-Projects | CodeChef/Burger.py | Burger.py | py | 405 | python | en | code | 0 | github-code | 13 |
10079080434 | ## @file SALst.py
# @title SALst
# @author Jame Tran
# @date Feb.16, 2019
from StdntAllocTypes import *
from AALst import *
from DCapALst import *
## @brief A class containing a list of ('macid', SInfoT) tuples, and methods to add, delete
# and recieve information on. Also contains methods to recieve average and sort by generic lambda
# functions, and method to allocate students
class SALst:
    # Class-level store: list of [macid, SInfoT] pairs shared by all methods.
    s = []
    ## @brief A method to initalize the list
    @staticmethod
    def init():
        SALst.s = []
    ## @brief A method to add an (m, i) tuple to the list
    # @details First checks the list to see if the m value is already in there
    # if it is, raise a KeyError. Else, add the (m, i) tuple to the list
    # @param1 a string representing mac ids
    # @param2 Student information of type SInfoT
    @staticmethod
    def add_stdnt(m, i):
        in_list = False
        for element in SALst.s or []:
            if m == element[0]:
                in_list = True
                break
        if (in_list is False):
            (SALst.s).append([m, i])
        else:
            raise KeyError
    ## @brief A method to remove a (m, i) tuple corresponding to a macid m
    # @details First check whether m is in list. If it isnt, raise KeyError Exception
    # Else, remove (m, i) tuple corresponding to m
    # @param1 m of type string, corresponding to mac id
    @staticmethod
    def remove(m):
        in_list = False
        # NOTE(review): `m in element` is a membership test over the
        # [macid, info] pair, not an equality check on the macid slot.
        for element in SALst.s or []:
            if m in element:
                in_list = True
                the_element = element
                break
        if (in_list):
            (SALst.s).remove(the_element)
        else:
            raise KeyError
    ## @brief A method to check whether or not a (m, i) tuple corresponding to
    # input m is in the list
    # @param1 m of type string, where 'm' is a macid
    # @return boolean of whether or not (m,i) is in list
    @staticmethod
    def elm(m):
        # NOTE(review): returns None (falsy) rather than False when m is
        # absent — callers using the result only in boolean context are fine.
        for element in SALst.s or []:
            if m in element:
                return True
    ## @brief A method to return student info give a macid
    # @param1 takes in a string pertaining to macid
    # @return student information of type SInfotT
    @staticmethod
    def info(m):
        for element in SALst.s or []:
            if m in element:
                return element[1]
    ## @brief a method to sort the list by a generic lambda expression
    # @details First, sorts the list in descending order. Next, takes in
    # a generic lambda expression as input and uses it to filter list using filter()
    # returns filterd list
    # @param1 lambda expression
    # @return List of tuples (m, i) where m is macid of type string and i is student info
    # of type SInfoT
    @staticmethod
    def sort(f):
        if SALst.s == []:
            raise Exception('List is empty')
        # Bubble sort of SALst.s in place, by GPA descending (the "generic"
        # part is only the filter below, not the sort key).
        for i in SALst.s:
            for j in range(len(SALst.s) - 1):
                if ((SALst.s)[j])[1].gpa < ((SALst.s)[j+1])[1].gpa:
                    temp = (SALst.s)[j+1]
                    (SALst.s)[j + 1] = (SALst.s)[j]
                    (SALst.s)[j] = temp
        pre_filter = []
        for elem in SALst.s:
            pre_filter.append(elem[1])
        a = list(filter(f, pre_filter))
        # Map the surviving SInfoT records back to their macids, in GPA order.
        final_list = []
        for elem in SALst.s:
            for j in a:
                if j in elem:
                    final_list.append(elem[0])
        return final_list
    @staticmethod
    def getS():
        # Debug helper: dump the raw backing list.
        print(SALst.s)
    ## @brief A method to calculate the average gpa of the list filtered by generic
    # lambda method
    # @details takes in list and filters it using lambda method. Then returns the average
    # GPA. If the list is empty, raise a ValueError
    # @param1 generic lambda expression
    # @return average of type float
    @staticmethod
    def average(f):
        pre_filter = []
        for elem in SALst.s:
            pre_filter.append(elem[1])
        filtered = list(filter(f, pre_filter))
        if filtered == []:
            raise ValueError
        # NOTE(review): local `sum` shadows the builtin inside this method.
        sum = 0
        for i in filtered:
            sum = sum + i.gpa
        average = sum / len(filtered)
        return average
    ## @brief A method to allocate the students into their preferred programs
    # @details The method only allocates students with GPA 4.0 or higher. First, students
    # with freechoice are sorted by GPA and directly placed into the programs of their choice.
    # Next, students without free choice are allocated in based on department capacity. If students'
    # First choice: if it is full, students are moved to their second choice, and so on.
    # If a student is unable to be assigned, raise a RuntimeError
    @staticmethod
    def allocate():
        AALst.init()
        # Free-choice students bypass capacity checks entirely.
        F = SALst.sort(lambda t: t.freechoice and t.gpa >= 4.0)
        for m in F:
            ch = (SALst.info(m)).choices
            AALst.add_stdnt(ch.next(), m)
        S = SALst.sort(lambda t: not t.freechoice and t.gpa >= 4.0)
        for m in S:
            ch = (SALst.info(m)).choices
            alloc = False
            # Walk the student's ranked choices until a department has room.
            while ((not alloc) and (not ch.end())):
                d = ch.next()
                if AALst.num_alloc(d) < DCapALst.capacity(d):
                    AALst.add_stdnt(d, m)
                    alloc = True
            if not alloc:
                raise RuntimeError
| JameTran/Automated-Admissions- | src/SALst.py | SALst.py | py | 5,409 | python | en | code | 0 | github-code | 13 |
11032604827 | from typing import List
from random import shuffle

import hdbscan
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.style as style
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from joblib import Parallel, delayed, Memory
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, silhouette_samples
class CosineClusters():
    """Manages a fixed set of Cluster objects and an item -> cluster map."""
    def __init__(self, num_clusters: int = 100, Euclidean=False):
        self.clusters = []
        self.item_cluster = {}
        self.Euclidean = Euclidean
        # Create Initial Cluster
        # NOTE(review): Cluster() is built without forwarding Euclidean, so
        # each cluster's own self.Euclidean stays False; only the explicit
        # keyword passed in add_item_to_best_cluster applies — confirm intent.
        for i in range(0, num_clusters):
            self.clusters.append(Cluster())
    def add_random_training_items(self, index_unlabelled, unlabelled):
        """Deal items round-robin across the clusters (initial seeding)."""
        cur_index = 0
        for index, item in zip(index_unlabelled, unlabelled):
            self.clusters[cur_index].add_to_cluster(index, item)
            formulation_id = index
            self.item_cluster[formulation_id] = self.clusters[cur_index]
            cur_index += 1
            if cur_index >= len(self.clusters):
                cur_index = 0
    def add_items_to_best_cluster(self, index_unlabelled, unlabelled):
        """Reassign every item; return how many changed cluster."""
        added = 0
        for index, item in zip(index_unlabelled, unlabelled):
            print(added)
            new = self.add_item_to_best_cluster(index, item)
            if new:
                added += 1
        return added
    def add_item_to_best_cluster(self, index, item):
        """Move one item to its most similar cluster.

        Returns True when the item ended up in a different cluster than
        before (used as a convergence signal by the caller).
        """
        best_cluster = None
        best_fit = float("-inf")
        previous_cluster = None
        formulation_id = index
        # Remove from its current cluster first so the comparison is fair.
        if formulation_id in self.item_cluster:
            previous_cluster = self.item_cluster[formulation_id]
            previous_cluster.remove_from_cluster(formulation_id, item)
        for cluster in self.clusters:
            fit = cluster.cosine_similarity(item, Euclidean=self.Euclidean)
            if fit > best_fit:
                best_fit = fit
                best_cluster = cluster
        best_cluster.add_to_cluster(index, item)
        self.item_cluster[formulation_id] = best_cluster
        if best_cluster == previous_cluster:
            return False
        else:
            return True
    def get_centroids(self, number_per_cluster=1):
        """Per cluster: the members closest to its centroid."""
        centroids = []
        for cluster in self.clusters:
            centroids.append(cluster.get_centroid(number_per_cluster))
        return centroids
    def get_outliers(self, number_per_cluster=1):
        """Per cluster: the members farthest from its centroid."""
        outliers = []
        for cluster in self.clusters:
            outliers.append(cluster.get_outlier(number_per_cluster))
        return outliers
    def get_randoms(self, number_per_cluster=1):
        """Per cluster: randomly sampled members."""
        randoms = []
        for cluster in self.clusters:
            randoms.append(cluster.get_random_members(number_per_cluster))
        return randoms
class Cluster():
    """A single cluster: members keyed by formulation id, plus a running
    sum of member feature vectors (centroid = sum / member count)."""

    feature_idx = {}

    def __init__(self, Euclidean=False):
        self.members = {}            # formulation_id -> feature vector
        self.feature_vector = None   # running sum of member vectors
        self.Euclidean = Euclidean   # default metric used by distance_sort()
        self.distance = []

    def add_to_cluster(self, index, item):
        """Register *item* under key *index* and fold it into the running sum.

        Replaces the original try/except-based None check (which relied on
        `array == None` raising) with an explicit identity test.
        """
        self.members[index] = item
        if self.feature_vector is None:
            self.feature_vector = item
        else:
            self.feature_vector = self.feature_vector + item

    def remove_from_cluster(self, index, item):
        """Drop the member with key *index* (if present) from the running sum.

        BUG FIX: the original popped with a default of False and then
        tested `is not None`, so a *missing* id still subtracted `item`
        from the sum, corrupting the centroid.
        """
        exists = self.members.pop(index, None)
        if exists is not None:
            self.feature_vector = self.feature_vector - item

    def cosine_similarity(self, item, Euclidean=False):
        """Similarity between *item* and the cluster centroid.

        Euclidean=True returns the negated Euclidean distance (so larger
        is still better); otherwise torch cosine similarity is used.
        BUG FIX: the tensor construction was commented out in the
        original, leaving `item_tensor`/`center_tensor` undefined.
        """
        center_vec = self.feature_vector / len(self.members)
        if Euclidean:
            return -np.sqrt(np.sum(np.square(item - center_vec)))
        item_tensor = torch.FloatTensor(item)
        center_tensor = torch.FloatTensor(center_vec)
        similarity = F.cosine_similarity(item_tensor, center_tensor, 0)
        return similarity.item()  # converts to float

    def size(self):
        """Number of members currently in the cluster."""
        return len(self.members)

    def distance_sort(self):
        """Rank members by similarity to the centroid, best first."""
        self.distance = []
        for formulation_id, item in self.members.items():
            similarity = self.cosine_similarity(item, Euclidean=self.Euclidean)
            self.distance.append([similarity, formulation_id, item])
        self.distance.sort(reverse=True, key=lambda x: x[0])
        return self.distance

    def get_centroid(self, number=1):
        """The *number* members closest to the centroid."""
        if len(self.members) == 0:
            return []
        return self.distance_sort()[:number]

    def get_outlier(self, number=1):
        """The *number* members farthest from the centroid."""
        if len(self.members) == 0:
            return {}
        return self.distance_sort()[-number:]

    def get_random_members(self, number=1):
        """*number* members sampled uniformly at random (with replacement)."""
        if len(self.members) == 0:
            return []
        ranked = self.distance_sort()
        return [ranked[np.random.randint(len(self.members))]
                for _ in range(number)]
class KMeans_Cluster():
    """KMeans wrapper: fitting, k-selection helpers, and diverse-sample picking."""
    def __init__(self, unlabeled_data: np.ndarray, n_clusters: int = 5, n_init: str = 'k-means++',
                 max_iteration: int = 500,
                 algorithm: str = 'auto'):
        # unlabeled_data is a pair: [0] row index, [1] feature matrix.
        self.kmeans = KMeans(n_clusters=n_clusters, init=n_init, max_iter=max_iteration, algorithm=algorithm,
                             random_state=42)
        self.unlabeled_data_index = unlabeled_data[0]
        self.unlabeled_data = unlabeled_data[1]
        self.n_init = n_init
        self.algorithm = algorithm
    def kmeans_fit(self):
        """Fit the configured KMeans model on the feature data."""
        self.kmeans.fit(self.unlabeled_data)
    def kmeans_intertia(self):
        """Return the fitted model's inertia (sum of squared distances)."""
        self.kmeans.inertia_
        return self.kmeans.inertia_
    def elbow_method(self, clusters: int = 5):
        """Plot inertia for k = 1..clusters-1 (visual elbow selection)."""
        SSE = []
        for cluster in range(1, clusters):
            kmeans = KMeans(n_clusters=cluster, init=self.n_init, algorithm=self.algorithm)
            kmeans.fit(self.unlabeled_data)
            SSE.append(kmeans.inertia_)
        frame = pd.DataFrame({'Cluster': range(1, clusters), 'SSE': SSE})
        plt.figure(figsize=(12, 6))
        plt.plot(frame['Cluster'], frame['SSE'], marker='o')
        plt.xlabel('Number of clusters')
        plt.ylabel('Inertia')
        plt.show()
    def kMeansRes(self, scaled_data, k: int, alpha: float = 0.01):
        '''
        :param scaled_data: matrix - Scaled data rows are samples and columns are the features for clustering
        :param k: int - current k for applying kmeans
        :param alpha: float - manually turned factor that gives a penality to the number of clusters
        :return scaled inertia:
        '''
        interia_o = np.square((scaled_data - scaled_data.mean(axis=0))).sum()
        # git k-means
        kmeans = KMeans(n_clusters=k, init=self.n_init, algorithm=self.algorithm, random_state=0).fit(scaled_data)
        scaled_inertia = (kmeans.inertia_ / interia_o) + (alpha * k)
        return scaled_inertia
    def chooseBestKforKmeansParallel(self, k_range, alpha: float = 0.01):
        """Evaluate scaled inertia for k = 1..k_range-1 in parallel; pick the minimum."""
        print('Finding Best K for KMeans...')
        ans = Parallel(n_jobs=-1, verbose=10)(
            delayed(self.kMeansRes)(self.unlabeled_data, k, alpha) for k in range(1, k_range))
        ans = list(zip(range(1, k_range), ans))
        results = pd.DataFrame(ans, columns=['k', 'Scaled Inertia']).set_index('k')
        best_k = results.idxmin()[0]
        print('Best K for Clustering: ', best_k)
        return best_k, results
    def kmeans_transform(self, data):
        """Return per-cluster distances for *data* under the fitted model."""
        transformed_array = self.kmeans.transform(data)
        return transformed_array
    def kmeans_predict(self, data):
        """Return cluster assignments for *data* under the fitted model."""
        predict_array = self.kmeans.predict(data)
        return predict_array
    def kmeans_labels(self):
        """Cluster label per training row."""
        labels = self.kmeans.labels_
        return labels
    def kmeans_centres(self):
        """Fitted cluster centers."""
        centres = self.kmeans.cluster_centers_
        return centres
    def create_array(self, percentile: float = 95.0, threshold: float = 1.0, n_instances: int = 100,
                     dist_measuring: str = 'euclidean'):
        """Pick ~n_instances rows spread across clusters, each at least
        *threshold* apart from the previously kept row of its cluster."""
        x_val = self.unlabeled_data.copy()
        clusters = self.kmeans_labels()
        centroids = self.kmeans_centres()
        points = np.empty((0, len(x_val[0])), float)
        distances = np.empty((0, len(x_val[0])), float)
        for i, center_elem in enumerate(centroids):
            # CDIST is used to calculate the distance between centre and other points
            # NOTE(review): `clusters == 1` here looks like it should be
            # `clusters == i` (every centroid is compared against cluster 1's
            # points) — confirm before relying on the 'distances' column.
            distances = np.append(distances, cdist([center_elem], x_val[clusters == 1], 'euclidean'))
            points = np.append(points, x_val[clusters == i], axis=0)
        distance_df = pd.DataFrame(distances)
        x_val = pd.DataFrame(x_val)
        x_val['distances'] = distance_df
        x_val['original_index'] = self.unlabeled_data_index
        x_val['label_cluster'] = clusters
        # x_val[f'{percentile}th_percentile'] = np.percentile(distances,percentile)
        distribution_instances = round(n_instances / len(set(clusters)))
        distance_points = {}
        for i in list(set(clusters)):
            print('Cluster: ', i)
            temp_df = x_val[x_val['label_cluster'].isin([i])]
            # distance_points = np.empty((0, len(temp_df[0])), float)
            points = np.empty((0, temp_df.shape[1] - 3), float)
            for index, value in temp_df.iterrows():
                if points.shape[0] <= distribution_instances:
                    convert_series = value.to_frame().T
                    convert_series['original_index'] = convert_series['original_index'].astype(int)
                    convert_series['label_cluster'] = convert_series['label_cluster'].astype(int)
                    formulation_id = convert_series['original_index'].values[0]
                    data = convert_series.drop(columns=['distances', 'original_index', 'label_cluster'])
                    # NOTE(review): `index = index` below is a no-op.
                    index = index
                    # Keep a row only if it is far enough from the last kept row.
                    if points.shape[0] >= 1:
                        distance = cdist(points[-1:, :], data, dist_measuring)
                        if distance >= threshold:
                            distance_points[formulation_id] = distance[0][0]
                            points = np.append(points, data, axis=0)
                            # distance_points = np.append(distance_points, cdist(points[-1],data,'euclidean'))
                    else:
                        points = np.append(points, data, axis=0)
        print('Completed distance measuring...')
        distances_df = pd.DataFrame.from_dict(distance_points, orient='index')
        distances_df = distances_df.rename(columns={0: 'distances_local'})
        result = pd.merge(x_val, distances_df, left_on='original_index', right_index=True)
        results_index = result['original_index']
        distance_score = result['distances_local']
        result.drop(columns=['distances', 'original_index', 'label_cluster', 'distances_local'], inplace=True)
        return result, results_index, distance_score
    def silhouette(self, X: np.ndarray, range_clusters: List[int] = [2, 3, 4, 5, 6, 7, 8, 9]):
        """Plot silhouette analysis for each candidate cluster count."""
        silhouette_avg_n_clusters = []
        for n_clusters in range_clusters:
            # Initiliaze Clusterer with n_clusters value and a random generator seed of 10 for reproducibility
            clusterer = KMeans(n_clusters=n_clusters, init=self.n_init, algorithm=self.algorithm, random_state=42)
            cluster_labels = clusterer.fit_predict(X)
            # The silhouette_score gives the average value for all the samples.
            # This gives a perspective into the density and separation of the formed
            # clusters
            # NOTE(review): n_jobs is not a named silhouette_score parameter; it
            # is forwarded through **kwds to pairwise_distances — verify on the
            # installed sklearn version.
            silhouette_avg = silhouette_score(X, cluster_labels, n_jobs = -1)
            print("For n_clusters = ", n_clusters,
                  "The average silhouette score is: ", silhouette_avg)
            silhouette_avg_n_clusters.append(silhouette_avg)
            # Compute the silhouette scores for each sample
            sample_silhouette_values = silhouette_samples(X, cluster_labels)
            fig, (ax1, ax2) = plt.subplots(1, 2)
            fig.set_size_inches(18, 7)
            # The 1st subplot is the silhouette plot
            # The silhouette coefficient can range from -1 to 1 but in this example lie within [-0.1, 1]
            ax1.set_xlim([-0.1, 1])
            # The (n_clusters +1)*10 is for inserting blank space between silhouette plots of individual clusters
            ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
            y_lower = 10
            for i in range(n_clusters):
                # Aggregate the silhouette score for sample belonging to cluster i, and sort them
                ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
                ith_cluster_silhouette_values.sort()
                size_cluster_i = ith_cluster_silhouette_values.shape[0]
                y_upper = y_lower + size_cluster_i
                color = cm.nipy_spectral(float(i) / n_clusters)
                ax1.fill_between(np.arange(y_lower, y_upper),
                                 0, ith_cluster_silhouette_values,
                                 facecolor=color, edgecolor=color, alpha=0.7)
                # Label the silhouette plots with their cluster numbers in the middle
                ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
                # Compute the new y_lower for next plot
                y_lower = y_upper + 10  # 10 for the 0 samples
            ax1.set_title("The silhouette plot for the various clusters.")
            ax1.set_xlabel("The silhouette coefficient values")
            ax1.set_ylabel("Cluster label")
            # The vertical line for average silhouette score of all the values
            ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
            ax1.set_yticks([])  # Clear the yaxis labels / ticks
            ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
            # 2nd plot showing the actual clusters formed
            colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
            ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                        c=colors, edgecolor='k')
            # Labeling the clusters
            centers = clusterer.cluster_centers_
            # Draw white circles at cluster centers
            ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
                        c="white", alpha=1, s=200, edgecolor='k')
            for i, c in enumerate(centers):
                ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
                            s=50, edgecolor='k')
            ax2.set_title("The visualization of the clustered data.")
            ax2.set_xlabel("Feature space for the 1st feature")
            ax2.set_ylabel("Feature space for the 2nd feature")
            plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                          "with n_clusters = %d" % n_clusters),
                         fontsize=14, fontweight='bold')
            plt.show()
        style.use("fivethirtyeight")
        plt.plot(range_clusters, silhouette_avg_n_clusters)
        plt.xlabel("Number of Clusters (k)")
        plt.ylabel("silhouette score")
        plt.show()
class HDBScan():
    """Density-based clustering (HDBSCAN) over an (index, features) pair."""

    def __init__(self, unlabeled_data):
        self.hdbscan = hdbscan.HDBSCAN()
        #HDBSCAN(algorithm='best', alpha=1.0, approx_min_span_tree=True,
        #gen_min_span_tree=False, leaf_size=40, memory=Memory(cachedir=None),
        #metric='euclidean', min_cluster_size=5, min_samples=None, p=None)
        # unlabeled_data is a pair: [0] row index, [1] feature DataFrame.
        self.unlabeled_data_index = unlabeled_data[0]
        self.unlabeled_data = unlabeled_data[1]

    def hdbscan_fit(self):
        """Fit the HDBSCAN model on the feature data."""
        self.hdbscan.fit(self.unlabeled_data)

    def hdbscan_labels(self):
        """Cluster label per row (-1 marks noise)."""
        return self.hdbscan.labels_

    def distance_sort(self, threshold: float = 1.0, n_instances: int = 100,
                      dist_measuring: str = 'euclidean'):
        """Pick ~n_instances rows spread across clusters, each at least
        *threshold* apart from the previously kept row of its cluster.

        Returns (features, original row index, distance score).

        BUG FIXES vs the original: a nonexistent 'distances' column was
        dropped (only the KMeans variant creates it — here it raised
        KeyError), and the point-buffer width used -3 although only two
        helper columns are added in this class.
        """
        x_val = self.unlabeled_data.copy()
        clusters = self.hdbscan_labels()
        x_val['original_index'] = self.unlabeled_data_index
        x_val['label_cluster'] = clusters
        distribution_instances = round(n_instances / len(set(clusters)))
        distance_points = {}
        for i in list(set(clusters)):
            print('Cluster: ', i)
            temp_df = x_val[x_val['label_cluster'].isin([i])]
            # Two helper columns were added above, hence the -2.
            points = np.empty((0, temp_df.shape[1] - 2), float)
            for index, value in temp_df.iterrows():
                if points.shape[0] <= distribution_instances:
                    convert_series = value.to_frame().T
                    convert_series['original_index'] = convert_series['original_index'].astype(int)
                    convert_series['label_cluster'] = convert_series['label_cluster'].astype(int)
                    formulation_id = convert_series['original_index'].values[0]
                    data = convert_series.drop(columns=['original_index', 'label_cluster'])
                    # Keep a row only if it is far enough from the last kept row.
                    if points.shape[0] >= 1:
                        distance = cdist(points[-1:, :], data, dist_measuring)
                        if distance >= threshold:
                            distance_points[formulation_id] = distance[0][0]
                            points = np.append(points, data, axis=0)
                    else:
                        points = np.append(points, data, axis=0)
        print('Completed distance measuring...')
        distances_df = pd.DataFrame.from_dict(distance_points, orient='index')
        distances_df = distances_df.rename(columns={0: 'distances_local'})
        result = pd.merge(x_val, distances_df, left_on='original_index', right_index=True)
        results_index = result['original_index']
        distance_score = result['distances_local']
        result.drop(columns=['original_index', 'label_cluster', 'distances_local'], inplace=True)
        return result, results_index, distance_score
38297309455 | import pandas as pd
import sys
df = pd.read_excel(r'Book.xlsx')
def get_days(code):
    """Return every non-null 'Day' cell for sheet rows whose 'Code' contains *code*."""
    df['Code'].fillna('', inplace=True)
    rows = df[df['Code'].str.contains(code)].index
    return [df.at[r, 'Day'] for r in rows if not pd.isna(df.at[r, 'Day'])]
def get_time_slots(code):
    """Return every non-null 'Time' cell for sheet rows whose 'Code' contains *code*."""
    df['Code'].fillna('', inplace=True)
    rows = df[df['Code'].str.contains(code)].index
    return [df.at[r, 'Time'] for r in rows if not pd.isna(df.at[r, 'Time'])]
def get_credit_amount(code):
    """Return the first distinct non-null 'Credits' value for rows whose 'Code' contains *code*."""
    df['Code'].fillna('', inplace=True)
    rows = df[df['Code'].str.contains(code)].index
    seen = []
    for r in rows:
        value = df.at[r, 'Credits']
        if not pd.isna(value) and value not in seen:
            seen.append(value)
    # IndexError when no matching credit exists, as in the original.
    return seen[0]
def course_name_to_code(name):
    """Return the first non-null 'Code List' cell for rows whose 'Course Title' contains *name*."""
    df['Course Title'].fillna('', inplace=True)
    rows = df[df['Course Title'].str.contains(name)].index
    codes = [df.at[r, 'Code List'] for r in rows if not pd.isna(df.at[r, 'Code List'])]
    return codes[0]
def get_name(code):
    """Return the first non-null 'Course Title' for rows whose 'Code' contains *code*."""
    df['Code'].fillna('', inplace=True)
    rows = df[df['Code'].str.contains(code)].index
    titles = [df.at[r, 'Course Title'] for r in rows if not pd.isna(df.at[r, 'Course Title'])]
    return titles[0]
class Course:
    """Schedule information for one course, looked up from the spreadsheet."""

    def __init__(self, code):
        # Accept either a short course code (<= 6 chars) or a full title,
        # which is first translated into its code.
        self.code = course_name_to_code(code) if len(code) > 6 else code
        self.time_slots = get_time_slots(self.code)
        self.name = get_name(self.code)
        self.credits = get_credit_amount(self.code)
        self.days = get_days(self.code)
| STONERJ25/sch | excelread.py | excelread.py | py | 2,720 | python | en | code | 0 | github-code | 13 |
33643918993 | #!/usr/bin/env python
from redis import Redis
# Redis client bound to the default localhost instance.
redis = Redis()
while True:
    print('input member:score> ', end='')
    ipt = input()
    if ipt == 'quit':
        # Previously nothing ever left the loop, so the farewell below
        # was unreachable; 'quit' makes it reachable.
        break
    if ipt == 'show': # command 'show'
        # zrange returns ascending scores; reverse for highest-first.
        ranking = redis.zrange('ranking', 0, 5, withscores=True)[::-1]
        for i, m in enumerate(ranking):
            values = {
                'rank': i+1,
                'member': m[0].decode(),
                'point': m[1]
            }
            print('{rank}: {member} ({point}pt)'.format(**values))
        continue
    # BUG FIX: the read value is `ipt`; `args` was never defined (NameError).
    member, score = ipt.split(':')
    # NOTE(review): redis-py >= 3.0 requires a mapping here:
    # redis.zadd('ranking', {member: int(score)}) — confirm installed version.
    redis.zadd('ranking', member, int(score))
print('good bye')
| nasa9084/samples | sorted_set_ranking.py | sorted_set_ranking.py | py | 609 | python | en | code | 0 | github-code | 13 |
6446606661 |
#!/usr/bin/python
#
#
# Ravikumar , June 14,2023
#
# code referred / studied from many sites /books and implemented my own.
#
#
#This class defined just for testing purpose . used as DB table
#
class notify:
    """In-memory stand-in for a notifications DB table (class-level storage)."""
    # Auto-incrementing primary key shared by all callers.
    id:int = 0
    # id -> [details, status]; annotation corrected from `list`: the value
    # is a dict literal and is indexed by id below.
    notification:dict ={}
    @classmethod
    def insertvalue(cls,details,status):
        # Append a new record under the next id.
        detailrecord = [details,status]
        cls.id += 1
        cls.notification[cls.id] = detailrecord
    @classmethod
    def display(cls):
        # Dump the current id counter and the whole table.
        print('incremental id:', cls.id)
        print('details:',cls.notification)
if __name__ =='__main__':
    # Quick smoke test of the class-level storage.
    notify.insertvalue('ravi',0)
    notify.insertvalue('ravi',1)
    notify.display()
19350025644 | import os
import re
import setuptools
# Package metadata consumed by the setuptools.setup() call below.
NAME = "mmd_twosample"
AUTHOR = "Jacquelyn Shelton"
AUTHOR_EMAIL = "jacquelyn.ann.shelton@gmail.com"
DESCRIPTION = "Various kernel-based statistical hypothesis tests for the two-sample problem"
LICENSE = "GPL"
KEYWORDS = "statistical test"
URL = "https://github.com/fatflake/" + NAME
README = ".github/README.md"
# NOTE(review): these are not valid PyPI trove classifiers (compare e.g.
# "Programming Language :: Python :: 3"); an upload may reject them.
CLASSIFIERS = [
    "python3",
    "developmenstatus3",
]
INSTALL_REQUIRES = [
    "numpy",
]
ENTRY_POINTS = {
}
SCRIPTS = [
]
# Directory containing this setup.py; used to resolve relative file reads.
HERE = os.path.dirname(__file__)
def read(file):
with open(os.path.join(HERE, file), "r") as fh:
return fh.read()
# Pull __version__ out of the package's __init__.py so the version number is
# defined in exactly one place.
VERSION = re.search(
    r'__version__ = [\'"]([^\'"]*)[\'"]',
    read(NAME.replace("-", "_") + "/__init__.py")
).group(1)
# The README doubles as the long description shown on PyPI.
LONG_DESCRIPTION = read(README)
if __name__ == "__main__":
    # Assemble the metadata mapping once, then hand it to setuptools.
    setup_kwargs = dict(
        name=NAME,
        version=VERSION,
        packages=setuptools.find_packages(),
        author=AUTHOR,
        # Bug fix: AUTHOR_EMAIL was defined above but never passed through.
        author_email=AUTHOR_EMAIL,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        long_description_content_type="text/markdown",
        license=LICENSE,
        keywords=KEYWORDS,
        url=URL,
        classifiers=CLASSIFIERS,
        install_requires=INSTALL_REQUIRES,
        entry_points=ENTRY_POINTS,
        scripts=SCRIPTS,
        include_package_data=True,
    )
    setuptools.setup(**setup_kwargs)
| fatflake/mmd-twosample | setup.py | setup.py | py | 1,315 | python | en | code | 0 | github-code | 13 |
13646690147 | import datetime as dt
from datetime import datetime
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.latest_only import LatestOnlyOperator
from airflow.utils.dates import days_ago
from airflow.utils.trigger_rule import TriggerRule
from call_back.notify import succes_callback,failure_callback
# Default arguments applied to every task in this DAG; task failures are
# reported to the address below by e-mail.
args = dict(
    owner='airflow',
    depends_on_past=False,
    email=["abhinavk1236@gmail.com"],
    email_on_failure=True,
)
# DAG for the LatestOnlyOperator demo: scheduled daily from 2021-08-08,
# catchup=False so missed intervals are not backfilled, with success/failure
# notification callbacks from call_back.notify.
dags=DAG(
    dag_id="dag_latest_only",
    schedule_interval='@daily',
    start_date=datetime(2021,8,8,0,0,0),
    default_args=args,
    tags=['example'],
    on_success_callback=succes_callback,
    on_failure_callback=failure_callback,
    catchup=False
)
with dags as dag:
    # Per Airflow docs, LatestOnlyOperator skips its downstream tasks for all
    # but the most recent schedule interval.
    latest_only = LatestOnlyOperator(task_id='latest_only')
    task1 = DummyOperator(task_id='task1')
    # task2 = DummyOperator(task_id='task2')
    task3 = DummyOperator(task_id='task3')
    # ALL_DONE: task4 runs once upstream finishes, regardless of skip/failure.
    task4 = DummyOperator(task_id='task4', trigger_rule=TriggerRule.ALL_DONE)
    latest_only >> task1 >> [task3, task4]
# task2 >> [task3, task4] | Abhinavk1243/airflow-learning | airflow/dags/da_latest_only.py | da_latest_only.py | py | 1,103 | python | en | code | 0 | github-code | 13 |
29566529972 | # Simple convolutional NN based classifier of German road traffic signs
# Import libraries
import streamlit as st
from PIL import Image, ImageOps
import cv2
# Set up the basic md for the webpage that will be generated by streamlit
# Page chrome shown before any upload happens.
st.title("German Traffic Sign Classifier")
st.header("German Traffic Sign Classifier")
st.text("Upload A German Traffic Sign: ")
st.text("Sample images are here to download: ")
# Project-local helper wrapping the trained classifier.
from sign_classifier import sign_classifier
# Import the image to be classified:
uploaded_file = st.file_uploader("Choose a traffic sign ...", type="jpg")
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded traffic sign.', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    # Call the trained CNN based model to classify the image
    # ('traffic.h5' is presumably the saved model file — confirm in sign_classifier).
    label = sign_classifier(image,'traffic.h5')
    st.write(label)
| rupindeeplearning/DL_TrafficSigns | app.py | app.py | py | 899 | python | en | code | 0 | github-code | 13 |
74058898898 | import html
import re
from typing import Generator, Iterator, List, Optional, Set, Tuple
from mwparserfromhell import nodes, wikicode
from mwparserfromhell.nodes import extras
from mwparserfromhell.string_mixin import StringMixIn
from mwcomposerfromhell.namespace import (
ArticleNotFound,
ArticleResolver,
CanonicalTitle,
MagicWordNotFound,
ParentContext,
ParserFunctionNotFound,
)
from mwcomposerfromhell.nodes import Wikilink
# The markup for different lists mapped to the list tag and list item tag.
MARKUP_TO_LIST = {
# Unordered list.
"*": ("ul", "li"),
# Ordered list.
"#": ("ol", "li"),
# Definition lists.
";": ("dl", "dt"),
":": ("dl", "dd"),
}
# Tags which should not automatically be wrapped into a paragraph.
_NO_P_TAGS = {
# All block-level elements,
# see https://developer.mozilla.org/en-US/docs/Web/HTML/Block-level_elements
"address",
"article",
"aside",
"blockquote",
"details",
"dialog",
"dd",
"div",
"dl",
"dt",
"fieldset",
"figcaption",
"figure",
"footer",
"form",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"header",
"hgroup",
"hr",
"li",
"main",
"nav",
"ol",
"p",
"pre",
"section",
"table",
"ul",
# Others from trial and error.
"center",
"font",
"object",
}
# Tags that represent a list or list items.
LIST_TAGS = {"ul", "ol", "li", "dl", "dt", "dd"}
# Table markup.
TABLE_ROWS = {"!-", "|-"}
TABLE_CELLS = {"!", "|"}
# One or more line-breaks, including any spaces at the start of lines.
LINE_BREAK_PATTERN = re.compile(r"(\n(?: *\n)*)")
# Patterns used to strip comments.
LINE_BREAK_SPACES_PATTERN = re.compile(r"\n *")
SPACES_LINE_BREAK_PATTERN = re.compile(r" *\n")
# Link trails are any words followed by a space or new-line.
LINK_TRAIL_PATTERN = re.compile(r"^([a-zA-Z]+)\b")
class UnknownNode(Exception):
    """Raised by the visitor when no ``visit_<Type>`` handler exists for a node."""
    pass
class TemplateLoop(Exception):
    """A template loop was found, bail rendering; ``args[0]`` is the template name."""
class HtmlComposingError(Exception):
    """Error type for HTML composition failures (not raised in this module — confirm external use)."""
    pass
class WikiNodeVisitor:
    """Dispatch a parse-tree node to the matching ``visit_<Type>`` method."""

    def visit(
        self,
        node: StringMixIn,
        in_root: bool = False,
        ignore_whitespace: bool = False,
    ) -> str:
        """
        Render *node* by delegating to ``visit_<ClassName>``.

        :param node: The node to handle.
        :param in_root: Whether this node is a direct descendant of the root Wikicode object.
        :param ignore_whitespace: Whether to skip special whitespace handling.
        :return: The result of handling this node (recursively).
        """
        handler = getattr(self, "visit_" + node.__class__.__name__, None)
        if handler is None:
            raise UnknownNode(f"Unknown node type: {node.__class__.__name__}")
        return handler(node, in_root, ignore_whitespace)  # type: ignore[no-any-return]
class WikicodeToHtmlComposer(WikiNodeVisitor):
"""
Format HTML from parsed Wikicode.
Note that this is not currently re-usable.
See https://en.wikipedia.org/wiki/Help:Wikitext for a full definition.
"""
def __init__(
self,
resolver: Optional[ArticleResolver] = None,
red_links: bool = False,
expand_templates: bool = True,
context: Optional[ParentContext] = None,
open_templates: Optional[Set[str]] = None,
):
# Whether to render links to unknown articles as red links or normal links.
self._red_links = red_links
# Whether to expand transcluded templates.
self._expand_templates = expand_templates
self._pending_lists = [] # type: List[str]
# Track the currently open tags.
self._stack = [] # type: List[str]
# Track current templates to avoid a loop.
self._open_templates = open_templates or set()
self._context = context or {}
# A place to lookup templates.
if resolver is None:
resolver = ArticleResolver()
elif not isinstance(resolver, ArticleResolver):
raise ValueError("resolver must be an instance of ArticleResolver")
self._resolver = resolver
def _maybe_open_tag(self, in_root: bool) -> str:
"""
Handle the logic for whether this node gets wrapped in a list or a paragraph.
"""
# If the node is not currently in the "root" Wikicode, nothing is done.
if not in_root:
return ""
# Handle whether there's any lists to open.
if self._pending_lists:
# The overall algorithm for deciding which tags to open and which to
# close is nuanced:
#
# 1. Calculate the portion of lists and list items that are identical.
# 2. Close the end of what doesn't match.
# 3. Open the new tags.
result = ""
# The currently open lists.
stack_lists = [
list_tag for list_tag in self._stack if list_tag in LIST_TAGS
]
# Don't consider the latest list item to open in the comparison, it
# always needs to be opened.
shortest = min([len(stack_lists), len(self._pending_lists) - 1])
# Find the index of the last matching item.
for i in range(shortest):
if stack_lists[i] != self._pending_lists[i]:
break
else:
i = shortest
# Close anything past the matching items.
for stack_node in reversed(stack_lists[i:]):
result += self._close_stack(stack_node)
# Open any items that are left from the pending list.
for tag in self._pending_lists[i:]:
self._stack.append(tag)
result += f"<{tag}>"
# Reset the pending list.
self._pending_lists = []
return result
# Paragraphs do not go inside of other elements.
if not self._stack:
self._stack.append("p")
return "<p>"
# Otherwise, do nothing.
return ""
def _close_stack(self, tag: str) -> str:
"""Close tags that are on the stack. It closes all tags until ``tag`` is found."""
# For the given tag, close all tags behind it (in reverse order).
result = ""
while len(self._stack):
current_tag = self._stack.pop()
result += f"</{current_tag}>"
if current_tag == tag:
break
return result
def _get_last_table(self) -> int:
"""Return the index in the stack of the most recently opened table."""
# Find the part of the stack since the last table was opened.
last_table = -1
for it, stack_tag in enumerate(self._stack):
if stack_tag == "table":
last_table = it
# A table should always be found.
assert last_table != -1
return last_table
def _fix_nodes(
self,
nodes_iterator: Iterator[StringMixIn],
) -> Generator[StringMixIn, None, None]:
"""
Iterate through nodes making some fixes:
* Skip comment nodes.
* Combine adjacent text nodes.
* Handle link trails.
* Add proper spacing between table nodes.
"""
prev_node = None
for node in nodes_iterator:
# Skip comment nodes.
if isinstance(node, nodes.Comment):
continue
# Convert Wikilinks to the mwcomposerfromhell subclass.
if isinstance(node, nodes.Wikilink):
node = Wikilink(node.title, node.text)
# Two adjacent (after removing comment nodes) text nodes are combined.
if isinstance(prev_node, nodes.Text) and isinstance(node, nodes.Text):
# A removed comment strips any spaces on the line it was on,
# plus a single newline. In order to get all whitespace, look at
# both text nodes.
prev = LINE_BREAK_SPACES_PATTERN.sub("", prev_node.value, count=1)
current = SPACES_LINE_BREAK_PATTERN.sub("\n", node.value, count=1)
prev_node = nodes.Text(value=prev + current)
continue
# The start of the text from a text node can be added to a link
# occurring before it.
elif isinstance(prev_node, Wikilink) and isinstance(node, nodes.Text):
# Try to find a link trail and apply it to the previous link.
# TODO This should NOT apply if the previous link was to an image.
match = LINK_TRAIL_PATTERN.match(node.value)
if match:
# The link gets the link trail added to the text.
prev_node = Wikilink(prev_node.title, prev_node.text, match[1])
# The text gets the link trailed removed from it.
node = nodes.Text(value=node.value[len(match[1]) :])
# Adjacent table header or data nodes have a blank line between them.
elif isinstance(prev_node, nodes.Tag) and isinstance(node, nodes.Tag):
if (
prev_node.wiki_markup in TABLE_CELLS
and node.wiki_markup in TABLE_CELLS
):
yield prev_node
prev_node = nodes.Text(value="\n")
# Otherwise, yield the previous node and store the current one.
if prev_node:
yield prev_node
prev_node = node
# Yield the last node.
if prev_node:
yield prev_node
def _get_edit_link(self, canonical_title: CanonicalTitle, text: str) -> str:
"""Generate a link to an article's edit page."""
url = self._resolver.get_edit_url(canonical_title)
title = canonical_title.full_title + " (page does not exist)"
return f'<a href="{url}" class="new" title="{title}">' + text + "</a>"
def visit_Wikicode(
self,
node: wikicode.Wikicode,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
return "".join(
map(
lambda n: self.visit(n, in_root, ignore_whitespace),
self._fix_nodes(node.nodes),
)
)
def visit_Tag(
self,
node: nodes.Tag,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
result = ""
# List tags require a parent tag to be opened first, but get grouped
# together if one is already open.
if node.wiki_markup in MARKUP_TO_LIST:
list_tag, item_tag = MARKUP_TO_LIST[node.wiki_markup]
# Mark that this tag needs to be opened.
self._pending_lists.extend((list_tag, item_tag))
# ul and ol cannot be inside of a dl and a dl cannot be in a ul or
# ol.
if node.wiki_markup in ("*", "#"):
if "dl" in self._stack:
result += self._close_stack("dl")
else:
if "ol" in self._stack:
result += self._close_stack("ol")
if "ul" in self._stack:
result += self._close_stack("ul")
else:
tag = self.visit(node.tag).lower()
# nowiki tags do not end up in the resulting content, their contents
# should appears as if this tag does not exist.
if tag == "nowiki":
if node.contents:
return self.visit(node.contents, in_root)
return ""
# noinclude and includeonly tags do not end up in the resulting
# content. Whether or not their contents should appear depends on
# whether we are currently being being transcluded.
#
# See https://www.mediawiki.org/wiki/Transclusion
if tag == "noinclude":
if not self._open_templates and node.contents:
return self.visit(node.contents)
return ""
if tag == "includeonly":
if self._open_templates and node.contents:
return self.visit(node.contents)
return ""
# Maybe wrap the tag in a paragraph. This applies to inline tags,
# such as bold and italics, and line breaks.
if tag not in _NO_P_TAGS:
result += self._maybe_open_tag(in_root)
# If we're opening a table header or data element, ensure that a row
# is already open.
if node.wiki_markup in TABLE_CELLS:
# Open a new row if not currently in a row.
try:
self._stack.index("tr", self._get_last_table())
except ValueError:
self._stack.append("tr")
result += "<tr>\n"
# Because we sometimes open a new row without the contents directly
# tied to it (see above), we need to ensure that old rows are closed
# before opening a new one.
elif node.wiki_markup in TABLE_ROWS:
# If a row is currently open, close it.
try:
self._stack.index("tr", self._get_last_table())
result += self._close_stack("tr") + "\n"
except ValueError:
pass
# Certain tags are blacklisted from being parsed and get escaped instead.
valid_tag = tag not in {"a"}
# Create an HTML tag.
stack_open = "<" + tag
for attr in node.attributes:
# Extensions attributes should not be expanded. Replace the
# value with a Text node (instead of Wikicode).
#
# TODO It would be better to handle this in visit_Attribute, but
# that doens't have enough context to do so currently.
if tag == "pre":
attr = extras.Attribute(
name=attr.name,
value=nodes.Text(value=str(attr.value)),
quotes=attr.quotes,
pad_first=attr.pad_first,
pad_before_eq=attr.pad_before_eq,
pad_after_eq=attr.pad_after_eq,
)
stack_open += self.visit(attr)
if node.self_closing:
stack_open += " /"
stack_open += ">"
if not valid_tag:
stack_open = html.escape(stack_open)
result += stack_open
# The documentation says padding is BEFORE the final >, but for
# table nodes it seems to be the padding after it
if node.wiki_markup in {"{|"} | TABLE_ROWS:
result += node.padding
# If this is not a self-closing tag, add it to the stack.
if not node.self_closing:
self._stack.append(tag)
# Handle anything inside of the tag.
if node.contents:
# Ignore whitespace if it is already being ignored or this is a
# <pre> tag.
ignore_whitespace = ignore_whitespace or tag == "pre"
result += self.visit(node.contents, ignore_whitespace=ignore_whitespace)
# If this is not self-closing, close this tag and any other open tags
# after it.
# TODO This only happens to work because lists are not self-closing.
if not node.self_closing:
stack_end = self._close_stack(tag)
if not valid_tag:
stack_end = html.escape(stack_end)
result += stack_end
return result
def visit_Attribute(
self,
node: extras.Attribute,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
# Render the name of the attribute.
name = self.visit(node.name).lower()
if node.value is not None:
# Render the value, and then sanitize it a bit:
# * Remove white space prefix / suffix.
# * Replace new lines with spaces.
# * Undo the HTML entity conversion for ampersands.
value = self.visit(node.value)
value = value.strip().replace("\n", " ").replace("&", "&")
else:
# The value defaults to a blank string, if not provided.
value = ""
# If there's a trailing / on the tag, it is really a self-closing tag.
if name and name[-1] == "/":
name = name[:-1]
# Return the attribute.
return f'{node.pad_first}{name}="{value}"'
def visit_Heading(
self,
node: nodes.Heading,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
return f"<h{node.level}>" + self.visit(node.title) + f"</h{node.level}>"
def visit_Wikilink(
self,
node: Wikilink,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
result = self._maybe_open_tag(in_root)
# Get the rendered title.
title = self.visit(node.title)
canonical_title = self._resolver.resolve_article(title, default_namespace="")
url = self._resolver.get_article_url(canonical_title)
# The text is either what was provided or the non-canonicalized title.
if node.text:
text = self.visit(node.text)
else:
text = title
text += node.trail or ""
# Figure out whether the article exists or not.
article_exists = True
if self._red_links:
try:
self._resolver.get_article(title, default_namespace="")
except ArticleNotFound:
article_exists = False
# Display text can be optionally specified. Fall back to the article
# title if it is not given.
if article_exists:
return (
result
+ f'<a href="{url}" title="{canonical_title.title}">'
+ text
+ "</a>"
)
else:
return result + self._get_edit_link(canonical_title, text)
def visit_ExternalLink(
self,
node: nodes.ExternalLink,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
"""
Generate the HTML for an external link.
External links come in a few forms:
* A raw link: https://en.wikipedia.org/
* A bracketed link: [https://en.wikipedia.org/]
* A link with a title: [https://en.wikipedia.org/ Wikipedia]
"""
result = self._maybe_open_tag(in_root)
# Display text can be optionally specified. Fall back to the URL if it
# is not given.
text = self.visit(node.title or node.url)
extra = ""
if not node.brackets:
extra = 'rel="nofollow" class="external free" '
return (
result
+ "<a "
+ extra
+ 'href="'
+ self.visit(node.url)
+ '">'
+ text
+ "</a>"
)
def visit_Comment(
self,
node: nodes.Comment,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
"""HTML comments just get ignored."""
return ""
def visit_Text(
self,
node: nodes.Text,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
"""
Handle a text element, including HTML escaping contents.
This has some special logic in it to deal with spacing, this includes:
* Handling of preformatted text.
* Paragraphs.
"""
# Escape HTML entities in the text.
text_result: str = html.escape(node.value, quote=False)
# Certain tags avoid any special whitespace handling, e.g. <pre> tags
# and template keys. Just return the contents after escaping HTML
# entities.
if ignore_whitespace:
return text_result
result = ""
# Each line of content is handled separately.
lines = list(
filter(None, text_result.splitlines(keepends=True))
) # type: List[str]
# This needs to be a new-line and start with a space.
start = 0
in_section_pre = False
for it, line in enumerate(lines):
# The first line can only be preformatted if:
# * It is in the root Wikicode object.
# * The stack is empty.
# * There are not any pending lists.
if it == 0 and (not in_root or self._stack or self._pending_lists):
continue
# If the line is purely whitespace (+ a new-line) then it is part
# of whatever the current section is.
if len(line) > 1 and not line.strip():
continue
# Calculate when text changes to/from normal text to preformatted
# text.
#
# The first clause describes what is necessary for preformatted
# text, a line starting with a space, some text content (followed by
# a new-line).
#
# Note that lines that are purely whitespace are caught above.
if (len(line) > 2 and line[0] == " ") != in_section_pre:
# If this is the start of a new section, add the previous one.
if in_section_pre:
# The first space at the start of each line gets removed.
result += (
"<pre>"
+ "".join(map(lambda l: l[1:], lines[start:it]))
+ "</pre>"
)
else:
result += self._handle_text("".join(lines[start:it]), in_root)
start = it
in_section_pre = not in_section_pre
# Need to handle the final section.
if in_section_pre:
# The first space at the start of each line gets removed.
result += (
"<pre>" + "".join(map(lambda l: l[1:], lines[start:])) + "</pre>\n"
)
else:
result += self._handle_text("".join(lines[start:]), in_root)
return result
def _handle_text(self, text_result: str, in_root: bool) -> str:
"""The raw text node handler, this has the logic for opening paragraphs."""
result = ""
# Handle newlines, which modify paragraphs and how elements get closed.
# Filter out blank strings after splitting on newlines.
chunks = list(filter(None, LINE_BREAK_PATTERN.split(text_result)))
for it, chunk in enumerate(chunks):
# Each chunk will either be all newlines, or just content.
if "\n" in chunk:
# Lines which only consist of whitespace get normalized to empty.
line_breaks = len(chunk.replace(" ", ""))
if it > 0 or line_breaks == 1 or line_breaks == 2:
result += "\n"
# If more than two newlines exist, close previous paragraphs.
if line_breaks >= 2:
result += self._close_stack("p")
# If this node isn't nested inside of anything else it might be
# wrapped in a paragraph.
if not self._stack and in_root:
# A paragraph with a line break is added for every two
# additional newlines.
additional_p = max((line_breaks - 2) // 2, 0)
result += additional_p * "<p><br />\n</p>"
# If there is more content after this set of newlines, or
# this is the last chunk of content and there are 3 line
# breaks.
last_chunk = it == len(chunks) - 1
if not last_chunk or (last_chunk and line_breaks == 3):
result += "<p>"
self._stack.append("p")
# An odd number of newlines get a line break inside of
# the paragraph.
if line_breaks > 1 and line_breaks % 2 == 1:
result += "<br />\n"
else:
result += self._maybe_open_tag(in_root)
result += chunk
return result
def visit_Template(
self,
node: nodes.Template,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
"""
Handle a transclusion. This can be one of several things (in order):
1. A substitution.
2. A variable.
3. A call to a parser function.
4. A template transclusion.
"""
# Render the key into a string. This handles weird nested cases, e.g.
# {{f{{text|oo}}bar}}.
template_name = self.visit(node.name).strip()
# Because each parameter's name and value might include other templates,
# etc. these need to be rendered in the context of the template call.
context = [] # type: List[Tuple[str, str, bool]]
for param in node.params:
# See https://meta.wikimedia.org/wiki/Help:Template#Parameters
# for information about stripping whitespace around parameters.
param_name = self.visit(param.name, ignore_whitespace=True).strip()
param_value = self.visit(param.value)
# Only named parameters strip whitespace around the value.
if param.showkey:
param_value = param_value.strip()
# Append them to a list so the order is kept the same.
context.append((param_name, param_value, param.showkey))
# Handle subst / safesubst.
start, _, more = template_name.partition(":")
# TODO This is a bit hacked together right now.
if start.strip() in ("subst", "safesubst"):
template_name = more
if self._expand_templates:
return str(node)
# Check if a variable is being used.
#
# https://www.mediawiki.org/wiki/Help:Magic_words#Variables
try:
function = self._resolver.get_magic_word(template_name)
except MagicWordNotFound:
pass
else:
return self._maybe_open_tag(in_root) + function()
# if the name starts with a # it is a parser function.
if template_name and template_name[0] == "#":
function_name, _, param = template_name.partition(":")
try:
parser_function = self._resolver.get_parser_function(function_name)
except ParserFunctionNotFound:
# If the parser function isn't found for whatever reason the
# raw text gets used.
return str(node)
else:
# Call the function with the current template call (and any
# parent template call information).
return self._maybe_open_tag(in_root) + parser_function(
param, context, self._context
)
# Otherwise, this is a normal template.
# Ensure that we don't end up in an infinite loop of templates.
# TODO This should check the canonical template name.
if template_name in self._open_templates:
raise TemplateLoop(template_name)
self._open_templates.add(template_name)
try:
template = self._resolver.get_article(template_name, "Template")
except ArticleNotFound as e:
# Template was not found.
result = self._maybe_open_tag(in_root)
# When transcluding a non-template
if self._red_links:
# Render an edit link.
canonical_title = e.args[0]
# Remove it from the open templates.
self._open_templates.remove(template_name)
return result + self._get_edit_link(canonical_title, template_name)
else:
# Remove it from the open templates.
self._open_templates.remove(template_name)
# Otherwise, simply output the template call.
return result + self._maybe_open_tag(in_root) + str(node)
else:
# Render the template in only the context of its parameters. Note
# that parameters might shadow each other, but that's OK.
template_context = {c[0]: c[1] for c in context}
composer = WikicodeToHtmlComposer(
resolver=self._resolver,
red_links=self._red_links,
expand_templates=self._expand_templates,
context=template_context,
open_templates=self._open_templates,
)
result = composer.visit(template, in_root and self._expand_templates)
# Ensure the stack is closed at the end.
result += composer.close_all()
# Remove it from the open templates.
self._open_templates.remove(template_name)
return result
def visit_Argument(
self,
node: nodes.Argument,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
# There's no provided values, so just render the string.
# Templates have special handling for Arguments.
param_name = self.visit(node.name).strip()
# Get the parameter's value from the context (the call to the
# template we're rendering).
try:
return self._context[param_name]
except KeyError:
# No parameter with this name was given.
# Use a default value if it exists, otherwise just render the
# parameter as a string.
if node.default is not None:
# Render the default value.
return self.visit(node.default)
else:
return str(node)
def visit_HTMLEntity(
self,
node: nodes.HTMLEntity,
in_root: bool = False,
ignore_whitespace: bool = False,
) -> str:
# Write the original HTML entity.
return self._maybe_open_tag(in_root) + str(node)
    def compose(self, node: StringMixIn) -> str:
        """
        Convert Wikicode or Node objects to HTML.

        Public entry point: renders ``node`` as a root-level element, closes
        any tags still open afterwards, and turns a detected template loop
        into error markup (mirroring MediaWiki's "Template loop detected"
        message) instead of propagating the exception.
        """
        try:
            return self.visit(node, True) + self.close_all()
        except TemplateLoop as e:
            # The template name is the first argument.
            template_name = e.args[0]
            # TODO Should this create an ExternalLink and use that?
            canonical_title = self._resolver.resolve_article(template_name, "Template")
            url = self._resolver.get_article_url(canonical_title)
            return (
                '<p><span class="error">Template loop detected: '
                + '<a href="{url}" title="{template_name}">{template_name}</a>'
                + "</span>\n</p>"
            ).format(url=url, template_name=canonical_title.full_title)
def close_all(self) -> str:
"""Close all items on the stack."""
return "".join(f"</{current_tag}>" for current_tag in reversed(self._stack))
| clokep/mwcomposerfromhell | mwcomposerfromhell/composer.py | composer.py | py | 31,587 | python | en | code | 7 | github-code | 13 |
13509691235 | '''
TEST DESCRIPTION
Testing Resource: http://docs.python.org/2/library/unittest.html
Created on )__________(, 2013
@author: Troll
'''
import unittest # This is the main resource
# Example resource to test
# from utils.message.processing import remove_tags as tagRemover
# Testing class: This is the testing suite
class StupidTest(unittest.TestCase):
    """Minimal PyUnit example suite (see the module docstring)."""
    # setUp is the "test fixture": it runs before every test method.
    def setUp(self):
        self.seq = range(10)
    # Every test method must start with "test" so that PyUnit discovers it.
    def test_matt_is_dumb(self):
        empty = ''
        # Bug fix: assertEqual replaces the deprecated assertEquals alias,
        # which was removed in Python 3.12.
        self.assertEqual(empty, empty)
        self.assertTrue(True, "weiner")
# Standard unittest entry point when this file is executed directly.
if __name__ == '__main__':
unittest.main() | Robrowski/TrollUniversity | TrollUniversity/test/testing_template.py | testing_template.py | py | 839 | python | en | code | 1 | github-code | 13 |
6321244588 | import pandas as pd
from git import Repo
import re, signal, sys, time, pwn, pdb, os
def handler_signal(signum, frame):
    """SIGINT handler: announce the interrupt and exit with status 1.

    Bug fix: the first parameter was named `signal`, shadowing the imported
    `signal` module; handlers are invoked positionally, so renaming is safe.
    """
    print('\n\n[!] Out ..........\n')
    sys.exit(1)
def extract(url):
    """Return every commit on the 'develop' branch of the repository at *url*."""
    repository = Repo(url)
    return list(repository.iter_commits('develop'))
def transform(commits, KEY_WORDS):
    """
    Return one formatted line per commit whose message matches at least one
    of KEY_WORDS (case-insensitive regex search).

    Bug fix: the original appended the same commit once per matching keyword,
    producing duplicate lines when several keywords hit one message; the
    redundant str() wrapper around the f-string is also gone.
    """
    coincidencias = []
    for commit in commits:
        if any(re.search(word, commit.message, re.IGNORECASE) for word in KEY_WORDS):
            coincidencias.append(f'Commit: {commit.hexsha} -> {commit.message}')
    return coincidencias
def load(coincidencias):
    """Print each matching commit line to stdout."""
    for match_line in coincidencias:
        print(match_line)
if __name__ == '__main__':
    # Ctrl-C triggers handler_signal, which prints a farewell and exits.
    signal.signal(signal.SIGINT, handler_signal)
    # Repository path and the keywords to scan commit messages for.
    DIR_REPO = "./skale/skale-manager"
    KEY_WORDS = ['credentials','password','key'] #,'password','username','key'
    commits = extract(DIR_REPO)
    coincidencias = transform(commits, KEY_WORDS)
load(coincidencias) | NatLey30/Git-Leaks | git_leaks.py | git_leaks.py | py | 954 | python | en | code | 0 | github-code | 13 |
73325194896 | from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework import views
from core.permissions import MpinAuthenticated
class AuthRequiredView(object):
    """
    Base authentication mixin to be inherited by every view that requires
    authenticated access.

    To access resources inherited from this class, the client needs to pass
    the authentication token in the request headers.

    @author: Deep Adeshra <dp974154@dal.ca>
    """
    # DRF checks these on every request: the project's MPIN permission gate
    # plus standard token authentication.
    permission_classes = [MpinAuthenticated]
    authentication_classes = [TokenAuthentication]
class AbstractBaseAPIView(views.APIView):
    """
    Base API view class to be inherited across the application to remove
    redundant request-body validation code.

    Subclasses set ``serializer_class``; ``post``/``patch`` then validate the
    incoming payload automatically.

    @author: Deep Adeshra <dp974154@dal.ca>
    """

    class Meta:
        abstract = True

    def __init__(self, **kwargs):
        self.serializer = None
        super().__init__(**kwargs)

    def validate_request_data(self, request, **kwargs):
        """
        Validate the request body with the configured serializer class,
        passing the request in the serializer context so it can be accessed
        there. Raises a validation error for invalid payloads.
        """
        # Bug fix: getattr() now has a default — previously a subclass
        # without a `serializer_class` attribute raised AttributeError
        # instead of skipping validation.
        if getattr(self, 'serializer_class', None) is None:
            return
        self.serializer = self.serializer_class(data=request.data,
                                                context={'request': request})
        self.serializer.is_valid(raise_exception=True)

    def post(self, request, **kwargs):
        """Validate the POST payload with the configured serializer."""
        return self.validate_request_data(request, **kwargs)

    def patch(self, request, **kwargs):
        """Validate the PATCH payload with the configured serializer."""
        return self.validate_request_data(request, **kwargs)
| Ayushverma8/Group-Project_CSCI-5308 | password_vault_backend/core/views.py | views.py | py | 1,926 | python | en | code | 1 | github-code | 13 |
31665103881 | #Print("N^P=", power (N,P))
from collections import defaultdict
import sys
sys.setrecursionlimit(5000)
def def_val():
    """Default factory for the memo table (kept for backward compatibility)."""
    return False
memo = defaultdict(def_val)
def power(N, P):
    """
    Return N**P for a non-negative integer P, memoised on (N, P).

    Bug fixes versus the original: the cache was keyed on N alone (so a
    cached base returned a stale power for a different exponent), and the
    cache-hit branch fell through without `return`, yielding None on any
    repeated call.
    """
    if P == 0:
        return 1
    if P == 1:
        return N
    key = (N, P)
    # Membership testing does not trigger the defaultdict factory.
    if key not in memo:
        memo[key] = N * power(N, P - 1)
    return memo[key]
import math
# Read base and exponent as two whitespace-separated integers from stdin.
N,P = map(int,input().split())
#X = int(input())
#arr = list(map(int,input().split()))
#ans = int(math.pow(N,P))
#ans = N**P
ans = power(N,P)
# Output format: "N^P = result".
print(str(N)+"^"+str(P),"=",ans)
| tuhiniris/Coding-and-Stuff | bsky.py | bsky.py | py | 513 | python | en | code | 0 | github-code | 13 |
22404208413 | import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from LoadData import DataUtils
sess = tf.InteractiveSession()
def main():
    """Load the raw MNIST idx files and return (train_X, train_Y, test_X, test_Y)."""
    trainFile_x = './MNIST_data/train-images.idx3-ubyte'
    trainFile_y = './MNIST_data/train-labels.idx1-ubyte'
    testFile_x = './MNIST_data/t10k-images.idx3-ubyte'
    # Bug fix: this path had a stray 'MNIST/' prefix, unlike the other three,
    # pointing at a directory that the rest of the file never uses.
    testFile_y = './MNIST_data/t10k-labels.idx1-ubyte'
    train_X = DataUtils( fileName = trainFile_x ).getImage()
    train_Y = DataUtils( fileName = trainFile_y ).getLabel()
    test_X = DataUtils( testFile_x ).getImage()
    test_Y = DataUtils( testFile_y ).getLabel()
    return train_X, train_Y, test_X, test_Y
def data_test():
    """Visual smoke test: display one training digit and print its label encodings."""
    # Loading the dataset
    train_X, train_Y, test_X, test_Y = fit_data()
    print(train_X.shape, train_Y.shape, test_X.shape, test_Y.shape)
    index = 0
    # Columns are flattened images (presumably 28x28 — the reshape below
    # assumes 28 rows); pull one column out for display.
    image = train_X[:, index]
    print( image.shape )
    image = image.reshape(28, -1)
    print( image.shape )
    plt.imshow( image )
    plt.show()
    # One-hot label vector vs. the decoded class index.
    print( "Y = " + str( np.squeeze( train_Y[:, index] ) ) )
    print( "Y = " + str( np.argmax( train_Y[:, index] ) ) )
def fit_data():
    """Arrange the data for the network: X transposed to (features, m),
    Y one-hot encoded to shape (10, m)."""
    train_X, train_Y, test_X, test_Y = main()
    train_X, test_X, = train_X.T, test_X.T
    train_Y = one_hot_matrix( train_Y, 10 )
    test_Y = one_hot_matrix( test_Y, 10 )
    print ( train_X.shape, train_Y.shape, test_X.shape, test_Y.shape )
    return train_X, train_Y, test_X, test_Y
def one_hot_matrix( labels, C ):
    """One-hot encode `labels` into C classes along axis 0 (result shape (C, m)).

    Evaluated eagerly on the module-level InteractiveSession `sess`.
    """
    C = tf.constant( C, name = "C" )
    one_hot_matrix = tf.one_hot( labels, C, axis = 0 )
    one_hot = sess.run( one_hot_matrix )
    return one_hot
def create_placeholeers( n_x, n_y ):
    """Create float32 placeholders X (n_x, None) and Y (n_y, None); the
    batch dimension is left open.

    (The misspelled name is kept because callers throughout this file use it.)
    """
    X = tf.placeholder( tf.float32, [n_x, None] )
    Y = tf.placeholder( tf.float32, [n_y, None] )
    return X, Y
def initialize_parameters():
    """Create the weights/biases of a 784-40-20-10 fully connected network.

    Weights use Xavier initialization; biases start at zero.
    """
    # tf.set_random_seed( 1 )
    W1 = tf.get_variable( "W1", [40, 784], initializer = tf.contrib.layers.xavier_initializer() )
    b1 = tf.get_variable( "b1", [40, 1], initializer = tf.zeros_initializer() )
    W2 = tf.get_variable( "W2", [20, 40], initializer = tf.contrib.layers.xavier_initializer() )
    b2 = tf.get_variable( "b2", [20, 1], initializer = tf.zeros_initializer() )
    W3 = tf.get_variable( "W3", [10, 20], initializer = tf.contrib.layers.xavier_initializer() )
    b3 = tf.get_variable( "b3", [10, 1], initializer = tf.zeros_initializer() )
    parameters = {"W1" : W1,
                  "b1" : b1,
                  "W2" : W2,
                  "b2" : b2,
                  "W3" : W3,
                  "b3" : b3}
    return parameters
def forward_propagation( X, parameters ):
    """Forward pass LINEAR -> RELU -> LINEAR -> RELU -> LINEAR.

    Returns the logits Z3 of shape (10, m); softmax is applied later
    inside compute_cost().
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']
    Z1 = tf.matmul( W1, X ) + b1
    A1 = tf.nn.relu( Z1 )
    Z2 = tf.matmul( W2, A1 ) + b2
    A2 = tf.nn.relu( Z2 )
    Z3 = tf.matmul( W3, A2 ) + b3
    return Z3
def compute_cost( Z3, Y ):
    """Mean softmax cross-entropy between logits Z3 and one-hot labels Y.

    Both tensors are transposed to (m, classes), the layout expected by
    softmax_cross_entropy_with_logits.
    """
    logits = tf.transpose( Z3 )
    labels = tf.transpose( Y )
    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits = logits, labels = labels ) )
    return cost
def model( train_X, train_Y, test_X, test_Y, learning_rate = 0.0001, num_epochs = 1000, minibatch_size = 32, print_cost = True ):
    """Train the 3-layer softmax classifier with Adam on mini-batches.

    Plots the cost curve, prints train/test accuracy, and returns the
    trained parameter values as numpy arrays.
    """
    ops.reset_default_graph()
    ( n_x, m ) = train_X.shape
    n_y = train_Y.shape[0]
    costs = []
    X, Y = create_placeholeers( n_x, n_y )
    parameters = initialize_parameters()
    Z3 = forward_propagation( X, parameters )
    cost = compute_cost( Z3, Y )
    optimizer = tf.train.AdamOptimizer( learning_rate = learning_rate ).minimize( cost )
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run( init )
        for epoch in range( num_epochs ):
            epoch_cost = 0
            num_minibatches = int( m / minibatch_size )
            minibatches = random_mini_batches( train_X, train_Y, minibatch_size )
            for minibatch in minibatches:
                ( minibatch_X, minibatch_Y ) = minibatch
                _, minibatch_cost = sess.run( [optimizer, cost], feed_dict = {X : minibatch_X, Y : minibatch_Y})
                epoch_cost += minibatch_cost / num_minibatches  # accumulate the average cost for this epoch
            if print_cost == True and epoch % 100 == 0:
                print ( "Cost after epoch %i: %f" % ( epoch, epoch_cost ) )
            if print_cost == True and epoch % 5 == 0:
                costs.append( epoch_cost )
        plt.plot( np.squeeze( costs ) )
        plt.ylabel( "cost" )
        plt.xlabel( "iteration ( per tens)" )
        plt.title( "Learning rate = " + str( learning_rate ) )
        plt.show()
        # Materialize the trained variables as numpy arrays.
        parameters = sess.run( parameters )
        print ( "Parameters have been trained" )
        # argmax defaults to axis 0 here — the class dimension of (10, m).
        correct_prediction = tf.equal( tf.argmax( Z3 ), tf.argmax( Y ) )
        accuracy = tf.reduce_mean( tf.cast( correct_prediction, "float" ) )
        print( "Train Accuracy:", accuracy.eval( {X : train_X, Y : train_Y} ) )
        print( "Test Accuracy:", accuracy.eval( {X : test_X, Y : test_Y} ) )
        sess.close()
    return parameters
def random_mini_batches(X, Y, mini_batch_size):
    """Shuffle the columns of (X, Y) in unison and split them into
    column-wise mini-batches of at most `mini_batch_size` examples.

    X is an (n_x, m) feature matrix and Y an (n_y, m) label matrix.
    Returns a list of (mini_X, mini_Y) tuples; the final batch is smaller
    when m is not a multiple of mini_batch_size.
    """
    m = X.shape[1]
    # One shared permutation keeps features and labels aligned.
    order = np.random.permutation(m)
    X_shuf = X[:, order]
    Y_shuf = Y[:, order]
    # Slice [start, start+size) column windows; numpy slicing truncates
    # the last window automatically, covering the "end case".
    return [
        (X_shuf[:, start:start + mini_batch_size],
         Y_shuf[:, start:start + mini_batch_size])
        for start in range(0, m, mini_batch_size)
    ]
def predict( parameters, X1, index ):
    """Print the predicted class for column `index` of the input batch X1.

    `parameters` should be the trained numpy values returned by model();
    presumably they are folded into the graph as constant tensors by
    forward_propagation — TODO confirm.
    """
    X, Y = create_placeholeers(X1.shape[0], 0)
    Z3 = forward_propagation(X, parameters)
    perdictions = tf.argmax(Z3[:, index])
    with tf.Session() as sess:
        print("Machine:", perdictions.eval( {X : X1} ) )
def print_iamge( X, Y, index ):
    """Display column `index` of X as a 28x28 image and print its label.

    (The misspelled name is kept for compatibility with existing callers.)
    """
    image = X[:, index]
    image = image.reshape( 28, -1 )
    plt.imshow(image)
    plt.show()
    print("Y = " + str( np.argmax( Y[:, index] ) ) )
# init = tf.global_variables_initializer()
# sess.run(init)
# train_X, train_Y, test_X, test_Y = fit_data()
# predict( initialize_parameters(), test_X, 0 )
# print_iamge( test_X, test_Y, 6 )
| IronMastiff/MNIST | Util.py | Util.py | py | 6,907 | python | en | code | 2 | github-code | 13 |
71223525777 | #!/usr/bin/env python3
import sys, string
# Read the input file given on the command line, one item string per line.
with open(sys.argv[1]) as f:
    data = [ l.strip() for l in f ]
score = 0
for l in data:
    # Split each line into two equal halves.
    a, b = l[:len(l)//2], l[len(l)//2:]
    # Unpack the single character common to both halves (raises ValueError
    # if the intersection does not contain exactly one item).
    c, = set(a) & set(b)
    # Priority: a-z -> 1..26, A-Z -> 27..52 (index into ascii_letters + 1).
    score += string.ascii_letters.index(c) + 1
print(f"Part 1: {score}")
score = 0
for i in range(0,len(data),3):
    # Single character common to each group of three consecutive lines.
    c, = set(data[i]) & set(data[i+1]) & set(data[i+2])
    score += string.ascii_letters.index(c) + 1
print(f"Part 2: {score}")
| ivanpesin/aoc | 2022/2022.03/sol.py | sol.py | py | 446 | python | en | code | 0 | github-code | 13 |
22524185294 | import binascii
import glob
import gzip
import itertools
import math
import multiprocessing as mp
import numpy as np
import queue
import random
import shufflebuffer as sb
import struct
import sys
import threading
import time
import unittest
# 16 planes, 1 side to move, 1 x 362 probs, 1 winner = 19 lines
# Number of text lines per position record in the v1 training format.
DATA_ITEM_LINES = 16 + 1 + 1 + 1
def remap_vertex(vertex, symmetry):
    """
    Remap a go board coordinate according to a symmetry.

    `symmetry` is in [0, 8): values >= 4 transpose the board first, then
    bit 0 mirrors the x axis and bit 1 mirrors the y axis.
    """
    assert 0 <= vertex < 361
    y, x = divmod(vertex, 19)
    if symmetry >= 4:
        x, y = y, x
        symmetry -= 4
    if symmetry & 1:
        x = 18 - x
    if symmetry & 2:
        y = 18 - y
    return y * 19 + x
# Interface for a chunk data source.
class ChunkDataSrc:
    """Trivial chunk-data source backed by an in-memory list.

    next() hands items back from the end of the list, one at a time,
    returning None once the list is exhausted.
    """
    def __init__(self, items):
        self.items = items
    def next(self):
        # pop() raises on an empty list, so guard first.
        return self.items.pop() if self.items else None
class ChunkParser:
    """Multiprocess training-data pipeline.

    Child worker processes pull chunkdata from `chunkdatasrc`, convert it
    to fixed-size v2 records (applying a random board symmetry), and send
    them back over pipes. The parent shuffles the records and packs them
    into batches of raw tensor bytes — see parse().
    """
    def __init__(self, chunkdatasrc, shuffle_size=1, sample=1,
            buffer_size=1, batch_size=256, workers=None):
        """
        Read data and yield batches of raw tensors.

        'chunkdatasrc' is an object yielding chunkdata.
        'shuffle_size' is the size of the shuffle buffer.
        'sample' is the rate to down-sample.
        'workers' is the number of child workers to use.
        ('buffer_size' is accepted but currently unused.)

        The data is represented in a number of formats through this dataflow
        pipeline. In order, they are:

        chunk: The name of a file containing chunkdata.

        chunkdata: type Bytes. Either multiple records of v1 format,
        or multiple records of v2 format.

        v1: The original text format describing a move. 19 lines long.
        VERY slow to decode. Typically around 2500 bytes long.
        Used only for backward compatibility.

        v2: Packed binary representation of v1. Fixed length,
        no record separator. The most compact format.
        Data in the shuffle buffer is held in this
        format as it allows the largest possible shuffle buffer.
        Very fast to decode. Preferred format to use on disk.
        2176 bytes long.

        raw: A byte string holding raw tensors concatenated together.
        This is used to pass data from the workers to the parent.
        Exists because TensorFlow doesn't have a fast way to
        unpack bit vectors. 7950 bytes long.
        """
        # Build probability reflection tables.
        # The last element is 'pass' and is identity mapped.
        self.prob_reflection_table = [
            [remap_vertex(vertex, sym)
            for vertex in range(361)]+[361] for sym in range(8)]
        # Build full 16-plane reflection tables.
        self.full_reflection_table = [
            np.array([remap_vertex(vertex, sym) + p * 361
            for p in range(16) for vertex in range(361)])
            for sym in range(8)]
        # Convert both to np.array.
        # This avoids a conversion step when they're actually used.
        self.prob_reflection_table = [
            np.array(x, dtype=np.int64) for x in self.prob_reflection_table ]
        self.full_reflection_table = [
            np.array(x, dtype=np.int64) for x in self.full_reflection_table ]
        # Build the all-zeros and all-ones flat planes, used for color-to-move.
        self.flat_planes = [ b'\1'*361 + b'\0'*361, b'\0'*361 + b'\1'*361 ]
        # set the down-sampling rate
        self.sample = sample
        # set the mini-batch size
        self.batch_size = batch_size
        # set number of elements in the shuffle buffer.
        self.shuffle_size = shuffle_size
        # Start worker processes, leave 2 for TensorFlow
        if workers is None:
            workers = max(1, mp.cpu_count() - 2)
        print("Using {} worker processes.".format(workers))
        # Start the child workers running
        self.readers = []
        for _ in range(workers):
            read, write = mp.Pipe(duplex=False)
            mp.Process(target=self.task,
                       args=(chunkdatasrc, write),
                       daemon=True).start()
            self.readers.append(read)
            # Parent keeps only the read end; closing the write end here
            # lets recv_bytes() raise EOFError once the child exits.
            write.close()
        self.init_structs()
    def init_structs(self):
        # struct.Struct doesn't pickle, so it needs to be separately
        # constructed in workers.
        # V2 Format
        # int32 version (4 bytes)
        # (19*19+1) float32 probabilities (1448 bytes)
        # 19*19*16 packed bit planes (722 bytes)
        # uint8 side_to_move (1 byte)
        # uint8 is_winner (1 byte)
        self.v2_struct = struct.Struct('4s1448s722sBB')
        # Struct used to return data from child workers.
        # float32 winner
        # float32*392 probs
        # uint*6498 planes
        # (order is to ensure that no padding is required to
        # make float32 be 32-bit aligned)
        self.raw_struct = struct.Struct('4s1448s6498s')
    def convert_v1_to_v2(self, text_item):
        """
        Convert v1 text format to v2 packed binary format.

        Converts a set of 19 lines of text into a byte string:
        [[plane_1],[plane_2],...],...
        [probabilities],...
        winner,...

        Returns (True, record_bytes) on success, (False, None) on any
        malformed input.
        """
        # We start by building a list of 16 planes,
        # each being a 19*19 == 361 element array
        # of type np.uint8
        planes = []
        for plane in range(0, 16):
            # first 360 first bits are 90 hex chars, encoded MSB
            hex_string = text_item[plane][0:90]
            try:
                array = np.unpackbits(np.frombuffer(
                    bytearray.fromhex(hex_string), dtype=np.uint8))
            except:
                return False, None
            # Remaining bit that didn't fit. Encoded LSB so
            # it needs to be specially handled.
            last_digit = text_item[plane][90]
            if not (last_digit == "0" or last_digit == "1"):
                return False, None
            # Apply symmetry and append
            planes.append(array)
            planes.append(np.array([last_digit], dtype=np.uint8))
        # We flatten to a single array of len 16*19*19, type=np.uint8
        planes = np.concatenate(planes)
        # and then to a byte string
        planes = np.packbits(planes).tobytes()
        # Get the 'side to move'
        stm = text_item[16][0]
        if not(stm == "0" or stm == "1"):
            return False, None
        stm = int(stm)
        # Load the probabilities.
        probabilities = np.array(text_item[17].split()).astype(np.float32)
        if np.any(np.isnan(probabilities)):
            # Work around a bug in leela-zero v0.3, skipping any
            # positions that have a NaN in the probabilities list.
            return False, None
        if not(len(probabilities) == 362):
            return False, None
        probs = probabilities.tobytes()
        if not(len(probs) == 362 * 4):
            return False, None
        # Load the game winner color.
        winner = float(text_item[18])
        if not(winner == 1.0 or winner == -1.0):
            return False, None
        # Map winner from {-1, 1} to {0, 1} for the packed uint8 field.
        winner = int((winner + 1) / 2)
        version = struct.pack('i', 1)
        return True, self.v2_struct.pack(version, probs, planes, stm, winner)
    def v2_apply_symmetry(self, symmetry, content):
        """
        Apply a random symmetry to a v2 record.
        """
        assert symmetry >= 0 and symmetry < 8
        # unpack the record.
        (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)
        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
        # We use the full length reflection tables to apply symmetry
        # to all 16 planes simultaneously
        planes = planes[self.full_reflection_table[symmetry]]
        assert len(planes) == 19*19*16
        planes = np.packbits(planes)
        planes = planes.tobytes()
        probs = np.frombuffer(probs, dtype=np.float32)
        # Apply symmetries to the probabilities.
        probs = probs[self.prob_reflection_table[symmetry]]
        assert len(probs) == 362
        probs = probs.tobytes()
        # repack record.
        return self.v2_struct.pack(ver, probs, planes, to_move, winner)
    def convert_v2_to_tuple(self, content):
        """
        Convert v2 binary training data to packed tensors.

        v2 struct format is
        int32 ver
        float probs[19*18+1]
        byte planes[19*19*16/8]
        byte to_move
        byte winner

        packed tensor formats are
        float32 winner
        float32*362 probs
        uint8*6498 planes
        """
        (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)
        # Unpack planes.
        planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
        assert len(planes) == 19*19*16
        # Now we add the two final planes, being the 'color to move' planes.
        stm = to_move
        assert stm == 0 or stm == 1
        # Flatten all planes to a single byte string
        planes = planes.tobytes() + self.flat_planes[stm]
        assert len(planes) == (18 * 19 * 19), len(planes)
        # Map winner back from {0, 1} to {-1.0, 1.0}.
        winner = float(winner * 2 - 1)
        assert winner == 1.0 or winner == -1.0, winner
        winner = struct.pack('f', winner)
        return (planes, probs, winner)
    def convert_chunkdata_to_v2(self, chunkdata):
        """
        Take chunk of unknown format, and return it as a list of
        v2 format records.
        """
        # v2 chunkdata starts with the little-endian int32 version 1.
        if chunkdata[0:4] == b'\1\0\0\0':
            #print("V2 chunkdata")
            for i in range(0, len(chunkdata), self.v2_struct.size):
                if self.sample > 1:
                    # Downsample, using only 1/Nth of the items.
                    if random.randint(0, self.sample-1) != 0:
                        continue  # Skip this record.
                yield chunkdata[i:i+self.v2_struct.size]
        else:
            #print("V1 chunkdata")
            file_chunkdata = chunkdata.splitlines()
            # NOTE(review): `result` is unused — leftover from an earlier
            # non-generator implementation.
            result = []
            for i in range(0, len(file_chunkdata), DATA_ITEM_LINES):
                if self.sample > 1:
                    # Downsample, using only 1/Nth of the items.
                    if random.randint(0, self.sample-1) != 0:
                        continue  # Skip this record.
                item = file_chunkdata[i:i+DATA_ITEM_LINES]
                str_items = [str(line, 'ascii') for line in item]
                success, data = self.convert_v1_to_v2(str_items)
                if success:
                    yield data
    def task(self, chunkdatasrc, writer):
        """
        Run in fork'ed process, read data from chunkdatasrc,
        parsing, shuffling and sending v2 data through pipe back
        to main process.
        """
        # Structs must be rebuilt here: struct.Struct doesn't pickle.
        self.init_structs()
        while True:
            chunkdata = chunkdatasrc.next()
            if chunkdata is None:
                break
            for item in self.convert_chunkdata_to_v2(chunkdata):
                # Apply a random symmetry
                symmetry = random.randrange(8)
                item = self.v2_apply_symmetry(symmetry, item)
                writer.send_bytes(item)
    def v2_gen(self):
        """
        Read v2 records from child workers, shuffle, and yield
        records.
        """
        sbuff = sb.ShuffleBuffer(self.v2_struct.size, self.shuffle_size)
        # Round-robin over the worker pipes until every worker has hit EOF.
        while len(self.readers):
            #for r in mp.connection.wait(self.readers):
            for r in self.readers:
                try:
                    s = r.recv_bytes()
                    s = sbuff.insert_or_replace(s)
                    if s is None:
                        continue  # shuffle buffer not yet full
                    yield s
                except EOFError:
                    print("Reader EOF")
                    self.readers.remove(r)
        # drain the shuffle buffer.
        while True:
            s = sbuff.extract()
            if s is None:
                return
            yield s
    def tuple_gen(self, gen):
        """
        Take a generator producing v2 records and convert them to tuples.
        applying a random symmetry on the way.
        """
        for r in gen:
            yield self.convert_v2_to_tuple(r)
    def batch_gen(self, gen):
        """
        Pack multiple records into a single batch
        """
        # Get N records. We flatten the returned generator to
        # a list because we need to reuse it.
        while True:
            s = list(itertools.islice(gen, self.batch_size))
            if not len(s):
                return
            yield ( b''.join([x[0] for x in s]),
                    b''.join([x[1] for x in s]),
                    b''.join([x[2] for x in s]) )
    def parse(self):
        """
        Read data from child workers and yield batches
        of raw tensors
        """
        gen = self.v2_gen()        # read from workers
        gen = self.tuple_gen(gen)  # convert v2->tuple
        gen = self.batch_gen(gen)  # assemble into batches
        for b in gen:
            yield b
# Tests to check that records can round-trip successfully
class ChunkParserTest(unittest.TestCase):
    """Round-trip test: a random position encoded as v1 text must come out
    of the full parsing pipeline as some valid symmetry of itself."""
    def generate_fake_pos(self):
        """
        Generate a random game position.
        Result is ([[361] * 18], [362], [1])
        """
        # 1. 18 binary planes of length 361
        planes = [np.random.randint(2, size=361).tolist()
                  for plane in range(16)]
        stm = float(np.random.randint(2))
        planes.append([stm] * 361)
        planes.append([1. - stm] * 361)
        # 2. 362 probs
        probs = np.random.randint(3, size=362).tolist()
        # 3. And a winner: 1 or -1
        winner = [ 2 * float(np.random.randint(2)) - 1 ]
        return (planes, probs, winner)
    def test_parsing(self):
        """
        Test game position decoding pipeline.

        We generate a V1 record, and feed it all the way
        through the parsing pipeline to final tensors,
        checking that what we get out is what we put in.
        """
        batch_size=256
        # First, build a random game position.
        planes, probs, winner = self.generate_fake_pos()
        # Convert that to a v1 text record.
        items = []
        for p in range(16):
            # generate first 360 bits
            h = np.packbits([int(x) for x in planes[p][0:360]]).tobytes().hex()
            # then add the stray single bit
            h += str(planes[p][360]) + "\n"
            items.append(h)
        # then side to move
        items.append(str(int(planes[17][0])) + "\n")
        # then probabilities
        items.append(' '.join([str(x) for x in probs]) + "\n")
        # and finally if the side to move is a winner
        items.append(str(int(winner[0])) + "\n")
        # Convert to a chunkdata byte string.
        chunkdata = ''.join(items).encode('ascii')
        # feed batch_size copies into parser
        chunkdatasrc = ChunkDataSrc([chunkdata for _ in range(batch_size*2)])
        parser = ChunkParser(chunkdatasrc,
                shuffle_size=1, workers=1, batch_size=batch_size)
        # Get one batch from the parser.
        batchgen = parser.parse()
        data = next(batchgen)
        # Convert batch to python lists.
        batch = ( np.reshape(np.frombuffer(data[0], dtype=np.uint8),
                             (batch_size, 18, 19*19)).tolist(),
                  np.reshape(np.frombuffer(data[1], dtype=np.float32),
                             (batch_size, 19*19+1)).tolist(),
                  np.reshape(np.frombuffer(data[2], dtype=np.float32),
                             (batch_size, 1)).tolist() )
        # Check that every record in the batch is some valid symmetry
        # of the original data.
        for i in range(batch_size):
            data = (batch[0][i], batch[1][i], batch[2][i])
            # We have an unknown symmetry, so search for a matching one.
            result = False
            for symmetry in range(8):
                # Apply the symmetry to the original
                sym_planes = [
                    [plane[remap_vertex(vertex, symmetry)]
                     for vertex in range(361)]
                    for plane in planes]
                sym_probs = [
                    probs[remap_vertex(vertex, symmetry)]
                    for vertex in range(361)] + [probs[361]]
                if symmetry == 0:
                    assert sym_planes == planes
                    assert sym_probs == probs
                # Check that what we got out matches what we put in.
                if data == (sym_planes, sym_probs, winner):
                    result = True
                    break
            # Check that there is at least one matching symmetry.
            assert result == True
        print("Test parse passes")
        # drain parser
        for _ in batchgen:
            pass
if __name__ == '__main__':
    # Run the round-trip tests when executed directly.
    unittest.main()
| leela-zero/leela-zero | training/tf/chunkparser.py | chunkparser.py | py | 17,266 | python | en | code | 5,191 | github-code | 13 |
9825077079 | from django.contrib import admin
from orderapp.models import Order, Cart
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
    """Admin config for Order: show payment and receiver details both in
    the change list and on the edit form."""
    list_display = ('num', 'title', 'price', 'pay_type', 'pay_status', 'receiver', 'receiver_phone', 'receiver_address')
    fields = ('num', 'title', 'price', 'pay_type', 'pay_status', 'receiver', 'receiver_phone', 'receiver_address')
class CartAdmin(admin.ModelAdmin):
    """Admin config for Cart: list the owning user and the cart number."""
    list_display = ('user', 'no')
# Attach the customized admin classes to the default admin site.
admin.site.register(Order, OrderAdmin)
admin.site.register(Cart, CartAdmin)
| heaven1124/hijango | orderapp/admin.py | admin.py | py | 525 | python | en | code | 0 | github-code | 13 |
15623974743 | from flask import jsonify,request,flash,Flask,render_template,redirect,session,url_for,Response,abort,json
from flask_wtf.csrf import CSRFProtect,CSRFError
from flask_sqlalchemy import *
from techmarketplace.Form import RegisterForm, LoginForm,AdminLoginForm,TwoFactorForm
from flask_login import login_user,logout_user,current_user
import io
import pyqrcode
import os
from flask_paranoid import Paranoid
app = Flask(__name__,template_folder='backend')
# NOTE(review): SECRET_KEY is regenerated from os.urandom on every start,
# so existing sessions/CSRF tokens are invalidated across restarts (and
# differ between workers) — confirm this is intended.
app.config['SECRET_KEY'] = os.urandom(32)
app.config['UPLOAD_FOLDER'] = 'static\\upload'
# The blueprint/model imports run inside an application context —
# presumably they touch extensions bound to this app; confirm.
with app.app_context():
    from techmarketplace.api.routes import adminAPI
    from techmarketplace import AdminModels
    app.register_blueprint(adminAPI.admin_blueprint)
    # try:
    #     x = AdminModels.Admin('Jamess', 'password123', 96279135)
    #     AdminModels.database.session.add(x)
    #     AdminModels.database.session.commit()
    # except:
    #     AdminModels.database.session.rollback()
# Flask-Paranoid: invalidate the session when the client fingerprint changes.
paranoid = Paranoid(app)
paranoid.redirect_view = 'https://google.com'
@app.route('/')
def login():
    """Render the admin login page; hide it (404) from logged-in users."""
    # NOTE(review): debug print of the whole session — remove for production.
    print(session)
    if 'user' in session or current_user.is_authenticated:
        abort(404)
    form = AdminLoginForm()
    return render_template('adminLogin.html',form=form)
@app.route('/twofactor')
def twofactor():
    """Show the 2FA code-entry page for a logged-in admin with TFA enabled.

    Any other state (no session user, TFA disabled) yields a 404. The
    response carries no-cache headers so the code page is never cached.
    """
    if 'user' in session:
        # NOTE(review): .first() can return None for a stale session user,
        # which would make admin.TFA raise AttributeError — confirm.
        admin = AdminModels.Admin.query.filter_by(username=session['user']).first()
        if admin.TFA:
            form = TwoFactorForm()
            return render_template('twofactorPage.html',form=form) ,200 ,{
                'Cache-Control': 'no-cache, no-store, must-revalidate',
                'Pragma': 'no-cache',
                'Expires': '0'}
        else:
            abort(404)
    else:
        abort(404)
@app.route('/TwoFactorSetUp')
def TwoFactorSetup():
    """Show the 2FA enrollment page for a logged-in admin *without* TFA yet.

    Guard clauses 404 on: no session user, unknown user, or TFA already
    enabled. Sends no-cache headers so the setup page is never cached.
    """
    if 'user' not in session:
        abort(404)
    admin = AdminModels.Admin.query.filter_by(username=session['user']).first()
    if admin is None:
        abort(404)
    if admin.TFA:
        abort(404)
    form = TwoFactorForm()
    return render_template('TwoFactorSetUp.html',form=form),200 ,{
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'}
# @app.route('/a')
# def admin_customer():
# return self.render('index.html')
if __name__ == '__main__':
    # this works
    # app.config.update(
    #     SESSION_COOKIE_SECURE = True,
    #     SESSION_COOKIE_HTTPONLY = True,
    #     SESSION_COOKIE_SAMESITE='Lax',
    # )
    # NOTE(review): app.run(debug=True) below enables the interactive
    # Werkzeug debugger — never ship this setting to production.
app.run(debug=True,host='127.0.0.1',port=5001) | noobProgrammer35/ASPJ | techmarketplace/AdminApplication.py | AdminApplication.py | py | 2,610 | python | en | code | 0 | github-code | 13 |
73859805138 | from typing import List, Optional
from mev_inspect.classifiers.helpers import get_debt_transfer, get_received_transfer
from mev_inspect.schemas.classifiers import (
ClassifiedTrace,
ClassifierSpec,
DecodedCallTrace,
LiquidationClassifier,
TransferClassifier,
)
from mev_inspect.schemas.liquidations import Liquidation
from mev_inspect.schemas.traces import Protocol
from mev_inspect.schemas.transfers import Transfer
class AaveLiquidationClassifier(LiquidationClassifier):
    """Builds a Liquidation from an Aave `liquidationCall` trace."""
    @staticmethod
    def parse_liquidation(
        liquidation_trace: DecodedCallTrace,
        child_transfers: List[Transfer],
        child_traces: List[ClassifiedTrace],
    ) -> Optional[Liquidation]:
        """Return the Liquidation described by `liquidation_trace`.

        Token addresses/amounts come from the matched child transfers
        (debt paid by the liquidator, collateral received by them);
        returns None when either transfer cannot be found.
        """
        liquidator = liquidation_trace.from_address
        liquidated = liquidation_trace.inputs["_user"]
        debt_token_address = liquidation_trace.inputs["_reserve"]
        received_token_address = liquidation_trace.inputs["_collateral"]
        debt_purchase_amount = None
        received_amount = None
        debt_transfer = get_debt_transfer(liquidator, child_transfers)
        received_transfer = get_received_transfer(liquidator, child_transfers)
        if debt_transfer is not None and received_transfer is not None:
            # Prefer the transfer data over the call inputs — it reflects
            # the actual token movement.
            debt_token_address = debt_transfer.token_address
            debt_purchase_amount = debt_transfer.amount
            received_token_address = received_transfer.token_address
            received_amount = received_transfer.amount
            return Liquidation(
                liquidated_user=liquidated,
                debt_token_address=debt_token_address,
                liquidator_user=liquidator,
                debt_purchase_amount=debt_purchase_amount,
                protocol=Protocol.aave,
                received_amount=received_amount,
                received_token_address=received_token_address,
                transaction_hash=liquidation_trace.transaction_hash,
                trace_address=liquidation_trace.trace_address,
                block_number=liquidation_trace.block_number,
                error=liquidation_trace.error,
            )
        else:
            return None
class AaveTransferClassifier(TransferClassifier):
    """Maps an aToken `transferOnLiquidation` call to a Transfer record."""
    @staticmethod
    def get_transfer(trace: DecodedCallTrace) -> Transfer:
        # The aToken contract being called is the transferred token.
        return Transfer(
            block_number=trace.block_number,
            transaction_hash=trace.transaction_hash,
            trace_address=trace.trace_address,
            amount=trace.inputs["value"],
            to_address=trace.inputs["to"],
            from_address=trace.inputs["from"],
            token_address=trace.to_address,
        )
# Map decoded ABI call signatures to the classifiers defined above.
AAVE_SPEC = ClassifierSpec(
    abi_name="AaveLendingPool",
    protocol=Protocol.aave,
    classifiers={
        "liquidationCall(address,address,address,uint256,bool)": AaveLiquidationClassifier,
    },
)
ATOKENS_SPEC = ClassifierSpec(
    abi_name="aTokens",
    protocol=Protocol.aave,
    classifiers={
        "transferOnLiquidation(address,address,uint256)": AaveTransferClassifier,
    },
)
# Public list consumed by the spec registry.
AAVE_CLASSIFIER_SPECS: List[ClassifierSpec] = [AAVE_SPEC, ATOKENS_SPEC]
| flashbots/mev-inspect-py | mev_inspect/classifiers/specs/aave.py | aave.py | py | 3,128 | python | en | code | 750 | github-code | 13 |
41981264645 | ########################################################################
# Module: Get_ProductionWorker_Info.py
# Exercise: Ch11_Exercise_1.py
# Purpose: Employee and ProductionWorker Exercise #1 in Ch 11
# Last Update Date: 11/28/18
# Author: Lisa Nydick
########################################################################
from employee import ProductionWorker
def main():
    """Collect worker details from the user, build a ProductionWorker,
    and print it."""
    # Placeholders; immediately overwritten by get_user_input().
    worker_name = ''
    worker_number = 0
    shift_number = 0
    hourly_rate = 0
    worker_name, worker_number, shift_number, hourly_rate = get_user_input()
    worker = ProductionWorker(worker_name, worker_number, shift_number, hourly_rate)
    print_worker(worker)
def get_user_input():
    """Prompt for and return (name, number, shift, hourly rate).

    Keeps asking for the shift until the user enters 1 (day) or 2 (night).
    """
    worker_name = input('Enter the worker name: ')
    worker_number = int(input('Enter the worker number: '))
    shift_number = 0
    while shift_number not in (1, 2):
        shift_number = int(input('Enter the shift number. 1= Day, 2=Night: '))
    hourly_rate = float(input('Enter the hourly pay rate: '))
    return worker_name, worker_number, shift_number, hourly_rate
def print_worker(worker):
    """Print the worker's name, number, shift and pay rate via its getters."""
    print('Worker Name:', worker.get_name())
    print('Worker Number:', worker.get_number())
    print('Worker Shift_Number:', worker.get_shift_number())
    print('Hourly Rate:', worker.get_hourly_rate())
# Script entry point. NOTE(review): there is no __main__ guard, so this
# also runs on import — confirm that is acceptable.
main()
| lan33-ccac/CIT-119 | Get_ProductionWorker_Info.py | Get_ProductionWorker_Info.py | py | 1,426 | python | en | code | 0 | github-code | 13 |
20449721766 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 11:07:17 2020
@author: Jian Cao
"""
## Environment ----------------------------------------------------------------
import os
# Path to the GCP service-account key file (fill in before deploying).
credential_path = ''
# Set the variable in this process's own environment so the google-cloud
# client can see it. The previous os.system('export ...') ran in a child
# shell and therefore never affected this Python process.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
os.chdir('/home/jccit_caltech_edu/COVID-19/')
from google.cloud import storage
from google.cloud.storage import Blob
import datetime
import time
import pandas as pd
import subprocess
from itertools import compress
## Functions ------------------------------------------------------------------
# Function that compresses file
def compress_file(path, out_path, filename):
    """Compress `path+filename` into `out_path/<stem>.7z` with the 7z CLI.

    The archive stem is the filename up to its first '.'.
    """
    out_filename = filename.split('.')[0]
    # List-form call (shell=False): filenames are passed as literal
    # arguments, never interpreted by a shell.
    subprocess.call(['7z', 'a', out_path + out_filename + '.7z', path + filename])
    print(out_filename + ' is compressed.')
    # return({"filename":out_filename+'.7z', "path":out_path})
# Function that list the files
def list_files(path, pattern):
    """Return the filenames in `path` containing `pattern`, as a pandas
    Series sorted oldest-first.

    Assumes each name embeds a '%Y-%m-%d-%H-%M-%S' timestamp at character
    positions 18..36 (the tweet-dump naming scheme).
    """
    out = pd.DataFrame([x for x in os.listdir(path) if pattern in x],
                       columns = ['files'])
    out['time'] = [datetime.datetime.strptime(x[18:37], '%Y-%m-%d-%H-%M-%S') for x in out['files']]
    out = out.sort_values(by = 'time', ascending = True).reset_index()
    return(out['files'])
# Function that connects Cloud Storage
def connect_storage(bucket_name):
    """Return the google-cloud-storage bucket named `bucket_name`."""
    client = storage.Client()
    bucket = client.get_bucket(bucket_name)
    return(bucket)
# Function that uploads files
def upload_to_storage(path, filename, bucket, bucket_folder, bucket_name):
    """Upload `path+filename` into `bucket_folder` of `bucket`.

    Returns {'filename': ..., 'retry': bool}; retry=True means the upload
    failed and the caller should try again.
    """
    blob = Blob(bucket_folder + filename, bucket)
    try:
        blob.upload_from_filename(path + filename)
        print(filename +
              ' is uploaded to "{}:{}"'.format(bucket_name, bucket_folder))
        retry = False
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # narrowing to Exception (or google.cloud exceptions) would be safer.
    except:
        print(filename + ' needs to be re-uploaded.')
        retry = True
    return({'filename':filename, 'retry':retry})
# Function that list the old files
def list_old_files(files, days):
    """Return the subset of `files` more than `days` days old.

    A file's age comes from its name: character positions 18..36 must hold
    a '%Y-%m-%d-%H-%M-%S' timestamp (the tweet-dump naming scheme).
    """
    # Compute the cutoff once so every file is compared against the same
    # instant (previously datetime.now() was re-evaluated per file), and
    # use a plain comprehension instead of index juggling via compress().
    cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
    return [f for f in files
            if datetime.datetime.strptime(f[18:37], '%Y-%m-%d-%H-%M-%S') < cutoff]
# main
def main(bucket_name, bucket_folder, path_7z, path_tweets,
         marker, wait_retry = 5, wait_next = 900):
    """Run forever: compress new .txt tweet dumps into .7z archives,
    upload every archive newer than `marker` to Cloud Storage, delete
    archives older than 7 days, then sleep.

    Sleeps `wait_retry` seconds after a failed upload (to retry soon),
    otherwise `wait_next` seconds before the next cycle. `marker` is the
    filename of the last successfully uploaded archive ('' to upload all).
    """
    while True:
        retry = False
        # Compress files
        files_tweets = list_files(path_tweets, '.txt')
        for file_tweets in files_tweets:
            compress_file(path_tweets, path_7z, file_tweets)
            os.remove(path_tweets + file_tweets)
        # Upload files
        bucket = connect_storage(bucket_name)
        files_7z = list_files(path_7z, '.7z').tolist()
        if '.7z' in marker:
            # Resume after the last uploaded archive.
            files_upload = files_7z[files_7z.index(marker)+1:]
        else:
            files_upload = files_7z
        for file_7z in files_upload:
            response = upload_to_storage(path_7z, file_7z, bucket,
                                         bucket_folder, bucket_name)
            if not response['retry']:
                marker = file_7z
            else:
                retry = True
                continue
        # Delete old files
        files_old = list_old_files(files_7z, 7)
        if files_old:
            for file_old in files_old:
                os.remove(path_7z + file_old)
        # Finish
        if retry:
            print('Retrying...')
            time.sleep(wait_retry)
        else:
            print('Waiting for next update...')
            time.sleep(wait_next)
## main -----------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): main() takes five required positional arguments
    # (bucket_name, bucket_folder, path_7z, path_tweets, marker); calling
    # it bare raises TypeError — supply the deployment values here.
    main()
| jian-frank-cao/MonitoringTwitter | GCP/instance_to_cloud_storage.py | instance_to_cloud_storage.py | py | 3,800 | python | en | code | 6 | github-code | 13 |
71270822417 | import sys
sys.path.append('..')
from enum import Enum
from util.intcode import Intcode
from time import sleep
class Dir(Enum):
    """Heading of the painting robot; values increase clockwise from UP."""
    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3
    def succ(self):
        """Heading after turning 90 degrees clockwise."""
        return Dir((self.value + 1) % 4)
    def pred(self):
        """Heading after turning 90 degrees counter-clockwise."""
        return Dir((self.value - 1) % 4)
    def xy(self):
        """Unit (dx, dy) step taken when moving with this heading."""
        steps = {
            Dir.UP: (0, 1),
            Dir.RIGHT: (1, 0),
            Dir.DOWN: (0, -1),
            Dir.LEFT: (-1, 0),
        }
        return steps[self]
def sol1(code, dbg=False):
    """Part 1: run the painting robot starting on a black panel and
    return how many distinct panels it painted at least once."""
    colour = {}            # (x, y) -> last painted colour (0/1)
    current_pos = [0, 0]
    current_dir = Dir(0)
    computer = Intcode(code, [])
    next_input = 0         # robot starts over a black panel
    while not computer.done():
        computer.add_input(next_input)
        # The program emits outputs in pairs: (paint colour, turn).
        while not computer.done() and len(computer.outputs) < 2:
            computer.execute_command()
        if computer.done():
            break
        paint, d = computer.outputs
        # 0 = turn left, 1 = turn right.
        current_dir = current_dir.pred() if d == 0 else current_dir.succ()
        computer.outputs = []
        colour[tuple(current_pos)] = paint
        current_pos[0] += current_dir.xy()[0]
        current_pos[1] += current_dir.xy()[1]
        # Camera input: colour under the robot (unpainted panels are black).
        next_input = colour.get(tuple(current_pos), 0)
    return len(colour)
def identifier(colour):
    """Render the painted panels as an ASCII grid: '*' for white (1),
    space for black, rows joined with newlines."""
    xs = [x for x, _ in colour]
    ys = [y for _, y in colour]
    x_min = min(xs)
    width = max(xs) - x_min + 1
    height = max(ys) - min(ys) + 1
    grid = [[' '] * width for _ in range(height)]
    for (x, y), c in colour.items():
        if c == 1:
            # Row -y reproduces the original negative-index addressing:
            # y == 0 is the top row, negative y wraps from the bottom.
            grid[-y][x - x_min] = '*'
    return '\n'.join(''.join(row) for row in grid)
def sol2(code, show_progress=False):
    """Part 2: run the robot starting on a white panel and return the
    painted registration identifier as an ASCII-art string."""
    colour = {}            # (x, y) -> last painted colour (0/1)
    current_pos = [0, 0]
    current_dir = Dir(0)
    computer = Intcode(code, [])
    next_input = 1         # the only difference from part 1: start on white
    while not computer.done():
        computer.add_input(next_input)
        # The program emits outputs in pairs: (paint colour, turn).
        while not computer.done() and len(computer.outputs) < 2:
            computer.execute_command()
        if computer.done():
            break
        paint, d = computer.outputs
        # 0 = turn left, 1 = turn right.
        current_dir = current_dir.pred() if d == 0 else current_dir.succ()
        computer.outputs = []
        colour[tuple(current_pos)] = paint
        current_pos[0] += current_dir.xy()[0]
        current_pos[1] += current_dir.xy()[1]
        next_input = colour.get(tuple(current_pos), 0)
        if show_progress:
            # Animate the painting as it happens.
            print(identifier(colour))
            print()
            sleep(0.05)
    return identifier(colour)
def main():
    """Read the comma-separated Intcode program from the file named in
    argv[1] and print the answers to both parts."""
    with open(sys.argv[1]) as f:
        data = list(map(int, f.read().strip().split(',')))
    # Pass copies so each part runs on a pristine program.
    print(sol1(data[:]))
    print(sol2(data[:]))
if __name__ == '__main__':
main() | Lammatian/AdventOfCode | 2019/11/sol.py | sol.py | py | 2,942 | python | en | code | 1 | github-code | 13 |
19871110834 | #!/usr/bin/python
#
# Block header comment
#
#
import sys, imp, atexit
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/");
import pexpect, shellio, signal, time, os, re, proc_check
#Ensure the shell process is terminated
def force_shell_termination(shell_process):
c.close(force=True)
# Pull in the regular expressions and other definitions for the shell
# under test; the definitions module is named on the command line.
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)
logfile = None
if hasattr(def_module, 'logfile'):
    logfile = def_module.logfile
# Spawn an instance of the shell; `c` is the pexpect handle used below.
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile)
atexit.register(force_shell_termination, shell_process=c)
# Single-pipe test: echo through cat and expect the echoed text back.
c.sendline("echo hello | cat");
assert c.expect_exact("hello") == 0, "hello was printed";
# Create a marker file via output redirection, then list the directory.
c.sendline("ls > 1_pipe_test.txt");
c.sendline("ls");
# Checking to see if the file is present after adding it.
assert c.expect_exact("1_pipe_test.txt") == 0, "file present";
# The same listing piped through cat should still show the marker file.
c.sendline("ls | cat");
assert c.expect_exact("1_pipe_test.txt") == 0, "file present in piping";
shellio.success()
| NealSchneier/School | shell/group114/esh/src/advanced/single_pipe_test.py | single_pipe_test.py | py | 1,113 | python | en | code | 0 | github-code | 13 |
36187042116 | import os
import numpy as np
import itk
from itk import TubeTK as ttk
#from itkwidgets import view
'''TubeTK Doc: https://public.kitware.com/Wiki/TubeTK/Documentation'''
# Baseline images assisting with brain mask extraction #
# N atlas subjects; each contributes a FLASH image and its brain mask,
# loaded from BaseLineFld by extract_brain() below.
N = 8
BaseLineFld = '/media/peirong/PR/TubeTK_Data'
readerList = ["003", "010", "026", "034", "045", "056", "063", "071"]
# General setting for itk image reader #
# 3-D float images are used throughout this module.
ImageType = itk.Image[itk.F, 3]
ReaderType = itk.ImageFileReader[ImageType]
def read_img(img_path):
    """Load the image at *img_path* using the module-level itk reader type
    and return the resulting itk image."""
    file_reader = ReaderType.New(FileName = img_path)
    file_reader.Update()
    return file_reader.GetOutput()
def resample(itk_img, new_spacing = None, save_path = None):
    """Resample *itk_img* onto a new voxel spacing (default isotropic 1x1x1).

    Optionally writes the resampled image to *save_path*. Returns the
    resampled image.

    Fix: the original used a mutable list as the default argument; the
    default is now created per call.
    """
    if new_spacing is None:
        new_spacing = [1., 1., 1.]
    resampler = ttk.ResampleImage.New(Input = itk_img)
    resampler.SetSpacing(new_spacing)  # res.SetMakeHighResIso(True) is an alternative
    resampler.Update()
    iso_img = resampler.GetOutput()
    if save_path:
        itk.imwrite(iso_img, save_path)
    return iso_img
def register_images(moving_img, fixed_img, save_path = None):
    """Rigidly register *moving_img* onto *fixed_img* (standard protocol for
    within-patient, within-visit registration) and return the moving image
    resampled into the fixed frame. Optionally writes it to *save_path*."""
    registration = ttk.RegisterImages[ImageType].New(
        FixedImage = fixed_img, MovingImage = moving_img)
    registration.SetReportProgress(False)  # True: print register process
    # Expected transform magnitudes tune the optimizer's search range.
    registration.SetExpectedOffsetMagnitude(40)
    registration.SetExpectedRotationMagnitude(0.01)
    registration.SetExpectedScaleMagnitude(0.01)
    registration.SetRigidMaxIterations(500)
    registration.SetRigidSamplingRatio(0.1)
    registration.SetRegistration("RIGID")
    registration.SetMetric("MATTES_MI_METRIC")
    registration.Update()
    # NOTE: interpolation arg to be chosen
    registered_moving_img = registration.GetFinalMovingImage("LINEAR_INTERPOLATION")
    if save_path:
        itk.imwrite(registered_moving_img, save_path)
    return registered_moving_img
def extract_brain(itk_img1, itk_img2, itk_img3, save_path = None):
    '''
    Build a brain mask from three co-registered same-subject images
    (more robust than a single channel).

    Pipeline: register the N atlas FLASH images onto itk_img2, vote their
    brain masks, derive inside/outside seed regions, then refine the mask
    with a Parzen-PDF connected-component segmentation using all three
    input channels.

    Returns (brainMath, brainMask): the ImageMath object (for use with
    masking()) and the final binary brain mask. Optionally writes the mask
    to save_path.
    '''
    # Load the atlas FLASH images and their hand-made brain masks.
    imBase = []
    imBaseB = []
    for i in range(0, N):
        name = os.path.join(BaseLineFld, "Normal"+readerList[i]+"-FLASH.mha")
        nameB = os.path.join(BaseLineFld, "Normal"+readerList[i]+"-FLASH-Brain.mha")
        imBase.append(read_img(name))
        imBaseB.append(read_img(nameB))
    # Blur the subject image once; each atlas image is blurred the same way
    # before affine registration onto it.
    imMath = ttk.ImageMath.New(Input = itk_img2)
    imMath.Blur(2)
    itk_img_Blur = imMath.GetOutput()
    regB = []
    regBB = []
    #print('Start')
    for i in range(0,N):
        #print(i)
        imMath.SetInput(imBase[i])
        imMath.Blur(2)
        imBaseBlur = imMath.GetOutput()
        regBTo1 = ttk.RegisterImages[ImageType].New(FixedImage = itk_img_Blur, MovingImage = imBaseBlur)
        regBTo1.SetReportProgress(False) # True: print register process
        regBTo1.SetExpectedOffsetMagnitude(40)
        regBTo1.SetExpectedRotationMagnitude(0.01)
        regBTo1.SetExpectedScaleMagnitude(0.1)
        regBTo1.SetRigidMaxIterations(500)
        regBTo1.SetAffineMaxIterations(500)
        regBTo1.SetRigidSamplingRatio(0.1)
        regBTo1.SetAffineSamplingRatio(0.1)
        regBTo1.SetInitialMethodEnum("INIT_WITH_IMAGE_CENTERS")
        regBTo1.SetRegistration("PIPELINE_AFFINE")
        regBTo1.SetMetric("MATTES_MI_METRIC")
        #regBTo1.SetMetric("NORMALIZED_CORRELATION_METRIC") - Really slow!
        #regBTo1.SetMetric("MEAN_SQUARED_ERROR_METRIC")
        regBTo1.Update()
        # Resample both the atlas image and its brain mask with the found
        # transform.
        img = regBTo1.ResampleImage("LINEAR", imBase[i])
        regB.append(img)
        img = regBTo1.ResampleImage("LINEAR", imBaseB[i])
        regBB.append(img)
    # Average the binarized registered masks into a per-voxel vote in [0, 1].
    regBBT = []
    for i in range(0,N):
        imMath = ttk.ImageMath[ImageType,ImageType].New(Input = regBB[i])
        imMath.Threshold(0, 1, 0, 1)
        img = imMath.GetOutput()
        if i==0:
            imMathSum = ttk.ImageMath[ImageType,ImageType].New(img)
            imMathSum.AddImages(img, 1.0/N, 0)
            sumBBT = imMathSum.GetOutput()
        else:
            imMathSum = ttk.ImageMath[ImageType,ImageType].New(sumBBT)
            imMathSum.AddImages(img, 1, 1.0/N)
            sumBBT = imMathSum.GetOutput()
    #view(sumBBT)
    # Unanimous-vote interior, eroded: confident "inside brain" seeds.
    insideMath = ttk.ImageMath[ImageType,ImageType].New(Input = sumBBT)
    insideMath.Threshold(1, 1, 1, 0)
    insideMath.Erode(5, 1, 0)
    brainInside = insideMath.GetOutput()
    # Zero-vote exterior, eroded twice to form a shell of confident
    # "outside brain" seeds.
    outsideMath = ttk.ImageMath[ImageType,ImageType].New( Input = sumBBT )
    outsideMath.Threshold(0, 0, 1, 0)
    outsideMath.Erode(10, 1, 0)
    brainOutsideAll = outsideMath.GetOutput()
    outsideMath.Erode(20, 1, 0)
    outsideMath.AddImages(brainOutsideAll, -1, 1)
    brainOutside = outsideMath.GetOutput()
    # Combined label map: 1 = outside seeds, 2 = inside seeds, 0 = unknown.
    outsideMath.AddImages(brainInside, 1, 2)
    brainCombinedMask = outsideMath.GetOutputUChar()
    outsideMath.AddImages(itk_img2, 512, 1)
    brainCombinedMaskView = outsideMath.GetOutput()
    #view(brainCombinedMaskView) # Plotting #
    # Refine the seed labels with a Parzen-PDF classifier over all three
    # input channels (object id 2 = brain, 1 = background, 0 = void).
    LabelMapType = itk.Image[itk.UC,3]
    segmenter = ttk.SegmentConnectedComponentsUsingParzenPDFs[ImageType,LabelMapType].New()
    segmenter.SetFeatureImage(itk_img1)
    segmenter.AddFeatureImage(itk_img2)
    segmenter.AddFeatureImage(itk_img3)
    segmenter.SetInputLabelMap( brainCombinedMask )
    segmenter.SetObjectId(2)
    segmenter.AddObjectId(1)
    segmenter.SetVoidId(0)
    segmenter.SetErodeDilateRadius(5)
    segmenter.Update()
    segmenter.ClassifyImages()
    brainCombinedMaskClassified = segmenter.GetOutputLabelMap()
    # Back to float, keep label 2 (brain) and dilate slightly for margin.
    cast = itk.CastImageFilter[LabelMapType, ImageType].New()
    cast.SetInput(brainCombinedMaskClassified)
    cast.Update()
    brainMaskF = cast.GetOutput()
    brainMath = ttk.ImageMath[ImageType,ImageType].New(Input = brainMaskF)
    brainMath.Threshold(2, 2, 1, 0)
    brainMath.Dilate(2, 1, 0)
    brainMask = brainMath.GetOutput()
    if save_path:
        itk.imwrite(brainMask, save_path)
    #view(brain)
    '''brainMath.SetInput(itk_img1) #
    brainMath.ReplaceValuesOutsideMaskRange( brainMask, 1, 1, 0)
    brain = brainMath.GetOutput()'''
    return brainMath, brainMask
def masking(masker, itk_img, mask_img, save_path = None):
    """Apply a binary brain mask to *itk_img* using the ImageMath object
    returned by extract_brain(); voxels where the mask is not 1 are zeroed.

    Optionally writes the masked image to *save_path*; returns it either way.
    """
    masker.SetInput(itk_img)
    # Keep voxels whose mask value lies in [1, 1]; replace the rest with 0.
    masker.ReplaceValuesOutsideMaskRange(mask_img, 1, 1, 0)
    result = masker.GetOutput()
    if save_path is not None:
        itk.imwrite(result, save_path)
    return result
def enhance_vessel(mra_path, itk_path1, itk_path2, mask_path, save_path = None):
    """Compute a vesselness probability map restricted to the brain mask.

    Three co-registered same-subject images are used for robustness; the
    first (the MRA) drives seed picking and tube extraction, which build an
    image-specific training set for the discriminant-analysis enhancer.

    Args:
        mra_path: MRA image vessels are extracted from.
        itk_path1, itk_path2: additional co-registered channels.
        mask_path: binary brain mask image.
        save_path: if given, the masked probability map is written here.
    Returns:
        Vessel-probability image multiplied by the brain mask.
    """
    mra_img = read_img(mra_path)
    itk_img2 = read_img(itk_path1)
    itk_img3 = read_img(itk_path2)
    imMath = ttk.ImageMath[ImageType,ImageType].New()
    imMath.SetInput(mra_img)
    imMath.Blur(1.0)
    imBlur = imMath.GetOutput()
    imBlurArray = itk.GetArrayViewFromImage(imBlur)
    numSeeds = 10
    seedCoverage = 20
    seedCoord = np.zeros([numSeeds,3])
    for i in range(numSeeds):
        # Pick the brightest remaining voxel as the next seed ...
        seedCoord[i] = np.unravel_index(np.argmax(imBlurArray, axis = None), imBlurArray.shape)
        indx = [int(seedCoord[i][0]),int(seedCoord[i][1]),int(seedCoord[i][2])]
        # ... then zero a neighbourhood around it so later seeds spread out.
        # Bug fix: the upper bounds must be clamped with min(); the original
        # used max(idx+cov, shape), which wiped everything from the seed to
        # the end of each axis.
        minX = max(indx[0]-seedCoverage,0)
        maxX = min(indx[0]+seedCoverage,imBlurArray.shape[0])
        minY = max(indx[1]-seedCoverage,0)
        maxY = min(indx[1]+seedCoverage,imBlurArray.shape[1])
        minZ = max(indx[2]-seedCoverage,0)
        maxZ = min(indx[2]+seedCoverage,imBlurArray.shape[2])
        imBlurArray[minX:maxX,minY:maxY,minZ:maxZ]=0
        # numpy indexing is (z, y, x) while itk expects (x, y, z).
        indx.reverse()
        seedCoord[i] = mra_img.TransformIndexToPhysicalPoint(indx)
    #print(seedCoord)
    # Manually extract a few vessels to form an image-specific training set
    vSeg = ttk.SegmentTubes[ImageType].New()
    vSeg.SetInput(mra_img)
    vSeg.SetVerbose(True)
    vSeg.SetMinRoundness(0.1)
    vSeg.SetMinCurvature(0.001)
    vSeg.SetRadiusInObjectSpace( 1 )
    for i in range(numSeeds):
        #print("**** Processing seed " + str(i) + " : " + str(seedCoord[i]))
        vSeg.ExtractTubeInObjectSpace( seedCoord[i], i )
    tubeMaskImage = vSeg.GetTubeMaskImage()
    #view(tubeMaskImage)
    # Turn the extracted-tube mask into a ridge/background/unknown label map.
    LabelMapType = itk.Image[itk.UC,3]
    trMask = ttk.ComputeTrainingMask[ImageType,LabelMapType].New()
    trMask.SetInput(tubeMaskImage)
    trMask.SetGap(3)
    #trMask.SetObjectWidth(1)
    trMask.SetNotObjectWidth(1)
    trMask.Update()
    fgMask = trMask.GetOutput()
    # Train and apply the multi-channel, multi-scale tube enhancer.
    enhancer = ttk.EnhanceTubesUsingDiscriminantAnalysis[ImageType,LabelMapType].New()
    enhancer.SetInput(mra_img)
    enhancer.AddInput(itk_img2)
    enhancer.AddInput(itk_img3)
    enhancer.SetLabelMap(fgMask)
    enhancer.SetRidgeId(255)
    enhancer.SetBackgroundId(127)
    enhancer.SetUnknownId(0)
    enhancer.SetTrainClassifier(True)
    enhancer.SetUseIntensityOnly(True)
    enhancer.SetScales([0.3333, 1, 2.5])
    enhancer.Update()
    enhancer.ClassifyImages()
    #view(enhancer.GetClassProbabilityImage(0))
    # P(vessel) - P(background), then restrict to the brain mask.
    vess_prob = itk.SubtractImageFilter(Input1 = enhancer.GetClassProbabilityImage(0), Input2 = enhancer.GetClassProbabilityImage(1))
    brainMask = itk.imread(mask_path, itk.F)
    vess_prob_brain = itk.MultiplyImageFilter(Input1 = vess_prob, Input2 = brainMask)
    if save_path:
        #itk.imwrite(vess_prob, os.path.join(save_fld, "VesselEnhanced_Brain.mha"), compression = True)
        itk.imwrite(vess_prob_brain, save_path, compression=True)
    return vess_prob_brain
# Module-level reader/writer shared by anisotropic_smoothing below.
# NOTE(review): this rebinds ImageType (same 3-D float definition as above),
# and a single shared reader/writer instance is not safe for concurrent
# calls — confirm callers are single-threaded.
ImageDimension = 3
PixelType = itk.ctype('float')
ImageType = itk.Image[PixelType, ImageDimension]
img_reader = itk.ImageFileReader[ImageType].New()
img_writer = itk.ImageFileWriter[ImageType].New()
def anisotropic_smoothing(img_path, n_iter, diffusion_time = 3.5, anisotropic_lambda = 0.1, enhancement_type = 3, \
    noise_scale = 3, feature_scale = 5, exponent = 3.5):
    """Apply n_iter passes of ITK coherence-enhancing anisotropic diffusion
    to the image at *img_path* and write the result next to it.

    Uses the shared module-level img_reader / img_writer; the itk pipeline
    is driven by each smoother's Update(), so no explicit reader Update()
    is needed.

    Returns:
        Path of the smoothed image: '<stem>_smoothed<ext>'.
    """
    img_reader.SetFileName(img_path)
    itk_img = img_reader.GetOutput()
    for i_iter in range(n_iter):
        # Each iteration feeds the previous output back into a fresh filter.
        smoother = itk.CoherenceEnhancingDiffusionImageFilter.New(itk_img)
        smoother.SetDiffusionTime(diffusion_time)
        smoother.SetLambda(anisotropic_lambda)
        smoother.SetEnhancement(enhancement_type)
        smoother.SetNoiseScale(noise_scale)
        smoother.SetFeatureScale(feature_scale)
        smoother.SetExponent(exponent)
        smoother.Update()
        itk_img = smoother.GetOutput()
    # (Removed: a dead `smoothed_paths` accumulator and the commented-out
    # per-iteration debug writes that were its only use.)
    smoothed_path = '%s_smoothed%s' % (img_path[:-4], img_path[-4:])
    img_writer.SetFileName(smoothed_path)
    img_writer.SetInput(itk_img)
    img_writer.Update()
    return smoothed_path
36560968443 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import cgi
import json
import os
import traceback
from autodqm import fetch, compare_hists
from autodqm.cerncert import CernCert
def handle_request(req):
    """Dispatch one AutoDQM request and wrap the outcome for JSON output.

    *req* is a dict whose 'type' key names the operation; the remaining keys
    are that operation's arguments. Returns {'data': ...} on success, or
    {'error': {'message': ..., 'traceback': ...}} if anything raised.
    """
    err = None
    tb = None
    data = None
    try:
        if req['type'] == "fetch_run":
            data = fetch_run(req['series'], req['sample'], req['run'])
        elif req['type'] == "process":
            data = process(req['subsystem'],
                           req['data_series'], req['data_sample'], req['data_run'],
                           req['ref_series'], req['ref_sample'], req['ref_run'])
        elif req['type'] == "get_subsystems":
            data = get_subsystems()
        elif req['type'] == "get_series":
            data = get_series()
        elif req['type'] == "get_samples":
            data = get_samples(req['series'])
        elif req['type'] == "get_runs":
            data = get_runs(req['series'], req['sample'])
        else:
            # Improvement: report which request type was rejected instead of
            # raising a bare, message-less `error`.
            raise error("unrecognized request type: {}".format(req.get('type')))
    except Exception as e:
        err = e
        tb = traceback.format_exc()
    # Build the response after the handler. The original built it inside a
    # `finally:` with a `return` there, which silently swallows any
    # exception raised while constructing the response.
    res = {}
    if err:
        res['error'] = {
            'message': str(err),
            'traceback': tb
        }
    else:
        res['data'] = data
    return res
def make_cert():
    """Build a CernCert from the ADQM_* environment variables."""
    sslcert = os.getenv('ADQM_SSLCERT')
    sslkey = os.getenv('ADQM_SSLKEY')
    cainfo = os.getenv('ADQM_CERNCA')
    return CernCert(sslcert=sslcert, sslkey=sslkey, cainfo=cainfo)
def fetch_run(series, sample, run):
    """Download the ROOT file for one run into the local DB cache.

    Returns an empty dict (the caller only needs success/failure).
    """
    fetch.fetch(make_cert(), series, sample, run, db=os.getenv('ADQM_DB'))
    return {}
def process(subsystem,
            data_series, data_sample, data_run,
            ref_series, ref_sample, ref_run):
    """Fetch the data and reference ROOT files, run the comparison plugins
    for *subsystem*, and return the results with output paths relativized
    to the public results directory."""
    # Get root file paths
    cert = make_cert()
    data_path = fetch.fetch(cert,
                            data_series, data_sample, data_run,
                            db=os.getenv('ADQM_DB'))
    ref_path = fetch.fetch(cert,
                           ref_series, ref_sample, ref_run,
                           db=os.getenv('ADQM_DB'))
    # Get config and results/plugins directories
    results_dir = os.path.join(os.getenv('ADQM_PUBLIC'), 'results')
    plugin_dir = os.getenv('ADQM_PLUGINS')
    with open(os.getenv('ADQM_CONFIG')) as config_file:
        config = json.load(config_file)
    # Process this query
    results = compare_hists.process(config, subsystem,
                                    data_series, data_sample, data_run, data_path,
                                    ref_series, ref_sample, ref_run, ref_path,
                                    output_dir=results_dir, plugin_dir=plugin_dir)
    # Rewrite absolute output paths as 'results/...' so the web frontend can
    # serve them relative to the public directory.
    def relativize(p): return os.path.join(
        'results', os.path.relpath(p, results_dir))
    for r in results:
        r['pdf_path'] = relativize(r['pdf_path'])
        r['json_path'] = relativize(r['json_path'])
        r['png_path'] = relativize(r['png_path'])
    return {'items': results}
def get_subsystems():
    """List the subsystem names defined in the AutoDQM config file
    (path taken from the ADQM_CONFIG environment variable)."""
    with open(os.getenv('ADQM_CONFIG')) as config_file:
        config = json.load(config_file)
    names = [{"name": subsystem} for subsystem in config]
    return {'items': names}
def get_series():
    """List the available data series from offline DQM."""
    return {'items': fetch.get_series(make_cert())}
def get_samples(series):
    """List the samples available within *series*."""
    return {'items': fetch.get_samples(make_cert(), series)}
def get_runs(series, sample):
    """List the runs available for *sample* within *series*."""
    return {'items': fetch.get_runs(make_cert(), series, sample)}
class error(Exception):
    """Raised when an AutoDQM request cannot be handled.

    (The lower-case name is kept for backward compatibility with callers.)
    """
if __name__ == "__main__":
cgi_req = cgi.FieldStorage()
req = {}
for k in cgi_req.keys():
req[str(k)] = str(cgi_req[k].value)
res = handle_request(req)
print("Content-type: application/json")
print("Access-Control-Allow-Origin: *")
print("")
print(json.dumps(res))
| reasonablytall/cern | adqmtmp/AutoDQM/index.py | index.py | py | 3,800 | python | en | code | 0 | github-code | 13 |
74660429457 | '''
Author: David Kaplan
Advisor: Stephen Penny
Since the amount of memory that a typical `DataPreprocessing` object is >> 4Gb,
we need to make a custom saving method using netCDF files and separate the
attributes into a different object.
'''
import numpy as np
import time
import math
import os
import pickle
import logging
from netCDF4 import Dataset
from .transforms import Denormalize_arr as Denormalize
from .transforms import Normalize_arr as Normalize
import sys
sys.path.append('..')
from residnet.util import *
from residnet.constants import *
from residnet.datasets import HYCOM
'''
Future Versions:
- Zip files for saving
'''
class Settings:
    '''
    Acts as a container of the attributes for the DataPreprocessing class.
    Necessary to separate them because the saving method is more complicated:
    the big arrays go into netCDF files while this object is pickled.
    '''
    def __init__(self, name = None, res_in = None, savepath = None,
        filepath2d = None, filepath3d = None, years = None, dataset = None,
        denorm_local = None, keys = None, num_days = None):
        '''Once core arrays (subgrids, locations, norm_data) are built, do not
        manipulate them as it will make `loc_to_idx` invalid
        -------------
        Args
        -------------
        savepath (str)
            - location to store the object
        res_in (int)
            - Number of pixels per subgrid
        dataset (str)
            - What dataset to load from
            - example, 'hycom'
        filepath2d (str)
            - Location where the HYCOM netcdf 2D data is stored
        filepath3d (str)
            - Location where the HYCOM netcdf 3D data is stored
        years (list[str]), ex: ['2006', '2007']
            - A list of years to parse to get the data
        denorm_local (bool)
            - If True, normalizes a subgrid by the absolute max value of the
              difference in the corners of the subgrid.
            - If False, normalizes a subgrid by the absolute max value over the
              entire dataset.
        keys (list[str]), ex: ['ssh','temp', etc.]
            - A list of the keys to parse from the data
        num_days (int or None)
            - Number of days to parse when loading in the data
        settings (Settings or None)
            - A settings object that overwrites the previous attributes
        '''
        # Set to default if it was not set during initialization.
        # Defaults come from residnet.constants (DEFAULT_DP_*).
        if name == None:
            name = DEFAULT_DP_NAME
        if res_in == None:
            res_in = DEFAULT_DP_RES_IN
        if filepath2d == None:
            filepath2d = DEFAULT_DP_FILEPATH2D
        if filepath3d == None:
            filepath3d = DEFAULT_DP_FILEPATH3D
        if dataset == None:
            dataset = DEFAULT_DP_DATASET
        if years == None:
            years = DEFAULT_DP_YEARS
        if denorm_local == None:
            denorm_local = DEFAULT_DP_DENORM_LOCAL
        if keys == None:
            keys = DEFAULT_DP_KEYS
        if num_days == None:
            num_days = DEFAULT_DP_NUM_DAYS
        if savepath == None:
            savepath = 'output/datapreprocessing/{}_denormLocal{}_res{}/'.format(name,denorm_local,res_in)
        self.savepath = savepath
        self.res_in = res_in
        self.filepath2d = filepath2d
        self.filepath3d = filepath3d
        self.dataset = dataset
        self.years = years
        self.denorm_local = denorm_local
        self.keys = keys
        self.num_days = num_days
        # Non-passed in attributes
        self.parsed = False # If the data has been parsed or not
        self.num_samples = np.nan # Total number of samples parsed
        # Make sure paths are in the right format (all end with '/')
        if self.filepath2d[-1] != '/':
            self.filepath2d += '/'
        if self.filepath3d[-1] != '/':
            self.filepath3d += '/'
        if self.savepath[-1] != '/':
            self.savepath += '/'
        check_savepath_valid(self.savepath)
    def __str__(self):
        # Human-readable dump of every setting, one per line.
        s = 'Settings Object:\n'
        s += '\tsavepath: {}\n'.format(self.savepath)
        s += '\tResolution: {}\n'.format(self.res_in)
        s += '\tfilepath2d: {}\n'.format(self.filepath2d)
        s += '\tfilepath3d: {}\n'.format(self.filepath3d)
        s += '\tyears: {}\n'.format(self.years)
        s += '\tkeys: {}\n'.format(self.keys)
        s += '\tnumber of days: {}\n'.format(self.num_days)
        s += '\tnumber of samples: {}\n'.format(self.num_samples)
        s += '\tdenorm_local? {}\n'.format(self.denorm_local)
        s += '\tparsed? {}\n'.format(self.parsed)
        return s
class DataPreprocessing:
    '''Preprocesses raw data (HYCOM netCDFs) into flattened subgrids that
    interpolation and regression can be run over.

    Core arrays (all dicts keyed by data key, e.g. 'temp'):
      subgrids[key]  : (num_samples, res_in**2) flattened subgrid values
      locations[key] : (num_samples, 6) [year, day, lat_min, lat_max, lon_min, lon_max]
      norm_data[key] : (num_samples, 6) [ll, lr, ul, ur, corner_avg, norm_range]
    '''
    def __init__(self, *args, **kwargs):
        # Either build Settings from the constructor arguments, or take a
        # pre-built Settings object via the `settings` keyword.
        # NOTE(review): `DataPreProcessingError` here vs `DataProcessingError`
        # elsewhere in this class — confirm both names exist in residnet.util,
        # otherwise one of these raises NameError at runtime.
        if 'settings' not in kwargs:
            self.settings = Settings(*args, **kwargs)
        else:
            if not isinstance(kwargs['settings'], Settings):
                raise DataPreProcessingError('settings must be a `Settings` object')
            self.settings = kwargs['settings']
        self.subgrids = {}
        self.locations = {} #key -> [year, day, lat_min, lat_max, lon_min, lon_max]
        self.norm_data = {} #key -> [ll, lr, ul, ur, corner_avg, norm_range] per sample
    def __len__(self):
        # Number of parsed subgrid samples (NaN until parse_data() runs).
        return self.settings.num_samples
    def __str__(self):
        s = str(self.settings)
        s += 'num samples: {}\n'.format(self.settings.num_samples)
        return s
    def _valid(self,arr):
        '''Determines if the array is valid.
        Checks that there are no NaNs/masked fill values and no out-of-range
        magnitudes: masked entries are filled with a huge sentinel, so the
        subgrid is accepted only if every |value| < 1000.
        '''
        return np.all(np.abs(np.ma.filled(arr.astype(int),99999999)) < 1000)
    def parse_data(self):
        '''Converts data from arrays loaded from netcdf's to arrays of individual subgrids.
        First, it reads the raw data from netcdf.
        Second, it parses the data into the internal data structure.
        Lastly, it sets normalization factors (global max range when
        denorm_local is False).
        '''
        if self.settings.parsed:
            logging.info('Already parsed')
            return
        logging.debug('Reading in raw data.')
        # Load the dataset
        if self.settings.dataset == 'hycom':
            raw_data = HYCOM(filepath2d = self.settings.filepath2d,
                filepath3d = self.settings.filepath3d,
                years = self.settings.years, keys = self.settings.keys)
        else:
            raise DataProcessingError('DataPreprocessing.parse: dataset `{}` not valid'.format(
                self.settings.dataset))
        logging.debug('Initializing variables.')
        res_in = self.settings.res_in
        corner_idxs = gen_corner_idxs(res_in)
        # placeholder for current index
        z = 0
        # Loose upper bound of how many subgrids will be parsed in total
        # Will trim at the end
        num_subgrids = len(self.settings.years) * 366 * DEFAULT_DP_SPD[self.settings.res_in]
        # Initialize the arrays so that we are not constantly appending arrays,
        # instead we are just setting values
        for ele in self.settings.keys:
            self.subgrids[ele] = np.zeros(shape=(num_subgrids, res_in * res_in))
            # [year, day, min_y, max_y, min_x, max_x]
            self.locations[ele] = np.zeros(shape=(num_subgrids, 6))
            # [ll, lr, ul, ur, resid_avg, resid_range]
            self.norm_data[ele] = np.zeros(shape=(num_subgrids, 6))
        # Iterate over every position and parse the raw data into the internal data structure
        # Throw an exception (MaxDays) once the total number of days has been read.
        n_accepted = 0
        last_time = time.time()
        try:
            num_days = 0
            # Iterate over year
            for year in raw_data.years:
                logging.info('\n\nYear {}'.format(year))
                # Iterate over day
                for t in range(raw_data[year].num_days):
                    time_ = time.time() - last_time
                    logging.info('{}/{}, z {}, n_accpeted: {}, {:.5}s'.format(
                        t+1, raw_data[year].num_days,z,n_accepted,time_))
                    n_accepted = 0
                    last_time = time.time()
                    if num_days >= self.settings.num_days:
                        raise MaxDays('Maximum number of days parsed.')
                    num_days += 1
                    #Iterate over latitudes (non-overlapping res_in-sized tiles)
                    for y_ in range(math.floor(len(raw_data.lat)/res_in)):
                        y = res_in * y_
                        # Iterate over longitudes
                        for x_ in range(math.floor(len(raw_data.lon)/res_in)):
                            x = res_in * x_
                            # Check if the subgrid is valid (validity is judged
                            # on the 'temp' channel only).
                            if self._valid(raw_data[year]['temp'][t,y : y + res_in,x : x + res_in].flatten()):
                                # Append flattened arrays for each key
                                n_accepted += 1
                                for key in self.settings.keys:
                                    # Get corners, calculate statistics, set values
                                    self.locations[key][z,:] = [year, t, y, y + res_in, x, x + res_in]
                                    self.subgrids[key][z,:] = raw_data.data[year][key][
                                        t, y : y + res_in, x : x + res_in].flatten()
                                    corners = self.subgrids[key][z, corner_idxs]
                                    avg = np.mean(corners)
                                    # Normalization range: max |corner - avg|
                                    norm = np.max(np.absolute(self.subgrids[key][z,corner_idxs] - avg))
                                    self.norm_data[key][z,:] = np.array(np.append(corners, [avg, norm]))
                                    # log
                                    # logging.debug('subgrids:\n{}\n'.format(self.subgrids[key][z,:]))
                                    # logging.debug('locations:\n{}\n'.format(self.locations[key][z,:]))
                                    # logging.debug('corner_vals:\n{}\n'.format(corners))
                                    # logging.debug('norm_data:\n{}\n'.format(self.norm_data[key][z,:]))
                                z += 1
                            # else:
                            #     print('\n\ny: {}, x: {}'.format(x,y))
                            #     print('not accepted')
                            #     print('print(arr:\n{}'.format(raw_data[year]['temp'][t,
                            #         y : y + res_in,x : x + res_in].reshape(6,6)))
        except MaxDays:
            # Expected control-flow exit when num_days is reached.
            logging.debug('Total number of days read: num_days: {}, year {}, day: {}'.format(num_days,year,t))
        # Trim excess from arrays (z is the number actually accepted)
        for key in self.settings.keys:
            self.subgrids[key] = self.subgrids[key][0:z, :]
            self.locations[key] = self.locations[key][0:z, :]
            self.norm_data[key] = self.norm_data[key][0:z, :]
        # Setting the normalization constant to be the absolute maximum difference
        # Between the average value and the corners.
        # For each key, get the max norm, set it, and then normalize subgrids if necessary
        if not self.settings.denorm_local:
            logging.info('Denorm global')
            for key in self.settings.keys:
                logging.info('key: {}'.format(key))
                max_range = np.max(self.norm_data[key][:,-1])
                logging.info('max_range: {}'.format(max_range))
                self.norm_data[key][:,-1] = max_range
        # Set global variables
        self.settings.parsed = True
        self.settings.num_samples = z
        logging.info('Total number of samples: {}'.format(z))
    def split_data_idxs(self,division_format, **kwargs):
        '''Creates index arrays for testing, validation, and testing sets.
        `division_format` indicates the type of division that is done.
        kwargs is auxiliary information needed for the division
        Valid division formats: necessary in kwargs
            - `k-fold`: `k` (int), `randomize` (bool)
                * Creates `k` even sets of indices
                * If randomize is True, it will also randomize the base set
            - `year-fold`: None
                * Makes each year a separate set
            - `split`: `split_dict` (dict key -> floats), `randomize` (bool)
                * Splits the data into the proportion indicated by the values of the dictionary.
                * All the floats must be positive, greater than zero.
                * If `randomize` is True, it also shuffles the indices
        # Example split:
            - split_dict = {'training': 0.7, 'validation': 0.1, 'testing': 0.2}
            - 70% to training, 10% to validation, 20% to testing
        Output:
            d (dict)
                - key -> idx arrays
                - if the division format is `year-fold`, the keys are the years (strings)
                - Otherwise they are just ints corresponding to each fold
        '''
        logging.debug('division_format: {}'.format(division_format))
        logging.debug('kwargs: {}'.format(kwargs))
        valid_formats = ['k-fold','year-fold','split']
        if not division_format in valid_formats:
            raise DataProcessingError('`{}` is an invalid split data format'.format(division_format))
        idxs = np.arange(self.settings.num_samples)
        d = {} # return dictionary
        if division_format == 'year-fold':
            # Group sample indices by the year recorded in locations[:, 0].
            key_ = self.settings.keys[0]
            for i in range(len(self.locations[key_])):
                y_ = self.locations[key_][i,0]
                if y_ not in d:
                    d[y_] = []
                d[y_].append(i)
            return d
        elif division_format == 'k-fold':
            # Check to see if kwargs has k
            if len(kwargs) != 2 or 'k' not in kwargs or 'randomize' not in kwargs:
                raise DataProcessingError('Improper arguments for k-fold. Arguments: {}'.format(kwargs))
            # k-fold is just the 'split' case with k equal proportions.
            k = kwargs['k']
            randomize = kwargs['randomize']
            split = {}
            for i in range(k):
                split[i] = 1/k
        elif division_format == 'split':
            if len(kwargs) != 2 or 'split_dict' not in kwargs or 'randomize' not in kwargs:
                raise DataProcessingError('Improper arguments for split. Arguments: {}'.format(kwargs))
            split = kwargs['split_dict']
            randomize = kwargs['randomize']
            if (np.sum(list(split.values())) > 1) or (not np.all(i > 0 for i in list(split.values()))):
                raise DataProcessingError('Improper arguments for Random. Improper values for lst')
        if randomize:
            logging.debug('Shuffling base array indices')
            np.random.shuffle(idxs)
        ss = {}
        # Get the start and end index for each subset
        # Set the rest of the examples to the last set
        # NOTE(review): `i - 1 == len(keys)` below can never be true, so the
        # last fold never absorbs the floor() remainder — a few trailing
        # samples may be dropped. Presumably `i + 1 == len(keys)` with
        # `end = self.settings.num_samples` was intended; verify.
        prev_idx = 0
        keys = list(split.keys())
        for i in range(len(keys)):
            key = keys[i]
            if i - 1 == len(keys):
                end = len(keys)
            else:
                end = int(prev_idx + np.floor(self.settings.num_samples * split[key]))
            ss[key] = (prev_idx, end)
            logging.debug('{} start idx: {}, end idx: {}'.format(
                key,prev_idx,end))
            prev_idx = end
        for key,val in ss.items():
            start = ss[key][0]
            end = ss[key][1]
            d[key] = idxs[start: end]
        return d
    def make_array(self,idxs = None,input_keys = None,
        output_key = None, norm_input = True):
        '''Concatenates the corners together to form the input.
        Returns the truth of the input and the denormalization information
        as well (packed into a DataWrapper).
        Only makes the array for the designated indices passed in (idx).
        If `idxs` are not set, make everything.
        Creates a dictionary that maps the locations of the subgrids to the
        place where it is put in the return datastructure.
        idxs (int, list(int), None)
            - if None, make everything
            - if type(int), make everything up to index `idxs`
            - if it is a list of ints, use these as the indices to draw them out
        input_keys (list(str), Optional)
            - List of input keys to put in the input
            - If nothing is passed in, everything is made
        output_key (str)
            - Output truth.
            - If nothing is passed in, 'temp' is used as the output
        norm_input (bool)
            - If True, it normalizes the corners of the input
            - If False, no normalization of the input is done
            - Default is True
        '''
        if not self.settings.parsed:
            raise DataProcessingError('make_array: Data is not parsed yet.')
        if idxs is None:
            # Make everything
            idxs = np.arange(self.settings.num_samples)
        if type(idxs) == int:
            idxs = np.arange(idxs)
        # Defaults to all the keys
        if input_keys == None:
            input_keys = self.settings.keys
        if output_key == None:
            output_key = DEFAULT_DP_OUTPUT_KEY
        # (str,int,int,int,int,int) -> int
        loc_to_idx = {}
        logging.debug('num samples: {}, input_keys: {}, output_key: {}'.format(
            len(idxs), input_keys, output_key))
        num_samples = len(idxs)
        corner_idxs = gen_corner_idxs(self.settings.res_in)
        # Multiplied by 4 because there are 4 corners
        X = np.zeros(shape = (num_samples, len(input_keys) * 4))
        output_array = np.zeros(shape = (num_samples, self.settings.res_in ** 2))
        norm_data = np.zeros(shape = (num_samples, NORM_LENGTH))
        locations = np.zeros(shape = (num_samples, LOCATIONS_LENGTH))
        for i in range(num_samples):
            if i % 50000 == 0:
                logging.info('{}/{}'.format(i,num_samples))
            idx = idxs[i]
            arr = np.array([])
            for key in input_keys:
                temp_arr = np.array(self.subgrids[key][idx,corner_idxs])
                if norm_input:
                    # Normalize corners: (value - corner_avg) / norm_range
                    temp_arr = (temp_arr - self.norm_data[key][idx,-2])/ \
                        self.norm_data[key][idx,-1]
                arr = np.append(arr, temp_arr)
            X[i,:] = arr.copy()
            output_array[i,:] = self.subgrids[output_key][idx,:]
            norm_data[i,:] = self.norm_data[output_key][idx,:]
            locations[i,:] = self.locations[output_key][idx,:]
            key_ = tuple(locations[i,:].tolist())
            loc_to_idx[key_] = i
        return DataWrapper(
            X = X,
            input_keys = input_keys,
            y_true = output_array,
            output_key = output_key,
            norm_data = norm_data,
            locations = locations,
            loc_to_idx = loc_to_idx,
            res = self.settings.res_in)
    def save(self, savepath = None):
        '''Save the object. Use netcdf files because the object is potentially
        very large (too large for pickle).
        Store arrays in netcdf files, pickle Settings object.
        -----------
        Args
        -----------
        savepath (None or str)
            - If savepath != None, overwrite self.savepath
        '''
        if savepath != None:
            self.settings.savepath = savepath
        check_savepath_valid(self.settings.savepath)
        if self.settings.savepath[-1] != '/':
            self.settings.savepath += '/'
        # Make all paths that youre going to save into
        basepath = self.settings.savepath
        subgridspath = basepath + 'subgrids.nc'
        locationspath = basepath + 'locations.nc'
        norm_datapath = basepath + 'norm_data.nc'
        settingspath = basepath + 'settings.pkl'
        check_savepath_valid(subgridspath)
        check_savepath_valid(locationspath)
        check_savepath_valid(norm_datapath)
        check_savepath_valid(settingspath)
        # Save the data - pickle the settings object
        self._save_data_obj_netcdf(subgridspath, self.subgrids)
        self._save_data_obj_netcdf(locationspath, self.locations)
        self._save_data_obj_netcdf(norm_datapath, self.norm_data)
        saveobj(self.settings, settingspath)
    @classmethod
    def load(cls,loadpath):
        '''Load object from the passed in load path (the directory written
        by save()): unpickle Settings, then read the three netCDF files.
        '''
        if not os.path.exists(loadpath):
            raise DataProcessingError('Data.load: {} is not a valid path'.format(loadpath))
        if loadpath[-1] != '/':
            loadpath += '/'
        # Check if the necessary files exist
        subgridspath = loadpath + 'subgrids.nc'
        locationspath = loadpath + 'locations.nc'
        norm_datapath = loadpath + 'norm_data.nc'
        settingspath = loadpath + 'settings.pkl'
        logging.debug('Loading settings')
        ret = cls(settings = loadobj(settingspath))
        logging.debug('Loading subgrids')
        ret.subgrids = ret._load_data_obj_netcdf(subgridspath)
        logging.debug('Loading locations')
        ret.locations = ret._load_data_obj_netcdf(locationspath)
        logging.debug('Loading norm_data')
        ret.norm_data = ret._load_data_obj_netcdf(norm_datapath)
        return ret
    def _load_data_obj_netcdf(self,path):
        # Read one netCDF file written by _save_data_obj_netcdf back into a
        # dict of key -> 2-D numpy array.
        f = Dataset(path, 'r', DEFAULT_DP_NETCDF_FORMAT)
        # Get dimensions (read but unused; kept for symmetry with save)
        num_samples = len(f.dimensions['num_samples'])
        _dim = len(f.dimensions['_dim'])
        d = {}
        for key in self.settings.keys:
            d[key] = f.variables[key][:].copy()
        f.close()
        return d
    def _save_data_obj_netcdf(self, path, d):
        '''Save data in netcdf.
        `d` is a dictionary of multidimensional arrays of the same size
        (one 2-D float array per data key).
        '''
        data = Dataset(path, 'w', format = DEFAULT_DP_NETCDF_FORMAT)
        (num_samples, _dim) = d[self.settings.keys[0]].shape
        # Create global variables (dataset provenance / geospatial metadata)
        data.raw_data_source = 'HYCOM GOMl0.04 experiment 20.1, Naval Research Laboratory'
        data.save_loc = path[0:-2]
        data.geospatial_lat_min = '18.0916 degrees'
        data.geospatial_lat_max = '31.9606 degrees'
        data.geospatial_lon_min = '-98 degrees'
        data.geospatial_lon_max = '-76.4 degrees'
        # Create dimensions
        data.createDimension('num_samples', num_samples)
        data.createDimension('_dim', _dim)
        # Create variables
        save_dict = {}
        for key in self.settings.keys:
            save_dict[key] = data.createVariable(key, np.float32, ('num_samples', '_dim'))
        # Set variables
        for key in self.settings.keys:
            save_dict[key][:] = d[key]
        # Close file
        data.close()
################
# Array Wrapper Classes
################
class DataWrapper(IOClass):
    '''Wrapper class to hold a set of input and truth arrays.
    Includes metadata about the arrays.
    Provides functions for manipulating the arrays and for retrieving batches
    of data (Used for training in comparison_methods.neural_network.TFWrapper)
    '''
    def __init__(self, X, input_keys, y_true, output_key,
        norm_data, locations, loc_to_idx, res):
        '''
        -----------
        args
        -----------
        X (numpy array)
            - The array used as the input to the desired function. This function could be an
              interpolation function or some other kind.
            - In this implementation we can assume that this are the corners of a subgrid.
            - The first dimension is how you index each of the samples, and the second dimension
              are the actual samples.
        input_keys (list or str)
            - The keys that the input corresponds to.
        y_true (numpy array)
            - The desired output of each of the samples.
            - The length of the first dimension of the `X` and `y_true` are identical.
        output_key (str)
            - The key to which the output corresponds to
        norm_data (numpy array)
            - The normalization data used to normalize and denormalize the input or output arrays
        locations (numpy array)
            - Indicates the location spatially and temporally where this subgrid belongs
        loc_to_idx (dict [6-tuple -> int])
            - maps the location of a subgrid as a tuple to an integer
        res (int)
            - Resolution that the corners are derived from the base.
        '''
        IOClass.__init__(self)
        self.input_keys = input_keys
        self.output_key = output_key
        self.X = X
        self.y_true = y_true
        self.norm_data = norm_data
        self.locations = locations
        self.loc_to_idx = loc_to_idx
        # Normalization state flags for the output/input arrays respectively.
        self.y_normalized = False
        self.X_normalized = False
        self.res = res
        self.shape = {'X': self.X.shape, 'y_true': self.y_true.shape}
        self.input_size = self.X.shape[1]
        self.output_size = self.y_true.shape[1]
        # Set to True once batching initialized
        self.batching_initialized = False
    def __len__(self):
        return self.X.shape[0]
    def transform_X(self, func):
        '''
        DOES NOT SET THE INPUT ARRAY TO WHAT IS RETURNED. MUST DO THAT MANUALLY.
        '''
        return self._transform(func, self.X)
    def transform_y_true(self, func):
        '''
        DOES NOT SET THE OUTPUT ARRAY TO WHAT IS RETURNED. MUST DO THAT MANUALLY.
        '''
        return self._transform(func, self.y_true)
    def denormalize(self, output = None):
        '''
        output (bool or None)
            - If True, denormalize y_true
            - If False, denormalize X
            - If None, denormalize both
        '''
        if (output is None or output is True) and self.y_normalized:
            self.y_true = Denormalize(
                arr = self.y_true, norm_data = self.norm_data, res = self.res,
                output = True)
            self.y_normalized = False
        if (output is None or output is False) and self.X_normalized:
            self.X = Denormalize(
                arr = self.X, norm_data = self.norm_data, res = self.res,
                output = False)
            self.X_normalized = False
        return self
    def normalize(self, output = None):
        '''
        output (bool or None)
            - If True, normalize y_true
            - If False, normalize X
            - If None, normalize both
        '''
        if (output is None or output is True) and not self.y_normalized:
            self.y_true = Normalize(
                arr = self.y_true, norm_data = self.norm_data, res = self.res,
                output = True)
            self.y_normalized = True
        if (output is None or output is False) and not self.X_normalized:
            self.X = Normalize(
                arr = self.X, norm_data = self.norm_data, res = self.res,
                output = False)
            self.X_normalized = True
        return self
    def _transform(self, func, arr):
        '''
        Applies an arbitrary transform to the input array for each sample.
        This function is called from `transform_X` and `transform_y_true`.
        ---------
        args
        ---------
        func (function with output)
            - A function that takes in a flattened vector and outputs a value,
              which is then set as the output vector.
            - Assumes the dimension of the output is always the same.
        arr (numpy array)
            - Array that the transformation is being done on
        '''
        return np.apply_along_axis(func,1,arr)
    def delete_idxs(self, idxs):
        '''OPPOSITE OF `keep_idxs`
        Deletes the indices that are specified in `idxs`.
        This is useful when you want to divide up a set of data into subsets
        based on some preprocessing like clustering.
        idxs (list(ints))
        '''
        if np.max(idxs) > self.X.shape[0] - 1:
            logging.critical('max_idx: {}, len: {}'.format(np.max(idxs), self.X.shape[0] - 1))
            raise DataProcessingError('InputDataWrapper.delete_idxs: Max idx greater than length')
        # Invert the idxs and call keep_idxs
        # BUGFIX: the original `set(np.arange(n) - set(idxs))` subtracted a
        # set from an ndarray (TypeError); the set difference must be taken
        # between two sets.
        idxs = list(set(range(self.X.shape[0])) - set(idxs))
        self.keep_idxs(idxs)
        return self
    def keep_idxs(self, idxs):
        '''OPPOSITE OF `delete_idxs`
        Keeps only the indices that are specified in `idxs`.
        This is useful when you want to divide up a set of data into subsets
        based on some preprocessing like clustering.
        idxs (list(ints))
        '''
        if np.max(idxs) > self.X.shape[0] - 1:
            logging.critical('max_idx: {}, len: {}'.format(np.max(idxs), self.X.shape[0] - 1))
            raise DataProcessingError('InputDataWrapper.keep_idxs: Max idx greater than length')
        self.X = self.X[idxs,:]
        self.y_true = self.y_true[idxs,:]
        self.norm_data = self.norm_data[idxs,:]
        self.locations = self.locations[idxs,:]
        self.shape = {'X': self.X.shape, 'y_true': self.y_true.shape}
        # redo `self.loc_to_idx`
        self.loc_to_idx = {}
        for i in range(self.locations.shape[0]):
            self.loc_to_idx[tuple(self.locations[i,:])] = i
        return self
    ##############
    # Batch training functionality
    ##############
    def initialize_batching(self,batch_size):
        '''
        Set up function to allow batch retrieval of data
        We cannot shuffle the original dataset because it would
        mess up the `loc_to_idx` dictionary. Instead, we shuffle a
        list of indices that we use to index the main arrays.
        All functions are applied to an array of indices.
            - shuffle
                * shuffle the index array
            - get_batch
                * get the next set of data based on the index array
        '''
        self.batching_initialized = True
        self.batch_size = batch_size
        self.base_idx = 0
        self.idxs = np.arange(self.X.shape[0])
        np.random.shuffle(self.idxs)
        return self
    def shuffle(self):
        if not self.batching_initialized:
            raise DataProcessingError('Batching not initialized. Call `initialize_batching`')
        np.random.shuffle(self.idxs)
        return self
    def get_batch(self):
        '''Get the next batch of data
        '''
        if not self.batching_initialized:
            raise DataProcessingError('Batching not initialized. Call `initialize_batching`')
        Xs = self.X[self.idxs[self.base_idx: self.base_idx + self.batch_size], :]
        ys = self.y_true[self.idxs[self.base_idx: self.base_idx + self.batch_size], :]
        self.base_idx += self.batch_size
        return Xs,ys
    def restart_batching(self):
        if not self.batching_initialized:
            raise DataProcessingError('Batching not initialized. Call `initialize_batching`')
        self.base_idx = 0
        self.shuffle()
        return self
class DataProcessingError(Exception):
    """Signals a misuse or failure inside the data-processing wrappers."""
class MaxDays(Exception):
    """Signals that a maximum-days limit has been reached."""
| dkaplan65/residnet | residnet/data_processing/wrappers.py | wrappers.py | py | 31,423 | python | en | code | 2 | github-code | 13 |
from rostelecom.models import Project
class ProjectManager:
    """Thin CRUD helper around the ``Project`` model.

    Every method returns plain dictionaries in the API response shape
    (``uid``/``name``/``canvas``) rather than model instances.
    """
    def __init__(self):
        self.model = Project

    @staticmethod
    def _to_dict(instance):
        """Serialize a Project instance into the API response shape."""
        return {"uid": instance.guid,
                "name": instance.name,
                "canvas": instance.canvas}

    def get_all_project(self):
        """Return every project as a list of response dicts."""
        return [self._to_dict(instance) for instance in self.model.objects.all()]

    def create_project(self, params):
        """Create a project from ``params`` and return its response dict."""
        data = {"name": params.get("name"),
                "canvas": params.get("canvas")}
        instance = self.model.objects.create(**data)
        return self._to_dict(instance)

    def get_project(self, guid):
        """Return the response dict for the project with the given guid.

        NOTE(review): raises AttributeError if no project matches — confirm
        callers guarantee the guid exists.
        """
        instance = self.model.objects.filter(guid=guid).first()
        return self._to_dict(instance)

    def update_project(self, params, guid):
        """Update name and/or canvas of a project; return its response dict."""
        instance = self.model.objects.filter(guid=guid).first()
        name = params.get("name")
        canvas = params.get("canvas")
        # Only overwrite fields that were actually supplied (and truthy).
        if name:
            instance.name = name
        if canvas:
            instance.canvas = canvas
        instance.save()
        return self._to_dict(instance)

    def delete_project(self, guid):
        """Delete the project with the given guid; return the queryset result."""
        return self.model.objects.filter(guid=guid).delete()
| kulagind/Hackaton | rostelecom/project_manager.py | project_manager.py | py | 1,431 | python | en | code | 0 | github-code | 13 |
from delft.utilities.Tokenizer import tokenizeAndFilterSimple, tokenizeAndFilter
class TestTokenizer:
    """Unit tests for the tokenizers in delft.utilities.Tokenizer."""

    def test_tokenizer_filter_simple(self):
        """Tokens only; punctuation is split into separate tokens."""
        # Renamed local from `input` to `text`: don't shadow the builtin.
        text = 'this is a test, but a stupid test!!'
        output = tokenizeAndFilterSimple(text)
        assert len(output) == 11
        assert output == ['this', 'is', 'a', 'test', ',', 'but', 'a', 'stupid', 'test', '!', '!']

    def test_tokenizer_filter(self):
        """Tokens plus their (start, end) character offsets."""
        text = 'this is a test, but a stupid test!!'
        output = tokenizeAndFilter(text)
        assert len(output) == 2
        assert output[0] == ['this', 'is', 'a', 'test', ',', 'but', 'a', 'stupid', 'test', '!', '!']
        assert output[1] == [(0, 4), (5, 7), (8, 9), (10, 14), (14, 15), (16, 19), (20, 21), (22, 28), (29, 33),
                             (33, 34), (34, 35)]

    def test_tokenizer_filter_simple_with_breaklines(self):
        """Leading/embedded newlines and non-ASCII symbols are handled."""
        text = '\nthis is yet \u2666 another, dummy... test,\na [stupid] test?!'
        output = tokenizeAndFilterSimple(text)
        assert len(output) == 19
        assert output == ['this', 'is', 'yet', '\u2666', 'another', ',', 'dummy', '.', '.', '.', 'test', ',', 'a',
                          '[', 'stupid', ']', 'test', '?', '!']

    def test_tokenizer_filter_with_breaklines(self):
        """Offsets skip over newline characters correctly."""
        text = '\nthis is yet \u2666 another, dummy... test,\na [stupid] test?!'
        output = tokenizeAndFilter(text)
        assert len(output) == 2
        assert output[0] == ['this', 'is', 'yet', '\u2666', 'another', ',', 'dummy', '.', '.', '.', 'test', ',', 'a',
                             '[', 'stupid', ']', 'test', '?', '!']
        assert output[1] == [(1, 5), (6, 8), (9, 12), (13, 14), (15, 22), (22, 23), (24, 29), (29, 30), (30, 31),
                             (31, 32), (33, 37), (37, 38), (39, 40), (41, 42), (42, 48), (48, 49), (50, 54), (54, 55),
                             (55, 56)]
| kermitt2/delft | tests/utils/test_tokenizer.py | test_tokenizer.py | py | 1,889 | python | en | code | 377 | github-code | 13 |
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
import pytest
import requests
# Paths used throughout the test suite: the fixtures directory, the elsa
# driver script, and the freeze output directory with its key artifacts.
FIXTURES = 'tests/fixtures'
SCRIPT = 'website.py'
BUILDDIR = '_build'
INDEX = os.path.join(BUILDDIR, 'index.html')
CNAME = os.path.join(BUILDDIR, 'CNAME')
# Same artifacts, resolved relative to the fixtures directory (used when the
# tests run the fixtures copy of website.py).
SCRIPT_FIXTURES = os.path.join(FIXTURES, SCRIPT)
BUILDDIR_FIXTURES = os.path.join(FIXTURES, BUILDDIR)
INDEX_FIXTURES = os.path.join(FIXTURES, INDEX)
CNAME_FIXTURES = os.path.join(FIXTURES, CNAME)
def parametrized_fixture(*args, **kwargs):
    """Build a module-scoped pytest fixture parametrized over the arguments.

    Positional args are used directly as parameter values; keyword args map a
    readable param id to the value the fixture should return for it.
    """
    @pytest.fixture(scope='module', params=args + tuple(kwargs.keys()))
    def fixture(request):
        # Keyword-provided params: the id is the key, the value is returned.
        if request.param in kwargs:
            return kwargs[request.param]
        return request.param
    return fixture
def is_true(option: str) -> bool:
    """Report whether a command line option is affirmative.

    An option means true exactly when it is not a ``--no-*`` variant.
    """
    negated = option.startswith('--no-')
    return not negated
# Parametrized fixtures: each test that takes one of these runs once per
# parameter (e.g. once with '--cname' and once with '--no-cname').
cname = parametrized_fixture(cname='--cname', no_cname='--no-cname')
push = parametrized_fixture(push='--push', no_push='--no-push')
serve_command = parametrized_fixture(serve=['serve'],
                                     freeze_serve=['freeze', '--serve'])
port = parametrized_fixture(8001, 8080)
host = parametrized_fixture('localhost', '127.0.0.1', '0.0.0.0')
domain = parametrized_fixture('foo.bar', 'spam.eggs')
protocol = parametrized_fixture('http', 'https')
def run_cmd(cmd, **kwargs):
    """Same as ``subprocess.run``, but with more appropriate defaults:
    raise on failure, 15s timeout, captured text stdout.
    """
    for option, value in (
        ('check', True),
        ('timeout', 15),
        ('stdout', subprocess.PIPE),
        ('universal_newlines', True),
    ):
        kwargs.setdefault(option, value)
    print('SH:', *cmd)
    return subprocess.run(cmd, **kwargs)
class CommandFailed(Exception):
    """Raised when a command fails."""
    # A custom exception is needed because subprocess.CalledProcessError
    # cannot be raised from an already-completed Popen object; if there were
    # a documented way to do that, CalledProcessError would be preferable.
class CommandNotFailed(Exception):
    """Raised when a command that was expected to fail succeeded instead."""
class ElsaRunner:
    '''
    Class for elsa fixture enabling blocking or background runs

    If there is a local website.py in pwd, uses that one;
    otherwise uses the one from fixtures.
    '''
    def run(self, *command, script=None, should_fail=False):
        """Run the elsa script with `command`, blocking until it exits.

        Raises CommandFailed when it fails unexpectedly, CommandNotFailed
        when `should_fail` is set but the command succeeded.
        """
        print('COMMAND: python website.py', *command)
        try:
            cr = subprocess.run(
                self.create_command(command, script), check=not should_fail,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
        except subprocess.CalledProcessError as e:
            # Forward the child's output so pytest shows it on failure.
            sys.stdout.write(e.stdout)
            sys.stderr.write(e.stderr)
            raise CommandFailed('return code was {}'.format(e.returncode))
        sys.stdout.write(cr.stdout)
        sys.stderr.write(cr.stderr)
        if should_fail and cr.returncode == 0:
            raise CommandNotFailed('return code was 0')
        return cr
    @contextmanager
    def run_bg(self, *command, script=None, should_fail=False,
               assert_running_on=None):
        """Run the elsa script in the background for the `with` block.

        Waits for the dev server to report readiness on stderr before
        yielding, and shuts the server down afterwards via its
        /__shutdown__/ endpoint.
        """
        print('COMMAND IN BACKGROUND: python website.py', *command)
        port = self.parse_port(command)
        proc = subprocess.Popen(self.create_command(command, script),
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        # Wait for the server to start,
        # i.e. wait for the first line on stderr:
        #  * Running on http://127.0.0.1:8003/ (Press CTRL+C to quit)
        line = proc.stderr.readline()
        sys.stderr.write(line)
        if 'Traceback' in line:
            # Get all of the traceback
            _, errs = proc.communicate(timeout=1)
            sys.stderr.write(errs)
        else:
            lines = [line.strip()]
            # With the serve command, Flask is running in debug and restarts
            # the server, so we'll also wait for next lines:
            #  * Restarting with stat
            #  * Debugger is active!
            #  * Debugger PIN: ...
            # (The stdout lines might come in either order, depending on OS.)
            if command[0] == 'serve':
                for i in range(3):
                    line = proc.stderr.readline()
                    sys.stderr.write(line)
                    lines.append(line.strip())
            # Here we test user-facing messages, which Flask is free to change.
            # I see no better way to check that the --host option
            # got through to the dev server.
            if assert_running_on is not None:
                msg = '* Running on {} (Press CTRL+C to quit)'
                assert msg.format(assert_running_on) in lines
        yield proc
        try:
            # Shutdown the server via POST request
            url = 'http://localhost:{}/__shutdown__/'.format(port)
            print('Shutting down via', url)
            requests.post(url)
        except Exception as e:
            print(e)
        try:
            _, errs = proc.communicate(timeout=5)
        except subprocess.TimeoutExpired:
            proc.kill()
            _, errs = proc.communicate()
        sys.stderr.write(errs)
        # werkzeug.server.shutdown does:
        #  * 247 on debug
        #  * 0 on non-debug
        #  * 15 on Windows
        if proc.returncode not in (0, 15, 247):
            if not should_fail:
                raise CommandFailed(
                    'return code was {}'.format(proc.returncode))
        elif should_fail:
            raise CommandNotFailed(
                'return code was {}'.format(proc.returncode))
    def finalize(self):
        """Remove the fixtures build directory created by a test run."""
        self.lax_rmtree(BUILDDIR_FIXTURES)
    @classmethod
    def create_command(cls, command, script):
        """Build the full argv to run `script` (or SCRIPT) with `command`."""
        script = script or SCRIPT
        # Fall back to the fixtures copy when the script is not in pwd.
        # (Fixes the original no-op `script = script` branch.)
        if not os.path.exists(script):
            script = os.path.join(FIXTURES, script)
        command = tuple(str(item) for item in command)
        return (sys.executable, script) + command
    @classmethod
    def parse_port(cls, command):
        """Return the port given via --port, defaulting to 8003."""
        if '--port' in command:
            return int(command[command.index('--port') + 1])
        return 8003
    @classmethod
    def lax_rmtree(cls, path):
        """shutil.rmtree that tolerates a missing path."""
        try:
            shutil.rmtree(path)
        except FileNotFoundError:
            pass
@pytest.fixture
def elsa():
    """Yield an ElsaRunner; always clean up its build artifacts afterwards."""
    er = ElsaRunner()
    try:
        yield er
    finally:
        er.finalize()
def commit_info():
    """Return (and echo) the full ``git show`` output for the gh-pages tip."""
    output = run_cmd(
        ['git', '--no-pager', 'show', 'gh-pages', '--no-color']
    ).stdout.strip()
    print(output)
    return output
def commits():
    """Return the gh-pages history as a list of '<hash> <subject>' lines."""
    log_output = run_cmd(
        ['git', '--no-pager', 'log', '--pretty=format:%h %s', 'gh-pages']
    ).stdout.strip()
    print(log_output)
    return log_output.splitlines()
def assert_commit_author(commit):
    """Fail unless the commit text names the expected test author."""
    expected = 'Author: Tester Example <tester@example.org>'
    assert expected in commit
def was_pushed(*, remote='origin', branch='gh-pages'):
    """Return True iff the local branch tip matches the remote-tracking tip.

    If the remote-tracking ref does not exist (git exits with 128), the
    remote side is treated as absent and the comparison yields False.
    """
    cmd = ['git', 'rev-parse', branch]
    local = run_cmd(cmd).stdout.strip()
    cmd = ['git', 'rev-parse', '{}/{}'.format(remote, branch)]
    result = run_cmd(cmd, check=False)
    if result.returncode == 128:
        # Remote-tracking branch missing; reuse `remote` as the "no sha" marker.
        remote = None
    else:
        result.check_returncode()
        remote = result.stdout.strip()
    return remote == local
@pytest.fixture
def gitrepo(tmpdir):
    '''
    This fixture creates a git repository with website.py in tmpdir.

    The repo has one initial commit, a configured test identity, and an
    'origin' remote pointing at a bare repository in the same tmpdir (so
    pushes during the tests never leave the sandbox).
    '''
    repo = tmpdir.mkdir('repo')
    bare = tmpdir.mkdir('bare')
    script = repo.join(SCRIPT)
    # Copy the fixtures website.py into the fresh repository.
    with open(SCRIPT_FIXTURES) as f:
        script.write(f.read())
    with bare.as_cwd():
        run_cmd(['git', 'init', '--bare'])
    with repo.as_cwd():
        run_cmd(['git', 'init'])
        run_cmd(['git', 'add', SCRIPT])
        run_cmd(['git', 'config', 'user.email', 'tester@example.org'])
        run_cmd(['git', 'config', 'user.name', 'Tester Example'])
        run_cmd(['git', 'remote', 'add', 'origin', str(bare)])
        run_cmd(['git', 'commit', '-m', 'Initial commit'])
        yield repo
# Self-tests of the ElsaRunner fixture: failing commands must raise
# CommandFailed unless should_fail=True, and vice versa (CommandNotFailed).
def test_elsa_fixture_bad_exit_status(elsa):
    with pytest.raises(CommandFailed):
        elsa.run('not', 'a', 'chance')
def test_elsa_fixture_bad_exit_status_should_fail(elsa):
    elsa.run('not', 'a', 'chance', should_fail=True)
def test_elsa_fixture_bad_exit_status_bg(elsa):
    with pytest.raises(CommandFailed):
        with elsa.run_bg('not', 'a', 'chance'):
            pass
def test_elsa_fixture_bad_exit_status_bg_should_fail(elsa):
    with elsa.run_bg('not', 'a', 'chance', should_fail=True):
        pass
def test_elsa_fixture_good_exit_status_should_fail(elsa):
    with pytest.raises(CommandNotFailed):
        elsa.run('freeze', should_fail=True)
def test_elsa_fixture_good_exit_status_bg_should_fail(elsa):
    with pytest.raises(CommandNotFailed):
        with elsa.run_bg('serve', should_fail=True):
            pass
# Serving tests: run the dev server in the background and hit it over HTTP.
def test_serve(elsa):
    with elsa.run_bg('serve'):
        assert 'SUCCESS' in requests.get('http://localhost:8003/').text
def test_port(elsa, port, serve_command):
    """The --port option is honored by both `serve` and `freeze --serve`."""
    host = '127.0.0.1'
    with elsa.run_bg(
            *serve_command, '--port', port,
            assert_running_on='http://{}:{}/'.format(host, port),
    ):
        url = 'http://localhost:{}/'.format(port)
        assert 'SUCCESS' in requests.get(url).text
def test_host(elsa, host, serve_command):
    """The --host option is passed through to the dev server."""
    port = 8080
    with elsa.run_bg(
            *serve_command, '--host', host, '--port', port,
            assert_running_on='http://{}:{}/'.format(host, port),
    ):
        url = 'http://localhost:{}/'.format(port)
        assert 'SUCCESS' in requests.get(url).text
def test_cname(elsa, cname, serve_command):
    """The CNAME file is served only when --cname is in effect."""
    code = 200 if is_true(cname) else 404
    with elsa.run_bg(*serve_command, cname):
        assert requests.get('http://localhost:8003/CNAME').status_code == code
# Freeze tests: `elsa freeze` writes the static site into the build dir.
def test_freeze(elsa):
    elsa.run('freeze')
    with open(INDEX_FIXTURES) as f:
        assert 'SUCCESS' in f.read()
def test_freeze_mishmash(elsa):
    with pytest.raises(CommandFailed):
        # This script has a mime type mishmash
        elsa.run('freeze', script='mishmash.py')
def test_freeze_different_warning_is_fine(elsa):
    # This script has a PendingDeprecationWarning
    elsa.run('freeze', script='warning.py')
# tests just success of the command
def test_freeze_mishmash_decent_error_msg(elsa, capsys):
    """A mime-type mismatch produces a readable error, not a traceback."""
    elsa.run('freeze', script='mishmash.py', should_fail=True)
    out, err = capsys.readouterr()
    print('OUT', out)
    print('ERR', err)
    assert 'Traceback' not in err
    assert 'does not match' in err
def test_freeze_cname(elsa):
    elsa.run('freeze')
    with open(CNAME_FIXTURES) as f:
        assert f.read().strip() == 'example.org'
def test_freeze_no_cname(elsa):
    elsa.run('freeze', '--no-cname')
    assert not os.path.exists(CNAME_FIXTURES)
def test_freeze_base_url(elsa, protocol, domain):
    """--base-url determines the domain written into CNAME."""
    url = '{}://{}'.format(protocol, domain)
    elsa.run('freeze', '--base-url', url)
    with open(CNAME_FIXTURES) as f:
        assert f.read().strip() == domain
def test_freeze_serve(elsa):
    with elsa.run_bg('freeze', '--serve'), open(INDEX_FIXTURES) as f:
        assert 'SUCCESS' in f.read()
    assert 'SUCCESS' in requests.get('http://localhost:8003/').text
def test_freeze_path(elsa, tmpdir, cname):
    """--path redirects the freeze output to a custom directory."""
    path = tmpdir.join('foo')
    elsa.run('freeze', '--path', path, cname)
    assert path.check(dir=True)
    assert path.join('index.html').check(file=True)
    assert is_true(cname) == path.join('CNAME').check()
# Deploy tests: `elsa deploy` commits the frozen site to gh-pages and
# optionally pushes it to the origin remote (a local bare repo fixture).
def test_deploy_files(elsa, cname, push, gitrepo):
    elsa.run('deploy', cname, push)
    with open(INDEX) as f:
        assert 'SUCCESS' in f.read()
    assert is_true(cname) == os.path.exists(CNAME)
def test_deploy_git(elsa, cname, push, gitrepo):
    elsa.run('deploy', cname, push)
    commit = commit_info()
    assert '.nojekyll' in commit
    assert 'index.html' in commit
    assert 'SUCCESS' in commit
    assert is_true(cname) == ('CNAME' in commit)
    assert_commit_author(commit)
    assert is_true(push) == was_pushed()
def test_deploy_nopush_does_not_remove_remote_tracking_branch(elsa, gitrepo):
    """--no-push rewrites the local gh-pages but leaves origin/gh-pages intact."""
    run_cmd(['git', 'checkout', '--orphan', 'gh-pages'])
    run_cmd(['git', 'rm', SCRIPT, '-f'])
    run_cmd(['touch', 'testfile1'])
    run_cmd(['git', 'add', 'testfile1'])
    run_cmd(['git', 'commit', '-m', 'commit 1'])
    run_cmd(['touch', 'testfile2'])
    run_cmd(['git', 'add', 'testfile2'])
    run_cmd(['git', 'commit', '-m', 'commit 2'])
    run_cmd(['git', 'push', '-u', 'origin', 'gh-pages'])
    run_cmd(['git', 'checkout', 'master'])
    elsa.run('deploy', '--no-push')
    run_cmd(['git', 'checkout', 'gh-pages'])
    assert len(commits()) == 1
    run_cmd(['git', 'reset', '--hard', 'origin/gh-pages'])
    assert len(commits()) == 2
def test_deploy_twice_only_one_commit(elsa, push, gitrepo):
    """Deploy always recreates gh-pages as a single commit."""
    elsa.run('deploy', push)
    elsa.run('deploy', push)
    assert len(commits()) == 1
    assert 'SUCCESS' in commit_info()
def test_deploy_without_explicit_push_switch(elsa, gitrepo):
    """Omitting --push/--no-push warns about deprecation but still pushes."""
    completed = elsa.run('deploy')
    assert 'deprecated' in completed.stderr
    assert was_pushed()
@pytest.mark.parametrize('path', ('custom_path', 'default_path'))
def test_freeze_and_deploy(elsa, tmpdir, path, gitrepo):
    """Freezing first and then deploying works with default and custom paths."""
    freeze_command = ['freeze']
    deploy_command = ['deploy', '--no-push']
    if path == 'custom_path':
        path = tmpdir.join('foo')
        args = ['--path', path]
        freeze_command += args
        deploy_command += args
    elsa.run(*freeze_command)
    elsa.run(*deploy_command)
    commit = commit_info()
    assert 'SUCCESS' in commit
    assert_commit_author(commit)
# Output-hygiene tests: the remote URL (which may embed credentials) must not
# leak into deploy output unless explicitly requested.
def test_remote_not_displayed_when_pushing(elsa, gitrepo, capsys):
    elsa.run('deploy', '--push')
    out, err = capsys.readouterr()
    print('OUT', out)
    print('ERR', err)
    assert '/bare' not in out
    assert '/bare' not in err
def test_remote_not_displayed_when_pushing_fails(elsa, gitrepo, capsys):
    url = 'https://example.com'
    run_cmd(['git', 'remote', 'set-url', 'origin', url])
    capsys.readouterr() # flush
    elsa.run('deploy', '--push', should_fail=True)
    out, err = capsys.readouterr()
    print('OUT', out)
    print('ERR', err)
    assert url not in out
    assert url not in err
def test_push_error_displayed_when_explicitly_asked_for(elsa, gitrepo, capsys):
    """--show-git-push-stderr opts back into showing git's own error output."""
    url = 'https://example.com'
    run_cmd(['git', 'remote', 'set-url', 'origin', url])
    capsys.readouterr() # flush
    elsa.run('deploy', '--push', '--show-git-push-stderr', should_fail=True)
    out, err = capsys.readouterr()
    print('OUT', out)
    print('ERR', err)
    assert url in err
    assert 'not found' in err
def test_traceback_not_displayed_when_pushing_fails(elsa, gitrepo, capsys):
    run_cmd(['git', 'remote', 'set-url', 'origin', 'foo'])
    elsa.run('deploy', '--push', should_fail=True)
    out, err = capsys.readouterr()
    print('OUT', out)
    print('ERR', err)
    assert 'Traceback' not in err
    assert 'Error: git push failed (exit status 128)' in err
def test_deploy_different_remote(elsa, push, gitrepo):
    """--remote selects a non-default remote to push to."""
    remote = 'foo'
    run_cmd(['git', 'remote', 'rename', 'origin', remote])
    elsa.run('deploy', push, '--remote', 'foo')
    assert 'SUCCESS' in commit_info()
    assert is_true(push) == was_pushed(remote=remote)
def test_invoke_cli(elsa):
    """A website.py may register extra click commands alongside elsa's."""
    elsa.run('freeze', script='custom_command.py')
    with open(INDEX_FIXTURES) as f:
        assert 'SUCCESS' in f.read()
    result = elsa.run('custom', script='custom_command.py')
    assert result.stdout.strip() == 'Custom command'
def test_freeze_verbose(elsa, capsys):
    elsa.run('freeze', '--verbose')
    captured = capsys.readouterr()
    assert 'Frozen /' in captured.err.splitlines()
| pyvec/elsa | tests/test_commands.py | test_commands.py | py | 15,887 | python | en | code | 27 | github-code | 13 |
# A+B - 5
import sys

# Read "A B" pairs from stdin and print their sum; the pair "0 0" is the
# end-of-input sentinel and is not echoed.
ipt = sys.stdin.readline
while True:  # loop until the sentinel line arrives
    A, B = map(int, ipt().split())  # split() already discards the newline
    if A == B == 0:  # "0 0" terminates the input
        break
    print(A + B)
from flask import Flask
from flask import request
import json
app = Flask(__name__)
@app.route('/post', methods=['POST'])
# this part is covered in the tutorial
def main():
    """Handle an Alice webhook POST: echo back the session/version envelope
    with the reply text filled in by handle_dialog."""
    response = {
        'session': request.json['session'],
        'version': request.json['version'],
        'response': {
            'end_session': False
        }
    }
    handle_dialog(response, request.json)
    return json.dumps(response)
def handle_dialog(resp, reqst):
    """Fill resp['response']['text']: echo the user's utterance, or greet on
    the first (empty) message. Mutates `resp` in place."""
    # check that this is not the first message
    if reqst['request']['original_utterance']:
        resp['response']['text'] = reqst['request']['original_utterance']
    else:
        # if there were no messages before this one
        resp['response']['text'] = "Привет! Я эхо-bot"
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run()
| lord-protectorx/yandex-Alica | Storehouse/echo.py | echo.py | py | 851 | python | ru | code | 1 | github-code | 13 |
from typing import List, Tuple, Dict, Callable, Optional, cast, Any, Set
from collections import OrderedDict
import uuid
import statistics
import pickle
from holmes_extractor.word_matching.ontology import OntologyWordMatchingStrategy
from tqdm import tqdm
from spacy.tokens import Doc
from thinc.api import Model
from thinc.backends import get_current_ops, Ops
from thinc.loss import SequenceCategoricalCrossentropy
from thinc.layers import chain, Relu, Softmax
from thinc.optimizers import Adam
from thinc.types import Floats2d
from .errors import (
WrongModelDeserializationError,
FewerThanTwoClassificationsError,
DuplicateDocumentError,
NoPhraseletsAfterFilteringError,
IncompatibleAnalyzeDerivationalMorphologyDeserializationError,
)
from .structural_matching import Match, StructuralMatcher
from .ontology import Ontology
from .parsing import (
CorpusWordPosition,
LinguisticObjectFactory,
SearchPhrase,
SemanticAnalyzer,
SemanticMatchingHelper,
)
from .parsing import PhraseletInfo
class SupervisedTopicTrainingUtils:
def __init__(self, overlap_memory_size, one_hot):
self.overlap_memory_size = overlap_memory_size
self.one_hot = one_hot
def get_labels_to_classification_frequencies_dict(
self,
*,
matches: List[Match],
labels_to_classifications_dict: Optional[Dict[str, str]]
) -> Dict[str, Any]:
"""Builds a dictionary from search phrase (phraselet) labels to classification
frequencies. Depending on the training phase, which is signalled by the parameters, the
dictionary tracks either raw frequencies for each search phrase label or points to a
second dictionary from classification labels to frequencies.
Parameters:
matches -- the structural matches from which to build the dictionary
labels_to_classifications_dict -- a dictionary from document labels to document
classifications, or 'None' if the target dictionary should contain raw frequencies.
"""
def increment(search_phrase_label, document_label):
if labels_to_classifications_dict is not None:
if search_phrase_label not in labels_to_frequencies_dict:
classification_frequency_dict = {}
labels_to_frequencies_dict[
search_phrase_label
] = classification_frequency_dict
else:
classification_frequency_dict = labels_to_frequencies_dict[
search_phrase_label
]
classification = labels_to_classifications_dict[document_label]
if classification in classification_frequency_dict:
classification_frequency_dict[classification] += 1
else:
classification_frequency_dict[classification] = 1
else:
if search_phrase_label not in labels_to_frequencies_dict:
labels_to_frequencies_dict[search_phrase_label] = 1
else:
labels_to_frequencies_dict[search_phrase_label] += 1
def relation_match_involves_whole_word_containing_subwords(match):
# Where there are subwords, we suppress relation matches with the
# entire word. The same rule is not applied to single-word matches because
# it still makes sense to track words with more than three subwords.
return (
len(match.word_matches) > 1
and len(
[
word_match
for word_match in match.word_matches
if len(word_match.document_token._.holmes.subwords) > 0
and word_match.document_subword is None
]
)
> 0
)
labels_to_frequencies_dict: Dict[str, Any] = {}
matches = [
match
for match in matches
if not relation_match_involves_whole_word_containing_subwords(match)
]
matches = sorted(
matches,
key=lambda match: (
match.document_label,
match.index_within_document,
match.get_subword_index_for_sorting(),
),
)
for index, match in enumerate(matches):
this_document_label: Optional[str]
if self.one_hot:
if (
"this_document_label" not in locals()
) or this_document_label != match.document_label:
this_document_label = match.document_label
search_phrases_added_for_this_document = set()
if (
match.search_phrase_label
not in search_phrases_added_for_this_document
):
increment(match.search_phrase_label, match.document_label)
search_phrases_added_for_this_document.add(
match.search_phrase_label
)
else:
increment(match.search_phrase_label, match.document_label)
if not match.from_single_word_phraselet:
previous_match_index = index
number_of_analyzed_matches_counter = 0
while (
previous_match_index > 0
and number_of_analyzed_matches_counter <= self.overlap_memory_size
):
previous_match_index -= 1
previous_match = matches[previous_match_index]
if previous_match.document_label != match.document_label:
break
if previous_match.from_single_word_phraselet:
continue
if previous_match.search_phrase_label == match.search_phrase_label:
continue # otherwise coreference resolution leads to phrases being
# combined with themselves
number_of_analyzed_matches_counter += 1
previous_word_match_doc_indexes = [
word_match.get_document_index()
for word_match in previous_match.word_matches
]
for word_match in match.word_matches:
if (
word_match.get_document_index()
in previous_word_match_doc_indexes
):
# the same word is involved in both matches, so combine them
# into a new label
label_parts = sorted(
(
previous_match.search_phrase_label,
match.search_phrase_label,
)
)
combined_label = "/".join((label_parts[0], label_parts[1]))
if self.one_hot:
if (
combined_label
not in search_phrases_added_for_this_document
):
increment(combined_label, match.document_label)
search_phrases_added_for_this_document.add(
combined_label
)
else:
increment(combined_label, match.document_label)
return labels_to_frequencies_dict
def get_occurrence_dicts(
self,
*,
phraselet_labels_to_search_phrases: Dict[str, SearchPhrase],
semantic_matching_helper: SemanticMatchingHelper,
structural_matcher: StructuralMatcher,
sorted_label_dict: Dict[str, int],
overall_similarity_threshold: float,
training_document_labels_to_documents: Dict[str, Doc]
) -> List[Dict[int, int]]:
"""Matches documents against the currently stored phraselets and records the matches
in a custom sparse format.
Parameters:
phraselet_labels_to_search_phrases -- a dictionary from search phrase (phraselet)
labels to search phrase objects.
semantic_matching_helper -- the semantic matching helper to use.
structural_matcher -- the structural matcher to use for comparisons.
sorted_label_dict -- a dictionary from search phrase (phraselet) labels to their own
alphabetic sorting indexes.
overall_similarity_threshold -- the threshold for embedding-based matching.
training_document_labels_to_documents -- a dictionary.
"""
return_dicts: List[Dict[int, int]] = []
for doc_label in sorted(training_document_labels_to_documents.keys()):
this_document_dict: Dict[int, int] = {}
doc = training_document_labels_to_documents[doc_label]
document_labels_to_documents = {doc_label: doc}
reverse_dict: Dict[str, List[CorpusWordPosition]] = {}
semantic_matching_helper.add_to_reverse_dict(reverse_dict, doc, doc_label)
for (
label,
occurrences,
) in self.get_labels_to_classification_frequencies_dict(
matches=structural_matcher.match(
word_matching_strategies=semantic_matching_helper.main_word_matching_strategies
+ semantic_matching_helper.ontology_word_matching_strategies
+ semantic_matching_helper.embedding_word_matching_strategies,
document_labels_to_documents=document_labels_to_documents,
reverse_dict=reverse_dict,
search_phrases=phraselet_labels_to_search_phrases.values(),
match_depending_on_single_words=None,
compare_embeddings_on_root_words=False,
compare_embeddings_on_non_root_words=True,
reverse_matching_cwps=None,
embedding_reverse_matching_cwps=None,
process_initial_question_words=False,
overall_similarity_threshold=overall_similarity_threshold,
initial_question_word_overall_similarity_threshold=1.0,
),
labels_to_classifications_dict=None,
).items():
if self.one_hot:
occurrences = 1
if (
label in sorted_label_dict
): # may not be the case for compound labels
label_index = sorted_label_dict[label]
this_document_dict[label_index] = occurrences
return_dicts.append(this_document_dict)
return return_dicts
    def get_thinc_model(
        self, *, hidden_layer_sizes: List[int], input_width: int, output_width: int
    ) -> Model[List[Dict[int, int]], Floats2d]:
        """Generates the structure — without weights — of the Thinc neural network.

        Parameters:

        hidden_layer_sizes -- a list containing the number of neurons in each hidden layer.
        input_width -- the input neuron width, which corresponds to the number of phraselets.
        output_width -- the output neuron width, which corresponds to the number of classifications.
        """

        def get_doc_infos(
            input_len,
        ) -> Model[List[Dict[int, int]], Floats2d]:
            # Wrap 'doc_infos_forward' in a custom Thinc layer; 'input_len' is stored
            # as a model attribute so the forward pass knows the dense matrix width.
            model: Model[List[Dict[int, int]], Floats2d] = Model(
                "doc_infos_forward", doc_infos_forward
            )
            model.attrs["input_len"] = input_len
            return model

        def doc_infos_forward(
            model: Model[List[Dict[int, int]], Floats2d],
            occurrence_dicts: List[Dict[int, int]],
            is_train: bool,
        ) -> Tuple[Floats2d, Callable]:
            # Expands the sparse per-document occurrence dictionaries into a dense
            # (documents x phraselets) matrix suitable for the following layers.
            def backprop(sparse_infos: Floats2d) -> List[Dict[int, int]]:
                # This input layer has no trainable parameters, so nothing is
                # propagated further back.
                return []

            input_len = model.attrs["input_len"]
            return_matrix = model.ops.alloc2f(len(occurrence_dicts), input_len) + 0.0
            for index, occurrence_dict in enumerate(occurrence_dicts):
                for key, value in occurrence_dict.items():
                    return_matrix[index, key] = value
            return return_matrix, backprop

        hidden_layers: Model[Floats2d, Floats2d]
        if len(hidden_layer_sizes) == 1:
            # A single hidden layer needs no chaining.
            hidden_layers = Relu(hidden_layer_sizes[0])
        else:
            hidden_layers = chain(*(Relu(size) for size in hidden_layer_sizes))
        model: Model[List[Dict[int, int]], Floats2d] = chain(
            get_doc_infos(input_width),
            hidden_layers,
            Softmax(output_width),
        )
        return model
class SupervisedTopicTrainingBasis:
    """Holder object for training documents and their classifications from which one or more
    'SupervisedTopicModelTrainer' objects can be derived. This class is *NOT* threadsafe.

    Expected call order: register documents and additional labels, then prepare(), then
    train().
    """

    def __init__(
        self,
        *,
        linguistic_object_factory: LinguisticObjectFactory,
        structural_matcher: StructuralMatcher,
        classification_ontology: Optional[Ontology],
        overlap_memory_size: int,
        one_hot: bool,
        match_all_words: bool,
        overall_similarity_threshold: float,
        verbose: bool
    ):
        """Parameters:

        linguistic_object_factory -- the linguistic object factory to use
        structural_matcher -- the structural matcher to use.
        classification_ontology -- an Ontology object incorporating relationships between
            classification labels.
        overlap_memory_size -- how many non-word phraselet matches to the left should be
            checked for words in common with a current match.
        one_hot -- whether the same word or relationship matched multiple times should be
            counted once only (value 'True') or multiple times (value 'False')
        match_all_words -- whether all single words should be taken into account
            (value 'True') or only single words with noun tags (value 'False')
        overall_similarity_threshold -- the overall similarity threshold for embedding-based
            matching. Defaults to *1.0*, which deactivates embedding-based matching.
        verbose -- if 'True', information about training progress is outputted to the console.
        """
        self.linguistic_object_factory = linguistic_object_factory
        self.structural_matcher = structural_matcher
        self.semantic_analyzer = linguistic_object_factory.semantic_analyzer
        self.semantic_matching_helper = (
            linguistic_object_factory.semantic_matching_helper
        )
        self.overall_similarity_threshold = overall_similarity_threshold
        self.classification_ontology = classification_ontology
        self.utils = SupervisedTopicTrainingUtils(overlap_memory_size, one_hot)
        self.match_all_words = match_all_words
        self.verbose = verbose

        # Mutable training state, filled by register_training_document() /
        # register_additional_classification_label() and frozen by prepare().
        self.training_document_labels_to_documents: Dict[str, Doc] = {}
        self.reverse_dict: Dict[str, List[CorpusWordPosition]] = {}
        self.training_documents_labels_to_classifications_dict: Dict[str, str] = {}
        self.additional_classification_labels: Set[str] = set()
        self.classification_implication_dict: Dict[str, List[str]] = {}
        # Stays None until prepare() has run; also acts as the guard that blocks
        # further registration afterwards.
        self.labels_to_classification_frequencies: Optional[Dict[str, Any]] = None
        self.phraselet_labels_to_phraselet_infos: Dict[str, PhraseletInfo] = {}
        self.classifications: Optional[List[str]] = None

    def parse_and_register_training_document(
        self, text: str, classification: str, label: Optional[str] = None
    ) -> None:
        """Parses and registers a document to use for training.

        Parameters:

        text -- the document text
        classification -- the classification label
        label -- a label with which to identify the document in verbose training output,
            or 'None' if a random label should be assigned.
        """
        self.register_training_document(
            self.semantic_analyzer.parse(text), classification, label
        )

    def register_training_document(
        self, doc: Doc, classification: str, label: Optional[str]
    ) -> None:
        """Registers a pre-parsed document to use for training.

        Parameters:

        doc -- the document
        classification -- the classification label
        label -- a label with which to identify the document in verbose training output,
            or 'None' if a random label should be assigned.

        Raises RuntimeError after prepare() has been called and DuplicateDocumentError
        for a repeated label.
        """
        if self.labels_to_classification_frequencies is not None:
            raise RuntimeError(
                "register_training_document() may not be called once prepare() has been called"
            )
        if label is None:
            label = str(uuid.uuid4())
        if label in self.training_document_labels_to_documents:
            raise DuplicateDocumentError(label)
        if self.verbose:
            print("Registering document", label)
        self.training_document_labels_to_documents[label] = doc
        self.semantic_matching_helper.add_to_reverse_dict(self.reverse_dict, doc, label)
        # Derive phraselets (single-word and relation search phrases) from the document
        # and accumulate them across all registered documents.
        self.linguistic_object_factory.add_phraselets_to_dict(
            doc,
            phraselet_labels_to_phraselet_infos=self.phraselet_labels_to_phraselet_infos,
            replace_with_hypernym_ancestors=True,
            match_all_words=self.match_all_words,
            ignore_relation_phraselets=False,
            include_reverse_only=False,
            stop_lemmas=self.semantic_matching_helper.supervised_document_classification_phraselet_stop_lemmas,
            stop_tags=self.semantic_matching_helper.topic_matching_phraselet_stop_tags,
            reverse_only_parent_lemmas=None,
            words_to_corpus_frequencies=None,
            maximum_corpus_frequency=None,
            process_initial_question_words=False,
        )
        self.training_documents_labels_to_classifications_dict[label] = classification

    def register_additional_classification_label(self, label: str) -> None:
        """Register an additional classification label which no training document has explicitly
        but that should be assigned to documents whose explicit labels are related to the
        additional classification label via the classification ontology.
        """
        if self.labels_to_classification_frequencies is not None:
            raise RuntimeError(
                "register_additional_classification_label() may not be called once prepare() has "
                " been called"
            )
        # Labels unknown to the classification ontology are silently ignored.
        if (
            self.classification_ontology is not None
            and self.classification_ontology.contains_word(label)
        ):
            self.additional_classification_labels.add(label)

    def prepare(self) -> None:
        """Matches the phraselets derived from the training documents against the training
        documents to generate frequencies that also include combined labels, and examines the
        explicit classification labels, the additional classification labels and the
        classification ontology to derive classification implications.

        Once this method has been called, the instance no longer accepts new training documents
        or additional classification labels.
        """
        if self.labels_to_classification_frequencies is not None:
            raise RuntimeError("prepare() may only be called once")
        if self.verbose:
            print("Matching documents against all phraselets")
        search_phrases = (
            self.linguistic_object_factory.create_search_phrases_from_phraselet_infos(
                list(self.phraselet_labels_to_phraselet_infos.values())
            ).values()
        )
        self.labels_to_classification_frequencies = cast(
            Dict[str, Dict[str, int]],
            self.utils.get_labels_to_classification_frequencies_dict(
                matches=self.structural_matcher.match(
                    word_matching_strategies=self.semantic_matching_helper.main_word_matching_strategies
                    + self.semantic_matching_helper.ontology_word_matching_strategies
                    + self.semantic_matching_helper.embedding_word_matching_strategies,
                    document_labels_to_documents=self.training_document_labels_to_documents,
                    reverse_dict=self.reverse_dict,
                    search_phrases=search_phrases,
                    match_depending_on_single_words=None,
                    compare_embeddings_on_root_words=False,
                    compare_embeddings_on_non_root_words=True,
                    reverse_matching_cwps=None,
                    embedding_reverse_matching_cwps=None,
                    process_initial_question_words=False,
                    overall_similarity_threshold=self.overall_similarity_threshold,
                    initial_question_word_overall_similarity_threshold=1.0,
                ),
                labels_to_classifications_dict=self.training_documents_labels_to_classifications_dict,
            ),
        )
        # The classification set is the union of explicit labels and additional
        # (ontology-only) labels, sorted so neural-network outputs are deterministic.
        self.classifications = sorted(
            set(self.training_documents_labels_to_classifications_dict.values()).union(
                self.additional_classification_labels
            )
        )
        if len(self.classifications) < 2:
            raise FewerThanTwoClassificationsError(len(self.classifications))
        if self.classification_ontology is not None:
            # For each child classification, record which parent classifications it
            # implies according to the classification ontology.
            for parent in self.classifications:
                for child in self.classifications:
                    if (
                        self.classification_ontology.matches(parent, [child])
                        is not None
                    ):
                        if child in self.classification_implication_dict.keys():
                            self.classification_implication_dict[child].append(parent)
                        else:
                            self.classification_implication_dict[child] = [parent]

    def train(
        self,
        *,
        minimum_occurrences: int = 4,
        cv_threshold: float = 1.0,
        learning_rate: float = 0.001,
        batch_size: int = 5,
        max_epochs: int = 200,
        convergence_threshold: float = 0.0001,
        hidden_layer_sizes: Optional[List[int]] = None,
        shuffle: bool = True,
        normalize: bool = True
    ) -> "SupervisedTopicModelTrainer":
        """Trains a model based on the prepared state.

        Parameters:

        minimum_occurrences -- the minimum number of times a word or relationship has to
            occur in the context of at least one single classification for the phraselet
            to be accepted into the final model.
        cv_threshold -- the minimum coefficient of variation a word or relationship has
            to occur with respect to explicit classification labels for the phraselet to be
            accepted into the final model.
        learning_rate -- the learning rate for the Adam optimizer.
        batch_size -- the number of documents in each training batch.
        max_epochs -- the maximum number of training epochs.
        convergence_threshold -- the threshold below which loss measurements after consecutive
            epochs are regarded as equivalent. Training stops before *max_epochs* is reached
            if equivalent results are achieved after four consecutive epochs.
        hidden_layer_sizes -- a list containing the number of neurons in each hidden layer, or
            'None' if the topology should be determined automatically.
        shuffle -- *True* if documents should be shuffled during batching.
        normalize -- *True* if normalization should be applied to the loss function.

        Raises RuntimeError if prepare() has not yet been called.
        """
        if self.labels_to_classification_frequencies is None:
            raise RuntimeError(
                "train() may only be called after prepare() has been called"
            )
        # Training happens inside the SupervisedTopicModelTrainer constructor.
        return SupervisedTopicModelTrainer(
            training_basis=self,
            linguistic_object_factory=self.linguistic_object_factory,
            structural_matcher=self.structural_matcher,
            labels_to_classification_frequencies=self.labels_to_classification_frequencies,
            phraselet_infos=list(self.phraselet_labels_to_phraselet_infos.values()),
            minimum_occurrences=minimum_occurrences,
            cv_threshold=cv_threshold,
            learning_rate=learning_rate,
            batch_size=batch_size,
            max_epochs=max_epochs,
            convergence_threshold=convergence_threshold,
            hidden_layer_sizes=hidden_layer_sizes,
            shuffle=shuffle,
            normalize=normalize,
            utils=self.utils,
        )
class SupervisedTopicModelTrainer:
    """Worker object used to train and generate models. This object could be removed from the public interface
    (`SupervisedTopicTrainingBasis.train()` could return a `SupervisedTopicClassifier` directly) but has
    been retained to facilitate testability.

    This class is NOT threadsafe.
    """

    def __init__(
        self,
        *,
        training_basis: SupervisedTopicTrainingBasis,
        linguistic_object_factory: LinguisticObjectFactory,
        structural_matcher: StructuralMatcher,
        labels_to_classification_frequencies: Dict[str, Dict[str, int]],
        phraselet_infos: List[PhraseletInfo],
        minimum_occurrences: int,
        cv_threshold: float,
        learning_rate: float,
        batch_size: int,
        max_epochs: int,
        convergence_threshold: float,
        hidden_layer_sizes: Optional[List[int]],
        shuffle: bool,
        normalize: bool,
        utils: SupervisedTopicTrainingUtils
    ):
        self.utils = utils
        self.semantic_analyzer = linguistic_object_factory.semantic_analyzer
        self.linguistic_object_factory = linguistic_object_factory
        self.semantic_matching_helper = (
            linguistic_object_factory.semantic_matching_helper
        )
        self.structural_matcher = structural_matcher
        self.training_basis = training_basis
        self.minimum_occurrences = minimum_occurrences
        self.cv_threshold = cv_threshold
        # Discard phraselets that occur too rarely or are spread too evenly across
        # classifications to be informative.
        self.labels_to_classification_frequencies, self.phraselet_infos = self.filter(
            labels_to_classification_frequencies, phraselet_infos
        )
        if len(self.phraselet_infos) == 0:
            raise NoPhraseletsAfterFilteringError(
                "".join(
                    (
                        "minimum_occurrences: ",
                        str(minimum_occurrences),
                        "; cv_threshold: ",
                        str(cv_threshold),
                    )
                )
            )
        phraselet_labels_to_search_phrases = (
            self.linguistic_object_factory.create_search_phrases_from_phraselet_infos(
                self.phraselet_infos
            )
        )
        # Maps each surviving phraselet label to its input-neuron index
        # (alphabetical order of labels).
        self.sorted_label_dict = {}
        for index, label in enumerate(
            sorted(self.labels_to_classification_frequencies.keys())
        ):
            self.sorted_label_dict[label] = index
        if self.training_basis.verbose:
            print("Matching documents against filtered phraselets")
        self.occurrence_dicts = self.utils.get_occurrence_dicts(
            phraselet_labels_to_search_phrases=phraselet_labels_to_search_phrases,
            semantic_matching_helper=self.semantic_matching_helper,
            structural_matcher=self.structural_matcher,
            sorted_label_dict=self.sorted_label_dict,
            overall_similarity_threshold=self.training_basis.overall_similarity_threshold,
            training_document_labels_to_documents=self.training_basis.training_document_labels_to_documents,
        )
        self.output_matrix = self.record_classifications_for_training()
        self._hidden_layer_sizes = hidden_layer_sizes
        if self._hidden_layer_sizes is None or len(self._hidden_layer_sizes) == 0:
            # Default topology: three hidden layers interpolating linearly between
            # input width (number of phraselets) and output width (classifications).
            start = len(self.sorted_label_dict)
            step = (
                len(self.training_basis.classifications)  # type:ignore[arg-type]
                - len(self.sorted_label_dict)
            ) / 3
            self._hidden_layer_sizes = [
                start,
                int(start + step),
                int(start + (2 * step)),
            ]
        if self.training_basis.verbose:
            print("Hidden layer sizes:", self._hidden_layer_sizes)
        self._thinc_model = self.utils.get_thinc_model(
            hidden_layer_sizes=self._hidden_layer_sizes,
            input_width=len(self.sorted_label_dict),
            output_width=len(
                self.training_basis.classifications  # type:ignore[arg-type]
            ),
        )
        optimizer = Adam(learning_rate)
        average_losses: List[float] = []
        initialized = False
        # NOTE(review): range(1, max_epochs) performs at most max_epochs - 1 epochs —
        # confirm this off-by-one is intended.
        for epoch in range(1, max_epochs):
            if self.training_basis.verbose:
                print("Epoch", epoch)
                # tqdm wraps the batch iterator purely for a progress bar.
                batches = tqdm(
                    self._thinc_model.ops.multibatch(
                        batch_size,
                        self.occurrence_dicts,
                        self.output_matrix,
                        shuffle=shuffle,
                    )
                )
            else:
                batches = self._thinc_model.ops.multibatch(
                    batch_size,
                    self.occurrence_dicts,
                    self.output_matrix,
                    shuffle=shuffle,
                )
            loss_calc = SequenceCategoricalCrossentropy(normalize=normalize)
            losses = []
            for X, Y in batches:
                if not initialized:
                    # Thinc infers missing layer dimensions from the first batch.
                    self._thinc_model.initialize(X, Y)
                    initialized = True
                Yh, backprop = cast(
                    Tuple[Floats2d, Callable], self._thinc_model.begin_update(X)
                )
                grads, loss = loss_calc(Yh, Y)  # type:ignore[arg-type]
                losses.append(loss.tolist())  # type: ignore[attr-defined]
                backprop(
                    self._thinc_model.ops.asarray2f(grads)  # type:ignore[arg-type]
                )
                self._thinc_model.finish_update(optimizer)
            average_loss = round(sum(losses) / len(losses), 6)
            if self.training_basis.verbose:
                print("Average absolute loss:", average_loss)
                print()
            average_losses.append(average_loss)
            # Early stopping: loss effectively stable across four consecutive epochs.
            if (
                len(average_losses) >= 4
                and abs(average_losses[-1] - average_losses[-2]) < convergence_threshold
                and abs(average_losses[-2] - average_losses[-3]) < convergence_threshold
                and abs(average_losses[-3] - average_losses[-4]) < convergence_threshold
            ):
                if self.training_basis.verbose:
                    print("Neural network converged after", epoch, "epochs.")
                break

    def filter(
        self,
        labels_to_classification_frequencies: Dict[str, Dict[str, int]],
        phraselet_infos: List[PhraseletInfo],
    ) -> Tuple[Dict[str, Dict[str, int]], List[PhraseletInfo]]:
        """Filters the phraselets in memory based on minimum_occurrences and cv_threshold.

        Returns the surviving label->frequency mapping and the corresponding
        PhraseletInfo objects.
        """
        accepted = 0
        underminimum_occurrences = 0
        under_minimum_cv = 0
        new_labels_to_classification_frequencies = {}
        for (
            label,
            classification_frequencies,
        ) in labels_to_classification_frequencies.items():
            # A phraselet must reach 'minimum_occurrences' within at least one
            # classification to be considered at all.
            at_least_minimum = False
            working_classification_frequencies = classification_frequencies.copy()
            for classification in working_classification_frequencies:
                if (
                    working_classification_frequencies[classification]
                    >= self.minimum_occurrences
                ):
                    at_least_minimum = True
            if not at_least_minimum:
                underminimum_occurrences += 1
                continue
            frequency_list = list(working_classification_frequencies.values())
            # We only want to take explicit classification labels into account, i.e. ignore the
            # classification ontology.
            number_of_classification_labels = len(
                set(
                    self.training_basis.training_documents_labels_to_classifications_dict.values()
                )
            )
            # Pad with zeroes so classifications in which the phraselet never occurred
            # contribute to the coefficient of variation, then truncate back to the
            # number of explicit labels.
            # NOTE(review): the truncation keeps the first N entries in dict order,
            # which may drop real frequencies when ontology-derived entries are
            # present — confirm this is intended.
            frequency_list.extend([0] * number_of_classification_labels)
            frequency_list = frequency_list[:number_of_classification_labels]
            if (
                statistics.pstdev(frequency_list) / statistics.mean(frequency_list)
                >= self.cv_threshold
            ):
                accepted += 1
                new_labels_to_classification_frequencies[
                    label
                ] = classification_frequencies
            else:
                under_minimum_cv += 1
        if self.training_basis.verbose:
            print(
                "Filtered: accepted",
                accepted,
                "; removed minimum occurrences",
                underminimum_occurrences,
                "; removed cv threshold",
                under_minimum_cv,
            )
        new_phraselet_infos = [
            phraselet_info
            for phraselet_info in phraselet_infos
            if phraselet_info.label in new_labels_to_classification_frequencies.keys()
        ]
        return new_labels_to_classification_frequencies, new_phraselet_infos

    def record_classifications_for_training(self) -> Floats2d:
        """Builds the multi-hot target matrix (documents x classifications): 1.0 for each
        document's explicit classification and for every classification it implies via
        the classification ontology. Rows follow the alphabetical order of document
        labels, matching get_occurrence_dicts().
        """
        ops: Ops = get_current_ops()
        output_matrix = (
            ops.alloc2f(
                len(
                    self.training_basis.training_documents_labels_to_classifications_dict
                ),
                len(self.training_basis.classifications),  # type:ignore[arg-type]
            )
            + 0.0
        )
        for index, training_document_label in enumerate(
            sorted(
                self.training_basis.training_documents_labels_to_classifications_dict.keys()
            )
        ):
            classification = (
                self.training_basis.training_documents_labels_to_classifications_dict[
                    training_document_label
                ]
            )
            classification_index = (
                self.training_basis.classifications.index(  # type:ignore[union-attr]
                    classification
                )
            )
            output_matrix[index, classification_index] = 1.0
            if classification in self.training_basis.classification_implication_dict:
                for (
                    implied_classification
                ) in self.training_basis.classification_implication_dict[
                    classification
                ]:
                    implied_classification_index = self.training_basis.classifications.index(  # type:ignore[union-attr]
                        implied_classification
                    )
                    output_matrix[index, implied_classification_index] = 1.0
        return output_matrix

    def classifier(self):
        """Returns a supervised topic classifier which contains no explicit references to the
        training data and that can be serialized.
        """
        model = SupervisedTopicClassifierModel(
            semantic_analyzer_model=self.semantic_analyzer.model,
            structural_matching_ontology=self.linguistic_object_factory.ontology,
            phraselet_infos=self.phraselet_infos,
            sorted_label_dict=self.sorted_label_dict,
            classifications=self.training_basis.classifications,
            overlap_memory_size=self.utils.overlap_memory_size,
            one_hot=self.utils.one_hot,
            analyze_derivational_morphology=self.structural_matcher.analyze_derivational_morphology,
            hidden_layer_sizes=self._hidden_layer_sizes,
            serialized_thinc_model=self._thinc_model.to_dict(),
        )
        return SupervisedTopicClassifier(
            self.semantic_analyzer,
            self.linguistic_object_factory,
            self.structural_matcher,
            model,
            self.training_basis.overall_similarity_threshold,
            self.training_basis.verbose,
        )
class SupervisedTopicClassifierModel:
    """A serializable classifier model.

    Instances are plain data holders: every constructor argument is stored verbatim
    on an attribute of the same name so the whole object can be pickled and later
    used to reconstruct a 'SupervisedTopicClassifier'.

    Parameters:

    semantic_analyzer_model -- a string specifying the spaCy model with which this instance
        was generated and with which it must be used.
    structural_matching_ontology -- the ontology used for matching documents against this model
        (not the classification ontology!)
    phraselet_infos -- the phraselets used for structural matching
    sorted_label_dict -- a dictionary from search phrase (phraselet) labels to their own
        alphabetic sorting indexes.
    classifications -- an ordered list of classification labels corresponding to the
        neural network outputs
    overlap_memory_size -- how many non-word phraselet matches to the left should be
        checked for words in common with a current match.
    one_hot -- whether the same word or relationship matched multiple times should be
        counted once only (value 'True') or multiple times (value 'False')
    analyze_derivational_morphology -- the value of this manager parameter that was in force
        when the model was built. The same value has to be in force when the model is
        deserialized and reused.
    hidden_layer_sizes -- the definition of the topology of the neural-network hidden layers
    serialized_thinc_model -- the serialized neural-network weights
    """

    def __init__(
        self,
        *,
        semantic_analyzer_model: str,
        structural_matching_ontology: Ontology,
        phraselet_infos: List[PhraseletInfo],
        sorted_label_dict: Dict[str, int],
        classifications: List[str],
        overlap_memory_size: int,
        one_hot: bool,
        analyze_derivational_morphology: bool,
        hidden_layer_sizes: List[int],
        serialized_thinc_model: Dict
    ):
        # Linguistic / matching configuration.
        self.semantic_analyzer_model = semantic_analyzer_model
        self.structural_matching_ontology = structural_matching_ontology
        self.analyze_derivational_morphology = analyze_derivational_morphology
        # Phraselets and their mapping to input-neuron indexes.
        self.phraselet_infos = phraselet_infos
        self.sorted_label_dict = sorted_label_dict
        # Output labels and counting behaviour.
        self.classifications = classifications
        self.overlap_memory_size = overlap_memory_size
        self.one_hot = one_hot
        # Neural-network topology and weights.
        self.hidden_layer_sizes = hidden_layer_sizes
        self.serialized_thinc_model = serialized_thinc_model
        # Serialization-format version of this model class.
        self.version = "1.0"
class SupervisedTopicClassifier:
    """Classifies new documents based on a pre-trained model."""

    def __init__(
        self,
        semantic_analyzer: SemanticAnalyzer,
        linguistic_object_factory: LinguisticObjectFactory,
        structural_matcher: StructuralMatcher,
        model: SupervisedTopicClassifierModel,
        overall_similarity_threshold: float,
        verbose: bool,
    ):
        self.model = model
        self.semantic_analyzer = semantic_analyzer
        self.linguistic_object_factory = linguistic_object_factory
        self.semantic_matching_helper = (
            linguistic_object_factory.semantic_matching_helper
        )
        self.structural_matcher = structural_matcher
        self.overall_similarity_threshold = overall_similarity_threshold
        self.verbose = verbose
        self.utils = SupervisedTopicTrainingUtils(
            model.overlap_memory_size, model.one_hot
        )
        # A model may only be used with the spaCy model it was trained with.
        if self.semantic_analyzer.model != model.semantic_analyzer_model:
            raise WrongModelDeserializationError(model.semantic_analyzer_model)
        # ... and with the same derivational-morphology setting.
        if (
            self.structural_matcher.analyze_derivational_morphology
            != model.analyze_derivational_morphology
        ):
            raise IncompatibleAnalyzeDerivationalMorphologyDeserializationError(
                "".join(
                    (
                        "manager: ",
                        str(self.structural_matcher.analyze_derivational_morphology),
                        "; model: ",
                        str(model.analyze_derivational_morphology),
                    )
                )
            )
        # Install the model's structural-matching ontology on the shared factory.
        self.linguistic_object_factory.ontology = model.structural_matching_ontology
        self.semantic_matching_helper = self.structural_matcher.semantic_matching_helper
        if model.structural_matching_ontology is not None:
            self.linguistic_object_factory.ontology_reverse_derivational_dict = (
                self.semantic_analyzer.get_ontology_reverse_derivational_dict(
                    model.structural_matching_ontology
                )
            )
            self.semantic_matching_helper.ontology_word_matching_strategies = [
                OntologyWordMatchingStrategy(
                    self.semantic_matching_helper,
                    perform_coreference_resolution=self.structural_matcher.perform_coreference_resolution,
                    ontology=model.structural_matching_ontology,
                    analyze_derivational_morphology=model.analyze_derivational_morphology,
                    ontology_reverse_derivational_dict=self.linguistic_object_factory.ontology_reverse_derivational_dict,
                )
            ]
        # Rebuild the search phrases and the neural network from the serialized model.
        self.phraselet_labels_to_search_phrases = (
            self.linguistic_object_factory.create_search_phrases_from_phraselet_infos(
                model.phraselet_infos
            )
        )
        self.thinc_model = self.utils.get_thinc_model(
            hidden_layer_sizes=model.hidden_layer_sizes,
            input_width=len(model.sorted_label_dict),
            output_width=len(model.classifications),
        )
        self.thinc_model.from_dict(model.serialized_thinc_model)

    def parse_and_classify(self, text: str) -> Optional[OrderedDict]:
        """Returns a dictionary from classification labels to probabilities
        ordered starting with the most probable, or *None* if the text did
        not contain any words recognised by the model.

        Parameter:

        text -- the text to parse and classify.
        """
        return self.classify(self.semantic_analyzer.parse(text))

    def classify(self, doc: Doc) -> Optional[OrderedDict]:
        """Returns a dictionary from classification labels to probabilities
        ordered starting with the most probable, or *None* if the text did
        not contain any words recognised by the model.

        Parameter:

        doc -- the pre-parsed document to classify.
        """
        if self.thinc_model is None:
            raise RuntimeError("No model defined")
        occurrence_dicts = self.utils.get_occurrence_dicts(
            semantic_matching_helper=self.semantic_matching_helper,
            structural_matcher=self.structural_matcher,
            phraselet_labels_to_search_phrases=self.phraselet_labels_to_search_phrases,
            sorted_label_dict=self.model.sorted_label_dict,
            overall_similarity_threshold=self.overall_similarity_threshold,
            training_document_labels_to_documents={"": doc},
        )
        if len(occurrence_dicts[0]) == 0:
            # None of the model's phraselets matched the document.
            return None
        else:
            return_dict = OrderedDict()
            predictions = self.thinc_model.predict(occurrence_dicts)[0]
            # argsort of the negated scores yields indexes in descending
            # probability order.
            for i in (-predictions).argsort():  # type:ignore[attr-defined]
                return_dict[self.model.classifications[i.item()]] = predictions[
                    i
                ].item()
            return return_dict

    def serialize_model(self) -> bytes:
        """Returns the pickled 'SupervisedTopicClassifierModel'."""
        return pickle.dumps(self.model)
| msg-systems/holmes-extractor | holmes_extractor/classification.py | classification.py | py | 44,286 | python | en | code | 386 | github-code | 13 |
41475606514 | if __name__ == '__main__' and __package__ is None:
from os import sys#, path
sys.path.append('../')
import torch
import torch.nn as nn
from utils import get_pad_same
from utils import cal_gan_from_op
class BaseNetwork(nn.Module):
    """Base class for networks in this module.

    Tracks whether a module's parameters have already been initialized
    ('param_inited') so that nested BaseNetwork instances are each initialized
    exactly once by init_apply().
    """

    def __init__(self):
        super(BaseNetwork, self).__init__()
        self.param_inited = False  # set to True once init_apply() has visited this module

    def init_weights(self, init_type='normal', gain=0.02):
        """Initialize the network's weights.

        Parameters:

        init_type -- one of 'normal' | 'xavier_normal' | 'kaiming' | 'orthogonal' |
            'xavier_uniform' (the historical misspelling 'xavier_unifrom' is still
            accepted for backward compatibility).
        gain -- scaling factor / standard deviation passed to the initializer.

        Raises NotImplementedError for an unknown init_type.

        See https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
        """

        def init_func(m):
            classname = m.__class__.__name__
            if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if init_type == 'normal':
                    nn.init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier_normal':
                    nn.init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'kaiming':
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    nn.init.orthogonal_(m.weight.data, gain=gain)
                elif init_type in ('xavier_uniform', 'xavier_unifrom'):
                    # 'xavier_unifrom' kept so existing callers are not broken.
                    nn.init.xavier_uniform_(m.weight.data, gain=gain)
                else:
                    raise NotImplementedError(
                        'Unsupported init_type: %r' % (init_type,))
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            elif classname.find('BatchNorm') != -1 and m.weight is not None:
                # Guard against affine=False BatchNorm layers, whose weight is None.
                nn.init.normal_(m.weight.data, 1.0, gain)
                nn.init.constant_(m.bias.data, 0.0)

        self.init_apply(init_func)

    def getParamList(self, x):
        """Return the parameters of module *x* as a list."""
        return list(x.parameters())

    def init_apply(self, fn):
        """Apply *fn* to this module and its children, descending recursively into
        child BaseNetwork instances but skipping any whose parameters were already
        initialized. Returns self.
        """
        for m in self.children():
            if hasattr(m, 'param_inited'):
                # Child is itself a BaseNetwork: recurse only if not yet initialized.
                if m.param_inited is False:
                    m.init_apply(fn)
            else:
                # Plain nn.Module subtree: use the standard recursive apply().
                m.apply(fn)
        if self.param_inited is False:
            fn(self)
            self.param_inited = True
        return self
class mySequential(nn.Sequential, BaseNetwork):
    """Sequential container that can pass multiple positional values between layers.

    Unlike plain nn.Sequential: when a layer returns a tuple, the tuple is unpacked
    into the next layer's positional arguments.
    """

    def __init__(self, *args):
        super(mySequential, self).__init__(*args)

    def forward(self, *inputs):
        # '*inputs' arrives as a tuple, so the first layer is always called unpacked.
        current = inputs
        for layer in self._modules.values():
            current = layer(*current) if type(current) is tuple else layer(current)
        return current
class ConvSame(BaseNetwork):
    """3D convolution block with 'same' padding, optionally followed by a norm layer
    and an activation.

    padding_mode -- 'zeros'/'circular' are delegated to the conv layer itself;
        'constant' and 'replicate' are realized with an explicit padding layer.
    conv_layer -- the convolution class to instantiate; a class named 'GatedConv'
        is treated specially: it receives norm and activation as constructor
        arguments instead of their being appended as separate blocks.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1,
                 groups=1, bias=True, padding_mode='zeros', padding_value=0,
                 activation=None, norm=None, conv_layer=nn.Conv3d
                 ):
        super(ConvSame, self).__init__()
        padding = get_pad_same(dilation, kernel_size)
        pad = None
        if padding > 0:
            if padding_mode == 'zeros' or padding_mode == 'circular':
                pass  # handled by the conv layer's own padding
            elif padding_mode == 'constant':
                pad = nn.ConstantPad3d(padding, padding_value)
                padding = 0
            elif padding_mode == 'replicate':
                pad = nn.ReplicationPad3d(padding)
                padding = 0
            else:
                raise RuntimeError(
                    'padding_mode %r is not supported' % (padding_mode,))
        blocks = []
        if pad is not None:
            blocks.append(mySequential(pad))
        # Detect GatedConv by class name: the previous `conv_layer is GatedConv`
        # comparison raised NameError because GatedConv is never imported here.
        if getattr(conv_layer, '__name__', '') == 'GatedConv':
            # GatedConv applies norm and activation internally.
            blocks.append(mySequential(
                conv_layer(in_channels, out_channels, kernel_size, stride, padding,
                           dilation, groups, bias, 'zeros', norm, activation)
            ))
        else:
            blocks.append(mySequential(
                conv_layer(in_channels, out_channels, kernel_size, stride, padding,
                           dilation, groups, bias, 'zeros')
            ))
            if norm is not None:
                blocks.append(mySequential(
                    norm(out_channels, track_running_stats=False)
                ))
            if activation is not None:
                blocks.append(mySequential(
                    activation
                ))
        self.ops = mySequential(*blocks)

    def forward(self, x):
        return self.ops(x)
# def ConvSame(in_channels,out_channels,kernel_size,stride=1,dilation=1,
# groups=1, bias=True, padding_mode='zeros',
# activation=None, norm=None, conv_layer=nn.Conv3d):
# padding = get_pad_same(dilation, kernel_size)
# if conv_layer is GatedConv:
# return conv_layer(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, 'zeros',
# norm, activation)
# blocks=[]
# blocks.append(mySequential(
# conv_layer(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, 'zeros')
# ))
# if norm is not None:
# blocks.append(mySequential(
# norm(out_channels,track_running_stats=False)
# ))
# if activation is not None:
# blocks.append(mySequential(
# activation
# ))
# return mySequential(*blocks)
class ConvTransposeSame(BaseNetwork):
    """3D transposed-convolution block with 'same'-style padding, optionally followed
    by a norm layer and an activation.

    NOTE: 'padding_mode' and 'padding_value' are accepted for signature compatibility
    with ConvSame but are currently unused — the explicit-padding branch that consumed
    them was disabled (the dead 'pad' machinery it left behind has been removed;
    behavior is unchanged because 'pad' was always None).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, dilation,
                 output_padding=0, activation=None, norm=None, bias=True,
                 padding_mode='zeros', padding_value=0):
        super(ConvTransposeSame, self).__init__()
        padding = get_pad_same(dilation, kernel_size)
        blocks = []
        blocks.append(mySequential(
            nn.ConvTranspose3d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               dilation=dilation,
                               padding=padding,
                               bias=bias,
                               output_padding=output_padding)
        ))
        if norm is not None:
            blocks.append(mySequential(
                norm(out_channels, track_running_stats=False)
            ))
        if activation is not None:
            blocks.append(mySequential(
                activation
            ))
        self.ops = mySequential(*blocks)

    def forward(self, x):
        return self.ops(x)
# def ConvTransposeSame(in_channels,out_channels,kernel_size,stride,dilation,
# output_padding=0, activation=None, norm=None):
# pad = get_pad_same(dilation, kernel_size)
# blocks=[]
# blocks.append(mySequential(
# nn.ConvTranspose3d(in_channels=in_channels,
# out_channels=out_channels,
# kernel_size=kernel_size,
# stride=stride,
# dilation=dilation,
# padding=pad,
# output_padding=output_padding)
# ))
# if norm is not None:
# blocks.append(mySequential(
# norm(out_channels,track_running_stats=False)
# ))
# if activation is not None:
# blocks.append(mySequential(
# activation
# ))
# return mySequential(*blocks)
class ResSSC(BaseNetwork):
    '''
    Residual 3-D convolution block.

    x -> y1 = convertion? convert(x):x -> y2 = Convs(y1)
    y = res_add? res_add(x) + y2 : y1 + y2

    An optional 1x1 `res` branch projects the raw input so it can be added to
    the conv-stack output; the final norm/activation run after the addition.
    '''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3, stride=1, dilation=1,
                 residual_blocks=1,
                 activation=torch.nn.LeakyReLU(0.2, inplace=True),
                 res_add = True,
                 norm = nn.BatchNorm3d,
                 conv_layer=nn.Conv3d,
                 bias = True, padding_mode='zeros', padding_value=0,
                 ):
        super(ResSSC, self).__init__()
        self.activation = activation
        # Final norm applied after the residual addition.
        self.norm = norm(out_channels, track_running_stats=False) if norm is not None else None
        if in_channels != out_channels:
            # Channel counts differ: prepend a conversion conv mapping the
            # input to out_channels; it consumes one of the residual blocks.
            blocks = []
            blocks.append(mySequential(
                # Bug fix: forward the caller's padding_mode instead of the
                # previously hard-coded 'zeros' (the parameter was ignored).
                ConvSame(in_channels, out_channels, kernel_size, stride, dilation, conv_layer=conv_layer, bias=bias, padding_mode=padding_mode, padding_value=padding_value)
            ))
            if self.norm is not None:
                blocks.append(mySequential(
                    norm(out_channels, track_running_stats=False)
                ))
            if self.activation is not None:
                blocks.append(mySequential(
                    self.activation
                ))
            self.conversion = mySequential(*blocks)
            residual_blocks -= 1
        else:
            self.conversion = None
        if res_add:
            # 1x1 projection of the raw input for the residual addition.
            self.res = ConvSame(in_channels, out_channels, 1, 1, 1, conv_layer=conv_layer, bias=bias, padding_mode=padding_mode, padding_value=padding_value)
        else:
            self.res = None
        blocks = []
        for n in range(residual_blocks):
            blocks.append(mySequential(
                ConvSame(out_channels, out_channels, kernel_size, stride, dilation, conv_layer=conv_layer, bias=bias, padding_mode=padding_mode, padding_value=padding_value)
            ))
            # Norm/activation between (but not after) the middle convs; the
            # final ones are applied after the residual addition in forward().
            if n + 1 < residual_blocks:
                if norm is not None:
                    blocks.append(mySequential(
                        norm(out_channels, track_running_stats=False),
                    ))
                if self.activation is not None:
                    blocks.append(mySequential(
                        activation
                    ))
        self.middle = mySequential(*blocks)
    def forward(self, x):
        """Run conversion (if any) and the conv stack, add the residual
        branch, then apply the final norm and activation."""
        res = self.res(x) if self.res is not None else x
        c = self.conversion(x) if self.conversion is not None else x
        x = self.middle(c)
        # Residual addition: projected input if available, else the
        # (possibly converted) features.
        x = res + x if self.res is not None else c + x
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            return self.activation(x)
        else:
            return x
class AddWithBNActivator(BaseNetwork):
    """Element-wise addition of two tensors, followed by an optional
    normalization layer and an optional activation."""
    def __init__(self, activator, norm):
        super(AddWithBNActivator, self).__init__()
        self.activator = activator
        self.norm = norm
    def forward(self, x, y):
        """Return activator(norm(x + y)), skipping whichever piece is None."""
        summed = torch.add(x, y)
        if self.norm is not None:
            summed = self.norm(summed)
        return summed if self.activator is None else self.activator(summed)
class ASPP(BaseNetwork): #Atrous Spatial Pyramid Pooling
    """Atrous Spatial Pyramid Pooling over 3-D feature maps.

    Runs several parallel ConvSame branches with different kernel sizes /
    dilation rates plus a pooled 1x1 branch, concatenates the results on the
    channel axis, and fuses them with a final 1x1 convolution.
    """
    def __init__(self, in_channels,out_channels,kernel_size:list,dilation:list,
                 activation=None, norm=None, conv_layer=nn.Conv3d):
        super(ASPP, self).__init__()
        blocks=[]
        for i in range(len(kernel_size)):
            blocks.append(
                ConvSame(in_channels,out_channels,kernel_size=kernel_size[i],
                         stride=1,dilation=dilation[i],bias=False,
                         activation=activation,norm=norm,conv_layer=conv_layer)
            )
        # Pooling branch followed by a 1x1 projection.
        blocks.append(
            mySequential(
                nn.AdaptiveAvgPool3d((None,None,None)),
                ConvSame(in_channels,out_channels,1,1,1,bias=False,norm=norm,conv_layer=conv_layer,activation=activation
                         ))
        )
        # Bug fix: use nn.ModuleList instead of a plain Python list so the
        # branch parameters are registered (trained by the optimizer, moved by
        # .to()/.cuda(), and included in state_dict()).
        self.blocks = nn.ModuleList(blocks)
        self.conv = ConvSame(out_channels*len(blocks),out_channels,kernel_size=1,
                             stride=1,dilation=1,bias=False,
                             activation=activation,norm=norm,conv_layer=conv_layer)
    def forward(self,x):
        """Apply each branch to x, concatenate on channels, fuse with a 1x1 conv."""
        y = [block(x) for block in self.blocks]
        y = torch.cat(tuple(y), dim=1)
        y = self.conv(y)
        return y
class GatedConv(BaseNetwork):
    """
    Gated Convolution layer with activation (default activation: LeakyReLU).
    Params: same as nn.Conv3d.
    Input: the feature map from the previous layer "I".
    Output: \phi(f(I)) * \sigmoid(g(I)), where f is the feature conv and g the
    gate ("mask") conv.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
                 norm=None,
                 activation=torch.nn.LeakyReLU(0.2, inplace=True),
                 init_weights=True):
        super(GatedConv, self).__init__()
        self.activation = activation
        # Twin convolutions: `conv` produces features, `mask_conv` the gate.
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.mask_conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self.sigmoid = torch.nn.Sigmoid()
        self.norm = norm(out_channels) if norm is not None else None
        if init_weights:
            if self.activation is not None:
                nn.init.xavier_normal_(self.conv.weight.data, gain=cal_gan_from_op(self.activation))
            else:
                nn.init.xavier_normal_(self.conv.weight.data, gain=cal_gan_from_op(self.conv))
            # NOTE(review): feature weights use xavier_normal_ but gate weights
            # use xavier_uniform_ — confirm the mix is intentional.
            nn.init.xavier_uniform_(self.mask_conv.weight.data, gain=cal_gan_from_op(torch.nn.Sigmoid))
            if bias:
                nn.init.constant_(self.conv.bias.data, 0.0)
                nn.init.constant_(self.mask_conv.bias.data, 0.0)
            self.param_inited = True
    def gated(self, mask):
        # Squash the gate logits to (0, 1).
        #return torch.clamp(mask, -1, 1)
        return self.sigmoid(mask)
    def forward(self, input):
        """Return activation(conv(input)) * sigmoid(mask_conv(input)),
        optionally normalized."""
        x = self.conv(input)
        mask = self.mask_conv(input)
        if self.activation is not None:
            x = self.activation(x) * self.gated(mask)
        else:
            x = x * self.gated(mask)
        if self.norm is not None:
            return self.norm(x)
        else:
            return x
if __name__ == "__main__":
    # Smoke test: load the example config and build a minimal gated conv.
    from config import Config
    from torchviz import make_dot
    config = Config('../config.yml.example')
    g = GatedConv(1, 1, 3)
    # x = torch.rand(config.BATCH_SIZE,1,64,64,64)
    # aspp = ASPP(1,6,[1,3,3],[1,3,5],activation=torch.nn.LeakyReLU(0.2, inplace=True),norm=nn.BatchNorm3d)
    # y = aspp(x)
    # graph = make_dot(y)
    # graph.view()
| ShunChengWu/SCFusion_Network | src/networks_base.py | networks_base.py | py | 16,194 | python | en | code | 6 | github-code | 13 |
24775354745 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from rest_framework import status
from rest_framework.response import Response
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from cart.models import OrderItem, Order, Transaction
from shopping.models import Product
from user_auth.forms import Contact_Form
from user_auth.models import Profile, Services
from cart.extra import generate_order_id
import datetime
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
from paypal.standard.forms import PayPalPaymentsForm
from cart.models import OrderLogs
def make_payment(request):
    """Render the PayPal payment page with a pre-filled payment button."""
    # Parameters for the PayPal "buy" button.
    payment_params = {
        "business": "manojmnayala@gmail.com",
        "amount": "10.00",
        "item_name": "name of the item",
        "invoice": "unique-invoice-id",
        "custom": "premium_plan",
        "currency_code": "INR",
    }
    paypal_form = PayPalPaymentsForm(initial=payment_params)
    return render(request, "cart/payment.html",
                  {"form": paypal_form, 'Shopping': 'active'})
def get_user_pending_order(request):
    """Return the requesting user's open (not yet ordered) Order, or 0 if none."""
    account = User.objects.get(pk=request.user.pk)
    profile = Profile.objects.get(user_name=account)
    pending = Order.objects.filter(owner=profile, is_ordered=False)
    if not pending.exists():
        return 0
    return pending[0]
@login_required
@csrf_exempt
def add_to_cart(request, prod_id):
    """Add product `prod_id` to the user's open order, creating one if needed.

    An optional POST field 'select-vendor' carries a delivery cost that is
    stored on the order item and forwarded to the order-summary view.
    """
    product = Product.objects.get(id=prod_id)
    # print(product.stock)
    # shippingcost = request.POST['ShippingCost']
    # shippingcost = int(shippingcost)
    # print('Shipping cost is', shippingcost)
    # return HttpResponse('hello')
    # if request.GET:
    #     vendorid = request.GET['vendorid']
    #     print(vendorid)
    #     ven_qty = VendorQty.objects.get(id=vendorid)
    #     ven = ven_qty.Vendor
    #     print(ven)
    #     print(ven_qty)
    u = User.objects.get(pk=request.user.pk)
    user_profile = Profile.objects.get(user_name=u)
    # print(user_profile)
    user_order, status = Order.objects.get_or_create(owner=user_profile, is_ordered=False)
    # print(user_order, status)
    if status:
        # Brand-new order: assign it a fresh reference code.
        ref_code = generate_order_id()
        print(ref_code)
        order_item, status = OrderItem.objects.get_or_create(product=product, ref_code=ref_code)
        if request.method == 'POST' and 'select-vendor' in request.POST:
            cost = request.POST['select-vendor']
            order_item.delivery_cost = cost
            order_item.save()
        order_item.save()
        user_order.items.add(order_item)
        user_order.ref_code = ref_code
        user_order.save()
    else:
        # Existing open order: reuse its reference code for the new item.
        order_item, status = OrderItem.objects.get_or_create(product=product, ref_code=user_order.ref_code)
        if request.method == 'POST' and 'select-vendor' in request.POST:
            cost = request.POST['select-vendor']
            order_item.delivery_cost = cost
            order_item.save()
        user_order.items.add(order_item)
        user_order.save()
    # if request.GET:
    #     nextto = request.GET["nextto"]
    #     return redirect(nextto)
    # return reverse(redirect('cart:order_summary'), args=(shippingcost,))
    if request.method == 'POST' and 'select-vendor' in request.POST:
        cost = request.POST['select-vendor']
        cost = str(cost)
        return redirect(reverse('cart:order_summary', args=(cost,)))
    return redirect(reverse('cart:order_summary', args=('0',)))
@login_required
def delete_from_cart(request, item_id):
    """Delete one OrderItem from the cart, then show the order summary."""
    matches = OrderItem.objects.filter(pk=item_id)
    if matches.exists():
        matches[0].delete()
    return redirect(reverse('cart:order_summary', args=('0',)))
@login_required
def order_details(request, cost):
    """Render the order-summary page, surfacing any one-shot session message."""
    ctx = {
        'order': get_user_pending_order(request),
        'Shopping': 'active',
    }
    flash = request.session.get('msg')
    if flash:
        # Consume the message so it is shown only once.
        ctx['msg'] = flash
        request.session['msg'] = None
    return render(request, 'cart/order_summary.html', ctx)
@login_required
def checkout(request, **kwargs):
    """Render the checkout page for the user's pending order."""
    existing_order = get_user_pending_order(request)
    context = {
        # NOTE(review): key is spelled 'ordre' — the template must use the
        # same (likely misspelled) name; confirm before renaming.
        'ordre': existing_order,
        'Shopping': 'active'
    }
    return render(request, 'cart/checkout.html', context)
def get_user_details(request):
    """Show/collect the user's contact details before the payment step."""
    u = Profile.objects.get(user_name=request.user)
    user_form = Contact_Form(instance=u)
    if request.method == 'POST':
        user_form = Contact_Form(request.POST, instance=u)
        if user_form.is_valid():
            # NOTE(review): the form is validated but never saved here, unlike
            # update_transaction_records() — confirm whether save() is missing.
            return redirect(reverse('payment:process'))
    return render(request, 'cart/get_user_details.html', {'form': user_form})
@login_required
def update_transaction_records(request):
    """Stash the pending order id in the session, update the user's contact
    details, then hand off to the payment flow."""
    order_to_purchase = get_user_pending_order(request)
    request.session['order_id'] = order_to_purchase.pk
    u = Profile.objects.get(user_name=request.user)
    user_form = Contact_Form(instance=u)
    if request.method == 'POST':
        user_form = Contact_Form(request.POST, instance=u)
        if user_form.is_valid():
            user_form.save()
            return redirect(reverse('payment:process'))
    return render(request, 'cart/get_user_details.html', {'form': user_form})
def qtyupdate(request):
    """AJAX endpoint: update the quantity of one item in the pending order.

    Expects POST params `item_id` and `z` (the new quantity); always returns
    an empty (single-space) response body.

    Cleanup: removed a dead `request.POST.get('item_id')` read that executed
    even on non-POST requests, the unused `order_id` read, and the debug print.
    """
    if request.method == "POST":
        item_id = request.POST["item_id"]
        qty = request.POST["z"]
        order = get_user_pending_order(request)
        item = order.items.get(pk=item_id)
        item.qty = qty
        item.save()
        order.save()
    return HttpResponse(" ")
@api_view(['POST'])
def get_logs(request):
    """API endpoint: record an order-log entry (user, product, cost, qty).

    Requires a valid 'Products' service API key passed as ?api_key=.
    Returns 200 on success, 400 on a bad key or a malformed payload.
    """
    # Guard clause: reject requests without a valid service token.
    if not Services.objects.filter(token=request.GET.get('api_key'), service_type='Products').exists():
        return Response('Invalid API Key', status=status.HTTP_400_BAD_REQUEST)
    if request.method == 'POST':
        data = JSONParser().parse(request)
        try:
            OrderLogs.objects.create(user=data['user'], product=data['product'],
                                     cost=data['cost'], qty=data['qty'])
            return Response('User Logs created', status=status.HTTP_200_OK)
        except Exception:
            # Narrowed from a bare `except:`; any failure (missing key or a
            # model error) is still reported as a client error, as before.
            return Response('Data not valid/sufficient', status=status.HTTP_400_BAD_REQUEST)
| jiteshm17/SportsHub | cart/views.py | views.py | py | 7,453 | python | en | code | 0 | github-code | 13 |
31155558706 |
import EatingContestEnv as ece
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
def build_model(states, actions):
    """Build a small fully-connected Q-network: states -> 24 -> 24 -> actions."""
    net = Sequential()
    # Two hidden ReLU layers, then a linear output per action value.
    for units, act in ((24, 'relu'), (24, 'relu'), (actions, 'linear')):
        if not net.layers:
            net.add(Dense(units, activation=act, input_shape=states))
        else:
            net.add(Dense(units, activation=act))
    return net
def build_agent(model, actions):
    """Wrap the Q-network in a DQN agent with Boltzmann exploration."""
    agent = DQNAgent(
        model=model,
        memory=SequentialMemory(limit=50000, window_length=1),
        policy=BoltzmannQPolicy(),
        nb_actions=actions,
        nb_steps_warmup=10,
        target_model_update=1e-2,
    )
    return agent
# Build the environment, the Q-network and the DQN agent, then train for
# 50k steps and report the mean reward over 10 test episodes.
env = ece.EatingContest()
states = env.observation_space.shape
actions = env.action_space.n
model = build_model(states, actions)
model.summary()
agent = build_agent(model, actions)
agent.compile(Adam(lr=1e-3), metrics=['mae'])
agent.fit(env, nb_steps=50000, visualize=False, verbose=1)
scores = agent.test(env, nb_episodes=10, visualize=False)
print(np.mean(scores.history['episode_reward']))
| Foiros/EatingContest | main.py | main.py | py | 1,227 | python | en | code | 0 | github-code | 13 |
25013575854 | import RPi.GPIO as GPIO
import time
import LCD_display
import config
from web_requests import booking_stop_reservation
#Setup a GPIO pin on RPi
GPIO.setup(config.button_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def ending_reservation ():
    """Arm the push button so a ~2 s hold ends the reservation.

    The hold-detection logic itself lives in button_callback().
    """
    print ("ending reservation")
    GPIO.add_event_detect(config.button_pin, GPIO.BOTH, callback = button_callback, bouncetime = 50)
def button_deactivated ():
    """Detach the button's GPIO event handler so presses outside a running
    session are ignored."""
    GPIO.remove_event_detect(config.button_pin)
    print("button deactivated")
def button_callback (button_pin):
    """Handle a button edge event.

    On release: turn the LCD backlight off. While held: poll roughly every
    1.5/18 s; after ~20 ticks (about 1.5 s of holding) end the session and
    tell the booking backend.
    """
    i = 0
    if GPIO.input (button_pin) == GPIO.HIGH:
        print("Button released")
        i = 0
        LCD_display.backlight (False)
    else:
        print("Button Pressed")
        while GPIO.input (button_pin) == GPIO.LOW:
            LCD_display.backlight(True)
            i += 1
            #LCD_display.write (i*symbol,2)
            button_hold_time = 1.5 #hold time in seconds
            time.sleep (button_hold_time/18)
            # Button held long enough: stop the reservation and notify.
            if i > 19:
                button_deactivated ()
                booking_stop_reservation()
                LCD_display.display ('Session ended','by user',"","", clear=True, backlight_status=True)
                config.ended_by_user = True
                time.sleep (2)
                #LCD_display.session_ended ()
| TomasSpusta/pipi_reader | pipi_upload/button.py | button.py | py | 1,563 | python | en | code | 0 | github-code | 13 |
73939383699 | import time
import torch
import esm
import tensorflow as tf
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def get_tensor_shape(tensor, device):
    """Return the tensor's shape as a tuple, moving it off the GPU first
    when `device` is not 'cpu'."""
    if device != 'cpu':
        tensor = tensor.cpu().detach()
    return tensor.numpy().shape
def read_fasta(fasta, unalign=False, delimiter=None):
    """Parse a FASTA file.

    Args:
        fasta: path to the FASTA file.
        unalign: if True, strip alignment gaps ('-') from the sequences.
        delimiter: if given, labels are derived from each title by splitting
            on this string and keeping the first field.

    Returns:
        (seqs, titles, labels); labels is None when no delimiter is given.

    Bug fix: the default was the *string* 'None', which passed the
    `is not None` check, so default calls split every title on the literal
    text "None". The file handle is now also closed via a context manager.
    """
    seqs = []
    titles = []
    with open(fasta, 'r') as f:
        for line in f:
            if line[0] == '>':
                titles.append(line.replace('\n', '').replace('>', ''))
            else:
                if unalign:
                    seqs.append(line.replace('\n', '').replace('-', ''))
                else:
                    seqs.append(line.replace('\n', ''))
    if delimiter is not None:
        labels = [title.split(delimiter)[0] for title in titles]
        return seqs, titles, labels
    else:
        return seqs, titles, None
def select_device():
    """Return the CUDA device string with the lower memory utilisation.

    NOTE(review): assumes exactly two visible GPUs ('cuda:0' and 'cuda:1') —
    confirm for other machine configurations.
    """
    free, total = torch.cuda.mem_get_info(device='cuda:0')
    perc0 = ((total - free) / total) * 100
    free, total = torch.cuda.mem_get_info(device='cuda:1')
    perc1 = ((total - free) / total) * 100
    print('GPU 0: %.2f%%' % perc0)
    print('GPU 1: %.2f%%' % perc1)
    # Pick whichever GPU currently has the smaller used fraction.
    if perc0 > perc1:
        return "cuda:1"
    else: return "cuda:0"
def get_GPU_memory(device):
    """Print a summary of allocated/reserved/free CUDA memory for `device`."""
    current_memory = torch.cuda.memory_allocated(device)
    reserved_memory = torch.cuda.memory_reserved(device)
    max_reserved_memory = torch.cuda.max_memory_reserved(device)
    print('.........GPU Memory.........')
    print(f"Current GPU memory usage: {current_memory / 1024 / 1024:.2f} MB")
    print(f"Reserved GPU memory: {reserved_memory / 1024 / 1024:.2f} MB")
    print(f"Max Reserved GPU memory: {max_reserved_memory / 1024 / 1024:.2f} MB")
    free, total = torch.cuda.mem_get_info(device)
    print(f'Free {free / 1024 / 1024:.3f} MB ')
    print(f'Total {total / 1024 / 1024:.3f} MB ')
    print('...........................')
def plot_tSNEs(embeddings, outname, labels = None,
               metric='euclidean', perplexity=30, n_iter=1500):
    """Compute a 2-D t-SNE of `embeddings` and save a scatter plot as PDF.

    If `labels` is given, points are colour-coded by label; the output file is
    named '<outname>_tsne_<metric>_<perplexity>.pdf'.
    """
    from sklearn.manifold import TSNE
    tsne = TSNE(n_components=2, verbose=1, learning_rate='auto', init='pca',
                perplexity=perplexity, n_iter=n_iter, early_exaggeration=12,
                metric=metric)
    tsne_results = tsne.fit_transform(embeddings)
    plt.figure(figsize = (7, 7))
    if labels is not None:
        df = pd.DataFrame(dict(xaxis=tsne_results[:, 0],
                               yaxis=tsne_results[:, 1], kind=labels))
        g = sns.scatterplot(data=df, x='xaxis', y='yaxis',hue='kind')
        # Trim the legend to one entry per distinct label.
        h, l = g.get_legend_handles_labels()
        n = len(set(df['kind'].values.tolist()))
        plt.legend(h[0:n+1], l[0:n+1])
    else:
        df = pd.DataFrame(dict(xaxis=tsne_results[:, 0],
                               yaxis=tsne_results[:, 1]))
        g = sns.scatterplot(data=df, x='xaxis', y='yaxis')
    plt.tight_layout()
    plt.savefig('%s_tsne_%s_%d.pdf' % (outname, metric, perplexity), dpi=300)
if __name__ == '__main__':
    # CLI driver: embed every sequence of a FASTA file with an ESM-2 model
    # and pickle the per-sequence mean embeddings (and optional labels).
    import argparse
    parser = argparse.ArgumentParser(description='')
    requiredArguments = parser.add_argument_group('required arguments')
    requiredArguments.add_argument('-f', '--fasta', help='Fasta file',
                                   required=True)
    requiredArguments.add_argument('-o', '--out', help='Outname',
                                   required=True)
    parser.add_argument('-b', '--batch', help='Batch size',
                        default=10)
    parser.add_argument('-v', '--verbose', help='Verbose', action='store_true',
                        default=False)
    parser.add_argument('-m', '--model', help='ESM model it can be 650M or 3B',
                        default='650M')
    parser.add_argument('-d', '--deli', help='Delimiter of the fasta file labels',
                        default=None)
    args = parser.parse_args()
    verbose = args.verbose
    fasta = args.fasta
    batch_size = int(args.batch)
    delimiter = args.deli
    model = args.model
    outname = args.out
    # Select CPU or GPU (least-used of two GPUs when CUDA is available).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == 'cuda':
        device = select_device()
        if verbose:
            print(device)
            get_GPU_memory(device=device)
    seqs, titles, labels = read_fasta(fasta, unalign=True, delimiter=delimiter)
    data = [(titles[i], seqs[i]) for i in range(len(seqs))]
    # Load ESM-2 model of the requested size.
    print('Loading ESM-2 %s model' % model)
    if model == '650M':
        model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()
        layers = 33
    elif model == '3B':
        model, alphabet = esm.pretrained.esm2_t36_3B_UR50D()
        layers = 36
    elif model == '15B':
        model, alphabet = esm.pretrained.esm2_t48_15B_UR50D()
        layers = 48
    else:
        raise ValueError('Model must be either 650M or 3B')
    model.to(device)
    batch_converter = alphabet.get_batch_converter()
    model.eval() # disables dropout for deterministic results
    if verbose:
        print('After loading the model')
        get_GPU_memory(device=device)
    # Get the sequence embeddings, batch by batch.
    start_time = time.time()
    print('Embedding the sequences')
    seqs_embeddings = []
    for i in range(0, len(data), batch_size):
        print('%d / %d' % (i, len(data)))
        batch = data[i:i + batch_size]
        batch_labels, batch_strs, batch_tokens = batch_converter(batch)
        batch_lens = (batch_tokens != alphabet.padding_idx).sum(1)
        batch_tokens = batch_tokens.to(device)
        if verbose:
            print('After tokenizing')
            get_GPU_memory(device=device)
        #print(batch_labels)
        #print(batch_strs)
        #print(batch_tokens)
        shape = get_tensor_shape(batch_tokens, device)
        print(shape)
        # Extract per-residue representations from the requested layer.
        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[layers], return_contacts=False)
        token_representations = results["representations"][layers]
        if verbose:
            print('After getting the embedding')
            get_GPU_memory(device=device)
        #print(token_representations)
        shape = get_tensor_shape(token_representations, device)
        print(shape)
        # Generate per-sequence representations via averaging over residues
        # (slice 1:len-1 drops the BOS/EOS tokens).
        # NOTE(review): the inner loop reuses `i`, shadowing the batch index;
        # harmless here because the outer `for` reassigns it.
        sequence_representations = []
        for i, tokens_len in enumerate(batch_lens):
            seq_token_representations = token_representations[i, 1 : tokens_len - 1].mean(0)
            if 'cuda' in device:
                seq_token_representations = seq_token_representations.to('cpu')
            sequence_representations.append(seq_token_representations)
        if verbose:
            shape = get_tensor_shape(sequence_representations[0], device)
            print(shape)
        for sequence_representation in sequence_representations:
            seqs_embeddings.append(sequence_representation)
    end_time = time.time()
    execution_time = end_time - start_time
    # Persist embeddings (and labels when a delimiter was supplied).
    with open('%s_embeddings.pickle' % outname, 'wb') as handle:
        pickle.dump(seqs_embeddings, handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    if delimiter is not None:
        with open('%s_labels.pickle' % outname, 'wb') as handle:
            pickle.dump(labels, handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
    print("---------Finished embedding the sequences---------")
    print('Execution time: %.2f s' % execution_time)
#metrics = ['l2', 'braycurtis', 'correlation', 'l1', 'manhattan', 'euclidean', 'cityblock' , 'minkowski','sqeuclidean', 'cosine', 'minkowski', 'nan_euclidean', 'canberra']
| IFilella/PLMAnalysis | scripts/get_ESM_embedding.py | get_ESM_embedding.py | py | 7,764 | python | en | code | 0 | github-code | 13 |
43508380246 | from dsmpy import root_resources
from dsmpy.utils.cmtcatalog import read_catalog
from dsmpy.event import Event
from dsmpy.spc.stf import SourceTimeFunction
from dsmpy.spc.stfcatalog import STFCatalog
import numpy as np
import glob
import os
def get_stf(event):
    """Returns a source time function in time domain.

    Args:
        event (Event): event

    Returns:
        ndarray: source time function of shape (npts, 2), columns are
        (time, amplitude), trimmed to the main pulse, normalized so that
        its integral is 1, and time-shifted to start at 0. None if no
        SCARDEC directory matches the event.
    """
    dir_stf = _parse_dir_name(event)
    if dir_stf is None:
        return None
    # NOTE(review): assumes exactly one 'fctoptsource*' file exists in the
    # directory; glob returning an empty list would raise IndexError.
    file_name = glob.glob(dir_stf + '/fctoptsource*')[0]
    stf = np.loadtxt(file_name, skiprows=2)
    start, end = _get_start_end(stf)
    stf = stf[start:end]
    stf_integral = _compute_integral(stf)
    stf[:, 1] /= stf_integral
    stf[:, 0] -= stf[0, 0]
    return stf
def get_duration(event):
    """Return the source-time-function duration in seconds, or None when no
    SCARDEC STF is available for the event."""
    stf = get_stf(event)
    return None if stf is None else stf[-1, 0]
def create_catalog():
    """Build a SCARDEC-based source-time-function catalog and pickle it.

    For every CMT event with a matching SCARDEC directory, store a triangle
    SourceTimeFunction with half-duration duration/2 under the event id,
    then save the catalog to '<root_resources>/scardec.pkl'.
    """
    cmt_catalog = read_catalog()
    stf_catalog = dict()
    for event in cmt_catalog:
        dir_name = _parse_dir_name(event)
        if dir_name:
            duration = get_duration(event)
            # Bug fix: the original tested the unbound name `stf` here
            # (`if stf is not None:`), raising NameError on the first
            # matching event.
            if duration is not None:
                stf = SourceTimeFunction(
                    'triangle', duration/2.)
                stf_catalog[event.event_id] = stf
    path = os.path.join(root_resources, 'scardec.pkl')
    STFCatalog.save(path, stf_catalog)
def _parse_dir_name(event):
    """Find the SCARDEC directory matching `event`.

    Matches directories containing the 'YYYYmmdd_HHMM' centroid-time stamp
    and accepts a seconds mismatch of up to 15 s. Returns the directory
    path, or None when no directory matches.
    """
    dir_scardec = os.path.join(root_resources, 'scardec')
    # event_id_post2005 = _convert_name_to_post2005(event)
    partial_scardec_dir_name = _convert_name_to_partial_scardec(event)
    dirs = glob.glob(dir_scardec + '/*' + partial_scardec_dir_name + '*')
    parsed_dir = None
    for dir_ in dirs:
        # The two characters after the minute stamp are the seconds.
        ss = int(dir_.split(partial_scardec_dir_name)[1][:2])
        if np.abs(ss - event.centroid_time.second) <= 15:
            parsed_dir = dir_
    return parsed_dir
def _convert_name_to_partial_scardec(event):
    """Format the event centroid time as 'YYYYmmdd_HHMM' (None when the
    event has no centroid time)."""
    if event.centroid_time is None:
        return None
    return event.centroid_time.strftime('%Y%m%d_%H%M')
def _convert_name_to_post2005(event):
    """Format the event centroid time as 'YYYYmmddHHMM', or None when the
    event has no centroid time."""
    stamp = event.centroid_time
    if stamp is None:
        return None
    return stamp.strftime('%Y%m%d%H%M')
def _get_start_end(stf):
    """Return (start, end) indices bracketing the positive lobe around the
    STF peak.

    `start` is the index of the last non-positive sample before the peak
    (or -1) and `end` is one past the first non-positive sample after it,
    so stf[start:end] covers the main pulse.

    Bug fix: both while-loops tested `stf[i, 1] > 0` *before* the bounds
    check, which raised an IndexError (or wrapped to negative indices) when
    the positive run reached either end of the array; the bound is now
    checked first.
    """
    i_peak = np.argmax(stf[:, 1])
    n = len(stf)
    i = i_peak
    # Walk right while the amplitude stays positive.
    while i < n and stf[i, 1] > 0:
        i += 1
    i_end = i + 1
    i = i_peak
    # Walk left while the amplitude stays positive.
    while i >= 0 and stf[i, 1] > 0:
        i -= 1
    i_start = i
    return i_start, i_end
def _compute_integral(stf):
    """Integrate the STF amplitude (column 1) over time (column 0) with the
    trapezoidal rule."""
    return np.trapz(stf[:, 1], stf[:, 0])
# if __name__ == '__main__':
# create_catalog()
| afeborgeaud/dsmpy | dsmpy/utils/scardec.py | scardec.py | py | 2,688 | python | en | code | 10 | github-code | 13 |
3948826070 | import nextcord, os
from nextcord.ext import commands
from cogs.utils import config
class ArchiveCommand(commands.Cog):
    """Cog exposing the /archive slash command used to archive hike channels."""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @nextcord.slash_command(
        name="archive",
        description="Archive un salon de rando",
    )
    async def archive_cmd(self, ctx: nextcord.Interaction):
        """Save every attachment of the channel locally, move the channel to
        the archive category, and strip the 'next hike' role from members."""
        # Create local folder to store pictures in the channel; the folder is
        # named after the date part of the channel name ('<prefix>-<date>').
        date = ctx.channel.name.split("-", 1)[1]
        os.makedirs("images" + os.sep + date, exist_ok=True)
        # Save all images to the local folder
        async for log in ctx.channel.history(limit=999999):
            if log.attachments:
                for attachment in log.attachments:
                    await attachment.save("images" + os.sep + date + os.sep + attachment.filename)
        # Get archived category and create it if it doesn't exist
        # NOTE(review): the lookup uses id=<config value> but creation passes
        # the same value as the category *name* — confirm what config stores.
        category = nextcord.utils.get(ctx.guild.categories, id=config.get(ctx.guild_id, "category_archived"))
        if not category:
            category = await ctx.guild.create_category_channel(config.get(ctx.guild_id, "category_archived"))
        # Move channel to archived category
        await ctx.channel.edit(category=category)
        # Remove next hike role from all users
        # NOTE(review): membership is tested against role *names* but the role
        # object is fetched with id= using the same config value — verify.
        participants = [x for x in ctx.guild.members if config.get(ctx.guild_id, "role_next_rando") in [role.name for role in x.roles]]
        for participant in participants:
            role = nextcord.utils.get(participant.guild.roles, id=config.get(ctx.guild_id, "role_next_rando"))
            await participant.remove_roles(role)
        await ctx.send("Salon rando archivé", delete_after=60)
def setup(bot):
    # Entry point used by nextcord's extension loader to register the cog.
    bot.add_cog(ArchiveCommand(bot))
| QuentiumYT/RandoBot | cogs/archive.py | archive.py | py | 1,731 | python | en | code | 0 | github-code | 13 |
72190480017 | #!/usr/bin/python3
from gi.repository import Gtk
class MyBuilder(Gtk.Builder):
    """GTK window implementing an Atbash cipher (A<->Z letter mirroring).

    Loads the layout from 'ui.glade', wires the encrypt/decrypt buttons and
    shows validation messages in the status bar.
    """
    def __init__(self):
        Gtk.Builder.__init__(self)
        self.add_from_file("ui.glade")
        handlers = {
            "encrypt": self.encrypt,
            "decrypt": self.decrypt
        }
        self.connect_signals(handlers)
        self.window = self.get_object("main_window")
        self.textview1 = self.get_object("textview1")
        self.textview2 = self.get_object("textview2")
        self.entry1 = self.get_object("entry1")
        self.statusbar1 = self.get_object("statusbar1")
        self.alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        self.window.connect("delete-event", Gtk.main_quit)
        self.window.show_all()
    def encrypt(self, widget):
        """Atbash-encrypt textview1 into textview2, preserving case and
        passing non-letters through unchanged."""
        encrypt_buffer = self.textview1.get_buffer()
        start_iter = encrypt_buffer.get_start_iter()
        end_iter = encrypt_buffer.get_end_iter()
        plain_text = encrypt_buffer.get_text(start_iter, end_iter, False)
        if len(plain_text) == 0:
            self.set_message("There is no text to encrypt...", "error")
        else:
            self.clear_messages("error")
            encrypted_text = ""
            for char in plain_text:
                index = self.alphabet.find(char.upper())
                if index >= 0:
                    # Mirror the letter: position i maps to the reversed alphabet.
                    if char.isupper():
                        encrypted_text += self.alphabet[::-1][index]
                    else:
                        encrypted_text += self.alphabet[::-1][index].lower()
                else:
                    encrypted_text += char
            self.textview2.get_buffer().set_text(encrypted_text)
    def decrypt(self, widget):
        """Decrypt textview2 back into textview1 after checking the password."""
        if not self.check_password():
            self.set_message("Incorrect password..." , "error")
        else:
            decrypt_buffer = self.textview2.get_buffer()
            start_iter = decrypt_buffer.get_start_iter()
            end_iter = decrypt_buffer.get_end_iter()
            cipher_text = decrypt_buffer.get_text(start_iter, end_iter, False)
            if len(cipher_text) == 0:
                self.set_message("There is no text to decipher...", "error")
            else:
                self.clear_messages("error")
                decrypted_text = ""
                for char in cipher_text:
                    index = self.alphabet[::-1].find(char.upper())
                    if index >= 0:
                        if char.isupper():
                            decrypted_text += self.alphabet[index]
                        else:
                            decrypted_text += self.alphabet[index].lower()
                    else:
                        decrypted_text += char
                self.textview1.get_buffer().set_text(decrypted_text)
    def check_password(self):
        # Hard-coded demo password.
        return True if self.entry1.get_buffer().get_text() == "secret" else False
    def set_message(self, text, context):
        """Push `text` onto the status bar under the given context name."""
        context_id = self.statusbar1.get_context_id(context)
        self.statusbar1.push(context_id, text)
    def clear_messages(self, context):
        """Remove all status-bar messages for the given context name."""
        context_id = self.statusbar1.get_context_id(context)
        self.statusbar1.remove_all(context_id)
if __name__ == "__main__":
    # Build the UI and enter the GTK main loop.
    program = MyBuilder()
    Gtk.main()
| Akus93/AtBash | src/atbash.py | atbash.py | py | 3,244 | python | en | code | 0 | github-code | 13 |
def insertion_sort(arr):
    """Sort `arr` in place with insertion sort and return it."""
    for idx in range(1, len(arr)):
        pos = idx
        # Shift the new element left until it is in order.
        while pos > 0 and arr[pos] < arr[pos - 1]:
            arr[pos], arr[pos - 1] = arr[pos - 1], arr[pos]
            pos -= 1
    return arr
def main():
    """Demo: sort one numeric list and one string list, printing each result."""
    numbers = [154, 245, 568, 324, 654, 324]
    names = ['Mike', 'Bob', 'Sally', 'Jil', 'Jan']
    print(insertion_sort(numbers))
    print(insertion_sort(names))

main()
8289307857 | from __future__ import unicode_literals
from django.db import migrations, models
import taggit.managers
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Link (a bookmarked URL owned by a user and tagged via
    django-taggit) and LinkUsage (one row each time a user follows a link).

    NOTE: applied migrations should not be edited; documentation only.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0002_auto_20150616_2121'),
    ]

    operations = [
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(
                    verbose_name='ID', primary_key=True,
                    auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
                ('description', models.TextField(blank=True, null=True)),
                ('destination', models.URLField(
                    max_length=2000, unique=True)),
                ('is_external', models.BooleanField(default=False)),
                ('added', models.DateTimeField(null=True, auto_now_add=True)),
                ('categories', taggit.managers.TaggableManager(
                    blank=True,
                    verbose_name='Tags',
                    to='taggit.Tag',
                    help_text='A comma-separated list of tags.',
                    through='taggit.TaggedItem',
                )),
                ('owner', models.ForeignKey(
                    on_delete=django.db.models.deletion.PROTECT,
                    to=settings.AUTH_USER_MODEL,
                )),
            ],
        ),
        migrations.CreateModel(
            name='LinkUsage',
            fields=[
                ('id', models.AutoField(
                    verbose_name='ID', primary_key=True,
                    auto_created=True, serialize=False)),
                ('start', models.DateTimeField(auto_now_add=True)),
                ('link', models.ForeignKey(
                    to='links.Link', related_name='usage')),
                ('user', models.ForeignKey(
                    to=settings.AUTH_USER_MODEL,
                    related_name='usage')),
            ],
        ),
    ]
| dstl/lighthouse | apps/links/migrations/0001_initial.py | 0001_initial.py | py | 2,085 | python | en | code | 10 | github-code | 13 |
1194263293 | import sys
import json
import uuid
import http.client
import time
import os.path
import datetime
from copy import deepcopy
PATH=os.path.dirname(os.path.realpath(__file__))
class TAC_API(object):
    def __init__(self,api="app.alcww.gumi.sg",device_id=str(uuid.uuid4()),secret_key=str(uuid.uuid4()),idfa=str(uuid.uuid4()),idfv=str(uuid.uuid4()),cuid='',print_req=False, debug=False):
        # NOTE(review): the uuid4() defaults are evaluated once at class
        # definition time, so every instance created without explicit ids
        # shares the same generated values — confirm this is intended.
        object.__init__(self)
        self.api=api
        self.device_id=device_id
        self.secret_key=secret_key
        self.idfa=idfa
        self.idfv=idfv
        self.name=''
        self.ticket=0
        self.access_token=''
        self.cuid=cuid
        self.fuid=''
        self.print_req=print_req #print request url
        self.ap=0
        self.debug=debug
        if debug:
            os.makedirs('debug',exist_ok=True)
    ###Note
    # either load account data via setting the variables yourself,
    # or use self.load_gu3c
    # or use create_account
    # JP logins are a bit tricky, they require the idfa and idfv from the login,
    # they aren't saved in the gu3c.dat and have to be saved somewhere else
    def get(self,string):
        # Return attribute `string`, or False when it does not exist.
        return getattr(self, string,False)
    def app_start(self):
        """Replay the client's startup request sequence (player check,
        bundles, login), finishing any quest the account is stuck in."""
        if not self.api=='alchemist.gu3.jp':
            res=self.req_chk_player()
            if res['result'] != 1:
                input('Problem during player check:\t%s'%res)
        self.req_bundle()
        self.req_product()
        self.req_achieve_auth()
        login=self.req_login()
        self.req_login_param()
        #check if running quest
        if 'btlid' in login['player'] and login['player']['btlid']:
            print('account still in a quest, please wait ~15s, quest has to be finished first')
            battle=self.resume_battle(login['player']['btlid'])
            time.sleep(10)
            self.end_battle(req_battle=battle)
            time.sleep(5)
        self.req_home()
    def create_account(self,PRINT=False,debug=False):
        """Register a fresh device/account and log it in once."""
        self.device_id=''
        self.device_id=self.req_register()
        self.req_achieve_auth()
        self.req_login()
        login=(self.req_playnew(debug=debug))
        self.name=login["player"]["name"]
        if PRINT:
            self.print_login(login)
    def req_accesstoken(self,raw=False):
        """Fetch a new access token for this device and cache it on self."""
        self.ticket=0
        body = {
            "access_token": "",
            "param": {
                "device_id": self.device_id,
                "secret_key": self.secret_key,
                "idfa": self.idfa, # Google advertising ID
                "idfv": self.idfv,
                "udid":""
            }
        }
        res = self.api_request("/gauth/accesstoken", body,'POST',True)
        if 'access_token' in res['body']:
            self.access_token=res['body']['access_token']
        else:
            print('Failed receiving access_token',res)
        return res if raw else res['body']
    def req_chk_player(self,raw=False):
        """Check the player record for this device; retries on failure."""
        body = {
            "access_token": "",
            "param": {
                "device_id": self.device_id,
                "secret_key": self.secret_key,
                "idfa": self.idfa, # Google advertising ID
            }
        }
        res_body = self.api_request("/player/chkplayer", body,'POST', True)
        try:
            res_body['body']
        except:
            # Response had no body: retry (raw form) before returning.
            print('error: failed to retrieve chklayer')
            res_body=self.req_chk_player(True)
        return res_body if raw else res_body['body']
    def req_achieve_auth(self):
        # GET /achieve/auth with no parameters.
        return(self.api_connect('/achieve/auth',{},'GET'))
    def req_register(self):
        """Register the device; recurses until a device_id is returned."""
        body={"access_token": "",
            "param": {
                "udid": "",
                "secret_key": self.secret_key,
                "idfv": self.idfv,
                "idfa": self.idfa
            }
        }
        res_body=self.api_connect('/gauth/register',body,'POST')
        try:
            ret = res_body['body']['device_id']
        except:
            # NOTE(review): waits for operator input, then retries forever.
            print('error: failed to register')
            print(res_body)
            input('/')
            ret=self.req_register()
        return ret
def req_playnew(self,raw=False,debug=False):
body={
"param": {
"permanent_id": self.idfa,
}
}
if debug:
body['param']["debug"]=1
return self.api_request('/playnew',body,'POST',raw)
def req_home(self,raw=False):
return self.api_request('/home',{"param": {"is_multi_push": 1}},raw=raw)
def req_bundle(self,raw=False):
return self.api_request('/bundle',raw=raw)
def req_product(self,raw=False):
return self.api_request('/product',raw=raw)
def req_login(self,raw=False):
if not self.cuid:
login=self.api_request("/login",{"param":{"device":"HUAWEI HUAWEI MLA-L12","dlc":"apvr"}},raw=True)
if login['body']:
self.cuid=login['body']['player']['cuid']
self.fuid=login['body']['player']['fuid']
return login if raw else login['body']
return self.api_request("/login",{"param":{"device":"HUAWEI HUAWEI MLA-L12","dlc":"apvr"}},raw=raw)
def req_login_param(self,relogin=0,raw=False):
res = self.api_request('/login/param',{"param": {"relogin": int(relogin)}},raw=True)
for key,val in res['body'].items():
if type(val)==list and len(val) and type(val[0])==dict:
subkey='iname' if 'iname' in val[0] else 'i' if 'i' in val[0] else False
if key:
val={item[subkey]:item for item in val}
self.__setattr__(key,val)
return res if raw else res['body']
#### MAIL ##########################################################################
def req_mail(self,page=1,period=1,read=0,raw=False):
body = {
"param": {
"page": page,
"isPeriod": period,
"isRead": int(read)
}
}
res = self.api_request('/mail',body,raw=raw)
return res if raw else res['mails']
def read_mail(self,mailIds,page=1,period=1,raw=False):
if type(mailIds)!=list:
mailIds=[int(mailIds)]
body = {
"param": {
"mailids": mailIds,
"page": page,
"period": period
}
}
return self.api_request('/mail/read',body,raw=raw)
#### FRIEND ##########################################################################
def req_friend_fuid(self,fuid,raw=False):
ret=self.api_request('/friend/find',{'param':{'fuid':fuid}},'POST',True)
try:
friend=ret['body']['friends'][0]
return ret if raw else friend
except:
return ret
def req_friend_name(self,name,raw=False):
ret = self.api_request('/friend/search',{'param':{'name':name}},'POST',True)
try:
friend=ret['body']['friends'][0]
return ret if raw else friend
except:
return ret
#### BATTLE ##############################################################################
def req_quest_runs(self,raw=False):
res = self.req_login_param(raw=raw)
return res if raw else res['quests']
def req_used_units(self,raw=False):
return self.api_request('/btl/usedunit/multiple',{ "param": {"inames": ["quest", "arena", "tower_match"]}},raw=raw)
# returns array of {iname:,ranking:[unit_iname,job_iname,num],is_ready: just tells if can be used}
### Single Player
def req_quests(self,raw=False):
res = self.api_request('/btl/com',{"param": {"event": 1}},raw=raw)
return res if raw else res['quests']
def resume_battle(self,btlid,raw=False):
return self.api_request('/btl/com/resume',{'param':{'btlid':btlid}},raw=raw)
def req_battle(self,quest,partyid=0,fuid="",raw=False):
body={
"param": {
"iname": quest,
"partyid": partyid,
"req_at": UNIX_timestamp(),
"btlparam": {
"help": {
"fuid": fuid
}
},
"location": {
"lat": 0,
"lng": 0
}
}
}
ret = self.api_request('/btl/com/req',body,raw=True)
if ret['stat']!=0: #not enough AP or another error
print(ret)
return ret
else:
return ret if raw else ret['body']
def end_battle(self,btlid=False,beats=[],result='win',missions=[],trophies=False,bingos=False,raw=False,req_battle=False):
if req_battle:
btlid=req_battle['btlid']
empty={'gold':0,'secret':0}
beats=[1 if drop!=empty else 0 for drop in req_battle['btlinfo']['drops']]
if not btlid:
input('btlid is missing')
body={
"param": {
"btlid": btlid,
"btlendparam": {
"time": 0,
"result": result,
"beats": beats,
"steals": {
"items": [0]*len(beats),
"golds": [0]*len(beats)
},
"missions": missions,
"inputs": []
}
}
}
if trophies:
body["trophyprogs"]=self.Trophyprogs(trophies)
if bingos:
body["bingoprogs"]=self.Bingoprogs(bingos)
return self.api_request('btl/com/end',body,raw=raw)
### Multi Player
def req_multi_check(self,raw=False):
return self.api_request('/btl/multi/check',raw=raw) #device_id
def req_multi_room(self,quest,raw=False):
return self.api_request('btl/room',{"param": {"iname": quest}},raw=raw) #empty arry
def req_multi_room_make(self,quest,comment="",pwd="0",private=0,limit=0,unitlv=0,clear=0,raw=False):
body = {
"param": {
"iname": quest,
"comment": comment,
"pwd": pwd,
"private": int(private),
"req_at": UNIX_timestamp(),
"limit": int(limit),
"unitlv": unitlv,
"clear": int(clear)
}
}
return self.api_request('/btl/room/make',body,raw=raw) #roomid,app_id,token
def req_multi_battle(self,quest,token,partyid=1,host=1,plid=1,seat=1,raw=False):
body = {
"param": {
"iname": quest,
"partyid": partyid,
"token": token,
"host": str(host),
"plid": str(plid),
"seat": str(seat),
"btlparam": {
"help": {
"fuid": ""
}
},
"location": {
"lat": 0,
"lng": 0
}
}
}
return self.api_request('btl/multi/req',body,raw=raw)
def end_multi_battle(self,btlid,token,beats,result="win",fuids=[],raw=False):
body = {
"param": {
"btlid": btlid,
"btlendparam": {
"time": 0,
"result": result,
"beats": beats,
"steals": {
"items": [0]*len(beats),
"golds": [0]*len(beats)
},
"missions": [],
"inputs": [],
"token": token
},
"fuids": fuids
}
}
return self.api_request('btl/multi/end',body,raw=raw)
### Arena
def req_arena(self,raw=False):
return self.api_request("/btl/colo",raw=raw)
def req_arena_ranking(self,raw=False):
res_body = self.api_request("/btl/colo/ranking/world",{},'POST',True)
try:
ret = res_body['body']['coloenemies']
except:
print('error: failed to retrieve arena')
print(res_body)
raw=True
return res_body if raw else ret
def exec_arena(self,enemy_fuid,opp_rank,my_rank,result='win',beats=[1,1,1],trophies=False,bingos=False,raw=False):
body={
"param": {
"fuid": enemy_fuid,
"opp_rank": opp_rank,
"my_rank": my_rank,
"btlendparam": {
"time": 0,
"result": result,
"beats": beats,
"steals": {
"items": [0, 0, 0],
"golds": [0, 0, 0]
},
"missions": [],
"inputs": []
}
}
}
if trophies:
body["trophyprogs"]=self.Trophyprogs(trophies)
if bingos:
body["bingoprogs"]=self.Bingoprogs(bingos)
return self.api_request('/btl/colo/exec',body,raw=raw)
#### CHAT ###############################################################################
def req_chat(self,channel=1,limit=30,last_msg_id=0,raw=False):
body={
"param": {
"start_id": 0,
"channel": channel,
"limit": limit,
"exclude_id": last_msg_id,
"is_multi_push": 1
}
}
res = self.api_request('/chat/message',body,raw=raw)
return res if raw else res['messages']
def req_chat_room(self,roomtoken=1,limit=30,last_msg_id=0,raw=False):
body={
"param": {
"start_id": 0,
"roomtoken": roomtoken,
"limit": limit,
"exclude_id": last_msg_id,
"is_multi_push": 1
}
}
res = self.api_request('/chat/room/message',body,raw=raw)
return res if raw else res['messages']
#### SHOPS ################################################################################
def req_shopslist(self,typ='',items=True,raw=False): #types: limited,event, no type
res_body = self.api_request('/shop%s/shoplist'%('/%s'%typ if typ else ''),raw=raw)
try:
ret=res_body['body']['shops']
if items:
for shop in res_body["body"]["shops"]:
shop["shopitems"]= self.req_shop(shop["gname"],typ,raw=True)
if not raw:
shop['shopitems']=shop['shopitems']["body"]["shopitems"]
except:
print('error: failed to request shops')
print(res_body)
raw=True
return res_body if raw else ret
def req_shop(self,shopName,typ='',raw=False):
return self.api_request('/shop%s'%('/%s'%typ if typ else ''),{"param": {"shopName": shopName}},'POST',raw)
def update_shop(self,shopName,typ='',raw=False):
return self.api_request('/shop%s/update'%('/%s'%typ if typ else ''),{"param": {"iname": shopName}},raw=raw)
def buy_shop(self,shopName,typ='',id=1,num=1,raw=False):
body={
"param": {
"iname": shopName,
"id": id,
"buynum": num
}
}
return self.api_request('/shop%s/buy'%('/%s'%typ if typ else ''),body,raw=raw)
#### GACHA #################################################################################
def req_gacha(self,raw=False):
res=self.api_request("/gacha",{},'POST',raw)
return res if raw else res['gachas']
def exec_gacha(self,GachaID,raw=False):
return self.api_request("/gacha/exec",{"param":{"gachaid":GachaID,"free":0}},'POST',raw)
#### TROPHIES ##############################################################################
def req_trophy(self,raw=False):
res = self.req_login_param(raw=raw)
return res if raw else res['trophyprogs']
def exec_trophy(self,trophies,ymd=False, raw=False):
if type(trophies) == dict:
trophies = [trophies]
body = {
"param":{
"trophyprogs":self.Trophyprogs(trophies,ymd)
}
}
return self.api_request("/trophy/exec",body,raw=raw)
#### CHALLANGE BOARD/BINGOS ####################################################################
def req_bingo(self,raw=False):
res = self.req_login_param(raw=raw)
return res if raw else res['bingoprogs']
def exec_bingo(self,bingos,ymd=False,raw=False):
if type(bingos) == dict:
bingos = [bingos]
#multiple problems -> have to be solved in waves
# 1. some bingos require others to be completed first - flg_quests
# singular bingos have to be cleared for the board reward - parent_iname
#sorting stuff into category - parent - child
mbingos={}
for bingo in bingos:
iname=bingo['iname']
category=bingo['category'] if 'category' in bingo else ''
parent=bingo['parent_iname'] if 'parent_iname' in bingo else ''
if category not in mbingos:
mbingos[category]={}
if parent: #is child
if parent not in mbingos[category]:
mbingos[category][parent]={'childs':{}}
mbingos[category][parent]['childs'][iname]=bingo
else: #is parent
if iname not in mbingos[category]:
mbingos[category][iname]={'childs':{}}
mbingos[category][iname].update(bingo)
returns=[]
cleared=[]
while mbingos: #loop stuff and destroy mbingos during it until is is destored
sbingos=[]
#generate send list
del_category=[]
for category,citems in mbingos.items():
del_parent=[]
for piname, parent in citems.items():
exec=True
#check if it can be run
if 'flg_quests' in parent:
for flag in parent['flg_quests']:
if flag not in cleared:
exec=False
break
if not exec:
continue
#check if board can be cleared or childs have to be cleared first
if 'childs' in parent:
#execute childs
sbingos+=[child for ciname,child in parent['childs'].items()]
del parent['childs']
else:
if parent:
sbingos.append(parent)
cleared.append(piname)
del_parent.append(piname)
#cleanup
if del_parent:
for piname in del_parent:
del citems[piname]
if not citems: #empty
del_category.append(category)
if del_category:
for ciname in del_category:
del mbingos[ciname]
#request childs first
body = {
"param":{
"bingoprogs":self.Bingoprogs(sbingos,ymd)
}
}
returns.append(self.api_request("/bingo/exec",body,raw=raw))
return returns
#### Concept Cards #############################################################################
def req_cards(self,raw=False):
return self.api_request('/unit/concept',{"param":{"last_iid": 0}},raw=raw)
#### Unit #############################
def unit_set_skin(self,jobIDs,skin,raw=False):
body={
'param':{
'sets':[
{
'iid': jobid,
'iname': skin
}
for jobid in jobIDs
]
}
}
return self.api_request('/unit/skin/set',body=body,raw=raw)
#### Register Account Linking ##################################################################
def link_account(self,password='tagatame'):
if not self.cuid:
self.req_login()
body={
"ticket": "0",
"access_token": "",
"email": self.cuid,
"password": password,
"disable_validation_email": True,
"device_id": self.device_id,
"secret_key": self.secret_key,
"udid": ""
}
ret=self.api_connect('/auth/email/register',body,ignoreStat=True)
if ret["is_succeeded"]:
return True
else:
return False
def req_linked_account(self,cuid,password):
body={
"access_token": "",
"email": cuid,
"password": password,
"idfv": self.idfv,
"udid": ""
}
ret=self.api_connect('/auth/email/device',body,ignoreStat=True, no_access_token=True)
if 'device_id' in ret:
self.device_id=ret['device_id']
self.secret_key=ret['secret_key']
print('Login successfull, emulating login')
self.app_start()
else:
print('Login failed')
#### CONNECTION ###########################################################
def api_request(self,url,body={},request='POST',raw=False,retry=False):
if url[0]!= '/':
url='/%s'%url
res_body=self.api_connect(url,body,request)
try:
ret = res_body['body']
if 'player' in ret and 'stamina' in ret['player']:
self.ap=ret['player']['stamina']['pt']
except Exception as e:
print('error: failed to retrieve %s'%url)
print(e)
print(res_body)
if retry:
ret=self.api_request(url,body,request,raw,retry=False)
else:
raw=True
return res_body if raw else ret
def api_connect(self,url, body={},request="POST",api=False, ignoreStat=False, no_access_token=False):
#print(self.access_token)
if not api:
api=self.api
body['ticket']=self.ticket
#create headers
RID=str(uuid.uuid4()).replace('-','')
headers={
'X-GUMI-DEVICE-PLATFORM': 'android',
'X-GUMI-DEVICE-OS': 'android',
'X-Gumi-Game-Environment': 'sg_production',
"X-GUMI-TRANSACTION": RID,
'X-GUMI-REQUEST-ID': RID,
'X-GUMI-CLIENT': 'gscc ver.0.1',
'X-Gumi-User-Agent': json.dumps({
"device_model":"HUAWEI HUAWEI MLA-L12",
"device_vendor":"<unknown>",
"os_info":"Android OS 4.4.2 / API-19 (HUAWEIMLA-L12/381180418)",
"cpu_info":"ARMv7 VFPv3 NEON VMH","memory_size":"1.006GB"
}),
"User-Agent": "Dalvik/1.6.0 (Linux; U; Android 4.4.2; HUAWEI MLA-L12 Build/HUAWEIMLA-L12)",
"X-Unity-Version": "5.3.6p1",
"Content-Type": "application/json; charset=utf-8",
"Host": api,
"Connection": "Keep-Alive",
"Accept-Encoding": "gzip",
"Content-Length": len(json.dumps(body))
}
if url!="/gauth/accesstoken" and url!='/gauth/register' and not no_access_token:
if self.access_token == "":
self.access_token = self.req_accesstoken()['access_token']
headers["Authorization"] = "gauth " + self.access_token
if self.print_req:
print(api+url)
try:
con = http.client.HTTPSConnection(api)
con.connect()
con.request(request, url, json.dumps(body), headers)
res_body = con.getresponse().read()
#print(res_body)
con.close()
except http.client.RemoteDisconnected:
return self.api_connect(url, body)
try:
json_res= json.loads(res_body)
if self.debug:
with open(os.path.join('debug','{:%y%m%d-%H-%M-%S}{}.json'.format(datetime.datetime.utcnow(),url.replace('/','_'))),'wb') as f:
f.write(json.dumps(json_res, indent=4, ensure_ascii=False).encode('utf8'))
if not ignoreStat and json_res['stat'] in [5002,5003]:
print('Error 5002 ~ have to login again')
self.access_token=""
json_res = self.api_connect(url, body)
self.ticket+=1
return(json_res)
except Exception as e:
print(e)
print(url)
print(res_body)
if self.debug:
with open(os.path.join('debug','{:%y%m%d-%H-%M-%S}{}.json'.format(datetime.datetime.utcnow(),url.replace('/','_'))),'wb') as f:
f.write(res_body.encode('utf8'))
if '504 Gateway Time-out' in str(res_body):
print('Waiting 30s, then trying it again')
time.sleep(30)
elif str(e)=='maximum recursion depth exceeded while calling a Python object':
raise ValueError('max recursion')
else:
input('Unknown Error')
#return self.api_connect(url, body)
raise RecursionError('-')
def print_login(self,json_res=False,ret=False):
if not json_res:
json_res=self.req_login()
print("--------------------------------------------")
print("Name:", json_res["player"]["name"])
print("P. Lv:", json_res["player"]["lv"])
print("User Code:", json_res["player"]["cuid"])
print("Friend ID:", json_res["player"]["fuid"])
print("Created at:", json_res["player"]["created_at"])
print("Exp:", json_res["player"]["exp"])
print("Stamina:", json_res["player"]["stamina"]["pt"], "/", json_res["player"]["stamina"]["max"])
print("Zeni:", json_res["player"]["gold"])
print("Gems:", json_res["player"]["coin"]["paid"], "Paid,", json_res["player"]["coin"]["com"], "Shared,",json_res["player"]["coin"]["free"], "Free")
print("--------------------")
if ret:
return json_res
#### ETC ################################################
def Trophyprogs(self,trophies,ymd=False):
if type(trophies)==dict:
if 'iname' in trophies[0]:
trophies=[trophies]
else:
trophies=[item for key,item in trophies.items() if 'iname' in item]
#already formated?
if 'pts' in trophies[0]:
skeys=['iname','pts','ymd','rewarded_at']
return [{skey:item[skey] for skey in skeys if skey in item} for item in trophies] #clean-up
timestamp=self.ymd_timestamp(ymd)
return (
[
{
"iname": trophy['iname'],
"pts": [trophy['ival']] if 'ival' in trophy else [1],
"ymd":timestamp,
"rewarded_at":timestamp
}
for trophy in trophies
]
)
    def Bingoprogs(self,bingos,ymd=False):
        """Normalise bingo (challenge board) data into the 'bingoprogs' wire format.

        :param bingos: a single bingo dict or a list of bingo dicts
        :param ymd: optional (y, m, d) tuple used for the progress timestamp
        """
        if type(bingos)==dict:
            bingos=[bingos]
        timestamp=self.ymd_timestamp(ymd)
        return (
            [
                {
                    "iname": bingo['iname'],
                    # singular bingos reference their board via parent_iname
                    "parent": bingo['parent_iname'] if 'parent_iname' in bingo else '',
                    "pts": [bingo['ival']] if 'ival' in bingo else [1],
                    "ymd":timestamp,
                    "rewarded_at":timestamp
                }
                for bingo in bingos
                if bingo
            ]
        )
#### timestamps ######################################
def ymd_timestamp(self,date=False,deltaH=8):
if 'app.alcww.gumi.sg' in self.api:
deltaH=8
elif 'alchemist.gu3.jp' in self.api:
deltaH=14
return ymd_timestamp(date=date,deltaH=deltaH)
#### decoding #########################################
def load_gu3c(self,path=os.path.join(PATH,"gu3c.dat")):
if not os.path.exists(path):
print("", path, " was not found.")
return
with open(path, "rb") as f:
content = bytearray(f.read())
print("Decrypting gu3c.dat...")
(self.device_id, self.secret_key) = decrypt(content).decode("utf-8").split(" ")
def save_gu3c(self,playerName=False,path=PATH):
if not playerName:
playerName=self.name
print("Saving gu3c.dat...")
print("Device_ID: ",self.device_id)
print("Secret_Key: ",self.secret_key)
#encryption
src = self.device_id + " " + self.secret_key
msg = bytearray(src,"utf8")
enc = bytes(encrypt(msg))
os.makedirs(path,exist_ok=True)
with open(os.path.join(path,'%s.gu3c.dat'%playerName), "wb") as f:
f.write(enc)
# static 128-byte XOR key shared by encrypt()/decrypt() for gu3c.dat
key = bytearray([0x08, 0x38, 0x55, 0x64, 0x17, 0xa0, 0x78, 0x4c, 0xf5, 0x97, 0x86, 0x4b, 0x16, 0xac, 0x9d, 0xd9,
                 0xaa, 0x1c, 0x81, 0x7a, 0x27, 0xae, 0x3f, 0x2c, 0xa1, 0x95, 0x80, 0xf4, 0xc8, 0x97, 0xd8, 0x6d,
                 0x98, 0x2c, 0x12, 0x5b, 0x88, 0x74, 0x13, 0xbe, 0xe6, 0x84, 0xda, 0xac, 0x14, 0x19, 0xf3, 0x38,
                 0x8a, 0xe2, 0x9d, 0x5d, 0xa0, 0x5c, 0x03, 0x71, 0xf6, 0x5b, 0x56, 0xb6, 0x48, 0x14, 0xe7, 0x16,
                 0xea, 0x44, 0x3b, 0xd0, 0xd8, 0x20, 0xd5, 0x65, 0xe9, 0xbe, 0xf9, 0xb2, 0xa8, 0x49, 0x1e, 0x80,
                 0x1e, 0xd8, 0x80, 0xf1, 0x3f, 0x71, 0x5f, 0x79, 0x92, 0xe3, 0xef, 0xb8, 0xbe, 0xe9, 0x63, 0x5a,
                 0x1e, 0xcf, 0x24, 0x5b, 0x87, 0x6b, 0xa2, 0xdc, 0x13, 0x3d, 0x7b, 0xfe, 0x19, 0x60, 0x53, 0xcf,
                 0x13, 0x03, 0x45, 0x4f, 0x0f, 0x84, 0xc8, 0x87, 0xac, 0x2a, 0xd5, 0xbc, 0x70, 0xbd, 0xfd, 0x66])


def encrypt(bytes_to_encrypt):
    """Chained XOR cipher for gu3c.dat: each byte is mixed with the previous
    plaintext byte (0x99 seed) and the repeating 128-byte key."""
    out = bytearray(len(bytes_to_encrypt))
    prev = 0x99
    for i, b in enumerate(bytes_to_encrypt):
        out[i] = prev ^ b ^ key[i & 0x7f]
        prev = b
    return out


def decrypt(encrypted_bytes):
    """Inverse of encrypt(): the chain value is the previously decrypted byte."""
    out = bytearray(len(encrypted_bytes))
    prev = 0x99
    for i, b in enumerate(encrypted_bytes):
        out[i] = prev ^ b ^ key[i & 0x7f]
        prev = out[i]
    return out
#timestamps
def ymd_timestamp(date=False,deltaH=0):
    """Return a 'yymmdd' string for the server day, shifted back by *deltaH* hours.

    :param date: optional (year, month, day) tuple; current UTC when falsy
    :param deltaH: hours the server's daily reset lags behind UTC
    """
    if not date:
        return '{:%y%m%d}'.format(datetime.datetime.utcnow() - datetime.timedelta(hours=deltaH))
    # BUG FIX: subtracting an hours-only timedelta from a datetime.date is a
    # no-op (date arithmetic ignores sub-day amounts), so deltaH never had any
    # effect on explicit dates. Use a datetime at midnight instead.
    return '{:%y%m%d}'.format(datetime.datetime(*date) - datetime.timedelta(hours=deltaH))
def UNIX_timestamp(date=False):
    """Return whole seconds since the Unix epoch for *date* (y, m, d) or now (UTC)."""
    if not date:
        return round((datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).total_seconds())
    # BUG FIX: `datetime.date(*date) - datetime.datetime(...)` raised TypeError
    # (date and datetime instances cannot be mixed in subtraction).
    return round((datetime.datetime(*date) - datetime.datetime(1970,1,1)).total_seconds())
import requests
from bs4 import BeautifulSoup
from money import Money
link_base = 'http://www.portaltransparencia.gov.br/copa2014/api/rest/empreendimento'
def run(link):
    """Fetch the transparency feed at *link* and print planned vs. executed totals."""
    if link is None:
        return
    soup = get_html(link)
    if soup is None:
        return
    total_previsto = 0
    total_gasto = 0
    for empreendimento in soup.find_all('copa:empreendimento'):
        # only active works count towards the totals
        if empreendimento.ativo.text != 'true':
            continue
        try:
            previsto = float(empreendimento.valorTotalPrevisto.text)
        except AttributeError:
            previsto = 0
        try:
            # executed value = physical completion percentage applied to the plan
            executado = float(empreendimento.valorPercentualExecucaoFisica.text) * previsto / 100
        except AttributeError:
            executado = 0
        total_previsto += previsto
        total_gasto += executado
    print('Total previsto: {} - Total gasto: {}'.format(
        Money(total_previsto, 'BRL').format('pt_BR'),
        Money(total_gasto, 'BRL').format('pt_BR')))
def get_html(link):
    """Download *link* and return it parsed as XML (the feed is XML, not HTML)."""
    response = requests.get(link)
    return BeautifulSoup(response.text, 'xml')
if __name__ == "__main__":
    run(link_base)
# defines the default edit actions for linux
from talon import Module, Context, actions, clip
# ctx = Context()
# ctx.matches = r"""
# os: mac
# and app: chrome
# """
# @ctx.action_class("user")
mod = Module()


@mod.action_class
class seach_actions:
    def find_here(search: str):
        """Invoke chrome find, optionally with a search string.

        Saying "clipboard" / "clip board" searches for the clipboard
        contents; saying "nope" leaves the find bar empty.
        """
        actions.key("esc")
        actions.key("cmd-f")
        # BUG FIX: the old test used `search in "clipboard"` - substring
        # membership the wrong way round, so "board", "lip" and even ""
        # matched. Compare against the exact spoken forms instead.
        paste_clipboard = search in ("clipboard", "clip board")
        if paste_clipboard:
            # clipboard_val = actions.edit.paste() # use this if you want to invoke "cmd-v"
            clipboard_val = actions.clip.text()
            print(f"clip val = {clipboard_val}")
            actions.insert(clipboard_val)
            actions.key("enter")
        # BUG FIX: likewise `search not in "nope"` skipped any substring of
        # "nope" ("no", "op", ...); only the literal word should opt out.
        if search != "nope" and not paste_clipboard:
            actions.key("cmd-f")
            actions.insert(search)
            actions.key("enter")
"""add-name
Revision ID: a4e4df6ced25
Revises: bd4ac7853e8e
Create Date: 2022-02-05 21:26:34.241881
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a4e4df6ced25'
down_revision = 'bd4ac7853e8e'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('name', sa.String(), nullable=False))
    # BUG FIX: the autogenerated call passed None as the constraint name,
    # leaving the name to the backend and making the constraint impossible
    # to drop reliably in downgrade(). Name it explicitly instead.
    op.create_unique_constraint('uq_users_name', 'users', ['name'])
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # BUG FIX: drop_constraint(None, ...) raised because Alembic requires a
    # constraint name; drop by the explicit name given in upgrade().
    op.drop_constraint('uq_users_name', 'users', type_='unique')
    op.drop_column('users', 'name')
    # ### end Alembic commands ###
from typing import List, Union
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QTableWidget, QPushButton, QAbstractItemView, QHeaderView, QHBoxLayout, QWidget, QAbstractSpinBox
class CustomRowField:
    """
    Describes one column of a custom table row.

    :param unique: unique name of column
    :param label: label of column
    :param tooltip: (optional) tooltip of column
    :param synced: (optional) is this value synced with other values
    :param limit: (optional) limit column to maximum value or False
    :param reset_neg: (optional) resets to default value if negative
    :param enabled: (optional) if field is enabled
    """

    uniqueList = []
    uniqueIdLast = 0

    def __init__(self, unique: str, label: str, tooltip: str = '', synced: bool = True,
                 limit: Union[bool, int, float] = False, reset_neg: bool = False, enabled: bool = True):
        # plain data holder: keep every column property on the instance
        self.unique = unique
        self.label = label
        self.tooltip = tooltip
        self.synced = synced
        self.limit = limit
        self.reset_neg = reset_neg
        self.enabled = enabled
        # hand out a class-wide running id to every field created
        self.unique_id = CustomRowField.uniqueIdLast
        CustomRowField.uniqueIdLast += 1
class CustomRow(QObject):
    """
    Class for custom QObject row.

    Base class for one editable table row: owns the remove button and the
    list of cell widgets; subclasses append their input widgets to
    row_widgets and implement the abstract accessors below.
    """
    # emitted whenever any input widget of this row changes its value
    contentChanged = pyqtSignal()
    def __init__(self):
        super().__init__()
        # per-row delete button (icon only, fixed square size)
        self.remove = QPushButton(QIcon(':/icons/delete.png'), '')
        self.remove.setFixedSize(30, 30)
        # Center the remove button by surrounding it with two stretches inside a horizontal layout
        self.remove_button_parent = QWidget()
        self.remove_button_parent_hl = QHBoxLayout()
        self.remove_button_parent_hl.setSpacing(0)
        self.remove_button_parent_hl.setContentsMargins(0, 0, 0, 0)
        self.remove_button_parent_hl.addStretch(1)
        self.remove_button_parent_hl.addWidget(self.remove)
        self.remove_button_parent_hl.addStretch(1)
        self.remove_button_parent.setLayout(self.remove_button_parent_hl)
        # one widget per table column; subclasses append their inputs here
        self.row_widgets = [self.remove_button_parent]
    def clearSpinboxButtons(self):
        """Clear QSpinBox buttons (hide the up/down arrows of all spin boxes)"""
        for widget in self.row_widgets:
            if isinstance(widget, QAbstractSpinBox):
                widget.setButtonSymbols(QAbstractSpinBox.NoButtons)
    def selectRowInput(self):
        """Select row as input (give focus to the row's main input widget)"""
        raise NotImplementedError('Must override selectRowInput()')
    def containsData(self) -> bool:
        """Check if row contains data; default: every row counts as filled"""
        return True
    def getRowData(self):
        """Returns data of row"""
        raise NotImplementedError('Must override getRowData()')
    def getArguments(self):
        """Returns <Argument> container"""
        raise NotImplementedError('Must overwrite getArguments()')
class CustomTable(QTableWidget):
    """
    Class for custom QTableWidget

    Table of CustomRow entries with a trailing '+'-button row for adding
    new rows and a per-row remove button.

    :param row_count: number of rows
    :param header_labels: list of header labels
    :param parent: parent widget
    """
    settingsChanged = pyqtSignal(dict)
    contentChanged = pyqtSignal()
    def __init__(self, row_count: int, header_labels, parent):
        # extra empty first column holds the remove/add buttons
        self.header_labels = [''] + header_labels
        super().__init__(row_count, len(self.header_labels), parent)
        self.rows: List[CustomRow] = []
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.NoSelection)
        self.verticalHeader().setVisible(False)
        self.setHorizontalHeaderLabels(self.header_labels)
        self.horizontal_header = self.horizontalHeader()
        self.horizontal_header.setSectionResizeMode(QHeaderView.ResizeToContents)
        # the button column keeps a fixed width
        self.horizontal_header.setSectionResizeMode(0, QHeaderView.Fixed)
        self.horizontal_header.setMinimumSectionSize(40)
        self.add_button = QPushButton(QIcon(':/icons/add.png'), '')
        self.add_button.setFixedSize(30, 30)
        self.add_button.clicked.connect(lambda: self.addRow())
        self.createAddButton()
    def createAddButton(self):
        """Creates add button (a dedicated row spanning all columns)"""
        self.insertRow(0)
        self.setCellWidget(0, 0, self.add_button)
        self.setSpan(0, 0, 1, len(self.header_labels))
    def createRow(self, row_idx: int):
        """
        Create row with index; must return a CustomRow subclass instance
        :param row_idx: index of row
        """
        raise NotImplementedError('Must override createRow()')
    def addRow(self, update: bool = True, connect: bool = True):
        """
        Add a new row
        :param update: if updates should happen
        :param connect: if row should be connected
        """
        row_idx = self.rowCount() - 1  # add it before the '+'-button-row
        row = self.createRow(row_idx)
        # look the row up at click time so later removals keep indices valid
        row.remove.clicked.connect(lambda: self.removeCustomRow(self.rows.index(row)))
        self.insertRow(row_idx)
        self.rows.append(row)
        self.updateCellWidgets(row_idx)
        if update:
            self.resizeTable()
            self.updateRemoveButtons()
            row.selectRowInput()
            self.contentChanged.emit()
        if connect:
            row.contentChanged.connect(self.contentChanged.emit)
        return row
    def connectRows(self):
        """Connect all rows (forward every row's contentChanged signal)"""
        for row in self.rows:
            row.contentChanged.connect(self.contentChanged.emit)
    def resizeTable(self):
        """Resizes the table"""
        self.resizeColumnsToContents()
        self.resizeRowsToContents()
        # first column needs to be resized as well
        if self.rowCount() > 1:
            first_width = self.cellWidget(0, 0).sizeHint().width()
            self.setColumnWidth(0, first_width)
    def removeCustomRow(self, row_idx: int, update: bool = True):
        """
        Remove row with index
        :param row_idx: index of row
        :param update: if update should happen
        """
        self.removeRow(row_idx)
        del self.rows[row_idx]
        if update:
            self.updateRemoveButtons()
            self.contentChanged.emit()
    def updateRemoveButtons(self):
        """Updates remove buttons of rows (the last remaining row cannot be removed)"""
        for i, row in enumerate(self.rows):
            row.remove.setEnabled(True)
        if len(self.rows) == 1:
            self.rows[0].remove.setEnabled(False)
    def updateCellWidgets(self, row_idx: int):
        """
        Update cell widgets in row with index
        :param row_idx: index of row
        """
        for i, widget in enumerate(self.rows[row_idx].row_widgets):
            self.setCellWidget(row_idx, i, widget)
    def getData(self):
        """
        Returns a list of custom 'entry'-objects (depending on the table type)
        containing the table's rows' data, if the row contains data
        """
        return [row.getRowData() for row in self.rows if row.containsData()]
    def resetTable(self):
        """Reset the table (remove every data row; the '+'-row stays)"""
        while len(self.rows) > 0:
            self.removeCustomRow(0, update=False)
    def getArguments(self):
        """Returns list of <Argument> containers for each row"""
        raise NotImplementedError('Must overwrite getArguments()')
    def emit(self, value_dict: dict = None):
        """
        Emits settingsChanged pyqtSignal
        NOTE(review): named like, but distinct from, the bound signals'
        own emit() - confirm callers expect this wrapper.
        :param value_dict: dictionary to emit
        """
        if value_dict is None:
            return
        self.settingsChanged.emit(value_dict)
    def receive(self, value_dict: dict):
        """Receives other settingsChanged pyqtSignal -> dict (no-op hook for subclasses)"""
        pass
import math
import numpy as np
def isprime(n):
    """Return True if *n* is prime, otherwise the smallest prime factor of *n*.

    For n == 1 the falsy value 0 is returned (kept from the original
    behaviour); the result must not be fed back into factorisation then.
    """
    if n == 1:
        return 0
    if n == 2:
        # BUG FIX: used to return 1 here; 1 == True happens to hold, but any
        # identity check or use of the value as a factor would have broken.
        return True
    if n % 2 == 0:
        return 2
    # math.isqrt avoids float rounding errors of int(math.sqrt(n)) for big n
    for i in range(3, math.isqrt(n) + 1, 2):
        if n % i == 0:
            return i
    return True
def recur(n):
    """Yield the prime factors of *n*, repeated by multiplicity (n >= 2)."""
    #print (n)
    p = isprime(n)
    #print (p)
    if p == True:
        # n itself is prime -> it is its own only factor
        yield n
    else:
        # p is the smallest prime factor: recurse on it and on the cofactor
        yield from recur(p)
        yield from recur(n//p)
# Project Euler 69: find n <= 1,000,000 maximising n/phi(n).
# Since n/phi(n) = 1 / prod(1 - 1/p) over the distinct prime factors p of n,
# we track the n that MINIMISES prod(1 - 1/p); every new minimum is printed,
# so the last printed n is the answer.
minval = 100
print (set(recur(36)))
for n in range(2,1000001):
    #print (np.prod([1 - 1/x for x in set(recur(n))]))
    x = np.prod([1 - 1/x for x in set(recur(n))])
    if x < minval:
        minval = x
        print (n)
import math
from zou.app import app
from zou.app.utils import fields, string
from sqlalchemy import func
def get_query_criterions_from_request(request):
    """
    Turn request parameters into a dict where keys are attributes to filter and
    values are values to filter.
    """
    # "page" is pagination state, not a filter criterion
    excluded = ("page",)
    return {
        key: value
        for key, value in request.args.items()
        if key not in excluded
    }
def apply_criterions_to_db_query(model, db_query, criterions):
    """
    Apply criterions given in HTTP request to the sqlachemy db query object.
    """
    # "name" gets a case-insensitive LIKE match instead of strict equality
    if "name" in criterions and hasattr(model, "name"):
        name_value = criterions.pop("name")
        db_query = db_query.filter(model.name.ilike(name_value))
    return db_query.filter_by(**criterions)
def get_paginated_results(query, page, limit=None, relations=False):
    """
    Apply pagination to the query object.

    page < 1 disables pagination and returns the plain serialized list;
    otherwise a dict with the page data and paging metadata is returned.
    """
    if page < 1:
        entries = query.all()
        return fields.serialize_models(entries, relations=relations)
    else:
        # fall back to the application-wide page size
        limit = limit or app.config["NB_RECORDS_PER_PAGE"]
        total = query.count()
        offset = (page - 1) * limit

        nb_pages = int(math.ceil(total / float(limit)))
        query = query.limit(limit)
        query = query.offset(offset)

        if total < offset:
            # requested page lies past the end: return an empty page
            # NOTE(review): "total" is reported as 0 here although the real
            # count is known - inconsistent with the branch below; confirm
            # whether API consumers rely on this before changing it.
            result = {
                "data": [],
                "total": 0,
                "nb_pages": nb_pages,
                "limit": limit,
                "offset": offset,
                "page": page,
            }
        else:
            models = fields.serialize_models(query.all(), relations=relations)
            result = {
                "data": models,
                "total": total,
                "nb_pages": nb_pages,
                "limit": limit,
                "offset": offset,
                "page": page,
            }
        return result
def apply_sort_by(model, query, sort_by):
"""
Apply an order by clause to a sqlalchemy query from a string parameter.
"""
if sort_by in model.__table__.columns.keys():
sort_field = model.__table__.columns[sort_by]
if sort_by in ["created_at", "updated_at"]:
sort_field = sort_field.desc()
else:
sort_field = model.updated_at.desc()
return query.order_by(sort_field)
def cast_value(value, field_key):
if field_key.type.python_type is bool:
return string.strtobool(value)
else:
return func.cast(value, field_key.type)
| cgwire/zou | zou/app/utils/query.py | query.py | py | 2,542 | python | en | code | 152 | github-code | 13 |
74327460178 | import csv
f = open('profiles_stan.csv','r', newline='')
w = open('Stanford Student Profiles/profiles___3.csv','w', newline='')
fr = csv.reader(f)
fw = csv.writer(w)
l = []
for i in fr:
l+=[i]
for j in range(len(l)):
if l[j][1] == '':
l[j][1] = '1450'
if l[j][2] == '':
l[j][2] = '4.2'
if l[j][3] == '':
l[j][3] = '34'
l[j][4] = l[j][4].replace('<span itemprop="title">','').replace('B.B.A., ','').replace('B.A., ','').replace('B.A.,','').replace('B.S.,','').replace('B.B.A.,','').replace('B.S., ','')
if 'Male' == l[j][5] or 'Female' in l[j][5]:
l[j][5] = l[j][5].replace('Male', '1').replace('Female','2')
else:
l[j][5] = '3'
if 'Asian' in l[j][6]:
l[j][6] = '1'
elif 'White' in l[j][6] or 'Native' in l[j][6]:
l[j][6] = '2'
elif 'Black' in l[j][6]:
l[j][6] = '3'
elif 'Hispanic' in l[j][6]:
l[j][6] = '4'
else:
l[j][6] = '5'
if l[j][8] == 'Yes':
l[j][8] = '1'
else:
l[j][8] = '0'
if 'Regular' in l[j][9] or '' == l[j][9]:
l[j][9] = '1'
else:
l[j][9] = '0'
f.close()
fw.writerows(l)
w.close()
| tgarg10/College-Admissions-Calculator | Pgm4_Cleaning_Data.py | Pgm4_Cleaning_Data.py | py | 1,172 | python | en | code | 0 | github-code | 13 |
34664698479 | from tema6 import Cerc, Dreptunghi, Angajat, Cont
# Ex 1:
cerc1 = Cerc(5, 'rosu')
cerc1.descrie_cerc()
print("Aria cercului este:", cerc1.aria())
print("Diametrul cercului este:", cerc1.diametru())
print("Circumferinta cercului este:", cerc1.circumferinta())
#Ex 2:
dreptunghi1 = Dreptunghi(10, 5, 'verde')
dreptunghi1.descrie()
print(dreptunghi1.aria())
print(dreptunghi1.perimetrul())
dreptunghi1.schimba_culoarea('albastru')
dreptunghi1.descrie()
# Ex 3:
angajat = Angajat("Palamariu", "Nicu", 10000)
angajat.descrie()
angajat.marire_salariu(15)
angajat.descrie()
#Ex 4:
cont = Cont("RO12345", "Nicu Palamariu", 2453)
cont.afisare_sold()
cont.debitare_cont(2360)
cont.afisare_sold()
cont.creditare_cont(3120)
cont.afisare_sold()
| Nicu0478/Repo1 | tema6main.py | tema6main.py | py | 742 | python | ro | code | 0 | github-code | 13 |
9665048982 | from data.voc.voc_dataset import VocDataset
from torchvision import transforms
import torch
def get_voc_dataloader(split='train', batch_size=32):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if split == "train":
train_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224),
transforms.ToTensor(),
normalize
])
ds_train = VocDataset("/content/VOCdevkit/VOC2012",
'train', train_transform)
return torch.utils.data.DataLoader(dataset=ds_train,
batch_size=batch_size,
shuffle=True,
num_workers=1)
elif split == "val":
test_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
ds_val = VocDataset("/content/VOCdevkit/VOC2012",
'val', test_transform)
return torch.utils.data.DataLoader(dataset=ds_val,
batch_size=batch_size,
shuffle=True,
num_workers=1)
else:
test_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
ds_val = VocDataset("/content/VOCdevkit/VOC2012",
'val', test_transform)
return torch.utils.data.DataLoader(dataset=ds_val,
batch_size=batch_size,
shuffle=False,
num_workers=1)
| wannieman98/RandWireNN | data/voc/voc_dataloader.py | voc_dataloader.py | py | 2,067 | python | en | code | 0 | github-code | 13 |
22516690951 | from collections import defaultdict
from elasticsearch_dsl import Document, Integer, Text, Boolean, Q, Keyword, SF, Date
from elasticsearch_dsl.connections import connections
from elasticsearch.helpers import parallel_bulk
from elasticsearch.exceptions import ConflictError
from flask_sqlalchemy import Pagination
from corelib.mc import rdb, cache
from config import ES_HOSTS, PER_PAGE
from models.consts import K_POST, ONE_HOUR
from models.core import Post
connections.create_connection(hosts=ES_HOSTS)
ITEM_MC_KEY = 'core:search:{}:{}'
POST_IDS_BY_TAG_MC_KEY = 'core:search:post_ids_by_tag:%s:%s:%s:%s'
SEARCH_FIELDS = ['title^10', 'tags^5', 'content^2']
gauss_sf = SF('gauss', created_at={
'origin': 'now', 'offset': '7d', 'scale': '10d'
})
score_sf = SF('script_score', script={
'lang': 'painless',
'inline': ("doc['n_likes'].value * 2 + doc['n_collects'].value")
})
TARGET_MAPPER = {
K_POST: Post
}
def get_item_data(item):
"""item是Post model的对象"""
try:
content = item.content
except AttributeError:
content = ''
try:
tags = [tag.name for tag in item.tags]
except AttributeError:
tags = []
return {
'id': item.id,
'tags': tags,
'content': content,
'title': item.title,
'kind': item.kind,
'n_likes': item.n_likes,
'n_comments': item.n_comments,
'n_collects': item.n_collects,
}
class Item(Document):
title = Text()
kind = Integer()
content = Text()
n_likes = Integer()
n_collects = Integer()
n_comments = Integer()
can_show = Boolean()
created_at = Date()
tags = Text(fields={'raw': Keyword()})
class Index:
name = 'test'
@classmethod
def add(cls, item):
obj = cls(**get_item_data(item))
obj.save()
obj.clear_mc(item.id, item.kind)
return obj
@classmethod
@cache(ITEM_MC_KEY.format('{id}', '{kind}'))
def get(cls, id, kind):
s = cls.search()
s.query = Q('bool', must=[Q('term', id=id),
Q('term', kind=kind)])
rs = s.execute()
if rs:
return rs.hits[0]
@classmethod
def clear_mc(cls, id, kind):
rdb.delete(ITEM_MC_KEY.format(id, kind))
@classmethod
def delete(cls, item):
rs = cls.get(item.id, item.kind)
if rs:
super(cls, rs).delete()
cls.clear_mc(item.id, item.kind)
return True
return False
@classmethod
def update_item(cls, item):
"""item是Post的ORM对象, 其实也可以放其他模型字段, 只要它们的字段都包含Item中的字段, 由kind做区分"""
obj = cls.get(item.id, item.kind)
if obj is None:
return cls.add(item)
if not obj:
return
kw = get_item_data(item)
try:
obj.update(**kw)
except ConflictError:
obj.clear_mc(item.id, item.kind)
obj = cls.get(item.id, item.kind)
obj.update(**kw)
obj.clear_mc(item.id, item.kind)
return True
@classmethod
def get_es(cls):
search = cls.search()
return connections.get_connection(search._using)
@classmethod
def bulk_update(cls, items, chunk_size=5000, op_type='update', **kwargs):
"""
浅析ES的_source、_all、store、index: https://www.cnblogs.com/wynjauu/articles/9326168.html
"""
index = cls._index._name
type = cls._doc_type.name
objects = ({
'_op_type': op_type,
'_id': f'{doc.id}_{doc.kind}',
'_index': index,
'_type': type,
'_source': doc.to_dict()
} for doc in items)
client = cls.get_es()
rs = list(parallel_bulk(client, objects,
chunk_size=chunk_size, **kwargs))
for item in items:
cls.clear_mc(item.id, item.kind)
return rs
@classmethod
def new_search(cls, query, page, order_by=None, per_page=PER_PAGE):
"""用于在搜索栏搜搜, query是输入的搜索内容, 字符串"""
s = cls.search()
# multi_match: 多个字段匹配, SEARCH_FIELDS设置了字段权重
s = s.query('multi_match', query=query,
fields=SEARCH_FIELDS)
if page < 1:
page = 1
start = (page - 1) * PER_PAGE
s = s.extra(**{'from': start, 'size': per_page})
if order_by is not None:
s = s.sort(order_by)
rs = s.execute()
dct = defaultdict(list)
# 这里考虑了多种kind的情况, 不过我们目前只会用到Post
for i in rs:
dct[i.kind].append(i.id)
items = []
for kind, ids in dct.items():
target_cls = TARGET_MAPPER.get(kind)
# 最终还是依靠orm来获取数据
if target_cls:
items_ = target_cls.get_multi(ids)
items.extend(items_)
return Pagination(query, page, per_page, rs.hits.total, items)
@classmethod
@cache(POST_IDS_BY_TAG_MC_KEY % ('{tag}', '{page}', '{order_by}',
'{per_page}'), ONE_HOUR)
def get_post_ids_by_tag(cls, tag, page, order_by=None, per_page=PER_PAGE):
s = cls.search()
#s = s.query(Q('bool', must=Q('term', tags=tag)))
s = s.query(Q('bool', must=Q('term', kind=K_POST)))
if page < 1:
page = 1
start = (page - 1) * PER_PAGE
s = s.extra(**{'from': start, 'size': per_page})
if order_by is not None:
if order_by == 'hot':
s = s.query(Q('function_score', functions=[gauss_sf, score_sf])) # noqa
else:
s = s.sort(order_by)
rs = s.execute()
ids = [obj.id for obj in rs]
return Pagination(tag, page, per_page, rs.hits.total, ids) | miniyk2012/my_toutiao | models/search.py | search.py | py | 5,972 | python | en | code | 0 | github-code | 13 |
14527495776 | from app.main import main_app
from flask import render_template,request,redirect,session,jsonify,url_for
from app.forms import EditBlogForm
from app.models import Post,Type
import random,base64,os
from functools import wraps
from app import app,db
#访问前验证
def logined_require(func):
@wraps(func)
def inner(*args,**kwargs):
user=session.get('screen_name')
if not user:
return redirect(url_for('login.login'))
return func(*args,**kwargs)
return inner
# 首页
@main_app.route('/')
@main_app.route('/index')
def index():
#从数据库随机抽取三篇文章
num=Post.query.count()
if num<=3:
return redirect(url_for('main.editblog'))
blist=list(range(1,num))
bid=random.sample(blist,3)
querydata=Post.query.filter(Post.id.in_(bid)).all()
#end抽取
barselect='index'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('index.html',barselect=barselect,data=querydata,sessions=sessions)
#博客页
@main_app.route('/blog')
def blog():
page=request.args.get('page',1,type=int)
posts=Post.query.order_by(Post.id.desc()).paginate(page,app.config['POSTS_PRE_PAGE'],False)
barselect='blog'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('blog.html',barselect=barselect,sessions=sessions,posts=posts)
#特色页
@main_app.route('/page')
def page():
barselect='page'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('page.html',barselect=barselect,sessions=sessions)
#work页面
@main_app.route('/work')
def work():
barselect='work'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('work.html',barselect=barselect,sessions=sessions)
#contact页面
@main_app.route('/contact')
def contact():
barselect='contact'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('contact.html',barselect=barselect,sessions=sessions)
#single页面
@main_app.route('/single/',methods=['GET','POST'])
def single():
id=request.args.get('id') #得到点击页面的id,得到文章
article=Post.query.filter_by(id=id).first()
#根据session判断当前阅读的人是否加1
if not session.get('reding'):
if article.reding:
article.reding+=1
else:
article.reding=1
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
#设置session
session['reding']='true'
return render_template('single.html',article=article,sessions=sessions)
#编写博客页面 editblog
@main_app.route('/editblog',methods=['POST','GET'])
@logined_require
def editblog():
barselect='editblog'
form=EditBlogForm(title='标题',body='输入内容')
if form.validate_on_submit():
title=form.title.data
body=form.body.data
categoryid=form.category.data
type=Type.query.get(categoryid)
keyword=form.keyword.data
coverpic=request.files['coverpic'].read() #得到图片二进制流
coverpic=base64.b64encode(coverpic)
#将数据写入数据库
try:
post=Post(title=title,body=body,keyword=keyword,coverpic=coverpic,category=type)
db.session.add(post)
db.session.commit()
except:
return 'error'
return 'success'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('editblog.html',form=form,barselect=barselect,sessions=sessions)
#得到最近三个博客的处理方法
@main_app.route('/getrecent',methods=['POST'])
def getrecent():
postlist=[]
# num=Post.query.count()
# if num<=3:
rectarticale=Post.query.order_by(Post.id.desc()).limit(3).all()
for post in rectarticale:
dtime=post.timestamp.strftime('%Y-%m-%d %H:%M')
pic=post.coverpic.decode('utf-8')
dic={'id':post.id,'title':post.title,'timestamp':dtime,'pic':pic}
postlist.append(dic)
return jsonify({'signal':1,'posts':postlist})
#得到分类类别
@main_app.route('/getcategory',methods=['POST'])
def getcategory():
type=Type.query.all()
catelist=[]
for t in type:
id=t.id
name=t.name
length=len(t.posts.all())
dic={'id':id,'name':name,'length':length}
catelist.append(dic)
return jsonify({'signal':1,'category':catelist})
#upload富文本图片上传
@main_app.route('/upload',methods=['POST','GET'])
def upload():
basepath=os.path.abspath(os.path.dirname(__file__))
if request.method=='POST':
img=request.files.get('file')
path=basepath+'/static/upload/'
img_path=path+img.filename
img.save(img_path)
back_url='/static/upload/'+img.filename
return jsonify({'location': back_url})
#具体分类页
@main_app.route('/categorylist/')
def categorylist():
id=request.args.get('id')
posts=Type.query.get(id).posts.all()
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('categorylist.html',posts=posts,sessions=sessions)
#修改博客页
@main_app.route('/revise/',methods=['POST','GET'])
@logined_require
def revise():
id=request.args.get('id')
article=Post.query.get(id)
form=EditBlogForm(title=article.title,body=article.body,category=article.kind,keyword=article.keyword,coverpic=article.coverpic)
if form.validate_on_submit():
title=form.title.data
body=form.body.data
categoryid=form.category.data
type=Type.query.get(categoryid)
keyword=form.keyword.data
coverpic=request.files['coverpic'].read() #得到图片二进制流
coverpic=base64.b64encode(coverpic)
#将数据写入数据库
try:
post=Post(title=title,body=body,keyword=keyword,coverpic=coverpic,category=type)
db.session.add(post)
db.session.commit()
except:
print('error')
return 'success'
sessions={'name':session.get('screen_name'),'headimg':session.get('profile_image_url')}
return render_template('editblog.html',article=article,sessions=sessions,form=form) | inkfish1/weblog | app/main/routes.py | routes.py | py | 6,487 | python | en | code | 0 | github-code | 13 |
7020617582 |
"""
This module contains functions to assemble information from multiple different
files relevant to the Mayo Clinic biobanks.
"""
###############################################################################
# Notes
###############################################################################
# Installation and importation
# Standard
import sys
#print(sys.path)
import os
import math
import statistics
import pickle
import copy
import random
import itertools
import time
# Relevant
import numpy
import pandas
pandas.options.mode.chained_assignment = None # default = "warn"
import scipy.stats
import scipy.linalg
import statsmodels.multivariate.pca
# Custom
import partner.utility as putility
import partner.polygenic_score as pgs
#import stragglers.mcita_assembly as s_mcita_ass
###############################################################################
# Functionality
# TODO: TCW; 10 April 2023
# TODO: need to create the table_scores_collection.tsv file.
##########
# Initialization
def initialize_directories(
    restore=None,
    path_dock=None,
):
    """
    Initialize directories for procedure's product files.

    arguments:
        restore (bool): whether to remove previous versions of data
        path_dock (str): path to dock directory for source and product
            directories and files

    raises:

    returns:
        (dict<str>): collection of paths to directories for procedure's files

    """
    # Define path to the procedure's product directory.
    path_assembly = os.path.join(path_dock, "mbpdb_assembly")
    # Collect paths to directories.
    paths = {
        "dock": path_dock,
        "mbpdb_assembly": path_assembly,
    }
    # Remove previous files to avoid version or batch confusion.
    if restore:
        putility.remove_directory(path=path_assembly)
    # Initialize directories.
    putility.create_directories(path=path_assembly)
    # Return information.
    return paths
##########
# Read
def read_source(
    path_dock=None,
    report=None,
):
    """
    Reads and organizes source information from file.

    Notice that Pandas does not accommodate missing values within series of
    integer variable types.

    arguments:
        path_dock (str): path to dock directory for source and product
            directories and files
        report (bool): whether to print reports

    raises:

    returns:
        (object): source information

    """

    def _read_table(path, **kwargs):
        # Read a single delimited table and discard any file-derived index so
        # that every table starts with a clean default integer index.
        table = pandas.read_csv(path, **kwargs)
        table.reset_index(
            level=None,
            inplace=True,
            drop=True, # remove index; do not move to regular columns
        )
        return table

    # Codes that several of the source files use to represent missing values.
    missing_codes = [
        "nan", "na", "NAN", "NA", "<nan>", "<na>", "<NAN>", "<NA>",
    ]
    # Most of the source files share a single parent directory.
    path_directory = os.path.join(
        path_dock, "phenotypes_mayo_bipolar_disorder_1_2_merge"
    )
    # Table of polygenic scores across individuals.
    table_scores = _read_table(
        os.path.join(
            path_dock, "hormone_genetics_tcw_2023-02-24",
            "sbayesr_plink_polygenic_scores_collection",
            "table_scores_collection.tsv"
        ),
        sep="\t",
        header=0,
        na_values=missing_codes,
        keep_default_na=True,
    )
    # Table of genetic sex and case-control status in PLINK2 ".fam" format.
    # https://www.cog-genomics.org/plink/2.0/formats#fam
    table_genetic_sex_case = _read_table(
        os.path.join(
            path_directory,
            "GWAS_MERGED_BPphe_wMCBBctrl.maf0.01.dosR20.8.noDups.noSM.fam"
        ),
        sep=r"\s+", # ","; "\t"; "\s+"; "\s+|\t+|\s+\t+|\t+\s+"
        header=None,
        names=[
            "FID", "IID", "father", "mother",
            "sex_genotype_raw", "bipolar_disorder_genotype_raw"
        ],
        dtype={
            "FID": "string",
            "IID": "string", # identifier of individual's genotype
            "father": "string",
            "mother": "string",
            "sex_genotype_raw": "string", # 1: male; 2: female; 0: unknown
            "bipolar_disorder_genotype_raw": "string", # 1: control; 2: case;
        },
        na_values=missing_codes,
        keep_default_na=True,
    )
    # Table of principal components across genotypes.
    table_genotype_pca = _read_table(
        os.path.join(path_directory, "Top20_PCs_europe.csv"),
        sep=",", # ","; "\t"; "\s+"; "\s+|\t+|\s+\t+|\t+\s+"
        header=0,
        dtype="string",
        na_values=missing_codes,
        keep_default_na=True,
    )
    # Table of identifiers that match phenotype records to genotypes.
    table_identifiers = _read_table(
        os.path.join(path_directory, "210421_id_matching_gwas.csv"),
        sep=",",
        header=0,
        dtype={
            "bib_id": "string",
            "gwas1_sampleid": "string", # identifier of individual's genotype
            "gwas2_sampleid": "string", # identifier of individual's genotype
        },
        na_values=missing_codes,
        keep_default_na=True,
    )
    # Tables of phenotype variables on cases: older and newer versions.
    # These two reads deliberately keep Pandas' default missing-value handling.
    table_phenotypes_case_old = _read_table(
        os.path.join(path_directory, "220513_BP_phenotypes.csv"),
        sep=",",
        header=0,
        dtype="string",
    )
    table_phenotypes_case_new = _read_table(
        os.path.join(path_directory, "thyroid_prs.csv"),
        sep=",",
        header=0,
        dtype="string",
    )
    # Table of phenotype variables on controls.
    table_phenotypes_control = _read_table(
        os.path.join(path_directory, "gwas_TCF7L2_bib.csv"),
        sep=",", # ","; "\t"; "\s+"; "\s+|\t+|\s+\t+|\t+\s+"
        header=0,
        dtype={
            "sample.id": "string",
            "bib_id": "string",
            "pt_age": "string",
            "bmi": "string",
        },
    )
    # Compile and return information.
    return {
        "table_scores": table_scores,
        "table_genetic_sex_case": table_genetic_sex_case,
        "table_genotype_pca": table_genotype_pca,
        "table_identifiers": table_identifiers,
        "table_phenotypes_case_old": table_phenotypes_case_old,
        "table_phenotypes_case_new": table_phenotypes_case_new,
        "table_phenotypes_control": table_phenotypes_control,
    }
##########
# Organize separate tables before merge
##########
# Merge information on phenotypes
# TODO: split up this function... some of this function is specific to the Bipolar Biobank... joining phenotypes, identifiers, etc...
# TODO: TCW; 01 September 2022
# TODO: obsolete
def merge_polygenic_scores_to_phenotypes(
    table_identifiers=None,
    table_phenotypes=None,
    table_genetic_sex_case=None,
    tables_scores=None,
    report=None,
):
    """
    Merge information from multiple source tables.

    Most controls that accompany the Bipolar Biobank do not have phenotype
    records and do not have a "bib_id".
    Only a few controls from the Mexico and Chile cohorts do have "bib_id" and
    phenotype records.
    Many cases in the Bipolar Biobank do not have genotypes.
    Sample identifiers from genotype files ("IID") are the most inclusive
    identifiers when analyses prioritize genotypes.

    All merges keep the union of keys from both tables (outer joins on the
    tables' indices).

    arguments:
        table_identifiers (object): Pandas data frame of identifiers for
            matching phenotype and genotype records
        table_phenotypes (object): Pandas data frame of information about
            phenotype variables
        table_genetic_sex_case (object): Pandas data frame of information about
            genetic sex and case-control status from file in PLINK2 ".fam"
            format
        tables_scores (list<object>): collection of Pandas data frames of
            polygenic scores; these tables are modified in place
        report (bool): whether to print reports

    raises:

    returns:
        (object): Pandas data frame of information about phenotype variables

    """

    def _set_string_index(table, column, preserve_index=False):
        # Reset the table's current index, cast the identifier column to the
        # Pandas "string" type, and promote that column to the index in
        # preparation for an index-on-index merge.
        # When "preserve_index" is True, the previous index moves back to a
        # regular column instead of being discarded; this keeps the other
        # identifier available for a later merge.
        table.reset_index(
            level=None,
            inplace=True,
            drop=(not preserve_index),
        )
        table[column] = table[column].astype(
            "string",
            copy=True,
            errors="raise",
        )
        table.set_index(
            column,
            append=False,
            drop=True, # move regular column to index; remove original column
            inplace=True
        )
        return table

    def _merge_outer(table_left, table_right):
        # Database-style join on the tables' indices, keeping the union of
        # keys from both tables.
        return pandas.merge(
            table_left, # left table
            table_right, # right table
            left_on=None,
            right_on=None,
            left_index=True,
            right_index=True,
            how="outer", # keep union of keys from both tables
        )

    # Copy information in tables that must not change for the caller.
    # The tables of polygenic scores are deliberately modified in place, as in
    # the original implementation.
    table_genetic_sex_case = table_genetic_sex_case.copy(deep=True)
    table_identifiers = table_identifiers.copy(deep=True)
    table_phenotypes = table_phenotypes.copy(deep=True)
    # 1. Introduce identifiers for phenotype records ("bib_id") to the table
    # of genetic sex and case status.
    # The table of genetic sex and case status is the most inclusive of cases
    # and controls with genotypes.
    _set_string_index(table_genetic_sex_case, "identifier_genotype")
    _set_string_index(table_identifiers, "identifier_genotype")
    table = _merge_outer(table_genetic_sex_case, table_identifiers)
    # 2. Introduce phenotype records for Bipolar Disorder cases (mostly).
    # Keep "identifier_genotype" as a regular column while merging on
    # "identifier_phenotype".
    _set_string_index(table, "identifier_phenotype", preserve_index=True)
    _set_string_index(table_phenotypes, "identifier_phenotype")
    table = _merge_outer(table, table_phenotypes)
    # 3. Introduce polygenic scores, merging on "identifier_genotype" again.
    # Keep "identifier_phenotype" as a regular column.
    _set_string_index(table, "identifier_genotype", preserve_index=True)
    for table_score in tables_scores:
        _set_string_index(table_score, "identifier_genotype")
        table = _merge_outer(table, table_score)
        pass
    # Organize table's index: move "identifier_genotype" from the index back
    # to a regular column.
    table.reset_index(
        level=None,
        inplace=True,
        drop=False, # move index to regular column; do not discard
    )
    # Report.
    if report:
        putility.print_terminal_partition(level=2)
        print("report: ")
        print("merge_polygenic_scores_to_phenotypes()")
        putility.print_terminal_partition(level=3)
        print(table)
        print("columns")
        print(table.columns.to_list())
        pass
    # Return information.
    return table
##########
# Organize phenotype variables
# # Convert column types to float.
# columns_type = [
# "2784-0.0", "2794-0.0", "2804-0.0",
# "2814-0.0", "3536-0.0", "3546-0.0",
# ]
# table = putility.convert_table_columns_variables_types_float(
# columns=columns_type,
# table=table,
# )
##########
# Write
def write_product_assembly(
    pail_write=None,
    path_directory=None,
):
    """
    Writes product information to file.

    arguments:
        pail_write (dict): collection of information to write to file
        path_directory (str): path to parent directory

    raises:

    returns:

    """
    table = pail_write["table_merge"]
    # Specify paths to product files within the parent directory.
    path_pickle = os.path.join(path_directory, "table_merge.pickle")
    path_text = os.path.join(path_directory, "table_merge.tsv")
    path_columns = os.path.join(path_directory, "list_table_columns.txt")
    # Write the merge table in binary and text formats.
    table.to_pickle(path_pickle)
    table.to_csv(
        path_or_buf=path_text,
        sep="\t",
        header=True, # include header in table
        index=False, # do not include index in table
    )
    # Write the list of the table's column names.
    putility.write_file_text_list(
        elements=table.columns.to_list(),
        delimiter="\n",
        path_file=path_columns,
    )
    pass
def write_product(
    pail_write=None,
    paths=None,
):
    """
    Writes product information to file.

    arguments:
        pail_write (dict): collection of information to write to file
        paths (dict<str>): collection of paths to directories for procedure's
            files

    raises:

    returns:

    """
    # Write the organization procedure's main information to its directory.
    key = "mbpdb_assembly"
    write_product_assembly(
        pail_write=pail_write[key],
        path_directory=paths[key],
    )
    pass
###############################################################################
# Procedure
def execute_procedure(
path_dock=None,
):
"""
Function to execute module's main behavior.
arguments:
path_dock (str): path to dock directory for source and product
directories and files
raises:
returns:
"""
# Report version.
putility.print_terminal_partition(level=1)
print(path_dock)
print("version check: 1")
# Pause procedure.
time.sleep(5.0)
# Initialize directories.
paths = initialize_directories(
restore=True,
path_dock=path_dock,
)
# Read source information from file.
source = read_source(
path_dock=path_dock,
report=True,
)
##########
# Organize and merge together information on identifiers for genotypes.
# Organize table of genetic sex (from PLINK2 file in ".fam" format).
# https://www.cog-genomics.org/plink/2.0/formats#fam
table_genetic_sex_case = (
putility.simplify_translate_table_columns_organize_identifier(
columns_keep=[
"IID", "sex_genotype_raw", "bipolar_disorder_genotype_raw"
],
columns_translations={},
columns_copy={},
identifier_source="IID",
identifier_product="identifier_genotype",
table=source["table_genetic_sex_case"],
report=True,
))
# Organize table of principal components across genotypes.
table_genotype_pca = (
putility.simplify_translate_table_columns_organize_identifier(
columns_keep=[
"ID", "PC1", "PC2", "PC3", "PC4", "PC5",
],
columns_translations={
"PC1": "genotype_pc_1",
"PC2": "genotype_pc_2",
"PC3": "genotype_pc_3",
"PC4": "genotype_pc_4",
"PC5": "genotype_pc_5",
},
columns_copy={},
identifier_source="ID",
identifier_product="identifier_genotype",
table=source["table_genotype_pca"],
report=True,
))
# Organize table of polygenic scores.
table_scores = (
putility.simplify_translate_table_columns_organize_identifier(
columns_keep=[],
columns_translations={},
columns_copy={},
identifier_source="identifier",
identifier_product="identifier_genotype",
table=source["table_scores"],
report=True,
))
# Merge table of genetic sex and case status with table of principal
# components across genotypes.
table_merge_genotypes = putility.merge_columns_two_tables(
identifier_first="identifier_genotype",
identifier_second="identifier_genotype",
table_first=table_genetic_sex_case,
table_second=table_genotype_pca,
report=True,
)
# Merge table of genetic sex and case status and principal components
# across genotypes with table of polygenic scores.
table_merge_genotypes = putility.merge_columns_two_tables(
identifier_first="identifier_genotype",
identifier_second="identifier_genotype",
table_first=table_merge_genotypes,
table_second=table_scores,
report=True,
)
# Organize table of phenotype variables on controls.
# Merge these phenotypes on identifiers for genotypes.
table_phenotypes_control = (
putility.simplify_translate_table_columns_organize_identifier(
columns_keep=[
"sample.id", "bib_id", "pt_age", "bmi",
],
columns_translations={
"bib_id": "bib_id_supplement",
"pt_age": "pt_age_supplement",
"bmi": "BMI_supplement",
},
columns_copy={},
identifier_source="sample.id",
identifier_product="identifier_genotype",
table=source["table_phenotypes_control"],
report=True,
))
# Merge table of genetic sex and case status, principal components across
# genotypes, and polygenic scores with table of phenotype variables on
# controls.
table_merge_genotypes = putility.merge_columns_two_tables(
identifier_first="identifier_genotype",
identifier_second="identifier_genotype",
table_first=table_merge_genotypes,
table_second=table_phenotypes_control,
report=True,
)
# Remove unnecessary columns.
#table_merge_genotypes.drop(
# labels=["level_0",],
# axis="columns",
# inplace=True
#)
# Report.
print("...")
print("...")
print("...")
print("table after merges on genotype identifiers:")
print(table_merge_genotypes)
putility.print_terminal_partition(level=3)
print("table columns: " + str(int(table_merge_genotypes.shape[1])))
print("table rows: " + str(int(table_merge_genotypes.shape[0])))
print("columns")
print(table_merge_genotypes.columns.to_list())
##########
# Organize and merge together information on identifiers for phenotypes.
# Organize tables of phenotype variables on cases.
# Append suffix to names of all columns in older phenotype data for cases.
# Append prefix to names of columns.
table_phenotypes_case_old = (
source["table_phenotypes_case_old"].add_prefix("2022-05-13_")
)
translations = dict()
translations["2022-05-13_bib_id"] = "bib_id"
table_phenotypes_case_old.rename(
columns=translations,
inplace=True,
)
table_phenotypes_case_old = (
putility.organize_table_column_identifier(
column_source="bib_id",
column_product="identifier_phenotype",
table=table_phenotypes_case_old,
report=True,
))
table_phenotypes_case_new = (
putility.organize_table_column_identifier(
column_source="bib_id",
column_product="identifier_phenotype",
table=source["table_phenotypes_case_new"],
report=True,
))
# Organize table of identifiers.
# Determine consensus combination of identifiers for genotypes.
# Prioritize identifiers from "GWAS1" set of genotypes.
table_identifiers = (
putility.simplify_translate_table_columns_organize_identifier(
columns_keep=["bib_id", "gwas1_sampleid", "gwas2_sampleid",],
columns_translations={
"bib_id": "bib_id_match",
},
columns_copy={},
identifier_source="bib_id_match",
identifier_product="identifier_phenotype",
table=source["table_identifiers"],
report=True,
))
table_identifiers["gwas_sampleid_consensus"] = table_identifiers.apply(
lambda row:
putility.prioritize_combination_values_string(
value_priority=row["gwas1_sampleid"],
value_spare=row["gwas2_sampleid"],
),
axis="columns", # apply function to each row
)
# Merge table of identifiers with table of phenotype variables on cases.
table_merge_phenotypes = putility.merge_columns_two_tables(
identifier_first="identifier_phenotype",
identifier_second="identifier_phenotype",
table_first=table_identifiers,
table_second=table_phenotypes_case_old,
report=True,
)
table_merge_phenotypes = putility.merge_columns_two_tables(
identifier_first="identifier_phenotype",
identifier_second="identifier_phenotype",
table_first=table_merge_phenotypes,
table_second=table_phenotypes_case_new,
report=True,
)
# Remove unnecessary columns.
#table_merge_phenotypes.drop(
# labels=["level_0",],
# axis="columns",
# inplace=True
#)
# Report.
print("...")
print("...")
print("...")
print("table after merges on phenotype identifiers:")
print(table_merge_phenotypes)
putility.print_terminal_partition(level=3)
print("table columns: " + str(int(table_merge_phenotypes.shape[1])))
print("table rows: " + str(int(table_merge_phenotypes.shape[0])))
print("columns")
print(table_merge_phenotypes.columns.to_list())
##########
# Merge together information on genotypes and phenotypes.
# Merge table of phenotype variables with table of genetic sex, case
# status, and polygenic scores.
table_merge_phenotypes = putility.organize_table_column_identifier(
column_source="gwas_sampleid_consensus",
column_product="identifier_genotype",
table=table_merge_phenotypes,
report=True,
)
table = putility.merge_columns_two_tables(
identifier_first="identifier_genotype",
identifier_second="identifier_genotype",
table_first=table_merge_genotypes,
table_second=table_merge_phenotypes,
report=True,
)
# Report.
print("...")
print("...")
print("...")
print("table after merges of genotypes and phenotypes:")
print(table)
putility.print_terminal_partition(level=3)
print("table columns: " + str(int(table.shape[1])))
print("table rows: " + str(int(table.shape[0])))
print("columns")
print(table.columns.to_list())
# Collect information.
pail_write = dict()
pail_write["mbpdb_assembly"] = dict()
pail_write["mbpdb_assembly"]["table_merge"] = table
# Write product information to file.
write_product(
pail_write=pail_write,
paths=paths,
)
pass
#
| tcameronwaller/stragglers | package/mbpdb_assembly.py | mbpdb_assembly.py | py | 27,203 | python | en | code | 0 | github-code | 13 |
20768753079 | """
Metrics class.
"""
from collections import Counter
from nltk.translate import bleu_score
from nltk.translate.bleu_score import SmoothingFunction
import numpy as np
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.spice.spice import Spice
def coco_scores(refs, hyps):
    """Print the standard COCO captioning metrics for *hyps* against *refs*.

    All five scorers are instantiated up front, then each corpus-level score
    is printed in the fixed order bleu/rouge/cider/meteor/spice.
    Always returns 1 (kept for backward compatibility with callers).
    """
    scorers = [
        ("bleu", Bleu(3)),
        ("rouge", Rouge()),
        ("cider", Cider()),
        ("meteor", Meteor()),
        ("spice", Spice()),
    ]
    for label, scorer in scorers:
        # compute_score returns (corpus_score, per-item scores); print the former.
        print(label, scorer.compute_score(refs, hyps)[0])
    return 1
def coco_bleu(refs, hyps):
    """Return corpus-level BLEU-1/2/3 from the COCO toolkit (quiet mode)."""
    scorer = Bleu(3)
    result = scorer.compute_score(refs, hyps, verbose=0)
    # First element of the result tuple is the corpus-level score list.
    return result[0]
def distinct(seqs):
    """Calculate intra/inter distinct-1/2 for a batch of token sequences.

    Args:
        seqs: iterable of token lists.

    Returns:
        Tuple ``(intra_dist1, intra_dist2, inter_dist1, inter_dist2)`` where
        the intra values are per-sequence distinct ratios averaged over the
        batch and the inter values are corpus-level distinct ratios.
    """
    # Fix: removed the unused local `batch_size` from the original.
    intra_dist1, intra_dist2 = [], []
    unigrams_all, bigrams_all = Counter(), Counter()
    for seq in seqs:
        unigrams = Counter(seq)
        bigrams = Counter(zip(seq, seq[1:]))
        # Epsilons guard against division by zero on empty/1-token sequences.
        intra_dist1.append((len(unigrams)+1e-12) / (len(seq)+1e-5))
        intra_dist2.append((len(bigrams)+1e-12) / (max(0, len(seq)-1)+1e-5))
        unigrams_all.update(unigrams)
        bigrams_all.update(bigrams)
    inter_dist1 = (len(unigrams_all)+1e-12) / (sum(unigrams_all.values())+1e-5)
    inter_dist2 = (len(bigrams_all)+1e-12) / (sum(bigrams_all.values())+1e-5)
    intra_dist1 = np.average(intra_dist1)
    intra_dist2 = np.average(intra_dist2)
    return intra_dist1, intra_dist2, inter_dist1, inter_dist2
def bleu(hyps, refs):
    """Calculate sentence-averaged BLEU-1/2/3 over paired hypotheses/references.

    Args:
        hyps: iterable of hypothesis token lists.
        refs: iterable of reference token lists (one reference per hypothesis).

    Returns:
        Tuple ``(bleu_1, bleu_2, bleu_3)`` of batch-averaged scores.
    """
    # Fix: collapsed three copy-pasted try/except blocks into one loop and
    # narrowed the bare `except:` to `except Exception` so KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    weight_sets = [
        [1, 0, 0, 0],           # BLEU-1
        [0.5, 0.5, 0, 0],       # BLEU-2
        [0.33, 0.33, 0.33, 0],  # BLEU-3
    ]
    all_scores = [[], [], []]
    for hyp, ref in zip(hyps, refs):
        for scores, weights in zip(all_scores, weight_sets):
            try:
                score = bleu_score.sentence_bleu(
                    [ref], hyp,
                    smoothing_function=SmoothingFunction().method7,
                    weights=weights)
            except Exception:
                # nltk raises on degenerate inputs (e.g. empty hypothesis);
                # score those as 0, matching the original behavior.
                score = 0
            scores.append(score)
    bleu_1, bleu_2, bleu_3 = (np.average(s) for s in all_scores)
    return bleu_1, bleu_2, bleu_3
def novelty(hyps):
    """Average per-sentence-position distinct-1/2 across generated poems.

    Each hypothesis is a flat token list in which the token "[SEP]" separates
    sentences.  For every sentence position, distinct-1/2 is computed over
    all poems that have a sentence at that position, then averaged over
    positions.

    Args:
        hyps: iterable of token lists containing "[SEP]" separators.

    Returns:
        Tuple ``(unigram_novelty, bigram_novelty)``.
    """
    # Re-join tokens, split into sentences on the separator, re-tokenize.
    poems = [[sent.split() for sent in " ".join(poem).split(" [SEP] ")]
             for poem in hyps]
    poem_num = len(poems)
    sent_nums = [len(poem) for poem in poems]
    # Fix: plain max() on the list; the original wrapped it in np.array()
    # for no benefit.
    max_sent_num = max(sent_nums)
    unigrams_novelty = 0
    bigrams_novelty = 0
    for i in range(max_sent_num):
        unigrams_all, bigrams_all = Counter(), Counter()
        for j in range(poem_num):
            if i < len(poems[j]):
                seq = poems[j][i]
                unigrams_all.update(Counter(seq))
                bigrams_all.update(Counter(zip(seq, seq[1:])))
        # Epsilons guard against positions where every sentence is empty.
        unigrams_novelty += (len(unigrams_all)+1e-12) / (sum(unigrams_all.values())+1e-5)
        bigrams_novelty += (len(bigrams_all)+1e-12) / (sum(bigrams_all.values())+1e-5)
    unigrams_novelty /= max_sent_num
    bigrams_novelty /= max_sent_num
    return unigrams_novelty, bigrams_novelty
| Kunhao18/image-to-poetry | plato/metrics/metrics.py | metrics.py | py | 3,846 | python | en | code | 0 | github-code | 13 |
7314066995 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import redis
from scrapy.conf import settings
from AIHR.items import AihrItem, CompanyItem, QcwyPostItem, QcwyCompanyItem
class AihrPipeline(object):
    """Scrapy pipeline that pushes each scraped item's URL onto a Redis list.

    A MongoDB collection handle is also opened in ``__init__`` for the
    (currently disabled) persistence of post items.
    """

    def __init__(self):
        # MongoDB connection parameters come from the Scrapy settings module.
        host = settings["MONGO_HOST"]
        port = settings["MONGO_PORT"]
        dbname = settings["MONGO_DB"]
        sheetname = settings["MONGO_COLLPO"]
        client = pymongo.MongoClient(host=host, port=port)
        mydb = client[dbname]
        self.post = mydb[sheetname]
        # Redis connection used to feed seed URLs to the `hyperchain` spider.
        # NOTE(review): self.redis_url is never used after assignment and the
        # Redis() call hard-codes the same host/port -- confirm intent.
        self.redis_url = 'redis://localhost:6379/'
        self.r = redis.Redis(host="localhost", port=6379)

    def process_item(self, item, spider):
        """Queue the item's URL for the `hyperchain` spider and pass the item on.

        Bug fix: a Scrapy pipeline must return the item (or raise DropItem);
        the original returned None implicitly, which drops the item for any
        later pipelines in ITEM_PIPELINES.
        """
        self.r.lpush('hyperchain:start_urls', item['url'])
        return item
| notsayyu/spider | master/AIHR/pipelines.py | pipelines.py | py | 1,442 | python | en | code | 0 | github-code | 13 |
16824411454 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from packaging import version
import elasticsearch
from elasticsearch.exceptions import NotFoundError, RequestError, ElasticsearchException
class ElasticsearchUtility:
    """Thin convenience wrapper around an ``elasticsearch`` client.

    Adds deep-pagination helpers (scroll API and point-in-time + search_after),
    optional logging, and consistent exception handling on top of the raw
    client calls.  The pagination strategy in :meth:`query` is chosen from the
    cluster version reported by :meth:`set_es_version`.
    """

    def __init__(self, es_url, logger=None, **kwargs):
        """Create the underlying client.

        ``es_url`` may be a single URL or a list of host URLs; any extra
        keyword arguments are forwarded to ``elasticsearch.Elasticsearch``.
        ``logger`` is optional -- when absent, messages fall back to print().
        """
        self.es = elasticsearch.Elasticsearch(hosts=es_url if type(es_url) == list else [es_url], **kwargs)
        self.es_url = es_url
        self.logger = logger
        # Cluster version; populated lazily by set_es_version().
        self.version = None

    def set_es_version(self):
        """
        Sets the version of elasticsearch; ex. 7.10.2
        """
        es_info = self.es.info()
        self.version = version.parse(es_info["version"]["number"])

    def index_document(self, **kwargs):
        """
        indexing (adding) document to Elasticsearch
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index
        index – (required) The name of the index
        body – The document
        id – (optional) Document ID, will use ES generated id if not specified
        refresh – If true then refresh the affected shards to make this operation visible to search
        ignore - will not raise error if status code is specified (ex. 404, [400, 404])
        """
        try:
            result = self.es.index(**kwargs)
            return result
        except RequestError as e:
            # RequestError carries the structured error in .info -- log that.
            self.logger.exception(e.info) if self.logger else print(e.info)
            raise e
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e

    def get_by_id(self, **kwargs):
        """
        retrieving document from Elasticsearch based on _id
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.get
        index (required) – A comma-separated list of index names
        allow_no_indices – Ignore if a wildcard expression resolves to no concrete indices (default: false)
        expand_wildcards – Whether wildcard expressions should get expanded to open or closed indices
                           (default: open) Valid choices: open, closed, hidden, none, all Default: open
        ignore - will not raise error if status code is specified (ex. 404, [400, 404])
        """
        try:
            data = self.es.get(**kwargs)
            return data
        except NotFoundError as e:
            self.logger.error(e) if self.logger else print(e)
            raise e
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e

    def search_by_id(self, **kwargs):
        """
        similar to get_by_id, but this uses the _search API and can be used on aliases w/ multiple indices
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
        @param kwargs:
            - id: Str, ES document id (popped before the search call)
            - index: Str, ES index or alias
            - return_all: Bool, if there are more than one records returned
                          List[Dict] if True, else returns Dict of the latest record
        @return: Dict or List[Dict]; each hit gets a "found" key so the shape
                 matches what get_by_id returns
        """
        _id = kwargs.pop("id", None)
        if not _id:
            raise RuntimeError("_id key argument must be supplied")
        index = kwargs.get("index", None)
        if not index:
            raise RuntimeError("index key argument must be supplied")
        ignore = kwargs.get("ignore", None)
        # Default sort puts the newest record first, so hits[0] is "latest".
        kwargs["sort"] = kwargs.get("sort", "@timestamp:desc")
        return_all = kwargs.pop("return_all", False)
        kwargs["body"] = {
            "query": {
                "bool": {
                    "must": [
                        {"term": {"_id": _id}}
                    ]
                }
            }
        }
        docs = self.es.search(**kwargs)
        hits = docs["hits"]["hits"]
        if len(hits) == 0:
            # Mirror the client's `ignore` semantics: with 404 ignored, return
            # a synthetic "not found" document instead of raising.
            if (type(ignore) is list and 404 in ignore) or (type(ignore) is int and ignore == 404):
                not_found_doc = {
                    "_index": index,
                    "_type": "_doc",
                    "_id": _id,
                    "found": False
                }
                return [not_found_doc] if return_all is True else not_found_doc
            err = f"{_id} not found on index/alias {index}"
            self.logger.error(err) if self.logger else print(err)
            raise NotFoundError(404, err)
        for hit in hits:
            hit["found"] = True  # adding "found" to match get_by_id
        return hits if return_all is True else hits[0]

    def _pit(self, **kwargs):
        """
        using the PIT (point-in-time) + search_after API to do deep pagination
        https://www.elastic.co/guide/en/elasticsearch/reference/7.10/point-in-time-api.html
        https://www.elastic.co/guide/en/elasticsearch/reference/7.10/paginate-search-results.html#search-after
        :param kwargs: please see the docstrings for the "search" method below
                       * index is required when using the search_after API
        :return: List[any]
        """
        keep_alive = "2m"
        body = kwargs.pop("body", {})
        # PIT searches must NOT pass index to _search; it lives in the PIT.
        index = kwargs.pop("index", body.pop("index", None))
        if index is None:
            raise RuntimeError("ElasticsearchUtility._pit: the search_after API must specify a index/alias")
        pit = self.es.open_point_in_time(index=index, keep_alive=keep_alive)
        size = kwargs.get("size", body.get("size", 1000))
        # NOTE(review): this only writes "size" back when it is falsy (e.g. 0);
        # when no size was supplied the 1000 default is never applied.  In
        # practice query() sets kwargs["size"] before calling, so this path is
        # normally moot -- confirm intent.
        if not size:
            kwargs["size"] = size
        sort = kwargs.get("sort", body.get("sort", []))
        if not sort:
            # search_after needs a deterministic total order; tie-break on id.
            body["sort"] = [{"@timestamp": "desc"}, {"id.keyword": "asc"}]
        body = {
            **body,
            **{"pit": {**pit, **{"keep_alive": keep_alive}}},
        }
        res = self.es.search(body=body, **kwargs)
        records = []
        while True:
            if len(res["hits"]["hits"]) == 0:
                break
            records.extend(res["hits"]["hits"])
            # Resume the next page after the last hit's sort values.
            last_record = res["hits"]["hits"][-1]
            body["search_after"] = last_record["sort"]
            res = self.es.search(body=body, **kwargs)
        self.es.close_point_in_time(body=pit)
        return records

    def _scroll(self, **kwargs):
        """Drain all hits for the prepared search via the scroll API."""
        if "size" not in kwargs:
            kwargs["size"] = 1000
        if "scroll" not in kwargs:
            kwargs["scroll"] = "2m"
        scroll = kwargs["scroll"]  # re-use in each subsequent scroll
        page = self.es.search(**kwargs)
        sid = page["_scroll_id"]
        scroll_id = sid
        documents = page["hits"]["hits"]
        page_size = page["hits"]["total"]["value"]
        if page_size <= len(documents):  # avoid scrolling if we get all data in initial query
            self.es.clear_scroll(scroll_id=scroll_id, ignore=[404])
            return documents
        while page_size > 0:
            page = self.es.scroll(scroll_id=sid, scroll=scroll)
            scroll_documents = page["hits"]["hits"]
            sid = page["_scroll_id"]
            # Clear a superseded scroll context as soon as the id changes.
            if sid != scroll_id:
                self.es.clear_scroll(scroll_id=scroll_id, ignore=[404])
                scroll_id = sid
            page_size = len(scroll_documents)
            documents.extend(scroll_documents)
        self.es.clear_scroll(scroll_id=scroll_id, ignore=[404])  # clear the last scroll id (if possible)
        return documents

    def query(self, **kwargs):
        """
        returns all records returned from a query, through the scroll API
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
        body – The search definition using the Query DSL
        index – (required) A comma-separated list of index names to search (or aliases)
        _source – True or false to return the _source field or not, or a list of fields to return
        _source_excludes – A list of fields to exclude from the returned _source field
        _source_includes – A list of fields to extract and return from the _source field
        q – Query in the Lucene query string syntax
        scroll – Specify how long a consistent view of the index should be maintained for scrolled search
        size – Number of hits to return (default: 10)
        sort – A comma-separated list of <field>:<direction> pairs
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.clear_scroll
        body – A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter
        scroll_id – A comma-separated list of scroll IDs to clear
        """
        # ES refuses from+size beyond 10000 (index.max_result_window), so
        # larger result sets must go through PIT/search_after or scroll.
        page_limit = 10000
        if "size" not in kwargs and "size" not in kwargs.get("body", {}):
            kwargs["size"] = 1000
        else:
            kwargs["size"] = kwargs.get("size") or kwargs.get("body", {}).get("size", 1000)
        scroll = kwargs.pop("scroll", "2m")
        data = self.es.search(**kwargs)
        total = data["hits"]["total"]["value"]
        if total >= page_limit:
            if self.version is None:
                self.set_es_version()
            # PIT + search_after is only available from ES 7.10 onwards.
            if self.version >= version.parse("7.10"):
                return self._pit(**kwargs)
            else:
                kwargs["scroll"] = scroll
                return self._scroll(**kwargs)
        else:
            # Small result set: plain from/size pagination, reusing the first
            # page already fetched above.
            page_size = kwargs["size"]
            documents = data["hits"]["hits"]
            kwargs["from_"] = 0
            while page_size > 0:
                kwargs["from_"] += page_size  # shift offset afterwards
                # Clamp the final page so from_+size never exceeds the window.
                if kwargs["from_"] + kwargs["size"] >= page_limit:
                    kwargs["size"] = page_limit - kwargs["from_"]
                data = self.es.search(**kwargs)
                rows = data["hits"]["hits"]
                page_size = len(rows)
                documents.extend(rows)
            return documents

    def search(self, **kwargs):
        """
        similar to query method but does not scroll, used if user doesnt want to scroll
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
        index – (required) A comma-separated list of index names to search (or aliases)
        body – The search definition using the Query DSL
        _source – True or false to return the _source field or not, or a list of fields to return
        q – Query in the Lucene query string syntax
        scroll – Specify how long a consistent view of the index should be maintained for scrolled search
        size – Number of hits to return (default: 10)
        sort – A comma-separated list of <field>:<direction> pairs
        """
        try:
            if self.logger:
                self.logger.info("search **kwargs: {}".format(dict(**kwargs)))
            result = self.es.search(**kwargs)
            return result
        except RequestError as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e

    def get_count(self, **kwargs):
        """
        returning the count for a given query (warning: ES7 returns max of 10000)
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.count
        body – A query to restrict the results specified with the Query DSL (optional)
        index – (required) A comma-separated list of indices to restrict the results
        q – Query in the Lucene query string syntax
        ignore - will not raise error if status code is specified (ex. 404, [400, 404])
        """
        try:
            result = self.es.count(**kwargs)
            return result["count"]
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e

    def delete_by_id(self, **kwargs):
        """
        Removes a document from the index
        https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html
        index – (required) The name of the index
        id – The document ID
        refresh – If true then refresh the affected shards to make this operation visible to search
        ignore - will not raise error if status code is specified (ex. 404, [400, 404])
        """
        try:
            if self.logger:
                # NOTE(review): log label says "query" but this is a delete.
                self.logger.info("query **kwargs: {}".format(dict(**kwargs)))
            result = self.es.delete(**kwargs)
            return result
        except NotFoundError as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e

    def update_document(self, **kwargs):
        """
        updates Elasticsearch document using the update API
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.update
        index – (required) The name of the index
        id – Document ID
        body – The request definition requires either script or partial doc:
               ex. {
                   "doc_as_upsert": true,
                   "doc": <ES document>
               }
        _source – True or false to return the _source field or not, or a list of fields to return
        refresh – If true then refresh the affected shards to make this operation visible to search
        ignore - will not raise error if status code is specified (ex. 404, [400, 404])
        """
        try:
            if self.logger:
                # NOTE(review): the format string has no {} placeholder, so the
                # kwargs are never interpolated into this log line.
                self.logger.info("update_document **kwargs".format(dict(**kwargs)))
            result = self.es.update(**kwargs)
            return result
        except RequestError as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e
        except (ElasticsearchException, Exception) as e:
            self.logger.exception(e) if self.logger else print(e)
            raise e
# TODO: remove all code that uses this function
def get_es_scrolled_data(es_url, index, query):
    """Return every hit for *query* from *index* using the scroll API."""
    client = elasticsearch.Elasticsearch([es_url])
    results = []
    # Open the scroll with the first page of 100 hits.
    response = client.search(index=index, scroll="2m", size=100, body=query)
    scroll_id = response["_scroll_id"]
    results.extend(response["hits"]["hits"])
    remaining = response["hits"]["total"]["value"]
    # Keep scrolling until a page comes back empty.
    while remaining > 0:
        response = client.scroll(scroll_id=scroll_id, scroll="2m")
        scroll_id = response["_scroll_id"]
        batch = response["hits"]["hits"]
        remaining = len(batch)
        results.extend(batch)
    return results
| hysds/hysds_commons | hysds_commons/elasticsearch_utils.py | elasticsearch_utils.py | py | 15,234 | python | en | code | 1 | github-code | 13 |
22477371223 | import tensorflow as tf
import streamlit as st
from module import preprocessing
from sklearn.model_selection import train_test_split
from transformers import AutoTokenizer, TFBertModel
from keras.layers import Dense, Input, GlobalMaxPool1D, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.losses import CategoricalCrossentropy
from keras.metrics import CategoricalAccuracy
import numpy as np
@st.cache_resource
def create_model():
    """Build and compile the BERT emotion classifier (weights untrained).

    Loads the dataset, performs a stratified train/test split, tokenizes both
    splits, and assembles a BERT encoder topped with dense layers producing a
    6-way softmax.
    """
    df = preprocessing.get_all_data()
    train_data, test_data = train_test_split(
        df, test_size=0.3, random_state=42, shuffle=True, stratify=df.sentiment)
    model_name = 'bert-base-uncased'
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    bert_model = TFBertModel.from_pretrained(model_name)
    max_len = 40
    # Shared tokenizer options for both splits.
    tokenize_args = dict(
        add_special_tokens=True,
        return_tensors='tf',
        max_length=max_len,
        padding=True,
        truncation=True,
        return_token_type_ids=False,
        return_attention_mask=True,
        verbose=True,
    )
    # NOTE(review): the tokenized splits are computed but not used below --
    # presumably intended for a training step; confirm.
    X_train = tokenizer(text=train_data.text.tolist(), **tokenize_args)
    X_test = tokenizer(text=test_data.text.tolist(), **tokenize_args)
    input_ids = Input(shape=(max_len,), name='input_ids', dtype=tf.int32)
    attention_mask = Input(
        shape=(max_len,), name='attention_mask', dtype=tf.int32)
    # First output of the BERT call is the sequence of hidden states.
    embeddings = bert_model(input_ids, attention_mask=attention_mask)[0]
    x = GlobalMaxPool1D()(embeddings)
    x = Dense(units=128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(units=64, activation='relu')(x)
    x = Dense(units=32, activation='relu')(x)
    y = Dense(units=6, activation='softmax')(x)
    model = Model(inputs=[input_ids, attention_mask], outputs=y)
    model.compile(
        loss=CategoricalCrossentropy(from_logits=True),
        optimizer=Adam(learning_rate=5e-5, epsilon=1e-8, clipnorm=1.0),
        metrics=CategoricalAccuracy('balanced_accuracy'),
    )
    return model
@st.cache_resource
def load_model():
    """Rebuild the classifier architecture and load the trained weights."""
    bert_model = TFBertModel.from_pretrained('bert-base-uncased')
    max_len = 40
    input_ids = Input(shape=(max_len,), name='input_ids', dtype=tf.int32)
    attention_mask = Input(
        shape=(max_len,), name='attention_mask', dtype=tf.int32)
    # First output of the BERT call is the sequence of hidden states.
    embeddings = bert_model(input_ids, attention_mask=attention_mask)[0]
    x = GlobalMaxPool1D()(embeddings)
    x = Dense(units=128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(units=64, activation='relu')(x)
    x = Dense(units=32, activation='relu')(x)
    y = Dense(units=6, activation='softmax')(x)
    model = Model(inputs=[input_ids, attention_mask], outputs=y)
    # Architecture must match create_model() for the weights to load.
    model.load_weights('models/ed-bert.h5')
    return model
@st.cache_data
def predict(text, kind=0):
    """Classify *text* into one of six emotions.

    ``kind == 1`` builds a fresh (untrained) model via create_model();
    any other value loads the trained weights via load_model().
    """
    labels = {0: 'anger', 1: 'fear', 2: 'joy',
              3: 'love', 4: 'sadness', 5: 'surprise'}
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    model = create_model() if kind == 1 else load_model()
    encoded = tokenizer(
        text=text,
        add_special_tokens=True,
        max_length=40,
        truncation=True,
        padding='max_length',
        return_tensors='tf',
        return_token_type_ids=False,
        return_attention_mask=True,
        verbose=True,
    )
    # Scaling by 100 turns softmax outputs into percentages; argmax is
    # unaffected by the scaling.
    scores = model.predict(
        {'input_ids': encoded['input_ids'],
         'attention_mask': encoded['attention_mask']}) * 100
    return labels[np.argmax(scores)]
| putuwaw/ed-bert | module/bert.py | bert.py | py | 4,155 | python | en | code | 0 | github-code | 13 |
class Cup:
    """One pit or store on a Kalah board."""

    def __init__(self, type, value, number, nextCup, captureCup):
        # type: 1 for a regular pit, 0 for a scoring store.
        self.type = type
        # value: number of seeds currently in the cup.
        self.value = value
        self.number = number
        # nextCup: cup number sown after this one (counter-clockwise).
        self.nextCup = nextCup
        # captureCup: number of the cup directly opposite this one.
        self.captureCup = captureCup

    def Sow(self):
        """Drop a single seed into this cup."""
        self.value += 1

    def Harvest(self):
        """Empty this cup."""
        self.value = 0

    def Capture(self, num):
        """Add *num* captured seeds to this cup."""
        self.value += num

    def __str__(self):
        return f'Cup {self.number}: {self.value} seeds'
class KalahGame:
    """Two-player Kalah on a board of 10 cups.

    Cups 1-4 are player 1's pits and cup 5 is player 1's store; cups 6-9 are
    player 2's pits and cup 10 is player 2's store.  ``self.state`` holds the
    board (list of Cup), the legal moves, and the (p1, p2) score tuple.
    """

    def __init__(self, state=None):
        """Start a fresh game, or resume from a previously captured state."""
        if state is None:
            self.state = {
                'board': self.GenerateInitialBoardState(),
                'possibleMoves': [1, 2, 3, 4],
                'score': (0, 0)
            }
        else:
            self.state = {
                'board': self.GenerateSpecificBoardState(state['board']),
                'possibleMoves': state['possibleMoves'],
                'score': state['score']
            }
        # Set to True once a player has no legal move left.
        self.end = False

    def GenerateSpecificBoardState(self, board):
        """Deep-copy a list of Cup objects so the resumed game owns its board."""
        cups = []
        for cup in board:
            cups.append(Cup(type=cup.type,
                            value=cup.value,
                            number=cup.number,
                            nextCup=cup.nextCup,
                            captureCup=cup.captureCup))
        return cups

    def GenerateInitialBoardState(self):
        """Create cups 1-10: four seeds per pit, empty stores (cups 5 and 10)."""
        cups = []
        type = 1
        for i in range(1, 11):
            if i % 5 == 0:
                type = 0
            # Bug fix: the successor of cup i must wrap 10 -> 1 while staying
            # in the 1..10 range.  The previous formula (i + 1) % 10 gave
            # cup 9 the successor 0, which bypassed the "skip the opponent's
            # store" check (nextCup == 10) below and let player 1 sow into
            # player 2's store via negative list indexing.
            cups.append(Cup(type=type,
                            value=4*type,
                            number=i,
                            nextCup=(i % 10) + 1,
                            captureCup=10-i))
            type = 1
        return cups

    def MakeMove(self, move):
        """Sow the seeds of cup *move* counter-clockwise and update the game.

        Invalid moves are silently ignored.  Landing the last seed in your
        own store earns another turn; landing it in a previously empty cup
        captures that cup plus the opposite one into the mover's store.
        Updates the score and the set of possible moves, and sets
        ``self.end`` when no move remains.
        """
        if move not in self.state['possibleMoves']:
            return
        seeds = self.state['board'][move-1].value
        currentCup = move
        nextCup = self.state['board'][move-1].nextCup
        self.state['board'][move-1].Harvest()
        repeat = False
        while seeds > 0:
            # Never sow into the opponent's store: redirect past it.
            if move < 5 and nextCup == 10:
                nextCup = 1
            elif move > 5 and nextCup == 5:
                nextCup = 6
            else:
                self.state['board'][nextCup - 1].Sow()
                currentCup = nextCup
                nextCup = self.state['board'][nextCup - 1].nextCup
                seeds = seeds - 1
        if currentCup % 5 == 0:
            # Last seed landed in a store: the mover gets another turn.
            repeat = True
        elif self.state['board'][currentCup - 1].value == 1:
            # Last seed landed in a previously empty cup: capture it together
            # with the opposite cup into the mover's store.
            # NOTE(review): this triggers on either side of the board;
            # standard Kalah only captures in the mover's own row -- confirm
            # the house rule is intentional.
            total = 1 + self.state['board'][10 - currentCup - 1].value
            self.state['board'][currentCup - 1].Harvest()
            self.state['board'][10 - currentCup - 1].Harvest()
            if move < 5:
                self.state['board'][4].Capture(total)
            else:
                self.state['board'][9].Capture(total)
        self.state['score'] = (self.state['board'][4].value, self.state['board'][9].value)
        # The next mover scans their own row after a repeat turn, otherwise
        # the opponent's row; only non-empty cups are legal moves.
        if move < 5:
            own, other = range(0, 4), range(5, 9)
        else:
            own, other = range(5, 9), range(0, 4)
        candidates = own if repeat else other
        possibleMoves = [i + 1 for i in candidates
                         if self.state['board'][i].value > 0]
        self.state['possibleMoves'] = possibleMoves
        if len(possibleMoves) == 0:
            self.end = True

    def __str__(self):
        # Renamed the local from `str` (shadowed the builtin) to `text`.
        text = 'Game Board: \n'
        for i in range(8, 4, -1):
            text = text + f'{self.state["board"][i].value} '
        text = text + '\n'
        for i in range(4):
            text = text + f'{self.state["board"][i].value} '
        text = text + '\n'
        text = text + f'Possible Moves: {self.state["possibleMoves"]}\n'
        text = text + f'Score: P1 {self.state["score"][0]} P2 {self.state["score"][1]}'
        return text
| NicholasACTran/Kalah-MiniMax | src/kalah.py | kalah.py | py | 5,286 | python | en | code | 0 | github-code | 13 |
73669065616 | from cpy.symbolic.sym_utils import vec_subs
import sympy
def rk4(dynamics):
    """One classical 4th-order Runge-Kutta step of a symbolic dynamical system.

    Args:
        dynamics: dict with keys 'state' (state vector q) and 'dynamics'
            (state derivative qdot expressed in terms of q).

    Returns:
        Symbolic next state q_{n+1} in terms of q and the step symbol ``h``.
    """
    q = dynamics['state']
    qdot = dynamics['dynamics']
    h = sympy.Symbol('h')
    h_half = sympy.Rational(1, 2) * h
    h_sixth = sympy.Rational(1, 6) * h
    k1 = qdot
    k2 = vec_subs(qdot, q, q + (h_half * k1))
    k3 = vec_subs(qdot, q, q + (h_half * k2))
    k4 = vec_subs(qdot, q, q + (h * k3))
    # Bug fix: h_sixth already contains the factor h; the original multiplied
    # by h again ((h_sixth * h)), producing an O(h^2) increment instead of
    # the classical RK4 update q + (h/6)(k1 + 2 k2 + 2 k3 + k4).
    qn = q + h_sixth * (k1 + (2 * k2) + (2 * k3) + k4)
    return qn
| jpanikulam/cpy | cpy/optimization/dynamics.py | dynamics.py | py | 465 | python | en | code | 4 | github-code | 13 |
9039212600 | import logging
from dialogue_system import bml
from .tcp_sender import TCPSender
logger = logging.getLogger().getChild(__name__)
class UnityBody:
    """Context-managed TCP bridge that sends BML commands to a Unity character."""

    def __init__(self, character_name='ChrBrad'):
        self._conn = None
        self._character_name = character_name

    def __enter__(self):
        # Open the network connection for the duration of the `with` block.
        self._conn = TCPSender()
        self._conn.init_network()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._conn.finit_network()

    def execute(self, bml_list):
        """Serialize *bml_list* to XML, send it, and return the message id."""
        logger.debug('Executing BML commands:\n%s', bml_list)
        xml = bml.to_xml_clean(bml_list)
        logger.debug('Turning into XML:\n%s', xml)
        return self._conn.send_msg(xml)

    def check_done(self, id):
        """Block (busy-poll) until message *id* is reported done or errors.

        Fixes: `while done == False` -> `while not done`; the bare `except:`
        is narrowed to `except Exception` so KeyboardInterrupt/SystemExit
        are no longer swallowed.  Any connection error is treated as "done",
        preserving the original best-effort behavior.
        """
        logger.debug('MSG ID: %s', id)
        done = False
        while not done:
            try:
                if self._conn.check_msg_done(id):
                    done = True
            except Exception:
                done = True

    def execute_and_check(self, bml_list):
        """Send the BML commands and wait until they have been processed."""
        logger.debug('Execute and Check in UnityBody')
        return self.check_done(self.execute(bml_list))
10135613377 | from django.db import models
from django.contrib.auth.models import User
class Article(models.Model):
    """A blog post authored by a site user."""
    # Headline shown in listings and detail pages.
    title = models.CharField(max_length=100)
    # Full body text of the post.
    content = models.TextField()
    # Optional illustration.  NOTE(review): no upload_to is set, so files
    # land directly in MEDIA_ROOT -- confirm this is intended.
    image = models.ImageField(null=True)
    # Timestamp set once when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    # Deleting the user deletes their articles.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    sub_heading = models.CharField(max_length=200)
    # NOTE(review): auto_now refreshes this on *every* save, so despite the
    # name it behaves as a "last modified" date -- confirm publish semantics.
    publish_date = models.DateField(auto_now=True)
    # presumably a per-request display flag rather than persistent per-user
    # state -- verify against the views that set it.
    is_currentuser = models.BooleanField(default=False)
    def __str__(self):
        # Human-readable representation (used by the admin, shell, etc.).
        return self.title
class Reply(models.Model):
    """A comment left on an Article.

    NOTE(review): there is no author field, so replies are anonymous --
    confirm intent.
    """
    # Creation time, set once on insert.
    timestamp = models.DateTimeField(auto_now_add=True)
    # Replies are removed together with their parent article.
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    # Body text of the reply.
    reply = models.TextField()
class Like(models.Model):
    """Marks that *user* liked *article*.

    NOTE(review): no UniqueConstraint on (user, article), so duplicate likes
    are possible -- confirm intent.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
| aalkhulaifi/Beautiful-designed-blog | main/models.py | models.py | py | 835 | python | en | code | 0 | github-code | 13 |
24129383418 | import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
from redis.asyncio import ConnectionPool
from starlette import status
from car_rental_service.db.dao.reservation_dao import ReservationDAO
from car_rental_service.tests.payloads import CREATE_RESERVATION_PAYLOAD
@pytest.mark.anyio
async def test_health(client: AsyncClient, fastapi_app: FastAPI) -> None:
    """The health-check endpoint responds with HTTP 200."""
    endpoint = fastapi_app.url_path_for("health_check")
    resp = await client.get(endpoint)
    assert resp.status_code == status.HTTP_200_OK
@pytest.mark.anyio
async def test_create_reservations(
    fastapi_app: FastAPI,
    client: AsyncClient,
    dbsession: AsyncSession,
    fake_redis_pool: ConnectionPool,
) -> None:
    """Posting a valid reservation payload returns 204 (no body)."""
    endpoint = fastapi_app.url_path_for("create_reservation")
    resp = await client.post(endpoint, data=CREATE_RESERVATION_PAYLOAD.json())
    assert resp.status_code == status.HTTP_204_NO_CONTENT
@pytest.mark.anyio
async def test_get_reservations(
    fastapi_app: FastAPI,
    client: AsyncClient,
    dbsession: AsyncSession,
    fake_redis_pool: ConnectionPool,
) -> None:
    """A reservation seeded via the DAO shows up in the listing endpoint."""
    reservation_dao = ReservationDAO(dbsession, fake_redis_pool)
    await reservation_dao.create_reservation(CREATE_RESERVATION_PAYLOAD)
    endpoint = fastapi_app.url_path_for("get_reservations")
    resp = await client.get(endpoint)
    payload = resp.json()
    assert resp.status_code == status.HTTP_200_OK
    assert len(payload) >= 1
| d3prof3t/car-rental-service | car_rental_service/tests/test_reservation.py | test_reservation.py | py | 1,606 | python | en | code | 0 | github-code | 13 |
42834718163 | from clients.Client import Client
from getpass import getpass
from datetime import date
import json
#def cliente2():
if __name__ == "__main__":
    # Interactive client: repeatedly reads a new return date and a loan id,
    # then sends an "extend rental" (EXARR) request to the backend service.
    print("Cliente Extender Arriendo")
    keep_alive = True
    try:
        while(keep_alive):
            #fecha = input("Ingrese la nueva fecha de arriendo: ")
            print("Ingrese fecha de entrega\n")
            dia = input("ingrese dia: ")
            mes = input("Ingrese mes: ")
            año = input ("ingrese año: ")
            id_prestamo = input("Ingrese el id del prestamo que se debe modificar: ")
            #fecha = date(int(año),int(mes),int(dia))
            try:
                # Payload: day/month/year of the new return date plus loan id.
                climsg = {
                    #"fecha": fecha,
                    "dia": dia,
                    "mes": mes,
                    "año": año,
                    "id_prestamo": id_prestamo,
                }
                a = Client("EXARR")
                msg = a.exec_client(debug=True, climsg=json.dumps(climsg))
                print("###################################\n\n", msg, "\n\n###################################")
            except Exception as e:
                # A single failed request should not kill the loop.
                print("Error: ", e)
    except KeyboardInterrupt:
        # Ctrl+C: say goodbye and stop.
        print("\nCerrando cliente, hasta pronto ....")
        keep_alive = False
exit() | wolfzart/arquiSoftware | disfraz/backend/cliente2.py | cliente2.py | py | 1,343 | python | es | code | 0 | github-code | 13 |
73393198738 | #! /usr/bin/env python3
from bs4 import BeautifulSoup
import jieba
from urllib.parse import urlparse, ParseResult
from typing import List, Dict
import config
class WebPageAnalyzer(object):
    """Extracts title, text, keywords and outgoing links from a fetched web page."""

    def __init__(self, url: str, html: str) -> None:
        self.__url: str = url
        # NUL bytes break lxml parsing, so strip them up front.
        self.__html: str = html.replace('\0', '')
        self.__bs = BeautifulSoup(self.__html, "lxml")

    def url(self) -> str:
        """Return the page URL this analyzer was built from."""
        return self.__url

    def title(self) -> str:
        """Return the stripped <title> text, or None when the page has no title."""
        if self.__bs.title is not None:
            return self.__bs.title.getText().strip()
        return None

    def text(self) -> str:
        """Return the visible text content of the page."""
        return self.__bs.getText()

    def content(self) -> str:
        """Return the raw (NUL-stripped) HTML."""
        return self.__html

    def keywords(self) -> List[str]:
        """Tokenize the page text with jieba and return lower-cased keywords.

        Tokens that are too long for the index column, blank, or pure
        punctuation are dropped.
        """
        words = jieba.cut_for_search(self.text())
        return [word.lower() for word in words
                if len(word) < config.sql_index_field_length_limit
                and word.strip() != ""
                and word not in "`~!@#$%^&*()_+-={[]}\\|\"':;,.?/,。、·"]

    def keyword_scores(self) -> Dict[str, float]:
        """Return each keyword's count divided by the number of distinct keywords."""
        scores: Dict[str, float] = {}
        for keyword in self.keywords():
            scores[keyword] = scores.get(keyword, 0) + 1
        return {key: scores[key] / len(scores) for key in scores}

    def urls(self):
        """Return the set of absolute URLs linked from this page.

        Relative links inherit the scheme/netloc of the page's own URL.
        """
        import logging  # local import: the module header does not import logging

        refer_url: ParseResult = urlparse(self.__url)
        found = set()
        # Reuse the already-parsed soup instead of re-parsing the HTML.
        for anchor in self.__bs.find_all('a'):
            try:
                url: ParseResult = urlparse(anchor.get("href"))
            except Exception as e:
                # BUGFIX: `logging` was referenced without being imported,
                # which raised NameError instead of logging the warning.
                logging.warning(e)
                continue
            if url.scheme == "":
                url = url._replace(scheme=refer_url.scheme)
            if url.netloc == "":
                url = url._replace(netloc=refer_url.netloc)
            found.add(str(url.geturl()))
        return found
if __name__ == "__main__":
    # Smoke test: compute keyword scores for a small inline HTML document.
    web_page_analyzer = WebPageAnalyzer(url="", html="<html><head><title> 葡萄Grape </title></head><body>吃葡萄不吐葡萄皮,不吃葡萄倒吐葡萄皮</body></html>")
print(web_page_analyzer.keyword_scores()) | hubenchang0515/Phosphophyllite | analyzer.py | analyzer.py | py | 2,211 | python | en | code | 2 | github-code | 13 |
38568719882 | import imp
from urllib.parse import urlencode
from urllib import parse
from enum import Enum, unique
import requests
import os,sys
import time
# todo: 可以改成自动获取API_KEY
API_KEY = "your api key" # 替换成你的API_KEY
# todo: 可以改成自己的保存路径
DOWNLOAD_FILE_PATH = '/Users/xxx/Desktop/pixabayDownload/' # 替换成你的保存路径
class Spider():
    """Interactive Pixabay downloader.

    Prompts for a search keyword and a download type, then pages through the
    Pixabay REST API saving matching videos and/or photos under
    DOWNLOAD_FILE_PATH/<keyword>/.
    """

    @unique
    class DownloadType(Enum):
        All = 0    # download both videos and photos (default)
        Video = 1
        Photo = 2

    def __init__(self):
        self.keyword = input('欢迎使用pixabay视频搜索下载器\n请输入搜索关键词(推荐输入英文):')
        self.downloadType = input("下载类型:0全部(默认),1:视频,2:图片)\n")
        self.p = 1080  # preferred maximum video height (default 1080p)
        self.videoPage = 1
        self.picPage = 1
        self.perPage = 20
        self.picURL = "https://pixabay.com/api/?key="+API_KEY+"&q="+parse.quote(self.keyword)+"&pretty=true"+"&page="+str(self.picPage)+"&per_page="+str(self.perPage)
        self.videoURL = "https://pixabay.com/api/videos/?key="+API_KEY+"&q="+parse.quote(self.keyword)+"&pretty=true"+"&page="+str(self.videoPage)+"&per_page="+str(self.perPage)
        self.path = DOWNLOAD_FILE_PATH
        self.searchedKeywordsDirPath = ""
        # Create the download root folder on first run.
        self.isExists = os.path.exists(self.path)
        if not self.isExists:
            os.makedirs(self.path)

    def getSource(self, url):
        """Fetch *url* and return the decoded JSON payload."""
        return requests.get(url).json()

    def _unique_path(self, file_path):
        """Return *file_path*, or a timestamped variant when it already exists.

        BUGFIX: the original appended the extension a second time
        (producing e.g. "name.mp4_123.mp4"); the timestamp is now inserted
        before the extension.
        """
        if not os.path.exists(file_path):
            return file_path
        base, ext = os.path.splitext(file_path)
        return base + "_" + str(time.time()) + ext

    def downloadResource(self, type):
        """Download resources of the requested type.

        Accepts a DownloadType member, an int, or the numeric string that
        input() returns.
        """
        # BUGFIX: input() yields a string, so the original comparisons with
        # the enum members never matched and everything was always downloaded.
        if not isinstance(type, self.DownloadType):
            try:
                type = self.DownloadType(int(type))
            except (TypeError, ValueError):
                type = self.DownloadType.All
        if not os.path.exists(self.searchedKeywordsDirPath):
            os.makedirs(self.searchedKeywordsDirPath)
        if type == self.DownloadType.Photo:
            self.downloadPhoto()
        elif type == self.DownloadType.Video:
            self.downloadVideo()
        else:
            self.downloadVideo()
            self.downloadPhoto()

    def downloadPhoto(self):
        """Page through the photo API, saving the best-resolution JPEG of each hit."""
        while True:
            self.picURL = "https://pixabay.com/api/?key="+API_KEY+"&q="+parse.quote(self.keyword)+"&pretty=true"+"&page="+str(self.picPage)+"&per_page="+str(self.perPage)
            # BUGFIX: used the module-level global `spider` instead of self.
            results = self.getSource(self.picURL)
            print("找到了"+str(len(results['hits']))+"个图片")
            if results['totalHits'] == 0:
                print("没有找到图片")
                break
            print("正在下载第"+str(self.picPage)+"页")
            for picBlock in results["hits"]:
                try:
                    downloadPicUrl = picBlock["fullHDURL"]
                except KeyError:
                    # Not every hit exposes a Full-HD rendition.
                    print("没有1080p图片,换为其他大图")
                    downloadPicUrl = picBlock["largeImageURL"]
                picName = picBlock["tags"]+"_"+str(picBlock["imageHeight"])+"p.jpg"
                print("正在下载:"+picName)
                downloadPic = requests.get(downloadPicUrl)
                filePath = self._unique_path(self.searchedKeywordsDirPath+"/"+picName)
                with open(filePath, 'wb') as f:
                    f.write(downloadPic.content)
                print("下载完成:" + picName)
                time.sleep(1)  # be polite to the API
            self.picPage += 1
            if len(results["hits"]) < self.perPage:
                print("已到达最后一页")
                break
        self.picPage = 1

    def downloadVideo(self):
        """Page through the video API, saving renditions between 720p and self.p."""
        while True:
            self.videoURL = "https://pixabay.com/api/videos/?key="+API_KEY+"&q="+parse.quote(self.keyword)+"&pretty=true"+"&page="+str(self.videoPage)+"&per_page="+str(self.perPage)
            results = self.getSource(self.videoURL)
            print("找到了"+str(len(results['hits']))+"个视频")
            if results['totalHits'] == 0:
                print("没有找到视频")
                break
            print("正在下载第"+str(self.videoPage)+"页")
            for videoBlock in results["hits"]:
                tags = videoBlock["tags"]
                for video, value in videoBlock["videos"].items():
                    # BUGFIX: was `spider.p` (module global) and compared the
                    # integer height against the string "1080".
                    if 720 < value["height"] <= self.p or value["height"] == 1080:
                        videoURL = value["url"]
                        videoName = tags+"_"+str(value["height"])+"p.mp4"
                        print("正在下载:"+videoName)
                        downloadVideo = requests.get(videoURL)
                        filePath = self._unique_path(self.searchedKeywordsDirPath+"/"+videoName)
                        with open(filePath, 'wb') as f:
                            f.write(downloadVideo.content)
                        print("下载完成:" + videoName)
                        time.sleep(1)
            self.videoPage += 1
            if len(results["hits"]) < self.perPage:
                print("已到达最后一页")
                break
        self.videoPage = 1
if __name__ == '__main__':
    spider = Spider()
    # Folder that receives downloads for this search keyword.
    spider.searchedKeywordsDirPath = spider.path+spider.keyword
    spider.downloadResource(spider.downloadType)
| zeku2022/pixbayDownloader | pixabayDownloader.py | pixabayDownloader.py | py | 5,763 | python | en | code | 0 | github-code | 13 |
10968510003 | import requests
import io
import pandas as pd
import math
# url = "https://ldlink.nci.nih.gov/LDlinkRest/ldmatrix"
# rs = [
# "rs12980275",
# "rs8109886",
# "rs4803222",
# "rs111531283",
# "rs8099917",
# "rs7248668",
# "rs35963157",
# "rs955155",
# "rs8101517",
# "rs6508852",
# ]
# data = {"pop": "CEU", "r2_d": "d", "token": "9e88a9311435", "snps": "\n".join(rs)}
# r = requests.get(url, params=data)
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtGui import *
import sys
class HaploWidget(QWidget):
    """Renders a pairwise LD (linkage disequilibrium) matrix from the
    LDlink "ldmatrix" REST endpoint as a 45°-rotated triangle plot,
    Haploview-style, with click highlighting."""
    def __init__(self, parent=None):
        super().__init__(parent)
        # content = b"""
        # RS_number\trs148890987\trs3\trs4
        # rs148890987\t1.0\t0.707\t0.707
        # rs3\t0.707\t1.0\t1.0
        # rs4\t0.707\t1.0\t1.0
        # """
        # f = io.StringIO(content.decode("utf-8"))
        # self.df = pd.read_csv(f, sep="\t", index_col=0)
        self.size = 30    # edge length (px) of one matrix cell
        self.mouse = None # last click position in widget coordinates
        self.load()
    def load(self):
        """Fetch the LD matrix for a hard-coded SNP list from LDlink into self.df."""
        url = "https://ldlink.nci.nih.gov/LDlinkRest/ldmatrix"
        rs = [
            "rs12980275",
            "rs8109886",
            "rs4803222",
            "rs111531283",
            "rs8099917",
            "rs7248668",
            "rs35963157",
            "rs955155",
            "rs8101517",
            "rs6508852",
        ]
        data = {
            "pop": "CEU",
            "r2_d": "d",
            "token": "FILL THE TOKEN",
            "snps": "\n".join(rs),
        }
        r = requests.get(url, params=data)
        # The endpoint returns a tab-separated matrix with SNP ids as header/index.
        f = io.StringIO(r.content.decode("utf-8"))
        self.df = pd.read_csv(f, sep="\t", index_col=0)
        # self.df = pd.read_csv("fake.csv")
    def paintEvent(self, event):
        """Draw the rotated cell grid, highlighting the clicked cell, then the labels."""
        painter = QPainter(self)
        painter.setBrush(QBrush(QColor("white")))
        painter.drawRect(self.rect())
        painter.setRenderHint(QPainter.HighQualityAntialiasing)
        item_count = len(self.df)
        bounding_rect = QRect(0, 0, item_count * self.size, item_count * self.size)
        # Rotate the grid -135° around a point below the widget center so the
        # triangle hangs beneath the horizontal row of SNP labels.
        transform = QTransform()
        transform.translate(self.rect().center().x(), self.rect().center().y() + 100)
        transform.rotate(-135)
        # transform.reset()
        painter.save()
        painter.setTransform(transform)
        if self.mouse:
            # Map the click back into (unrotated) grid coordinates for hit testing.
            mouse = transform.inverted()[0].map(self.mouse)
        # painter.drawRect(bounding_rect)
        for j in range(0, item_count):
            # Each row is one cell shorter: only the lower triangle is drawn.
            for i in range(0, item_count - j):
                rect = QRect(0, 0, self.size, self.size)
                x = i * self.size
                y = j * self.size
                rect.moveLeft(x)
                rect.moveTop(y)
                color = "red"
                if self.mouse:
                    if rect.contains(mouse):
                        color = "blue"  # highlight the clicked cell
                painter.setBrush(QBrush(QColor(color)))
                painter.setPen(QPen(QColor("lightgray")))
                painter.drawRect(rect)
        painter.restore()
        painter.setPen(QPen(QColor("black")))
        # Draw the SNP names vertically along the top edge; sqrt(2) spacing
        # matches the projected width of the rotated cells.
        y = -self.size * math.sqrt(2)
        for name in self.df.columns:
            painter.save()
            painter.translate(self.rect().center().x(), 20)
            painter.rotate(90)
            painter.drawText(0, y, name)
            painter.restore()
            y += self.size * math.sqrt(2)
    def mousePressEvent(self, event):
        """Remember the click position and schedule a repaint."""
        self.mouse = event.pos()
        self.update()
if __name__ == "__main__":
    # Manual smoke test: show the widget in a standalone Qt application.
    app = QApplication(sys.argv)
    w = HaploWidget()
    w.show()
    app.exec_()
| labsquare/cutevariant | poc/ldmatrix.py | ldmatrix.py | py | 3,650 | python | en | code | 86 | github-code | 13 |
15596784123 | from ..common import Proxy
from ..common import Utils
from ..common import Model
from ..common import gateway
from copy import copy
class DistributedModel(Model):
    """Context manager that deploys a model onto an Ignite cluster for
    distributed inference and closes the deployed workers on exit."""
    def __init__(self, ignite, reader, parser, instances=1, max_per_node=1):
        """Constructs a new instance of distributed model.

        Parameters
        ----------
        ignite : Ignite instance.
        reader : Model reader (list of Java-side readers, one per sub-model).
        parser : Model parser.
        instances : Number of worker instances.
        max_per_node : Max number of worker per node.
        """
        super(DistributedModel, self).__init__(None, False)
        self.ignite = ignite
        self.reader = reader
        self.parser = parser
        self.instances = instances
        self.max_per_node = max_per_node
    def __enter__(self):
        # Build one distributed model per reader via the Java-side builder.
        self.proxy = [gateway.jvm.org.apache.ignite.ml.inference.builder.IgniteDistributedModelBuilder(
            self.ignite.ignite,
            self.instances,
            self.max_per_node
        ).build(r, self.parser) for r in self.reader]
        return self
    def __exit__(self, t, v, trace):
        # Close every deployed worker; returning False never suppresses exceptions.
        if self.proxy is not None:
            for p in self.proxy:
                p.close()
            self.proxy = None
        return False
class XGBoostDistributedModel(DistributedModel):
    """Distributed inference for an XGBoost model stored on the file system."""
    def __init__(self, ignite, mdl, instances=1, max_per_node=1):
        """Constructs a new XGBoost distributed model.

        Parameters
        ----------
        ignite : Ignite instance.
        mdl : Path to the serialized XGBoost model file.
        instances : Number of worker instances.
        max_per_node : Max number of workers per node.
        """
        reader = [gateway.jvm.org.apache.ignite.ml.inference.reader.FileSystemModelReader(mdl)]
        parser = gateway.jvm.org.apache.ignite.ml.xgboost.parser.XGModelParser()
        super(XGBoostDistributedModel, self).__init__(ignite, reader, parser, instances, max_per_node)
    def predict(self, X):
        """Predict on a single sample given as a {feature_name: value} mapping."""
        keys = gateway.jvm.java.util.HashMap()
        data = []
        idx = 0
        # Map each feature name to its position in the dense value vector.
        for key in X:
            keys[key] = idx
            idx = idx + 1
            data.append(X[key])
        java_array = Utils.to_java_double_array(data)
        java_vector_utils = gateway.jvm.org.apache.ignite.ml.math.primitives.vector.VectorUtils
        X = gateway.jvm.org.apache.ignite.ml.math.primitives.vector.impl.DelegatingNamedVector(java_vector_utils.of(java_array), keys)
        res = self.proxy[0].predict(X)
        # This if handles 'future' response.
        if hasattr(res, 'get') and callable(res.get):
            res = res.get()
        return res
class IgniteDistributedModel(DistributedModel):
    """Ignite distributed model.

    Parameters
    ----------
    ignite : Ignite instance.
    mdl : Model.
    instances : Number of instances.
    max_per_node : Max number of instance per node.
    """
    def __init__(self, ignite, mdl, instances=1, max_per_node=1):
        """Constructs a new instance of Ignite distributed model.

        Parameters
        ----------
        ignite : Ignite instance.
        mdl : In-memory model; its ``proxy`` may be a single object or a list.
        instances : Number of worker instances.
        max_per_node : Max number of worker instances per ignite node.
        """
        # Wrap each underlying Java model proxy in an in-memory reader.
        if isinstance(mdl.proxy, list):
            reader = [gateway.jvm.org.apache.ignite.ml.inference.reader.InMemoryModelReader(p) for p in mdl.proxy]
        else:
            reader = [gateway.jvm.org.apache.ignite.ml.inference.reader.InMemoryModelReader(mdl.proxy)]
        parser = gateway.jvm.org.apache.ignite.ml.inference.parser.IgniteModelParser()
        super(IgniteDistributedModel, self).__init__(ignite, reader, parser, instances, max_per_node)
| gridgain/ml-python-api | python/ggml/inference/__init__.py | __init__.py | py | 3,485 | python | en | code | 6 | github-code | 13 |
14812885948 | from flask_app.config.mysqlconnection import query_db
from flask import Flask, flash, session
from flask_app.models import product
app = Flask(__name__)
class Arrangement:
    """A purchasable size/price variant of a product."""

    def __init__(self, data):
        self.id = data['id']
        self.size = data['size']
        self.price = data['price']
        self.inventory = data['inventory']
        self.sale_price = data['sale_price']
        self.product_id = data['product_id']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    def __eq__(self, other):
        # Two arrangements are the same row iff their ids match.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm they are never used in sets / as dict keys.
        return self.id == other.id

    @property
    def image(self):
        """Return the filename of the first image attached to this arrangement."""
        # NOTE(review): f-string interpolation is safe only because self.id
        # comes from the DB; keep it out of reach of user input.
        query = f"SELECT * FROM images WHERE arrangement_id = {self.id};"
        results = query_db(query)
        return results[0]['image']

    @property
    def product(self):
        """Return the parent Product row(s) for this arrangement."""
        return product.Product.select(data={'id': self.product_id})

    @classmethod
    def select(cls, type='id', data=None):
        """Return all arrangements, or those where column *type* equals data[type]."""
        if data:
            query = f"SELECT * FROM arrangements WHERE arrangements.{type} = %({type})s;"
            results = query_db(query, data)
        else:
            query = "SELECT * FROM arrangements;"
            results = query_db(query)
        return [cls(arrangement) for arrangement in results]

    @classmethod
    def select_one(cls, data):
        """Return the single arrangement with id data['id']."""
        query = "SELECT * FROM arrangements WHERE arrangements.id = %(id)s;"
        results = query_db(query, data)
        return cls(results[0])

    @classmethod
    def select_arrangement_from_product(cls, data):
        """Return the 'Deluxe' arrangement of the product named data['name'], or False."""
        query = "SELECT * FROM arrangements JOIN products on arrangements.product_id = products.id WHERE arrangements.size = 'Deluxe' AND products.name = %(name)s"
        results = query_db(query, data)
        if results:
            return cls(results[0])
        return False

    @classmethod
    def create_arrangement(cls, data):
        """Insert a new arrangement row."""
        query = "INSERT INTO arrangements (size, price, inventory, sale_price, product_id) VALUES (%(size)s, %(price)s, %(inventory)s, %(sale_price)s, %(product_id)s);"
        return query_db(query, data)

    @classmethod
    def edit_arrangement(cls, data):
        """Update an existing arrangement row.

        BUGFIX: the SET clause previously collapsed inventory, sale_price and
        product_id into one malformed placeholder
        (``inventory = %(inventory, sale_price, product_id)s``), producing
        invalid SQL.
        """
        query = (
            "UPDATE arrangements SET size = %(size)s, price = %(price)s, "
            "inventory = %(inventory)s, sale_price = %(sale_price)s, "
            "product_id = %(product_id)s WHERE arrangements.id = %(id)s;"
        )
        return query_db(query, data)

    @classmethod
    def delete_arrangement(cls, data):
        """Delete the arrangement with id data['id']."""
        query = "DELETE FROM arrangements WHERE arrangements.id = %(id)s;"
        return query_db(query, data)
3107988376 |
"""
Write a program that asks the user to enter the width and length of a room.
Once these values have been read, your program should compute and display the area of the room.
The length and the width will be entered as floating-point numbers.
Include units in your prompt and output message; either feet or meters,
depending on which unit you are more comfortable working with.
"""
# Acquisition of DATA entered by the USER
width = float(input("Enter the WIDTH of your room (meters): "))
length = float(input("Enter also the LENGTH (meters): "))
# Computing the ROOM AREA (square meters)
area = width * length
# Displaying the ROOM AREA
print(f'The AREA of your ROOM is {area} square meters.')
| aleattene/python-workbook | chap_01/exe_003_area_room.py | exe_003_area_room.py | py | 700 | python | en | code | 1 | github-code | 13 |
4338114615 | from transformers import BertTokenizer
from transformers import AdamW
from torch.utils.data import DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
import pytorch_lightning as pl
import pickle
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.adam import DeepSpeedCPUAdam
import torch
from nltk.corpus import stopwords
from torch.nn import CosineSimilarity
import string
import numpy as np
import os
from nltk.corpus import wordnet
from transformers import BertForMaskedLM
class BertBase(pl.LightningModule):
    """LightningModule base class: wires a BERT tokenizer to pickled
    train/val/test splits and provides optimizer, LR schedule and
    dataloaders for fine-tuning."""
    def __init__(self, args):
        """Read hyper-parameters from *args*, build the tokenizer and load
        the pickled dataset splits from ``dataset/<name>/``."""
        super(BertBase, self).__init__()
        self.pre_trained_model = args.pre_trained_model
        self.n_classes = args.n_classes
        self.max_length = args.max_length
        self.dataset = args.dataset
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.epochs = args.epochs
        self.warmup_proportion = args.warmup_proportion
        self.ngpu = args.ngpu
        self.train_size = args.train_size
        self.val_size = args.val_size
        self.test_size = args.test_size
        self.tokenizer = BertTokenizer.from_pretrained(self.pre_trained_model, do_lower_case=True,
                                                       model_max_length=self.max_length)
        dataset_file_name = 'dataset/' + self.dataset + '/test_dataset.pkl'
        with open(dataset_file_name, 'rb') as datasetFile:
            self.test_dataset = pickle.load(datasetFile)
        dataset_file_name = 'dataset/' + self.dataset + '/val_dataset.pkl'
        with open(dataset_file_name, 'rb') as datasetFile:
            self.val_dataset = pickle.load(datasetFile)
        dataset_file_name = 'dataset/' + self.dataset + '/train_dataset.pkl'
        with open(dataset_file_name, 'rb') as datasetFile:
            self.train_dataset = pickle.load(datasetFile)
        # Keep only the requested fraction of each split.
        self.train_dataset = self.train_dataset.select(range(int(len(self.train_dataset)*self.train_size)))
        self.val_dataset = self.val_dataset.select(range(int(len(self.val_dataset) * self.val_size)))
        self.test_dataset = self.test_dataset.select(range(int(len(self.test_dataset) * self.test_size)))
        print("Train Samples: {}\nVal Samples: {}\nTest Samples: {}".format(
            len(self.train_dataset), len(self.val_dataset), len(self.test_dataset)))
    def _collate_fn(self, batch):
        """Tokenize a batch of {'label', 'text'} samples into padded tensors."""
        label = torch.tensor([item['label'] for item in batch])
        text = [item['text'] for item in batch]
        outputs = self.tokenizer(text, truncation=True, padding=True)
        input_ids = torch.tensor(outputs["input_ids"])
        attention_mask = torch.tensor(outputs["attention_mask"])
        return {'label': label, 'text': text, 'input_ids': input_ids, 'attention_mask': attention_mask}
    def configure_optimizers(self):
        """Return FusedAdam (weight decay on all params except bias/LayerNorm)
        plus a linear warmup/decay schedule stepped every batch."""
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": 0.01,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = FusedAdam(params=optimizer_grouped_parameters, lr=self.learning_rate)
        ## optimizer = DeepSpeedCPUAdam(model_params=optimizer_grouped_parameters, lr=self.learning_rate)
        # Total optimizer steps = epochs * batches per GPU.
        num_training_steps = self.epochs * int(len(self.train_dataset) / self.batch_size / self.ngpu)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=int(self.warmup_proportion * num_training_steps),
            num_training_steps=num_training_steps,
        )
        return [optimizer], [{'scheduler': scheduler, 'interval': 'step', 'frequency': 1}]
    def train_dataloader(self):
        """Shuffled training dataloader using the shared collate function."""
        train_dataloader = DataLoader(
            dataset=self.train_dataset,
            shuffle=True,
            batch_size=self.batch_size,
            collate_fn=self._collate_fn
        )
        return train_dataloader
    def val_dataloader(self):
        """Deterministic (unshuffled) validation dataloader."""
        val_dataloader = DataLoader(
            dataset=self.val_dataset,
            shuffle=False,
            batch_size=self.batch_size,
            collate_fn=self._collate_fn
        )
        return val_dataloader
    def test_dataloader(self):
        """Deterministic test dataloader.

        NOTE(review): large datasets are truncated to a fixed prefix
        (imdb: 1000, dbpedia: 5000, yelp_polarity: 2000) — presumably to
        bound evaluation/attack time; confirm these caps are intended.
        """
        if self.dataset == 'imdb':
            test_dataloader = DataLoader(
                dataset=self.test_dataset.select(range(1000)),
                shuffle=False,
                batch_size=self.batch_size,
                collate_fn=self._collate_fn
            )
            return test_dataloader
        if self.dataset == 'dbpedia':
            test_dataloader = DataLoader(
                dataset=self.test_dataset.select(range(5000)),
                shuffle=False,
                batch_size=self.batch_size,
                collate_fn=self._collate_fn
            )
            return test_dataloader
        if self.dataset == 'yelp_polarity':
            test_dataloader = DataLoader(
                dataset=self.test_dataset.select(range(2000)),
                shuffle=False,
                batch_size=self.batch_size,
                collate_fn=self._collate_fn
            )
            return test_dataloader
        test_dataloader = DataLoader(
            dataset=self.test_dataset,
            shuffle=False,
            batch_size=self.batch_size,
            collate_fn=self._collate_fn
        )
        return test_dataloader
filter_words = ['a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost',
'alone', 'along', 'already', 'also', 'although', 'am', 'among', 'amongst', 'an', 'and', 'another',
'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', "aren't", 'around', 'as',
'at', 'back', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides',
'between', 'beyond', 'both', 'but', 'by', 'can', 'cannot', 'could', 'couldn', "couldn't", 'd', 'didn',
"didn't", 'doesn', "doesn't", 'don', "don't", 'down', 'due', 'during', 'either', 'else', 'elsewhere',
'empty', 'enough', 'even', 'ever', 'everyone', 'everything', 'everywhere', 'except', 'first', 'for',
'former', 'formerly', 'from', 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'he', 'hence',
'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
'how', 'however', 'hundred', 'i', 'if', 'in', 'indeed', 'into', 'is', 'isn', "isn't", 'it', "it's",
'its', 'itself', 'just', 'latter', 'latterly', 'least', 'll', 'may', 'me', 'meanwhile', 'mightn',
"mightn't", 'mine', 'more', 'moreover', 'most', 'mostly', 'must', 'mustn', "mustn't", 'my', 'myself',
'namely', 'needn', "needn't", 'neither', 'never', 'nevertheless', 'next', 'no', 'nobody', 'none',
'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'on', 'once', 'one', 'only',
'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'per',
'please', 's', 'same', 'shan', "shan't", 'she', "she's", "should've", 'shouldn', "shouldn't", 'somehow',
'something', 'sometime', 'somewhere', 'such', 't', 'than', 'that', "that'll", 'the', 'their', 'theirs',
'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
'thereupon', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'thus', 'to', 'too',
'toward', 'towards', 'under', 'unless', 'until', 'up', 'upon', 'used', 've', 'was', 'wasn', "wasn't",
'we', 'were', 'weren', "weren't", 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while',
'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'with', 'within', 'without', 'won',
"won't", 'would', 'wouldn', "wouldn't", 'y', 'yet', 'you', "you'd", "you'll", "you're", "you've",
'your', 'yours', 'yourself', 'yourselves']
filter_words = set(filter_words)
class BaseAttack:
    """Common machinery for word-substitution adversarial attacks:
    gradient-based word importance scoring, synonym candidate generation
    (WordNet or counter-fitted embedding cosine similarity) and
    word-to-sub-word bookkeeping for the tokenizer."""
    def __init__(self, args, tokenizer):
        self.stopwords = set(stopwords.words('english'))
        self.model = None
        self.cosine_similarity = CosineSimilarity(dim=1, eps=1e-6)
        self.tokenizer = tokenizer
        self.num_classes = args.n_classes
        self.device = None
        # Masked LM used by subclasses to propose context-aware substitutes.
        self.msk_model = BertForMaskedLM.from_pretrained('bert-base-uncased')
        self.msk_model.eval()
        # Per-sample map: word index -> indices of its sub-word tokens.
        self.words_to_sub_words = None
        self.max_length = args.max_length
        self.n_candidates = args.n_candidates
        self.max_loops = args.max_loops
        self.sim_thred = args.sim_thred
        self.splits = args.splits
        self.word2id = self.tokenizer.get_vocab()
        self.id2word = {v: k for k, v in self.word2id.items()}
        self.cos_sim = None
        self.sim_word2id = None
        self.sim_id2word = None
        self.synonym = args.synonym
        self.cos_sim_dict = None
        if args.cos_sim:
            self.init_matrix(args.embedding_path, args.sim_path)
    def get_important_scores(self, grads, words_to_sub_words):
        """Score each word by the L1 norm of the summed gradients of its sub-words."""
        index_scores = [0.0] * len(words_to_sub_words)
        for i in range(len(words_to_sub_words)):
            matched_tokens = words_to_sub_words[i]
            agg_grad = np.sum(grads[matched_tokens], axis=0)
            index_scores[i] = np.linalg.norm(agg_grad, ord=1)
        return index_scores
    def init_matrix(self, embedding_path, sim_path):
        """Load counter-fitted word embeddings, build (or load a cached)
        cosine-similarity matrix, and precompute per-word synonym sets."""
        embeddings = []
        self.sim_id2word = {}
        self.sim_word2id = {}
        ## construct cosine similarity matrix
        with open(embedding_path, 'r') as ifile:
            for line in ifile:
                embedding = [float(num) for num in line.strip().split()[1:]]
                embeddings.append(embedding)
                word = line.split()[0]
                if word not in self.sim_id2word:
                    self.sim_id2word[len(self.sim_id2word)] = word
                    self.sim_word2id[word] = len(self.sim_id2word) - 1
        if os.path.exists(sim_path):
            # Reuse the cached similarity matrix instead of recomputing it.
            self.cos_sim = np.load(sim_path)
        else:
            embeddings = np.array(embeddings)
            norm = np.linalg.norm(embeddings, axis=1, keepdims=True)
            embeddings = np.asarray(embeddings / norm, "float32")
            self.cos_sim = np.dot(embeddings, embeddings.T)
        ## construct top-k similar words for each word
        if self.synonym == 'cos_sim':
            self.cos_sim_dict = {}
            for idx, word in self.sim_id2word.items():
                candidates = set()
                indices = torch.topk(torch.tensor(self.cos_sim[idx]), k=self.n_candidates).indices
                for i in indices:
                    i = int(i)
                    # topk is sorted, so the first below-threshold hit ends the scan.
                    if self.cos_sim[idx][i] < self.sim_thred:
                        break
                    if i == idx:
                        continue
                    candidates.add(self.sim_id2word[i])
                if len(candidates) == 0:
                    candidates = [word]
                self.cos_sim_dict[idx] = candidates
    def check_word(self, word):
        """Return True for words that must never be attacked (special tokens,
        stopwords, punctuation, filter-list words)."""
        return word == '[PAD]' or word == '[UNK]' or word == '[CLS]' or \
               word == '[SEP]' or word in self.stopwords or word in string.punctuation or \
               word in filter_words or word in '...' or word == '[MASK]'
    def get_synonym_by_cos(self, word):
        """Return the precomputed cosine-similarity synonyms for *word* (or [word])."""
        if not (word in self.sim_word2id):
            return [word]
        idx = self.sim_word2id[word]
        return self.cos_sim_dict[idx]
    def get_synonym(self, word):
        """Return WordNet lemma synonyms for *word* (and for its first
        sub-token when the tokenizer splits the word)."""
        candidates = set()
        for syn in wordnet.synsets(word):
            for l in syn.lemmas():
                w = l.name()
                if self.check_word(w):
                    continue
                if w in candidates:
                    continue
                candidates.add(w)
        # Also query WordNet with the first sub-word when the word is split.
        if self.tokenizer.tokenize(word)[0] != word:
            for syn in wordnet.synsets(self.tokenizer.tokenize(word)[0]):
                for l in syn.lemmas():
                    w = l.name()
                    if self.check_word(w):
                        continue
                    if w in candidates:
                        continue
                    candidates.add(w)
        if len(candidates) == 0:
            candidates = [word]
        return candidates
    def calc_words_to_sub_words(self, words, batch_size):
        """Build, per sample, the word-index -> sub-word-positions mapping,
        truncated at max_length - 2 to reserve room for [CLS]/[SEP]."""
        self.words_to_sub_words = []
        for i in range(batch_size):
            position = 0
            self.words_to_sub_words.append({})
            for idx in range(len(words[i])):
                length = len(self.tokenizer.tokenize(words[i][idx]))
                if position + length > self.max_length - 2:
                    break
                self.words_to_sub_words[i][idx] = np.arange(position, position + length)
                position += length
    def get_inputs(self, sentences):
        """Tokenize *sentences* and return (input_ids, attention_mask) on self.device."""
        outputs = self.tokenizer(sentences, truncation=True, padding=True)
        input_ids = outputs["input_ids"]
        attention_mask = outputs["attention_mask"]
        return torch.tensor(input_ids).to(self.device), torch.tensor(attention_mask).to(self.device)
## TODO: it is better to check USE score for each attack sample
# import tensorflow.compat.v1 as tf
# import tensorflow_hub as hub
# tf.disable_v2_behavior()
# class USE(object):
# def __init__(self):
# super(USE, self).__init__()
# os.environ['TFHUB_CACHE_DIR'] = 'tmp'
# module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
# self.embed = hub.Module(module_url)
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# self.sess = tf.Session(config=config)
# self.build_graph()
# self.sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
# def build_graph(self):
# self.sts_input1 = tf.placeholder(tf.string, shape=(None))
# self.sts_input2 = tf.placeholder(tf.string, shape=(None))
# sts_encode1 = tf.nn.l2_normalize(self.embed(self.sts_input1), axis=1)
# sts_encode2 = tf.nn.l2_normalize(self.embed(self.sts_input2), axis=1)
# self.cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)
# clip_cosine_similarities = tf.clip_by_value(self.cosine_similarities, -1.0, 1.0)
# self.sim_scores = 1.0 - tf.acos(clip_cosine_similarities)
# def semantic_sim(self, sents1, sents2):
# scores = self.sess.run(
# [self.sim_scores],
# feed_dict={
# self.sts_input1: sents1,
# self.sts_input2: sents2,
# })
# return scores[0]
| LotusDYH/ssl_robust | utils.py | utils.py | py | 15,227 | python | en | code | 2 | github-code | 13 |
28529029252 | """
Script for converting symlinks to normal files
"""
import argparse
import pathlib
def main():
    """
    Parse command-line arguments and convert every symlink under --path
    into a hard link to the symlink's resolved target.
    """
    # pylint: disable=too-many-locals, too-many-statements
    parser = argparse.ArgumentParser(description='Convert symlink')
    # Required
    parser.add_argument('-p', '--path', type=str, help='path to folder with files')
    args = parser.parse_args()
    materialize_symlinks(pathlib.Path(args.path))


def materialize_symlinks(path):
    """Replace each symlink below *path* with a hard link to its target.

    Uses os.link because pathlib.Path.link_to was deprecated in Python 3.10
    (confusing argument order) and removed in Python 3.12.
    """
    import os

    for file in path.rglob("*"):
        if file.is_symlink():
            print(file)
            orig_file = file.resolve()   # final target of the link chain
            file.unlink()                # drop the symlink name
            os.link(orig_file, file)     # recreate the name as a hard link
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| ag14774/symlink2file | symlink2file.py | symlink2file.py | py | 652 | python | en | code | 0 | github-code | 13 |
9391823092 | # coding: utf8
#
# version: 2020-05-09.10-30
#
# Создатель Permyak_Logy#8606
# По заказу от Daniillazarev#0202 685355179570233344
import discord
from datetime import datetime
import json
import random
import re
config_file_name = 'config.json'
class Obj:
    """Empty placeholder class used as a simple attribute container."""
    pass
class FastEmoji:
    """Frequently used unicode reaction emoji, pre-wrapped as PartialEmoji."""
    yes = discord.PartialEmoji(animated=False, name='✅', id=None)
    not_ = discord.PartialEmoji(animated=False, name='❌', id=None)  # trailing underscore: `not` is a keyword
    close = discord.PartialEmoji(animated=False, name='⏹️', id=None)
class FrostyBot(discord.Client):
config_data = {}
    async def on_ready(self):
        """Print a startup report, load the config and DM the report to the owner."""
        guild_list_name = "\n".join([f"\t{guild}" for guild in self.guilds])
        # Startup banner (Russian): "Bot <mention> of class <name> was
        # authorized and is ready; connected guilds: ..."
        message = (
            f'```python\n'
            f'{datetime.now()} Бот {self.user.mention} класса {self.__class__.__name__}'
            f' бы авторизован и готов к работе\n'
            f'Подключённые сервера:\n{guild_list_name}\n'
            f'```'
        )
        print(message)
        await self.load_config()
        # 685355179570233344 is the bot owner's user id (see file header).
        await self.get_user(685355179570233344).send(message)
    async def load_config(self):
        """Load config.json; on failure restore the previous config and log out.

        On success resolves the watched guild and the log channel from the
        configured ids; if either cannot be resolved the bot shuts down.
        """
        data = self.config_data  # keep the current config so a failed load can restore it
        try:
            with open(config_file_name, encoding='utf8') as file:
                self.config_data = json.load(file)
        except Exception as E:
            print(E)
            self.config_data = data
            await self.logout()
        else:
            # The guild being monitored.
            self.checked_guild: discord.Guild = self.get_guild(
                self.config_data['checked_guild']
            )
            self.log_channel: discord.TextChannel = self.get_channel(
                self.config_data['log_channel']
            )
            if not self.checked_guild or not self.log_channel:
                print(self.checked_guild, self.log_channel)
                await self.logout()
async def save_config(self):
with open(config_file_name, encoding='utf8') as file:
data = file.read()
try:
with open(config_file_name, mode='w', encoding='utf8') as file:
file.write(
str(self.config_data
).replace("'", '"'
).replace('None', 'null'
).replace('True', 'true'
).replace('False', 'false')
)
except Exception as E:
print(E)
with open(config_file_name, mode='w', encoding='utf8') as file:
file.write(data)
    async def on_message(self, message: discord.Message):
        """Entry point for incoming messages; ignores the bot's own messages."""
        if self.user == message.author:
            return
        # Delegate to the application-form processor; True means it handled it.
        if await self.application_processor_on_message(message):
            return True
async def application_processor_on_message(self, message: discord.Message):
channel_applications = self.get_channel(self.config_data.get('channel_applications', 0))
channel_moderation_applications = self.get_channel(self.config_data.get('channel_moderation_applications', 0))
if not channel_applications or not channel_moderation_applications:
return False
if message.channel.id == channel_applications.id:
delay = 10
if len(message.content) <= 20:
embed = discord.Embed(
title="**ОТКАЗАНО В ОБРАБОТКЕ**",
colour=discord.Colour.from_rgb(255, 78, 78),
description=(
'Ваше сообщение слишком короткое\n'
'Не стесняйтесь, распишите заявку по полной!'
)
)
message_bot: discord.Message = await channel_applications.send(embed=embed)
await message_bot.delete(delay=delay)
await message.delete(delay=delay)
return True
if len(message.content) >= 1500:
embed = discord.Embed(
title="**ОТКАЗАНО В ОБРАБОТКЕ**",
colour=discord.Colour.from_rgb(255, 78, 78),
description=(
'Ваше сообщение слишком большое\n'
'Постарайтесь сократить сообщение!'
)
)
message_bot: discord.Message = await channel_applications.send(embed=embed)
await message_bot.delete(delay=delay)
await message.delete(delay=delay)
return True
if re.search(r'https://[^(vk.com/)]', message.content.lower()):
embed = discord.Embed(
title="**ОТКАЗАНО В ОБРАБОТКЕ**",
colour=discord.Colour.from_rgb(255, 78, 78),
description=(
'Имеется запрещённая ссылка'
)
)
message_bot: discord.Message = await channel_applications.send(embed=embed)
await message_bot.delete(delay=delay)
await message.delete(delay=delay)
return True
if not re.search(r'1\..*\n2\..*\n3\..*\n4\..*\n5\..*\n6\..*', message.content):
embed = discord.Embed(
title="**ОТКАЗАНО В ОБРАБОТКЕ**",
colour=discord.Colour.from_rgb(255, 78, 78),
description=(
'Вы написали заявку не по форме\n'
'Прочитайте закрепленное сообщение, что посмотреть форму заявки!\n'
'Соблюдайте каждый знак, возможно вы написали пункты 1,2,3 без точки!'
)
)
message_bot: discord.Message = await channel_applications.send(embed=embed)
await message_bot.delete(delay=delay)
await message.delete(delay=delay)
return True
author: discord.Member = message.author
# Сообщение о заявке
changed_message_user = message.content.replace('\n', '\n> ')
embed = discord.Embed(
title="**НОВАЯ ЗАЯВКА НА ПОСТ АДМИНИСТРАЦИИ**",
colour=discord.Colour.from_rgb(117, 117, 255),
timestamp=datetime.now(),
description=(
''
)
)
embed.add_field(name='Автор',
value=f'{author.mention}', inline=True)
embed.add_field(name='ID Автора',
value=f'{author.id}', inline=True)
embed.add_field(name='Дата регистрации',
value=f'{author.created_at.date()}', inline=True)
embed.add_field(name='Дата присоединения на сервер',
value=f'{author.joined_at.date()}')
embed.add_field(name='Время с момента присоединения на сервер',
value=f'{(datetime.now() - author.joined_at).days} days')
embed.add_field(name='Выберите действие',
value=':white_check_mark: Принять \n:x: Отклонить \n:stop_button: Закрыть')
embed.add_field(name='**Текст сообщения**',
value=f'> {changed_message_user}')
message_bot: discord.Message = await channel_moderation_applications.send(embed=embed)
# Добавление реакций
await message_bot.add_reaction(FastEmoji.yes)
await message_bot.add_reaction(FastEmoji.not_)
await message_bot.add_reaction(FastEmoji.close)
# Формирование заявки
application = dict()
application['id'] = random.randint(0, 2 ** 64)
application['author'] = message.author.id
application['message_id'] = message_bot.id
# Занесение заявки в память
if not self.config_data.get('applications'):
self.config_data['applications'] = []
self.config_data['applications'].append(application)
# Ответ автору заявки
embed = discord.Embed(
title=f'**Уведомление**',
colour=discord.Colour.from_rgb(117, 117, 255),
description=(
f'{author.mention} Ваша заявка отправлена на расмотрение, ожидайте ответа!'
)
)
response = await channel_applications.send(embed=embed)
await response.delete(delay=delay)
await message.delete()
# Сохранение данных конфигурации
await self.save_config()
return True
async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):
try:
channel: discord.TextChannel = self.get_channel(payload.channel_id)
reaction = Obj()
reaction.message = await channel.fetch_message(payload.message_id)
reaction.emoji = payload.emoji
user = self.get_user(payload.user_id)
await self._on_reaction_add(reaction, user)
except discord.errors.NotFound as E:
print(E)
async def _on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
if self.user == user:
return
if await self.application_processor_on_reaction_add(reaction, user):
return True
async def application_processor_on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
channel_applications = self.get_channel(self.config_data.get('channel_applications', 0))
channel_moderation_applications = self.get_channel(self.config_data.get('channel_moderation_applications', 0))
if not channel_applications or not channel_moderation_applications:
return False
if any(map(lambda x: x['message_id'] == reaction.message.id, self.config_data.get('applications', []))):
application = list(filter(lambda x: x['message_id'] == reaction.message.id,
self.config_data['applications']))[0]
author: discord.Member = self.checked_guild.get_member(application['author'])
delay = 6 * 60 * 60
# Обработка реакций
if reaction.emoji.name == FastEmoji.yes.name:
issued_role1 = self.checked_guild.get_role(self.config_data.get('issued_role'))
if issued_role1:
await author.add_roles(issued_role1)
try:
await author.edit(nick=f'[7] {author.display_name}')
except discord.errors.Forbidden:
pass
embed = discord.Embed(
title='**Уведомление**',
timestamp=datetime.now(),
colour=discord.Colour.from_rgb(100, 255, 100),
description=(
f'{author.mention}**, ваша заявка одобрена!**\n'
f'Теперь вы являетесь администратором сервера!\n'
f'Обязательно прочтите правила для начала работы!\n'
)
)
embed.add_field(name='Рассматривал:', value=f'{user.mention}')
embed.add_field(name='Примечание:', value='Cообщение удалится через несколько часов!')
act = '**Одобрена**'
message_bot = await channel_applications.send(embed=embed)
await message_bot.delete(delay=delay)
elif reaction.emoji.name == FastEmoji.not_.name:
embed = discord.Embed(
title='**Уведомление**',
timestamp=datetime.now(),
colour=discord.Colour.from_rgb(255, 78, 78),
description=(
f'{author.mention}**, ваша заявка не прошла проверку!**\n'
f'Возможно, вы не прошли по критериям, почитайте правила подачи заявки.'
)
)
embed.add_field(name='Рассматривал:', value=f'{user.mention}')
embed.add_field(name='Примечание:', value='Cообщение удалится через несколько часов!')
act = '**Отклонена**'
message_bot = await channel_applications.send(embed=embed)
await message_bot.delete(delay=6 * 60 * 60)
elif reaction.emoji.name == FastEmoji.close.name:
act = '**Закрыта**'
else:
return False
# Логирование
embed = discord.Embed(
title='**ЛОГ ЗАЯВОК**',
timestamp=datetime.now(),
colour=discord.Colour.from_rgb(117, 117, 255),
description=(
f'Была расмотренна новая заявка.'
)
)
embed.add_field(name='Автор:', value=f'{author.mention}', inline=True)
embed.add_field(name='Рассмотрел:', value=f'{user.mention}', inline=True)
embed.add_field(name='Вердикт:', value=act, inline=True)
await self.log_channel.send(embed=embed)
# Очистка заявки
# await reaction.message.delete()
# Очистка заявки из памяти
self.config_data['applications'].remove(application)
# Сохранение данных конфигурации
await self.save_config()
# Сообщение об успехе
message_bot: discord.Message = await channel_moderation_applications.send(embed=discord.Embed(
title='**Уведомление**', timestamp=datetime.now(), colour=discord.Colour.from_rgb(100, 255, 100),
description='Успешно!'
))
await message_bot.delete(delay=5)
return True
if __name__ == '__main__':
    # Read the bot token with a context manager so the file handle is closed
    # (the original left an unclosed file object behind).
    with open('token.txt') as token_file:
        token = token_file.read()
    FrostyBot().run(token)
| daniillazarev2301/- | main.py | main.py | py | 15,429 | python | ru | code | 0 | github-code | 13 |
def input_to_matrix(rows, columns):
    """Read *rows* lines from stdin and return them as a matrix of characters.

    ``columns`` is kept for call-site compatibility but is not needed: each
    returned row simply contains every character of the corresponding line.
    """
    # Idiomatic replacement for the original char-by-char append loop
    # (this also removes dataset junk that was fused onto the def line).
    return [list(input()) for _ in range(rows)]
def count_all_moves(matrix):
    """For every knight ('K') on the board, count how many knights it attacks.

    Returns a dim x dim grid of attack counts; non-knight squares stay 0.
    """
    dim = len(matrix)
    attack_counts = [[0] * dim for _ in range(dim)]
    for row in range(dim):
        for col in range(dim):
            if matrix[row][col] != 'K':
                continue
            for target_row, target_col in available_moves(dim, row, col):
                if matrix[target_row][target_col] == 'K':
                    attack_counts[row][col] += 1
    return attack_counts
def available_moves(dim, row, column):
    """Return the knight moves from (row, column) that stay on a dim x dim board.

    Moves come back as (row, column) tuples in a fixed deterministic order.
    """
    # Knight offsets in the same order as the original paired lists.
    offsets = ((-2, -1), (-2, 1), (-1, -2), (-1, 2),
               (1, -2), (1, 2), (2, -1), (2, 1))
    reachable = []
    for delta_row, delta_col in offsets:
        target_row, target_col = row + delta_row, column + delta_col
        if 0 <= target_row < dim and 0 <= target_col < dim:
            reachable.append((target_row, target_col))
    return reachable
def max_removal(dim, moves):
    """Locate the square with the highest attack count.

    Ties go to the first square in row-major order; returns
    (count, row, column), or (0, 0, 0) when no count is positive.
    """
    best_count, best_row, best_col = 0, 0, 0
    for row in range(dim):
        for col in range(dim):
            if moves[row][col] > best_count:
                best_count, best_row, best_col = moves[row][col], row, col
    return best_count, best_row, best_col
def knight_game_2(matrix, moves, number_moves):
    """Greedily remove the most-attacking knight until no knight attacks another.

    Mutates ``matrix`` (removed knights become '0') and ``moves`` in place;
    returns the total number of knights removed.  Iterative form of the
    original tail recursion — the mutation order is identical.
    """
    dim = len(moves)
    while True:
        attack_count, row, col = max_removal(dim, moves)
        if attack_count <= 0:
            return number_moves
        # Remove the chosen knight and account for the removal.
        moves[row][col] = 0
        matrix[row][col] = '0'
        number_moves += 1
        # Every knight that was attacking this square loses one target.
        for target_row, target_col in available_moves(dim, row, col):
            if moves[target_row][target_col] > 0:
                moves[target_row][target_col] -= 1
# Driver: read the board size and the board, then greedily remove knights
# until none attacks another and print how many were removed.
# (The module-level search_knight_r / search_knight_c lists duplicated the
# offsets inside available_moves and were never read, so they are removed.)
n = int(input())
matrix = input_to_matrix(n, n)
moves = count_all_moves(matrix)
num_removed_knights = knight_game_2(matrix, moves, 0)
print(num_removed_knights)
| vbukovska/SoftUni | Python_advanced/Ex3_multidimentional_lists/knight_game.py | knight_game.py | py | 2,516 | python | en | code | 0 | github-code | 13 |
10422033917 | import cv2
import os
from pathlib import Path
import torch
from .model_download import *
from .PlaneRecNet.planerecnet import PlaneRecNet
from .PlaneRecNet.data.augmentations import FastBaseTransform
from .PlaneRecNet.planerecnet import PlaneRecNet
from .PlaneRecNet.data.config import set_cfg, cfg, COLORS
from .PlaneRecNet.utils import timer
from .PlaneRecNet.models.functions.funcs import calc_size_preserve_ar, pad_even_divided
from collections import defaultdict
import numpy as np
import easydict
import matplotlib.pyplot as plt
from app.library import model_download
# Per-device cache of colour tensors keyed by palette index; filled lazily
# by the get_color() helper inside display_on_frame().
color_cache = defaultdict(lambda: {})
def custom_args(argv=None):
    """Populate the module-level ``args`` namespace with inference defaults.

    ``argv`` is accepted for CLI-style compatibility but is currently unused.
    Paths are resolved relative to the current working directory.
    """
    global args
    repo_root = Path(os.getcwd()).as_posix()
    trained_model_path = repo_root + '/' + 'model' + '/' + 'PlaneRecNet_101_9_125000.pth'
    sample_image_path = repo_root + '/' + 'images' + '/' + '220803105932_color.png'
    # NOTE: the original dict literal listed "trained_model" twice (first as
    # None, then as the checkpoint path); only the last value survived, so the
    # duplicate None entry is dropped here.
    args = easydict.EasyDict({
        "config": 'PlaneRecNet_101_config',
        "image": sample_image_path,
        "trained_model": trained_model_path,
        "max_img": 0,
        "top_k": 100,
        "nms_mode": 'matrix',
        "score_threshold": 0.1,
        "depth_shift": 512,
        "no_mask": False,
        "no_box": True,
        "no_text": True,
    })
def display_on_frame(result, frame, mask_alpha=0.5, fps_str='', no_mask=False, no_box=False, no_text=False):
    """Blend predicted plane masks/labels onto *frame* and extract per-plane stats.

    Returns (blended_bgr_uint8, depth_map, detections) where *detections* is a
    flat numpy array of normalized (x, y, depth) triples, one per drawn mask
    (empty when masks are not drawn).  ``fps_str`` is accepted but unused.
    Assumes *result* is one PlaneRecNet output dict and *frame* a float GPU
    tensor in 0..255 — TODO confirm against the caller (inference_image).
    """
    draw_point=True
    frame_gpu = frame / 255.0
    h, w, _ = frame.shape
    pred_scores = result["pred_scores"]
    pred_depth = result["pred_depth"].squeeze()
    if pred_scores is None:
        # No detections at all: return the frame untouched (only two values
        # here — callers unpacking three rely on pred_scores being non-None).
        return frame.byte().cpu().numpy(), pred_depth.cpu().numpy()
    pred_masks = result["pred_masks"].unsqueeze(-1)
    pred_boxes = result["pred_boxes"]
    pred_classes = result["pred_classes"]
    num_dets = pred_scores.size()[0]
    def get_color(j, on_gpu=None):
        # Palette colour for detection j, cached per device as a tensor.
        global color_cache
        color_idx = (j * 5) % len(COLORS)
        if on_gpu is not None and color_idx in color_cache[on_gpu]:
            return color_cache[on_gpu][color_idx]
        else:
            color = COLORS[color_idx]
            # RGB -> BGR for OpenCV drawing.
            color = (color[2], color[1], color[0])
            if on_gpu is not None:
                color = torch.Tensor(color).to(on_gpu).float() / 255.
                color_cache[on_gpu][color_idx] = color
            return color
    if not no_mask and num_dets>0:
        # Prepare the RGB images for each mask given their color (size [num_dets, h, w, 1])
        colors = torch.cat([get_color(j, on_gpu=frame_gpu.device.index).view(
            1, 1, 1, 3) for j in range(num_dets)], dim=0)
        masks_color = pred_masks.repeat(1, 1, 1, 3) * colors * mask_alpha
        # This is 1 everywhere except for 1-mask_alpha where the mask is
        inv_alph_masks = pred_masks * (-mask_alpha) + 1
        np_mask = -inv_alph_masks[:,:,:,0].byte().cpu().numpy()
        for j in range(num_dets):
            frame_gpu = frame_gpu * inv_alph_masks[j] + masks_color[j]
        frame_numpy = (frame_gpu * 255).byte().cpu().numpy()
        save_list_add = np.array([])
        for j in range(num_dets):
            color = get_color(j)
            masks_color_np = pred_masks[j].cpu().squeeze().numpy().astype(np.uint8)
            contours, hierarchy = cv2.findContours(masks_color_np, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(frame_numpy,contours,-1,(255,255,255),1)
            if draw_point:
                # Centroid of the mask (mean of masked pixel coordinates).
                count_zero = np.count_nonzero(np_mask[j] == 0)
                avg_x = round(np.mean(np.where(np_mask[j] == 0)[1]))
                avg_y = round(np.mean(np.where(np_mask[j] == 0)[0]))
                cv2.circle(frame_numpy, (avg_x,avg_y), 5, (0,255, 0), -1, cv2.LINE_AA)
                text_str = 'order:%s d:%.2fm %d x:%d y:%d' % (j, pred_depth.cpu().numpy()[avg_y, avg_x],count_zero,avg_x ,avg_y )
                text_str_pix = 'pix:%d'%(count_zero)
                # print('result%d'%(j),text_str)
                font_face = cv2.FONT_HERSHEY_DUPLEX
                font_scale = 0.6
                font_thickness = 1
                text_w, text_h = cv2.getTextSize(text_str_pix, font_face, font_scale, font_thickness)[0]
                text_pt = (avg_x, avg_y + text_h + 1)
                text_color = [255, 255, 255]
                # Five stacked caption lines: order, depth, pixel count, x, y.
                cv2.rectangle(frame_numpy, (avg_x, avg_y),(avg_x + text_w, avg_y + text_h*5 + 4), color, -1)
                cv2.putText(frame_numpy, 'ord:%s'%(j), (avg_x, avg_y + 1*text_h + 1), font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
                cv2.putText(frame_numpy, 'dis:%.2fm'%(pred_depth.cpu().numpy()[avg_y, avg_x]), (avg_x, avg_y + 2*text_h + 1), font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
                cv2.putText(frame_numpy, 'pix:%d'%(count_zero), (avg_x, avg_y + 3*text_h + 1), font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
                cv2.putText(frame_numpy, 'x_m:%d'%(avg_x), (avg_x, avg_y + 4*text_h + 1), font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
                cv2.putText(frame_numpy, 'y_m:%d'%(avg_y), (avg_x, avg_y + 5*text_h + 1), font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
                save_list = np.array([round(avg_x/int(frame_numpy.shape[1]),3) ,round(avg_y/int(frame_numpy.shape[0]),3), round(pred_depth.cpu().numpy()[avg_y, avg_x],3)])
                save_list_add = np.append(save_list_add,save_list)
        if not no_text or not no_box:
            for j in reversed(range(num_dets)):
                x1, y1, x2, y2 = pred_boxes[j].int().cpu().numpy()
                color = get_color(j)
                score = pred_scores[j].detach().cpu().numpy()
                if not no_box:
                    cv2.rectangle(frame_numpy, (x1, y1), (x2, y2), color, 1)
                if not no_text:
                    _class = cfg.dataset.class_names[pred_classes[j].cpu().numpy()]
                    text_str = '%s: %.2f' % (_class, score)
                    font_face = cv2.FONT_HERSHEY_DUPLEX
                    font_scale = 0.6
                    font_thickness = 1
                    text_w, text_h = cv2.getTextSize(text_str, font_face, font_scale, font_thickness)[0]
                    text_pt = (x1, y1 + text_h + 1)
                    text_color = [255, 255, 255]
                    cv2.rectangle(frame_numpy, (x1, y1),(x1 + text_w, y1 + text_h + 4), color, -1)
                    cv2.putText(frame_numpy, text_str, text_pt, font_face,font_scale, text_color, font_thickness, cv2.LINE_AA)
            # NOTE(review): this trailing block duplicates the label drawing
            # for the last loop iteration (j == 0) using variables that leak
            # out of the for-loop above — it looks like leftover copy/paste.
            if not no_text:
                score = pred_scores[j].detach().cpu().numpy()
                _class = cfg.dataset.class_names[pred_classes[j].cpu().numpy()]
                text_str = '%s: %.2f' % (_class, score)
                font_face = cv2.FONT_HERSHEY_DUPLEX
                font_scale = 0.6
                font_thickness = 1
                text_w, text_h = cv2.getTextSize(
                    text_str, font_face, font_scale, font_thickness)[0]
                text_pt = (x1, y1 + text_h + 1)
                text_color = [255, 255, 255]
                cv2.rectangle(frame_numpy, (x1, y1),
                              (x1 + text_w, y1 + text_h + 4), color, -1)
                cv2.putText(frame_numpy, text_str, text_pt, font_face,
                            font_scale, text_color, font_thickness, cv2.LINE_AA)
        return frame_numpy, pred_depth.cpu().numpy(),save_list_add
        # return frame_numpy, pred_depth.cpu().numpy()
    else:
        # Masks disabled (or zero detections): no drawing, empty detections.
        save_list_add=[]
        return frame.byte().cpu().numpy(), pred_depth.cpu().numpy(), save_list_add
        # return frame.byte().cpu().numpy(), pred_depth.cpu().numpy()
def inference_image(net: PlaneRecNet, path: str, output_type: int):
    """Run PlaneRecNet on one image and save segmentation + depth visualizations.

    Writes three files next to the input ("_seg", "_dep1", "_dep2") and returns
    (seg_path, depth_path1, depth_path2, detections), where *detections* is a
    flat array of normalized (x, y, depth) triples per detected plane; returns
    None when the image cannot be read.

    NOTE(review): ``output_type`` is currently ignored — the original had an
    unreachable dispatch on it after an unconditional return; that dead code
    has been removed.
    """
    frame_np = cv2.imread(str(path))
    # Guard BEFORE touching .shape: cv2.imread returns None on failure, and
    # the original unpacked frame_np.shape first, raising AttributeError
    # instead of hitting its own None check.
    if frame_np is None:
        return
    H, W, _ = frame_np.shape
    frame_np = cv2.resize(frame_np, calc_size_preserve_ar(W, H, cfg.max_size), interpolation=cv2.INTER_LINEAR)
    frame_np = pad_even_divided(frame_np)  # pad image to be evenly divided by 32
    frame = torch.from_numpy(frame_np).cuda().float()
    batch = FastBaseTransform()(frame.unsqueeze(0))
    results = net(batch)
    blended_frame, depth, save_list_add = display_on_frame(results[0], frame, no_mask=args.no_mask, no_box=args.no_box, no_text=args.no_text)
    name, ext = os.path.splitext(path)
    save_path = name + '_seg' + ext
    depth_path1 = name + '_dep1.png'
    depth_path2 = name + '_dep2.png'
    # Visualization 1: absolute depth scale, clipped to 0..10 m.
    depth_copy1 = depth
    vmin = 0
    vmax = 10
    depth_copy1 = depth_copy1.clip(min=vmin, max=vmax)
    depth_copy1 = ((depth_copy1 - vmin) / (vmax - vmin) * 255).astype(np.uint8)
    depth_color1 = cv2.applyColorMap(depth_copy1, cv2.COLORMAP_PLASMA)
    # Annotate the fixed probe point (320, 240) with its depth value.
    # NOTE(review): assumes the resized image is at least 640x480 — confirm.
    cv2.putText(depth_color1, str(round(depth[240, 320], 2)), (320, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.circle(depth_color1, (320, 240), 5, (0, 255, 0), -1)
    # Visualization 2: scale relative to this image's own depth range
    # (clipped to the 1st..99th percentile before normalizing).
    depth_copy2 = depth
    vmin = np.percentile(depth_copy2, 1)
    vmax = np.percentile(depth_copy2, 99)
    depth_copy2 = depth_copy2.clip(min=vmin, max=vmax)
    depth_copy2 = ((depth_copy2 - depth.min()) / (depth.max() - depth.min()) * 255).astype(np.uint8)
    depth_color2 = cv2.applyColorMap(depth_copy2, cv2.COLORMAP_PLASMA)
    cv2.putText(depth_color2, str(round(depth[240, 320], 2)), (320, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.circle(depth_color2, (320, 240), 5, (0, 255, 0), -1)
    cv2.imwrite(save_path, blended_frame)
    cv2.imwrite(depth_path1, depth_color1)
    cv2.imwrite(depth_path2, depth_color2)
    return save_path, depth_path1, depth_path2, save_list_add
def main(input_path : str, output_type = 0):
    """End-to-end inference entry point: download weights, build the net, run one image.

    Returns the tuple produced by inference_image: segmentation path, the two
    depth-visualization paths, and the per-plane (x, y, depth) array.
    """
    # Fetch the pretrained PlaneRecNet checkpoint (downloads it if missing).
    model_download.get_model_planerecnet()
    timer.disable_all()
    # Override the SOLOv2 NMS settings before building the network.
    new_nms_config = {
        'nms_type': "mask",
        'mask_thr': 0.5,
        'update_thr': 0.5,
        'top_k': 5,
    }
    custom_args()
    set_cfg(args.config)
    cfg.solov2.replace(new_nms_config)
    net = PlaneRecNet(cfg)
    net.load_weights(args.trained_model)
    net.train(mode=False)
    net = net.cuda()
    # All subsequently created tensors default to CUDA floats.
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    save_path, depth_path1, depth_path2, save_list_add = inference_image(net, path = input_path, output_type=output_type)
    return save_path, depth_path1, depth_path2, save_list_add
71006334737 |
# Board-evaluation constants: a winning board is worth +1, a losing board -1,
# and all other boards fall in between, with the search favoring higher values.
minimax_win = 1
minimax_lose = -minimax_win
minimax_draw = 0
# Sentinel used by boards to signal "no winner yet / nobody".
minimax_empty = -1
import random
class TMCTS:
    """Randomized tree search ("Monte-Carlo-ish") move chooser for checkers-style boards.

    For each sampled move it plays random continuations for *ply* half-moves
    and scores the terminal/leaf positions with *evaluator*; the move with
    the best average score is returned.
    """
    def __init__(self, ply, evaluator, debug=False):
        # ply: search depth; evaluator: fn(board, colour) -> float in [-1, 1].
        self.ply = ply
        self.evaluator = evaluator
        # Number of random rollouts per ply (reduced in debug mode).
        self.baseRound = 300
        self.debug = debug
        if self.debug:
            self.baseRound = 10
    def Decide(self, B, colour):
        """Choose a move for *colour* on board *B*."""
        self.movesets = {}
        return self.random_ts(B, self.ply, colour)
    # -------------------------------------------------------
    def random_ts(self, B, ply, colour, printDebug=False):
        """Sample random rollouts per candidate move and pick the best average."""
        # print("YOU ARE", colour)
        # colour = 1 if colour == 0 else 0
        moves = B.get_moves()
        # if theres only one move to make theres no point evaluating future moves.
        if len(moves) == 1:
            return moves[0]
        else:
            # expand the depth if there is a limited set of moves to
            # choose from!
            if len(moves) < 4:
                ply = ply + len(moves)
            # if the user adds some dud plycount default to 1 random round.
            random_rounds = 1
            # iterate some random amount of times.
            if ply > 0:
                random_rounds = self.baseRound * ply
            # set up per-move tally of rollouts played and accumulated scores
            for move in moves:
                self.movesets[move] = {
                    'plays' : 0,
                    'chances' : 0
                }
            # iterate through the random number of rounds
            for i in range(random_rounds):
                random_move = random.choice(moves)
                HB = B.copy()
                HB.make_move(random_move)
                # start mcts
                self.movesets[random_move]['chances'] += self.treesearch(HB,ply,colour)
                self.movesets[random_move]['plays'] += 1
            bestChance = -1000
            bestMove = moves[0]
            for m in self.movesets:
                moveIndex = B.get_moves().index(m)
                moveString = B.get_move_strings()[moveIndex]
                # calculate the chance of it winning
                # NOTE(review): the `chances > 0` guard flattens any
                # non-positive total to 0, so uniformly losing moves are
                # indistinguishable from never-played ones here.
                chance = 0
                if (self.movesets[m]['plays'] > 0) and (self.movesets[m]['chances'] > 0):
                    chance = self.movesets[m]['chances'] / self.movesets[m]['plays']
                if chance > bestChance:
                    bestChance = chance
                    bestMove = m
                    if printDebug:
                        print(moveString, chance, self.movesets[m]['chances'] ,self.movesets[m]['plays'], "*")
                else:
                    if printDebug:
                        print(moveString, chance)
            return bestMove
    def treesearch(self,B,ply,colour):
        """Play one random enemy move + one random own move, recursing *ply* levels.

        Returns a terminal score (win/lose/draw) or the evaluator's leaf value.
        """
        # enemyColour = 1 if colour == 0 else 0
        isOver = self.isOver(B, colour)
        if isOver[0]:
            return isOver[1]
        else:
            if ply < 1:
                # Depth exhausted: score the position heuristically.
                return self.evaluator(B, colour)
            else:
                # get moves
                moves = B.get_moves()
                # choose random enemy move
                move = random.choice(moves)
                HB = B.copy()
                HB.make_move(move)
                # check if that move ended the game
                isOver = self.isOver(HB, colour)
                if isOver[0]:
                    return isOver[1]
                else:
                    # get moves
                    moves = HB.get_moves()
                    if len(moves) > 0:
                        # choose random player move
                        move = random.choice(moves)
                        HB.make_move(move)
                        # traverse, moving down the player ply
                        return self.treesearch(HB, ply-1, colour)
                    else:
                        # No reply available: treat as a neutral outcome.
                        return 0
    def isOver(self,B, colour):
        """Return (finished, score-for-*colour*); score is 0 while the game runs."""
        if B.is_over():
            if B.winner != minimax_empty:
                if B.winner == colour:
                    return (True,minimax_win)
                else:
                    return (True,minimax_lose)
            else:
                return (True,minimax_draw)
        else:
            return (False,-1)
| thien/slowpoke | library/decision/tmcts.py | tmcts.py | py | 4,430 | python | en | code | 1 | github-code | 13 |
18953724080 | import urllib
import deepl
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.template import loader
import aai.autosuggest as sgst
import aai.query as query
from aai.log.ArtistRecommendation import ArtistRecommendation
from aai.log.LogLoader import LogLoader
# Module-level singletons shared by all views.
pool = query.RDFQueries()
# DeepL target languages supported for abstract translation.
languages = ['DE', 'FR', 'ES', 'IT', 'NL', 'PL']
recommender = ArtistRecommendation(pool)
logLoader = LogLoader()
# Subscribe the recommender (slot 0) to per-client search logs.
logLoader.register(0, recommender)
def index(request):
    """Render the landing page."""
    rendered = loader.get_template('artoogle/index.html').render()
    return HttpResponse(rendered)
def search(request):
    """Handle an artist search: fetch abstract + artworks, log it, and render results.

    The query arrives in the ``arg`` GET parameter; spaces are converted to
    underscores and the term is percent-encoded (parentheses kept literal) to
    match the RDF resource naming scheme.
    """
    search_terms = str(request.GET.get('arg'))
    search_terms = search_terms.replace(' ', '_')
    search_terms = urllib.parse.quote(search_terms)
    search_terms = search_terms.replace('%28', '(')
    search_terms = search_terms.replace('%29', ')')
    # Hilarious easter egg: redirect "bob ross" straight to a picture.
    if search_terms == 'bob_ross':
        webite = '<head><meta http-equiv="refresh" content="0; url=https://geekandsundry.com/wp-content/uploads/' \
                 '2017/07/Bob-Ross-The-Art-of-Chill-featured.jpg"/></head>'
        return HttpResponse(webite, content_type="text")
    abstract = pool.get_abstract(search_terms)
    images = pool.get_art_from_artist(search_terms)
    # Record the search in this client's per-IP log (feeds recommendations).
    log = logLoader.get_log(get_client_ip(request))
    if search_terms:
        log.add_search(search_terms)
    # Translate the abstract if the user picked a supported language cookie.
    lang = request.COOKIES.get('lang')
    if lang in languages:
        abstract, _ = deepl.translate(abstract, source='EN', target=lang)
        # for path, title in images.items():
        #     images[path], _ = deepl.translate(title, source='EN', target=lang)
    recommendations = recommender.get_recommendations(get_client_ip(request))
    # Debug output left in place intentionally; consider logging instead.
    print("recommendations für ", get_client_ip(request))
    print(recommendations)
    print(images)
    return render(request, 'artoogle/index.html', {
        'abstract': abstract,
        'images': images,
        'recommendations': recommendations
    })
def detail(request):
    """Show the detail page for the artwork named in the ``arg`` query parameter."""
    artwork_name = str(request.GET.get('arg'))
    context = {
        'images': pool.get_similar_art(artwork_name, 5),
        'labels': pool.get_artwork_labels(artwork_name),
    }
    return render(request, 'artoogle/detail.html', context)
def auto_suggest(request):
    """Return autocomplete suggestions for the partial query in ``arg`` as JSON."""
    partial_query = str(request.GET.get('arg'))
    return JsonResponse({'suggestions': sgst.search(partial_query)})
def get_client_ip(request):
    """Best-effort client IP: first hop of X-Forwarded-For, else REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def run_index(request):
    """Rebuild the autosuggest search index and acknowledge completion."""
    sgst.full_index()
    return HttpResponse("done")
| watzkuh/artoogle | artoogle/views.py | views.py | py | 2,868 | python | en | code | 0 | github-code | 13 |
# Convert minisat's model output into a readable 9x9 sudoku grid and write it
# to "output.txt".
#
# Variable numbering: a positive literal k (1-based) encodes digit
# l = (k-1)//81 + 1 placed at row (m-1)//9, column (m-1)%9, where
# m = k - (l-1)*81.
#
# (Rewritten to use context managers instead of leaking file handles and
# hijacking sys.stdout.)
with open('minisat_output.txt') as result_file:
    lines = result_file.read().splitlines()

with open('output.txt', 'w') as out:
    if lines[0] == "UNSAT":
        # No satisfying assignment: the puzzle is unsolvable.
        print("UNSAT", file=out)
    else:
        literals = lines[1].split(" ")
        literals.pop(-1)  # drop minisat's trailing terminator "0"
        grid = [['0'] * 9 for _ in range(9)]
        for token in literals:
            k = int(token)
            if k > 0:  # only true literals carry a placed digit
                digit = (k - 1) // 81 + 1
                m = k - (digit - 1) * 81
                grid[(m - 1) // 9][(m - 1) % 9] = digit
        for row in grid:
            print(''.join(str(cell) for cell in row), file=out)
| vaibhavjindal/sat_solver | a1/a1.1/decode.py | decode.py | py | 886 | python | en | code | 0 | github-code | 13 |
# Identifying metadata for MITRE CAR analytic CAR-2016-04-004.
CAR_NUMBER = "CAR_2016_04_004"
CAR_NAME = "Successful Local Account Login"
# NOTE: fixed the original "authenticiation" typo in this description string.
CAR_DESCRIPTION = "The successful use of Pass The Hash for lateral movement between workstations would trigger event ID 4624 with an event level of information, from the security log. " \
                  "This behavior would be a LogonType of 3 using NTLM authentication where it is not a domain login and not the ANONYMOUS LOGON account"
ATTACK_TACTIC = "Defense Evasion"
# NOTE: the original URL read "https://car-.mitre.org/...", which is not a
# valid hostname; corrected to the CAR wiki domain.
CAR_URL = "https://car.mitre.org/wiki/CAR-2016-04-004"
# Elasticsearch source index/type for Windows security events, and the
# destination index/type for generated alerts.
ES_INDEX = "winevent_security*"
ES_TYPE = "winevent_security"
ALERT_INDEX = "alert"
INDICATOR_ID = "indicator--7522721a-b267-4b21-ad38-910044ce4720"
class CAR_2016_04_004():
    """CAR-2016-04-004: detect Pass-the-Hash-style NTLM network logons (event 4624)."""
    def __init__(self):
        # Static metadata bundle attached to every alert this analytic raises.
        self.car_data = dict(car_name=CAR_NAME,
                             car_number=CAR_NUMBER,
                             car_description=CAR_DESCRIPTION,
                             car_url=CAR_URL,
                             alert_index=ALERT_INDEX,
                             alert_type=CAR_NUMBER,
                             es_type=ES_TYPE,
                             indicator_id=INDICATOR_ID, es_index=ES_INDEX)
    def analyze(self, rdd, begin_timestamp, end_timestamp):
        """Filter a (key, event) RDD down to suspicious NTLM type-3 logons.

        Keeps event-code 4624 records with LogonType "3", NTLM authentication,
        a non-anonymous target user, and a timestamp inside
        [begin_timestamp, end_timestamp].  Returns the filtered RDD with each
        value reshaped to a flat dict carrying the CAR metadata.
        """
        # ISO-8601 strings: the later timestamp filters compare event
        # "@timestamp" strings lexicographically, which is order-correct
        # for this fixed UTC format.
        end = end_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
        begin = begin_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
        # 4624 = "an account was successfully logged on".
        rdd = rdd.filter(lambda item: (item[1]["data_model"]["fields"]["event_code"] == 4624))
        rdd = rdd.map(lambda item: (
            item[0],
            {
                "event_code": item[1]["data_model"]["fields"]["event_code"],
                "hostname": item[1]["data_model"]["fields"]["hostname"],
                "@timestamp": item[1]["@timestamp"],
                "data_model": item[1]["data_model"],
                "logon_type": item[1]["LogonType"],
                "authentication_package_name": item[1]["AuthenticationPackageName"],
                "target_user_name": item[1]["TargetUserName"],
                'car_id': CAR_NUMBER,
                'car_name': CAR_NAME,
                'car_description': CAR_DESCRIPTION,
                'car_url': CAR_URL}))
        rdd = rdd.filter(lambda item: (item[1]['target_user_name'] != "ANONYMOUS LOGON"))
        # LogonType 3 = network logon; NTLM outside a domain logon is the PtH tell.
        rdd = rdd.filter(lambda item: (item[1]['logon_type'] == "3"))
        rdd = rdd.filter(lambda item: (item[1]['authentication_package_name'] == 'NTLM'))
        rdd = rdd.filter(lambda item: (item[1]["@timestamp"] <= end))
        rdd = rdd.filter(lambda item: (item[1]["@timestamp"] >= begin))
        return rdd
| unfetter-discover/unfetter-analytic | analytic-system/src/CAR_2016_04_004.py | CAR_2016_04_004.py | py | 2,510 | python | en | code | 174 | github-code | 13 |
71251550417 | #!/usr/bin/env python3
# -*- coding: ascii -*-
import pygame
from ..cat.history import History
from scripts.utility import scale
from .Screens import Screens
from scripts.utility import get_text_box_theme
from scripts.cat.cats import Cat
import pygame_gui
from scripts.game_structure.image_button import UIImageButton
from scripts.game_structure.game_essentials import game, screen_x, MANAGER
class CeremonyScreen(Screens):
    """Profile sub-screen that displays a leader cat's leadership ceremony text."""
    def __init__(self, name=None):
        super().__init__(name)
        # UI elements are created in screen_switches() and torn down in exit_screen().
        self.back_button = None
        self.text = None
        self.scroll_container = None
        self.life_text = None
        self.header = None
        self.the_cat = None
    def screen_switches(self):
        """Build the screen's UI for the currently selected cat."""
        self.hide_menu_buttons()
        self.the_cat = Cat.all_cats.get(game.switches['cat'])
        if self.the_cat.status == 'leader':
            self.header = pygame_gui.elements.UITextBox(str(self.the_cat.name) + '\'s Leadership Ceremony',
                                                        scale(pygame.Rect((200, 180), (1200, -1))),
                                                        object_id=get_text_box_theme(), manager=MANAGER)
        else:
            self.header = pygame_gui.elements.UITextBox(str(self.the_cat.name) + ' has no ceremonies to view.',
                                                        scale(pygame.Rect((200, 180), (1200, -1))),
                                                        object_id=get_text_box_theme(), manager=MANAGER)
        # Only living leaders have ceremony text to show.
        if self.the_cat.status == 'leader' and not self.the_cat.dead:
            self.life_text = History.get_lead_ceremony(self.the_cat)
        else:
            self.life_text = ""
        self.scroll_container = pygame_gui.elements.UIScrollingContainer(scale(pygame.Rect((100, 300), (1400, 1000))))
        self.text = pygame_gui.elements.UITextBox(self.life_text,
                                                  scale(pygame.Rect((0, 0), (1100, -1))),
                                                  object_id=get_text_box_theme("#text_box_30_horizleft"),
                                                  container=self.scroll_container, manager=MANAGER)
        self.text.disable()
        self.back_button = UIImageButton(scale(pygame.Rect((50, 50), (210, 60))), "",
                                         object_id="#back_button", manager=MANAGER)
        # Size the scrollable area to the rendered text height (width scaled
        # from the 1600px design resolution to the actual screen width).
        self.scroll_container.set_scrollable_area_dimensions((1360 / 1600 * screen_x, self.text.rect[3]))
    def exit_screen(self):
        """Destroy all UI elements created by screen_switches()."""
        self.header.kill()
        del self.header
        self.text.kill()
        del self.text
        self.scroll_container.kill()
        del self.scroll_container
        self.back_button.kill()
        del self.back_button
    def on_use(self):
        # Nothing to update per-frame on this screen.
        pass
    def handle_event(self, event):
        """Route back-button clicks and the Escape key to the profile screen."""
        # NOTE(review): this guard is a no-op ('pass' does not skip the rest);
        # presumably it was meant to early-return while a dialog is open.
        if game.switches['window_open']:
            pass
        if event.type == pygame_gui.UI_BUTTON_START_PRESS:
            if event.ui_element == self.back_button:
                self.change_screen('profile screen')
        elif event.type == pygame.KEYDOWN and game.settings['keybinds']:
            if event.key == pygame.K_ESCAPE:
                self.change_screen('profile screen')
        return
| Thlumyn/clangen | scripts/screens/CeremonyScreen.py | CeremonyScreen.py | py | 3,208 | python | en | code | 135 | github-code | 13 |
26110907469 | from __future__ import unicode_literals
from hazm import *
import filtering as fl
import openpyxl
def mallet(file_name1, file_name2):
    """Convert two cleaned chat logs into a Mallet-style classification file.

    For each of the first 8032 line pairs (one line per input file), the first
    five lemmatized tokens that are not stop words become features
    ``f1 .. f5``.  Documents alternate class labels ``l1``/``l2`` according to
    which file they came from.  Output is written to ``mallet.txt`` in the
    format ``<doc_id> <label> f1 <tok> f2 <tok> ...``.

    (Rewritten to close all file handles via ``with`` and to share the
    previously duplicated per-line encoding logic in one helper.)
    """
    # Stop words / noise tokens to drop (Persian function words plus
    # chat-specific junk such as usernames and punctuation).
    delete_list = ["ViviGirl",':',"?" ,"؟" ,"ک","ها","از","در","Mr_Mean","mehraveh_tma","MAminHP","!","ب","ی","یه","FWD","Photo","]","{","[","هاش",".","اگ","اگه","اگر","۱","۲","۳","۴","۵"
        ,"۶","۷","۸", "۹","…","نه", "ن" ,"و" ,"را" ,"که","این","در","برای","تو","من","او" ,"به","با","تا","یک","چی", "هر" ,"چون" ,"باشه","ولی","بعد","هم","یا","کلا",
        "ای" ,"ن" ,"چ"]
    normalizer = Normalizer()
    lemmatizer = Lemmatizer()

    def encode(line):
        # Build the "f1 tok1 f2 tok2 ..." feature string for one raw line,
        # capped at five features per document.
        tokens = word_tokenize(normalizer.normalize(str(line)))
        feature = ''
        count = 0
        for token in tokens:
            token = lemmatizer.lemmatize(str(token))
            if token not in delete_list:
                feature += 'f' + str(count + 1) + ' ' + token + ' '
                count += 1
                if count >= 5:
                    break
        return feature

    counter = 0
    with open("mallet.txt", "w") as out, \
            open(str(file_name1) + "_cleaned.txt") as f1, \
            open(str(file_name2) + "_cleaned.txt") as f2:
        for i in range(8032):
            # One document from each file per iteration; labels alternate
            # l1/l2 because counter increments once per document.
            for line in (f1.readline(), f2.readline()):
                label = 'l' + str(counter % 2 + 1)
                out.write(str(counter + 1) + ' ' + label + ' ' + encode(line) + '\n')
                counter += 1
            print(i)
| mehraveh/ChatSubjects | mallet_file_gn.py | mallet_file_gn.py | py | 2,151 | python | en | code | 0 | github-code | 13 |
25054883404 | #10162
from sys import stdin
#A = 300, B = 60, C = 10
time = int(stdin.readline())
a = 0
b = 0
c = 0
while time > 0:
if time >= 300:
time -= 300
a += 1
elif time >= 60:
time -= 60
b += 1
else:
time -= 10
c += 1
if time == 0:
print(f"{a} {b} {c}")
else:
print(-1)
| wjsehdlf77/baekjoonpractice | practice42.py | practice42.py | py | 343 | python | en | code | 0 | github-code | 13 |
22800281512 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Chiheb Trabelsi
#
# Implementation of Layer Normalization and Complex Layer Normalization
#
import numpy as np
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
import keras.backend as K
from .bn import ComplexBN as complex_normalization
from .bn import sqrt_init
def layernorm(x, axis, epsilon, gamma, beta):
    """Layer-normalize ``x`` along every axis except batch (0) and ``axis``.

    Per sample, centres and scales the activations to unit variance, then
    applies the learned per-feature scale ``gamma`` and shift ``beta``.
    ``epsilon`` is added to the variance for numerical stability.
    """
    ndim = K.ndim(x)
    shape = K.shape(x)

    # Axes reduced over when estimating the statistics: everything except
    # the batch axis and the feature axis (deleted by index, so a negative
    # `axis` is handled the same way as in the original implementation).
    norm_axes = list(range(ndim))
    del norm_axes[axis]
    del norm_axes[0]

    # Statistics keep singleton dims everywhere except batch and features,
    # so they broadcast back against `x`.
    stat_shape = [1] * ndim
    stat_shape[axis] = shape[axis]
    stat_shape[0] = shape[0]

    mean = K.reshape(K.mean(x, axis=norm_axes), stat_shape)
    centred = x - mean
    variance = K.reshape(K.mean(centred ** 2, axis=norm_axes) + epsilon,
                         stat_shape)
    normed = centred / K.sqrt(variance)

    # gamma/beta are per-feature vectors; reshape them for broadcasting.
    param_shape = [1] * ndim
    param_shape[axis] = K.shape(x)[axis]
    broadcast_gamma = K.reshape(gamma, param_shape)
    broadcast_beta = K.reshape(beta, param_shape)
    return broadcast_gamma * normed + broadcast_beta
class LayerNormalization(Layer):
    """Real-valued layer normalization (Ba et al., 2016) as a Keras layer.

    Normalizes each sample over all axes except the batch axis and ``axis``
    (the feature axis), then applies a learned per-feature scale ``gamma``
    and shift ``beta``. The actual math lives in :func:`layernorm`.
    """
    def __init__(self,
                 epsilon=1e-4,
                 axis=-1,
                 beta_init='zeros',
                 gamma_init='ones',
                 gamma_regularizer=None,
                 beta_regularizer=None,
                 **kwargs):
        # epsilon: variance floor for numerical stability.
        # axis: the feature axis kept out of the reduction.
        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.axis = axis
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        super(LayerNormalization, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the per-feature gamma/beta weights."""
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: input_shape[self.axis]})
        shape = (input_shape[self.axis],)

        # BUG FIX: pass `shape` by keyword. In the Keras 2 API the first
        # positional parameter of `add_weight` is `name`, so the original
        # positional call handed the shape tuple to `name` while also
        # passing `name=` (a TypeError at build time). This also matches
        # the keyword style ComplexLayerNorm uses below.
        self.gamma = self.add_weight(shape=shape,
                                     initializer=self.gamma_init,
                                     regularizer=self.gamma_regularizer,
                                     name='{}_gamma'.format(self.name))
        self.beta = self.add_weight(shape=shape,
                                    initializer=self.beta_init,
                                    regularizer=self.beta_regularizer,
                                    name='{}_beta'.format(self.name))
        self.built = True

    def call(self, x, mask=None):
        assert self.built, 'Layer must be built before being called'
        return layernorm(x, self.axis, self.epsilon, self.gamma, self.beta)

    def get_config(self):
        """Serialize constructor arguments for model (de)serialization."""
        # NOTE(review): regularizers are serialized via .get_config() here,
        # while ComplexLayerNorm uses regularizers.serialize(); changing it
        # would alter saved configs, so it is left as-is.
        config = {'epsilon': self.epsilon,
                  'axis': self.axis,
                  'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
                  'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None
                  }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ComplexLayerNorm(Layer):
    """Layer normalization for complex-valued activations.

    The feature axis holds the real components in its first half and the
    imaginary components in its second half. Per sample, the layer centres
    the input, estimates the 2x2 real/imaginary covariance (Vrr, Vii, Vri)
    over all non-batch, non-feature axes, and delegates the whitening,
    scaling (gamma_rr/ri/ii) and shifting (beta) to
    ``complexnn.bn.complex_normalization`` with ``layernorm=True``.
    """
    def __init__(self,
                 epsilon=1e-4,
                 axis=-1,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_diag_initializer=sqrt_init,
                 gamma_off_initializer='zeros',
                 beta_regularizer=None,
                 gamma_diag_regularizer=None,
                 gamma_off_regularizer=None,
                 beta_constraint=None,
                 gamma_diag_constraint=None,
                 gamma_off_constraint=None,
                 **kwargs):
        # epsilon: variance floor; center/scale toggle beta/gamma exactly
        # as in standard batch/layer normalization.
        self.supports_masking = True
        self.epsilon = epsilon
        self.axis = axis
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_diag_initializer = initializers.get(gamma_diag_initializer)
        self.gamma_off_initializer = initializers.get(gamma_off_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
        self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
        self.gamma_off_constraint = constraints.get(gamma_off_constraint)
        super(ComplexLayerNorm, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create beta (full feature size) and the three gamma weights
        (half the feature size, one entry per complex feature)."""
        ndim = len(input_shape)
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})

        # Half the feature axis: one gamma entry per complex feature.
        gamma_shape = (input_shape[self.axis] // 2,)
        if self.scale:
            self.gamma_rr = self.add_weight(
                shape=gamma_shape,
                name='gamma_rr',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint
            )
            self.gamma_ii = self.add_weight(
                shape=gamma_shape,
                name='gamma_ii',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint
            )
            self.gamma_ri = self.add_weight(
                shape=gamma_shape,
                name='gamma_ri',
                initializer=self.gamma_off_initializer,
                regularizer=self.gamma_off_regularizer,
                constraint=self.gamma_off_constraint
            )
        else:
            self.gamma_rr = None
            self.gamma_ii = None
            self.gamma_ri = None

        if self.center:
            # beta spans the full (real+imaginary) feature axis.
            self.beta = self.add_weight(shape=(input_shape[self.axis],),
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None

        self.built = True

    def call(self, inputs):
        input_shape = K.shape(inputs)
        ndim = K.ndim(inputs)
        # Reduce over everything except the batch axis and the feature axis.
        reduction_axes = list(range(ndim))
        del reduction_axes[self.axis]
        del reduction_axes[0]
        # Number of complex features (feature axis = real half + imag half).
        input_dim = input_shape[self.axis] // 2
        mu = K.mean(inputs, axis=reduction_axes)
        broadcast_mu_shape = [1] * ndim
        broadcast_mu_shape[self.axis] = input_shape[self.axis]
        broadcast_mu_shape[0] = K.shape(inputs)[0]
        broadcast_mu = K.reshape(mu, broadcast_mu_shape)
        if self.center:
            input_centred = inputs - broadcast_mu
        else:
            input_centred = inputs
        centred_squared = input_centred ** 2
        # Slice the real/imaginary halves; the slicing pattern depends on
        # which axis holds the features and on the tensor rank.
        if (self.axis == 1 and ndim != 3) or ndim == 2:
            centred_squared_real = centred_squared[:, :input_dim]
            centred_squared_imag = centred_squared[:, input_dim:]
            centred_real = input_centred[:, :input_dim]
            centred_imag = input_centred[:, input_dim:]
        elif ndim == 3:
            centred_squared_real = centred_squared[:, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, input_dim:]
            centred_real = input_centred[:, :, :input_dim]
            centred_imag = input_centred[:, :, input_dim:]
        elif self.axis == -1 and ndim == 4:
            centred_squared_real = centred_squared[:, :, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, :, input_dim:]
            centred_real = input_centred[:, :, :, :input_dim]
            centred_imag = input_centred[:, :, :, input_dim:]
        elif self.axis == -1 and ndim == 5:
            centred_squared_real = centred_squared[:, :, :, :, :input_dim]
            centred_squared_imag = centred_squared[:, :, :, :, input_dim:]
            centred_real = input_centred[:, :, :, :, :input_dim]
            centred_imag = input_centred[:, :, :, :, input_dim:]
        else:
            raise ValueError(
                'Incorrect Layernorm combination of axis and dimensions. axis should be either 1 or -1. '
                'axis: ' + str(self.axis) + '; ndim: ' + str(ndim) + '.'
            )
        if self.scale:
            # Per-feature 2x2 covariance of (real, imag); epsilon keeps the
            # diagonal strictly positive.
            Vrr = K.mean(
                centred_squared_real,
                axis=reduction_axes
            ) + self.epsilon
            Vii = K.mean(
                centred_squared_imag,
                axis=reduction_axes
            ) + self.epsilon
            # Vri contains the real and imaginary covariance for each feature map.
            Vri = K.mean(
                centred_real * centred_imag,
                axis=reduction_axes,
            )
        elif self.center:
            Vrr = None
            Vii = None
            Vri = None
        else:
            raise ValueError('Error. Both scale and center in batchnorm are set to False.')

        # Whitening + affine transform are shared with the batchnorm module.
        return complex_normalization(
            input_centred, Vrr, Vii, Vri,
            self.beta, self.gamma_rr, self.gamma_ri,
            self.gamma_ii, self.scale, self.center,
            layernorm=True, axis=self.axis
        )

    def get_config(self):
        """Serialize constructor arguments for model (de)serialization."""
        config = {
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_diag_initializer': initializers.serialize(self.gamma_diag_initializer),
            'gamma_off_initializer': initializers.serialize(self.gamma_off_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_diag_regularizer': regularizers.serialize(self.gamma_diag_regularizer),
            'gamma_off_regularizer': regularizers.serialize(self.gamma_off_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_diag_constraint': constraints.serialize(self.gamma_diag_constraint),
            'gamma_off_constraint': constraints.serialize(self.gamma_off_constraint),
        }
        base_config = super(ComplexLayerNorm, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| ChihebTrabelsi/deep_complex_networks | complexnn/norm.py | norm.py | py | 11,241 | python | en | code | 673 | github-code | 13 |
36409314226 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from administrar.models import Tarea # Importa el modelo
from .forms import TareaForm
# Create your views here.
def v_index(request):
    """Task list view: create a task on POST, list/filter tasks on GET.

    POST: validate ``TareaForm`` and save it; always redirect back to the
    index (POST-redirect-GET), whether or not the form was valid.
    GET: filter tasks by partial ``titulo`` and exact ``estado`` query
    parameters, then render the index template.
    """
    if request.method == 'POST':
        form = TareaForm(request.POST.copy())
        if form.is_valid():
            form.save()
        # BUG FIX: the original only returned a redirect on the *invalid*
        # path; a successful save fell through a dead `if False:` block
        # (hand-rolled Tarea creation) and could return None. Always
        # redirect after a POST so a refresh does not resubmit the form.
        return HttpResponseRedirect("/")

    consulta = Tarea.objects.filter(
        titulo__icontains=request.GET.get("titulo", ""))
    estado = request.GET.get("estado", "")
    if estado != "":
        consulta = consulta.filter(estado=estado)
    context = {'var1': 'Valor1', 'var2': 'Valor2', 'lista': consulta}
    return render(request, 'index.html', context)
def v_eliminar(request, tarea_id):
    """Delete the task with id ``tarea_id`` (a no-op when it does not
    exist, since ``filter().delete()`` never raises) and go back home."""
    seleccionadas = Tarea.objects.filter(id=tarea_id)
    seleccionadas.delete()
    return HttpResponseRedirect("/")
def v_completado(request, tarea_id):
    """Mark the task with id ``tarea_id`` as completed (``estado = 1``)
    and redirect to the index. Raises ``Tarea.DoesNotExist`` when the id
    is unknown, exactly like ``objects.get``."""
    tarea = Tarea.objects.get(id=tarea_id)
    tarea.estado = 1
    tarea.save()
    return HttpResponseRedirect("/")
| diazalejandra/M7-Lista-Tareas-Django | administrar/views.py | views.py | py | 1,264 | python | en | code | 0 | github-code | 13 |
72927439698 | from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv1D, Dropout, MaxPool1D
from keras.utils import to_categorical
from keras.optimizers import SGD
from classify_bubble import get_candidate_signals, is_bubble
import os
from cv2 import imread
import numpy as np
def get_bubble_train_signal(img,
                            signal_len,
                            classifier,
                            threshold_abs=50,
                            min_distance=2):
    """Extract candidate z-signals from ``img`` and label each one.

    Delegates peak/signal extraction to ``get_candidate_signals`` (the x/y
    signals it also returns are not needed here) and classifies every
    z-signal with ``is_bubble`` using the given ``classifier``.

    Returns a ``(signals, labels)`` pair of equal-length lists, where each
    label is the boolean bubble/non-bubble decision for the matching signal.
    """
    _, _, signals_z = get_candidate_signals(img,
                                            signal_len,
                                            threshold_abs=threshold_abs,
                                            min_distance=min_distance,
                                            smooth_img=False)
    signals_labels = [bool(is_bubble(signal, classifier))
                      for signal in signals_z]
    assert len(signals_labels) == len(signals_z)
    return signals_z, signals_labels
def create_bubbleNet():
    """Build the 1-D CNN used to classify candidate signals as bubble / not.

    Architecture: Conv1D(64, k=4) -> MaxPool -> Conv1D(32, k=2) -> Flatten
    -> Dense(32, relu) -> Dense(2, softmax). Expects inputs of shape
    (nb_features, 1).

    NOTE(review): nb_features is 20 here while main() extracts signals with
    signal_len=10 — confirm the intended input length before training.
    """
    nb_classes = 2
    nb_features = 20
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=4, activation='relu', use_bias=True, input_shape=(nb_features,1)))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(filters=32, kernel_size=2, activation='relu'))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
def main():
    """Train bubbleNet on signals extracted from the images in data/training."""
    NUM_CLASSES = 2

    # Load every .png training image; cv2.imread's second argument 0 reads
    # the image as grayscale.
    train_images = []
    mess_dir = "data/training/"
    for filename in os.listdir(mess_dir):
        if filename.endswith(".png"):
            train_images.append(imread(os.path.join(mess_dir, filename), 0))

    # BUG FIX: the original `X_data = y_data = []` bound BOTH names to the
    # same list, so signals and labels were interleaved into one list.
    X_data = []
    y_data = []
    for img in train_images:
        signals, labels = get_bubble_train_signal(img,
                                                  signal_len=10,
                                                  classifier="logistic_regression",
                                                  threshold_abs=50,
                                                  min_distance=5)
        # One image yields *lists* of signals/labels; extend (the original
        # appended whole lists, producing a ragged array).
        X_data.extend(signals)
        y_data.extend(labels)

    X_data = np.asarray(X_data, dtype='float32')
    y_data = np.asarray(y_data, dtype='float32')
    # Normalize only the pixel-derived signal values. (The original also
    # divided y_data by 255, which destroyed the class labels before
    # one-hot encoding.)
    X_data /= 255
    y_data = to_categorical(y_data, NUM_CLASSES)
    # NOTE(review): Conv1D expects (samples, steps, channels) and the model
    # is built for 20 features while signal_len is 10 — confirm and reshape
    # X_data accordingly before training on real data.

    # 70/30 train/validation split, in corpus order.
    nb_train = int(0.7 * len(X_data))
    X_train = X_data[0:nb_train]
    y_train = y_data[0:nb_train]
    X_valid = X_data[nb_train:]
    y_valid = y_data[nb_train:]

    # Build and train the model.
    model = create_bubbleNet()
    sgd = SGD(lr=0.01, nesterov=True, decay=1e-6, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    nb_epoch = 15
    # Keras 2 renamed `nb_epoch` to `epochs` (the old keyword is rejected
    # by current versions).
    model.fit(X_train, y_train, epochs=nb_epoch, validation_data=(X_valid, y_valid), batch_size=16)


if __name__ == '__main__':
    main()
| zazizizou/gradient_ansatz | bubbleNet1D.py | bubbleNet1D.py | py | 3,112 | python | en | code | 1 | github-code | 13 |
9712414605 | import os
import torch
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
from process_test_file import test_eval, _get_test_reults
import warnings
warnings.filterwarnings("ignore")
# Select the compute device once for the whole module. (The original
# assigned `device` twice with the identical expression; the duplicate
# assignment has been removed.)
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Paths derived from the current working directory. NOTE(review): the
# [:-4] / [:-5] slicing assumes the script runs from a directory whose
# last path component is "code/" resp. "code" — confirm project layout.
test_path = str(os.getcwd())[:-4] + 'test_folder'
state_dict_path = os.path.join(os.getcwd()[:-5], 'models/VGG19.pth')

# Ground-truth class index for every bundled test image.
test_class = {'in1.jpg' : 0,
              'land3.jpg' : 3,
              'in3.jpeg' : 0,
              'earth1.jpeg': 0,
              'd1.jpeg' : 3,
              'urb3.jpeg' : 1,
              'd2.jpeg' : 3,
              'urb2.jpeg' : 1,
              'earth3.jpeg': 0,
              'land2.jpeg' : 3,
              'sea1.jpg' : 4,
              'wild2.jpeg' : 1,
              'sea3.jpeg' : 4,
              'earth4.jpeg': 0,
              'in2.jpeg' : 0,
              'd4.jpg' : 3,
              'earth5.jpeg': 0,
              'earth2.jpg' : 0,
              'd3.jpg' : 3,
              'urb1.jpeg' : 1,
              'sea2.jpeg' : 5,
              'urb4.jpeg' : 1,
              'wild3.jpeg' : 1,
              'wild1.jpg' : 1,
              'land1.jpg' : 3}

# Preprocessing matching the training pipeline: 224x224, [-1, 1] range.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((224,224)),
    transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5])
])

num_classes = 6

# VGG19 backbone with frozen features and a re-trained 6-way classifier
# head, restored from the saved state dict.
model = models.vgg19(pretrained=False)
for param in model.parameters():
    param.requires_grad = False
model.classifier[6] = nn.Linear(model.classifier[6].in_features, num_classes)
model = model.to(device)
model.load_state_dict(torch.load(state_dict_path, map_location=torch.device(device)))
def get_accuracy_test_results():
    """Evaluate the bundled test images with the loaded VGG19 model and
    print the accuracy and the predicted labels."""
    print('USING BEST MODEL: VGG19 ...')
    predictions, accuracy = test_eval(model, test_path, test_class, transform)
    print("Accuracy of the test path: ", accuracy * 100, "%")
    print("Predicted values: ", predictions)
def get_test_results(test_path, save_as='results.csv'):
    """Run the loaded model over the images in ``test_path`` and save the
    results to ``save_as`` (presumably a CSV of predictions — confirm in
    process_test_file._get_test_reults)."""
    _get_test_reults(model, transform, test_path, save_as=save_as)
    print('process finished ...')
| Anindyadeep/Image-Collagination-as-Augmentations | code/run.py | run.py | py | 2,255 | python | en | code | 1 | github-code | 13 |
42756097431 | import logging
from Ant import Ant
logger = logging.getLogger("AntScheduler.MaxMin")
# Operation codes understood by pheromone_trail_modify.
MULTIPLY = 0
ADD = 1


class AntAlgorithm:
    """Base class for ant colony optimization over the process graph.

    Holds the node list, the configuration and the per-iteration ant
    population; concrete algorithms (MaxMin, AntSystem) implement ``run``.
    """
    def __init__(self, _config, _nodes_list):
        self.config = _config
        self.nodes_list = _nodes_list
        self.ant_population = []
        self.result_history = []

    def run(self):
        """Execute the optimization; overridden by subclasses."""
        pass

    def pheromone_init(self):
        """Seed every feasible edge with the configured initial pheromone.

        An edge from a node to itself or to any of its (nested)
        predecessors is never created.
        """
        for origin in self.nodes_list:
            excluded = [origin] + origin.nested_predecessors()
            for successor in self.nodes_list:
                if successor not in excluded:
                    origin.pheromone_dict[successor] = self.config.init_pheromone_value

    @staticmethod
    def pheromone_trail_modify(_trail, _value, _operation):
        """Apply ``_value`` to the pheromone of every edge along ``_trail``.

        ``_operation`` selects MULTIPLY or ADD. An empty trail raises
        StopIteration, as the original implementation deliberately did.
        """
        steps = iter(_trail)
        current = next(steps)  # throws StopIteration if empty.
        for upcoming in steps:
            if _operation == MULTIPLY:
                current.pheromone_dict[upcoming] *= _value
            elif _operation == ADD:
                current.pheromone_dict[upcoming] += _value
            current = upcoming
class MaxMin(AntAlgorithm):
    """Max-Min Ant System variant: only the globally best trail and a few
    iteration-best trails deposit pheromone, and evaporation keeps every
    edge value bounded below by 1 (see evaporate_pheromone_trail)."""
    def __init__(self, _config, _nodes_list):
        AntAlgorithm.__init__(self, _config, _nodes_list)
        # Placeholder ant acting as "worst possible" best-so-far result.
        self.history_best = Ant(self.nodes_list[0])
        # TODO how to make arbitrary big max value?
        self.history_best.result_value = 100000

    def run(self):
        """Run the configured number of iterations of the MMAS loop:
        generate ants, evaporate along each trail, then reinforce."""
        self.pheromone_init()
        for iteration in range(self.config.iterations):
            # Fresh population each iteration, all starting at the root node.
            self.ant_population = [Ant(self.nodes_list[0]) for _ in range(self.config.ant_population)]
            for ant in self.ant_population:
                ant.result_generate()
                self.evaporate_pheromone_trail(ant)
            self.graph_update()
            logger.info(
                "running iteration: {0}, best result_permutation is: {1}".format(iteration,
                                                                                 self.result_history[-1].result_value))

    def graph_update(self):
        """In each iteration graph pheromone is being updated according to the best ants results"""
        # Lower result_value is better: sort ascending, record and keep the
        # best-so-far ant, then reinforce the promoted trails.
        self.ant_population.sort(key=lambda x: x.result_value)
        self.result_history.append(self.ant_population[0])
        self.history_best = min(self.history_best, self.ant_population[0], key=lambda x: x.result_value)
        self.prepare_and_modify_best_trails()

    def prepare_and_modify_best_trails(self):
        """Modify trail - once for history best result and once for couple of local best results"""
        self.pheromone_trail_modify(self.history_best.visited_list, 1 + self.config.pheromone_potency, MULTIPLY)
        for i in range(self.config.max_min_ants_promoted):
            # Geometrically decreasing reinforcement for the i-th best ant.
            value = (1 + self.config.pheromone_potency * (self.config.pheromone_distribution ** (i + 1)))
            self.pheromone_trail_modify(self.ant_population[i].visited_list, value, MULTIPLY)

    def evaporate_pheromone_trail(self, ant):
        """evaporate the pheromone, minimal value is 1"""
        # p <- p * rate + (1 - rate): a convex blend towards 1, so edge
        # values decay but never drop below 1.
        self.pheromone_trail_modify(ant.visited_list, self.config.evaporation_rate, MULTIPLY)
        self.pheromone_trail_modify(ant.visited_list, 1 - self.config.evaporation_rate, ADD)
class AntSystem(AntAlgorithm):
    """Classic Ant System variant: every ant deposits pheromone
    proportional to its solution quality relative to the iteration best."""
    def __init__(self, _config, _nodes_list):
        AntAlgorithm.__init__(self, _config, _nodes_list)

    def graph_update(self):
        """Record the iteration best and let every ant deposit pheromone
        along its own trail (lower result_value is better)."""
        self.ant_population.sort(key=lambda x: x.result_value)
        self.result_history.append(self.ant_population[0])
        best_value = self.ant_population[0].result_value
        for ant in self.ant_population:
            # BUG FIX: the original referenced the undefined names
            # `ant_population` (missing `self.`) and `self.ant`, so this
            # method raised NameError/AttributeError on every call.
            value = (1 / ant.result_value * best_value * self.config.pheromone_potency)
            self.pheromone_trail_modify(ant.visited_list, value, ADD)

    def run(self):
        """Run the configured number of Ant System iterations."""
        # BUG FIX: the original called the non-existent
        # `self.graph_pheromone_clear()`; pheromone_init() is the base-class
        # initializer MaxMin.run uses, so use it here for consistency.
        self.pheromone_init()
        for iteration in range(self.config.iterations):
            self.ant_population = [Ant(self.nodes_list[0]) for _ in range(self.config.ant_population)]
            for ant in self.ant_population:
                ant.result_generate()
            self.graph_update()
            logger.info(
                "running iteration: {0}, best result_permutation is: {1}".format(iteration,
                                                                                 self.result_history[-1].result_value))
| mcalus3/AntScheduler | antscheduler/AntAlgorithm.py | AntAlgorithm.py | py | 4,775 | python | en | code | 5 | github-code | 13 |
70876214418 | import json
import dpath.util
import xmltodict
from fastapi.testclient import TestClient
from parsel import Selector
from .main import app
from .dependencies import BaseResponse, RequestError, ParserError
from .routers.dpath import DpathResponse, DpathRequest
client = TestClient(app)
def test_wake():
    """The keep-alive endpoint answers with HTTP 200."""
    assert client.get("/wake").status_code == 200
def test_user_agents():
    """/user_agents returns a non-empty JSON list of strings."""
    payload = client.get("/user_agents").json()
    # Verify that the returned object is a non-empty list of strings.
    assert payload
    assert isinstance(payload, list)
    assert all(isinstance(item, str) for item in payload)
def test_example_html():
    """The HTML example exposes the element used by docs/tests via XPath."""
    markup = client.get("/examples/html").text
    # The element must be reachable at the path documented elsewhere.
    selector = Selector(text=markup)
    assert selector.xpath("/html/body/div/span[3]/text()").get() is not None
def test_example_css():
    """The HTML example exposes the element used by docs/tests via CSS."""
    markup = client.get("/examples/html").text
    # The element must be reachable at the selector documented elsewhere.
    selector = Selector(text=markup)
    assert selector.css("body > div > span:nth-child(5)").get() is not None
def test_example_regex():
    """The HTML example contains the span matched by the documented regex."""
    response = client.get("/examples/html")
    data = response.text
    selector = Selector(text=data)
    # BUG FIX: Selector.re() returns a list (possibly empty), never None,
    # so the original `is not None` assertion could never fail. Assert the
    # match list is non-empty instead. A raw string also avoids the invalid
    # `\/` escape sequence (`\/` matches a plain `/` in a regex anyway).
    assert selector.re(r"<span><strong>.*:</strong> (.*)</span>")
def test_example_json():
    """The JSON example exposes the path used by docs and other tests."""
    document = client.get("/examples/json").text
    # The subject must be reachable at the dpath documented elsewhere.
    parsed = json.loads(document)
    assert dpath.util.get(parsed, "/note/subject") is not None
def test_example_xml():
    """The XML example exposes the path used by docs and other tests."""
    document = client.get("/examples/xml").text
    # The subject must be reachable at the dpath documented elsewhere.
    assert dpath.util.get(xmltodict.parse(document), "/note/subject") is not None
def test_as_basic():
    """Verify that the function to format a response in "basic" is functioning"""
    request = DpathRequest(
        url="http://localhost/examples/json",
        path="/note/subject",
        path_type="JSON",
        user_agent="some user agent",
        return_style="BASIC",
    )
    response = DpathResponse(
        request_item=request,
        request_error=RequestError(code=0, msg=["error", "some error msg"]),
        parser_error=ParserError(code=0, msg="some error msg"),
        path_data="some path data",
        raw_data="some raw data",
    )

    # BUG FIX: list.sort() sorts in place and returns None, so the original
    # compared None == None and the assertion could never fail. Use sorted()
    # to compare the actual key sets.
    basic_format_keys = sorted(response.basic_format_keys)
    basic_format_response_keys = sorted(response.as_basic())
    # Assert that after the response is processed, the keys that are returned are only the keys we specified.
    assert basic_format_keys == basic_format_response_keys
| avi-perl/Parsel-Selector-API | app/test_main.py | test_main.py | py | 3,090 | python | en | code | 0 | github-code | 13 |
6948127564 | from typing import *
import time
import matplotlib.pyplot as plt
class Solution:
    """LeetCode 343 (Integer Break): O(n^2) bottom-up dynamic programming."""

    def integerBreak(self, n: int) -> int:
        """Return the maximum product obtainable by splitting ``n`` (n >= 2)
        into at least two positive integers.

        ``best[total]`` is the best product for ``total``; for each first
        factor we either keep the remainder whole or split it further.
        """
        best = [0] * (n + 1)
        for total in range(2, n + 1):
            for first in range(1, total):
                rest = total - first
                candidate = max(first * rest, first * best[rest])
                if candidate > best[total]:
                    best[total] = candidate
        return best[n]
class Solution1:
    """LeetCode 343: O(n) DP — an optimal split only uses factors 2 and 3."""

    def integerBreak(self, n: int) -> int:
        """Return the maximum product for splitting ``n`` into >= 2 parts.

        For n < 4 the forced split gives n - 1; otherwise the last factor
        peeled off is always 2 or 3, kept whole or split further.
        """
        if n < 4:
            return n - 1
        best = [0] * (n + 1)
        best[2] = 1
        for total in range(3, n + 1):
            options = (2 * (total - 2), 2 * best[total - 2],
                       3 * (total - 3), 3 * best[total - 3])
            best[total] = max(options)
        return best[n]
class Solution2:
    """LeetCode 343: closed-form solution — use as many 3s as possible."""

    def integerBreak(self, n: int) -> int:
        """Return the maximum product for splitting ``n`` into >= 2 parts.

        Mathematical result: factor out 3s; a leftover of 1 is regrouped
        with one 3 into 2+2 (factor 4), a leftover of 2 stays a factor 2.
        """
        if n < 4:
            return n - 1
        quotient, remainder = divmod(n, 3)
        if remainder == 0:
            return 3 ** quotient
        if remainder == 1:
            return 3 ** (quotient - 1) * 4
        return 3 ** quotient * 2
if __name__ == '__main__':
    # Benchmark harness: time the O(n) DP (Solution1) against the O(log n)
    # closed form (Solution2) for a range of n, and plot the wall-clock
    # times. Solution (O(n^2)) is instantiated but its timing loop and
    # plot lines are commented out below (times1 stays empty).
    sol = Solution()
    sol1 = Solution1()
    sol2 = Solution2()
    xs = list(range(30000, 100000, 5000))
    times1, times2, times3 = [], [], []
    for num in xs:
        # Time Solution1 on this input (wall-clock, single run).
        time1 = time.time()
        sol1.integerBreak(num)
        time2 = time.time()
        print(time2 - time1)
        times2.append(time2 - time1)
        # Time Solution2 on the same input.
        time1 = time.time()
        sol2.integerBreak(num)
        time2 = time.time()
        print(time2 - time1)
        times3.append(time2 - time1)
    # plt.plot(xs, times1, color='g', label='sol', lw=2, ls='-')
    # plt.scatter(xs, times1, color='m', marker='<')
    plt.plot(xs, times2, color='b', label='sol1', lw=2, ls='--')
    plt.scatter(xs, times2, color='y', marker='^')
    plt.plot(xs, times3, color='r', label='sol2', lw=2, ls='--')
    plt.scatter(xs, times3, color='y', marker='>')
    plt.legend()
    plt.title('no title')
    plt.show()
| Xiaoctw/LeetCode1_python | 动态规划/整数拆分_343.py | 整数拆分_343.py | py | 1,830 | python | en | code | 0 | github-code | 13 |
25940813390 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 10:19:16 2020
@author: jhello
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from IOFile import TabOutFile, GridOutFile, DataTabFiles
# time step file
result_dir = 'result/'
result_tmp_dir = result_dir + 'tmp/'
tab_out_file_name = result_tmp_dir + 'tab.out'
grid_out_file_name = result_tmp_dir + 'grid.out'
# dataframe headers
df_names = 'x t rho vx1 vx2 vx3 Bx1 Bx2 Bx3 prs'.split()
# read time step from 'tab.out' file
time_steps = TabOutFile(tab_out_file_name).time_steps()
# time step count
time_count = len(time_steps)
# init space count to 0
space_count = GridOutFile(grid_out_file_name).space_count()
data_tab_files = DataTabFiles(result_tmp_dir, time_steps)
dfs = data_tab_files.solution()
a_style_dataframe = data_tab_files.a_style_dataframe()
print('a_style_dataframe')
print(a_style_dataframe)
s, u, v = tf.linalg.svd(a_style_dataframe)
us, uu, uv = tf.linalg.svd(u)
last_time_point_sol = a_style_dataframe.iloc[:, -1]
coord = last_time_point_sol.dot(uu)
print('coord')
print(coord)
error = []
for i in range(1, len(coord) + 1):
last_time_point_predict = np.dot(uu[:, :i], coord[:i])
last_time_point_sol_ndarray = last_time_point_sol.values
error += [sum(abs(last_time_point_predict - last_time_point_sol_ndarray) / last_time_point_sol_ndarray)]
rho_predict = last_time_point_predict[::6]
rho_sol = last_time_point_sol_ndarray[::6]
fig, ax = plt.subplots()
ax.set_title('Coord Count = ' + str(i))
ax.plot(rho_predict, '.-')
ax.plot(rho_sol)
plt.savefig('imgs/'+'Coord Count = ' + str(i))
plt.close()
fig, ax_err = plt.subplots()
ax_err.plot(error, '.-')
ax_err.grid(True)
plt.savefig('imgs/'+'error')
plt.close()
| jhello/annmhd | test.py | test.py | py | 1,786 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.