blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
602fe338c6cc034c881f9b5fa1609f2eacef7cf0 | b9e630778ebc270bc19761310789525fc05b0e62 | /CovidReportHCSC.py | f5c1071fd2548b09266703c5d775ba3c95e7df9f | [] | no_license | ludwigrubio/covid19-HCSC | abca15939f76747672890cc35270f7cdce0c6d02 | 7954630936a540351a6bc2fa98d42e3e24560472 | refs/heads/main | 2023-05-31T20:34:30.249894 | 2021-07-11T04:23:47 | 2021-07-11T04:23:47 | 384,239,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,038 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 8 16:37:51 CST 2021
@author: ludwig rubio
"""
import logging
import pandas as pd
logger = logging.getLogger()
class CovidReportHCSC():
    """
    Class to create an instance of a new report on a Daily/Monthly basis.

    Constructing an instance runs the full pipeline: both source files are
    read and preprocessed, then joined into the final report (see __init__).
    Attributes:
    -----------
        covidCounties (pandas.DataFrame):
            DataFrame simplified and cleaned to be joined with the population dataset
        populationCounties (pandas.DataFrame):
            DataFrame simplified with 5-digit format FIPS to join with the COVID dataset
        report (pandas.DataFrame):
            DataFrame with the final report to be exported
    """
    def __init__(self, covid_url, population_url, report_url):
        """
        Constructor: preprocesses both data sources and builds the final report.
        Parameters:
        -----------
            covid_url (string): COVID19 Data Source URL
            population_url (string): Population Data Source URL
            report_url (string): Output custom file folder/name
                NOTE(review): this argument is never stored or used here; the
                output path is passed to exporToCSV instead -- confirm whether
                it is still needed.
        """
        # Step 1: read and clean the NY Times COVID dataset
        logger.info("1) NY Times DataSet preprocess started ...")
        self.covidCounties = self.__preprocessCOVIDFile(covid_url)
        logger.info("   NY Times preprocess completed.")
        # Step 2: read and clean the census population dataset
        logger.info("2) Population by County preprocess Started ...")
        self.populationCounties = self.__preprocessPopulationFile(population_url)
        logger.info("   Population by County preporcess Times Completed.")
        # Step 3: join both into the final report
        logger.info("3) Join process started...")
        self.report = self.joinCOVIDPopulation()
        logger.info("   Join process completed.")
def __preprocessCOVIDFile(self, covid_url):
try:
covidCounties = pd.read_csv(covid_url, parse_dates=True, keep_default_na=False)
except:
logger.error(f"There was an issue trying to read file: {covid_url}, please verify file exists")
raise
try:
# Remove PUERTO RICO
covidCounties = covidCounties[covidCounties['state'] !='Puerto Rico']
# Tranform deaths to Integer type
covidCounties['deaths'] = covidCounties['deaths'].str.strip().astype('float').astype('Int64')
# Convert dates to Date type
covidCounties['date'] = pd.to_datetime(covidCounties['date'])
# Fixing missing FIP for New York
covidCounties.loc[covidCounties['county'] == 'New York City', 'fips'] = '36061'
except:
logger.error(f"There was an issue preprocessing the file, please ensure file is following defined standard https://github.com/nytimes/covid-19-data")
raise
return covidCounties
def __preprocessPopulationFile(self, population_url):
try:
populationCounties = pd.read_csv(population_url, parse_dates=True, keep_default_na=False, encoding='ISO-8859-1')
except:
logger.error(f"There was an issue trying to read file: {population_url}, please verify file exists")
try:
# Keep only needed data
populationCounties = populationCounties.loc[populationCounties['SUMLEV'] == 50][['STATE','COUNTY','POPESTIMATE2019']]
# Raname column
populationCounties.rename(columns={"POPESTIMATE2019": "population_2019"}, inplace = True)
# Create FIPS in format SSCCC
populationCounties['COUNTY'] = populationCounties['COUNTY'].astype(str).str.zfill(3)
populationCounties['STATE'] = populationCounties['STATE'].astype(str).str.zfill(2)
populationCounties['fips'] = populationCounties['STATE'] + populationCounties['COUNTY']
except:
logger.error(f"There was an issue preprocessing the file, please ensure file is following defined standard https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.pdf")
raise
return populationCounties
def joinCOVIDPopulation(self):
"""
Joining data sources an applying required aggregations.
Returns:
--------
report (pandas.DataFrame): include columns:
fips
state
county
population_2019
daily_cases
daily_deaths
cumulative_cases_to_date
cumulative_death_to_date
"""
joined = self.covidCounties.merge( self.populationCounties, how='left', on='fips')
# Fix known issue: when a Nan is created is converted to An Object type
joined['population_2019'] = joined['population_2019'].astype('Int64')
# Comulative data
joined = joined[['date','fips','county','state','population_2019', 'cases','deaths']]
joined.sort_values(['date']).reset_index(drop=True)
joined["cumulative_cases_to_date"] = joined.groupby(['fips'])['cases'].cumsum(axis=0)
joined["cumulative_death_to_date"] = joined.groupby(['fips'])['deaths'].cumsum(axis=0)
# Fix know Issue
joined["cumulative_death_to_date"] =joined["cumulative_death_to_date"].astype('Int64')
# Raname columns
joined.rename(columns={"cases": "daily_cases", "deaths": "daily_deaths"}, inplace = True)
return joined
    def exporToCSV(self, file_name):
        """
        Export the final report into a CSV file (the index column is omitted).
        Parameters:
        -----------
            file_name (string): name of file to be exported, by default COVID_19-Population-[timestamp]_[daily|weekly].csv
        Returns:
        --------
            report (csv): create a file in specified folder/name
        """
        self.report.to_csv(file_name, index=False) | [
"gerardo.lgrj@gmail.com"
] | gerardo.lgrj@gmail.com |
08977ed344c5bd849838d147b062717e2952e216 | bee4888fc01385a4531f83f07e0039bbebb2df16 | /regression.py | fa0ed41996aae236fe75a259aebed43ba17da685 | [] | no_license | jhualberta/pyex | 0764ef0fed6499925362ee27da19fdb271b817b5 | 1ef3fea79d07e9cb9804817c014dfdabf819f56c | refs/heads/master | 2021-01-09T01:56:43.787548 | 2020-02-21T18:53:57 | 2020-02-21T18:53:57 | 242,209,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,643 | py | import pandas as pd
import numpy as np
import string
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import roc_curve
import click
# Column names shared by every CSV used in this script: the GOS outcome
# first, followed by the candidate predictor columns.
colNames = ["GOS", "brain", "Synek", "N20", "III", "V", "III-V/I-III", "FOUR", "GCS", "lesion"]
#neurodata = pd.read_csv(r'~/exercise/pyEx/trainData1.csv', sep = " ", header=0, index_col=False, names= colNames)
#testdata = pd.read_csv(r'~/exercise/pyEx/testData.csv', sep = " ", header=0, index_col=False, names= colNames)
neurodata = pd.read_csv(r'~/exercise/pyEx/rawData.csv', sep = ",", header=0, index_col=False, names= colNames)
nPar = 9 # predictors used for fitting are columns 1..nPar-1 (slice iloc[:, 1:nPar])
nParTot = 10 # total number of columns, outcome included
nrows = neurodata.shape[0]
#print("raw data")
print("number of raw dataset: ", nrows)
#print(neurodata)
#print("use parameters: ", neurodata.columns)
#inputData = neurodata.iloc[:,1:9]
#outputData = neurodata['GOS']
def fillNan(data):
    """Replace missing values (NaN) in every column of *data* with that
    column's mode (most frequent value).

    The frame is modified in place and also returned, matching the existing
    call pattern ``data_filtered = fillNan(neurodata)``.
    """
    # Iterate over the frame's own columns instead of the module-level
    # colNames list: identical behaviour for the frames used in this script
    # (whose columns are exactly colNames), but the function no longer
    # raises KeyError on frames with different columns.
    for title in data.columns:
        modes = data[title].mode()
        # mode() is legitimately empty for an all-NaN column; skip those
        # instead of crashing on modes[0].
        if modes.empty:
            continue
        data[title] = data[title].fillna(modes[0])
    return data
def reject_outliers(data, item):
    """Drop every row whose value in column *item* is missing (NaN).

    The previous implementation filled NaNs with the sentinel 1000 *in
    place* and then filtered on ``!= 1000``: it mutated the caller's frame
    (permanently turning every NaN in every column into 1000) and silently
    dropped rows that legitimately contained the value 1000. This version
    filters on missingness directly and leaves *data* untouched.
    """
    return data[data[item].notna()]
def bootstrap(data):
    """Split *data* into a bootstrap train set and an out-of-bag test set.

    Rows are drawn one at a time with replacement; rows that were never
    drawn form the test set, the remaining (distinct) rows the train set.
    Returns the pair ``(trainSet, testSet)``.
    """
    n_rows = data.shape[0]
    # P(a given row is never drawn) = (1 - 1/n)^n; beware very large n.
    p_never_drawn = (1 - 1.0 / n_rows) ** n_rows
    print("Sample non-duplication probability = ", p_never_drawn)
    print("Expected test set number: ", int(n_rows * p_never_drawn))
    # Draw n_rows single-row samples with replacement (one .sample(n=1) call
    # per draw, consuming the RNG exactly as a plain append loop would).
    drawn = pd.concat([data.sample(n=1, replace=True) for _ in range(n_rows)])
    # Out-of-bag rows become the test set; the sampled rows (deduplicated by
    # dropping the test rows from the original frame) become the train set.
    test_set = data.drop(drawn.index)
    train_set = data.drop(test_set.index)
    test_set.sort_index()  # no-op: result intentionally discarded, kept as-is
    print("number of test data: ", test_set.shape[0])
    print("----test set (filtered set - train set)")
    train_set.sort_index()  # no-op: result intentionally discarded, kept as-is
    print("number of train data: ", train_set.shape[0])
    print("----train set --------------------")
    return (train_set, test_set)
### For the raw data we can either filter out all the records containing
### NaN values, or replace NaN with means/modes/interpolated values.
## filtering N/A values
#data_filtered = reject_outliers(neurodata, colNames[0]) # check row by row
## replace the N/A values by mode, then drop rows still missing a predictor
data_filtered = fillNan(neurodata)
for item in colNames[1:nParTot]:
    data_filtered = reject_outliers(data_filtered, item)
print("number of dataset after filtering: ", data_filtered.shape[0], ", ", nrows-data_filtered.shape[0], "are removed.")
#print(data_filtered)
#print(data_filtered.to_string())
trainSet = neurodata
testSet = neurodata
## Either reuse a previously saved train/test split, or bootstrap a new one
saveKey = False
if click.confirm("Use the saved tables? (Make sure they exist)", default=True):
    saveKey = True
    trainSet = pd.read_csv(r'trainSet.csv', sep = ",", header=0, index_col=False, names= colNames)
    testSet = pd.read_csv(r'testSet.csv', sep = ",", header=0, index_col=False, names= colNames)
else:
    ### Separate the data into trainSet and testSet by using the bootstrap method
    trainSet, testSet = bootstrap(data_filtered)
# Fit logistic regression: column 0 (GOS) is the target, columns 1..nPar-1
# are the predictors.
features = trainSet.iloc[:,1:nPar]
target = trainSet.iloc[:,0]
#print(target)
logreg = LogisticRegression()
result = logreg.fit(features, target)
print("trained coeffieciency = ", logreg.coef_)
# Clean the test split the same way before evaluation
testdata = testSet
testdata_filtered = reject_outliers(testdata, colNames[0])
for item in colNames[1:nParTot]:
    testdata_filtered = reject_outliers(testdata_filtered, item)
print("number of test data: ", testdata_filtered.shape[0], ",", testdata.shape[0] - testdata_filtered.shape[0], "are removed.")
# Evaluate on the held-out split: confusion matrix, accuracy/precision/recall
x_test = testdata_filtered.iloc[:,1:nPar]
y_test = testdata_filtered.iloc[:,0]
y_pred = logreg.predict(x_test)
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred))
print("Recall:",metrics.recall_score(y_test, y_pred))
# Render the confusion matrix as a heat map with cell counts
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cnf_matrix)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Actual 0s', 'Actual 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
    for j in range(2):
        ax.text(j, i, cnf_matrix[i, j], ha='center', va='center', color='red')
#plt.show()
# Probability of the positive class, used for the ROC / AUC plots below
y_pred_proba = logreg.predict_proba(x_test)[::,1]
# ROC
fpr, tpr, roc = roc_curve(y_test, y_pred_proba, drop_intermediate=False)
plt.figure()
## Add the ROC curve
plt.plot(fpr, tpr, color='red',
         lw=2, label="ROC curve of test data 1")#, roc="+str(roc))
## Diagonal reference line (random classifier FPR == TPR)
plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
## Title and labels
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC curve')
plt.legend(loc=4)
# AUC
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
print("AUC", auc)
plt.plot(fpr,tpr,label="AUC curve of test data 1, auc="+str(auc))
plt.legend(loc=4)
plt.show()
################### classifiers ########################
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
# Parameters are checked two at a time.
# Assign which two parameters you want to check; pick indices in range [1, 8]:
# colNames = ["GOS", "brain", "Synek", "N20", "III", "V", "III-V/I-III", "FOUR", "GCS", "lesion"]
#               0        1       2       3      4    5        6            7       8       9
par1 = 1
par2 = 2
# Refit a 2-feature classifier on the filtered data for visualization only
featuresCheck = data_filtered.iloc[:, [par1, par2]]
target = data_filtered.iloc[:,0]
clf = LogisticRegression().fit(featuresCheck, target)
h = .02  # step size in the mesh
x_min, x_max = float(x_test[colNames[par1]].min()) - 1, float(x_test[colNames[par1]].max()) + 1
y_min, y_max = float(x_test[colNames[par2]].min()) - 1, float(x_test[colNames[par2]].max()) + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the class of every mesh point
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression %s, %s " %(colNames[par1],colNames[par2]) )
plt.axis('tight')
# Overlay the actual data points colored by class
plt.scatter(featuresCheck[colNames[par1]], featuresCheck[colNames[par2]], c=target, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel(colNames[par1])
plt.ylabel(colNames[par2])
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
#plt.show()
# Persist the split so later runs can reuse it (the click.confirm branch at
# the top of the script reads these two files back).
if saveKey == False:
    if click.confirm("Saved the tables?", default=True):
        # BUGFIX: the two sets were previously swapped (testSet was written
        # to trainSet.csv and trainSet to testSet.csv), which silently
        # flipped the train/test roles when the files were reloaded.
        trainSet.to_csv('trainSet.csv', index=False) #, compression=compression_opts)
        testSet.to_csv('testSet.csv', index=False) #, compression=compression_opts)
else:
    print("You used the saved data, no need to save them again. Good bye.")
| [
"jhu9@ualberta.ca"
] | jhu9@ualberta.ca |
fb5026de89f588c4cae40d8fcd5d0d5492286c95 | ba59964d15b7652e02b74a85d8f8853ef67eefb1 | /app/commander/interfaces/cli.py | 3b6d0e8b21080949b2678be1a0c476bd5e4b26ac | [] | no_license | netixx/NetProbes | 190a1d54beb5890683ce0bf5d3260f2559ba53a0 | ca59600c973fb63ec974fa4a3b03784784f30a31 | refs/heads/master | 2020-04-21T01:27:42.299904 | 2015-11-09T22:55:23 | 2015-11-09T22:55:23 | 10,406,940 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | """Command line interface for the commander
@author: francois
"""
from common.intfs.exceptions import ProbeConnectionFailed
from interface import Interface
class Cli(Interface):
    """Command line interface.

    Displays a prompt that reads commands from the user's keyboard input and
    dispatches them: probe commands (prefixed "do"), probe-table display
    ("disp") and "exit". Runs until quit() is called or input is interrupted.
    """
    # Column widths and headings used to render the probe table in getProbes()
    COL_WIDTHS = (10, 20, 20)
    COL_NAMES = ("ProbeId", "ProbeIp", "Status")
    HEADING = "{names[0]:<{wi[0]}}{names[1]:<{wi[1]}}{names[2]:<{wi[2]}}\n"
    PROBE_TEMPLATE = "{names[0]:<{wi[0]}}{names[1]:<{wi[1]}}{names[2]:<{wi[2]}}\n"
    # Commands understood by the prompt loop in start()
    CMD_PREFIX = "do"
    DISP_CMD = "disp"
    EXIT_CMD = "exit"
    PROMPT = "%s (%s) > "
    def __init__(self, probeIp):
        """Attach the interface to the probe at *probeIp*.

        targetId/targetIp are set by Interface.__init__ -- presumably
        resolved from the probe address; confirm in the Interface base class.
        """
        Interface.__init__(self, probeIp)
        self.prompt = self.PROMPT % (self.targetId, self.targetIp)
        self.isRunning = True
        self.status = None
        self.commandInput = None
    def start(self):
        """Start reading and replying to commands"""
        while self.isRunning:
            try:
                cmd = input(self.prompt)
                if cmd.startswith(self.CMD_PREFIX):
                    # probe command: delegate and wait for it to finish
                    tcmd = self.doCommand(cmd)
                    tcmd.join()
                elif cmd == self.DISP_CMD:
                    print(self.getProbes())
                elif cmd == self.EXIT_CMD:
                    self.quit()
                else:
                    print("Command not recognized, commands are %s" % self.getCommands())
            except (KeyboardInterrupt, EOFError):
                # Ctrl-C / Ctrl-D leaves the loop cleanly
                self.quit()
            finally:
                print("\n")
    def quit(self):
        """Stop listening"""
        self.isRunning = False
        super().quit()
    def getCommands(self):
        """Return the list of available commands as a display string"""
        return "Commands : %s" % ', '.join([self.CMD_PREFIX, self.DISP_CMD, self.EXIT_CMD])
    def getProbes(self):
        """Fetch the probes from the remote commander server and format them
        as a table string.

        Returns None implicitly when the connection fails: the failure is
        reported through updateStatus and logged instead of raising.
        """
        try:
            p = self.fetchProbes()
            probes = "Number of probes : %s\n" % len(p)
            probes += self.HEADING.format(wi = self.COL_WIDTHS, names = self.COL_NAMES)
            for probe in p:
                probes += self.PROBE_TEMPLATE.format(names = (probe.getId(), probe.getIp(), probe.getStatus()), wi = self.COL_WIDTHS)
            return probes
        except ProbeConnectionFailed:
            self.updateStatus("Cannot get the list of probes")
            self.logger.error("Connection failed", exc_info = 1)
    def updateStatus(self, status):
        """Update the status of this commander
        :param status: new status to apply
        """
        print("Status : %s" % status)
| [
"dev.espinetfrancois@gmail.com"
] | dev.espinetfrancois@gmail.com |
d88163aaaa5177271b169358ae63e56a82ca0345 | edb97b259ffc9b0c0f1f1e1964f0b1c1cee33937 | /WeatherCrawler/WeatherData.py | fa4cff95a74c31725b374dceea2ca71e13b9fb29 | [
"MIT"
] | permissive | rebsi/WeatherCrawler | b81e739941e868c77bfeee3cfad5898a778b7b37 | b2e22df988b3d49a62f286f6e224ceaa1534a409 | refs/heads/master | 2020-08-06T01:53:39.602947 | 2019-11-12T08:12:17 | 2019-11-12T08:12:17 | 212,790,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,511 | py | #!usr/bin/env python
# -*-coding:utf-8 -*-
import datetime
from MsSqlWeatherDataMixin import MsSqlWeatherDataMixin
class WeatherData(MsSqlWeatherDataMixin):
"""description of class"""
def __init__(self, stationId, dataTime, temperature, humidity, windDirection, windSpeed, gust, rainLastHour, sun, pressure, elevation):
self.stationId = stationId
self.dataTime = dataTime
self.temperature = temperature
self.humidity = humidity
self.windDirection = windDirection
self.windSpeed = windSpeed
self.gust = gust
self.rainLastHour = rainLastHour
self.cloudiness = 100 - sun
self.pressure = pressure
self.elevation = elevation
def __init__(
self, stationId,
dataTime, elevation, temp2m, temp2mMin, temp2mMinTime, temp2mMax, temp2mMaxTime,
humidity, humidityMin, humidityMinTime, humidityMax, humidityMaxTime,
dewPoint, dewPointMin, dewPointMinTime, dewPointMax, dewPointMaxTime,
pressure, pressure3hTrend,
wellness, forecastShort, forecastLong, snowLine, cloudBase, uvIndex,
solarRadiation, solarRadiationMax, solarRadiationMaxTime,
evapotranspiration,
windchill, windchillMin, windchillMinTime, windchillMax, windchillMaxTime,
windSpeed, windDirection, windDominatingDirection, windMaxTime, windMax, windMaxDirection,
gust, gustDirection, gustMaxTime, gustMax, gustMaxDirection,
lastFrost, lastFrostDuration,
rainLastHour, rainDay, rainLast,
sunrise, sunZenith, sunset, cloudiness,
moonPhase, moonNextFull
):
self.stationId = stationId
self.dataTime = dataTime
self.elevation = elevation
self.temperature = self.temp2m = temp2m
self.temp2mMin = temp2mMin
self.temp2mMinTime = temp2mMinTime
self.temp2mMax = temp2mMax
self.temp2mMaxTime = temp2mMaxTime
self.humidity = humidity
self.humidityMin = humidityMin
self.humidityMinTime = humidityMinTime
self.humidityMax = humidityMax
self.humidityMaxTime = humidityMaxTime
self.dewPoint = dewPoint
self.dewPointMin = dewPointMin
self.dewPointMinTime = dewPointMinTime
self.dewPointMax = dewPointMax
self.dewPointMaxTime = dewPointMaxTime
self.pressure = pressure
self.pressure3hTrend = pressure3hTrend
self.wellness = wellness
self.forecastShort = forecastShort
self.forecastLong = forecastLong
self.snowLine = snowLine
self.cloudBase = cloudBase
self.uvIndex = uvIndex
self.solarRadiation = solarRadiation
self.solarRadiationMax = solarRadiationMax
self.solarRadiationMaxTime = solarRadiationMaxTime
self.evapotranspiration = evapotranspiration
self.windchill = windchill
self.windchillMin = windchillMin
self.windchillMinTime = windchillMinTime
self.windchillMax = windchillMax
self.windchillMaxTime = windchillMaxTime
self.windSpeed = windSpeed
self.windDirection = windDirection
self.windDominatingDirection = windDominatingDirection
self.windMaxTime = windMaxTime
self.windMax = windMax
self.windMaxDirection = windMaxDirection
self.gust = gust
self.gustDirection = gustDirection
self.gustMaxTime = gustMaxTime
self.gustMax = gustMax
self.gustMaxDirection = gustMaxDirection
self.lastFrost = lastFrost
self.lastFrostDuration = lastFrostDuration
self.rainLastHour = rainLastHour
self.rainDay = rainDay
self.rainLast = rainLast
self.sunrise = sunrise
self.sunZenith = sunZenith
self.sunset = sunset
self.cloudiness = cloudiness
self.moonPhase = moonPhase
self.moonNextFull = moonNextFull
def __str__(self):
return 'temperature: {}°\nhumidity: {}%\nwindDirection: {}\nwindSpeed: {}km/h\ngust: {}km/h\nrain: {}mm\ncloudiness: {}%\npressure: {}hPa\nElevation: {}'.format(self.temperature, self.humidity, self.windDirection, self.windSpeed, self.gust, self.rainLastHour, self.cloudiness, self.pressure, self.elevation)
def __unicode__(self):
return self.__str__();
def __repr__(self):
return self.__str__();
| [
"robert.ebetsberger@stiwa.com"
] | robert.ebetsberger@stiwa.com |
9d6d8796d79e57adabb9abb63c3db43e487b7b06 | c72e87b8312dd9688dfd9a4a4c942240a8af9242 | /server/tests-py/test_actions.py | 87c12be52b71c403a2d3b8e7a41f3388f609bc5e | [
"Apache-2.0",
"MIT"
] | permissive | jensenity/graphql-engine | e41a23ad0fe5b97e6e94c2c4b31abe5de2dcc306 | a8affc2cda8ca3b7a6c6e96ad4eecd37662ddb0e | refs/heads/master | 2022-04-21T04:40:35.012026 | 2020-04-24T06:09:00 | 2020-04-24T06:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,369 | py | #!/usr/bin/env python3
import pytest
import time
from validate import check_query_f, check_query
"""
TODO:- Test Actions metadata
"""
# Composite pytest marker: every action test class needs the action-webhook
# fixture plus the per-class schema / per-method data DB fixtures.
use_action_fixtures = pytest.mark.usefixtures(
    "actions_fixture",
    'per_class_db_schema_for_mutation_tests',
    'per_method_db_data_for_mutation_tests'
)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
@use_action_fixtures
class TestActionsSyncWebsocket:
    """Synchronous action tests executed over both HTTP and websocket transports."""
    @classmethod
    def dir(cls):
        # directory holding the YAML request/response fixtures
        return 'queries/actions/sync'
    def test_create_user_fail(self, hge_ctx, transport):
        check_query_f(hge_ctx, self.dir() + '/create_user_fail.yaml', transport)
    def test_create_user_success(self, hge_ctx, transport):
        check_query_f(hge_ctx, self.dir() + '/create_user_success.yaml', transport)
    def test_create_users_fail(self, hge_ctx, transport):
        check_query_f(hge_ctx, self.dir() + '/create_users_fail.yaml', transport)
    def test_create_users_success(self, hge_ctx, transport):
        check_query_f(hge_ctx, self.dir() + '/create_users_success.yaml', transport)
@use_action_fixtures
class TestActionsSync:
    """Synchronous action tests driven entirely by YAML fixture files."""
    @classmethod
    def dir(cls):
        return 'queries/actions/sync'
    def test_invalid_webhook_response(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/invalid_webhook_response.yaml')
    def test_expecting_object_response(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/expecting_object_response.yaml')
    def test_expecting_array_response(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/expecting_array_response.yaml')
    # Webhook response validation tests. See https://github.com/hasura/graphql-engine/issues/3977
    def test_mirror_action_not_null(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/mirror_action_not_null.yaml')
    def test_mirror_action_unexpected_field(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/mirror_action_unexpected_field.yaml')
    def test_mirror_action_no_field(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/mirror_action_no_field.yaml')
    def test_mirror_action_success(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/mirror_action_success.yaml')
@use_action_fixtures
class TestQueryActions:
    """Actions exposed as GraphQL *queries* (rather than mutations)."""
    @classmethod
    def dir(cls):
        return 'queries/actions/sync'
    def test_query_action_fail(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/get_user_by_email_fail.yaml')
    def test_query_action_success_output_object(self, hge_ctx):
        """Seed one user row, then run the object-returning query action."""
        gql_query = '''
        mutation {
          insert_user_one(object: {email: "clarke@gmail.com", name:"Clarke"}){
            id
          }
        }
        '''
        query = {
            'query': gql_query
        }
        headers = {}
        admin_secret = hge_ctx.hge_key
        if admin_secret is not None:
            headers['X-Hasura-Admin-Secret'] = admin_secret
        code, resp, _ = hge_ctx.anyq('/v1/graphql', query, headers)
        assert code == 200,resp
        check_query_f(hge_ctx, self.dir() + '/get_user_by_email_success.yaml')
    def test_query_action_success_output_list(self, hge_ctx):
        """Seed two user rows, then run the list-returning query action."""
        gql_query = '''
        mutation {
          insert_user(objects:
            [{id:1,email: "clarke@gmail.com", name:"Clarke 1"},
             {id:2,email: "clarke@gmail.com", name:"Clarke 2"}])
          {
            returning {
              id
            }
          }
        }
        '''
        query = {
            'query': gql_query
        }
        headers = {}
        admin_secret = hge_ctx.hge_key
        if admin_secret is not None:
            headers['X-Hasura-Admin-Secret'] = admin_secret
        code, resp, _ = hge_ctx.anyq('/v1/graphql', query, headers)
        assert code == 200,resp
        check_query_f(hge_ctx, self.dir() + '/get_users_by_email_success.yaml')
    # This test is to make sure that query actions work well with variables.
    # Earlier the HGE used to add the query action to the plan cache, which
    # resulted in intermittent validation errors, like:
    # {
    #   "errors": [
    #     {
    #       "extensions": {
    #         "path": "$.variableValues",
    #         "code": "validation-failed"
    #       },
    #       "message": "unexpected variables: email"
    #     }
    #   ]
    # }
    def test_query_action_should_not_throw_validation_error(self, hge_ctx):
        for _ in range(25):
            self.test_query_action_success_output_object(hge_ctx)
def mk_headers_with_secret(hge_ctx, headers=None):
    """Return *headers* with the admin secret added when one is configured.

    BUGFIX: the original signature used a mutable default (``headers={}``).
    Default values are evaluated once at definition time, so the very same
    dict was shared by every call that omitted *headers* -- a secret set by
    one call leaked into all later secret-less calls. A fresh dict is now
    created per call; passing an explicit dict still mutates and returns
    that dict, as callers in this module expect.
    """
    if headers is None:
        headers = {}
    admin_secret = hge_ctx.hge_key
    if admin_secret:
        headers['X-Hasura-Admin-Secret'] = admin_secret
    return headers
@use_action_fixtures
class TestActionsSyncResponseHeaders:
    """Headers returned by the action webhook must be forwarded to the client."""
    @classmethod
    def dir(cls):
        return 'queries/actions/sync'
    # See https://github.com/hasura/graphql-engine/issues/4021
    def test_set_cookie_header(self, hge_ctx):
        mutation = '''
          mutation {
            create_user(email: "clarke@gmail.com", name: "Clarke"){
              id
            }
          }
        '''
        query = {
            'query': mutation,
            'variables': {}
        }
        status, resp, resp_headers = hge_ctx.anyq('/v1/graphql', query, mk_headers_with_secret(hge_ctx))
        assert status == 200, resp
        assert 'data' in resp, resp
        # the webhook's Set-Cookie value ('abcd') must survive the round trip
        assert ('Set-Cookie' in resp_headers and
                resp_headers['Set-Cookie'] == 'abcd'), resp_headers
@use_action_fixtures
class TestActionsAsync:
    """Asynchronous actions: the mutation returns an action id, and the
    result is polled afterwards via a query on that id."""
    @classmethod
    def dir(cls):
        return 'queries/actions/async'
    def test_create_user_fail(self, hge_ctx):
        """An invalid email must surface in the async action's `errors` field."""
        graphql_mutation = '''
        mutation {
          create_user(email: "random-email", name: "Clarke")
        }
        '''
        query = {
            'query': graphql_mutation,
            'variables': {}
        }
        status, resp, _ = hge_ctx.anyq('/v1/graphql', query, mk_headers_with_secret(hge_ctx))
        assert status == 200, resp
        assert 'data' in resp
        action_id = resp['data']['create_user']
        query_async = '''
        query ($action_id: uuid!){
          create_user(id: $action_id){
            id
            errors
          }
        }
        '''
        query = {
            'query': query_async,
            'variables': {
                'action_id': action_id
            }
        }
        response = {
            'data': {
                'create_user': {
                    'id': action_id,
                    'errors': {
                        'code': 'invalid-email',
                        'path': '$',
                        'error': 'Given email address is not valid'
                    }
                }
            }
        }
        conf = {
            'url': '/v1/graphql',
            'headers': {},
            'query': query,
            'status': 200,
            'response': response
        }
        # poll (up to 10s) until the async action result matches
        check_query_timeout(hge_ctx, conf, True, 10)
    def test_create_user_success(self, hge_ctx):
        """A valid request must eventually expose the created user via `output`."""
        graphql_mutation = '''
        mutation {
          create_user(email: "clarke@hasura.io", name: "Clarke")
        }
        '''
        query = {
            'query': graphql_mutation,
            'variables': {}
        }
        status, resp, _ = hge_ctx.anyq('/v1/graphql', query, mk_headers_with_secret(hge_ctx))
        assert status == 200, resp
        assert 'data' in resp
        action_id = resp['data']['create_user']
        query_async = '''
        query ($action_id: uuid!){
          create_user(id: $action_id){
            __typename
            id
            output {
              __typename
              id
              user {
                __typename
                name
                email
                is_admin
              }
            }
          }
        }
        '''
        query = {
            'query': query_async,
            'variables': {
                'action_id': action_id
            }
        }
        response = {
            'data': {
                'create_user': {
                    '__typename': 'create_user',
                    'id': action_id,
                    'output': {
                        '__typename': 'UserId',
                        'id': 1,
                        'user': {
                            '__typename': 'user',
                            'name': 'Clarke',
                            'email': 'clarke@hasura.io',
                            'is_admin': False
                        }
                    }
                }
            }
        }
        conf = {
            'url': '/v1/graphql',
            'headers': {},
            'query': query,
            'status': 200,
            'response': response
        }
        check_query_timeout(hge_ctx, conf, True, 10)
    def test_create_user_roles(self, hge_ctx):
        """An async action started by user 1 must not be readable by user 2."""
        graphql_mutation = '''
        mutation {
          create_user(email: "blake@hasura.io", name: "Blake")
        }
        '''
        query = {
            'query': graphql_mutation,
            'variables': {}
        }
        headers_user_1 = mk_headers_with_secret(hge_ctx, {
            'X-Hasura-Role': 'user',
            'X-Hasura-User-Id': '1'
        })
        # create action with user-id 1
        status, resp, headers = hge_ctx.anyq('/v1/graphql', query, headers_user_1)
        assert status == 200, resp
        assert 'data' in resp
        action_id = resp['data']['create_user']
        query_async = '''
        query ($action_id: uuid!){
          create_user(id: $action_id){
            id
            output {
              id
            }
          }
        }
        '''
        query = {
            'query': query_async,
            'variables': {
                'action_id': action_id
            }
        }
        headers_user_2 = mk_headers_with_secret(hge_ctx, {
            'X-Hasura-Role': 'user',
            'X-Hasura-User-Id': '2'
        })
        conf_user_2 = {
            'url': '/v1/graphql',
            'headers': headers_user_2,
            'query': query,
            'status': 200,
            'response': {
                'data': {
                    'create_user': None # User 2 shouldn't be able to access the action
                }
            }
        }
        # Query the action as user-id 2
        # Make request without auth using admin_secret
        check_query_timeout(hge_ctx, conf_user_2, add_auth = False, timeout = 10)
        conf_user_1 = {
            'url': '/v1/graphql',
            'headers': headers_user_1,
            'query': query,
            'status': 200,
            'response': {
                'data': {
                    'create_user': {
                        'id': action_id,
                        'output': {
                            'id': 1
                        }
                    }
                }
            }
        }
        # Query the action as user-id 1
        # Make request without auth using admin_secret
        check_query_timeout(hge_ctx, conf_user_1, add_auth = False, timeout = 10)
def check_query_timeout(hge_ctx, conf, add_auth, timeout):
    """Poll check_query every 2 seconds until it passes or *timeout* seconds
    have elapsed; the last AssertionError is re-raised on expiry."""
    deadline = time.time() + timeout
    succeeded = False
    while not succeeded:
        time.sleep(2)
        try:
            check_query(hge_ctx, conf, add_auth=add_auth)
            succeeded = True
        except AssertionError:
            if time.time() > deadline:
                raise
@pytest.mark.usefixtures('per_class_tests_db_state')
class TestSetCustomTypes:
    """Custom-type metadata tests (scalar reuse and list-type relationships)."""
    @classmethod
    def dir(cls):
        return 'queries/actions/custom-types'
    def test_reuse_pgscalars(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/reuse_pgscalars.yaml')
    def test_reuse_unknown_pgscalar(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/reuse_unknown_pgscalar.yaml')
    def test_create_action_pg_scalar(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/create_action_pg_scalar.yaml')
    def test_list_type_relationship(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/list_type_relationship.yaml')
@pytest.mark.usefixtures('per_class_tests_db_state')
class TestActionsMetadata:
    """Action metadata API tests (permission management)."""
    @classmethod
    def dir(cls):
        return 'queries/actions/metadata'
    def test_recreate_permission(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/recreate_permission.yaml')
| [
"noreply@github.com"
] | jensenity.noreply@github.com |
9602070d1a908a5c1f35c000e1bc4291fd8c6ad3 | f04b05e990a47a70f8e72c2518b738c9bcce1842 | /codes/simulation.py | 8e46b601c260e82e6575eb13244b1db3c6deb9f8 | [] | no_license | lkqllx/High-Frequency-Signal-Filtering | 825b7ce26d3b3e43ed5ee3dca26f612796241507 | 7dd68f45caa07e61db2a096da8b59099f13b5687 | refs/heads/master | 2022-07-11T11:28:56.090623 | 2019-12-17T03:08:57 | 2019-12-17T03:08:57 | 207,538,952 | 0 | 0 | null | 2022-06-21T23:41:24 | 2019-09-10T11:18:04 | HTML | UTF-8 | Python | false | false | 3,256 | py | """
simulation.py is to manually create an environment for testing the performance of filtering
"""
import numpy as np
from visualization import VisualTool
import random
np.random.seed(1)
def noise_signal(signals, high=1, low=-1):
"""
Add Noise to signals
:param signals: clean signals
:param high: upper limit of noise
:param low: lower limit of noise
:return: a noised signal
"""
for signal in signals:
yield signal + random.uniform(high, low)
class RandSignal:
    """Generate random step-wise or piecewise-linear ("fake") signals.

    :param upper: upper bound (exclusive) for the random segment heights
    :param lower: lower bound (inclusive) for the random segment heights
    :param size: number of random segments
    :param freq: sampling step; a segment of length L yields int(L / freq) points
    """

    def __init__(self, upper: int, lower: int, size: int = 10, freq: float = 0.1):
        self.upper = upper
        self.lower = lower
        self._size = size
        self._freq = freq
        # Heights are drawn first, lengths second, so the RNG call order is
        # stable for a given seed.
        self.rand_width = list(self.random_nums)
        self.rand_length = list(self.random_nums)

    @property
    def random_nums(self):
        """Yield ``size`` random integers in [lower, upper)."""
        for _ in range(self._size):
            yield np.random.randint(self.lower, self.upper)

    @property
    def step_signal(self):
        """Yield a step-wise signal: each segment repeats its height."""
        for width, length in zip(self.rand_width, self.rand_length):
            for _ in range(int(length / self._freq)):
                yield width

    @property
    def fake_signal(self):
        """Return a piecewise-linear signal ramping between segment heights."""
        points = []
        for seg, length in enumerate(self.rand_length):
            if not points:
                # The signal always starts from zero.
                points.append(0)
            steps = int(length / self._freq)
            start = points[-1]
            delta = self.rand_width[seg] - start
            for k in range(steps):
                points.append(start + delta * k / steps)
        return points
if __name__ == '__main__':
    # Demo: render one clean simulated signal and one noisy copy to PNG files.
    r = RandSignal(upper=10, lower=1, freq=0.5, size=10)
    # NOTE(review): despite its name, this variable holds fake_signal (the
    # piecewise-linear signal), not step_signal — confirm which figure was meant.
    step_signal = list(r.fake_signal)
    v = VisualTool(step_signal)
    v.plot_line(percent=1, to_png=True, png_path='../figs/simulated_clean.png')
    # NOTE(review): this rebinding shadows the module-level noise_signal
    # function, so it cannot be called again after this line.
    noise_signal = noise_signal(step_signal)
    v = VisualTool(noise_signal)
    v.plot_line(percent=1, to_png=True, png_path='../figs/simulated_noisy.png')
| [
"lkqllx@gmail.com"
] | lkqllx@gmail.com |
5411ce1e8a0ef802cd08f02bf93efd957c3dda38 | 8a27770fc4414f9335f9bb06c89276c45d5de86b | /snowstorm.py | c9788dba3c53f2852a242eb060ac0f04069c401a | [] | no_license | anushapanta/tkinter-programs | 4edc47a949574c7ed03b801caa18b58260241636 | 18a118a1a403caff5f006b1f4fa2707b751b8cea | refs/heads/master | 2022-12-17T03:02:31.505701 | 2020-09-22T11:01:35 | 2020-09-22T11:01:35 | 297,620,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from turtle import *
import random
shape("classic")  # turtle cursor shape
speed(2)  # slow animation speed so the drawing is visible
pensize(3)  # stroke width in pixels
colours=["blue","purple","cyan","red","yellow","green","orange"]  # palette sampled per arm
Screen().bgcolor("dark orchid")  # window background colour
def vshape(size):
    """Draw a small V (two strokes of length ``size``) at the current position.

    The turtle ends exactly where it started: each forward is undone by a
    backward and the 25/50/25 degree turns cancel out.
    """
    right(25)
    forward(size)
    backward(size)
    left(50)
    forward(size)
    backward(size)
    right(25)
def snowflakeArm(size):
    """Draw one snowflake arm: four segments, each decorated with a V,
    then walk back to the arm's origin."""
    for _ in range(4):
        forward(size)
        vshape(size)
    backward(size*4)
def snowflake(size):
    """Draw a six-armed snowflake: one randomly coloured arm every 60 degrees.

    :param size: segment length of each arm
    """
    for x in range(0, 6):
        color(random.choice(colours))
        snowflakeArm(size)
        # Fix: rotate after *each* arm. The original called right(60) once,
        # outside the loop, so all six arms were drawn on top of each other
        # instead of fanning out into a snowflake.
        right(60)
# Draw ten snowflakes of random size at random canvas positions.
for i in range(0,10):
    size=random.randint(5,30)
    x=random.randint(-200,200)
    y=random.randint(-200,200)
    penup()  # lift the pen so moving to the new spot draws nothing
    goto(x,y)
    pendown()
    snowflake(size)
"anusha474232@gmail.com"
] | anusha474232@gmail.com |
c2cfda99592ea8ed25c13139448162753c8e3e09 | 6ff7b3cd99aea670792aad35f49b4d762bd3952a | /migrations/versions/f8f3d3338933_initial.py | e1fddda3a2e45b3ac97c7d14513b75afa99b9458 | [] | no_license | melardev/FlaskApiCrud | 3af8c1f375f6aefe258334368fdc7bcab900a2a0 | 40e0ffe6f690a1698a3c3f6dd1a03398260cd073 | refs/heads/master | 2020-04-27T23:06:51.527985 | 2019-03-10T00:52:26 | 2019-03-10T00:52:26 | 174,762,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | """'initial'
Revision ID: f8f3d3338933
Revises:
Create Date: 2019-03-08 21:02:46.699000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f8f3d3338933'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``todos`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('todos',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=100), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('completed', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``todos`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('todos')
    # ### end Alembic commands ###
| [
"melardev@users.noreply.github.com"
] | melardev@users.noreply.github.com |
c8ef96a6d31d3cc3aea5eaff102bec24a40ef438 | 56c6b4fc7be95d667403585e50600afd388d9188 | /transactions/getTransactionsData.py | 579e67e3ddb3a7fa28bc4deaa2bcfdf27aadc04c | [] | no_license | pLabarta/dcrjournal-auto | f32939b6aad9589dd4e535d53f47cb8cfb74be70 | 584ea480898f56aa57cf3bfa2e4239f6d531cd68 | refs/heads/master | 2021-05-17T08:24:37.443001 | 2020-03-28T04:00:55 | 2020-03-28T04:00:55 | 250,707,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,041 | py | from decred.dcr.dcrdata import DcrdataClient
from datetime import datetime
import json, time, sys, requests
import pandas as pd
import numpy as np
# NOTE: the client (and last_block below) hit the public dcrdata explorer at
# import time — this script needs network access just to be imported.
client = DcrdataClient('https://explorer.dcrdata.org')
# You can enter any block before the month you want to analyze
# The date crop will take care of it (:
# Enter starting and last block or just use the best one
block_start = 419000
# last_block = 420500
last_block = client.block.best.height()
# YYYY-MM-DD HH:MM:SS format to crop the DataFrame
start_date = '2020-02-01 00:00:00'
end_date = '2020-03-01 00:00:00'
## UTILS
def ts_to_date(ts):
    """Format a unix timestamp as a ``YYYY-MM-DD HH:MM:SS`` UTC string."""
    return datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def GetHumanReadable(size,precision=2):
    """Format a byte count as a human-readable string (B through TB).

    :param size: number of bytes
    :param precision: decimal places in the formatted number
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB']
    unit = 0
    # Divide down until the value fits the unit (or we run out of units).
    while size > 1024 and unit < 4:
        size = size / 1024.0
        unit += 1
    return "%.*f%s" % (precision, size, units[unit])
def split_range(start,stop):
    """Split the block range [start, stop] into chunks of at most 1000 blocks.

    dcrdata limits the number of blocks per request, so ranges wider than
    1000 are broken into (lo, hi) tuples of 1000 blocks each, with the final
    chunk ending exactly at ``stop``.

    :param start: first block height
    :param stop: last block height (inclusive)
    :return: list of (lo, hi) tuples covering the range
    """
    if (stop - start) > 1000:
        print('Splitting range into several requests...')
        print('This will take a while to avoid stressing the Block Explorer')
        range_list = []
        current = start
        while current < stop:
            if (stop - current) > 999:
                chunk = (current, current + 999)
                range_list.append(chunk)
                # Fix: log the chunk that was actually appended. The original
                # advanced `current` first and printed (current, current+999),
                # i.e. the *next* window rather than the one just added.
                print(f'added {chunk} to the list')
                current = current + 1000
            else:
                range_list.append((current, stop))
                current = stop
        return range_list
    else:
        print('Requesting range in one request! Nice little block range!')
        return [(start, stop)]
## GET BLOCK RANGE AND RETURN PANDAS DF
## This could be get using an endpoint similar to TX COUNT https://explorer.dcrdata.org/api/chart/block-size?bin=block
def get_block_size_df(idx0, idx):
    """Fetch blocks idx0..idx from dcrdata and return a DataFrame with
    ``height``, ``size`` and ``date`` (UTC string) columns.

    Requests are chunked through split_range() and paced with a 1 s pause
    so the public explorer is not hammered.
    """
    range_list = split_range(idx0,idx)
    df = pd.DataFrame()
    for each in range_list:
        print(f'Getting blocks {each}')
        # Assumes every block dict carries 'height', 'size' and 'time' keys —
        # per the dcrdata block-range endpoint; confirm against the API docs.
        block_list = client.block.range(each[0],each[1])
        sub_df = pd.DataFrame()
        sub_df['height'] = np.array([block['height'] for block in block_list])
        sub_df['size'] = np.array([block['size'] for block in block_list])
        sub_df['date'] = np.array([ts_to_date(block['time']) for block in block_list])
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
        # pd.concat is the modern replacement.
        df = df.append(sub_df,ignore_index=True)
        time.sleep(1)
    return df
## TX COUNT AND SIZE CHART END POINT
api = 'https://explorer.dcrdata.org/api'


def dcrdata_req(req):
    """GET the given dcrdata API path and return the decoded JSON payload."""
    return requests.get(api + req).json()
def get_all_day_tx_count(blocks_dataframe):
    """Fetch daily transaction counts and crop them to the input's date span.

    :param blocks_dataframe: DataFrame with a datetime ``date`` column
    :return: DataFrame with ``count`` and ``date`` columns restricted to
        [min(date), max(date)] of the input
    """
    raw_count = dcrdata_req('/chart/tx-count?bin=day')
    dates = blocks_dataframe['date']
    # Truncate block timestamps to midnight so whole days compare equal
    # against the API's daily bins.
    dates = dates.apply(lambda x: x.replace(hour=0,minute=0,second=0))
    df = pd.DataFrame()
    # Assumes the chart endpoint returns parallel 'count' and 't' arrays —
    # confirm against the dcrdata chart API.
    df['count'] = np.array([count for count in raw_count['count']])
    df['date'] = [pd.Timestamp(t,unit='s') for t in raw_count['t']]
    this_mask = (df['date'] >= dates.min()) & (df['date'] <= dates.max())
    df = df.loc[this_mask]
    return df
## MAIN FUNC
if __name__ == '__main__':
    # Build the per-block DataFrame, crop it to [start_date, end_date),
    # then print monthly chain-growth and transaction statistics.
    df = get_block_size_df(block_start, last_block)
    # print('Before the mask')
    # print(df)
    df['date'] = pd.to_datetime(df['date'])
    mask = (df['date'] > start_date) & (df['date'] < end_date)
    df = df.loc[mask]
    # print('After the mask')
    # print(df)
    tx_count_df = get_all_day_tx_count(df)
    # print(tx_count_df['count'])
    # Daily transaction-count statistics for the month.
    monthly_tx_count_sum = tx_count_df['count'].sum()
    monthly_tx_count_mean = tx_count_df['count'].mean()
    monthly_tx_count_min = tx_count_df['count'].min()
    monthly_tx_count_max = tx_count_df['count'].max()
    # Block-size statistics for the month.
    monthly_block_size_mean = df['size'].mean()
    monthly_block_size_sum = df['size'].sum()
    monthly_block_size_max = df['size'].max()
    monthly_block_size_min = df['size'].min()
    print(f'This month, the blockchain size grew {GetHumanReadable(monthly_block_size_sum,precision=2)}. Blocks had an average size of {GetHumanReadable(monthly_block_size_mean,precision=2)}. The smallest block had a size of {GetHumanReadable(monthly_block_size_min,precision=2)} and the largest one, {GetHumanReadable(monthly_block_size_max,precision=2)}.')
    print(f'{monthly_tx_count_sum} transactions were included in the blockchain. On average, there were {int(monthly_tx_count_mean)} per day. The busiest day saw {monthly_tx_count_max} TXs and the least one {monthly_tx_count_min} TXs.')
    # DATAFRAME TO CSV
    tx_count_df.to_csv('daily_tx_count.csv')
    df.to_csv('block_size.csv')
    # This month, the blockchain size grew 112.98MB. Blocks had an average size of 13.92KB. The smallest block had a size of 1.36KB and the largest one 366.37KB.
    # 132323 transactions were included in the blockchain. On average, there were 4562 transactions per day. The busiest day saw 5175 TXs and the least one 3709.
    ## TODO
    # IDEAL USAGE "python3 getTransactionsText.py year month(english) "
| [
"noreply@github.com"
] | pLabarta.noreply@github.com |
dfbba26851a42e9ca1b1a62230992475e7e16da9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/12/usersdata/76/5514/submittedfiles/impedimento.py | e64198eb3222c4320efd7c71f70c7c45cd091526 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# START YOUR CODE HERE (translated from Portuguese "COMECE SEU CÓDIGO AQUI")
# Offside ("impedimento") check: reads three values and prints 'S' or 'N'.
L = input('Digite o valor de L:')
R = input('Digite o valor de R:')
D = input('Digite o valor de D:')
# NOTE(review): under Python 3 input() returns str, so R > 50 raises TypeError.
# The `from __future__ import division` above suggests this targets Python 2,
# where input() evaluates the typed expression — confirm the interpreter.
if R>50 and L<R and R>D:
    print('S')
# NOTE(review): when R == D neither branch prints anything — confirm intended.
if R>50 and L<R and R<D:
    print('N')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
def getSkyline(buildings):
    """Divide-and-conquer skyline.

    Each building is [x_left, x_right, height]; the result is the list of
    key points [[x, height], ...] where the skyline height changes.
    """
    if not buildings:
        return []
    if len(buildings) == 1:
        x_left, x_right, h = buildings[0]
        return [[x_left, h], [x_right, 0]]
    half = len(buildings) // 2
    return merge(getSkyline(buildings[:half]), getSkyline(buildings[half:]))
def merge(left, right):
    """Merge two skylines, keeping only points where the height changes."""
    # Heights currently contributed by each half.
    hl = hr = 0
    i = j = 0
    merged = []
    while i < len(left) and j < len(right):
        xl, xr = left[i][0], right[j][0]
        if xl < xr:
            point = [xl, max(left[i][1], hr)]
            hl = left[i][1]
            i += 1
        elif xl > xr:
            point = [xr, max(right[j][1], hl)]
            hr = right[j][1]
            j += 1
        else:
            point = [xl, max(left[i][1], right[j][1])]
            hl = left[i][1]
            hr = right[j][1]
            i += 1
            j += 1
        # Only record a point when the height actually changes.
        if not merged or merged[-1][1] != point[1]:
            merged.append(point)
    # At most one side still has leftover points; append them unchanged.
    merged.extend(left[i:] or right[j:])
    return merged
print(getSkyline([[1,5,11], [2,7,6], [3,9,13], [12,16,7], [14,25,3], [19,22,18], [23,29,13],[24,28,4]]))
"3158874848@qq.com"
] | 3158874848@qq.com |
64227c2c6a90c9fcfb1b545880778f17fb57500f | f31bf36539e43ca9bd998eb58d3311975a8b0c69 | /Assignment 3 - Backprop and Self-Attention/code/test_decoder.py | 58c9a5613be61aaa036764bbdb8bf7229b481f0c | [] | no_license | keya-desai/Natural-Language-Processing | 26dc465327ddebc3ac0bcbfe501e9661cd86ccd9 | 5c9a7149f2454f84afd3b18ba007b29b0a99fa01 | refs/heads/master | 2022-12-02T09:22:21.475022 | 2020-08-20T22:22:56 | 2020-08-20T22:22:56 | 289,068,783 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | import encoder
import decoder
import torch
import torch.nn as nn
import unittest
class TestDecoder(unittest.TestCase):
    """Output-shape tests for decoder.Decoder under its conditioning modes:
    attention/no-attention crossed with (bi)directional encoders, plus
    fully unconditioned decoding."""
    def setUp(self):
        """Build a shared embedding and a padded source/target batch."""
        self.vocab_size = 200
        self.dim = 30
        self.wemb = nn.Embedding(self.vocab_size, self.dim, padding_idx=0)
        self.num_layers = 2
        self.dropout = 0
        # Source batch (T_src x batch); one sequence per column, 0 = padding:
        # 1 6 9
        # 2 7 10
        # 3 8 0
        # 4 0 0
        # 5 0 0
        self.batch_size = 3
        self.T_src = 5
        self.src = torch.zeros((self.T_src, self.batch_size)).long()
        for row in range(5): self.src[row][0] = row + 1
        for row in range(3): self.src[row][1] = row + 6
        for row in range(2): self.src[row][2] = row + 9
        self.lengths = torch.IntTensor([5, 3, 2])
        # Target batch (T_tgt x batch), same column layout:
        # 9 3 1
        # 8 2 100
        # 7 100 0
        # 100 0 0
        self.T_tgt = 4
        self.tgt = torch.zeros((self.T_tgt, self.batch_size)).long()
        self.tgt[0][0] = 9
        self.tgt[1][0] = 8
        self.tgt[2][0] = 7
        self.tgt[3][0] = 100
        self.tgt[0][1] = 3
        self.tgt[1][1] = 2
        self.tgt[2][1] = 100
        self.tgt[0][2] = 1
        self.tgt[1][2] = 100
    def test_attention_conditioning_bidirectional(self):
        """Attention decoder over a bidirectional encoder (with bridge)."""
        enc = encoder.Encoder(self.wemb, self.num_layers, self.dropout,
                              bidirectional=True, use_bridge=True)
        dec = decoder.Decoder(self.wemb, self.num_layers, self.dropout,
                              use_attention=True,
                              bidirectional_encoder=True)
        memory_bank, final = enc.forward(self.src, self.lengths)
        dec.init_state(batch_size=None, encoder_final=final)
        dec_outs, attns = dec(self.tgt, memory_bank=memory_bank,
                              memory_lengths=self.lengths)
        self.assertEqual(list(dec_outs.size()), [self.T_tgt, self.batch_size,
                                                 self.dim])
    def test_attention_conditioning(self):
        """Attention decoder over a unidirectional encoder (no bridge)."""
        enc = encoder.Encoder(self.wemb, self.num_layers, self.dropout,
                              bidirectional=False, use_bridge=False)
        dec = decoder.Decoder(self.wemb, self.num_layers, self.dropout,
                              use_attention=True,
                              bidirectional_encoder=False)
        memory_bank, final = enc.forward(self.src, self.lengths)
        dec.init_state(batch_size=None, encoder_final=final)
        dec_outs, attns = dec(self.tgt, memory_bank=memory_bank,
                              memory_lengths=self.lengths)
        self.assertEqual(list(dec_outs.size()), [self.T_tgt, self.batch_size,
                                                 self.dim])
    def test_no_conditioning(self):
        """Decoder run without any encoder state or memory bank."""
        dec = decoder.Decoder(self.wemb, self.num_layers, self.dropout,
                              use_attention=False,
                              bidirectional_encoder=False)
        dec.init_state(batch_size=self.batch_size, encoder_final=None)
        dec_outs, attns = dec(self.tgt, memory_bank=None, memory_lengths=None)
        self.assertEqual(list(dec_outs.size()), [self.T_tgt, self.batch_size,
                                                 self.dim])
    def test_simple_conditioning(self):
        """Decoder initialised from the encoder final state, no attention."""
        enc = encoder.Encoder(self.wemb, self.num_layers, self.dropout,
                              bidirectional=False, use_bridge=False)
        dec = decoder.Decoder(self.wemb, self.num_layers, self.dropout,
                              use_attention=False,
                              bidirectional_encoder=False)
        memory_bank, final = enc.forward(self.src, self.lengths)
        dec.init_state(batch_size=None, encoder_final=final)
        dec_outs, attns = dec(self.tgt, memory_bank=None, memory_lengths=None)
        self.assertEqual(list(dec_outs.size()), [self.T_tgt, self.batch_size,
                                                 self.dim])
if __name__ == '__main__':
    unittest.main()
| [
"keyadesai97@gmail.com"
] | keyadesai97@gmail.com |
967f66256d5f7939295346fde9199007e5c073b0 | ff75e9f1989c57e8e8d511ad01a10de4f1d1f60b | /mh.py | 068038afee52692a61c204c781143fb1d944ab10 | [] | no_license | GregoryZeng/pmr-cw2 | 11ef75d01358a663bcc84c05a7cc4246fc0b2fab | d9f06ec4abbee1f08624c6ea24cd783bc61187fb | refs/heads/master | 2020-05-02T05:26:40.826376 | 2019-03-27T16:25:59 | 2019-03-27T16:25:59 | 177,771,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | import numpy as np
def mh(p_star, param_init, num_samples=5000, stepsize=1.0, W=0):
    """Sample from an unnormalised density with Metropolis-Hastings.

    The proposal is an isotropic Gaussian N(x, stepsize^2 I). Assumes
    ``p_star`` is strictly positive at every visited point (the acceptance
    ratio divides by it).

    :param p_star: callable returning the unnormalised target density
    :param param_init: initial parameter vector (any array-like)
    :param num_samples: number of samples to record
    :param stepsize: standard deviation of the Gaussian proposal
    :param W: number of burn-in transitions performed before recording
    :return: list of ``num_samples`` numpy arrays; the first entry is the
        post-burn-in state (with W=0 that is ``param_init`` itself)
    """
    param_init = np.array(param_init)
    if num_samples == 0:
        return []
    x_curr = param_init
    # Burn-in stage: advance the chain without recording samples.
    for _ in range(W):
        x_curr = _mh_step(p_star, x_curr, stepsize)
    # Sampling stage. The original duplicated the transition code verbatim
    # in both loops; it now lives in _mh_step.
    samples = [x_curr]
    for _ in range(num_samples - 1):
        x_curr = _mh_step(p_star, x_curr, stepsize)
        samples.append(x_curr)
    return samples


def _mh_step(p_star, x_curr, stepsize):
    """One MH transition: propose from N(x_curr, stepsize^2 I), then
    accept or reject with probability min(1, p*(cand)/p*(curr))."""
    x_cand = np.random.normal(x_curr, stepsize, x_curr.shape)
    a = p_star(x_cand) / p_star(x_curr)
    if a >= 1:
        return x_cand
    # Draw the uniform only when needed, matching the original RNG call order.
    if np.random.uniform() < a:
        return x_cand
    return x_curr
| [
"gregzeng@qq.com"
] | gregzeng@qq.com |
f7a8ba1746262090a73c07c6bad57f67d52d6ea2 | 4228d415179b798ddfc7c4fb3f6af242aa8bc75f | /SnakeGame/view.py | a07be9896b823a22e1acc5b6aa686fffadca6ad0 | [] | no_license | Adel-Wehbi/SnakeAI | 2f98455006489be37c71ecdbbe6f08a3dd22a94c | 33161a6c2303171bada61ac88293720b673560f7 | refs/heads/master | 2020-08-30T15:49:20.392223 | 2020-01-18T18:58:59 | 2020-01-18T18:58:59 | 218,425,561 | 1 | 0 | null | 2020-01-18T18:59:00 | 2019-10-30T02:22:09 | Python | UTF-8 | Python | false | false | 2,372 | py | import pygame
from model import CellContent
import enum
class GameView(object):
    """Renders the snake-game grid with pygame: background, snake body,
    snake head and food."""

    BG_COLOR = green = 0, 135, 0
    SNAKE_COLOUR = 135, 0, 0
    FOOD_COLOUR = 255, 51, 0
    SNAKEHEAD_COLOR = 0, 0, 0

    SNAKEHEAD_OFFSET = 0.1
    SNAKEBODY_OFFSET = 0.05
    FOOD_OFFSET = 0.25

    def __init__(self, gridDimensions, windowSize=(540, 540)):
        # Grid dimensions in number of cells.
        self.width, self.height = gridDimensions
        # Window dimensions in pixels.
        self.windowSize = windowSize
        # Pixel size of a single grid cell.
        self.cellWidth = windowSize[0] / self.width
        self.cellHeight = windowSize[1] / self.height

    def render(self, grid):
        """Redraw the whole window from the given grid of CellContent values."""
        self.screen = pygame.display.set_mode(self.windowSize)
        self.screen.fill(self.BG_COLOR)
        for row in range(self.height):
            for col in range(self.width):
                cell = grid[row][col]
                if cell == CellContent.SnakeBody:
                    self._fill_cell(col, row, self.SNAKE_COLOUR, self.SNAKEBODY_OFFSET)
                elif cell == CellContent.SnakeHead:
                    self._fill_cell(col, row, self.SNAKEHEAD_COLOR, self.SNAKEHEAD_OFFSET)
                elif cell == CellContent.Food:
                    pygame.draw.ellipse(self.screen, self.FOOD_COLOUR, (self.getleftCoord(col, self.FOOD_OFFSET), self.getTopCoord(row, self.FOOD_OFFSET), (self.cellWidth)/2.0, (self.cellHeight)/2.0))
        pygame.display.flip()

    def _fill_cell(self, x, y, colour, offset):
        """Draw one inset square at grid position (x, y)."""
        rect = (
            self.getleftCoord(x, offset),
            self.getTopCoord(y, offset),
            self.cellWidth * (1 - 2.0 * offset),
            self.cellHeight * (1 - 2.0 * offset),
        )
        pygame.draw.rect(self.screen, colour, rect)

    def getleftCoord(self, x, offset):
        '''Left pixel coordinate of the cell in column ``x``, shifted right
        by ``offset`` (a fraction of the cell width).'''
        return x * self.cellWidth + self.cellWidth * offset

    def getTopCoord(self, y, offset):
        '''Top pixel coordinate of the cell in row ``y``, shifted down
        by ``offset`` (a fraction of the cell height).'''
        return y * self.cellHeight + self.cellHeight * offset
| [
"adelwehbi@gmail.com"
] | adelwehbi@gmail.com |
ac9d0ab45bb1f3c40e908be5cb872fe5fda5b093 | 26a3d877a4e1da3227834ed65a5c9056b740f4b8 | /sendgrid/utils/filterutils.py | 7910051b33a9dd6d759d23311aa8c616af2c179d | [] | no_license | shaunc44/django-sendgrid-3 | c7ddb87637c19afe434b3715e87876b5a7794238 | 0070802d7a4fbbef171add9463b35bdc282cc378 | refs/heads/master | 2021-03-13T11:01:54.854730 | 2020-03-11T21:34:35 | 2020-03-11T21:34:35 | 246,673,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | from django.conf import settings
# Predicate helpers used as value validators in FILTER_SETTING_VALUE_TESTS.
PASS = lambda i: True
FAIL = lambda i: False
IS_ZERO_OR_ONE = lambda i: i in (0, 1, "0", "1")
# Known SendGrid filters and the settings each filter accepts.
INTERFACES = {
    "gravatar": ["enable"],
    "clicktrack": ["enable"],
    "subscriptiontrack": ["enable", "text/html", "text/plain", "replace", "url", "landing"],
    "opentrack": ["enable"],
}
# Maps "<filter>.<setting>" to the predicate validating its value.
FILTER_SETTING_VALUE_TESTS = {
    "gravatar.enable": IS_ZERO_OR_ONE,
    "clicktrack.enable": IS_ZERO_OR_ONE,
    "subscriptiontrack.enable": IS_ZERO_OR_ONE,
    "subscriptiontrack.text/html": PASS,
    "opentrack.enable": IS_ZERO_OR_ONE,
}
# Behaviour toggles read from Django settings, with the shown defaults.
IGNORE_MISSING_TESTS = getattr(settings, "IGNORE_MISSING_TESTS", False)
VALIDATE_FILTER_SPECIFICATION = getattr(settings, "VALIDATE_FILTER_SPECIFICATION", True)
def validate_filter_setting_value(filter, setting, value, ignoreMissingTests=IGNORE_MISSING_TESTS):
    """Validate ``value`` for ``setting`` on ``filter``.

    Raises AttributeError for unknown filters or settings. A setting with no
    registered test passes only when ``ignoreMissingTests`` is true; otherwise
    the KeyError from the lookup propagates.
    """
    if filter not in INTERFACES:
        raise AttributeError("The filter {f} is not valid".format(f=filter))
    if setting not in INTERFACES[filter]:
        raise AttributeError("The setting {s} is not valid for the filter {f}".format(s=setting, f=filter))

    key = ".".join([filter, setting])
    try:
        check = FILTER_SETTING_VALUE_TESTS[key]
    except KeyError:
        if not ignoreMissingTests:
            raise
        return True
    return check(value)
def validate_filter_specification(f):
    """Return True when every setting in the specification validates.

    An empty specification yields False (no passing results to confirm).
    """
    results = {}
    for filter, spec in f.items():
        for setting, value in spec.items():
            key = ".".join([filter, setting])
            results[key] = validate_filter_setting_value(filter, setting, value)
    outcomes = set(results.values())
    return len(outcomes) == 1 and True in outcomes
def update_filters(email, filterSpec, validate=VALIDATE_FILTER_SPECIFICATION):
    """Apply ``filterSpec`` to the email's SendGrid headers, optionally
    validating the specification first."""
    if validate and not validate_filter_specification(filterSpec):
        raise Exception("Invalid filter specification")

    for filter, spec in filterSpec.items():
        for setting, value in spec.items():
            email.sendgrid_headers.addFilterSetting(fltr=filter, setting=setting, val=value)
    return
| [
"coxs@osgbilling.com"
] | coxs@osgbilling.com |
cd54aeef70fce7aa969d28bee00bcaad640ac91d | bb1cf45bcd4af02cbb4e1c53ff95b6f4bb71d8d0 | /valoraquiz/quiz/populate.py | fca1bc110b22a01f8cbd33af20da2968a1307e3e | [
"MIT"
] | permissive | Matheus-mVilela/desafio-backend | 4d8bac3dbd203a609ceff29dafd3ca7216403098 | 64f192a036a366eee40a19a2a51a730035bdd393 | refs/heads/main | 2023-07-16T23:14:27.937077 | 2021-09-05T18:31:53 | 2021-09-05T18:31:53 | 401,106,904 | 0 | 0 | MIT | 2021-09-05T15:15:41 | 2021-08-29T17:53:22 | Python | UTF-8 | Python | false | false | 1,502 | py | import users.models
import users.choices
from . import models, services
def create_user(email, username, password, type):
    """Persist and return a new User row."""
    user = users.models.User.objects.create(
        email=email,
        username=username,
        password=password,
        type=type,
    )
    return user


def create_category(title):
    """Persist and return a new Category row."""
    return models.Category.objects.create(title=title)


def create_question(label, category_id):
    """Persist and return a new Question row in the given category."""
    return models.Question.objects.create(label=label, category_id=category_id)


def create_answer(label, question_id, is_right):
    """Persist and return a new Answer row for the given question."""
    answer = models.Answer.objects.create(
        label=label,
        question_id=question_id,
        is_right=is_right,
    )
    return answer
def run():
    """Seed the database with demo users, a category, questions with answers,
    and one quiz per user."""
    player = create_user(
        email="player@email.com",
        username="player",
        password="player123",
        type=users.choices.PLAYER,
    )
    admin = create_user(
        email="admin@email.com",
        username="admin",
        password="admin123",
        type=users.choices.ADMIN,
    )
    fake_category = create_category(title="Machine")

    # Eleven questions (indices 0..10 inclusive, as in the original while
    # loop), each with two wrong answers and one right answer.
    for count in range(11):
        question = create_question(
            label=f"Question {count}", category_id=fake_category.pk
        )
        for idx in range(3):
            create_answer(
                label=f"Answer {idx}",
                question_id=question.pk,
                is_right=idx == 2,
            )

    services.create_quiz(category_id=fake_category.pk, user=player)
    services.create_quiz(category_id=fake_category.pk, user=admin)
| [
"noreply@github.com"
] | Matheus-mVilela.noreply@github.com |
035ac7b690230c459aa6193cb7c95e3ede77e7d5 | 805d3df2c2e1f14f5537c4bd5bc0dc29f052f602 | /myenv/Lib/site-packages/werkzeug/utils.py | d0841d8429efa97fa252f5cc4f7f3827d0904e3c | [] | no_license | 047-Mahima/form | a70dfca4ba28248fc86a1822a7653668c59c0437 | f493375593ad8fe7a5d6ec6d42e192abebdda2a1 | refs/heads/main | 2023-05-11T10:34:10.701598 | 2023-04-29T10:50:54 | 2023-04-29T10:50:54 | 348,625,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,744 | py | from __future__ import annotations
import io
import mimetypes
import os
import pkgutil
import re
import sys
import typing as t
import unicodedata
from datetime import datetime
from time import time
from urllib.parse import quote
from zlib import adler32
from markupsafe import escape
from ._internal import _DictAccessorProperty
from ._internal import _missing
from ._internal import _TAccessorValue
from .datastructures import Headers
from .exceptions import NotFound
from .exceptions import RequestedRangeNotSatisfiable
from .security import safe_join
from .wsgi import wrap_file
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIEnvironment
from .wrappers.request import Request
from .wrappers.response import Response
_T = t.TypeVar("_T")

# Matches HTML/XML entity references like "&amp;" (not referenced in this
# chunk — presumably used by helpers elsewhere in the module).
_entity_re = re.compile(r"&([^;]+);")
# Characters *removed* by secure_filename(): anything outside [A-Za-z0-9_.-].
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
# Reserved DOS/Windows device names; secure_filename() refuses to return
# these bare on Windows.
_windows_device_files = {
    "CON",
    "PRN",
    "AUX",
    "NUL",
    *(f"COM{i}" for i in range(10)),
    *(f"LPT{i}" for i in range(10)),
}
class cached_property(property, t.Generic[_T]):
    """A :func:`property` that is only evaluated once. Subsequent access
    returns the cached value. Setting the property sets the cached
    value. Deleting the property clears the cached value, accessing it
    again will evaluate it again.
    .. code-block:: python
        class Example:
            @cached_property
            def value(self):
                # calculate something important here
                return 42
        e = Example()
        e.value  # evaluates
        e.value  # uses cache
        e.value = 16  # sets cache
        del e.value  # clears cache
    If the class defines ``__slots__``, it must add ``_cache_{name}`` as
    a slot. Alternatively, it can add ``__dict__``, but that's usually
    not desirable.
    .. versionchanged:: 2.1
        Works with ``__slots__``.
    .. versionchanged:: 2.0
        ``del obj.name`` clears the cached value.
    """
    def __init__(
        self,
        fget: t.Callable[[t.Any], _T],
        name: str | None = None,
        doc: str | None = None,
    ) -> None:
        super().__init__(fget, doc=doc)
        self.__name__ = name or fget.__name__
        # Fallback storage attribute for classes using __slots__, which must
        # declare "_cache_<name>" as a slot (see class docstring).
        self.slot_name = f"_cache_{self.__name__}"
        self.__module__ = fget.__module__
    def __set__(self, obj: object, value: _T) -> None:
        # Prefer the instance dict when the class has one; fall back to the slot.
        if hasattr(obj, "__dict__"):
            obj.__dict__[self.__name__] = value
        else:
            setattr(obj, self.slot_name, value)
    def __get__(self, obj: object, type: type = None) -> _T:  # type: ignore
        # Class-level access (obj is None): return the descriptor itself.
        if obj is None:
            return self  # type: ignore
        obj_dict = getattr(obj, "__dict__", None)
        # _missing is a sentinel distinguishing "not cached yet" from a
        # legitimately cached None.
        if obj_dict is not None:
            value: _T = obj_dict.get(self.__name__, _missing)
        else:
            value = getattr(obj, self.slot_name, _missing)  # type: ignore[arg-type]
        if value is _missing:
            # First access: evaluate once and cache the result.
            value = self.fget(obj)  # type: ignore
            if obj_dict is not None:
                obj.__dict__[self.__name__] = value
            else:
                setattr(obj, self.slot_name, value)
        return value
    def __delete__(self, obj: object) -> None:
        # Clearing resets to the sentinel so the next access re-evaluates.
        if hasattr(obj, "__dict__"):
            del obj.__dict__[self.__name__]
        else:
            setattr(obj, self.slot_name, _missing)
class environ_property(_DictAccessorProperty[_TAccessorValue]):
    """Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:
    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'
    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used. If no default value is provided `None` is used.
    Per default the property is read only. You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """
    # Writes are rejected unless a subclass/instance opts out of read-only.
    read_only = True
    def lookup(self, obj: Request) -> WSGIEnvironment:
        # The mapping _DictAccessorProperty reads from (and writes to when
        # not read-only).
        return obj.environ
class header_property(_DictAccessorProperty[_TAccessorValue]):
    """Like `environ_property` but for headers."""
    def lookup(self, obj: Request | Response) -> Headers:
        # Accessor target: the request/response Headers mapping.
        return obj.headers
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA registration.
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}


def get_content_type(mimetype: str, charset: str) -> str:
    """Build the full ``Content-Type`` value for a mimetype.

    Text-like types — ``text/*``, the known charset-bearing application
    types above, and any ``*+xml`` type — get ``; charset=...`` appended;
    every other mimetype is returned unchanged.

    :param mimetype: the mimetype to be used as content type.
    :param charset: the charset to be appended for text mimetypes.
    :return: the content type.
    """
    needs_charset = (
        mimetype.startswith("text/")
        or mimetype in _charset_mimetypes
        or mimetype.endswith("+xml")
    )
    if needs_charset:
        return f"{mimetype}; charset={charset}"
    return mimetype
def secure_filename(filename: str) -> str:
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`. The filename returned is an ASCII only string
    for maximum portability.
    On windows systems the function also makes sure that the file is not
    named after one of the special device files.
    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename('i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'
    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.
    .. versionadded:: 0.5
    :param filename: the filename to secure
    """
    # Transliterate to ASCII, dropping anything without an ASCII equivalent.
    filename = unicodedata.normalize("NFKD", filename)
    filename = filename.encode("ascii", "ignore").decode("ascii")
    # Turn path separators into spaces so the split/join below removes them.
    for sep in os.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    # Collapse whitespace to underscores, strip disallowed characters, and
    # trim leading/trailing dots and underscores.
    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
        "._"
    )
    # on nt a couple of special files are present in each folder. We
    # have to ensure that the target file is not such a filename. In
    # this case we prepend an underline
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        # Fix: prefix the original filename with an underscore. The line had
        # been corrupted to the literal f"_(unknown)", which discarded the
        # filename entirely on Windows device-name collisions.
        filename = f"_{filename}"
    return filename
def redirect(
    location: str, code: int = 302, Response: type[Response] | None = None
) -> Response:
    """Return a response object (a WSGI application) that redirects the
    client to ``location``.

    Supported codes are 301, 302, 303, 305, 307, and 308; 300 and 304 are
    not real redirects for this purpose.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    from .urls import iri_to_uri

    if Response is None:
        from .wrappers import Response

    # Escape the original (pre-IRI-conversion) location for display.
    display_location = escape(location)
    location = iri_to_uri(location)
    body = (
        "<!doctype html>\n"
        "<html lang=en>\n"
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to the target URL: "
        f'<a href="{escape(location)}">{display_location}</a>. If'
        " not, click the link.\n"
    )
    response = Response(body, code, mimetype="text/html")  # type: ignore[misc]
    response.headers["Location"] = location
    return response
def append_slash_redirect(environ: WSGIEnvironment, code: int = 308) -> Response:
    """Redirect to the current URL with a slash appended.

    If the current URL is ``/user/42``, the redirect URL will be
    ``42/``. When joined to the current URL during response
    processing or by the browser, this will produce ``/user/42/``.

    The behavior is undefined if the path ends with a slash already. If
    called unconditionally on a URL, it may produce a redirect loop.

    :param environ: Use the path and query from this WSGI environment
        to produce the redirect URL.
    :param code: the status code for the redirect.

    .. versionchanged:: 2.1
        Produce a relative URL that only modifies the last segment.
        Relevant when the current path has multiple segments.

    .. versionchanged:: 2.1
        The default status code is 308 instead of 301. This preserves
        the request method and body.
    """
    # Only the last path segment matters: a relative "<tail>/" target
    # resolves against the current URL in the browser.
    tail = environ["PATH_INFO"].rpartition("/")[2]
    new_path = f"{tail}/" if tail else "./"
    query_string = environ.get("QUERY_STRING")
    if query_string:
        new_path = f"{new_path}?{query_string}"
    return redirect(new_path, code)
def send_file(
    path_or_file: os.PathLike | str | t.IO[bytes],
    environ: WSGIEnvironment,
    mimetype: str | None = None,
    as_attachment: bool = False,
    download_name: str | None = None,
    conditional: bool = True,
    etag: bool | str = True,
    last_modified: datetime | int | float | None = None,
    max_age: None | (int | t.Callable[[str | None], int | None]) = None,
    use_x_sendfile: bool = False,
    response_class: type[Response] | None = None,
    _root_path: os.PathLike | str | None = None,
) -> Response:
    """Send the contents of a file to the client.
    The first argument can be a file path or a file-like object. Paths
    are preferred in most cases because Werkzeug can manage the file and
    get extra information from the path. Passing a file-like object
    requires that the file is opened in binary mode, and is mostly
    useful when building a file in memory with :class:`io.BytesIO`.
    Never pass file paths provided by a user. The path is assumed to be
    trusted, so a user could craft a path to access a file you didn't
    intend. Use :func:`send_from_directory` to safely serve user-provided paths.
    If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
    used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
    if the HTTP server supports ``X-Sendfile``, ``use_x_sendfile=True``
    will tell the server to send the given path, which is much more
    efficient than reading it in Python.
    :param path_or_file: The path to the file to send, relative to the
        current working directory if a relative path is given.
        Alternatively, a file-like object opened in binary mode. Make
        sure the file pointer is seeked to the start of the data.
    :param environ: The WSGI environ for the current request.
    :param mimetype: The MIME type to send for the file. If not
        provided, it will try to detect it from the file name.
    :param as_attachment: Indicate to a browser that it should offer to
        save the file instead of displaying it.
    :param download_name: The default name browsers will use when saving
        the file. Defaults to the passed file name.
    :param conditional: Enable conditional and range responses based on
        request headers. Requires passing a file path and ``environ``.
    :param etag: Calculate an ETag for the file, which requires passing
        a file path. Can also be a string to use instead.
    :param last_modified: The last modified time to send for the file,
        in seconds. If not provided, it will try to detect it from the
        file path.
    :param max_age: How long the client should cache the file, in
        seconds. If set, ``Cache-Control`` will be ``public``, otherwise
        it will be ``no-cache`` to prefer conditional caching.
    :param use_x_sendfile: Set the ``X-Sendfile`` header to let the
        server to efficiently send the file. Requires support from the
        HTTP server. Requires passing a file path.
    :param response_class: Build the response using this class. Defaults
        to :class:`~werkzeug.wrappers.Response`.
    :param _root_path: Do not use. For internal use only. Use
        :func:`send_from_directory` to safely send files under a path.
    .. versionchanged:: 2.0.2
        ``send_file`` only sets a detected ``Content-Encoding`` if
        ``as_attachment`` is disabled.
    .. versionadded:: 2.0
        Adapted from Flask's implementation.
    .. versionchanged:: 2.0
        ``download_name`` replaces Flask's ``attachment_filename``
        parameter. If ``as_attachment=False``, it is passed with
        ``Content-Disposition: inline`` instead.
    .. versionchanged:: 2.0
        ``max_age`` replaces Flask's ``cache_timeout`` parameter.
        ``conditional`` is enabled and ``max_age`` is not set by
        default.
    .. versionchanged:: 2.0
        ``etag`` replaces Flask's ``add_etags`` parameter. It can be a
        string to use instead of generating one.
    .. versionchanged:: 2.0
        If an encoding is returned when guessing ``mimetype`` from
        ``download_name``, set the ``Content-Encoding`` header.
    """
    if response_class is None:
        from .wrappers import Response
        response_class = Response
    path: str | None = None
    file: t.IO[bytes] | None = None
    size: int | None = None
    mtime: float | None = None
    headers = Headers()
    # Distinguish a filesystem path from an already-open file object.
    # Only a path gives us size/mtime (and therefore ETag/conditional
    # support).
    if isinstance(path_or_file, (os.PathLike, str)) or hasattr(
        path_or_file, "__fspath__"
    ):
        path_or_file = t.cast(t.Union[os.PathLike, str], path_or_file)
        # Flask will pass app.root_path, allowing its send_file wrapper
        # to not have to deal with paths.
        if _root_path is not None:
            path = os.path.join(_root_path, path_or_file)
        else:
            path = os.path.abspath(path_or_file)
        stat = os.stat(path)
        size = stat.st_size
        mtime = stat.st_mtime
    else:
        file = path_or_file
    if download_name is None and path is not None:
        download_name = os.path.basename(path)
    if mimetype is None:
        if download_name is None:
            raise TypeError(
                "Unable to detect the MIME type because a file name is"
                " not available. Either set 'download_name', pass a"
                " path instead of a file, or set 'mimetype'."
            )
        mimetype, encoding = mimetypes.guess_type(download_name)
        if mimetype is None:
            mimetype = "application/octet-stream"
        # Don't send encoding for attachments, it causes browsers to
        # save decompress tar.gz files.
        if encoding is not None and not as_attachment:
            headers.set("Content-Encoding", encoding)
    if download_name is not None:
        try:
            # ASCII names go out verbatim; non-ASCII names need the
            # RFC 5987 ``filename*`` form plus an ASCII fallback.
            download_name.encode("ascii")
        except UnicodeEncodeError:
            simple = unicodedata.normalize("NFKD", download_name)
            simple = simple.encode("ascii", "ignore").decode("ascii")
            # safe = RFC 5987 attr-char
            quoted = quote(download_name, safe="!#$&+-.^_`|~")
            names = {"filename": simple, "filename*": f"UTF-8''{quoted}"}
        else:
            names = {"filename": download_name}
        value = "attachment" if as_attachment else "inline"
        headers.set("Content-Disposition", value, **names)
    elif as_attachment:
        raise TypeError(
            "No name provided for attachment. Either set"
            " 'download_name' or pass a path instead of a file."
        )
    if use_x_sendfile and path is not None:
        # Let the HTTP server stream the file itself; no body here.
        headers["X-Sendfile"] = path
        data = None
    else:
        if file is None:
            file = open(path, "rb") # type: ignore
        elif isinstance(file, io.BytesIO):
            size = file.getbuffer().nbytes
        elif isinstance(file, io.TextIOBase):
            raise ValueError("Files must be opened in binary mode or use BytesIO.")
        data = wrap_file(environ, file)
    rv = response_class(
        data, mimetype=mimetype, headers=headers, direct_passthrough=True
    )
    if size is not None:
        rv.content_length = size
    if last_modified is not None:
        rv.last_modified = last_modified # type: ignore
    elif mtime is not None:
        rv.last_modified = mtime # type: ignore
    # Default to conditional caching; an explicit max_age below upgrades
    # this to public caching.
    rv.cache_control.no_cache = True
    # Flask will pass app.get_send_file_max_age, allowing its send_file
    # wrapper to not have to deal with paths.
    if callable(max_age):
        max_age = max_age(path)
    if max_age is not None:
        if max_age > 0:
            rv.cache_control.no_cache = None
            rv.cache_control.public = True
        rv.cache_control.max_age = max_age
        rv.expires = int(time() + max_age) # type: ignore
    if isinstance(etag, str):
        rv.set_etag(etag)
    elif etag and path is not None:
        # Cheap ETag: mtime + size + adler32 of the path (not contents).
        check = adler32(path.encode("utf-8")) & 0xFFFFFFFF
        rv.set_etag(f"{mtime}-{size}-{check}")
    if conditional:
        try:
            rv = rv.make_conditional(environ, accept_ranges=True, complete_length=size)
        except RequestedRangeNotSatisfiable:
            # We won't send a body, so release the file handle we opened.
            if file is not None:
                file.close()
            raise
        # Some x-sendfile implementations incorrectly ignore the 304
        # status code and send the file anyway.
        if rv.status_code == 304:
            rv.headers.pop("x-sendfile", None)
    return rv
def send_from_directory(
    directory: os.PathLike | str,
    path: os.PathLike | str,
    environ: WSGIEnvironment,
    **kwargs: t.Any,
) -> Response:
    """Send a file from within a directory using :func:`send_file`.

    This is a secure way to serve files from a folder, such as static
    files or uploads. Uses :func:`~werkzeug.security.safe_join` to
    ensure the path coming from the client is not maliciously crafted to
    point outside the specified directory.

    If the final path does not point to an existing regular file,
    returns a 404 :exc:`~werkzeug.exceptions.NotFound` error.

    :param directory: The directory that ``path`` must be located under. This *must not*
        be a value provided by the client, otherwise it becomes insecure.
    :param path: The path to the file to send, relative to ``directory``. This is the
        part of the path provided by the client, which is checked for security.
    :param environ: The WSGI environ for the current request.
    :param kwargs: Arguments to pass to :func:`send_file`.

    .. versionadded:: 2.0
        Adapted from Flask's implementation.
    """
    safe_path = safe_join(os.fspath(directory), os.fspath(path))

    # ``None`` means the client path tried to escape ``directory``
    # (e.g. via ".."): treat it exactly like a missing file.
    if safe_path is None:
        raise NotFound()

    # Flask will pass app.root_path, allowing its send_from_directory
    # wrapper to not have to deal with paths.  Note ``_root_path`` is
    # deliberately left in ``kwargs`` and forwarded to send_file.
    if "_root_path" in kwargs:
        safe_path = os.path.join(kwargs["_root_path"], safe_path)

    if not os.path.isfile(safe_path):
        raise NotFound()

    return send_file(safe_path, environ, **kwargs)
def import_string(import_name: str, silent: bool = False) -> t.Any:
    """Import an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
        `None` is returned instead.
    :return: imported object
    """
    normalized = import_name.replace(":", ".")
    try:
        try:
            __import__(normalized)
        except ImportError:
            # Not importable as a module: if there is a dot, fall
            # through and treat the last component as an attribute of
            # its parent module.
            if "." not in normalized:
                raise
        else:
            return sys.modules[normalized]

        parent, _, attribute = normalized.rpartition(".")
        module = __import__(parent, globals(), locals(), [attribute])
        try:
            return getattr(module, attribute)
        except AttributeError as exc:
            raise ImportError(exc) from None
    except ImportError as exc:
        if silent:
            return None
        raise ImportStringError(normalized, exc).with_traceback(
            sys.exc_info()[2]
        ) from None
def find_modules(
    import_path: str, include_packages: bool = False, recursive: bool = False
) -> t.Iterator[str]:
    """Find all the modules below a package. This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`. This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    package = import_string(import_path)
    search_path = getattr(package, "__path__", None)
    if search_path is None:
        # only packages carry __path__; plain modules have no children
        raise ValueError(f"{import_path!r} is not a package")
    prefix = f"{package.__name__}."
    for _finder, name, is_package in pkgutil.iter_modules(search_path):
        qualified = prefix + name
        if not is_package:
            yield qualified
            continue
        if include_packages:
            yield qualified
        if recursive:
            yield from find_modules(qualified, include_packages, True)
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""
    #: String in dotted notation that failed to be imported.
    import_name: str
    #: Wrapped exception.
    exception: BaseException
    def __init__(self, import_name: str, exception: BaseException) -> None:
        self.import_name = import_name
        self.exception = exception
        msg = import_name
        name = ""
        tracked = []
        # Re-import each dotted prefix one component at a time so the
        # error message can show exactly which component broke.
        for part in import_name.replace(":", ".").split("."):
            name = f"{name}.{part}" if name else part
            imported = import_string(name, silent=True)
            if imported:
                # record where this prefix was found on disk
                tracked.append((name, getattr(imported, "__file__", None)))
            else:
                # First failing component: build the diagnostic message
                # from everything imported so far, then stop probing.
                track = [f"- {n!r} found in {i!r}." for n, i in tracked]
                track.append(f"- {name!r} not found.")
                track_str = "\n".join(track)
                msg = (
                    f"import_string() failed for {import_name!r}. Possible reasons"
                    f" are:\n\n"
                    "- missing __init__.py in a package;\n"
                    "- package or module path not included in sys.path;\n"
                    "- duplicated package or module name taking precedence in"
                    " sys.path;\n"
                    "- missing module, class, function or variable;\n\n"
                    f"Debugged import:\n\n{track_str}\n\n"
                    f"Original exception:\n\n{type(exception).__name__}: {exception}"
                )
                break
        super().__init__(msg)
    def __repr__(self) -> str:
        return f"<{type(self).__name__}({self.import_name!r}, {self.exception!r})>"
| [
"akashpawar080808@gmail.com"
] | akashpawar080808@gmail.com |
cc1d8f55048f4c48a653b9d1a554a6c93af9ce5b | 74581cdce74e60d59c4c43efaf3850f77f53084d | /otsu.py | b5e101b1748c727e67aae4400ae7a3368b92f90a | [] | no_license | Sjasilva/pi_2019 | b625a9c8e79e8d6a368efda620de29dd559f537d | 068c18a5ba1a57e4f26eb38544d53d48eeced70b | refs/heads/master | 2020-06-01T11:13:18.463136 | 2019-06-07T13:58:55 | 2019-06-07T13:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,725 | py | from __future__ import division
import math
import numpy as np
# Backend-selection flags: exactly one of PIL / OpenCV / SciPy is used
# for image I/O, tried in that order.
USE_PIL = True
USE_CV2 = False
USE_SCIPY = False
try:
    import PIL
    from PIL import Image
    # NOTE(review): this unconditional raise forces the PIL branch off
    # even when PIL imports fine -- looks like a debugging leftover;
    # confirm whether PIL support should be re-enabled.
    raise ImportError
except ImportError:
    USE_PIL = False
if not USE_PIL:
    USE_CV2 = True
    try:
        import cv2
    except ImportError:
        USE_CV2 = False
if not USE_PIL and not USE_CV2:
    USE_SCIPY = True
    try:
        import scipy
        from scipy import misc
    except ImportError:
        USE_SCIPY = False
        # no usable imaging backend at all -- abort module import
        # (NOTE(review): message "Erro" looks truncated)
        raise RuntimeError("Erro")
class ImageReadWrite(object):
    """Thin facade over whichever imaging backend was imported at module
    load time (PIL, OpenCV or SciPy); reads and writes greyscale images."""

    def read(self, filename):
        """Load *filename* as an 8-bit greyscale numpy array."""
        if USE_PIL:
            color_image = PIL.Image.open(filename)
            grey_image = color_image.convert('L')
            return np.array(grey_image, dtype=np.uint8)
        if USE_CV2:
            return cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        if USE_SCIPY:
            # scipy returns floats; second arg True requests greyscale
            float_image = scipy.misc.imread(filename, True)
            return np.array(float_image, dtype=np.uint8)

    def write(self, filename, array):
        """Save *array* to *filename* with the active backend."""
        if USE_PIL:
            PIL.Image.fromarray(array).save(filename)
        elif USE_SCIPY:
            scipy.misc.imsave(filename, array)
        elif USE_CV2:
            cv2.imwrite(filename, array)
class _OtsuPyramid(object):
def load_image(self, im, bins=256):
if not type(im) == np.ndarray:
raise ValueError(
'must be passed numpy array. Got ' + str(type(im)) +
' instead'
)
if im.ndim == 3:
raise ValueError(
'image must be greyscale (and single value per pixel)'
)
self.im = im
hist, ranges = np.histogram(im, bins)
hist = [int(h) for h in hist]
histPyr, omegaPyr, muPyr, ratioPyr = \
self._create_histogram_and_stats_pyramids(hist)
self.omegaPyramid = [omegas for omegas in reversed(omegaPyr)]
self.muPyramid = [mus for mus in reversed(muPyr)]
self.ratioPyramid = ratioPyr
def _create_histogram_and_stats_pyramids(self, hist):
bins = len(hist)
ratio = 2
reductions = int(math.log(bins, ratio))
compressionFactor = []
histPyramid = []
omegaPyramid = []
muPyramid = []
for _ in range(reductions):
histPyramid.append(hist)
reducedHist = [sum(hist[i:i+ratio]) for i in range(0, bins, ratio)]
hist = reducedHist
bins = bins // ratio
compressionFactor.append(ratio)
compressionFactor[0] = 1
for hist in histPyramid:
omegas, mus, muT = \
self._calculate_omegas_and_mus_from_histogram(hist)
omegaPyramid.append(omegas)
muPyramid.append(mus)
return histPyramid, omegaPyramid, muPyramid, compressionFactor
def _calculate_omegas_and_mus_from_histogram(self, hist):
probabilityLevels, meanLevels = \
self._calculate_histogram_pixel_stats(hist)
bins = len(probabilityLevels)
ptotal = float(0)
omegas = []
for i in range(bins):
ptotal += probabilityLevels[i]
omegas.append(ptotal)
mtotal = float(0)
mus = []
for i in range(bins):
mtotal += meanLevels[i]
mus.append(mtotal)
muT = float(mtotal)
return omegas, mus, muT
def _calculate_histogram_pixel_stats(self, hist):
bins = len(hist)
N = float(sum(hist))
hist_probability = [hist[i] / N for i in range(bins)]
pixel_mean = [i * hist_probability[i] for i in range(bins)]
return hist_probability, pixel_mean
class OtsuFastMultithreshold(_OtsuPyramid):
    """Fast multi-level Otsu: finds k thresholds coarse-to-fine on the
    histogram pyramid built by :class:`_OtsuPyramid`."""
    def calculate_k_thresholds(self, k):
        """Return *k* thresholds (bin indices in the full-resolution
        histogram) that maximise between-class variance."""
        self.threshPyramid = []
        # start at the coarsest pyramid level that has at least k bins
        start = self._get_smallest_fitting_pyramid(k)
        self.bins = len(self.omegaPyramid[start])
        thresholds = self._get_first_guess_thresholds(k)
        # initial search radius: half the (coarse) histogram
        deviate = self.bins // 2
        for i in range(start, len(self.omegaPyramid)):
            omegas = self.omegaPyramid[i]
            mus = self.muPyramid[i]
            hunter = _ThresholdHunter(omegas, mus, deviate)
            thresholds = \
                hunter.find_best_thresholds_around_estimates(thresholds)
            self.threshPyramid.append(thresholds)
            # moving one level finer doubles the bin count, so scale the
            # thresholds up and only search +/- scaling around them
            scaling = self.ratioPyramid[i]
            deviate = scaling
            thresholds = [t * scaling for t in thresholds]
        # undo the last scaling step to land on real bin indices
        return [t // scaling for t in thresholds]
    def _get_smallest_fitting_pyramid(self, k):
        """Index of the coarsest level with at least k bins (None if no
        level fits, which falls through implicitly)."""
        for i, pyramid in enumerate(self.omegaPyramid):
            if len(pyramid) >= k:
                return i
    def _get_first_guess_thresholds(self, k):
        """Seed guesses clustered around the histogram midpoint, plus
        the last bin, truncated to k values."""
        kHalf = k // 2
        midway = self.bins // 2
        firstGuesses = [midway - i for i in range(kHalf, 0, -1)] + [midway] + \
            [midway + i for i in range(1, kHalf)]
        firstGuesses.append(self.bins - 1)
        return firstGuesses[:k]
    def apply_thresholds_to_image(self, thresholds, im=None):
        """Posterize *im* (default: the loaded image): each region
        between consecutive thresholds gets one grey level."""
        if im is None:
            im = self.im
        k = len(thresholds)
        # None bookends mean "no lower bound" / "no upper bound"
        bookendedThresholds = [None] + thresholds + [None]
        # k+1 output grey levels, from black to white
        greyValues = [0] + [int(256 / k * (i + 1)) for i in range(0, k - 1)] \
            + [255]
        greyValues = np.array(greyValues, dtype=np.uint8)
        finalImage = np.zeros(im.shape, dtype=np.uint8)
        for i in range(k + 1):
            kSmall = bookendedThresholds[i]
            # NOTE(review): np.bool8 is removed in recent numpy
            # releases -- confirm the pinned numpy version.
            bw = np.ones(im.shape, dtype=np.bool8)
            if kSmall:
                bw = (im >= kSmall)
            kLarge = bookendedThresholds[i + 1]
            if kLarge:
                bw &= (im < kLarge)
            greyLevel = greyValues[i]
            greyImage = bw * greyLevel
            finalImage += greyImage
        return finalImage
class _ThresholdHunter(object):
    """Search the neighbourhood of estimated thresholds for the
    combination that maximises the between-class variance."""

    def __init__(self, omegas, mus, deviate=2):
        # sigmaB scores candidate threshold sets; ``deviate`` is how far
        # (in bins) each estimate is jittered in every direction.
        self.sigmaB = _BetweenClassVariance(omegas, mus)
        self.bins = self.sigmaB.bins
        self.deviate = deviate

    def find_best_thresholds_around_estimates(self, estimatedThresholds):
        """Exhaustively score every jittered variant of the estimates
        and return the best-scoring threshold list."""
        bestThresholds = estimatedThresholds
        bestVariance = 0
        for thresholds in self._jitter_thresholds_generator(
                estimatedThresholds, 0, self.bins):
            variance = self.sigmaB.get_total_variance(thresholds)
            if variance == bestVariance:
                # tie-break: prefer the numerically smaller thresholds
                if sum(thresholds) < sum(bestThresholds):
                    bestThresholds = thresholds
            elif variance > bestVariance:
                bestVariance = variance
                bestThresholds = thresholds
        return bestThresholds

    def find_best_thresholds_around_estimates_experimental(self, estimatedThresholds):
        """Experimental scipy-based optimisation of the thresholds.

        Falls back to the exhaustive search for very small estimates.
        """
        # local import: scipy.optimize is only needed here, and the
        # module-level import only brings in scipy.misc
        import scipy.optimize

        estimatedThresholds = [int(k) for k in estimatedThresholds]
        if sum(estimatedThresholds) < 10:
            # fixed: previously called a nonexistent *_old method
            return self.find_best_thresholds_around_estimates(
                estimatedThresholds
            )
        print('estimated', estimatedThresholds)
        # fmin minimises, so negate the variance to maximise it
        fxn_to_minimize = lambda x: -1 * self.sigmaB.get_total_variance(
            [int(k) for k in x]
        )
        bestThresholds = scipy.optimize.fmin(
            fxn_to_minimize, estimatedThresholds
        )
        bestThresholds = [int(k) for k in bestThresholds]
        print('bestTresholds', bestThresholds)
        return bestThresholds

    def _jitter_thresholds_generator(self, thresholds, min_, max_):
        """Recursively yield every combination where each threshold is
        moved up to ``self.deviate`` bins from its estimate while the
        list stays strictly increasing within [min_, max_)."""
        pastThresh = thresholds[0]
        if len(thresholds) == 1:
            for offset in range(-self.deviate, self.deviate + 1):
                thresh = pastThresh + offset
                if thresh < min_ or thresh >= max_:
                    continue  # out of the valid bin range
                yield [thresh]
        else:
            thresholds = thresholds[1:]
            m = len(thresholds)
            for offset in range(-self.deviate, self.deviate + 1):
                thresh = pastThresh + offset
                # leave room for the m remaining thresholds above this one
                if thresh < min_ or thresh + m >= max_:
                    continue
                recursiveGenerator = self._jitter_thresholds_generator(
                    thresholds, thresh + 1, max_
                )
                for otherThresholds in recursiveGenerator:
                    yield [thresh] + otherThresholds
class _BetweenClassVariance(object):
def __init__(self, omegas, mus):
self.omegas = omegas
self.mus = mus
self.bins = len(mus)
self.muTotal = sum(mus)
def get_total_variance(self, thresholds):
thresholds = [0] + thresholds + [self.bins - 1]
numClasses = len(thresholds) - 1
sigma = 0
for i in range(numClasses):
k1 = thresholds[i]
k2 = thresholds[i+1]
sigma += self._between_thresholds_variance(k1, k2)
return sigma
def _between_thresholds_variance(self, k1, k2):
omega = self.omegas[k2] - self.omegas[k1]
mu = self.mus[k2] - self.mus[k1]
muT = self.muTotal
return omega * ((mu - muT)**2)
if __name__ == '__main__':
    filename = 'muda.jpg'
    # split the name into prefix/extension around the first dot
    dot = filename.index('.')
    prefix, extension = filename[:dot], filename[dot:]
    imager = ImageReadWrite()
    im = imager.read(filename)
    otsu = OtsuFastMultithreshold()
    otsu.load_image(im)
    # posterize with 1..6 thresholds and save each result as
    # <prefix>_crushed_<k><extension>
    for k in [1, 2, 3, 4, 5, 6]:
        savename = prefix + '_crushed_' + str(k) + extension
        kThresholds = otsu.calculate_k_thresholds(k)
        print(kThresholds)
        crushed = otsu.apply_thresholds_to_image(kThresholds)
        imager.write(savename, crushed)
| [
"geraldo.cruz@pe.senai.br"
] | geraldo.cruz@pe.senai.br |
35cbfb5d9ab46e0121aa703131cfebfacd12ca6e | 01493653ff4e922542d8f01898da8a60f1fcfa41 | /trees/bst_check.py | 6cd5ceac3ef6e6aa759955d5103b64b80a88c362 | [] | no_license | umeshror/data-structures-python | bcc0ac4a6be56d274e281a503933db0721b19d16 | 71ffc8be2dbd0c66e832db7c3d5c3f1bfe7566a8 | refs/heads/master | 2022-04-02T11:43:02.799121 | 2020-02-09T10:48:06 | 2020-02-09T10:48:06 | 208,948,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | class Node(object):
    def __init__(self, k, val):
        # k: comparable key used for BST ordering; val: arbitrary payload
        self.key = k
        self.value = val
        self.left = None   # left child Node (keys <= key) or None
        self.right = None  # right child Node (keys >= key) or None
def tree_max(node):
    """Largest key in the (sub)tree rooted at *node*.

    An empty subtree yields -inf, the identity element for max().

    :param node: node/root
    :return: value
    """
    if not node:
        return float("-inf")
    return max(node.key, tree_max(node.left), tree_max(node.right))
def tree_min(node):
    """Smallest key in the (sub)tree rooted at *node*.

    An empty subtree yields +inf, the identity element for min().

    :param node: node/root
    :return: value
    """
    if not node:
        return float("inf")
    return min(node.key, tree_min(node.left), tree_min(node.right))
def verify(node):
    """Return True if the tree rooted at *node* satisfies the BST
    property: every key in the left subtree is <= node.key, every key in
    the right subtree is >= node.key, recursively at every node."""
    if not node:
        # an empty tree is trivially a valid BST
        return True
    # idiom fix: return the boolean expression directly instead of
    # if/else returning True/False
    return (tree_max(node.left) <= node.key <= tree_min(node.right)
            and verify(node.left)
            and verify(node.right))
# Valid BST: 5 < 10 < 30
root = Node(10, "Hello")
root.left = Node(5, "Five")
root.right = Node(30, "Thirty")
print(verify(root)) # prints True, since this tree is valid
root = Node(10, "Ten")
root.right = Node(20, "Twenty")
root.left = Node(5, "Five")
# 15 sits in the left subtree of 10 but is greater than 10 -> invalid
root.left.right = Node(15, "Fifteen")
print(verify(root)) # prints False, since 15 is to the left of 10
| [
"sarukumesh@gmail.com"
] | sarukumesh@gmail.com |
12e21107c31a8100c0351528f420eaca7e0e98b8 | e92cc5477a1976a4427a6dccd55ec477cdeed29a | /src/game.py | aafd472eaad0ab8a40c020c0c1f8739bb8b1bb48 | [] | no_license | seanwieser/Platformer | 32fecb234b20928036d8e8d74030b6964fc8739c | 52c45e43485e6a2f4be72051c8cc1514babd12de | refs/heads/main | 2021-06-21T12:37:05.858159 | 2021-01-16T09:33:42 | 2021-01-16T09:33:42 | 176,231,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | from classes import *
from constants import *
import PIL.Image
import os
# Useful functions
def redrawGameWindow():
    """Repaint one frame: background, ground strip, player and bullets.

    Uses module-level globals (win, player, bulletManager) created below;
    ``py`` comes from the star imports -- presumably pygame, confirm in
    classes/constants.
    """
    global win
    win.fill((255,255,255))  # white background
    # black ground rectangle below the play area
    py.draw.rect(win, (0, 0, 0), (0, GROUND+PLAY_H, WIN_WIDTH, GROUND_H))
    player.draw(win)
    bulletManager.draw(win)
    py.display.update()  # flip the finished frame to the screen
# Set up the window with dimensions, name, and clock
win = py.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
py.display.set_caption("Game")
clock = py.time.Clock()
# Create game objects
bulletManager = BulletManager()
gun1 = Gun('Pistol', 'Light', 10)
gun2 = Gun('Shotgun', 'Shotgun', 5)
gunManager = GunManager()
gunManager.add(gun1)
gunManager.add(gun2)
player = Player(PLAY_W, PLAY_H, SPRITES, bulletManager, gunManager)
# Game loop
run = True
pause = False
while run:
    clock.tick(40)  # cap the loop at 40 FPS
    events = py.event.get()
    for event in events:
        if event.type == py.QUIT:
            run = False
        # 'p' toggles pause
        if event.type == py.KEYDOWN and event.key == py.K_p: # causes a crash. pause is broken
            pause = not pause
    if not pause:
        # only advance the simulation while unpaused
        keys = py.key.get_pressed()
        player.move(keys, events)
        bulletManager.moveBullets()
    redrawGameWindow()
py.quit()
| [
"seanwieser@gmail.com"
] | seanwieser@gmail.com |
bfe582a5f8e7e55c85cb36383022be35ca6b9780 | c08225b2cc9924f12af667230c540ecd6b6a801f | /BUILD/sql_test.py | cf57af6140f59d4cb6e21ec2ad5922dc9d787752 | [] | no_license | eleanorstrib/hackbright | 68ef671a0281971a09029ba1a450c462b922becc | 84f692c13126b8c433153247fe0ca1d4f5a5b648 | refs/heads/master | 2021-01-21T02:45:27.857978 | 2014-12-04T23:43:02 | 2014-12-04T23:43:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py |
import csv, sqlite3
con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE t (col1, col2);")
with open('BUILD_SiteTimes.csv','rb') as fin: # `with` statement available in 2.5+
    # csv.DictReader uses first line in file for column headings by default
    dr = csv.DictReader(fin) # comma is default delimiter
    # NOTE(review): assumes the CSV header row literally contains
    # columns named 'col1' and 'col2' -- confirm against
    # BUILD_SiteTimes.csv
    to_db = [(i['col1'], i['col2']) for i in dr]
# parameterized bulk insert of all rows read above
cur.executemany("INSERT INTO t (col1, col2) VALUES (?, ?);", to_db)
con.commit()
# Python 2 print statement (this script is Python 2: note 'rb' CSV mode)
print to_db
| [
"melissa.fabros@gmail.com"
] | melissa.fabros@gmail.com |
869d7f8aec582f9c09dfa15e9791d99d7c9c617d | 170a4c0b1accb9468567f6a88254ff738f2a8166 | /EQ5D.py | 3fab62e39350976be780783eaeae004522dfd006 | [] | no_license | yazhisun/Labs_PreferenceScores | 2ecd9acdb21403f912200db1fa41f0f6e325ef18 | 3eb0ec0e55f1772b15a3108dd0a85dbcf75e1743 | refs/heads/master | 2021-05-09T22:56:44.996009 | 2018-01-18T16:03:31 | 2018-01-18T16:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
# EQ-5D regression coefficients
# NOTE(review): presumably combined by the scoring code as
# 1 - Constant - sum(per-dimension coefficients) - N3 (N3 applied once
# if any dimension is at level 3) -- confirm against the consumer.
Constant = 0.081
N3 = 0.269
# Per dimension: coefficient indexed by level - 1 (level 1, "no
# problems", contributes 0).  Stray trailing semicolon removed.
dictCoefficients = {'Mobility': [0, 0.069, 0.314],
                    'Self-Care': [0, 0.104, 0.214],
                    'Usual Activity': [0, 0.036, 0.094],
                    'Pain/Discomfort': [0, 0.123, 0.386],
                    'Anxiety/Depression': [0, 0.071, 0.236]}
| [
"reza.yaesoubi@yale.edu"
] | reza.yaesoubi@yale.edu |
49681f30a6612dac501c48d0b1e070e630b6bf72 | fd9257a4cc04b89c2b8c92008770a82ccdfe85bd | /build/spdlog/catkin_generated/generate_cached_setup.py | 3db5318c1429f193fb60f8495755cfd61895d77f | [] | no_license | Zauberr/KAL | 40b135f02e9ae9c7bf55b064094aaff88c43628e | 225e538058b632c8c14cc638e12fcb124bd81e29 | refs/heads/master | 2020-08-16T18:26:19.863213 | 2019-10-16T13:38:46 | 2019-10-16T13:38:46 | 215,537,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# NOTE: catkin-generated bootstrap script; the absolute paths below are
# machine-specific by design and regenerated on each build.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/mrtros/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/mrtros/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/kal5-2/rammbo/devel;/opt/mrtros;/opt/mrtsoftware/local;/opt/mrtsoftware/release".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# generate the cached environment script and write it out as executable
code = generate_environment_script('/home/kal5-2/rammbo/devel/.private/spdlog/env.sh')
output_filename = '/home/kal5-2/rammbo/build/spdlog/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)  # add the executable bit
| [
"bobocao93@gmail.com"
] | bobocao93@gmail.com |
37b973fff2fbbb70adca43095f7713a15f44886e | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /000989letpy/letpy_059_is_20200503.py | 6e16cfdee1ebd5c0dcfe90e1764e2c9e70037371 | [
"Apache-2.0"
] | permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | a = "Hello, world!"
b = "Hello, world!"
print(a is b) | [
"ms33@inbox.ru"
] | ms33@inbox.ru |
8059a516e77743f1739728cf8a264e52c6bdcc2f | b9bc4a0bfbd61499583debd27e00aa0e2cb50c94 | /tests/image_displayer/image_displayer_11.py | bcee717dffef9560487595b741bded6501eef715 | [
"MIT"
] | permissive | Vicken-Ghoubiguian/Imtreat | 55a8fdbecd27ca053d9c0d492c9ce1e5d001993b | 1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0 | refs/heads/master | 2023-08-23T12:39:48.107620 | 2021-10-12T08:26:24 | 2021-10-12T08:26:24 | 314,793,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import imtreat
# Load a demo image, apply the "ocean" mode and display it via imtreat.
img = imtreat.imageManagerClass.openImageFunction("../images/soleil.png", 1)
img = imtreat.definedModesClass.oceanModeFunction(img)
imtreat.imageManagerClass.displayImageFunction("experience de MMMMMOOOOIIIIII", img)
imtreat.imageManagerClass.finishItFunction()
| [
"Eric Ghoubiguian"
] | Eric Ghoubiguian |
c3dbca3d76b2ae13be8d6823f27a212653d54c75 | 72fd2f0d2c7dcca2806a10e1e27d0a7b9d7f7080 | /top_ten.py | 87a9848ab280f39b35099251084f9bfc47f88de2 | [] | no_license | ravs/TwitterUtilities | 5d03c2a974644751ee2f65890a49240268095e12 | 7b233a8b53469a8a096b97cdd1066013da575c2f | refs/heads/master | 2016-09-05T15:00:07.565269 | 2013-11-11T09:51:26 | 2013-11-11T09:51:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/env python
#python script to get top ten hash tags in a tweet file
import sys
import json
#hash tag dict variable
# maps hash tag text (without leading '#') -> occurrence count (float)
hashDict = {}
def countHashTags(fp):
    """Tally hash tag occurrences from *fp* (one JSON tweet per line)
    into the module-level ``hashDict``."""
    for line in fp:
        parseTweet = json.loads(line)
        #better to check for text key before proceeding
        # (some stream entries, e.g. delete notices, have no 'text')
        if 'text' in parseTweet:
            for word in parseTweet["text"].split(" "):
                #hash tags
                if word.startswith('#'):
                    # remove the hash, then any trailing dot/comma
                    # (rstrip is a no-op when neither is present, so
                    # the old endswith() guard was redundant)
                    tag = word.lstrip('#').rstrip(',.')
                    # single dict lookup instead of scanning .keys()
                    hashDict[tag] = hashDict.get(tag, 0.0) + 1.0
def main():
    # file with one JSON tweet per line, path given as first CLI argument
    tweet_file = open(sys.argv[1])
    countHashTags(tweet_file)
    #sort and print top 10 hash tags
    i = 0
    # sort tags by descending count (hashDict.get as the sort key)
    for w in sorted(hashDict, key=hashDict.get, reverse=True):
        if i == 10:
            break
        elif i == 9:
            # trailing comma: Python 2 print without a final newline
            print w, hashDict[w],
        else:
            print w, hashDict[w]
        i+=1
if __name__ == '__main__':
main() | [
"ravs@ravs.me"
] | ravs@ravs.me |
ae4389ec76c50a34e34d8aa7eb9b393fdda2694d | 90d17d09c705947365eb72024464abeeecac4e1e | /test7.py | ad2de079f352f21cffac2dfd219d6e66910a7971 | [] | no_license | Mindy-cm19/Pythonlearning | a23ee705ddc6286066bddd0f08a11dab2f4db518 | cb06246b8126a220f5e8ca09144683fc0409caf5 | refs/heads/master | 2020-03-25T17:56:51.023134 | 2018-08-16T13:52:09 | 2018-08-16T13:52:09 | 144,004,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # 请把下面的Student对象的gender字段对外隐藏起来,用get_gender()和set_gender()代替,并检查参数有效性:
class Student(object):
    """A student whose gender is kept private and only reachable through
    get_gender()/set_gender(), which validates its argument."""
    def __init__(self, name, gender):
        self.name = name
        self.__gender = gender
    def get_gender(self):
        """Return the private gender value."""
        return self.__gender
    def set_gender(self,gender):
        """Set gender after checking it is 'male' or 'female'."""
        if gender in ('male', 'female'):
            self.__gender = gender
        else:
            raise ValueError('性别错误')
# Exercise self-checks: getter returns the constructor value, setter
# updates it.
bart = Student('Bart', 'male')
if bart.get_gender() != 'male':
    print('测试失败!')
else:
    bart.set_gender('female')
    if bart.get_gender() != 'female':
        print('测试失败!')
    else:
        print('测试成功!')
bart.set_gender('m') | [
"504135715@qq.com"
] | 504135715@qq.com |
5f177af7f622e3d01bef1985b99f4ad3dfd7f96d | 3054bca3c8c59ae026b6cb3505596c4d5ed9c356 | /documentprocessor.py | 806087a6538aabd569521931895e918acedf9e9d | [] | no_license | fernflower/pdf-sectioning | 2195029a74c388839513508f75d666be82751a6f | 172b14c84833de0dfe745ae39c43c7ef16967aa9 | refs/heads/master | 2016-09-06T18:41:04.446044 | 2014-08-29T10:32:06 | 2014-08-29T10:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,626 | py | # -*- coding: utf-8 -*-
import os
from PyQt4 import QtCore
from popplerqt4 import Poppler
from lxml import etree
from lxml.builder import ElementMaker
from zonetypes import DEFAULT_ZONE_TYPES
from cmsquerymodule import NSMAP
# XML namespace for elements emitted in the native markup format.
XHTML_NAMESPACE = "http://internet-school.ru/abc"
# ElementMaker factory producing elements in that namespace, with the
# prefix map shared with cmsquerymodule.
E = ElementMaker(namespace=XHTML_NAMESPACE, nsmap=NSMAP)
class LoaderError(Exception):
    """Raised when the PDF file to be processed cannot be loaded."""
    def __str__(self):
        # Python 2: self.message may be unicode; encode to UTF-8 bytes so
        # printing to a byte-oriented console cannot raise.
        return self.message.encode("utf-8")
class DocumentProcessor(object):
    """Wrapper around a Poppler PDF document.

    Responsibilities: page navigation and rendering (with a per-page,
    per-scale cache), text extraction, (de)serialisation of paragraph /
    zone markup to the project's native XML format, preview generation
    and error logging.

    NOTE: this module targets Python 2 (``print`` statements, ``unicode``)
    and PyQt4 / python-poppler-qt4.
    """
    # Base render resolution in DPI at scale factor 1 (1 PDF point = 1px).
    resolution = 72.0
    def __init__(self, filename, display_name):
        """Load *filename* with Poppler; raise LoaderError if missing.

        Also removes any stale errors.log left by a previous run.
        """
        self.filename = filename
        self.display_name = display_name
        self.curr_page_num = 0
        # map of maps: rendered_pages in all possible scales
        self.rendered_pages = {}
        print u"filename is %s" % filename
        # check that file exists (in case app is run from console)
        # if error log exists -> remove it
        if os.path.isfile("errors.log"):
            os.remove("errors.log")
        if os.path.isfile(filename):
            self.doc = Poppler.Document.load(filename)
            self.doc.setRenderHint(Poppler.Document.TextAntialiasing)
        else:
            raise LoaderError(u"No such file: %s" % filename)
    # 0 for first page
    @property
    def curr_page_number(self):
        # Zero-based index of the currently displayed page.
        return self.curr_page_num
    @property
    def png_prefix(self):
        # File-name prefix used by gen_previews for saved page images.
        return "page_"
    @property
    def totalPages(self):
        # Total page count, or 0 when no document is loaded.
        if not self.doc:
            return 0
        return self.doc.numPages()
    def width(self, scale=1):
        """Width in pixels of the current page rendered at *scale*."""
        return self.curr_page(scale).width()
    def height(self, scale=1):
        """Height in pixels of the current page rendered at *scale*."""
        return self.curr_page(scale).height()
    # returns a QImage
    def render_page(self, num, scale):
        """Render page *num* at *scale* and return the QImage.

        Resolution and the crop rectangle are both multiplied by *scale*;
        the trailing 0 is presumably the rotation argument — TODO confirm
        against the poppler-qt4 renderToImage signature.
        """
        page = self.doc.page(num)
        qimage = page.renderToImage(self.resolution * scale,
                                    self.resolution * scale,
                                    0,
                                    0,
                                    page.pageSize().width() * scale,
                                    page.pageSize().height() * scale,
                                    0)
        return qimage
    def next_page(self):
        """Advance to the next page (clamped to the last) and return it."""
        self.curr_page_num = self.curr_page_num + 1 \
            if self.curr_page_num < self.doc.numPages() - 1 \
            else self.doc.numPages() - 1
        return self.curr_page()
    def prev_page(self):
        """Step back one page (clamped to the first) and return it."""
        self.curr_page_num = self.curr_page_num - 1 \
            if self.curr_page_num > 0 else 0
        return self.curr_page()
    # here 1st page is passed as page 0
    def go_to_page(self, pagenum):
        """Jump to zero-based *pagenum*; return the page or None if out of range."""
        self.curr_page_num = pagenum \
            if pagenum in range(0, self.totalPages) else self.curr_page_num
        return self.curr_page() if self.curr_page_num == pagenum else None
    def curr_page(self, scale=1):
        """Return the current page as a QImage at *scale*, caching results."""
        # if page has already been rendered -> take from rendered dict
        if self.curr_page_num in self.rendered_pages.keys():
            # search for prerendered scale
            page_scales = self.rendered_pages[self.curr_page_num]
            if scale not in page_scales.keys():
                self.rendered_pages[self.curr_page_num][scale] = \
                    self.render_page(self.curr_page_num, scale)
        else:
            rendered = self.render_page(self.curr_page_num, scale)
            self.rendered_pages[self.curr_page_num] = {scale: rendered}
        return self.rendered_pages[self.curr_page_num][scale]
    # selection is a QRect
    def get_text(self, selection):
        """Return the text inside *selection* on the current page ("" if no selection)."""
        if not selection:
            return ""
        return unicode(self.doc.page(self.curr_page_num).text(selection))
    # returns a dict of {cas-id : paragraph marks data} (the same used in
    # bookviewer as self.paragraphs)
    # filename = name of file with markup, NOT pdf
    def load_native_xml(self, filename):
        """Parse a native-markup XML file.

        Returns a tuple ``(paragraphs, settings)`` where *paragraphs* maps
        cas-id -> {"marks": [start, end], "zones": [...]}, and *settings*
        is a dict of the <settings> children (comma-lists split, ints
        converted).
        """
        tree = etree.parse(filename)
        SETTINGS_XPATH = "/is:object/is:text/is:ebook-pages/is:settings"
        PARAGRAPHS_XPATH = "/is:object/is:text/is:ebook-pages/is:ebook-para"
        settings = tree.xpath(SETTINGS_XPATH, namespaces=NSMAP)
        if len(settings) > 0:
            settings = settings[0]
        paragraphs = tree.xpath(PARAGRAPHS_XPATH, namespaces=NSMAP)
        out_paragraphs = {}
        def _process_settings(param, text):
            # List-valued settings are stored comma-separated; everything
            # else is an int when possible, else left as text.
            if param in ['start-autozones', 'margins', 'all-autozones',
                         'end-autozones', 'passthrough-zones', 'zonetypes']:
                return text.split(',') if text else []
            try:
                return int(text)
            except ValueError:
                return text
        book_settings = {e.xpath('local-name()'):
                         _process_settings(e.xpath('local-name()'), e.text)
                         for e in settings.getchildren()} \
            if len(settings) > 0 else {}
        for paragraph in paragraphs:
            cas_id = paragraph.get("id")
            name = paragraph.get("name")
            start_y = paragraph.get("start-y")
            start_page = paragraph.get("start-page")
            end_y = paragraph.get("end-y")
            end_page = paragraph.get("end-page")
            # Start/end paragraph marks as used by the book viewer.
            start = {"cas-id": cas_id,
                     "name": name,
                     "y": start_y,
                     "page": start_page,
                     "type": "start"}
            end = {"cas-id": cas_id,
                   "name": name,
                   "y": end_y,
                   "page": end_page,
                   "type": "end"}
            zones = []
            for zone in paragraph.xpath("is:ebook-zone", namespaces=NSMAP):
                objects = [{"oid": o.get("oid"),
                            "block-id": o.get("block-id")}
                           for o in zone.xpath("is:ebook-object",
                                               namespaces=NSMAP)]
                placements = [{"page": pl.get("page"),
                               "y": pl.get("y")}
                              for pl in zone.xpath("is:ebook-placement",
                                                   namespaces=NSMAP)]
                # Repeat ("passthrough") zones carry no page attribute of
                # their own; fall back to the first placement's page.
                page = zone.get("page") or \
                    next((z["page"] for z in placements), None)
                def _get_zone_id():
                    # Zone id is rubric alone for unnumbered zones,
                    # otherwise number + rubric.
                    return zone.get("rubric") if zone.get("n") in ["00", None] \
                        else zone.get("n") + zone.get("rubric")
                new_zone = {"cas-id": cas_id,
                            "zone-id": _get_zone_id(),
                            "page": page,
                            "type": zone.get("type"),
                            "rubric": zone.get("rubric"),
                            "placements": placements,
                            "number": zone.get("n"),
                            "y": zone.get("y"),
                            "at": zone.get("at"),
                            "objects": objects,
                            "passthrough": zone.get("type") == u"repeat"}
                zones.append(new_zone)
            out_paragraphs[cas_id] = {"marks": [start, end], "zones": zones}
        return (out_paragraphs, book_settings)
    # Paragraphs - a dict {cas-id : dict with all paragraph data}
    def gen_native_xml(self, paragraphs, settings, progress):
        """Serialise *paragraphs* + *settings* to native-markup XML.

        Asserts that every paragraph has both a start and an end mark.
        *progress*, when given, is updated per page (QProgressDialog-like:
        setRange/setValue — TODO confirm the expected interface).
        Returns the document as a UTF-8 encoded string.
        """
        PAGES = E("ebook-pages", src=self.filename)
        ICON_SET = E("ebook-icon-set")
        all_zones = settings.get("zonetypes", DEFAULT_ZONE_TYPES)
        # TODO perhaps should pass precisely icons used in markup, not all?
        for icon_type in all_zones:
            icon = E("ebook-icon", rubric=icon_type, src="%s.png" % icon_type)
            ICON_SET.append(icon)
        PAGES.append(ICON_SET)
        # here save autozone settings
        SETTINGS = E("settings")
        for key in settings:
            tag = E(key)
            # List settings are joined back into the comma-separated form
            # that load_native_xml splits on.
            value = u",".join(p for p in settings[key]) \
                if isinstance(settings[key], list) else settings[key]
            tag.text = unicode(value)
            SETTINGS.append(tag)
        PAGES.append(SETTINGS)
        # add paragraphs info
        for cas_id, data in paragraphs.items():
            # make sure that no paragraphs are saved without end mark
            marks = data.get("marks", None)
            if not marks:
                continue
            zones = data.get("zones", [])
            assert len(marks) == 2, \
                "Some paragraphs don't have end marks, can't save that way!"
            PARA = E("ebook-para", id=str(cas_id),
                     **{"start-page": str(marks[0]["page"]),
                        "start-y": str(marks[0]["y"]),
                        "name": marks[0]["name"],
                        "end-page": str(marks[1]["page"]),
                        "end-y": str(marks[1]["y"])})
            for zone in zones:
                # passthrough zones come first
                if zone["type"] == "repeat":
                    ZONE = E("ebook-zone", type="repeat",
                             **{"y": str(zone["y"]),
                                "rubric": zone["rubric"],
                                "at": zone["at"]})
                    for pl in zone["placements"]:
                        ZONE.append(E("ebook-placement",
                                      **{"page": str(pl["page"]),
                                         "y": str(pl["y"])}))
                else:
                    ZONE = E("ebook-zone", type=zone["type"],
                             **{"n": str(zone["n"]),
                                "page": str(zone["page"]),
                                "y": str(zone["y"]),
                                "rubric": zone["rubric"],
                                "at": zone["at"]})
                    for obj in zone["objects"]:
                        ZONE.append(E("ebook-object",
                                      **{"oid": obj["oid"],
                                         "block-id": obj["block-id"]}))
                PARA.append(ZONE)
            PAGES.append(PARA)
        # TODO can take a lot of time (esp. when rendering differently sized
        # pages), show progress here
        if progress:
            progress.setRange(1, self.totalPages)
        for page in range(1, self.totalPages):
            def _get_page_preview_str(page):
                # Zero-padded 3-digit preview file name, e.g. page-007.png.
                return "page-" + "0"*(3-len(str(page))) + str(page) + ".png"
            def _get_fold(first_page, pagenum):
                # Alternate "l"/"r" starting from settings["first-page"].
                page_order = [first_page,
                              next(x for x in ["l", "r"] if x != first_page)]
                return page_order[(pagenum + 1) % 2]
            fold = _get_fold(settings["first-page"], page)
            PAGE = E("ebook-page",
                     **{"preview": _get_page_preview_str(page),
                        "n": str(page),
                        "width": str(self.width()),
                        "height": str(self.height()),
                        "hide": "false",
                        # FIXME
                        "zone-margins": fold,
                        "fold": fold})
            PAGES.append(PAGE)
            if progress:
                progress.setValue(page)
        root = E.object(E.text(PAGES),
                        **{"display-name": settings["display-name"]})
        result = etree.tostring(root, pretty_print=True, encoding="utf-8")
        return result
    def gen_toc_xml(self):
        """Walk the PDF's table of contents, resolving each entry's page.

        NOTE(review): despite the name, nothing is emitted here — the walk
        only resolves link destinations; the *prefix* argument is built but
        unused. Possibly a work-in-progress.
        """
        # returns last page processed
        def _process_child(fc, prefix="", prev_page=1):
            i = 1
            previous_page = prev_page
            while not fc.isNull():
                elem = fc.toElement()
                # tex-like section-ref
                destination = elem.attribute("DestinationName")
                pagenum = self.doc.linkDestination(destination).pageNumber()
                previous_page = pagenum
                if fc.hasChildNodes():
                    previous_page = _process_child(fc.firstChild(),
                                                   prefix=prefix+"%d." % i,
                                                   prev_page=previous_page)
                fc = fc.nextSibling()
                i = i + 1
            return previous_page
        toc = self.doc.toc()
        if not toc:
            return
        _process_child(toc.firstChild())
    # generates and saves previes, returns a list of generated filenames
    def gen_previews(self, path):
        """Render every page to PNG under *path*; return the file names."""
        filenames = []
        for i, png in enumerate(self._render_all_to_png(), start=1):
            name = os.path.join(path, self.png_prefix + str(i))
            filenames.append(name)
            png.save(name, "png")
        return filenames
    def save_error_log(self, errors, course_id, course_name):
        """Append *errors* for one course to errors.log (UTF-8 encoded)."""
        with open("errors.log", "a+") as f:
            # first write the name of the course
            f.write("###########################\n")
            f.write(course_name.encode('utf-8'))
            f.write("(%s)\n" % course_id)
            for error in errors:
                f.write(error.message.encode('utf-8'))
                f.write("\n")
    def save_all(self, path_to_file, paragraphs, settings, progress=None):
        """Write the generated native XML to *path_to_file* and return the path."""
        with open(path_to_file, 'w') as fname:
            fname.write(self.gen_native_xml(paragraphs, settings, progress))
        return path_to_file
    def _is_in_pdf_bounds(self, pos_tuple, scale, viewport_delta):
        """True if a point (x, y) or rect (x1, y1, x2, y2) tuple lies in the
        rendered page's viewport, shrunk at the top by *viewport_delta*."""
        img = self.curr_page(scale).rect()
        viewport = QtCore.QRect(img.x(),
                                img.y() + viewport_delta,
                                img.width(),
                                img.height() - viewport_delta)
        if len(pos_tuple) == 2:
            pos = QtCore.QPoint(pos_tuple[0], pos_tuple[1])
            return viewport.contains(pos)
        elif len(pos_tuple) == 4:
            pos = QtCore.QRect(QtCore.QPoint(pos_tuple[0], pos_tuple[1]),
                               QtCore.QPoint(pos_tuple[2], pos_tuple[3]))
            return viewport.intersects(pos)
        return False
    def _processTextBlocks(self):
        """Map page index -> list of ((left, top, width, height), text)
        for every text box Poppler reports on that page."""
        result = dict()
        for i in range(0, self.doc.numPages()):
            result[i] = [((t.boundingBox().left(),
                           t.boundingBox().top(),
                           t.boundingBox().width(),
                           t.boundingBox().height()),
                          unicode(t.text()))
                         for t in self.doc.page(i).textList()]
        return result
    def _render_one_to_png(self, num):
        # Default-parameter render (no scaling) used for previews.
        return self.doc.page(num).renderToImage()
    def _render_all_to_png(self):
        """Render every page at default parameters; return list of QImages."""
        return [self._render_one_to_png(i)
                for i in range(0, self.doc.numPages())]
| [
"i-vasilevskaja@igrade.ru"
] | i-vasilevskaja@igrade.ru |
4a40f72f4b9ea6739c0c443c81269490ece59a25 | 6637bc6e80e7502705856e192fc53d2c2d7e0312 | /tobe.py | a4bff03fe0ebc8fa52991dbafa16e58c9524aebf | [
"MIT"
] | permissive | pavelkryukov/studious-octo-spoon | 6d15c2ea54c05faf03ed8bc7a7e08de7d1c87a6a | 6b9f10824e7a440b15789c0b19951dcab3cd6e02 | refs/heads/master | 2021-01-22T22:34:44.190449 | 2017-05-29T23:23:36 | 2017-05-29T23:23:36 | 92,781,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import random
src = "tobe"
letters = "abcdefghijklmnopqrstuvwxyz"
output = "tobs"
while not output == src:
output = random.sample(letters,len(src))
print (output) | [
"kryukov@frtk.ru"
] | kryukov@frtk.ru |
ac7d3826b9939d6d2fb9b9de03a7d8131dd6d6b4 | 0945c811bf6e9c980a193b0820edea98aebb7d4a | /create_vm.py | a9f0cbebcad168614ef8aa3a4b1f856b3c7723cd | [] | no_license | jitendra-singh01/azure-python-scripts | 64ed791a1ef5fb4134de823576791653af7a4e5f | 98f5beea3dae30029b2a02b2e1cac1155476d745 | refs/heads/master | 2021-07-14T21:41:10.836720 | 2020-11-29T15:53:48 | 2020-11-29T15:53:48 | 221,238,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | from azure.common.client_factory import get_client_from_auth_file
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
import os
import traceback
# Admin user name configured on the sample VM (used in the SSH key path).
USERNAME = "ubuntu"
# Resource Group
GROUP_NAME = 'azure-sample-group-virtual-machinest'
# Network
VNET_NAME = 'azure-sample-vnett'
# Name of the virtual machine to create.
VM_NAME = "azureusest2"
SUBNET_NAME = 'azure-sample-subnett'
# Azure region every resource is created in.
LOCATION = "eastus"
IP_CONFIG_NAME = 'azure-sample-ip-config'
NIC_NAME = 'azure-sample-nic'
def create_nic(network_client):
    """Provision the sample VNet, subnet and NIC, and return the NIC.

    All three resources are created (or updated) inside GROUP_NAME in
    LOCATION.  Each long-running operation is awaited before the next
    dependent resource is created.
    """
    # Virtual network: one 10.0.0.0/16 address space.
    print('\nCreate Vnet')
    vnet_params = {
        'location': LOCATION,
        'address_space': {
            'address_prefixes': ['10.0.0.0/16']
        }
    }
    vnet_poller = network_client.virtual_networks.create_or_update(
        GROUP_NAME, VNET_NAME, vnet_params)
    vnet_poller.wait()

    # Subnet: a /24 carved out of the VNet above.
    print('\nCreate Subnet')
    subnet_poller = network_client.subnets.create_or_update(
        GROUP_NAME, VNET_NAME, SUBNET_NAME,
        {'address_prefix': '10.0.0.0/24'})
    subnet_info = subnet_poller.result()

    # NIC: one IP configuration bound to the new subnet.
    print('\nCreate NIC')
    nic_params = {
        'location': LOCATION,
        'ip_configurations': [{
            'name': IP_CONFIG_NAME,
            'subnet': {
                'id': subnet_info.id
            }
        }]
    }
    nic_poller = network_client.network_interfaces.create_or_update(
        GROUP_NAME, NIC_NAME, nic_params)
    return nic_poller.result()
def create_vm(compute_client):
    """Create (or update) the sample Ubuntu 16.04 VM and return it.

    Password login is disabled; access is via the embedded SSH public
    key only.  The final ``.result()`` waits for the long-running
    provisioning operation to finish before returning.

    NOTE(review): the network interface id (including a subscription id)
    and the SSH public key are hardcoded below — they should come from
    configuration, and do not use the NIC created by create_nic().
    """
    print('\nCreating VM')
    VM_PARAMETERS={
        'location': LOCATION,
        'os_profile': {
            'computer_name': VM_NAME,
            'admin_username': 'ubuntu',
            'linux_configuration': {
                # Key-based SSH only; no password authentication.
                "disable_password_authentication": True,
                "ssh": {
                    "public_keys": [{
                        "path": "/home/{}/.ssh/authorized_keys".format(USERNAME),
                        "key_data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDeuojy8n1Covi3Hv4VQ0NQPGeRaEaQfXdiMdpFVVCsZPzN243bbEo0x3xhcODBDl8BR7NB6917cGBdT6dETUBdeqL6gjeAMia56utwLoUfIsFX5c4gKvara31LkbTGcbSXhtO8DECSzhQiY9zdkBtabqseRaCByFQ7wKM3I5YTyimuAUPdMZDZ/eNEM5exKnYS+uVmadFoLmSngeDpmZPe/vnGueDbAVCrAKVKbaHUHsll3NUkM9/iFp89ij140aC74L4wTbaE2Wnp7gowcqpON6QgKB9E9OcZ2pfrWP7Nv9CA7ndaxEPU8FM1fOt+IqYk8wr9Sk2YFhlbo3IB9wj5 jayantjainco@penguin"
                    }]
                }
            }
        },
        'storage_profile': {
            # Canonical Ubuntu Server 16.04 LTS marketplace image.
            'image_reference': {
                'publisher': 'Canonical',
                'offer': 'UbuntuServer',
                'sku': '16.04.0-LTS',
                'version': 'latest'
            },
        },
        'hardware_profile': {
            'vm_size': 'Standard_DS1_v2'
        },
        'network_profile': {
            'network_interfaces': [{
                'id': '/subscriptions/a9e7f5b3-273a-4ebf-8ea5-81dec14515ee/resourceGroups/NetworkWatcherRG/providers/Microsoft.Network/networkInterfaces/test1ss372',
            }]
        },
    }
    compute_vm = compute_client.virtual_machines.create_or_update(GROUP_NAME, VM_NAME, VM_PARAMETERS)
    return compute_vm.result()
# Script body: authenticate from the auth file (AZURE_AUTH_LOCATION),
# create the resource group, network + NIC, then the VM, and finally
# tag it.  NOTE(review): runs at import time — consider a main() guard.
print('\nCreate Resource Group')
resource_client = get_client_from_auth_file(ResourceManagementClient)
compute_client = get_client_from_auth_file(ComputeManagementClient)
resource_client.resource_groups.create_or_update(GROUP_NAME, {'location': LOCATION})
network_client = get_client_from_auth_file(NetworkManagementClient)
nic = create_nic(network_client)
async_vm_creation = create_vm(compute_client)
# Tag the VM
print('\nTag Virtual Machine')
async_vm_update = compute_client.virtual_machines.create_or_update(GROUP_NAME,VM_NAME,{'location': LOCATION,'tags': {'who-rocks': 'python','where': 'on azure'}})
async_vm_update.wait()
"jayantjainco@penguin"
] | jayantjainco@penguin |
3768a008bf0d01110697053d81caba8a96e32d01 | 54f09892bb73cea57e21bce961c32d7cb58629c6 | /Content/WallBackGround.py | 7714fc35ce600baf55c5f24bbf05225d55c7ab95 | [] | no_license | tung362/MtRainierHeroSquad | 6c95609f3a685befea465b2339f5845f4f26b0b9 | a684925325f48e6b3ccfe36b045ac20d401ea198 | refs/heads/master | 2021-01-19T06:58:42.652909 | 2017-04-07T05:16:02 | 2017-04-07T05:16:02 | 87,512,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,853 | py | import Zero
import Events
import Property
import VectorMath
Vector3 = VectorMath.Vec3
class WallBackGround:
    """Zero Engine component for a destructible wall background tile.

    The tile starts with 300 health.  As CurrentHealth drops through each
    quarter of MaxHealth, the sprite is swapped for a progressively more
    damaged texture; at zero (or less) health the tile spawns a rubble
    particle effect once and destroys itself.

    Fixes over the original implementation:
      * the damage stage is computed arithmetically instead of via nine
        copy-pasted if/elif ladders (one per TextureNumber);
      * the original open-interval checks (e.g. ``225 < h < 299``) left
        gaps at exact boundary values (74/75, 149/150, 224/225, 299)
        where no texture change ever fired; half-open quarter ranges
        close those gaps;
      * the destroy check now reads CurrentHealth *after* the first-frame
        reset instead of a stale pre-reset value.
    """

    # Sprite-name fragment per TextureNumber (tile position in the 3x3
    # wall grid).  Full sprite name: "Wall1<fragment>BackgroundBreak<stage>".
    TEXTURE_PREFIXES = {
        0: "LeftTop",
        1: "RightTop",
        2: "MiddleTop",
        3: "LeftBottom",
        4: "LeftMiddle",
        5: "RightBottom",
        6: "RightMiddle",
        7: "MiddleBottom",
        8: "MiddleMiddle",
    }

    def DefineProperties(self):
        # Editor-exposed properties; OnLogicUpdate re-applies the health
        # defaults on the first frame.
        self.MaxHealth = Property.Float(300)
        self.CurrentHealth = Property.Float(300)
        self.Start = Property.Bool(True)
        self.TextureNumber = Property.Int(0)
        # One-shot flags so each damage texture is applied only once.
        self.Switch1 = Property.Bool(True)
        self.Switch2 = Property.Bool(True)
        self.Switch3 = Property.Bool(True)
        self.Switch4 = Property.Bool(True)
        self.SpawnParticle = Property.Bool(True)

    def Initialize(self, initializer):
        Zero.Connect(self.Space, Events.LogicUpdate, self.OnLogicUpdate)

    def OnLogicUpdate(self, UpdateEvent):
        if self.Start:
            # First-frame setup: name the object and reset health.
            self.Owner.Name = "WallBackGround"
            self.MaxHealth = 300
            self.CurrentHealth = 300
            self.Start = False
        self._UpdateDamageSprite()
        if self.CurrentHealth <= 0:
            self._DestroyWall()

    def _UpdateDamageSprite(self):
        """Swap in the damage texture for the current health quarter."""
        prefix = self.TEXTURE_PREFIXES.get(self.TextureNumber)
        if prefix is None:
            return  # unknown tile position; nothing to display
        health = self.CurrentHealth
        if health <= 0 or health >= self.MaxHealth:
            return  # destroyed (handled by caller) or undamaged
        quarter = self.MaxHealth / 4.0
        # Stage 1 = lost up to one quarter, ..., stage 4 = nearly gone.
        stage = min(4, int((self.MaxHealth - health) // quarter) + 1)
        switch_name = "Switch{0}".format(stage)
        if getattr(self, switch_name):
            self.Owner.Sprite.SpriteSource = \
                "Wall1{0}BackgroundBreak{1}".format(prefix, stage)
            setattr(self, switch_name, False)

    def _DestroyWall(self):
        """Spawn the rubble effect once, then remove the wall object."""
        position = self.Owner.Transform.WorldTranslation
        if self.SpawnParticle:
            self.Space.CreateAtPosition(
                "RubbleParticle",
                Vector3(position.x, position.y + 0.5, 4))
            self.SpawnParticle = False
        self.Owner.Destroy()


Zero.RegisterComponent("WallBackGround", WallBackGround)
"7319tnguyen040@gmail.com"
] | 7319tnguyen040@gmail.com |
f101697b52e3d9220832a5188b3e621b8780f43b | c160dcfa9d8ebf186b596cbddd509cd3fde35ffd | /simple_tests/writeTest.py | 7ef47d639d6d034b097baa05f3419f1d7dd581f5 | [] | no_license | hbreauxv/shared_projects | ace56cf9a62c6c755d82ff25b1a3d31738f62cd8 | bf05f4c65ac8af459d9be04c291c5d42c8a56a9d | refs/heads/master | 2021-08-07T05:07:37.777521 | 2018-09-21T19:12:20 | 2018-09-21T19:12:20 | 134,627,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | file = open('testfile.txt', 'w')
file.write('Hello World')
file.close()
| [
"hbreauxv@gmail.com"
] | hbreauxv@gmail.com |
4c69aba309858501551b000e6236b893e0d8f7f7 | 30b2eb381ec8f3225671274e77a55b63206dfb60 | /leetcode/p0461/solve.py | d9975e98a22b92ab40c7733e7fe0660fbb2ee3ca | [] | no_license | b1ueskydragon/PythonGround | 52888f32336e5e20be8490454beb67e676be7057 | 5a401250e88926235f581e6c004d1a4acb44230d | refs/heads/master | 2021-07-10T03:00:38.340959 | 2021-04-02T03:34:29 | 2021-04-02T03:34:29 | 98,208,402 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xor = x ^ y
ones = 0
while xor:
if (xor | 1) == xor:
ones += 1
xor >>= 1
return ones
if __name__ == '__main__':
s = Solution()
"""
011
101
---
110
count = 2
"""
print(s.hammingDistance(3, 5))
| [
"dragoalie@gmail.com"
] | dragoalie@gmail.com |
01d6c0f40e4d86f1d6eaea281414fae24e0e6749 | 966a0c41d2e4c09e4742db4627e8a40d5ec0378c | /tests/common/test_permissions.py | d50e1824db9e32eca716a7d105f13bc5dacbb51f | [
"MIT"
] | permissive | seedwithroot/myth-caster-api | 4436a80f535d455b4708af21427fd99edc24502a | 76a43f48b70c6a4b509c90757d7906689799cc25 | refs/heads/master | 2022-12-19T08:03:53.425495 | 2020-09-27T03:36:33 | 2020-09-27T03:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,529 | py | """ Unit tests for the common.permissions module """
from django.contrib.auth import get_user_model
from ddf import G
from rest_framework.test import APITestCase, APIClient
from character.models import Character
from skills.models import Skill
# Endpoint guarded by IsAdminOrReadOnly, exercised by the first test case.
IS_ADMIN_OR_READONLY_PATH = '/api/users/'
# Endpoints guarded by IsOwnerOrEditor, exercised by the second test case.
SKILLS_PATH = '/api/skills/'
CHARACTERS_PATH = '/api/characters/'
class IsAdminOrReadOnlyTestCase(APITestCase):
    """ Tests for the IsAdminOrReadOnly permissions class.

    Write access (POST/PUT) is probed via OPTIONS metadata: DRF includes
    an 'actions' key only for methods the requesting user may perform.
    """
    def setUp(self):
        # Initialize client
        self.client = APIClient()
        # Initialize different POV users
        user_model = get_user_model()
        self.superuser = user_model.objects.create_superuser('superuser', password='temp')
        self.user = user_model.objects.create_user('user', password='temp')
        self.delete_target = user_model.objects.create_user('delete_target')
    def test_superuser_can_read(self):
        """ Test that a superuser can read data with this permission class """
        self.client.login(username='superuser', password='temp')
        response = self.client.get(f'{IS_ADMIN_OR_READONLY_PATH}')
        self.assertEqual(200, response.status_code)
    def test_superuser_can_create(self):
        """ Test that a superuser can create stuff on endpoints with this permission class """
        self.client.login(username='superuser', password='temp')
        response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}')
        self.assertIn('POST', response.json()['actions'])
    def test_superuser_can_edit(self):
        """ Test that a superuser can edit stuff on endpoints with this permission class """
        self.client.login(username='superuser', password='temp')
        response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}{self.user.id}/')
        self.assertIn('PUT', response.json()['actions'])
    def test_user_can_read(self):
        """ Test that a user can read data with this permission class """
        self.client.login(username='user', password='temp')
        response = self.client.get(f'{IS_ADMIN_OR_READONLY_PATH}')
        self.assertEqual(200, response.status_code)
    def test_user_cant_create(self):
        """ Test that a user cant create stuff on endpoints with this permission class """
        self.client.login(username='user', password='temp')
        response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}')
        # No 'actions' key at all => no write methods allowed.
        self.assertNotIn('actions', response.json())
    def test_user_cant_edit(self):
        """ Test that a user cant edit stuff on endpoints with this permission class """
        self.client.login(username='user', password='temp')
        response = self.client.options(f'{IS_ADMIN_OR_READONLY_PATH}{self.user.id}/')
        self.assertNotIn('actions', response.json())
class IsOwnerOrEditorTestCase(APITestCase):
    """ Tests for the IsOwnerOrEditor permissions class.

    Covers visibility/edit rights for: anonymous and inactive users
    (read-only), authenticated non-owners (read + create), the object's
    owner, its authorized editors, and superusers.  Private objects
    (is_private=True) should be hidden from everyone else — a 403 or
    404 is accepted, since hiding via 404 also satisfies the contract.
    """
    def setUp(self):
        # Initialize client
        self.client = APIClient()
        # Initialize different POV users
        user_model = get_user_model()
        self.superuser = user_model.objects.create_superuser('superuser', password='temp')
        self.owner = user_model.objects.create_user('owner', password='temp')
        self.editor = user_model.objects.create_user('editor', password='temp')
        self.other_user = user_model.objects.create_user('other_user', password='temp')
        self.inactive_user = user_model.objects.create_user('inactive_user', \
            password='temp', is_active=False)
        # Fixtures built with django-dynamic-fixture (G): a public skill
        # and a private character, both owned by self.owner with
        # self.editor as an authorized editor.
        self.skill = G(Skill, owner=self.owner, authorized_editors=[self.editor.id])
        self.private_character = G(Character, is_private=True, \
            owner=self.owner, authorized_editors=[self.editor.id])
    def test_anonymous_readonly(self):
        """ Anonymous users should be able to only read data """
        detail_response = self.client.get(f'{SKILLS_PATH}{self.skill.id}/', format='json')
        options_response_detail = self.client.options(f'{SKILLS_PATH}{self.skill.id}/', \
            format='json')
        self.assertEqual(200, detail_response.status_code)
        # Absent 'actions' key => no write methods offered.
        self.assertNotIn('actions', options_response_detail.json())
    def test_anonymous_403_for_private_content(self):
        """ Test that anonymous users cannot access private content """
        character_response = self.client.get(
            f'{CHARACTERS_PATH}{self.private_character.id}/', format='json')
        self.assertIn(character_response.status_code, [403, 404])
    def test_inactive_user_readonly(self):
        """ Test that an inactive user has read only access, like an anonymous user """
        self.client.login(username='inactive_user', password='temp')
        detail_response = self.client.get(f'{SKILLS_PATH}{self.skill.id}/', format='json')
        options_response_detail = self.client.options(f'{SKILLS_PATH}{self.skill.id}/', \
            format='json')
        self.assertEqual(200, detail_response.status_code)
        self.assertNotIn('actions', options_response_detail.json())
    def test_authenticated_user_can_read_and_post(self):
        """ Test that an authenticated user can create new data, and read data """
        self.client.login(username='other_user', password='temp')
        response = self.client.get(f'{SKILLS_PATH}', format='json')
        self.assertEqual(200, response.status_code)
        options = self.client.options(f'{SKILLS_PATH}', format='json')
        self.assertIn('POST', options.json()['actions'])
    def test_superuser_can_edit(self):
        """ Superusers should be able to edit data """
        self.client.login(username='superuser', password='temp')
        response = self.client.options(f'{SKILLS_PATH}{self.skill.id}/', format='json')
        self.assertIn('PUT', response.json()['actions'])
    def test_authenticated_non_owner_editor_user_cant_edit(self):
        """ Test that any authenticate users can create new data """
        self.client.login(username='other_user', password='temp')
        response = self.client.options(f'{SKILLS_PATH}{self.skill.id}/')
        self.assertNotIn('actions', response.json())
    def test_private_object_superuser_can_see(self):
        """ Test that for private objects, the superuser can edit """
        self.client.login(username='superuser', password='temp')
        get_response = self.client.get(f'{CHARACTERS_PATH}{self.private_character.id}/', \
            format='json')
        self.assertEqual(200, get_response.status_code)
    def test_private_object_owner_can_see(self):
        """ Test that for private objects, the owner can edit """
        self.client.login(username='owner', password='temp')
        get_response = self.client.get(f'{CHARACTERS_PATH}{self.private_character.id}/')
        self.assertEqual(200, get_response.status_code)
    def test_private_object_editor_can_see(self):
        """ Test that for private objects, the editor can edit """
        self.client.login(username='editor', password='temp')
        get_response = self.client.get(f'{CHARACTERS_PATH}{self.private_character.id}/', \
            format='json')
        self.assertEqual(200, get_response.status_code)
    def test_private_object_other_users_cant_see(self):
        """ Test that for private objects, other users cant edit """
        self.client.login(username='other_user', password='temp')
        get_response = self.client.get(f'{CHARACTERS_PATH}{self.private_character.id}/', \
            format='json')
        self.assertIn(get_response.status_code, [403, 404])
"tristangadams@gmail.com"
] | tristangadams@gmail.com |
61d86514b4f788b34ca4fa7e4b2e2c9fe7d8d723 | c07de6f278e7ffc11b4e1c7b37fdeae0d8aeb4a1 | /Section A/group10/task_1/part_1.py | 81576b2078ad1bfc40b4448aba58bd1cf2c06845 | [] | no_license | Medhashah/Big-DataProject | 729c1d14efc5096bae3dd150e9c218fa583a7861 | 571473626e82881b07073edf1de52f2b3563b294 | refs/heads/master | 2020-10-01T07:30:25.637471 | 2019-12-12T21:16:08 | 2019-12-12T21:16:08 | 227,487,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,176 | py | import json
import subprocess
import os
from dateutil import parser
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import pyspark.sql.functions as F
import time
import sys
def main(start_index, end_index):
    """Profile NYC Open Data files [start_index, end_index) (sorted ascending by size)
    and write one JSON profiling result per dataset under job_<start>_<end>/."""
    spark = SparkSession \
        .builder \
        .appName("big_data_prof") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()
    # conf = spark.sparkContext._conf.setAll(
    #     [('spark.executor.memory', '8g'), ('spark.app.name', 'big_data_proj'), ('spark.executor.cores', '4'),
    #      ('spark.cores.max', '4'), ('spark.driver.memory', '8g')])
    # spark.sparkContext.stop()
    # spark = SparkSession.builder.config(conf=conf).getOrCreate()
    # new_conf = spark.sparkContext._conf.getAll()
    # print(new_conf)
    # List the datasets on HDFS; each `hadoop fs -ls` line carries the size in
    # column index 4 and the path in column index 7.
    cmd = "hadoop fs -ls /user/hm74/NYCOpenData"
    files = subprocess.check_output(cmd, shell=True).decode().strip().split('\n')
    pfiles = [(x.split()[7], int(x.split()[4])) for x in files[1:]]
    # Sort ascending by file size so the cheapest datasets are profiled first.
    pfiles_sorted = sorted(pfiles, key=lambda x: x[1])
    if not os.path.exists('job_{}_{}'.format(start_index, end_index)):
        os.mkdir('job_{}_{}'.format(start_index, end_index))
    for i, nyc_open_datafile in enumerate(pfiles_sorted[start_index:end_index]):
        print("processing number {} of {}".format(i+start_index, end_index))
        # pretty hacky preprocessing but it will work for now
        # could maybe use pathlib library or get it with hdfs
        processed_path = nyc_open_datafile[0]
        df_nod = spark.read.option("header", "true").option("delimiter", "\t").csv(processed_path)
        try:
            file_name = processed_path.split('/')[-1].replace('.tsv.gz', '')
            print(file_name)
            # Skip datasets that already have a result file so the job can resume.
            if os.path.exists("job_{}_{}/{}.json".format(start_index, end_index, file_name)):
                continue
            start_process = time.time()
            bp = BasicProfiling(processed_path, df_nod)
            table_dict = bp.process()
            json_type = json.dumps(table_dict)
            #write to hdfs
            # spark.parallelize([json_type]).toDF().coalesce(1).write.json('/user/gl758/big_data/job_{}_{}/{}'.format(start_index, end_index, file_name))
            with open("job_{}_{}/{}.json".format(start_index, end_index, file_name), 'w+', encoding="utf-8") as f:
                f.write(json_type)
            end_process = time.time()
            print("total process time {}".format(end_process - start_process))
        except Exception as e:
            # Broad catch on purpose: one bad dataset must not kill the batch run.
            print("unable to process because {}".format(e))
# We should put this in it's on package, but submitting with packages is kind of annoying so
# I moved it out for now look at --py-files
#https://spark.apache.org/docs/latest/submitting-applications.html
class BasicProfiling:
    """
    Basic data profiling for a Spark dataframe: per-column null/distinct
    counts, most-frequent values, and type-specific statistics for cells
    classified as INT / REAL / DATE / TEXT. Results accumulate in
    `table_dict` and are returned by `process()`.
    """

    def __init__(self, dataset_name, df_nod):
        self.dataset_name = dataset_name
        self.df_nod = df_nod
        self.table_dict = dict()  # final profiling result (JSON-serialisable)
        self.columns = self.df_nod.columns
        self.spec_types = ['INT', 'REAL', 'DATE', 'TEXT']
        # self.column_dict = None # the currently processed column dict
        # self.column = None # the currently processed column dataframe

    def __set_up_dictionary(self):
        # Initialise the top-level result structure.
        self.table_dict['dataset_name'] = self.dataset_name
        self.table_dict['columns'] = []

    def __add_column_general_info(self, column, column_name):
        # One-row aggregate with non-null / distinct / null counts, plus the
        # five most frequent values of the column.
        general_count = column.agg(lit(column_name).alias("name"), count(when(col(column_name).isNotNull(), True)).alias("count_not_null"), countDistinct(col(column_name)).alias("distinct"), count(when(col(column_name).isNull(), True)).alias("count_null"))
        general_fre = column.groupBy(column_name).agg(count(column_name).alias("count_col")).orderBy(desc("count_col")).limit(5).agg(collect_list(column_name).alias('fre'))
        return general_count, general_fre

    def _add_datatype_columns(self, column, column_name):
        """
        Adds a "dtype" column (INT/REAL/DATE/TEXT/null) computed per cell by
        get_column_type; this roughly doubles the width of the working frame.
        :return: the dataframe with the extra "dtype" column
        """
        get_column_type_udf = udf(self.get_column_type)
        column = column.withColumn("dtype", get_column_type_udf(column_name))
        return column

    def __get_stats_int(self, column, column_name):
        # count/max/min/mean/stddev over the cells classified as INT.
        # column_name arrives back-quoted; [1:-1] strips the backticks.
        int_info = column.filter("dtype = 'INT'").withColumn(column_name[1:-1], col(column_name).cast('int'))\
            .select(array(count(col(column_name)), F.max(col(column_name)), F.min(col(column_name)), mean(col(column_name)), stddev(col(column_name))).alias('stats_int'))
        return int_info

    def __get_stats_double(self, column, column_name):
        # Same five statistics over the cells classified as REAL.
        double_info = column.filter("dtype = 'REAL'").withColumn(column_name[1:-1], column[column_name].cast('double')).\
            select(array(count(column_name), max(column_name), min(column_name), mean(column_name), stddev(column_name)).alias('stats_double'))
        return double_info

    def __get_stats_date(self, column, column_name):
        # count plus ISO-formatted max/min over the cells classified as DATE.
        udf_cast_date = udf(BasicProfiling.__get_datetime)
        date_info = column.filter("dtype = 'DATE'").select(array(count(column_name), max(udf_cast_date(column_name)),
                                                                 min(udf_cast_date(column_name))).alias('stats_date'))
        return date_info

    def __get_stats_text(self, column, column_name):
        # count and mean length of TEXT cells, plus 5 shortest / 5 longest values.
        df_len = column.filter("dtype = 'TEXT'").withColumn("len", length(column_name))
        text_info = df_len.select(array(count(column_name), mean("len")).alias('stats_text'))
        shortest = df_len.orderBy(asc("len")).limit(5).agg(collect_list(column_name).alias('shortest_values')).select('shortest_values')
        longest = df_len.orderBy(desc("len")).limit(5).agg(collect_list(column_name).alias('longest_values')).select('longest_values')
        return text_info, shortest, longest

    def __convert_df_to_dict(self, integer, real, date, text, shortest, longest, count, fre):
        # Collect the accumulated per-column dataframes (one row per column,
        # all in the same column order) and flatten them into plain dicts.
        stats_int = integer.collect()
        stats_double = real.collect()
        stats_date = date.collect()
        stats_text = text.collect()
        stats_shortest = shortest.collect()
        stats_longest = longest.collect()
        general_count = count.collect()
        # general_empty = empty.collect()
        general_fre = fre.collect()
        for i in range(len(stats_int)):
            column_dict = {}
            column_stats = [general_count[i][0], stats_int[i][0], stats_double[i][0], stats_date[i][0], stats_text[i][0], stats_shortest[i][0], stats_longest[i][0]]
            column_dict['column_name'] = column_stats[0]
            column_dict['number_empty_cells'] = general_count[i][3]
            column_dict['number_non_empty_cells'] = general_count[i][1]
            column_dict['number_distinct_values'] = general_count[i][2]
            column_dict['frequent_values'] = general_fre[i][0]
            column_dict['data_type'] = []
            # Each type entry is only emitted when at least one cell of that
            # type was observed in the column.
            if column_stats[1][0] != 0:
                type_dict = {}
                # NOTE(review): "INTERGER(LONG)" is misspelled, but downstream
                # consumers may match on this exact string — confirm before fixing.
                type_dict['type'] = "INTERGER(LONG)"
                type_dict['count'] = int(column_stats[1][0])
                type_dict['max_value'] = int(column_stats[1][1])
                type_dict['min_value'] = int(column_stats[1][2])
                type_dict['mean'] = float(column_stats[1][3])
                type_dict['stddev'] = float(column_stats[1][4])
                column_dict['data_type'].append(type_dict)
            if column_stats[2][0] != 0:
                type_dict = {}
                type_dict['type'] = 'REAL'
                type_dict['count'] = int(column_stats[2][0])
                type_dict['max_value'] = float(column_stats[2][1])
                type_dict['min_value'] = float(column_stats[2][2])
                type_dict['mean'] = float(column_stats[2][3])
                type_dict['stddev'] = float(column_stats[2][4])
                column_dict['data_type'].append(type_dict)
            # Date stats come back as strings, hence the '0' comparison here.
            if column_stats[3][0] != '0':
                type_dict = {}
                type_dict['type'] = "DATE/TIME"
                type_dict['count'] = int(column_stats[3][0])
                type_dict['max_value'] = column_stats[3][1]
                type_dict['min_value'] = column_stats[3][2]
                column_dict['data_type'].append(type_dict)
            if column_stats[4][0] != 0:
                type_dict = {}
                type_dict['type'] = "TEXT"
                type_dict['count'] = column_stats[4][0]
                type_dict['shortest_value'] = column_stats[5]
                type_dict['longest_value'] = column_stats[6]
                type_dict['average_length'] = column_stats[4][1]
                column_dict['data_type'].append(type_dict)
            self.table_dict['columns'].append(column_dict)

    @staticmethod
    def get_column_type(val):
        """
        Returns the type label of the value: 'INT', 'REAL', 'DATE', 'TEXT',
        or None for null cells. Order matters: INT is tried before REAL,
        and both before DATE.
        :param val: a single cell value (string or None)
        :return: type label or None
        """
        if BasicProfiling.__is_int(val):
            return 'INT'
        elif BasicProfiling.__is_real(val):
            return 'REAL'
        elif BasicProfiling.__is_datetime(val):
            return 'DATE'
        elif val is None:
            return None
        else:
            return 'TEXT'

    @staticmethod
    def __is_int(val):
        # True when val parses as an integer.
        try:
            int(val)
            return True
        except (ValueError, TypeError):
            return False

    @staticmethod
    def __is_real(val):
        # True when val parses as a float.
        try:
            float(val)
            return True
        except (ValueError, TypeError):
            return False

    @staticmethod
    def __is_datetime(val):
        # True when dateutil can parse val as a date/time.
        try:
            parser.parse(val)
            return True
        # raw exception here, I tried to catch none raw dateutil error exception, but it's giving some errors
        # not sure I will need to fix up.
        except:
            return False

    @staticmethod
    def __get_datetime(val):
        #tried to give actual timestamp, but then we can't put it into array, so instead I'm giving iso format
        return parser.parse(val).isoformat()

    def process(self):
        """Profile every column of the dataframe and return the populated table_dict."""
        start = time.time()  # NOTE: currently unused timing anchor
        self.__set_up_dictionary()
        for i, column_name in enumerate(self.columns):
            # Back-quote the name so Spark accepts columns with dots/spaces.
            column_name = "`{}`".format(column_name)
            column = self.df_nod.select(column_name)
            general_count, general_fre = self.__add_column_general_info(column, column_name)
            # # generate type_dict
            column = self._add_datatype_columns(column, column_name)
            stats_int = self.__get_stats_int(column, column_name)
            stats_double = self.__get_stats_double(column, column_name)
            stats_date = self.__get_stats_date(column, column_name)
            stats_text, shortest, longest = self.__get_stats_text(column, column_name)
            # Accumulate all per-column one-row frames via union so that a
            # single collect() per statistic suffices at the end.
            if i == 0:
                stats_table_int = stats_int
                stats_table_double = stats_double
                stats_table_date = stats_date
                stats_table_text = stats_text
                table_shortest = shortest
                table_longest = longest
                general_table_count = general_count
                general_table_fre = general_fre
            else:
                stats_table_int = stats_table_int.union(stats_int)
                stats_table_double = stats_table_double.union(stats_double)
                stats_table_date = stats_table_date.union(stats_date)
                stats_table_text = stats_table_text.union(stats_text)
                table_shortest = table_shortest.union(shortest)
                table_longest = table_longest.union(longest)
                general_table_count = general_table_count.union(general_count)
                general_table_fre = general_table_fre.union(general_fre)
        self.__convert_df_to_dict(stats_table_int, stats_table_double, stats_table_date, stats_table_text, table_shortest, table_longest, general_table_count, general_table_fre)
        return self.table_dict
if __name__ == "__main__":
    # CLI usage: python part_1.py <start_index> <end_index>
    start_index = int(sys.argv[1])
    end_index = int(sys.argv[2])
    main(start_index, end_index)
| [
"mys286@nyu.edu"
] | mys286@nyu.edu |
b9fbeb110968935494faf7bf0cf13c35573c9be4 | 8bb610173584f7027b49ddfde3caa8806c509a1d | /automatedtesting/selenium/selenium-test.py | b70e208ee6d9bc32e2bc71eb57a3fd30f4d6d5d6 | [] | no_license | ooeid/ensuring-quality-releases-with-azure | bb72c206ad97129694e3f874987e27371bd3b2bf | 49ada0f70c7fe49006d1c8d78e4e3537f7038424 | refs/heads/main | 2023-07-21T23:09:34.843145 | 2021-09-05T00:26:09 | 2021-09-05T00:26:09 | 397,095,548 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | # #!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
import datetime
def timestamp():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
# Start the browser and login with standard_user
def login (driver, user="standard_user", password="secret_sauce"):
    """Log into saucedemo.com with the given credentials and assert that the
    inventory page is reached. Raises AssertionError on a failed login."""
    #Login
    driver.get('https://www.saucedemo.com/')
    driver.find_element_by_css_selector("input[id='user-name']").send_keys(user)
    driver.find_element_by_css_selector("input[id='password']").send_keys(password)
    driver.find_element_by_id("login-button").click()
    assert 'https://www.saucedemo.com/inventory.html' in driver.current_url
    print(timestamp() +' Login successful with username '+ user + ' and password '+ password)
if __name__ == "__main__":
    print ('Starting the browser...')
    # --uncomment when running in Azure DevOps.
    options = ChromeOptions()
    options.add_argument("--headless")
    driver = webdriver.Chrome(options=options)
    #driver = webdriver.Chrome()
    print (timestamp()+' Browser started successfully. Navigating to the demo page to login.')
    login(driver, 'standard_user', 'secret_sauce')
    # FIX: add_cart(driver, 6) and remove_cart(driver, 6) were called here, but
    # neither function is defined anywhere in this module, so the script always
    # crashed with NameError right after a successful login. Removed until
    # those helpers are implemented.
    print(timestamp() + ' Selenium Tests DONE')
"ooeid"
] | ooeid |
1d6d8f589fd670ded80fcfaa97a34c53e40fac81 | 960f3c1b1a64ee58c939857b91191df18a535fe6 | /notion_api/hotspot_updater.py | 9a4b3358532917b4324771d38de02a86e131f9fb | [] | no_license | arshadansari27/growth_board | f15ac23d4a87cf0a202bec6bd5a2027ba9cde660 | 7f13a99c1ad965456399ca180e0cb1686011e26a | refs/heads/master | 2022-12-09T06:27:10.280818 | 2020-06-25T08:03:29 | 2020-06-25T08:03:29 | 243,248,710 | 0 | 0 | null | 2022-12-08T03:56:26 | 2020-02-26T11:41:18 | Python | UTF-8 | Python | false | false | 1,815 | py | from datetime import datetime, timedelta
import pytz
import requests
from requests.auth import HTTPBasicAuth
from config import CONFIG, TOGGL_URL, NOTION_TRACKING_DAILY_URL, TOGGL_KEY
from notion_api import NotionDB
# Resolve endpoint/credential values once at import time from the CONFIG mapping.
daily_tracking_url = CONFIG[NOTION_TRACKING_DAILY_URL]
toggl_url = CONFIG[TOGGL_URL]
api_key = CONFIG[TOGGL_KEY]
def update_daily_hotspots():
    """Backfill the last 7 days of the daily tracking Notion DB with Toggl
    hours per category, marking every day except today as Final."""
    # Fixed offset of +5:30 (IST) — presumably the author's timezone.
    end_date = datetime.now(tz=pytz.FixedOffset(330))
    start_date = end_date - timedelta(days=7)
    daily_tracking_db = NotionDB(daily_tracking_url)
    while start_date <= end_date:
        # Today's data is still changing, so don't freeze it yet.
        set_final = False if start_date >= end_date else True
        st = start_date.date()
        row = daily_tracking_db.find_one_by('Date', st)
        if row and not row.Final:
            print('Updating:', row.title, st)
            data = get_data(st)
            for k, v in data.items():
                setattr(row, k, v)
            if set_final:
                row.Final = True
        else:
            # Either no row exists for this date or it is already finalised.
            print("Skipping", st)
        start_date += timedelta(days=1)
def get_data(today):
    """Fetch Toggl report entries for a single day and return total hours per
    life category (client), e.g. {'Fun': 1.5, 'Career': 6.0, ...}."""
    def _client_data(dict_data):
        # Extract (client, hours) from one report entry; Toggl reports time in
        # milliseconds, hence the 3600 * 1000 divisor.
        client = dict_data['title']['client']
        time = dict_data['time']
        return client, round(time / (3600 * 1000), 2)
    auth = HTTPBasicAuth(api_key, 'api_token')
    url = toggl_url + f"&since={today}&until={today}"
    data = requests.get(url, auth=auth)
    data = data.json()['data']
    # Drop entries without a client (falsy first tuple element).
    data = [u for u in [_client_data(u) for u in data] if u[0]]
    by_client = {
        'Fun': [],
        'Career':[],
        'Relationship': [],
        'Mind': [],
        'Deen': [],
        'Body': [],
        'Chores':[],
        'Finance': [],
    }
    for c, t in data:
        by_client[c].append(t)
    return {c: sum(t) for c, t in by_client.items()}
if __name__ == '__main__':
    # Manual entry point: run one backfill pass over the last week.
    update_daily_hotspots()
"arshadansari27@gmail.com"
] | arshadansari27@gmail.com |
3be69ff7ff3d5004acb3b797e69f0622319996a5 | 6809845abdfa971271a42de292c6af46241d60aa | /photo/migrations/0006_auto_20151203_2048.py | fabbafe0c642497aa138fa2f306fcee1fd01f65c | [] | no_license | VosOditDeus/Ms | 180ddf2aa819da5b41d819f832deeaf0722c4e8e | da3d83052525e72f49dd10d6a6f633f021fb1e75 | refs/heads/master | 2021-05-01T09:15:45.793397 | 2016-02-02T19:30:48 | 2016-02-02T19:30:48 | 47,513,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the Image.album field with a
    # ManyToManyField (the old field is dropped and re-created with the new type).

    dependencies = [
        ('photo', '0005_auto_20151203_2038'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='image',
            name='album',
        ),
        migrations.AddField(
            model_name='image',
            name='album',
            field=models.ManyToManyField(to='photo.Album', blank=True),
        ),
    ]
| [
"vosoditdeus@gmail.com"
] | vosoditdeus@gmail.com |
6604fb15b9ff9268bd74549c1ed6720f0e95bce2 | b729e399ee736b364630d4648bf51285892789d7 | /automatic/utils/server.py | ec8ecfaa91f7f0cfc11c35a5630aa1f1d244e1cf | [] | no_license | antmoveh/auto | 25ec59517257706be3e20a9eb87a81bc0c7c8a11 | ad55f0b6e0021e309f1ea69d35d1125231477965 | refs/heads/master | 2021-07-14T02:33:46.984381 | 2017-07-12T09:46:02 | 2017-07-12T09:46:02 | 96,770,244 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | from .exceptions import DeployError
from .log import LOG
from automatic.models import *
class SERVER():
    """
    Loads a deployment server's full configuration (hosts, working paths,
    database connections and deploy actions) from the Django ORM models.

    Construct with either a server id (`s_id`) or a server name (`s_name`).
    If neither is given or the record cannot be found, `self.id` stays None
    and `is_ready()` returns False.
    """
    def __init__(self, s_id=None, s_name=None):
        self.id = None
        self.server_name = None
        self.server_host = {}    # host_id -> ssh connection info
        self.server_db = {}      # database name -> connection info
        self.server_path = {}    # host_id -> {module -> working-path info}
        self.server_action = {}  # host_id (0 = standard actions) -> ordered action list
        if s_id is None and s_name is None:
            # FIX: was `return False`, which raises
            # "TypeError: __init__() should return None, not 'bool'" at runtime.
            return
        log = LOG('svr_info', False)
        # Resolve the server record from whichever identifier was supplied.
        if s_id is None:
            try:
                server = Dep_Server.objects.filter(server=s_name)[0]
            except IndexError:
                log.error('Cannot find server with name: ' + s_name)
                return
            else:
                s_id = server.id
        else:
            try:
                server = Dep_Server.objects.filter(id=s_id)[0]
            except IndexError:
                log.error('Cannot find server with id: ' + str(s_id))
                return
            else:
                s_name = server.server
        self.id = server.id
        self.server_name = s_name
        # Load hosts and, per host, the module working paths.
        try:
            host_list = server.hosts.all()
        except Exception as e:
            log.warning(e.args[0])
        else:
            if len(host_list) == 0:
                log.warning('Cannot find [' + s_name + ']\'s host data.')
            else:
                for host in host_list:
                    self.server_host[host.id] = {'host': host.host, 'port': host.ssh_port, 'user': host.ssh_user,
                                                 'pass': host.ssh_pass, 'pkey': host.ssh_file}
                    try:
                        path_list = host.paths.all()
                    except IndexError:
                        log.warning('Cannot find <' + host.host + '>@[' + s_name + ']\'s working path data.')
                    else:
                        if len(path_list) == 0:
                            log.warning('Cannot find <' + host.host + '>@[' + s_name + ']\'s working path data.')
                        else:
                            s_p = {}
                            for path in path_list:
                                s_p[path.module] = {'module': path.module, 'work_mod': path.work_mod,
                                                    'src_name': path.src_name, 'des_path': path.des_path,
                                                    'des_name': path.des_name}
                            self.server_path[host.id] = s_p
        # Load database connection definitions.
        try:
            db_list = server.databases.all()
        except IndexError:
            log.warning('Cannot find [' + s_name + ']\'s database connection data.')
        else:
            if len(db_list) == 0:
                log.warning('Cannot find [' + s_name + ']\'s database connection data.')
            else:
                for db in db_list:
                    self.server_db[db.database] = {'host': db.db_host, 'type': db.db_type, 'name': db.db_name,
                                                   'port': db.db_port, 'user': db.db_user, 'pass': db.db_pass}
        # Load deploy actions: host_id 0 holds the server-wide (standard)
        # actions, followed by per-host overrides.
        for action_host_id in ([0] + list(self.server_host.keys())):
            try:
                action_list = Dep_Action.objects.filter(server_id=self.id, host_id=action_host_id, active='Y')
            except IndexError:
                log.warning('Cannot find [' + s_name + ']\'s deploy action data.')
            else:
                if len(action_list) == 0:
                    if action_host_id == 0:
                        log.warning('Cannot find [' + s_name + ']\'s standard deploy action data.')
                else:
                    actions = []
                    action_list = action_list.order_by('sequence')
                    for action in action_list:
                        actions.append({'id': action.id, 'prod_id': action.prod_id, 'sequence': action.sequence,
                                        'operation': action.operation, 'param1': action.param1, 'param2': action.param2, \
                                        'param3': action.param3, 'param4': action.param4, 'param5': action.param5})
                    self.server_action[action_host_id] = actions
        log.info('Load server [' + s_name + ']\'s configurations.')

    def is_ready(self):
        """True when a server record was successfully resolved in __init__."""
        if self.id is not None:
            return True
        else:
            return False

    def get_db_info(self, database=None):
        """Return the connection dict (db_host/db_type/db_name/db_port/db_user/db_pass)
        for the named database, False when unknown; raises DeployError('36')
        when no database name is given."""
        if database is None:
            raise DeployError('36')
        db_info = {}
        if database in self.server_db:
            db_info['db_host'] = self.server_db[database]['host']
            db_info['db_type'] = self.server_db[database]['type']
            db_info['db_name'] = self.server_db[database]['name']
            db_info['db_port'] = self.server_db[database]['port']
            db_info['db_user'] = self.server_db[database]['user']
            db_info['db_pass'] = self.server_db[database]['pass']
            return db_info
        else:
            return False
| [
"shen.ren.love@163.com"
] | shen.ren.love@163.com |
51c7bb8a93222572915e9c948d7a2094ae2afc0b | 9c04dfec0c7e6a5e82fc5bf0ddf2e06c2308cdef | /packages/auto-nlp-deployment/src/trainings/runtimes/kubernetes/kubernetes_training_actor.py | 4573f5e9ba944ad7316f05ef13a0a4da4a77c2b8 | [
"MIT"
] | permissive | fhswf/tagflip-autonlp | 8d678c780476d20d4d870a23320e5908a4e8972f | f94abb35ed06198567e5d9cbb7abb7e112149d6c | refs/heads/main | 2023-04-07T10:19:01.108884 | 2022-04-10T19:56:48 | 2022-04-10T19:56:48 | 410,777,896 | 5 | 2 | MIT | 2022-04-10T12:19:35 | 2021-09-27T07:07:28 | TypeScript | UTF-8 | Python | false | false | 4,422 | py | import json
import os
import kubernetes
import mlflow
import mlflow.projects
import yaml
from mlflow.entities import RunStatus as MLFlowRunStatus
from mlflow.projects.kubernetes import KubernetesSubmittedRun
from mlflow.tracking import MlflowClient
from docker_repositories import DockerRepositoryService
from trainings.runtimes.kubernetes.kubernetes_training_runtime_config import KubernetesTrainingRuntimeConfig
from trainings.training_actor import TrainingActor, TrainingDescription
from util import find_file
from ...training_actor import Run
class KubernetesRun(Run):
    """Adapter wrapping an mlflow KubernetesSubmittedRun behind the local Run interface."""

    def __init__(self, submitted_run: KubernetesSubmittedRun):
        super().__init__(run_id=submitted_run.run_id)
        self._submitted_run = submitted_run
        self._client = MlflowClient()

    def wait(self):
        """Block until the submitted Kubernetes job finishes."""
        self._submitted_run.wait()

    def stop(self):
        """Cancel the Kubernetes job and mark the MLflow run FAILED unless it already FINISHED."""
        # TODO: Cancelling job does not remove pods. # https://github.com/mlflow/mlflow/pull/3997
        self._submitted_run.cancel()
        kube_run = mlflow.get_run(self.run_id)
        if kube_run.info.status != MLFlowRunStatus.to_string(MLFlowRunStatus.FINISHED):
            self._client.set_terminated(run_id=self.run_id, status=MLFlowRunStatus.to_string(MLFlowRunStatus.FAILED))

    def is_running(self):
        # Deliberately unimplemented: callers of this backend never poll.
        raise NotImplementedError("Not required")
class KubernetesTrainingActor(TrainingActor):
    """
    Runs an MLflow project as a Kubernetes job.
    Fails currently due to https://github.com/mlflow/mlflow/issues/3412
    """

    class Config:
        # MLflow creates its own run for kubernetes-backed projects, so the
        # surrounding framework must not open one itself.
        creates_mlflow_run_internally = True

    def start_run(self, training_description: TrainingDescription, tmp_dir: str, run: mlflow.ActiveRun = None,
                  **kwargs) -> Run:
        """Prepare the k8s job template + backend config and submit the MLflow
        project asynchronously; returns a KubernetesRun handle."""
        kube_runtime_config = KubernetesTrainingRuntimeConfig.parse_obj(training_description.runtime_config)
        env = training_description.env_vars

        def read_mlflow_project_file():
            # Locate and parse the MLproject file next to the training script.
            ml_project_file = find_file("MLproject", training_description.script_url)
            if not ml_project_file:
                # NOTE(review): message typo "Could not file" -> "Could not find";
                # left unchanged here since it is a runtime string.
                raise RuntimeError("Could not file MLproject file.")
            with open(ml_project_file, 'r') as f:
                return yaml.safe_load(f)

        def build_job_spec_template():
            # Copy the configured k8s job template into tmp_dir, injecting the
            # training environment variables into the first container spec.
            if not os.path.isfile(kube_runtime_config.job_template_path):
                raise RuntimeError(
                    "Job template not found in path " + os.path.abspath(
                        kube_runtime_config.job_template_path))
            with open(kube_runtime_config.job_template_path, 'r') as f:
                job_spec = yaml.safe_load(f)
            container_spec = job_spec['spec']['template']['spec']['containers'][0]
            if 'env' not in container_spec:
                container_spec['env'] = []
            for k, v in env.items():
                container_spec['env'].append({"name": k, "value": v})
            target_file = os.path.join(tmp_dir, os.path.basename(kube_runtime_config.job_template_path))
            with open(target_file, 'w') as f:
                yaml.safe_dump(job_spec, f)
            return target_file

        docker_repo = DockerRepositoryService().get_repository(kube_runtime_config.repository)
        docker_repo.login()
        template_path = build_job_spec_template()
        ml_project_config = read_mlflow_project_file()

        def create_backend_config() -> str:
            # Write the MLflow kubernetes backend config JSON and return its path.
            backend_config = {
                "kube-context": kube_runtime_config.name,
                "repository-uri": docker_repo.image_name_for_project(ml_project_config['name']),
                "kube-job-template-path": template_path
            }
            self.logger.debug(f"Kubernetes config: {str(backend_config)}")
            file = os.path.join(tmp_dir, "kubernetes-backend-config.json")
            with open(file, 'w') as fp:
                json.dump(backend_config, fp)
            return file

        backend_config = create_backend_config()
        run = mlflow.projects.run(training_description.script_url,
                                  experiment_name=training_description.project_id,
                                  parameters=training_description.script_parameters,
                                  storage_dir=tmp_dir,
                                  backend='kubernetes',
                                  backend_config=backend_config,
                                  synchronous=False)
        return KubernetesRun(run)
| [
"timo@n.euhaus.net"
] | timo@n.euhaus.net |
20ad013549fca69ede21708590b3ebd07ae70136 | f1818c9ce5d26c89b2b3eda46a5288577122b937 | /Tic_tac_toe_python/game.py | cb33f21c4e5581a56fa3413a7efb9796cc14a8b9 | [] | no_license | Theodore1512/Theodore1512 | 04bc22f070a049b32d3b5f4ec00ec3ccc30e26bf | 532a628b8d9b80beb298c1fae1bca0080e9fb606 | refs/heads/main | 2023-07-11T05:29:50.733078 | 2021-08-31T14:42:31 | 2021-08-31T14:42:31 | 392,699,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,958 | py | #
# inspired by Kylie Ying
# Developed by Theodore Vasilikos
# 8-27-21
#
import math
import time
from player import HumanPlayer, RandomComputerPlayer, SmartComputerPlayer
class TicTacToe():
    """A 3x3 tic-tac-toe board. Squares are indexed 0-8, row by row; empty
    squares hold ' ' and occupied squares hold the player's letter."""

    def __init__(self):
        self.board = self.make_board()
        self.current_winner = None  # becomes 'X' or 'O' once someone wins

    @staticmethod
    def make_board():
        """Return a fresh board of nine empty squares."""
        return [' '] * 9

    def print_board(self):
        """Print the current board, one row per line."""
        for start in (0, 3, 6):
            row = self.board[start:start + 3]
            print('| ' + ' | '.join(row) + ' |')

    @staticmethod
    def print_board_nums():
        """Print the square numbering (0-8) so players know what to type."""
        # 0 | 1 | 2
        for start in (0, 3, 6):
            labels = [str(start + offset) for offset in range(3)]
            print('| ' + ' | '.join(labels) + ' |')

    def make_move(self, square, letter):
        """Place `letter` on `square` if it is free; update current_winner.
        Returns True when the move was made, False when the square was taken."""
        if self.board[square] != ' ':
            return False
        self.board[square] = letter
        if self.winner(square, letter):
            self.current_winner = letter
        return True

    def winner(self, square, letter):
        """Return True if placing `letter` on `square` completed a line."""
        # Row containing the square.
        row_start = (square // 3) * 3
        if all(self.board[row_start + offset] == letter for offset in range(3)):
            return True
        # Column containing the square.
        col = square % 3
        if all(self.board[col + 3 * r] == letter for r in range(3)):
            return True
        # Diagonals only pass through even-numbered squares.
        if square % 2 == 0:
            if all(self.board[i] == letter for i in (0, 4, 8)):
                return True
            if all(self.board[i] == letter for i in (2, 4, 6)):
                return True
        return False

    def empty_squares(self):
        """True while at least one square is still free."""
        return ' ' in self.board

    def num_empty_squares(self):
        """Number of free squares left."""
        return self.board.count(' ')

    def available_moves(self):
        """Indices of all free squares."""
        return [idx for idx, cell in enumerate(self.board) if cell == " "]
def play(game, x_player, o_player, print_game=True):
    """Run one game to completion; X always moves first.
    Returns the winning letter ('X'/'O'), or None on a tie."""
    if print_game:
        game.print_board_nums()
    letter = 'X'
    while game.empty_squares():
        # Ask the current player for a square.
        if letter == 'O':
            square = o_player.get_move(game)
        else:
            square = x_player.get_move(game)
        if game.make_move(square, letter):
            if print_game:
                print(letter + ' makes a move to square {}'.format(square))
                game.print_board()
                print('')
            if game.current_winner:
                if print_game:
                    print(letter + ' wins!')
                return letter  # ends the loop and exits the game
            letter = 'O' if letter == 'X' else 'X'  # switches player
        # Small pause so the moves are readable when a computer plays.
        time.sleep(.8)
    if print_game:
        print('It\'s a tie!')
if __name__ == '__main__':
    # Interactive mode selection; runs exactly one game per launch.
    # In modes 2 and 3 the computer plays 'X' and the human plays 'O'.
    question = input("""Which mode de you want to play?
1. Human Vs Human
2. Human Vs Easy Computer
3. Human Vs Hard Computer (A.I)
4. Quit
Please select by typing 1, 2, 3 or 4: """)
    if question == '1':
        x_player = HumanPlayer('X')
        o_player = HumanPlayer('O')
        t = TicTacToe()
        play(t, x_player, o_player, print_game=True)
    elif question == '2':
        x_player = RandomComputerPlayer('X')
        o_player = HumanPlayer('O')
        t = TicTacToe()
        play(t, x_player, o_player, print_game=True)
    elif question == '3':
        x_player = SmartComputerPlayer('X')
        o_player = HumanPlayer('O')
        t = TicTacToe()
        play(t, x_player, o_player, print_game=True)
    elif question == '4':
        print('Thanks for playing my game and have a great day! Yours Theodore')
        exit()
    else:
        # Invalid selection: show a hint and exit.
        print('\n')
        print('-' * 20)
        print('Type 1, 2, 3 or 4! Please try again!')
        print('-' * 20)
        print('\n')
        exit()
"noreply@github.com"
] | Theodore1512.noreply@github.com |
f30b334dfdfba093a92bf40f797a2dcb7f09a8c8 | eb358e65208e909b7ffae71388ac15a8db419863 | /writers/__init__.py | c2e9e7607c76babee15ec1c173a336b288e2d80f | [] | no_license | YaserJaradeh/SyntheticDSGenerator | b329e85c1d85c20325154aa84f0ab4ab8d451bf2 | 4fb8ffba7008f4ad82122e66038ff8b08789a82e | refs/heads/master | 2020-08-28T00:24:01.179022 | 2019-11-04T13:41:42 | 2019-11-04T13:41:42 | 217,533,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | from . import BaseWriter, RDFWriter
__all__ = [BaseWriter, RDFWriter] | [
"noreply@github.com"
] | YaserJaradeh.noreply@github.com |
5493b2f3a565402852a6d878c4d63e0d4b1c5509 | 3263139017e2e3cc253e93a9fb92604b00176466 | /pias/pias_logging.py | 761213610fb3cf88f47af4c7ab242ecf47990d20 | [] | no_license | saalfeldlab/pias | 245fb589b30e197fc03c152231ecc138d6ac7ae3 | acc7c19dc0ca81b846816ec0d0edf7ff87d46665 | refs/heads/master | 2020-04-22T06:38:58.126298 | 2019-03-10T19:01:53 | 2019-03-10T19:01:56 | 170,197,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import logging
# Custom TRACE level, 5 below DEBUG (i.e. numeric level 5).
# FIX: removed a stray `print(logging)` debug leftover that ran on import.
trace = logging.DEBUG - 5
logging.TRACE = trace
logging.addLevelName(trace, 'TRACE')


class PiasLogger(logging.getLoggerClass()):
    """Logger subclass adding a `trace(msg, ...)` convenience method."""

    def trace(self, msg, *args, **kwargs):
        # Delegate to the generic log() with the custom TRACE level.
        self.log(trace, msg, *args, **kwargs)


# All loggers created after this point use PiasLogger.
logging.setLoggerClass(PiasLogger)

# Level names this package recognises (standard levels plus TRACE).
levels = ('NOTSET', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL', 'FATAL', 'TRACE')
"hanslovskyp@janelia.hhmi.org"
] | hanslovskyp@janelia.hhmi.org |
daaa06653b0e498b7a16be9783d039231ad6c81e | 27c01e47c29ee4e0f7755809b2e8eb104c9c1e32 | /toy/shared_to_shared.py | 29d06d1bab0267480efbd6e1252cbac1729c5d26 | [
"MIT"
] | permissive | 0xbunyip/dqnet | 29c525486c013656df66859319298859746f0eee | d7d6fe9de4256d8ec1313cc4a625282025778202 | refs/heads/master | 2021-06-01T05:11:19.220892 | 2016-07-15T09:08:02 | 2016-07-15T09:08:02 | 56,122,560 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import numpy as np
import theano
import theano.tensor as T
np.random.seed(333)
s = theano.shared(np.arange(12).astype(np.float32))
d = T.cast(theano.shared(np.arange(5).astype(np.float32)), 'int32')
r = s[d]
f = theano.function([], r)
print f()
| [
"chiemduybao@gmail.com"
] | chiemduybao@gmail.com |
8989de43385136114c810498d5e6ec4ff969e026 | fcdd39835340e377d2271e6da2b6a2be5ccdac0c | /src/init.py | a150ead4568f8f6c2889adbb2a28518474bd43a6 | [] | no_license | whitepooh/RTD_test | 3648a036d53ce1855128dc95a2e8c488ab5e7350 | dc5d7aa71982cce9058054994fbb1ee9004c3a87 | refs/heads/master | 2021-01-10T02:16:28.257922 | 2016-03-16T05:58:47 | 2016-03-16T05:58:47 | 53,993,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py |
__version__ = '1.0'
import sys
# print the calculation result
def print_result(op1,op2,result,operator):
print("{0} {2} {1} = {3}".format(op1,op2,operator,result))
# display the guildine
def print_guide():
print("1: addition")
print("2: subtraction")
print("3: multiplication")
print("4: division")
print("5: power")
print("6: guidline")
print("7: exit program")
# display the message in case users input the invalid key
def print_no_operation():
print("You have entered incorrect operator");
print("Please press 6 for help")
# main control flow
def main():
print_guide()
while(1):
print("---------------------------------------")
operator = int(input("Enter operation: "))
if(int(operator) >= 1 and int(operator) <= 5):
""" get the input """
op1 = int(input("Enter the 1st operand: "))
op2 = int(input("Enter the 2nd operand: "))
"""Just to make the interpreter happy"""
result = "undefined"
str_op = "undefined"
if(operator == 1):
result = op1 + op2
str_op = '+' #addition
elif(operator == 2):
result = op1 - op2
str_op = '-' #subtraction
elif(operator == 3):
result = op1 * op2
str_op = '*' #multiplication
elif(operator == 4):
result = op1/op2
str_op = '/' #division
elif(operator == 5):
result = op1**op2
str_op = '^' #exponent
"""Pint out the result"""
print_result(op1,op2,result,str_op)
elif(operator == 6):
print_guide() #Print out the guidline
elif(operator == 7):
print("you are about exiting the program! Thanks for giving a try!!")
break; #Exit program
else:
print_no_operation(); #invalid operation
# call the main function
if __name__ == '__main__':
sys.exit(main()) | [
"like_otl@naver.com"
] | like_otl@naver.com |
798bdae28615d1a0be43a72522800823c79108fd | 7393309331fde02a139d1b9341da6cf8f3d66ce1 | /todo_app/settings/production.py | 8ae22645785c66083e7019b5afc2b043cf88ebca | [
"MIT"
] | permissive | golmansax/todo-app-in-django | 556707b766a6dca5bfbdbf0ff40cd88be0c909dc | 54c67de8fe0592bcbcdb269e9da06e97d4069889 | refs/heads/master | 2016-09-05T19:53:53.945914 | 2015-07-02T01:50:08 | 2015-07-02T01:50:08 | 35,309,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # pylint: disable=wildcard-import,unused-wildcard-import
from .base import * # noqa
DEBUG = False
| [
"holman@golmansax.com"
] | holman@golmansax.com |
be40ef08108936d145f86fe1c8b27f02fab25bee | b1ee261d3305e76192feba32fcc2fa7820bbbd61 | /src/imagemsg_to_png.py | ed1d2de2ef1dd56f1f73c2e08b9308ca0a689640 | [] | no_license | duckietown-project-unicorn/bag_tools | 7c251b123eb5e551dd9e06e96085cf704191f8f4 | 3b1c2179e8e77cc37e0fd85e7e1d83e8a2ae3a4a | refs/heads/master | 2020-04-07T02:33:11.258974 | 2018-11-17T14:14:52 | 2018-11-17T14:14:52 | 157,980,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | #!/usr/bin/env python
###############################################################################
# Duckietown - Project Unicorn ETH
# Author: Simon Schaefer
# Subscribe and store compressed image.
###############################################################################
import cv2
from cv_bridge import CvBridge, CvBridgeError
from os.path import isdir
import rospy
from sensor_msgs.msg import CompressedImage
class Main():
    """Subscribe to a compressed-image topic and dump every frame as a PNG.

    The node blocks in rospy.spin(); frames are written from callback().
    """

    def __init__(self):
        self.bridge = CvBridge()
        topic = rospy.get_param("/imagemsg_to_png/img_topic")
        rospy.Subscriber(topic, CompressedImage, self.callback)
        self.i = 0  # sequence number used to name the output files
        self.storage_path = rospy.get_param("/imagemsg_to_png/storage_dir")
        if not isdir(self.storage_path):
            raise OSError("Invalid storage path !")
        rospy.spin()

    def callback(self, data):
        """Store message data as png."""
        try:
            frame = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            rospy.logfatal(e)
            # Bug fix: previously execution fell through and hit an
            # unbound 'frame' below, raising NameError after the log call.
            return
        name = self.storage_path + "/" + str(self.i) + ".png"
        cv2.imwrite(name, frame)
        self.i += 1
if __name__ == '__main__':
rospy.init_node('converter_imagemsg_png', anonymous=True)
try:
Main()
except rospy.ROSInterruptException:
cv2.destroyAllWindows()
| [
"sischaef@student.ethz.ch"
] | sischaef@student.ethz.ch |
ba81708752f0fb17ace59645543fa3e7548bc1cb | 6bfcb3b91c2489cab0d9788079f69f37cf7e1387 | /test/test-bbox.py | fdd971e119df9736e87277292955aa7e59241bc5 | [
"BSD-3-Clause"
] | permissive | glamod/cdm-lens | 02f77f4270594acfadcf5b628bcdd8ea9a574b46 | d257906a3cd9fd01c118777803ef6b880b15ba81 | refs/heads/master | 2023-01-28T17:44:25.861444 | 2023-01-13T08:55:13 | 2023-01-13T08:55:13 | 212,615,087 | 1 | 0 | NOASSERTION | 2022-12-08T06:50:15 | 2019-10-03T15:34:44 | Python | UTF-8 | Python | false | false | 2,423 | py | import requests
import pandas as pd
import io
import math
TMPL = 'http://glamod2.ceda.ac.uk/select/?domain=land&frequency=monthly&variable=accumulated_precipitation,air_temperature&intended_use=non-commercial&data_quality=quality_controlled&column_selection=detailed_metadata&year=1974&month=03&bbox={w}.0,{s}.0,{e}.0,{n}.0&compress=false'
def _assert_in_range(df, w, s, e, n, to_nearest_degree=False):
if len(df) == 0:
print('Empty df')
return
lats, lons = df.latitude, df.longitude
min_lat, max_lat = lats.min(), lats.max()
min_lon, max_lon = lons.min(), lons.max()
print(f'Wanted lons: {w} to {e}; lats: {s} to {n}')
print(f'Actual lons: {min_lon} to {max_lon}; lats: {min_lat} to {max_lat}')
def fix(n):
if n < 0:
return math.ceil(n)
else:
return math.floor(n)
if to_nearest_degree:
min_lat, max_lat, min_lon, max_lon = [fix(_) for _ in [min_lat, max_lat, min_lon, max_lon]]
# print(lats, lats.max(), lats.min())
assert(min_lat >= s), 'min_lat >= s'
assert(max_lat <= n), 'max_lat <= n'
if min_lat == max_lat and min_lat == -90 or min_lat == 90:
print('Longitude checks are meaningless at the north/south pole')
return
if 90 in list(lats) or -90 in list(lats):
print('Some lats are north/south pole - so ignore longitude checks')
assert(min_lon >= w), 'min_lon >= w'
assert(max_lon <= e), 'max_lon <= e'
def _fetch_as_df(w, s, e, n):
    """Download the bbox CSV and parse it into a DataFrame.

    Returns the raw response text instead when the service reports an
    exception (callers must check for a str result).
    """
    url = TMPL.format(w=w, s=s, e=e, n=n)
    print(url)
    body = requests.get(url).text
    if body.startswith('Exception raised'):
        print('[ERROR] Fetch error: {}'.format(body))
        return body
    return pd.read_csv(io.StringIO(body))
def test_bbox_in_range():
    """Sweep 30x30-degree tiles across the globe; every returned point
    must fall inside its tile (to the nearest degree)."""
    for west in range(-180, 160, 30):
        for south in range(-90, 61, 30):
            east, north = west + 30, south + 30
            frame = _fetch_as_df(west, south, east, north)
            _assert_in_range(frame, west, south, east, north, True)
def test_bbox_full_range():
    """Check the single whole-globe bbox; skip when the service returns
    an error string instead of CSV data."""
    for spec in ('-180,-90,180,90',):
        west, south, east, north = (int(part) for part in spec.split(','))
        frame = _fetch_as_df(west, south, east, north)
        if isinstance(frame, str):
            # the service handed back error text, nothing to range-check
            continue
        _assert_in_range(frame, west, south, east, north, True)
if __name__ == '__main__':
test_bbox_full_range()
test_bbox_in_range()
| [
"ag.stephens@stfc.ac.uk"
] | ag.stephens@stfc.ac.uk |
5b32b72979653aa9f50816ccfd662688417c13c4 | a8de4bf4f78c0c74b822292f100452a7a6a62d90 | /SDM/rules/SynSrcRule.py | ff00edfe5375c02901193f1ddb960723a5f5fa97 | [
"Apache-2.0"
] | permissive | jalilm/SDN-Monitoring | d7f78ccfdcf3b6552d58ab5a5dc108570686629e | 4ba8dd0f0ed5e44c0e803713d6c82ee2c815c7e4 | refs/heads/master | 2021-05-03T11:44:24.295957 | 2016-10-06T07:58:26 | 2016-10-06T07:58:26 | 31,901,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | from SDM.rules.FlagsSrcRule import FlagsSrcRule
from SDM.rules.Rule import Rule
from SDM.rules.TCPIPSrcRule import TCPIPSrcRule
class SynSrcRule(Rule):
    """
    A switch-table rule matching TCP SYN traffic from a source subnet.

    Composed of two sub-rules that are installed and removed together:
    a TCPIPSrcRule for the subnet and a FlagsSrcRule one priority level
    higher restricted to the SYN flag (0x02).
    """

    def __init__(self, datapath, ipv4_string, subnet_string, table_id=0, priority=0, father_rule=None):
        super(SynSrcRule, self).__init__(datapath, table_id, priority, father_rule)
        self.ipv4_string = ipv4_string
        self.subnet_string = subnet_string
        # Component rules share the subnet; the SYN rule outranks the TCP one.
        self.tcp_rule = TCPIPSrcRule(datapath, ipv4_string, subnet_string, table_id, priority, None)
        self.syn_rule = FlagsSrcRule(datapath, ipv4_string, subnet_string, table_id, priority + 1, 0x02, None)
        # NOTE(review): match/match_args mirror the TCP sub-rule built here,
        # even after from_sub_rules() swaps the sub-rules -- confirm intended.
        self.match_args = self.tcp_rule.match_args
        self.match = self.tcp_rule.match

    def __repr__(self):
        return "SynSrcRule({0!r}, {1!r}, {2!r}, {3!r}, {4!r}, {5!r})".format(
            self.datapath, self.tcp_rule.ipv4_string, self.tcp_rule.subnet_string,
            self.table_id, self.priority, self.syn_rule.match_args['tcp_flags'])

    def __str__(self):
        return "SynSrcRule ({0}, {1}) Flags:{2}".format(
            self.tcp_rule.ipv4_string, self.tcp_rule.subnet_string,
            self.syn_rule.match_args['tcp_flags'])

    def get_finer_rules(self):
        """Split into finer SynSrcRules, pairing each refined TCP sub-rule
        with the SYN sub-rule covering the same subnet."""
        tcp_parts = self.tcp_rule.get_finer_rules()
        syn_candidates = self.syn_rule.get_finer_rules()
        refined = []
        for tcp_part in tcp_parts:
            same_subnet = [cand for cand in syn_candidates
                           if cand.ipv4_string == tcp_part.ipv4_string
                           and cand.subnet_string == tcp_part.subnet_string]
            refined.append(SynSrcRule.from_sub_rules(tcp_part, same_subnet[0], self))
        return refined

    def get_paired_rule(self):
        """Return the rule combining the paired versions of both sub-rules."""
        tcp_pair = self.tcp_rule.get_paired_rule()
        syn_pair = self.syn_rule.get_paired_rule()
        return SynSrcRule.from_sub_rules(tcp_pair, syn_pair, self)

    def remove_flow(self):
        """Remove both component flows (SYN first, mirroring install order)."""
        self.syn_rule.remove_flow()
        self.tcp_rule.remove_flow()

    def add_flow(self, inst):
        """Install both component flows with the given instructions."""
        self.syn_rule.add_flow(inst)
        self.tcp_rule.add_flow(inst)

    # noinspection PyPep8Naming
    @classmethod
    def from_sub_rules(cls, tcp_rule, syn_rule, father_rule):
        """Build a SynSrcRule around existing sub-rules, replacing the
        ones the constructor creates."""
        combined = cls(tcp_rule.datapath, tcp_rule.ipv4_string, tcp_rule.subnet_string,
                       tcp_rule.table_id, tcp_rule.priority, father_rule)
        combined.tcp_rule = tcp_rule
        combined.syn_rule = syn_rule
        return combined
| [
"jalilm@cs.technion.ac.il"
] | jalilm@cs.technion.ac.il |
ff6760ecb82a5853707ebe69781165f141b9ed77 | 61ba9ec78e004cbf7ad38dbc047b7d9b99a013cb | /src/GymNow_site/pages/migrations/0007_member.py | 51dd481fde4654dc51dbdf43d5feaffe5143d77b | [] | no_license | lackeya2/GymNow-Final-Year-Project | eb286d5b75238057cc1443e05f0c569fc6b10846 | 89cabd3cb44b78dd5e103c7c34f940a222a4d9aa | refs/heads/master | 2023-06-05T09:31:09.094600 | 2021-05-24T15:16:37 | 2021-05-24T15:16:37 | 378,228,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | # Generated by Django 2.2.17 on 2021-03-25 16:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``Member`` model
    (name/email/phone plus a creation timestamp) in the pages app.
    Edit with care -- Django tracks this file by name and dependencies."""

    dependencies = [
        ('pages', '0006_customer_bookings_transaction_id'),
    ]

    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('member_name', models.CharField(max_length=200, null=True)),
                ('email', models.EmailField(max_length=200, null=True)),
                ('phone', models.CharField(max_length=200, null=True)),
                # auto_now_add: stamped once when the row is first created
                ('date_created', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
    ]
| [
"adrian.lackey2@mail.dcu.ie"
] | adrian.lackey2@mail.dcu.ie |
a36ea7f7089a08780838322db177294d028b5dae | ed6f45ae1a8ed7d37f9662394f4df29366e7806e | /web19/app.py | 72cc8962acf6bb45c2dd79136be7c7373f917e0c | [] | no_license | FTDRefrain/python_read | e2b9e0cb9b48293c2eb08b1928bd7e9bd1c3efb8 | 1ec78ecb40b8bd47dd8743c7f93b41af3ca6e57a | refs/heads/master | 2020-03-27T17:57:21.443992 | 2018-08-31T12:20:37 | 2018-08-31T12:20:37 | 146,887,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | from flask import Flask
import config
# web framework
# web application
# __main__
app = Flask(__name__)
# Set secret_key so we can use Flask's built-in session support.
# The string itself can be anything you like.
app.secret_key = config.secret_key
"""
In Flask, modular routing is provided by Blueprints.
A blueprint can have its own static-asset and template paths
(not used here yet). Usage is as follows.
"""
# Register the blueprints.
# url_prefix adds a prefix to every route defined inside the blueprint.
from routes.index import main as index_routes
from routes.topic import main as topic_routes
from routes.reply import main as reply_routes
from routes.board import main as board_routes
app.register_blueprint(index_routes)
app.register_blueprint(topic_routes, url_prefix='/topic')
app.register_blueprint(reply_routes, url_prefix='/reply')
app.register_blueprint(board_routes, url_prefix='/board')
# Run the app.
if __name__ == '__main__':
    # debug mode auto-reloads on code changes, so no restart is needed
    # host '0.0.0.0' lets other machines reach this server
    config = dict(  # NOTE(review): shadows the imported ``config`` module in this branch
        debug=True,
        host='0.0.0.0',
        port=2000,
    )
    app.run(**config)
| [
"yuanzhenyi@sjtu.edu.cn"
] | yuanzhenyi@sjtu.edu.cn |
1e4c3dc8648edeb0f51d861b4003419811ebc27a | 28b6e6a35b6591f36140b6cb907ac60c71dbcab1 | /app/migrations/0001_initial.py | b9dba1404818a767036d64bf7989c45046f5bdcf | [] | no_license | mahmud-sajib/Social-Profile-Rest-Api | 97c89af42439d08e730b3901fc76ac21cc3a7280 | 5c84ad847ce3303d63284a4363a2b1b4aaf76319 | refs/heads/master | 2023-03-22T09:24:00.439550 | 2021-03-15T08:18:44 | 2021-03-15T08:18:44 | 347,887,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # Generated by Django 3.1 on 2021-02-28 06:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Status`` model.

    Declares a dependency on the project's (swappable) user model because
    ``author`` is a ForeignKey to ``settings.AUTH_USER_MODEL``.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(blank=True, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='uploads/%Y/%m/%d')),
                # created_at is stamped once; updated_at refreshes on every save
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"shout.mahmud@gmail.com"
] | shout.mahmud@gmail.com |
1f2b719692cb0e7eeaf979a85432258556dc5cf3 | 1380088318ee63e2ee1ffb4e0f1c709487d4cf56 | /Iterables/Iteration_Protocols.py | bbfaef869778bf0075d514737fe5bf4695207906 | [] | no_license | minamh/Python-Fundamentals | d331a50d1e4dbad786abf8d8a8709e83987a6e5b | 5e298bf243e4ac4f77f106425ad946e272293ea1 | refs/heads/master | 2020-06-13T18:17:23.076796 | 2019-10-03T22:44:45 | 2019-10-03T22:44:45 | 194,744,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | iterable = ['Srping','Summer','Autumn','Winter']
# Drive the iterator by hand to demonstrate the iteration protocol:
# next() yields items until the iterator signals StopIteration.
iterator = iter(iterable)
while True:
    try:
        element = next(iterator)
    except StopIteration:
        print("Reach the end of the list")
        break
    print(element)
"minahabashy@hotmail.com"
] | minahabashy@hotmail.com |
bd93ffb133655aa9cc9fecc76bd105afad5b5f4e | b68bad3690a2bd6bd1140bb8299f59a47dc79b6a | /lib/python3.7/io.py | 2fed886fd2c394a761c4b30572acefb656749aa6 | [] | no_license | ghaffaru/gender-prediction | 12397d68d2e750305770bd53a6487077e53e4723 | 4b0436c7352f271f3f3b6c729d966017c3420c8f | refs/heads/master | 2022-01-16T16:53:37.773223 | 2019-07-29T08:47:01 | 2019-07-29T08:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | /home/ghaff/anaconda3/lib/python3.7/io.py | [
"mudashiruagm@gmail.com"
] | mudashiruagm@gmail.com |
f269d82402c98d7bf21f0b90e965b2aeff0c96d7 | 6850b97bd7e5402464024f24ae608f960801bfea | /workflow.py | 757457797f856f138beaacd9485a8e393c0237e9 | [] | no_license | rafaelfsilva/pid-simulator | e3f035494a4968b416691b4119c71a1f7bdaddf8 | 1b9d548133422aebe038ce07651bf522ad38799a | refs/heads/master | 2020-12-31T05:09:58.622076 | 2016-05-23T20:08:56 | 2016-05-23T20:08:56 | 59,513,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,716 | py | #!/usr/bin/env python
#
# Copyright 2016 Rafael Ferreira da Silva
# http://www.rafaelsilva.com/tools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Rafael Ferreira da Silva"
import logging
from file import FileLink
from util import *
log = logging.getLogger(__name__)
class Element:
    """Names of the entities that make up a workflow description.

    NOTE(review): these look like element/tag names consumed by a parser
    elsewhere in the project -- confirm against the code that reads them.
    """
    TASK = "task"
    FILE = "file"
    USES = "uses"
    DEPENDS = "depends"
class Workflow:
    """In-memory model of a workflow: tasks, files, and their relations."""

    def __init__(self):
        self.tasks = {}          # task id -> task object
        self.pending_tasks = {}  # task id -> task not yet completed
        self.files = {}          # file name -> file object

    def add_task(self, task):
        """Register *task* and mark it as pending."""
        self.tasks[task.id] = task
        self.pending_tasks[task.id] = task

    def add_file(self, file):
        """Register *file*; a file already known under the same name wins."""
        self.files.setdefault(file.name, file)

    def add_use(self, task_id, file_name, link):
        """Attach the named file to the task as input/output/intermediate."""
        task = self.tasks[task_id]
        file = self.files[file_name]
        if link == FileLink.INPUT:
            target = task.input_data
        elif link == FileLink.OUTPUT:
            target = task.output_data
        elif link == FileLink.INTERMEDIATE:
            target = task.intermediate_data
        else:
            # unknown link kinds are silently ignored, as before
            return
        target[file.name] = file

    def add_dependency(self, child_id, parent_id):
        """Record that *child_id* depends on *parent_id*."""
        self.tasks[child_id].add_parent(self.tasks[parent_id])

    def is_completed(self):
        """True once no pending tasks remain."""
        return not self.pending_tasks

    def __str__(self):
        """Multi-line human-readable dump of the workflow state."""
        lines = ["Workflow {", "  tasks:"]
        lines.extend("    %s" % task for task in self.tasks.values())
        lines.append("  files:")
        lines.extend("    %s" % file for file in self.files.values())
        lines.append("  pending_tasks: ")
        lines.append("    (%s)" % print_dictionary_ids(self.pending_tasks))
        lines.append("}")
        return "\n".join(lines)
| [
"rafsilva@isi.edu"
] | rafsilva@isi.edu |
bf4bb70eebecdb403967b7f33dc90ddc5bc48a6b | 98bd8d84d572052f50dd9552eabee48fe29c5c4f | /For_s/Test.py | 0e562f90ef40b0412ca0c82fa2ef3587764f6b73 | [] | no_license | MNandaNH/MNandaNH | cab0ba8bc63e84a7d589a2b8ca5166e1bc4b4f10 | 1503da79e62e43da9dcf9f69be885275f2dbcc6f | refs/heads/main | 2023-07-14T02:37:56.903868 | 2021-08-27T05:41:06 | 2021-08-27T05:41:06 | 398,369,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 19 22:52:49 2021
@author: fer
"""
print("Comienzo")
for i in range(0,30,1):
print("Hola ", end=" ")
print()
print("Final")
for a in "AMIGO":
print("Dame una", {a})
print("¡AMIGO!")
veces = int(input("¿Cuántas veces quiere que le salude? "))
for i in range(1,veces+1,1):
print("Holis ", end="")
print()
print("Adiós") | [
"noreply@github.com"
] | MNandaNH.noreply@github.com |
0a6291eaae1de1fc9b8321ad436642d3776c3ae5 | d48dfa622e07d346a91be3aa8e8657e409faf552 | /RozbudowaKodu/lab_files/lab_file_2.py | 6b25fe1a7a19217f18835bf54768e39c3fa1b477 | [] | no_license | sineczek/PythonSrednioZaawansowany | 71c8c94f7cdc193482a50b94315b86e1f0ab0039 | 75823b36de99ef9ac487672cf131a0b84ce23d2b | refs/heads/main | 2023-03-14T04:33:26.853500 | 2021-03-06T18:13:02 | 2021-03-06T18:13:02 | 332,524,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import math
argument_list = []
results_list = []
for i in range (1000000):
argument_list.append(i/10)
for x in argument_list:
results_list.append(abs(x**3 - x**0.5))
print('min = {} max = {}'.format(min(results_list), max(results_list))) | [
"michalzaitz@gmail.com"
] | michalzaitz@gmail.com |
a8016c963c144a2e6496e7df991b399b0b11d99d | d452860a530e0ad056f8504b8515ee93bb384218 | /polling_app1/settings.py | 44c3d80bc71b6918ee3c48b4287b6e0338776f7d | [] | no_license | ridhoasfian/django_polling_app | bb64ff7abc8ba26602cdc01dd200843483b6321b | 04fdb8ce97a3cd2dc956609a3f14b0b675562a2d | refs/heads/master | 2020-03-27T21:37:15.464860 | 2018-09-03T12:38:26 | 2018-09-03T12:38:26 | 147,161,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | py | """
Django settings for polling_app1 project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = 'rncw&a3m+ui1fymojw81!%tav9ypx=d5j92)6*-1z!rmr&5&m1'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: only localhost is accepted while DEBUG is on.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'accounts',
    'polling',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'polling_app1.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'polling_app1.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"),]
LOGIN_URL = '/accounts/login_user' #work with decorators @login_required
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
| [
"ridhoasfian86@gmail.com"
] | ridhoasfian86@gmail.com |
0d0d7db1d305e82b504804746fdd50151a35e1b5 | 5c7ac1213bdeed103d88e6b5f6dc78587e05464c | /myBlogProj/settings.py | ca8032c6214ebd652b69c27d2a5b6b8f15769d94 | [] | no_license | madao33/myBlogProj | fc8566d9f977d6d21686b46e37e86ce0adb7864d | c8e8a60ec635016b3eba09b7ae9f9cd545dc48fe | refs/heads/master | 2022-11-27T15:47:29.573099 | 2020-07-31T09:35:50 | 2020-07-31T09:35:50 | 280,379,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,298 | py | """
Django settings for myBlogProj project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = 'k=#eh49hft9e@vvd8j!pu4w7qocyekkd(^i(q=%gj62nu%alsr'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header -- restrict to the real
# domain(s) in production to avoid Host-header attacks.
ALLOWED_HOSTS = ['*',]

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'blog',
    'pure_pagination',  # pagination
    'blog.apps.BlogConfig',
    'comments.apps.CommentsConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myBlogProj.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'myBlogProj.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
"3282389858@qq.com"
] | 3282389858@qq.com |
8290d4f3cb61dd5d6eeabcf67b1b96e05bd08d6b | 2d160387e076e41c5a9cdea1e14c366bfa346842 | /copy.py | 29fff43924943deeaa00feaa8bcfa8f1064c8454 | [] | no_license | OrthantBing/Pythonlearnings | 0c959169ed951e22e5f7ca39b6818fdae0de665f | e23bdfc9a81457b423e79a6ad8a752fb8df01a8b | refs/heads/master | 2021-10-09T05:45:57.810661 | 2018-12-22T03:25:21 | 2018-12-22T03:25:21 | 112,430,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Python program to print all permutations with
# duplicates allowed
def toString(List):
    """Join a list of single-character strings back into one string."""
    return ''.join(List)

# Function to print permutations of string
# This function takes three parameters:
# 1. String (as a list of characters)
# 2. Starting index of the string
# 3. Ending index of the string.
def permute(a, l, r):
    """Print every permutation of a[l:r+1] in place (duplicates allowed).

    Fix: the original used Python-2-only syntax (``print x`` and
    ``xrange``), which is a SyntaxError/NameError on Python 3. The forms
    below behave identically on both Python 2 and Python 3.
    """
    if l == r:
        print(toString(a))
    else:
        for i in range(l, r + 1):
            a[l], a[i] = a[i], a[l]   # choose a[i] for position l
            permute(a, l + 1, r)
            a[l], a[i] = a[i], a[l]   # backtrack

# Driver program to test the above function
string = "ABC"
n = len(string)
a = list(string)
permute(a, 0, n - 1)
| [
"antonbright@athenahealth.com"
] | antonbright@athenahealth.com |
7ce7379e76ae153dda18415fc9bbdb7332c10fc9 | b61c532fd11dbe29039505e995778ce812674f7e | /terminalformat.py | ffd3916dc4a98fd03f908ec21f7f8c6aeb8dd5a8 | [] | no_license | willvdb/scripts | 996be0bda758e6d19d52527d9a72a498bd16d3ea | 2ba902e8ae0e3c86bdfed67dd3aa58cb54aef54d | refs/heads/master | 2022-12-05T19:08:34.827631 | 2022-11-21T04:59:52 | 2022-11-21T04:59:52 | 130,092,870 | 0 | 0 | null | 2022-06-14T21:33:53 | 2018-04-18T16:38:11 | Perl | UTF-8 | Python | false | false | 1,236 | py | class TerminalFormat:
"""A terminal output helper class"""
    def __init__(self):
        # One-shot formatting flags; armed by the ``bold``/``underline``
        # properties and cleared again after each formatted message.
        self.isBold = False
        self.isUnderline = False
_info = '\033[94m' # blue
_success = '\033[92m' # green
_warning = '\033[93m' # orange
_error = '\033[91m' # red
_emphasis = '\033[95m' # purple
_bold = '\033[1m'
_underline = '\033[4m'
_reset = '\033[0m'
    @property
    def bold(self):
        """Arm bold for the next message; returns self so calls can chain."""
        self.isBold = True
        return self

    @property
    def underline(self):
        """Arm underline for the next message; returns self so calls can chain."""
        self.isUnderline = True
        return self
def __parse__(self, color, msg):
fonTerminalFormatlags = ''
if self.isBold:
fonTerminalFormatlags += TerminalFormat._bold
self.isBold = False
if self.isUnderline:
fonTerminalFormatlags += TerminalFormat._underline
self.isUnderline = False
return color+fonTerminalFormatlags+msg+TerminalFormat._reset
    # Colour entry points: each wraps *msg* in its colour code plus any
    # armed bold/underline effects, then resets the terminal attributes.
    def info(self, msg):
        """Blue informational text."""
        return self.__parse__(TerminalFormat._info, msg)

    def success(self, msg):
        """Green success text."""
        return self.__parse__(TerminalFormat._success, msg)

    def warning(self, msg):
        """Orange warning text."""
        return self.__parse__(TerminalFormat._warning, msg)

    def error(self, msg):
        """Red error text."""
        return self.__parse__(TerminalFormat._error, msg)

    def emphasis(self, msg):
        """Purple emphasis text."""
        return self.__parse__(TerminalFormat._emphasis, msg)
tf = TerminalFormat() | [
"will@ipartnr.com"
] | will@ipartnr.com |
c668614ba1c31b9ddada5697bd9bd9833931bd3e | d28a65d23c204a9736b597ae510d9dd54d2ffd0f | /bin/newdb | cbffe8f0ede31ac97c8ea7393d309dee7b9fa505 | [
"BSD-3-Clause"
] | permissive | cts2/rf2db | 99ba327611e620fc5533245064afcc1daff7c164 | 985cd7ad84c8907306a0d7d309d4a1c0fb422ba4 | refs/heads/master | 2020-05-17T22:37:25.476553 | 2015-08-24T22:18:19 | 2015-08-24T22:18:19 | 15,264,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,053 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import argparse
import os
# Assuming that we are running in the bin directory
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
sys.path.append(os.path.join(_curdir, '..'))
# TODO: Make this into a package
sys.path.append(os.path.join(_curdir, '..', '..', 'ConfigManager'))
from rf2db.db.RF2DBConnection import config_parms, debug_parms, cp_values, db_values, RF2DBConnection
helpargs = ['-h', '-?']
def main(argv):
""" Create a MySQL database for RF2 files and/or manage the connection parameters. Example sequence:
* python newdb --upd ../../rf2service/settings.conf --host localhost --db rf220140731 --charset utf8 --user root --passwd pw
* python newdb --show ../../rf2service/settings.conf
* python newdb --create ../../rf2service/settings.conf
"""
parser = argparse.ArgumentParser(description="Set up RF2 DB parameters and optionally create a database")
parser.add_argument('configfile', help="configuration file location")
parser.add_argument('--show', dest='show', action="store_true", help="show current configuration")
parser.add_argument('--upd', dest='update', action="store_true", help="update configuration file")
parser.add_argument('--create', action="store_true", help="create database if it doesn't exist")
# Can't do a lot more if there isn't configuration file
if len(argv) == 0 or (len(argv) == 1 and argv[0] in helpargs):
config_parms.add_to_parser(parser)
debug_parms.add_to_parser(parser)
parser.parse_args(argv)
return
# There is (or should be) a configuration file -- pick it out of the arguments and then reparse
args = [e for e in argv if e not in helpargs]
fileopt, _ = parser.parse_known_args(args)
# Open the existing configuration file so we know what the defaults should be
cp_values.set_configfile(fileopt.configfile)
config_parms.add_to_parser(parser, cp_values)
debug_parms.add_to_parser(parser, db_values)
opts = parser.parse_args(argv)
cp_values.update(vars(opts))
if opts.show:
print(str(cp_values))
if opts.update or not opts.show:
if cp_values.flush():
print("\nConfiguration file updated")
if opts.create:
RF2DBConnection().newDB()
print("Database %s created in %s" % (cp_values.db, cp_values.host + ((':' + cp_values.port) if cp_values.port else '')))
if __name__ == '__main__':
main(sys.argv[1:])
| [
"solbrig.harold@mayo.edu"
] | solbrig.harold@mayo.edu | |
47dd2201eee56ee0b205dcc830ba6f6ee494913c | 602d62c74c70854ce8ea043410ebd81d1fec5462 | /hong/visualize.py | a790dd94f4441350c089a2fe383125071ce7d4cb | [] | no_license | AllenZHX/CPE646Project | a63ca81f81a461487c205ecc1a3bdd717a038631 | 196db1d6fcb16b033cae0eaedc7ab77c5f9a0152 | refs/heads/master | 2020-06-10T08:32:58.742341 | 2016-12-11T01:32:03 | 2016-12-11T01:32:03 | 75,979,795 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import numpy as np
import pandas as pd
df = pd.read_csv('/home/hong/PRProject/data/voice.csv')
df.head()
print("Total number of samples:{}".format(df.shape[0]))
print("Number of male: {}".format(df[df.label == 'male'].shape[0]))
print("Number of female: {}".format(df[df.label == 'female'].shape[0]))
import seaborn
seaborn.pairplot(df[['meanfreq', 'sd', 'median', 'Q25', 'Q75', 'IQR','label']], hue='label', size=2)
seaborn.plt.show()
| [
"AllenZHX@github.com"
] | AllenZHX@github.com |
31982ead5acacd09b5d42b51ba3c5bbe5ba5c3fa | 2e217709b2442f95933ebdd5ef3979d5f443e0ca | /double_list.py | de16ad538bedeb27ec8eb8dd6bd894a58b27a5df | [] | no_license | kudoyoshihiro10xx/11_bulit_in_function | 30e20b91d780ddf573d3c6ae26013c76b16e4686 | 126b48fdb866fb9a12d982a9d5c83e0216d0de99 | refs/heads/master | 2020-04-27T23:03:09.678714 | 2019-03-10T14:17:55 | 2019-03-10T14:17:55 | 174,759,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | numbers1 = [1, 2, 3]
def double(number):
return 2 *
for
| [
"kudo@example.com"
] | kudo@example.com |
9e2a1eb8dc0e28203677ec2235e4047d4077c6ce | aed88d107685092d6cdc43129f94138b206e3dda | /dadi/2Pins/GBS_bottle.py | 68910b94d3e0d400d2b3a2c2945ae303538bb4c7 | [] | no_license | claire-p/Pinniped_NeNc | 91e255bddd4239ccacf96c66b094037acf4ada1c | fd02262b59ec9e4386ba0f5cdb619c10f4c2a19a | refs/heads/master | 2021-05-20T22:03:31.393624 | 2020-03-03T13:45:19 | 2020-03-03T13:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | # Numpy is the numerical library dadi is built upon
import numpy
from numpy import array
import matplotlib
matplotlib.use("AGG")
import sys
sys.path.append('/proj/b2012209/private/tools/dadi/RyanGutenkunst-dadi-e741dc81ff1e')
import dadi
# In demographic_models.py, we've defined a custom model for this problem
import bottlenecks
#from a frequency spectrum file
fs = dadi.Spectrum.from_file(sys.argv[1])
print fs
fs = fs.fold()
print fs
# Define sample size (list)
ns = fs.sample_sizes
print ns
# Define grid size
pts_l = [50,60,70]
func = bottlenecks.no_rec
params = array([1, 1])
upper_bound = [100, 20]
lower_bound = [0.0001,0.0001]
func_ex = dadi.Numerics.make_extrap_log_func(func)
model = func_ex(params, ns, pts_l)
ll_model = dadi.Inference.ll_multinom(model, fs)
print 'Model log-likelihood:', ll_model
theta = dadi.Inference.optimal_sfs_scaling(model, fs)
p0 = dadi.Misc.perturb_params(params, fold=1, upper_bound=upper_bound)
popt = dadi.Inference.optimize_log(p0, fs, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(params),
maxiter=None)
print 'Optimized parameters', repr(popt)
model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(model, fs)
print 'Optimized log-likelihood:', ll_opt
#INSERT 2D BS HERE
bootstraps = []
for ii in range(100):
print ii
bootstrap_data = fs.sample()
popt = dadi.Inference.optimize_log(p0, bootstrap_data, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(params),
maxiter=None)
print 'Optimized parameters', repr(popt)
model = func_ex(popt, ns, pts_l)
ll = dadi.Inference.ll_multinom(model, bootstrap_data)
print 'Optimized log-likelihood:', ll
theta = dadi.Inference.optimal_sfs_scaling(model, bootstrap_data)
model *= theta
bootstraps.append([ll, theta, popt[0], popt[1]])
bootstraps = numpy.array(bootstraps)
numpy.savetxt('1Dboots.npy', bootstraps)
bootstraps = numpy.loadtxt('1Dboots.npy')
sigma_boot = numpy.std(bootstraps, axis=0)[1:]
print 'Bootstrap uncertainties:', sigma_boot
| [
"noreply@github.com"
] | claire-p.noreply@github.com |
37460d45828f9a6bb867d37149fd0b03abad6412 | 2ab3155d9ca69d61d5ef48870640cbb3c8b400bd | /search.py | e0a08a05fd651799ff33f08aa12eced56146be9c | [] | no_license | JoshuaKrause/AIND_PacMan | d58a3f36da18a436989fc2b848e6f986cb4a02ab | 4b5b46ecf6c119b9c7e998bfdd449590b90f9a9b | refs/heads/master | 2021-05-15T05:05:53.811562 | 2018-01-17T22:59:41 | 2018-01-17T22:59:41 | 117,585,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,879 | py | # search.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
[2nd Edition: p 75, 3rd Edition: p 87]
Your search algorithm needs to return a list of actions that reaches
the goal.
Make sure to implement a graph search algorithm
[2nd Edition: Fig. 3.18, 3rd Edition: Fig 3.7].
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
frontier = util.Stack()
frontier.push((problem.getStartState(), []))
explored = []
while not frontier.isEmpty():
node, path = frontier.pop()
if problem.isGoalState(node):
return path
explored.append(node)
for coords, action, cost in problem.getSuccessors(node):
if coords not in explored:
frontier.push((coords, path + [action]))
return None
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
[2nd Edition: p 73, 3rd Edition: p 82]
"""
frontier = util.Queue()
frontier.push((problem.getStartState(), []))
explored = []
while not frontier.isEmpty():
node, path = frontier.pop()
if problem.isGoalState(node):
return path
explored.append(node)
for coords, action, cost in problem.getSuccessors(node):
if coords not in explored:
frontier.push((coords, path + [action]))
return None
def uniformCostSearch(problem):
"Search the node of least total cost first. "
frontier = util.PriorityQueue()
frontier.push((problem.getStartState(), []), 0)
explored = []
while not frontier.isEmpty():
node, path = frontier.pop()
if problem.isGoalState(node):
return path
explored.append(node)
for coords, action, cost in problem.getSuccessors(node):
if coords not in explored:
total_cost = problem.getCostOfActions(path + [action]) + cost
frontier.push((coords, path + [action]), total_cost)
return None
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
frontier = util.PriorityQueue()
frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), problem))
explored = []
while not frontier.isEmpty():
node, path = frontier.pop()
if problem.isGoalState(node):
return path
explored.append(node)
for coords, action, cost in problem.getSuccessors(node):
if coords not in explored:
frontier.push((coords, path + [action]), heuristic(coords, problem))
return None
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"jkrause@joshuakrause.net"
] | jkrause@joshuakrause.net |
2b180c0981ccf907afbff320dc6bdc395da1bad8 | 62d3e022dd2d998bc0b50e93bfa0af7f53ff7f39 | /205cafe.py | eab3ac5108273e576b2c01e9aed54b264e9f5aa6 | [] | no_license | wtchung23/205CDE-HTML-code | 3a158153f7ee43e419e93bc7bef7628645f7fb05 | 5d6c402b95ff00f732665be12c5adc4b6eaf802c | refs/heads/master | 2021-04-12T04:36:40.332112 | 2018-03-19T13:51:28 | 2018-03-19T13:51:28 | 125,856,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,901 | py | from flask import Flask, render_template, request, redirect, url_for, session
import pymysql
from datetime import date
#create an instance of FLask class
app = Flask(__name__)
app.static_folder = 'static'
app.secret_key = 'any random string'
@app.route("/home")
def home():
if 'username' in session:
return render_template('home.html',username=session['username'])
else:
return render_template('home.html')
@app.route("/coffee")
def coffee():
if 'username' in session:
return render_template('coffee.html',username=session['username'])
else:
return render_template('coffee.html')
@app.route("/menu")
def menu():
if 'username' in session:
return render_template('menu.html',username=session['username'])
else:
return render_template('menu.html')
@app.route("/breakfast")
def breakfast():
if 'username' in session:
return render_template('breakfast.html',username=session['username'])
else:
return render_template('breakfast.html')
@app.route("/lunch")
def lunch():
if 'username' in session:
return render_template('lunch.html',username=session['username'])
else:
return render_template('lunch.html')
@app.route("/dessert")
def dessert():
if 'username' in session:
return render_template('dessert.html',username=session['username'])
else:
return render_template('dessert.html')
@app.route("/event")
def event():
if 'username' in session:
return render_template('event.html',username=session['username'])
else:
return render_template('event.html')
@app.route("/contact")
def contact():
if 'username' in session:
return render_template('contact.html',username=session['username'])
else:
return render_template('contact.html')
@app.route("/contactSend", methods=['POST', 'GET'])
def contactSend():
if request.method=='POST':
questioner = request.form.get("questioner")
questionerEmail = request.form.get("questionerEmail")
question = request.form.get("question")
today = date.today()
db = pymysql.connect(host="localhost",user="root",password="password819",db="205cafe")
"""
connection = pymysql.connect(host='localhost',
user='user',
password='passwd',
db='db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
"""
# prepare a cursor object using cursor() medthod
cursor = db.cursor()
sql = """INSERT INTO user_question(`questioner`,`questioner-email`,`question`,`question-date`,`Done?`)
VALUES('%s','%s','%s','%s','no')""" %(questioner,questionerEmail,question,today)
try:
cursor.execute(sql)
#commit your changes in the database
db.commit()
except:
db.rollback()
if 'username' in session:
return render_template('contactSend.html',username=session['username'])
else:
return render_template('contactSend.html')
@app.route("/userinfo")
def userinfo():
if 'username' in session:
db = pymysql.connect(host="localhost",user="root",password="password819",db="205cafe")
cursor = db.cursor()
sql="SELECT `username`,`password`,`email`,`integral` FROM `userInfo` WHERE `username`='%s'"%(session['username'])
cursor.execute(sql)
dataResults = cursor.fetchone()
return render_template('userInfo.html',username=session['username'],password=dataResults[1],email=dataResults[2],integral=dataResults[3])
else:
return redirect(url_for('home'))
db.close()
@app.route("/changePwdPage")
def changePwdPage():
return render_template('changePwdPage.html',username=session['username'])
@app.route("/changePassword",methods=['POST', 'GET'])
def changePassword():
if request.method == 'POST':
oldpassword = request.form.get("oldpassword")
newPassword = request.form.get("newPassword")
confirmPassword = request.form.get("confirmPassword")
db = pymysql.connect(host="localhost",user="root",password="password819",db="205cafe")
cursor = db.cursor()
sql="SELECT `password` FROM `userInfo` WHERE `username`='%s'"%(session['username'])
cursor.execute(sql)
dataResults = cursor.fetchone()
if oldpassword==dataResults[0]:
if newPassword==confirmPassword:
sql="UPDATE `userInfo` SET `password`='%s' WHERE `username`='%s'"%(newPassword,session['username'])
try:
cursor.execute(sql)
#commit your changes in the database
db.commit()
except:
#rollback in case there is any error
db.rollback()
return redirect(url_for('userinfo'))
else:
return render_template('changePwdPage.html',message2='Password and confirm password are not the same')
else:
return render_template('changePwdPage.html',message2='Your old password is not correct')
db.close()
@app.route("/login", methods=['POST', 'GET'])
def login():
db = pymysql.connect(host="localhost",user="root",password="password819",db="205cafe")
"""
connection = pymysql.connect(host='localhost',
user='user',
password='passwd',
db='db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
"""
# prepare a cursor object using cursor() medthod
#error = None;
cursor = db.cursor()
sql = "SELECT `username`,`password` FROM `userInfo`"
cursor.execute(sql)
dataResults = cursor.fetchall()
if request.method == 'POST':
userName = request.form.get("name")
userPassword = request.form.get("password")
for row in dataResults:
dbUserName=row[0]
dbUserPassword=row[1]
if userName==dbUserName and userPassword==dbUserPassword :
session['username'] = dbUserName
return redirect(url_for('home'))
break
else:
#url_for('name of the function of the route','parameters(if required)')
return render_template('home.html',message='false')
db.close()
"""
result = request.form.to_dict(),
return render_template('result', result=result, data=dataResults)
"""
@app.route("/signup")
def signup():
return render_template('signUp.html')
@app.route("/register", methods=['POST', 'GET'])
def register():
if request.method=='POST':
NewUsername = request.form.get("newUsername")
NewPassword = request.form.get("newPassword")
ConfirmPassword = request.form.get("confirmPassword")
email = request.form.get("email")
db = pymysql.connect(host="localhost",user="root",password="password819",db="205cafe")
cursor = db.cursor()
"""
connection = pymysql.connect(host='localhost',
user='user',
password='passwd',
db='db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
"""
sql = "SELECT `username` FROM `userInfo`"
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
username=row[0]
if username==NewUsername:
return render_template('signUp.html',wrongMessage='Username is already used. Please try another one')
break
else:
if NewPassword==ConfirmPassword:
# prepare a cursor object using cursor() medthod
sql = """INSERT INTO userInfo(`username`,`password`,`email`,`integral`)VALUES('%s','%s','%s',0)""" %(NewUsername,NewPassword,email)
try:
cursor.execute(sql)
#commit your changes in the database
db.commit()
except:
db.rollback()
return render_template('register.html',NewUsername=NewUsername,NewPassword=NewPassword,email=email)
else:
return render_template('signUp.html',wrongMessage='Password and confirm password are not the same',NewUsername=NewUsername)
db.close()
"""
result = request.form.to_dict(),
return render_template('result', result=result, data=dataResults)
"""
@app.route("/signout")
def signout():
session.pop('username',None)
return redirect(url_for('home'))
if __name__ == '__main__':
app.run(debug=True)
| [
"noreply@github.com"
] | wtchung23.noreply@github.com |
852065b653ca396ea321c7ff5ad1faeaba1cebe6 | 88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b | /src/api/python/hce/ftests/ftest_exit_code_simple.py | b2c4a8d9a82b0002258fc983f2ffd5611aca4435 | [] | no_license | hce-project/hce-bundle | 2f93dc219d717b9983c4bb534884e4a4b95e9b7b | 856a6df2acccd67d7af640ed09f05b2c99895f2e | refs/heads/master | 2021-09-07T22:55:20.964266 | 2018-03-02T12:00:42 | 2018-03-02T12:00:42 | 104,993,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #!/usr/bin/python
"""
HCE project, Python bindings, Distributed Tasks Manager application.
RTCFinalizer Class content main functional for finalize realtime crawling.
@package: dc
@file rtc-finalizer.py
@author Oleksii <developers.hce@gmail.com>, bgv, Alexander Vybornyh <alexander.hce.cluster@gmail.com>
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2015 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
"""
import ppath
from ppath import sys
import os
import sys
os._exit(11)
| [
"bgv@bgv-d9"
] | bgv@bgv-d9 |
fa31ca5402666d8af0008504ae3fb93efc735023 | e9418f4574976f6b875e94705a180fa3e49a1d60 | /nemo/utils/exp_manager.py | f17491ed38e16a99ffd17ee897df873e0dda3e94 | [
"Apache-2.0"
] | permissive | rajatisdead/NeMo | 02053bc596807e614b5f2d5b481ff87a27996114 | 45f60f03e5d15a4b6e7db145b5d731d73f3a891d | refs/heads/main | 2023-02-16T05:40:08.470940 | 2021-01-15T01:08:06 | 2021-01-15T01:08:06 | 329,863,109 | 1 | 0 | Apache-2.0 | 2021-01-15T09:13:42 | 2021-01-15T09:13:41 | null | UTF-8 | Python | false | false | 27,812 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from dataclasses import dataclass
from pathlib import Path
from shutil import copy, move
from typing import Any, Dict, List, Optional, Union
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import LoggerCollection as _LoggerCollection
from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from nemo.constants import NEMO_ENV_VARNAME_VERSION
from nemo.utils import logging
from nemo.utils.exceptions import NeMoBaseException
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger
class NotFoundError(NeMoBaseException):
    """ Raised when a required file or folder (e.g. the checkpoint directory when resuming) is not found."""
class LoggerMisconfigurationError(NeMoBaseException):
    """ Raised when a mismatch between trainer.logger and exp_manager occurs.

    The constructor appends a fixed hint to every message telling users how to disable
    lightning's automatically-created logger.
    """

    def __init__(self, message):
        # Fix: the hint previously misspelled "lightning's" as "lighning's".
        message = (
            message
            + " You can disable lightning's trainer from creating a logger by passing logger=False to its constructor."
        )
        super().__init__(message)
class CheckpointMisconfigurationError(NeMoBaseException):
    """ Raised when a mismatch between trainer.callbacks and exp_manager's checkpointing configuration occurs."""
@dataclass
class CallbackParams:
    """Parameters for the checkpoint callback configured by exp_manager.

    Most fields mirror arguments of pytorch lightning's ModelCheckpoint; `postfix` and
    `save_best_model` appear to be NeMo-specific additions handled by
    configure_checkpointing — TODO confirm against that function.
    """

    filepath: Optional[str] = None  # If None, exp_manager will attempt to handle the filepath
    monitor: Optional[str] = "val_loss"  # Metric name used to rank checkpoints
    verbose: Optional[bool] = True
    save_last: Optional[bool] = True  # Also keep a copy of the most recent checkpoint
    save_top_k: Optional[int] = 3  # Number of best checkpoints (by `monitor`) to retain
    save_weights_only: Optional[bool] = False
    mode: Optional[str] = "auto"  # Direction of improvement for `monitor` (min/max/auto) — per ModelCheckpoint semantics
    period: Optional[int] = 1  # Checkpointing interval in epochs — per ModelCheckpoint semantics
    prefix: Optional[str] = None  # If None, exp_manager will attempt to handle the filepath
    postfix: str = ".nemo"  # Extension used for the saved .nemo archive
    save_best_model: bool = False  # NOTE(review): presumably saves the best model as a .nemo file; confirm in configure_checkpointing
@dataclass
class ExpManagerConfig:
    """Structured schema that the cfg passed to exp_manager is validated/merged against.

    See exp_manager's docstring for the full description of each field.
    """

    # Log dir creation parameters
    explicit_log_dir: Optional[str] = None  # Overrides exp_dir/name/version folder construction when set
    exp_dir: Optional[str] = None  # Base directory; defaults to ./nemo_experiments when None
    name: Optional[str] = None  # Experiment name; falls back to "default" downstream
    version: Optional[str] = None  # Defaults to a datetime string or lightning's version_{int}
    use_datetime_version: Optional[bool] = True  # Use a datetime string instead of an integer version
    resume_if_exists: Optional[bool] = False  # Auto-resume training from checkpoints under log_dir
    resume_past_end: Optional[bool] = False  # Allow resuming from a *end.ckpt of a completed run
    resume_ignore_no_checkpoint: Optional[bool] = False  # Warn instead of erroring when no checkpoint is found
    # Logging parameters
    create_tensorboard_logger: Optional[bool] = True
    summary_writer_kwargs: Optional[Dict[Any, Any]] = None  # Extra kwargs for TensorBoardLogger (log_dir is set by exp_manager)
    create_wandb_logger: Optional[bool] = False
    wandb_logger_kwargs: Optional[Dict[Any, Any]] = None  # name and project are required when create_wandb_logger is True
    # Checkpointing parameters
    create_checkpoint_callback: Optional[bool] = True
    # NOTE(review): a single shared default instance; presumably safe because OmegaConf.structured
    # converts it to a fresh config node — confirm.
    checkpoint_callback_params: Optional[CallbackParams] = CallbackParams()
    # Additional exp_manager arguments
    files_to_copy: Optional[List[str]] = None  # Files copied into the experiment logging directory
def exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None) -> Path:
    """
    exp_manager is a helper function used to manage folders for experiments. It follows the pytorch lightning paradigm
    of exp_dir/model_or_experiment_name/version. If the lightning trainer has a logger, exp_manager will get exp_dir,
    name, and version from the logger. Otherwise it will use the exp_dir and name arguments to create the logging
    directory. exp_manager also allows for explicit folder creation via explicit_log_dir.
    The version will be a datetime string or an integer. Note, exp_manager does not handle versioning on slurm
    multi-node runs. Datetime version can be disabled if use_datetime_version is set to False.
    It optionally creates TensorBoardLogger, WandBLogger, ModelCheckpoint objects from pytorch lightning. It copies
    sys.argv, and git information if available to the logging directory. It creates a log file for each process to log
    their output into.
    exp_manager additionally has a resume feature which can be used to continue training from the constructed log_dir.
    Args:
        trainer (pytorch_lightning.Trainer): The lightning trainer.
        cfg (DictConfig, dict): Can have the following keys:
            - explicit_log_dir (str, Path): Can be used to override exp_dir/name/version folder creation. Defaults to
                None, which will use exp_dir, name, and version to construct the logging directory.
            - exp_dir (str, Path): The base directory to create the logging directory. Defaults to None, which logs to
                ./nemo_experiments.
            - name (str): The name of the experiment. Defaults to None which turns into "default" via name = name or
                "default".
            - version (str): The version of the experiment. Defaults to None which uses either a datetime string or
                lightning's TensorboardLogger system of using version_{int}.
            - use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True.
            - resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets
                trainer.resume_from_checkpoint so that the trainer should auto-resume. exp_manager will move files
                under log_dir to log_dir/run_{int}. Defaults to False.
            - resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching
                *end.ckpt indicating a previous training run fully completed. This behaviour can be disabled, in which
                case the *end.ckpt will be loaded by setting resume_past_end to True. Defaults to False.
            - resume_ignore_no_checkpoint (bool): exp_manager errors out if resume_if_exists is True and no checkpoint
                could be found. This behaviour can be disabled, in which case exp_manager will print a message and
                continue without restoring, by setting resume_ignore_no_checkpoint to True. Defaults to False.
            - create_tensorboard_logger (bool): Whether to create a tensorboard logger and attach it to the pytorch
                lightning trainer. Defaults to True.
            - summary_writer_kwargs (dict): A dictionary of kwargs that can be passed to lightning's TensorboardLogger
                class. Note that log_dir is passed by exp_manager and cannot exist in this dict. Defaults to None.
            - create_wandb_logger (bool): Whether to create a Weights and Baises logger and attach it to the pytorch
                lightning trainer. Defaults to False.
            - wandb_logger_kwargs (dict): A dictionary of kwargs that can be passed to lightning's WandBLogger
                class. Note that name and project are required parameters if create_wandb_logger is True.
                Defaults to None.
            - create_checkpoint_callback (bool): Whether to create a ModelCheckpoint callback and attach it to the
                pytorch lightning trainer. The ModelCheckpoint saves the top 3 models with the best "val_loss", the most
                recent checkpoint under *last.ckpt, and the final checkpoint after training completes under *end.ckpt.
                Defaults to True.
            - files_to_copy (list): A list of files to copy to the experiment logging directory. Defaults to None which
                copies no files.
    returns:
        log_dir (Path): The final logging directory where logging files are saved. Usually the concatenation of
            exp_dir, name, and version. Note: returns None (despite the annotation) when cfg is None or
            trainer.fast_dev_run is set, since exp_manager is disabled in those cases.
    """
    # Add rank information to logger
    # Note: trainer.global_rank and trainer.is_global_zero are not set until trainer.fit, so have to hack around it
    global_rank = trainer.node_rank * trainer.num_gpus + trainer.local_rank
    logging.rank = global_rank
    # exp_manager is a no-op without a config.
    if cfg is None:
        logging.error("exp_manager did not receive a cfg argument. It will be disabled.")
        return
    # fast_dev_run is a debugging mode; skip all folder/logger/checkpoint setup.
    if trainer.fast_dev_run:
        logging.info("Trainer was called with fast_dev_run. exp_manager will return without any functionality.")
        return
    # Ensure passed cfg is compliant with ExpManagerConfig
    schema = OmegaConf.structured(ExpManagerConfig)
    if isinstance(cfg, dict):
        cfg = OmegaConf.create(cfg)
    elif not isinstance(cfg, DictConfig):
        raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
    # Resolve interpolations now so the merged config holds concrete values.
    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
    cfg = OmegaConf.merge(schema, cfg)
    error_checks(trainer, cfg)  # Ensures that trainer options are compliant with NeMo and exp_manager arguments
    log_dir, exp_dir, name, version = get_log_dir(
        trainer=trainer,
        exp_dir=cfg.exp_dir,
        name=cfg.name,
        version=cfg.version,
        explicit_log_dir=cfg.explicit_log_dir,
        use_datetime_version=cfg.use_datetime_version,
    )
    if cfg.resume_if_exists:
        # Sets trainer.resume_from_checkpoint and archives stale files under log_dir/run_{int}.
        check_resume(trainer, log_dir, cfg.resume_past_end, cfg.resume_ignore_no_checkpoint)
    checkpoint_name = name
    # If name returned from get_log_dir is "", use cfg.name for checkpointing
    if checkpoint_name is None or checkpoint_name == '':
        checkpoint_name = cfg.name or "default"
    cfg.name = name  # Used for configure_loggers so that the log_dir is properly set even if name is ""
    cfg.version = version
    # Create the logging directory if it does not exist
    os.makedirs(log_dir, exist_ok=True)  # Cannot limit creation to global zero as all ranks write to own log file
    logging.info(f'Experiments will be logged at {log_dir}')
    # NOTE(review): sets a private lightning attribute so trainer artifacts default under log_dir.
    trainer._default_root_dir = log_dir
    # Handle Loggers by creating file and handle DEBUG statements
    # Each rank writes to its own log file, named by global and local rank.
    log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{trainer.local_rank}.txt'
    logging.add_file_handler(log_file)
    # For some reason, LearningRateLogger requires trainer to have a logger. Safer to create logger on all ranks
    # not just global rank 0.
    if cfg.create_tensorboard_logger or cfg.create_wandb_logger:
        configure_loggers(
            trainer,
            exp_dir,
            cfg.name,
            cfg.version,
            cfg.create_tensorboard_logger,
            cfg.summary_writer_kwargs,
            cfg.create_wandb_logger,
            cfg.wandb_logger_kwargs,
        )
    # Rank-zero-only setup: checkpoint callback, file copies, cmd/git records, extra log handlers.
    if is_global_rank_zero():
        if cfg.create_checkpoint_callback:
            configure_checkpointing(trainer, log_dir, checkpoint_name, cfg.checkpoint_callback_params)
        # Move files_to_copy to folder and add git information if present
        if cfg.files_to_copy:
            for _file in cfg.files_to_copy:
                copy(Path(_file), log_dir)
        # Create files for cmd args and git info
        with open(log_dir / 'cmd-args.log', 'w') as _file:
            _file.write(" ".join(sys.argv))
        # Try to get git hash
        git_repo, git_hash = get_git_hash()
        if git_repo:
            with open(log_dir / 'git-info.log', 'w') as _file:
                _file.write(f'commit hash: {git_hash}')
                _file.write(get_git_diff())
        # Add err_file logging to global_rank zero
        logging.add_err_file_handler(log_dir / 'nemo_error_log.txt')
        # Add lightning file logging to global_rank zero
        add_filehandlers_to_pl_logger(log_dir / 'lightning_logs.txt', log_dir / 'nemo_error_log.txt')
    return log_dir
def error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None):
    """
    Checks that the passed trainer is compliant with NeMo and exp_manager's passed configuration. Checks that:
        - Throws error when hydra has changed the working directory. This causes issues with lightning's DDP
        - Throws error when trainer has loggers defined but create_tensorboard_logger or create_WandB_logger is True
        - Prints error messages when 1) run on multi-node and not slurm, and 2) run on multi-gpu without DDP

    Args:
        trainer (pytorch_lightning.Trainer): The lightning trainer to validate.
        cfg (DictConfig, dict): The (already schema-merged) exp_manager configuration.

    Raises:
        ValueError: If hydra changed the working directory.
        LoggerMisconfigurationError: If the trainer already has a logger while exp_manager is also asked
            to create one.
    """
    # A changed cwd breaks exp_manager's relative paths; require hydra.run.dir=. instead.
    if HydraConfig.initialized() and get_original_cwd() != os.getcwd():
        raise ValueError(
            # Fix: message previously misspelled "ExpManager" as "ExpManger".
            "Hydra changed the working directory. This interferes with ExpManager's functionality. Please pass "
            "hydra.run.dir=. to your python script."
        )
    if trainer.logger is not None and (cfg.create_tensorboard_logger or cfg.create_wandb_logger):
        raise LoggerMisconfigurationError(
            "The pytorch lightning trainer that was passed to exp_manager contained a logger, and either "
            f"create_tensorboard_logger: {cfg.create_tensorboard_logger} or create_wandb_logger: "
            f"{cfg.create_wandb_logger} was set to True. These can only be used if trainer does not already have a"
            " logger."
        )
    # The remaining setups are untested rather than fatal: warn loudly but continue.
    if trainer.num_nodes > 1 and not trainer.is_slurm_managing_tasks:
        logging.error(
            "You are running multi-node without slurm. Please note that this is not tested in NeMo and could result in "
            "errors."
        )
    if trainer.num_gpus > 1 and not trainer.use_ddp:
        # Fix: added the missing space after the period ("ddp.Please" -> "ddp. Please").
        logging.error(
            "You are running multi-gpu without ddp. Please note that this is not tested in NeMo and could result in "
            "errors."
        )
def check_resume(
    trainer: 'pytorch_lightning.Trainer',
    log_dir: str,
    resume_past_end: bool = False,
    resume_ignore_no_checkpoint: bool = False,
):
    """Checks that resume=True was used correctly with the arguments pass to exp_manager. Sets
    trainer.resume_from_checkpoint as necessary.

    Args:
        trainer (pytorch_lightning.Trainer): The trainer whose resume_from_checkpoint attribute is set.
        log_dir (str): The logging directory; checkpoints are searched under log_dir/checkpoints.
        resume_past_end (bool): Allow resuming from a *end.ckpt left by a fully completed run.
            Defaults to False.
        resume_ignore_no_checkpoint (bool): Warn and train from scratch instead of raising when no
            checkpoint could be found. Defaults to False.

    Raises:
        ValueError: If log_dir is not set, if a completed run is found without resume_past_end, or if
            more than one *end.ckpt / *last.ckpt checkpoint is found.
        NotFoundError: If resume_ignore_no_checkpoint is False and no checkpoints could be found.
    """
    if not log_dir:
        raise ValueError(f"Resuming requires the log_dir {log_dir} to be passed to exp_manager")

    checkpoint_dir = Path(Path(log_dir) / "checkpoints")

    checkpoint = None
    end_checkpoints = list(checkpoint_dir.glob("*end.ckpt"))
    last_checkpoints = list(checkpoint_dir.glob("*last.ckpt"))
    if not checkpoint_dir.exists():
        if resume_ignore_no_checkpoint:
            logging.warning(
                f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Training from scratch."
            )
            return
        else:
            raise NotFoundError(f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Cannot resume.")
    elif len(end_checkpoints) > 0:
        # A *end.ckpt means the previous run finished; only resume past it when explicitly requested.
        if resume_past_end:
            if len(end_checkpoints) > 1:
                # Fix: message previously read "Multiple multiple checkpoints ... that matches".
                raise ValueError(f"Multiple checkpoints {end_checkpoints} match *end.ckpt.")
            logging.info(f"Resuming from {end_checkpoints[0]}")
            checkpoint = end_checkpoints[0]
        else:
            raise ValueError(
                f"Found {end_checkpoints[0]} indicating that the last training run has already completed."
            )
    elif len(last_checkpoints) == 0:
        if resume_ignore_no_checkpoint:
            logging.warning(f"There were no checkpoints found in {checkpoint_dir}. Training from scratch.")
            return
        else:
            raise NotFoundError(f"There were no checkpoints found in {checkpoint_dir}. Cannot resume.")
    elif len(last_checkpoints) > 1:
        # Fix: message previously read "Multiple multiple checkpoints ... that matches".
        raise ValueError(f"Multiple checkpoints {last_checkpoints} match *last.ckpt.")
    else:
        logging.info(f"Resuming from {last_checkpoints[0]}")
        checkpoint = last_checkpoints[0]

    trainer.resume_from_checkpoint = str(checkpoint)

    if is_global_rank_zero():
        # Archive files left by the previous run into log_dir/run_{N} so the new run starts clean.
        files_to_move = []
        for child in Path(log_dir).iterdir():
            if child.is_file():
                files_to_move.append(child)

        if len(files_to_move) > 0:
            # Number the archive folder after the existing run_* directories.
            other_run_dirs = Path(log_dir).glob("run_*")
            run_count = 0
            for fold in other_run_dirs:
                if fold.is_dir():
                    run_count += 1
            new_run_dir = Path(Path(log_dir) / f"run_{run_count}")
            new_run_dir.mkdir()
            for _file in files_to_move:
                move(str(_file), str(new_run_dir))
def check_explicit_log_dir(
    trainer: 'pytorch_lightning.Trainer', explicit_log_dir: Union[Path, str], exp_dir: str, name: str, version: str
) -> (Path, str, str, str):
    """ Checks that the passed arguments are compatible with explicit_log_dir.

    Fix: the annotation for explicit_log_dir was the invalid ``[Path, str]`` (a list literal, not a
    type); it is now ``Union[Path, str]``.

    Args:
        trainer (pytorch_lightning.Trainer): The trainer; must not already have a logger attached.
        explicit_log_dir (Path, str): The user-supplied directory that overrides folder construction.
        exp_dir (str): Logged as an error if set alongside explicit_log_dir (it is ignored).
        name (str): The experiment name (not checked here; the returned name is always empty).
        version (str): Logged as an error if set alongside explicit_log_dir (it is ignored).

    Returns:
        log_dir (Path): the log_dir
        exp_dir (str): the base exp_dir without name nor version
        name (str): The name of the experiment
        version (str): The version of the experiment

    Raise:
        LoggerMisconfigurationError
    """
    if trainer.logger is not None:
        raise LoggerMisconfigurationError(
            # Fix: message previously read "was pass to exp_manager".
            "The pytorch lightning trainer that was passed to exp_manager contained a logger and explicit_log_dir: "
            f"{explicit_log_dir} was passed to exp_manager. Please remove the logger from the lightning trainer."
        )
    # Checking only (explicit_log_dir) vs (exp_dir and version).
    # The `name` will be used as the actual name of checkpoint/archive.
    if exp_dir or version:
        logging.error(
            f"exp_manager received explicit_log_dir: {explicit_log_dir} and at least one of exp_dir: {exp_dir}, "
            f"or version: {version}. Please note that exp_dir, name, and version will be ignored."
        )
    if is_global_rank_zero() and Path(explicit_log_dir).exists():
        logging.warning(f"Exp_manager is logging to {explicit_log_dir}, but it already exists.")
    return Path(explicit_log_dir), str(explicit_log_dir), "", ""
def get_log_dir(
    trainer: 'pytorch_lightning.Trainer',
    exp_dir: str = None,
    name: str = None,
    version: str = None,
    explicit_log_dir: str = None,
    use_datetime_version: bool = True,
) -> (Path, str, str, str):
    """
    Obtains the log_dir used for exp_manager.

    Precedence: explicit_log_dir wins outright; otherwise a trainer-attached
    logger supplies save_dir/name/version; otherwise the arguments (with
    defaults) are used and log_dir = exp_dir / name / version.

    Returns:
        log_dir (Path): the log_dir
        exp_dir (str): the base exp_dir without name nor version
        name (str): The name of the experiment
        version (str): The version of the experiment
    Raise:
        LoggerMisconfigurationError: If trainer is incompatible with arguments
        NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.
        ValueError: If resume is True, and there were more than 1 checkpoint could found.
    """
    if explicit_log_dir:  # If explicit log_dir was passed, short circuit
        return check_explicit_log_dir(trainer, explicit_log_dir, exp_dir, name, version)
    # Default exp_dir to ./nemo_experiments if None was passed
    _exp_dir = exp_dir
    if exp_dir is None:
        _exp_dir = str(Path.cwd() / 'nemo_experiments')
    # If the user has already defined a logger for the trainer, use the logger defaults for logging directory
    if trainer.logger is not None:
        if trainer.logger.save_dir:
            if exp_dir:
                raise LoggerMisconfigurationError(
                    "The pytorch lightning trainer that was passed to exp_manager contained a logger, the logger's "
                    f"save_dir was not None, and exp_dir ({exp_dir}) was not None. If trainer.logger.save_dir "
                    "exists, exp_manager will use trainer.logger.save_dir as the logging directory and exp_dir "
                    "must be None."
                )
            _exp_dir = trainer.logger.save_dir
        if name:
            raise LoggerMisconfigurationError(
                "The pytorch lightning trainer that was passed to exp_manager contained a logger, and name: "
                f"{name} was also passed to exp_manager. If the trainer contains a "
                "logger, exp_manager will use trainer.logger.name, and name passed to exp_manager must be None."
            )
        name = trainer.logger.name
        version = f"version_{trainer.logger.version}"
    # Use user-defined exp_dir, project_name, exp_name, and versioning options
    else:
        name = name or "default"
        version = version or os.environ.get(NEMO_ENV_VARNAME_VERSION, None)
        if version is None:
            if trainer.is_slurm_managing_tasks:
                logging.warning("Running on a slurm cluster. exp_manager will not add a version number.")
                version = ""
            elif is_global_rank_zero():
                # Only global rank 0 picks the version; it is then published to
                # the environment so other processes can read the same value.
                if use_datetime_version:
                    version = time.strftime('%Y-%m-%d_%H-%M-%S')
                else:
                    # Let lightning's TensorBoardLogger compute the next free version_N.
                    tensorboard_logger = TensorBoardLogger(save_dir=Path(_exp_dir), name=name, version=version)
                    version = f"version_{tensorboard_logger.version}"
            os.environ[NEMO_ENV_VARNAME_VERSION] = version
    log_dir = Path(_exp_dir) / Path(str(name)) / Path(str(version))
    return log_dir, str(_exp_dir), name, version
def get_git_hash():
    """
    Helper function that tries to get the commit hash if running inside a git folder
    returns:
        Bool: Whether the git subprocess ran without error
        str: git subprocess output or error message
    """
    try:
        commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT)
        return True, commit.decode()
    except subprocess.CalledProcessError as err:
        # Not a git repo (or git failed): report failure plus git's message.
        return False, "{}\n".format(err.output.decode("utf-8"))
def get_git_diff():
    """
    Helper function that tries to get the git diff if running inside a git folder
    returns:
        str: git subprocess output (the diff) or an error message
    """
    try:
        diff = subprocess.check_output(['git', 'diff'], stderr=subprocess.STDOUT)
        return diff.decode()
    except subprocess.CalledProcessError as err:
        # Not a git repo (or git failed): return git's error text instead.
        return "{}\n".format(err.output.decode("utf-8"))
class LoggerList(_LoggerCollection):
    """ A thin wrapper on Lightning's LoggerCollection such that name and version are better aligned with exp_manager
    """
    def __init__(self, _logger_iterable, nemo_name=None, nemo_version=""):
        super().__init__(_logger_iterable)
        # Name/version chosen by exp_manager, overriding the defaults the
        # aggregated loggers would otherwise report.
        self._nemo_name = nemo_name
        self._nemo_version = nemo_version
    @property
    def name(self) -> str:
        # Report the exp_manager-assigned experiment name.
        return self._nemo_name
    @property
    def version(self) -> str:
        # Report the exp_manager-assigned experiment version.
        return self._nemo_version
def configure_loggers(
    trainer: 'pytorch_lightning.Trainer',
    exp_dir: [Path, str],
    name: str,
    version: str,
    create_tensorboard_logger: bool,
    summary_writer_kwargs: dict,
    create_wandb_logger: bool,
    wandb_kwargs: dict,
):
    """ Creates TensorboardLogger and/or WandBLogger and attach them to trainer. Raises ValueError if
    summary_writer_kwargs or wandb_kwargs are misconfigured.
    """
    # Potentially create tensorboard logger and/or WandBLogger
    logger_list = []
    if create_tensorboard_logger:
        if summary_writer_kwargs is None:
            summary_writer_kwargs = {}
        elif "log_dir" in summary_writer_kwargs:
            # log_dir is derived from exp_dir/name/version; a caller-supplied one would conflict.
            raise ValueError(
                "You cannot pass `log_dir` as part of `summary_writer_kwargs`. `log_dir` is handled by lightning's "
                "TensorBoardLogger logger."
            )
        tensorboard_logger = TensorBoardLogger(save_dir=exp_dir, name=name, version=version, **summary_writer_kwargs)
        logger_list.append(tensorboard_logger)
        logging.info("TensorboardLogger has been set up")
    if create_wandb_logger:
        if wandb_kwargs is None:
            wandb_kwargs = {}
        if "name" not in wandb_kwargs and "project" not in wandb_kwargs:
            raise ValueError("name and project are required for wandb_logger")
        wandb_logger = WandbLogger(save_dir=exp_dir, version=version, **wandb_kwargs)
        logger_list.append(wandb_logger)
        logging.info("WandBLogger has been set up")
    # A single logger is attached directly; multiple loggers are wrapped so
    # that name/version reported to lightning stay exp_manager's values.
    # NOTE(review): if neither logger was requested, logger_list[0] raises
    # IndexError — verify callers always request at least one logger.
    logger_list = (
        LoggerList(logger_list, nemo_name=name, nemo_version=version) if len(logger_list) > 1 else logger_list[0]
    )
    trainer.logger_connector.configure_logger(logger_list)
class NeMoModelCheckpoint(ModelCheckpoint):
    """ Light wrapper around Lightning's ModelCheckpoint to force a saved checkpoint on train_end
    """
    def __init__(self, save_best_model=False, postfix=".nemo", **kwargs):
        # Parse and store "extended" parameters: save_best model and postfix.
        # save_best_model: restore the best checkpoint before the final save.
        # postfix: extension appended to the exported model file name.
        self.save_best_model = save_best_model
        self.postfix = postfix
        # Call the parent class constructor with the remaining kwargs.
        super().__init__(**kwargs)
    @rank_zero_only
    def on_train_end(self, trainer, pl_module):
        # fast_dev_run is a smoke test; do not export a model for it.
        if trainer.fast_dev_run:
            return None
        # Load the best model and then re-save it
        if self.save_best_model:
            trainer.checkpoint_connector.restore(self.best_model_path, on_gpu=trainer.on_gpu)
        # Export is unconditional: either the current weights or (above) the restored best ones.
        pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix))
def configure_checkpointing(
    trainer: 'pytorch_lightning.Trainer', log_dir: Path, name: str, params: Dict,
):
    """ Adds ModelCheckpoint to trainer. Raises CheckpointMisconfigurationError if trainer already has a ModelCheckpoint
    callback or if trainer.weights_save_path was passed to Trainer.
    """
    # Refuse to double up on checkpoint callbacks.
    for callback in trainer.callbacks:
        if isinstance(callback, ModelCheckpoint):
            raise CheckpointMisconfigurationError(
                "The pytorch lightning trainer that was passed to exp_manager contained a ModelCheckpoint "
                "and create_checkpoint_callback was set to True. Please either set create_checkpoint_callback "
                "to False, or remove ModelCheckpoint from the lightning trainer"
            )
    # weights_save_path defaults to cwd; any other value means the user set it.
    if Path(trainer.weights_save_path) != Path.cwd():
        raise CheckpointMisconfigurationError(
            "The pytorch lightning was passed weights_save_path. This variable is ignored by exp_manager"
        )
    # Create the callback and attach it to trainer
    if params.filepath is None:
        # Default filename template embeds the monitored metric and epoch.
        params.filepath = Path(log_dir / 'checkpoints' / f'--{{{params.monitor}:.2f}}-{{epoch}}')
    if params.prefix is None:
        params.prefix = name
    # Warn when validation (the source of the monitored metric) can never run.
    if "val" in params.monitor and trainer.max_epochs != -1 and trainer.max_epochs < trainer.check_val_every_n_epoch:
        logging.error(
            "The checkpoint callback was told to monitor a validation value but trainer.max_epochs("
            f"{trainer.max_epochs}) was less than trainer.check_val_every_n_epoch({trainer.check_val_every_n_epoch})."
            f"It is very likely this run will fail with ModelCheckpoint(monitor='{params.monitor}') not found in the "
            "returned metrics. Please ensure that validation is run within trainer.max_epochs."
        )
    checkpoint_callback = NeMoModelCheckpoint(**params)
    trainer.callbacks.append(checkpoint_callback)
| [
"noreply@github.com"
] | rajatisdead.noreply@github.com |
faabee197c6f74f38866e4b6c5c7cf9967640b63 | d9d592ab6352723a70bd38d130235822ce0cef7e | /testing/connections.py | fdc1c85b073ac141c5bcbf9f79355de4fc901a0a | [] | no_license | xvalier/basicLib | 790da231206f0478a4dc2e175b5ebca9c16aeea0 | 52ce19accd9787a6ec13faf205bc993e6a30ae49 | refs/heads/master | 2022-12-07T20:14:21.955249 | 2020-08-17T13:11:21 | 2020-08-17T13:11:21 | 149,390,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | from configparser import ConfigParser
from backend.utilities import toolsCouch as couch
from backend.utilities import toolsElastic as elastic
from backend.utilities import toolsSQL as sql
#Connect to Couchbase and ElasticSearch for Search microservice
def connectSearch(connectionString):
    """Open Couchbase and ElasticSearch handles for the Search microservice."""
    couch_user, couch_pass = extractCouchInfo(connectionString)
    # accessCouch returns the connection dict; add ElasticSearch alongside it.
    handles = couch.accessCouch(couch_user, couch_pass)
    handles['elastic'] = elastic.accessElastic()
    return handles
#Connect to Postgresql and ElasticSearch for Import microservice
def connectImport(connectionString):
    """Open PostgreSQL and ElasticSearch connections for the Import microservice.

    Also resolves the CSV import directory from the connection-string file.
    Returns a dict with keys 'psql', 'elastic' and 'csv'.
    """
    host, db, user, pw = extractPSQLInfo(connectionString)
    # BUG FIX: 'connections' was used without being initialized (NameError),
    # and extractCSVPath was called with the undefined name 'connection'.
    connections = {}
    connections['psql'] = sql.accessPSQL(host, db, user, pw)
    connections['elastic'] = elastic.accessElastic()
    connections['csv'] = extractCSVPath(connectionString)
    return connections
#Connect to Postgresql for Auth microservice
def connectAuth(connectionString):
    """Open a PostgreSQL connection for the Auth microservice.

    Returns a dict with the key 'psql'.
    """
    host, db, user, pw = extractPSQLInfo(connectionString)
    # BUG FIX: 'connections' was referenced before assignment (NameError).
    connections = {'psql': sql.accessPSQL(host, db, user, pw)}
    return connections
def connectTraversal(connectionString):
    """Open Couchbase and PostgreSQL handles for the Traversal microservice."""
    couch_user, couch_pass = extractCouchInfo(connectionString)
    # accessCouch returns the connection dict; add PostgreSQL alongside it.
    handles = couch.accessCouch(couch_user, couch_pass)
    host, db, sql_user, sql_pass = extractPSQLInfo(connectionString)
    handles['psql'] = sql.accessPSQL(host, db, sql_user, sql_pass)
    return handles
#ConnectionString Functions-----------------------------------------------------
#Determine location of connectionString based on type of server
def chooseConnectionString(cloudString, localString):
    """Ask the user where the server runs and return the matching connection string.

    Returns cloudString for answer '1' (GCP) or localString for '2' (laptop);
    keeps prompting on any other input.
    """
    # BUG FIX: the original loop compared the string from input() against the
    # ints 1/2 with bitwise '&', so the condition was always true and only the
    # early returns ever terminated the loop.
    while True:
        print('Is this on (1) GCP or (2) Personal laptop?')
        answer = input('Choose either 1 or 2:')
        if answer == '1':
            return cloudString
        if answer == '2':
            return localString
        print('Please choose either 1 or 2')
#Extraction Functions-----------------------------------------------------------
#Extract gRPC client stub info based on connectionString
def extractClientGRPCInfo(file):
    """Return (address, port) for the gRPC client stub from the INI file."""
    grpc_section = readINI(file)['grpc']
    return grpc_section['clientAddr'], int(grpc_section['port'])
#Extract gRPC server info based on connectionString
def extractServerGRPCInfo(file):
    """Return (address, port) for the gRPC server from the INI file."""
    grpc_section = readINI(file)['grpc']
    return grpc_section['serverAddr'], int(grpc_section['port'])
#Extract Postgresql SQL database info based on connectionString
def extractPSQLInfo(file):
    """Return (host, database, user, password) for PostgreSQL from the INI file."""
    psql = readINI(file)['postgresql']
    return psql['host'], psql['database'], psql['user'], psql['password']
#Extract Couchbase Document Store database info based on connectionString
def extractCouchInfo(file):
    """Return (user, password) for the Couchbase document store from the INI file."""
    couchbase = readINI(file)['couchbase']
    return couchbase['user'], couchbase['password']
#Extract ElasticSearch search engine info based on connectionString
def extractElasticInfo(file):
    """Return (host, port) for the ElasticSearch search engine from the INI file."""
    es = readINI(file)['elasticsearch']
    return es['host'], es['port']
#Extract information on directories where CSV files are stored
def extractCSVPath(file):
    """Return the directory where CSV files are stored, per the INI file."""
    return readINI(file)['csv']['path']
#Open connectionString INI file and read its contents
def readINI(file):
    """Parse the connection-string INI file and return the ConfigParser."""
    parser = ConfigParser()
    parser.read(file)
    return parser
| [
"nikilsunilkumar@gmail.com"
] | nikilsunilkumar@gmail.com |
2a6b94ca3becd2641a328a2c9bdd5d938549625b | 82403156aa68783d9c123454dc8170182f1ca689 | /Scraper:Naieve Bayes Classifier/tripadvisor-scraper/tripadvisor-scraper/tripadvisorbot/pipelines.py | 1c2cfa66cc78bdbd7a56be1f4f8ef875dde8b152 | [] | no_license | smurthy1996/Projects_ | 892fc2ac32ad4b6b1d8b6ac9850f51f6ed5632a9 | 8ff7f53de829807375cbe3c3cc9ef4748d1cd358 | refs/heads/master | 2022-03-29T11:24:45.962404 | 2020-01-13T22:12:24 | 2020-01-13T22:12:24 | 111,475,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from scrapy.exceptions import DropItem
class FilterWordsPipeline(object):
    """Pipeline stage meant to drop items whose description contains banned words.

    Currently a pass-through: no filtering is implemented and every item is
    forwarded unchanged.
    """

    def process_item(self, item, spider):
        # No filter rules yet; forward the item untouched.
        return item
"noreply@github.com"
] | smurthy1996.noreply@github.com |
ab65d81d276a3c2645df6ac28e20acfc2e015c04 | f8d2f88c08bfa4bee5692032b9404e31e9df304d | /src/set_orientation.py | 9ad4c8455f3b2dc0dca7b9f12c5e5071f20d3f7e | [] | no_license | pmessan/dummy-gas-detector | d10df29467f2a355bd52a50c457ec4a4b6f89917 | eb0e6f11121954ca730a958cd2484cc09e748129 | refs/heads/master | 2023-05-28T17:38:47.227984 | 2021-06-18T18:28:15 | 2021-06-18T18:28:15 | 368,345,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | #!/usr/bin/env python3
import sys
import rospy
import time
import math
from turtlesim.msg import Pose
from geometry_msgs.msg import Twist
# Global variables
x = None
y = None
yaw = None
def poseCallback(pose_message):
    """ROS subscriber callback: cache the turtle's latest pose in module globals."""
    global x, y, yaw
    # update the pose for the other functions
    x = pose_message.x
    y = pose_message.y
    yaw = pose_message.theta
def rotate(vel_pub, angular_speed, target_angle, clockwise=False):
    """Rotate in place by target_angle (radians) at angular_speed (rad/s).

    The angle covered is estimated open-loop from elapsed time
    (ang_turned = elapsed * angular_speed), not from pose feedback, so
    target_angle must be non-negative for the loop to run.
    """
    global x, y
    # init twist object to send velocity message
    vel_message = Twist()
    #initial pose
    x0 = x
    y0 = y
    t0 = rospy.Time.now().to_sec()
    #determine direction: negative z is clockwise, positive is counter-clockwise
    if (clockwise):
        vel_message.angular.z = -abs(angular_speed)
    else:
        vel_message.angular.z = abs(angular_speed)
    # set rate (publish at 10 Hz)
    rate = rospy.Rate(10)
    ang_turned = 0
    while ang_turned < target_angle:
        # publish
        vel_pub.publish(vel_message)
        rate.sleep()
        # track angle covered (time-based estimate, see docstring)
        t1 = rospy.Time.now().to_sec()
        ang_turned = (t1-t0) * angular_speed
        # overwrite the same console line with the running angle in degrees
        print(math.degrees(ang_turned), end="\r")
        sys.stdout.flush()
    print(math.degrees(ang_turned))
    rospy.loginfo("Angle turned")
def setOrientation(vel_publisher, target_angle, angular_speed):
    """Rotate the turtle from its current yaw to target_angle (radians).

    BUG FIX: the original set clockwise=0 (counter-clockwise) for a negative
    angle difference — the wrong direction — and passed the negative angle to
    rotate(), whose 'while ang_turned < target_angle' loop then never ran.
    """
    ang_to_rotate = target_angle - yaw
    # A negative difference means the rotation must be clockwise.
    clockwise = ang_to_rotate < 0
    print(f"Target angle: {target_angle}\nAngle to rotate: {ang_to_rotate}")
    # rotate() counts a positive angle up from zero, so pass the magnitude.
    rotate(vel_publisher, angular_speed, abs(ang_to_rotate), clockwise)
if __name__ == "__main__":
    # Wire up the turtlesim velocity publisher and pose subscriber.
    rospy.init_node("move_straight_node")
    vel_publisher = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=10)
    pose_subscriber = rospy.Subscriber("/turtle1/pose", Pose, poseCallback)
    # Give the subscriber time to receive a first pose before reading it.
    time.sleep(2)
    try :
        # argv[1]: target heading in degrees; rotate at 45 deg/s.
        setOrientation(vel_publisher, math.radians(float(sys.argv[1])), math.radians(45))
    except rospy.ROSInterruptException:
        sys.exit(1)
"p.newman.messan@gmail.com"
] | p.newman.messan@gmail.com |
46ba76f341afc4728445b1052ae4b777957eab5c | 546d9e4777cf62840c002a0d1a32648a44c0a5aa | /raspberry_pi/ai/modules/module.py | 3c76bd8474edfb520aacb24500bd1dba47782583 | [] | no_license | Atmelfan/ash | d947c59cb05e513c348dcd4d0215caad5be29a6b | e8d5fb2baff9a6388ae4f46ddffd76cf4b359e00 | refs/heads/master | 2021-01-19T18:41:01.703877 | 2017-04-19T20:33:53 | 2017-04-19T20:33:53 | 68,116,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | import os, time
import threading, Queue
import logging
from logging.handlers import RotatingFileHandler
class Event(object):
"""docstring for Event"""
def __init__(self, name):
super(Event, self).__init__()
self.name = name
# Returns true if this event is still valid (for example, face is still being tracked)
# False if the event has become invalig (for example, face has been lost)
def valid(self):
return False
class Module(threading.Thread):
"""docstring for Module"""
drun = False
def __init__(self, events, args):
super(Module, self).__init__()
self.stop = threading.Event()
self.events = events
self.args = args
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
fh = RotatingFileHandler('logs/%s.log'%self.__class__.__name__, maxBytes=512*1024, backupCount=0)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(name)s] %(levelname)s: %(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def run(self):
while not self.stop.is_set():
try:
if not self.drun:
event = self.update()
if event != None:
self.events.put()
except Queue.Empty:
continue
def join(self, timeout=None):
self.stop.set()
super(Module, self).join(timeout)
# Update module
def update(self):
return None
def pause(self, drun):
if drun:
self.logger.info('Paused')
else:
self.logger.info('Unpaused')
self.drun=drun
return self.drun
| [
"gustavp@gpa-robotics.com"
] | gustavp@gpa-robotics.com |
65e1d34a1f7f96955c2ec68156d6da06d5ed4e7f | 9c38f3a354844f5080632005630d249d6487ebb3 | /haspy/test/simple/classdef_4.py | e8b6676653624320671c3245c2ff360d34222689 | [
"MIT"
] | permissive | keithroe/keithscode | ee7247ad6bdd844279f29a56718992cb886f9215 | 470c6b833b9b8bc2c78d1b43aac896b0ce9c9a7c | refs/heads/master | 2021-01-10T08:48:12.729594 | 2018-10-16T17:48:31 | 2018-10-16T17:48:31 | 51,531,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | @b(c)
@d
class a:
pass
| [
"keithroe@cba41313-dcc7-113d-0d3d-2a2d30939b49"
] | keithroe@cba41313-dcc7-113d-0d3d-2a2d30939b49 |
48d75f44368651e68119d4eaa78b6c2ad0811677 | 05d45d5bf64f95b1ecad1041096a1c78c4ce59da | /auctions/migrations/0016_auto_20200728_1433.py | 0e391ecccb48cc1585b1b3cf5fb34ebc5ebc0160 | [] | no_license | mustufajan/ecommerce-live | ed4bfddacd370a7750a02e7b4001774a9ccbcd76 | 02250725dbe94ffa99e623248b72990659d04fa4 | refs/heads/master | 2022-12-09T03:10:09.770734 | 2020-09-13T17:46:23 | 2020-09-13T17:46:23 | 294,824,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # Generated by Django 3.0.8 on 2020-07-28 18:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: limit Listing.category to two choices."""

    # Must be applied after migration 0015 of the auctions app.
    dependencies = [
        ('auctions', '0015_auto_20200728_1424'),
    ]

    # NOTE(review): default='FASHION' does not match either choice value
    # ('Fashion' / 'Home & Electronics') — verify this is intentional.
    operations = [
        migrations.AlterField(
            model_name='listing',
            name='category',
            field=models.CharField(choices=[('Home & Electronics', 'Home & Electronics'), ('Fashion', 'Fashion')], default='FASHION', max_length=20),
        ),
    ]
| [
"jan.mustufa92@gmail.com"
] | jan.mustufa92@gmail.com |
de6a127836cd2b1603b39276bc954cc6b6fdc3b6 | c9673b8aa003c2f62deed64511602be487b6de18 | /venv/Scripts/miniterm.py | 143faa31e1000cb198a27e8acb55f4d313706bb7 | [] | no_license | dvago88/ArduinoSerialGetterPython | 5f36e1088e4e912e147d31dd8c7b07fc63117278 | adc19feaca1b6a16cf1da2c6aaff2e9c8a918a5b | refs/heads/master | 2020-03-11T18:31:13.692159 | 2018-04-22T11:18:54 | 2018-04-22T11:18:54 | 130,179,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,158 | py | #!C:\Users\Daniel\PycharmProjects\ArduinoSerialGetter\venv\Scripts\python.exe
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
# Python 2/3 compatibility: on Python 3, alias raw_input/unichr to their
# renamed builtins so the rest of the module can use the Python 2 names.
try:
    raw_input
except NameError:
    # pylint: disable=redefined-builtin,invalid-name
    raw_input = input   # in python3 it's "raw" by default
    unichr = chr
def key_description(character):
    """Return a human-readable name for *character*, e.g. 'Ctrl+A'."""
    code = ord(character)
    # Control characters 0..31 map onto Ctrl+@, Ctrl+A, ..., Ctrl+_.
    if code < 32:
        return 'Ctrl+{:c}'.format(code + ord('@'))
    return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
    """OS abstraction for console access (input/output codec, no echo)."""

    def __init__(self):
        # On Python 3 the raw byte stream lives behind sys.stdout.buffer;
        # on Python 2 sys.stdout itself accepts bytes.
        if sys.version_info >= (3, 0):
            self.byte_output = sys.stdout.buffer
        else:
            self.byte_output = sys.stdout
        self.output = sys.stdout

    def setup(self):
        """Switch the console to single-character, no-echo mode (no-op here)."""

    def cleanup(self):
        """Restore the default console settings (no-op here)."""

    def getkey(self):
        """Read one key from the console; the base class reads nothing."""
        return None

    def write_bytes(self, byte_string):
        """Write already-encoded bytes and flush immediately."""
        self.byte_output.write(byte_string)
        self.byte_output.flush()

    def write(self, text):
        """Write a text string and flush immediately."""
        self.output.write(text)
        self.output.flush()

    def cancel(self):
        """Abort a pending getkey() call (no-op here)."""

    # Context-manager protocol: temporarily restore the normal terminal mode
    # (e.g. to prompt the user) and re-enter raw mode on exit.
    def __enter__(self):
        self.cleanup()
        return self

    def __exit__(self, *args, **kwargs):
        self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
    class Out(object):
        """file-like wrapper that uses os.write"""
        def __init__(self, fd):
            # File descriptor written to directly, bypassing Python buffering.
            self.fd = fd
        def flush(self):
            # os.write is unbuffered, so there is nothing to flush.
            pass
        def write(self, s):
            os.write(self.fd, s)
    class Console(ConsoleBase):
        """Windows console implementation: msvcrt key input, UTF-8 code pages."""
        def __init__(self):
            super(Console, self).__init__()
            # Remember the original code pages so __del__ can restore them,
            # then switch both to 65001 (UTF-8).
            self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
            self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
            ctypes.windll.kernel32.SetConsoleOutputCP(65001)
            ctypes.windll.kernel32.SetConsoleCP(65001)
            self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
            # the change of the code page is not propagated to Python, manually fix it
            sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
            sys.stdout = self.output
            self.output.encoding = 'UTF-8' # needed for input
        def __del__(self):
            # Restore the code pages saved in __init__.
            ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
            ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
        def getkey(self):
            """Read one key; CR becomes LF, function-key prefixes are skipped."""
            while True:
                z = msvcrt.getwch()
                if z == unichr(13):
                    return unichr(10)
                elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
                    msvcrt.getwch()
                else:
                    return z
        def cancel(self):
            # CancelIo, CancelSynchronousIo do not seem to work when using
            # getwch, so instead, send a key (CR) to the window with the console
            hwnd = ctypes.windll.kernel32.GetConsoleWindow()
            ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
    class Console(ConsoleBase):
        """POSIX console implementation: termios raw mode on stdin."""
        def __init__(self):
            super(Console, self).__init__()
            self.fd = sys.stdin.fileno()
            # Save the current terminal attributes; cleanup() restores them
            # and is registered with atexit as a safety net.
            self.old = termios.tcgetattr(self.fd)
            atexit.register(self.cleanup)
            if sys.version_info < (3, 0):
                self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
            else:
                self.enc_stdin = sys.stdin
        def setup(self):
            # Disable canonical mode, echo and signal generation; read returns
            # after every single character (VMIN=1, VTIME=0).
            new = termios.tcgetattr(self.fd)
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
            new[6][termios.VMIN] = 1
            new[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, new)
        def getkey(self):
            c = self.enc_stdin.read(1)
            if c == unichr(0x7f):
                c = unichr(8) # map the BS key (which yields DEL) to backspace
            return c
        def cancel(self):
            # Inject a NUL byte into the terminal input queue so the blocking
            # read in getkey() returns.
            fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
        def cleanup(self):
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
    """Base transformation: forward all data unchanged."""

    def rx(self, text):
        """Transform text received from the serial port."""
        return text

    def tx(self, text):
        """Transform text about to be sent to the serial port."""
        return text

    def echo(self, text):
        """Transform text that is locally echoed to the console."""
        return text
class CRLF(Transform):
    """ENTER sends CR+LF"""

    def tx(self, text):
        # Every newline typed on the console goes out as CR+LF.
        return '\r\n'.join(text.split('\n'))
class CR(Transform):
    """ENTER sends CR"""

    def rx(self, text):
        # Incoming carriage returns become newlines for the console.
        return '\n'.join(text.split('\r'))

    def tx(self, text):
        # Outgoing newlines are sent as bare carriage returns.
        return '\r'.join(text.split('\n'))
class LF(Transform):
    """ENTER sends LF (newlines pass through unchanged)"""
class NoTerminal(Transform):
    """remove typical terminal control codes from input"""

    # Map every C0 control code except CR, LF, BS and TAB to its Unicode
    # "control picture" (U+2400 block); also hide DEL and CSI.
    REPLACEMENT_MAP = {x: 0x2400 + x for x in range(32) if unichr(x) not in '\r\n\b\t'}
    REPLACEMENT_MAP[0x7F] = 0x2421  # DEL
    REPLACEMENT_MAP[0x9B] = 0x2425  # CSI

    def rx(self, text):
        return text.translate(self.REPLACEMENT_MAP)

    echo = rx
class NoControls(NoTerminal):
    """Remove all control codes, incl. CR+LF"""

    # Unlike NoTerminal, CR/LF/BS/TAB are replaced as well, and a space is
    # shown as the visible open-box symbol.
    REPLACEMENT_MAP = {x: 0x2400 + x for x in range(32)}
    REPLACEMENT_MAP.update({
        0x20: 0x2423,  # visual space
        0x7F: 0x2421,  # DEL
        0x9B: 0x2425,  # CSI
    })
class Printable(Transform):
    """Show decimal code for all non-ASCII characters and replace most control codes"""

    def rx(self, text):
        out = []
        for ch in text:
            if ' ' <= ch < '\x7f' or ch in '\r\n\b\t':
                # Printable ASCII and common whitespace pass through.
                out.append(ch)
            elif ch < ' ':
                # Other control codes become Unicode control pictures.
                out.append(unichr(0x2400 + ord(ch)))
            else:
                # Non-ASCII: spell out the decimal code point as subscript
                # digits followed by a space.
                out.extend(unichr(0x2080 + ord(digit) - 48) for digit in '{:d}'.format(ord(ch)))
                out.append(' ')
        return ''.join(out)

    echo = rx
class Colorize(Transform):
    """Apply different colors for received and echo"""

    def __init__(self):
        # XXX make it configurable, use colorama?
        self.input_color = '\x1b[37m'  # white for incoming serial data
        self.echo_color = '\x1b[31m'   # red for locally echoed keys

    def rx(self, text):
        return '{}{}'.format(self.input_color, text)

    def echo(self, text):
        return '{}{}'.format(self.echo_color, text)
class DebugIO(Transform):
    """Print what is sent and received"""

    @staticmethod
    def _trace(direction, text):
        # Show the repr so control characters are visible; write to stderr
        # to keep the trace out of the console data stream.
        sys.stderr.write(' [{}:{}] '.format(direction, repr(text)))
        sys.stderr.flush()

    def rx(self, text):
        self._trace('RX', text)
        return text

    def tx(self, text):
        self._trace('TX', text)
        return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character

# Maps the end-of-line option value to its newline-handling Transform class.
EOL_TRANSFORMATIONS = {
    'crlf': CRLF,
    'cr': CR,
    'lf': LF,
}

# Maps each filter option value to its Transform class.
TRANSFORMATIONS = {
    'direct': Transform,    # no transformation
    'default': NoTerminal,
    'nocontrol': NoControls,
    'printable': Printable,
    'colorize': Colorize,
    'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
    """\
    Show a list of ports and ask the user for a choice. To make selection
    easier on systems with long device names, also allow the input of an
    index.
    """
    sys.stderr.write('\n--- Available ports:\n')
    ports = []
    for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
        sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
        ports.append(port)
    while True:
        port = raw_input('--- Enter port index or full name: ')
        try:
            # Try to interpret the answer as a 1-based index into the list.
            index = int(port) - 1
            if not 0 <= index < len(ports):
                sys.stderr.write('--- Invalid index!\n')
                continue
        except ValueError:
            # Not a number: treat the input as a literal port name.
            pass
        else:
            port = ports[index]
        return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
    def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
        """Store settings and prepare (but do not start) the worker threads."""
        self.console = Console()
        self.serial = serial_instance
        self.echo = echo              # locally echo typed characters
        self.raw = False              # pass received bytes through untransformed
        self.input_encoding = 'UTF-8'
        self.output_encoding = 'UTF-8'
        self.eol = eol                # key into EOL_TRANSFORMATIONS
        self.filters = filters        # keys into TRANSFORMATIONS
        self.update_transformations()
        self.exit_character = 0x1d  # GS/CTRL+]
        self.menu_character = 0x14  # Menu: CTRL+T
        # Thread/codec state, initialized by start()/set_*_encoding().
        self.alive = None
        self._reader_alive = None
        self.receiver_thread = None
        self.rx_decoder = None
        self.tx_decoder = None
    def _start_reader(self):
        """Start reader thread"""
        self._reader_alive = True
        # start serial->console thread (daemon, so it never blocks interpreter exit)
        self.receiver_thread = threading.Thread(target=self.reader, name='rx')
        self.receiver_thread.daemon = True
        self.receiver_thread.start()
    def _stop_reader(self):
        """Stop reader thread only, wait for clean exit of thread"""
        self._reader_alive = False
        # Interrupt a blocking serial read, if the backend supports it.
        if hasattr(self.serial, 'cancel_read'):
            self.serial.cancel_read()
        self.receiver_thread.join()
    def start(self):
        """start worker threads"""
        self.alive = True
        self._start_reader()
        # enter console->serial loop on its own daemon thread
        self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
        self.transmitter_thread.daemon = True
        self.transmitter_thread.start()
        # Put the console into single-key, no-echo mode.
        self.console.setup()
    def stop(self):
        """set flag to stop worker threads"""
        # Both reader and writer loops poll this flag.
        self.alive = False
    def join(self, transmit_only=False):
        """wait for worker threads to terminate"""
        self.transmitter_thread.join()
        if not transmit_only:
            # Interrupt a blocking serial read so the reader thread can exit.
            if hasattr(self.serial, 'cancel_read'):
                self.serial.cancel_read()
            self.receiver_thread.join()
    def close(self):
        """Close the underlying serial port."""
        self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
    def set_rx_encoding(self, encoding, errors='replace'):
        """set encoding for received data"""
        self.input_encoding = encoding
        # Incremental decoder copes with multi-byte sequences split across reads.
        self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
    def set_tx_encoding(self, encoding, errors='replace'):
        """set encoding for transmitted data"""
        self.output_encoding = encoding
        # Incremental encoder mirrors the rx side for symmetric handling.
        self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
    def dump_port_settings(self):
        """Write current settings to sys.stderr"""
        sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
            p=self.serial))
        # Output control lines.
        sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
            ('active' if self.serial.rts else 'inactive'),
            ('active' if self.serial.dtr else 'inactive'),
            ('active' if self.serial.break_condition else 'inactive')))
        # Input control lines (may be unavailable on networked ports).
        try:
            sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
                ('active' if self.serial.cts else 'inactive'),
                ('active' if self.serial.dsr else 'inactive'),
                ('active' if self.serial.ri else 'inactive'),
                ('active' if self.serial.cd else 'inactive')))
        except serial.SerialException:
            # on RFC 2217 ports, it can happen if no modem state notification was
            # yet received. ignore this error.
            pass
        sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
        sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
        sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
        sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
        sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
        sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
    def reader(self):
        """loop and copy serial->console"""
        try:
            while self.alive and self._reader_alive:
                # read all that is there or wait for one byte
                data = self.serial.read(self.serial.in_waiting or 1)
                if data:
                    if self.raw:
                        # Raw mode: pass the bytes straight through.
                        self.console.write_bytes(data)
                    else:
                        # Decode incrementally, then run the rx transformation chain.
                        text = self.rx_decoder.decode(data)
                        for transformation in self.rx_transformations:
                            text = transformation.rx(text)
                        self.console.write(text)
        except serial.SerialException:
            # Port error: shut down and wake the console so the app can exit.
            self.alive = False
            self.console.cancel()
            raise # XXX handle instead of re-raise?
    def writer(self):
        """\
        Loop and copy console->serial until self.exit_character character is
        found. When self.menu_character is found, interpret the next key
        locally.
        """
        menu_active = False
        try:
            while self.alive:
                try:
                    c = self.console.getkey()
                except KeyboardInterrupt:
                    # Treat Ctrl+C as the ETX character to be sent.
                    c = '\x03'
                if not self.alive:
                    break
                if menu_active:
                    # Previous key was the menu character: handle this one locally.
                    self.handle_menu_key(c)
                    menu_active = False
                elif c == self.menu_character:
                    menu_active = True # next char will be for menu
                elif c == self.exit_character:
                    self.stop() # exit app
                    break
                else:
                    #~ if self.raw:
                    # Run the tx transformation chain, then encode and send.
                    text = c
                    for transformation in self.tx_transformations:
                        text = transformation.tx(text)
                    self.serial.write(self.tx_encoder.encode(text))
                    if self.echo:
                        # Local echo uses the echo() variant of each transformation.
                        echo_text = c
                        for transformation in self.tx_transformations:
                            echo_text = transformation.echo(echo_text)
                        self.console.write(echo_text)
        except:
            # Any failure stops the app; re-raise so the error is reported.
            self.alive = False
            raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 8 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from an other script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
| [
"dvago1988@gmail.com"
] | dvago1988@gmail.com |
e35f4f1d78a895399109533769407b639962a97b | 605577022fc77422af1d21b439576df3fb687e86 | /src/demo/RaspberryPi/AlphaBot/python/SR04.py | 7bbeaeee8856a931a471b3ee8342c6b81c6db0de | [] | no_license | we4690/WinAlphaBot | be63b7b7aafd5186f711b48df0d35f7540d34c41 | d322a6377188d7e557c5f5436c14516b0b74721f | refs/heads/master | 2021-06-10T09:36:39.540819 | 2016-12-31T08:37:17 | 2016-12-31T08:37:17 | 75,812,980 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | import RPi.GPIO as GPIO
import time
TRIG = 17
ECHO = 5
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(TRIG,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(ECHO,GPIO.IN)
def dist():
GPIO.output(TRIG,GPIO.HIGH)
time.sleep(0.000015)
GPIO.output(TRIG,GPIO.LOW)
while not GPIO.input(ECHO):
pass
t1 = time.time()
while GPIO.input(ECHO):
pass
t2 = time.time()
return (t2-t1)*34000/2
try:
while True:
print "HELLO"
print "Distance:%0.2f cm" % dist()
time.sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
| [
"ghjackie@qq.com"
] | ghjackie@qq.com |
98b29494a0d8d58aa22516fd5f7ffbbfe96b37c1 | c6cca19653c7113d9720371a5197ccadb3624e99 | /setup.py | da5a1ded931e21adce4c25ad99b7c007c5892897 | [] | no_license | DooMMasteR/nfc-laser-lock | 4c48538f6f98a9b3cbebe0b64b0104e3f357e889 | d68bf3752caba256fa4f7e58781d20de7d39b0db | refs/heads/master | 2021-02-25T00:38:05.189672 | 2020-09-14T05:37:26 | 2020-09-14T05:37:26 | 245,444,957 | 0 | 1 | null | 2020-09-14T05:37:27 | 2020-03-06T14:48:22 | Python | UTF-8 | Python | false | false | 387 | py | import setuptools
setuptools.setup(
name="nfclock",
version="0.1",
author="Steffen Arntz, Matthias Uschok",
author_email="dev@uschok.de",
description="Simple tool to identify and authenticate cheap nfc-tags against a DB of known authorized keys.",
url="https://github.com/hellfyre/nfclock",
packages=setuptools.find_packages(),
python_requires='>=3.8'
)
| [
"noreply@github.com"
] | DooMMasteR.noreply@github.com |
8025ba35b9d424317c8728eb00872d51f226b847 | 5fe083b1082dd960dda5789b1cac7287be1d882b | /bin/parse_oneway.py | ade40fc8bd3d76417799a19345007a36ee098b97 | [
"MIT"
] | permissive | single-cell-rna-sequencing/scanorama | d412a98386354483a7ae768cb314731084c36431 | 60d21e5f71722baedc1cc0c2f0bff0338116b16a | refs/heads/master | 2020-05-18T19:03:02.178470 | 2018-12-11T23:14:55 | 2018-12-11T23:14:55 | 184,600,314 | 0 | 1 | null | 2019-05-02T14:55:33 | 2019-05-02T14:55:33 | null | UTF-8 | Python | false | false | 1,088 | py | import numpy as np
from scanorama import plt
plt.rcParams.update({'font.size': 25})
import sys
scano, uncor = {}, {}
in_scano = True
for line in open(sys.argv[1]):
fields = line.rstrip().split()
if len(fields) > 3:
continue
try:
F = float(fields[1])
except ValueError:
continue
if in_scano:
scano[fields[0]] = F
else:
uncor[fields[0]] = F
if fields[0] == 'ZZZ3':
in_scano = False
scanorama, uncorrected = [], []
for gene in set(scano.keys()) & set(uncor.keys()):
scanorama.append(scano[gene])
uncorrected.append(uncor[gene])
scanorama = np.array(scanorama)
uncorrected = np.array(uncorrected)
below = sum(scanorama > uncorrected + 50)
above = sum(scanorama < uncorrected - 50)
print('{}% above line'.format(float(above) / float(above + below) * 100))
name = sys.argv[1].split('.')[0]
line = min(max(scanorama), max(uncorrected))
plt.figure()
plt.scatter(scanorama, uncorrected, s=10)
plt.plot([0, line], [0, line], 'r--')
plt.tight_layout()
plt.savefig('oneway_{}.png'.format(name))
| [
"brianhie@mit.edu"
] | brianhie@mit.edu |
21419208e4717153cda0a84e771de0808fb251a4 | 47db8d092bdb43bd7813b72e9e9612e2e2998a83 | /build/lib/Backlogged Code/eval_model.py | 8bc3694bf7627bef4229779c2dea5384ddc28fee | [
"MIT"
] | permissive | EricCacciavillani/eFlow | 3af621abb439beb7964d73c174e1a4a810119fd1 | 19a4f7c08037ae1925eb098c43f802df46d52b05 | refs/heads/master | 2022-12-09T21:49:30.412931 | 2020-12-13T22:29:50 | 2020-12-13T22:29:50 | 182,604,546 | 1 | 0 | MIT | 2022-12-08T03:28:25 | 2019-04-22T01:50:39 | Jupyter Notebook | UTF-8 | Python | false | false | 6,844 | py | def evaluate_model(self,
model_name,
model,
df,
df_features,
output_folder=None,
le_map=None,
show_extra=True,
find_nearest_on_cols=False,
zscore_low=-2,
zscore_high=2):
"""
model_name:
The string key to give the dict
model:
Cluster model type; it must have '.labels_' as an attribute
df:
Dataframe object
df_features:
DataFrameTypes object; organizes feature types into groups.
output_folder:
Sub directory to put the pngs
le_map:
Dict of dataframe cols to LabelEncoders
show_extra:
Show extra information from all functions
find_nearest_on_cols:
Allows columns to be converted to actual values found within
the dataset.
Ex: Can't have .7 in a bool column convert's it to 1.
False: Just apply to obj columns and bool columns
True: Apply to all columns
zscore_low/zscore_high:
Defines how the threshold to remove data points when profiling the
cluster.
The main purpose of 'evaluate_model' is to display/save tables/plots
accoiated with describing the model's 'findings'.
"""
df = copy.deepcopy(df)
# Create folder structure for png outputs
if not output_folder:
output_path = str(model).split("(", 1)[0] + "/" + model_name
else:
output_path = output_folder + "/" + model_name
# ---
# self.__visualize_clusters(model=model,
# output_path=output_path,
# model_name=model_name)
# ---
df["Cluster_Name"] = model.labels_
numerical_features = df_features.numerical_features()
clustered_dataframes, shrunken_labeled_df = \
self.__create_cluster_sub_dfs(
df=df, model=model, numerical_features=numerical_features,
zscore_low=zscore_low, zscore_high=zscore_high)
rows_count, cluster_profiles_df = self.__create_cluster_profiles(
clustered_dataframes=clustered_dataframes,
shrunken_df=shrunken_labeled_df,
numerical_features=df_features.numerical_features(),
le_map=le_map,
output_path=output_path,
show=show_extra,
find_nearest_on_cols=find_nearest_on_cols)
# Check to see how many data points were removed during the profiling
# stage
print("Orginal points in dataframe: ", df.shape[0])
print("Total points in all modified clusters: ", rows_count)
print("Shrank by: ", df.shape[0] - rows_count)
# In case to many data points were removed
if cluster_profiles_df.shape[0] == 0:
print(
"The applied Z-scores caused the cluster profiles "
"to shrink to far for the model {0}!".format(
model_name))
# Display and save dataframe table
else:
display(cluster_profiles_df)
self.__render_mpl_table(cluster_profiles_df, sub_dir=output_path,
filename="All_Clusters",
header_columns=0, col_width=2.0)
def __create_cluster_profiles(self,
clustered_dataframes,
shrunken_df,
numerical_features,
le_map,
output_path,
find_nearest_on_cols=False,
show=True):
"""
Profile each clustered dataframe based off the given mean.
Displays extra information in dataframe tables to be understand
each cluster.
find_nearest_on_cols:
Allows columns to be converted to actual values found within
the dataset.
Ex: Can't have .7 in a bool column convert's it to 1.
False: Just apply to obj columns and bool columns
True: Apply to all columns
"""
def find_nearest(numbers, target):
"""
Find the closest fitting number to the target number
"""
numbers = np.asarray(numbers)
idx = (np.abs(numbers - target)).argmin()
return numbers[idx]
cluster_profiles_df = pd.DataFrame(columns=shrunken_df.columns).drop(
'Cluster_Name', axis=1)
rows_count = 0
for cluster_identfier, cluster_dataframe in \
clustered_dataframes.items():
df = pd.DataFrame(columns=cluster_dataframe.columns)
df = df.append(cluster_dataframe.mean(), ignore_index=True)
df.index = [cluster_identfier]
if cluster_dataframe.shape[0] <= 1:
continue
# Attempt to convert numbers found within the full set of data
for col in cluster_dataframe.columns:
if col not in numerical_features or find_nearest_on_cols:
df[col] = find_nearest(numbers=shrunken_df[
col].value_counts().index.tolist(),
target=df[col].values[0])
# Evaluate cluster dataframe by dataframe
eval_df = pd.DataFrame(columns=cluster_dataframe.columns)
eval_df = eval_df.append(
cluster_dataframe.mean(), ignore_index=True)
eval_df = eval_df.append(
cluster_dataframe.min(), ignore_index=True)
eval_df = eval_df.append(
cluster_dataframe.median(),
ignore_index=True)
eval_df = eval_df.append(
cluster_dataframe.max(), ignore_index=True)
eval_df = eval_df.append(
cluster_dataframe.std(), ignore_index=True)
eval_df = eval_df.append(
cluster_dataframe.var(), ignore_index=True)
eval_df.index = ["Mean", "Min", "Median",
"Max", "Standard Deviation", "Variance"]
if show:
print("Total found in {0} is {1}".format(
cluster_identfier, cluster_dataframe.shape[0]))
self.__render_mpl_table(
df,
sub_dir=output_path,
filename=cluster_identfier +
"_Means_Rounded_To_Nearest_Real_Numbers",
header_columns=0,
col_width=4.0)
self.__render_mpl_table(
eval_df,
sub_dir=output_path,
filename=cluster_identfier +
"_Eval_Df",
header_columns=0,
col_width=4.0)
display(df)
display(eval_df)
self.__vertical_spacing(7)
cluster_profiles_df = cluster_profiles_df.append(
self.__decode_df(df, le_map))
rows_count += cluster_dataframe.shape[0]
return rows_count, cluster_profiles_df
| [
"EricCacciavillani@github.com"
] | EricCacciavillani@github.com |
90510f719a773aefb667ba3c6bca845013844106 | 7fd1295bed35f75bc42bebb4d31b8ec711fca5ba | /zakupne_rada/apps/gallery/urls.py | 7382e78062b62aae99c34214c0cb7513e3f283c6 | [] | no_license | maximlukasevich/zk_rada | 19ac0cd7c4e7339b98ebbb2977cb91ff75b76e95 | 542417ce2d6a0eb56833b0dddd57946434e5d0bc | refs/heads/main | 2023-03-07T15:05:30.092804 | 2021-02-23T10:26:31 | 2021-02-23T10:26:31 | 341,514,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.urls import path, include
from . import views
app_name = 'gallery'
urlpatterns = [
path('', views.index, name = 'index'),
] | [
"maxluks@MacBook-Pro-Maksim.local"
] | maxluks@MacBook-Pro-Maksim.local |
fdb60420ee1dc2a5af17f0de6520153d4d5e6f91 | 7fd96af77a2ae30b963df29236d7d386296dac50 | /rational_base.py | c255a399c19e2545ea8a918290c475cd79027cfb | [] | no_license | davidfoerster/codegolf | a37003462d11b92bb730a6f5a78723ea9d4077de | 85ed29d84ce1cfb3c73cf1e926fa0df2d7c273a9 | refs/heads/master | 2020-04-07T21:13:34.816000 | 2018-11-22T15:28:05 | 2018-11-22T15:28:05 | 158,718,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,091 | py | #!/usr/bin/python3 -O
"""
Represent numbers using rational bases
Source: https://codegolf.stackexchange.com/q/157224
"""
import string
from numbers import Rational
from collections.abc import Sequence, ByteString
class strtuple(tuple):
def __str__(self):
return ''.join(self)
digits = {
'alphanum-' + k: strtuple(
string.digits + getattr(string, 'ascii_{:s}case'.format(k)))
for k in ('upper', 'lower')
}
import fractions
class Fraction(fractions.Fraction):
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], cls):
return args[0]
return super().__new__(cls, *args)
def explode(self, n):
return n // self.numerator * self.denominator
def frange(fstep, start, end=0, return_default=False):
while return_default or start != end:
yield start
start = fstep(start)
return_default = False
def _verify_base(base, digits=None):
base = abs(base)
if base <= 1:
raise ValueError('Base must be greater than 1')
if digits is not None and base.numerator > len(digits):
raise ValueError(
'Base {0} requires at least {0.numerator:d} digits, not {1!r} ({2:d})'
.format(base, digits, len(digits)))
def _get_digit_type(digits):
for i in range(2):
if isinstance(digits, str):
return str
if isinstance(digits, ByteString):
return bytes
if not isinstance(digits, Sequence):
break
digits = digits[0]
raise TypeError
def int2str(n, base=Fraction(10), sign_prefixes=strtuple(('-', '')),
digits=digits['alphanum-upper']
):
if base is None:
if digits is not None:
base = len(digits)
else:
raise ValueError("'base' and 'digits' are both None")
elif not isinstance(base, Rational):
raise TypeError(
"'base' must be a Rational type, not {0.__module__}.{0.__qualname__}"
.format(type(base)))
_verify_base(base, digits)
n = int(n)
base = Fraction(base)
s = list(map(base.numerator.__rmod__, frange(base.explode, n, 0, True)))
s.reverse()
if digits is None:
return s
return (sign_prefixes[n >= 0] +
_get_digit_type(digits)().join(map(digits.__getitem__, map(abs, s))))
def _parse_sign_prefixes(s):
if not s or len(s) > 2:
raise ValueError('String is empty or longer than two')
return s if len(s) == 2 else (s, '')
def _make_argparser():
import argparse, inspect
pp = inspect.signature(int2str).parameters
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
**dict(zip(('description', 'epilog'),
map(str.strip, __doc__.rsplit('\n\n', 1)))))
p.add_argument('numbers', metavar='N[:Z]', nargs='+',
help='Numbers and (optional) bases')
p.add_argument('-b', '--base', metavar='Z',
default=pp['base'].default,
help='The default target base')
p.add_argument('-s', '--sign-prefixes', metavar='CHARS',
type=_parse_sign_prefixes, default=pp['sign_prefixes'].default,
help='Characters used as sign prefixes; the first is for negative values, '
'the second is for positive values and defaults to the empty string if '
'ommitted.')
d = p.add_mutually_exclusive_group()
d.add_argument('-d', '--digits', metavar='CHARS',
default=pp['digits'].default,
help='Characters used to represent the digits')
d.add_argument('--digits-preset', metavar='PRESET',
choices=digits.keys(), help='Use a named digit preset.')
return p
def _parse_args(args=None):
args = _make_argparser().parse_args(args)
if args.digits_preset:
args.digits = digits[args.digits_preset]
elif not args.digits:
if args.base:
args.digits = None
else:
raise ValueError('At least one of digits and base must be non-empty')
if args.base:
args.base = Fraction(args.base)
_verify_base(args.base, args.digits)
else:
args.base = len(args.digits)
return args
def main(args=None):
args = _parse_args(args)
for n in args.numbers:
n, sep, base = n.partition(':')
n = int(n)
base = Fraction(base) if sep else args.base
s = int2str(n, base, args.sign_prefixes, args.digits)
if not isinstance(s, str):
s = '{' + ', '.join(map(str, s)) + '}'
print('{0:d} = {2:s} (base {1!s})'.format(n, base, s))
if __name__ == '__main__':
main()
| [
"david.foerster@informatik.hu-berlin.de"
] | david.foerster@informatik.hu-berlin.de |
e3e11adc4b08ae52dd308e6dff8192c25b704387 | 87088ae6b84b419025ed0ed86127b184ba9af408 | /beholder/server_side/beholder_plugin.py | 7ab9936c84a1ea290d00a804540db08c1a500b43 | [] | no_license | tony32769/beholder | 1946332a9cceba40dbca5b56b4e1231fcee67233 | ceefdba88595e3f91756d1db76c9eed25e6ca577 | refs/heads/master | 2021-01-19T16:33:30.380469 | 2017-08-21T20:54:14 | 2017-08-21T20:54:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import time
from google.protobuf import message
import numpy as np
from PIL import Image
import tensorflow as tf
from werkzeug import wrappers
from tensorboard.backend import http_util
from tensorboard.backend.event_processing import plugin_asset_util as pau
from tensorboard.plugins import base_plugin
from beholder.shared_config import PLUGIN_NAME, SECTION_HEIGHT, IMAGE_WIDTH
from beholder.shared_config import SECTION_INFO_FILENAME, CONFIG_FILENAME,\
TAG_NAME, SUMMARY_FILENAME, DEFAULT_CONFIG
from beholder.file_system_tools import read_tensor_summary, read_pickle,\
write_pickle, get_image_relative_to_script
class BeholderPlugin(base_plugin.TBPlugin):
plugin_name = PLUGIN_NAME
def __init__(self, context):
self._MULTIPLEXER = context.multiplexer
self.PLUGIN_LOGDIR = pau.PluginDirectory(context.logdir, PLUGIN_NAME)
self.FPS = 10
self.most_recent_frame = get_image_relative_to_script('no-data.png')
self.most_recent_info = [{
'name': 'Waiting for data...',
}]
if not tf.gfile.Exists(self.PLUGIN_LOGDIR):
tf.gfile.MakeDirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG, '{}/{}'.format(self.PLUGIN_LOGDIR,
CONFIG_FILENAME))
def get_plugin_apps(self):
return {
'/change-config': self._serve_change_config,
'/beholder-frame': self._serve_beholder_frame,
'/section-info': self._serve_section_info,
'/ping': self._serve_ping,
'/tags': self._serve_tags
}
def is_active(self):
return True
def _fetch_current_frame(self):
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
try:
frame = read_tensor_summary(path).astype(np.uint8)
self.most_recent_frame = frame
return frame
except (message.DecodeError, IOError):
return self.most_recent_frame
@wrappers.Request.application
def _serve_tags(self, request):
if self.is_active:
runs_and_tags = {
'plugins/{}'.format(PLUGIN_NAME): {'tensors': [TAG_NAME]}
}
else:
runs_and_tags = {}
return http_util.Respond(request,
runs_and_tags,
'application/json')
@wrappers.Request.application
def _serve_change_config(self, request):
config = {}
for key, value in request.form.items():
try:
config[key] = int(value)
except ValueError:
if value == 'false':
config[key] = False
elif value == 'true':
config[key] = True
else:
config[key] = value
self.FPS = config['FPS']
write_pickle(config, '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
return http_util.Respond(request, {'config': config}, 'application/json')
@wrappers.Request.application
def _serve_section_info(self, request):
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME)
info = read_pickle(path, default=self.most_recent_info)
self.most_recent_info = info
return http_util.Respond(request, info, 'application/json')
def _frame_generator(self):
while True:
last_duration = 0
if self.FPS == 0:
continue
else:
time.sleep(max(0, 1/(self.FPS) - last_duration))
start_time = time.time()
array = self._fetch_current_frame()
if len(array.shape) == 2:
image = Image.fromarray(array, mode='L') # L: 8-bit grayscale
if len(array.shape) == 3:
image = Image.fromarray(array)
bytes_buffer = io.BytesIO()
image.save(bytes_buffer, 'PNG')
image_bytes = bytes_buffer.getvalue()
frame_text = b'--frame\r\n'
content_type = b'Content-Type: image/png\r\n\r\n'
response_content = frame_text + content_type + image_bytes + b'\r\n\r\n'
last_duration = time.time() - start_time
yield response_content
@wrappers.Request.application
def _serve_beholder_frame(self, request): # pylint: disable=unused-argument
    """Stream live frames using the multipart/x-mixed-replace technique."""
    # Thanks to Miguel Grinberg for this technique:
    # https://blog.miguelgrinberg.com/post/video-streaming-with-flask
    mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return wrappers.Response(response=self._frame_generator(),
                             status=200,
                             mimetype=mimetype)
@wrappers.Request.application
def _serve_ping(self, request): # pylint: disable=unused-argument
    """Liveness probe: always responds with {'status': 'alive'}."""
    return http_util.Respond(request, {'status': 'alive'}, 'application/json')
| [
"chris.anderson@byu.net"
] | chris.anderson@byu.net |
49962c1a43967920625f790089e2a1a1ad17b725 | 007a42268e501af19a5dfa974f223f00e3cf87ba | /WuEnDa/week4/week4.py | 1337999b36d50280fd3afdf006841fef4f3c20e9 | [] | no_license | YangYaoCD/MyPython | 7bbbaa0e7fe71659304c5a34ebbd9e3c30326ea9 | f5de1bccccd317054d46f397db5f9ba31bf94db5 | refs/heads/master | 2020-08-26T13:08:50.861099 | 2020-07-31T13:03:01 | 2020-07-31T13:03:01 | 217,020,349 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,382 | py | import numpy as np
import h5py
import matplotlib.pyplot as plt
from WuEnDa.week4.testCases_v2 import *
from WuEnDa.week4.dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
np.random.seed(1)
#初始化参数parameters(W,b)(W1,W2,b1,b2)
def initialize_parameters_deep(layer_dims):
    """Build the W1..W(L-1), b1..b(L-1) parameter dict for a deep network.

    layer_dims -- list of layer sizes, input layer first.
    Weights are drawn from a seeded standard normal; biases start at zero.
    """
    np.random.seed(3)
    parameters = {}
    num_layers = len(layer_dims)
    for layer in range(1, num_layers):
        rows, cols = layer_dims[layer], layer_dims[layer - 1]
        parameters['W%d' % layer] = np.random.randn(rows, cols)
        parameters['b%d' % layer] = np.zeros((rows, 1))
        assert parameters['W%d' % layer].shape == (rows, cols)
        assert parameters['b%d' % layer].shape == (rows, 1)
    return parameters
#正向线性得到Z,cache(A,W,b)
def linear_forward(A, W, b):
    """Linear part of a layer's forward step: Z = W.A + b.

    Returns Z and a cache (A, W, b) kept for backpropagation.
    """
    Z = W.dot(A) + b
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
#正向线性激活(一个神经元整体),传入A,W,b和对应的激活函数,输出A,cache(linear_cache(A_pre,W,b),activation_cache(Z))
def linear_activation_forward(A_prev, W, b, activation):
    """One forward unit: linear step followed by the chosen nonlinearity.

    activation -- "sigmoid" or "relu".
    Returns the activation A and a cache ((A_prev, W, b), Z) for backprop.
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
#返回AL(=y^),caches(cache(linear_cache(A_pre,W,b),activation_cache(Z)),......)
def L_model_forward(X, parameters):
    """Full forward pass: [LINEAR->RELU] * (L-1) -> LINEAR->SIGMOID.

    Returns AL (the prediction, a.k.a. y-hat) and the list of per-layer
    caches needed for backpropagation.
    """
    caches = []
    A = X
    L = len(parameters) // 2  # number of layers in the neural network
    for l in range(1, L):  # layers 1..L-1 use the ReLU activation
        A_prev = A
        # linear_activation_forward returns A and cache((A_prev, W, b), Z)
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "relu")
        caches.append(cache)
    # The final layer L uses sigmoid to produce probabilities.
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
    caches.append(cache)
    assert (AL.shape == (1, X.shape[1]))
    return AL, caches
#计算返回损失函数J
def compute_cost(AL, Y):
    """Cross-entropy cost J = -(1/m) * sum(y*log(a) + (1-y)*log(1-a)).

    AL -- predicted probabilities, shape (1, m).
    Y  -- ground-truth labels (0/1), shape (1, m).
    Fix: removed the unused `SUM` local that duplicated part of the work.
    """
    m = Y.shape[1]
    cost = -1 / m * np.sum(np.multiply(Y, np.log(AL)) + np.multiply((1 - Y), np.log(1 - AL)), axis=1)
    cost = np.squeeze(cost)  # e.g. turns [[17]] into 17
    assert (cost.shape == ())
    return cost
'''
dW[l]=(1/m)*dZ[l]*A[l-1].T;
db[l]=(1/m)*np.sum(dZ[l],axis=1,keepdims=True);
dA[L-1]=W[l].T*dZ[l];
'''
def linear_backward(dZ, cache):
    """Backprop through the linear step.

    Given dZ and the forward cache (A_prev, W, b), recover
    dA_prev = W.T @ dZ, dW = dZ @ A_prev.T / m, db = row-sum(dZ) / m.
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = dZ.dot(A_prev.T) / m
    db = dZ.sum(axis=1, keepdims=True) / m
    dA_prev = W.T.dot(dZ)
    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
#反向递推单元,返回dA_prev, dW, db
'''
dZ[l]=dA[l]*g'[l](Z[l])
'''
def linear_activation_backward(dA, cache, activation):
    """Backprop through one (linear -> activation) unit.

    cache is ((A_prev, W, b), Z) as produced by linear_activation_forward.
    activation -- "relu" or "sigmoid".
    Fix: removed a leftover debug print of dZ in the relu branch.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)
#反向递推模型
def L_model_backward(AL, Y, caches):
    """Full backward pass, mirroring L_model_forward.

    Seeds dAL from the cross-entropy derivative, backprops the sigmoid
    output layer, then the ReLU layers in reverse, collecting gradients
    into a dict keyed dA{l}, dW{l}, db{l}.
    """
    grads = {}
    # caches(cache(linear_cache(A_pre,W,b),activation_cache(Z)),......)
    L = len(caches) # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    '''!dA[l]=-np.divide(Y,A)+np.divide((1-Y),(1-A))'''
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    current_cache = caches[L-1]
    # Output layer: sigmoid.
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL,current_cache , "sigmoid")
    for l in reversed(range(L - 1)):
        # Hidden layers L-1..1: ReLU. grads["dA"+str(l+2)] was set by the
        # previous (shallower-index, deeper-layer) iteration.
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA"+str(l+2)],current_cache , "relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
    ### END CODE HERE ###
    return grads
#更新参数W,b
def update_parameters(parameters, grads, learning_rate):
    """One gradient-descent step: theta <- theta - learning_rate * d(theta).

    Rebinds each W{l}/b{l} entry of the parameters dict and returns it.
    """
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        parameters["W%d" % layer] = parameters["W%d" % layer] - learning_rate * grads["dW%d" % layer]
        parameters["b%d" % layer] = parameters["b%d" % layer] - learning_rate * grads["db%d" % layer]
    return parameters
# Smoke test: take one descent step on fixture data and print the result.
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
"1344262070@qq.com"
] | 1344262070@qq.com |
a450f9929c1b5e931a70d4b23b7cb47fcffc06b3 | d12ee64534b57b5debd16dac2cdcba15534fc7f2 | /oldBot/Wasatch_Conversions.py | 5a312313dd96d43e1305fc6779d6adef53e4f030 | [] | no_license | Eblanken/DeLaZerda_Wasatch | e83782ff51925a8e2470e86b3afa66e68e5693fc | 3c296a7fbc50db7c62eb78bd4ac8fce488f03f55 | refs/heads/master | 2020-03-15T18:35:17.469049 | 2018-07-10T21:26:36 | 2018-07-10T21:26:36 | 132,287,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,063 | py | # TODO: Conversions: Bleachpercentage overhaul, use default values for conversions
# COMBAK Conversions: Standardize documentation. Add more conversions.
#
# File: WasatchInterface_MicroscopeSettings
# ------------------------------
# Author: Erick Blankenberg, based off of work from Edwin
# Date: 5/12/2018
#
# Description:
#
# These methods convert user inputs in conventional units
# to units that the microscope uses for serial interpretation.
#
#----------------------- Imported Libraries -----------------------------------
import math
#---------------------------- Constants ---------------------------------------
# Microseconds of dwelling time to fully bleach one mm long section w/ standard profile
USFORMM = 3000
# Borrowed from Edwin's code, Wasatch units seem to be roughly 2093 per mm
MIN_Y = 3492.0
MAX_Y = 24418.0
MIN_X = 5081.0
MAX_X = 26032.0
MAX_LENGTH = 9566.0
# Experimentally the total reach of the beam is roughly 10mm in either direction
MM_Y = 10.0
MM_X = 10.0
# Note that actual total exposure times are determined from USFORMM, these
# are just preferences but should not affect the total amount of energy
# received by the sample.
PULSEPERIOD = 100 # Duration of a delay-pulse pair in microseconds
PULSESPERSWEEP = 100 # Number of pulses per sweep of the scanner
DUTY_CYCLE = 0.75 # Percentage of on time for pulses, this is the assumed duty cycle in USFORMM
# ---------------------- Function Definitions ---------------------------------
#
# Converts desired point in mm to wasatch units. Good for about 0.478 microns
# but there seems to be issues where the laser goes over etc. and is pretty
# wide?
#
# InputPoints is assumed to be a tuple of size 2 whose
# values are (x, y) mm.
#
# The function returns a tuple with the arguments converted
# to mm.
#
def WConvert_FromMM(inputPoint):
    """Map an (x, y) point in millimeters onto raw Wasatch scanner units.

    inputPoint -- 2-tuple (x_mm, y_mm); returns a 2-tuple of floats.
    """
    x_mm, y_mm = inputPoint[0], inputPoint[1]
    x_units = float(x_mm * ((MAX_X - MIN_X) / MM_X) + MIN_X)
    y_units = float(y_mm * ((MAX_Y - MIN_Y) / MM_Y) + MIN_Y)
    return (x_units, y_units)
#
# Description:
# Returns the number of scans from the required duration
#
# Parameters:
# 'numSeconds' (float) How long the scan should last in seconds.
# 'pulsePeriod' (int) Period of a single pulse in microseconds.
# 'pulsesPerSweep' (int) Number of pulses in a primary scan.
#
# Returns:
# Integer number of scans required.
#
def WConvert_NumScansFromSecs(numSeconds, pulsePeriod = PULSEPERIOD, pulseCount = PULSESPERSWEEP):
    """Return the number of sweeps (rounded up) that fill `numSeconds`.

    numSeconds  -- desired scan duration in seconds.
    pulsePeriod -- period of a single pulse in microseconds.
    pulseCount  -- number of pulses in a primary scan.

    Fix: the original body ignored both parameters and always used the
    module constants, so custom timings had no effect.
    """
    return int(math.ceil((float(numSeconds) * (10 ** 6)) / (pulsePeriod * pulseCount)))
#
# Determines the number of complete scans required to achieve
# the desired exposure percentage with the given duty cycle,
# and period of the pulse.
#
# TODO Conversions: Exposure overhaul (may be bugged)
#
def WConvert_NumScans(distance, exposurePercentage, dutyCycle = DUTY_CYCLE, pulsePeriod = PULSEPERIOD, pulsesPerSweep = PULSESPERSWEEP):
    """Number of complete scans needed for the requested exposure.

    Scales the USFORMM full-bleach dwell time by the duty cycle relative to
    the assumed DUTY_CYCLE, converts to whole back-and-forth sweeps, then
    applies the exposure percentage.
    Fix: removed a leftover debug print of the normalized duty cycle.
    """
    # Calculates scans for full exposure
    normalizedDutyCycle = dutyCycle / DUTY_CYCLE
    normalRequiredTime = (USFORMM * distance) / normalizedDutyCycle
    normalRequiredPasses = normalRequiredTime / (2 * pulsesPerSweep * pulsePeriod)
    # Applies exposure percentage
    return int(round(exposurePercentage * normalRequiredPasses))
#
# Description:
# Calculates duration of pulse for the given duty cycle.
#
# Returns:
# Returns the pulse length in microseconds.
#
def WConvert_PulseDuration(dutyCycle = DUTY_CYCLE):
    """Return the pulse on-time in microseconds for the given duty cycle."""
    return int(round(dutyCycle * PULSEPERIOD))
#
# Description:
# Calculates delay between pulses for the given duty cycle.
#
# Returns:
# The delay between pulses in microseconds.
#
def WConvert_PulseDelay(dutyCycle = DUTY_CYCLE):
    """Return the off-time between pulses in microseconds.

    Fix: the body used the DUTY_CYCLE module constant instead of the
    ``dutyCycle`` argument, so callers could not actually vary it
    (mirrors the corrected behavior of WConvert_PulseDuration).
    """
    return int(round((1 - dutyCycle) * PULSEPERIOD))
#
# Returns the number of triggers per sweep
#
def WConvert_PulsesPerSweep():
    """Return the configured number of trigger pulses per sweep."""
    return PULSESPERSWEEP
#
# Returns the number of seconds required to bleach a line of the given
# length.
#
# TODO: Conversions: Exposure overhaul
#
def WConvert_BleachExposureTimeSecs(distance):
    """Seconds required to fully bleach a line `distance` mm long."""
    return USFORMM * distance * 10**-6
| [
"edy.yuan@gmail.com"
] | edy.yuan@gmail.com |
bdf73d0d32d84b83c6318d15fd177e70db625d5a | 79366da50fc4c6518485362ac839ef29e5d469e8 | /music.py | fc7fe4aaf42156617bc0db91072e66e2e25a4b6b | [] | no_license | SuperNate/RadioPi | a0baa1ddd77a1542cca43661135002f213f06def | 86a34fcf13cab4cc50a56af5985e3705c0e8f45a | refs/heads/master | 2020-12-25T21:34:51.382957 | 2013-07-21T19:49:55 | 2013-07-21T19:49:55 | 18,526,579 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,624 | py | #!/usr/bin/python3
import RPi.GPIO as io
import string, cgi, time, os
import mpd
import json
# --- GPIO / LCD / MPD setup (runs at import time) ---
io.setmode(io.BCM)
io.setwarnings(False)
# Define GPIO to LCD mapping
LCD_RS = 7
LCD_E = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18
# Input switches: play/stop and next-station/volume.
switch1 = 17
switch2 = 22
# ok, lets write the pid of this script
pidfile = open("/tmp/music.pid", "w")
pidfile.write("%s" % os.getpid())
pidfile.close()
io.setup(switch1, io.IN)
io.setup(switch2, io.IN)
io.setup(LCD_E, io.OUT) # E
io.setup(LCD_RS, io.OUT) # RS
io.setup(LCD_D4, io.OUT) # DB4
io.setup(LCD_D5, io.OUT) # DB5
io.setup(LCD_D6, io.OUT) # DB6
io.setup(LCD_D7, io.OUT) # DB7
# Define some device constants
LCD_WIDTH = 16 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
# Timing constants
E_PULSE = 0.00005
E_DELAY = 0.00005
client = mpd.MPDClient()
client.connect("localhost", 6600)
# NOTE(review): `client.repeat` is a bare attribute access, not a call —
# it does nothing; possibly `client.repeat(1)` was intended. Confirm.
client.repeat
currentstatus = 'stop'
previousstatus = 'stop'
def connectMPD():
    """Reconnect the module-level MPD client after a dropped connection."""
    print('Connecting to MPD')
    client.connect("localhost", 6600)
def main():
    """Radio main loop: init the LCD, then poll the two switches forever.

    switch1 (active low) toggles play/stop; switch2 bumps the volume by 10%
    while stopped, or skips to the next station while playing.
    """
    global previousstatus
    # Initialise display
    lcd_init()
    lcd_byte(LCD_LINE_1, LCD_CMD)
    lcd_string("")
    lcd_byte(LCD_LINE_2, LCD_CMD)
    lcd_string("")
    lcd_byte(LCD_LINE_1, LCD_CMD)
    lcd_string("Hi Alan....")
    lcd_byte(LCD_LINE_2, LCD_CMD)
    print("Starting radio...")
    lcd_string("Starting radio.")
    time.sleep(2)
    lcd_byte(LCD_LINE_1, LCD_CMD)
    lcd_string("Radio ready.")
    lcd_byte(LCD_LINE_2, LCD_CMD)
    lcd_string("Volume 50%")
    client.setvol(50)
    while True:
        powerswitch = io.input(switch1)
        nextswitch = io.input(switch2)
        if (powerswitch == 0):
            print (previousstatus)
            if previousstatus == 'stop':
                # NOTE(review): bare except assumes any failure is a dropped
                # MPD connection and retries once after reconnecting.
                try:
                    client.play()
                except:
                    connectMPD()
                    client.play()
                Current = client.currentsong()
                lcd_byte(LCD_LINE_1, LCD_CMD)
                lcd_string(Current['name'])
                lcd_byte(LCD_LINE_2, LCD_CMD)
                lcd_string("Playing...")
                previousstatus = 'play'
            elif previousstatus == 'play':
                try:
                    client.stop()
                except:
                    connectMPD()
                    client.stop()
                lcd_byte(LCD_LINE_2, LCD_CMD)
                lcd_string("Stopped...")
                previousstatus = 'stop'
            time.sleep(1)  # crude debounce / repeat delay
        if(nextswitch == 0):
            if previousstatus == 'stop':
                # Stopped: switch2 adjusts volume in steps of 10, wrapping
                # back to 10 after it exceeds 90.
                jdata = client.status()
                curvol = int(jdata['volume'])
                if curvol > 90:
                    newvol = 10
                else:
                    newvol = curvol+10
                print("Setting new volume %s" % newvol)
                client.setvol(newvol)
                lcd_byte(LCD_LINE_2, LCD_CMD)
                lcd_string("Volume %s%%" % newvol)
            else:
                # Playing: switch2 skips to the next playlist entry.
                try:
                    client.next()
                except:
                    connectMPD()
                    client.next()
                Current = client.currentsong()
                print(Current['name'])
                lcd_byte(LCD_LINE_1, LCD_CMD)
                lcd_string(Current['name'])
            time.sleep(1)
    # Unreachable: the while True above never exits normally.
    io.cleanup()
def lcd_init():
    """Send the character-LCD initialisation command sequence.

    Looks like the standard HD44780 4-bit init (function set, display on,
    entry mode, clear) — confirm against the display's datasheet.
    """
    # Initialise display
    lcd_byte(0x33,LCD_CMD)
    lcd_byte(0x32,LCD_CMD)
    lcd_byte(0x28,LCD_CMD)
    lcd_byte(0x0C,LCD_CMD)
    lcd_byte(0x06,LCD_CMD)
    lcd_byte(0x01,LCD_CMD)
def lcd_string(message):
    """Write one display line: pad/truncate to LCD_WIDTH characters."""
    padded = message.ljust(LCD_WIDTH, " ")
    for pos in range(LCD_WIDTH):
        lcd_byte(ord(padded[pos]), LCD_CHR)
def lcd_byte(bits, mode):
    """Clock one byte out to the LCD as two 4-bit nibbles (high, then low)."""
    # Send byte to data pins
    # bits = data
    # mode = True for character
    #        False for command
    io.output(LCD_RS, mode) # RS
    # High bits
    io.output(LCD_D4, False)
    io.output(LCD_D5, False)
    io.output(LCD_D6, False)
    io.output(LCD_D7, False)
    if bits&0x10==0x10:
        io.output(LCD_D4, True)
    if bits&0x20==0x20:
        io.output(LCD_D5, True)
    if bits&0x40==0x40:
        io.output(LCD_D6, True)
    if bits&0x80==0x80:
        io.output(LCD_D7, True)
    # Toggle 'Enable' pin to latch the high nibble
    time.sleep(E_DELAY)
    io.output(LCD_E, True)
    time.sleep(E_PULSE)
    io.output(LCD_E, False)
    time.sleep(E_DELAY)
    # Low bits
    io.output(LCD_D4, False)
    io.output(LCD_D5, False)
    io.output(LCD_D6, False)
    io.output(LCD_D7, False)
    if bits&0x01==0x01:
        io.output(LCD_D4, True)
    if bits&0x02==0x02:
        io.output(LCD_D5, True)
    if bits&0x04==0x04:
        io.output(LCD_D6, True)
    if bits&0x08==0x08:
        io.output(LCD_D7, True)
    # Toggle 'Enable' pin to latch the low nibble
    time.sleep(E_DELAY)
    io.output(LCD_E, True)
    time.sleep(E_PULSE)
    io.output(LCD_E, False)
    time.sleep(E_DELAY)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: announce shutdown on the LCD, stop playback, tidy up.
        lcd_byte(LCD_LINE_1, LCD_CMD)
        lcd_string("Shutting down")
        lcd_byte(LCD_LINE_2, LCD_CMD)
        lcd_string("radio.")
        client.stop()
        time.sleep(2)
        lcd_byte(LCD_LINE_1, LCD_CMD)
        lcd_string("Radio shut down.")
        lcd_byte(LCD_LINE_2, LCD_CMD)
        lcd_string("")
        # Fix: `io.cleanup` was referenced but never called, leaving the
        # GPIO pins configured after exit.
        io.cleanup()
        os.remove('/tmp/music.pid')
| [
"neil@lathwood.co.uk"
] | neil@lathwood.co.uk |
48a5627ddd349fde1a4bd854221b3b7f9dc5a5a6 | cbe2033173534fb8759d3ebcd6ef2d6ec5d64a16 | /CurveTools/curve_tools_properties.py | 326404bfb34afcb104d995c882113ba84d38d27d | [] | no_license | Hichigo/NexusTools | 3008621badfedd77498dbecb6cbb5734d2079146 | 7573dd915299944137bc42c6d3e25599514f36e8 | refs/heads/master | 2020-06-13T03:18:07.508108 | 2019-07-01T18:40:00 | 2019-07-01T18:40:00 | 194,515,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import bpy
from bpy.types import PropertyGroup
from bpy.props import BoolProperty, IntProperty, EnumProperty, PointerProperty
from .curve_tools_utils import filter_on_curve_type
class CurveTools_SCENE_Properties(PropertyGroup):
    """Scene-level settings consumed by the Curve Tools operators/panels."""

    # How many duplicates of the active object to create.
    num_copy: IntProperty(
        name="Copy number",
        description="Number copy active object",
        min=2,
        default=5
    )
    # Target curve object; poll restricts the picker to curve-type objects.
    target_curve: PointerProperty(
        name="Target curve",
        description="Chose curve object",
        type=bpy.types.Object,
        poll=filter_on_curve_type
    )
    # Whether duplicates should follow the curve's direction.
    follow_curve: BoolProperty(
        name="Follow curve",
        description="Object follow by curve",
        default=False
    )
    # Whether the curve's per-point radius is applied to the object.
    curve_radius: BoolProperty(
        name="Curve radius",
        description="Apply radius curve to object",
        default=False
    )
    # Axis of the object that points along the curve.
    forward_axis: EnumProperty(
        name="Forward axis",
        items=[
            ("FORWARD_X", "X", "", 0),
            ("FORWARD_Y", "Y", "", 1),
            ("FORWARD_Z", "Z", "", 2),
            ("TRACK_NEGATIVE_X", "-X", "", 3),
            ("TRACK_NEGATIVE_Y", "-Y", "", 4),
            ("TRACK_NEGATIVE_Z", "-Z", "", 5)
        ],
        default = "FORWARD_X"
    )
    # Axis of the object treated as "up" while following the curve.
    up_axis: EnumProperty(
        name="Up axis",
        items=[
            ("UP_X", "X", "", 0),
            ("UP_Y", "Y", "", 1),
            ("UP_Z", "Z", "", 2)
        ],
        default = "UP_Z"
    )
"hichigo91@gmail.com"
] | hichigo91@gmail.com |
dc024d3deede1a1a78d947c9912e6b8916ec0a92 | 1edaf48fb81223513b386e81927d51c70f09b8f9 | /problems_00s/problem_3.py | e58a788eb278e4bdefd4756567485d7c4211d5ae | [] | no_license | famalhaut/ProjectEuler | 4392cc50ac88a168e9a2d7889e4a8da201afaf72 | 9c6be56f0fed472472d08bd35f488d8b94f684ff | refs/heads/master | 2020-05-26T11:50:52.711715 | 2017-04-21T16:01:58 | 2017-04-21T16:01:58 | 84,996,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | """
Largest prime factor
Problem 3
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
"""
def check(x, list_primes):
    """Trial-divide x by the given primes (assumed ascending).

    Returns False on the first divisor found, True once the primes tested
    exceed sqrt(x), and None if the list is exhausted inconclusively.
    """
    for p in list_primes:
        if x % p == 0:
            return False
        if p * p > x:
            return True
def primes_gen():
    """Yield primes indefinitely: 2, 3, then every 6k±1 candidate that
    passes trial division against the primes found so far."""
    known = [2, 3]
    yield 2
    yield 3
    base = 6
    while True:
        for candidate in (base - 1, base + 1):
            if check(candidate, known):
                known.append(candidate)
                yield candidate
        base += 6
def problem(x):
    """Return the largest prime factor of x (x itself when x is prime).

    Divides each prime out of x in turn; once p*p exceeds the remaining
    cofactor, that cofactor is either 1 (the answer is the last prime
    removed) or itself prime (the answer).
    Fix: deleted commented-out debug prints (non-English leftovers).
    """
    result = x
    for p in primes_gen():
        if p * p > x:
            return result if x == 1 else x
        while x % p == 0:
            x //= p
            result = p
if __name__ == '__main__':
    # Sanity-check against the worked example, then solve the real input.
    print('Test:', problem(13195))
    print('Answer:', problem(600851475143))
| [
"famalhaut.ru@gmail.com"
] | famalhaut.ru@gmail.com |
32f4c462ec8097a34c1519e066a80a65f1a14c8f | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /contest24/matrix.py | 6a654f89bbe393517b379bdacf7311c9a7f2387e | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | """
Given a matrix consists of 0 and 1, find the distance of the nearest 0 for each cell.
The distance between two adjacent cells is 1.
Example 1:
Input:
0 0 0
0 1 0
0 0 0
Output:
0 0 0
0 1 0
0 0 0
Example 2:
Input:
0 0 0
0 1 0
1 1 1
Output:
0 0 0
0 1 0
1 2 1
Note:
The number of elements of the given matrix will not exceed 10,000.
There are at least one 0 in the given matrix.
The cells are adjacent in only four directions: up, down, left and right.
"""
class Solution(object):
    """01-matrix: replace each cell with its distance to the nearest 0."""

    def updateMatrix(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[List[int]]

        Multi-source level sweep: zeros are level 0; each pass labels every
        still-unresolved 1-cell that touches a cell of the current level.
        """
        rows, cols = len(matrix), len(matrix[0])
        pending = [(r, c) for r in range(rows) for c in range(cols)
                   if matrix[r][c] == 1]
        grid = [[0 if cell == 0 else -1 for cell in row] for row in matrix]
        level = 0
        while pending:
            still_pending = []
            for r, c in pending:
                reached = False
                for nr, nc in ((r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == level:
                        grid[r][c] = level + 1
                        reached = True
                if not reached:
                    still_pending.append((r, c))
            level += 1
            pending = still_pending
        return grid
return matrix
# Ad-hoc driver: print results for the two example grids.
ans = Solution()
print(ans.updateMatrix([
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0]
]))
print(ans.updateMatrix([
    [1, 1, 1],
    [0, 1, 0],
    [0, 0, 0]
]))
"shao.zhongren@gmail.com"
] | shao.zhongren@gmail.com |
682c034c5183adb2c1cb84911ac68fc063307eb6 | ffbee50402fce2c88740413e8d5633e7104107d3 | /timertest.py | 1ac5e1f3cbce2c239c64f576da25da63a8002d30 | [] | no_license | balassaloci/Robotics-lab | 22773182791a2d48d51f1496422138c3d13776cd | d281970799f70b0b3a20c7c8deafda7b8a472c91 | refs/heads/master | 2021-03-16T10:50:28.638233 | 2017-02-15T11:24:53 | 2017-02-15T11:24:53 | 70,690,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import time
def timer():
now = time.localtime(time.time())
return now[5]
run = raw_input("Start? > ")
while run == "start":
| [
"bc1714@point05.doc.ic.ac.uk"
] | bc1714@point05.doc.ic.ac.uk |
86c74660e97fcfad69873b6025a9430d87b3496f | 492d3e666b87eff971628a74fe13facde01e2949 | /htmlcov/_python_Django_My Projects_student-portal_Lib_site-packages_django_db_migrations_graph_py.html.py | 05c7ac90efdcb13c551ee039f2457e7c75ee6996 | [] | no_license | OmarFateh/Student-Portal | 42050da15327aa01944dc79b5e00ca34deb51531 | 167ffd3a4183529c0cbc5db4ab232026711ea915 | refs/heads/master | 2023-06-13T01:03:16.475588 | 2021-07-08T11:09:09 | 2021-07-08T11:09:09 | 382,895,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96,031 | py | XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXX XXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XXX XXXXXXXXXXXXXX
XXX XXXXXXXXXX XXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XX XXXXXXXXXXXXXXXXXXXXXXX XX XXXX XXXXXXXX
XXXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXXX XXXXXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXX XXXXXXXXXXX XXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXX XX XXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXX XXXXX XXXXXXXXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX X XXXXXX XXXX XX XXX XXXXXXXXX XXXXXX XXXXXXXX XXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXX XXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX X XXXX XXXX XXXXXXX XXXXXXXXXX XX X XXXXXXXXX XXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXXXXX XXXXXXXXX XXXX XXX XXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XXX XXXXXXXXX XXXXX XX XXXXXXXXXX XXX XXXXX XXXXX XXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXX XXX XXX XXXXX X XXXXXXXXXXX XXXXXXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XXX XXXXXXX XX XXX XXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XXXXXXXXX XX X XXXXX XXX XXXX XXXXXXXXXX XX XX XXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXXXXX XXXXXXXXXXXX XXXXXXX XXXXXXXX XXXXXXXXXX X XXX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX X XXXXXXXXXX XX XXX XXXX XXXXXXXX XXXXX XXX XXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXX X XXXXXXXX XXXXXXXXXX XX XXX XXXXXXXX XXXXXXX XXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXXXXX XXX XX XXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXX XXXXX XXX XX XXXXXX XX XXXXXXXXX XXXXXXX XXX XX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XX XX XXXXXXX XXX XXXXXXXX XXXXXXXX XXX XXXXX XXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXX XXXXXX XXXXXXXX XXX XXXX XX XXXX XXXX XX XXXX XXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XXXXX XXX XX XXX XXXXXXXX XXXXXXXXXX XXX XXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXX XXXXXXXX XX XXXXXX XXXXXX XXX XXXXXXXX XXXXX XXXX XX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XXXXXXXXXX XXX XXXXXXX XXX XXXXXXXXXXXX XXXX XXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXX XXXXXXXXXX XX XXXXX XX XXX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX X XXXX XXXXXX XX X XXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXX XXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXXXXX XX XXX X XXXXXXX XXXX XXXXX XXX XXXX XXXXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XXX XXXXXX XXXXX XXXXX XX XXXX XXXXX XXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXX XXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXX XXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXXX XX XXX XXXXXXXXXX XXXXX XXXXX XXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXX XXXX XXXX XXXXXXXXXXX XXXX XXX XXXXXXX XX XXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXX XXXX XX XXXXXXXX XXXX XX XXX XX XXXXX XX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XX XXXX XXXXXXXXXXX XXXX XXX XX XXX XXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XX XXX XXXXXXXXX XXXXXX XX XXX XXXX XXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XX XXXXX XXXX XX XXXXXX XXXXXXXXXXXX XXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXX XXX XXX XXXXXXXXXXX XXXX XX XXXX XXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXX XX XXX XXXXXXXXXXX XXXX XX X XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXX XX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXX XXXXXXX XXXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXX XXXX XXXXXXXXXXXXX XXX XXXXX XXX XXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX X XXX XXXX XX XXXXX XX XXXXX XXXX XXXXXXXXX XXXXX XXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XX XXXX XXX XXXXXXXX XX XX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XX XXXXXX XXXXXXXXXXX XXXX XXX XX XXX XXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XX XXX XXXXXXXXX XXXXXX XX XXX XXXX XXXXXXX XXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXX XXXX XXXXXXXXXX XX XXX XXXXXX XXXXXXXX XXXXX XX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXXXX XXXXX XXXX XXX XXXXXXX XX XXXXX XXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXX XXXXX XX XX XXXX XX XXXXX XXXXXX XXXXXXXXXXXX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXX XXX XXXXXXXX XXXXX XXXXXXX XXXX XXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXX XX XXXXX XXXXX XXXXXXXXX XX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX X XXXXX XXXXXX X XXXX XX XXXXX XXXXXXXX XXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXXXXX XXXXXX XXXX XXX XXXX XXXXXXX XXXX XX XXX XXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XX XXXXXXXX XXX XXXXXXXXXX XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XX XXX X XXXXX XXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX X XXXXX XXXXXX X XXXX XX XXXXX XXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XX XXXXXXXXXX XXXXXX XXXX XXX XXXX XXXXXXX XXXX XX XXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XXXXXX XX XXXXXXXX XXX XXXXXXXXXX XXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XX XXX X XXXXX XXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX XXXXXX XXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXX XXXX XXXXX X XXXX XXX XXXXX XXXX XX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XXXX XXXXX XXX XXX XXXXXXXX XXXXX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXX XXXX XXXXX X XXXX XXX XXXXX XXXX XX XXXXXXXXXX XX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX XXX XXX XXXXX XXXXXXXX XXXXXXX XX XX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XXXX XXXX XXX XXX XXX XX XXXXXXXXXXX XX XXXXXX XXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XXXXXXX XXXXXXX XXX XX XXX XXXXXXXXXXX XXXXXXX X XXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXX XX X XXX XXXXX XXX XXXXX XXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXX XXXXXXXXX XXXXXXX XX XXXXX XX XXXXX XX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XX XXXXXX XX XXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXX X XXXXXXXXX XXXX XX XXXXXX XXXXXX X XXXXXXXX XXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXXX XX XXXXXX XXXXXX XXX XXXXX XXXXXX XXX XXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XX XXXXX XX XXX XXXXXXXXX XXXXXX XXX XXXXXXX XXXX XXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXX XXXXXX XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXX XX XXXXXXXXXX XXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXXXXX
XXXXXXX
| [
"66747309+OmarFateh@users.noreply.github.com"
] | 66747309+OmarFateh@users.noreply.github.com |
dbee469da3d768ac8bd9b40a106f32df70d98ae3 | 069dafce9f495f09bf8c2f76dbf5c045b7551721 | /run_size_V1_inhibition_overlapping.py | 2445079f234bb3bce526b0f73ebe9143a77a5600 | [] | no_license | dguarino/T2 | 26b1bc640812aa5438b09f9fab2bc73096cd7eef | 66b786928508089492f5f696c7c1576e098c6615 | refs/heads/master | 2020-04-03T22:39:06.059845 | 2020-03-13T15:43:02 | 2020-03-13T15:43:02 | 41,812,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | # -*- coding: utf-8 -*-
"""
This is
"""
from pyNN import nest
import sys
import mozaik
import mozaik.controller
from mozaik.controller import run_workflow, setup_logging
from mozaik.storage.datastore import Hdf5DataStore, PickledDataStore
from parameters import ParameterSet
from model_V1_full import ThalamoCorticalModel
from experiments import create_experiments_size_V1_inactivated_overlapping
from analysis_and_visualization import perform_analysis_test
from analysis_and_visualization import perform_analysis_and_visualization
from analysis_and_visualization import perform_analysis_and_visualization_radius
try:
from mpi4py import MPI
except ImportError:
MPI = None
# MPI bookkeeping.  The import above falls back to MPI = None when mpi4py is
# unavailable; mirror that here so the later rank check cannot raise
# NameError in a serial (non-MPI) run.
if MPI:
    mpi_comm = MPI.COMM_WORLD
else:
    mpi_comm = None
MPI_ROOT = 0

logger = mozaik.getMozaikLogger()

# Flags controlling which model components / feedback loops are simulated.
withPGN = True             # perigeniculate nucleus included
withV1 = True              # cortex included (open-loop when feedback flags are False)
withFeedback_CxPGN = True  # cortico-PGN feedback (closed loop)
withFeedback_CxLGN = True  # cortico-LGN feedback (closed loop)

# Model execution: flip this hand-toggle to False to skip the simulation and
# reload a previously pickled data store instead.
if True:
    data_store, model = run_workflow('ThalamoCorticalModel',
                                     ThalamoCorticalModel,
                                     create_experiments_size_V1_inactivated_overlapping)
    data_store.save()
else:
    setup_logging()
    # NOTE(review): alternative root_directory 'Deliverable/ThalamoCorticalModel_data_size_overlapping_____'
    # was used in earlier runs; adjust if reloading those results.
    data_store = PickledDataStore(
        load=True,
        parameters=ParameterSet({'root_directory': 'ThalamoCorticalModel_data_size_overlapping_____',
                                 'store_stimuli': False}),
        replace=True)
    logger.info('Loaded data store')

# Analysis and plotting: only the MPI root rank (or the single serial
# process when MPI is absent) performs the post-processing.
# Other analysis modes ('luminance', 'contrast', 'spatial_frequency',
# 'temporal_frequency', 'size_radius', 'orientation') are available via the
# same perform_analysis_and_visualization entry point.
if mpi_comm is None or mpi_comm.rank == MPI_ROOT:
    perform_analysis_and_visualization(data_store, 'size', withPGN, withV1)
    data_store.save()
| [
"domenico.guarino@gmail.com"
] | domenico.guarino@gmail.com |
72624296abb72df145e8940e711e87d4f531eab1 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/sklearn/datasets/tests/test_base.py | 08a6ba29413cf8bbe4e7c2cdcbf499993f650548 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 8,270 | py | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from functools import partial
import pytest
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals._pilutil import pillow_installed
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
@pytest.fixture(scope="module")
def data_home(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture(scope="module")
def load_files_root(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture
def test_category_dir_1(load_files_root):
test_category_dir1 = tempfile.mkdtemp(dir=load_files_root)
sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1,
delete=False)
sample_file.write(b"Hello World!\n")
sample_file.close()
yield str(test_category_dir1)
_remove_dir(test_category_dir1)
@pytest.fixture
def test_category_dir_2(load_files_root):
test_category_dir2 = tempfile.mkdtemp(dir=load_files_root)
yield str(test_category_dir2)
_remove_dir(test_category_dir2)
def test_data_home(data_home):
    """get_data_home returns the requested folder; clear_data_home removes it."""
    # get_data_home will point to a pre-existing folder
    result = get_data_home(data_home=data_home)
    # Bug fix: the original rebound the fixture name and then asserted
    # `assert_equal(data_home, data_home)` -- comparing a value with
    # itself, which can never fail.  Compare the returned path against
    # the fixture instead.
    assert_equal(result, data_home)
    assert os.path.exists(result)
    # clear_data_home will delete both the content and the folder it-self
    clear_data_home(data_home=result)
    assert not os.path.exists(result)
    # if the folder is missing it will be created again
    result = get_data_home(data_home=result)
    assert os.path.exists(result)
def test_default_empty_load_files(load_files_root):
res = load_files(load_files_root)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_default_load_files(test_category_dir_1, test_category_dir_2,
load_files_root):
res = load_files(load_files_root)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b"Hello World!\n"])
def test_load_files_w_categories_desc_and_encoding(
        test_category_dir_1, test_category_dir_2, load_files_root):
    """load_files honours the categories, description and encoding args."""
    # Bug fix: splitting on '/' breaks on Windows path separators;
    # os.path.basename is the portable way to get the final component.
    category = os.path.basename(os.path.abspath(test_category_dir_1))
    res = load_files(load_files_root, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    assert_equal(res.data, ["Hello World!\n"])
def test_load_files_wo_load_content(
test_category_dir_1, test_category_dir_2, load_files_root):
res = load_files(load_files_root, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert res.DESCR
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
check_return_X_y(digits, partial(load_digits))
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
if pillow_installed:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert res.target.size, 442
assert_equal(len(res.feature_names), 10)
assert res.DESCR
# test return_X_y option
check_return_X_y(res, partial(load_diabetes))
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert res.DESCR
assert os.path.exists(res.data_filename)
assert os.path.exists(res.target_filename)
# test return_X_y option
check_return_X_y(res, partial(load_linnerud))
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert res.DESCR
assert os.path.exists(res.filename)
# test return_X_y option
check_return_X_y(res, partial(load_iris))
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert res.DESCR
# test return_X_y option
check_return_X_y(res, partial(load_wine))
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert res.DESCR
assert os.path.exists(res.filename)
# test return_X_y option
check_return_X_y(res, partial(load_breast_cancer))
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert res.DESCR
assert os.path.exists(res.filename)
# test return_X_y option
check_return_X_y(res, partial(load_boston))
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert "data" in dir(data)
| [
"leiqk@dxy.cn"
] | leiqk@dxy.cn |
6c9d699dfc2b5e2f9f0092c6aa6c4bc913bcc40f | 806e3fc4fe0ccb668ed27f952157e863261cf64a | /data_analysis/school_analysis.py | 5e7b4ac2ad2c1a16c99dff4992dd0c5baf29fbf8 | [] | no_license | projectRepository-zhangzheng/deep_learning | d39b0ecb4effdb8557cfcabc9092e9d63cd83c59 | 44216e76395481c4a6ecaa5b4690a29676fc7df8 | refs/heads/master | 2022-09-20T06:47:24.833403 | 2020-06-06T03:13:51 | 2020-06-06T03:13:51 | 268,528,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | import pandas as pd
# Path to the Excel workbook holding the school data.
io = './data.xls'
# Read the first sheet into a DataFrame (pd is imported at the top of the
# file; reading .xls requires an Excel engine such as xlrd).
data = pd.read_excel(io)
"zhangzhengemail@sina.cn"
] | zhangzhengemail@sina.cn |
6811c8420bc66be2064304f956fff2ae1aa9dbe5 | 296cd2b0c81d8e8057615695d3479b9002260aa4 | /rl/hw3/seminar_alternative/qlearning.py | b01c6e4410dc6fa7e1ab45b1ef6a3e80eb0aa435 | [] | no_license | Ayagoz/dl-rl | 8e92cf9f21b939fcf04ef580288ab5b78ef38fef | 1c4bf214566e856ed1ab51820624702332df7f4c | refs/heads/master | 2021-09-03T10:07:28.439172 | 2018-01-08T08:41:57 | 2018-01-08T08:41:57 | 110,861,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | # qlearningAgents.py
# ------------------
## based on http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import random,math
import numpy as np
from collections import defaultdict
class QLearningAgent():
    """
    Q-Learning Agent.

    Instance variables:
      - self.epsilon  (exploration probability)
      - self.alpha    (learning rate)
      - self.discount (discount rate aka gamma)

    Helper methods:
      - self.getLegalActions(state): legal actions for a state
      - self.getQValue(state, action): Q(state, action)
      - self.setQValue(state, action, value): Q(state, action) := value

    NOTE: avoid touching self._qValues directly; go through the
    getQValue/setQValue accessors to keep the code clean.
    """
    def __init__(self, alpha, epsilon, discount, getLegalActions):
        """Initialize the agent and its Q-value table."""
        self.getLegalActions = getLegalActions
        # Q-table: state -> action -> value, defaulting to 0 for unseen pairs.
        self._qValues = defaultdict(lambda: defaultdict(lambda: 0))
        self.alpha = alpha
        self.epsilon = epsilon
        self.discount = discount

    def getQValue(self, state, action):
        """Return Q(state, action)."""
        return self._qValues[state][action]

    def setQValue(self, state, action, value):
        """Set Q(state, action) to the given value."""
        self._qValues[state][action] = value

    #---------------------#start of your code#---------------------#
    def getValue(self, state):
        """
        Return V(state) = max_action Q(state, action), where the max is
        taken over legal actions only.  Terminal states have value 0.
        """
        possibleActions = self.getLegalActions(state)
        if len(possibleActions) == 0:
            return 0.0
        return max(self.getQValue(state, action)
                   for action in possibleActions)

    def getPolicy(self, state):
        """Return the greedy action for *state*, or None if terminal."""
        possibleActions = self.getLegalActions(state)
        if len(possibleActions) == 0:
            return None
        return max(possibleActions,
                   key=lambda action: self.getQValue(state, action))

    def getAction(self, state):
        """
        Epsilon-greedy action selection: with probability self.epsilon
        pick a uniformly random legal action, otherwise the greedy one.
        Returns None for terminal states.
        """
        possibleActions = self.getLegalActions(state)
        if len(possibleActions) == 0:
            return None
        if np.random.random() < self.epsilon:
            return np.random.choice(possibleActions)
        return self.getPolicy(state)

    def update(self, state, action, nextState, reward):
        """
        Q-learning update:
            Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))

        NOTE: you should never call this function yourself; it is called
        on your behalf by the environment loop.
        """
        gamma = self.discount
        learning_rate = self.alpha
        # Bug fix: the TD target must discount the next state's value by
        # gamma -- the original computed `reward + self.getValue(nextState)`
        # and never used the discount factor it fetched above.
        reference_qvalue = reward + gamma * self.getValue(nextState)
        updated_qvalue = ((1 - learning_rate) * self.getQValue(state, action)
                          + learning_rate * reference_qvalue)
        self.setQValue(state, action, updated_qvalue)
| [
"ayagoz.musabaeva@mail.ru"
] | ayagoz.musabaeva@mail.ru |
77974af81d3fd6e9036c3296442203bd58796d64 | 12774294b256c3be879a630a0eec6656abef8f6e | /fluent_pages/pagetypes/textfile/migrations/0001_initial.py | d4ca4d400fd02a51c21d9d65c75bb839beedce48 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | websiteburo/django-fluent-pages | 515739bf21dcfe19c265a70ac167d0339730d243 | c2db8ee28a6fd129906b4bea0df167f43c9b1bc4 | refs/heads/master | 2020-12-25T16:35:47.757240 | 2015-02-23T11:32:08 | 2015-02-23T11:32:08 | 31,910,008 | 0 | 0 | null | 2015-03-09T16:35:40 | 2015-03-09T16:35:40 | null | UTF-8 | Python | false | false | 975 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``textfile`` page type.

    Creates the ``TextFile`` model as a multi-table-inheritance child of
    ``fluent_pages.UrlNode``: the ``urlnode_ptr`` parent link doubles as
    the primary key, and ``bases`` points at ``fluent_pages.page``.
    """

    dependencies = [
        ('fluent_pages', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TextFile',
            fields=[
                ('urlnode_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_pages.UrlNode')),
                ('content', models.TextField(verbose_name='File contents')),
                # b'...' byte-string choices/default: this migration predates
                # a text-native rewrite and must stay byte-identical.
                ('content_type', models.CharField(default=b'text/plain', max_length=100, verbose_name='File type', choices=[(b'text/plain', 'Plain text'), (b'text/xml', 'XML'), (b'text/html', 'HTML')])),
            ],
            options={
                'verbose_name': 'Plain text file',
                'verbose_name_plural': 'Plain text files',
            },
            bases=('fluent_pages.page',),
        ),
    ]
| [
"cdshryock@gannett.com"
] | cdshryock@gannett.com |
e077eac3f69602fea3cb6ae34ae33516556fbf7a | 9a53f836628b2c9e37e3c4011206698a3e0a06be | /node_modules/lwip/build/config.gypi | 23bb42b2c223df1969c6f6ae9f71fc23b86fe396 | [
"MIT"
] | permissive | supermars01/webpack-demo | 5013af343cc74c1cd76f2075e01e6ca29e62dcef | d92e02d5634d6e7d861fc37dca35514814c3e039 | refs/heads/master | 2021-01-11T03:06:01.165960 | 2016-10-20T06:21:46 | 2016-10-20T06:21:46 | 71,118,059 | 0 | 1 | null | 2016-10-20T06:21:47 | 2016-10-17T08:48:20 | JavaScript | UTF-8 | Python | false | false | 4,248 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "48.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/supermars/.node-gyp/6.6.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "true",
"dry_run": "",
"legacy_bundling": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/supermars/.npm-init.js",
"userconfig": "/Users/supermars/.npmrc",
"node_version": "6.6.0",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/supermars/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.10.3 node/v6.6.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
| [
"kings_821113@163.com"
] | kings_821113@163.com |
57650a22e4642009fb45c0984f172ae7d9317ccc | e6d0ef2d814477d8070ce1068c62e5d113a862d3 | /proto/cheby/wbgen/expand_reg.py | 6bedddb1e558fddea5dbf9cf55517d5e63b75640 | [] | no_license | u230925/cheby | f4df33831130c2f8d82c13cbb632f45547b36f88 | 330cc70bd1cfcad143e72a92148c9fd2cbcbea9f | refs/heads/master | 2021-05-22T23:50:44.676687 | 2018-08-24T19:06:18 | 2018-08-24T19:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,114 | py | """Expand registers: create registers for FIFOs"""
import cheby.wbgen.tree as tree
import cheby.wbgen.layout as layout
# One tuple per register of the embedded interrupt controller (EIC):
# (name, register prefix, field access mode, bus access,
#  register description, per-field description template).
irq_regs = [
    ('disable', 'idr', 'WO_RO', 'WRITE_ONLY',
     'Writing 1 disables handling of the interrupt associated with'
     # Typo fix: "Writin" -> "Writing" in the generated documentation text.
     ' corresponding bit. Writing 0 has no effect.',
     "write 1: disable interrupt '{name}'\nwrite 0: no effect"),
    ('enable', 'ier', 'WO_RO', 'WRITE_ONLY',
     'Writing 1 enables handling of the interrupt associated with'
     ' corresponding bit. Writing 0 has no effect.',
     "write 1: enable interrupt '{name}'\nwrite 0: no effect"),
    ('mask', 'imr', 'RO_WO', 'READ_ONLY',
     'Shows which interrupts are enabled. 1 means that the interrupt'
     ' associated with the bitfield is enabled',
     "read 1: interrupt '{name}' is enabled\nread 0: interrupt '{name}'"
     " is disabled"),
    ('status', 'isr', 'RW_RW', 'READ_WRITE',
     'Each bit represents the state of corresponding interrupt. 1 means the'
     ' interrupt is pending. Writing 1 to a bit clears the corresponding'
     ' interrupt. Writing 0 has no effect.',
     "read 1: interrupt '{name}' is pending\nread 0: interrupt not pending\n"
     "write 1: clear interrupt '{name}'\nwrite 0: no effect")]
def expand_irq(irqs, periph):
    """Build the four EIC registers (idr/ier/imr/isr), one bit per irq."""
    created = []
    for reg_name, reg_prefix, mode, access_bus, desc, field_desc in irq_regs:
        reg = tree.IrqReg()
        reg.c_prefix = "EIC_{}".format(reg_prefix.upper())
        reg.prefix = "eic_{}".format(reg_prefix)
        reg.name = "Interrupt {} register".format(reg_name)
        reg.desc = desc
        if reg_name == 'disable':
            reg.align = 8  # For compatibility (??)
        created.append(reg)
        # One 1-bit field per interrupt, packed from bit 0 upward.
        for bit, irq in enumerate(irqs):
            field = tree.Field()
            field.prefix = irq.prefix
            field.c_prefix = irq.c_prefix
            field.name = irq.name
            field.typ = 'BIT'
            field.bit_offset = bit
            field.bit_len = 1
            field.access = mode
            field.access_bus = access_bus
            field.desc = field_desc.format(name=irq.name)
            reg.fields.append(field)
    return created
fifo_fields = [
('FIFO_FULL', 'full', 'full flag', 'READ_ONLY',
"1: FIFO '{name}' is full\n0: FIFO is not full"),
('FIFO_EMPTY', 'empty', 'empty flag', 'READ_ONLY',
"1: FIFO '{name}' is empty\n0: FIFO is not empty"),
('FIFO_CLEAR', 'clear_bus', 'clear', 'WRITE_ONLY',
"write 1: clears FIFO '{name}\nwrite 0: no effect")]
def expand_fifo(n, periph):
    """Expand a Fifo node into data registers plus a control/status register.

    Fills n.regs with FifoReg entries (the FIFO payload split into
    DATA_WIDTH-wide chunks) followed by one FifoCSReg, sets n.width to the
    total payload width, and gives each field its fifo_offset (bit position
    within the FIFO word).  ``periph`` is unused here; kept for symmetry
    with expand_irq.
    """
    n.regs = []
    # Pack registers: access modes depend on the FIFO direction.
    if n.direction == 'CORE_TO_BUS':
        dir_str = 'output'
        acc = 'RO_WO'
        bus_acc = 'READ_ONLY'
    else:
        dir_str = 'input'
        acc = 'WO_RO'
        bus_acc = 'WRITE_ONLY'
    num = 0
    width = 0
    fields = list(n.fields)  # Copy list: the loop below consumes it.
    while fields:
        # Create a data register for the next chunk of fields.
        r = tree.FifoReg()
        r.parent = n
        r.num = num
        r.c_prefix = "{}_R{}".format(n.get_c_prefix(), num)
        r.prefix = "{}_r{}".format(n.get_hdl_prefix(), num)
        r.name = "FIFO '{}' data {} register {}".format(n.name, dir_str, num)
        n.regs.append(r)
        num += 1
        # Insert the first field; it must start on a DATA_WIDTH boundary.
        f = fields.pop(0)
        assert f.bit_offset % layout.DATA_WIDTH == 0
        # Rebase following fields against this register's start offset.
        off = f.bit_offset
        f.bit_offset = 0
        f.access = acc
        f.access_bus = bus_acc
        f.fifo_offset = width
        width += f.bit_len
        r.fields.append(f)
        # Try to insert more fields while they still fit in DATA_WIDTH bits.
        while fields:
            f = fields[0]
            if f.bit_offset + f.bit_len > off + layout.DATA_WIDTH:
                break
            f.bit_offset -= off
            f.fifo_offset = width
            width += f.bit_len
            r.fields.append(f)
            f.access = acc
            f.access_bus = bus_acc
            del fields[0]
    n.width = width
    # Create the control/status register.
    r = tree.FifoCSReg()
    r.parent = n
    r.c_prefix = "{}_CSR".format(n.get_c_prefix())
    r.prefix = "{}_csr".format(n.get_hdl_prefix())
    r.name = "FIFO '{}' control/status register".format(n.name)
    n.regs.append(r)
    # Flag bits (full/empty/clear) occupy fixed positions from bit 16 up;
    # off advances per candidate flag even when the flag is absent.
    off = 16
    for flag, name, comment, acc_bus, desc in fifo_fields:
        if flag in n.flags_bus:
            f = tree.Field()
            f.name = "FIFO {}".format(comment)
            f.kind = name
            f.c_prefix = name.upper()
            f.prefix = name
            f.bit_offset = off
            f.bit_len = 1
            f.typ = 'BIT'
            f.access_bus = acc_bus
            f.desc = desc.format(name=n.name)
            r.fields.append(f)
        off += 1
    # Optional occupancy counter in the low bits of the CSR.
    if 'FIFO_COUNT' in n.flags_bus:
        f = tree.Field()
        f.name = "FIFO counter"
        f.kind = "count"
        f.c_prefix = "usedw".upper()
        f.prefix = 'count'
        f.bit_offset = 0
        f.bit_len = n.log2_size
        f.size = f.bit_len
        f.access_bus = 'READ_ONLY'
        f.desc = "Number of data records currently " \
                 "being stored in FIFO '{}'".format(n.name)
        r.fields.append(f)
def build_ordered_regs(n):
    """Create the ordered list of regs on n.ordered_regs.

    Order: regular regs / interrupt containers / fifo containers first,
    then the generated IRQ registers, then the generated FIFO registers,
    and finally the RAMs.
    """
    plain = [r for r in n.regs
             if isinstance(r, (tree.Reg, tree.Irq, tree.Fifo))]
    eic = [r for r in n.regs if isinstance(r, tree.IrqReg)]
    fifo = [r for r in n.regs
            if isinstance(r, (tree.FifoReg, tree.FifoCSReg))]
    rams = [r for r in n.regs if isinstance(r, tree.Ram)]
    n.ordered_regs = plain + eic + fifo + rams
def expand(periph):
    """Create concrete registers for irq controllers and fifos.

    Appends the generated EIC registers and FIFO data/CSR registers to
    periph.regs, then builds periph.ordered_regs.
    """
    # First gather irqs and create the controller.
    irqs = [r for r in periph.regs if isinstance(r, tree.Irq)]
    if irqs:
        periph.regs.extend(expand_irq(irqs, periph))
    # Then expand fifos.  Bug fix: iterate over a snapshot -- the loop body
    # appends the generated FifoReg/FifoCSReg entries to periph.regs, and
    # the original mutated the list while iterating it (it only worked
    # because the appended items are never Fifo instances).
    for fifo in list(periph.regs):
        if isinstance(fifo, tree.Fifo):
            expand_fifo(fifo, periph)
            periph.regs.extend(fifo.regs)
    build_ordered_regs(periph)
| [
"tristan.gingold@cern.ch"
] | tristan.gingold@cern.ch |
13c0dd9d8152a62bf12e5f33056c9bf75a1fefb4 | 5b27b30c831a1d1bb6362b257e465b25aa428998 | /labs/02_basic_datatypes/02_05_convert.py | 33b1422f37b6167492cbbb90c579279015537f8a | [] | no_license | daniel10012/python-fundamentals | 80c6eb0390897860e9d9db41f10a636246b33f18 | 01846b517d9deaa905b4aef69ee93f54732eaa76 | refs/heads/master | 2020-04-23T05:38:35.461010 | 2019-02-27T15:11:40 | 2019-02-27T15:11:40 | 170,946,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py |
1) float(x)
2) int(x)
3) x = 8.848
y = 2
z = int(x//y)
print(z)
4) x = float(input("imput x"))
y = float(input("imput y"))
z = x*y
print(z)
'''
Demonstrate how to:
1) Convert an int to a float
2) Convert a float to an int
3) Perform floor division using a float and an int.
4) Use two user inputted values to perform multiplication.
Take note of what information is lost when some conversions take place.
''' | [
"danielwegmann@Daniels-MacBook-Pro.local"
] | danielwegmann@Daniels-MacBook-Pro.local |
c63b480efc8952dea8a35e4a7804a0640f25e548 | 85561ce5072c6a97a997eeeb4ed2eb833ce92556 | /Trainer/VisionForPriorBox.py | 83f8d3018141814080b6e6dd4da7ac54b10dbad1 | [] | no_license | monchhichizzq/Mobilenet-SSD-Kitti-Object-Detection | a52e776a6646051e0ad1572994ccc7209dca264d | fe48c959961f8f91d06edc2cee85266cfe936bf1 | refs/heads/master | 2023-05-31T13:04:03.748605 | 2020-10-11T20:32:22 | 2020-10-11T20:32:22 | 298,193,840 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,805 | py | import numpy as np
import pickle
import matplotlib.pyplot as plt
# from SSD_body.anchors import PriorBox, get_mobilenet_anchors, get_vgg16_anchors
def decode_boxes(input_shape, mbox_loc, mbox_priorbox, variances):
    """
    Decode SSD location predictions into boxes on the input image.

    Parameters
    ----------
    input_shape : (width, height) of the network input image.
    mbox_loc : (num_priors, 4) predicted offsets (cx, cy, w, h encoding).
    mbox_priorbox : (num_priors, 4) prior boxes as (xmin, ymin, xmax, ymax).
        # assumes pixel coordinates -- TODO confirm against the caller
    variances : (num_priors, 4) variance scaling terms.

    Returns
    -------
    (num_priors, 4) decoded boxes, clipped to [0, 1] and then rescaled by
    the image width/height on the x/y columns.
    """
    img_width = input_shape[0]
    img_height = input_shape[1]
    # Prior box width/height, normalized by the image size.
    # NOTE(review): centers below are NOT normalized while widths are --
    # this mixed scaling is preserved from the original; verify upstream.
    prior_width = (mbox_priorbox[:, 2] - mbox_priorbox[:, 0]) / img_width
    prior_height = (mbox_priorbox[:, 3] - mbox_priorbox[:, 1]) / img_height
    # Prior box centers.
    prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])
    prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])
    # Offset of the real box center relative to the prior box.
    decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]
    decode_bbox_center_x += prior_center_x
    decode_bbox_center_y = mbox_loc[:, 1] * prior_height * variances[:, 1]
    decode_bbox_center_y += prior_center_y
    # Real box width and height.
    decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2])
    decode_bbox_width *= prior_width
    decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3])
    decode_bbox_height *= prior_height
    # Top-left and bottom-right corners of the real box.
    decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width
    decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height
    decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width
    decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height
    # Stack the corners into (num_priors, 4).
    decode_bbox = np.concatenate((decode_bbox_xmin[:, None],
                                  decode_bbox_ymin[:, None],
                                  decode_bbox_xmax[:, None],
                                  decode_bbox_ymax[:, None]), axis=-1)
    # Clip to [0, 1].
    decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)
    # Rescale to the original image size.
    # Bug fix: the original scaled ROWS 0-3 (`decode_bbox[0] *= img_width`
    # etc.), which corrupts the first four boxes and raises IndexError for
    # fewer than 4 priors; the x/y COLUMNS are what must be scaled.
    decode_bbox[:, 0] *= img_width
    decode_bbox[:, 2] *= img_width
    decode_bbox[:, 1] *= img_height
    decode_bbox[:, 3] *= img_height
    return decode_bbox
#
class PriorBox():
    """
    Generate SSD prior (default/anchor) boxes for one feature map and
    visualize them with matplotlib.

    Parameters mirror the classic SSD PriorBox layer: img_size is the
    network input (width, height); min_size/max_size control the square
    boxes; aspect_ratios adds rectangular boxes (and their flips when
    ``flip`` is true).
    """
    def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,
                 flip=True, variances=[0.1], clip=True, **kwargs):
        # NOTE(review): variances=[0.1] is a mutable default argument; it is
        # never mutated here, but a tuple default would be safer.
        self.waxis = 1
        self.haxis = 0
        self.img_size = img_size
        if min_size <= 0:
            raise Exception('min_size must be positive.')
        self.min_size = min_size
        self.max_size = max_size
        self.aspect_ratios = [1.0]
        if max_size:
            if max_size < min_size:
                raise Exception('max_size must be greater than min_size.')
            self.aspect_ratios.append(1.0)
        if aspect_ratios:
            for ar in aspect_ratios:
                if ar in self.aspect_ratios:
                    continue
                self.aspect_ratios.append(ar)
                if flip:
                    self.aspect_ratios.append(1.0 / ar)
        self.variances = np.array(variances)
        # NOTE(review): the ``clip`` argument is ignored -- always True.
        self.clip = True

    def compute_output_shape(self, input_shape):
        # One prior per aspect ratio at every feature-map cell; each prior
        # is stored as 8 numbers (4 box coordinates + 4 variances).
        num_priors_ = len(self.aspect_ratios)
        layer_width = input_shape[self.waxis]
        layer_height = input_shape[self.haxis]
        num_boxes = num_priors_ * layer_width * layer_height
        return (input_shape[0], num_boxes, 8)

    def call(self, input_shape, mask=None):
        """
        Build the prior boxes for a feature map of shape (height, width),
        plot them, and return an array of shape (num_boxes, 8):
        normalized (xmin, ymin, xmax, ymax) followed by the 4 variances.
        """
        # Width and height of the incoming feature map, e.g. 3x3.
        layer_width = input_shape[self.waxis]
        layer_height = input_shape[self.haxis]
        # Width and height of the input image, e.g. 300x300.
        img_width = self.img_size[0]
        img_height = self.img_size[1]
        # Prior box widths and heights, one pair per aspect ratio.
        box_widths = []
        box_heights = []
        for ar in self.aspect_ratios:
            if ar == 1 and len(box_widths) == 0:
                box_widths.append(self.min_size)
                box_heights.append(self.min_size)
            elif ar == 1 and len(box_widths) > 0:
                box_widths.append(np.sqrt(self.min_size * self.max_size))
                box_heights.append(np.sqrt(self.min_size * self.max_size))
            elif ar != 1:
                box_widths.append(self.min_size * np.sqrt(ar))
                box_heights.append(self.min_size / np.sqrt(ar))
        print("box_widths:",box_widths)
        print("box_heights:",box_heights)
        # Half-sizes: boxes are expanded around their centers below.
        box_widths = 0.5 * np.array(box_widths)
        box_heights = 0.5 * np.array(box_heights)
        # Step between adjacent cell centers, and the centers themselves.
        step_x = img_width / layer_width
        step_y = img_height / layer_height
        linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,
                           layer_width)
        liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
                           layer_height)
        print("linx:",linx)
        print("liny:",liny)
        centers_x, centers_y = np.meshgrid(linx, liny)
        # Flatten the grid-cell centers to column vectors.
        centers_x = centers_x.reshape(-1, 1)
        centers_y = centers_y.reshape(-1, 1)
        fig = plt.figure(figsize=(img_width/100, img_height/100))
        ax = fig.add_subplot(111)
        plt.ylim(0,img_height)
        plt.xlim(0,img_width)
        plt.scatter(centers_x,centers_y)
        num_priors_ = len(self.aspect_ratios)
        print('priors number', num_priors_)
        # 4 or 6 prior boxes per cell.
        # Each prior box needs the center twice: once to compute the
        # top-left corner and once for the bottom-right corner.
        prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
        print('prior_box_centers', np.shape(prior_boxes), num_priors_)
        prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))
        # prior_boxes: (cells, 4 * num_priors_) -- each center expanded in
        # the four directions for each of the num_priors_ boxes.
        # Top-left and bottom-right corners of every prior box.
        prior_boxes[:, ::4] -= box_widths
        prior_boxes[:, 1::4] -= box_heights
        prior_boxes[:, 2::4] += box_widths
        prior_boxes[:, 3::4] += box_heights
        prod_prior = int(np.prod(np.shape(prior_boxes))/4)
        # NOTE(review): random offsets are decoded here, presumably only to
        # visualize jittered boxes -- confirm this is intentional.
        new = decode_boxes(input_shape=self.img_size,
                           mbox_loc=np.random.randn(prod_prior,4),
                           mbox_priorbox=prior_boxes.reshape([prod_prior,4]),
                           variances=np.tile(np.expand_dims(self.variances,axis=0),prod_prior))
        prior_boxes = new.reshape([np.shape(prior_boxes)[0],-1])
        print('prior_box_centers', np.shape(prior_boxes))
        print('box_widths', np.shape(box_widths))
        print('box_heights', np.shape(box_heights))
        # Draw the rectangles stored in the row for grid cell index c;
        # index lookups past the row's end are silently skipped.
        c = 4
        for i in range(int(np.shape(prior_boxes)[1])):
            try:
                rect = plt.Rectangle([prior_boxes[c, i], prior_boxes[c, 1 + i]], box_widths[int(i/4)] * 2, box_heights[int(i/4)] * 2,
                                     color="r", fill=False)
                ax.add_patch(rect)
            except (IndexError):
                pass
        plt.show()
        # Normalize to fractional [0, 1] coordinates.
        prior_boxes[:, ::2] /= img_width
        prior_boxes[:, 1::2] /= img_height
        prior_boxes = prior_boxes.reshape(-1, 4)
        prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
        # Append the variances (broadcast a scalar, or tile a 4-vector).
        num_boxes = len(prior_boxes)
        if len(self.variances) == 1:
            variances = np.ones((num_boxes, 4)) * self.variances[0]
        elif len(self.variances) == 4:
            variances = np.tile(self.variances, (num_boxes, 1))
        else:
            raise Exception('Must provide one or four variances.')
        prior_boxes = np.concatenate((prior_boxes, variances), axis=1)
        return prior_boxes
if __name__ == '__main__':
net = {}
#-----------------------将提取到的主干特征进行处理---------------------------#
img_size = (480,160)
# img_size = (300, 300)
# img_size, min_size, max_size=None, aspect_ratios=None, flip=True, variances=[0.1], clip=True
# (11,11), (14, 37), (20, 15), (37, 23), (57, 42), (111, 74)
# (7, 21), (9, 9), (14, 13)
print('\nconv4_3_norm_mbox_priorbox 20,60')
priorbox = PriorBox(img_size, 10.0, max_size=21.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv4_3_norm_mbox_priorbox')
net['conv4_3_norm_mbox_priorbox'] = priorbox.call([20, 60])
# (14, 37), (17, 16), (25, 12), (28, 14)
print('\nfc7_mbox_priorbox 10,30')
priorbox = PriorBox(img_size, 21.0, max_size=45.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='fc7_mbox_priorbox')
net['fc7_mbox_priorbox'] = priorbox.call([10, 30])
# (30, 70), (41, 23), (56, 34)
print('\nconv6_2_mbox_priorbox 5,15')
priorbox = PriorBox(img_size, 45.0, max_size=99.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv6_2_mbox_priorbox')
net['conv6_2_mbox_priorbox'] = priorbox.call([5, 15])
# (77, 53), (127, 77)
print('\nconv7_2_mbox_priorbox 3,8')
priorbox = PriorBox(img_size, 99.0, max_size=153.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv7_2_mbox_priorbox')
net['conv7_2_mbox_priorbox'] = priorbox.call([3, 8])
print('\nconv8_2_mbox_priorbox 2,4')
priorbox = PriorBox(img_size, 153.0, max_size=207.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv8_2_mbox_priorbox')
net['conv8_2_mbox_priorbox'] = priorbox.call([2, 4])
print('\nconv9_2_mbox_priorbox 1,1')
priorbox = PriorBox(img_size, 207.0, max_size=261.0, aspect_ratios=[2, 3],
variances=[0.1, 0.1, 0.2, 0.2],
name='conv9_2_mbox_priorbox')
net['conv9_2_mbox_priorbox'] = priorbox.call([1, 1])
#
priorbox = PriorBox(img_size, 261.0, max_size=315.0, aspect_ratios=[2],
variances=[0.1, 0.1, 0.2, 0.2],
name='pool6_mbox_priorbox')
net['pool6_mbox_priorbox'] = priorbox.call([1,1])
#
net['mbox_priorbox'] = np.concatenate([net['conv4_3_norm_mbox_priorbox'],
net['fc7_mbox_priorbox'],
net['conv6_2_mbox_priorbox'],
net['conv7_2_mbox_priorbox'],
net['conv8_2_mbox_priorbox'],
net['pool6_mbox_priorbox']],
axis=0)
print(np.shape(net['mbox_priorbox']))
| [
"zhaoqifeng13816221366@gmail.com"
] | zhaoqifeng13816221366@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.