blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c0ab73bc219a7184f6cbe57fe918ab6749aced2 | 797e98f66262a1f85ee3776028db520155045649 | /tests/player_test.py | a581e3a057edc68737ddc699b84d17f1268f4fab | [] | no_license | saskenuba/oop-casino-practice | f396f18ca512194520be0e1f85ef747b78bc5a28 | 65b9dffe906f45d12f3cc645fe5ceb125779876a | refs/heads/master | 2021-05-13T21:58:09.185386 | 2018-01-29T11:49:03 | 2018-01-29T11:49:03 | 116,476,605 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,923 | py | import unittest
from classes import Wheel, Table
from roulette import RouletteGame
from player import Passenger57, Martingale, SevenReds, PlayerRandom, Player1326
from player import Player1326ZeroWins, Player1326OneWins, Player1326TwoWins
from player import Player1326ThreeWins
from binbuilder import BinBuilder
from exceptions import InvalidBet, PlayerError
from utility import NonRandom
# TODO: Continue on the statistics section
class test_Game(unittest.TestCase):
    """Integration tests running each player strategy against a rigged
    (NonRandom) wheel so every spin outcome -- and therefore the player's
    stake after each cycle -- is predictable."""

    def setUp(self):
        # creating wheel with nonrandom value
        self.notSoRandom = NonRandom()
        self.notSoRandom.setSeed(33)
        self.rouletteWheel = Wheel(self.notSoRandom)
        BinBuilder(self.rouletteWheel)
        self.currentTable = Table()
        self.currentTable.Table(self.rouletteWheel)
        self.currentTable.betMinimum = 5
        self.game = RouletteGame(self.rouletteWheel, self.currentTable)

    def tearDown(self):
        # Drop per-test state so every test starts from a fresh setUp().
        del self.game
        del self.notSoRandom

    def test_Player_Passenger57(self):
        # Passenger57 always bets the same outcome; with seed 33 the stake
        # follows this fixed win/loss pattern.
        playerPassenger = Passenger57(self.currentTable)
        playerPassenger.initialBet = 5
        expectedStake = [205, 210, 215, 210, 205, 200, 205]
        # NOTE(review): range(6) consumes only the first six expected
        # stakes; the seventh entry is never checked -- confirm intended.
        for i in range(6):
            self.game.cycle(playerPassenger, 0)
            self.assertEqual(playerPassenger.stake, expectedStake[i])

    def test_Player_Martingale(self):
        playerMartingale = Martingale(self.currentTable)
        playerMartingale.initialBet = 5
        expectedStake = [205, 210, 215, 210, 200, 180, 220]
        # Expect the player to leave the game for lack of funds.
        for i in range(6):
            self.game.cycle(playerMartingale, 0)
            self.assertEqual(playerMartingale.stake, expectedStake[i])

    def test_Player_SevenReds(self):
        # SevenReds only bets after observing seven reds; the scripted
        # sequence forces the waiting and betting phases.
        playerSevenReds = SevenReds(self.currentTable)
        self.notSoRandom.setCustomSequence(
            [32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 33])
        for i in range(17):
            if i == 8:
                self.assertEqual(playerSevenReds.stake, 190)
            if i == 16:
                self.assertEqual(playerSevenReds.stake, 210)
            self.game.cycle(playerSevenReds, 0)

    def test_Player_PlayerRandom(self):
        playerRandom = PlayerRandom(self.currentTable, self.notSoRandom)
        self.game.cycle(playerRandom, 0)
        # The randomly chosen bet must lie in the bin the wheel produced.
        self.assertIn(playerRandom.favoriteBet,
                      self.rouletteWheel.getBin(self.notSoRandom.value))

    def test_Player_Player1326(self):
        # Drive the 1-3-2-6 betting system through a scripted win/loss
        # sequence and check both the stake and the state-machine state.
        player1326 = Player1326(self.currentTable)
        self.notSoRandom.setCustomSequence(
            [33, 33, 33, 33, 32, 33, 33, 33, 32, 33, 33, 32, 33, 32])
        expectedStake = [
            210, 240, 260, 320, 310, 320, 350, 370, 310, 320, 350, 330, 340,
            310
        ]
        for roll in range(14):
            self.game.cycle(player1326, 0)
            if roll == 3:
                # win whole strategy
                self.assertIsInstance(player1326.state, Player1326ZeroWins)
                self.assertEqual(player1326.stake, expectedStake[roll])
            elif roll == 7:
                # got three wins
                self.assertIsInstance(player1326.state, Player1326ThreeWins)
                self.assertEqual(player1326.stake, expectedStake[roll])
            elif roll == 10:
                # got two wins
                self.assertIsInstance(player1326.state, Player1326TwoWins)
                self.assertEqual(player1326.stake, expectedStake[roll])
            elif roll == 12:
                # got one win
                self.assertIsInstance(player1326.state, Player1326OneWins)
                self.assertEqual(player1326.stake, expectedStake[roll])
            elif roll == 13:
                # got no wins
                self.assertIsInstance(player1326.state, Player1326ZeroWins)
                self.assertEqual(player1326.stake, expectedStake[roll])
| [
"martin@hotmail.com.br"
] | martin@hotmail.com.br |
ed581a13d5d3351cfc0b895f52666c695bf53ec4 | b6c3f3eb034ee9c19914633d1d48a897e92767cb | /Summarizers/forms.py | 6d33280e46944383fcd405244f3f4b064fd7fabd | [] | no_license | Birhant/Document-summarizer | 809420c31c27664c878b900df0965ef21065852e | a8a0270f382d9f8bc323c3042032d178c39f6083 | refs/heads/main | 2023-08-18T19:13:47.464356 | 2021-09-19T21:24:32 | 2021-09-19T21:24:32 | 408,151,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | from Summarizers.supporters.get import preprocessor
from django import forms
from django.forms import widgets
from .models import PickledModel
from django.core.exceptions import ValidationError
import os
def validate(value):
    """Validator ensuring an uploaded file has a supported extension.

    Args:
        value: an uploaded-file object exposing a ``url`` attribute.

    Returns:
        The value unchanged when its extension is allowed.

    Raises:
        ValidationError: when the extension is not in ``valid_types``.
    """
    path = value.url
    valid_types = [".pk"]
    _, base = os.path.split(path)
    name, ext = os.path.splitext(base)
    if ext in valid_types:
        return value
    # Bug fix: the error must be *raised*, not returned -- a returned
    # ValidationError is silently ignored by Django's validation machinery.
    raise ValidationError("The file must be " + str(valid_types))
class UploadModelForm(forms.ModelForm):
    """ModelForm exposing only the ``file`` field of PickledModel."""
    class Meta:
        model = PickledModel
        # NOTE(review): Django conventionally takes a list/tuple here;
        # a set works but ["file"] would be idiomatic -- confirm.
        fields = {"file"}


class UploadFileForm(forms.ModelForm):
    """ModelForm for uploading a raw file into PickledModel.

    NOTE(review): identical to UploadModelForm -- confirm whether the
    duplication is intentional.
    """
    class Meta:
        model = PickledModel
        fields = {"file"}


class UploadForm(forms.Form):
    """Plain form collecting a pickled-model upload plus its metadata
    (name, purpose, default flag and accuracy choice)."""
    file = forms.FileField(allow_empty_file=True, required=False, initial=None)
    name = forms.CharField(max_length=100)
    purpose = forms.ChoiceField(choices=list(PickledModel.purpose_choice))
    default = forms.BooleanField(initial=False, required=False)
    accuracy = forms.ChoiceField(choices=list(PickledModel.accuracy_choice))
| [
"birhancity@gmail.com"
] | birhancity@gmail.com |
597f6e44b90374e56fd32df848bc609cc1e37273 | 733496067584ee32eccc333056c82d60f673f211 | /idfy_rest_client/models/signer_info.py | be68e6b47d5cff31143fcbe749d6914360bfe06d | [
"MIT"
] | permissive | dealflowteam/Idfy | 90ee5fefaa5283ce7dd3bcee72ace4615ffd15d2 | fa3918a6c54ea0eedb9146578645b7eb1755b642 | refs/heads/master | 2020-03-07T09:11:15.410502 | 2018-03-30T08:12:40 | 2018-03-30T08:12:40 | 127,400,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | # -*- coding: utf-8 -*-
"""
idfy_rest_client.models.signer_info
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
import idfy_rest_client.models.mobile
import idfy_rest_client.models.organization_info
class SignerInfo(object):
    """Implementation of the 'SignerInfo' model.

    Attributes:
        first_name (string): The signer's first name.
        last_name (string): The signer's last name.
        email (string): The signer's email address (used for notifications).
        mobile (Mobile): The signer's mobile (used for notifications).
        organization_info (OrganizationInfo): The signer's organization info.
        additional_properties (dict): Any extra keys from the API response.
    """

    # Mapping from Model property names to API property names.
    _names = {
        "first_name":'firstName',
        "last_name":'lastName',
        "email":'email',
        "mobile":'mobile',
        "organization_info":'organizationInfo'
    }

    def __init__(self,
                 first_name=None,
                 last_name=None,
                 email=None,
                 mobile=None,
                 organization_info=None,
                 additional_properties=None):
        """Constructor for the SignerInfo class."""
        # Initialize members of the class.
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.mobile = mobile
        self.organization_info = organization_info

        # Bug fix: the previous default ``additional_properties={}`` was a
        # mutable default argument shared across every instance; create a
        # fresh dict per instance instead.
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dict): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The
            keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary.
        first_name = dictionary.get('firstName')
        last_name = dictionary.get('lastName')
        email = dictionary.get('email')
        mobile = idfy_rest_client.models.mobile.Mobile.from_dictionary(dictionary.get('mobile')) if dictionary.get('mobile') else None
        organization_info = idfy_rest_client.models.organization_info.OrganizationInfo.from_dictionary(dictionary.get('organizationInfo')) if dictionary.get('organizationInfo') else None

        # Clean out expected properties from the dictionary; whatever
        # remains is preserved as additional_properties.
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model.
        return cls(first_name,
                   last_name,
                   email,
                   mobile,
                   organization_info,
                   dictionary)
| [
"runes@unipluss.no"
] | runes@unipluss.no |
ec2324a282e94a826a991a83f03f98d3cb8c334b | 23684ea5e38a73fe3fad19475c0e56f6b2d58e6f | /app.py | 0dcc55885c27bc1d39cfc169ca7bd7ab7fd79109 | [] | no_license | Udaykiran87/LinearRegression_ai4i2020.csv_deployment | 0fdbb44da9493df93bb05567bbdb8298c4c89e11 | 390f770e4a8c305af0492d8e01f35405d054eb90 | refs/heads/main | 2023-07-17T23:11:48.213783 | 2021-09-06T09:59:09 | 2021-09-06T09:59:09 | 403,362,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,731 | py | from flask import Flask, render_template, request
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from pandas_profiling import ProfileReport
import numpy as np
from sklearn.linear_model import LinearRegression
from statsmodels.stats.outliers_influence import variance_inflation_factor
from joblib import Parallel, delayed
import time
import logging
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge,Lasso,RidgeCV,LassoCV, ElasticNet , ElasticNetCV,LinearRegression
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
app = Flask(__name__)
class Linearregression():
    """End-to-end helper for the ai4i2020 dataset: loads the CSV, prunes
    multicollinear features via VIF, scales the remaining features, and
    fits/saves plain, lasso, ridge and elastic-net linear models."""

    def __init__(self, file_path, predicted_col):
        """Store the CSV path and target column and configure logging.

        Parameters
        ----------
        file_path: path to the CSV file with the raw data.
        predicted_col: name of the dependent (target) column.
        """
        self.file_path = file_path
        self.predicted_col = predicted_col
        logging.basicConfig(filename='linear_regression.log', level=logging.DEBUG,
                            format='%(asctime)s:%(levelname)s:%(message)s')
        logging.info('Linearregression class object is created.')

    def load_data(self):
        """Load the CSV at ``self.file_path`` into ``self.original_df``.

        Parse errors are logged rather than raised; on failure
        ``original_df`` is left unset.
        """
        logging.info('Dataset is getting loaded as pandas dataframe.')
        try:
            self.original_df = pd.read_csv(self.file_path)
        except FileNotFoundError:
            logging.error("File not found: exception occured while loading csv as pandas dataframe.")
        except pd.errors.EmptyDataError:
            logging.error("No data: exception occured while loading csv as pandas dataframe.")
        except pd.errors.ParserError:
            # Bug fix: this branch called ``logging.errornt`` (a typo),
            # which would itself raise AttributeError on a parse error.
            logging.error("Parse error: exception occured while loading csv as pandas dataframe.")
        except Exception as e:
            logging.error("{} occured while loading csv as pandas dataframe.".format(str(e)))
    def pandas_profiling(self, output_html):
        """Generate a pandas-profiling report for the loaded dataset and
        save it to *output_html*.

        Parameters
        ----------
        output_html: path of the HTML report file to write.
        """
        logging.info('Pandas profiling report is started.')
        pf = ProfileReport(self.original_df)
        pf.to_widgets()
        pf.to_file(output_html)
        logging.info('Pandas profiling report is finished ans saved inside {}.'.format(output_html))

    def check_NaN(self):
        """Return the total number of NaN cells in ``self.original_df``,
        or None when the count fails."""
        try:
            logging.info('Total number of NaN inside dataset is getting calculated.')
            return self.original_df.isna().sum().sum()
        except Exception as e:
            logging.error("{} occured while calculating total number of NaN inside dataset.".format(str(e)))
            return None

    def view_multicolinearity_by_vif(self):
        """Compute and print the Variance Inflation Factor (VIF) of every
        non-identifier feature to judge multicollinearity.

        Side effects: sets ``self.X_variables`` (candidate feature frame)
        and ``self.vif_data`` (feature/VIF table).
        """
        logging.info('VIF values for all features inside dataset will be calculated.')
        try:
            result = self.original_df.copy()
            # Identifier / categorical columns excluded from the VIF analysis.
            ignore_columns = ["UDI", "Product ID", "Type"]
            X_variables_col = []
            for feature_name in result.columns:
                if feature_name not in ignore_columns:
                    X_variables_col.append(feature_name)
            self.X_variables = result[X_variables_col]
            self.vif_data = pd.DataFrame()
            self.vif_data["feature"] = self.X_variables.columns
            self.vif_data["VIF"] = [variance_inflation_factor(self.X_variables.values, i) for i in
                                    range(len(self.X_variables.columns))]
            print(self.vif_data)
        except Exception as e:
            logging.error("{} occured while calculating VIF values for all features inside dataset.".format(str(e)))

    def drop_multicolinearity_by_vif(self, vif_thresh):
        """Iteratively drop the feature with the highest VIF until every
        remaining VIF is <= *vif_thresh*; the surviving columns are stored
        in ``self.final_df``.

        The dependent feature (``self.predicted_col``) is never dropped.

        Parameters
        ----------
        vif_thresh: VIF value above which a feature is removed.
        """
        logging.info('All features with VIF more than {} will be dropped from the dataset.'.format(vif_thresh))
        try:
            X = self.X_variables
            variables = [X.columns[i] for i in range(X.shape[1])]
            dropped = True
            while dropped:
                dropped = False
                # Recompute the VIF of every surviving column in parallel.
                vif = Parallel(n_jobs=-1, verbose=5)(
                    delayed(variance_inflation_factor)(X[variables].values, ix) for ix in range(len(variables)))
                maxloc = vif.index(max(vif))
                if max(vif) > vif_thresh:
                    if X[variables].columns[maxloc] is not self.predicted_col:
                        logging.info(
                            time.ctime() + ' dropping \'' + X[variables].columns[maxloc] + '\' at index: ' + str(
                                maxloc))
                        variables.pop(maxloc)
                        dropped = True
                        # NOTE(review): when the worst offender *is* the
                        # predicted column the loop ends even if other
                        # features still exceed the threshold -- confirm.
            logging.info('Remaining variables:')
            logging.info([variables])
            self.final_df = X[[i for i in variables]]
        except Exception as e:
            logging.error(
                "{} occured while droping some of the feature from dataset based on vif threshold.".format(str(e)))
    def create_X_Y(self):
        """Split the data into dependent (``self.Y``) and independent
        (``self.X``) frames after the VIF-based feature pruning."""
        logging.info('New dataset is created after reschuffle based on dependent feature')
        try:
            self.Y = self.original_df[[self.predicted_col]]
            feature_name = self.final_df.columns.tolist()
            # NOTE(review): ``final_df`` may still contain the predicted
            # column itself (it is protected from the VIF drop), so the
            # target could leak into X -- confirm intended.
            self.X = self.final_df[feature_name]
        except Exception as e:
            logging.error("{} occured while dataset reschuffle based on dependent feature.".format(str(e)))

    def build_model(self):
        """Fit an ordinary least-squares model on the training split.

        Requires ``standardize_train`` and ``train_test_split`` to have
        been called first (they create ``x_train``/``y_train``).
        """
        logging.info('Linear regression model will be built now.')
        try:
            self.linear = LinearRegression()
            self.model = self.linear.fit(self.x_train, self.y_train)
        except Exception as e:
            logging.error("{} occured while dbuilding linear regression model.".format(str(e)))

    def save_model(self, file_name):
        """Pickle the fitted OLS model to *file_name*.

        Parameters
        ----------
        file_name: destination path for the pickled model.
        """
        logging.info('Save the linear regression model into file: {}.'.format(file_name))
        try:
            pickle.dump(self.model, open(file_name, 'wb'))
        except Exception as e:
            logging.error("{} occured while saving linear regression model.".format(str(e)))

    def calc_accuracy(self):
        """Return the R^2 score of the OLS model on the test split, or
        None when scoring fails."""
        logging.info('Accuracy of the model will be calculated here.')
        try:
            return self.linear.score(self.x_test, self.y_test)
        except Exception as e:
            logging.error("{} occured while calculating accuracy linear regression model.".format(str(e)))
            return None

    def predict(self, test_case):
        """Return the OLS model's prediction for *test_case* (a 2-D array
        of independent-feature values), or None on failure."""
        logging.info('Prediction will be done for the testcase {}.'.format(test_case))
        try:
            return self.linear.predict(test_case)
        except Exception as e:
            logging.error("{} occured while predicting dependent feature.".format(str(e)))
            return None

    def train_test_split(self, test_size, random_state):
        # Split the scaled features / target into train and test sets.
        # NOTE: the bare name below resolves to sklearn's train_test_split
        # imported at module level, not to this method.
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.independent_scaled, self.Y,
                                                                                test_size=test_size,
                                                                                random_state=random_state)

    def adj_r2(self, x, y):
        """Return the adjusted R^2 of the fitted model on (x, y)."""
        r2 = self.linear.score(x, y)
        n = x.shape[0]  # number of observations
        p = x.shape[1]  # number of predictors
        adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
        return adjusted_r2
    def build_lasso_model(self, cv, max_iter):
        """Pick the lasso penalty by cross-validation, then fit a Lasso
        model on the training split.

        Parameters
        ----------
        cv: number of cross-validation folds.
        max_iter: iteration cap for the LassoCV solver.
        """
        # NOTE(review): ``normalize=True`` was removed from scikit-learn
        # estimators in 1.2 -- confirm the pinned sklearn version.
        self.lassocv = LassoCV(alphas=None, cv=cv, max_iter=max_iter, normalize=True)
        self.lassocv.fit(self.x_train, self.y_train)
        self.lasso_lr = Lasso(alpha=self.lassocv.alpha_)
        self.lasso_model = self.lasso_lr.fit(self.x_train, self.y_train)

    def save_lasso_model(self, file_name):
        """Pickle the lasso-regularized model to *file_name*."""
        logging.info('Save lasso regularized linear regression model into file: {}.'.format(file_name))
        try:
            pickle.dump(self.lasso_model, open(file_name, 'wb'))
        except Exception as e:
            logging.error("{} occured while saving lasso regularized linear regression model.".format(str(e)))

    def calc_lasso_accuracy(self):
        """Return the R^2 of the lasso model on the test split, or None
        when scoring fails."""
        logging.info('Accuracy of the lasso regularizd model will be calculated here.')
        try:
            return self.lasso_lr.score(self.x_test, self.y_test)
        except Exception as e:
            logging.error(
                "{} occured while calculating accuracy lasso regularized linear regression model.".format(str(e)))
            return None

    def build_ridge_model(self, cv):
        """Pick the ridge penalty from 50 random candidates in [0, 10) by
        cross-validation, then fit a Ridge model on the training split."""
        self.ridgecv = RidgeCV(alphas=np.random.uniform(0, 10, 50), cv=cv, normalize=True)
        self.ridgecv.fit(self.x_train, self.y_train)
        self.ridge_lr = Ridge(alpha=self.ridgecv.alpha_)
        self.ridge_model = self.ridge_lr.fit(self.x_train, self.y_train)

    def save_ridge_model(self, file_name):
        """Pickle the ridge-regularized model to *file_name*."""
        logging.info('Save ridge regularized linear regression model into file: {}.'.format(file_name))
        try:
            pickle.dump(self.ridge_model, open(file_name, 'wb'))
        except Exception as e:
            logging.error("{} occured while saving ridge regularized linear regression model.".format(str(e)))

    def calc_ridge_accuracy(self):
        """Return the R^2 of the ridge model on the test split, or None
        when scoring fails."""
        logging.info('Accuracy of the ridge regularizd model will be calculated here.')
        try:
            return self.ridge_lr.score(self.x_test, self.y_test)
        except Exception as e:
            logging.error(
                "{} occured while calculating accuracy of ridge regularizd linear regression model.".format(str(e)))
            return None

    def build_elasticnet_model(self, cv):
        """Pick the elastic-net penalty and l1 ratio by cross-validation,
        then fit an ElasticNet model on the training split."""
        self.elastic = ElasticNetCV(alphas=None, cv=cv)
        self.elastic.fit(self.x_train, self.y_train)
        self.elastic_lr = ElasticNet(alpha=self.elastic.alpha_, l1_ratio=self.elastic.l1_ratio_)
        self.elastic_model = self.elastic_lr.fit(self.x_train, self.y_train)

    def save_elasticnet_model(self, file_name):
        """Pickle the elastic-net-regularized model to *file_name*."""
        logging.info('Save elastic regularized linear regression model into file: {}.'.format(file_name))
        try:
            pickle.dump(self.elastic_model, open(file_name, 'wb'))
        except Exception as e:
            logging.error("{} occured while saving elastic regularized linear regression model.".format(str(e)))

    def calc_elasticnet_accuracy(self):
        """Return the R^2 of the elastic-net model on the test split, or
        None when scoring fails."""
        logging.info('Accuracy of the elasticnet regularizd model will be calculated here.')
        try:
            return self.elastic_lr.score(self.x_test, self.y_test)
        except Exception as e:
            logging.error(
                "{} occured while calculating accuracy of elasticnet regularizd linear regression model.".format(
                    str(e)))
            return None

    def standardize_train(self):
        # Fit a StandardScaler on X and keep the scaled matrix used by
        # train_test_split / the model builders.
        self.scaler = StandardScaler()
        self.independent_scaled = self.scaler.fit_transform(self.X)

    def scale_test(self, test_data):
        """Scale *test_data* with the scaler fitted in
        ``standardize_train`` and return the scaled array."""
        scaled_data = self.scaler.transform(test_data)
        return scaled_data
@app.route('/')
def form():
    """Render the feature-input form."""
    return render_template('form.html')


@app.route('/data/', methods=['POST', 'GET'])
def data():
    """Read the submitted machine features and predict air temperature
    with the model matching the submit button that was pressed."""
    if request.method == 'GET':
        return f"The URL /data is accessed directly. Try going to '/form' to submit form"
    if request.method == "POST":
        torque = float(request.form['Torque'])
        tool_wear = float(request.form['Tool_wear'])
        twf = float(request.form['TWF'])
        hdf = float(request.form['HDF'])
        pwf = float(request.form['PWF'])
        osf = float(request.form['OSF'])
        rnf = float(request.form['RNF'])
        if request.form.get("Predict_Air_Temp_using_Linear_Regr"):
            file = 'linear_reg.sav'
        elif request.form.get("Predict_Air_Temp_using_Lasso_Linear_Regr"):
            file = 'lasso_linear_reg.sav'
        elif request.form.get("Predict_Air_Temp_using_Ridge_Linear_Regr"):
            file = 'ridge_linear_reg.sav'
        elif request.form.get("Predict_Air_Temp_using_Elasticnet_Linear_Regr"):
            file = 'elastic_linear_reg.sav'
        else:
            # Bug fix: previously ``file`` stayed unbound here, crashing
            # with UnboundLocalError; fail with an explicit message.
            return "No prediction model selected"
        # Bug fix: close the model file deterministically instead of
        # leaking the handle from ``pickle.load(open(...))``.
        with open(file, 'rb') as model_file:
            saved_model = pickle.load(model_file)
        # NOTE(review): the models were trained on StandardScaler-scaled
        # features, but raw inputs are passed here -- confirm.
        prediction = saved_model.predict([[torque, tool_wear, twf, hdf, pwf, osf, rnf]])
        print('prediction is', prediction)
        return render_template('results.html', prediction=prediction)


@app.route('/profie_report/', methods=['POST', 'GET'])
def profie_report():
    """Serve the previously generated pandas-profiling HTML report."""
    return render_template('ori_df_profiling.html')
if __name__ == '__main__':
    # The one-off training pipeline that used to sit here as commented-out
    # code (load data, pandas profiling, VIF pruning, scaling, splitting,
    # and fitting/saving the plain, lasso, ridge and elastic-net models)
    # was removed as dead code; recover it from version control if the
    # .sav models ever need retraining.
    app.run(host='localhost', port=5000)
"uday.patnaik@gmail.com"
] | uday.patnaik@gmail.com |
5589fa21b174c17f478fe57246aa63d218d815f0 | a58c8ec6f6f95fd9f24e7c326b476aae15e99bca | /NextGuesses.py | 75064507575c237514975b1cd5f9ad1fd1343b4c | [] | no_license | peggypan0411/snake_game | 38a36f17a28a30b0ae8182957e5af59486cff4f1 | 06fb470eaf2d20c4fc798564bff733a18cf15d4c | refs/heads/master | 2022-01-10T19:36:17.132926 | 2018-08-03T13:45:58 | 2018-08-03T13:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,935 | py | import random
import DelineateNetwork
import numpy as np
import FileSettings
import CreateGuesses
import Objective_functions
import Generations
def read_initial_parameters(inputfilename):
    """Parse the [SUBCATCHMENTS] and [SUBAREAS] sections of a SWMM input
    file and return one combined parameter list per subcatchment.

    Side effect: fills the module-global ``subc_names`` with the
    subcatchment names in file order.

    Parameters
    ----------
    inputfilename: path to the SWMM .inp file.

    Returns
    -------
    list of lists: per subcatchment, the three [SUBCATCHMENTS] values
    (columns 4-6) followed by the five [SUBAREAS] values (columns 1-5).
    """
    subc_params = []
    subarea_params = []
    global subc_names
    subc_names = []
    subcatchment_parameters = []
    inputfile = open(inputfilename, 'r')
    for line in inputfile:
        if(line.find("[SUBCATCHMENTS]") != -1):
            line = inputfile.readline()
            # CreateGuesses.count bounds the scan of the section body.
            for i in range(CreateGuesses.count):
                templine = list(line)
                # Skip comment lines, indented lines and short separators.
                if templine[0] == ";" or templine[0] == " " or len(templine) < 10:
                    line = inputfile.readline()
                    continue
                elif (line.find("[") != -1):
                    break  # next section header reached
                else:
                    linesplit = line.split()
                    subc_params.append(linesplit[4:7])
                    subc_names.append(linesplit[0])
                    line = inputfile.readline()
        if (line.find("[SUBAREAS]") != -1):
            line = inputfile.readline()
            for i in range(CreateGuesses.count):
                templine = list(line)
                if templine[0] == ";" or templine[0] == " " or len(templine) < 10:
                    line = inputfile.readline()
                    continue
                elif (line.find("[") != -1):
                    break
                else:
                    linesplit = line.split()
                    subarea_params.append(linesplit[1:6])
                    line = inputfile.readline()
    inputfile.close()
    # Append each subcatchment's subarea values to its subcatchment values
    # (rows are matched by position in the file).
    for i in range(len(subc_params)):
        for j in range(len(subarea_params[i])):
            subc_params[i].append(subarea_params[i][j])
        subcatchment_parameters.append(subc_params[i])
    return(subcatchment_parameters)
#read_initial_parameters(inputfilename)
def transformation_flatten(twoDlistinput):
    """Flatten a two-level list into a single list, preserving order."""
    return [element for row in twoDlistinput for element in row]
def compile_initial_guess(inputfilename):
    """Return the flattened parameter vector of the subcatchments that are
    upstream of the calibration point (per DelineateNetwork).

    Side effect: sets the module-globals ``relevant_subcatchment_indices``
    and ``relevant_subcatchment_parameters`` used by other helpers.
    """
    global relevant_subcatchment_indices, relevant_subcatchment_parameters
    relevant_subcatchment_indices = []
    # Index (into the full subcatchment list) of every upstream subcatchment.
    for allsub in CreateGuesses.subc_names:
        for upstreamsub in DelineateNetwork.list_of_subcatchments:
            if allsub == upstreamsub:
                relevant_subcatchment_indices.append(CreateGuesses.subc_names.index(allsub))
    relevant_subcatchment_parameters = []
    for i in relevant_subcatchment_indices:
        # NOTE(review): read_initial_parameters() re-parses the whole input
        # file once per index; hoisting the call out of the loop would
        # avoid repeated file reads.
        relevant_subcatchment_parameters.append(read_initial_parameters(inputfilename)[i])
    initial_guess_flat = transformation_flatten(relevant_subcatchment_parameters)
    return(initial_guess_flat)
#compile_initial_guess(inputfilename)
def caststringsasfloats(parameterlist):
    """Return the elements of *parameterlist* converted to floats."""
    return [float(value) for value in parameterlist]
def createrandomsetofP(survivinglist):
    """Produce one child parameter vector: crossover of two survivors
    followed by per-parameter mutation.

    Each parameter has a 10% chance of being resampled uniformly from its
    allowed range; ``parameter % 8`` maps a flat position onto the eight
    SWMM subcatchment parameters (imperviousness, width, slope, the two
    Manning's n values, the two depression storages, percent zero storage).
    """
    floatnexttemporaryguess = caststringsasfloats(crossover(survivinglist))
    for parameter in range(len(floatnexttemporaryguess)):
        binary_setter = random.uniform(0, 1)
        if binary_setter > 0.1:
            continue  # no mutation for this parameter
        else:
            if parameter % 8 == 0:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.percentimpervious[0], CreateGuesses.percentimpervious[1])
            elif parameter % 8 == 1:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.width[0], CreateGuesses.width[1])
            elif parameter % 8 == 2:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.slope[0], CreateGuesses.slope[1])
            elif parameter % 8 == 3:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.impervious_n[0], CreateGuesses.impervious_n[1])
            elif parameter % 8 == 4:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.pervious_n[0], CreateGuesses.pervious_n[1])
            elif parameter % 8 == 5:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.impervious_storage[0], CreateGuesses.impervious_storage[1])
            elif parameter % 8 == 6:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.pervious_storage[0], CreateGuesses.pervious_storage[1])
            elif parameter % 8 == 7:
                floatnexttemporaryguess[parameter] = random.uniform(CreateGuesses.percent_zero_storage[0], CreateGuesses.percent_zero_storage[1])
    return(floatnexttemporaryguess)


def fillmatingpool(survivinglist):
    """Binary-tournament selection: repeatedly pit two random candidates
    against each other and keep the one with the lower aggregate
    objective value.

    Side effect: records the never-selected candidates in the
    module-global ``not_selected_list``.
    """
    matingpool = []
    dummylist = [x for x in survivinglist]
    for i in range(FileSettings.geneticdict['population'] + 10):
        choice1 = random.choice(dummylist)
        choice2 = random.choice(dummylist)
        while choice2 == choice1:
            choice2 = random.choice(dummylist)
        # Lower aggregate objective value wins the tournament.
        if Objective_functions.par_aggFunc[survivinglist.index(choice1)] < \
                Objective_functions.par_aggFunc[survivinglist.index(choice2)]:
            matingpool.append(choice1)
            dummylist.remove(choice1)
        else:
            matingpool.append(choice2)
            dummylist.remove(choice2)
    global not_selected_list
    not_selected_list = dummylist
    return(matingpool)


def crossover(survivinglist):
    """Uniform crossover between two randomly drawn survivors.

    The two candidates are re-evaluated with the objective functions; the
    better one then donates parameters into a copy of the worse one with a
    probability controlled by ``crossover_bias``.

    NOTE(review): ``survivinglist.remove(choice1)`` mutates the caller's
    list, shrinking the pool on every call -- confirm intended.
    """
    choice1 = random.choice(survivinglist)
    survivinglist.remove(choice1)
    choice2 = random.choice(survivinglist)
    choices = [choice1, choice2]
    Objective_functions.readobservationfile(FileSettings.settingsdict['observationdatafile'])
    Objective_functions.objectivefunctions(choices, FileSettings.settingsdict['observationdatafile'],
                                           FileSettings.settingsdict['distancefilename'],
                                           FileSettings.settingsdict['root'])
    guesses_Agg = Objective_functions.aggregateFunction()
    # Lower aggregate objective = better guess.
    betterguess = choices[guesses_Agg.index(min(guesses_Agg))]
    worserguess = choices[guesses_Agg.index(max(guesses_Agg))]
    bettertemporaryguess = compile_initial_guess(betterguess)
    worsertemporaryguess = compile_initial_guess(worserguess)
    threshhold = random.uniform(0, FileSettings.geneticdict['crossover_bias'])
    for param in bettertemporaryguess:
        crossover_setter = random.uniform(0, 1)
        if crossover_setter > (threshhold + FileSettings.geneticdict['crossover_bias']):
            continue
        else:
            store = param
            # NOTE(review): list.index(param) returns the *first* position
            # holding this value, so duplicated parameter values always
            # overwrite the first occurrence -- enumerate would be safer.
            worsertemporaryguess[bettertemporaryguess.index(param)] = store
    return(worsertemporaryguess)
def castfloatsasstrings(survivinglist):
    """Generate one mutated child via ``createrandomsetofP`` and return
    its parameters as strings (the format written into the SWMM file)."""
    floattostring = createrandomsetofP(survivinglist)
    guess_strings = []
    # NOTE(review): the loop variable shadows the builtin ``float``.
    for float in floattostring:
        guess_strings.append(str(float))
    return(guess_strings)


def transformation_fatten(oneDlistinput):
    """Reshape a flat parameter list back into a (parameters x
    subcatchments) matrix, filled column by column; dimensions come from
    the module-global ``relevant_subcatchment_parameters``."""
    new_twoDlistoutput = np.zeros((len(relevant_subcatchment_parameters[0]), len(relevant_subcatchment_parameters)))
    row_count = -1
    col_count = 0
    for oneDparameter in oneDlistinput:
        row_count = row_count + 1
        if row_count < len(relevant_subcatchment_parameters[0]):
            new_twoDlistoutput[row_count][col_count] = oneDparameter
        else:
            # Column full: start filling the next subcatchment's column.
            row_count = 0
            col_count = col_count + 1
            new_twoDlistoutput[row_count][col_count] = oneDparameter
    return(new_twoDlistoutput)
def insertguessestoinputfile(inputfilename, trialfile, survivinglist):
guess = transformation_fatten(castfloatsasstrings(survivinglist))
with open(inputfilename, 'r') as swmmput:
contents = swmmput.readlines()
swmmput.seek(0)
for line in swmmput:
if line.find('[SUBCATCHMENTS]') != -1:
for i in range(CreateGuesses.count):
line = swmmput.readline()
linelist = list(line)
if linelist[0] == " " or linelist[0] == ";" or len(linelist) < 10:
continue
elif (line.find('[SUBAREAS]') != -1):
break
else:
for sub in DelineateNetwork.list_of_subcatchments:
templine = contents.index(line)
splitline = contents[templine].split()
if splitline[0] == sub:
splitline[4] = str(guess[0][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[5] = str(guess[1][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[6] = str(guess[2][DelineateNetwork.list_of_subcatchments.index(sub)])
contents[templine] = " ".join(splitline) + "\n"
break
if line.find('[SUBAREAS]') != -1:
for i in range(CreateGuesses.count):
line = swmmput.readline()
linelist = list(line)
if linelist[0] == " " or linelist[0] == ";" or len(linelist) < 10:
continue
elif (line.find('[') != -1):
break
else:
for sub in DelineateNetwork.list_of_subcatchments:
templine = contents.index(line)
splitline = contents[templine].split()
if splitline[0] == sub:
splitline[1] = str(guess[3][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[2] = str(guess[4][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[3] = str(guess[5][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[4] = str(guess[6][DelineateNetwork.list_of_subcatchments.index(sub)])
splitline[5] = str(guess[7][DelineateNetwork.list_of_subcatchments.index(sub)])
contents[templine] = " ".join(splitline) + '\n'
break
with open(trialfile, 'w') as newfile:
for i in range(CreateGuesses.count):
newfile.write(contents[i])
newfile.close()
return
def create_next_generation(inputfilename, filelist, survivinglist):
    """Write one candidate SWMM input file per entry of *filelist*.

    Each trial file is the template *inputfilename* with the surviving
    parameter guesses spliced into its sections.
    """
    index = 0
    while index < len(filelist):
        insertguessestoinputfile(inputfilename, filelist[index], survivinglist)
        index += 1
    return
| [
"noreply@github.com"
] | noreply@github.com |
e569e62df990823b645a87b476a01bd945d7f587 | 241c61be6061690699eb84cec2d72fe6eb91bad9 | /Q2.2.py | a464c381820c2e3058e74e40b1c6af735b00455f | [
"MIT"
] | permissive | leobouts/Merge_join_database_queries | de855fff9fe4a834cfb77b0a875563e86a65b665 | 3e41f75384796d503e968ca79d6b8d5a4915e50e | refs/heads/master | 2022-11-12T14:25:50.730505 | 2020-07-06T13:02:54 | 2020-07-06T13:02:54 | 277,543,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | import csv
def generateNextRowOfRatings():
    """Yield the rows of title.ratings.tsv one at a time (header included)."""
    with open('imdbData/title.ratings.tsv') as ratings:
        yield from csv.reader(ratings, delimiter='\t')
def generateNextRowOfBasics():
    """Yield the rows of title.basics.tsv one at a time (header included)."""
    with open('imdbData/title.basics.tsv') as basics:
        yield from csv.reader(basics, delimiter='\t')
def main():
    """Stream IMDb basics/ratings rows in lockstep and print the average
    rating per start year (years <= 2020 only), in ascending year order.

    NOTE(review): rows are paired positionally, which assumes both TSV files
    list the same titles in the same order -- confirm; a join on the tconst
    column (field 0) would be the robust fix but changes the algorithm.
    """
    generatorForRatings = generateNextRowOfRatings()
    generatorForBasics = generateNextRowOfBasics()
    # Skip the header row of each file.
    next(generatorForRatings)
    next(generatorForBasics)
    dict_years_sum = {}
    dict_years_counter = {}
    dict_years_result = {}
    while True:
        try:
            basics_row = next(generatorForBasics)
            ratings_row = next(generatorForRatings)
        except StopIteration:
            break
        year = basics_row[5]
        raw_rating = ratings_row[1]
        # Reject missing values BEFORE converting: the original called
        # float() first, so a '\N' rating crashed with ValueError and its
        # rating == '\N' test could never be true.
        if year == '\\N' or raw_rating == '\\N':
            continue
        try:
            # Ignore non-valid years (future entries) and malformed numbers.
            if int(year) > 2020:
                continue
            rating = float(raw_rating)
        except ValueError:
            continue
        # Accumulate the per-year sum and count, creating entries lazily.
        if year in dict_years_sum:
            dict_years_sum[year] += rating
            dict_years_counter[year] += 1
        else:
            dict_years_sum[year] = rating
            dict_years_counter[year] = 1
        # Keep the running average up to date.
        dict_years_result[year] = dict_years_sum[year] / dict_years_counter[year]
    for year in sorted(dict_years_sum.keys()):
        print('year:', year, ' average rating:', dict_years_result[year])
if __name__== "__main__":
main() | [
"leobouts@gmail.com"
] | leobouts@gmail.com |
040bdd66365e5a02822e473720f07843ff4c6112 | e5d0bb7241300507941746fe1134e047ee9511a4 | /app.py | 2077d55294351039186a925baffa90f0762f62ce | [
"Apache-2.0"
] | permissive | noelbk/cc4-project1-python27 | d3c99250756978c19a00f6bb9e40b08c48338bcc | 403b7678862dee48b0a5764e048f3377b1b33643 | refs/heads/master | 2020-06-12T19:27:05.715667 | 2016-12-06T23:25:49 | 2016-12-06T23:25:49 | 75,764,857 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | #! /usr/bin/env python
import datetime
from flask import Flask, request, send_from_directory
app = Flask(__name__, static_url_path='')
@app.route('/')
def hello_world():
    """Landing page: a small HTML greeting with the deployment link."""
    values = {
        'now': datetime.datetime.now(),
        'url': "http://cc4-project1-python2-cc4-project1.44fs.preview.openshiftapps.com",
    }
    page = """
<h1>Hello OpenShift</h1>
<p>From noelbk at %(now)s.</p>
<p>Deployed on Openshift at <a href="%(url)s">%(url)s</a>.</p>
<h2>Serving static Files</h2>
<ul>
<li><a href="/static/2014-dragons.html">Static Dragons</a> from <a href="http://js1k.com/">js1k</a>
</ul>
""" % values
    return page
@app.route('/static/<path:path>')
def static_files(path):
    """Serve a file from the local ``static`` directory.

    The parameter name must stay ``path`` to match the route converter.
    """
    static_dir = 'static'
    return send_from_directory(static_dir, path)
if __name__ == '__main__':
    # Bind on all interfaces; OpenShift routes external traffic to port 8080.
    app.run(host='0.0.0.0', port=8080)
| [
"noel@burton-krahn.com"
] | noel@burton-krahn.com |
11fad38dc34588ed44dd250c8b3bee034cee5107 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03495/s959914177.py | 79888ecf7a1c1e5617d415a8a5f3fbe869a319b8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from collections import Counter
# Read N (element count) and K (number of distinct values allowed to keep).
N,K=map(int,input().split())
A=list(map(int,input().split()))
# Frequency of each distinct value, sorted ascending so the cheapest
# (rarest) values are removed first.
c = Counter(A)
val = sorted(c.values())
if len(val) <= K:
    # Already at most K distinct values: nothing needs rewriting.
    print(0)
    exit()
print(sum(val[:len(val)-K])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e4cc0112a33a78a68c92910a3405e4f59601922f | 80220579951046b30e5873ec42d8a31770a06b71 | /authentication/views.py | 8449872e5f3693eabf19d7e140a115f8d093e02a | [] | no_license | Mohamed2011-bit/hdya-api | 676e39e6ab8a2e50b26383ba06acc43274ef326e | fa213c36c6f88702cc6afd8d7f63c1d7bfc19956 | refs/heads/master | 2023-02-09T04:07:02.144404 | 2021-01-05T12:18:51 | 2021-01-05T12:18:51 | 326,989,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
from rest_framework import viewsets
from .models import User
from .serializers import UserSerializer
# class UserViewSet(viewsets.ReadOnlyModelViewSet):
# queryset = User.objects.all()
# serializer_class = UserSerializer
def password_reset_page(request):
    """Render the password-reset form with uid/token from the query string.

    Missing query parameters raise KeyError (-> HTTP 500), matching the
    original behaviour.
    """
    return render(request, 'reset_password.html', {
        'uid': request.GET['uid'],
        'token': request.GET['token'],
    })
| [
"egyria2011@gmail.com"
] | egyria2011@gmail.com |
a728af285352f2bc6175af70b01b5f0761313a71 | acf5a0ea75b92eb8d082f04961a7646d8ccf7b32 | /passpie/database.py | cf67372c689b4ec9ef0d8045fd5348f847a064c1 | [
"MIT"
] | permissive | mauriciovieira/passpie | 6f9c98ba086bfe10a9d2c964c473507feba22586 | bd0f5cca6ce12fc4469f4007199bef7ab3b8980e | refs/heads/master | 2021-01-18T08:56:23.853489 | 2016-01-26T07:03:26 | 2016-01-26T07:03:26 | 50,439,403 | 0 | 0 | null | 2016-01-26T15:49:43 | 2016-01-26T15:49:43 | null | UTF-8 | Python | false | false | 3,841 | py | from datetime import datetime
import logging
import os
import shutil
from tinydb import TinyDB, Storage, where, Query
import yaml
from .utils import mkdir_open
from .credential import split_fullname, make_fullname
class PasspieStorage(Storage):
    """TinyDB storage backend keeping one YAML file per credential.

    Layout: <path>/<name>/<login>.pass, so every credential document lives
    in a directory named after the site/name it belongs to.
    """
    extension = ".pass"

    def __init__(self, path):
        # *path* is the root directory holding the credential tree.
        super(PasspieStorage, self).__init__()
        self.path = path

    def delete(self, credentials):
        """Remove each credential file, pruning its directory when emptied."""
        for cred in credentials:
            dirname, filename = cred["name"], cred["login"] + self.extension
            credpath = os.path.join(self.path, dirname, filename)
            os.remove(credpath)
            # Drop the per-name directory once its last credential is gone.
            if not os.listdir(os.path.dirname(credpath)):
                shutil.rmtree(os.path.dirname(credpath))

    def read(self):
        """Load every *.pass file under self.path into TinyDB's table dict.

        Element ids are assigned by enumeration order, starting at 1.
        NOTE(review): yaml.load without a Loader is unsafe on untrusted
        files -- yaml.safe_load is the recommended replacement here.
        """
        elements = []
        for rootdir, dirs, files in os.walk(self.path):
            filenames = [f for f in files if f.endswith(self.extension)]
            for filename in filenames:
                docpath = os.path.join(rootdir, filename)
                with open(docpath) as f:
                    elements.append(yaml.load(f.read()))
        return {"_default":
                {idx: elem for idx, elem in enumerate(elements, start=1)}}

    def write(self, data):
        """Persist *data*: delete files no longer present, rewrite the rest."""
        # Anything on disk that is absent from the new table gets removed.
        deleted = [c for c in self.read()["_default"].values()
                   if c not in data["_default"].values()]
        self.delete(deleted)
        for eid, cred in data["_default"].items():
            dirname, filename = cred["name"], cred["login"] + self.extension
            credpath = os.path.join(self.path, dirname, filename)
            # mkdir_open creates intermediate directories before opening.
            with mkdir_open(credpath, "w") as f:
                f.write(yaml.dump(dict(cred), default_flow_style=False))
class Database(TinyDB):
    """Passpie credential database: a TinyDB backed by PasspieStorage."""

    def __init__(self, path, extension='.pass', storage=PasspieStorage):
        self.path = path
        # NOTE(review): mutates the PasspieStorage *class* attribute, so the
        # extension is shared by every Database instance in the process.
        PasspieStorage.extension = extension
        super(Database, self).__init__(self.path, storage=storage)

    def has_keys(self):
        """Return True if a GPG keyring ('.keys') exists in the database dir."""
        return os.path.exists(os.path.join(self.path, '.keys'))

    def credential(self, fullname):
        """Return the single credential matching 'login@name', or None."""
        login, name = split_fullname(fullname)
        return self.get((where("login") == login) & (where("name") == name))

    def add(self, fullname, password, comment):
        """Insert a new credential; returns it, or None when login is empty."""
        login, name = split_fullname(fullname)
        if login is None:
            logging.error('Cannot add credential with empty login. use "@<name>" syntax')
            return None
        credential = dict(fullname=fullname,
                          name=name,
                          login=login,
                          password=password,
                          comment=comment,
                          modified=datetime.now())
        self.insert(credential)
        return credential

    def update(self, fullname, values):
        """Overwrite the credential *fullname* with *values* (refreshing
        its fullname and modification timestamp)."""
        values['fullname'] = make_fullname(values["login"], values["name"])
        values['modified'] = datetime.now()
        self.table().update(values, (where("fullname") == fullname))

    def credentials(self, fullname=None):
        """List credentials, optionally filtered by 'login@name' or '@name',
        sorted by name then login."""
        if fullname:
            login, name = split_fullname(fullname)
            Credential = Query()
            if login is None:
                # '@name' syntax: match every login for that name.
                creds = self.search(Credential.name == name)
            else:
                creds = self.search((Credential.login == login) & (Credential.name == name))
        else:
            creds = self.all()
        return sorted(creds, key=lambda x: x["name"] + x["login"])

    def remove(self, fullname):
        """Delete every credential whose fullname matches exactly."""
        self.table().remove(where('fullname') == fullname)

    def matches(self, regex):
        """Search name, login and comment fields by regular expression."""
        Credential = Query()
        credentials = self.search(
            Credential.name.matches(regex) |
            Credential.login.matches(regex) |
            Credential.comment.matches(regex)
        )
        return sorted(credentials, key=lambda x: x["name"] + x["login"])
| [
"marcwebbie@gmail.com"
] | marcwebbie@gmail.com |
419777ed9481857ad070ceddf25ce35d70a0057d | b477642c2897655ce51987a3866a8b5c6ebcdaba | /NLPCC_WORDSIM/Proc/LR/__init__.py | b72315133f048a4612ebf0decdb88aebc72400cc | [] | no_license | ppsunrise/NLPCC-2016-WordSim | 4ccbda149d24a0258a7d21f247e196e50f0c76cc | 379a66487e6e2369a7354207e29cb10ba88d430a | refs/heads/master | 2020-04-05T19:03:08.170273 | 2016-09-26T11:04:34 | 2016-09-26T11:04:34 | 67,658,256 | 0 | 0 | null | 2016-09-08T01:48:11 | 2016-09-08T01:48:11 | null | UTF-8 | Python | false | false | 107 | py | # encoding=UTF-8
"""
@author: Zeco on
@email: zhancong002@gmail.com
@step:
@function:
"""
| [
"zhangcong002@gmail.com"
] | zhangcong002@gmail.com |
934914f8511d88b19437f2a1f858134949506837 | 2d6b5bf0a3a233ae9183d08981acda8254583c7c | /Unidade4/procura_palavra.py | cb9d00828bc4cbb4e471bf29bd505e3df068ca21 | [] | no_license | RaquelFonseca/P1 | 62a98f1cc92b6d63bb7945cfb55372b8c2a17819 | d89187ae58dacfbec783f51fac734ff4a2be6661 | refs/heads/master | 2020-03-28T21:43:41.093742 | 2017-06-17T13:19:21 | 2017-06-17T13:19:21 | 94,622,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | #coding:utf-8
# Python 2 exercise: check whether the 2-letter substring 'busca' occurs
# inside 'palavra'.
# NOTE(review): only the first two characters of 'busca' are ever compared,
# and palavra[posicao + 1] raises IndexError when the first letter matches
# at the last position -- presumably acceptable for this exercise; confirm.
palavra = 'alomundo'
busca = 'mu'
posicao = 0 # needed to compare against the next letter of the word, without going back to its start
contida = False # verification flag
for index in range(len(palavra)):
    if busca[0] == palavra[index]:
        posicao = index #update the position
        contida = True # update the flag
    if contida == True:
        if busca[1] == palavra[posicao + 1]: #compare the letters
            print busca + ' esta contida em ' + palavra
            break
        else:
            print 'nao esta contida'
    else:
        print 'nao esta contida'
        break
| [
"raquel.fonseca@ccc.ufcg.edu.br"
] | raquel.fonseca@ccc.ufcg.edu.br |
7ec84d7f205c037e07a1896a5ffa3fbe25e8d10c | d03b4c8e4e1eafbfaf19859c045cae8b880a460b | /scripts/tasks_scripts/cpus/generate_data.py | 867635dd34fbd67a63b6e776586fc4a62d93ad0f | [
"MIT"
] | permissive | MTz12/ratatoskr | f77b43080c3ea4ea4ad4298cfe2df2daf115b7bc | d8c117a0c77fd86464ebe1c1717e23c87439f396 | refs/heads/master | 2023-05-27T06:52:57.247309 | 2023-05-16T11:11:15 | 2023-05-16T11:11:15 | 250,216,036 | 0 | 0 | MIT | 2020-03-26T09:35:57 | 2020-03-26T09:35:57 | null | UTF-8 | Python | false | false | 3,146 | py | import xml.etree.ElementTree as ET
from xml.dom import minidom
def prettyPrint(elem):
    """Serialize *elem* (an ElementTree Element) to an indented,
    human-readable XML string, including the XML declaration."""
    raw_bytes = ET.tostring(elem, 'utf-8')
    dom = minidom.parseString(raw_bytes)
    return dom.toprettyxml(indent="    ")
##################################################################################
""" Model Parameters """
# Traffic-model knobs for the generated task graph.
data_types_names = ['image', 'image part', 'feature']
data_type_ix = data_types_names.index('image part') # The index of the data type to be send and received
num_cpus = 6
# One entry per destination choice; must sum to 1 for a proper distribution.
probabilities_values = [0.2, 0.2, 0.2, 0.2, 0.2]
packets_rate = ['1', '20'] # [number of packets, interval]
##################################################################################
""" Root Node """
data = ET.Element('data')
##################################################################################
""" Data Types """
dataTypes = ET.SubElement(data, 'dataTypes')
for i in range(0, len(data_types_names)):
dataType = ET.SubElement(dataTypes, 'dataType')
dataType.set('id', str(i))
name = ET.SubElement(dataType, 'name')
name.set('value', str(data_types_names[i]))
################################################################################
""" Tasks """
tasks = ET.SubElement(data, 'tasks')
for i in range(0, num_cpus):
task = ET.SubElement(tasks, 'task')
task.set('id', str(i))
start = ET.SubElement(task, 'start')
start.set('min', '0')
start.set('max', '0')
duration = ET.SubElement(task, 'duration')
duration.set('min', '1')
duration.set('max', '1')
repeat = ET.SubElement(task, 'repeat')
repeat.set('min', '1')
repeat.set('max', '1')
generates = ET.SubElement(task, 'generates')
dist_list = list(range(0, num_cpus))
dist_list.remove(i)
for j in range(0, len(probabilities_values)):
possibility = ET.SubElement(generates, 'possibility')
possibility.set('id', str(j))
probability = ET.SubElement(possibility, 'probability')
probability.set('value', str(probabilities_values[j]))
destinations = ET.SubElement(possibility, 'destinations')
destination = ET.SubElement(destinations, 'destination')
destination.set('id', str(0))
delay = ET.SubElement(destination, 'delay')
delay.set('min', '0')
delay.set('max', '100')
interval = ET.SubElement(destination, 'interval')
interval.set('min', str(packets_rate[1]))
interval.set('max', str(packets_rate[1]))
count = ET.SubElement(destination, 'count')
count.set('min', str(packets_rate[0]))
count.set('max', str(packets_rate[0]))
d_type = ET.SubElement(destination, 'type')
d_type.set('value', str(data_type_ix))
d_task = ET.SubElement(destination, 'task')
d_task.set('value', str(dist_list[j]))
requires = ET.SubElement(task, 'requires')
################################################################################
""" Write data to xml file """
senders_file = open('data.xml', 'w')
senders_file.write(prettyPrint(data))
senders_file.close()
| [
"jan.joseph@ovgu.de"
] | jan.joseph@ovgu.de |
8f92b88728ac56410ba502ac5efbdf5bfc9b72c8 | c540740aa24064f6adc4fe7640231412632911c8 | /DataPreparation.py | 30d502e42200df28e98aba4722e3de8e4475ca6f | [] | no_license | YunusKaratepe/Voice-Emotion-Analysis | 59d8a63e4f27c2428bccbeb01f6e559065a2d689 | f9678085874af95747707e1b06943ac79b4d1a30 | refs/heads/main | 2023-06-08T18:22:14.424867 | 2021-07-02T08:38:06 | 2021-07-02T08:38:06 | 357,509,623 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 1 19:20:23 2021
@author: ilker
"""
import os
from shutil import copy
def changeName(directory):
    """Prefix every file in the Derivative_Augmentation_128_Order1 folder
    with 'derivativeAug128_order1_' so its origin is encoded in the name."""
    target_dir = directory + r'Augmented_Mel_Spectrograms-20210501T153934Z-001/Augmented_Mel_Spectrograms/Derivative_Augmentation_128_Order1/'
    for entry in os.listdir(target_dir):
        old_path = target_dir + entry
        new_path = target_dir + 'derivativeAug128_order1_' + entry
        os.rename(old_path, new_path)
def setClassDirectory(directory):
    """Copy each unclassified mel-spectrogram into the folder of its emotion.

    File names are expected to carry the RAVDESS emotion code as the
    second-to-last '_'-separated token, e.g. 'xx_03_yy.png' -> '03-happy'.
    Unknown codes print the same warning message as before.
    """
    directory = directory + 'BPDatasetV3/'
    workDirectory = directory + r'MelSpectrogram_128/Unclassified/MelSpectrogramOriginal_Mel128'
    classDirectory = directory + r'MelSpectrogram_128/Divided Classes/MelSpectrogramOriginal_Mel128'
    # Emotion code -> class sub-folder (RAVDESS coding); replaces the
    # original 8-branch elif chain with a single dict lookup.
    class_folders = {
        '01': '01-neutral',
        '02': '02-calm',
        '03': '03-happy',
        '04': '04-sad',
        '05': '05-angry',
        '06': '06-fearful',
        '07': '07-disgust',
        '08': '08-surprised',
    }
    for filename in os.listdir(workDirectory):
        splitted = filename.split('_')
        code = splitted[len(splitted) - 2]
        subdir = class_folders.get(code)
        if subdir is None:
            # Same message as the original else-branch ("there is a problem,
            # no such emotion" in Turkish).
            print('Bir sorun var, boyle bir duygu yok!')
        else:
            copy(workDirectory + '/' + filename, classDirectory + '/' + subdir)
#Emotions (01 = neutral, 02 = calm, 03 = happy, 04 = sad, 05 = angry, 06 = fearful, 07 = disgust, 08 = surprised)
#Main
# Hard-coded local dataset root; adjust per machine.
directory = 'C:/Users/ilker/Desktop/Bp_datasetV3/'
#changeName(directory)
setClassDirectory(directory)
| [
"noreply@github.com"
] | noreply@github.com |
4155f8276b6bce78b013ee04b5130f7cc76ce673 | c55ca3f5fccf24c963209270b03edc774631615c | /main.py | 61387daf95eb5a70ca28bb9cd6e7733955192db9 | [] | no_license | Changjinxing/titleCrawler | 38d081f6a5df84a617752e50b05bc57640b01e9e | 30faaa0f24e7112987b9a0b393682b6b0492222c | refs/heads/master | 2021-01-16T21:50:59.232770 | 2016-07-31T02:44:02 | 2016-07-31T02:44:02 | 64,399,453 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | # coding: utf-8
import threading
import Queue
from spider import Spider
from general import *
import sqlite3
# Choose the Project name
# Python 2 script (uses the Queue module and print statements).
WORK_PATH = './'
DB_PATH = './'
QUEUE_FILE_NAME = 'top-1m.csv'
DB_FILE_NAME = 'test.db'
QUEUE_FILE_PATH = WORK_PATH + QUEUE_FILE_NAME
NUMBER_OF_THREADS = 8
DB_FILE_PATH = DB_PATH + DB_FILE_NAME
# Seed URLs loaded from the CSV; shared by all worker threads via `queue`.
queue_links = file_to_arr(QUEUE_FILE_PATH)
queue = Queue.Queue()
Spider(queue_links)
def create_workers():
    """Start NUMBER_OF_THREADS daemon worker threads.

    Daemon threads die automatically when the main thread exits.
    """
    for _ in range(NUMBER_OF_THREADS):
        worker = threading.Thread(target=work)
        worker.daemon = True
        worker.start()
# Do the next job in the queue
def work():
    """Worker loop: pull a URL off the queue, crawl it, mark the job done.

    Runs forever; relies on daemon-thread shutdown when main exits.
    """
    while True:
        url = queue.get()
        table_name = 'url_title_rel'
        # Spider stores the fetched <title> into the sqlite table.
        title = Spider.crawl_page(threading.current_thread().name, url, DB_FILE_PATH, table_name)
        #print title
        queue.task_done()
def create_jobs():
    """Enqueue every seed link as a job, then block until all are processed."""
    for seed in queue_links:
        queue.put(seed)
    queue.join()
def crawl():
    """Run the crawl, but only when at least one seed link was loaded."""
    if len(queue_links) > 0:
        create_jobs()
# connect the sqlite db
sqlite_db = sqlite3.connect(DB_FILE_PATH)
sqlite_cu = sqlite_db.cursor()
table_name = 'url_title_rel'
# Fails with OperationalError if the table already exists (no IF NOT EXISTS).
sqlite_cu.execute('create table url_title_rel (id integer primary key,url_tag varchar(50),title_name varchar(100) UNIQUE)')
# time.clock: Python 2 wall/CPU timer (removed in Python 3.8).
from time import clock
start_time = clock()
#print start_time
create_workers()
crawl()
finish_time = clock()
print finish_time
#print(finish_time - start_time)
#print sqlite_cu.fetchall() | [
"jinxingbay@163.com"
] | jinxingbay@163.com |
be9eeac0e7d18b2d86a121d7b5c11760f2dcab98 | 7aef9f1f549072c3f005e86ef83807eb457e76f1 | /notes/2017-08-31-polarized-detection/figures/ill-polarization-two.py | 00e763d77d8d3625b04f033f99d79d930a0ed17f | [
"MIT"
] | permissive | talonchandler/dipsim | 3d6b030666d20a3c67d33a6a79e790508faa8fab | 04904871924276fd1662ca15b7224166d271c0d8 | refs/heads/master | 2020-12-31T00:42:12.673766 | 2018-02-03T01:10:33 | 2018-02-03T01:10:33 | 80,641,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,444 | py | from dipsim import multiframe, util
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patches as patches
import os; import time; start = time.time(); print('Running...')
import matplotlib.gridspec as gridspec
# Main input parameters
col_labels = ['Geometry (NA${}_{\\textrm{ill}}$ = 0.2, NA${}_{\\textrm{det}}$ = 0.8\n 3 illumination polarizations)', 'Uncertainty Ellipses', r'$\sigma_{\Omega}$ [sr]', 'Median$\{\sigma_{\Omega}\}$ [sr]', 'MAD$\{\sigma_{\Omega}\}$ [sr]']
fig_labels = ['a)', 'b)', 'c)', 'd)', 'e)']
n_pts = 1000 # Points on sphere
n_pts_sphere = 20000 # Points on sphere
n_grid_pts = 10
inch_fig = 5
dpi = 300
# Setup figure and axes
# Top row: scene, ellipses, sphere + its colorbar; bottom row: two 2D maps.
fig = plt.figure(figsize=(2.2*inch_fig, 2*inch_fig))
gs0 = gridspec.GridSpec(2, 1, wspace=0, hspace=0.2, height_ratios=[0.9,1])
gs_up = gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=gs0[0], width_ratios=[1, 1, 1, 0.06], wspace=0.1)
gs_middle = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[1], width_ratios=[1, 1], wspace=0.4)
gs_middle_left = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_middle[0], width_ratios=[1, 0.05], wspace=0.1)
gs_middle_right = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_middle[1], width_ratios=[1, 0.05], wspace=0.1)
ax0 = plt.subplot(gs_up[0])
ax1 = plt.subplot(gs_up[1])
ax2 = plt.subplot(gs_up[2])
cax2 = plt.subplot(gs_up[3])
ax3 = plt.subplot(gs_middle_left[0])
cax3 = plt.subplot(gs_middle_left[1])
ax4 = plt.subplot(gs_middle_right[0])
cax4 = plt.subplot(gs_middle_right[1])
# Column titles and panel letters above each axes.
for ax, col_label, fig_label in zip([ax0, ax1, ax2, ax3, ax4], col_labels, fig_labels):
    ax.annotate(col_label, xy=(0,0), xytext=(0.5, 1.05), textcoords='axes fraction',
                va='bottom', ha='center', fontsize=14, annotation_clip=False)
    ax.annotate(fig_label, xy=(0,0), xytext=(0, 1.05), textcoords='axes fraction',
                va='bottom', ha='center', fontsize=14, annotation_clip=False)
for ax in [ax0, ax1, ax2, ax3, ax4]:
    ax.tick_params(axis='both', labelsize=14)
for cax in [cax2, cax3, cax4]:
    cax.tick_params(axis='both', labelsize=14)
# Calculate a list of points to sample in region
# Grid: x = number of polarizations (2..10), y = illumination NA bin centers
# in (0, NA_max); n is the sample-medium refractive index (water).
n = 1.33
NA_max = n
x = np.arange(2, 11, 1)
y, step = np.linspace(0, NA_max, num=n_grid_pts, retstep=True, endpoint=False)
y += step/2
pts = np.array(np.meshgrid(x, y)).reshape(2, len(x)*len(y)).T
# Placeholder feasibility filter: currently every grid point is kept.
def is_feasible(pt):
    return True
pts_list = [pt for pt in pts if is_feasible(pt)]
pts = np.array(pts_list).T
# Calculate med and mad for each point
def calc_stats(param):
    """Build a single-view microscope for one (n_pol, ill_NA) grid point and
    return (median, MAD) of its solid-angle uncertainty over the sphere.

    Uses the module-level globals ``n_pts`` and ``n``.
    """
    n_pol = int(param[0])
    ill_na = param[1]
    det_na = 0.8
    scope = multiframe.MultiFrameMicroscope(ill_thetas=[0], det_thetas=[0],
                                            ill_nas=[ill_na], det_nas=[det_na],
                                            ill_types=['wide'], det_types=['lens'],
                                            colors=['(1,0,0)'], n_frames=n_pol,
                                            n_pts=n_pts, max_photons=4000/n_pol, n_samp=n)
    scope.calc_estimation_stats()
    uncert = scope.sa_uncert
    median_val = np.median(uncert)
    mad_val = np.median(np.abs(uncert - median_val))
    return median_val, mad_val
med = []
mad = []
# One microscope simulation per grid point; collect median and MAD.
# NOTE(review): the loop variable `x` shadows the earlier grid array `x`;
# harmless because the grid `x` is not used again, but worth renaming.
for i, pt in enumerate(pts.T):
    print('Calculating microscope '+str(i+1)+'/'+str(pts.shape[1]))
    x = calc_stats(pt)
    med.append(x[0])
    mad.append(x[1])
# Plot 2D regions
def plot_2d_regions(ax, cax, pts, data, special_pt=(-1,-1)):
    """Draw one colored-rectangle map of *data* over the (n_pol, NA) grid.

    *pts* is the 2xN grid (row 0: polarization count, row 1: ill. NA),
    *special_pt* marks one grid point with a black 'x'.  Uses the globals
    fig, pts_list, y and NA_max defined at module level.
    """
    # Set y ticks
    # NOTE(review): FuncFormatter/FixedLocator are imported but never used.
    from matplotlib.ticker import FuncFormatter, FixedLocator
    # Annotation
    def my_annotate(ax, annotation, xy, fontsize=9, rotation=0):
        # Helper: place text in axes-fraction coordinates, above patches.
        ax.annotate(annotation, xy=(0,0), xytext=xy, textcoords='axes fraction',
                    va='center', ha='center', fontsize=fontsize,
                    annotation_clip=False, rotation=rotation, zorder=13)
    my_annotate(ax, '\# of illumination polarizations', (0.5, -0.13), fontsize=14)
    my_annotate(ax, 'NA${}_{\\textrm{ill}}$', (-0.13, 0.5), fontsize=14, rotation=90)
    # Calculate colors
    color_map='coolwarm'
    color_norm='log'
    color_min=1e-4
    color_max=1e1
    if color_norm == 'linear':
        norm = matplotlib.colors.Normalize(vmin=color_min, vmax=color_max)
    elif color_norm == 'log':
        norm = matplotlib.colors.LogNorm(vmin=color_min, vmax=color_max)
    elif color_norm == 'linlog':
        # NOTE(review): dead branch -- `linthresh` is undefined here and
        # would raise NameError if color_norm were ever set to 'linlog'.
        norm = matplotlib.colors.SymLogNorm(linthresh=linthresh, vmin=-color_max, vmax=color_max)
    elif color_norm == 'power':
        # NOTE(review): dead branch -- `gamma` is likewise undefined.
        norm = matplotlib.colors.PowerNorm(gamma=gamma, vmin=data.min(), vmax=data.max())
    norm_data = norm(data).data
    norm_data2 = np.expand_dims(norm_data, 1)
    cmap = matplotlib.cm.get_cmap(color_map)
    colors = np.apply_along_axis(cmap, 1, norm_data2)
    # Plot scatter for colorbar
    # Invisible scatter (s=0): exists only to feed the colorbar mapping.
    sc = ax.scatter(pts[0,:], pts[1,:], c=data, s=0, cmap=cmap, norm=norm,
                    marker='s', lw=0)
    ax.plot(special_pt[0], special_pt[1], 'kx', markersize=5, zorder=15)
    # Plot patches
    # One rectangle per grid cell, colored by the normalized data value.
    width = 1
    height = NA_max/len(y)
    for i, (pt, c) in enumerate(zip(pts_list, colors)):
        ax.add_patch(patches.Rectangle((pt[0] - width/2, pt[1] - height/2), width, height, facecolor=c, edgecolor=c))
    fig.colorbar(sc, cax=cax, orientation='vertical')
    ax.set(xlim=[1.5, 10], ylim=[0, 1.33])
# Plot first two columns
# Single representative microscope (2 polarizations, NA_ill = 0.2) rendered
# at higher sphere resolution for the top-row panels.
n_pol = 2
ill_na = 0.2
det_na = 0.8
exp = multiframe.MultiFrameMicroscope(ill_thetas=[0], det_thetas=[0],
                                      ill_nas=[ill_na], det_nas=[det_na],
                                      ill_types=['wide'], det_types=['lens'],
                                      colors=['(1,0,0)'], n_frames=n_pol,
                                      n_pts=n_pts_sphere, max_photons=4000/n_pol, n_samp=n)
exp.calc_estimation_stats()
# Make scene string
util.draw_scene(exp.scene_string(), my_ax=ax0, dpi=dpi)
util.draw_scene(exp.ellipse_string(n_pts=250), my_ax=ax1, dpi=dpi)
util.plot_sphere(directions=exp.directions, data=exp.sa_uncert,
                 color_norm='log', linthresh=1e-4,
                 color_max=1e1, color_min=1e-4,
                 my_ax=ax2, my_cax=cax2)
# Plots last two columns
plot_2d_regions(ax3, cax3, pts, med, special_pt=(n_pol, ill_na))
plot_2d_regions(ax4, cax4, pts, mad, special_pt=(n_pol, ill_na))
# Label axes and save
print('Saving final figure.')
fig.savefig('ill-polarization-two.pdf', dpi=250)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
| [
"talonchandler@talonchandler.com"
] | talonchandler@talonchandler.com |
9728d3469911e999ed53abd170b3c8608947e880 | caaf9046de59559bb92641c46bb8ab00f731cb46 | /Configuration/Generator/python/Upsilon1SToMuMu_forSTEAM_13TeV_TuneCUETP8M1_cfi.py | eaeffad1236fe5b17d942a6e9bfb79db3a17feaa | [] | no_license | neumeist/cmssw | 7e26ad4a8f96c907c7373291eb8df205055f47f0 | a7061201efe9bc5fa3a69069db037d572eb3f235 | refs/heads/CMSSW_7_4_X | 2020-05-01T06:10:08.692078 | 2015-01-11T22:57:32 | 2015-01-11T22:57:32 | 29,109,257 | 1 | 1 | null | 2015-01-11T22:56:51 | 2015-01-11T22:56:49 | null | UTF-8 | Python | false | false | 3,453 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
source = cms.Source("EmptySource")
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(0.53),
pythiaHepMCVerbosity = cms.untracked.bool(False),
crossSection = cms.untracked.double(9090000.0),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Bottomonium:states(3S1) = 553', # filter on 553 and prevents other onium states decaying to 553, so we should turn the others off
'Bottomonium:O(3S1)[3S1(1)] = 9.28',
'Bottomonium:O(3S1)[3S1(8)] = 0.15',
'Bottomonium:O(3S1)[1S0(8)] = 0.02',
'Bottomonium:O(3S1)[3P0(8)] = 0.02',
'Bottomonium:gg2bbbar(3S1)[3S1(1)]g = on',
'Bottomonium:gg2bbbar(3S1)[3S1(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[3S1(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[3S1(8)]g = on',
'Bottomonium:gg2bbbar(3S1)[1S0(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[1S0(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[1S0(8)]g = on',
'Bottomonium:gg2bbbar(3S1)[3PJ(8)]g = on',
'Bottomonium:qg2bbbar(3S1)[3PJ(8)]q = on',
'Bottomonium:qqbar2bbbar(3S1)[3PJ(8)]g = on',
'553:onMode = off', # ignore cross-section re-weighting (CSAMODE=6) since selecting wanted decay mode
'553:onIfAny = 13',
'PhaseSpace:pTHatMin = 20.',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
oniafilter = cms.EDFilter("PythiaFilter",
Status = cms.untracked.int32(2),
MaxEta = cms.untracked.double(1000.0),
MinEta = cms.untracked.double(-1000.0),
MinPt = cms.untracked.double(0.0),
ParticleID = cms.untracked.int32(553)
)
mumugenfilter = cms.EDFilter("MCParticlePairFilter",
Status = cms.untracked.vint32(1, 1),
MinPt = cms.untracked.vdouble(0.5, 0.5),
MinP = cms.untracked.vdouble(2.7, 2.7),
MaxEta = cms.untracked.vdouble(2.5, 2.5),
MinEta = cms.untracked.vdouble(-2.5, -2.5),
MinInvMass = cms.untracked.double(5.0),
MaxInvMass = cms.untracked.double(20.0),
ParticleCharge = cms.untracked.int32(-1),
ParticleID1 = cms.untracked.vint32(13),
ParticleID2 = cms.untracked.vint32(13)
)
ProductionFilterSequence = cms.Sequence(generator*oniafilter*mumugenfilter)
| [
"you@somedomain.com"
] | you@somedomain.com |
649bb0bf2824bec1e45eafc3fd20ca5859cb78b6 | 6eee7d27c3bd74c705e85ac7f4476ece0dae54cc | /jobplus8-1/migrations/versions/55551f164231_v1_0.py | f89e1c0a2ed5cd3f578613266a34bf2e8190cc43 | [] | no_license | LouPlus/jobplus8-1 | fa90caaba5c61f89e0ddae0fce2d892d4a2b922f | b751d467fb6a7e11a716a43916c8e1d70c7f55fe | refs/heads/master | 2021-09-08T07:23:53.803014 | 2021-09-01T03:36:27 | 2021-09-01T03:36:27 | 152,206,376 | 0 | 4 | null | 2018-10-29T19:14:19 | 2018-10-09T07:23:04 | Python | UTF-8 | Python | false | false | 4,929 | py | """v1.0
Revision ID: 55551f164231
Revises:
Create Date: 2018-10-30 02:34:18.982619
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# down_revision is None: this is the initial migration of the chain.
revision = '55551f164231'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: user, company, job, delivery and the
    user_job many-to-many association table (auto-generated by Alembic)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Accounts (both job seekers and company admins, distinguished by `role`).
    op.create_table('user',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('username', sa.String(length=32), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=False),
    sa.Column('role', sa.SmallInteger(), nullable=True),
    sa.Column('password', sa.String(length=256), nullable=False),
    sa.Column('phonenumber', sa.Text(), nullable=True),
    sa.Column('work_experience', sa.SmallInteger(), nullable=True),
    sa.Column('upload_resume_jobname', sa.String(length=64), nullable=True),
    sa.Column('is_disable', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # Company profile, owned by one user (FK nulled if the user is deleted).
    op.create_table('company',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('url', sa.String(length=512), nullable=False),
    sa.Column('logo', sa.String(length=512), nullable=True),
    sa.Column('about', sa.String(length=1024), nullable=False),
    sa.Column('description', sa.String(length=24), nullable=True),
    sa.Column('location', sa.String(length=64), nullable=True),
    sa.Column('phone', sa.Text(), nullable=True),
    sa.Column('c_email', sa.String(length=24), nullable=False),
    sa.Column('tags', sa.String(length=128), nullable=True),
    sa.Column('stack', sa.String(length=128), nullable=True),
    sa.Column('field', sa.String(length=32), nullable=True),
    sa.Column('financing', sa.String(length=32), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='SET NULL'),
    sa.PrimaryKeyConstraint('id')
    )
    # Job postings; removed together with their company (ondelete CASCADE).
    op.create_table('job',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('jobname', sa.String(length=32), nullable=False),
    sa.Column('description', sa.String(length=128), nullable=True),
    sa.Column('experience_requirement', sa.String(length=32), nullable=True),
    sa.Column('degree_requirement', sa.String(length=32), nullable=True),
    sa.Column('lowest_salary', sa.Integer(), nullable=True),
    sa.Column('highest_salary', sa.Integer(), nullable=True),
    sa.Column('location', sa.String(length=24), nullable=True),
    sa.Column('education', sa.String(length=32), nullable=True),
    sa.Column('job_label', sa.String(length=128), nullable=True),
    sa.Column('is_fulltime', sa.Boolean(), nullable=True),
    sa.Column('is_open', sa.Boolean(), nullable=True),
    sa.Column('company_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['company_id'], ['company.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # Resume deliveries (applications); FKs survive deletions as NULLs.
    op.create_table('delivery',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('company_id', sa.Integer(), nullable=True),
    sa.Column('status', sa.SmallInteger(), nullable=True),
    sa.Column('response', sa.String(length=256), nullable=True),
    sa.ForeignKeyConstraint(['company_id'], ['company.id'], ondelete='SET NULL'),
    sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='SET NULL'),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='SET NULL'),
    sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many association between users and jobs (e.g. bookmarks).
    op.create_table('user_job',
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('job_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE')
    )
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_job')
op.drop_table('delivery')
op.drop_table('job')
op.drop_table('company')
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
| [
"noreply@github.com"
] | noreply@github.com |
f8202764eacbf21b84e1afab879c8f6bea7c9820 | ec6f83a3636fdb0d6f2266c56b58ac294eb2a945 | /ntut python/associationRule.py | 5c174b680a9cb0dfb518b5d31898b1cfb5313f2c | [] | no_license | jack20951948/Python-Learning | f65c2aacea6cbe61a8be2539f2959202546adb7d | d683790ba47b73c6360f5f804700c664d40777c9 | refs/heads/main | 2023-06-26T03:43:47.395088 | 2021-07-18T08:00:28 | 2021-07-18T08:00:28 | 387,111,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,601 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
from apyori import apriori
#pip install apriori
from wordcloud import WordCloud
#pip install wordcloud
def testTensorflow():
hello = tf.constant('hello tensorflow!')
sess = tf.Session()
print("hello")
print(sess.run(hello))
#conda install -c conda-forge wordcloud
#pip install wordcloud
def wordCloud():
plt.figure(figsize=(9,6))
data=np.array([
['Milk','Bread','Apple'],
['Milk','Bread'],
['Milk','Bread','Apple', 'Banana'],
['Milk', 'Banana','Rice','Chicken'],
['Apple','Rice','Chicken'],
['Milk','Bread', 'Banana'],
['Rice','Chicken'],
['Bread','Apple', 'Chicken'],
['Bread','Chicken'],
['Apple', 'Banana']])
#convert the array to text
text_data=[]
for i in data:
for j in i:
text_data.append(j)
products=' '.join(map(str, text_data))
print(products)
wordcloud = WordCloud(relative_scaling = 1.0,stopwords = {}).generate(products)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
def draw(df):
plt.style.use('ggplot')
plt.figure(figsize=(9,6))
print(df.iloc[6:19][['items','support']]) # Only get items with two pair sets. They start from index 6 to 19
ar=(df.iloc[6:19]['items'])
G = nx.Graph()
G.add_edges_from(ar)
pos = nx.spring_layout(G)
nx.draw(G, pos, font_size=16, with_labels=False, edge_color='green',node_size=800,node_color=['red','green','blue','cyan','orange','magenta'])
for p in pos:
pos[p][1] += 0.07
nx.draw_networkx_labels(G, pos)
plt.show()
def simple_bar_chart(support,products):
labels=np.array(products)
colors = ['#008000','#808000','#FFFF00','#000000','#FF0000','#00FF00','#0000FF','#008080','#aa22ff','#aa22ff','#dd0022','#ff00cc','#eeaa22','#22bbaa','#C0C0C0']
y_pos = np.arange(len(labels))
x_pos = np.array(support)
plt.barh(y_pos, x_pos, color=colors, align='center' ,edgecolor='green')
plt.yticks(y_pos, labels)
plt.ylabel('Products',fontsize=18)
plt.xlabel('Support',fontsize=18)
plt.title('Consumer Buying Behaviour\n',fontsize=20)
plt.show()
def testApriori_s():
data=np.array([
['Milk','Bread','Apple'],
['Milk','Bread'],
['Milk','Bread','Apple', 'Banana'],
['Milk', 'Banana','Rice','Chicken'],
['Apple','Rice','Chicken'],
['Milk','Bread', 'Banana'],
['Rice','Chicken'],
['Bread','Apple', 'Chicken'],
['Bread','Chicken'],
['Apple', 'Banana']])
for i in data:
print(i)
print("\n\n")
result=list(apriori(data))
df=pd.DataFrame(result)
df.to_csv("appriori_results.csv") #Save to csv formart for detailed view
print(df.head()) # Print the first 5 items
#print(df)
draw(df)
support=df.iloc[0:19]['support']*100
products=df.iloc[0:19]['items']
simple_bar_chart(support,products)
def testApriori():
records = []
store_data = pd.read_csv('e:\\Datasets\\store_data.csv', header=None)
#print(store_data)
print(store_data.head())
#perprocessing
#convert our pandas dataframe into a list of lists
for i in range(0, 7501):
#records.append([str(store_data.values[i,j]) for j in range(0, 20)])
records.append([str(store_data.values[i,j]) for j in range(0, 20) if str(store_data.values[i,j]) != 'nan'])
# remove NaN value
#print(records)
association_rules = apriori(records, min_support=0.0045, min_confidence=0.2, min_lift=3, min_length=2)
#min_length: at least 2 product in the rules
association_results = list(association_rules)
print(len(association_results))
#print(association_results)
print(association_results[0])
for item in association_results:
# first index of the inner list
# Contains base item and add item
pair = item[0]
items = [x for x in pair]
print("Rule: " + items[0] + " -> " + items[1])
#second index of the inner list
print("Support: " + str(item[1]))
#third index of the list located at 0th
#of the third index of the inner list
print("Confidence: " + str(item[2][0][2]))
print("Lift: " + str(item[2][0][3]))
print("=====================================")
def main():
testApriori()
#testApriori_s()
wordCloud()
main() | [
"j20951948@gmail.com"
] | j20951948@gmail.com |
0a3953d0402b818210f35ac3401f274eb0d96b78 | cae8adc520ee71ffd9cfc82418152b4ec63f9302 | /template_wsgi/demo1.py | 98b1b0acbce69b38b641792d1f5dcb3850bfeb56 | [] | no_license | dong-c-git/WSGIServer | 55111c04f4bbefe239949ddaea16c71221b7f795 | 1f0b58977e2a951f3c6dec335854dd9d6e31cdfd | refs/heads/master | 2020-08-01T17:03:30.307962 | 2019-11-09T01:45:30 | 2019-11-09T01:45:30 | 211,054,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | #coding:utf-8
import time
def application(environ,start_response):
status = '200 OK'
response_headers = [('Content-Type','text/html')]
start_response(status,response_headers)
return str(environ)+'==Hello world from a simple WSGI application!-->%s\n'%time.ctime()
| [
"dc111000@hotmail.com"
] | dc111000@hotmail.com |
fd5fb93fd710b6c091ebef0c5ac4ac885bf805fc | dcabc50bb549c3a805009932119a81b415a53baa | /CTCI/Trees and Graphs/list of depth.py | 52c3d9ea36f708512a47b2ce9d37ce9e960f9e88 | [] | no_license | totemw/algorithm | dacf68ac8890a3dbd18b0bc43a10b9a33c7babe5 | 608157956f13fccb663b3a3d888a585dbde78cde | refs/heads/master | 2020-03-18T12:39:16.086385 | 2019-08-21T17:14:26 | 2019-08-21T17:14:26 | 134,736,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | """
Given a binary tree, design an algorithm to create a linked list of all nodes
at each depth
"""
class ListNode:
def __init__(self, val):
self.val = val
self.next = None
# pre-order traversal
def createLevelLinkedList(root, lists, level):
if not root:
return None
if len(lists) < level + 1:
lists.append(ListNode(root.val))
else:
lists[level].next = ListNode(root.val)
createLevelLinkedList(root.left, lists, level + 1)
createLevelLinkedList(root.right, lists, level + 1)
def createLevelLinkedList(root):
lists = []
createLevelLinkedList(root, lists, 0)
return lists
| [
"wyc980402@icloud.com"
] | wyc980402@icloud.com |
3137fe863cec39729918fb311670ec545849db7d | 17568ab855952cb51e9c7aca6d55eec585b596b5 | /mainoeuvres/apps.py | 0df8a50826151bce44c5dccfee4eefb108b31b1e | [] | no_license | kherox/gestion | d5db3fe94bd3521b3ea4d79059835ec97b587715 | fcdfb4cc5dcfce2155fc5d0327db9d2d906397a9 | refs/heads/master | 2020-03-24T12:08:47.938460 | 2018-07-28T19:35:35 | 2018-07-28T19:35:35 | 142,704,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class MainoeuvresConfig(AppConfig):
name = 'mainoeuvres'
| [
"deniskakou7@gmail.com"
] | deniskakou7@gmail.com |
16673573c540a7ba9280ab049455b09d684c3340 | a0fcabe9be3a63cb0b3129c193603e41b23541b9 | /Panels/ModePanel.py | 16a932ca3a077a5785f3d6427fe990b2bd0479ae | [] | no_license | AYWG/awake_bpm_daqcp | 6593b85d7dac3284ca148557dfaf1fc3cc4cb401 | cf6efacc3629f591900b4b68965263de47a01178 | refs/heads/master | 2021-03-24T13:51:56.160740 | 2018-01-07T21:59:28 | 2018-01-07T21:59:28 | 84,608,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,469 | py | import wx
class ModePanel(wx.Panel):
"""
Panel for setting the BPM mode
"""
def __init__(self, parent, title, data_processor):
# scrolled.ScrolledPanel.__init__(self, parent=parent, id=wx.ID_ANY)
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
self.data_processor = data_processor
self.btn_update_mode = wx.Button(self, wx.ID_ANY, 'Update')
self.lbl_run = wx.StaticText(self, wx.ID_ANY, 'Run')
self.chk_run = wx.CheckBox(self, wx.ID_ANY)
self.lbl_ext_trig = wx.StaticText(self, wx.ID_ANY, 'External Trigger')
self.chk_ext_trig = wx.CheckBox(self, wx.ID_ANY)
self.lbl_self_trig = wx.StaticText(self, wx.ID_ANY, 'Self Trigger')
self.chk_self_trig = wx.CheckBox(self, wx.ID_ANY)
self.lbl_int_trig = wx.StaticText(self, wx.ID_ANY, 'Internal Trigger')
self.chk_int_trig = wx.CheckBox(self, wx.ID_ANY)
self.lbl_ena_trig_out = wx.StaticText(self, wx.ID_ANY, 'Enable Trigger Output')
self.chk_ena_trig_out = wx.CheckBox(self, wx.ID_ANY)
self.lbl_ena_temp_rd = wx.StaticText(self, wx.ID_ANY, 'Enable Temperature Reading')
self.chk_ena_temp_rd = wx.CheckBox(self, wx.ID_ANY)
self.lbl_bypass_blr = wx.StaticText(self, wx.ID_ANY, 'Bypass BLR')
self.chk_bypass_blr = wx.CheckBox(self, wx.ID_ANY)
self.lbl_raw_adc_rd = wx.StaticText(self, wx.ID_ANY, 'RAW ADC Readout')
self.chk_raw_adc_rd = wx.CheckBox(self, wx.ID_ANY)
self.lbl_blr_rd = wx.StaticText(self, wx.ID_ANY, 'BLR Readout')
self.chk_blr_rd = wx.CheckBox(self, wx.ID_ANY)
self.lbl_afe_cal = wx.StaticText(self, wx.ID_ANY, 'AFE Cal.')
self.chk_afe_cal = wx.CheckBox(self, wx.ID_ANY)
self.lbl_onfly_cal = wx.StaticText(self, wx.ID_ANY, 'On Fly Cal.')
self.chk_onfly_cal = wx.CheckBox(self, wx.ID_ANY)
self.mode_box = wx.StaticBox(self, wx.ID_ANY, title)
self.__set_properties()
self.__do_layout()
self.__attach_events()
self.initialize_controls()
def __set_properties(self):
pass
def __do_layout(self):
SIZER_BORDER_WIDTH = 5
BOX_BORDER_WIDTH = 4
sizer_mode_box = wx.StaticBoxSizer(self.mode_box, wx.VERTICAL)
# Need a sizer for each mode, consisting of a label and a check box
sizer_run = wx.BoxSizer(wx.HORIZONTAL)
sizer_run.Add(self.lbl_run, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_run.Add(self.chk_run, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_ext_trig = wx.BoxSizer(wx.HORIZONTAL)
sizer_ext_trig.Add(self.lbl_ext_trig, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_ext_trig.Add(self.chk_ext_trig, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_self_trig = wx.BoxSizer(wx.HORIZONTAL)
sizer_self_trig.Add(self.lbl_self_trig, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_self_trig.Add(self.chk_self_trig, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_int_trig = wx.BoxSizer(wx.HORIZONTAL)
sizer_int_trig.Add(self.lbl_int_trig, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_int_trig.Add(self.chk_int_trig, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_ena_trig_out = wx.BoxSizer(wx.HORIZONTAL)
sizer_ena_trig_out.Add(self.lbl_ena_trig_out, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_ena_trig_out.Add(self.chk_ena_trig_out, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_ena_temp_rd = wx.BoxSizer(wx.HORIZONTAL)
sizer_ena_temp_rd.Add(self.lbl_ena_temp_rd, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_ena_temp_rd.Add(self.chk_ena_temp_rd, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_bypass_blr = wx.BoxSizer(wx.HORIZONTAL)
sizer_bypass_blr.Add(self.lbl_bypass_blr, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_bypass_blr.Add(self.chk_bypass_blr, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_raw_adc_rd = wx.BoxSizer(wx.HORIZONTAL)
sizer_raw_adc_rd.Add(self.lbl_raw_adc_rd, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_raw_adc_rd.Add(self.chk_raw_adc_rd, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_blr_rd = wx.BoxSizer(wx.HORIZONTAL)
sizer_blr_rd.Add(self.lbl_blr_rd, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_blr_rd.Add(self.chk_blr_rd, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_afe_cal = wx.BoxSizer(wx.HORIZONTAL)
sizer_afe_cal.Add(self.lbl_afe_cal, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_afe_cal.Add(self.chk_afe_cal, 0, ((wx.LEFT | wx.RIGHT) | wx.wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
sizer_onfly_cal = wx.BoxSizer(wx.HORIZONTAL)
sizer_onfly_cal.Add(self.lbl_onfly_cal, 1, ((wx.LEFT | wx.RIGHT) | wx.EXPAND), SIZER_BORDER_WIDTH)
sizer_onfly_cal.Add(self.chk_onfly_cal, 0, ((wx.LEFT | wx.RIGHT) | wx.ALIGN_TOP), SIZER_BORDER_WIDTH)
# Combine update button and sizers for each mode into one vertical sizer
sizer_mode_box.Add(self.btn_update_mode, 0, wx.SHAPED | wx.ALIGN_CENTER, 0)
sizer_mode_box.Add(sizer_run, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_ext_trig, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_self_trig, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_int_trig, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_ena_trig_out, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_ena_temp_rd, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_bypass_blr, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_raw_adc_rd, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_blr_rd, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_afe_cal, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_mode_box.Add(sizer_onfly_cal, 1, wx.ALL | wx.EXPAND, BOX_BORDER_WIDTH)
sizer_main = wx.BoxSizer(wx.HORIZONTAL)
sizer_main.Add(sizer_mode_box, 1, wx.EXPAND, 0)
self.SetSizer(sizer_main)
self.SetAutoLayout(True)
sizer_main.Fit(self)
def __attach_events(self):
self.Bind(wx.EVT_BUTTON, self.OnUpdate, self.btn_update_mode)
def initialize_controls(self):
"""
Initialize the Mode Register Panel with values from the FPGA
"""
mode = self.data_processor.get_mode()
# In order to determine which check boxes to set, we take the given register value
# and work "backwards" from the value of the most significant bit to the least significant bit of the register.
# E.g. the same logic can be used to determine that the value 135 is made up of 128 + 4 + 2 + 1
if mode - 0x4000 >= 0:
mode -= 0x4000
self.chk_onfly_cal.SetValue(True)
if mode - 0x2000 >= 0:
mode -= 0x2000
self.chk_afe_cal.SetValue(True)
if mode - 0x200 >= 0:
mode -= 0x200
self.chk_bypass_blr.SetValue(True)
if mode - 0x100 >= 0:
mode -= 0x100
self.chk_blr_rd.SetValue(True)
if mode - 0x20 >= 0:
mode -= 0x20
self.chk_ena_temp_rd.SetValue(True)
if mode - 0x18 >= 0:
mode -= 0x18
self.chk_ena_trig_out.SetValue(True)
if mode - 0x4 >= 0:
mode -= 0x4
self.chk_int_trig.SetValue(True)
if mode - 0x2 >= 0:
mode -= 0x2
self.chk_self_trig.SetValue(True)
if mode - 0x1 >= 0:
mode -= 0x1
self.chk_run.SetValue(True)
def OnUpdate(self, event):
"""
Event handler for the update button
"""
mode = 0x0
# The value added represents in binary the number that must be written to the control register
# in order to enable that particular mode (same logic in LabVIEW)
if self.chk_run.GetValue():
mode += 0x1
if self.chk_ext_trig.GetValue():
mode += 0x0
# Same logic as in LabVIEW (I'm not entirely sure why this is)
if self.chk_self_trig.GetValue() and not self.chk_ext_trig.GetValue():
mode += 0x2
# Same logic as in LabVIEW (I'm not entirely sure why this is)
if self.chk_int_trig.GetValue() and not self.chk_ext_trig.GetValue():
mode += 0x4
if self.chk_ena_trig_out.GetValue():
mode += 0x18
if self.chk_ena_temp_rd.GetValue():
mode += 0x20
if self.chk_bypass_blr.GetValue():
mode += 0x200
if self.chk_raw_adc_rd.GetValue():
mode += 0x0
if self.chk_blr_rd.GetValue():
mode += 0x100
if self.chk_afe_cal.GetValue():
mode += 0x2000
if self.chk_onfly_cal.GetValue():
mode += 0x4000
self.data_processor.set_mode(mode)
wx.MessageBox("Mode successfully updated", "Success")
| [
"student"
] | student |
10d204fc635044b7344518fc74937b94ee00ade7 | 1d0082893f5fd46dd8962470fbbe880fd251f2a8 | /[6-4] Triangle/triangle.py | fe23cdf067a8824846f04fe351b98ff888da544c | [] | no_license | autorsong/codility | c93d2c53847781ad1af135659150f12da66d80d1 | 545c42d9225320aefcf142c7a6a6927af859600e | refs/heads/master | 2022-11-22T19:14:39.339070 | 2020-07-18T09:02:40 | 2020-07-18T09:02:40 | 272,045,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | def solution(A):
A.sort()
for i in range(len(A) - 2):
if A[i] + A[i + 1] > A[i + 2]:
return 1
return 0
| [
"autorsong@hanyang.ac.kr"
] | autorsong@hanyang.ac.kr |
fbee478ecc1dd477bdebf5a09cd472cb2d0ebc20 | c42a085521cec895fac0021eb1638d6f077eadf7 | /PYTHON_FUNDAMENTALS_May_August_2020/Exersice_Objects_And_Classes_26_06_2020/Storage.py | 88a44072c4d4c8e1b26fab959fea06bf9c937ddf | [] | no_license | vasil-panoff/Python_Fundamentals_SoftUni_May_2020 | f645ce85efa6db047b52a8b63d411d2e5bd5bd9a | daf1a27ff1a4684d51cf875ee0a4c0706a1a4404 | refs/heads/main | 2023-01-06T22:20:30.151249 | 2020-11-03T22:56:24 | 2020-11-03T22:56:24 | 309,818,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | class Storage:
def __init__(self, capacity):
self.capacity = capacity
self.storage = []
def add_product(self, product):
if len(self.storage) < self.capacity:
self.storage.append(product)
def get_products(self):
return self.storage
storage = Storage(4)
storage.add_product("apple")
storage.add_product("banana")
storage.add_product("potato")
storage.add_product("tomato")
storage.add_product("bread")
print(storage.get_products()) | [
"vasil.panov@gmail.com"
] | vasil.panov@gmail.com |
d1dacc66bada8fb4bb99defef2671fabf7268722 | c22b5c68727ef7d351be6db6e4f68965dfd02146 | /blockchain/demo/blockchain.py | 3054280107adae46914daa29c37f16d3262a50fc | [] | no_license | louiewuliyu/mail-by-blockchain | b3baf8a297c78aa0adcba83e5117e079047e0b6d | 5f36f1d0a73027c7db26585ec26b85c36e8f40bb | refs/heads/master | 2020-03-30T15:40:52.549313 | 2018-10-03T07:16:59 | 2018-10-03T07:16:59 | 151,374,742 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,276 | py | import json
import hashlib
import time
import datetime
import email_track
import poplib
class Email_Content(object):
"""docstring for email_content"""
def __init__(self, content):
super(Email_Content, self).__init__()
self.content = content #list #string
self.id = content[:-1]
#self.timestamp = time.time()
self.timestamp = time.asctime(time.localtime(time.time()))
self.hash = None
self.pre_hash = None
self.content_hash = self._hash_content(self.content)
self.payload_hash = self._hash_payload()
self.size = self.get_size #length in bytes
def _hash_content(self, content):
return hashlib.sha256(bytearray(content, 'utf-8')).hexdigest()
def get_size(self, content):
size = 0
for x in content:
size += len(x.encode('utf-8'))
return size
def _hash_payload(self):
return hashlib.sha256(bytearray(str(self.timestamp) + str(self.content) + str(self.id), "utf-8")).hexdigest()
def _hash_message(self):
return hashlib.sha256(bytearray(str(self.pre_hash) + self.payload_hash, "utf-8")).hexdigest()
def link(self, message):
self.pre_hash = message.hash
def seal(self):
self.hash = self._hash_message() #Get the message hash.
def validate(self):
""" Check whether the message is valid or not. """
if self.payload_hash != self._hash_payload():
raise InvalidMessage("Invalid payload hash in message: " + str(self))
if self.hash != self._hash_message():
raise InvalidMessage("Invalid message hash in message: " + str(self))
def __repr__(self):
return 'Message<hash: {}, pre_hash: {}, data: {}>'.format(
self.hash, self.pre_hash, self.content[:20]
)
#block object
class Block(object):
"""docstring for block"""
def __init__(self):
super(Block, self).__init__()
self.messages = []
self.timestamp = None
self.hash = None
self.pre_hash = None
def _hash_block(self):
return hashlib.sha256(bytearray(str(self.pre_hash) + str(self.timestamp) + self.messages[-1].hash, "utf-8")).hexdigest()
def add_message(self, message):
if len(self.messages) > 0:
message.link(self.messages[-1])
message.seal()
message.validate()
self.messages.append(message)
def link(self, block):
self.pre_hash = block.hash
def seal(self):
#self.timestamp = time.time()
self.timestamp = time.asctime(time.localtime(time.time()))
self.hash = self._hash_block()
def validate(self):
""" Validates each message hash, then chain integrity, then the block hash.
Calls each message's validate() method.
If a message fails validation, this method captures the exception and
throws InvalidBlock since an invalid message invalidates the whole block.
"""
for i, msg in enumerate(self.messages):
try:
msg.validate()
if i > 0 and msg.pre_hash != self.messages[i-1].hash:
raise InvalidBlock("Invalid block: Message #{} has invalid message link in block: {}".format(i, str(self)))
except InvalidMessage as ex:
raise InvalidBlock("Invalid block: Message #{} failed validation: {}. In block: {}".format(
i, str(ex), str(self))
)
def __repr__(self):
return 'Block<hash: {}, pre_hash: {}, messages: {}, time: {}>'.format(
self.hash, self.pre_hash, len(self.messages), self.timestamp
)
class Chain(object):
"""docstring for Chain"""
def __init__(self):
super(Chain, self).__init__()
self.chain = []
def add_block(self, block):
#add valid block
if len(self.chain) > 0:
#print('test')
block.pre_hash = self.chain[-1].hash
block.seal()
block.validate()
self.chain.append(block)
def validate(self):
""" Validates each block, in order.
An invalid block invalidates the chain.
"""
for i, block in enumerate(self.simple_chain):
try:
block.validate()
except InvalidBlock as exc:
raise InvalidBlockchain("Invalid blockchain at block number {} caused by: {}".format(i, str(exc)))
return True
def __repr__(self):
return 'SimpleChain<blocks: {}>'.format(len(self.chain))
class InvalidMessage(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args,**kwargs)
class InvalidBlock(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args,**kwargs)
class InvalidBlockchain(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args,**kwargs)
| [
"lywu0420@hotmail.com"
] | lywu0420@hotmail.com |
d93d8835c160041a3e693371468fd5984274927f | d700b9ad1e0b7225871b65ce0dafb27fb408c4bc | /students/k3343/practical_works/Lukina_Anastasia/django_project_Lukina/django_project_Lukina/wsgi.py | 23224686aaf13ce9ef9f14c3e3d72ca836ae159f | [
"MIT"
] | permissive | TonikX/ITMO_ICT_WebProgramming_2020 | a8c573ed467fdf99327777fb3f3bfeee5714667b | ba566c1b3ab04585665c69860b713741906935a0 | refs/heads/master | 2023-01-11T22:10:17.003838 | 2020-10-22T11:22:03 | 2020-10-22T11:22:03 | 248,549,610 | 10 | 71 | MIT | 2023-01-28T14:04:21 | 2020-03-19T16:18:55 | Python | UTF-8 | Python | false | false | 419 | py | """
WSGI config for django_project_Lukina project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_Lukina.settings')
application = get_wsgi_application()
| [
"lukinanastya97@yandex.ru"
] | lukinanastya97@yandex.ru |
0bf6e651d7edbce0ab4bbe11c7a6e44a4acc18bf | 425c273a996ae6db3ed5ef7858e6a06b525270b7 | /tibanna/base.py | 4f9af45f4a8419f5d8144fc8955e3ca8505bb98a | [
"MIT"
] | permissive | 4dn-dcic/tibanna | cc253f3ff5ab07f7f376db077ef5ea8c05077a94 | 1f04d2d359c948c0da6f69614249334991d162db | refs/heads/master | 2023-08-17T00:13:02.288115 | 2023-08-07T19:13:45 | 2023-08-07T19:13:45 | 63,349,223 | 72 | 22 | MIT | 2023-09-12T20:09:09 | 2016-07-14T15:42:51 | Python | UTF-8 | Python | false | false | 964 | py | import copy
class SerializableObject(object):
def __init__(self):
pass
def update(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def as_dict(self):
# use deepcopy so that changing this dictionary later won't affect the SerializableObject
d = copy.deepcopy(self.__dict__)
for k in list(d.keys()):
if d[k] is None:
del d[k]
# recursive serialization
for k, v in d.items():
if isinstance(v, SerializableObject):
d[k] = v.as_dict()
if isinstance(v, list):
for i, e in enumerate(v):
if isinstance(e, SerializableObject):
d[k][i] = e.as_dict()
if isinstance(v, dict):
for l, w in v.items():
if isinstance(w, SerializableObject):
d[k][l] = w.as_dict()
return d
| [
"duplexa@gmail.com"
] | duplexa@gmail.com |
cf9fa0e13b32bff9e7af7e8a1611caa7f641ca2c | af114acdf78efc6473753145adb842cb8ae14b1b | /lab7_venv/bin/easy_install | 9fa289081a0d921aca57e42877fa7ecae9cbf4b4 | [] | no_license | ahmedt26/CS1XA3 | b2ae639fee2c53e1e25870cad3005244b1f099fa | 85d326591f3bf1e7dc4281a78475b13717fdae00 | refs/heads/master | 2021-10-09T16:08:33.173283 | 2019-04-28T01:55:32 | 2019-04-28T01:55:32 | 168,189,295 | 0 | 3 | null | 2021-10-01T22:50:14 | 2019-01-29T16:38:08 | Python | UTF-8 | Python | false | false | 261 | #!/home/ahmedt26/CS1XA3/lab7_venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ahmedt26@mcmaster.ca"
] | ahmedt26@mcmaster.ca | |
52edde132baa948f29b8e5355cd8839ec7dac34a | bd024bf20a7e03322c7790a6dcaf0937bad6d7c0 | /backend/app/alembic/versions/116c50c6f071_pk_from_user_table_in_identity_added_to_.py | 48444892c7c35483ea9a79740729dbdbf2f26514 | [] | no_license | hiveonlineaps/devops-task | 4279186b831eac5376294c02875aea4c4124ac75 | a3068d6ae07e6f20b13cfc7c0f47f7cb88b327b1 | refs/heads/main | 2023-08-03T17:27:06.951583 | 2020-12-08T04:48:19 | 2020-12-08T04:48:19 | 405,637,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | """pk from user table in identity added to user table in repulation
Revision ID: 116c50c6f071
Revises: 5ecead3a5fd7
Create Date: 2020-11-18 01:09:47.626126
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '116c50c6f071'
down_revision = '5ecead3a5fd7'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('memberships_user_id_fkey', 'memberships', type_='foreignkey')
#op.create_foreign_key(None, 'memberships', 'user', ['user_id'], ['identity_user_id'])
op.add_column('user', sa.Column('identity_user_id', sa.Integer(), nullable=True))
op.create_index(op.f('ix_user_identity_user_id'), 'user', ['identity_user_id'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_identity_user_id'), table_name='user')
op.drop_column('user', 'identity_user_id')
op.drop_constraint(None, 'memberships', type_='foreignkey')
op.create_foreign_key('memberships_user_id_fkey', 'memberships', 'user', ['user_id'], ['id'])
# ### end Alembic commands ###
| [
"anne.namuli@gmail.com"
] | anne.namuli@gmail.com |
1c15a8f74611c3d902acaada8d3ea3b84236f041 | 8f55625f516526cd1ff4353327dffb2641bd036b | /2. webcam scripts/color_tracking.py | b98580ca506649cd1eb1665c353bdd1915980c4b | [] | no_license | PrateekGoyal18/My-OpenCV-Scripts | 092a700ca085acac6ed31779ee142e92c8e2f82a | 92b826622d57bc0b1c4c20e9e391aa8392586b55 | refs/heads/master | 2022-11-08T04:38:03.371159 | 2020-07-01T13:30:25 | 2020-07-01T13:30:25 | 258,193,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import numpy as np
import time
import cv2
blueLower = np.array([0, 67, 0], dtype="uint8")
blueUpper = np.array([255, 128, 50], dtype="uint8")
camera = cv2.VideoCapture(0)
while True:
(grabbed, frame) = camera.read()
blue = cv2.inRange(frame, blueLower, blueUpper)
blue = cv2.GaussianBlur(blue, (3,3), 0)
(cnts, _) = cv2.findContours(blue.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
cv2.drawContours(frame, [rect], -1, (0, 255, 0), 2)
cv2.imshow("Tracking", frame)
cv2.imshow("Binary", blue)
time.sleep(0.025)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
camera.release()
cv2.destroyAllWindows() | [
"prateekg045@gmail.com"
] | prateekg045@gmail.com |
8f7fa9ee64f3eae13acc71b971a91a617cf3069a | aaf89b226d715bccfa81354402b7dc7f8f7da8e4 | /Snishiki/Ant/初級/データ構造/set.py | 44209f518afe772d3797f6cf748d584ce5358222 | [] | no_license | kyojinatsubori/RoadToRedCoder | 56eec77eaccd1d0cba36dfc7edc8e65d4d311f77 | 72f5330ba8d8fe15a1c3f62a0e1640c3217e6a93 | refs/heads/master | 2021-04-23T21:01:32.425419 | 2020-06-06T08:05:32 | 2020-06-06T08:05:32 | 250,003,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | N = int(input())
d = [0]*N
for i in range(N):
d[i] = int(input())
ds = set(d)
print(len(ds)) | [
"shunj1224@gmail.com"
] | shunj1224@gmail.com |
3d2b1885527a8f369c00f33d381bb06310e21fde | 4314783205aa498038b6060f906d47e92731e290 | /test/functional/rpc_deriveaddresses.py | 21d5708de3558cb2de255e2b2b033101639c7eac | [
"MIT"
] | permissive | mraksoll4/litecoinfinance2 | 411a01360537e5ca097d479a433883441d0e9cb2 | 0e1d3b6d041d2d01559e615ca7f3cdef12146202 | refs/heads/master | 2022-03-03T17:55:47.677736 | 2019-10-30T19:29:15 | 2019-10-30T19:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,789 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the deriveaddresses rpc call."""
from test_framework.test_framework import LitecoinFinanceTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import assert_equal, assert_raises_rpc_error
class DeriveaddressesTest(LitecoinFinanceTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = 1
def run_test(self):
assert_raises_rpc_error(-5, "Invalid descriptor", self.nodes[0].deriveaddresses, "a")
descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)#t6wfjs64"
address = "bcrt1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr34acd5"
assert_equal(self.nodes[0].deriveaddresses(descriptor), [address])
descriptor = descriptor[:-9]
assert_raises_rpc_error(-5, "Invalid descriptor", self.nodes[0].deriveaddresses, descriptor)
descriptor_pubkey = "wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)#s9ga3alw"
address = "bcrt1qjqmxmkpmxt80xz4y3746zgt0q3u3ferr34acd5"
assert_equal(self.nodes[0].deriveaddresses(descriptor_pubkey), [address])
ranged_descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)#kft60nuy"
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, [1, 2]), ["bcrt1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rq4442dy", "bcrt1qpgptk2gvshyl0s9lqshsmx932l9ccsv265tvaq"])
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, 2), [address, "bcrt1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rq4442dy", "bcrt1qpgptk2gvshyl0s9lqshsmx932l9ccsv265tvaq"])
assert_raises_rpc_error(-8, "Range should not be specified for an un-ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"), [0, 2])
assert_raises_rpc_error(-8, "Range must be specified for a ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"))
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), 10000000000)
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [1000000000, 2000000000])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [2, 0])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [-1, 0])
combo_descriptor = descsum_create("combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)")
assert_equal(self.nodes[0].deriveaddresses(combo_descriptor), ["mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", "mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", address, "2NDvEwGfpEqJWfybzpKPHF2XH3jwoQV3D7x"])
hardened_without_privkey_descriptor = descsum_create("wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/1/0)")
assert_raises_rpc_error(-5, "Cannot derive script without private keys", self.nodes[0].deriveaddresses, hardened_without_privkey_descriptor)
bare_multisig_descriptor = descsum_create("multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)")
assert_raises_rpc_error(-5, "Descriptor does not have a corresponding address", self.nodes[0].deriveaddresses, bare_multisig_descriptor)
if __name__ == '__main__':
DeriveaddressesTest().main()
| [
"litecoinfinance@gmail.com"
] | litecoinfinance@gmail.com |
25e8922426db6bf553f76635f9254ef4505ce15f | a0ba425c9dc32b4e2face4228b7252d19b303f6f | /data-collection/PST/_discontinued/manual_balance_dataset.py | 24597ee112394378205f6eb7cd358be44430abb8 | [
"MIT"
] | permissive | pabsan-0/sub-t | a1f05c57cc39a9bd4fc92631027f161da927e6a3 | 7217fdbd3ba73a4d807939f3a2646ac9f4f00fe0 | refs/heads/master | 2023-08-14T20:37:22.654859 | 2021-10-05T15:44:22 | 2021-10-05T15:44:22 | 339,476,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import pandas
import numpy as np
'''
This is a helper script meant for manually extracting a balanced dataset
from a collection of YOLO pictures+annotations.
Requires a prepared csv file!
DISCONTINUED: it is faster to scan the number of instances in a folder an manually move items around
'''
# Output file: one sampled picture name per line.
filename = 'balancedDatasetList_Shuffle.txt'
df = pandas.read_csv('PSTR900-details.csv')
# Partition the annotation index by which class(es) each picture contains.
# Assumes the CSV has 0/1 columns onlyDrill, onlyExtinguisher, onlySurvivor
# and manyItems, plus a picname column -- TODO confirm schema.
onlydrill = df[df.onlyDrill==1]
onlyextinguisher = df[df.onlyExtinguisher==1]
onlysurvivor = df[df.onlySurvivor==1]
manyitems = df[df.manyItems==1]
# NOTE(review): the hard-coded numbers look like "target of 540 instances per
# class minus instances already contributed by multi-item pictures
# (302/460/399)" -- verify against the dataset statistics.
how_many_drill = 540 - 302
how_many_extinguisher = 540 - 460
how_many_survivor = 540 - 399
# Randomly sample the extra single-class pictures needed from each pool
# (np.random.choice samples with replacement by default, so duplicates are
# possible); keep every multi-item picture.
a = list(np.random.choice(onlydrill.picname, how_many_drill))
b = list(np.random.choice(onlyextinguisher.picname, how_many_extinguisher))
c = list(np.random.choice(onlysurvivor.picname, how_many_survivor))
d = list(manyitems.picname)
# Write all selected picture names, one per line.
with open(filename, 'w') as file:
    for namelist in [a,b,c, d]:
        for name in namelist:
            file.write(name + '\n')
| [
"noreply@github.com"
] | noreply@github.com |
a5a63d895a9e68e13d87a4ad0728b7202d87233f | bb0fcb4ddc133d50f8a56097459a772ad5b2f424 | /Phase_3/PacketHandler.py | 07b37b991b90c07a8cd6a48952caff524d1dbc2c | [] | no_license | CPat16/Client_Server | f84990d122a93e16b73e1d6ca5c24bd73581648a | 5e52c477da8f14f6562e1f218130f19da5f1f898 | refs/heads/master | 2020-08-04T15:30:18.696957 | 2019-12-18T18:07:27 | 2019-12-18T18:07:27 | 212,185,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | # This file contains the Packet class which handles packing functions
import struct
class Packet:
    """Network packet with a 2-byte sequence number, a raw payload and a
    2-byte Internet-style (ones'-complement) checksum.

    Wire layout, big-endian: [seq_num:2][data:len(data)][checksum:2].
    """

    def __init__(self, seq_num=-1, data=b''):
        # A negative seq_num marks an "empty" packet intended to be filled
        # in later via pkt_unpack(); no checksum is computed for it.
        self.seq_num = seq_num
        self.data = data
        if seq_num >= 0:
            self.csum = self.checksum(self.seq_num, self.data)
        else:
            self.csum = -1
        # struct format: unsigned short seq, one unsigned byte per payload
        # byte, unsigned short checksum.
        self.fmt = '!H' + 'B' * len(self.data) + 'H'

    def __repr__(self):
        return "\nseq_num: {0}\n csum: {1}\n data: {2}\n".format(
            self.seq_num, self.csum, self.data)

    def pkt_pack(self):
        """Serialize this packet to bytes according to ``self.fmt``."""
        if isinstance(self.data, str):
            data = self.data.encode()
        else:
            data = self.data
        return struct.pack(self.fmt, self.seq_num, *data, self.csum)

    def pkt_unpack(self, packed):
        """Populate this packet's fields from the serialized bytes *packed*."""
        self.seq_num = int.from_bytes(packed[0:2], byteorder='big', signed=False)
        self.data = packed[2:(len(packed) - 2)]
        self.csum = int.from_bytes(packed[(len(packed) - 2):len(packed)],
                                   byteorder='big', signed=False)
        # BUGFIX: refresh the pack format to match the unpacked payload.
        # Previously self.fmt kept the payload length from __init__ (usually
        # empty), so re-packing an unpacked packet raised struct.error.
        self.fmt = '!H' + 'B' * len(self.data) + 'H'

    def carry_around_add(self, a, b):
        # 16-bit addition with end-around carry (ones'-complement add).
        c = a + b
        return (c & 0xffff) + (c >> 16)

    def checksum(self, seq, msg):
        """Return the 16-bit ones'-complement checksum over *seq* and *msg*.

        *msg* may be str (encoded to UTF-8 bytes), bytes/bytearray (converted
        to a big-endian integer) or an int.
        """
        csum = 0
        if isinstance(msg, str):
            msg = msg.encode()
        if isinstance(msg, (bytearray, bytes)):
            msg = int.from_bytes(msg, byteorder='big', signed=False)
        # add sequence number first
        csum = self.carry_around_add(csum, seq)
        # fold the message into the sum 16 bits at a time
        while msg != 0:
            csum = self.carry_around_add(csum, msg & 0xffff)
            msg = msg >> 16
        return ~csum & 0xffff
if __name__ == "__main__":
#msg = b'\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01\x00\x01'
msg = "checksum2"
# int_msg = int.from_bytes(msg, byteorder='big', signed=False)
# print(bin(int_msg))
pkt = Packet(0, msg)
print(pkt.csum, ':', hex(pkt.csum))
""" print(pkt)
packed = pkt.pkt_pack()
print(packed)
new_pkt = Packet()
print("New pkt before:", new_pkt)
new_pkt.pkt_unpack(packed)
print("New pkt unpacked:", new_pkt) """
#unpacked = pkt.pkt_unpack(packed)
# unpacked = struct.unpack('!H', packed)
""" print(int.from_bytes(packed[0:2], byteorder='big', signed=False))
print(packed[2:(len(packed)-2)])
print(int.from_bytes(packed[(len(packed)-2):len(packed)], byteorder='big', signed=False)) """
# print("Message:", new_pkt.data.decode()) | [
"fondu16@gmail.com"
] | fondu16@gmail.com |
fdaa6feee64f198d314c318bec4df6ed4a407efc | e9693a3d566b3a59f08c1a822e01213f48759f0e | /httprunner/parser.py | 43776609d7467f61302be02888e471b2bf74ca1a | [] | no_license | kfusac/MyHttpRunner | 94162f25ae0bddba4099099e0cbe7f247a0bdd17 | 4ffb06246a5c03dbfacaa224561496ee29f2f1ca | refs/heads/master | 2020-03-27T04:17:59.134345 | 2018-08-30T03:02:06 | 2018-08-30T03:02:06 | 145,515,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,309 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
import ast
import os
import re
from httprunner import exceptions, utils, logger
variable_regexp = r"\$([\w_]+)"
function_regexp = r"\$\{([\w_]+\([\$\w\.\-/_ =,]*\))\}"
funciton_regexp_compile = re.compile(r"^([\w_]+)\(([\$\w\.\-/_ =,]*)\)$")
###############################################################################
# expression parser
###############################################################################
def parse_string_value(str_value):
'''
parse string to number if possible
e.g.
"123" => 123
"12.2" => 12.2
"abc" => "abc"
"$var" => "$var"
'''
try:
return ast.literal_eval(str_value)
except ValueError:
return str_value
except SyntaxError:
# e.g. $var, ${func}
return str_value
def extract_variables(content):
'''
extract all variables names from content, which is in format $variable
Args:
content (str): string content
Returns:
list: variables list extracted from string content
Examples:
>>> extract_variables("$variable")
["variable"]
>>> extract_variables("/blog/$postid")
["postid"]
>>> extract_variables("/$var1/$var2")
["var1", "var2]
>>> extract_variables("abc")
[]
'''
try:
return re.findall(variable_regexp, content)
except TypeError:
return []
def extract_functions(content):
'''
extract all functions from string content, which are in format ${func()}
Args:
content (str): string content
Returns:
list: functions list extracted from string content
Examples:
>>> extract_functions("${func(5)}")
["func(5)"]
>>> extract_functions("${func(a=1, b=2)}")
["func(a=1, b=2)"]
>>> extract_functions("/api/1000?_t=${get_timestamp()}")
["get_timestamp()"]
>>> extract_functions("/api/${add(1, 2)}")
["add(1, 2)"]
>>> extract_functions("/api/${add(1 ,2)}?_t=${get_timestamp()}")
["add(1, 2)", "get_timestamp()"]
'''
try:
return re.findall(function_regexp, content)
except TypeError:
return []
def parse_function(content):
'''
parse function name and args from string expression
Args:
content (str): string content
Returns:
dict: function meta dict
{
"func_name":"xxx",
"args":[],
"kwargs":{}
}
Examples:
>>> parse_function("func()")
{'func_name': 'func','args': [],'kwargs': {}}
>>> parse_function("func(5)")
{'func_name': 'func','args': [5],'kwargs': {}}
>>> parse_function("func(1, 2)")
{'func_name': 'func','args': [1, 2],'kwargs': {}}
>>> parse_function("func(a=1, b=2)")
{'func_name': 'func','args': [],'kwargs': {'a': 1, 'b': 2}}
>>> parse_function("func(1, 2, a=3, b=4)")
{'func_name': 'func','args': [1, 2],'kwargs': {'a': 3, 'b': 4}}
'''
matched = funciton_regexp_compile.match(content)
if not matched:
raise exceptions.FunctionNotFound(f'{content} not found!')
function_meta = {"func_name": matched.group(1), "args": [], "kwargs": {}}
args_str = matched.group(2).strip()
if args_str == "":
return function_meta
args_list = args_str.split(',')
for arg in args_list:
arg = arg.strip()
if "=" in arg:
key, value = arg.split("=")
function_meta["kwargs"][key.strip()] = parse_string_value(
value.strip())
else:
function_meta["args"].append(parse_string_value(arg))
return function_meta
def parse_validator(validator):
'''
parse validator, validator maybe in two format
Args:
validator (dict):
format1:
{"check":"status_code","comparator":"eq","expect":201}
{"check":"$resp_body_success","comparator":"eq","expect":True}
format2:
{"eq":["status_code",201]}
{"eq":["$resp_body_success",True]}
Returns:
dict: validator info
{
"check":"status_code",
"expect":201,
"comparator":"eq"
}
'''
if not isinstance(validator, dict):
raise exceptions.ParamError(f'invalid validator: {validator}')
if "check" in validator and len(validator) > 1:
check_item = validator.get("check")
if "expect" in validator:
expect_value = validator.get("expect")
elif "expected" in validator:
expect_value = validator.get("expected")
else:
raise exceptions.ParamError(f"invalid validator: {validator}")
comparator = validator.get("comparator", "eq")
elif len(validator) == 1:
comparator = list(validator.keys())[0]
compare_values = validator[comparator]
if not isinstance(compare_values, list) or len(compare_values) != 2:
raise exceptions.ParamError(f"invalid validator: {validator}")
check_item, expect_value = compare_values
else:
raise exceptions.ParamError(f"invalid validator: {validator}")
return {
"check": check_item,
"expect": expect_value,
"comparator": comparator
}
def parse_parameters(parameters, variables_mapping, functions_mapping):
'''
parse parameters and generate cartesian product
Args:
parameters (list):parameter name and value in list
parameter value may be in three types:
(1) data list: e.g. ['ios/10.1', 'ios/10.2', 'ios/10.3']
(2) call built-in parameterize function, "${parameter(account.csv)}"
(3) call custom function in confcustom.py, ${gen_app_version()}
variables_mapping (dict): variables mapping loaded from confcustom.py
functions_mapping (dict): functions mapping loaded from confcustom.py
Returns:
list: cartesian product list
Examples:
>>> parameters = [
{'user_agent':['ios/10.1', 'ios/10.2', 'ios/10.3']},
{'username-password}: "${parameterize(account.csv)}",
{'app_verison': '${gen_app_version()}'}
]
>>> parse_parameters(parameters)
'''
parsed_parameters_list = []
for parameter in parameters:
parameter_name, parameter_content = list(parameter.items())[0]
parameter_name_list = parameter_name.split('-')
if isinstance(parameter_content, list):
# (1) data list
# e.g. {'app_version':['2.8.5','2.8.6]}
# => [{'app_version':'2.8.5','app_version':'2.8.6'}]
# e.g. {'username-password':[['user1','111111'],['user2','222222']]}
# => [{'username':'user1','password':'111111'},{'username':'user2','password':'222222'}]
parameter_content_list = []
for parameter_item in parameter_content:
if not isinstance(parameter_item, (list, tuple)):
# '2.8.5' => ['2.8.5']
parameter_item = [parameter_item]
# ['app_version'],['2.8.5']=>{'app_version':'2.8.5'}
parameter_content_dict = dict(
zip(parameter_name_list, parameter_item))
parameter_content_list.append(parameter_content_dict)
else:
# (2) & (3)
parsed_parameter_content = parse_data(
parameter_content, variables_mapping, functions_mapping)
if not isinstance(parsed_parameter_content, list):
raise exceptions.ParamError(
f'{parameters} parameters syntax error!')
parameter_content_list = [{
key: parameter_item[key]
for key in parameter_name_list
} for parameter_item in parsed_parameter_content]
parsed_parameters_list.append(parameter_content_list)
return utils.gen_cartesian_product(*parsed_parameters_list)
###############################################################################
# content parser
###############################################################################
def substitute_variables(content, variables_mapping):
'''
subsititute variables in content with variables_mapping
Args:
content (str/dict/list/numeric/bool/type): content to be substituted.
variables_mapping (dict): variables mapping.
Returns:
subsitituted content.
Examples:
>>> content = {
'request': {
'url': '/api/users/$uid',
'headers': {'token': '$token'}
}
}
>>> variables_mapping = {"$uid": 1000}
>>> substitute_variables(content, variables_mapping)
{
'request': {
'url': '/api/users/1000',
'headers': {'token': '$token'}
}
}
'''
if isinstance(content, (list, set, tuple)):
return [
substitute_variables(item, variables_mapping) for item in content
]
if isinstance(content, dict):
return {
substitute_variables(key, variables_mapping): substitute_variables(
value, variables_mapping)
for key, value in content.items()
}
if isinstance(content, str):
# content is in string format
for var, value in variables_mapping.items():
if content == var:
content = value
else:
if not isinstance(value, str):
value = str(value)
content = content.replace(var, value)
return content
def get_mapping_variable(variable_name, variables_mapping):
'''
get variable from variables_mapping
Args:
variable_name (str): specified variable name
variables_mapping (dict): variables mapping
Returns:
mapping variable value.
Raises:
exceptions.VariableNotFound: variable is not found
'''
try:
return variables_mapping[variable_name]
except KeyError:
raise exceptions.VariableNotFound(f'{variable_name} is not found.')
def get_mapping_funciton(function_name, functions_mapping):
'''
get function from functions_mapping, if not found, then try to check if builtin function.
Args:
function_name (str): specified function name
functions_mapping (dict): functions mapping
Returns:
mapping function object
Raises:
exceptions.FunctionNotFound: function is neither defined in confcustom.py nor builtin.
'''
if function_name in functions_mapping:
return functions_mapping[function_name]
try:
# check if built-in functions
item_func = eval(function_name)
if callable(item_func):
return item_func
except (NameError, TypeError):
raise exceptions.FunctionNotFound(f'{function_name} is not found.')
def parse_string_functions(content, variables_mapping, functions_mapping):
'''
parse string content with functions mapping.
Args:
content (str): string content to be parsed.
variables_mapping (dict): variables_mapping
functions_mapping (dict): functions_mapping
Returns:
str: parsed string content
Examples:
>>> content = "abc${add_one(3)}def"
>>> functions_mapping = {"add_one": lambda x: x + 1}
>>> parse_string_functions(content,functions_mapping)
"abc4def
'''
functions_list = extract_functions(content)
for func_content in functions_list:
function_meta = parse_function(func_content)
func_name = function_meta['func_name']
args = function_meta.get("args", [])
kwargs = function_meta.get('kwargs', {})
args = parse_data(args, variables_mapping, functions_mapping)
kwargs = parse_data(kwargs, variables_mapping, functions_mapping)
if func_name in ["parameterize", 'P']:
from httprunner import loader
eval_value = loader.load_csv_file(*args, **kwargs)
else:
func = get_mapping_funciton(func_name, functions_mapping)
eval_value = func(*args, **kwargs)
func_content = "${" + func_content + "}"
if func_content == content:
# content is a function
content = eval_value
else:
# content contains one or many functions
content = content.replace(func_content, str(eval_value), 1)
return content
def parse_string_variables(content, variables_mapping):
'''
parse string content with variables mapping.
Args:
content (str): string content to be parsed.
variables_mapping (dict): variables mappings.
Returns:
str: parsed string content
Examples:
>>> content = '/api/users/$uid"
>>> variables_mapping = {"$uid": 1000}
>>> parse_string_variables(content, variables_mapping)
"/api/users/1000"
'''
variables_list = extract_variables(content)
for variable_name in variables_list:
try:
variable_value = get_mapping_variable(variable_name,
variables_mapping)
except exceptions.VariableNotFound:
content_match = []
for key, value in variables_mapping.items():
if variable_name.startswith(key):
name = key
content_match.append({key: value})
if len(content_match) == 1:
variable_name = name
variable_value = get_mapping_variable(variable_name,
variables_mapping)
else:
err_msg = f'{variable_name} match too many varaibles, {content_match}'
logger.log_error(err_msg)
raise exceptions.VariableNotFound(err_msg)
if f'${variable_name}' == content:
# content is a variable
content = variable_value
else:
# content contains one or several variabels
if not isinstance(variable_value, str):
variable_value = str(variable_value)
content = content.replace(f'${variable_name}', variable_value, 1)
return content
def parse_data(content, variables_mapping=None, functions_mapping=None):
'''
Args:
content (str/dict/list/numeric/bool/type): content to be parsed
variables_mapping (dict): variables mapping.
functions_mapping (dict): functions mapping.
Returns:
parsed content.
Examples:
>>> content = {
'request': {
'url': '/api/users/$uid',
'headers': {'token': '$token'}
}
}
>>> variables_mapping = {"$uid": 1000}
>>> parse_data(content, variables_mapping)
{
'request': {
'url': '/api/users/1000',
'headers': {'token': '$token'}
}
}
'''
if content is None or isinstance(content, (int, float, bool, type)):
return content
if isinstance(content, (list, set, tuple)):
return [
parse_data(item, variables_mapping, functions_mapping)
for item in content
]
if isinstance(content, dict):
return {
parse_data(key, variables_mapping, functions_mapping): parse_data(
value, variables_mapping, functions_mapping)
for key, value in content.items()
}
if isinstance(content, str):
variables_mapping = variables_mapping or []
functions_mapping = functions_mapping or {}
content = content.strip()
# replace functions with evaluated value
content = parse_string_functions(content, variables_mapping,
functions_mapping)
# replace vairables with binding value
content = parse_string_variables(content, variables_mapping)
return content
| [
"kfusac@163.com"
] | kfusac@163.com |
b155c696a2a8dd91263093b7d99c2201926413c7 | e558e99f3403b5931573789d88c2ad47fffe7362 | /sum/two_sum.py | 7a24bce29b647d3f00253a6d4ffa9dfa70458f70 | [] | no_license | gerrycfchang/leetcode-python | a8a3a408381288a34caada8ca68c47c7354459fd | 7fa160362ebb58e7286b490012542baa2d51e5c9 | refs/heads/master | 2021-05-11T00:38:17.925831 | 2018-07-31T14:50:42 | 2018-07-31T14:50:42 | 118,306,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | '''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
'''
class Solution(object):
    """LeetCode "Two Sum": indices of two distinct elements summing to target."""

    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Map each value to its (last) index, then scan for the complement.
        dic = {}
        for i, v in enumerate(nums):
            dic[v] = i
        for i, v in enumerate(nums):
            # BUGFIX: was abs(target - nums[x]), which matched the wrong
            # value whenever the complement was negative.
            rest = target - v
            j = dic.get(rest)
            # BUGFIX: require j != i so the same element is not used twice.
            if j is not None and j != i:
                return [i, j]
        return None

    def twoSumSol(self, nums, target):
        """One-pass variant: check the complement among previously seen values."""
        from collections import Counter
        c = Counter()
        for i in range(len(nums)):
            part = target - nums[i]
            if part in c:
                return [c[part], i]
            else:
                c[nums[i]] = i
        return None
if __name__ == '__main__':
    # Quick self-check: 0 (index 0) + 3 (index 3) == 3.
    solver = Solution()
    sample, wanted = [0, 16, 11, 3], 3
    assert solver.twoSumSol(sample, wanted) == [0, 3]
| [
"alfie.gerrycheung@gmail.com"
] | alfie.gerrycheung@gmail.com |
1ba449c89a323feccfaf188abe2ca3cefcabfb28 | 8d042023e261df97d87ddf09edc076db323013a7 | /visual/plot_error_distribution.py | 98438454f0b32c5403949a47f6e262c5699e0873 | [] | no_license | myying/rankine | 8c5f538626590232cb160a53c5a2f6550005d292 | 878eb580253352e72c0871b485bce43226702894 | refs/heads/master | 2022-09-03T19:01:23.053120 | 2022-08-25T11:10:02 | 2022-08-25T11:10:02 | 154,381,581 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | #!/usr/bin/env python
import numpy as np
import rankine_vortex as rv
import graphics as g
import config as p
import matplotlib.pyplot as plt
plt.switch_backend('Agg')  # headless backend: render PNGs without a display
# for x_in in range(41):
#     for y_in in range(41):
for x_in in [17]:
    for y_in in [29]:
        # Single output grid point for this figure.
        iout = np.array([x_in])
        jout = np.array([y_in])
        plt.figure(figsize=(3, 2))
        ###plot histogram
        ax = plt.subplot(1, 1, 1)
        # Observation operator mapping model state to the chosen point --
        # NOTE(review): semantics of rv.obs_operator assumed; verify.
        Hout = rv.obs_operator(p.iX, p.jX, p.nv, iout, jout, p.iSite, p.jSite)
        # Prior ensemble error H(Xb) - H(Xt); presumably ensemble members are
        # rows of p.Xb -- TODO confirm layout against the config module.
        prior_err = np.dot(Hout, p.Xb.T) - np.dot(Hout, p.Xt)
        err_mean = np.mean(prior_err)
        err_std = np.std(prior_err)
        ii = np.arange(-50, 50, 1)
        # Gaussian pdf with the sample mean/std, plotted for comparison.
        jj = np.exp(-0.5*(ii-err_mean)**2/ err_std**2) / np.sqrt(2*np.pi) / err_std
        jj0 = g.hist_normal(ii, prior_err[0, :])
        ax.plot(ii, jj0, 'k', linewidth=2, label='Sample')
        ax.plot(ii, jj, 'r:', linewidth=1, label='Gaussian')
        # ax.legend(fontsize=12, loc=1)
        # Mark each of the first nens_show member errors along the x-axis,
        # colour-coded through the jet colormap.
        cmap = [plt.cm.jet(x) for x in np.linspace(0, 1, p.nens_show)]
        for n in range(p.nens_show):
            ax.scatter(prior_err[0, n], 0, s=40, color=[cmap[n][0:3]])
        ax.set_xlim(-30, 30)
        ax.set_ylim(-0.05, 0.3)
        ax.tick_params(labelsize=8)
        print(x_in, y_in)  # progress indicator
        plt.savefig('/glade/work/mying/visual/rankine/loc_sprd_{}'.format(p.loc_sprd)+'/error_distribution/{}_{}.png'.format(x_in, y_in), dpi=100)
        plt.close()
| [
"hardyying@gmail.com"
] | hardyying@gmail.com |
a1e5b68668893f65f7dad839b9ac38d0e9b9f2eb | 9bbe44641af7e3c4c4fa6e8d228f65304c9adfc7 | /bin/soniccmsscaletest-interpret | 636e87a0b04be08af2e6f38063b25e289b47d7ee | [
"BSD-3-Clause"
] | permissive | Keb-L/soniccmsscaletest | 584076e317ae21085d973ed1e528efcae1ffd82c | 091205607bf71a57dbb06402e5001f0a8de836dc | refs/heads/master | 2021-03-05T08:48:11.285231 | 2020-03-11T18:47:45 | 2020-03-11T18:47:45 | 246,109,185 | 0 | 0 | BSD-3-Clause | 2020-03-09T18:14:18 | 2020-03-09T18:14:17 | null | UTF-8 | Python | false | false | 801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import soniccmsscaletest
import argparse, datetime
# Command-line setup: collect the scaletest output files to interpret.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-i', '--infiles', metavar='file', type=str, nargs='+',
    default = None,
    help = 'List of filenames outputted by the scaletest to be interpreted'
    )
args = parser.parse_args()
def main():
    """Interpret the given scaletest output files and print 1-minute bins."""
    # Parse the files into a combined record and per-job records; the
    # per-job results are currently unused here.
    outputinterpreter = soniccmsscaletest.OutputInterpreter()
    combined, output_per_job = outputinterpreter.interpret(args.infiles)
    # infs = combined.sort_inferences_by_end_time()
    # for inf in infs[:5] + infs[-5:]:
    #     print(inf)
    # Bucket inferences into one-minute bins and print each bin.
    binning = combined.bin_inferences(bin_width=datetime.timedelta(minutes=1))
    for b in binning:
        print(b)
if __name__ == '__main__':
main() | [
"klijnsma@fnal.gov"
] | klijnsma@fnal.gov | |
0225822eeb840ea21393dd7350bdbdacef84178c | 1fbf2532eb85a902769f8ea4d550fded4e7a6944 | /jdwp/commands/__init__.py | e07bc7b85ae7dbe73a9a81c3c94319d06e6b6884 | [] | no_license | soulseekah/pyjdb | 559de43dbdebf3f373a99328ee0622bf8ebe028a | 29db8d98afebcf5090f80eba406bcad64e487282 | refs/heads/master | 2016-09-05T19:40:03.823395 | 2012-12-23T18:45:36 | 2012-12-23T18:51:08 | 6,878,807 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | import virtualmachine
import threadreference
| [
"gennady@kovshenin.com"
] | gennady@kovshenin.com |
866d1c051acc929fded0b916a5dc384cd729456e | 29f18e8ddde0379cef7fa00b1a50058be3cafa79 | /numba/tests/test_make_function_to_jit_function.py | 29161fff594545fffceee89dbe9390cb349f1dc4 | [
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"BSD-2-Clause"
] | permissive | numba/numba | 9a8345ff5f7d57f0ffec40e39941ebf2684df0d1 | 46059957ad416e68476d1e5f32ccd59f7d5df2bb | refs/heads/main | 2023-08-09T22:29:38.170300 | 2023-08-07T15:00:27 | 2023-08-07T15:00:27 | 3,659,275 | 8,247 | 1,151 | BSD-2-Clause | 2023-09-13T14:43:48 | 2012-03-08T11:12:43 | Python | UTF-8 | Python | false | false | 8,288 | py | from numba import njit
from numba.core import errors
from numba.core.extending import overload
import numpy as np
import unittest
# Jitted helper consumers used by the tests below: each receives an escaping
# closure as a JIT function (not a Python object) because it is itself jitted.
@njit
def consumer(func, *args):
    # Call *func* with the remaining positional arguments.
    return func(*args)
@njit
def consumer2arg(func1, func2):
    # Apply *func2* to *func1* (a function-valued argument).
    return func2(func1)
_global = 123  # module-level constant closed over by some test closures
class TestMakeFunctionToJITFunction(unittest.TestCase):
    """
    This tests the pass that converts ir.Expr.op == make_function (i.e. closure)
    into a JIT function.
    """
    # NOTE: testing this is a bit tricky. The function receiving a JIT'd closure
    # must also be under JIT control so as to handle the JIT'd closure
    # correctly, however, in the case of running the test implementations in the
    # interpreter, the receiving function cannot be JIT'd else it will receive
    # the Python closure and then complain about pyobjects as arguments.
    # The way around this is to use a factory function to close over either the
    # jitted or standard python function as the consumer depending on context.
    def test_escape(self):
        # A closure escaping into a consumer: jitted and interpreted results
        # must agree.
        def impl_factory(consumer_func):
            def impl():
                def inner():
                    return 10
                return consumer_func(inner)
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        self.assertEqual(impl(), cfunc())
    def test_nested_escape(self):
        # Two closures escape; one is applied to the other in the consumer.
        def impl_factory(consumer_func):
            def impl():
                def inner():
                    return 10
                def innerinner(x):
                    return x()
                return consumer_func(inner, innerinner)
            return impl
        cfunc = njit(impl_factory(consumer2arg))
        impl = impl_factory(consumer2arg.py_func)
        self.assertEqual(impl(), cfunc())
    def test_closure_in_escaper(self):
        # The escaping closure itself defines and calls a nested closure.
        def impl_factory(consumer_func):
            def impl():
                def callinner():
                    def inner():
                        return 10
                    return inner()
                return consumer_func(callinner)
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        self.assertEqual(impl(), cfunc())
    def test_close_over_consts(self):
        # Escapee captures a local constant and a module-level global.
        def impl_factory(consumer_func):
            def impl():
                y = 10
                def callinner(z):
                    return y + z + _global
                return consumer_func(callinner, 6)
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        self.assertEqual(impl(), cfunc())
    def test_close_over_consts_w_args(self):
        # As above, but the consumer forwards a runtime argument.
        def impl_factory(consumer_func):
            def impl(x):
                y = 10
                def callinner(z):
                    return y + z + _global
                return consumer_func(callinner, x)
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        a = 5
        self.assertEqual(impl(a), cfunc(a))
    def test_with_overload(self):
        # The closure is consumed through an @overload-ed function that has
        # two arity-based specialisations.
        def foo(func, *args):
            nargs = len(args)
            if nargs == 1:
                return func(*args)
            elif nargs == 2:
                return func(func(*args))
        @overload(foo)
        def foo_ol(func, *args):
            # specialise on the number of args, as per `foo`
            nargs = len(args)
            if nargs == 1:
                def impl(func, *args):
                    return func(*args)
                return impl
            elif nargs == 2:
                def impl(func, *args):
                    return func(func(*args))
                return impl
        def impl_factory(consumer_func):
            def impl(x):
                y = 10
                def callinner(*z):
                    return y + np.sum(np.asarray(z)) + _global
                # run both specialisations, 1 arg, and 2 arg.
                return foo(callinner, x), foo(callinner, x, x)
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        a = 5
        self.assertEqual(impl(a), cfunc(a))
    def test_basic_apply_like_case(self):
        # apply()-style dispatch: pass a closure through an overloaded helper.
        def apply(array, func):
            return func(array)
        @overload(apply)
        def ov_apply(array, func):
            return lambda array, func: func(array)
        def impl(array):
            def mul10(x):
                return x * 10
            return apply(array, mul10)
        cfunc = njit(impl)
        a = np.arange(10)
        np.testing.assert_allclose(impl(a), cfunc(a))
    @unittest.skip("Needs option/flag inheritance to work")
    def test_jit_option_inheritance(self):
        # The escapee should inherit jit options (here error_model='numpy')
        # from the consumer; skipped pending support.
        def impl_factory(consumer_func):
            def impl(x):
                def inner(val):
                    return 1 / val
                return consumer_func(inner, x)
            return impl
        cfunc = njit(error_model='numpy')(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        a = 0
        self.assertEqual(impl(a), cfunc(a))
    # this needs true SSA to be able to work correctly, check error for now
    def test_multiply_defined_freevar(self):
        # A freevar bound in two branches cannot be captured as a constant;
        # expect a TypingError.
        @njit
        def impl(c):
            if c:
                x = 3
                def inner(y):
                    return y + x
                r = consumer(inner, 1)
            else:
                x = 6
                def inner(y):
                    return y + x
                r = consumer(inner, 2)
            return r
        with self.assertRaises(errors.TypingError) as e:
            impl(1)
        self.assertIn("Cannot capture a constant value for variable",
                      str(e.exception))
    def test_non_const_in_escapee(self):
        # Capturing a non-constant freevar (an array) must raise TypingError.
        @njit
        def impl(x):
            z = np.arange(x)
            def inner(val):
                return 1 + z + val # z is non-const freevar
            return consumer(inner, x)
        with self.assertRaises(errors.TypingError) as e:
            impl(1)
        self.assertIn("Cannot capture the non-constant value associated",
                      str(e.exception))
    def test_escape_with_kwargs(self):
        # Escapees with default kwargs, called both directly and via the
        # consumer (with and without the optional argument supplied).
        def impl_factory(consumer_func):
            def impl():
                t = 12
                def inner(a, b, c, mydefault1=123, mydefault2=456):
                    z = 4
                    return mydefault1 + mydefault2 + z + t + a + b + c
                # this is awkward, top and tail closure inlining with a escapees
                # in the middle that do/don't have defaults.
                return (inner(1, 2, 5, 91, 53),
                        consumer_func(inner, 1, 2, 3, 73),
                        consumer_func(inner, 1, 2, 3,),
                        inner(1, 2, 4))
            return impl
        cfunc = njit(impl_factory(consumer))
        impl = impl_factory(consumer.py_func)
        np.testing.assert_allclose(impl(), cfunc())
    def test_escape_with_kwargs_override_kwargs(self):
        # The consumer overrides the escapee's default kwargs at call sites.
        @njit
        def specialised_consumer(func, *args):
            x, y, z = args # unpack to avoid `CALL_FUNCTION_EX`
            a = func(x, y, z, mydefault1=1000)
            b = func(x, y, z, mydefault2=1000)
            c = func(x, y, z, mydefault1=1000, mydefault2=1000)
            return a + b + c
        def impl_factory(consumer_func):
            def impl():
                t = 12
                def inner(a, b, c, mydefault1=123, mydefault2=456):
                    z = 4
                    return mydefault1 + mydefault2 + z + t + a + b + c
                # this is awkward, top and tail closure inlining with a escapees
                # in the middle that get defaults specified in the consumer
                return (inner(1, 2, 5, 91, 53),
                        consumer_func(inner, 1, 2, 11),
                        consumer_func(inner, 1, 2, 3,),
                        inner(1, 2, 4))
            return impl
        cfunc = njit(impl_factory(specialised_consumer))
        impl = impl_factory(specialised_consumer.py_func)
        np.testing.assert_allclose(impl(), cfunc())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"stuart.archibald@googlemail.com"
] | stuart.archibald@googlemail.com |
2f53f56c0560c7f4ff172de6e383f8c1dc7dffe6 | 7f9dac51bb0c19850f0cb1651c44a1b314298bb3 | /app/admin.py | ee56eeb374ced1fce4567328067a2d1be859f800 | [] | no_license | Manikanndan/Django-Project25-UserRegistrationwitEmail | 5f5b900939f416ccd09f64470750bf1765d24740 | 932c2a793a2d6816246b62ab8eba0cb35f50fc6d | refs/heads/main | 2023-08-14T00:05:28.696076 | 2021-09-29T14:05:01 | 2021-09-29T14:05:01 | 411,698,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from django.contrib import admin
from app.models import *
# Register your models here.
admin.site.register(Profile) | [
"manikanndan.n15@gmail.com"
] | manikanndan.n15@gmail.com |
4e55a601d92c29b22f244e030fe951b5687b9795 | 69c6b92156296ea38a9cb24c56dfb206cd078ec3 | /app.py | e5d8d5a26051dd27d7418a8a46f772b79a3055a3 | [] | no_license | N1ghtF1re/bsuir-get-free-audience-bot | 4d886cd96564321ccaf5fbbb84833741164480f5 | e3e34719187802975197a6d31e79dd6d6e810d92 | refs/heads/master | 2020-03-26T17:19:46.970024 | 2018-08-17T18:45:59 | 2018-08-17T18:45:59 | 145,155,300 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import requests
import sys
import updateDB
import sqlite3
import os
from getEmployedFromDB import getEmployedAud
from getAudiences import getAudiencesList
db_file = '/projects/parser/db/schedule.sqlite' # SQLite database file
# BUGFIX: if sqlite3.connect() itself raised, `conn` was never assigned and
# the finally-block crashed with NameError; initialise it and guard close().
conn = None
try:
    # Connect to the database and refresh all schedule tables.
    conn = sqlite3.connect(db_file)
    cursor = conn.cursor()
    updateDB.updateAllTables(cursor, conn)
except sqlite3.Error as e:
    print(e)
finally:
    if conn is not None:
        conn.close()
| [
"sasha.pankratiew@gmail.com"
] | sasha.pankratiew@gmail.com |
699d74bdfccb64902879770139ec009f1b017033 | fc26310fd41cf06d27c98088d1cd754119025a13 | /Laboratorio1.py | d76f34768db8a6da784b54809693e56a616a8866 | [] | no_license | JuanPineda115/RPiGPIO | 121cae52bf2de502b6c30ce7021470829c998249 | 23adad0632d2671033d4568373b772da3719cc36 | refs/heads/master | 2022-11-26T18:34:40.461268 | 2020-07-31T03:35:18 | 2020-07-31T03:35:18 | 282,972,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | import RPi.GPIO as GPIO
import time
def Main():
    """Poll two buttons forever: pin 2 steps through the four LED sequences
    (the fifth press only wraps the counter); pin 3 shortens the step delay
    by 0.5s until it reaches 0.5, then resets it to 2.
    """
    # Configure BCM-numbered pins: 2, 3, 21 as inputs (21 is set up but never
    # read here); 14/15/18/23/24 drive the LEDs.
    ciclo = 0       # which sequence the next press of pin 2 triggers
    tiempo1 = 2     # step delay in seconds passed to the sequences
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(2, GPIO.IN)
    GPIO.setup(3, GPIO.IN)
    GPIO.setup(21, GPIO.IN)
    GPIO.setup(14, GPIO.OUT)
    GPIO.setup(15, GPIO.OUT)
    GPIO.setup(18, GPIO.OUT)
    GPIO.setup(23, GPIO.OUT)
    GPIO.setup(24, GPIO.OUT)
    # Busy-wait polling loop; inputs are treated as active-low (pressed ==
    # False) -- presumably wired with pull-ups, confirm against the circuit.
    while True:
        if(GPIO.input(2)==False):
            ciclo +=1
            if(ciclo==1):
                Secuencia1(tiempo1)
            if(ciclo==2):
                Secuencia2(tiempo1)
            if(ciclo==3):
                Secuencia3(tiempo1)
            if(ciclo==4):
                Secuencia4(tiempo1)
            if(ciclo==5):
                ciclo=0  # fifth press wraps back; no sequence runs this time
        if(GPIO.input(3)==False):
            # Shorten the delay in 0.5s steps; once at 0.5, reset to 2.
            if (tiempo1 > 0.5):
                tiempo1 -= 0.5
            elif(tiempo1==0.5):
                tiempo1=2
def Secuencia1(tiempo):
    """Light each LED in turn (14, 15, 18, 23, 24): on for `tiempo` seconds,
    then off for `tiempo` seconds."""
    for pin in (14, 15, 18, 23, 24):
        GPIO.output(pin, GPIO.HIGH)
        time.sleep(tiempo)
        GPIO.output(pin, GPIO.LOW)
        time.sleep(tiempo)
def Secuencia2(tiempo):
GPIO.output(24,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(24, GPIO.LOW)
time.sleep(tiempo)
GPIO.output(23,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(23, GPIO.LOW)
time.sleep(tiempo)
GPIO.output(18,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(18, GPIO.LOW)
time.sleep(tiempo)
GPIO.output(15,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(15, GPIO.LOW)
time.sleep(tiempo)
GPIO.output(14,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(14, GPIO.LOW)
time.sleep(tiempo)
def Secuencia3(tiempo):
GPIO.output(14,GPIO.HIGH)
GPIO.output(24,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(14,GPIO.LOW)
GPIO.output(24,GPIO.LOW)
time.sleep(tiempo)
GPIO.output(15,GPIO.HIGH)
GPIO.output(23,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(15,GPIO.LOW)
GPIO.output(23,GPIO.LOW)
time.sleep(tiempo)
GPIO.output(18,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(18,GPIO.LOW)
time.sleep(tiempo)
def Secuencia4(tiempo):
GPIO.output(14,GPIO.HIGH)
GPIO.output(15,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(14,GPIO.LOW)
GPIO.output(15,GPIO.LOW)
time.sleep(tiempo)
GPIO.output(23,GPIO.HIGH)
GPIO.output(24,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(23,GPIO.LOW)
GPIO.output(24,GPIO.LOW)
time.sleep(tiempo)
GPIO.output(18,GPIO.HIGH)
time.sleep(tiempo)
GPIO.output(18,GPIO.LOW)
time.sleep(tiempo)
def ctiempo(time):
if(time>0.5):
time-=-0.5
if (time==0.5):
time = 2
return time
Main() | [
"pin19087@uvg.edu.gt"
] | pin19087@uvg.edu.gt |
446c77742e62b48ce33d3998b58635e643327fe0 | ac1f594bee57ac383ca2f721a1921435b430e2ae | /2/Activities/03-Hashlib/Unsolved/hashing.py | 2a6718bb9d3cf6a69eb53edf14f726bcfa220083 | [] | no_license | Asheladia/Blockchain | d2ef87c8fd69c6d9a9f389c837cf101b2819dde7 | 984f7eb63083e145be82c0bafef7a1b5e237d2c5 | refs/heads/main | 2023-03-04T12:49:26.023920 | 2021-02-19T19:55:53 | 2021-02-19T19:55:53 | 340,473,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | import hashlib
# output sha256 hash in hexadecimal string format
def hash(message):
return hashlib.sha256(message).hexdigest()
# modify these messages
# note: we include the "b" before the string definition in order to represent it as bytes instead of a string
message_one = b"You owe me $100,000, please pay asap"
message_two = b"you owe me $100,000 , please pay asap"
# print both messages and their corresponding hashes
print()
print()
# compare the hashes in an if/else statement
hash_one =
hash_two =
if ():
print()
else:
print()
# compare the length of the hashes
print(len())
print(len())
| [
"alpee01@gmail.com"
] | alpee01@gmail.com |
38367fd6306431bab28c7d9476eb7f23583717bf | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/highlevelstream/udf/valuelist/valuelist.py | dbfa6ca0e26dadbdb5304e3fe9253effed894d09 | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,049 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ValueList(Base):
"""The ValueList class encapsulates a system managed valueList node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the ValueList property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'valueList'
def __init__(self, parent):
super(ValueList, self).__init__(parent)
@property
def AvailableWidths(self):
"""Species all the possible widths available for a UDF in particular Type.
Returns:
list(str)
"""
return self._get_attribute('availableWidths')
@property
def StartValueList(self):
"""Specifies the starting value for a particular UDF.
Returns:
list(number)
"""
return self._get_attribute('startValueList')
@StartValueList.setter
def StartValueList(self, value):
self._set_attribute('startValueList', value)
@property
def Width(self):
"""Specifies the width of the UDF.
Returns:
str(16|24|32|8)
"""
return self._get_attribute('width')
@Width.setter
def Width(self, value):
self._set_attribute('width', value)
def update(self, StartValueList=None, Width=None):
"""Updates a child instance of valueList on the server.
Args:
StartValueList (list(number)): Specifies the starting value for a particular UDF.
Width (str(16|24|32|8)): Specifies the width of the UDF.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def find(self, AvailableWidths=None, StartValueList=None, Width=None):
"""Finds and retrieves valueList data from the server.
All named parameters support regex and can be used to selectively retrieve valueList data from the server.
By default the find method takes no parameters and will retrieve all valueList data from the server.
Args:
AvailableWidths (list(str)): Species all the possible widths available for a UDF in particular Type.
StartValueList (list(number)): Specifies the starting value for a particular UDF.
Width (str(16|24|32|8)): Specifies the width of the UDF.
Returns:
self: This instance with matching valueList data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of valueList data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the valueList data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
858517b9ae608ac5a57f9a6451ad9f46d97f3c4b | 36e63f94f074777ec4f53637cf2f74cca34dae5d | /TRmod_kernel_A1.py | f399c4b2640109b31d5b7d9622ba168d4d97b068 | [] | no_license | yingjin-ma/Fcst_sys_public | ff75efbc62ee84bd67b8e5d900eec4f3955c4638 | a255e3e41fe2f6f26bcb6d7788f777f3d269e5f0 | refs/heads/main | 2023-01-06T02:15:59.674138 | 2022-12-30T03:46:03 | 2022-12-30T03:46:03 | 313,652,326 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import sys
import os
import time
import socket
hostname = socket.gethostname()
PWD=os.getcwd()
SRC=PWD+"/src"
RAW=PWD+"/database/rawdata"
BAK=PWD+"/database/trained-models"
# add the runtime environments
print(SRC)
sys.path.append(SRC)
# originally import
import torch
import Models
import Configs
#import PredictTime
#import Magnification
#import DecideRefSpace
# rdkit for chem-informatics
#from rdkit import Chem
#from rdkit.Chem import AllChem
# ==> parameters to be used (IO later)
# ML models, related
ML_models = ["MPNN"]
TR_para = [100,200,50,0.01,1.0,2] # [NtrainSet,Nepoch,BatchSize,LRstep,TrainRatio,ValidInt]
TR_dir = PWD + "/database/training-models"
# SDFs and Crawled folder, related
sdfsH = RAW + "/Arxiv1911.05569v1_sdfs_H"
setsDir = RAW + "/G09data.01"
# Functionals and basis sets, related
#functionals = ['B3LYP','bhandhlyp','BLYP','CAM-B3LYP','LC-BLYP','M06','M062x','PBE1PBE','wb97xd']
#bases = ['6-31g','6-31gs','6-31pgs']
functionals = ['B3LYP']
bases = ['6-31g']
# training/validing/testing sets
# ==> training sets, manual selections for adjusting the training models
suits_train = []
#suits_train.append("branch")
#suits_train.append("ring")
#suits_train.append("ring_sub")
#suits_train.append("alkane")
#suits_train.append("PE")
suits_train.append("Gaussian_inputs_training2")
suits_train.append("Gaussian_inputs_training")
suits_train.append("Gaussian_inputs_training3")
suits_train.append("Gaussian_inputs_training4")
suits_train.append("Gaussian_inputs_training5")
# ==> validing/testing sets, manual selections for adjusting
suits_valid = []
suits_valid.append("Gaussian_inputs_validing")
#suits_valid.append("Gaussian_inputs_validing2")
#suits_valid.append("Gaussian_inputs_testing")
#suits_valid.append("Gaussian_inputs_testing2")
# Training and validing/testing process
for mod in ML_models: # models
#Models.prepare(mod)
for funct in functionals:
for basis in bases:
chemspace=funct+'_'+basis
# generate the full path for the training and validing sets
train_tmp=[]
for i in range(len(suits_train)):
tmp = setsDir + "/" + chemspace + "/" + suits_train[i]
train_tmp.append(tmp)
valid_tmp=[]
for i in range(len(suits_valid)):
tmp = setsDir + "/" + chemspace + "/" + suits_valid[i]
valid_tmp.append(tmp)
# Mkdir the "training" folder for usage
if not os.path.exists(TR_dir):
os.mkdir(TR_dir)
# Training and evaluating
Models.TrainAndEval(TR_para=TR_para,TR_dir=TR_dir,chemspace=chemspace,folder_sdf=sdfsH,suits_train=train_tmp,suits_valid=valid_tmp,setsDir=setsDir,model=mod)
print("All the models have been trained evaluated")
exit(0)
| [
"yingjin_ma@163.com"
] | yingjin_ma@163.com |
0cd87e0d9eca96df30c68ee957e543ea4bf80730 | 08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc | /src/mnistk/networks/linearrelu_19.py | 7b3a00c5cb35632a17f95048599dcdc9247a02b4 | [] | no_license | ahgamut/mnistk | 58dadffad204602d425b18549e9b3d245dbf5486 | 19a661185e6d82996624fc6fcc03de7ad9213eb0 | refs/heads/master | 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 | Python | UTF-8 | Python | false | false | 1,091 | py | # -*- coding: utf-8 -*-
"""
linearrelu_19.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class LinearReLU_19(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Linear(in_features=784, out_features=75, bias=False)
self.f1 = nn.ReLU(inplace=False)
self.f2 = nn.Linear(in_features=75, out_features=43, bias=True)
self.f3 = nn.ReLU(inplace=False)
self.f4 = nn.Linear(in_features=43, out_features=34, bias=True)
self.f5 = nn.ReLU(inplace=False)
self.f6 = nn.Linear(in_features=34, out_features=10, bias=True)
self.f7 = nn.Linear(in_features=10, out_features=10, bias=False)
self.f8 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],784)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = self.f7(x)
x = self.f8(x)
return x
| [
"41098605+ahgamut@users.noreply.github.com"
] | 41098605+ahgamut@users.noreply.github.com |
520f33f52e07345e0ce8da2416007cdb06d9164d | 0e1abae708fa0d0afc312bcfdc15b4d587d487e1 | /flask_library_app/lib/exceptions.py | 41cb4b0edec54f60a65c5ff9443080c05d1c31cd | [] | no_license | davidiakobidze/flask_library | 2ea156545401a5ba78441bcd5c3c28dd4b831446 | 92a1a15fe1fcb40513e665018dfce9ee6dae8dcd | refs/heads/master | 2023-05-11T16:02:15.743752 | 2019-06-05T11:25:00 | 2019-06-05T11:25:00 | 178,378,117 | 0 | 0 | null | 2023-05-01T20:57:26 | 2019-03-29T09:47:41 | Python | UTF-8 | Python | false | false | 692 | py | from flask import Blueprint, jsonify, make_response
class HandleException(Exception):
status_code = 404
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
mod_err = Blueprint('mod_err', __name__)
@mod_err.app_errorhandler(HandleException)
def not_found_exception_handler(error):
print(error.to_dict())
return make_response(jsonify(error.to_dict()), error.status_code)
| [
"davidiakobidze1@gmail.com"
] | davidiakobidze1@gmail.com |
a08ed3e1f7d2d89e2c8be0bc0cab17f2893907d4 | e86f88bd05d2dfc3197191245a28734e0a94306c | /application/resources/front_end_caging.py | 11e8038147e8de306c4178f76445707f20e23355 | [] | no_license | transreductionist/API-Project-1 | b83e008a8dcf19f690109d89b298111062f760c0 | d5ffcc5d276692d1578cea704125b1b3952beb1c | refs/heads/master | 2022-01-16T06:31:06.951095 | 2019-05-09T15:22:44 | 2019-05-09T15:22:44 | 185,820,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,613 | py | """Resource entry point to handle the business logic for the endpoint."""
from flask import request
from flask_api import status
from nusa_jwt_auth.restful import AdminResource
from application.controllers.front_end_caging import build_ultsys_user
from application.controllers.front_end_caging import update_caged_donor
# pylint: disable=too-few-public-methods
# pylint: disable=no-self-use
class CageDonorAsUltsysUser( AdminResource ):
"""Flask-RESTful resource endpoints to handle caged donors."""
def post( self ):
"""Create new Ultsys user using the caged donor ( updated with payload ), then deleting caged donor."""
payload = request.json
payload[ 'ultsys_user_id' ] = None
response = build_ultsys_user( payload )
if response:
return response, status.HTTP_200_OK
return None, status.HTTP_500_INTERNAL_SERVER_ERROR
def put( self ):
"""Update Ultsys user assigned using the caged donor ( updated with payload ), then deleting caged donor."""
payload = request.json
response = build_ultsys_user( payload )
if response:
return response, status.HTTP_200_OK
return None, status.HTTP_500_INTERNAL_SERVER_ERROR
class CageDonorUpdate( AdminResource ):
"""Flask-RESTful resource endpoints to update a caged donor in the table."""
def put( self ):
"""Update the caged donor address."""
response = update_caged_donor( request.json )
if response:
return None, status.HTTP_200_OK
return None, status.HTTP_500_INTERNAL_SERVER_ERROR
| [
"transreductionist@gmail.com"
] | transreductionist@gmail.com |
d9afbb72bcda9098cdba94f209637b513ae84b0f | 294a91790d9751c08b05030396ebe8f06e846f86 | /python_rob/rob.py | c90092f7e8fe174fac0024bc1303dde09a087958 | [] | no_license | lcy2218/python_robot | 508833ca3d4959d23e350f15149e66093199d639 | 81c20bd98e9dc472113eeb0fd67575cda07ee60e | refs/heads/main | 2023-04-23T04:24:05.083971 | 2021-05-14T08:39:42 | 2021-05-14T08:39:42 | 352,033,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : rob.py
@Time : 2021/04/08 16:25:35
@Author : Liu ChaoYang
@Version : 1.0
@Contact : 2218932687@qq.com
'''
# here put the import lib
'''
爬虫,按照一定规则,自动抓取互联网信息的程序或者脚本
模拟浏览器打开网页,获取想要的那部分数据
'''
#思路
import re #正则表达式、文字匹
from bs4 import BeautifulSoup #网页解析、获取数字
import urllib.request, urllib.error #制定URL、获取网页数据
import xlwt #进行excel操作
import sqlite3 #进行SQLite数据库操作
def main():
baseUrl = "https://movie.douban.com/top250?strat="
#1:爬取网页
#2:解析数据 逐个解析数据
# dataList = getData(baseUrl)
# savePath = r"./data.xls"
#3:保存数据
# saveData(savePath)
askUrl(baseUrl)
#2:解析数据 逐个解析数据
def getData(baseUrl):
dataList = []
for i in range(0, 10):
#https://movie.douban.com/top250?strat=1..2..3..4..5
#拼接字符串
url = baseUrl + str(i*25)
html = askUrl(url) #保存获取到的网页源码
#获取网页后开始解析数据
return dataList
#1:爬取网页
def askUrl(url):
#模拟浏览器头部信息、向豆瓣发送信息
head = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36"
}
request = urllib.request.Request(url, headers = head)
html = ""
try:
response = urllib.request.urlopen(request)
html = response.read().decode('utf-8')
print(html)
#捕获错误
except urllib.error.URLError as result:
#打印状态码
if hasattr(result, "code"):
print(result.code)
#打印失败原因
if hasattr(result, "reason"):
print(result.reason)
return html
def saveData(savePath):
pass
if __name__ == "__main__":
main()
| [
"39287095+lcy2218@users.noreply.github.com"
] | 39287095+lcy2218@users.noreply.github.com |
c1321c5631ebf0c448cc548297c676d898002ddd | 7ec0707166dc1395a3a7f1c688ad2f199493049b | /python/flowfilter/update.py | 62c177874e91045ed456aac36a32f5d23841d3be | [] | no_license | CaptainJL/optical-flow-filter | f597ffdfae307580f6aafcecc365d0cac7af3cdc | ea4cc4e267e146e3aea77a9729413c0cc37bed1d | refs/heads/master | 2021-01-09T20:22:13.029904 | 2017-05-18T01:22:50 | 2017-05-18T01:22:50 | 81,279,627 | 0 | 0 | null | 2017-02-08T02:43:45 | 2017-02-08T02:43:45 | null | UTF-8 | Python | false | false | 5,043 | py | """
flowfilter.update
-----------------
Module containing Python implementationso of the filter
image model and update methods.
:copyright: 2015, Juan David Adarve, ANU. See AUTHORS for more details
:license: 3-clause BSD, see LICENSE for more details
"""
import math
import numpy as np
import scipy.ndimage as nd
__all__ = ['imageModel', 'update', 'smoothFlow']
def imageModel(img, support=5):
"""Computes brightness model parameters.
Parameters
----------
img : ndarray
Input image in gray scale. If img.dtype is different than
float32, it is automatically converted.
support : integer, optional
Window support used for computing brightness parameters.
The value should be an odd number greater or equal 3.
Defaults to 5.
Returns
-------
A0 : ndarray
Constant brightness term.
Ax : ndarray
X (column) gradient component.
Ay : ndarray
Y (row) gradient component.
Raises
------
ValueError : support < 3 or support % 2 != 1:
"""
if support < 3 or support % 2 != 1:
raise ValueError('support should be an odd number greater or equal 3')
# input image dtype check
if img.dtype != np.float32:
img = img.astype(np.float32)
# creates convolution masks
if support == 3:
blur1D = np.array([[1.0, 2.0, 1.0]], dtype=np.float32)
gradient1D = np.array([[1.0, 0.0, -1.0]], dtype=np.float32)
else:
b = np.array([1.0, 1.0], dtype=np.float32)
blur1D = np.array([1.0, 1.0], dtype=np.float32)
for _ in xrange(support-2):
blur1D = np.convolve(blur1D, b, mode='full')
blur1D = np.reshape(blur1D, (1, blur1D.shape[0]))
gradient1D = np.arange(-support/2 + 1, support/2 + 1, dtype=np.float)
gradient1D = np.reshape(gradient1D[::-1], (1, gradient1D.shape[0]))
# renormalize masks
gradient1D /= np.sum(gradient1D*gradient1D)
blur1D /= np.sum(blur1D)
# Gaussian blurring in X and Y
imgBlurX = nd.convolve(img, blur1D)
imgBlurY = nd.convolve(img, blur1D.T)
# brightness parameters
Ax = nd.convolve(imgBlurY, gradient1D)
Ay = nd.convolve(imgBlurX, gradient1D.T)
A0 = nd.convolve(imgBlurY, blur1D)
return A0, Ax, Ay
def update(img, imgOld, flowPredicted, support=5, gamma=1.0):
"""Update the optical flow field provided new image data.
Parameters
----------
img : ndarray
New brightness image.
imgOld : ndarray
Old brightness image. This corresponds to the old
flowPredicted : ndarray
Predicted estimation of optical flow.
support : integer, optional
Window support used for computing brightness parameters.
The value should be an odd number greater or equal 3.
Defaults to 5.
gamma : float, optional
temporal regularization gain controlling the relevance
of the predicted flow in the update. Value should be
greater than 0.0. Defaults to 1.0.
Returns
-------
flowUpdated : ndarray
Updated optical flow field.
A0 : ndarray
Constant brightness model parameter computed from
img.
Raises
------
ValueError : if gamma <= 0.0
"""
if gamma <= 0.0: raise ValueError('gamma should be greater than zero')
# compute the image model parameters
A0, Ax, Ay = imageModel(img, support)
# temporal derivative
Yt = imgOld - A0
# adjunct matrix N elements for each pixel
N00 = np.zeros(img.shape); N00[:,:] = gamma + Ay*Ay
N01 = np.zeros(img.shape); N01[:,:] = -Ax*Ay
N10 = np.zeros(img.shape); N10[:,:] = np.copy(N01)
N11 = np.zeros(img.shape); N11[:,:] = gamma + Ax*Ax
# determinant of M for each pixel
detM = (gamma*(gamma + (Ax*Ax + Ay*Ay)))
# q components for each pixel
qx = gamma*flowPredicted[:,:,0] + Ax*Yt
qy = gamma*flowPredicted[:,:,1] + Ay*Yt
# compute the updated optic-flow
flowX = (N00*qx + N01*qy) / detM
flowY = (N10*qx + N11*qy) / detM
# pack the results
flowUpdated = np.concatenate([p[...,np.newaxis] for p in [flowX, flowY]], axis=2)
return flowUpdated, A0
def smoothFlow(flow, iterations=1, support=5):
"""Apply a smoothing filter to optical flow
Parameters
----------
flow : ndarray
iterations : integer, optional
support : integer, optional
"""
if iterations <= 0: raise ValueError('iterations should be greater than 1')
if support < 3 or support % 2 != 1:
raise ValueError('support should be an odd number greater or equal 3')
# average mask
avg_k = np.ones((support, support), dtype=np.float32) / float(support*support)
flowSmoothed = np.copy(flow)
for _ in range(iterations):
# apply smoothing to each flow component
for n in range(2):
flowSmoothed[...,n] = nd.convolve(flowSmoothed[...,n], avg_k)
return flowSmoothed | [
"juan.adarve@anu.edu.au"
] | juan.adarve@anu.edu.au |
fc4c15d75a0433a3161767e328348de670f23a26 | 8d49251a314921f86c9db879fd0f5e0cba0c80fa | /my_notes_and_codes/second_exceptions.py | 8c579dd15dc24f5acd2cad5f474a4abf425e8a76 | [] | no_license | LSLubanco/python_fh | 42d3ea253c6d64f5133e741b788247baa378884b | 12222270b314c8eda888356d043077a30b504897 | refs/heads/master | 2020-11-24T07:25:25.711127 | 2020-01-16T09:47:48 | 2020-01-16T09:47:48 | 228,028,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 17:27:04 2019
@author: Daniel
second class exceptions
"""
while True:
try:
x = int(input("Enter a number: "))
break
except ValueError:
print("Enter a valid number!")
print("You have entered the number {}".format(x))
#y=0
try:
y = int(input("Enter a number: "))
except ValueError:
print("Enter a valid number!")
finally:
print("you have entered {}".format(y)) | [
"lubancod@gmail.com"
] | lubancod@gmail.com |
0eaa5041ad678be0831e566690633ee4cbe95362 | 427fb8ca7f1b3cfe524ac0b08339451ce304731b | /src/tracks/jennens_family_speedway_track.py | 54fcfae8de535da21420dad96d8af37c03ab9ba9 | [
"Apache-2.0"
] | permissive | jackycct/deep_racer_guru | 9d33ca0f950adf11459803269db5790940da506b | b6048536b46df98aff1861be460183e6c9c78e22 | refs/heads/master | 2023-07-10T07:28:33.404989 | 2023-06-12T14:31:19 | 2023-06-12T14:31:19 | 285,490,040 | 0 | 0 | MIT | 2020-08-06T06:18:15 | 2020-08-06T06:18:13 | null | UTF-8 | Python | false | false | 10,634 | py | #
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
from src.tracks.track import Track
import src.personalize.configuration.personal_track_annotations as config
class JennensFamilySpeedwayTrack(Track):
def __init__(self):
super().__init__()
self._ui_name = "Jennens Family Speedway"
self._ui_description = "The Jennens Family Speedway (49.56 m) is named in honor of the first ever racing family and 2021 re:Invent finalists James ""JJ"" and Timothy ""Flatearth"" Jennens. This track features two blistering fast drag strips right into unforgiving 90 degree sweeping turns that can spin out even the most skilled developers."
self._ui_length_in_m = 49.56 # metres
self._ui_width_in_cm = 107 # centimetres
self._world_name = "2022_october_open"
self._track_sector_dividers = [68, 119]
self._annotations = config.jennens_family_speedway_annotations
self._track_width = 1.067
self._track_waypoints = [(-7.755003452301025, 0.4451555125415325), (-7.505678415298459, 0.27408921718597234),
(-7.244629383087158, 0.12151135504245758), (-6.972953081130978, -0.011228904128076037),
(-6.694684982299805, -0.12963902950286865), (-6.41720986366272, -0.24994993954896927),
(-6.139669418334961, -0.3701086901128292), (-5.862192869186401, -0.4904141963343136),
(-5.5851404666900635, -0.6116961240768433), (-5.308419942855835, -0.7337275296449661),
(-5.032577991485596, -0.8577362298965454), (-4.757369041442871, -0.9831396788358688),
(-4.483355522155762, -1.1111348569393158), (-4.208613991737366, -1.2375427782535553),
(-3.933869481086731, -1.3639634847640991), (-3.659485936164856, -1.4911675453186035),
(-3.385401487350464, -1.619014024734497), (-3.1114695072174072, -1.7471864819526672),
(-2.837660074234009, -1.87562096118927), (-2.5639175176620483, -2.0041980147361755),
(-2.289720058441162, -2.131802499294281), (-2.0152634978294373, -2.258848547935486),
(-1.7403600215911865, -2.3849234580993652), (-1.4650830030441284, -2.510182499885559),
(-1.1890831291675568, -2.6338380575180054), (-0.9121726751327515, -2.7554404735565186),
(-0.634554997086525, -2.875421404838562), (-0.3561219498515129, -2.993494987487793),
(-0.07585849240422249, -3.106951951980591), (0.21357212215662003, -3.194725513458252),
(0.5040899366140366, -3.278777003288269), (0.7953570783138275, -3.360203981399536),
(1.087129384279251, -3.439800500869751), (1.3795784711837769, -3.5168734788894653),
(1.6725150346755981, -3.5920709371566772), (1.9657999873161316, -3.6658999919891357),
(2.259369969367981, -3.7385900020599365), (2.5531080961227417, -3.8105969429016113),
(2.847051978111267, -3.881759524345398), (3.140995979309082, -3.9529190063476562),
(3.4348909854888916, -4.0242840051651), (3.728566527366638, -4.096544623374939),
(4.0220324993133545, -4.16965389251709), (4.3156349658966064, -4.242184996604919),
(4.609933853149414, -4.311862587928772), (4.9044880867004395, -4.380454063415527),
(5.199220657348633, -4.448277473449707), (5.494209051132202, -4.514976382255554),
(5.789388418197632, -4.580825090408325), (6.084751605987549, -4.645842552185059),
(6.380423545837402, -4.709444522857666), (6.676470041275024, -4.771275997161865),
(6.97294282913208, -4.831032037734985), (7.26960563659668, -4.889832496643066),
(7.566822052001953, -4.9457690715789795), (7.8641345500946045, -5.001286029815674),
(8.159615516662598, -4.951810598373413), (8.406055450439453, -4.780893325805664),
(8.562281608581543, -4.523993968963623), (8.63389539718628, -4.2309370040893555),
(8.648118019104004, -3.9290995597839355), (8.625285148620605, -3.6276700496673584),
(8.57523775100708, -3.3294734954833984), (8.506796836853027, -3.034928560256958),
(8.506796836853027, -3.034928560256958), (8.476519346237183, -2.7347664833068848),
(8.5135657787323, -2.434995412826538), (8.585251808166504, -2.1412404775619507),
(8.664247512817383, -1.849313497543335), (8.740426063537598, -1.55663001537323),
(8.817792892456055, -1.2642599940299988), (8.898229122161865, -0.9727175235748291),
(8.980339527130127, -0.681643009185791), (9.066332340240479, -0.39169690012931824),
(9.15629243850708, -0.10295629687607288), (9.203824520111084, 0.19454235583543822),
(9.216181755065918, 0.496659100055695), (9.202358722686768, 0.7986833155155182),
(9.159722328186035, 1.097980409860611), (9.08905839920044, 1.3919295072555542),
(8.987367153167725, 1.6766055226325989), (8.854998111724854, 1.9483780264854431),
(8.694060325622559, 2.204278588294983), (8.507275104522705, 2.4419875144958496),
(8.298409700393677, 2.6605790853500366), (8.071115493774416, 2.8599555492401114),
(7.82850980758667, 3.040425419807434), (7.573786020278931, 3.2033729553222656),
(7.30945110321045, 3.3502370119094844), (7.0372235774993905, 3.4819134473800655),
(6.758669614791869, 3.599635481834412), (6.475301504135132, 3.7052711248397827),
(6.187966585159302, 3.799584150314331), (5.897493600845337, 3.8837616443634033),
(5.604870557785034, 3.960146427154541), (5.311467170715332, 4.033480882644653),
(5.0262510776519775, 4.132853984832764), (4.730224132537842, 4.192736029624939),
(4.428300380706787, 4.1963454484939575), (4.132031559944153, 4.138618469238281),
(3.853487491607666, 4.021769046783447), (3.6084929704666138, 3.8457579612731934),
(3.411071538925171, 3.617482900619507), (3.2602025270462036, 3.355684518814087),
(3.140528440475464, 3.0780140161514282), (3.0351579189300537, 2.7945384979248047),
(2.929121971130371, 2.5113149881362915), (2.799983024597168, 2.238517999649048),
(2.6032760143280043, 2.0098019838333148), (2.3587000370025617, 1.8329170346260064),
(2.0838890075683576, 1.7077589631080623), (1.7914485335350037, 1.631926953792572),
(1.490757465362551, 1.6019275188446047), (1.1888760328292847, 1.6152389645576477),
(0.8928671777248383, 1.675501525402069), (0.6057200878858566, 1.7701780200004578),
(0.3245176561176777, 1.8814440369606018), (0.04630109667778015, 2.000022053718567),
(-0.23208790179342031, 2.1181859970092773), (-0.5125381946563721, 2.231346011161804),
(-0.7919032573699951, 2.3471999764442444), (-1.071234107017517, 2.463137447834015),
(-1.3506739735603333, 2.578809976577759), (-1.6303449869155884, 2.6939234733581543),
(-1.910144031047821, 2.80872642993927), (-2.1901875138282776, 2.9229310750961304),
(-2.4705530405044556, 3.036341428756714), (-2.7511759996414185, 3.149114966392517),
(-3.0321154594421387, 3.2610939741134644), (-3.3134440183639526, 3.372096538543701),
(-3.595812439918518, 3.4804195165634155), (-3.8793119192123413, 3.5857499837875366),
(-4.163656949996948, 3.6887749433517456), (-4.448596000671387, 3.7901490926742554),
(-4.733931541442871, 3.8904019594192505), (-5.019349098205566, 3.9904184341430664),
(-5.304849624633789, 4.090198993682861), (-5.590124845504761, 4.19062352180481),
(-5.875044584274292, 4.2920531034469604), (-6.159180402755737, 4.3956520557403564),
(-6.44232439994812, 4.501933336257935), (-6.725490093231201, 4.608154058456421),
(-7.008341550827026, 4.715210914611816), (-7.291232585906982, 4.822164058685303),
(-7.574861288070679, 4.927034139633179), (-7.856085538864136, 5.038276672363281),
(-8.14598560333252, 5.122975587844849), (-8.446453571319584, 5.123118400573729),
(-8.70826148986816, 4.980955600738527), (-8.872729301452638, 4.7290720939636195),
(-8.968108654022217, 4.4424121379852295), (-9.031425952911377, 4.146733522415158),
(-9.090214252471924, 3.8500900268554688), (-9.146362781524658, 3.5529894828796387),
(-9.200953483581543, 3.2555309534072876), (-9.238299369812012, 2.9554984569549596),
(-9.239519596099854, 2.6533539295196533), (-9.18390703201294, 2.3566945791244507),
(-9.067930221557617, 2.077654540538788), (-8.931593418121338, 1.8077549934387207),
(-8.777645587921143, 1.5474955439567566), (-8.606619358062744, 1.2981362640857697),
(-8.417943000793455, 1.0618529021739933), (-8.212388515472412, 0.8401062488555908),
(-7.991051435470581, 0.6341079920530319), (-7.755003452301025, 0.4451555125415325)]
| [
"davidmuttonhughes@yahoo.co.uk"
] | davidmuttonhughes@yahoo.co.uk |
5660fae0652271b4c6073443da4b19c18a073dd8 | 6c08fa056cffcf40e7a9c9bbedbe7164a2eb0b08 | /api/api_home_module.py | 86eab06ffae13e62ebc1c81431b068459b03658e | [] | no_license | linzichu/liequ_aotu_master | 09bff617034b02d715b2a91637a1927a4eb46aa2 | bd581b177bd4af48fa696d171b0f8aef3687df7f | refs/heads/master | 2022-12-07T19:02:20.357958 | 2020-08-23T04:06:54 | 2020-08-23T04:06:54 | 282,369,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import json
import requests
from api.api_get_host import ApiGetHost
import os
class ApiHomeModule(object):
    """HTTP client wrappers for the Home.* endpoints of the liequ API."""

    def __init__(self):
        # Base URL of the API server; endpoint paths are appended to it.
        self.host = "http://liequ.czhepeng.cn:8888/"

    def api_get_config(self):
        """Fetch the app configuration (Home.getConfig); return the raw Response."""
        url = self.host + "/api/public/?service=Home.getConfig"
        headers = {"Content-type": "application/json/utf-8"}
        # BUG FIX: the dict must be passed as `headers=`; passed positionally
        # it becomes `params` and is sent as a query string, not HTTP headers.
        response = requests.get(url, headers=headers)
        return response

    def api_get_all_data(self):
        """Fetch the full home-page payload (Home.GetAllData) as a JSON string."""
        url = self.host + "/api/public/?service=Home.GetAllData"
        headers = {"Content-type": "application/json/utf-8"}
        response = requests.get(url, headers=headers)
        result = json.dumps(response.json())
        return result

    def api_get_new(self):
        """Fetch the newest streamers (Home.GetNew); return the raw Response."""
        url = self.host + "/api/public/?service=Home.GetNew"
        headers = {"Content-type": "application/json/utf-8"}
        response = requests.get(url, headers=headers)
        return response

    def test_get_hot(self):
        """Fetch the popular streamers (Home.getHot); return the raw Response."""
        url = self.host + "/api/public/?service=Home.getHot"
        headers = {"Content-type": "application/json/utf-8"}
        response = requests.get(url, headers=headers)
        return response

    def api_get_class_live_new(self, liveclassid):
        """Fetch the new category listing (Home.getClassLiveNew) for a class id."""
        url = self.host + "/api/public/?service=Home.getClassLiveNew"
        headers = {"Content-type": "application/json/utf-8"}
        data = {
            "liveclassid": liveclassid
        }
        response = requests.post(url, headers=headers, params=data)
        return response
if __name__ == '__main__':
    # Ad-hoc manual check: print this module's absolute path.
    # result = ApiHomeModule().api_get_all_data()
    # print(result)
    path = os.path.abspath(__file__)
    print(path)
| [
"644896645@qq.com"
] | 644896645@qq.com |
0ed21295ba0f9f7a224a5a1217d7f998182ad42d | 2ce9700da22e37f27a991c230e9d1658c02e66e0 | /tasks/migrations/0001_initial.py | 1979ad9a384793e6ae0fb0a84c162a8f6020ff06 | [] | no_license | raphaelarcanjo/agenda | a0724ffb99370e3805cdeea8ceba3f907f43a3b2 | e309c71c0d74e11f0af7c987dad54a49ca077252 | refs/heads/master | 2022-05-07T21:16:46.354218 | 2022-03-21T20:53:48 | 2022-03-21T20:53:48 | 201,701,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # Generated by Django 2.2.16 on 2021-02-03 22:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Task table (name, description, done, date, time)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('description', models.CharField(max_length=80)),
                ('done', models.BooleanField()),
                ('date', models.DateField()),
                ('time', models.TimeField()),
            ],
        ),
    ]
| [
"raphael.o.cunha@hotmail.com"
] | raphael.o.cunha@hotmail.com |
f30b74b74f08df9126992b9926bc6a514aa82cac | 3cd1246ff58f26329021f2d13caa62221c91d5a4 | /testdata/python/stmt/with_.py | 9fd016ae112182564286080a9cbcc9d114768021 | [] | no_license | mwkmwkmwk/unpyc | 0929e15fb37599496930299d7ced0bf1bedd7e99 | 000fdaec159050c94b7ecf6ab57be3950676f778 | refs/heads/master | 2020-12-01T14:01:57.592806 | 2016-03-21T14:11:43 | 2016-03-21T14:12:01 | 230,650,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from __future__ import with_statement
with a:
b
c
d
with a as b:
c
with a as (a, b):
c
d
while a:
with b as c:
continue
| [
"koriakin@0x04.net"
] | koriakin@0x04.net |
bff3627d5b8abf459b876f17bbd1d47abc50bf37 | 2fda7a3f7c21eedf4a1bdd627e04bea4dd0b1c1c | /arrays/max_sum_array.py | 1819b82620c19a5a529273a58511846c1dda3be2 | [] | no_license | shedolkar12/DS-Algo | a9f71ea3f1c2d975844ce44e69298c81f9ae8f44 | 1df65546581b91489509e15484dcada81bd0911b | refs/heads/master | 2021-06-04T21:30:23.917352 | 2016-09-17T15:32:14 | 2016-09-17T15:32:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | # Maximum sum of subarray of given array
#A = [-2, -3, 4, -1, 2, -5, 4]
def max_sum(A):
    """Return the maximum sum over all contiguous subarrays of A (Kadane's algorithm).

    Also prints the best subarray found together with its start index and a
    length counter, mirroring the original diagnostic output.  A must be
    non-empty.  Fixes: Python-2 `print` statement (a SyntaxError on Python 3)
    and the shadowing of the builtin `sum`.
    """
    best = current = A[0]
    j, index, count = 1, 0, 1
    for value in A[1:]:
        current = current + value
        if current < value:
            # Restarting at position j beats extending the previous run.
            current = value
            index = j
            count = 1
        if best <= current:
            best = max(current, best)
            count += 1
        j += 1
    print(A[index:index + count], index, count)
    return best
# Example: finding a balanced stretch of 0s and 1s reduces to max-subarray-sum
# after mapping 0 -> 1 and 1 -> -1.  (Python-3 print() fix.)
A = [0, 1, 0]
for i in range(len(A)):
    A[i] = 1 if A[i] == 0 else -1
print(max_sum(A))
| [
"rajeshshedolkar12@gmail.com"
] | rajeshshedolkar12@gmail.com |
22d26910b602553d6a00545801f30dac1d8c9a36 | 4f1c3f764b29953ece5cf50aeedfe8abce1b8700 | /lj_metadata.py | 267d3a45427782c76ef12e94b5bb67aeb6805e03 | [] | no_license | smagellan/ljData | 9de9281289aea4ee2ba03711497ba42e49542b39 | 25df0bca3a52ddd3de454685b92abf1cebcd814e | refs/heads/master | 2021-01-18T04:54:42.433147 | 2015-12-01T14:44:01 | 2015-12-01T14:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | # Gathers tabulated data (username, account number and registration date)
# on extended network of LiveJournal accounts, based on friends-of-friends of starting user
# By Lawrence Alexander @LawrenceA_UK
import requests
import time
import argparse
from BeautifulSoup import BeautifulStoneSoup
import re
import csv
import urllib
# define arguments
ap = argparse.ArgumentParser()
ap.add_argument("-u","--username", required=True,help="Enter username of starting LiveJournal account.")
args = vars(ap.parse_args())
ljuser = args['username']
outCSV = "LiveJournal_blogs.csv"
# Main function to extract an LJ user's friends
def getFriends(username, allfriends):
url = "http://www.livejournal.com/misc/fdata.bml?user=" + username
user_agent = {'User-Agent': 'Web scraper by la2894@my.open.ac.uk @LawrenceA_UK'}
print "Getting friends for user " + username
try:
response = requests.get(url, headers=user_agent)
except:
print "Error getting page: " + url
pass
# Process response
if response.status_code == 200:
userFriends= response.content
outFriends = userFriends.split(">")
inFriends = userFriends.split("<")
allFriends = []
for x in range(1, len(outFriends) - 2):
friendName= outFriends[x]
allFriends.append(friendName)
for y in range(1, len(inFriends) - 2):
friendName= inFriends[y]
allFriends.append(friendName)
getFriends.allfriends = allFriends
return (allfriends)
# get network from starting user
getFriends (username=ljuser, allfriends=[])
all_friends=getFriends.allfriends
all_friends = list(set(all_friends))
friendsOf=[]
for user in all_friends:
time.sleep(1) # Good manners
getFriends(username=user, allfriends=[])
friendsOf.extend(getFriends.allfriends)
all_friends.extend(friendsOf)
# Remove any duplicates from friends list
clean = []
for f in all_friends:
if f not in clean:
clean.append(f)
all_friends = clean
print "Usernames collected: " + str(len(all_friends))
for accountName in all_friends:
time.sleep(1) # Good manners
accountName=accountName.replace(" ", "")
accountName=accountName.rstrip()
url = "http://%s" % urllib.quote(accountName)
url += ".livejournal.com/data/foaf.rdf"
user_agent = {'User-Agent': 'Web scraper by lawz.alexander@gmail.com @LawrenceA_UK'}
print "Getting metadata for user " + accountName
response = requests.get(url, headers=user_agent)
try:
if response.status_code == 200:
soup = BeautifulStoneSoup(response.content)
result = soup.contents[3].contents[1]
nickname = soup.contents[3].contents[1].contents[1].contents[0]
accountno = soup.findAll('foaf:img', limit=1)
# Get blog creation date and time
timestamp_pattern='lj:datecreated(.+?) '
pattern=re.compile(timestamp_pattern)
datestamp = soup.findAll(name='foaf:weblog', limit=1)
datestamp=re.findall(pattern,str(datestamp))
datestamp=str(datestamp)
datestamp=datestamp.strip( "\'']" )
datestamp=datestamp.strip( "'['=")
datestamp=datestamp.strip('"')
# Get sequential LiveJournal account number from avatar URL
account_pattern='rdf:resource="(.+?\d)"'
pattern=re.compile(timestamp_pattern)
account=re.findall(account_pattern,str(accountno))
account=account[0]
account = account[-8:]
datestamp=datestamp[:10]
acnumber=int(account)
# Filter by time period of interest
if acnumber > 68000000 and acnumber < 71500000:
troll_flag="Yes"
else:
troll_flag="No"
with open(outCSV, 'a') as ljData:
blogs = csv.writer(ljData, delimiter=',', lineterminator='\n', dialect='excel')
blogs.writerow([nickname] + [datestamp] + [account] + [troll_flag])
ljData.close()
except:
print "Error getting account."
pass
print "Complete. CSV file written." | [
"lawz.alexander@gmail.com"
] | lawz.alexander@gmail.com |
0e128695b6d32a1534a11c72d93838c79e35da17 | a89d5746ab0255a32558be21c33c2d028e9b7148 | /数学基础/code/朴素贝叶斯/高斯朴素贝叶斯.py | 4b876860ed9d51444155b18dc4a4af60d2f108a0 | [] | no_license | 15110500442/pa- | 9c4bf3b2e6906c4df1e609f65a58e00660f31fa7 | 561757e6f6f1e16deaa1b9e5af5ac78fed0e21f5 | refs/heads/master | 2020-04-13T01:56:20.208424 | 2018-12-24T11:39:11 | 2018-12-24T11:39:11 | 162,887,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #测试数据
import numpy as np
# Import the Gaussian naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
# Training set: six 2-feature samples with their class labels
features_train = np.array([[1,1],[1,2],[2,3],[2,3],[1,4],[2,4]])
labels_train = np.array([1,2,3,1,1,3])
# Instantiate the classifier
clf = GaussianNB()
# Train the model (fit == train)
clf.fit(features_train, labels_train)
# Predict the class of a single test sample
features_test = np.array([[1,3]])#
pred = clf.predict(features_test)
print("预测的类别为:\t",pred)
| [
"597945045@qq.com"
] | 597945045@qq.com |
cb91209f548fbbf043bcdb0238ad37fd9d391a2f | 5367f9740ad9b13ae42438b2d031f71466498675 | /Bonus2_Mike.py | 8cc06a97ceff7d0e560a228a5f65925fdd6bbc0e | [] | no_license | own2pwn/Intro_To_Prog_HSE | 5e34ca08a02e584b607b3718bbee9627cb5a4344 | ca9e63a68420b2583ee1223a2f126861cb1895e6 | refs/heads/master | 2020-04-01T11:01:01.110063 | 2018-09-26T21:04:53 | 2018-09-26T21:04:53 | 153,142,759 | 0 | 0 | null | 2018-10-15T16:03:01 | 2018-10-15T16:03:24 | null | UTF-8 | Python | false | false | 1,103 | py | # BONUS TASK 1
def f(s, n):
    """Caesar-shift the letters of s by n positions, case-folded to lower.

    Non-alphabetic characters pass through unchanged; n may be negative
    (decoding).  BUG FIX: the alphabet has 26 letters, so the shift must be
    taken modulo 26 — the original used 25, so 'z' could never be produced
    and an input 'z' was shifted as if it were 'a'.
    """
    result = ''
    for ch in s.lower():
        if ch.isalpha():
            result += chr(ord('a') + (ord(ch) - ord('a') + n) % 26)
        else:
            result += ch
    return result
import random
import argparse
# Command-line interface: positional input file, output file, and mode.
parser = argparse.ArgumentParser(description='Process input and output.')
parser.add_argument('input', type=str, nargs='+',
                    help='input file')
parser.add_argument('output', type=str, nargs='+',
                    help='output file')
parser.add_argument('type', type=str, nargs='+',
                    help='"encoding" or "decoding"')
args = parser.parse_args()
abc = 'abcdefghijklmnopqrstuvwxyz'
#!!! encoding and decoding key
n = 3
# NOTE(review): `import random` appears unused, and input_file is never
# closed — consider a `with` block.  Confirm before changing behavior.
input_file = open(vars(args)['input'][0], 'r')
output_file = open(vars(args)['output'][0], 'w')
Type = vars(args)['type'][0]
# Encode (shift +n) or decode (shift -n) each whitespace-separated word;
# each output word is written with a single leading space.
for string in input_file:
    for word in string.split():
        if Type == 'encoding':
            output_file.write(' ' + f(word, n))
        else:
            output_file.write(' ' + f(word, -1 * n))
    output_file.write('\n')
output_file.close() | [
"noreply@github.com"
] | noreply@github.com |
61c12fba05665362085355b586a72a95a6cb9038 | 65a3f548503cd1bdd9a429704aec630c0a507b4b | /src/genie/libs/parser/nxos/show_vxlan.py | 7c7f3d6d9fb672cf68724c8a9bca7691087b3db0 | [
"Apache-2.0"
] | permissive | LouiseSianEvans/genieparser | 75c3c73612db4a7cb31f657dc6ad9f25b5bfebb5 | 7dd4d81834479e35a6c08254e10e7692b00b897b | refs/heads/master | 2020-04-30T15:25:04.158694 | 2019-03-27T22:58:15 | 2019-03-27T22:58:15 | 176,919,539 | 0 | 0 | Apache-2.0 | 2019-03-21T10:10:46 | 2019-03-21T10:10:43 | Python | UTF-8 | Python | false | false | 84,285 | py | """show_vxlan.py
NXOS parser for the following show commands:
* show nve peers
* show nve interface <nve> detail
* show nve ethernet-segment
* show nve vni
* show nve vni summary
* show nve multisite dci-links
* show nve multisite fabric-links
* show l2route fl all
* show l2route evpn ethernet-segment all
* show l2route topology detail
* show l2route mac all detail
* show l2route mac-ip all detail
* show l2route summary
* show nve vni ingress-replication
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
from genie.libs.parser.utils.common import Common
# ====================================================
# schema for show nve peers
# ====================================================
class ShowNvePeersSchema(MetaParser):
    """Schema for:
        show nve peers"""
    # Top-level key: NVE interface name (e.g. 'nve1').
    schema = {
        Any(): {
            'nve_name': str,            # NVE interface name, repeated from the key
            'peer_ip': {
                Any(): {                # keyed by the peer's IP address
                    'peer_state': str,  # lower-cased State column (e.g. 'up')
                    'learn_type': str,  # LearnType column (e.g. 'CP')
                    'uptime': str,      # uptime string as shown by the device
                    'router_mac': str,  # peer Router-Mac column or 'n/a'
                },
            },
        },
    }
# ====================================================
# parser for show nve peers
# ====================================================
class ShowNvePeers(ShowNvePeersSchema):
    """Parser for :
       show nve peers"""

    cli_command = 'show nve peers'

    def cli(self, output=None):
        """Parse `show nve peers` into the ShowNvePeersSchema structure.

        output: pre-collected CLI text; when None the command is executed
        on the connected device.
        """
        cli_text = output if output is not None else self.device.execute(self.cli_command)

        parsed = {}

        # Data rows look like:
        # nve1      201.202.1.1     Up    CP        01:15:09 n/a
        row_pattern = re.compile(
            r'^\s*(?P<nve_name>[\w\/]+) +(?P<peer_ip>[\w\.]+) +(?P<peer_state>[\w]+)'
            r' +(?P<learn_type>[\w]+) +(?P<uptime>[\w\:]+) +(?P<router_mac>[\w\.\/]+)$')

        for raw_line in cli_text.splitlines():
            if not raw_line:
                continue
            match = row_pattern.match(raw_line.rstrip())
            if not match:
                continue
            fields = match.groupdict()
            intf_name = fields['nve_name']
            peer_addr = fields['peer_ip']
            intf_entry = parsed.setdefault(intf_name, {})
            intf_entry['nve_name'] = intf_name
            peer_entry = intf_entry.setdefault('peer_ip', {}).setdefault(peer_addr, {})
            peer_entry['learn_type'] = fields['learn_type']
            peer_entry['uptime'] = fields['uptime']
            peer_entry['router_mac'] = fields['router_mac']
            peer_entry['peer_state'] = fields['peer_state'].lower()

        return parsed
# ====================================================
# schema for show nve vni summary
# ====================================================
class ShowNveVniSummarySchema(MetaParser):
    """Schema for:
        show nve vni summary"""
    schema = {
        'vni': {
            'summary': {
                # control-plane learned VNIs ('Total CP VNIs' line)
                'cp_vni_count': int,
                'cp_vni_up': int,
                'cp_vni_down': int,
                # data-plane VNIs ('Total DP VNIs' line)
                'dp_vni_count': int,
                'dp_vni_up': int,
                'dp_vni_down': int,
            },
        },
    }
# ====================================================
# parser for show nve vni summary
# ====================================================
class ShowNveVniSummary(ShowNveVniSummarySchema):
    """Parser for :
       show nve vni summary"""

    cli_command = 'show nve vni summary'

    def cli(self, output=None):
        """Parse `show nve vni summary` into the ShowNveVniSummarySchema shape."""
        if output is None:
            output = self.device.execute(self.cli_command)

        parsed = {}

        # Total CP VNIs: 21    [Up: 21, Down: 0]
        cp_line = re.compile(
            r'^\s*Total +CP +VNIs: +(?P<cp_vni_count>[\d]+) +\[Up: +(?P<cp_vni_up>[\d]+), +Down: +(?P<cp_vni_down>[\d]+)\]$')
        # Total DP VNIs: 0    [Up: 0, Down: 0]
        dp_line = re.compile(
            r'^\s*Total +DP +VNIs: +(?P<dp_vni_count>[\d]+) +\[Up: +(?P<dp_vni_up>[\d]+), +Down: +(?P<dp_vni_down>[\d]+)\]$')

        for raw in output.splitlines():
            if not raw:
                continue
            line = raw.rstrip()

            match = cp_line.match(line)
            if match:
                summary = parsed.setdefault('vni', {}).setdefault('summary', {})
                summary.update({key: int(val) for key, val in match.groupdict().items()})
                continue

            match = dp_line.match(line)
            if match:
                # Relies on the CP line having been seen first, exactly as the
                # device prints it (mirrors the original behavior).
                summary.update({key: int(val) for key, val in match.groupdict().items()})
                continue

        return parsed
# ====================================================
# schema for show nve vni
# ====================================================
class ShowNveVniSchema(MetaParser):
    """Schema for:
        show nve vni"""
    # Keyed by NVE interface name (e.g. 'nve1'), then by numeric VNI.
    schema ={
        Any(): {
            'vni': {
                Any(): {
                    'vni': int,        # VXLAN network identifier (repeats the key)
                    'mcast': str,      # Multicast-group column, lower-cased
                    'vni_state': str,  # lower-cased State column ('up'/'down')
                    'mode': str,       # Mode column (e.g. 'CP')
                    'type': str,       # Type column, e.g. 'L2 [1001]'
                    'flags': str,      # Flags column; empty string when absent
                }
            }
        }
    }
# ====================================================
# Parser for show nve vni
# ====================================================
class ShowNveVni(ShowNveVniSchema):
    """parser for:
        show nve vni"""

    cli_command = 'show nve vni'

    def cli(self, output=None):
        """Parse `show nve vni` into the ShowNveVniSchema structure."""
        if output is None:
            output = self.device.execute(self.cli_command)

        parsed = {}

        # Interface VNI      Multicast-group   State Mode Type [BD/VRF]      Flags
        # --------- -------- ----------------- ----- ---- ------------------ -----
        # nve1      5001     234.1.1.1         Up    CP   L2 [1001]
        row_pattern = re.compile(
            r'^\s*(?P<nve_name>[\w\/]+) +(?P<vni>[\d]+) +(?P<mcast>[\w\.\/]+)'
            r' +(?P<vni_state>[\w]+) +(?P<mode>[\w]+) +(?P<type>[\w\s\-\[\]]+)( +(?P<flags>[\w]+))?$')

        for raw in output.splitlines():
            if not raw:
                continue
            match = row_pattern.match(raw.rstrip())
            if not match:
                continue
            fields = match.groupdict()
            vni_id = int(fields['vni'])
            entry = parsed.setdefault(fields['nve_name'], {}).setdefault('vni', {}).setdefault(vni_id, {})
            entry['vni'] = vni_id
            entry['mcast'] = fields['mcast'].lower()
            entry['vni_state'] = fields['vni_state'].lower()
            entry['mode'] = fields['mode']
            entry['type'] = fields['type']
            # Flags column may be absent; normalize to an empty string.
            entry['flags'] = fields['flags'] if fields['flags'] else ""

        return parsed
# ====================================================
# schema for show interface | i nve
# ====================================================
class ShowNveInterfaceSchema(MetaParser):
    """Schema for:
        show nve interface | i nve"""
    schema = {
        'nves':
            {Any():                 # keyed by NVE interface name, e.g. 'nve1'
                {'nve_name': str,   # interface name, repeated from the key
                 'nve_state': str,  # lower-cased state word from 'nveX is <state>'
                },
            },
        }
#=======================================
# show interface | i nve
#=======================================
class ShowNveInterface(ShowNveInterfaceSchema):
    """Parser for show interface | i nve"""

    cli_command = 'show interface | i nve'

    def cli(self, output=None):
        """Parse the filtered interface listing into the ShowNveInterfaceSchema shape."""
        if output is None:
            output = self.device.execute(self.cli_command)

        parsed = {}

        # nve1 is down (other)
        status_pattern = re.compile(
            r'^\s*nve(?P<nve>(\d+)) +is +(?P<nve_state>[\w]+)( +(?P<other>[\w\(\)]+))?$')

        for raw in output.splitlines():
            match = status_pattern.match(raw.rstrip())
            if not match:
                continue
            fields = match.groupdict()
            name = 'nve{}'.format(fields['nve'])
            entry = parsed.setdefault('nves', {}).setdefault(name, {})
            entry['nve_name'] = name
            entry['nve_state'] = fields['nve_state'].lower()

        return parsed
# ====================================================
# schema for show nve interface <nve> detail
# ====================================================
class ShowNveInterfaceDetailSchema(MetaParser):
    """Schema for:
        show nve interface <nve> detail"""
    # Keyed by NVE interface name (e.g. 'nve1').
    schema ={
        Any(): {
            'nve_name': str,
            # basic interface facts
            Optional('if_state'): str,
            Optional('encap_type'): str,
            Optional('vpc_capability'): str,
            Optional('local_rmac'): str,
            Optional('host_reach_mode'): str,
            # source interface and its addresses/state
            Optional('source_if'): str,
            Optional('primary_ip'): str,
            Optional('secondary_ip'): str,
            Optional('src_if_state'): str,
            Optional('ir_cap_mode'): str,
            Optional('adv_vmac'): bool,
            Optional('nve_flags'): str,
            Optional('nve_if_handle'): int,
            # hold-down/hold-up timers (seconds)
            Optional('src_if_holddown_tm'): int,
            Optional('src_if_holdup_tm'): int,
            Optional('src_if_holddown_left'): int,
            # multi-site delay-restore timers (seconds)
            Optional('multisite_convergence_time'): int,
            Optional('multisite_convergence_time_left'): int,
            Optional('vip_rmac'): str,
            Optional('vip_rmac_ro'): str,
            Optional('sm_state'): str,
            Optional('peer_forwarding_mode'): bool,
            Optional('dwn_strm_vni_cfg_mode'): str,
            # last re-init notifications sent per source node
            Optional('src_intf_last_reinit_notify_type'): str,
            Optional('mcast_src_intf_last_reinit_notify_type'): str,
            Optional('multi_src_intf_last_reinit_notify_type'): str,
            # multi-site border-gateway interface details
            Optional('multisite_bgw_if'): str,
            Optional('multisite_bgw_if_ip'): str,
            Optional('multisite_bgw_if_admin_state'): str,
            Optional('multisite_bgw_if_oper_state'): str,
            Optional('multisite_bgw_if_oper_state_down_reason'): str,
        }
    }
# ====================================================
# schema for show nve interface <nve> detail
# ====================================================
class ShowNveInterfaceDetail(ShowNveInterfaceDetailSchema):
    """parser for:
        show nve interface <nve> detail"""

    cli_command = 'show nve interface {intf} detail'

    def cli(self, intf=""):
        """Parse `show nve interface <intf> detail`.

        intf: NVE interface name (e.g. 'nve1').  When empty, the parser first
        runs `show interface | i nve` to discover every NVE interface on the
        device, then collects the detail output for each.

        Returns a dict keyed by NVE interface name, per
        ShowNveInterfaceDetailSchema.
        """
        # Build the list of NVE interfaces to query.
        nve_list = []
        if intf:
            nve_list.append(intf)
        if not intf:
            cmd1 = 'show interface | i nve'
            out1 = self.device.execute(cmd1)

            # nve1 is down (other)
            p1 = re.compile(r'^\s*nve(?P<nve>(\d+)) +is +(?P<nve_state>[\w]+)( +(?P<other>[\w\(\)]+))?$')

            for line in out1.splitlines():
                line = line.rstrip()
                m = p1.match(line)
                if m:
                    group = m.groupdict()
                    nve_name = '{}{}'.format('nve', group.get('nve'))
                    nve_list.append(nve_name)
                    continue

        result_dict = {}

        # Interface: nve1, State: Up, encapsulation: VXLAN
        p1 = re.compile(r'^\s*Interface: +(?P<nve_name>[\w\/]+), +State: +(?P<state>[\w]+),'
                        ' +encapsulation: +(?P<encapsulation>[\w]+)$')
        p2 = re.compile(r'^\s*VPC Capability: +(?P<vpc_capability>[\w\s\-\[\]]+)$')
        p3 = re.compile(r'^\s*Local Router MAC: +(?P<local_router_mac>[\w\.]+)$')
        p4 = re.compile(r'^\s*Host Learning Mode: +(?P<host_learning_mode>[\w\-]+)$')
        p5 = re.compile(r'^\s*Source-Interface: +(?P<source_if>[\w\/]+)'
                        ' +\(primary: +(?P<primary_ip>[\w\.]+), +secondary: +(?P<secondary_ip>[\w\.]+)\)$')
        p6 = re.compile(r'^\s*Source +Interface +State: +(?P<source_state>[\w]+)$')
        p7 = re.compile(r'^\s*IR +Capability +Mode: +(?P<mode>[\w]+)$')
        p8 = re.compile(r'^\s*Virtual +RMAC +Advertisement: +(?P<adv_vmac>[\w]+)$')
        p9 = re.compile(r'^\s*NVE +Flags:( +(?P<flags>[\w]+))?$')
        p10 = re.compile(r'^\s*Interface +Handle: +(?P<intf_handle>[\w]+)$')
        p11 = re.compile(r'^\s*Source +Interface +hold-down-time: +(?P<hold_down_time>[\d]+)$')
        p12 = re.compile(r'^\s*Source +Interface +hold-up-time: +(?P<hold_up_time>[\d]+)$')
        p13 = re.compile(r'^\s*Remaining +hold-down +time: +(?P<hold_time_left>[\d]+) +seconds$')
        p14 = re.compile(r'^\s*Virtual +Router +MAC: +(?P<v_router_mac>[\w\.]+)$')
        p15 = re.compile(r'^\s*Virtual +Router +MAC +Re\-origination: +(?P<v_router_mac_re>[\w\.]+)$')
        p16 = re.compile(r'^\s*Interface +state: +(?P<intf_state>[\w\-]+)$')
        p17 = re.compile(r'^\s*unknown-peer-forwarding: +(?P<peer_forwarding>[\w]+)$')
        p18 = re.compile(r'^\s*down-stream +vni +config +mode: +(?P<vni_config_mode>[\w\/]+)$')
        p19 = re.compile(r'^\s*Nve +Src +node +last +notif +sent: +(?P<last_notif_sent>[\w\-]+)$')
        p20 = re.compile(r'^\s*Nve +Mcast +Src +node +last +notif +sent: +(?P<last_notif_sent>[\w\-]+)$')
        p20_1 = re.compile(r'^\s*Nve +MultiSite +Src +node +last +notif +sent: +(?P<notif_sent>[\w\-]+)$')
        p21 = re.compile(
            r'^\s*Multisite +bgw\-if: +(?P<multisite_bgw_if>[\w\/\-]+) +\(ip: +(?P<multisite_bgw_if_ip>[\w\.]+),'
            ' +admin: +(?P<multisite_bgw_if_admin_state>[\w]+), +oper: +(?P<multisite_bgw_if_oper_state>[\w]+)\)$')
        p22 = re.compile(r'^\s*Multisite +bgw\-if +oper +down +reason: +(?P<reason>[\w\.\s]+)$')

        # Multi-Site delay-restore time: 180 seconds
        p23 = re.compile(r'^\s*Multi(-S|s)ite +delay\-restore +time: +(?P<multisite_convergence_time>\d+) +seconds$')

        # Multi-Site delay-restore time left: 0 seconds
        # BUG FIX: the previous pattern was a copy of p22's "bgw-if oper down
        # reason" text, so this line could never match (p22 consumed such
        # lines first) and multisite_convergence_time_left was never set.
        p24 = re.compile(
            r'^\s*Multi(-S|s)ite +delay\-restore +time +left: +(?P<multisite_convergence_time_left>\d+) +seconds$')

        for nve in nve_list:
            out = self.device.execute(self.cli_command.format(intf=nve))

            for line in out.splitlines():
                if line:
                    line = line.rstrip()
                else:
                    continue

                m = p1.match(line)
                if m:
                    group = m.groupdict()
                    nve_name = group.pop('nve_name')
                    nve_dict = result_dict.setdefault(nve_name, {})
                    nve_dict.update({'nve_name': nve_name})
                    nve_dict.update({'if_state': group.pop('state').lower()})
                    nve_dict.update({'encap_type': group.pop('encapsulation').lower()})
                    continue

                # VPC Capability: VPC-VIP-Only [notified]
                m = p2.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'vpc_capability': group.pop('vpc_capability').lower()})
                    continue

                # Local Router MAC: 5e00.0005.0007
                m = p3.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'local_rmac': group.pop('local_router_mac')})
                    continue

                # Host Learning Mode: Control-Plane
                m = p4.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'host_reach_mode': group.pop('host_learning_mode').lower()})
                    continue

                # Source-Interface: loopback1 (primary: 201.11.11.11, secondary: 201.12.11.22)
                m = p5.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({k: v for k, v in group.items()})
                    continue

                # Source Interface State: Up
                m = p6.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'src_if_state': group.pop('source_state').lower()})
                    continue

                # IR Capability Mode: No
                m = p7.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'ir_cap_mode': group.pop('mode').lower()})
                    continue

                # Virtual RMAC Advertisement: Yes
                m = p8.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'adv_vmac': True if group.pop('adv_vmac').lower() == 'yes' else False})
                    continue

                # NVE Flags:
                m = p9.match(line)
                if m:
                    group = m.groupdict()
                    if group.get("flags"):
                        nve_dict.update({'nve_flags': group.pop('flags')})
                    else:
                        nve_dict.update({'nve_flags': ""})
                    continue

                # Interface Handle: 0x49000001
                m = p10.match(line)
                if m:
                    group = m.groupdict()
                    # base=0 lets int() honor the 0x prefix
                    nve_dict.update({'nve_if_handle': int(group.pop('intf_handle'), 0)})
                    continue

                # Source Interface hold-down-time: 180
                m = p11.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'src_if_holddown_tm': int(group.pop('hold_down_time'))})
                    continue

                # Source Interface hold-up-time: 30
                m = p12.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'src_if_holdup_tm': int(group.pop('hold_up_time'))})
                    continue

                # Remaining hold-down time: 0 seconds
                m = p13.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'src_if_holddown_left': int(group.pop('hold_time_left'))})
                    continue

                # Virtual Router MAC: 0200.c90c.0b16
                m = p14.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'vip_rmac': group.pop('v_router_mac')})
                    continue

                # Virtual Router MAC Re-origination: 0200.6565.6565
                m = p15.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'vip_rmac_ro': group.pop('v_router_mac_re')})
                    continue

                # Interface state: nve-intf-add-complete
                m = p16.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'sm_state': group.pop('intf_state')})
                    continue

                # unknown-peer-forwarding: disable
                m = p17.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'peer_forwarding_mode': False if group.pop('peer_forwarding') == 'disable' else True})
                    continue

                # down-stream vni config mode: n/a
                m = p18.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'dwn_strm_vni_cfg_mode': group.pop('vni_config_mode')})
                    continue

                # Nve Src node last notif sent: Port-up
                m = p19.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'src_intf_last_reinit_notify_type': group.pop('last_notif_sent').lower()})
                    continue

                # Nve Mcast Src node last notif sent: None
                m = p20.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'mcast_src_intf_last_reinit_notify_type': group.pop('last_notif_sent').lower()})
                    continue

                # Nve MultiSite Src node last notif sent: None
                m = p20_1.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'multi_src_intf_last_reinit_notify_type': group.pop('notif_sent').lower()})
                    continue

                # Multisite bgw-if: loopback2 (ip: 101.101.101.101, admin: Down, oper: Down)
                m = p21.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'multisite_bgw_if': group.pop('multisite_bgw_if')})
                    nve_dict.update({'multisite_bgw_if_ip': group.pop('multisite_bgw_if_ip')})
                    nve_dict.update({'multisite_bgw_if_admin_state': group.pop('multisite_bgw_if_admin_state').lower()})
                    nve_dict.update({'multisite_bgw_if_oper_state': group.pop('multisite_bgw_if_oper_state').lower()})
                    continue

                # Multisite bgw-if oper down reason: NVE not up.
                m = p22.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'multisite_bgw_if_oper_state_down_reason': group.pop('reason')})
                    continue

                # Multi-Site delay-restore time: 180 seconds
                m = p23.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'multisite_convergence_time': int(group.pop('multisite_convergence_time'))})
                    continue

                # Multi-Site delay-restore time left: 0 seconds
                m = p24.match(line)
                if m:
                    group = m.groupdict()
                    nve_dict.update({'multisite_convergence_time_left': int(group.pop('multisite_convergence_time_left'))})
                    continue

        return result_dict
# ====================================================
# schema for show nve multisite dci-links
# ====================================================
class ShowNveMultisiteDciLinksSchema(MetaParser):
    """Schema for:
        show nve multisite dci-links"""
    schema ={
        'multisite': {
            Optional('dci_links'): {   # absent when the output has no link rows
                Any():{                # keyed by physical interface name
                    'if_name': str,    # interface name, repeated from the key
                    'if_state': str    # lower-cased State column ('up'/'down')
                },
            },
        },
    }
# ====================================================
# Parser for show nve multisite dci-link
# ====================================================
class ShowNveMultisiteDciLinks(ShowNveMultisiteDciLinksSchema):
    """parser for:
        show nve multisite dci-links"""

    cli_command = 'show nve multisite dci-links'

    def cli(self, output=None):
        """Parse `show nve multisite dci-links` into the schema structure."""
        if output is None:
            output = self.device.execute(self.cli_command)

        parsed = {}

        # Interface    State
        # ---------    -----
        # Ethernet1/53 Up
        # The negative lookahead skips the 'Interface State' header row.
        link_pattern = re.compile(r'^\s*(?P<if_name>(?!Interface)[\w\/]+) +(?P<if_state>[\w]+)$')

        for raw in output.splitlines():
            if not raw:
                continue
            match = link_pattern.match(raw.rstrip())
            if not match:
                continue
            name = match.group('if_name')
            entry = parsed.setdefault('multisite', {}).setdefault('dci_links', {}).setdefault(name, {})
            entry['if_name'] = name
            entry['if_state'] = match.group('if_state').lower()

        return parsed
# ====================================================
# schema for show nve multisite fabric-links
# ====================================================
class ShowNveMultisiteFabricLinksSchema(MetaParser):
    """Schema for:
        show nve multisite fabric-links"""
    schema = {
        'multisite': {
            'fabric_links': {
                Any(): {               # keyed by physical interface name
                    'if_name': str,    # interface name, repeated from the key
                    'if_state': str    # lower-cased State column ('up'/'down')
                },
            },
        },
    }
# ====================================================
# Parser for show nve multisite fabric-link
# ====================================================
class ShowNveMultisiteFabricLinks(ShowNveMultisiteFabricLinksSchema):
    """parser for:
        show nve multisite fabric-links"""

    cli_command = 'show nve multisite fabric-links'

    def cli(self, output=None):
        """Parse `show nve multisite fabric-links` into the schema structure."""
        if output is None:
            output = self.device.execute(self.cli_command)

        parsed = {}

        # Interface    State
        # ---------    -----
        # Ethernet1/53 Up
        # The negative lookahead skips the 'Interface State' header row.
        link_pattern = re.compile(r'^\s*(?P<if_name>(?!Interface)[\w\/]+) +(?P<if_state>[\w]+)$')

        for raw in output.splitlines():
            if not raw:
                continue
            match = link_pattern.match(raw.rstrip())
            if not match:
                continue
            name = match.group('if_name')
            entry = parsed.setdefault('multisite', {}).setdefault('fabric_links', {}).setdefault(name, {})
            entry['if_name'] = name
            entry['if_state'] = match.group('if_state').lower()

        return parsed
# ==================================================
# Schema for show nve ethernet-segment
# ==================================================
class ShowNveEthernetSegmentSchema(MetaParser):
    """Schema for:
        show nve ethernet-segment"""
    schema ={
        'nve':{
            Any():{                      # keyed by NVE interface name
                'ethernet_segment': {
                    'esi': {
                        Any(): {         # keyed by Ethernet Segment Identifier
                            'esi': str,              # ESI, repeated from the key
                            'if_name': str,          # parent interface
                            'es_state': str,         # ES state (e.g. 'Up')
                            'po_state': str,         # port-channel state or 'N/A'
                            'nve_if_name': str,      # owning NVE interface
                            'nve_state': str,        # NVE interface state
                            'host_reach_mode': str,  # host learning mode
                            'active_vlans': str,     # VLAN range string
                            Optional('df_vlans'): str,        # designated-forwarder VLANs
                            'active_vnis': str,               # VNI range string
                            'cc_failed_vlans': str,           # consistency-check failed VLANs
                            'cc_timer_left': str,             # VLAN consistency-check timer
                            'num_es_mem': int,                # number of ES members
                            Optional('local_ordinal'): int,   # this node's ordinal in the ES
                            'df_timer_st': str,               # DF timer start time
                            'config_status': str,             # config state or 'N/A'
                            Optional('df_list'): str,         # DF candidate IP list
                            'es_rt_added': bool,              # ES route added to L2RIB
                            'ead_rt_added': bool,             # EAD/ES routes added to L2RIB
                            'ead_evi_rt_timer_age': str,      # EAD/EVI route timer age
                        },
                    },
                },
            },
        }
    }
# ==================================================
# Schema for show nve ethernet-segment
# ==================================================
class ShowNveEthernetSegment(ShowNveEthernetSegmentSchema):
    """Parser for:
        show nve ethernet-segment

    Each 'ESI:' paragraph in the output becomes one entry keyed by ESI
    under the owning NVE interface.
    """

    cli_command = 'show nve ethernet-segment'

    def cli(self, output=None):
        # Execute command to get output when the caller did not supply it.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        df_vlans = ""
        result_dict = {}

        # ESI: 0300.0000.0001.2c00.0309
        #    Parent interface: nve1
        #   ES State: Up
        #   Port-channel state: N/A
        #   NVE Interface: nve1
        #    NVE State: Up
        #   Host Learning Mode: control-plane
        #   Active Vlans: 1,101-105,1001-1100,2001-2100,3001-3005
        #    DF Vlans: 102,104,1002,1004,1006,1008,1010,1012,1014,1016,1018,1020,1022,1024
        #    ,1026,1028,1030,1032,1034,1036,1038,1040,1042,1044,1046,1048,1050,1052,1054,1056
        #   Active VNIs: 501001-501100,502001-502100,503001-503005,600101-600105
        #   CC failed for VLANs:
        #   VLAN CC timer: 0
        #   Number of ES members: 2
        #   My ordinal: 0
        #   DF timer start time: 00:00:00
        #   Config State: N/A
        #   DF List: 201.0.0.55 201.0.0.66
        #   ES route added to L2RIB: True
        #   EAD/ES routes added to L2RIB: False
        #   EAD/EVI route timer age: not running
        p1 = re.compile(r'^\s*ESI: +(?P<esi>[\w\.]+)$')
        p2 = re.compile(r'^\s*Parent +interface: +(?P<parent_intf>[\w\.\/]+)$')
        p3 = re.compile(r'^\s*ES +State: +(?P<es_state>[\w\/]+)$')
        p4 = re.compile(r'^\s*Port-channel +state: +(?P<po_state>[\w\/]+)$')
        p5 = re.compile(r'^\s*NVE +Interface: +(?P<nve_intf>[\w\.\/]+)$')
        p6 = re.compile(r'^\s*NVE +State: +(?P<nve_state>[\w\/]+)$')
        p7 = re.compile(r'^\s*Host +Learning +Mode: +(?P<host_learning_mode>[\w\-]+)$')
        p8 = re.compile(r'^\s*Active +Vlans: +(?P<active_vlans>[\d\-\,]+)$')
        p9 = re.compile(r'^\s*DF Vlans: +(?P<df_vlans>[\d\-\,]+)$')
        p10 = re.compile(r'^\s*,(?P<df_vlans>[\d\-\,]+)$')
        p11 = re.compile(r'^\s*Active +VNIs: +(?P<active_vnis>[\d\-\,]+)$')
        p12 = re.compile(r'^\s*CC +failed +for +VLANs:( +(?P<cc_failed_vlans>[\w\/]+))?$')
        p13 = re.compile(r'^\s*VLAN CC timer: +(?P<cc_timer_left>[\d]+)?$')
        p14 = re.compile(r'^\s*Number +of +ES +members: +(?P<num_es_mem>[\d]+)?$')
        p15 = re.compile(r'^\s*My +ordinal: +(?P<local_ordinal>[\d]+)$')
        p16 = re.compile(r'^\s*DF +timer +start +time: +(?P<df_timer_start_time>[\w\:]+)$')
        p17 = re.compile(r'^\s*Config +State: +(?P<config_status>[\w\/]+)$')
        p18 = re.compile(r'^\s*DF +List: +(?P<df_list>[\d\s\.]+)$')
        p19 = re.compile(r'^\s*ES +route +added +to +L2RIB: +(?P<is_es_added_to_l2rib>[\w]+)$')
        p20 = re.compile(r'^\s*EAD\/ES +routes +added +to +L2RIB: +(?P<ead_rt_added>[\w]+)$')
        p21 = re.compile(r'^\s*EAD/EVI +route +timer +age: +(?P<ead_evi_rt_timer_age>[\w\s]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            m = p1.match(line)
            if m:
                # Fields seen before 'NVE Interface:' are buffered in locals
                # until the per-ESI dict is created in the p5 branch below.
                group = m.groupdict()
                esi = group.pop('esi')
                continue

            m = p2.match(line)
            if m:
                group = m.groupdict()
                if_name = group.pop('parent_intf')
                continue

            m = p3.match(line)
            if m:
                group = m.groupdict()
                es_state = group.pop('es_state').lower()
                continue

            m = p4.match(line)
            if m:
                group = m.groupdict()
                po_state = group.pop('po_state').lower()
                continue

            m = p5.match(line)
            if m:
                # The 'NVE Interface:' line completes the identifying tuple,
                # so the result entry is created here and the buffered
                # fields are flushed into it.
                group = m.groupdict()
                nve_if_name = group.pop('nve_intf')
                esi_dict = result_dict.setdefault('nve', {}).setdefault(nve_if_name, {}).\
                    setdefault('ethernet_segment', {}).setdefault('esi', {}).\
                    setdefault(esi, {})
                esi_dict.update({'esi': esi})
                esi_dict.update({'nve_if_name': nve_if_name})
                esi_dict.update({'po_state': po_state})
                esi_dict.update({'if_name': if_name})
                esi_dict.update({'es_state': es_state})
                continue

            m = p6.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'nve_state': group.pop('nve_state').lower()})
                continue

            m = p7.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'host_reach_mode': group.pop('host_learning_mode').lower()})
                continue

            m = p8.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'active_vlans': group.pop('active_vlans')})
                continue

            m = p9.match(line)
            if m:
                group = m.groupdict()
                df_vlans = group.pop('df_vlans')
                esi_dict.update({'df_vlans': df_vlans})
                continue

            m = p10.match(line)
            if m:
                # Continuation line of a wrapped 'DF Vlans:' list.
                group = m.groupdict()
                df_vlans = "{},{}".format(df_vlans, group.pop('df_vlans'))
                esi_dict.update({'df_vlans': df_vlans})
                continue

            m = p11.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'active_vnis': group.pop('active_vnis')})
                continue

            m = p12.match(line)
            if m:
                # 'cc_failed_vlans' is an optional group; store '' when the
                # list is empty.  (The previous code popped the key twice,
                # which raised KeyError whenever a VLAN list was present.)
                cc_failed_vlans = m.groupdict()['cc_failed_vlans']
                esi_dict.update({'cc_failed_vlans': cc_failed_vlans or ''})
                continue

            m = p13.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'cc_timer_left': group.pop('cc_timer_left')})
                continue

            m = p14.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'num_es_mem': int(group.pop('num_es_mem'))})
                continue

            m = p15.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'local_ordinal': int(group.pop('local_ordinal'))})
                continue

            m = p16.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'df_timer_st': group.pop('df_timer_start_time')})
                continue

            m = p17.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'config_status': group.pop('config_status').lower()})
                continue

            m = p18.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'df_list': group.pop('df_list')})
                continue

            m = p19.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'es_rt_added': False if 'False' in group.pop('is_es_added_to_l2rib') else True})
                continue

            m = p20.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'ead_rt_added': False if 'False' in group.pop('ead_rt_added') else True})
                continue

            m = p21.match(line)
            if m:
                group = m.groupdict()
                esi_dict.update({'ead_evi_rt_timer_age': group.pop('ead_evi_rt_timer_age')})
                continue
        return result_dict
# ====================================================
# schema for show l2route evpn ethernet-segment all
# ====================================================
class ShowL2routeEvpnEternetSegmentAllSchema(MetaParser):
    """Schema for:
        show l2route evpn ethernet-segment all"""

    # Entries are keyed by a running 1-based index in order of appearance.
    schema ={
        'evpn': {
            'ethernet_segment': {
                Any(): {
                    'ethernet_segment': str,
                    'originating_rtr': str,
                    'prod_name': str,
                    'int_ifhdl': str,
                    # NFN bitmap; stored as int when numeric.
                    'client_nfn': int,
                }
            }
        }
    }
# ====================================================
# Parser for show l2route evpn ethernet-segment all
# ====================================================
class ShowL2routeEvpnEternetSegmentAll(ShowL2routeEvpnEternetSegmentAllSchema):
    """Parser for:
        show l2route evpn ethernet-segment all

    Entries are keyed by a running 1-based index, one per row of output.
    """

    cli_command = 'show l2route evpn ethernet-segment all'

    def cli(self, output=None):
        # Execute command to get output when the caller did not supply it.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}
        index = 1

        # ESI                      Orig Rtr. IP Addr  Prod  Ifindex      NFN Bitmap
        # ------------------------ -----------------  ----- -----------  ----------
        # 0300.0000.0001.2c00.0309 201.0.0.55         VXLAN nve1         64
        p1 = re.compile(r'^\s*(?P<ethernet_segment>(?!ESI)[\w\.]+) +(?P<originating_rtr>[\d\.]+)'
                        r' +(?P<prod_name>[\w]+) +(?P<int_ifhdl>[\w\/]+) +(?P<client_nfn>[\w\.]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue
            m = p1.match(line)
            if m:
                evpn_dict = result_dict.setdefault('evpn', {}).setdefault('ethernet_segment', {}).setdefault(index, {})
                group = m.groupdict()
                for k, v in group.items():
                    # Numeric columns (e.g. the NFN bitmap) are stored as
                    # int; all other values are lower-cased text.
                    try:
                        v = int(v)
                    except ValueError:  # was a bare except; int() on text raises ValueError
                        v = v.lower()
                    evpn_dict.update({k: v})
                index += 1
                continue
        return result_dict
# ====================================================
# schema for show l2route topology detail
# ====================================================
class ShowL2routeTopologyDetailSchema(MetaParser):
    """Schema for:
        show l2route topology detail"""

    # Keyed by numeric topology id, then by topology name; all attribute
    # fields are optional because they depend on the topology type.
    schema ={
        'topology': {
            'topo_id': {
                Any(): {
                    'topo_name': {
                        Any(): {
                            'topo_name': str,
                            Optional('topo_type'): str,
                            Optional('vni'): int,
                            Optional('encap_type'): int,
                            Optional('iod'): int,
                            Optional('if_hdl'): int,
                            Optional('vtep_ip'): str,
                            Optional('emulated_ip'): str,
                            Optional('emulated_ro_ip'): str,
                            Optional('tx_id'): int,
                            Optional('rcvd_flag'): int,
                            Optional('rmac'): str,
                            Optional('vrf_id'): int,
                            Optional('vmac'): str,
                            Optional('flags'): str,
                            Optional('sub_flags'): str,
                            Optional('prev_flags'): str,
                        }
                    }
                }
            }
        }
    }
# ====================================================
# Parser for show l2route topology detail
# ====================================================
class ShowL2routeTopologyDetail(ShowL2routeTopologyDetailSchema):
    """Parser for:
        show l2route topology detail"""

    cli_command = 'show l2route topology detail'

    def cli(self, output=None):
        # Execute command to get output when the caller did not supply it.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}

        # Topology ID   Topology Name   Attributes
        # -----------   -------------   ----------
        # 101           Vxlan-10001     VNI: 10001
        #               Encap:0 IOD:0 IfHdl:1224736769
        #               VTEP IP: 201.11.11.11
        #               Emulated IP: 201.12.11.22
        #               Emulated RO IP: 201.12.11.22
        #               TX-ID: 20 (Rcvd Ack: 0)
        #               RMAC: 5e00.0005.0007, VRFID: 3
        #               VMAC: 0200.c90c.0b16
        #               Flags: L3cp, Sub_Flags: --, Prev_Flags: -
        p1 = re.compile(r'^\s*(?P<topo_id>[\d]+) +(?P<topo_name>[\w\-]+) +(?P<topo_type>[\w\/]+)(: +(?P<vni>[\d]+))?$')
        p2 = re.compile(r'^\s*Encap:(?P<encap_type>[\d]+) +IOD:(?P<iod>[\d]+) +IfHdl:(?P<if_hdl>[\d]+)$')
        p3 = re.compile(r'^\s*VTEP +IP: +(?P<vtep_ip>[\d\.]+)$')
        p4 = re.compile(r'^\s*Emulated +IP: +(?P<emulated_ip>[\d\.]+)$')
        p5 = re.compile(r'^\s*Emulated +RO +IP: +(?P<emulated_ro_ip>[\d\.]+)$')
        p6 = re.compile(r'^\s*TX-ID: +(?P<tx_id>[\d]+) +\((Rcvd +Ack: +(?P<rcvd_flag>[\d]+))\)$')
        p7 = re.compile(r'^\s*RMAC: +(?P<rmac>[\w\.]+), VRFID: +(?P<vrf_id>[\d]+)$')
        p8 = re.compile(r'^\s*VMAC: +(?P<vmac>[\w\.]+)$')
        p9 = re.compile(
            r'^\s*Flags: +(?P<flags>[\w]+), +Sub_Flags: +(?P<sub_flags>[\w\-]+), +Prev_Flags: +(?P<prev_flags>[\w\-]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            # Header row: topology id / name, plus an optional VNI.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                topo_id = int(group.pop('topo_id'))
                topo_name = group.pop('topo_name')
                topo_type = group.pop('topo_type').lower()
                topo_dict = result_dict.setdefault('topology', {}).setdefault('topo_id', {}).\
                    setdefault(topo_id, {}).setdefault('topo_name', {}).setdefault(topo_name, {})
                vni = group.pop('vni')
                if vni:
                    topo_dict.update({'vni': int(vni)})
                topo_dict.update({'topo_type': topo_type})
                topo_dict.update({'topo_name': topo_name})
                continue

            # Integer-valued attribute lines.  Each pattern is matched once;
            # the original matched every pattern twice per line.  p2 and p6
            # never match the same line, so first-match wins is equivalent.
            m = p2.match(line) or p6.match(line)
            if m:
                topo_dict.update({k: int(v) for k, v in m.groupdict().items()})
                continue

            # String-valued attribute lines (mutually exclusive patterns).
            m = p3.match(line) or p4.match(line) or p5.match(line) \
                or p8.match(line) or p9.match(line)
            if m:
                topo_dict.update(m.groupdict())
                continue

            # RMAC / VRFID line mixes a string and an int field.
            m = p7.match(line)
            if m:
                group = m.groupdict()
                topo_dict.update({'rmac': group.pop('rmac')})
                topo_dict.update({'vrf_id': int(group.pop('vrf_id'))})
                continue
        return result_dict
# ====================================================
# schema for show l2route mac all detail
# ====================================================
class ShowL2routeMacAllDetailSchema(MetaParser):
    """Schema for:
        show l2route mac all detail"""

    # Keyed by numeric topology id, then by MAC address.
    schema ={
        'topology': {
            'topo_id': {
                Any(): {
                    'mac': {
                        Any(): {
                            'mac_addr': str,
                            'prod_type': str,
                            'flags': str,
                            'seq_num': int,
                            'next_hop1': str,
                            'rte_res': str,
                            'fwd_state': str,
                            Optional('peer_id'): int,
                            Optional('sent_to'): str,
                            Optional('soo'): int,
                        }
                    }
                }
            }
        }
    }
# ====================================================
# Parser for show l2route mac all detail
# ====================================================
class ShowL2routeMacAllDetail(ShowL2routeMacAllDetailSchema):
    """Parser for:
        show l2route mac all detail"""

    cli_command = 'show l2route mac all detail'

    def cli(self, output=None):
        # Execute command to get output when the caller did not supply it.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}

        # Topology    Mac Address    Prod   Flags         Seq No     Next-Hops
        # ----------- -------------- ------ ------------- ---------- ----------------
        # 101         5e00.0002.0007 VXLAN  Rmac          0          204.1.1.1
        #             Route Resolution Type: Regular
        #             Forwarding State: Resolved (PeerID: 2)
        #             Sent To: BGP
        #             SOO: 774975538
        p1 = re.compile(r'^\s*(?P<topo_id>[\d]+) +(?P<mac_addr>[\w\.]+) +(?P<prod_type>[\w\,]+)'
                        r' +(?P<flags>[\w\,\-]+) +(?P<seq_num>[\d]+) +(?P<next_hop1>[\w\/\.]+)$')
        p2 = re.compile(r'^\s*Route +Resolution +Type: +(?P<rte_res>[\w]+)$')
        p3 = re.compile(r'^\s*Forwarding +State: +(?P<fwd_state>[\w]+)( +\(PeerID: +(?P<peer_id>[\d]+)\))?$')
        p4 = re.compile(r'^\s*Sent +To: +(?P<sent_to>[\w\,]+)$')
        p5 = re.compile(r'^\s*SOO: +(?P<soo>[\d]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            # Header row: creates the per-MAC entry.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                topo_id = int(group.pop('topo_id'))
                mac_addr = group.pop('mac_addr')
                topo_dict = result_dict.setdefault('topology', {}).setdefault('topo_id', {}).\
                    setdefault(topo_id, {}).setdefault('mac', {}).setdefault(mac_addr, {})
                flags = group.pop('flags')
                # Strip a trailing comma left over from multi-flag output.
                if flags.endswith(','):
                    flags = flags[:-1]
                topo_dict.update({'flags': flags.lower()})
                topo_dict.update({'prod_type': group.pop('prod_type').lower()})
                topo_dict.update({'seq_num': int(group.pop('seq_num'))})
                topo_dict.update({'mac_addr': mac_addr})
                # Pop the raw value once, then try to normalize it.  The
                # original popped inside the try AND the except, so a failed
                # conversion raised KeyError on the second pop.
                next_hop1 = group.pop('next_hop1')
                try:
                    next_hop1 = Common.convert_intf_name(next_hop1)
                except Exception:
                    # Next hop may be an IP address or keyword the
                    # interface-name converter rejects; keep it verbatim.
                    pass
                topo_dict.update({'next_hop1': next_hop1})
                continue

            # Lower-cased string attribute lines (patterns are mutually
            # exclusive; each is now matched only once per line).
            m1 = p2.match(line) or p4.match(line)
            if m1:
                topo_dict.update({k: v.lower() for k, v in m1.groupdict().items()})
                continue

            m = p3.match(line)
            if m:
                group = m.groupdict()
                topo_dict.update({'fwd_state': group.get('fwd_state')})
                if group.get('peer_id'):
                    topo_dict.update({'peer_id': int(group.get('peer_id'))})
                continue

            m = p5.match(line)
            if m:
                topo_dict.update({k: int(v) for k, v in m.groupdict().items()})
                continue
        return result_dict
# ====================================================
# schema for show l2route mac-ip all detail
# ====================================================
class ShowL2routeMacIpAllDetailSchema(MetaParser):
    """Schema for:
        show l2route mac-ip all detail"""

    # Keyed by numeric topology id, then by MAC address.
    schema ={
        'topology': {
            'topo_id': {
                Any(): {
                    'mac_ip': {
                        Any(): {
                            'mac_addr': str,
                            'mac_ip_prod_type': str,
                            'mac_ip_flags': str,
                            'seq_num': int,
                            'next_hop1': str,
                            'host_ip': str,
                            Optional('sent_to'): str,
                            Optional('soo'): int,
                            Optional('l3_info'): int,
                        }
                    }
                }
            }
        }
    }
# ====================================================
# Parser for show l2route mac-ip all detail
# ====================================================
class ShowL2routeMacIpAllDetail(ShowL2routeMacIpAllDetailSchema):
    """Parser for:
        show l2route mac-ip all detail"""

    cli_command = 'show l2route mac-ip all detail'

    def _add_entry(self, result_dict, group):
        """Create and populate one mac-ip entry from a matched header row.

        Both header-row layouts (p1 and p5 below) capture the same named
        groups, so a single helper replaces two duplicated branches.
        Returns the per-MAC dict so follow-up lines can extend it.
        """
        topo_id = int(group.pop('topo_id'))
        mac_addr = group.pop('mac_addr')
        topo_dict = result_dict.setdefault('topology', {}).setdefault('topo_id', {}).\
            setdefault(topo_id, {}).setdefault('mac_ip', {}).setdefault(mac_addr, {})
        topo_dict.update({'mac_ip_flags': group.pop('mac_ip_flags').lower()})
        topo_dict.update({'mac_ip_prod_type': group.pop('mac_ip_prod_type').lower()})
        topo_dict.update({'seq_num': int(group.pop('seq_num'))})
        topo_dict.update({'mac_addr': mac_addr})
        topo_dict.update({'host_ip': group.pop('host_ip')})
        topo_dict.update({'next_hop1': group.pop('next_hop1').lower()})
        return topo_dict

    def cli(self, output=None):
        # Execute command to get output when the caller did not supply it.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}

        # Topology    Mac Address    Prod   Flags      Seq No     Host IP         Next-Hops
        # ----------- -------------- ------ ---------- ---------- --------------- ---------------
        # 1001        fa16.3ec2.34fe BGP    --         0          5.1.10.11       204.1.1.1
        # 1001        fa16.3ea3.fb66 HMM    --         0          5.1.10.55       Local
        #             Sent To: BGP
        #             SOO: 774975538
        #             L3-Info: 10001
        p1 = re.compile(r'^\s*(?P<topo_id>[\d]+) +(?P<mac_addr>[\w\.]+) +(?P<mac_ip_prod_type>[\w\,]+)'
                        r' +(?P<mac_ip_flags>[\w\,\-]+) +(?P<seq_num>[\d]+) +(?P<host_ip>[\w\/\.]+)'
                        r' +(?P<next_hop1>[\w\/\.]+)$')
        p2 = re.compile(r'^\s*Sent +To: +(?P<sent_to>[\w]+)$')
        p3 = re.compile(r'^\s*SOO: +(?P<soo>[\d]+)$')
        p4 = re.compile(r'^\s*L3-Info: +(?P<l3_info>[\d]+)$')

        # Alternate column order seen on some releases:
        # Topology    Mac Address    Host IP         Prod   Flags      Seq No  Next-Hops
        # ----------- -------------- --------------- ------ ---------- ------- ---------------
        # 101         0000.9cfc.2596 100.101.1.3     BGP    --         0       23.23.23.23
        p5 = re.compile(r'^\s*(?P<topo_id>[\d]+) +(?P<mac_addr>[\w\.]+) +(?P<host_ip>[\w\/\.]+)'
                        r' +(?P<mac_ip_prod_type>[\w\,]+)'
                        r' +(?P<mac_ip_flags>[\w\,\-]+) +(?P<seq_num>[\d]+)'
                        r' +(?P<next_hop1>[\w\/\.]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            # Header rows (either column order) create the entry; the two
            # patterns cannot both match the same line.
            m = p1.match(line) or p5.match(line)
            if m:
                topo_dict = self._add_entry(result_dict, m.groupdict())
                continue

            # Integer attribute lines (mutually exclusive patterns).
            m1 = p3.match(line) or p4.match(line)
            if m1:
                topo_dict.update({k: int(v) for k, v in m1.groupdict().items()})
                continue

            m = p2.match(line)
            if m:
                topo_dict.update({k: v.lower() for k, v in m.groupdict().items()})
                continue
        return result_dict
# ====================================================
# schema for show l2route summary
# ====================================================
class ShowL2routeSummarySchema(MetaParser):
    """Schema for:
        show l2route summary"""

    # Per-table producer statistics; 'total_obj'/'total_mem' sit alongside
    # the per-producer sub-dicts under 'producer_name'.
    schema ={
        'summary': {
            'total_memory': int,
            'numof_converged_tables': int,
            Optional('table_name'): {
                Any(): {
                    'producer_name': {
                        Any(): {
                            'producer_name': str,
                            'id': int,
                            'objects': int,
                            'memory': int,
                        },
                        'total_obj': int,
                        'total_mem': int,
                    }
                }
            }
        }
    }
# ====================================================
# Parser for show l2route summary
# ====================================================
class ShowL2routeSummary(ShowL2routeSummarySchema):
    """Parser for:
        show l2route summary

    NOTE: later branches rely on dicts bound by earlier branches
    (summary_dict from the 'Total Memory' line, table_dict from the
    'Table Name' line), so the order of lines in the device output is
    load-bearing — TODO confirm output always begins with 'Total Memory'.
    """

    cli_command = 'show l2route summary'

    def cli(self, output=None):
        # Execute command to get output when none was supplied.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}
        # L2ROUTE Summary
        # Total Memory: 6967
        # Number of Converged Tables: 47
        # Table Name: Topology
        # Producer  (ID)   Objects      Memory (Bytes)
        # ---------------  ----------   --------------
        # VXLAN     (11 )   21           5927
        # ---------------------------------------------
        # Total             21           5927
        # ---------------------------------------------
        p1 = re.compile(r'^\s*Total +Memory: +(?P<total_memory>[\d]+)$')
        p2 = re.compile(r'^\s*Number +of +Converged +Tables: +(?P<numof_converged_tables>[\d]+)$')
        p3 = re.compile(r'^\s*Table +Name: +(?P<table_name>[\w\-]+)$')
        p4 = re.compile(r'^\s*(?P<producer_name>[\w]+) +\((?P<id>[\d\s]+)\) +(?P<objects>[\d]+) +(?P<memory>[\d]+)$')
        p5 = re.compile(r'^\s*Total +(?P<total_obj>[\d]+) +(?P<total_mem>[\d]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            # 'Total Memory' line creates the summary container.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                total_memory = int(group.pop('total_memory'))
                summary_dict = result_dict.setdefault('summary', {})
                summary_dict.update({'total_memory': total_memory})
                continue

            m = p2.match(line)
            if m:
                group = m.groupdict()
                numof_converged_tables = int(group.pop('numof_converged_tables'))
                summary_dict.update({'numof_converged_tables': numof_converged_tables})
                continue

            # 'Table Name' starts a new per-table section.
            m = p3.match(line)
            if m:
                group = m.groupdict()
                table_name = group.pop('table_name')
                table_dict = summary_dict.setdefault('table_name',{}).setdefault(table_name,{})
                continue

            # One row per producer within the current table.
            m = p4.match(line)
            if m:
                group = m.groupdict()
                producer_name = group.pop('producer_name').lower()
                producer_dict = table_dict.setdefault('producer_name', {}).setdefault(producer_name, {})
                producer_dict.update({k:int(v) for k, v in group.items()})
                producer_dict.update({'producer_name':producer_name})
                continue

            # 'Total' row closes the current table; totals are stored
            # directly under 'producer_name' (see schema).
            m = p5.match(line)
            if m:
                group = m.groupdict()
                producer_dict = table_dict.setdefault('producer_name', {})
                producer_dict.update({k:int(v) for k,v in group.items() })
                continue
        return result_dict
# ====================================================
# schema for show l2route fl all
# ====================================================
class ShowL2routeFlAllSchema(MetaParser):
    """Schema for:
        show l2route fl all"""

    # Keyed by topology id, then by peer id; 'num_of_peer_id' is the count
    # of peers recorded under each topology.
    schema = {
        'topology': {
            'topo_id': {
                Any():{
                    Optional('num_of_peer_id'): int,
                    'peer_id':{
                        Any():{
                            'topo_id': int,
                            'peer_id': int,
                            'flood_list': str,
                            'is_service_node': str,
                        },
                    },
                },
            },
        },
    }
# ====================================================
# Parser for show l2route fl all
# ====================================================
class ShowL2routeFlAll(ShowL2routeFlAllSchema):
    """Parser for:
       show l2route fl all"""

    cli_command = 'show l2route fl all'

    def cli(self, output=None):
        # Run the command unless the caller already captured its output.
        out = self.device.execute(self.cli_command) if output is None else output

        # Topology ID Peer-id     Flood List      Service Node
        # ----------- ----------- --------------- ------------
        p1 = re.compile(r'^\s*(?P<topo_id>[\d]+) +(?P<peer_id>[\d]+) +(?P<flood_list>[\w\.d]+) +(?P<is_service_node>[\w]+)$')

        parsed = {}
        for raw_line in out.splitlines():
            if not raw_line:
                continue
            match = p1.match(raw_line.rstrip())
            if not match:
                continue
            topo = int(match.group('topo_id'))
            peer = int(match.group('peer_id'))
            entry = (parsed.setdefault('topology', {})
                           .setdefault('topo_id', {})
                           .setdefault(topo, {})
                           .setdefault('peer_id', {})
                           .setdefault(peer, {}))
            entry['topo_id'] = topo
            entry['peer_id'] = peer
            entry['flood_list'] = match.group('flood_list')
            entry['is_service_node'] = match.group('is_service_node').lower()

        # Record how many peers were seen under each topology.
        for topo_entry in parsed.get('topology', {}).get('topo_id', {}).values():
            topo_entry['num_of_peer_id'] = len(topo_entry['peer_id'])
        return parsed
# ===================================================
# Schema for show running-config nv ovelay
# ===================================================
class ShowRunningConfigNvOverlaySchema(MetaParser):
    """Schema for:
        show running-config nv overlay"""

    # Top-level Any() keys are NVE interface names (e.g. 'nve1'); the named
    # keys hold global settings and per-multisite tracking interfaces.
    schema = {
        Optional('evpn_multisite_border_gateway'): int,
        Optional('multisite_convergence_time') : int,
        Optional('enabled_nv_overlay'): bool,
        Any():{
            Optional('nve_name'):str,
            Optional('if_state'): str,
            Optional('host_reachability_protocol'): str,
            Optional('adv_vmac'): bool,
            Optional('source_if'): str,
            Optional('multisite_bgw_if'): str,
            Optional('vni'):{
                Any():{
                    Optional('vni'): int,
                    Optional('associated_vrf'): bool,
                    Optional('multisite_ingress_replication'): bool,
                    Optional('mcast_group'): str
                },
            },
        },
        Optional('multisite'):{
            Optional('dci_links'):{
                Any():{
                    'if_name': str,
                    'if_state': str,
                },
            },
            Optional('fabric_links'): {
                Any(): {
                    'if_name': str,
                    'if_state': str,
                },
            },
        },
    }
# ====================================================
# Parser for show running-config nv overlay
# =====================================================
class ShowRunningConfigNvOverlay(ShowRunningConfigNvOverlaySchema):
    """Parser for:
        show running-config nv overlay

    Walks the running-config line by line.  Context (which nve interface,
    member vni, or physical interface a sub-line belongs to) is carried in
    locals bound by earlier branches, so line order is load-bearing.
    """

    cli_command = 'show running-config nv overlay'

    def cli(self, output=None):
        # Execute command to get output when none was supplied.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}

        # feature nv overlay
        p0 = re.compile(r'^\s*feature nv overlay$')
        # evpn multisite border-gateway 111111
        p1 = re.compile(r'^\s*evpn multisite border-gateway +(?P<evpn_multisite_border_gateway>[\w]+)$')
        # delay-restore time 185
        p2 = re.compile(r'^\s*delay-restore time +(?P<evpn_msite_bgw_delay_restore_time>[\d]+)$')
        # interface nve1
        p3 = re.compile(r'^\s*interface +(?P<nve_name>nve[\d]+)$')
        # no shutdown
        p4 = re.compile(r'^\s*no shutdown$')
        # host-reachability protocol bgp
        p5 = re.compile(r'^\s*host-reachability protocol +(?P<host_reachability_protocol>[\w]+)$')
        # advertise virtual-rmac
        p6 = re.compile(r'^\s*advertise virtual-rmac$')
        # source-interface loopback1
        p7 = re.compile(r'^\s*source-interface +(?P<source_if>[\w]+)$')
        # multisite border-gateway interface loopback3
        p8 = re.compile(r'^\s*multisite +border\-gateway +interface +(?P<multisite_bgw_if>[\w]+)$')
        # member vni 10100 associate-vrf
        p9 = re.compile(r'^\s*member vni +(?P<nve_vni>[\d]+)( +(?P<associated_vrf>[\w\-]+))?$')
        # multisite ingress-replication
        p10 = re.compile(r'^\s*multisite ingress-replication$')
        # mcast-group 231.100.1.1
        p11 = re.compile(r'^\s*mcast-group +(?P<mcast_group>[\d\.]+)$')
        # interface Ethernet1/1  (any non-nve interface)
        p12 = re.compile(r'^\s*interface +(?P<interface>(?!nve)[\w\/]+)$')
        # evpn multisite fabric-tracking
        # evpn multisite dci-tracking
        p13 = re.compile(r'^\s*evpn multisite +(?P<fabric_dci_tracking>[\w\-]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            m = p0.match(line)
            if m:
                result_dict.update({'enabled_nv_overlay': True})
                continue

            m = p1.match(line)
            if m:
                multisite_border_gateway = m.groupdict().pop('evpn_multisite_border_gateway')
                result_dict.update({'evpn_multisite_border_gateway': int(multisite_border_gateway)})
                continue

            m = p2.match(line)
            if m:
                evpn_msite_bgw_delay_restore_time = m.groupdict().pop('evpn_msite_bgw_delay_restore_time')
                result_dict.update({'multisite_convergence_time': int(evpn_msite_bgw_delay_restore_time)})
                continue

            # 'interface nveX' opens a new NVE section; nve_dict is the
            # target for the sub-commands that follow.
            m = p3.match(line)
            if m:
                nve_name = m.groupdict().pop('nve_name')
                nve_dict = result_dict.setdefault(nve_name, {})
                nve_dict.update({'nve_name': nve_name})
                continue

            # NOTE(review): 'no shutdown' is attributed to the most recent
            # nve interface even when it appears under a physical interface
            # section — confirm intended.
            m = p4.match(line)
            if m:
                nve_dict.update({'if_state': "up"})
                continue

            m = p5.match(line)
            if m:
                host_reachability_protocol = m.groupdict().pop('host_reachability_protocol')
                nve_dict.update({'host_reachability_protocol': host_reachability_protocol})
                continue

            m = p6.match(line)
            if m:
                nve_dict.update({'adv_vmac': True})
                continue

            m = p7.match(line)
            if m:
                source_if = m.groupdict().pop('source_if')
                nve_dict.update({'source_if': source_if})
                continue

            m = p8.match(line)
            if m:
                multisite_bgw_if = m.groupdict().pop('multisite_bgw_if')
                nve_dict.update({'multisite_bgw_if': multisite_bgw_if})
                continue

            # 'member vni' opens a per-VNI sub-section under the current nve.
            m = p9.match(line)
            if m:
                group = m.groupdict()
                nve_vni = int(group.pop('nve_vni'))
                vni_dict = nve_dict.setdefault('vni',{}).setdefault(nve_vni,{})
                vni_dict.update({'vni':nve_vni})
                if group.get('associated_vrf'):
                    vni_dict.update({'associated_vrf':True})
                    group.pop('associated_vrf')
                else:
                    vni_dict.update({'associated_vrf': False})
                continue

            m = p10.match(line)
            if m:
                vni_dict.update({'multisite_ingress_replication': True})
                continue

            m = p11.match(line)
            if m:
                mcast = m.groupdict().pop('mcast_group')
                vni_dict.update({'mcast_group': mcast})
                continue

            # Remember the current physical interface for the tracking
            # lines handled by p13 below.
            m = p12.match(line)
            if m:
                interface = m.groupdict().pop('interface')
                continue

            # NOTE(review): relies on a preceding p12 match to bind
            # 'interface'; a tracking line without one would raise
            # NameError — confirm output ordering guarantees this.
            m = p13.match(line)
            if m:
                tracking = m.groupdict().pop('fabric_dci_tracking')
                tracking_dict = result_dict.setdefault('multisite', {})
                if 'fabric' in tracking:
                    fabric_dict = tracking_dict.setdefault('fabric_links', {}).setdefault(interface, {})
                    fabric_dict.update({'if_name': interface})
                    fabric_dict.update({'if_state': 'up'})
                if 'dci' in tracking:
                    dci_dict = tracking_dict.setdefault('dci_links', {}).setdefault(interface, {})
                    dci_dict.update({'if_name': interface})
                    dci_dict.update({'if_state': 'up'})
                continue
        return result_dict
# ====================================================
# schema for show nve vni ingress-replication
# ====================================================
class ShowNveVniIngressReplicationSchema(MetaParser):
    """Schema for:
        show nve vni ingress-replication"""

    # Top-level keys are NVE interface names; 'repl_ip' entries exist only
    # for rows that carry a replication list.
    schema ={
        Any(): {
            'vni': {
                Any(): {
                    'vni': int,
                    Optional('repl_ip'): {
                        Any(): {
                            Optional('repl_ip'): str,
                            Optional('source'): str,
                            Optional('up_time'): str,
                        }
                    }
                }
            }
        }
    }
# ====================================================
# Parser for show nve vni ingress-replication
# ====================================================
class ShowNveVniIngressReplication(ShowNveVniIngressReplicationSchema):
    """Parser for:
       show nve vni ingress-replication"""

    cli_command = 'show nve vni ingress-replication'

    def cli(self, output=None):
        # Run the command on the device unless output was provided.
        out = self.device.execute(self.cli_command) if output is None else output

        # Interface VNI      Replication List  Source   Up Time
        # --------- -------- ----------------- -------  -------
        # nve1      10101    7.7.7.7           BGP-IMET 1d02h
        p1 = re.compile(r'^\s*(?P<nve_name>[\w]+) +(?P<vni>[\d]+)( +(?P<replication_list>[\w\.]+)'
                        ' +(?P<source>[\w\-]+) +(?P<uptime>[\w\:]+))?$')

        parsed = {}
        for raw_line in out.splitlines():
            if not raw_line:
                continue
            match = p1.match(raw_line.rstrip())
            if not match:
                continue
            fields = match.groupdict()
            vni = int(fields['vni'])
            vni_dict = (parsed.setdefault(fields['nve_name'], {})
                              .setdefault('vni', {})
                              .setdefault(vni, {}))
            vni_dict['vni'] = vni
            # Replication list / source / uptime appear only on rows that
            # actually carry a replication entry.
            if fields['replication_list']:
                ip = fields['replication_list'].strip()
                repl_dict = vni_dict.setdefault('repl_ip', {}).setdefault(ip, {})
                repl_dict['repl_ip'] = ip
                repl_dict['source'] = fields['source'].lower()
                repl_dict['up_time'] = fields['uptime']
        return parsed
# ====================================================
# schema for show fabric multicast globals
# ====================================================
class ShowFabricMulticastGlobalsSchema(MetaParser):
    """Schema for:
        show fabric multicast globals"""

    # Flat key/value block of global fabric-multicast settings.
    schema ={
        'multicast': {
            'globals': {
                'pruning': str,
                'switch_role': str,
                'fabric_control_seg': str,
                'peer_fabric_ctrl_addr': str,
                'advertise_vpc_rpf_routes': str,
                'created_vni_list': str,
                'fwd_encap': str,
                # Booleans parsed from the device's TRUE/FALSE text.
                'overlay_distributed_dr': bool,
                'overlay_spt_only': bool,
            }
        }
    }
# ====================================================
# Parser for show fabric multicast globals
# ====================================================
class ShowFabricMulticastGlobals(ShowFabricMulticastGlobalsSchema):
    """Parser for:
        show fabric multicast globals

    NOTE: global_dict is created only in the 'Pruning:' branch; the other
    branches assume the output always starts with that line — TODO confirm.
    """

    cli_command = 'show fabric multicast globals'

    def cli(self, output=None):
        # Execute command to get output when none was supplied.
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        result_dict = {}

        # Pruning: segment-based
        p1 = re.compile(r'^\s*Pruning: +(?P<pruning>[\w\-]+)$')
        # Switch role:  (value may be absent)
        p2 = re.compile(r'^\s*Switch +role:( +(?P<switch_role>[\w]+))?$')
        # Fabric Control Seg: Null
        p3 = re.compile(r'^\s*Fabric +Control +Seg: +(?P<fabric_control_seg>[\w]+)$')
        # Peer Fabric Control Address: 0.0.0.0
        p4 = re.compile(r'^\s*Peer +Fabric +Control +Address: +(?P<peer_fabric_ctrl_addr>[\w\.]+)$')
        # Advertising vPC RPF routes: Disabled
        p5 = re.compile(r'^\s*Advertising +vPC +RPF +routes: +(?P<advertise_vpc_rpf_routes>[\w]+)$')
        # Created VNI List: -
        p6 = re.compile(r'^\s*Created +VNI +List: +(?P<created_vni_list>[\w\-]+)$')
        # Fwd Encap: (null)
        p7 = re.compile(r'^\s*Fwd +Encap: +(?P<fwd_encap>[\w\\(\)]+)$')
        # Overlay Distributed-DR: FALSE
        p8 = re.compile(r'^\s*Overlay +Distributed\-DR: +(?P<overlay_distributed_dr>[\w]+)$')
        # Overlay spt-only: TRUE
        p9 = re.compile(r'^\s*Overlay +spt\-only: +(?P<overlay_spt_only>[\w]+)$')

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            # 'Pruning:' line creates the globals container.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                global_dict = result_dict.setdefault('multicast', {}).setdefault('globals', {})
                global_dict.update({'pruning': group['pruning']})
                continue

            # Switch role may have no value; store '' in that case.
            m = p2.match(line)
            if m:
                group = m.groupdict()
                if group['switch_role']:
                    global_dict.update({'switch_role': group['switch_role']})
                else:
                    global_dict.update({'switch_role': ""})
                continue

            m = p3.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'fabric_control_seg': group['fabric_control_seg']})
                continue

            m = p4.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'peer_fabric_ctrl_addr': group['peer_fabric_ctrl_addr']})
                continue

            m = p5.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'advertise_vpc_rpf_routes': group['advertise_vpc_rpf_routes'].lower()})
                continue

            m = p6.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'created_vni_list': group['created_vni_list']})
                continue

            m = p7.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'fwd_encap': group['fwd_encap']})
                continue

            # TRUE/FALSE text mapped to Python bools (anything not 'false'
            # counts as True).
            m = p8.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'overlay_distributed_dr': False if \
                    group['overlay_distributed_dr'].lower()=='false' else True})
                continue

            m = p9.match(line)
            if m:
                group = m.groupdict()
                global_dict.update({'overlay_spt_only': False if\
                    group['overlay_spt_only'].lower()=='false' else True})
                continue
        return result_dict
# ==========================================================
# schema for show fabric multicast ipv4 sa-ad-route vrf all
# ==========================================================
class ShowFabricMulticastIpSaAdRouteSchema(MetaParser):
    """Schema for:
        show fabric multicast ipv4 sa-ad-route
        show fabric multicast ipv4 sa-ad-route vrf <vrf>
        show fabric multicast ipv4 sa-ad-route vrf all"""

    # Expected structure of the parsed output; Any() keys are filled in by
    # the parser from device output.
    schema = {
        "multicast": {
            "vrf": {
                Any(): {  # VRF name, e.g. "default"
                    "vnid": str,
                    Optional("address_family"): {
                        Any(): {  # address family; the parser uses "ipv4"
                            "sa_ad_routes": {
                                "gaddr": {
                                    Any(): {  # group address incl. prefix length
                                        "grp_len": int,
                                        "saddr": {
                                            Any(): {  # source address incl. prefix length
                                                "src_len": int,
                                                "uptime": str,
                                                Optional("interested_fabric_nodes"): {
                                                    Any(): {  # node name, e.g. "This node"
                                                        "uptime": str,
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
# ===========================================================
# Parser for show fabric multicast ipv4 sa-ad-route vrf all
# ==========================================================
class ShowFabricMulticastIpSaAdRoute(ShowFabricMulticastIpSaAdRouteSchema):
    """parser for:
        show fabric multicast ipv4 sa-ad-route
        show fabric multicast ipv4 sa-ad-route vrf <vrf>
        show fabric multicast ipv4 sa-ad-route vrf all"""

    cli_command = ['show fabric multicast ipv4 sa-ad-route vrf {vrf}', 'show fabric multicast ipv4 sa-ad-route']

    def cli(self, vrf="", output=None):
        """Parse the sa-ad-route output into the schema dictionary.

        vrf    -- optional VRF name; when empty, the plain command is run
        output -- pre-captured device output (skips device.execute)
        """
        if vrf:
            cmd = self.cli_command[0].format(vrf=vrf)
        else:
            # Keep "default" for symmetry with sibling parsers; the VRF name
            # actually stored in the result comes from the output itself.
            vrf = "default"
            cmd = self.cli_command[1]
        if output is None:
            out = self.device.execute(cmd)
        else:
            out = output

        result_dict = {}

        # VRF "default" MVPN SA AD Route Database VNI: 0
        # VRF "vni_10100" MVPN SA AD Route Database VNI: 10100
        p1 = re.compile(r'^\s*VRF +\"(?P<vrf_name>\S+)\" +MVPN +SA +AD +Route +Database'
                        r' +VNI: +(?P<vnid>[\d]+)$')
        # Src Active AD Route: (100.101.1.3/32, 238.8.4.101/32) uptime: 00:01:01
        p2 = re.compile(r'^\s*Src +Active +AD +Route: +\((?P<saddr>[\w\/\.]+), +(?P<gaddr>[\w\/\.]+)\)'
                        r' +uptime: +(?P<uptime>[\w\.\:]+)$')
        # This node, uptime: 00:01:01
        p4 = re.compile(r'^\s*(?P<interested_fabric_nodes>[\w\s\.]+), +uptime: +(?P<interest_uptime>[\w\.\:]+)$')
        # NOTE: the "Interested Fabric Nodes:" header matches none of the
        # patterns and is simply skipped (an unused pattern for it was removed).

        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            m = p1.match(line)
            if m:
                group = m.groupdict()
                vrf_dict = result_dict.setdefault('multicast', {}).setdefault('vrf', {}).\
                    setdefault(group['vrf_name'], {})
                vrf_dict.update({'vnid': group['vnid']})
                continue

            m = p2.match(line)
            if m:
                group = m.groupdict()
                address_family_dict = vrf_dict.setdefault('address_family', {}).setdefault('ipv4', {})
                saddr = group['saddr']
                gaddr = group['gaddr']
                gaddr_dict = address_family_dict.setdefault('sa_ad_routes', {}).\
                    setdefault('gaddr', {}).setdefault(gaddr, {})
                # prefix lengths are derived from the addresses themselves
                gaddr_dict.update({'grp_len': int(gaddr.split('/')[1])})
                saddr_dict = gaddr_dict.setdefault('saddr', {}).setdefault(saddr, {})
                saddr_dict.update({'src_len': int(saddr.split('/')[1])})
                saddr_dict.update({'uptime': group['uptime']})
                continue

            m = p4.match(line)
            if m:
                group = m.groupdict()
                # (a former self-assignment no-op was removed here)
                interested_dict = saddr_dict.setdefault('interested_fabric_nodes', {}).\
                    setdefault(group['interested_fabric_nodes'], {})
                interested_dict.update({'uptime': group['interest_uptime']})
                continue

        return result_dict
# ==========================================================
# schema for show fabric multicast ipv4 l2-mroute vni all
# ==========================================================
class ShowFabricMulticastIpL2MrouteSchema(MetaParser):
    """Schema for:
        show fabric multicast ipv4 l2-mroute
        show fabric multicast ipv4 l2-mroute vni <vni>
        show fabric multicast ipv4 l2-mroute vni all"""

    # Expected structure of the parsed output; Any() keys are filled in by
    # the parser from device output.
    schema = {
        'multicast': {
            "l2_mroute": {
                "vni": {
                    Any(): {  # VNI id (kept as string)
                        "vnid": str,
                        Optional("fabric_l2_mroutes"): {
                            "gaddr": {
                                Any(): {  # group address incl. prefix length
                                    "saddr": {
                                        Any(): {  # source address ("*" for any source)
                                            "interested_fabric_nodes": {
                                                Any(): {  # node name, e.g. "This node"
                                                    "node": str,
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
# ===========================================================
# Parser for show fabric multicast ipv4 l2-mroute vni all
# ==========================================================
class ShowFabricMulticastIpL2Mroute(ShowFabricMulticastIpL2MrouteSchema):
    """parser for:
        show fabric multicast ipv4 l2-mroute
        show fabric multicast ipv4 l2-mroute vni <vni>
        show fabric multicast ipv4 l2-mroute vni all"""

    cli_command = ['show fabric multicast ipv4 l2-mroute vni {vni}', 'show fabric multicast ipv4 l2-mroute vni all']

    def cli(self, vni="", output=None):
        """Parse the l2-mroute output into the schema dictionary.

        vni    -- optional VNI id; when empty, "vni all" is queried
        output -- pre-captured device output (skips device.execute)
        """
        if vni:
            cmd = self.cli_command[0].format(vni=vni)
        else:
            cmd = self.cli_command[1]
        if output is None:
            out = self.device.execute(cmd)
        else:
            out = output

        result_dict = {}

        # EVPN C-Mcast Route Database for VNI: 10101
        p1 = re.compile(r'^\s*EVPN +C\-Mcast +Route +Database +for +VNI: +(?P<vni>[\d]+)$')
        # Fabric L2-Mroute: (*, 231.1.3.101/32)
        p2 = re.compile(r'^\s*Fabric +L2\-Mroute: +\((?P<saddr>[\w\/\.\*]+), +(?P<gaddr>[\w\/\.]+)\)$')
        # Interested Fabric Nodes:
        p3 = re.compile(r'^\s*Interested Fabric Nodes:$')
        # This node
        p4 = re.compile(r'^(?P<space>\s{4})(?P<interested_fabric_nodes>[\w\s\.]+)$')

        # Node lines (p4) only count when they follow an
        # "Interested Fabric Nodes:" header (p3).
        interested_flag = False
        for line in out.splitlines():
            if line:
                line = line.rstrip()
            else:
                continue

            m = p1.match(line)
            if m:
                group = m.groupdict()
                # remember the current VNI; the result branch is only created
                # once an actual mroute line (p2) shows up
                vni = group['vni']
                continue

            m = p2.match(line)
            if m:
                group = m.groupdict()
                mroute_dict = result_dict.setdefault('multicast', {}). \
                    setdefault('l2_mroute', {}).setdefault('vni', {}). \
                    setdefault(vni, {})
                mroute_dict.update({'vnid': vni})
                fabric_dict = mroute_dict.setdefault('fabric_l2_mroutes', {})
                saddr = group['saddr']
                gaddr = group['gaddr']
                gaddr_dict = fabric_dict.setdefault('gaddr', {}).setdefault(gaddr, {})
                saddr_dict = gaddr_dict.setdefault('saddr', {}).setdefault(saddr, {})
                # a new mroute resets the header state
                interested_flag = False
                continue

            m = p3.match(line)
            if m:
                interested_flag=True
                continue

            m = p4.match(line)
            if m:
                if interested_flag:
                    group = m.groupdict()
                    interested_fabric_nodes = group['interested_fabric_nodes']
                    interested_dict = saddr_dict.setdefault('interested_fabric_nodes', {}). \
                        setdefault(interested_fabric_nodes, {})
                    interested_dict.update({'node': interested_fabric_nodes})
                continue

        return result_dict
| [
"karmoham@cisco.com"
] | karmoham@cisco.com |
9aba0ae9bcdfe028c9cadf0031e66dedbca900f8 | 063d7a179118c7f24e116f4c649b60d263995e3a | /sightings/models.py | e20591ee9941956131331729110a2d88108d23a2 | [] | no_license | hgaoj/S | 4f1fa678ee443038227d5c852027ea4bfc093c49 | d3d998f89b49df16693a5d8bc561b000507eacc8 | refs/heads/main | 2023-04-09T15:16:15.305024 | 2021-04-14T11:37:48 | 2021-04-14T11:37:48 | 357,882,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | from django.db import models
class Squirrel(models.Model):
    """A single squirrel sighting record."""

    # coordinates of the sighting; both are required
    latitude = models.FloatField(null=False, blank=False)
    longitude = models.FloatField(null=False, blank=False)
    # unique identifier of the sighting; used as the display name
    unique_squirrel_id = models.CharField(max_length=100, unique=True, null=False, blank=False)
    # time-of-day shift; the empty string stands for "unknown"
    AM = 'AM'
    PM = 'PM'
    OTHER = ''
    shift_choice = [
        (AM, 'AM'),
        (PM, 'PM'),
        (OTHER, '')
    ]
    shift = models.CharField(
        max_length=15,
        choices=shift_choice,
        default=OTHER,
    )
    # descriptive, optional attributes of the sighting
    date = models.DateField(null=True, blank=True)
    age = models.CharField(max_length=5, blank=True)
    primary_fur_color = models.CharField(max_length=50, blank=True)
    location = models.CharField(max_length=50, blank=True)
    specific_location = models.CharField(max_length=50, blank=True)
    other_activities = models.CharField(max_length=100, blank=True)
    # observed activities (no defaults declared, so a value is required on save)
    running = models.BooleanField()
    chasing = models.BooleanField()
    climbing = models.BooleanField()
    eating = models.BooleanField()
    foraging = models.BooleanField()
    # observed vocalisations
    kuks = models.BooleanField()
    quaas = models.BooleanField()
    moans = models.BooleanField()
    # observed tail behaviour
    tail_flags = models.BooleanField()
    tail_twitches = models.BooleanField()
    # observed interaction with the observer
    approaches = models.BooleanField()
    indifferent = models.BooleanField()
    runs_from = models.BooleanField()

    def __str__(self):
        return self.unique_squirrel_id
# Create your models here.
| [
"690631890@qq.com"
] | 690631890@qq.com |
edc28a97bf576f1d94cb0070259494365f4e5648 | b4c24bdbb6f87841fc2f4aa5cefb010d9512b1f7 | /main.rf_servo.py | ba30a7efea3f2abf18cb9030250b189092a43298 | [
"MIT"
] | permissive | martin-c/micro-python-intro | d18365d61bf588f57bd5f2aa11a7fec95b55eb95 | 626ff8342f3a00494d43cb6beb25d7c6b3dddb4c | refs/heads/master | 2020-04-28T00:03:05.681718 | 2015-02-12T21:33:41 | 2015-02-12T21:33:41 | 30,710,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,557 | py | """
The MIT License (MIT)
Copyright (c) 2015 Martin Clemons
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import sys
from pyb import I2C, LED, Servo, Switch
from srf_rangefinder import SRF08
def setup():
    """Initialize hardware: LED, servo, user switch and SRF08 rangefinder.

    Publishes the created objects as module globals for loop(), prints the
    sensor address/revision as one JSON line, then configures the sensor.
    """
    global led, servo, sw, rf
    led = LED(2)
    servo = Servo(1)
    sw = Switch()
    # SRF08 rangefinder on I2C bus 2
    rf = SRF08(2, I2C.MASTER, baudrate=50000)
    # print some sensor info as a single JSON line on stdout
    sensor_info = {
        'address': rf.bus_address() << 1,
        'revision': rf.sw_rev(),
    }
    sys.stdout.write(json.dumps(sensor_info) + '\n')
    # set max range and gain
    rf.set_max_range(6000)
    rf.set_analog_gain(16)
def loop():
    """main program loop.

    Waits for an interrupt; when the user switch is pressed, sweeps the servo
    from -90 to +90 degrees in 10-degree steps and emits one JSON line per
    angle with the measured range.
    """
    # NOTE(review): `pyb` itself is never imported at the top of this file
    # (only names from it) -- presumably relies on the MicroPython firmware
    # making it available; confirm on the target board.
    global led, servo, sw, rf
    pyb.wfi()
    if sw():
        led.on()
        # announce the start of a new sweep to the host
        sys.stdout.write(json.dumps({'reset': True}) + '\n')
        servo.angle(-90)
        # allow servo to reach starting angle
        pyb.delay(500)
        for a in range(-90, 100, 10):
            # set new angle, allow servo to settle
            servo.angle(a, 150)
            pyb.delay(10)
            # measure distance in cm
            rf.measure_range()
            pyb.delay(75)
            # read distance, send json to host
            sample = {
                'angle': a,
                'range': rf.read_range(),
            }
            sys.stdout.write(json.dumps(sample) + '\n')
        servo.angle(0)
        led.off()
led.off()
if __name__ == "__main__":
    """
    Main program loop, does not return.
    """
    # initialize the hardware once, then poll forever
    setup()
    while True:
        loop()
| [
"martin.clemons@mac.com"
] | martin.clemons@mac.com |
6ac2ba97c748b6e22830c9b04d81743c690db51a | 7064d88584ed8d9da2ac53e0c92caa5c951a643b | /return_multiple_valueFunction.py | fac6c6860c081c30f44d533bb618d626cd67990c | [] | no_license | tushargoyal02/PythonBasicsToAdVance | 328509ecbe596eb3b2e12137cd9141f6128167c8 | ba47906a7a0dbee59776985f68046eb3c7cfc37f | refs/heads/main | 2023-02-23T23:20:24.221622 | 2021-02-02T13:24:29 | 2021-02-02T13:24:29 | 333,090,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py |
def returnFunction(a):
    """Return a (flag, code) pair: (True, 1) for even *a*, (False, -1) otherwise."""
    if a % 2:
        return False, -1
    return True, 1
# Capture the whole multi-value return in a single name first.
result = returnFunction(10)
# A function returning several values hands back one tuple.
print(type(result))
# The same call can instead be unpacked directly into two names.
value_a, value_b = returnFunction(10)
print("First Value :", value_a, "Second Value", value_b)
"noreply@github.com"
] | noreply@github.com |
956e0a351c167d8f2fbfeb2afb2935e09c0d6ebd | f844be5d2e5913fc3dfa46767b4633076906afb1 | /Programmierung/Sicherungen/Plotkram/plotkram.py | ffa199788d2f70750d8c035e4bfa4e5b00542538 | [] | no_license | Syssy/diplom | dd11a64ab1a9a9faf659728195a89c80242af954 | 74a3f4b8a662490be8b5c22bbf4bdde625bc3d1e | refs/heads/master | 2016-09-05T23:36:49.408174 | 2015-10-02T06:27:53 | 2015-10-02T06:27:53 | 25,148,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,396 | py | # -*- coding: utf-8 -*-
import scipy.stats
import csv
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
import pickle
import pylab
import matplotlib.gridspec as gridspec
import time
import simulation
import math
from matplotlib import colors, cm
import argparse
class Simulation(): #TODO
    """One simulated spectrum: the (ps, pm) parameters, the raw
    first-passage times, and moments derived from an inverse-Gaussian fit.
    """

    # class attribute kept from the original interface; not referenced in
    # the visible code
    anz = 1

    def __init__(self, ps, pm, length=0, number=0, counter=None):
        """Store the run data and fit an inverse Gaussian to the times.

        ps, pm  -- simulation parameters, stored as the tuple self.params
        length  -- walk length of the simulation
        number  -- number of particles
        counter -- list of first-passage times (defaults to [0, 0, 0])
        """
        # BUG FIX: the original used a mutable default argument
        # (counter=[0, 0, 0]) which is shared between every instance that
        # omits the argument; create a fresh list per call instead.
        if counter is None:
            counter = [0, 0, 0]
        self.params = (ps, pm)
        self.length = length
        self.number = number
        self.times = counter
        # TODO: allow distributions other than the inverse Gaussian
        # maximum-likelihood parameters of the inverse-Gaussian fit
        self.mu, self.loc, self.scale = scipy.stats.invgauss.fit(self.times)
        # first four moments of the fitted distribution
        self.mean, self.variance, self.skewness, self.kurtosis = scipy.stats.invgauss.stats(
            self.mu, self.loc, self.scale, moments='mvsk')

    def get_moment(self, moment):
        """Return the requested moment by name ("mean", "variance",
        "skewness"/"skew", "kurtosis"); None for unknown names."""
        lookup = {
            "mean": self.mean,
            "variance": self.variance,
            "skewness": self.skewness,
            "skew": self.skewness,
            "kurtosis": self.kurtosis,
        }
        return lookup.get(moment)

    def recalculate(self):
        """Refresh both the fitted parameters and the empirical moments.

        BUG FIX: plot_heatmap_from_file()/plot_4_heats_from_file() call
        sim.recalculate(), which did not exist before (AttributeError).
        """
        self.recalculate_params()
        self.recalculate_moments()

    def recalculate_params(self):
        """Re-fit the inverse-Gaussian parameters from self.times."""
        self.mu, self.loc, self.scale = scipy.stats.invgauss.fit(self.times)

    def recalculate_moments(self):
        """Recompute the moments directly from the samples (no fit)."""
        self.mean = np.mean(self.times)
        self.variance = np.var(self.times)
        self.skewness = scipy.stats.skew(self.times)
        self.kurtosis = scipy.stats.kurtosis(self.times)

    def get_ps(self):
        return self.params[0]

    def get_pm(self):
        return self.params[1]

    def __repr__(self):
        return str(self.params)
# Eine einzelne Heatmap aus einer .pickleDatei machen
def plot_heatmap_from_file(datei, squareroot_num_sim, moment, recalc = False):
startzeit = time.clock()
print "plot heatmap " + datei
with open(datei, 'rb') as daten:
sim_array = pickle.load(daten)
squareroot_num_sim = sim_array.shape[0]
sim_array = np.reshape(sim_array, squareroot_num_sim*squareroot_num_sim)
if recalc:
for sim in sim_array:
sim.recalculate()
mySortedSims = sorted(sim_array, key= Simulation.get_pm)
#print mySortedSims1, '\n\n'
sim_array = sorted(mySortedSims, key = Simulation.get_ps)
plot_heatmap(sim_array, squareroot_num_sim, moment)
# Eine einzelne Heatmap aus einem array plotten, (Aufruf von der Simulation)
def plot_heatmap(sim_array, squareroot_num_sim, moment):
print "plot heatmap"
#print sim_array,len(sim_array)
# squareroot_num_sim = int(len(sim_array)/2)
print squareroot_num_sim
if not squareroot_num_sim:
print "kein squareroot_num_sim"
return None
sim_array = np.reshape(sim_array, (squareroot_num_sim,squareroot_num_sim))
print "Moment", moment
to_plot = np.zeros((squareroot_num_sim, squareroot_num_sim))
for i in range(squareroot_num_sim):
#print '\n'
for j in range(squareroot_num_sim):
if sim_array[i][j]:
#print sim_array[i][j].get_moment(moment),
#print type(sim_array[i][j])
# if sim_array[i][j].get_moment(moment) == 0 or sim_array[i][j].get_moment(moment) <0:
# print "params +moment ", sim_array[i][j].params, ' ', (sim_array[i][j].get_moment(moment)), (sim_array[i][j].times)
if False:#moment == "mean" or moment == "variance":
to_plot[i][j] = math.log(sim_array[i][j].get_moment(moment))
else:
to_plot[i][j] = sim_array[i][j].get_moment(moment)
else:
to_plot[i][j] = None
print "none"
#print "toplot ", to_plot
fig, ax = plt.subplots()
# extent scheint die achsenbeschriftung zu sein
cax = ax.imshow(to_plot, origin = 'lower', interpolation="nearest", extent = [0,1,0,1])
plt.xticks(np.arange(2))
#plt.yticks([0, 0.5, 1])
plt.yticks(np.arange(2))
plt.xlabel("pm")
plt.ylabel("ps")
#print sim_array[i][j].length
plt.suptitle("Laenge"+ str(sim_array[i][j].length)+ " Anzahl"+ str(sim_array[i][j].number))
cbar = fig.colorbar(cax)#, ticks=[np.amin(to_plot), 0, np.amax(to_plot)])*
# cbar.ax.set_yticklabels(['< -1', '0', '> 1'])
# plot it
#plt.show()
# Vier Heatmaps für die Ecken plotten, je mit einzelner Colorbar, da die Werte oft nicht vergleichbar sind
def plot_4_heats_from_file(filename, moment, recalc = False):
print "öffne", filename,
with open (filename, "rb") as datei:
sim_array = pickle.load(datei)
num = 0
#print len(sim_array), sim_array
for sl in sim_array:
for sim in sl:
if recalc:
sim.recalculate()
num +=1
print "anzahl sim: ", num
# Nach Parametern sortieren, damit das plotten der Heatmap was sinnvolles ergibt
# TODO eigentlich total bescheuert, da in dieser Variante eigentlich schon sortiert ist. Nur halt andere RF
sim_array = np.reshape(sim_array, num)
mySortedSims = sorted(sim_array, key= Simulation.get_pm)
#print mySortedSims1, '\n\n'
sim_array = sorted(mySortedSims, key = Simulation.get_ps)
#print sim_array
plot_4_heatmaps(sim_array, num, moment)
# Wie from file, nur der input sim_array sollte sortiert eine nxn-M der Sim sein
def plot_4_heatmaps(sim_array, num_sim, moment):
print "4heats", num_sim, math.sqrt(num_sim)/2
#Die darzustellende Achsenweite/Parameterraum,
scale = int(math.sqrt(num_sim)/2)
print scale, "scale"
#veraltet erstelle plotlisten (data für imshow), TODO kann noch hübscher werden,
#print sim_array
#print np.shape(sim_array)
sim_array = np.reshape(sim_array, (math.sqrt(num_sim), math.sqrt(num_sim)))
#plotlist = []
#hilfsliste1 = []
fig = plt.figure()
# Jetzt folgen die vier plots, Unterscheidung nur durch Zugriff auf sim_array[i(+scale)][j(+scale)]
#ul, ps&pm klein
print "Plot1, ul223"
hilfsliste2 = []
labelset1, labelset2 = set(), set()
for i in range (scale):
hilfsliste1 = []
for j in range (scale):
hilfsliste1.append(sim_array[i][j].get_moment(moment))
# print sim_array[i][j], math.log(sim_array[i][j].get_moment(moment)),
labelset1.add(sim_array[i][j].params[1])
labelset2.add(sim_array[i][j].params[0])
hilfsliste2.append(hilfsliste1)
#print '\n'
#plotlist.append(hilfsliste2)
ax = fig.add_subplot(223)
print min(labelset1), max(labelset1), min(labelset2), max(labelset2)
cax = plt.imshow(hilfsliste2, origin = "lower", extent=[min(labelset1), max(labelset1), min(labelset2), max(labelset2)])
# print "max", max([max(hl) for hl in hilfsliste2]), " min", min([min(hl) for hl in hilfsliste2]),
# print "achse", np.linspace(min([min(hl) for hl in hilfsliste2]), max([max(hl) for hl in hilfsliste2]), 5)
cbar = fig.colorbar(cax, ticks = np.linspace(min([min(hl) for hl in hilfsliste2]), max([max(hl) for hl in hilfsliste2]), 5))
plt.xlabel("pm")
plt.ylabel("ps")
#ur, pspm!!
print "plot2, ol221"
hilfsliste2 = []
labelset1, labelset2 = set(), set()
# print labelset
for i in range (scale):
hilfsliste1 = []
for j in range (scale):
hilfsliste1.append(sim_array[i+scale][j].get_moment(moment))
# print sim_array[i+scale][j], (sim_array[i+scale][j].get_moment(moment)),
labelset1.add(sim_array[i+scale][j].params[1])
labelset2.add(sim_array[i+scale][j].params[0])
hilfsliste2.append(hilfsliste1)
# print '\n'
ax = fig.add_subplot(221)
# print len(hilfsliste2), len(hilfsliste1)
# print '\n', labelset, '\n\n', labelset1, labelset2
print min(labelset1), max(labelset1), min(labelset2), max(labelset2)
cax = plt.imshow(hilfsliste2, origin = "lower", extent=[min(labelset1), max(labelset1), min(labelset2), max(labelset2)])
# print "max", max([max(hl) for hl in hilfsliste2]), " min", min([min(hl) for hl in hilfsliste2])
cbar = fig.colorbar(cax, ticks = np.linspace(min([min(hl) for hl in hilfsliste2]), max([max(hl) for hl in hilfsliste2]), 5))
plt.xlabel("pm")
plt.ylabel("ps")
#plotlist.append(hilfsliste2)
#print hilfsliste2
#ol ps klein & pm groß
print "plot2, ur224"
hilfsliste2 = []
labelset1, labelset2 = set(), set()
for i in range (scale):
hilfsliste1 = []
for j in range (scale):
hilfsliste1.append(sim_array[i][j+scale].get_moment(moment))
# print sim_array[i][j+scale], math.log(sim_array[i][j+scale].get_moment(moment)),
labelset1.add(sim_array[i][j+scale].params[1])
labelset2.add(sim_array[i][j+scale].params[0])
# print sim_array[i][j],
hilfsliste2.append(hilfsliste1)
#print '\n'
ax = fig.add_subplot(224)
print min(labelset1), max(labelset1), min(labelset2), max(labelset2)
cax = plt.imshow(hilfsliste2, origin = "lower", extent=[min(labelset1), max(labelset1), min(labelset2), max(labelset2)])
#print "max", max([max(hl) for hl in hilfsliste2]), " min", min([min(hl) for hl in hilfsliste2])
cbar = fig.colorbar(cax, ticks = np.linspace(min([min(hl) for hl in hilfsliste2]), max([max(hl) for hl in hilfsliste2]), 5))
plt.xlabel("pm")
plt.ylabel("ps")
#plotlist.append(hilfsliste2)
#or ps&pm groß
print "plot2, or222"
hilfsliste2 = []
labelset1, labelset2 = set(), set()
for i in range (scale):
hilfsliste1 = []
for j in range (scale):
hilfsliste1.append(sim_array[i+scale][j+scale].get_moment(moment))
#print sim_array[i+scale][j+scale],math.log(sim_array[i+scale][j+scale].get_moment(moment)),
labelset1.add(sim_array[i+scale][j+scale].params[1])
labelset2.add(sim_array[i+scale][j+scale].params[0])
hilfsliste2.append(hilfsliste1)
#print '\n'
ax = fig.add_subplot(222)
print min(labelset1), max(labelset1), min(labelset2), max(labelset2)
cax = plt.imshow(hilfsliste2, origin = "lower", extent=[min(labelset1), max(labelset1), min(labelset2), max(labelset2)])
#print "max", max([max(hl) for hl in hilfsliste2]), " min", min([min(hl) for hl in hilfsliste2])
cbar = fig.colorbar(cax, ticks = np.linspace(min([min(hl) for hl in hilfsliste2]), max([max(hl) for hl in hilfsliste2]), 5))
plt.xlabel("pm")
plt.ylabel("ps")
# Alter Kram
'''ax = fig.add_subplot(224)
cax = plt.imshow(plotlist[1], origin = "lower")
cbar = fig.colorbar(cax)
ax = fig.add_subplot(221)
cax = plt.imshow(plotlist[2], origin = "lower")
cbar = fig.colorbar(cax)
ax = fig.add_subplot(222)
cax = plt.imshow(plotlist[3], origin = "lower")
cbar = fig.colorbar(cax) '''
''' Nr = 2
Nc = 2
fig = plt.figure()
#cmap = cm.cool
# figtitle = 'Multiple images'
# t = fig.text(0.5, 0.95, figtitle,
# horizontalalignment='center',
# fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.32
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
print "ij ", i, j
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
data = plotlist[i+j*Nc]
# print data
dd = np.ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, np.amin(dd))
vmax = max(vmax, np.amax(dd))
images.append(a.imshow(data, origin = "lower"))
ax.append(a)
#norm = colors.Normalize(vmin=vmin, vmax=vmax)
#for i, im in enumerate(images):
# im.set_norm(norm)
# if i > 0:
# images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this interactively and
# modify the colormap:
plt.axes(ax[0]) # Return the current axes to the first one,
plt.sci(images[0]) # because the current image must be in current axes.'''
#plt.show()
''' fig = plt.figure(figsize = (4,4))
gs1 = gridspec.GridSpec(2,2)
ax_list = [fig.add_subplot(ss) for ss in gs1]
#anz der einzelnen plot berechnen:
scale_len = int(math.sqrt(num_sim/4))
print "scala", scale_len
print np.shape(sim_array)
sim_array = np.reshape(sim_array, (math.sqrt(num_sim), math.sqrt(num_sim)))
#print sim_array
print np.shape(sim_array)
to_plot = np.zeros((scale_len, scale_len))
for i in range(scale_len):
for j in range(scale_len):
if sim_array[i][j]:
if sim_array[i][j].get_moment(moment) == 0:
print "params + logmoment ", sim_array[i][j].params, ' ', (sim_array[i][j].get_moment(moment))
to_plot[i][j] = math.log(sim_array[i][j].get_moment(moment))
else:
to_plot[i][j] = None
print "none",
print to_plot
ax_list[2].imshow(to_plot, origin= "lower", interpolation="hamming")
to_plot = np.zeros((scale_len, scale_len))
for i in range(scale_len):
for j in range(scale_len):
if sim_array[i+scale_len][j]:
if sim_array[i+scale_len][j].get_moment(moment) == 0:
print "params + logmoment ", sim_array[i+scale_len][j].params, ' ', (sim_array[i+scale_len][j].get_moment(moment))
to_plot[i][j] = math.log(sim_array[i+scale_len][j].get_moment(moment))
else:
to_plot[i][j] = None
print "none",
print to_plot
ax_list[0].imshow(to_plot, origin= "lower", interpolation="hamming")
to_plot = np.zeros((scale_len, scale_len))
for i in range(scale_len):
for j in range(scale_len):
if sim_array[i][j+scale_len]:
if sim_array[i][j+scale_len].get_moment(moment) == 0:
print "params + logmoment ", sim_array[i][j+scale_len].params, ' ', (sim_array[i][j+scale_len].get_moment(moment))
to_plot[i][j] = math.log(sim_array[i][j+scale_len].get_moment(moment))
else:
to_plot[i][j] = 0
print "none",
print to_plot
ax_list[3].imshow(to_plot, origin= "lower", interpolation="hamming")
to_plot = np.zeros((scale_len, scale_len))
for i in range(scale_len):
for j in range(scale_len):
if sim_array[i+scale_len][j+scale_len]:
if sim_array[i+scale_len][j+scale_len].get_moment(moment) == 0:
print "params + logmoment ", sim_array[i+scale_len][j+scale_len].params, ' ', (sim_array[i+scale_len][j+scale_len].get_moment(moment))
to_plot[i][j] = math.log(sim_array[i+scale_len][j+scale_len].get_moment(moment))
else:
to_plot[i][j] = None
print "none",
print to_plot
ax_list[1].imshow(to_plot, origin= "lower", interpolation="hamming")
plt.show()'''
# erwartet datei, in der die sim als liste abgespeichert sind
def plot_file (datei, histogram_separate, histogram_spec, qq_Plot, fit_qq_Plot, num_bins = 50, vergleich= scipy.stats.invgauss):
print "plot_file " + datei
with open(datei, 'rb') as daten:
sim_liste = pickle.load(daten)
print sim_liste
print sim_liste[0].times, sim_liste[0].params
plot(sim_liste, histogram_separate, histogram_spec, qq_Plot, fit_qq_Plot, num_bins, vergleich)
def plot (sim_liste, histogram_separate, histogram_spec, qq_Plot, fit_qq_Plot, num_bins = 50, vergleich= scipy.stats.invgauss):
    """Render the selected plots for a list of Simulation objects.

    sim_liste          -- list of Simulation objects
    histogram_separate -- per-simulation histogram panel
    histogram_spec     -- one combined histogram ("spectrum") of all runs
    qq_Plot            -- qq-plot against a normal distribution
    fit_qq_Plot        -- qq-plot against *vergleich* using the stored mu
    num_bins           -- number of histogram bins
    vergleich          -- scipy distribution for the fitted qq-plot
    """
    startzeit = time.clock()
    if histogram_spec:
        print "Erstelle Spektrum"
        fig, ax = plt.subplots()
        # TODO: assumes all simulations share the same length/number settings
        fig.suptitle("Laenge: "+str(sim_liste[0].length)+" Anz Teilchen: " +str(sim_liste[1].number))
        for sim in sim_liste:
            ax.hist(sim.times, num_bins, alpha=0.5, normed = 1, label = str(sim.params) )
        # plt.show()
        legend = ax.legend(loc='upper right', shadow=True)
    # One window per simulation with the chosen statistics side by side:
    # separate histogram / qq-plot with fixed params / qq-plot with fitted mu.
    number_stats = sum([histogram_separate, qq_Plot, fit_qq_Plot])
    print number_stats
    if histogram_separate or qq_Plot or fit_qq_Plot:
        print "Erstelle separate Dinge"
        for sim in sim_liste:
            # one column of width 4 per requested statistic
            fig = plt.figure(figsize=(4*number_stats, 4))
            gs1 = gridspec.GridSpec(1, number_stats)
            ax_list = [fig.add_subplot(ss) for ss in gs1]
            # akt indexes the next free subplot
            akt = 0
            fig.suptitle("ps, pm"+str(sim.params)+str(round(sim.params[0]-sim.params[1],5)), size = 15)
            if histogram_separate:
                ax_list[akt].hist(sim.times, num_bins)
                ax_list[akt].set_title("Histogramm")
                akt+=1
            if qq_Plot:
                # NOTE: compares against a *normal* distribution on purpose
                # (see the title); the 0.05 refers to the chosen params
                sm.qqplot (np.array(sim.times), scipy.stats.norm, line = 'r', ax=ax_list[akt])
                ax_list[akt].set_title("qq-Plot; norm!! Params: 0.05")
                akt+=1
            if fit_qq_Plot:
                # qq-plot against `vergleich` using the mu fitted in __init__
                sm.qqplot (np.array(sim.times), vergleich, distargs= (sim.mu, ), line = 'r', ax=ax_list[akt])
                ax_list[akt].set_title("qq-Plot mit mu:" + str(sim.mu))
                akt+=1
            gs1.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
            # elapsed wall-clock time for this figure
            print time.clock()-startzeit
    plt.show()
def plot_histogram(datei, histogram_separate, histogram_spec, num_bins=1000):
with open(datei, 'rb') as csvfile:
myreader = csv.reader(csvfile, delimiter = ";",quoting=csv.QUOTE_NONE)
#number,length, params = myreader.next()
liste = []
# Erstelle Liste, mit der plt.hist umgehen kann
for row in myreader:
unterliste = []
for r in row:
r2 = float(r)
unterliste.append(r2)
liste.append(unterliste)
# Erstelle Histogramme
if histogram_separate:
print "erstelle separate histogramme"
#meine_range= (length, length+ length*(1/min(params)))
#meine_range = (length, 4*length)
meine_range = None
#print meine_range
figg = plt.figure()
ax = figg.add_subplot(221)
n, bins, patches = plt.hist(liste[0], num_bins, range = meine_range, normed=1, alpha=0.5 )
ax = figg.add_subplot(222)
n, bins, patches = plt.hist(liste[1], num_bins, range = meine_range, normed=1, alpha=0.5 )
ax = figg.add_subplot(223)
n, bins, patches = plt.hist(liste[2], num_bins, range = meine_range, normed=1, alpha=0.5 )
ax = figg.add_subplot(224)
n, bins, patches = plt.hist(liste[3], num_bins, range = meine_range, normed=1, alpha=0.5 )
# ein gemeinsames Histogramm aller Datensätze erstellen; entspricht Spektrum
if histogram_spec:
meine_range = None
print "Erstelle Spektrum"
figg = plt.figure()
for row in liste:
n, bins, patches = plt.hist(liste, num_bins, normed=1, alpha=0.5 )
print "Hist erstellt",#, n, bins, patches#,(time.clock()-startzeit)
plt.show()
# Aus einer Sim (*.p) ein Histogramm mit IG-Plot und qq Plot erstellen
def plot_single_histqq_ff(datei, num_bins=50):
with open(datei, 'rb') as daten:
sim = pickle.load(daten)
n, bins, patches = plt.hist(sim.times, num_bins, normed=1, alpha=0.5 )
x = np.arange(50000, 250000, 100)
print "ig-params", scipy.stats.invgauss.fit(sim.times)
mu, loc, scale = scipy.stats.invgauss.fit(sim.times)
plt.plot(x,scipy.stats.invgauss.pdf(x,mu, loc, scale))
print 'skew', scipy.stats.skew(sim.times)
sm.qqplot(np.array(sim.times), scipy.stats.invgauss, distargs=(mu,), line = 'r')
def plot_qq(datei, qq_Plot, fit_qq_Plot, vergleich = scipy.stats.invgauss):
with open(datei, 'rb') as csvfile:
myreader = csv.reader(csvfile, delimiter = ";",quoting=csv.QUOTE_NONE)
liste = []
# Erstelle Liste wie oben
for row in myreader:
unterliste = []
for r in row:
r2 = float(r)
unterliste.append(r2)
liste.append(unterliste)
# Und einen qq-Plot erstellen, evtl Parameter zur vergleichsfunktion müssen
# per Hand eingestellt werden
if qq_Plot:
print "erstelle qq-Plot",
fig = plt.figure()
ax = fig.add_subplot(221)
sm.qqplot (np.array(liste[0]), vergleich, distargs= (0.005,), line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[0]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr2",
ax = fig.add_subplot(222)
sm.qqplot (np.array(liste[1]), vergleich, distargs= (0.005,), line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[1]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr3",
ax = fig.add_subplot(223)
sm.qqplot (np.array(liste[2]), vergleich, distargs= (0.005,), line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[2]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr4",
ax = fig.add_subplot(224)
sm.qqplot (np.array(liste[3]), vergleich, distargs= (0.005,), line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[3]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "qqplot erstellt"
# qq-Plot mit automatischem fit zur Vergleichsfunktion
if fit_qq_Plot:
print "erstelle fit-qq-plot",
fig = plt.figure()
ax = fig.add_subplot(221)
sm.qqplot (np.array(liste[0]), vergleich, fit = True, line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[0]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr2",
ax = fig.add_subplot(222)
sm.qqplot (np.array(liste[1]), vergleich, fit = True, line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[1]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr3",
ax = fig.add_subplot(223)
sm.qqplot (np.array(liste[2]), vergleich, fit = True, line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[2]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "nr4",
ax = fig.add_subplot(224)
sm.qqplot (np.array(liste[3]), vergleich, fit = True, line = 'r', ax =ax)
#txt = ax.text(-1.8, 3500, str(params[3]) ,verticalalignment='top')
#txt.set_bbox(dict(facecolor='k', alpha=0.1))
print "qqplot erstellt"
plt.show()
def get_argument_parser():
    """Build the command-line parser for the heatmap/simulation plotting tool."""
    parser = argparse.ArgumentParser(
        description = "beschreibung")
    parser.add_argument("--inputfile", "-i", help = "input file (pickled) to plot a heatmap, n x n Matrix")
    parser.add_argument("--moment", "-m" , help = "which moment to plot as heatmap")
    parser.add_argument("--singlefile", "-sf", action = "store_true", help = "plot a heatmap from single file with multiple simulations")
    parser.add_argument("--singlesimulation", '-ss', action = "store_true", help = "plot a single simulation (histogram and qq)")
    parser.add_argument("--multiple_files", '-mf', action="store_true", help = "read multiple files, each a single spectrum")
    parser.add_argument("--number", "-n", type=int, help= "how many files to read")
    parser.add_argument("--recalculate", "-rc", action = "store_true", help = "Whether moments should be recalculated before plotting")
    return parser
def main():
    # Dispatch on the command-line flags: optionally recompute cached moments,
    # then draw heatmaps / histograms from one or many pickled Simulation files.
    p = get_argument_parser()
    args = p.parse_args()
    if args.recalculate:
        # Re-derive params/moments in every pickled Simulation and write the file back in place.
        filename = args.inputfile
        sims = None
        with open(filename,'rb') as datei:
            sims = pickle.load(datei)
        print np.shape(sims)
        for ls in sims:
            print '-',
            for sim in ls:
                sim.recalculate_params()
                sim.recalculate_moments()
        with open(filename, 'wb') as datei:
            pickle.dump(sims, datei)
    if args.singlefile:
        # One file holding multiple simulations -> single heatmap plus a 2x2 grid.
        filename = args.inputfile
        plot_heatmap_from_file(filename,0, args.moment, args.recalculate)
        plot_4_heats_from_file(filename, args.moment, args.recalculate)
    if args.multiple_files: #TODO
        # Read <number> files named "<inputfile><k>.p"; missing files are replaced
        # by a placeholder Simulation(1, 1) so the grid stays rectangular.
        number = args.number
        print "multiple_files: ", number
        filename = args.inputfile
        mySims = np.array([None]*number)
        num = 0
        fehlercounter = 0
        for i in range(number):
            #print "opening now", ps, pm,
            try:
                with open(filename+str(i+1)+".p", 'rb') as daten:
                    #print daten
                    aSim = pickle.load(daten)
                    mySims[num] = aSim
            except IOError:
                mySims[num] = Simulation(1, 1)
                fehlercounter +=1
                #print "error",
            num += 1
        print "alle offen mit ", fehlercounter, ' fehlend'
        # Sort by both parameters so the heatmap plot comes out in a meaningful order
        # (stable sort: secondary key get_pm first, primary key get_ps second).
        mySortedSims1 = sorted(mySims, key= Simulation.get_pm)
        #print mySortedSims1, '\n\n'
        mySortedSims = sorted(mySortedSims1, key = Simulation.get_ps)
        #print mySortedSims
        plot_4_heatmaps(mySortedSims, number, args.moment)
        print "fertig"
    if args.singlesimulation:
        plot_single_histqq_ff(args.inputfile)
    plt.show()
# Entry-point guard: run the plotting CLI only when executed as a script.
if __name__ == "__main__":
    main()
| [
"elly@oh14.de"
] | elly@oh14.de |
af8b0a51f2881672fd3f3fa9774af391f4149570 | 4d37658937bd45e3cbd94e3b7dbfe0d532f855f9 | /Strings.py | 83122f9c2a3a26cef45b19b192d35e2262bc6506 | [] | no_license | dylanmcg22/Scripts | e894906dc9b9e70abd69bae44292307562c786b4 | 005d92933d275792e5f6a7e5967fc647c62f3e77 | refs/heads/master | 2023-01-24T09:45:15.872720 | 2020-12-04T16:22:04 | 2020-12-04T16:22:04 | 298,628,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #first_name = 'Dylan'
# Ask for the user's name and greet them with both parts capitalized.
first_name = input('Please enter your first name: ')
last_name = input('Please enter your last name: ')
greeting = 'Hello, ' + first_name.capitalize() + ' ' + last_name.capitalize()
print(greeting)
| [
"noreply@github.com"
] | noreply@github.com |
4e564c1af8b54467afa88755cdf82caf68dc1605 | 4197efca9fe4761549d7baa8e5219479ab811d27 | /r1_bea.py | 0f0b9f9b8d60687b0a14f3e8be48e48f3a401255 | [] | no_license | bealeebrandt/chat | 1f1b15276b1242e77668700f2dd705b8b2c7ed8a | 23532c7705507843c8db0a42fc8252c5cd489e82 | refs/heads/main | 2023-01-24T10:06:03.501492 | 2020-11-25T08:04:38 | 2020-11-25T08:04:38 | 314,179,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py |
def read_file(filename):
    """Read *filename* (UTF-8, BOM tolerated) and return its lines, stripped."""
    with open(filename, 'r', encoding='utf-8-sig') as handle:
        return [raw.strip() for raw in handle]
def convert(lines):
    """Tally words, stickers and images per chat participant and print a summary.

    Each line is expected to look like "<time> <name> <message...>".  A message
    token of 貼圖 counts as one sticker, 圖片 as one image; anything else is
    counted as the number of characters across the remaining tokens.  Only the
    participants "Bea" and "Phoebe" are tallied.  Prints the totals and returns
    None (the caller ignores the return value).
    """
    participants = ('Bea', 'Phoebe')
    counts = {name: {'words': 0, 'stickers': 0, 'images': 0} for name in participants}
    for line in lines:
        parts = line.split(' ')
        if len(parts) < 3:
            continue  # malformed line: the previous version raised IndexError here
        name = parts[1]
        if name not in counts:
            continue  # only Bea and Phoebe are counted, as before
        tally = counts[name]
        if parts[2] == '貼圖':
            tally['stickers'] += 1
        elif parts[2] == '圖片':
            tally['images'] += 1
        else:
            # Character count summed over every whitespace-separated token.
            tally['words'] += sum(len(token) for token in parts[2:])
    for name in participants:
        tally = counts[name]
        print(name + '說了', tally['words'], '個字')
        print(name + '傳了', tally['stickers'], '個貼圖')
        print(name + '傳了', tally['images'], '張圖片')
def write_file(filename, lines):
    """Write every entry of *lines* to *filename*, one entry per line."""
    with open(filename, 'w') as out:
        out.writelines(entry + '\n' for entry in lines)
def main():
    # Parse one exported LINE chat log and print per-person statistics.
    # NOTE(review): the export filename is hard-coded to a single chat.
    lines = read_file('[LINE]Bea Lee.txt')
    # convert() prints the stats and returns None, so this rebinding is moot.
    lines = convert(lines)
    # write_file('output.txt', lines)
main() | [
"bea1209@gmail.com"
] | bea1209@gmail.com |
c5bd377254ceb0e1cfae1420ab742301e4d04971 | 52a2871b71b990bdddb046ececfc437e6e462844 | /jira_utils/jira_utils.py | b9bb0cff8e30d5df0a89895b34a78b4a42314327 | [] | no_license | DanteLore/jira-utils | 896eaa2fffc57862b9028c2f11b7973d039744f1 | f68fdc9ada4b7946d34f6817cefb64d6e930054b | refs/heads/master | 2020-09-26T08:04:09.802440 | 2017-05-30T08:47:38 | 2017-05-30T08:47:38 | 66,531,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,989 | py | import getpass
import logging
import keyring
from jira import JIRA
class Jira:
    """Immutable JQL query builder plus a thin wrapper around the JIRA client.

    Every ``with_*`` / ``status_*`` / ``*_days_ago`` / ``*_between`` method
    returns a NEW ``Jira`` whose ``jql`` has one more fragment AND-ed on, so
    queries chain fluently before ``get_issues``/``count_issues`` runs them.
    """

    def __init__(self, server, username="", jql="", logger=None):
        self.server = server
        self.username = username
        self.jql = jql
        self.jira = None  # lazily-created JIRA client, see get_issues()
        # Default fields requested from the JIRA search API.
        self.fields = ["id", "summary", "assignee", "status", "project", "created", "updated"]
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger("TESTING")
            self.logger.setLevel("DEBUG")
            self.logger.addHandler(logging.StreamHandler())

    def join(self, fragment):
        """Return *fragment* AND-ed onto the current JQL (or alone if empty)."""
        if self.jql == "":
            return fragment
        return "{0} and {1}".format(self.jql, fragment)

    def _chain(self, fragment):
        """Return a new Jira extending the current query with *fragment*.

        Factored out: previously every builder method duplicated this
        construction by hand.
        """
        return Jira(self.server, self.username, self.join(fragment), logger=self.logger)

    def with_id(self, id):
        """Filter to a single issue id."""
        return self._chain('id = "{0}"'.format(id))

    def with_these_ids(self, ids):
        """Filter to any of the given issue ids."""
        id_str = ", ".join(map(lambda s: '"{0}"'.format(s), ids))
        return self._chain('id in ({0})'.format(id_str))

    def with_project(self, project):
        """Filter by project key/name."""
        return self._chain('project = "{0}"'.format(project))

    def with_fix_version(self, fix_version):
        """Filter by fix version."""
        return self._chain('fixVersion = "{0}"'.format(fix_version))

    def with_label(self, label):
        """Filter to issues carrying *label*."""
        return self._chain('labels in ("{0}")'.format(label))

    def status_is_not(self, statuses):
        """Exclude issues whose status is in *statuses*."""
        state_str = ", ".join(map(lambda s: '"{0}"'.format(s), statuses))
        return self._chain('status not in ({0})'.format(state_str))

    def status_is(self, status):
        """Filter by current status."""
        return self._chain('status = "{0}"'.format(status))

    def status_was(self, status, dt):
        """Filter to issues that had *status* on the given date."""
        fragment = 'status was "{status}" on "{date}"'.format(
            status=status,
            date=dt.strftime("%Y/%m/%d"))
        return self._chain(fragment)

    def not_assigned(self):
        """Filter to unassigned issues."""
        return self._chain('assignee is empty')

    def assigned(self):
        """Filter to assigned issues."""
        return self._chain('assignee is not empty')

    def created_in_last_n_days(self, days):
        """Filter to issues created within the last *days* days."""
        return self._chain('created >= -{0}d'.format(days))

    def order_by(self, field):
        # Appended verbatim (no "and"): "order by" is a sort clause, not a filter.
        return Jira(self.server, self.username, self.jql + ' order by {0}'.format(field), logger=self.logger)

    def count_issues(self):
        """Number of issues matching the current query."""
        return len(self.get_issues(["id,summary"]))

    def get_login(self, username):
        """Return (username, password), prompting once and caching in the keyring."""
        password = keyring.get_password('jira_bot', username)
        if password is None:
            password = getpass.getpass()
            keyring.set_password('jira_bot', username, password)
        return username, password

    def get_issues(self, fields_to_use=None):
        """Run the query against the server, creating the client on first use."""
        if self.jira is None:
            options = {
                'server': self.server
            }
            u, p = self.get_login(self.username)
            self.jira = JIRA(options, basic_auth=(u, p))
        if not fields_to_use:
            fields_to_use = self.fields
        self.logger.debug("Executing JQL query: '{0}'".format(self.jql))
        results = self.jira.search_issues(self.jql, fields=fields_to_use, maxResults=1000)
        self.logger.debug("Fetched {0} results".format(len(results)))
        return results

    def resolved_n_days_ago(self, day):
        """Filter to issues resolved exactly *day* days ago."""
        return self._chain("resolutionDate >= startOfDay(-{0}d) and resolutionDate < endOfDay(-{0}d)".format(day))

    def created_n_days_ago(self, day):
        """Filter to issues created exactly *day* days ago."""
        return self._chain("created >= startOfDay(-{0}d) and created < endOfDay(-{0}d)".format(day))

    def open_issues_n_days_ago(self, day):
        """Filter to issues that existed and were still open *day* days ago."""
        return self._chain('(status was not in ("Done", "Closed") before endOfDay(-{0}d)) and created < endOfDay(-{0}d)'.format(day))

    def resolved_this_week(self):
        """Filter to issues resolved since the start of the current week."""
        return self._chain("resolutionDate >= startOfWeek()")

    def resolved_last_week(self):
        """Filter to issues resolved during the previous week."""
        return self._chain("resolutionDate >= startOfWeek(-1w) and resolutionDate < endOfWeek(-1w)")

    def in_progress_for_n_days(self, days):
        """Filter to issues that entered "In Progress" more than *days* days ago."""
        return self._chain('status changed to "In Progress" before -{0}d'.format(days))

    def created_between(self, start, end):
        """Filter to issues created in [start, end)."""
        fragment = 'created >= "{start}" and created < "{end}"'.format(
            start=start.strftime("%Y/%m/%d"),
            end=end.strftime("%Y/%m/%d")
        )
        return self._chain(fragment)

    def resolved_between(self, start, end):
        """Filter to issues resolved in [start, end)."""
        fragment = 'resolutionDate >= "{start}" and resolutionDate < "{end}"'.format(
            start=start.strftime("%Y/%m/%d"),
            end=end.strftime("%Y/%m/%d")
        )
        return self._chain(fragment)

    def with_sub_team(self, team):
        """Filter by the custom "sub-team" field."""
        return self._chain('sub-team = "{0}"'.format(team))
| [
"dan@logicalgenetics.com"
] | dan@logicalgenetics.com |
2daeb330fc949ff6e83869c733eda7bee8c15ee1 | e77eacb7c0ef654d8a5372811cdde73e57f89ac2 | /pattern_pr06.py | 1b6cd239f5ca88f00b8e831134c6af5241c52ba8 | [] | no_license | Ashish492/printingPattern | 38664025622c2f093e62df364ca010be050ae163 | 39498a8da09424dc328567ce08cc638f5c22a614 | refs/heads/main | 2023-08-17T12:34:03.827300 | 2021-10-03T05:37:59 | 2021-10-03T05:37:59 | 411,888,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | total_rows = int(input("enter a total rows:"))
# Print a right-aligned star triangle: row i is padded with (total_rows - i)
# spaces followed by i stars, so the hypotenuse sits on the left.
for i in range(1,total_rows+1):
    print(" "*(total_rows-i),"*"*i,sep="")
| [
"ashishshrestha492@gmail.com"
] | ashishshrestha492@gmail.com |
a5b5e59778367fdedcfbd2d092bc70983a936624 | 97bcb422a3a2366426b6fbe6c53b1c05a6b48d8d | /fichaMedica/apps.py | ba12bafb9e72627beefce57a442747ee65c6bf37 | [] | no_license | maravida292/heartsafe | 616e53fbe5bf2065d7fc945180cca8aca9a49239 | 3d2e3a46fbeead8491fd032fabfaa0be7693ccf5 | refs/heads/master | 2020-04-06T07:11:52.629103 | 2016-09-12T13:13:42 | 2016-09-12T13:13:42 | 65,582,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class FichamedicaConfig(AppConfig):
    """Django application configuration for the fichaMedica app."""
    name = 'fichaMedica'
| [
"villacismr@gmail.com"
] | villacismr@gmail.com |
e7a1d482b26844bb379d5b8a4c17af142e3a914e | 50a74e2b920c4d7ee9d30784448db1d594ca0b48 | /scripts/python/sql_queries.py | 7d0b0092b2895cdbc3e30f9a1f52fcde1701a163 | [
"MIT"
] | permissive | chamsrut/Postgres-data-modeling-Udacity | 5c096cc906b9e487cbf515a2676f02f24e13f49a | d4a73bd29ccf77e2e7ed2acc04ca7c4c4246981a | refs/heads/main | 2023-06-02T16:00:05.162725 | 2021-06-15T10:20:18 | 2021-06-15T10:20:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | # DROP TABLES
# Idempotent DROP statements, one per table of the star schema defined below.
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays
(
songplay_id INT PRIMARY KEY,
start_time NUMERIC NOT NULL,
user_id INT NOT NULL,
level BOOLEAN,
song_id VARCHAR,
artist_id VARCHAR,
session_id INT NOT NULL,
location VARCHAR,
user_agent VARCHAR
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users
(
user_id INT PRIMARY KEY,
first_name VARCHAR,
last_name VARCHAR,
gender BOOLEAN,
level BOOLEAN
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs
(
song_id VARCHAR PRIMARY KEY,
title VARCHAR,
artist_id VARCHAR NOT NULL,
year INT,
duration NUMERIC
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists
(
artist_id VARCHAR PRIMARY KEY,
name VARCHAR,
location VARCHAR,
latitude NUMERIC,
longitude NUMERIC
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time
(
start_time TIME PRIMARY KEY,
hour INT,
day INT,
week INT,
month INT,
year INT,
weekday INT
);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays
(
songplay_id,
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent
)
VALUES
(
%s, %s, %s, %s, %s, %s, %s, %s, %s
)
ON CONFLICT (songplay_id)
DO UPDATE
SET level = EXCLUDED.level, song_id = EXCLUDED.song_id, artist_id = EXCLUDED.artist_id;
""")
user_table_insert = ("""
INSERT INTO users
(
user_id,
first_name,
last_name,
gender,
level
)
VALUES
(
%s, %s, %s, %s, %s
)
ON CONFLICT (user_id)
DO UPDATE
SET level = EXCLUDED.level;
""")
song_table_insert = ("""
INSERT INTO songs
(
song_id,
title,
artist_id,
year,
duration
)
VALUES
(
%s, %s, %s, %s, %s
)
ON CONFLICT (song_id)
DO NOTHING;
""")
artist_table_insert = ("""
INSERT INTO artists
(
artist_id,
name,
location,
latitude,
longitude
)
VALUES
(
%s, %s, %s, %s, %s
)
ON CONFLICT (artist_id)
DO NOTHING;
""")
time_table_insert = ("""
INSERT INTO time
(
start_time,
hour,
day,
week,
month,
year,
weekday
)
VALUES
(
%s, %s, %s, %s, %s, %s, %s
)
ON CONFLICT (start_time)
DO NOTHING;
""")
# FIND SONGS
song_select = ("""
SELECT s.song_id, a.artist_id
FROM songs s
JOIN artists a
ON s.artist_id = a.artist_id
WHERE s.title = %s AND a.name = %s AND s.duration = %s;
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
| [
"61270261+chamsrut@users.noreply.github.com"
] | 61270261+chamsrut@users.noreply.github.com |
949195cf0b233dbb3c3c0c87b6f7a7aac64fedb0 | 79164b6ef1a663a077359623fd82e07b1c86de4f | /pirover_controller/AVstreamer.py | 441c84f139fa93c836a6446ae60b6dccb5bfb4f8 | [] | no_license | gnshshraiGit/pirover | ff412b456f1450cfb0fe6c8d24a2bd587d44c318 | bf3decbb677ec6b4bdfdb991f6a73b0a9027a449 | refs/heads/master | 2022-12-21T12:53:22.904212 | 2017-12-23T10:30:20 | 2017-12-23T10:30:20 | 94,701,780 | 2 | 0 | null | 2022-12-07T23:59:58 | 2017-06-18T17:16:12 | Python | UTF-8 | Python | false | false | 1,830 | py | from subprocess import Popen
import config as cfg
import time
class avstreamer:
    """Manage the paired audio/video streaming subprocesses configured in config.py."""
    # Popen handles for the two streams; False means "never started / stopped".
    sendVideoStreamHandle = False
    sendAudioStreamHandle = False
    @classmethod
    def startStream(cls):
        """Spawn both stream processes, respawning any that exits with status 1."""
        if avstreamer.isStreaming() == False:
            cls.sendVideoStreamHandle = Popen(cfg.videoStreamingCommand)
            cls.sendAudioStreamHandle = Popen(cfg.audioStreamingCommand)
            time.sleep(2)
            # poll() == 1 means the process died immediately; retry until it sticks.
            while cls.sendVideoStreamHandle.poll() == 1:
                cls.sendVideoStreamHandle = Popen(cfg.videoStreamingCommand)
                time.sleep(2)
            while cls.sendAudioStreamHandle.poll() == 1:
                cls.sendAudioStreamHandle = Popen(cfg.audioStreamingCommand)
                time.sleep(2)
    @classmethod
    def stopStream(cls):
        """Terminate both streams; keeps signalling until poll() reports each exited."""
        if avstreamer.isStreaming() == True:
            while True:
                if cls.sendVideoStreamHandle != False and cls.sendVideoStreamHandle.poll() is None:
                    cls.sendVideoStreamHandle.terminate()
                else:
                    break
            while True:
                if cls.sendAudioStreamHandle != False and cls.sendAudioStreamHandle.poll() is None:
                    cls.sendAudioStreamHandle.terminate()
                else:
                    break
    @classmethod
    def reStream(cls):
        """Restart both streams with a short settling pause in between."""
        cls.stopStream()
        time.sleep(2)
        cls.startStream()
    @classmethod
    def isStreaming(cls):
        """Return True only when both processes exist and are still running."""
        if cls.sendVideoStreamHandle == False or cls.sendAudioStreamHandle == False:
            return False;
        if cls.sendVideoStreamHandle.poll() is None and cls.sendAudioStreamHandle.poll() is None:
            return True
        else:
            return False;
| [
"gnsh_sh_rai@yahoo.co.in"
] | gnsh_sh_rai@yahoo.co.in |
f4788c99df65eea5f4c06b3a63f6156c51a3610b | 09d42b7fe150d0dc467e3e26379949434aa35f5b | /MLDL_Learning/tf-Tutorials/07_read_data.py | 811361cdd96c172121f455c3c333e0a2cc4906d4 | [] | no_license | shqg/ML-DL_Learning | f333303ece99ed78748f080f0bcfc5da25bdd73f | e3983abcb791233a07c322c51ee6ee17fdd9994f | refs/heads/master | 2021-01-11T22:38:05.767633 | 2017-06-15T08:44:17 | 2017-06-15T08:44:17 | 79,005,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Date : 2017-06-12 11:00:40
@Author : gengshuoqin (gengshuoqin@360.com)
description:
dense_to_one_hot:Convert class labels from scalars to one-hot vectors
"""
import os
import numpy as np
###################################################
"""
将 vector型的label转为oneHot
input:
labels_dense: 训练数据的第一列是label列;labels_dense=trianData[:,0]
num_classes: 分类数
output: oneHot label matrix 中的值为int型的
"""
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    # Set exactly one 1 per row via (row, column) fancy indexing; labels may
    # arrive as floats (first column of the loaded matrix), hence astype(int).
    rows = np.arange(num_labels)
    cols = labels_dense.ravel().astype(int)
    one_hot[rows, cols] = 1
    return one_hot
# 07_sample.txt 第一列标签列{0,1}两类;07_sample02.txt 第一列标签列{0,1,2,3,4,5}六类
filename="/home/gsq/program/tf-Tutorials/data/07_sample.txt"
"""
调用read()会一次性读取文件的全部内容,所以,要保险起见,可以反复调用read(size)方法,每次最多读取size个字节的内容。另外,调用readline()可以每次读取一行内容,调用readlines()一次读取所有内容并按行返回list。因此,要根据需要决定怎么调用。
"""
with open(filename) as f:
lines=f.readlines()
# print len(lines)
# for line in lines:
# print line
data_vec=np.array(lines)
# print data_vec.shape
a = np.loadtxt(filename)
labels=a[:,0]
dense_to_one_hot(labels,2)
##################################################
| [
"gengshuoqin@163.com"
] | gengshuoqin@163.com |
b1fa504216c5745ac6eacf6e2af43947e70ddfcf | 9311cc059d234ad2263314a83b734e732f0be71f | /Main.py | 3847a6ec65797cc3b8780eb449b78d15130f9acf | [] | no_license | caarson/collegesocialgatherer | 76c8c5bf65f9c9c54fc95b5554bcb791c7dea3f8 | 8eab50db54db86775c1119086d8bb84fadb44166 | refs/heads/master | 2020-12-26T13:03:56.529956 | 2020-01-31T21:18:12 | 2020-01-31T21:18:12 | 237,517,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,580 | py | import gspread
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from oauth2client.service_account import ServiceAccountCredentials
from selenium.webdriver.support.wait import WebDriverWait
###################################################
### Chrome Options / Login Details
###################################################
chrome_options = Options()
#chrome_options.add_argument("--headless")
###################################################
### Global Methods
###################################################
def error(error, exception):
    """Report *exception* on stdout, then terminate with *error* as the exit status."""
    print(exception)
    raise SystemExit(error)
def ask_continue():
    # Ask whether to run another search; "yes" searches again, "no" closes the
    # shared browser, anything else silently does nothing.
    # NOTE(review): relies on the module-globals `Google` (re-bound to an
    # instance at the bottom of the script) and `browser`.
    option = input("would you like to continue? (search again?): ")
    if option.lower() == "yes":
        Google.search()
    elif option.lower() == "no":
        browser.close()
###################################################
### Variables:
###################################################
group_name = input("frat/sorierty name: ")
college_name = input("college name: ")
social_type = input("social name: ")
# declare browser for global use
browser = webdriver.Chrome(chrome_options= chrome_options, executable_path= '/Users/carsonrhodes/Documents/SeleniumDrivers/chromedriver') # comment out to remove browser if isolating
###################################################
### Classes:
###################################################
class Google: # handles tasks involving the use of Google for search queries.
    """Drive the shared Selenium ``browser`` to run a Google query and open an Instagram hit."""
    def __init__(self):
        # variables
        # self.group_name = input("type in the group/frat/soriority you'd like to search: ")
        # self.college_name = input("type in the college to search: ") # <- change to google another ig
        # self.social_type = input("type in the social to search: ") # <- change to switch social your googling
        self.error = "[!] There was a critical error that broke the program!"
    def search(self): # a function made for the purpose sending a query to Google.
        # Builds "<college> <group> <social>" from the module-level inputs and submits it.
        print("searching...")
        url = "http://www.google.com/"
        browser.get(url)
        xpath = "//input[@name='q']"
        google_search_box = browser.find_element_by_xpath(xpath)
        search_term = college_name + " " + group_name + " " + social_type
        google_search_box.send_keys(search_term, Keys.ENTER)
    def locate_instagram(self): # a function made for the purpose of locating an instagram result.
        # Waits up to 10s for a result mentioning "Instagram photos and videos" and clicks it.
        try:
            print("locating instagram page...")
            #instagram = browser.find_element_by_xpath(xpath= "//div[contains(text(),'Instagram photos and videos')]")
            instagram = WebDriverWait(browser, 10).until(lambda x: x.find_element_by_xpath(xpath= "//div[contains(text(),'Instagram photos and videos')]"))
            instagram.click()
            time.sleep(2)
            print(browser.current_url)
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt; exits the program via error().
            print("the program was unable to find instagram page from Google query.")
            exception = Exception(self.error)
            error(self.error, exception)
class InputData:
    """Wrap the Google-Sheets worksheet whose first column lists colleges."""

    def __init__(self):
        # use creds to create a client to interact with the Google Drive API
        self.scope = ['https://spreadsheets.google.com/feeds',
                      'https://www.googleapis.com/auth/drive']
        self.creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', self.scope)
        self.client = gspread.authorize(self.creds)
        self.sheet = self.client.open("College Bookings itslit.org").sheet1
        self.error = "[!] There was a critical error that broke the program!"
        self.colleges = ""
        self.count_of_colleges = 0  # filled in by check_if_college_exists()

    def import_sheet(self):
        """Fetch column 1 (college names) from the sheet and cache it."""
        print("checking data...")
        try:
            print(self.sheet.col_values(1))
            self.colleges = self.sheet.col_values(1)
        except Exception:  # was a bare except; keep best-effort behaviour without trapping SystemExit
            print(self.error)
            print("there was an error when trying to connect to Google.")

    def check_if_college_exists(self):
        """Search column 1 for the module-level ``college_name`` and branch on the result."""
        matched = "no"
        college_name_formatted = college_name.lower()
        college_name_formatted = college_name_formatted.capitalize()
        colleges = self.sheet.col_values(1)
        print(colleges)
        count_of_colleges = 0
        for college in colleges:
            # BUGFIX: the counter previously never advanced (the local stayed 0),
            # so self.count_of_colleges was always 1 regardless of sheet size.
            count_of_colleges += 1
            if college == college_name_formatted:
                matched = "yes"
                print("match")
            else:
                print("no match")
        self.count_of_colleges = count_of_colleges
        if matched == "yes":
            print("college exists!")
            print("moving sheets...")
            # BUGFIX: call through self; the old code went through the module-global
            # name "InputData", which only worked because the script re-binds it to an instance.
            self.navigate_sheet()
        if matched == "no":
            print("college was not found!")
            print("adding college to list...")
            self.add_college()

    def navigate_sheet(self):
        """Print the spreadsheet's worksheet list."""
        worksheet_list = self.sheet.worksheets()
        print(worksheet_list)

    def add_college(self):
        """Inspect the cell at the last counted college row.

        NOTE(review): this only reads and prints the cell value; it does not
        actually write the new college name to the sheet yet.
        """
        cell = "a" + str(self.count_of_colleges)
        print(self.count_of_colleges)
        val = self.sheet.acell(cell).value
        print(val)
###################################################
### CODE STARTS HERE:
###################################################
#Google = Google()
#Google.search()
#Google.locate_instagram()
InputData = InputData()
InputData.import_sheet()
InputData.check_if_college_exists()
#ask_continue()
| [
"noreply@github.com"
] | noreply@github.com |
27c666183a810fbbf071984d91e6e19c62f210b9 | e1b4c28ddac47bd39d01199ee8013836d0f1a6c3 | /lab2/3-8.py | 239bd52d909a08abf37017420d061d5fef9d89d6 | [] | no_license | LukaszSztuka/Jezyki-Skryptowe | 00eccb8636ad6454973321d7f96d66787e3bf76d | aca6e6bda080f3ddaa1f061a0d00ae63822ddc0a | refs/heads/master | 2023-05-09T23:55:38.283039 | 2021-06-12T18:31:01 | 2021-06-12T18:31:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from math import sqrt
def pier(a, b, c):
delta = pow(b, 2) - (4 * a * c)
if delta < 0:
delta = -delta
delta = sqrt(delta) * 1j
x1 = (-b - delta) / (2 * a)
x2 = (-b + delta) / (2 * a)
return "Pierwiasteki wynoszą: " + str(x1) + " i " + str(x2)
if delta >= 0:
x1 = (-b-sqrt(delta))/(2 * a)
if delta == 0:
return "Pierwiastek wynosi: " + str(x1)
else:
x2 = (-b + sqrt(delta)) / (2 * a)
return "Pierwiasteki wynoszą: " + str(x1) + " i " + str(x2)
a = int(input("Podaj a: "))
b = int(input("Podaj b: "))
c = int(input("Podaj c: "))
print(pier(a, b, c))
| [
"lukaszsztuka21@gmail.com"
] | lukaszsztuka21@gmail.com |
2425211ba02da1a8b060213fea11664e045f8545 | 815bffc29a0479f516f2f31c1da11214a254e804 | /run.py | f1a67a1075bdfb2fc33f5690883348d29544995a | [] | no_license | Jbothost/derp | abe4c9ea983b3327761e89bc8a4eafd06896ad0d | f87ae00a26cf63ebcfcf503e348dc18c8ccd03d5 | refs/heads/master | 2020-03-28T18:59:44.265063 | 2018-09-15T19:39:38 | 2018-09-15T19:39:38 | 148,934,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,953 | py | import discord
import asyncio
from discord.ext.commands import Bot
from discord.ext import commands
Client = discord.Client()
bot_prefix= "/"
client = commands.Bot(command_prefix=bot_prefix)
@client.event
async def on_ready():
    # Log identity once the gateway connection is up, then advertise the prefix as a "game".
    print("Bot Online!")
    print("Name: {}".format(client.user.name))
    print("ID: {}".format(client.user.id))
    #Extra 1
    await client.change_presence(game=discord.Game(name='/help'))
@client.command(pass_context=True)
async def rules(ctx):
await client.say("""RULES:
1. Be respectful - Be respectful to everyone in this discord.
2. No NSFW - PG13, people of all ages run servers. (Mature Content Filter: ALL)
3. No trolling - Acting stupid, trashy posts missbehaving will be seen as such.
4. No spam - We don't spam you, so don't spam.
5. Clean usernames - Keep your name clean and respectful.
6. No illegal material
7. No piracy links of any kind.
8. No bots allowed - Only our bots. Any unauthorised bots will be banned.
9. No ban evading - Just move on.
10. No selling - This isn't a marketplace. Find somewhere else.
11. No impersonation - No matter who you are trying to impersonate, don't do it.
12. Minimal advertising - Keep it in #ads (Post only ONE advertisement - Not for chatter)
13. Private Information - DO NOT share client information or MultiCraft logins here. Keep that for support tickets.
14. Griefing and or stealing items from other RB members will lead to an immediate expulsion from every RB island!
15. Obey all staff! They're the law in the RB community.""")
#command1
@client.command(pass_context = True)
async def invite(ctx):
    # List the server's active invite URLs, wrapped in <> to suppress link embeds.
    x = await client.invites_from(ctx.message.server)
    x = ["<" + y.url + ">" for y in x]
    print(x)
    embed = discord.Embed(title = "Invite Links", description = x, color = 0xFFFFF)
    return await client.say(embed = embed)
#command2
@client.command(pass_context = True)
async def getbans(ctx):
    # Show the names of every banned member, one per line, in an embed.
    x = await client.get_bans(ctx.message.server)
    x = '\n'.join([y.name for y in x])
    embed = discord.Embed(title = "List of Banned Members", description = x, color = 0xFFFFF)
    return await client.say(embed = embed)
#command3
@client.command(pass_context=True)
async def connect(ctx):
    # Join the voice channel of the command's author (one voice connection per server).
    if client.is_voice_connected(ctx.message.server):
        return await client.say("I am already connected to a voice channel. Do not disconnect me if I am in use!")
    author = ctx.message.author
    voice_channel = author.voice_channel
    # NOTE(review): voice_channel is None when the author is not in voice — the join would fail.
    vc = await client.join_voice_channel(voice_channel)
#command4
@client.command(pass_context = True)
async def disconnect(ctx):
    # Drop the voice connection that belongs to this server, if there is one.
    for x in client.voice_clients:
        if(x.server == ctx.message.server):
            return await x.disconnect()
#command5
@client.command(pass_context=True)
async def clear(ctx, number):
    # Bulk-delete the last <number> messages from the invoking channel.
    mgs = []
    number = int(number) #Converting the amount of messages to delete to an integer
    async for x in client.logs_from(ctx.message.channel, limit = number):
        mgs.append(x)
    # NOTE(review): discord's bulk-delete endpoint rejects batches larger than 100 — confirm.
    await client.delete_messages(mgs)
#command6
@client.command(pass_context = True)
async def ban(ctx, *, member : discord.Member = None):
    # Admin-only: ban the mentioned member and confirm with a red embed.
    if not ctx.message.author.server_permissions.administrator:
        return
    if not member:
        return await client.say(ctx.message.author.mention + "Specify a user to ban!")
    try:
        await client.ban(member)
    except Exception as e:
        # Role-hierarchy failures are detected by message text only; any other
        # failure falls through and still sends the success embed below.
        if 'Privilege is too low' in str(e):
            return await client.say(":x: Privilege too low!")
    embed = discord.Embed(description = "**%s** has been banned!"%member.name, color = 0xFF0000)
    return await client.say(embed = embed)
#command7
@client.command(pass_context = True)
async def kick(ctx, *, member : discord.Member = None):
    # Admin-only: kick the mentioned member and confirm with a red embed.
    if not ctx.message.author.server_permissions.administrator:
        return
    if not member:
        return await client.say(ctx.message.author.mention + "Specify a user to kick!")
    try:
        await client.kick(member)
    except Exception as e:
        # Role-hierarchy failures are detected by message text only; any other
        # failure falls through and still sends the success embed below.
        if 'Privilege is too low' in str(e):
            return await client.say(":x: Privilege too low!")
    embed = discord.Embed(description = "**%s** has been kicked!"%member.name, color = 0xFF0000)
    return await client.say(embed = embed)
#command8
@client.command(pass_context = True)
async def listservers(ctx):
    # List every server this bot is currently a member of.
    x = '\n'.join([str(server) for server in client.servers])
    print(x)
    embed = discord.Embed(title = "Servers", description = x, color = 0xFFFFF)
    return await client.say(embed = embed)
#command9
@client.command(pass_context = True)
async def info(ctx):
await client.say("""**Useful Links:**
Discord Invite: https://discord.gg/p72sjBG
Roles:
@Owner - Official FV Founders
@Admin - Offical FV Staff
client.run("NDkwNTc3NTM2Njg2NzUxODA0.Dn7wOA.QUKuzwCWK4ihvg82kiHCPod3iZM")
| [
"noreply@github.com"
] | noreply@github.com |
eb405f35229c384be7119c0426a36b47fd375b1e | d656f8fda2b9247a28fcd2eb649345136bc863af | /pytorch/letter_vae/meanstd.py | 042764d15fbdc6a2eedd87859467e08bdb47748a | [] | no_license | VasuAgrawal/fun_with_fonts | 7f72fb65e1b33a2c46480239f3100d05e52737d4 | 899703b18c4d38987f65b42f51c22cec8490ab18 | refs/heads/master | 2023-03-11T23:23:25.328006 | 2021-02-20T21:20:16 | 2021-02-20T21:20:16 | 275,771,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | #!/usr/bin/env python3
# https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/39
#
# Two-pass computation of the per-pixel mean and standard deviation of the
# training set produced by the project's makeLoaders() helper, for use in
# a transforms.Normalize(mean, std) call.
from loaders import makeLoaders
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets

# Reference recipe for 3-channel CIFAR-10 kept for comparison:
# transform = transforms.Compose([transforms.ToTensor(),])
#
# dataset = datasets.CIFAR10(root='cifar10', train=True, download=True,transform=transform)
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=1, shuffle=False)
#
# mean = torch.zeros(3)
# std = torch.zeros(3)
#
# for i, data in enumerate(dataloader):
#     if (i % 10000 == 0): print(i)
#     data = data[0].squeeze(0)
#     print(data.size())
#     break
#     if (i == 0): size = data.size(1) * data.size(2)
#     mean += data.sum((1, 2)) / size
#
# mean /= len(dataloader)
# print(mean)
# mean = mean.unsqueeze(1).unsqueeze(2)
#
# for i, data in enumerate(dataloader):
#     if (i % 10000 == 0): print(i)
#     data = data[0].squeeze(0)
#     std += ((data - mean) ** 2).sum((1, 2)) / size
#
# std /= len(dataloader)
# std = std.sqrt()
# print(std)
#
train_loader, test_loader = makeLoaders(1, 1)
# Single accumulator: samples are treated as single-channel.
mean = torch.zeros(1)
std = torch.zeros(1)
# Pass 1: mean. Assumes batch_size == 1 and each sample squeezes to a 2-D
# (H x W) tensor — TODO confirm against loaders.makeLoaders.
for i, data in enumerate(train_loader):
    if (i % 10000 == 0):
        # progress indicator every 10k samples
        print(i)
    data = data[0].squeeze(0)
    if (i == 0):
        # number of pixels per image; assumes all images share this size
        size = data.size(0) * data.size(1)
    mean += data.sum((0, 1)) / size
mean /= len(train_loader)
print(mean)
# NOTE(review): the unsqueeze result is immediately discarded by
# .view(1).item() on the next line — leftover from the 3-channel recipe.
mean = mean.unsqueeze(0).unsqueeze(1)
mean = mean.view(1).item()
# Pass 2: standard deviation around the mean computed above.
for i, data in enumerate(train_loader):
    if (i % 10000 == 0): print(i)
    data = data[0].squeeze(0)
    std += ((data - mean) ** 2).sum((0, 1)) / size
std /= len(train_loader)
std = std.sqrt()
print(std)
| [
"vasu_agrawal@yahoo.com"
] | vasu_agrawal@yahoo.com |
def checkmoves(karta, posx, posy):
    """Count the peg-solitaire jumps that can land on the empty cell.

    A jump into (posx, posy) is possible from a direction when both the
    adjacent cell and the one behind it hold a peg ('o').  The board is
    expected to carry a border wide enough that indexing two cells past
    the hole never leaves the grid.
    """
    total = 0
    # (dx, dy) unit vectors: left, right, up, down.
    for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        near = karta[posy + dy][posx + dx]
        far = karta[posy + 2 * dy][posx + 2 * dx]
        if near == 'o' and far == 'o':
            total += 1
    return total
# Read the 7x7 peg-solitaire board from stdin and pad it with a border of
# spaces on every side, so checkmoves() can index up to two cells past any
# edge without wrapping to a negative index or raising IndexError.
karta = []
karta.append('         ')
i = 0
while i < 7:
    # one space of padding on each side of every input row
    karta.append(' ' + input() + ' ')
    i += 1
karta.append('         ')
# For every empty hole '.', add the number of jumps that could fill it.
moves = 0
i = 1
while i < 8:
    e = 0
    while e < 8:
        if karta[i][e] == '.':
            moves = moves + checkmoves(karta, e, i)
        e += 1
    i += 1
print(moves)
| [
"85373641+adampehrson@users.noreply.github.com"
] | 85373641+adampehrson@users.noreply.github.com |
5dd5656c70b3d3fb0041c28be639da84f8a05701 | 7bb37821b54c2ffe0dc8cbf2a70bd20c932fb515 | /users/migrations/0006_auto_20190812_1022.py | ae4141e0132bfd3748bbf0be56a5be60ae0212fd | [] | no_license | andrewhstead/dating-site | 6dfed04404fa1ea03594ff08a1d3aa31fe07b47c | bc81ade529ab916093ba80ab009d03a00e1dfab0 | refs/heads/master | 2020-07-02T13:04:02.878187 | 2020-05-07T22:23:09 | 2020-05-07T22:23:09 | 176,342,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 2.2.4 on 2019-08-12 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``User.intro`` an optional
    100-character field (blank and null allowed)."""

    dependencies = [
        ('users', '0005_auto_20190811_2134'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='intro',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| [
"andrew@andrewstead.co.uk"
] | andrew@andrewstead.co.uk |
18a6ad89d7dac68e7b90c90d2f1a53e0a54d17e0 | d4122944fb1a0559572362dd9ba591d5de13c58f | /logmailer.py | 0f55dd74c4b36dc91fd723336778a37a2b28835e | [
"MIT"
] | permissive | jmares/logmailer | 60108c39533f7c50912c2c0c5226035c5120f6fc | 64e0bdd2e160d32340df658d4b65fa5e7e0fc2a8 | refs/heads/main | 2023-07-19T04:30:49.265432 | 2021-09-06T15:21:02 | 2021-09-06T15:21:02 | 389,320,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import os
from datetime import datetime
import time

from config import DEST_EMAIL, LOG_DIR

# Mail every *.log file in LOG_DIR that was modified today to DEST_EMAIL,
# using the local msmtp binary as the transport.
now = datetime.now()
# Hoisted loop invariant: today's date, formatted once.
today = now.strftime("%Y-%m-%d")

for file in os.listdir(LOG_DIR):
    if file.endswith(".log"):
        # get the full path for the file, incl. the file
        file_full_path = os.path.join(LOG_DIR, file)
        # get the last modified date of the file
        file_mod_time = time.localtime(os.path.getmtime(file_full_path))
        file_mod_date = time.strftime("%Y-%m-%d", file_mod_time)
        # only mail logs written today
        if file_mod_date == today:
            # BUGFIX: the handle was previously opened and never closed;
            # `with` guarantees it is released even if read() fails.
            with open(file_full_path) as f:
                text = f.read()
            # subject line: file name up to the first underscore
            title = file.split('_')[0]
            try:
                p = os.popen('msmtp -t', 'w')
                p.write(f"To:{DEST_EMAIL}\nSubject: {title}\n\n{text}")
                p.close()
                print('Message sent.')
            except Exception as e:
                # best effort: report and continue with the next file
                print('Caught exception!')
                print(e)
| [
"67288966+jmares@users.noreply.github.com"
] | 67288966+jmares@users.noreply.github.com |
6f002af3955d84710a19217854ee51ea99ccb8f2 | 6f5e08afeb4efc4b5897b2df69a939dbfbdd7714 | /lib/python3.6/site-packages/djangocms_picture/__init__.py | bdd8b9cf626b9012e56840eebf6520b2c90f46ce | [
"MIT"
] | permissive | mitchellbohn/glacier-arena-website | 487e950cbd232d0d6b7e231743426389e8b6f5cd | 43b35a178ea47113268bacaf676268c4cf5c5245 | refs/heads/master | 2020-04-22T11:22:25.428919 | 2019-05-14T14:27:00 | 2019-05-14T14:27:00 | 170,337,893 | 0 | 0 | MIT | 2019-04-19T14:37:45 | 2019-02-12T15:05:13 | Python | UTF-8 | Python | false | false | 46 | py | # -*- coding: utf-8 -*-
__version__ = '2.0.8'
| [
"mitchellbohn96@gmail.com"
] | mitchellbohn96@gmail.com |
c5b684d3453380613bb9bf76e73a9a85aaf4e078 | 70b21bc6a6426a3360abc1bba0c9757b412e3f88 | /Insults/shakespeare.py | 7436fb99a603888f45f5966e74fe11b55af8bfda | [] | no_license | CGasberg/PythonCrashCourse | 029ca65c6358d8168651e72f51f87dd873d8d1c5 | 5591ba65e4bd3f05605da65b1f18de7e6e10593c | refs/heads/master | 2021-01-19T17:25:55.383495 | 2017-07-18T17:57:20 | 2017-07-18T17:57:20 | 82,457,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | #with open("insults.csv","r") as f:
# contents = f.read()
# print(contents)
import random
import csv
# Load the three columns of insults.csv (semicolon-separated adjectives and
# noun) into parallel word lists used by insult_me().
list_a = []
list_b = []
list_c = []
with open("insults.csv", "r") as f:
    for line in f:
        # assumes every line has at least 3 ';'-separated fields —
        # a short/blank line would raise IndexError here (TODO confirm)
        words = line.split(";")
        list_a.append(words[0])
        list_b.append(words[1])
        list_c.append(words[2].strip())
def insult_me():
    """Print a random Shakespearean insult, ask the user to rate it 1-5,
    and append the ``insult;rating`` pair as one row to ratings.csv."""
    word_a = random.choice(list_a)
    word_b = random.choice(list_b)
    word_c = random.choice(list_c)
    insult = "Thou " + word_a + " "+ word_b + " " + word_c + "!"
    print(insult)
    rating = input("Please rate your insult from 1 - 5: ")
    print("You've given the insult: " + insult + " the rating " + rating + "!")
    # Appends one single-field row "insult;rating"; the file handle is left
    # to the garbage collector to close.
    writer = csv.writer(open("ratings.csv", "a", newline=""))
    writer.writerow([insult.strip() + ";" + rating.strip()])
insult_me()
def display_rating():
    """Optionally report the highest rating recorded in ratings.csv.

    Asks the user for confirmation; on "Y" it scans ratings.csv (skipping
    the first line, as the original did) and prints the maximum integer
    rating found, defaulting to 0.

    Fixes: the previous version referenced an undefined name ``column``,
    so a NameError was raised and swallowed by a bare ``except: pass`` on
    every run — it always printed 0.  It also raised StopIteration on an
    empty file.
    """
    display = input("Would you like to display the most rated insult? Y/N")
    if display == "Y":
        maxrate = 0
        with open("ratings.csv", "r") as f:
            # Skip the first line; the default stops StopIteration on an
            # empty file.
            next(f, None)
            for line in f:
                # Rows are written by insult_me() as "<insult>;<rating>".
                try:
                    rating = int(line.strip().split(";")[1])
                except (IndexError, ValueError):
                    continue  # malformed row: ignore, as before
                maxrate = max(maxrate, rating)
        print(maxrate)
    else:
        print("Have a nice day")
display_rating()
| [
"christian.gasberg@accenture.com"
] | christian.gasberg@accenture.com |
1b9e1c8e39b3c1eefb20397ddd723788aa07ebbe | 0eeb0bc894587b9f266eca668b5bca3330df72a4 | /src/indexer/indexer.py | 4bbfd8a6f1a764839246f1e945047b17bf115117 | [] | no_license | taddo3/IdnesCrawl | d86bd5d92671319c07491db06eebafd5585eb27c | f0e7012eb13b4941926d8e127280013f774423f6 | refs/heads/master | 2023-01-21T10:04:55.302532 | 2020-12-01T20:44:49 | 2020-12-01T20:44:49 | 299,682,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,066 | py | from json import loads, dumps
from os import path
class Indexer:
    """Builds an inverted index from article keywords to the byte offset of
    the article's JSON record inside its data file.

    The index is persisted as JSON in ``../../data/indexes.txt`` with the
    shape ``{keyword: ["<articles_file>@<byte_offset>", ...]}``, sorted so
    the most-referenced keywords come first.
    """

    def __init__(self):
        # keyword -> list of "filename@offset" location strings
        self.indexes = dict()
        if path.exists('../../data/indexes.txt'):
            with open('../../data/indexes.txt', 'r', encoding='utf-8') as index_file:
                index_lines = index_file.read()
                if index_lines:
                    self.indexes = loads(index_lines)
        else:
            # Create an empty index file so later reads/writes succeed.
            open('../../data/indexes.txt', 'w', encoding='utf-8').close()

    def create_index(self, articles_filename, keywords_filename):
        """Scan `keywords_filename` and record, for every keyword, where the
        matching article's JSON record starts inside `articles_filename`,
        then persist the updated index."""
        articles_file = open(articles_filename, 'r', encoding='utf-8')
        keywords_file = open(keywords_filename, 'r', encoding='utf-8')
        try:
            while True:
                # Read one keyword record: lines from '{' through '}'.
                line = keywords_file.readline()
                if not line or line[0] != '{':
                    break
                article_data = line
                while line[0] != '}':
                    line = keywords_file.readline()
                    article_data += line
                json = loads(article_data)
                position = self.find_position(articles_file, json['title'])
                if position is None:
                    print('Error: Position isn\'t find.')
                # For every keyword store the filename and the position of the article json
                if json['keywords'] and position is not None:
                    location = articles_filename + '@' + str(position)
                    for keyword in json['keywords']:
                        if keyword in self.indexes:
                            # BUGFIX: compare the stored "filename@offset"
                            # string; the old code tested `position not in
                            # ...` (an int against strings), which never
                            # matched and allowed duplicate entries.
                            if location not in self.indexes[keyword]:
                                self.indexes[keyword].append(location)
                        else:
                            self.indexes[keyword] = [location]
            with open('../../data/indexes.txt', 'w', encoding='utf-8') as index_file:
                # Most-referenced keywords first.
                self.indexes = {k: v for k, v in sorted(self.indexes.items(), key=lambda item: len(item[1]), reverse=True)}
                index_file.write(dumps(self.indexes, indent=4))
        except Exception as ex:
            # Best effort: report and fall through so both files still close.
            print('Error: ' + str(ex))
        finally:
            articles_file.close()
            keywords_file.close()

    @staticmethod
    def find_position(articles_file, title):
        """
        Return the offset of the article JSON record whose 'title' equals
        `title`, scanning `articles_file` from its current position without
        any index (a full sequential scan).  Rewinds the file to offset 0
        and returns None when no record matches.
        """
        while True:
            position = articles_file.tell()
            line = articles_file.readline()
            if not line or line[0] != '{':
                articles_file.seek(0)
                return None
            article_data = line
            while line[0] != '}':
                line = articles_file.readline()
                article_data += line
            json = loads(article_data)
            if json['title'] == title:
                return position
| [
"td256489"
] | td256489 |
7ad3d6880008f2c252e939266d8e8e9eded5ffa4 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/logging/v2/logging-v2-py/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py | 50feab7c61e55e6a3d999723b820f2fd4318e856 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,147 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging_metrics
from google.protobuf import empty_pb2 # type: ignore
from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import MetricsServiceV2GrpcTransport
class MetricsServiceV2GrpcAsyncIOTransport(MetricsServiceV2Transport):
    """gRPC AsyncIO backend transport for MetricsServiceV2.

    Service for configuring logs-based metrics.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Shared aio channel plus a per-instance cache of generated stub callables.
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(cls,
                       host: str = 'logging.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            **self_signed_jwt_kwargs,
            **kwargs
        )

    def __init__(self, *,
            host: str = 'logging.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def list_log_metrics(self) -> Callable[
            [logging_metrics.ListLogMetricsRequest],
            Awaitable[logging_metrics.ListLogMetricsResponse]]:
        r"""Return a callable for the list log metrics method over gRPC.

        Lists logs-based metrics.

        Returns:
            Callable[[~.ListLogMetricsRequest],
                    Awaitable[~.ListLogMetricsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_log_metrics' not in self._stubs:
            self._stubs['list_log_metrics'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.MetricsServiceV2/ListLogMetrics',
                request_serializer=logging_metrics.ListLogMetricsRequest.serialize,
                response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize,
            )
        return self._stubs['list_log_metrics']

    @property
    def get_log_metric(self) -> Callable[
            [logging_metrics.GetLogMetricRequest],
            Awaitable[logging_metrics.LogMetric]]:
        r"""Return a callable for the get log metric method over gRPC.

        Gets a logs-based metric.

        Returns:
            Callable[[~.GetLogMetricRequest],
                    Awaitable[~.LogMetric]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_log_metric' not in self._stubs:
            self._stubs['get_log_metric'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.MetricsServiceV2/GetLogMetric',
                request_serializer=logging_metrics.GetLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs['get_log_metric']

    @property
    def create_log_metric(self) -> Callable[
            [logging_metrics.CreateLogMetricRequest],
            Awaitable[logging_metrics.LogMetric]]:
        r"""Return a callable for the create log metric method over gRPC.

        Creates a logs-based metric.

        Returns:
            Callable[[~.CreateLogMetricRequest],
                    Awaitable[~.LogMetric]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_log_metric' not in self._stubs:
            self._stubs['create_log_metric'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.MetricsServiceV2/CreateLogMetric',
                request_serializer=logging_metrics.CreateLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs['create_log_metric']

    @property
    def update_log_metric(self) -> Callable[
            [logging_metrics.UpdateLogMetricRequest],
            Awaitable[logging_metrics.LogMetric]]:
        r"""Return a callable for the update log metric method over gRPC.

        Creates or updates a logs-based metric.

        Returns:
            Callable[[~.UpdateLogMetricRequest],
                    Awaitable[~.LogMetric]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_log_metric' not in self._stubs:
            self._stubs['update_log_metric'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.MetricsServiceV2/UpdateLogMetric',
                request_serializer=logging_metrics.UpdateLogMetricRequest.serialize,
                response_deserializer=logging_metrics.LogMetric.deserialize,
            )
        return self._stubs['update_log_metric']

    @property
    def delete_log_metric(self) -> Callable[
            [logging_metrics.DeleteLogMetricRequest],
            Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete log metric method over gRPC.

        Deletes a logs-based metric.

        Returns:
            Callable[[~.DeleteLogMetricRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_log_metric' not in self._stubs:
            self._stubs['delete_log_metric'] = self.grpc_channel.unary_unary(
                '/google.logging.v2.MetricsServiceV2/DeleteLogMetric',
                request_serializer=logging_metrics.DeleteLogMetricRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['delete_log_metric']
# Public API of this module.
__all__ = (
    'MetricsServiceV2GrpcAsyncIOTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ded6ed30a96d3495a0c55cdab2f2c3075fcfd9bb | 409a3d53b08f55b6b42643dc68b1eb6daeb54ced | /mars/tensor/special/core.py | 3044dddb31a5dd75958a4ce9e56120459f859e1a | [
"BSD-3-Clause",
"MIT",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | sighingnow/mars | 06cbcd8f4aa132a85cdb6c7215c61139636e316b | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | refs/heads/master | 2023-01-21T12:11:12.469853 | 2019-09-26T09:34:13 | 2019-09-26T09:34:13 | 189,408,668 | 0 | 0 | Apache-2.0 | 2021-01-12T06:19:58 | 2019-05-30T12:19:28 | Python | UTF-8 | Python | false | false | 1,114 | py | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..arithmetic.core import TensorUnaryOp
from ..array_utils import np, cp, sparse
class TensorSpecialOp(TensorUnaryOp):
    """Base class for unary tensor ops backed by a scipy.special-style
    function, resolved per array backend."""

    @classmethod
    def _get_func(cls, xp):
        """Resolve this op's implementation for the array module ``xp``."""
        name = cls._func_name
        if xp is np:
            # NumPy inputs dispatch to SciPy's special-function library.
            from scipy import special
            return getattr(special, name)
        if cp is not None and xp is cp:
            # CuPy inputs use the GPU implementations in cupyx.
            from cupyx.scipy import special
            return getattr(special, name)
        # Anything else must be the sparse backend, which exposes the
        # function under the same name.
        assert xp is sparse
        return getattr(sparse, name)
| [
"wenjun.swj@alibaba-inc.com"
] | wenjun.swj@alibaba-inc.com |
04fa739afb063ffe1d5696d7a3f6013540aac270 | e9506f2050ed415e1e0ca263dc72817b95d3393c | /custom_components/hubitat/lock.py | 64227fbff775a479c99410e770e9e42a9910fec3 | [
"MIT"
] | permissive | maxim31cote/hacs-hubitat | 5b2e9e8d6d42c384144625d1d8dc49bb3116b47b | 07f429e530797db2616466bee0e74c80d5724b22 | refs/heads/master | 2022-05-29T06:55:54.934009 | 2020-04-22T12:10:50 | 2020-04-22T12:10:50 | 260,245,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,695 | py | from logging import getLogger
from typing import Any, Dict, Optional, Union, cast
import hubitatmaker as hm
import voluptuous as vol
from homeassistant.components.lock import LockDevice
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from .const import (
ATTR_CODE,
ATTR_CODE_LENGTH,
ATTR_CODES,
ATTR_LAST_CODE_NAME,
ATTR_LENGTH,
ATTR_MAX_CODES,
ATTR_NAME,
ATTR_POSITION,
DOMAIN,
SERVICE_CLEAR_CODE,
SERVICE_SET_CODE,
SERVICE_SET_CODE_LENGTH,
)
from .device import HubitatEntity, get_hub
# Module-level logger for this integration.
_LOGGER = getLogger(__name__)

# hubitat.set_code: program `code` into slot `position`, optionally named.
SET_CODE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_id,
        vol.Required(ATTR_POSITION): vol.Coerce(int),
        vol.Required(ATTR_CODE): vol.Coerce(str),
        vol.Optional(ATTR_NAME): str,
    }
)

# hubitat.clear_code: delete the code stored in slot `position`.
CLEAR_CODE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_ENTITY_ID): cv.entity_id, vol.Required(ATTR_POSITION): int}
)

# hubitat.set_code_length: set the number of digits lock codes must have.
SET_CODE_LENGTH_SCHEMA = vol.Schema(
    {vol.Required(ATTR_ENTITY_ID): cv.entity_id, vol.Required(ATTR_LENGTH): int}
)
class HubitatLock(HubitatEntity, LockDevice):
    """Representation of a Hubitat lock."""

    @property
    def code_format(self) -> Optional[str]:
        """Regex for code format or None if no code is required."""
        code_length = self.get_attr(hm.ATTR_CODE_LENGTH)
        if code_length is not None:
            # BUGFIX: the braces must be doubled so they survive the
            # f-string and form a repetition count (e.g. "^\d{4}$").
            # The old f"^\\d{code_length}$" interpolated them away,
            # producing "^\d4$" (one digit followed by a literal '4').
            return f"^\\d{{{code_length}}}$"
        return None

    @property
    def is_locked(self) -> bool:
        """Return True if the lock is locked."""
        return self.get_attr(hm.ATTR_LOCK) == hm.STATE_LOCKED

    @property
    def code_length(self) -> Optional[int]:
        """Number of digits a lock code must have, if reported."""
        return self.get_int_attr(hm.ATTR_CODE_LENGTH)

    @property
    def codes(self) -> Union[str, Dict[str, Dict[str, str]], None]:
        """Programmed lock codes: parsed JSON when possible, otherwise the
        raw attribute string (some devices report encrypted codes)."""
        try:
            return self.get_json_attr(hm.ATTR_LOCK_CODES)
        except Exception:
            # Deliberate best-effort fallback when the attribute isn't JSON.
            return self.get_str_attr(hm.ATTR_LOCK_CODES)

    @property
    def last_code_name(self) -> Optional[str]:
        """Name of the code most recently used to operate the lock."""
        return self.get_attr(hm.ATTR_LAST_CODE_NAME)

    @property
    def max_codes(self) -> Optional[int]:
        """Maximum number of code slots the lock supports, if reported."""
        return self.get_int_attr(hm.ATTR_MAX_CODES)

    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Return the state attributes."""
        return {
            ATTR_CODES: self.codes,
            ATTR_CODE_LENGTH: self.code_length,
            ATTR_LAST_CODE_NAME: self.last_code_name,
            ATTR_MAX_CODES: self.max_codes,
        }

    async def async_lock(self, **kwargs: Any) -> None:
        """Lock the lock."""
        await self.send_command(hm.CMD_LOCK)

    async def async_unlock(self, **kwargs: Any) -> None:
        """Unlock the lock."""
        await self.send_command(hm.CMD_UNLOCK)

    async def clear_code(self, position: int) -> None:
        """Delete the code stored in slot `position`."""
        await self.send_command(hm.CMD_DELETE_CODE, position)

    async def set_code(self, position: int, code: str, name: Optional[str]) -> None:
        """Program `code` into slot `position`, optionally naming it."""
        # Hubitat expects a single comma-separated argument string.
        arg = f"{position},{code}"
        if name is not None:
            arg = f"{arg},{name}"
        await self.send_command(hm.CMD_SET_CODE, arg)

    async def set_code_length(self, length: int) -> None:
        """Set the number of digits lock codes must have."""
        await self.send_command(hm.CMD_SET_CODE_LENGTH, length)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities,
) -> None:
    """Initialize lock devices."""
    hub = get_hub(hass, entry.entry_id)
    devices = hub.devices

    # Wrap every hub device with lock capability in a HubitatLock entity.
    locks = []
    for device_id in devices:
        device = devices[device_id]
        if hm.CAP_LOCK in device.capabilities:
            locks.append(HubitatLock(hub=hub, device=device))

    async_add_entities(locks)
    hub.add_entities(locks)
    _LOGGER.debug("Added entities for locks: %s", locks)

    if not locks:
        # No locks means no lock services to register.
        return

    def get_entity(entity_id: str) -> Optional[HubitatLock]:
        # Linear scan at call time: entity_ids are assigned by HA after setup.
        return next((lock for lock in locks if lock.entity_id == entity_id), None)

    async def clear_code(service: ServiceCall):
        entity_id = cast(str, service.data.get(ATTR_ENTITY_ID))
        lock = get_entity(entity_id)
        if lock:
            await lock.clear_code(service.data.get(ATTR_POSITION))

    async def set_code(service: ServiceCall):
        entity_id = cast(str, service.data.get(ATTR_ENTITY_ID))
        lock = get_entity(entity_id)
        if not lock:
            raise ValueError(f"Invalid or unknown entity '{entity_id}'")
        pos = service.data.get(ATTR_POSITION)
        code = service.data.get(ATTR_CODE)
        name = service.data.get(ATTR_NAME)
        await lock.set_code(pos, code, name)
        _LOGGER.debug("Set code at %s to %s for %s", pos, code, entity_id)

    async def set_code_length(service: ServiceCall):
        entity_id = cast(str, service.data.get(ATTR_ENTITY_ID))
        lock = get_entity(entity_id)
        if lock:
            length = service.data.get(ATTR_LENGTH)
            await lock.set_code_length(length)
            _LOGGER.debug("Set code length for %s to %d", entity_id, length)

    hass.services.async_register(
        DOMAIN, SERVICE_CLEAR_CODE, clear_code, schema=CLEAR_CODE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SET_CODE, set_code, schema=SET_CODE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_CODE_LENGTH,
        set_code_length,
        schema=SET_CODE_LENGTH_SCHEMA,
    )
"j.cheatham@gmail.com"
] | j.cheatham@gmail.com |
065c07dbd1adc232386da465812aba87f2c9e666 | 61d77677afd8af7f42a700f85cb35804ae04e9e4 | /exe 3.py | 22fd713739534bd39e0f35d2acce0e8691c220c2 | [] | no_license | NeroWithoutHat/tpa-lista-5 | 6de1d212ec98dc3c2767913aeaccdd1693bf587c | bffa1b4b8e5a84a4160d204d8ee37f6a2d74e1cf | refs/heads/main | 2023-08-13T22:37:15.624507 | 2021-09-23T20:04:05 | 2021-09-23T20:04:05 | 409,728,707 | 0 | 1 | null | 2021-09-28T20:29:53 | 2021-09-23T20:03:07 | Python | UTF-8 | Python | false | false | 245 | py | n1= 0
# Ask 10 people for their birth year and count how many are adults
# (18 or older, with ages computed relative to the year 2021).
calculator= 1
total= 0
while calculator <= 10:
    calculator += 1
    n1= int(input('Em que ano você nasceu: '))
    # maior == the person's age: (birth_year - 2021) * -1 == 2021 - birth_year
    maior= (n1 - 2021) *-1
    if maior >= 18:
        total += 1
print ('{} São maiores de idade'.format (total))
"noreply@github.com"
] | noreply@github.com |
be8163c52c1db68cd70a64e63fa2802bd8be0dc2 | d2993f38ea15467c0c581bfc62282869d411b0c4 | /blog/migrations/0002_auto_20200907_0401.py | 4f44c0bee9b3d81eb1d84c96851e0cf784b44ca6 | [] | no_license | AzmainMahtab/very-basic-blog | 952a8f4d74dbc4b6691ad4d2dddcc29f2dfc99bc | 18f47955bfd0a4aa2f5f8aa145393e0e0a4c57a1 | refs/heads/master | 2022-12-17T08:23:22.834693 | 2020-09-24T08:29:19 | 2020-09-24T08:29:19 | 293,366,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # Generated by Django 3.1 on 2020-09-06 22:01
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add optional ``date`` (defaulting to the
    current time) and ``description`` fields to the Blog model."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='blog',
            name='date',
            field=models.DateTimeField(default=django.utils.timezone.now, null=True),
        ),
        migrations.AddField(
            model_name='blog',
            name='description',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
| [
"azmainmahtab97@gmail.com"
] | azmainmahtab97@gmail.com |
b6441bd419e7b43c5518e361d1ff550fe25ded57 | 70628500b7bdfa5fc548c39cbc8e6df449952a98 | /baseball_API/stats/migrations/0017_auto_20161026_2209.py | a95a83c41aeceea5bfe0f1e070e0a6d70a7f17ae | [] | no_license | cjredmond/baseball_API | 7cd4c1bd07560287d135ceb17f93821234a4fd1d | 0bbe8b4573b34915ebe6eae0ec9b1de62ef42d13 | refs/heads/master | 2021-01-16T23:06:33.866358 | 2016-10-27T04:01:21 | 2016-10-27T04:01:21 | 72,024,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,220 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 22:09
from __future__ import unicode_literals
from django.db import migrations
import csv
def add_master_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
with open('Master.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
Master.objects.create(player=row["playerID"], birthYear=row["birthYear"],
birthMonth=row["birthMonth"], birthDay=row["birthDay"], birthCountry=row["birthCountry"],
birthState=row["birthState"], birthCity=row["birthCity"], deathYear=row['deathYear'],
deathMonth=row['deathMonth'], deathDay=row['deathDay'],deathState=row['deathState'],
deathCountry=row['deathCountry'],
deathCity=row['deathCity'], nameFirst=row['nameFirst'], nameLast=row['nameLast'],
nameGiven=row['nameGiven'], weight=row['weight'], height=row['height'], bats=row['bats'],
throws=row['throws'], debut=row['debut'], finalGame=['finalGame'], retroID=['retroID'],
bbrefID=row['bbrefID'])
def add_batting_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Batting = apps.get_model("stats", "Batting")
with open('Batting.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Batting.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], games=row['G'], at_bats=row['AB'], runs=row['R'],
hits=row['H'], doubles=row["2B"], triples=row['3B'], home_runs=row['HR'],
rbi=row['RBI'], stolen_bases=row['SB'], caught_stealing=row['CS'],
walks=row['BB'], strikeouts=row['SO'], intentional_walks=row['IBB'],
hit_by_pitch=row['HBP'], sac_hits=row['SH'], sac_flies=['SF'], double_plays=row['GIDP'])
def add_fielding_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Fielding = apps.get_model("stats", "Fielding")
with open('Fielding.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Fielding.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], position=row['POS'], games=row['G'], games_started=row['GS'],
innOuts=row['InnOuts'], put_outs=row['PO'], assists=row['A'], double_plays=row['DP'],
passed_balls=row['PB'], wild_pitches=row['WP'], stolen_bases=row['SB'],
caught_stealing=row['CS'], zone=row['ZR'] )
def add_pitcher_data(apps, schema_editor):
Master = apps.get_model("stats", "Master")
Pitcher = apps.get_model("stats", "Pitcher")
with open('Pitching.csv') as infile:
reader = csv.DictReader(infile)
for row in reader:
#print(row)
player = Master.objects.get(player=row["playerID"])
Pitcher.objects.create(player=player, year=row["yearID"], stint=row['stint'], team=row["teamID"],
league=row['lgID'], wins=row['W'], losses=row['L'], games=row['G'], games_started=row['GS'],
complete_games=row['CG'], shutouts=row['SHO'], saves=row['SV'], outs_pitched=row['IPouts'],
hits=row['H'], earned_runs=row['ER'], home_runs=row['HR'], walks=row['BB'], strikeouts=row['SO'],
opponent_avg=row['BAOpp'], era=row['ERA'], intentional_bb=row['IBB'],
wild_pitches=row['WP'],hit_by_pitch=row['HBP'], balks=row['BK'],
batters_faced=row['BFP'], games_finised=row['GF'], runs=row['R'],
sac_against=row['SH'], sac_flies=row['SF'], ground_dp_against=row['GIDP'])
class Migration(migrations.Migration):
dependencies = [
('stats', '0016_auto_20161026_2223'),
]
operations = [
migrations.RunPython(add_master_data),
migrations.RunPython(add_batting_data),
migrations.RunPython(add_fielding_data),
migrations.RunPython(add_pitcher_data)
]
| [
"connor.redmond@gmail.com"
] | connor.redmond@gmail.com |
71af2627ed2fe94a82fd4eed3d9461def451698b | 92d6bcc11d0e4004f973a3ca281b4316ea643d14 | /resources_rc.py | aa5fb46473b399d275f444feb4fa0c7a38aef5e2 | [] | no_license | geodrinx/sentinelhub | e8c3cc8e747832749348a504c8122118665aee11 | 306a0d5e2757f75475caceaa0bc906b646ea2a2a | refs/heads/master | 2021-01-11T02:39:41.611055 | 2016-10-14T13:47:27 | 2016-10-14T13:47:27 | 70,914,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,220 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: lun 19. set 15:54:05 2016
# by: The Resource Compiler for PyQt (Qt v4.8.5)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x06\xba\
\x00\
\x00\x18\x2c\x78\x9c\xed\x58\x7d\x54\x4c\x69\x18\xbf\x59\x65\x9a\
\x9a\x0f\x89\xa1\x6d\xfb\x10\x25\x25\x85\x72\x4c\x07\xd1\x50\x91\
\xf1\x91\xce\x9e\xcd\x5a\xac\xb3\x38\x58\xb1\x0e\x27\xbb\x3e\x96\
\x5d\x8a\x3d\x3e\xea\x6c\x96\x22\xe5\xd8\xc4\x6e\x93\xb5\x15\x32\
\x6d\x85\xa9\x45\x98\xd0\x16\x39\x8d\x7c\x24\x9f\x45\x35\x33\xc5\
\x66\x7f\xe6\x5d\xf7\xdc\xea\xce\x47\xec\x87\x3f\x7a\xce\x74\xbb\
\xf3\xde\x7b\xdf\xf7\xf7\xfe\x9e\xe7\xf9\x3d\xcf\x9d\xcd\x53\xa4\
\x41\x3c\xae\x1d\x97\xa2\x28\x5e\x48\xb0\x64\x1a\xfe\x0f\x7a\xf5\
\xc7\xe9\x82\x63\x56\xd4\x8a\x0b\x14\x25\xaa\x0f\x91\x8c\x99\x1e\
\x75\xdd\xac\x46\x24\xaa\x31\xf3\x1f\x39\x89\x7a\x07\xac\xcb\xff\
\x0d\xe0\x6f\xeb\xc4\xd1\xda\x3a\x71\xb4\xb6\xae\x6f\xfc\xa4\x48\
\xd4\x93\xc7\xb3\xa6\xbf\xd6\xd7\x37\x3c\x79\x52\xf7\xfc\xf9\xf3\
\xff\x08\x87\x8f\xb7\xe7\xe8\xd1\x62\x27\xa7\x0f\xb4\x1a\x6d\xcd\
\xfd\x87\xf4\xb8\x40\xc0\xeb\xde\x5d\x58\x5b\x5b\x57\x58\x54\x9c\
\x9f\x5f\xd8\x1e\x10\x70\x7b\x79\x0d\x3c\x79\xb2\xe0\x6d\x71\x00\
\xc1\x82\x05\xb3\x6a\x9f\xd4\x1d\x3a\xfc\x4b\xfc\xce\x6b\xac\x5b\
\xef\xd7\xcf\x39\x74\xe2\xb8\xe9\x61\xa1\x45\x45\xc5\xc9\x29\x87\
\xc8\xa0\xb9\xb9\xf9\xa2\x85\x73\x24\x81\x23\x05\x02\xbe\xb5\xb5\
\x55\x46\x46\x76\xfb\x07\x4d\xd2\x31\x4c\xf4\xcd\xfa\x95\x1c\x4b\
\x4e\x74\x4c\xdc\x7d\x06\x07\x06\xee\x0f\x08\x10\x83\x15\x4b\x4b\
\x8e\x4b\x5f\x47\x2e\xd7\x32\x26\x7a\xf5\xd3\xa7\xcf\x2c\x2c\x2c\
\x9a\x9b\x9b\x23\x3e\x5e\xf8\xec\x59\x7d\x9b\x47\xde\x73\x70\x74\
\x33\x3c\x29\x9f\xcf\x4b\x49\x8e\x3b\x9e\x93\xb7\x63\x47\x42\x63\
\xa3\xda\x28\x08\x58\x4b\x4b\xcb\xed\xdb\xd5\x33\x22\xa6\xad\x59\
\xbd\x6c\xea\x94\x09\xf2\xdc\x53\x94\x19\xe5\xec\xe4\x00\x10\xa0\
\x44\x24\xb2\x2d\x28\x28\xea\x18\x0e\x80\xd8\x93\xb8\x2d\x3a\x3a\
\x36\x2f\x5f\x61\x0a\x02\x62\xf0\x4e\xea\x81\x9d\x7e\x7e\x3e\x2f\
\x5f\xbe\x7c\xf1\xe2\x05\x9f\x67\x2d\x93\x65\xf9\xfa\xfa\x98\x9b\
\x77\x6d\x6a\x6a\x1a\xe8\xee\xaa\x54\x5e\xad\xa9\x79\xc0\x7c\xc4\
\x48\xde\x02\x44\x6c\x6c\xc2\x25\xe5\x55\x53\x96\x07\xe8\xd9\xb3\
\x3e\x4c\xde\x17\x27\xea\x65\xfb\x47\xd9\x75\x8d\x46\xdb\xac\x33\
\x7f\x7f\x3f\xdb\x9e\x3d\xb2\xb3\xe5\x20\x03\xb7\x61\x3c\x32\x72\
\x5e\x9b\x67\x0d\xf1\xb1\x6e\xed\xf2\xe2\x62\xe5\xaf\x99\x39\x46\
\x11\x20\x20\x3e\x99\x19\x0e\x2f\x8c\x18\x31\xcc\xa6\xbb\x10\x5e\
\xc8\xcc\xcc\x11\x8f\x18\x46\xae\x82\x03\x64\xca\xbe\xe4\x34\x57\
\xd7\xbe\xb6\x3d\x6c\x80\xac\x4f\x1f\x11\xa8\x52\x96\x94\xd2\x33\
\xe8\xe5\x03\xdc\xda\xd9\xf5\xa6\x63\xde\x80\x81\x86\x8c\xf4\xa4\
\xb9\x9f\x46\x20\x0c\x11\x8c\xf8\x48\x02\x47\x71\xad\xb8\xf2\xdc\
\xd3\x18\x21\xf7\x08\x05\x7c\xe4\xd1\xcf\xe9\x99\xd4\xab\x0c\xe7\
\x83\x2d\x59\xeb\xac\xd1\x8b\x03\x51\xb6\x6b\x57\x8a\x51\x10\x30\
\x04\xff\xdd\xea\x1a\xe2\x05\x32\x02\x28\x33\x22\xc2\x0a\x0a\x0a\
\xeb\x74\x39\x82\x11\x5c\x42\xde\xe2\xe4\xe2\xc5\xcb\x7b\x93\x52\
\xe7\x2f\x58\xd1\x26\x65\xd8\x71\x40\x73\x40\x86\x89\x61\x01\x5b\
\xbb\x6e\x0b\x8e\xf4\xee\x61\x48\xd7\xc1\x83\x3d\x12\xf7\x1c\x40\
\xea\x92\x11\xd0\xf0\xbe\x5d\xef\xa8\x2f\x37\xb2\x72\xcc\x8e\x43\
\x2c\xf6\x3d\x7e\x22\x8f\xf5\x12\xfc\x05\x41\x6b\x3f\xae\x28\x3c\
\xc7\xfc\x0a\x4a\xa0\x66\xea\x46\xf5\x85\x8b\x97\x81\x0f\x20\x40\
\x83\x01\x2f\xb3\xeb\xe9\x70\x5f\x9f\xed\xb1\x09\xcc\x11\x04\x41\
\x60\xe0\xc8\xe0\xa0\x31\x90\x01\x6c\x11\x0e\x86\xd7\x08\x61\xfe\
\x62\xdf\x55\x51\x4b\xd6\xac\xdd\xec\xe1\x31\x80\xfb\x7a\xf7\x94\
\x8e\x9e\x49\xa1\xe3\xb3\xb2\xe5\x42\xa1\xe0\xdb\x8d\xdb\x6f\xdc\
\x50\x61\x03\xfa\x38\x66\xc7\x01\xe9\x44\xd1\xa2\x11\x6c\x58\xbf\
\x12\x49\x4f\xbe\x92\x54\x04\x9a\xb8\xd8\x8d\x0a\xc5\x39\x1a\x2e\
\x8a\xce\xc1\xb4\x8c\x2f\x96\xce\x07\x13\xf4\x3c\xc8\xd8\x4d\x31\
\x71\x74\x4d\x91\x4a\x83\xea\x1b\x1a\x01\xa8\xfd\x8a\x7a\xe3\x94\
\x2e\x1f\xc3\x87\x0f\x41\xf5\xa2\x41\xd0\x27\x58\x6f\xc8\x10\xaf\
\xbd\x89\xdb\xfa\xf7\xef\x0b\xbd\xaf\xab\x7b\x7a\x53\x75\x8b\x78\
\x81\xd2\x45\x43\xf5\xbd\xfb\x73\xe6\x2e\x45\xfc\xd2\x73\x56\x55\
\xdd\xe1\x59\x5b\xb1\x2e\x67\xbc\xff\x98\x38\x41\x82\x63\xea\x41\
\x19\xe6\x65\x46\x22\x0d\x0b\x19\x0b\x5d\x2a\x29\x29\x05\xe7\xdf\
\xc7\x27\xc1\x6b\x00\x91\x2e\xcb\xfa\x6c\xde\x72\x6c\x9d\x59\x0e\
\x91\x56\x4e\xce\x0e\xac\xab\x98\x54\x6f\xa1\x07\xd8\x68\x7e\xbe\
\x62\x80\x5b\x3f\x70\x40\xb3\x42\xe9\x82\x00\xc4\x94\x96\x96\x63\
\x01\x00\xc5\xc2\x88\xc7\x8a\x8a\x4a\x45\xe1\x79\x53\x66\xee\x18\
\x0e\x70\x8b\xf8\x40\x26\x43\x9b\xd1\x73\x40\x09\x68\x62\xa0\x07\
\xb9\x79\x67\x3c\x06\xba\xcd\x9d\xf3\x11\x82\x03\x20\x7e\x4c\x95\
\xe9\xeb\x86\x90\xb7\x25\x0c\x0d\x65\x9a\x5e\xbf\x20\x3c\xe9\x73\
\x50\x0d\x0e\xa0\x89\x48\x45\xf4\x3b\x90\x45\x6c\x1d\x9f\x1f\x76\
\xa7\x20\x0c\x1f\x3d\x7c\x4c\x7a\x0b\x10\x03\x1f\x25\x26\x6c\x65\
\x4d\x6c\x18\xe8\xc4\x53\xac\x97\xd8\xeb\x0b\x62\xb3\xb2\xb2\x8a\
\xa4\x0c\xbc\x3e\x74\xe8\x60\x54\x04\xb5\x5a\x4d\xf6\x24\x10\xf2\
\xe3\xe3\x93\xb4\x5a\xed\xe3\xc7\xb5\x41\xe3\x03\x42\x42\xc6\xe6\
\xe4\xe4\x6b\xd4\x1a\x7b\x7b\x3b\xdc\x83\x2c\x95\x4e\x0a\x82\x92\
\x96\x97\x57\xb4\x99\x36\x3c\x5c\x9a\x76\xe8\x48\x07\xf8\x90\xcb\
\x4f\xa1\x1c\x90\x73\x34\x3e\x88\x38\xf4\x10\xa4\x5a\xc2\xe0\x23\
\xb4\x36\xde\xde\x9e\xc0\x84\x5c\x85\xd7\x02\x02\xfc\xcb\xaf\xdd\
\x80\xa8\x10\x7f\x21\x98\x1c\x1d\xec\x21\xca\xcc\x39\x21\x80\xd5\
\xd5\x35\xac\xcb\xe9\xc5\x71\xe5\x6a\xb9\x37\x83\x5b\xf8\x7b\x53\
\x74\x1c\x7c\x6f\xa1\x33\x8c\xa0\x8c\x91\x4b\x21\xc1\x63\x91\xd5\
\x24\x84\x95\x3a\x8d\xca\x3e\x96\x8b\x11\x52\x4d\x98\x86\x82\x75\
\xf4\xe8\x89\x8e\xe1\x40\x11\xc2\xa4\x53\xa7\x4e\x60\x42\x81\x2a\
\x7f\xb7\x75\x27\xf9\x0a\xc1\xa6\x0b\x07\xb2\x09\xa0\xcf\x9e\xbf\
\x74\xec\xf8\x6f\x88\x18\xb4\xd0\xfe\x62\x3f\xb5\x46\x4b\x2b\x21\
\x65\x42\xc1\xd2\x1b\xa7\x49\xfb\x0e\x86\x87\x85\xa2\xb1\x60\x0e\
\x42\x19\x17\x2e\x8e\x82\x23\xc0\x47\xb7\x6e\xdd\xe8\x71\x78\x0a\
\xf9\x82\xaa\x24\xe0\xf3\x86\xea\x12\x5b\xf8\xda\x89\xc4\x96\x45\
\xce\xdb\xbf\xff\xb0\xbe\xb5\x0c\xe1\x00\x25\x3f\xa5\x67\xa2\x3d\
\x6e\x33\x0e\x85\x98\x39\x6b\xb1\x4a\x75\x0b\xbe\x38\xa3\x38\x87\
\xa8\xa4\x74\x82\x86\xe8\xf1\x1a\xe4\xee\xe8\x68\x0f\x94\x38\x87\
\x77\xe8\xec\x25\xbc\x1a\x56\x14\x43\xfd\x18\x02\x1e\x05\xc2\xc7\
\x7b\xd0\xef\x67\x2f\x30\xc7\xd1\x2d\x83\x73\x74\x53\x38\xaa\x6e\
\xde\x72\x71\x71\x72\xd6\xa9\x64\xa5\xaa\x0a\x47\x37\x57\x97\x6d\
\x3b\x76\x27\x27\xa7\x91\x9b\x51\x05\x23\x22\xc2\x96\x2d\x5f\x87\
\xe6\xf9\x0d\x71\xc0\xd0\x58\x4f\x99\x1c\xec\xe9\xe9\xde\x06\x0a\
\x13\x13\x3a\x84\x3b\x77\xef\xb9\xba\xba\xa0\x25\x46\x72\x81\xff\
\x9c\xd7\x85\x0d\x20\xf0\xe6\xb2\xe8\xf3\x55\x1a\x8d\xc6\xc0\x2a\
\x94\x89\xef\x2f\x68\x54\xf1\xae\x86\x66\xa7\xfd\x7b\x07\x6d\xd0\
\xbd\x5e\xbd\x6c\x51\xc9\x68\x77\xa0\x67\x46\x3e\x2f\x89\xfc\xca\
\xc0\x53\xb4\x19\x7f\x7f\xa1\x74\xac\xfc\xd9\xd2\xb2\xfe\xeb\x15\
\x56\x56\x5c\xa5\x1e\x61\x6e\x6a\x6a\x86\x9b\x08\xf9\xd0\x53\xdc\
\xdc\xd0\xd0\x88\xa6\xc4\x28\x13\xc4\x3a\xf0\xbb\x14\x79\x3d\x74\
\x77\xef\x5f\x56\x56\x91\x99\x75\xf2\xc1\x83\x47\xcc\x8d\xe2\xaa\
\x8d\x8d\x10\x29\x33\x59\x1a\xac\x52\x55\xa1\xca\xb0\xf6\x19\xff\
\x00\x0e\x7a\x3d\x3f\x5f\x6f\x89\x64\x94\x25\x87\xc3\x61\x74\x5f\
\x90\x13\xc8\xe5\xe5\x2b\x65\xb9\xb9\xa7\x4d\x71\xc4\xdb\xe2\xf8\
\x97\xec\x5d\xf9\x1d\xa6\x13\x47\x6b\xeb\xc4\xd1\xda\x3a\x71\xb4\
\xb6\x23\x9b\x07\x98\xc9\x6e\xc7\xc8\x5f\x9d\x87\x8c\x93\x4a\x64\
\x63\x67\x6f\xf8\x0b\x0b\xe5\x3a\x57\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0b\
\x00\xbb\xa4\xd2\
\x00\x73\
\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x6e\x00\x65\x00\x6c\x00\x68\x00\x75\x00\x62\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x30\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"geodrinx@gmail.com"
] | geodrinx@gmail.com |
deb7a61f33472ab71282a207498f5f23dcc79f7a | 3d1b58a90279cdd6f574ff159ff9459137447dfa | /WebApp/app/models/cmdb_GROUP.py | 3dbe361000769b5385182b407e52d1b4661b1c9e | [
"MIT"
] | permissive | abasu644/AIOPS_PLATFORM | 83d05d4177fd01539fd257bf0e2c6d29f98c822e | 239885c9f3cb2f391e060321a01aa735e37fda72 | refs/heads/master | 2021-07-14T16:24:24.171333 | 2021-04-27T07:05:29 | 2021-04-27T07:05:29 | 245,570,774 | 0 | 0 | MIT | 2020-03-07T04:54:29 | 2020-03-07T04:54:28 | null | UTF-8 | Python | false | false | 629 | py | '''
cmdb_GROUP.py Lib
Written By Kyle Chen
Version 20190420v1
'''
# import buildin pkgs
## import priviate pkgs
from app import db
## cmdb_GROUP Class
class cmdb_GROUP(db.Model):
__tablename__ = 'cmdb_GROUP'
id = db.Column('id', db.String(128), primary_key = True, nullable = False, unique = True)
id_os = db.Column(db.String(128))
id_user_list = db.Column(db.String(1024))
run_time = db.Column(db.Date)
gid = db.Column(db.Integer)
group_name = db.Column(db.String(64))
user_list = db.Column(db.String(512))
insert_time = db.Column(db.Date)
update_time = db.Column(db.Date)
| [
"kyle@hacking-linux.com"
] | kyle@hacking-linux.com |
a1f7592ca76c0b11e4f3990d8ed9b8577228f45a | 50c21ad40e7000d8678a9c800f2faa01ad2936df | /w5/C198.py | 291ce6b7f021b1c62e2fb342cd527504d759119d | [] | no_license | qubit-finance/algostudy | 1ea6add559fcbac94c91631aa367aea27d20a267 | aa7b7abed00d68d60ede85d65d968e9dd0c7eb8a | refs/heads/main | 2023-07-17T17:12:55.091042 | 2021-08-26T17:26:20 | 2021-08-26T17:26:20 | 369,228,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | """
198. House Robber
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed, the only constraint stopping you from robbing each of them is that adjacent houses have security systems connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given an integer array nums representing the amount of money of each house, return the maximum amount of money you can rob tonight without alerting the police.
"""
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n==0:
return 0
elif n==1:
return nums[0]
res = [0] * (n+1)
res[1] = nums[0]
res[2] = max(nums[0], nums[1])
for i in range(3, n+1):
res[i] = max(res[i-2]+nums[i-1], res[i-3]+nums[i-1])
return max(res[-3:])
"""
Runtime: 16 ms, faster than 85.00% of Python online submissions for House Robber.
Memory Usage: 13.4 MB, less than 70.06% of Python online submissions for House Robber.
""" | [
"noreply@github.com"
] | noreply@github.com |
5f48983e3802341541423c341cc6f5d54e73071f | b26d11147736cae7a1b10e7deaef08c339cb1e4e | /profiles/views.py | 0173cad0d63ba9afa27f6df0fd76eb4ed69d0cdc | [] | no_license | Komilcoder/Art-blog-website | bcc72e6b18925c141efd28a101c321c82fe7e534 | 8c88e4245a3e5f36593ceade2ab242f331bf3121 | refs/heads/master | 2022-12-15T14:06:57.094660 | 2020-09-02T06:58:00 | 2020-09-02T06:58:00 | 291,511,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,498 | py | from django.shortcuts import render, redirect , get_object_or_404
from .models import Profile,Relationship,RelationshipManager
from .forms import ProfileModelForm, CreateUserForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate,login
from django.contrib import messages
from django.views.generic import ListView
from django.contrib.auth.models import User
from django.db.models import Q
@login_required(login_url='/accounts/login/')
def my_profile_view(request):
profile = Profile.objects.get(user=request.user)
form = ProfileModelForm(request.POST or None, request.FILES or None,instance=profile)
confirm= False
context = {
'profile':profile,
'form':form,
'confirm':confirm
}
return render(request, 'profiles/myprofile.html',context)
def invites_received(request):
profile = Profile.objects.get(user=request.user)
qs = Relationship.objects.invatiotion_recieved(profile)
results = list(map(lambda x: x.sender, qs))
is_empty = False
if len(results) == 0:
is_empty = True
context = {
'qs':results,
'is_empty':is_empty,
}
return render(request,'profiles/my_invites.html', context)
def profiles_list_view(request):
user = request.user
qs = Profile.objects.get_all_profile(user)
context = {'qs':qs}
return render(request, 'profiles/profile_list.html', context)
# it is invite friends
def invite_profile_list(request):
user = request.user
qs = Profile.objects.get_all_profiles_invites(user)
context = {'qs':qs}
return render(request, 'profiles/invite_list.html', context)
def Loginpage(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, login)
return redirect('home')
else:
return render(request,'registration/login.html')
def logout_view(request):
logout(request)
return redirect('home')
def Registration(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request, 'Account was created for ' + user)
return redirect('login')
else:
form = CreateUserForm()
context = {'form':form}
return render(request, 'registration/signup.html', context)
# for seeing profile on browser
class ProfileListView(ListView):
model = Profile
template_name = 'profiles/profile_list.html'
context_object_name = 'object_list'
def get_queryset(self):
qs = Profile.objects.get_all_profile(self.request.user)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = User.objects.get(username__iexact=self.request.user)
profile = Profile.objects.get(user=user)
rel_rec = Relationship.objects.filter(sender=profile)
rel_sen = Relationship.objects.filter(receiver=profile)
rel_receiver = []
rel_sender = []
for item in rel_rec:
rel_receiver.append(item.receiver.user)
for item in rel_sen:
rel_sender.append(item.sender.user)
context['rel_receiver'] = rel_receiver
context['rel_sender'] = rel_sender
context['is_empty'] = False
if len(self.get_queryset()) == 0:
context['is_empty'] == True
return context
# this is for sending freindship each other
def send_invatiation(request):
if request.method == "POST":
pk = request.POST.get('profile_pk')
user = request.user
sender = Profile.objects.get(user=user)
receiver = Profile.objects.get(pk=pk)
relat = Relationship.objects.create(sender=sender, receiver=receiver,status='send')
return redirect(request.META.get('HTTP_REFERER'))
return redirect('profiles:my_profile')
# this is deleting freindship
def remove_from_friends(request):
if request.method == 'POST':
pk = request.POST.get('profile_pk')
user = request.user
sender = Profile.objects.get(user=user)
receiver = Profile.objects.get(pk=pk)
rel = Relationship.objects.filter((Q(sender=sender) & Q(receiver=receiver)) |(Q(sender=receiver) & Q(receiver=sender)))
rel.delete()
return redirect(request.META.get('HTTP_REFERER'))
return redirect('profiles:my_profile')
def accept_invatition(request):
if request.method == 'POST':
pk = request.POST.get('profile_pk')
sender = Profile.objects.get(pk=pk)
receiver = Profile.objects.get(user=request.user)
rel = get_object_or_404(Relationship, sende=sender, receiver=receiver)
if rel.status =='sender':
rel.status == 'accepted'
rel.save()
return redirect('profiles:my_invite')
def reject_invatition(request):
if request.method == 'POST':
pk= request.POST.get('profile+_pk')
receiver = Profile.objects.get(user=request.user)
sender = Profile.objects.get(pk=pk)
rel = get_object_or_404(Relationship, sender=sender, receiver=receiver)
rel.delete()
return redirect('profiles:my_invite') | [
"yaxshilikovkomil@gmail.com"
] | yaxshilikovkomil@gmail.com |
469ad333f4179cbdcbf8ce66fba436b6172c4ab3 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/fare_structure_element_price_ref.py | 739a4aabbe862e4a51739db6b69106122466c992 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 333 | py | from dataclasses import dataclass
from .fare_structure_element_price_ref_structure import FareStructureElementPriceRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class FareStructureElementPriceRef(FareStructureElementPriceRefStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
| [
"chris@komposta.net"
] | chris@komposta.net |
a2f18034289b7450eea0d77004a2c70a1f3c0571 | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/response_body_post_not_more_complex_schema_response_body_for_content_types/post.py | c63a09de280ebef09cc33bb720ce32b8c0316ad4 | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 7,669 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from . import path
class SchemaFor200ResponseBodyApplicationJson(
schemas.ComposedSchema,
):
class MetaOapg:
additional_properties = schemas.AnyTypeSchema
class not_schema(
schemas.DictSchema
):
class MetaOapg:
class properties:
foo = schemas.StrSchema
__annotations__ = {
"foo": foo,
}
additional_properties = schemas.AnyTypeSchema
foo: typing.Union[MetaOapg.properties.foo, schemas.Unset]
@typing.overload
def __getitem__(self, name: typing.Literal["foo"]) -> typing.Union[MetaOapg.properties.foo, schemas.Unset]: ...
@typing.overload
def __getitem__(self, name: str) -> typing.Union[MetaOapg.additional_properties, schemas.Unset]: ...
def __getitem__(self, name: typing.Union[str, typing.Literal["foo"], ]):
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, ],
foo: typing.Union[MetaOapg.properties.foo, str, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'not_schema':
return super().__new__(
cls,
*args,
foo=foo,
_configuration=_configuration,
**kwargs,
)
def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties:
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*args,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _post_not_more_complex_schema_response_body_for_content_types_oapg(
self: api_client.Api,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostNotMoreComplexSchemaResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_not_more_complex_schema_response_body_for_content_types(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_not_more_complex_schema_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_not_more_complex_schema_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"noreply@github.com"
] | noreply@github.com |
9968b52187515eab726166c5004b7c6ee7b41156 | 2f58ac1aa0c1c5935fbff58b96f2798cc957b5e6 | /mdpo_off/__init__.py | 45a88df824e7a270a06fb4c90bcc9322dd047271 | [] | no_license | manantomar/Mirror-Descent-Policy-Optimization | 21016e8e9578e64db71421734f6d98cfe7f0eff9 | c9bd3ba080e34f94e104ffe7633167e246dd63c6 | refs/heads/master | 2023-01-08T11:55:20.975440 | 2020-10-31T13:14:57 | 2020-10-31T13:14:57 | 306,298,987 | 33 | 2 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from stable_baselines.mdpo_off.mdpo import MDPO
from stable_baselines.mdpo_off.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
| [
"manant@cedar5.cedar.computecanada.ca"
] | manant@cedar5.cedar.computecanada.ca |
d579d9d7481591148299eedcc255a8d1d8a8cb21 | 7bd15f37ffd26f9f0470cae2b4c1ef491c35c5c1 | /python/dirigible/sheet/tests/test_rewrite_formula_offset_cell_references.py | 3ec2ce88b387f1e57dbbe6c330c75e45665fdf06 | [
"MIT"
] | permissive | bwhmather/dirigible-spreadsheet | 0063aba1cec7df1dc4fc0d5dbbcfaeeb1dad932f | ff0414912110553a5d0f317495cdba39a077a044 | refs/heads/master | 2020-12-28T22:53:10.312409 | 2014-09-14T16:15:59 | 2014-09-14T16:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,396 | py | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from dirigible.sheet.worksheet import Worksheet
from dirigible.sheet.rewrite_formula_offset_cell_references import (
rewrite_formula, rewrite_source_sheet_formulae_for_cut,
)
class TestRewriteFormulaOffsetCellReferences(unittest.TestCase):
def test_dont_rewrite_constants(self):
result = rewrite_formula(
"B3", 3, 5, False, (1, 2, 3, 4)
)
self.assertEquals(result, 'B3')
def test_safely_handle_none(self):
self.assertIsNone( rewrite_formula(None, 3, 5, False, (1, 2, 3, 4)) )
def test_safely_handle_nonsense(self):
unparseable_nonsense = '=!:booA1:A2'
self.assertEquals(
rewrite_formula(unparseable_nonsense, 3, 5, False, (1, 2, 3, 4)),
unparseable_nonsense
)
def test_cut_cell_reference_to_cut_cell_is_rewritten(self):
result = rewrite_formula(
"=A2", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=C3')
def test_cut_cell_reference_to_uncut_cell_is_not_rewritten(self):
result = rewrite_formula(
"=B3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=B3')
def test_absolute_cut_cell_reference_to_uncut_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$B$3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=$B$3')
def test_absolute_cut_cell_reference_to_cut_cell_is_rewritten(self):
result = rewrite_formula(
"=$A$2", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=$C$3')
def test_copied_cell_reference_to_copied_cell_is_rewritten(self):
result = rewrite_formula(
"=A2", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=C3')
def test_copied_cell_reference_to_uncopied_cell_is_rewritten(self):
result = rewrite_formula(
"=B3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=D4')
def test_absolute_copied_cell_reference_to_copied_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=$A$2')
def test_absolute_copied_cell_reference_to_uncopied_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$B$3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=$B$3')
def test_copied_cell_reference_that_moves_off_grid_marked_invalid(self):
result = rewrite_formula(
"=A1", 1, -1, False, (1, 2, 1, 2)
)
self.assertEquals(result, '=#Invalid!')
def test_cut_cellrange_reference_to_completely_cut_cellrange_is_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, True, (1, 1, 1, 3)
)
self.assertEquals(result, '=C3:C4')
def test_cut_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=A2:A3')
def test_cut_absolute_cellrange_reference_to_completely_cut_cellrange_is_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, True, (1, 1, 1, 3)
)
self.assertEquals(result, '=$C$3:$C$4')
def test_cut_absolute_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten(self):
    """An absolute range only partly inside the cut region is left alone."""
    result = rewrite_formula(
        "=$A$2:$A$3", 2, 1, True, (1, 1, 1, 2)
    )
    self.assertEqual(result, '=$A$2:$A$3')
def test_cut_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten_even_if_its_not_obviously_overlapping(self):
    """A relative range that merely intersects the cut region stays unchanged."""
    cut_region_left = 2
    cut_region_right = 3
    cut_region_top = 1
    cut_region_bottom = 2
    cell_range_topleft = "A2"
    cell_range_bottomright = "B3"
    result = rewrite_formula(
        "=%s:%s" % (cell_range_topleft, cell_range_bottomright),
        2, 1,
        True,
        (cut_region_left, cut_region_top, cut_region_right, cut_region_bottom)
    )
    self.assertEqual(result, '=A2:B3')
def test_cut_absolute_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten_even_if_its_not_obviously_overlapping(self):
    """An absolute range that merely intersects the cut region stays unchanged."""
    cut_region_left = 2
    cut_region_right = 3
    cut_region_top = 1
    cut_region_bottom = 2
    cell_range_topleft = "$A$2"
    cell_range_bottomright = "$B$3"
    result = rewrite_formula(
        "=%s:%s" % (cell_range_topleft, cell_range_bottomright),
        2, 1,
        True,
        (cut_region_left, cut_region_top, cut_region_right, cut_region_bottom)
    )
    self.assertEqual(result, '=$A$2:$B$3')
def test_cut_cellrange_reference_to_uncut_cellrange_is_not_rewritten(self):
    """A relative range entirely outside the cut region stays unchanged."""
    result = rewrite_formula(
        "=A2:A3", 2, 1, True, (1, 1, 1, 1)
    )
    self.assertEqual(result, '=A2:A3')
def test_cut_absolute_cellrange_reference_to_uncut_cellrange_is_not_rewritten(self):
    """An absolute range entirely outside the cut region stays unchanged."""
    result = rewrite_formula(
        "=$A$2:$A$3", 2, 1, True, (1, 1, 1, 1)
    )
    self.assertEqual(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_completely_copied_cellrange_is_rewritten(self):
    """On copy, a relative range wholly inside the copied region is shifted."""
    result = rewrite_formula(
        "=A2:A3", 2, 1, False, (1, 1, 1, 3)
    )
    self.assertEqual(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_completely_copied_cellrange_is_not_rewritten(self):
    """On copy, an absolute range stays fixed even when wholly inside the copy."""
    result = rewrite_formula(
        "=$A$2:$A$3", 2, 1, False, (1, 1, 1, 3)
    )
    self.assertEqual(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_partially_copied_cellrange_is_rewritten(self):
    """On copy, a relative range is shifted even if only partly copied."""
    result = rewrite_formula(
        "=A2:A3", 2, 1, False, (1, 1, 1, 2)
    )
    self.assertEqual(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_partially_copied_cellrange_is_not_rewritten(self):
    """On copy, an absolute range stays fixed even if only partly copied."""
    result = rewrite_formula(
        "=$A$2:$A$3", 2, 1, False, (1, 1, 1, 2)
    )
    self.assertEqual(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_uncopied_cellrange_is_rewritten(self):
    """On copy, a relative range is shifted even when its target was not copied."""
    result = rewrite_formula(
        "=A2:A3", 2, 1, False, (1, 1, 1, 1)
    )
    self.assertEqual(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_uncopied_cellrange_is_not_rewritten(self):
    """On copy, an absolute range to an uncopied target stays fixed."""
    result = rewrite_formula(
        "=$A$2:$A$3", 2, 1, False, (1, 1, 1, 1)
    )
    self.assertEqual(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_that_moves_off_grid_marked_invalid(self):
    """Only the end of a copied range that falls off the grid is flagged invalid."""
    result = rewrite_formula(
        "=A1:A2", 1, -1, False, (1, 3, 1, 3)
    )
    self.assertEqual(result, '=#Invalid!:B1')
def test_source_sheet_cell_references_to_cut_range_are_rewritten(self):
    """References into the cut range are redirected on the source sheet;
    references outside it, non-formula text, and other cells are untouched."""
    worksheet = Worksheet()
    worksheet.A1.formula = '=B1'
    worksheet.A2.formula = '=B2'
    worksheet.A3.formula = '=B3'
    worksheet.A4.formula = 'B1'
    worksheet.A5.formula = '=$B$1'
    rewrite_source_sheet_formulae_for_cut(worksheet, (2, 1, 2, 2), 3, 4)
    self.assertEqual(worksheet.A1.formula, '=C4')
    self.assertEqual(worksheet.A2.formula, '=C5')
    self.assertEqual(worksheet.A3.formula, '=B3')
    self.assertEqual(worksheet.A4.formula, 'B1')
    self.assertEqual(worksheet.A5.formula, '=$C$4')
def test_source_sheet_cell_ranges_inside_cut_range_are_rewritten(self):
    """Range references wholly inside the cut range are redirected on the
    source sheet, including inside function calls; others are untouched."""
    worksheet = Worksheet()
    worksheet.A1.formula = '=B1:B2'
    worksheet.A2.formula = '=sum(B1:B2)'
    worksheet.A3.formula = '=B3:B4'
    worksheet.A4.formula = 'B1:B2'
    worksheet.A5.formula = '=$B$1:$B$2'
    rewrite_source_sheet_formulae_for_cut(worksheet, (2, 1, 2, 2), 3, 4)
    self.assertEqual(worksheet.A1.formula, '=C4:C5')
    self.assertEqual(worksheet.A2.formula, '=sum(C4:C5)')
    self.assertEqual(worksheet.A3.formula, '=B3:B4')
    self.assertEqual(worksheet.A4.formula, 'B1:B2')
    self.assertEqual(worksheet.A5.formula, '=$C$4:$C$5')
| [
"hjwp2@cantab.net"
] | hjwp2@cantab.net |
7d6a47dc7a6b8898711f070c63d62cc6bb9eba33 | 70231e35b13260a9dca71214b2891c9792f2fbb5 | /78.py | fd5696bf136524a68eb710019d9f0169051ebde1 | [] | no_license | pavithracapricon/aritifical | 60de25de306e24fa1003744d1cf6088c0073986d | 688756c87ebb59b29b0e13150dcd1ec19df5b948 | refs/heads/master | 2020-04-14T22:31:36.354706 | 2019-07-11T03:23:45 | 2019-07-11T03:23:45 | 164,166,279 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 72 | py |
# Read an integer from stdin and report whether it is a multiple of 13.
sur = int(input())
print('yes' if sur % 13 == 0 else 'no')
| [
"noreply@github.com"
] | noreply@github.com |
b93954d0e740810f7ead8b600a468c948f77aa18 | a2f2a2c78c0f1bc7d7430df1929f541bffefc323 | /hybrid_branch_predictor_simulation.py | 0b17e1ef70c15c17cef07f69074b74b53dd9fab9 | [] | no_license | koura911/ECE_562_project | f6e03caef337051b865ebbbfc03677a698660463 | 05e49a2891d547f7f8f78c012c82b6c47a76f13a | refs/heads/master | 2020-05-16T21:24:03.418644 | 2019-05-02T01:21:06 | 2019-05-02T01:21:06 | 183,304,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,411 | py | import pdb
import os
from collections import deque
import numpy as np
class Counter:
    """Four-state saturating branch counter.

    States 1-2 predict not taken, states 3-4 predict taken; updates move
    one state toward the observed outcome, saturating at 1 and 4.
    """

    def __init__(self):
        # Begin in state 3: weakly taken.
        self.state = 3

    def predict(self):
        """Return 1 (taken) in states 3-4, otherwise 0 (not taken)."""
        return 1 if self.state >= 3 else 0

    def update(self, actual):
        """Shift one state toward the outcome (1 = taken, anything else = not)."""
        if actual == 1:
            self.state = min(self.state + 1, 4)
        else:
            self.state = max(self.state - 1, 1)
def saturatingCounter(trace, l):
    """Simulate one saturating counter per branch address.

    trace: sequence of [address, outcome] pairs (outcome 1 = taken).
    l: unused; kept so all predictors share the same signature.
    Returns the number of correct predictions.
    """
    counters = {}
    correct = 0
    for address, outcome in trace:
        # Lazily allocate a counter the first time an address is seen.
        counter = counters.setdefault(address, Counter())
        if counter.predict() == outcome:
            correct += 1
        counter.update(outcome)
    return correct
class Perceptron:
    """Single perceptron over the last N global branch outcomes (+1/-1).

    Predicts taken when the weighted sum (plus bias) is non-negative.
    """

    def __init__(self, N):
        self.N = N
        self.bias = 0
        # Training continues while |output| is below this confidence threshold.
        self.threshold = 2 * N + 14
        self.weights = [0] * N

    def predict(self, global_branch_history):
        """Return (prediction, raw_output); prediction is +1 taken, -1 not taken."""
        output = self.bias + sum(
            w * h for w, h in zip(self.weights, global_branch_history)
        )
        return (1 if output >= 0 else -1), output

    def update(self, prediction, actual, global_branch_history, output):
        """Train on a misprediction, or while the output is not yet confident."""
        if prediction != actual or abs(output) < self.threshold:
            self.bias += actual
            # zip truncates at len(self.weights), i.e. the first N history bits.
            self.weights = [
                w + actual * h
                for w, h in zip(self.weights, global_branch_history)
            ]
def perceptronPredictor(trace, l):
    """Run a global-history perceptron predictor over a branch trace.

    trace: sequence of [address, outcome] pairs (outcome 1 = taken).
    l: history depth (number of inputs per perceptron).
    Returns the number of correct predictions.
    """
    # Most recent outcome lives at the left end; start with l zeros.
    history = deque([0] * l)
    perceptrons = {}
    correct = 0
    for address, taken in trace:
        perceptron = perceptrons.setdefault(address, Perceptron(l))
        prediction, output = perceptron.predict(history)
        actual = 1 if taken == 1 else -1
        perceptron.update(prediction, actual, history, output)
        # Slide the history window: push the newest outcome, drop the oldest.
        history.appendleft(actual)
        history.pop()
        if prediction == actual:
            correct += 1
    return correct
class hybridCounterPerceptron:
    """Three-state counter that delegates to a perceptron in its middle state.

    State 1 predicts not taken, state 3 predicts taken; in state 2 the
    embedded perceptron decides, and state 2 is also the only state in
    which the perceptron is trained.
    """

    def __init__(self, N):
        self.p = Perceptron(N)
        # Start in state 3 (taken).
        self.state = 3

    def predict(self, global_branch_history):
        """Return (prediction, perceptron_output); output is 0 outside state 2."""
        if self.state > 2:
            return 1, 0
        if self.state < 2:
            return -1, 0
        return self.p.predict(global_branch_history)

    def update(self, actual, prediction, global_branch_history, output):
        """Train the perceptron (state 2 only), then saturate toward the outcome."""
        if self.state == 2:
            self.p.update(prediction, actual, global_branch_history, output)
        if actual == 1:
            self.state = min(self.state + 1, 3)
        else:
            self.state = max(self.state - 1, 1)
def hybridPredictor(trace, l):
    """Run the hybrid counter/perceptron predictor over a branch trace.

    trace: sequence of [address, outcome] pairs (outcome 1 = taken).
    l: global-history depth fed to each branch's perceptron.
    Returns the number of correct predictions.
    """
    # maxlen bounds the history to the l entries the perceptrons actually
    # read (indices 0..l-1).  The previous version only ever appended, so
    # the deque grew without bound over long traces even though the extra
    # entries were never consulted; predictions are unchanged.
    global_branch_history = deque([0] * l, maxlen=l)
    hybrid_list = {}
    correct = 0
    for address, taken in trace:
        if address not in hybrid_list:
            hybrid_list[address] = hybridCounterPerceptron(l)
        predictor = hybrid_list[address]
        prediction, output = predictor.predict(global_branch_history)
        actual = 1 if taken == 1 else -1
        # History is only recorded while this branch's counter sits in its
        # saturated-taken state, mirroring the original training schedule.
        if predictor.state == 3:
            global_branch_history.appendleft(actual)
        predictor.update(actual, prediction, global_branch_history, output)
        if prediction == actual:
            correct += 1
    return correct
def simulation(predictor, file, **kwargs):
    """Parse a branch-trace file and return the predictor's accuracy.

    Each line is expected to look like ``0xADDRES o``: characters 2-7 hold
    the branch address and character 9 holds '0'/'1' for not-taken/taken.
    predictor is called as ``predictor(branches, l=kwargs['l'])`` and must
    return the number of correct predictions.
    """
    branches = []
    with open(file, 'r') as trace_file:
        for line in trace_file:
            address = line[2:8]
            outcome = int(line[9])
            branches.append([address, outcome])
    correct = predictor(branches, l=kwargs['l'])
    # Every parsed line contributes exactly one prediction.
    return correct / len(branches)
# Driver: run every predictor over both benchmark traces and print a table.
gcc = 'gcc_branch.out'
mcf = 'mcf_branch.out'

print("|Predictor| |gcc accuracy| |mcf accuracy|")

# Saturating-counter baseline (the depth argument is ignored by it).
print("Saturating counter %.5f %.5f" % (
    simulation(saturatingCounter, file=gcc, l=16),
    simulation(saturatingCounter, file=mcf, l=16),
))

# Sweep the global-history depth for the two history-based predictors
# instead of repeating one call pair per depth by hand.
for label, predictor in (("Perceptron", perceptronPredictor),
                         ("Hybrid", hybridPredictor)):
    for depth in (1, 2, 4, 8, 16, 32):
        acc_gcc = simulation(predictor, file=gcc, l=depth)
        acc_mcf = simulation(predictor, file=mcf, l=depth)
        print("%s (depth %d) %.5f %.5f" % (label, depth, acc_gcc, acc_mcf))
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.