blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
97d300de75dfd498d30154ef16322609963af9f3 | Python | davidchai21/python_leetcode | /409 Longest Palindrome/LongestPalindrome.py | UTF-8 | 437 | 2.671875 | 3 | [] | no_license | class Solution(object):
    def longestPalindrome(self, s):
        """
        LeetCode 409: length of the longest palindrome buildable from s.
        :type s: str
        :rtype: int
        """
        # Character -> occurrence count.
        d={}
        for i in s:
            if i in d:
                d[i]+=1
            else:
                d[i]=1
        # A palindrome consumes characters in pairs; at most one character
        # with an odd count can occupy the centre position.
        even=0
        odd=0
        for k in d:
            if d[k]%2:
                odd=1  # flag that a centre character is available
                even+=d[k]-1  # use only the even part of an odd count
            else:
                even+=d[k]
        return odd+even | true |
324a97a5c55ffb3999be6d20a203b73e277d93e4 | Python | Jatla-Renuka/python | /gcd.py | UTF-8 | 154 | 3.171875 | 3 | [] | no_license | num1=24
# Subtraction-based Euclidean algorithm for gcd(num1, num2), where num1 is
# initialised to 24 on the (metadata-fused) line above: repeatedly subtract
# the smaller value from the larger; when num1 reaches zero, num2 is the GCD.
num2=12
while True:
    if(num1>=num2):
        num1=num1-num2
    else:
        # Swap so num1 is always the larger operand.
        num1,num2=num2,num1
    if num1==0:
        break
print(num2)
| true |
2e8e83df7f40b5c56208cf70ba3477c7241e8ea4 | Python | jcook05/aws | /AwsEc2Util.py | UTF-8 | 3,154 | 2.546875 | 3 | [] | no_license | import boto3
class AwsEc2Utilities:
    """Thin helpers around boto3 for querying VPCs, EC2 instances and ASGs.

    Every method takes an AWS profile name and builds its own boto3 session,
    so credentials are resolved per call.
    NOTE(review): the region is hard-coded to us-west-2 in the resource-based
    methods -- confirm that is intended.
    """

    def basictest(self, m):
        """Smoke-test method: ignores `m` and returns a fixed string."""
        m = "new message"
        return m

    ### VPC ###

    def get_vpcs(self, profile):
        """Return an iterable of all VPC resources visible to `profile`.

        Bug fix: the session built from `profile` was previously unused and
        the default credential chain was queried instead.
        """
        session = boto3.Session(profile_name=profile)
        ec2 = session.resource("ec2", region_name="us-west-2")
        return ec2.vpcs.all()

    def describe_vpcs(self, profile):
        """Return the raw describe_vpcs() API response for `profile`."""
        session = boto3.Session(profile_name=profile)
        ec2 = session.client('ec2')
        return ec2.describe_vpcs()

    def create_vpc(self, cidr, profile):
        """Create a VPC with default tenancy and return the API response.

        Bug fix: the create_vpc response was previously discarded.
        """
        session = boto3.Session(profile_name=profile)
        ec2 = session.client('ec2')
        vpc = ec2.create_vpc(
            CidrBlock=cidr,
            AmazonProvidedIpv6CidrBlock=False,
            DryRun=False,
            InstanceTenancy='default'
        )
        return vpc

    ### EC2 ###

    def getinstancebytag(self, profile, tagkey, tagvalue):
        """Return the first instance tagged tagkey=tagvalue, or None.

        (Docstring fixed: this method only reads instances, it does not
        delete anything.)
        """
        session = boto3.Session(profile_name=profile)
        ec2 = session.client('ec2')
        ## Get All Reservations
        response = ec2.describe_instances()
        ## Flatten reservations into a list of instance dicts.
        instances = [i for r in response["Reservations"] for i in r["Instances"]]
        for i in instances:
            # Untagged instances have no "Tags" key at all.
            for tag in i.get("Tags", []):
                if tag["Key"] == tagkey and tag["Value"] == tagvalue:
                    return i
        return None

    def get_instancesbyfilter(self, filters, profile):
        """Return EC2 instances matching the boto3 `filters` list."""
        session = boto3.Session(profile_name=profile)
        boto3conn = session.resource("ec2", region_name="us-west-2")
        return boto3conn.instances.filter(Filters=filters)

    def getinstanceinasg(self, profile):
        """Print the instance id of every instance attached to an ASG."""
        session = boto3.Session(profile_name=profile)
        asg = session.client('autoscaling')
        response = asg.describe_auto_scaling_groups()
        instances = [i for g in response["AutoScalingGroups"] for i in g["Instances"]]
        for i in instances:
            print(i["InstanceId"])

    def describeinstances(self, profile):
        """Print type, Name tag and availability zone of every instance."""
        session = boto3.Session(profile_name=profile)
        ec2 = session.client('ec2')
        response = ec2.describe_instances()
        instances = [i for r in response["Reservations"] for i in r["Instances"]]
        for i in instances:
            print(i["InstanceType"])
            # Untagged instances have no "Tags" key at all.
            for tag in i.get("Tags", []):
                if tag["Key"] == 'Name':
                    print(tag["Value"])
            print(i["Placement"]['AvailabilityZone'])
| true |
25f3f3ace8f4f57b07da2ee9b7472971ded6632f | Python | melissmartinez/chaining-methods | /chaining-methods.py | UTF-8 | 1,089 | 3.578125 | 4 | [] | no_license | class User:
def __init__(self, nm, em):
self.name = nm
self.email = em
self.account_balance = 0
def make_deposit(self, amount):
self.account_balance += amount
return self
def withdrew(self, amount):
self.account_balance -= amount
return self
def display_user_balance(self):
print(f"User: {self.name}, Balance: {self.account_balance}")
return self
def transfered_funds(self, other_user, amount):
self.account_balance -= amount
other_user.account_balance += amount
self.display_user_balance()
other_user.display_user_balance()
return self
# Demo: exercise the chaining API with three users. Each mutator returns
# the User instance, so deposits/withdrawals chain left to right.
guido = User("Guido van Rossum", "guido@email.com")
monty = User("Monty Python", "monty@email.com")
melissa = User("Melissa Martinez", "mmartinez@email.com")
guido.make_deposit(100).make_deposit(200).make_deposit(300).withdrew(400).display_user_balance()
monty.make_deposit(50).make_deposit(150).withdrew(25).withdrew(15).display_user_balance()
melissa.make_deposit(500).withdrew(50).withdrew(50).withdrew(50).display_user_balance()
melissa.transfered_funds(monty, 100) | true |
b6b76e3884e9c4e1c17944887e640f1cb0312719 | Python | AMK6610/DL-CA1 | /test/function.py | UTF-8 | 838 | 3.046875 | 3 | [] | no_license | import numpy as np
# activation function
def relu(inputs):
    """ReLU activation: clamp every negative entry of `inputs` to zero."""
    floor = np.zeros_like(inputs)
    return np.maximum(inputs, floor)
# output probability distribution function
# output probability distribution function
def softmax(inputs):
    """Row-wise softmax of a 2-D score matrix (n_samples, n_classes).

    Fix: subtract each row's maximum before exponentiating so that large
    scores do not overflow np.exp into inf/NaN; the shift cancels out
    mathematically, so the probabilities are unchanged.
    """
    shifted = inputs - np.max(inputs, axis=1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=1, keepdims=True)
# loss
# loss
def cross_entropy(inputs, y):
    """Mean negative log-likelihood of one-hot labels `y` under the
    predicted probabilities `inputs` (both (n_samples, n_classes))."""
    true_idx = np.argmax(y, axis=1).astype(int)
    # Probability each sample assigns to its true class.
    picked = inputs[np.arange(len(inputs)), true_idx]
    return -np.mean(np.log(picked))
# L2 regularization
# L2 regularization
def L2_regularization(la, weight1, weight2):
    """Return 0.5 * la * (||weight1||^2 + ||weight2||^2), each matrix
    contributing its element-wise squared sum."""
    penalty1 = 0.5 * la * np.sum(np.square(weight1))
    penalty2 = 0.5 * la * np.sum(np.square(weight2))
    return penalty1 + penalty2
def normalize(x):
    """Z-score each element of `x`: (item - mean) / std.

    Fix: the original recomputed np.mean(x) and np.var(x) inside the loop on
    every iteration (O(n^2) work); both are loop-invariant, so they are now
    hoisted out. Still returns a plain Python list, as before.
    Note: a constant input (variance 0) divides by zero, as in the original.
    """
    mean = np.mean(x)
    std = np.sqrt(np.var(x))
    return [(item - mean) / std for item in x]
| true |
d8711ef24acc3d844b60942b5ddbe46e0e435b70 | Python | civilTANG/TASINFER | /kaggle_code/20-newsgroups-ciphertext-challenge/code.py | UTF-8 | 7,904 | 2.828125 | 3 | [] | no_license | import datetime
import gc
import numpy as np
import os
import pandas as pd
import random
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.stats import skew, kurtosis
import Levenshtein
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
import lightgbm as lgb
from tqdm import tqdm
# Column names used throughout the script.
id_col = 'Id'
target_col = 'target'
# Kaggle "20 Newsgroups Ciphertext Challenge" input files.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
def extract_features(df):
    """Add hand-crafted features to `df` in place, derived from the
    'ciphertext' column: character-class frequencies, Levenshtein-style
    self-similarity distances, and ordinal statistics of the characters.
    """
    # NOTE(review): np.unique on a plain Python string treats it as a single
    # 0-d element, so this feature is constant 1 -- len(set(x)) was probably
    # intended; confirm before relying on it.
    df['nunique'] = df['ciphertext'].apply(lambda x: len(np.unique(x)))
    df['len'] = df['ciphertext'].apply(lambda x: len(x))
    def count_chars(x):
        # Per-string counts of each character class.
        n_l = 0 # count letters
        n_n = 0 # count numbers
        n_s = 0 # count symbols
        n_ul = 0 # count upper letters
        n_ll = 0 # count lower letters
        for i in range(0, len(x)):
            if x[i].isalpha():
                n_l += 1
                if x[i].isupper():
                    n_ul += 1
                elif x[i].islower():
                    n_ll += 1
            elif x[i].isdigit():
                n_n += 1
            else:
                n_s += 1
        return pd.Series([n_l, n_n, n_s, n_ul, n_ll])
    cols = ['n_l', 'n_n', 'n_s', 'n_ul', 'n_ll']
    for c in cols:
        df[c] = 0
    tqdm.pandas(desc='count_chars')
    df[cols] = df['ciphertext'].progress_apply(lambda x: count_chars(x))
    # Turn raw counts into per-character frequencies.
    for c in cols:
        df[c] /= df['len']
    # Self-similarity features: distance of the text to its own reversal,
    # to shifted copies of itself (m = 1..4), and between its two halves.
    tqdm.pandas(desc='distances')
    df['Levenshtein_distance'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.distance(x, x[::-1]))
    df['Levenshtein_ratio'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.ratio(x, x[::-1]))
    df['Levenshtein_jaro'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.jaro(x, x[::-1]))
    df['Levenshtein_hamming'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.hamming(x, x[::-1]))
    for m in range(1, 5):
        df['Levenshtein_distance_m{}'.format(m)] = df['ciphertext'].progress_apply(lambda x: Levenshtein.distance(x[:-m], x[m:]))
        df['Levenshtein_ratio_m{}'.format(m)] = df['ciphertext'].progress_apply(lambda x: Levenshtein.ratio(x[:-m], x[m:]))
        df['Levenshtein_jaro_m{}'.format(m)] = df['ciphertext'].progress_apply(lambda x: Levenshtein.jaro(x[:-m], x[m:]))
        df['Levenshtein_hamming_m{}'.format(m)] = df['ciphertext'].progress_apply(lambda x: Levenshtein.hamming(x[:-m], x[m:]))
    df['Levenshtein_distance_h'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.distance(x[:len(x)//2], x[len(x)//2:]))
    df['Levenshtein_ratio_h'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.ratio(x[:len(x)//2], x[len(x)//2:]))
    df['Levenshtein_jaro_h'] = df['ciphertext'].progress_apply(lambda x: Levenshtein.jaro(x[:len(x)//2], x[len(x)//2:]))
    # All symbols stats
    def strstat(x):
        # Moments of the character ordinals of the whole string.
        r = np.array([ord(c) for c in x])
        return pd.Series([
            np.sum(r),
            np.mean(r),
            np.std(r),
            np.min(r),
            np.max(r),
            skew(r),
            kurtosis(r),
        ])
    cols = ['str_sum', 'str_mean', 'str_std', 'str_min', 'str_max', 'str_skew', 'str_kurtosis']
    for c in cols:
        df[c] = 0
    tqdm.pandas(desc='strstat')
    df[cols] = df['ciphertext'].progress_apply(lambda x: strstat(x))
    # Digit stats
    def str_digit_stat(x):
        # Same moments, restricted to digit characters; [0] sentinel keeps
        # the reductions well-defined for strings with no digits.
        r = np.array([ord(c) for c in x if c.isdigit()])
        if len(r) == 0:
            r = np.array([0])
        return pd.Series([
            np.sum(r),
            np.mean(r),
            np.std(r),
            np.min(r),
            np.max(r),
            skew(r),
            kurtosis(r),
        ])
    cols = ['str_digit_sum', 'str_digit_mean', 'str_digit_std', 'str_digit_min',
            'str_digit_max', 'str_digit_skew', 'str_digit_kurtosis']
    for c in cols:
        df[c] = 0
    tqdm.pandas(desc='str_digit_stat')
    df[cols] = df['ciphertext'].progress_apply(lambda x: str_digit_stat(x))
print('Extracting features for train:')
extract_features(train)
print('Extracting features for test:')
extract_features(test)
# TFIDF
# Character n-gram TF-IDF features for n-gram sizes 1..3 (k+1 characters).
for k in range(0, 3):
    tfidf = TfidfVectorizer(
        max_features=1000,
        lowercase=False,
        token_pattern='\\S+',
    )
    def char_pairs(x, k=1):
        # Split x into overlapping (k+1)-character tokens joined by spaces.
        # Note: the default k=1 is always overridden by the loop variable.
        buf = []
        for i in range(k, len(x)):
            buf.append(x[i-k:i+1])
        return ' '.join(buf)
    train['text_temp'] = train.ciphertext.apply(lambda x: char_pairs(x, k))
    test['text_temp'] = test.ciphertext.apply(lambda x: char_pairs(x, k))
    # Fit on train only, then transform test with the same vocabulary.
    train_tfids = tfidf.fit_transform(train['text_temp'].values).todense()
    test_tfids = tfidf.transform(test['text_temp'].values).todense()
    print('k = {}: train_tfids.shape = {}'.format(k, train_tfids.shape))
    # Expand the dense matrix into one column per TF-IDF feature.
    for i in range(train_tfids.shape[1]):
        train['text_{}_tfidf{}'.format(k, i)] = train_tfids[:, i]
        test['text_{}_tfidf{}'.format(k, i)] = test_tfids[:, i]
    del train_tfids, test_tfids, tfidf
    gc.collect()
# Build the model
# Cross-validation bookkeeping and LightGBM hyper-parameters.
cnt = 0
p_buf = []
p_valid_buf = []
n_splits = 5
# NOTE(review): random_state has no effect while shuffle is left False
# (and newer sklearn versions reject this combination).
kf = KFold(
    n_splits=n_splits,
    random_state=0)
err_buf = []
undersampling = 0
lgb_params = {
    'boosting_type': 'gbdt',
    'objective': 'multiclass',
    'metric': 'multi_logloss',
    'max_depth': 5,
    'num_leaves': 31,
    'learning_rate': 0.05,
    'feature_fraction': 0.85,
    'bagging_fraction': 0.85,
    'bagging_freq': 5,
    'verbose': -1,
    'num_threads': -1,
    'lambda_l1': 1.0,
    'lambda_l2': 1.0,
    'min_gain_to_split': 0,
    'num_class': train[target_col].nunique(),
}
# Columns that must not become model features.
cols_to_drop = [
    id_col,
    'ciphertext',
    target_col,
    'text_temp',
]
X = train.drop(cols_to_drop, axis=1, errors='ignore')
feature_names = list(X.columns)
X = X.values
# Bug fix: this read `train[1].values`, which raises KeyError -- the label
# column is the one named by target_col (consistent with cols_to_drop and
# lgb_params['num_class'] above).
y = train[target_col].values
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test[id_col].values
print(X.shape, y.shape)
print(X_test.shape)
n_features = X.shape[1]
n_features = X.shape[1]
for train_index, valid_index in kf.split(X, y):
print('Fold {}/{}'.format(cnt + 1, n_splits))
params = lgb_params.copy()
lgb_train = lgb.Dataset(
X[train_index],
y[train_index],
feature_name=feature_names,
)
lgb_train.raw_data = None
lgb_valid = lgb.Dataset(
X[valid_index],
y[valid_index],
)
lgb_valid.raw_data = None
model = lgb.train(
params,
lgb_train,
num_boost_round=10000,
valid_sets=[lgb_train, lgb_valid],
early_stopping_rounds=100,
verbose_eval=100,
)
if cnt == 0:
importance = model.feature_importance()
model_fnames = model.feature_name()
tuples = sorted(zip(model_fnames, importance), key=lambda x: x[1])[::-1]
tuples = [x for x in tuples if x[1] > 0]
print('Important features:')
for i in range(20):
if i < len(tuples):
print(tuples[i])
else:
break
del importance, model_fnames, tuples
p = model.predict(X[valid_index], num_iteration=model.best_iteration)
err = f1_score(y[valid_index], np.argmax(p, axis=1), average='macro')
print('{} F1: {}'.format(cnt + 1, err))
p = model.predict(X_test, num_iteration=model.best_iteration)
if len(p_buf) == 0:
p_buf = np.array(p, dtype=np.float16)
else:
p_buf += np.array(p, dtype=np.float16)
err_buf.append(err)
cnt += 1
del model, lgb_train, lgb_valid, p
gc.collect
# Train on one fold
if cnt > 0:
break
# Summarise the recorded fold scores.
err_mean = np.mean(err_buf)
err_std = np.std(err_buf)
print('F1 = {:.6f} +/- {:.6f}'.format(err_mean, err_std))
# Average the accumulated test predictions over the folds that ran.
preds = p_buf/cnt
# Prepare submission
subm = pd.DataFrame()
subm[id_col] = id_test
subm['Predicted'] = np.argmax(preds, axis=1)
subm.to_csv('submission.csv', index=False)
| true |
1aab573106334f7e11d871daec3ad54f776db9b6 | Python | sy850811/Python_DSA | /Max_Priority_Queue.py | UTF-8 | 2,376 | 3.25 | 3 | [] | no_license | class pqNode:
def __init__(self,priority,value):
self.priority = priority
self.value = value
#1 1 2 1 2 2 1 3 2 1 4 2 1 5 2 1 6 2 1 7 2 1 8 2 1 9 2 1 10 2 -1
class PriorityQueue:
    """Array-backed binary max-heap of pqNode entries.

    Fixes relative to the original:
    - the heap is now consistently ordered by each node's `priority`; the
      original stored a priority but compared `value`, which only worked
      because the driver always passes the same number for both;
    - a dead local named `min` (which also shadowed the builtin) was removed
      from the sift-down loop, and the private helpers' spelling was fixed.
    Reads on an empty queue return the INT32_MIN sentinel, as before.
    """

    _EMPTY_SENTINEL = -2147483648  # value reported when the queue is empty

    def __init__(self):
        self.pq = []  # heap array of pqNode objects

    def isEmpty(self):
        return len(self.pq) == 0

    def getSize(self):
        return len(self.pq)

    def getMax(self):
        """Value of the highest-priority element, or the sentinel if empty."""
        return self.pq[0].value if not self.isEmpty() else self._EMPTY_SENTINEL

    def __percolateUp(self):
        """Sift the last element up until the max-heap property holds."""
        child = self.getSize() - 1
        while child > 0:
            parent = (child - 1) // 2
            if self.pq[parent].priority < self.pq[child].priority:
                self.pq[parent], self.pq[child] = self.pq[child], self.pq[parent]
                child = parent
            else:
                break

    def insert(self, ele, priority):
        """Add `ele` to the queue with the given `priority`."""
        self.pq.append(pqNode(priority, ele))
        self.__percolateUp()

    def __percolateDown(self):
        """Sift the root down until the max-heap property holds."""
        parent = 0
        while 2 * parent + 1 < self.getSize():
            left = 2 * parent + 1
            right = 2 * parent + 2
            largest = parent
            if self.pq[left].priority > self.pq[largest].priority:
                largest = left
            if right < self.getSize() and self.pq[right].priority > self.pq[largest].priority:
                largest = right
            if largest == parent:
                break
            self.pq[parent], self.pq[largest] = self.pq[largest], self.pq[parent]
            parent = largest

    def removeMax(self):
        """Pop and return the highest-priority value (sentinel when empty)."""
        if self.isEmpty():
            return self._EMPTY_SENTINEL
        data = self.pq[0].value
        # Move the last leaf to the root, shrink, then restore the heap.
        self.pq[0] = self.pq[-1]
        self.pq.pop()
        self.__percolateDown()
        return data
# Command-driven test harness. All commands arrive on a single stdin line:
#   1 <x> -> insert x (with priority x)   2 -> print max    3 -> pop max
#   4 -> print size                       5 -> print empty? then STOP
#  -1 -> stop
myPq = PriorityQueue()
curr_input = [int(ele) for ele in input().split()]
choice = curr_input[0]
i=1
while choice != -1:
    if choice == 1:
        element = curr_input[i]
        i+=1
        myPq.insert(element,element)
    elif choice == 2:
        print(myPq.getMax())
    elif choice == 3:
        print(myPq.removeMax())
    elif choice == 4:
        print(myPq.getSize())
    elif choice == 5:
        if myPq.isEmpty():
            print('true')
        else:
            print('false')
        # NOTE(review): this break terminates the whole loop after a single
        # "5" command, before the -1 terminator -- confirm that is intended.
        break
    else:
        pass
    choice = curr_input[i]
    i+=1
| true |
ae604eca9e67009813c4e1220b68f466779f1e51 | Python | jacobj10/JumpShyp | /twitter_scrape.py | UTF-8 | 3,583 | 2.515625 | 3 | [
"MIT"
] | permissive | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from textblob import TextBlob
from twilio.rest import Client
from pymongo import MongoClient
import json
import datetime
import re
# Twitter OAuth credentials (redacted placeholders).
CONSUMER_KEY = "nah"
CONSUMER_SECRET = "nah"
ACCESS_TOKEN = "nah-nah"
ACCESS_SECRET = "nah"
# Twilio SMS credentials (redacted placeholders).
TWILIO_ACCOUNT = "nah"
TWILIO_TOKEN = "nah"
# NOTE: these clients are constructed at import time, so importing this
# module attempts to reach Twilio and a local MongoDB.
CLIENT = Client(TWILIO_ACCOUNT, TWILIO_TOKEN)
MONGO_CLIENT = MongoClient('localhost', 27017)
DATABASE = MONGO_CLIENT['app']
class Target(object):
    """Tracks a sentiment 'velocity' for one company and texts every
    subscribed phone number when it crosses the positive or negative
    threshold."""

    def __init__(self, name, abb):
        self.name = name
        self.abbr = abb
        self.vel = 0                 # running sentiment velocity
        self.pos_thresh_hit = False  # debounce flags so each crossing
        self.neg_thresh_hit = False  # sends only one message
        self.pos_thresh = 2
        self.neg_thresh = -2
        # "Long ago" sentinel so the first update sees a huge elapsed time
        # and therefore contributes almost nothing to the velocity.
        self.last = datetime.datetime(1, 1, 1, 1, 1, 1)

    def update(self, polarity, time):
        """Fold one tweet's sentiment `polarity` (at timestamp `time`) into
        the velocity and fire threshold notifications.

        Fix: removed a dead local (`inc`) that was derived from the polarity
        but never used anywhere.
        """
        elapsed = (time - self.last).total_seconds()
        self.last = time
        if int(elapsed) == 0:
            elapsed = 1  # clamp same-second tweets to avoid division by zero
        self.vel += polarity / int(elapsed)
        print(self.vel)
        if self.vel >= self.pos_thresh and not self.pos_thresh_hit:
            self.send_message(1)
            self.pos_thresh_hit = True
        elif self.vel <= self.neg_thresh and not self.neg_thresh_hit:
            self.send_message(-1)
            self.neg_thresh_hit = True
        # Re-arm the debounce flags once the velocity falls back inside.
        if self.vel < self.pos_thresh:
            self.pos_thresh_hit = False
        if self.vel > self.neg_thresh:
            self.neg_thresh_hit = False

    def send_message(self, mag):
        """SMS all numbers subscribed to this company (mag=+1 good, -1 bad)."""
        status = ""
        if mag == 1:
            status = "doing quite well on Twitter"
        elif mag == -1:
            status = "doing pretty poorly on Twitter"
        url = "http://172.20.44.75:5000/stats/{0}".format(self.abbr + "_" + self.name)
        url = ''.join(url.split(' '))  # drop any spaces from the URL
        msg = "Looks like {0} is {1} with a velocity of {2}.\nSee {3} for more...".format(
            self.name.upper(),
            status,
            "{0:.2f}".format(round(self.vel,2)),
            url
        )
        for number in DATABASE['companies'].find_one({'company': self.name})['numbers']:
            message = CLIENT.messages.create(to=number, from_="+14403791566",
                                             body=msg)
class StdOutListener(StreamListener):
    """tweepy stream listener that scores each tweet's sentiment and feeds
    the result into the associated Target."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self.target = kwargs['target']

    def on_status(self, status):
        # Invoked by tweepy for every matching tweet.
        cleaned = self.clean_status(status.text)
        polarity = TextBlob(cleaned).sentiment.polarity
        self.target.update(polarity, status.created_at)

    def clean_status(self, status):
        # Strip @mentions, URLs and punctuation, then collapse whitespace.
        stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", status)
        return ' '.join(stripped.split())

    def on_error(self, status):
        print(status)
class TwitterScraper(object):
    """Wires a Target to a live tweepy stream filtered on the company name."""

    def __init__(self, name, abb):
        self.target = Target(name, abb)
        self.stream = self.setup_auth()
        self.queries = self.generate_queries(self.target.name)
        # Fix: `async` became a reserved word in Python 3.7, making
        # `filter(..., async=True)` a SyntaxError; tweepy renamed the
        # keyword argument to `is_async` (tweepy >= 3.6).
        self.stream.filter(track=self.queries, is_async=True)

    def setup_auth(self):
        """Build an authenticated tweepy Stream with our listener attached."""
        listener = StdOutListener(target=self.target)
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
        return Stream(auth, listener)

    def generate_queries(self, name):
        """Track both the bare company name and a naive plural of it."""
        return [name, name + 's']
| true |
cd54af46e2ee9fa551a6969f38fa20e1083e1986 | Python | James-Crean/DND5E-Character-sheet | /character.py | UTF-8 | 13,596 | 3.6875 | 4 | [
"CC0-1.0"
] | permissive | import os
import random
class Character():
    """A D&D 5e character plus an interactive builder for its fields.

    The class-level dicts/lists are 5e rulebook lookup tables; an instance
    stores one character's chosen values and exposes derived statistics
    (saving throws, hit points) as properties.
    """
    # Note: Should have classes/races and such be lower case and capitalize them when printing. Oh well.
    # Two-letter alignment code -> display name.
    _alignments = {
        "LG": "Lawful Good",
        "NG": "Neutral Good",
        "CG": "Chaotic Good",
        "LN": "Lawful Neutral",
        "NN": "True Neutral",
        "CN": "Chaotic Neutral",
        "LE": "Lawful Evil",
        "NE": "Neutral Evil",
        "CE": "Chaotic Evil"
    }
    # Playable classes offered by the builder.
    _classes = [
        "Barbarian",
        "Bard",
        "Cleric",
        "Druid",
        "Fighter",
        "Monk",
        "Paladin",
        "Ranger",
        "Rogue",
        "Sorcerer",
        "Warlock",
        "Wizard"
    ]
    # Class -> hit-die size.
    _hit_dice = {
        'Sorcerer': 6,
        'Wizard': 6,
        "Bard": 8,
        "Cleric": 8,
        "Druid": 8,
        "Monk": 8,
        "Rogue": 8,
        "Warlock": 8,
        "Fighter": 10,
        "Paladin": 10,
        "Ranger": 10,
        "Barbarian": 12
    }
    # Playable races offered by the builder.
    _races = [
        'Dragonborn',
        'Dwarf',
        'Elf',
        'Gnome',
        'Half elf',
        'Half orc',
        'Halfling',
        'Human',
        'Tiefling'
    ]
    # Race -> base walking speed (feet per round).
    _racial_move_speed = {
        "Dragonborn": 30,
        "Gnome": 25,
        "Half elf": 30,
        "Half orc": 30,
        "Tiefling": 30,
        "Human": 30,
        "Halfling": 25,
        "Elf": 30,
        "Dwarf": 25
    }
    _proficiency = [None, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6] # by level. No level 0, so 0 index is None

    def __init__(self, char_name=None, char_age=None, char_race=None, char_class=None,
                 char_stats=None, char_alignment=None, char_level=None):
        """Store the chosen fields and derive speed, hit die and proficiency.

        NOTE(review): `assert` statements disappear under `python -O`, so
        these required-argument checks are advisory only. Unknown races or
        classes, or a level outside 1..20, raise KeyError/IndexError on the
        lookups below.
        """
        assert char_name, "char_name is a required argument"
        assert char_age, "char_age is a required argument"
        assert char_race, "char_race is a required argument"
        assert char_class, "char_class is a required argument"
        assert char_stats, "char_stats is a required argument"
        assert char_alignment, "char_alignment is a required argument"
        assert char_level, "char_level is a required argument"
        self.char_name = char_name
        self.char_age = char_age
        self.char_race = char_race
        self.char_class = char_class
        self.char_stats = char_stats
        self.char_alignment = char_alignment
        self.char_level = char_level
        # Derived rulebook lookups.
        self.move_speed = self._racial_move_speed[char_race]
        self.hit_dice = self._hit_dice[char_class]
        self.prof_bonus = self._proficiency[char_level]

    @staticmethod
    def gather_info():
        """Interactively collect every field needed to build a Character.

        Re-prompts for name/class/race/age until the player confirms, then
        rolls stats and asks for alignment and level. Returns a dict whose
        keys match __init__'s keyword arguments.
        """
        info = {
            'char_name': None,
            'char_age': None,
            'char_race': None,
            'char_class': None,
            'char_stats': None,
            'char_alignment': None,
            'char_level': None
        }
        gather_info = True
        while(gather_info):
            info['char_name'] = _get_name()
            info['char_class'] = _get_class()
            info['char_race'] = _get_race()
            info['char_age'] = _get_age()
            keep_info = input(
                f"Ok, so your character is {info['char_name']} who is a {info['char_age']} year old {info['char_race']} and they are a {info['char_class']}.\n"
                f"Great choices so far! Does everything look correct to you?\n").capitalize()
            if keep_info.lower() in ["yes", "y"]:
                gather_info = False
        info['char_stats'] = _generate_stats()
        info['char_alignment'] = _get_alignment()
        info['char_level'] = _get_level()
        return info

    @property
    def max_hit_points(self):
        # (hit die + CON modifier) per level.
        # NOTE(review): 5e normally grants the maximum die at level 1 and an
        # average roll per later level -- confirm this simplification.
        return int((self.hit_dice + self.con_save) * self.char_level)

    @property
    def str_save(self):
        # Standard 5e ability modifier: (score - 10) // 2.
        return int((self.char_stats['str'] - 10) // 2)

    @property
    def dex_save(self):
        return int((self.char_stats['dex'] - 10) // 2)

    @property
    def con_save(self):
        return int((self.char_stats['con'] - 10) // 2)

    @property
    def int_save(self):
        return int((self.char_stats['int'] - 10) // 2)

    @property
    def wis_save(self):
        return int((self.char_stats['wis'] - 10) // 2)

    @property
    def cha_save(self):
        return int((self.char_stats['cha'] - 10) // 2)

    def print_sheet(self):
        """Write the full character sheet to character.txt (overwriting it)."""
        with open('character.txt', 'w') as sheet:
            sheet.write(f"Character name: {self.char_name}\n")
            sheet.write(f"Race: {self.char_race}\n")
            sheet.write(f"Class: {self.char_class}\n")
            sheet.write(f"Level {self.char_level}\n")
            sheet.write(f"Age: {self.char_age}\n")
            sheet.write(f"Move speed: {self.move_speed}\n")
            sheet.write(f"Alignment: {self.char_alignment}\n")
            sheet.write(f"Hit Points: {self.max_hit_points}\n")
            sheet.write(f"Your hit dice is {self.hit_dice}\n")
            sheet.write(f"Strength = {self.char_stats['str']}\n")
            sheet.write(f"Dexterity = {self.char_stats['dex']}\n")
            sheet.write(f"Constitution = {self.char_stats['con']}\n")
            sheet.write(f"Intelligence = {self.char_stats['int']}\n")
            sheet.write(f"Wisdom = {self.char_stats['wis']}\n")
            sheet.write(f"Charisma = {self.char_stats['cha']}\n")
            sheet.write(f"Proficiency bonus = {self.prof_bonus}\n")
            sheet.write(f"Strength saving throw = {self.str_save}\n")
            sheet.write(f"Dexterity saving throw = {self.dex_save}\n")
            sheet.write(f"Constitution saving throw = {self.con_save}\n")
            sheet.write(f"Intelligence saving throw = {self.int_save}\n")
            sheet.write(f"Wisdom saving throw = {self.wis_save}\n")
            sheet.write(f"Charisma saving throw = {self.cha_save}\n")
def _get_name():
    """Greet the player, return the character name they type, clear screen."""
    name = input(
        "Hello there! My name is Otto and I am very clever.\n"
        "I'm here to help you make a Dungeons & Dragons character!\n"
        "Do not worry, I will walk you through step by step and do as much as I possibly can to ottomate the process.\n"
        "Let's start off easy, what is your characters name going to be?\n")
    clear()
    return name
def _get_age():
    """Prompt for the character's age; returns it as the raw string typed."""
    response = input("How old is your character going to be?\n")
    clear()
    return response
def _get_race():
    """Present the race list (with optional descriptions) and re-prompt
    until the player picks a race present in Character._races."""
    print(
        "What kind of fantasy race are you interested in playing?\n"
        "This will affect your stats in a small way, but also can change how your character may perceive the world,\n"
        "and how the world may view them, it can also grant you cool abilities.\n"
        "But we will touch on that later, for now I can assist with the following:\n"
        f"{Character._races}\n")
    race_explain = input("Would you like Otto to quickly describe each fantasy race?\n")
    if race_explain.lower() in ["yes", "y"]:
        print(
            "Dragonborn are a bipedal race of half dragon half people, it may be easiest to think of them as 'lizard people'.\n\n"
            "Gnomes are short in height but big in spirit, they are often tied to nature and the fey and frequently dabble in mysticism.\n\n"
            "Half Elfs are children of humanity and elf-kind, not as tall as elves but taller than most humans,\n"
            "typically slender & pointed ears though not as large as a full blooded elf, similar to the character 'Link' from the video game\n"
            "series 'The Legend of Zelda'.\n\n"
            "Half Orcs are half orc and half human and are typically stronger than a human and smarter than an orc,\n"
            "while maintaining green or grey skin from their orc heritage, it may be best to think of an orc from\n"
            "'The Lord of the Rings' series but with more brains and fairer skin.\n\n"
            "Tieflings are typically a blue or purple hue skinned bipedial horned creature that otherwise maintains the shape of a human.\n"
            "Otto supposes they could be likened to a demon though this is technically not true and they would not appreciate such a comparison.\n\n"
            "Humans are just like you, Otto hopes a further explaination is not needed.\n\n"
            "Halflings are very similar to hobbits from the lord of the rings' series.\n\n"
            "'Elves are a tall, long eared slender race similar to Legolas from 'The Lord of the Rings'.\n\n"
            "Dwarves are typically short and hardy. They are similar to Gimli from 'The Lord of the Rings'.\n")
    else:
        print("Ok")
    valid_race = False
    while(not valid_race):
        # find a way to incorporate their stat alterations into player stats
        # .capitalize() matches the casing used in Character._races
        # (e.g. "half elf" -> "Half elf").
        char_race = input("Please select your race: ").capitalize()
        if char_race in Character._races:
            valid_race = True
        else:
            print(f"{char_race} is not a valid race. Let's try again.\n")
    clear()
    return char_race
def _get_alignment():
    """Ask the player for an alignment and return its two-letter code.

    Bug fix: the original assigned the answer to the loop variable AFTER the
    validity check, so any invalid entry was accepted on the first pass; it
    now re-prompts until the code is a key of Character._alignments.
    """
    explain_alignment = input("Next we will need to pick your alignment, would you like an explanation on alignments?:\n")
    if explain_alignment.lower() in ["yes", "y"]:
        print("Lawful good is like a policeman, or for a high fantasy example, a paladin.")
        print("Neutral good is like a good citizen, they may not have a strong set of principles but they still do the right thing.")
        print("Chaotic good is a Robin Hood or Batman type character, if you can do good by breaking the law then you likely would do so.")
        print("Lawful neutral is like a lawyer, you work within the scope of laws and rules but likely to your own benefit")
        print("Neutral/Neutral, often referred to as 'True Neutral', is traditionally a self centered character.")
        print("Chaotic neutral is the type of character who just likes to cause commotion and chaos to see what happens, like a small mischievous child would.")
        print("Lawful evil is best thought of as the 'devil', operates within a set of rules in order to inflict suffering on people.")
        print("Neutral evil can be thought of as a bad sociopath, a type of entity who simply goes through life with no regard for anything but causing pain.")
        print("Chaotic evil can present as brigands or a 'Black hat' in the wild west days, they operate outside any laws in order to make the world a worse place.")
    else:
        print("Ok.")
    while True:
        user_alignment = input(
            "Please enter your chosen alignment by typing the initials of the alignment you'd like, for example; Neutal/Neutral would be entered as NN:\n").upper()
        if user_alignment in Character._alignments.keys():
            return user_alignment
        print("Please only choose two letters for your chosen alignment.")
def _generate_stats():
    """Roll the six ability scores (uniform 7..18 each), explain them to the
    player, and return them as a dict keyed str/dex/con/int/wis/cha."""
    print(
        "Next up is stat rolls.\n"
        "There are several different ways of doing this, but for simplicity sake, Otto will handle the rolling for you.\n"
        "Do not worry, I am created to not give you any rolls that are too bad,\n"
        "though you will likely have at least one or two below 10, this is not only normal, it's part of the fun.\n"
        "Nobody's perfect, except for Otto of course.\n")
    # The 7..18 range replaces the traditional 4d6-drop-lowest roll.
    stats = {
        'str': random.randint(7, 18),
        'dex': random.randint(7, 18),
        'con': random.randint(7, 18),
        'int': random.randint(7, 18),
        'wis': random.randint(7, 18),
        'cha': random.randint(7, 18)
    }
    print(
        f"{stats['str']} is your strength score.\n"
        "This is how physically strong you are, and typically how much damage you will deal with a melee attack.\n")
    print(
        f"{stats['dex']} is your dexterity score.\n"
        "This is how nimble you are and can help you avoid traps, or shoot a bow.\n")
    print(
        f"{stats['con']} is your constitution score.\n"
        "This dictates how physically resiliant you are.\n")
    print(
        f"{stats['int']} is your intelligence score.\n"
        "This represents how studied you are. For example, Otto has a 30 in intelligence,\n"
        "however a normal PC will never reach that high, as I am a highly advanced A.I.\n")
    print(
        f"{stats['wis']} is your wisdom score.\n"
        "If intelligence is book smarts, wisdom is street smarts.\n"
        "Think of it like this, intelligence is knowing a knife is sharp, wisdom is knowing the sharp side points away from you.\n")
    print(
        f"{stats['cha']} is your charisma score.\n"
        "Typically it's how likeable you are perceived to be.\n")
    return stats
def _get_class():
    """Re-prompt until the player picks a class listed in Character._classes,
    then clear the screen and return it (capitalised)."""
    valid_class = False
    while(not valid_class):
        char_class = input(
            "What class do you want to play?\n"
            "This will dictate what your character can do in combat, how they interact with the monsters, and other various things.\n"
            "I can assist with the following classes:\n"
            f"{Character._classes}\n").capitalize()
        if char_class not in Character._classes:
            print("Please choose a valid class.")
        else:
            valid_class = True
    clear()
    return char_class
def _get_level():
    """Prompt for the campaign's starting level and return it as an int.

    Robustness fix: the original crashed with ValueError on non-numeric
    input; it now re-prompts until a whole number is entered.
    """
    while True:
        raw = input("What level is your campaign starting at? If unsure Otto suggests picking 1 for your level.\n")
        try:
            return int(raw)
        except ValueError:
            print("Please enter a whole number for your level.")
def clear():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
if __name__ == "__main__":
    # execute only if run as a script
    # Flow: clear screen, interview the player, build the Character from the
    # collected dict, then write character.txt.
    clear()
    character_info = Character.gather_info()
    character = Character(**character_info)
    character.print_sheet()
| true |
5118d396297abf69dc9783a0fc5d93f390468ced | Python | Nyarish/Udacity_Self_Driving_Car_Engineer | /Project_Advanced_Lane_Finding/distortion_correction.py | UTF-8 | 2,352 | 3.09375 | 3 | [
"MIT"
] | permissive | # imports
import pickle
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
# Helper functions
# Helper functions
def undistort_image(image, mtx, dist):
    """
    Undistort `image` with camera matrix `mtx` and distortion coefficients
    `dist` (as produced by cv2.calibrateCamera), via
    cv2.undistort(image, mtx, dist, None, mtx).

    inputs: image, mtx, dist
    returns: undistorted image
    """
    return cv2.undistort(image, mtx, dist, None, mtx)
def get_undistorted_image(image, mtx, dist, gray_scale=False):
    """Undistort `image`; optionally convert the result to grayscale."""
    corrected = undistort_image(image, mtx, dist)
    if not gray_scale:
        return corrected
    return cv2.cvtColor(corrected, cv2.COLOR_RGB2GRAY)
def read_undistorted_image_fname(fname, mtx, dist):
    """
    Read the image at `fname`, undistort it with cv2.undistort(image, mtx,
    dist, None, mtx), save the result under ./output_images/Undist_<name>,
    then read that saved file back and return it.

    NOTE(review): mpimg.imread yields RGB data while cv2.imwrite expects
    BGR; the COLOR_BGR2RGB conversion below swaps the channels before
    writing -- presumably to compensate, but confirm the saved colours.
    """
    # Read image
    img = mpimg.imread(fname)
    image = np.copy(img)
    dst = cv2.undistort(image, mtx, dist, None, mtx)
    undist_img = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
    # Create output folder if missing
    image_dir = './output_images/'
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    # write image, prefixing the original basename with 'Undist_'
    name = fname.split('/')[-1]
    save_name = 'Undist_'+name
    cv2.imwrite(image_dir+save_name,undist_img)
    # Read saved image
    fname = image_dir+save_name
    undist_img = mpimg.imread(fname)
    return undist_img
def visualizeUndistortion(fname, undist_img):
    """Show the original and undistorted images side by side.

    NOTE(review): despite its name, `fname` is passed straight to imshow(),
    so callers must pass an image array, not a filename -- confirm intent.
    """
    #original_image = mpimg.imread(fname)
    # Visualize undistortion
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(fname);
    ax1.set_title('Original Image', fontsize=30)
    ax2.imshow(undist_img);
    ax2.set_title('Undistorted Image', fontsize=30) | true |
a4d0f1246e85d9249bcc1c3ea96a7ed32000a767 | Python | JeRimes/hi-python | /ex03_rps/rockPaperScissors.py | UTF-8 | 1,829 | 4.03125 | 4 | [] | no_license | import random
# Running score counters for the session, updated in place by battle().
player_score = 0
computer_score = 0
def game():
    """Play one round of rock-paper-scissors against the computer, then
    offer a rematch."""
    valid_moves = ["r", "p", "s"]
    player_move = input(
        "How to Play : [Type r for Rock] [Type p for Paper] [Type s for Scissors] : ")
    # Re-prompt until the player enters one of the three legal moves.
    while player_move not in valid_moves:
        player_move = input(
            "Please : [Type r for Rock] [Type p for Paper] [Type s for Scissors] :")
    computer_move = random.choice(valid_moves)
    print("Player : {} vs Computer : {}".format(player_move, computer_move))
    battle(computer_move, player_move)
    playAgain()
def playAgain():
    """Ask whether to start another round: 'y' restarts, 'n' says goodbye."""
    yes_no = ["y", "n"]
    answer = input("Play again ? [Type y for yes] [Type n for no] : ")
    # Re-prompt until the answer is y or n.
    while answer not in yes_no:
        answer = input(
            "Please : [Type y for yes] [Type n for no] : ")
    if answer == "y":
        game()
    else:
        print("Good bye !")
def battle(Computer_Moves, Players_Moves):
    """Score one round, bump the global tallies, and announce the outcome."""
    global player_score
    global computer_score
    # (winner, loser) pairs: rock beats scissors, paper beats rock, scissors beats paper.
    beats = {("r", "s"), ("p", "r"), ("s", "p")}
    if (Players_Moves, Computer_Moves) in beats:
        player_score += 1
        print("Player win - player score: " + str(player_score))
        print("computer score : " + str(computer_score))
    elif (Computer_Moves, Players_Moves) in beats:
        computer_score += 1
        print("Computer win - player score: " + str(player_score))
        print("computer score : " + str(computer_score))
    elif Players_Moves == Computer_Moves:
        print("Nobody win")
# Start the first round when run as a script.
if __name__ == "__main__":
    game()
| true |
a3e0c9995576ef8dec458dc1890c02ccdd585ab2 | Python | fahim92/Calcute-your-Age | /age.py | UTF-8 | 185 | 4.40625 | 4 | [] | no_license | #This program takes age from the user in years and then converts it to seconds
def age_in_seconds(years):
    """Convert an age in years to seconds using a 365.25-day (Julian) year.

    The original multiplied by 12 * 30 * 24 * 60 * 60, i.e. a 360-day
    year, systematically under-reporting the result.
    """
    return years * 365.25 * 24 * 60 * 60

if __name__ == '__main__':
    x = input('Input your age here')
    print('Your age is', age_in_seconds(float(x)), 'in seconds')
| true |
7f6b291b03149a4fe0b7484f9b76e6ddd1760ac3 | Python | benliebersohn/ideal-octo-waffle | /workflow/plot.py | UTF-8 | 2,478 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import collections as pltc
import shapely
import workflow.colors
def huc(huc, color=None, style='-', linewidth=1):
    """Plot the exterior ring of a single HUC polygon."""
    xs, ys = huc.exterior.xy
    if color is None:
        plt.plot(xs, ys, style, linewidth=linewidth)
    else:
        plt.plot(xs, ys, style, color=color, linewidth=linewidth)
def hucs(hucs, color=None, style='-', linewidth=1):
    """Plot the exterior ring of every polygon in a HUCs collection."""
    for poly in hucs.polygons():
        xs, ys = poly.exterior.xy
        # color=None is passed through, letting matplotlib pick its default.
        plt.plot(xs, ys, style, color=color, linewidth=linewidth)
def rivers(rivers, color=None, style='-', linewidth=1):
    """Plot rivers given as workflow.tree.Tree objects or shapely LineStrings.

    The last character of `style` is treated as a point marker unless the
    style ends in '-' or '.'; the remainder is used as the line style.
    NOTE(review): workflow.tree is referenced below but only workflow.colors
    is imported at the top of this file -- confirm workflow.tree is loaded
    elsewhere before this runs.
    """
    if style.endswith('-') or style.endswith('.'):
        marker = None
    else:
        marker = style[-1]
    # FIX: the original compared ints with `is` (`len(style) is 1`,
    # `len(rivers) is 0`), an identity test that only works by CPython's
    # small-int caching; use value equality.
    if len(style) == 1:
        style = None
    else:
        style = style[:-1]
    if len(rivers) == 0:
        return
    # gather lines
    if type(rivers[0]) is workflow.tree.Tree:
        lines = []
        for tree in rivers:
            lines.extend([river.coords[:] for river in tree.dfs()])
    elif type(rivers[0]) is shapely.geometry.LineString:
        lines = [river.coords[:] for river in rivers]
    # plot lines
    if style is not None:
        lc = pltc.LineCollection(lines, colors=color, linewidths=linewidth, linestyle=style)
        plt.gca().add_collection(lc)
    if marker is not None:
        marked_points = np.concatenate([np.array(l) for l in lines])
        assert(marked_points.shape[-1] == 2)
        plt.scatter(marked_points[:,0], marked_points[:,1], c=color, marker=marker)
    plt.gca().autoscale()
    plt.gca().margins(0.1)
def river(river, color='b', style='-', linewidth=1):
    """Plot each reach (LineString) of a single river."""
    for reach in river:
        xs, ys = reach.xy
        plt.plot(xs, ys, style, color=color, linewidth=linewidth)
def points(points, **kwargs):
    """Scatter-plot a collection of shapely Point objects."""
    xs = [pt.xy[0][0] for pt in points]
    ys = [pt.xy[1][0] for pt in points]
    plt.scatter(xs, ys, **kwargs)
def triangulation(points, tris, color=None, linewidth=1, edgecolor='gray', norm=None):
    """Plot a triangulation; points with a third column are colored by it.

    Returns the triplot/tripcolor artist(s).
    """
    monocolor = True
    if color is None:
        # FIX: the original used `points.shape[1] is 3`, an identity test on
        # an int that only works by CPython accident; use value equality.
        if points.shape[1] == 3:
            monocolor = False
        else:
            color = 'gray'
    if monocolor:
        return plt.triplot(points[:,0], points[:,1], tris, color=color, linewidth=linewidth)
    else:
        return plt.tripcolor(points[:,0], points[:,1], tris, points[:,2], linewidth=linewidth, edgecolor=edgecolor, norm=norm)
| true |
f14e5f7b14ef8cced8cfcf926ffe44d1f034b787 | Python | GBedenko/algorithms-and-data-structures | /weighted_graph/stack.py | UTF-8 | 792 | 4.625 | 5 | [] | no_license |
class Stack:
    """LIFO stack backed by a Python list; the top is the end of the list."""

    def __init__(self):
        self.items = []

    def push(self, value):
        """Push value onto the top of the stack (returns None).

        The original returned list.append's result, which is always None
        and read as if something meaningful came back.
        """
        self.items.append(value)

    def pop(self):
        """Remove and return the top value, or None when the stack is empty."""
        if not self.items:
            return None
        return self.items.pop()

    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)

    def output(self):
        """Print the stack's contents, bottom to top."""
        print(str(self.items))
if __name__ == "__main__":
    # Quick manual demo of the Stack API: push, print, and pop.
    s = Stack()
    # Add values to the stack
    s.push(2)
    s.output()
    s.push(5)
    s.output()
    print("Retrieved value: " + str(s.pop()))
| true |
51bc67e0169a57093c4f2fa1793a1df80af6ff28 | Python | digital-bauhaus/greedyMLpipeline | /helper/helper.py | UTF-8 | 2,843 | 2.75 | 3 | [] | no_license | from sklearn import metrics
import matplotlib.pyplot as plt
import os
class Helper:
    """Static reporting/plotting helpers for classifier-evaluation results.

    A "result" is the 4-tuple (feature, classifier, accuracy,
    (targets, predictions, classes)) produced by the experiment driver.
    """

    @staticmethod
    def print_sklearn_classification_report(targets, predictions, classes):
        """Print sklearn's per-class precision/recall/f1 report."""
        report = metrics.classification_report(targets, predictions, target_names=classes)
        print(report)

    @staticmethod
    def analyze_results(results):
        """Print reports and save a confusion matrix and accuracy plot per result."""
        print("ANALYSIS:")
        for result in results:
            activated_feature = result[0]
            activated_classifier = result[1]
            print("active feature: %s" % str(activated_feature))
            print("active Classifier: %s" % str(activated_classifier))
            print("accuracy: %.2f" % result[2])
            Helper.print_sklearn_classification_report(result[3][0], result[3][1], result[3][2])
            dest = "confusion_" + result[1].__name__ + ".pdf"
            dest = os.path.join("plots", dest)
            Helper.print_confusion_matrix(result[3][0], result[3][1], result[3][2], result[1].__name__, dest)
        names = [str(feature)+"_"+classifier.__name__ for feature, classifier, _, _ in results]
        heights = [acc for _, _, acc, _ in results]
        names_ix = [i for i, _ in enumerate(names)]
        Helper.bar_plot(names_ix,
                        heights,
                        names,
                        "Repetition Config",
                        "Accuracy",
                        "Experiment Evaluation",
                        "plots/accuracies.pdf")

    @staticmethod
    def bar_plot(x_axis, y_axis, x_ticks, x_label, y_label, title, destination):
        """Draw a labelled bar chart, show it, and save it to destination."""
        fig = plt.figure()
        plt.tight_layout()
        plt.bar(x_axis, y_axis)
        plt.xticks(x_axis, x_ticks, rotation=90)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.title(title)
        plt.grid()
        plt.show()
        fig.savefig(destination, bbox_inches='tight')

    @staticmethod
    def print_confusion_matrix(targets, predictions, classes, classifier, destination):
        """Print the confusion matrix and save an annotated matshow plot.

        BUG FIX: Axes.text takes (x, y) = (column, row), but the original
        called ax.text(r, c, confusion[r][c], ...), transposing every
        annotation relative to the cell it describes; (c, r) below puts the
        value confusion[r][c] on its own cell.
        """
        confusion = metrics.confusion_matrix(targets, predictions)
        print(classifier + ":")
        print(confusion)
        labels = classes
        fig = plt.figure()
        plt.tight_layout()
        ax = fig.add_subplot(111)
        cax = ax.matshow(confusion)
        for r in range(len(confusion)):
            for c in range(len(confusion[r])):
                ax.text(c, r, confusion[r][c], va="center", ha="center")
        ax.set_xticklabels([''] + labels, rotation="vertical")
        ax.set_yticklabels([''] + labels)
        plt.xlabel('Predicted')
        plt.ylabel('True')
        plt.title('Confusion matrix of ' + classifier, pad=180)
        plt.show()
        fig.savefig(destination, bbox_inches='tight', dpi=100)
| true |
650cbf5278beda76e39564f6b6862c8e12d96e5a | Python | Zihua-Liu/LeetCode | /27/Remove_Element.py | UTF-8 | 646 | 3.125 | 3 | [] | no_license | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
del_ele = 0
for i in range(len(nums)):
if nums[i] == val:
nums[i] = 1 << 20
del_ele = del_ele + 1
new_length = len(nums) - del_ele
ptr = new_length
for i in range(new_length):
if nums[i] == 1 << 20:
while True:
if nums[ptr] != 1 << 20:
nums[i] = nums[ptr]
ptr = ptr + 1
break
ptr = ptr + 1
return new_length
| true |
6d215176fc3d6129e380f14d74d2e59a83c7cd02 | Python | rongminjin/queueSimulation | /dataGenerator.py | UTF-8 | 444 | 2.625 | 3 | [] | no_license | import os
import sys
os.system("rm -f rawData.txt") # remove old data so each run starts fresh
# Full parameter sweep: one simulator run per (arrivalRate, maxServiceTime)
# pair, all with the same fixed seed for reproducibility.
seed = 34534
allArrivalRates = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
allMaxServiceTimes = [ 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10 ]
for arrivalRate in allArrivalRates:
    for maxServiceTime in allMaxServiceTimes:
        # Each run appends its output to rawData.txt.
        os.system("./simulator " + str(arrivalRate) + " " + str(maxServiceTime) + " " + str(seed) + " >> rawData.txt")
| true |
c2a10798d47e5f63650ed3db12e2e72d1b55f9d9 | Python | Roy6801/CrowdCounterServer | /ccs/myUtils/dataProcessor/Processor.py | UTF-8 | 340 | 2.65625 | 3 | [] | no_license | import numpy as np
import time
def overlap(he, bo):
    """Return the percentage of the head box's area covered by the body box.

    Boxes are (x, y, w, h) tuples.  Overlap extents are clamped at zero:
    the original multiplied two possibly-negative extents for disjoint
    boxes, which could yield a bogus positive "overlap"; clamping makes
    disjoint boxes report 0.
    """
    a = max(0, min(he[1] + he[3], bo[1] + bo[3]) - max(he[1], bo[1]))
    b = max(0, min(he[0] + he[2], bo[0] + bo[2]) - max(he[0], bo[0]))
    intersection = a * b
    return (intersection/(he[2] * he[3])) * 100
def process_time(ts):
return time.strftime("%D %H:%M:%S", time.localtime(int(ts)))
| true |
8f4d4b974ab31ad27b76b1f16612330bb10a45cb | Python | ckat609/Codewars | /python/0002_Binary_Addition.py | UTF-8 | 102 | 3.140625 | 3 | [] | no_license | def add_binary(a, b):
print(str(bin(a+b))[2::])
return(str(bin(a+b))[2::])
add_binary(1, 2)  # demo call: prints and returns '11'
| true |
cfd1a2e7f8a48cf7f9104c1271843f7b9bc47428 | Python | fultonms/crypto | /a3/elgamal/eg.py | UTF-8 | 1,190 | 2.9375 | 3 | [
"MIT"
] | permissive | import modular
class ElGamal(object):
    """ElGamal key material: prime p, generator g, public b = g^a mod p, private a."""

    def __init__(self, p, g, b, a):
        self.p = p
        self.g = g
        self.b = b
        self.a = a

    @classmethod
    def load(cls, keyFile):
        """Build an instance from a file whose first four lines are p, g, b, a.

        BUG FIX: the original asserted len(lines) >= 3 but then indexed
        lines[3], so a 3-line file passed the check and raised IndexError;
        four lines are actually required.
        """
        lines = [line.strip() for line in keyFile.readlines()]
        assert len(lines) >= 4
        p = int(lines[0])
        g = int(lines[1])
        b = int(lines[2])
        a = int(lines[3])
        return cls(p, g, b, a)

    def save(self, outFile):
        # Not implemented yet.
        pass

    def keygen(self, k):
        # Not implemented yet.
        pass

    def encrypt(self, plainFile):
        # Not implemented yet.
        pass

    def decrypt(self, cipherFile):
        """Decrypt 'alpha,y' lines: m = y * (alpha^a)^-1 mod p for each line."""
        message = list()
        lines = [line.strip() for line in cipherFile.readlines()]
        for line in lines:
            alpha, y = int(line.split(',')[0]), int(line.split(',')[1])
            m = (y * modular.modInv(pow(alpha, self.a, self.p), self.p)) % self.p
            message.append(m)
        return message

    def __repr__(self):
        return 'ElGamal(%r, %r, %r, %r)'%(self.p, self.g, self.b, self.a)
if __name__ == '__main__':
    # Demo (Python 2): load the public key file, decrypt the ciphertext,
    # then print the plaintext by mapping each number back to a character.
    instance = ElGamal.load(open('pub.keys'))
    message = instance.decrypt(open('a3.cipher'))
    msgstr = ''
    for m in message:
        msgstr += chr(m)
    print msgstr
else:
    # No import-time behaviour.
    pass
| true |
94586b3823c27ef50ef8ba63848ad4e6bb564a2f | Python | silverlyjoo/TIL | /algorithm/190327_AD_practice/g_jangis.py | UTF-8 | 797 | 2.71875 | 3 | [] | no_license | import sys
sys.stdin = open('jangis.txt')  # redirect stdin so input() reads the test file
def iswall(y, x):
    """Return True when (y, x) lies outside the N x M board."""
    return not (0 <= y < N and 0 <= x < M)
def BFS(y, x):
    """Breadth-first search over knight moves starting from (y, x).

    Prints the number of moves as soon as a cell marked 2 (the target in
    ppan) is reached; cells marked 0 are unvisited and get marked 1 when
    queued.
    """
    knight_moves = [(-2, 1), (-1, 2), (1, 2), (2, 1),
                    (2, -1), (1, -2), (-1, -2), (-2, -1)]
    queue = [(y, x, 0)]
    while queue:
        r, c, depth = queue.pop(0)
        for dr, dc in knight_moves:
            nr, nc = r + dr, c + dc
            if iswall(nr, nc):
                continue
            if ppan[nr][nc] == 2:
                print(depth + 1)
                return
            if ppan[nr][nc] == 0:
                queue.append((nr, nc, depth + 1))
                ppan[nr][nc] = 1
# Board is N x M; start (R, C) and target (S, K) come in 1-based and are
# converted to 0-based indices.  The target square is marked 2 for BFS.
N, M = map(int, input().split())
R, C, S, K = map(lambda x:int(x)-1, input().split())
ppan = [[0]* M for _ in range(N)]
ppan[S][K] = 2
BFS(R, C)
d7aacab785adf3a172cba69264721bd9121804d3 | Python | viniciosarodrigues/python-estudos | /ex007.py | UTF-8 | 266 | 3.953125 | 4 | [] | no_license | print('======== Exercício 08 (Calcula valores em centímetros e milímetros ========')
# Reads a length in metres and prints it in centimetres and millimetres
# (prompts/output are in Brazilian Portuguese).
metros = float(input('Informe a quantidade em metros: '))
print('O valor de {}m em centímeros é de {} e em milímetros é de {}'.format(metros, (metros*100), (metros*1000)))
| true |
5367ebc9586710caa234d11302c196bea86a47d7 | Python | Prakashchater/Daily-Practice-questions | /practice.py | UTF-8 | 208 | 3.484375 | 3 | [] | no_license | def cummilative(arr):
l = []
j = 0
for i in range(len(arr)):
j += arr[i]
l.append(j)
return l
if __name__ == '__main__':
    # Demo: running sums of the sample array -> [10, 30, 60, 100, 150]
    arr = [10,20,30,40,50]
    print(cummilative(arr))
b08c6d1ad03e6d70b0d7d0c72d42e0689284a972 | Python | sandeshghanta/Share-Code | /send.py | UTF-8 | 615 | 2.75 | 3 | [] | no_license | import os
import sys
import requests
def main():
    """Upload the file named in sys.argv[1] to the sharecode server.

    Python 2 script (raw_input).  BUG FIX: the original was written with
    C-style braces (`def main() {` ... `}`), which is a Python syntax
    error; this restores valid Python.  Prints are parenthesized, which
    behaves identically for single arguments under Python 2, and the dead
    `flag = False` reset after the existence check was dropped.
    """
    filename = sys.argv[1]
    # Make sure the file exists in the current directory before uploading.
    flag = False
    for i in (os.listdir(os.getcwd())):
        if (i == filename):
            flag = True
            break
    if (not flag):
        print(filename + " file not found")
        exit(0)
    files = {'file': open(filename, 'rb')}
    password = str(raw_input("Enter password for your file "))
    if (password == ""):
        password = "null"
    r = requests.post("http://sharecode.co.nf/server.php?filename="+filename+"&password="+password,files=files)
    print(r.text)
# Entry point: upload sys.argv[1] when run as a script.
if __name__ == "__main__":
    main()
fef4f6278ec1c5bbf2a505936feb502d0e2ef0ca | Python | gqmelo/misc | /algorithms/strings.py | UTF-8 | 1,104 | 3.546875 | 4 | [] | no_license | import collections
def has_unique_chars(input_str):
    """Return True when no character occurs more than once in input_str."""
    # A set keeps one copy of each character, so any duplicate shrinks it.
    return len(set(input_str)) == len(input_str)
def is_permutation(a, b):
    """Return True when b is a rearrangement of the characters of a."""
    if len(a) != len(b):
        return False
    # Equal multisets of characters <=> one is a permutation of the other.
    return collections.Counter(a) == collections.Counter(b)
def urlify(chars, real_length):
    """Replace each space in chars[:real_length] with '%', '2', '0' in place.

    chars is a list of single characters with enough trailing slack to hold
    the expansion; the same list object is mutated and returned.  Characters
    beyond the expanded region are left untouched.
    """
    if len(chars) == 0 or real_length < 1:
        return chars
    encoded = []
    for ch in chars[:real_length]:
        if ch == " ":
            encoded.extend(("%", "2", "0"))
        else:
            encoded.append(ch)
    # Overwrite exactly the expanded prefix; list length is unchanged
    # because the slack already reserved room for the extra characters.
    chars[:len(encoded)] = encoded
    return chars
| true |
e3001aa0e75cd5b159c4a252d215d25c6dbb0447 | Python | jiahuang/gladomus | /app/commander.py | UTF-8 | 16,606 | 2.8125 | 3 | [] | no_license | from models import *
from twilio.rest import TwilioRestClient
from time import gmtime, strftime
import urllib2
import simplejson
from bingMapParser import BingMapParser
import re
from logger import log
from bs4 import BeautifulSoup
import time
from threading import Thread
from operator import itemgetter
# Shared Twilio REST client used for every outbound SMS.
CLIENT = TwilioRestClient(TWILIO_SID, TWILIO_AUTH)
MAX_TEXTS = 4 # max number of 160-char SMS segments sent before requiring "more"
def isIntOrFloat(s):
    """Return True when s parses as an int or as a float, else False."""
    for caster in (int, float):
        try:
            caster(s)
            return True
        except ValueError:
            pass
    return False
def clean(s):
    """Collapse runs of whitespace to single spaces and trim edge spaces."""
    return re.sub("\s+", " ", s).strip(" ")
def parseDoubleCommand(a, b, cmd):
    """Split cmd on two marker tokens a and b (e.g. 's:' and 'e:').

    Returns [a_value, b_value], with '' for a marker that is absent.  When
    neither marker is present the function falls through and returns None
    (kept from the original -- callers guard with isProperCmd first).

    BUG FIX: the original tested `str.find(...) > 0`, so a marker sitting
    at index 0 (cmd starting with the marker) was treated as missing and
    its value silently dropped; `>= 0` is the correct "found" test.
    """
    cmdLenA = len(a)
    cmdLenB = len(b)
    a = cmd.find(a)
    b = cmd.find(b)
    if a >= 0 and b >= 0:
        # check which one comes first
        if a < b: # a is first
            a = cmd[a+cmdLenA:b] #+len to get rid of command
            b = cmd[b+cmdLenB:]
        else:
            b = cmd[b+cmdLenB:a]
            a = cmd[a+cmdLenA:]
        return [a, b]
    elif a >= 0:
        a = cmd[a+cmdLenA:]
        return [a, '']
    elif b >= 0:
        b = cmd[b+cmdLenB:]
        return ['', b]
def isProperCmd(cmdReqs, cmd):
    """Return True when every required token in cmdReqs appears in cmd."""
    return all(req in cmd for req in cmdReqs)
class Commander(Thread):
def __init__(self, fromNumber, cmd):
self.num = fromNumber
print "commander number", fromNumber
self.user = db.Users.find_one({"number":fromNumber})
print "commander user", self.user
self.moreText = '(txt "more" to cont)'
self.cmd = cmd
Thread.__init__(self)
def mapCommand(self, cmd):
# parse out command into mode, start, and end
mode = cmd[0:6]
# error out if mode is wrong
regex = re.compile('map [wdp] ', re.IGNORECASE)
if not re.match(regex, mode):
return {"error": "Map command must be followed by either (w)alking, (d)riving, or (p)ublic transit and a space. ex:map d "}
elif not isProperCmd(['s:', 'e:'], cmd):
return {"error": "Map command must have both a (s:)tarting and (e:)nding location, ex:map d s:Seattle e:Portland"}
else:
mode = cmd[4]
if mode == "p":
# check for departure (d:) or arrival (a:) but not both
depart = cmd.find("d:")
arrival = cmd.find("a:")
if depart >= 0 and arrival >= 0: #both were found, error out
return {"error":"Transit directions can only have arrival (a:3:00pm) or departure (d:18:00), not both"}
elif depart < 0 and arrival < 0: #neither was found, error out
return {"error":"Transit directions must have either arrival (a:3:00pm) or departure (d:18:00)"}
urlTimeType = "Departure" if depart > 0 else "Arrival"
timeIndex = max(depart, arrival)
start = cmd.find('s:')
end = cmd.find('e:')
# parse out arrival/departure and cut that part out of cmd
if timeIndex > start and timeIndex > end: # parse until end of cmd
time = cmd[timeIndex+2:]
cmd = cmd[:timeIndex]
elif timeIndex < start and timeIndex < end:
minIndex = min(start, end)
time = cmd[timeIndex+2:minIndex]
cmd = cmd[:timeIndex]+cmd[minIndex:]
else: # parse until larger start/end is found
maxIndex = max(start, end)
time = cmd[timeIndex+2:maxIndex]
cmd = cmd[:timeIndex]+cmd[maxIndex:]
[start, end] = parseDoubleCommand('s:', 'e:', cmd)
# parse out bad things
badThings = ['"', "'", '\\', ';']
for bad in badThings:
start = start.replace(bad, '')
end = end.replace(bad, '')
if mode=='p':
time = time.replace(bad, '')
# convert spaces and put together url
start = start.replace(' ', '%20')
end = end.replace(' ', '%20')
if mode== 'd':
url = "http://dev.virtualearth.net/REST/V1/Routes/Driving?distanceUnit=mi&wp.0="+start+"&wp.1="+end+"&key="+BING_KEY
elif mode == 'w':
url = "http://dev.virtualearth.net/REST/V1/Routes/Walking?distanceUnit=mi&wp.0="+start+"&wp.1="+end+"&key="+BING_KEY
else:
time = time.replace(" ", "%20")
url = "http://dev.virtualearth.net/REST/V1/Routes/Transit?distanceUnit=mi&wp.0="+start+"&wp.1="+end+"&timeType="+urlTimeType+"&dateTime="+time+"&key="+BING_KEY
try:
result = simplejson.load(urllib2.urlopen(url))
parser = BingMapParser(mode)
res = parser.parse(result)
return {"success":res}
except urllib2.HTTPError, e:
return {"error": "HTTP error: %d" % e.code}
except urllib2.URLError, e:
return {"error": "Network error: %s" % e.reason.args[1]}
def wikiCommand(self, cmd):
# parse out article title
replace = [";"]
for r in replace:
cmd = cmd.replace(r, "")
if not isProperCmd(['a:'], cmd):
return {"error": "Wiki command must have an (a:)rticle specified, ex:wiki a.rabbits"}
[article, section] = parseDoubleCommand('a:', 's:', cmd)
article = clean(article)
article = article.replace(' ', '%20')
section = clean(section)
try:
request = urllib2.Request("http://en.wikipedia.org/w/index.php?action=render&title="+article)
request.add_header("User-Agent", 'Gladomus/0.1')
raw = urllib2.urlopen(request)
soup = BeautifulSoup(raw)
# check if its a disambiguation article
if soup.find('a', {'title':'Help:Disambiguation'}):
# disambiguation article
sections = soup.findAll('ul', recursive=False) # fuck it, just doing ul's for now
num = 1
res = ""
for section in sections:
lists = section.findAll('li', recursive=False)
for l in lists:
res = res + str(num)+'.'+''.join(l.findAll(text=True))+' '
num = num + 1
else:
# summary
if section == '':
# no section, just grab summary
summary = soup.find('p', recursive=False)
textSummary = summary.findAll(text=True)
res = ''.join(textSummary)
else:
# there is a section
if section == 'toc': #grab table of contents
tocDiv = soup.find('div', {'id':'toctitle'})
if tocDiv == None:
return {'error': 'The article is too short to have a table of contents. Try "wiki a.'+article+'" to get the summary'}
toc = tocDiv.nextSibling
toc = toc.findAll(text=True)
res = ''.join(toc)
else:
if isIntOrFloat(section):
# find by section number
section = soup.find(text=section).parent.parent['href'][1:]#cut off the '#'
header = soup.find('span', {'id':section})
if header == None:
return {'error': 'The section was not found in the article. Try "wiki a.'+article+' s.toc" to see a table of contents'}
else:
headers = soup.findAll(text=re.compile(r'\A'+section, re.IGNORECASE))
if len(headers) == 0:
return {'error': 'The section was not found in the article. Try "wiki a.'+article+' s.toc" to see a table of contents'}
# check to make sure all found headers are spans with class mw-headline
cleanedHeaders = []
for header in headers:
if header.parent.name == 'span':
cleanedHeaders.append(header)
if len(cleanedHeaders) == 0:
return {'error': 'The section was not found in the article. Try "wiki a.'+article+' s.toc" to see a table of contents'}
header = cleanedHeader[-1].parent
p = header.findNext('p')
res = ''.join(p.findAll(text=True))
while p.nextSibling and p.nextSibling.nextSibling and p.nextSibling.nextSibling.name == 'p':
p = p.nextSibling.nextSibling
res = res +' '+ ''.join(p.findAll(text=True))
return {'success':res}
except urllib2.HTTPError, e:
return {"error": "HTTP error: %d" % e.code}
except urllib2.URLError, e:
return {"error": "Network error: %s" % e.reason.args[1]}
def callCommand(self, cmd):
originalCmd = cmd
#cmd = re.sub("\s+", " ", cmd)
#cmd = cmd.strip(" ")
replace = ["-", "(", ")", ":", "."]
for r in replace:
cmd = cmd.replace(r, "")
cmd = cmd.split(" ")
currDate = datetime.datetime.utcnow()
if len(cmd) == 2:
# call x
minutes = cmd[1]
else:
return {"error": "Call command must be 'call min' (call 5)"}
action = db.Actions()
action.number = self.num
action.command = cmd[0]
action.time = currDate + datetime.timedelta(minuets=minutes)
action.original = originalCmd
action.save()
return {'success': "Call has been scheduled"}
def run(self):
# parses command
"""
call x
map d s:start e:end
map p s:start e:end a:arrival/d:departure
map w s:start e:end
wiki a:article
wiki a:article s:toc
wiki a:article s:section
*whois x
*wifi
more
help
s <- search for commands
"""
cmd = clean(self.cmd)
cmd = cmd.lower()
cmdHeader = cmd.split(' ')[0]
if cmd == 'signup':
# signup should have been hit in the controller
return
elif cmdHeader == 'more':
self.processMsg('', False)
elif cmdHeader == "map":
res = self.mapCommand(cmd)
if "error" in res:
self.processMsg(res["error"])
else:
msg = ""
num = 1
for i in res["success"]:
msg = msg + str(num)+". "+i["directions"]+i["distance"]+' '
num = num +1
self.processMsg(msg)
#elif cmdHeader == "call":
# res = self.callCommand(cmd)
# if "error" in res:
# self.processMsg(res["error"])
elif cmdHeader == 'wiki':
res = self.wikiCommand(cmd)
if "error" in res:
self.processMsg(res["error"])
else:
self.processMsg(res['success'])
else:
res = self.performCustomCommand(cmd)
if "error" in res:
self.processMsg(res["error"])
else:
self.processMsg(res['success'])
def performCustomCommand(self, cmd):
cmdHeader = cmd.split(' ')[0]
# look through custom commands
customCmds = db.Commands.find({'tested':True, 'cmd':cmdHeader}, {'_id':1})
if customCmds.count() == 0:
# if no results, error out
return {'error':cmdHeader+' command not found. Go to www.texatron.com/commands for a list of commands'} #TODO: add suggestions module?
elif customCmds.count() == 1:
# if only one custom command returns and user doesnt have that command on their list, add it
customCmds = list(customCmds)
if customCmds[0]._id not in self.user.cmds:
self.user.cmds.append(customCmds[0]._id)
self.user.save()
return self.customCommandHelper(customCmds[0]['_id'], cmd)
# if more than one returns, check user's list
elif customCmds.count() > 1:
customList = [ obj['_id'] for obj in list(customCmds)]
matchCmds = [uCmd for uCmd in self.user.cmds if uCmd in customList]
#print matchCmds
# if more than one appears error out
if len(matchCmds) > 1:
return {'error': cmdHeader+' has multiple custom commands. Please go to www.texatron.com and select one'} #TODO: make it so you can select one from texting
else:
return self.customCommandHelper(matchCmds[0], cmd)
def customCommandHelper(self, cmdId, userCmd):
cmd = db.Commands.find_one({'_id':cmdId})
# parse out userCmd according to switch operators
if len(cmd.switches) > 0:
# there are switches, parse them
#switchLocs = [s for s in cmd.switches]
switchLocs = []
switches = []
#print "switches", cmd.switches
for s in cmd.switches:
if s['switch'] != '':
if userCmd.find(s['switch']+'.') >= 0:
switchLocs.append({'s':s['switch']+'.', 'loc':userCmd.find(s['switch']+'.')})
elif s['default'] != '':
switches.append({'s':s['switch']+'.', 'data':s['default']})
else:
return {'error':'Error:missing '+s['switch']+' switch. ex:'+cmd.example}
#sort by locs
switchLocs = sorted(switchLocs, key=itemgetter('loc'))
for i in xrange(len(switchLocs)-1):
s1 = switchLocs[i]
s2 = switchLocs[i+1]
data = clean(userCmd[s1['loc']+2:s2['loc']]).replace(' ', '%20')
switches.append({'s':s1['s'], 'data':data})
# append final one
if len(switchLocs) > 0:
data = clean(userCmd[switchLocs[-1]['loc']+2:]).replace(' ', '%20')
switches.append({'s':switchLocs[-1]['s'],'data':data})
url = cmd.url
#put together url with switches
for s in switches:
print s['data']
newUrl = url.replace('{'+s['s'][:-1]+'}', s['data'])
if newUrl == url:
# something went wrong. a command didnt get replaced
return {'error':"Error:couldn't find switch "+s['s']+""}
else:
url = newUrl
else:
url = cmd.url
try:
print url
request = urllib2.Request(url)
request.add_header("User-Agent", 'Texatron/0.1')
raw = urllib2.urlopen(request)
soup = BeautifulSoup(raw)
# parse soup according to includes
msg = ''
count = 1
includeText = self.findHtmlElements(soup, cmd.includes)
excludeText = self.findHtmlElements(soup, cmd.excludes)
if len(excludeText) > 0:
text = [included for included in includeText if included not in excludeText]
else:
text = includeText
if cmd.enumerate:
for t in text:
msg = msg + ' '+str(count)+'.'+ str(t.encode("utf-8"))
count = count + 1
else:
msg = ''.join(text)
return {'success':msg}
except urllib2.HTTPError, e:
return {"error": "HTTP error: %d" % e.code}
except urllib2.URLError, e:
return {"error": "Network error: %s" % e.reason.args[1]}
def findHtmlElements(self, soup, elementsToFind):
foundText = []
for i in elementsToFind:
# put together tag matches dict
matchDict = {}
for match in i['matches']:
matchDict[match['type']] = re.compile(r'\b%s\b'%match['value'])
found = soup.findAll(i['tag'], matchDict)
for f in found:
foundText = foundText + f.findAll(text=True)
return foundText
def processMsg(self, msg, isNewMsg=True, cache=True):
print "process Msg", msg
if cache:
# CACHE RES
cacheNumber = db.cache.find_one({'number':self.num})
currDate = datetime.datetime.utcnow()
index = 160*MAX_TEXTS-len(self.moreText)
if cacheNumber and isNewMsg:
# update cache
db.cache.update({'number':self.num}, {'$set':{'data':msg, 'index':index, 'time':currDate}})
elif not isNewMsg:# old message, move cache index
msg = cacheNumber['data']
index = cacheNumber['index']
if (index > len(msg)):
return # break out
msg = msg[index:]
# move cache index to new place, send off message
db.cache.update({'number':self.num}, {'$set':{'index':max(len(msg), index+160*MAX_TEXTS)}})
else: # new cache for that number
cache = db.Cache()
cache.number = unicode(self.num)
cache.data = unicode(msg, errors='ignore')
cache.index = index
cache.time = currDate
cache.save()
i = 0
while i*160 < len(msg) and i<MAX_TEXTS:
print "sending msg"
if self.user.freeMsg > 0:
self.user.freeMsg = self.user.freeMsg - 1
elif self.user.paidMsg > 0:
self.user.paidMsg = self.user.paidMsg - 1
else:
CLIENT.sms.messages.create(to=self.num, from_=TWILIO_NUM, body = "You have used up your texts. Buy more at www.textatron.com")
break
if i+1 >= MAX_TEXTS and len(msg) > (i+1)*160:
CLIENT.sms.messages.create(to=self.num, from_=TWILIO_NUM, body = msg[i*160:(i+1)*160-len(self.moreText)]+self.moreText)
#print self.msg[i*160:(i+1)*160-len(self.moreText)]+self.moreText
elif (i+1)*160 <= len(msg):
CLIENT.sms.messages.create(to=self.num, from_=TWILIO_NUM, body = msg[i*160:(i+1)*160])
#print self.msg[i*160:(i+1)*160]
else:
CLIENT.sms.messages.create(to=self.num, from_=TWILIO_NUM, body = msg[i*160:])
#print self.msg[i*160:]
self.user.save()
i = i + 1
#sleep 1.5 seconds
if i < len(msg):
time.sleep(1.5)
log('text', self.num+':'+str(unicode(msg, errors='ignore')))
| true |
e0198e56ca3cb61ad48fffe54fdf54dd0030fb96 | Python | allmaennitta/LibreOfficePy | /tests/yaml_parser_test.py | UTF-8 | 3,055 | 2.9375 | 3 | [] | no_license | import unittest
from unittest import TestCase
from hamcrest import *
from yamlparser.yamlparser import YamlParser
class YamlParserTest(TestCase):
    """Tests for YamlParser.parseOrgaUnit: YAML orga-chart fixtures are parsed
    into a node tree whose names/texts/positions are checked with hamcrest."""
    def test_titled_empty_box(self):
        # A body with a bare '- N.N' child parses to one node of type NoneType.
        ou = """
        ---
        -
         type: body
         name: Executive Board
         sub:
          - N.N
        """
        p = YamlParser()
        p.parseOrgaUnit(ou)
        assert_that(len(p.nodes), is_(1))
        assert_that(p.nodes[0].name, is_("Executive Board"))
        assert_that(len(p.nodes[0].nodes),is_(1))
        assert_that((p.nodes[0].nodes[0].type),is_("NoneType"))
    def test_titled_box_with_person(self):
        # Person children become sub-nodes whose text is the person's name.
        ou = """
        ---
        -
         type: body
         name: Executive Board
         sub:
          -
           type: person
           name: John Doe
          -
           type: person
           name: N.N.
        """
        p = YamlParser()
        p.parseOrgaUnit(ou)
        assert_that(len(p.nodes), is_(1))
        assert_that(p.nodes[0].name, is_("Executive Board"))
        assert_that(p.nodes[0].nodes[0].text, is_("John Doe"))
        assert_that(p.nodes[0].nodes[1].text, is_("N.N."))
    def test_titled_box_with_person_and_textrole(self):
        # A role flagged "is_text" renders as 'Position: Name' text.
        ou = """
        ---
        -
         type: body
         name: Executive Board
         sub:
          -
           type: person
           name: John Doe
          -
           type: person
           name: N.N.
          -
           type: role
           position: General
           name: John Doe
           flags: ["is_text"]
        """
        p = YamlParser()
        p.parseOrgaUnit(ou)
        assert_that(len(p.nodes), is_(1))
        assert_that(p.nodes[0].name, is_("Executive Board"))
        assert_that(p.nodes[0].nodes[0].text, is_("John Doe"))
        assert_that(p.nodes[0].nodes[2].text, is_("General: John Doe"))
    def test_titled_box_with_textrole_and_noderole(self):
        # A role without the "is_text" flag keeps a structured .position field.
        ou = """
        ---
        -
         type: body
         name: Executive Board
         sub:
          -
           type: person
           name: John Doe
          -
           type: person
           name: N.N.
          -
           type: role
           position: General
           name: John Doe
           flags: ["is_text"]
          -
           type: role
           position: Major
           name: John Doe
        """
        p = YamlParser()
        p.parseOrgaUnit(ou)
        assert_that(len(p.nodes), is_(1))
        assert_that(p.nodes[0].name, is_("Executive Board"))
        assert_that(p.nodes[0].nodes[0].text, is_("John Doe"))
        assert_that(p.nodes[0].nodes[2].text, is_("General: John Doe"))
        assert_that(p.nodes[0].nodes[3].position, is_("Major"))
    def test_titled_box_with_teams_and_professions(self):
        # Departments may mix roles, teams, and professions as children.
        ou = """
        ---
        -
         type: department
         name: Research & Development
         sub:
          -
           type: role
           position: Director
           name: John Doe
          -
           type: team
           name: Foo
          -
           type: team
           name: Bar
          -
           type: profession
           name: Chemist for Process Engineering
          -
           type: profession
           name: CAD-Designer
        """
        p = YamlParser()
        p.parseOrgaUnit(ou)
        assert_that(len(p.nodes), is_(1))
        assert_that(p.nodes[0].name, is_("Research & Development"))
        assert_that(p.nodes[0].nodes[1].text, is_("Foo"))
        assert_that(p.nodes[0].nodes[3].text, is_("Chemist for Process Engineering"))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true |
72ee1b70f1f392a7eea8904437851a852ef73365 | Python | svd2802/practice_python | /war/weapons/tests/wepaon_test.py | UTF-8 | 651 | 2.703125 | 3 | [] | no_license | import unittest
from weapons.weapon import Weapon
import sys
# NOTE(review): hard-coded absolute path, appended *after* the Weapon import
# above already resolved -- presumably only needed for 'exceptions'; confirm.
sys.path.append('C:/Users/narut/practice_python/war')
from exceptions import NotRealizedMethodError
class TestWeaponMethods(unittest.TestCase):
    """Base Weapon contract: default name plus unimplemented damage/attack."""
    def setUp(self):
        self.weapon = Weapon()
    def test_get_name(self):
        # The base weapon defaults to the name 'Stick'.
        self.assertEqual(self.weapon.name, 'Stick')
    def test_get_damage(self):
        # Bare attribute access raises, so damage is presumably a property
        # stub on the base class -- confirm against Weapon's definition.
        with self.assertRaises(NotRealizedMethodError):
            self.weapon.damage
    def test_attack(self):
        # attack() is likewise unimplemented on the base class.
        with self.assertRaises(NotRealizedMethodError):
            self.weapon.attack()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true |
3e85f39e25c4f23f903f1bd08400043893c79750 | Python | BYUCS452F2020/ForexDataDownloader | /UnitTests/MongoDAOTests/mongo_subscription_dao_test.py | UTF-8 | 3,106 | 2.578125 | 3 | [] | no_license | import unittest
from MongoDAO.mongo_subscription_dao import MongoSubscriptionDAO
# TODO: add more tests for failures, etc.
class MongoSubscriptionDaoTest(unittest.TestCase):
    """MongoSubscriptionDAO contract tests: each DAO call returns a
    (success/value, error_message) pair; invalid argument types yield a
    fixed error string.  NOTE(review): these appear to hit a live Mongo
    instance via MongoSubscriptionDAO() -- confirm before running in CI."""
    def setUp(self) -> None:
        self.mongo_subscription_dao = MongoSubscriptionDAO()
    def test_insert_new_subscription_user_id_fail(self):
        # user_id must be a string, not an int.
        success, error_message = self.mongo_subscription_dao.insert_new_subscription(58, 'Basic', 5.99)
        self.assertEqual(error_message, 'Invalid parameters used when trying to add a new subscription')
    def test_insert_new_subscription_subscription_type_fail(self):
        # subscription type must be one of the known tiers.
        success, error_message = self.mongo_subscription_dao.insert_new_subscription('123', 'blah', 5.99)
        self.assertEqual(error_message, 'Invalid parameters used when trying to add a new subscription')
    def test_insert_new_subscription_subscription_cost_fail(self):
        # cost must be numeric, not a string.
        success, error_message = self.mongo_subscription_dao.insert_new_subscription('123', 'Basic', '5.99')
        self.assertEqual(error_message, 'Invalid parameters used when trying to add a new subscription')
    def test_insert_new_subscription_success(self):
        success, error_message = self.mongo_subscription_dao.insert_new_subscription('123', 'Basic', 5.99)
        self.assertTrue(success)
    def test_update_subscription_user_id_fail(self):
        success, error_message = self.mongo_subscription_dao.update_subscription(58, 'Basic', 5.99)
        self.assertEqual(error_message, 'Invalid parameters used when trying to update a subscription')
    def test_update_subscription_subscription_type_fail(self):
        success, error_message = self.mongo_subscription_dao.update_subscription('123', 'blah', 5.99)
        self.assertEqual(error_message, 'Invalid parameters used when trying to update a subscription')
    def test_update_subscription_subscription_cost_fail(self):
        success, error_message = self.mongo_subscription_dao.update_subscription('123', 'Basic', '5.99')
        self.assertEqual(error_message, 'Invalid parameters used when trying to update a subscription')
    def test_update_subscription_success(self):
        # Insert, upgrade, then verify the bill reflects the new tier.
        success1, error_message1 = self.mongo_subscription_dao.insert_new_subscription('1234', 'Basic', 5.99)
        success2, error_message2 = self.mongo_subscription_dao.update_subscription('1234', 'Premium', 7.99)
        self.assertTrue(success1 and success2)
        monthly_bill, error_message3 = self.mongo_subscription_dao.get_monthly_bill('1234')
        self.assertEqual(monthly_bill, 7.99)
    def test_get_monthly_bill_user_id_fail(self):
        success, error_message = self.mongo_subscription_dao.get_monthly_bill(123)
        self.assertEqual(error_message, 'Invalid user ID; should be a string')
    def test_get_monthly_bill_success(self):
        success1, error_message1 = self.mongo_subscription_dao.insert_new_subscription('71', 'Premium', 15.99)
        monthly_bill, error_message2 = self.mongo_subscription_dao.get_monthly_bill('71')
        self.assertEqual(monthly_bill, 15.99)
if __name__ == '__main__':
unittest.main()
| true |
c4b42d2cfd23bf2dea0802297e2fec9d9c33d7a8 | Python | xxxfzxxx/cs412-coding-hw | /homework3.py | UTF-8 | 6,334 | 2.71875 | 3 | [] | no_license |
def get_db(file):
    """Read a sequence database: one '<...>' record per line.

    The text between the first '<' and the first '>' of each line is one
    sequence; every extracted sequence is echoed to stdout (kept from the
    original behaviour) and collected into the returned list.

    Fixes: the file handle was never closed (now a `with` block), and the
    line was needlessly exploded into a list of characters before
    searching — str.index/slicing do the same work directly.

    Raises ValueError if a line lacks '<' or '>'.
    """
    db = []
    with open(file, 'r') as f:
        for raw in f:
            start = raw.index('<') + 1
            end = raw.index('>')
            line = raw[start:end]
            print(line)
            db.append(line)
    return db
def is_frequent(patt, db, minsup):
    """True when `patt` occurs as a substring in at least `minsup`
    sequences of `db`."""
    support = sum(1 for line in db if patt in line)
    return support >= minsup
def generate_next(patt, db):
    """Return the unique one-character extensions of `patt` found in `db`.

    For every occurrence of `patt` in every sequence, the candidate
    `patt + next_char` is collected; occurrences at the very end of a
    line (which cannot be extended) are discarded.

    Bug fixed: the original removed the too-short candidates with
    `list.remove` while iterating the same list, which skips elements
    and can leak un-extendable candidates into the result (e.g.
    patt='a' over ['xa', 'ya'] wrongly yielded ['a']).
    """
    candidates = []
    for line in db:
        # All start offsets of `patt` in this line.
        starts = [n for n in range(len(line)) if line.find(patt, n) == n]
        for start in starts:
            candidate = line[start:start + len(patt) + 1]
            # Keep only genuine extensions (patt followed by one char).
            if len(candidate) > len(patt):
                candidates.append(candidate)
    return list(set(candidates))
def get_patt_cnt(patt, db):
    """Number of sequences in `db` that contain `patt` as a substring."""
    return sum(1 for line in db if patt in line)
def get_sequences(file, minsup):
    """Mine all frequent contiguous sub-sequences of the database in `file`.

    A pattern is frequent when at least `minsup` sequences contain it.
    Returns a dict mapping each frequent pattern to its support count,
    printing the dict first (kept from the original behaviour).

    Rewritten: the original hand-unrolled the search into 13 nested,
    copy-pasted loops, which silently capped pattern length and also kept
    a `F1` per-symbol counting pass whose counts were never read.  This
    depth-first recursion performs the same expansion without the
    arbitrary depth limit.
    """
    db = get_db(file)

    def _grow(patt, found):
        # Pre-order DFS: record `patt`, then try every frequent
        # 1-character extension of it.
        found.append(patt)
        for ext in generate_next(patt, db):
            if is_frequent(ext, db, minsup):
                _grow(ext, found)

    frequent = []
    # Same seed alphabet the original hard-coded.
    for symbol in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
        if is_frequent(symbol, db, minsup):
            _grow(symbol, frequent)

    result = {patt: get_patt_cnt(patt, db) for patt in frequent}
    print(result)
    return result
get_sequences('validation.txt', 2)
| true |
416971d443b827b61edc871220d95d10f3ae8dcb | Python | xSAVIKx/ISO-IEC-11770-3 | /key_derivation/IEEE_P1363.py | UTF-8 | 678 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | import hashlib
__author__ = 'Iurii Sergiichuk'
'''
The IEEE P1363 key derivation function
We assume that we use SHA-512 hash function
'''
def derivate_key(shared_secret, *key_derivation_parameters):
    """
    Key derivation function of IEEE P1363 (SHA-512 based).

    Concatenates the shared secret with every derivation parameter and
    returns the hex digest of the SHA-512 hash of the result.

    Bug fixed: on Python 3, hashlib's update() requires a bytes-like
    object; the original passed the concatenated str and raised
    TypeError.  The material is now UTF-8 encoded before hashing.

    :arg shared_secret: shared secret in string format
    :arg key_derivation_parameters: list of possible key derivation parameters in string format
    :type shared_secret: str
    :type key_derivation_parameters: list[str]
    :rtype : str
    """
    material = shared_secret + ''.join(key_derivation_parameters)
    digest = hashlib.sha512()
    digest.update(material.encode('utf-8'))
    return digest.hexdigest()
| true |
6c1a77d74b76fe19a4196de8fb90da66edd51974 | Python | ROSAF2/dev4-assignment-test | /problem1/problem1.py | UTF-8 | 948 | 3.71875 | 4 | [] | no_license | # Joining path in order to import modules from other exercises/problems
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
# Importing the LinkedList class from problem5
from problem5.problem5 import LinkedList
class Stack:
    """LIFO stack backed by the problem5 LinkedList."""

    def __init__(self):
        self.stack = LinkedList()

    def push(self, item):
        """Place `item` on top of the stack."""
        self.stack.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.stack.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.stack.get(-1)

    def is_empty(self):
        """True when the stack holds no items."""
        return not len(self.stack)
def main():
    """Demo: push three course names, pop one, then drain the stack."""
    courses = Stack()
    for name in ('Introductory App Dev Concepts',
                 'Intermediate App Dev Concepts',
                 'Advanced App Dev Concepts'):
        courses.push(name)
    courses.pop()
    print(f'{courses.peek()} is at the top of the stack')
    while not courses.is_empty():
        print("The stack is not empty")
        courses.pop()
if __name__ == '__main__':
main()
| true |
69e6c71247432f9e85f3e4c70234115c70ce59f7 | Python | revlis975/PDF-Automation-OCR | /counter.py | UTF-8 | 105 | 2.84375 | 3 | [] | no_license | class Counter():
def __init__(self) -> None:
    # Counter keeps no state; nothing to initialise.
    pass
def count(self, c):
return c+1 | true |
313704fb2dcee1c29b9b829079e472a630ac5dab | Python | Scarletcurve188/Crypto-Encryption-and-Decryption-Tools | /CaesarCipher/CaesarCipher.py | UTF-8 | 2,874 | 3.6875 | 4 | [] | no_license | # Tolga AKKAPULU
# /tolgaakkapulu
# -*- coding:utf-8 -*-
print("""
############################### Caesar Cipher ####################################
# Caesar cipher is a type of substitution cipher in which each #
# letter in the plaintext is replaced by a letter some fixed number #
# of positions down the alphabet. #
#--------------------------------------------------------------------------------#
# Tolga AKKAPULU | Github : /tolgaakkapulu #
##################################################################################""")
print(" ________________________________________________________________________________")
print("| |")
print("| Alphabet : |a|b|c|d|e|f|g|h|i|j|k |l |m |n |o |p |q |r |s |t |u |v |w |x |y |z |")
print("| Number : |0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|")
print("|________________________________________________________________________________|\n")
print("++++++++++++++++++++++++++++++++++++++")
print("+ E(p) => c = p + key mod(26) +")
print("+ D(c) => p = c - key mod(26) +")
print("++++++++++++++++++++++++++++++++++++++")
print("\n---------------------------------")
# Letter<->index lookup tables for the 26 uppercase letters (A=0 ... Z=25).
L2I = dict(zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ",range(26)))
I2L = dict(zip(range(26),"ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
def cipher(key, plaintext):
    """Caesar-encrypt: shift every A-Z letter of the upper-cased text
    forward by `key` places (mod 26); other characters pass through."""
    out = []
    for ch in plaintext.upper():
        out.append(I2L[(L2I[ch] + int(key)) % 26] if ch.isalpha() else ch)
    return "".join(out)
def decipher(key, ciphertext):
    """Caesar-decrypt: shift every A-Z letter of the upper-cased text
    backward by `key` places (mod 26); other characters pass through."""
    out = []
    for ch in ciphertext.upper():
        out.append(I2L[(L2I[ch] - int(key)) % 26] if ch.isalpha() else ch)
    return "".join(out)
def main():
    """Interactive menu loop: encrypt/decrypt until Ctrl-C / Ctrl-D."""
    while True:
        try:
            print(" (1) Encrypt\t (2) Decrypt")
            print("---------------------------------")
            mode = input("Mode (1 or 2) : ")
            if mode == '1':
                plaintext = input("Plain Text : ")
                key = input("Key (Rotation) : ")
                print("Cipher Text :", cipher(key, plaintext))
                print("\n---------------------------------")
            elif mode == '2':
                ciphertext = input("Cipher Text : ")
                key = input("Key (Rotation) : ")
                print("Plain Text :", decipher(key, ciphertext))
                print("\n---------------------------------")
            else:
                print("\nIncorrect choice...\n")
                print("---------------------------------")
        except ValueError:
            # int(key) failed inside cipher()/decipher().
            print("\nInvalid key value...")
            print("\n---------------------------------")
        except (KeyboardInterrupt, EOFError):
            print("\n\nGood bye...")
            break
if __name__ == '__main__':
main()
| true |
4d0053ccfe8c9275265dc300c16351a0be8fa7ba | Python | kadimakipp/network | /net/FPN.py | UTF-8 | 4,393 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: kipp
@contact: kaidma.kipp@gmail.com
@site:
@software: PyCharm
@file: FPN.py
@time: 2019/11/25 下午5:14
# Shallow men believe in luck.
Strong men believe in cause and effect.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
[FPN](https://arxiv.org/pdf/1612.03144.pdf)
[ResNet50](../net/ResNet.py)
MayBe
[Spatial Aware Pooling](https://arxiv.org/pdf/1911.07217.pdf)
"""
import numpy as np
import torch
import torch.nn as nn
from net.ResNet import BottleneckBlock
class CNNInput(nn.Sequential):
    """Stem block: stride-2 conv -> BatchNorm -> ReLU (halves H and W).

    NOTE(review): padding is fixed at 3, which only preserves the
    "half-size" output for kernel=7 — confirm other kernel sizes are
    never used.
    """

    def __init__(self, inc, ouc, kernel=7):
        layers = (
            nn.Conv2d(inc, ouc, kernel_size=kernel, stride=2, padding=3),
            nn.BatchNorm2d(ouc),
            nn.ReLU(inplace=True),
        )
        super().__init__(*layers)
class ResNet50(nn.Module):
    """ResNet-50-style backbone that returns all four stage feature maps."""

    @staticmethod
    def _make_stage(inc, ouc, blocks):
        """Stack `blocks` bottlenecks; only the first changes channels."""
        stage = [BottleneckBlock(inc, ouc)]
        stage += [BottleneckBlock(ouc, ouc) for _ in range(blocks - 1)]
        return nn.Sequential(*stage)

    def __init__(self, inc):
        super(ResNet50, self).__init__()
        self.head = CNNInput(inc, 64)
        # Classic 3-4-6-3 block layout of ResNet-50.
        self.stage_one = self._make_stage(64, 256, 3)
        self.stage_two = self._make_stage(256, 512, 4)
        self.stage_three = self._make_stage(512, 1024, 6)
        self.stage_four = self._make_stage(1024, 2048, 3)

    def forward(self, x):
        stem = self.head(x)
        c2 = self.stage_one(stem)
        c3 = self.stage_two(c2)
        c4 = self.stage_three(c3)
        c5 = self.stage_four(c4)
        return c2, c3, c4, c5
class ConvFusionOne(nn.Sequential):
    """1x1 lateral conv -> BatchNorm -> ReLU, projecting to `ouc` channels."""

    def __init__(self, inc, ouc=256):
        super().__init__(
            nn.Conv2d(inc, ouc, kernel_size=1, stride=1),
            nn.BatchNorm2d(ouc),
            nn.ReLU(inplace=True),
        )
class ConvFusionThree(nn.Sequential):
    """3x3 smoothing conv (padding keeps spatial size) -> BatchNorm -> ReLU."""

    def __init__(self, inc=256, ouc=256):
        super().__init__(
            nn.Conv2d(inc, ouc, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(ouc),
            nn.ReLU(inplace=True),
        )
class FPNLayerLast(nn.Module):
    """Top FPN level: 1x1 lateral projection plus 3x3 smoothing
    (there is no higher-level map to merge in)."""

    def __init__(self, inc):
        super(FPNLayerLast, self).__init__()
        self.fusion_one = ConvFusionOne(inc)
        self.fusion_three = ConvFusionThree()

    def forward(self, cur):
        lateral = self.fusion_one(cur)
        smoothed = self.fusion_three(lateral)
        return lateral, smoothed
class FPNLayer(nn.Module):
    """Intermediate FPN level: lateral projection merged with the
    2x-upsampled map from the level above, then 3x3 smoothing."""

    def __init__(self, inc):
        super(FPNLayer, self).__init__()
        self.fusion_one = ConvFusionOne(inc)
        self.fusion_three = ConvFusionThree()
        self.up = nn.Upsample(scale_factor=2)

    def forward(self, cur, down):
        # `merged` replaces the original local `next`, which shadowed
        # the builtin of the same name.
        merged = self.fusion_one(cur) + self.up(down)
        smoothed = self.fusion_three(merged)
        return merged, smoothed
class FPN(nn.Module):
    """Feature Pyramid Network over a ResNet-50 backbone; forward()
    returns five pyramid outputs, finest first."""

    def __init__(self, inc):
        super(FPN, self).__init__()
        self.resnet50 = ResNet50(inc=inc)
        self.fpn_one = FPNLayer(256)
        self.fpn_two = FPNLayer(512)
        self.fpn_three = FPNLayer(1024)
        self.fpn_four = FPNLayerLast(2048)
        self.max_pool = nn.MaxPool2d(kernel_size=1, stride=2)

    def forward(self, x):
        c2, c3, c4, c5 = self.resnet50(x)
        # Top-down pathway: start at the deepest map, merge downwards.
        p5, out5 = self.fpn_four(c5)
        out6 = self.max_pool(p5)  # extra coarse level from the top lateral
        p4, out4 = self.fpn_three(c4, p5)
        p3, out3 = self.fpn_two(c3, p4)
        _, out2 = self.fpn_one(c2, p3)
        return out2, out3, out4, out5, out6
def main():
    """Smoke test: push a random batch through the FPN and print the
    shape of every pyramid output."""
    # NOTE(review): `input` shadows the builtin of the same name.
    input = torch.randn(2,3,256,256)
    # res50 = ResNet50(3)
    # output = res50(input)
    # for out in output:
    #     print(out.shape)
    fpn = FPN(3)
    output = fpn(input)
    for out in output:
        print(out.shape)
if __name__ == "__main__":
import fire
fire.Fire(main) | true |
e27e565275ba447a8541bb96f423f66bcd3b64a8 | Python | dooking/CodingTest | /Algorithm/정렬.py | UTF-8 | 1,430 | 3.28125 | 3 | [] | no_license | def insert_sort(A):
for i in range(1,len(A)):
for j in range(i,0,-1):
if A[j - 1] > A[j]:
A[j - 1], A[j] = A[j], A[j - 1]
def select_sort(A):
    """In-place selection sort (ascending); returns None.

    Bug fixed: the original compared A[i] (the slot being filled) with
    A[j] while scanning for the minimum, instead of A[min_index], so it
    remembered the *last* element smaller than A[i], not the smallest —
    e.g. [3, 1, 2] came back as [2, 1, 3].
    """
    for i in range(len(A) - 1):
        min_index = i
        for j in range(i + 1, len(A)):
            if A[j] < A[min_index]:
                min_index = j
        A[i], A[min_index] = A[min_index], A[i]
def bubble_sort(A):
    """In-place bubble sort (ascending); returns None."""
    n = len(A)
    for sweep in range(n - 1):
        # After each sweep the largest remaining item sits at the end.
        for k in range(n - sweep - 1):
            if A[k] > A[k + 1]:
                A[k], A[k + 1] = A[k + 1], A[k]
def Quick_Sort(data):
    """Return a sorted copy of `data` (first-element-pivot quicksort);
    lists shorter than 2 are returned as-is (same object)."""
    if len(data) < 2:
        return data
    pivot, rest = data[0], data[1:]
    smaller = [item for item in rest if item < pivot]
    larger_or_equal = [item for item in rest if item >= pivot]
    return Quick_Sort(smaller) + [pivot] + Quick_Sort(larger_or_equal)
def merge_sort(array):
    """Sort `array` in place via merge sort and return it.

    The halves are sorted recursively on slice copies, then merged back
    into `array` itself.
    """
    if len(array) <= 1:
        return array
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
        k += 1
    # At most one half still has a tail; splice it onto the end.
    array[k:] = left[i:] + right[j:]
    return array
b6bf9f8aaaf1ed806081c0cff0591fade201d670 | Python | greenca/advent-of-code | /2017/day4.py | UTF-8 | 1,005 | 3.53125 | 4 | [] | no_license | def isValid(pp):
if pp.find(' ') == -1:
return False
words = pp.split()
for i, word in enumerate(words):
if word in words[i+1:]:
return False
return True
print isValid('aa bb cc dd ee') == True
print isValid('aa bb cc dd aa') == False
print isValid('aa bb cc dd aaa') == True
def isStricter(pp):
    """Stricter passphrase check: valid when there are at least two words
    and no two words are anagrams of each other.

    Bug fixed: the original did `map(sorted, ...)` and then sliced the
    result; on Python 3 `map` returns an iterator, so `words[i+1:]`
    raised TypeError.  Sorted-letter tuples are hashable, so a set can
    do the duplicate check in one pass.
    """
    if ' ' not in pp:
        return False
    signatures = [tuple(sorted(word)) for word in pp.split()]
    return len(set(signatures)) == len(signatures)
print isStricter('abcde fghij') == True
print isStricter('abcde xyz ecdab') == False
print isStricter('a ab abc abd abf abj') == True
print isStricter('iiii oiii ooii oooi oooo') == True
print isStricter('oiii ioii iioi iiio') == False
validCount = 0
strictCount = 0
with open('day4_input.txt') as f:
for row in f:
if isValid(row):
validCount += 1
if isStricter(row):
strictCount += 1
print validCount
print strictCount
| true |
f1e468098c74d51c49bd3b5afeb55f70fcd33de8 | Python | PyMLGame/pymlmario | /camera.py | UTF-8 | 1,796 | 3.53125 | 4 | [
"MIT"
] | permissive | # coding: utf-8
class Camera:
    """Scrolling camera with a central dead zone.

    The camera only moves when the tracked position leaves the dead
    zone; it is then clamped to the map and the dead zone is re-centred.
    Coordinates may become floats because the dead zone is centred with
    a true division.
    """

    def __init__(self, map_width, map_height, width=40, height=16,
                 position=(0, 0), dz_width=12, dz_height=6):
        self.width = width
        self.height = height
        self._map_width = map_width
        self._map_height = map_height
        self.x = 0
        self.y = 0
        self._deadzone_width = dz_width
        self._deadzone_height = dz_height
        self._deadzone_x = 0
        self._deadzone_y = 0
        self.update_deadzone()
        self.update(position)

    def check_camera_boundaries(self):
        """Clamp the camera so it never shows anything outside the map."""
        self.x = min(max(self.x, 0), self._map_width - self.width)
        self.y = min(max(self.y, 0), self._map_height - self.height)

    def update_deadzone(self):
        """Re-centre the dead zone inside the current camera viewport."""
        self._deadzone_x = self.x + (self.width - self._deadzone_width) / 2
        self._deadzone_y = self.y + (self.height - self._deadzone_height) / 2

    def update(self, position):
        """Track `position`; scroll only when it escapes the dead zone."""
        px, py = position[0], position[1]
        moved = False
        if px < self._deadzone_x:
            self.x -= self._deadzone_x - px
            moved = True
        elif px > self._deadzone_x + self._deadzone_width:
            self.x += px - self._deadzone_x - self._deadzone_width
            moved = True
        if py < self._deadzone_y:
            self.y -= self._deadzone_y - py
            moved = True
        elif py > self._deadzone_y + self._deadzone_height:
            self.y += py - self._deadzone_y - self._deadzone_height
            moved = True
        if moved:
            self.check_camera_boundaries()
            self.update_deadzone()
| true |
fd638627f40b8529819621985418841132e7cb64 | Python | ray-project/ray | /rllib/utils/replay_buffers/reservoir_replay_buffer.py | UTF-8 | 4,522 | 2.859375 | 3 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | from typing import Any, Dict
import random
# Import ray before psutil will make sure we use psutil's bundled version
import ray # noqa F401
import psutil # noqa E402
from ray.rllib.utils.annotations import ExperimentalAPI, override
from ray.rllib.utils.replay_buffers.replay_buffer import (
ReplayBuffer,
warn_replay_capacity,
)
from ray.rllib.utils.typing import SampleBatchType
# __sphinx_doc_reservoir_buffer__begin__
@ExperimentalAPI
class ReservoirReplayBuffer(ReplayBuffer):
    """This buffer implements reservoir sampling.

    The algorithm has been described by Jeffrey S. Vitter in "Random sampling
    with a reservoir".
    """

    def __init__(
        self, capacity: int = 10000, storage_unit: str = "timesteps", **kwargs
    ):
        """Initializes a ReservoirBuffer instance.

        Args:
            capacity: Max number of timesteps to store in the FIFO
                buffer. After reaching this number, older samples will be
                dropped to make space for new ones.
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored.
        """
        ReplayBuffer.__init__(self, capacity, storage_unit)
        # Total number of add calls ever made; this is the "t" of
        # Vitter's Algorithm R and drives the acceptance probability.
        self._num_add_calls = 0
        # Number of stored items that have been overwritten so far.
        self._num_evicted = 0

    @ExperimentalAPI
    @override(ReplayBuffer)
    def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
        """Add a SampleBatch of experiences to self._storage.

        An item consists of either one or more timesteps, a sequence or an
        episode. Differs from add() in that it does not consider the storage
        unit or type of batch and simply stores it.

        Args:
            item: The batch to be added.
            ``**kwargs``: Forward compatibility kwargs.
        """
        self._num_timesteps_added += item.count
        self._num_timesteps_added_wrap += item.count

        # Update add counts.
        self._num_add_calls += 1

        # Update our timesteps counts.
        # NOTE(review): the timestep counter was already incremented above,
        # so eviction starts one batch before `capacity` timesteps are
        # actually stored — confirm this is intended.
        if self._num_timesteps_added < self.capacity:
            self._storage.append(item)
            self._est_size_bytes += item.size_bytes()
        else:
            # Eviction of older samples has already started (buffer is "full")
            self._eviction_started = True
            # Algorithm R: the new item replaces a stored one only when the
            # uniformly drawn index lands inside the reservoir.
            idx = random.randint(0, self._num_add_calls - 1)
            if idx < len(self._storage):
                self._num_evicted += 1
                self._evicted_hit_stats.push(self._hit_count[idx])
                self._hit_count[idx] = 0
                # This is a bit of a hack: ReplayBuffer always inserts at
                # self._next_idx
                self._next_idx = idx
                # NOTE(review): the push/reset pair below repeats the two
                # lines above — looks like duplicated code; confirm.
                self._evicted_hit_stats.push(self._hit_count[idx])
                self._hit_count[idx] = 0
                # Keep the byte-size estimate consistent with the swap.
                item_to_be_removed = self._storage[idx]
                self._est_size_bytes -= item_to_be_removed.size_bytes()
                self._storage[idx] = item
                self._est_size_bytes += item.size_bytes()

        assert item.count > 0, item
        warn_replay_capacity(item=item, num_items=self.capacity / item.count)

    @ExperimentalAPI
    @override(ReplayBuffer)
    def stats(self, debug: bool = False) -> dict:
        """Returns the stats of this buffer.

        Args:
            debug: If True, adds sample eviction statistics to the returned
                stats dict.

        Returns:
            A dictionary of stats about this buffer.
        """
        data = {
            "num_evicted": self._num_evicted,
            "num_add_calls": self._num_add_calls,
        }
        # Merge the reservoir-specific counters into the parent's stats.
        parent = ReplayBuffer.stats(self, debug)
        parent.update(data)
        return parent

    @ExperimentalAPI
    @override(ReplayBuffer)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state.

        Returns:
            The serializable local state.
        """
        parent = ReplayBuffer.get_state(self)
        parent.update(self.stats())
        return parent

    @ExperimentalAPI
    @override(ReplayBuffer)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores all local state to the provided `state`.

        Args:
            state: The new state to set this buffer. Can be
                obtained by calling `self.get_state()`.
        """
        self._num_evicted = state["num_evicted"]
        self._num_add_calls = state["num_add_calls"]
        ReplayBuffer.set_state(self, state)
# __sphinx_doc_reservoir_buffer__end__
| true |
2217f327a7ce539f86563f2ab6bae59137939ce6 | Python | prositen/advent-of-code | /python/src/y2015/dec06.py | UTF-8 | 4,086 | 3.515625 | 4 | [] | no_license | from collections import defaultdict
import re
__author__ = 'anna'
COMMAND = re.compile('(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)')
def iter_pos(start_x, start_y, end_x, end_y):
    """
    Yield every (x, y) position in the inclusive rectangle
    (start_x, start_y) .. (end_x, end_y), x varying slowest.
    """
    for dx in range(end_x - start_x + 1):
        for dy in range(end_y - start_y + 1):
            yield (start_x + dx, start_y + dy)
def iter_instructions(instructions):
    """
    Parse each instruction line against COMMAND, skipping non-matches.
    :return: yields (command, start_x, start_y, end_x, end_y)
    """
    for line in instructions:
        match = re.match(COMMAND, line)
        if not match:
            continue
        yield (match.group(1), int(match.group(2)), int(match.group(3)),
               int(match.group(4)), int(match.group(5)))
def turn_off_lighting(lamps, start_x, start_y, end_x, end_y):
    """Switch off every lamp in the inclusive rectangle."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = False
def turn_on_lighting(lamps, start_x, start_y, end_x, end_y):
    """Switch on every lamp in the inclusive rectangle."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = True
def toggle_lighting(lamps, start_x, start_y, end_x, end_y):
    """Flip every lamp in the inclusive rectangle: on->off, off->on."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = not lamps[coord]
def lightning(instructions):
    """
    Follow Santa's on/off/toggle instructions (puzzle part 1).
    :param instructions: iterable of instruction lines
    :return: number of lamps left lit
    """
    lamps = defaultdict(bool)
    # The regex in iter_instructions only yields these three commands.
    actions = {
        'turn off': turn_off_lighting,
        'turn on': turn_on_lighting,
        'toggle': toggle_lighting,
    }
    for command, sx, sy, ex, ey in iter_instructions(instructions):
        actions[command](lamps, sx, sy, ex, ey)
    return sum(1 for lit in lamps.values() if lit)
def turn_off_brightness(lamps, start_x, start_y, end_x, end_y):
    """Dim each lamp in the rectangle by one notch, floored at 0."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = max(lamps[coord] - 1, 0)
def turn_on_brightness(lamps, start_x, start_y, end_x, end_y):
    """Brighten each lamp in the rectangle by one notch."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = lamps[coord] + 1
def toggle_brightness(lamps, start_x, start_y, end_x, end_y):
    """'Toggle' in part 2 means: brighten each lamp by two notches."""
    for coord in iter_pos(start_x, start_y, end_x, end_y):
        lamps[coord] = lamps[coord] + 2
def brightness(instructions):
    """
    Follow Santa's instructions with brightness semantics (puzzle part 2).
    :param instructions: iterable of instruction lines
    :return: the total brightness of all lamps
    """
    lamps = defaultdict(int)
    actions = {
        'turn off': turn_off_brightness,
        'turn on': turn_on_brightness,
        'toggle': toggle_brightness,
    }
    for command, sx, sy, ex, ey in iter_instructions(instructions):
        actions[command](lamps, sx, sy, ex, ey)
    return sum(lamps.values())
if __name__ == '__main__':
with open('../../../data/2015/input.6.txt', 'r') as fh:
count = lightning(fh.readlines())
fh.seek(0)
total_brightness = brightness(fh.readlines())
print("Lamps lit: {count}. Brightness {brightness}".format(count=count,
brightness=total_brightness))
| true |
159dfba965178d5d8330ac100329f99adc4b1aa5 | Python | mutoulovegc/gaochuang123 | /桌面/day13/餐馆1.py | UTF-8 | 519 | 3 | 3 | [] | no_license | class Restaurant:
def p(self,restaurant_name,cuisine_type):
    """Initialiser-style setter: store the restaurant's name and cuisine type."""
    self.name = restaurant_name
    self.type = cuisine_type
def describe_restaurant(self):
    """Print a welcome line and the restaurant's name and cooking style.

    Console text is intentionally Chinese: "welcome", then
    "<name>'s cooking method is <type>".
    """
    print("欢迎光临")
    print("%s的烹饪方法是"%self.name,"%s"%self.type)
def open_restaurant(self):
    """Print that the restaurant is currently open (Chinese console text)."""
    print("餐馆正在营业")
    #print(a.name,a.type)
class Admin(Restaurant):
    """Restaurant subclass; a() initialises an (otherwise unused) flavour list."""
    def a(self):
        # Start with no flavours recorded.
        self.flavors = []
# Demo: build an Admin, set its name/type via p(), and exercise the printers.
gao = Admin()
gao.p("高闯","煮")
gao.describe_restaurant()
gao.open_restaurant()
# NOTE(review): this prints the repr of a brand-new Admin instance —
# probably meant print(gao).
print(Admin())
| true |
4027d5d786b8dd31154931a21b27a938d1fcd7c6 | Python | mannyfin/aioget | /core/async_queue.py | UTF-8 | 2,080 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | import asyncio
from typing import Any, Iterable, Callable
from configs.base.consts import ASYNC_SLEEP
from core import logger
def get_queue(maxsize: int = 0) -> asyncio.Queue:
    """Build and return a FIFO asyncio.Queue.

    Args:
        maxsize: maximum number of items allowed; values <= 0 mean the
            queue is unbounded.

    Returns:
        A new asyncio.Queue instance.
    """
    return asyncio.Queue(maxsize)
async def set_onto_queue(queue: asyncio.Queue, item: Any):
    """Await a put of `item` onto `queue`, then hand the queue back.

    Args:
        queue: the target asyncio.Queue
        item: anything to enqueue

    Returns:
        The same queue, for chaining.
    """
    await queue.put(item)
    return queue
async def get_from_queue(queue: asyncio.Queue) -> Any:
    """Await and return the next item from `queue`.

    Args:
        queue: the asyncio.Queue to pop from

    Returns:
        The dequeued item.
    """
    item = await queue.get()
    return item
async def set_many_onto_queue(queue: asyncio.Queue, iterable: Iterable):
    """Schedule one put per element of `iterable`, wait for all of them
    to finish, and return the queue.

    Args:
        queue: the target asyncio.Queue
        iterable: the items to enqueue

    Returns:
        The same queue, for chaining.
    """
    pending = [asyncio.create_task(set_onto_queue(queue, element))
               for element in iterable]
    await asyncio.gather(*pending)
    return queue
async def worker(queue: asyncio.Queue, next_queue: asyncio.Queue, func: Callable, *args, **kwargs):
    """Poll `queue` forever; whenever it is non-empty, invoke `func` with
    both queues (awaiting it when it is a coroutine function).

    NOTE(review): `basiclogger` is only created under __main__ below, so
    the except branch raises NameError when this module is imported —
    confirm intended usage.
    """
    while True:
        # Throttle the polling loop.
        await asyncio.sleep(ASYNC_SLEEP)
        try:
            if not queue.empty():
                # print(queue._unfinished_tasks)
                # item = await get_from_queue(queue)
                if asyncio.iscoroutinefunction(func):
                    await func(queue, next_queue, *args, **kwargs)
                else:
                    func(queue, next_queue, *args, **kwargs)
        except Exception as exc:
            basiclogger.error(exc.__repr__())
        # else:
        #     break
    # return next_queue
if __name__ == '__main__':
basiclogger = logger.rabbit_logger(__name__)
| true |
e217cb1a7d91db39ecff5bf7dddb6680b8b8227b | Python | 1Yasmin/compis3 | /scannerGenerator.py | UTF-8 | 3,339 | 2.859375 | 3 | [] | no_license |
from Automata import *
from Thompson import *
from DFA import *
def nextState(s, sim, trans):
    """Follow the first transition from state `s` that accepts symbol `sim`.

    Each entry of `trans` is indexed as (source, target, symbol_set, ...).
    Returns (target, 0) on success, or (False, s) when no transition fires.
    """
    for edge in trans:
        if edge[0] == s and sim in edge[2]:
            return edge[1], 0
    return False, s
def getSimTok(dic):
    """Collect the symbol sets of every automaton in `dic` into one list.

    NOTE(review): relies on `joinList` coming from one of the star
    imports (Automata/Thompson/DFA) and on each dict value exposing a
    `.symbols` attribute — confirm against those modules.
    """
    simbols = []
    for token in dic:
        simbols = joinList(simbols, dic[token].symbols)
    return simbols
def lookChar(char_list, ch):
    """True when `ch` occurs inside any of the character groups in
    `char_list`."""
    return any(ch in group for group in char_list)
def simulacion(Aut, exp, simbols):
    """Run automaton `Aut` over the string `exp`.

    Returns a 4-tuple (accepted, known_char, reached_final, consumed):
    full acceptance is (True, True, True, len(exp)); on a dead end the
    tuple reports whether the failing character belongs to the known
    alphabet and whether the last good state was accepting.

    NOTE(review): `s != False` also rejects a state numbered 0, because
    0 != False is falsy in Python — confirm state ids never start at 0.
    """
    s = Aut.initial_state[0]
    trans = Aut.transitions
    c = 0
    while(c < len(exp)):
        s, d = nextState(s,exp[c],trans)
        if(s != False):
            c += 1
        else:
            # Dead end: classify the offending character and whether the
            # last reachable state was accepting.
            if lookChar(simbols, exp[c]):
                pchar = True
            else:
                pchar = False
            if(d in Aut.final_states):
                return False, pchar, True, c
            else:
                return False, pchar, False, c
    if(s in Aut.final_states):
        return True, True, True, c
    else:
        return False, False, False, c
def simulacionAutomaton(Aut_dic, except_tokens, exp):
    """Tokenize `exp` by trying every automaton in `Aut_dic` in turn.

    Scans the input from `start`, simulating the automaton for the
    current token type; matched lexemes are appended under that token's
    key in `tok_finales`.  `tok_count` cycles through the token types
    when the current one fails.  Heavy debug printing is kept as-is.

    NOTE(review): `except_tokens` is accepted but never used — confirm.
    """
    tok_finales = {}
    simbols = getSimTok(Aut_dic)
    start = 0
    token_test = []
    # One result bucket per token type, in dict-iteration order.
    for a in Aut_dic:
        token_test.append(a)
        tok_finales[a] = []
    print(exp)
    print(tok_finales)
    tok_count = 0
    while(start < len(exp)):
        print("New iteration:", start, len(exp))
        if (tok_count < len(token_test)):
            # Simulate the current token's automaton on the rest of the input.
            pertenece, chars, fstate, lenC = simulacion(Aut_dic[token_test[tok_count]], exp[slice(start, len(exp))], simbols)
            print("*****Results*****\n", "tok_count",tok_count ,"\npertenece: ", pertenece, "\n chars:", chars, "\n fstate:", fstate, "\n")
            if(pertenece == True):
                # Whole remaining input accepted: record it and restart
                # from the first token type.
                print("Easy Pertenece")
                ls = tok_finales[token_test[tok_count]]
                ls.append([exp[slice(start, start+lenC)]])
                tok_finales[token_test[tok_count]] = ls
                start += lenC
                tok_count = 0
            else:
                #print("la exp:", exp[(start+lenC)])
                #print(chars, fstate)
                if(fstate == True):
                    # Partial match that ended in an accepting state:
                    # keep the consumed prefix as a token.
                    ls = tok_finales[token_test[tok_count]]
                    ls.append([exp[slice(start, start+lenC)]])
                    tok_finales[token_test[tok_count]] = ls
                    print("True and True", tok_finales, start, lenC, exp[slice(start, start+lenC)])
                    start += lenC
                    print("*****start****", start)
                    tok_count = 0
                    # Skip trailing whitespace control characters.
                    if (exp[start+lenC] == "\t" or exp[start+lenC] == "\n" ):
                        print("With slashes: ", exp[start+lenC+1])
                        start += lenC+1
                        tok_count = 0
                        print("*****start1****", start)
                elif(chars == True and fstate == False):
                    # Known character, wrong token type: try the next one.
                    tok_count += 1
                    print("*****start2****", start, tok_count)
                elif(chars == False and fstate == False):
                    # Unknown character: report and skip past it.
                    print("Error found")
                    tok_count = 0
                    start += lenC+1
                    print("*****start3****", start)
    return tok_finales
00234498c63a4fd72b6506e1925a177a48f6cce4 | Python | legerM/database | /insert.py | UTF-8 | 2,179 | 2.8125 | 3 | [] | no_license | import read as data
import mysql.connector
def ajout_plante():
    """Prompt (in French) for a new plant's fields and INSERT the row.

    Uses the module-level `mycursor`/`datab` created under __main__;
    values are bound as query parameters, then the table is re-listed
    and the transaction committed.
    """
    newid = input(" quels id voulez vous ajouter ? ")
    newname = input("quel plante voulez vous ajouter ? ")
    newindication = input("quel indication preconisez vous ? ")
    newutilisation = input("quelle partie est utile ? ")
    newprice = input("quel est le prix de cette plante ? ")
    famille_id =input("quels est le famille id ?")
    sql = "INSERT INTO plante (id,nom,indication,partie_utilisee,prix,famille_id) VALUES (%s, %s, %s, %s, %s, %s)"
    mycursor.execute(sql,(newid, newname,newindication,newutilisation, newprice, famille_id))
    data.get_plantes(mycursor)
    datab.commit()
def update():
    """One-off data repair: fix the misspelling 'feuiles' -> 'feuilles'
    in partie_utilisee, then re-list the table and commit."""
    updatesql="UPDATE plante SET partie_utilisee = 'feuilles' WHERE partie_utilisee = 'feuiles'"
    mycursor.execute(updatesql)
    data.get_plantes(mycursor)
    datab.commit()
def search_plantes():
    """Prompt for a name fragment and print every plant whose `nom`
    contains it (MySQL INSTR), using a bound query parameter."""
    search=input("quelle plante voulez vous ?")
    mycursor= datab.cursor()
    sql = "SELECT * FROM plante WHERE INSTR (nom, %s) "
    mycursor.execute(sql,(search,))
    myresult = mycursor.fetchall()
    for x in myresult:
        print(x)
def del_plantes():
    """Prompt (in French) for a plant id and DELETE that row, then
    re-list the table and commit.

    Security fix: the id was previously str.format()-ed straight into
    the SQL text, which allowed SQL injection from the prompt; it is
    now passed as a bound query parameter, consistent with the other
    queries in this module.
    """
    supp_id = input("quelle est le numero a supprimer ? ")
    supp = "DELETE FROM plante WHERE id = %s"
    mycursor.execute(supp, (supp_id,))
    data.get_plantes(mycursor)
    datab.commit()
def main():
    """French console menu loop: L=list, A=add, M=update, S=delete,
    Q=quit.  Closes the DB connection on exit.
    """
    while True:
        choix_utilisateur=input("\n que voulez vous faire ? L pour lister ,A pour Ajouter , M pour modifier ,S pour supprimer ,Q pour quitter : ").upper()
        if choix_utilisateur == "A":
            ajout_plante()
        elif choix_utilisateur == "M":
            update()
        elif choix_utilisateur == "L":
            data.get_plantes(mycursor)
        elif choix_utilisateur == "S":
            del_plantes()
        elif choix_utilisateur == "R":
            # NOTE(review): "R" (search) works but is not advertised in
            # the prompt text above.
            search_plantes()
        elif choix_utilisateur == "Q":
            break
    datab.close()
if __name__ == "__main__":
datab = mysql.connector.connect(user='mickaell', password='cheerfulguys84', host='127.0.0.1',
database='herboristerie')
mycursor = datab.cursor()
main()
| true |
e726799d2028eb400da8a260cfede6870b923b78 | Python | pronob1010/Data_Science_Project_with_Edu_data | /first/venv/Scripts/p8.py | UTF-8 | 339 | 3.765625 | 4 | [] | no_license | number = input("Enter the Number")
# Map digit characters to their English names (only 1-4 are known).
digit_mapping = {
    "1" : "One",
    "2" : "Two",
    "3" : "Three",
    "4" : "Four"
}
output = ""  # NOTE(review): assigned but never used afterwards.
# Task 1: print the word for each typed digit, '!' for unknown chars.
for ch in number:
    print(digit_mapping.get(ch, "!"))
print("Task-2")
# Task 2: split a line on single spaces and show the word list.
text = input(">")
word = text.split(' ')
print(word)
print("Task-3")
# Task 3: identical to task 2.
text = input(">")
word = text.split(' ')
print(word)
| true |
e0eeacac75a3d2f2c74ae4647cabf74a3777e42f | Python | arbidha/Designing-RESTful-APIs | /Lesson2/geocode.py | UTF-8 | 989 | 2.65625 | 3 | [] | no_license | import httplib2 , json
def getGeocodeLocation(inputString):
    """Geocode a free-text location via the Google Maps Geocoding API.

    Spaces in the query become '+'.  Returns (latitude, longitude) of
    the first result.  Requires a real API key in place of the
    "your_api_key" placeholder; raises KeyError/IndexError when the API
    returns no results or an error payload.
    """
    google_api_key = "your_api_key"
    locationString = inputString.replace(" ","+")
    url = ('https://maps.googleapis.com/maps/api/geocode/json?&address=%s&key=%s'% (locationString,google_api_key))
    h = httplib2.Http()
    response , content = h.request(url,'GET')
    result = json.loads(content)
    #print response
    latitude = result['results'][0]['geometry']['location']['lat']
    longitude = result['results'][0]['geometry']['location']['lng']
    # Leftover Foursquare experiment kept from the lesson:
    #print "response header: %s \n \n " % response
    #url1 = ("https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&ll=%s,%s&query=sushi&v=20190421" % (foursquare_client_id, foursquare_client_secret,latitude,longitude))
    #h1 = httplib2.Http()
    #response1 , content1 = h1.request(url1,'GET')
    #result1 = json.loads(content1)
    #print "response header: %s \n \n " % response1
    #return result1
    return (latitude,longitude)
| true |
848f9a6251c40caac967a0d156035e7e36aa321e | Python | AugustasV/ploomber | /src/ploomber/tasks/notebook.py | UTF-8 | 22,356 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import os
import shlex
import pdb
import tempfile
import subprocess
from pathlib import Path
from nbconvert import ExporterNameError
import warnings
try:
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
import papermill as pm
except ImportError:
pm = None
try:
import jupytext
except ImportError:
jupytext = None
try:
import nbformat
except ImportError:
nbformat = None
try:
import nbconvert
except ImportError:
nbconvert = None
from ploomber.exceptions import TaskBuildError
from ploomber.sources import NotebookSource
from ploomber.sources.notebooksource import _cleanup_rendered_nb
from ploomber.products import File, MetaProduct
from ploomber.tasks.abc import Task
from ploomber.util import requires, chdir_code
from ploomber.io import FileLoaderMixin
class NotebookConverter:
    """
    Thin wrapper around nbconvert to provide a simple API to convert .ipynb
    files to different formats.

    Parameters
    ----------
    path_to_output : str
        Path to the notebook that will be converted (in place)
    exporter_name : str, default=None
        nbconvert exporter to use; inferred from the output extension
        when None
    nbconvert_export_kwargs : dict, default=None
        Keyword arguments passed to ``nbconvert.export``

    Notes
    -----
    The exporter is searched at initialization time to raise an appropriate
    error during Task setup rather than raising it at Task runtime

    Handles cases where the output representation is text (e.g. HTML) or bytes
    (e.g. PDF)
    """
    def __init__(self,
                 path_to_output,
                 exporter_name=None,
                 nbconvert_export_kwargs=None):
        if exporter_name is None:
            # try to infer it from the extension
            suffix = Path(path_to_output).suffix
            if not suffix:
                raise ValueError('Could not determine output format for '
                                 'product: "{}" because it has no extension. '
                                 'Either add an extension '
                                 'or explicitly pass a '
                                 '"nbconvert_exporter_name" to the task '
                                 'constructor'.format(path_to_output))
            exporter_name = suffix[1:]
        # fails fast (at Task setup) if the exporter does not exist
        self.exporter = self._get_exporter(exporter_name, path_to_output)
        self.path_to_output = path_to_output
        self.nbconvert_export_kwargs = nbconvert_export_kwargs or {}

    def convert(self):
        # exporter is None when the output is already .ipynb: nothing to
        # convert, but warn if the user passed kwargs that will be ignored
        if self.exporter is None and self.nbconvert_export_kwargs:
            warnings.warn(
                f'Output {self.path_to_output!r} is a '
                'notebook file. nbconvert_export_kwargs '
                f'{self.nbconvert_export_kwargs!r} will be '
                'ignored since they only apply '
                'when exporting the notebook to other formats '
                'such as html. You may change the extension to apply '
                'the conversion parameters')
        if self.exporter is not None:
            self._from_ipynb(self.path_to_output, self.exporter,
                             self.nbconvert_export_kwargs)

    @staticmethod
    def _get_exporter(exporter_name, path_to_output):
        """
        Get function to convert notebook to another format using nbconvert,
        first. In some cases, the exporter name matches the file extension
        (e.g html) but other times it doesn't (e.g. slides), use
        `nbconvert.get_export_names()` to get available exporter_names

        Returns None if passed exported name is 'ipynb', raises ValueError
        if an exporter can't be located
        """
        extension2exporter_name = {'md': 'markdown'}
        # sometimes extension does not match with the exporter name, fix
        # if needed
        if exporter_name in extension2exporter_name:
            exporter_name = extension2exporter_name[exporter_name]
        if exporter_name == 'ipynb':
            exporter = None
        else:
            try:
                exporter = nbconvert.get_exporter(exporter_name)
            # nbconvert 5.6.1 raises ValueError, beginning in version 6,
            # it raises ExporterNameError. However the exception is defined
            # since 5.6.1 so we can safely import it
            except (ValueError, ExporterNameError):
                example_dict = {
                    'source': 'script.ipynb',
                    'product': {
                        'nb': 'output.ipynb',
                        'other': path_to_output
                    }
                }
                raise ValueError(
                    'Could not find nbconvert exporter '
                    'with name "{}". '
                    'Change the extension '
                    'or pass a valid "nbconvert_exporter_name" '
                    'value. Valid exporters are: {}.\n\nIf "{}" '
                    'is not intended to be the output noteboook, '
                    'register multiple products and identify the '
                    'output notebooks with "nb". Example: {}'.format(
                        exporter_name,
                        nbconvert.get_export_names(),
                        path_to_output,
                        example_dict,
                    ))
        return exporter

    @staticmethod
    def _from_ipynb(path_to_nb, exporter, nbconvert_export_kwargs):
        """
        Convert ipynb to another format, overwriting the file in place.
        """
        path = Path(path_to_nb)
        nb = nbformat.reads(path.read_text(), as_version=nbformat.NO_CONVERT)
        content, _ = nbconvert.export(exporter, nb, **nbconvert_export_kwargs)
        # exporters may return either text (e.g. HTML) or bytes (e.g. PDF)
        if isinstance(content, str):
            path.write_text(content)
        elif isinstance(content, bytes):
            path.write_bytes(content)
        else:
            # NOTE(review): this message is missing a space between "with"
            # and "unknown" ("...notebook withunknown format...")
            raise TypeError('nbconvert returned a converted notebook with'
                            'unknown format, only text and binary objects '
                            'are supported')
        return content
class NotebookRunner(FileLoaderMixin, Task):
    """
    Run a Jupyter notebook using papermill. Support several input formats
    via jupytext and several output formats via nbconvert

    Parameters
    ----------
    source: str or pathlib.Path
        Notebook source, if str, the content is interpreted as the actual
        notebook, if pathlib.Path, the content of the file is loaded. When
        loading from a str, ext_in must be passed
    product: ploomber.File
        The output file
    dag: ploomber.DAG
        A DAG to add this task to
    name: str, optional
        A str to identify this task. Should not already exist in the dag
    params: dict, optional
        Notebook parameters. These are passed as the "parameters" argument
        to the papermill.execute_notebook function, by default, "product"
        and "upstream" are included
    papermill_params : dict, optional
        Other parameters passed to papermill.execute_notebook, defaults to None
    kernelspec_name: str, optional
        Kernelspec name to use, if the file extension provides with enough
        information to choose a kernel or the notebook already includes
        kernelspec data (in metadata.kernelspec), this is ignored, otherwise,
        the kernel is looked up using jupyter_client.kernelspec.get_kernel_spec
    nbconvert_exporter_name: str, optional
        Once the notebook is run, this parameter controls whether to export
        the notebook to a different format using the nbconvert package,
        it is not needed unless the extension cannot be used to infer the
        final output format, in which case the nbconvert.get_exporter is used.
    ext_in: str, optional
        Source extension. Required if loading from a str. If source is a
        ``pathlib.Path``, the extension from the file is used.
    nb_product_key: str, optional
        If the notebook is expected to generate other products, pass the key
        to identify the output notebook (i.e. if product is a list with 3
        ploomber.File, pass the index pointing to the notebook path). If the
        only output is the notebook itself, this parameter is not needed
    static_analysis : bool
        Run static analysis after rendering. This compares the "params"
        argument with the declared arguments in the "parameters" cell (they
        make notebooks behave more like "functions"), pyflakes is also run to
        detect errors before executing the notebook. Has no effect if it's not
        a Python file.
    nbconvert_export_kwargs : dict
        Keyword arguments to pass to the ``nbconvert.export`` function (this is
        only used if exporting the output ipynb notebook to another format).
        You can use this, for example, to hide code cells using the
        exclude_input parameter. See ``nbconvert`` documentation for details.
        Ignored if the product is file with .ipynb extension.
    local_execution : bool, optional
        Change working directory to be the parent of the notebook's source.
        Defaults to False. This resembles the default behavior when
        running notebooks interactively via `jupyter notebook`

    Examples
    --------
    >>> from pathlib import Path
    >>> from ploomber import DAG
    >>> from ploomber.tasks import NotebookRunner
    >>> from ploomber.products import File
    >>> dag = DAG()
    >>> # do not include input code (only cell's output)
    >>> NotebookRunner(Path('nb.ipynb'), File('out-1.html'), dag=dag,
    ...                nbconvert_export_kwargs={'exclude_input': True},
    ...                name=1)
    >>> # Selectively remove cells with the tag "remove"
    >>> config = {'TagRemovePreprocessor': {'remove_cell_tags': ('remove',)},
    ...           'HTMLExporter':
    ...           {'preprocessors':
    ...            ['nbconvert.preprocessors.TagRemovePreprocessor']}}
    >>> NotebookRunner(Path('nb.ipynb'), File('out-2.html'), dag=dag,
    ...                nbconvert_export_kwargs={'config': config},
    ...                name=2)
    >>> dag.build()

    Notes
    -----
    nbconvert's documentation:
    https://nbconvert.readthedocs.io/en/latest/config_options.html#preprocessor-options
    """
    PRODUCT_CLASSES_ALLOWED = (File, )

    @requires(['jupyter', 'papermill', 'jupytext'], 'NotebookRunner')
    def __init__(self,
                 source,
                 product,
                 dag,
                 name=None,
                 params=None,
                 papermill_params=None,
                 kernelspec_name=None,
                 nbconvert_exporter_name=None,
                 ext_in=None,
                 nb_product_key='nb',
                 static_analysis=True,
                 nbconvert_export_kwargs=None,
                 local_execution=False,
                 check_if_kernel_installed=True):
        self.papermill_params = papermill_params or {}
        self.nbconvert_export_kwargs = nbconvert_export_kwargs or {}
        self.kernelspec_name = kernelspec_name
        self.nbconvert_exporter_name = nbconvert_exporter_name
        self.ext_in = ext_in
        self.nb_product_key = nb_product_key
        self.local_execution = local_execution
        self.check_if_kernel_installed = check_if_kernel_installed
        # run() sets papermill's "cwd" itself when local_execution is on,
        # so the two options are mutually exclusive
        if 'cwd' in self.papermill_params and self.local_execution:
            raise KeyError('If local_execution is set to True, "cwd" should '
                           'not appear in papermill_params, as such '
                           'parameter will be set by the task itself')
        # hot_reload is configured at the DAG level
        kwargs = dict(hot_reload=dag._params.hot_reload)
        self._source = NotebookRunner._init_source(source, kwargs, ext_in,
                                                   kernelspec_name,
                                                   static_analysis,
                                                   check_if_kernel_installed)
        super().__init__(product, dag, name, params)
        # with multiple products, one of them (nb_product_key) must point to
        # the output notebook itself
        if isinstance(self.product, MetaProduct):
            if self.product.get(nb_product_key) is None:
                raise KeyError("Key '{}' does not exist in product: {}. "
                               "Either add it to specify the output notebook "
                               "location or pass a 'nb_product_key' to the "
                               "task constructor with the key that contains "
                               " the output location".format(
                                   nb_product_key, str(self.product)))
        if isinstance(self.product, MetaProduct):
            product_nb = (
                self.product[self.nb_product_key]._identifier.best_repr(
                    shorten=False))
        else:
            product_nb = self.product._identifier.best_repr(shorten=False)
        # fails early if the requested output format has no nbconvert exporter
        self._converter = NotebookConverter(product_nb,
                                            nbconvert_exporter_name,
                                            nbconvert_export_kwargs)

    # expose the static_analysis attribute from the source, we need this
    # since we disable static_analysis when rendering the DAG in Jupyter
    @property
    def static_analysis(self):
        return self._source.static_analysis

    @static_analysis.setter
    def static_analysis(self, value):
        self._source.static_analysis = value

    @staticmethod
    def _init_source(source,
                     kwargs,
                     ext_in=None,
                     kernelspec_name=None,
                     static_analysis=False,
                     check_if_kernel_installed=False):
        # thin factory kept separate so subclasses/tests can override it
        return NotebookSource(
            source,
            ext_in=ext_in,
            kernelspec_name=kernelspec_name,
            static_analysis=static_analysis,
            check_if_kernel_installed=check_if_kernel_installed,
            **kwargs)

    def develop(self, app='notebook', args=None):
        """
        Opens the rendered notebook (with injected parameters) and adds a
        "debugging-settings" cell that changes directory to the current
        active directory. This will reflect conditions when calling
        `DAG.build()`. This modified notebook is saved in the same location as
        the source with a "-tmp" added to the filename. Changes to this
        notebook can be exported to the original notebook after the notebook
        process is shut down. The "injected-parameters" and
        "debugging-settings" cells are deleted before saving.

        Parameters
        ----------
        app : {'notebook', 'lab'}, default: 'notebook'
            Which Jupyter application to use
        args : str
            Extra parameters passed to the jupyter application

        Notes
        -----
        Be careful when developing tasks interactively. If the task has run
        successfully, you overwrite products but don't save the
        updated source code, your DAG will enter an inconsistent state where
        the metadata won't match the overwritten product.

        If you modify the source code and call develop again, the source
        code will be updated only if the ``hot_reload option`` is turned on.
        See :class:`ploomber.DAGConfigurator` for details.
        """
        # TODO: this code needs refactoring, should be a context manager
        # like the one we have for PythonCallable.develop that abstracts
        # the handling of the temporary notebook while editing
        apps = {'notebook', 'lab'}
        if app not in apps:
            raise ValueError('"app" must be one of {}, got: "{}"'.format(
                apps, app))
        if self.source.language != 'python':
            raise NotImplementedError(
                'develop is not implemented for "{}" '
                'notebooks, only python is supported'.format(
                    self.source.language))
        if self.source.loc is None:
            raise ValueError('Can only use develop in notebooks loaded '
                             'from files, not from str')
        nb = _read_rendered_notebook(self.source.nb_str_rendered)
        name = self.source.loc.name
        suffix = self.source.loc.suffix
        # e.g. nb.py -> nb-tmp.ipynb, next to the original source
        name_new = name.replace(suffix, '-tmp.ipynb')
        tmp = self.source.loc.with_name(name_new)
        content = nbformat.writes(nb, version=nbformat.NO_CONVERT)
        tmp.write_text(content)
        # open notebook with injected debugging cell
        try:
            subprocess.run(['jupyter', app, str(tmp)] +
                           shlex.split(args or ''),
                           check=True)
        except KeyboardInterrupt:
            print(f'Jupyter {app} application closed...')
        # read tmp file again, to see if the user made any changes
        content_new = Path(tmp).read_text()
        # maybe exclude changes in tmp cells?
        if content == content_new:
            print('No changes found...')
        else:
            # save changes
            if _save():
                nb = nbformat.reads(content_new,
                                    as_version=nbformat.NO_CONVERT)
                # remove injected-parameters and debugging-settings cells if
                # they exist
                _cleanup_rendered_nb(nb)
                # write back in the same format and original location
                ext_source = Path(self.source.loc).suffix[1:]
                print('Saving notebook to: ', self.source.loc)
                jupytext.write(nb, self.source.loc, fmt=ext_source)
            else:
                print('Not saving changes...')
        # remove tmp file
        Path(tmp).unlink()

    def debug(self, kind='ipdb'):
        """
        Opens the notebook (with injected parameters) in debug mode in a
        temporary location

        Parameters
        ----------
        kind : str, default='ipdb'
            Debugger to use, 'ipdb' to use line-by-line IPython debugger,
            'pdb' to use line-by-line Python debugger or 'pm' to do
            post-mortem debugging using IPython

        Notes
        -----
        Be careful when debugging tasks. If the task has run
        successfully, you overwrite products but don't save the
        updated source code, your DAG will enter an inconsistent state where
        the metadata won't match the overwritten product.
        """
        if self.source.language != 'python':
            raise NotImplementedError(
                'debug is not implemented for "{}" '
                'notebooks, only python is supported'.format(
                    self.source.language))
        opts = {'ipdb', 'pdb', 'pm'}
        if kind not in opts:
            raise ValueError('kind must be one of {}'.format(opts))
        # convert the rendered notebook to a plain .py script for the debugger
        nb = _read_rendered_notebook(self.source.nb_str_rendered)
        fd, tmp_path = tempfile.mkstemp(suffix='.py')
        os.close(fd)
        code = jupytext.writes(nb, version=nbformat.NO_CONVERT, fmt='py')
        Path(tmp_path).write_text(code)
        if kind == 'pm':
            # post-mortem debugging
            try:
                subprocess.run(['ipython', tmp_path, '--pdb'])
            finally:
                Path(tmp_path).unlink()
        else:
            if kind == 'ipdb':
                from IPython.terminal.debugger import TerminalPdb, Pdb
                code = compile(source=code, filename=tmp_path, mode='exec')
                try:
                    # this seems to only work in a Terminal
                    debugger = TerminalPdb()
                except Exception:
                    # this works in a Jupyter notebook
                    debugger = Pdb()
            elif kind == 'pdb':
                debugger = pdb
            try:
                debugger.run(code)
            finally:
                Path(tmp_path).unlink()

    def run(self):
        if isinstance(self.product, MetaProduct):
            path_to_out = Path(str(self.product[self.nb_product_key]))
        else:
            path_to_out = Path(str(self.product))
        # we will run the notebook with this extension, regardless of the
        # user's choice, if any error happens, this will allow them to debug
        # we will change the extension after the notebook runs successfully
        path_to_out_ipynb = path_to_out.with_suffix('.ipynb')
        fd, tmp = tempfile.mkstemp('.ipynb')
        os.close(fd)
        tmp = Path(tmp)
        tmp.write_text(self.source.nb_str_rendered)
        if self.local_execution:
            self.papermill_params['cwd'] = str(self.source.loc.parent)
        # create parent folders if they don't exist
        Path(path_to_out_ipynb).parent.mkdir(parents=True, exist_ok=True)
        try:
            # no need to pass parameters, they are already there
            pm.execute_notebook(str(tmp), str(path_to_out_ipynb),
                                **self.papermill_params)
        except Exception as e:
            # the partially-executed output is kept on purpose for debugging
            raise TaskBuildError('An error occurred when calling'
                                 ' papermil.execute_notebook, partially'
                                 ' executed notebook with traceback '
                                 'available at {}'.format(
                                     str(path_to_out_ipynb))) from e
        finally:
            tmp.unlink()
        # success: give the notebook its final extension, then convert it
        # to the requested output format (no-op if the product is .ipynb)
        path_to_out_ipynb.rename(path_to_out)
        self._converter.convert()
def _read_rendered_notebook(nb_str):
    """
    Parse a rendered notebook (JSON string) and inject a first cell with
    debugging settings; returns the nbformat notebook object.
    """
    # add debug cells
    nb = nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)
    nbformat_v = nbformat.versions[nb.nbformat]
    source = """
# Debugging settings (this cell will be removed before saving)
# change the current working directory to directory of the session that
# invoked the jupyter app to make relative paths work
import os
{}
""".format(chdir_code(Path('.').resolve()))
    # tagged so _cleanup_rendered_nb can locate and remove it before saving
    cell = nbformat_v.new_code_cell(source,
                                    metadata={'tags': ['debugging-settings']})
    nb.cells.insert(0, cell)
    return nb
def _save():
    """Ask (via stdin) whether notebook changes should be saved.

    Returns True (save) unless the user answers "no"; the comparison is
    case-insensitive and ignores surrounding whitespace.
    """
    res = input('Notebook changed, do you want to save changes '
                'in the original location? (injected parameters '
                'and debugging cells will be removed before '
                'saving). Enter "no" to skip saving changes, '
                'anything else will be interpreted as "yes": ')
    # Normalize so "No", "NO" or " no " also decline saving; the previous
    # exact string comparison treated those answers as "yes".
    return res.strip().lower() != 'no'
| true |
9acfd3c49238e156499bfbb3f2e11fc824ee9562 | Python | DigitalCurrencyAnalysis/MonitorQQGroup | /qqgroup.py | UTF-8 | 2,253 | 3.1875 | 3 | [] | no_license | #coding:utf-8
import re
from collections import Counter
import xlwt
def time():
    """Count chat messages per hour of day (0-23) and write totals to sheet1.

    Reads the exported chat log "qq1.txt"; a line containing an h:mm:ss
    timestamp is treated as a message header, other lines are ignored.
    Uses collections.Counter (already used by talker()/date()) instead of
    the previous version's 24 full passes over the timestamp list.
    """
    pa = re.compile(r"\d{1,2}:\d\d:\d\d")
    hour_counts = Counter()
    with open("qq1.txt", mode='r', encoding='UTF-8') as f:
        for d in f:
            times = pa.findall(d)
            if times:
                # the hour is the field before the first ':' of the timestamp
                hour_counts[int(times[0].split(':')[0])] += 1
    for i in range(0, 24):
        b = hour_counts[i]
        sheet1.write(i, 0, i)
        sheet1.write(i, 1, b)
        print("%s时信息次数为%s" % (i, b))
def talker():
    """Count messages per sender and write (name, count) rows to sheet2."""
    names = []
    # A line containing an h:mm:ss timestamp is a message header; message
    # bodies have no timestamp and are skipped.
    pa = re.compile(r"\d{1,2}:\d\d:\d\d")
    with open("qq1.txt", mode='r', encoding='UTF-8') as f:
        data = f.readlines()
    for d in data:
        times = pa.findall(d)
        if len(times) == 0:
            pass
        else:
            # Assumes header format "<date> <time> <name>(id)" or
            # "<name><email>": third token, with "(...)"/"<...>" stripped.
            # TODO(review): confirm against the actual QQ export format.
            names.append(d.split(" ")[2].split("<")[0].split("(")[0])
    count = Counter(names)
    i = 0  # output row index
    for key in count:
        sheet2.write(i,0,key)
        sheet2.write(i,1,count[key])
        i += 1
        print(key,count[key])
def date():
    """Count messages per calendar day and write (date, count) rows to sheet3."""
    dates = []
    # Same header detection as talker(): only timestamped lines count.
    pa = re.compile(r"\d{1,2}:\d\d:\d\d")
    with open("qq1.txt", mode='r', encoding='UTF-8') as f:
        data = f.readlines()
    for d in data:
        times = pa.findall(d)
        if len(times) == 0:
            pass
        else:
            # First space-separated token of a header line is the date.
            dates.append(d.split(" ")[0])
    count = Counter(dates)
    i = 0  # output row index
    for key in count:
        sheet3.write(i,0,key)
        sheet3.write(i,1,count[key])
        i += 1
        print(key,count[key])
if __name__ == "__main__":
    # One workbook, three sheets: per-hour, per-person and per-date stats.
    # The sheet variables are module-level globals used by time()/talker()/date().
    ws = xlwt.Workbook(encoding="utf-8")
    sheet1=ws.add_sheet(u"时段统计",cell_overwrite_ok=True)
    sheet2=ws.add_sheet(u"个人统计",cell_overwrite_ok=True)
    sheet3=ws.add_sheet(u"日期统计",cell_overwrite_ok=True)
    print(u"每个时段的聊天次数统计")
    time()
    print (u"每个人发言次数统计")
    talker()
    print(u"每个日期发现次数统计")
    date()
    ws.save('data_qq.xlsx')
| true |
def inputNumber(sen):
    """Print the prompt `sen`, read one line from stdin and return it as an
    int; returns 0 when the input is not a valid integer or stdin is closed.
    """
    try:
        print(sen)
        num = int(input())
    # Narrowed from a bare `except:`, which also swallowed unrelated
    # exceptions such as KeyboardInterrupt.
    except (ValueError, EOFError):
        num = 0
    return num
# Lecture demo: exception handling around integer division.
#num1 = inputNumber('input number 1 : ')
#num2 = inputNumber('input number 2 : ')
#print('sum is ' + str(num1 + num2))
#try:
# BUGFIX: the next line referenced num1/num2, which are only assigned in the
# commented-out block above, so it raised NameError at startup. It is now
# commented out together with the rest of that example.
#print('div is :' + str(num1/num2))
#except:
    #print('num2 must not be zero!!')
try:
    n = int(input('input first num : '))
    n2 = int(input('input sec num : '))
    print(n/n2)
except ValueError:
    # raised by int() when the input is not a number
    print('input int number !')
except ZeroDivisionError:
    print('set num2 of none zero value!!')
else:
    # runs only when the try block raised no exception
    print('will be printed if we do not have any error ')
finally:
    # always runs, error or not
    print('will be printed any way')
| true |
f6cbeb2b5fbbbdf105360559b30eb90b795f834b | Python | liuluyang/miller | /October/10-23/test.py | UTF-8 | 1,513 | 2.921875 | 3 | [] | no_license | # ! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Miller"
# Datetime: 2019/10/23 9:02
import time
from threading import Thread
# def sayhi(name):
# time.sleep(2)
# print("%s say 'hello'" % name)
#
#
# if __name__ == '__main__':
# t1 = Thread(target=sayhi, args=("miller",))
# t1.start()
# print("主线程")
# class MyThread(Thread):
# def __init__(self, name):
# super().__init__()
# self.name = name
#
# def run(self):
# time.sleep(2)
# print("%s say 'hello'" % self.name)
#
#
# if __name__ == '__main__':
# t2 = MyThread("miller")
# t2.start()
# print("主线程")
# res = input(">>>")
#
# print(res.upper())
#
#
# with open()
import time
res = None  # shared between recv() and the commented-out worker threads below
def recv():
    # Read one line from stdin into the module-level `res`.
    global res
    res = input(">>>")
# def print_res():
# while True:
# if res:
# print(res)
# break
# time.sleep(0.5)
#
#
# def save():
# while True:
# if res:
# with open("./test.txt", "w", encoding="utf-8") as f:
# f.write(res)
# break
# time.sleep(0.5)
# def print_res():
# global res
# res = res.upper()
# print(res)
# def save():
# with open("./test.txt", "w", encoding="utf-8") as f:
# f.write(res)
# if __name__ == '__main__':
# t1 = Thread(target=recv)
# t2 = Thread(target=print_res)
# t3 = Thread(target=save)
# t1.start()
# t1.join()
# t2.start()
# t2.join()
# t3.start()
| true |
5343461ee09860abc2312421136f93c1aa14b590 | Python | nandan07/euler | /37_truncate_prime.py | UTF-8 | 636 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python3
#import ipdb
#import helper
def isPrime(x):
    """Return True when int(x) is a prime number (values below 2 are not)."""
    n = int(x)
    if n < 2:
        return False
    # Trial division by every candidate up to floor(sqrt(n)).
    return all(n % d != 0 for d in range(2, int(n ** 0.5) + 1))
#n,testdata=helper.readData("input")
# Sum every "truncatable" prime i with 20 < i <= N: primes that remain prime
# for every left prefix AND right suffix of their digits (Project Euler 37 /
# HackerRank variant -- TODO confirm which judge this targets).
N=int(input())
#testdata=input()
#N=int(testdata[0])
ans=0
for i in range(N,20,-1):
    prime=True
    N_str=str(i)
    # test each prefix N_str[0:p] and suffix N_str[-p:]
    for p in range(1,len(N_str)+1):
        if not isPrime(int(N_str[0:p])):
            prime=False
            break
        if not isPrime(int(N_str[-p:])):
            prime=False
            break
    if(prime):
        ans+=i
print(ans)
#ipdb.set_trace()
| true |
N = int(input())
# Precompute all answers in one pass instead of evaluating f(1)..f(N)
# independently: count, for every value <= N, how many (x, y, z) triples
# produce it. (Comments translated from the original Japanese notes.)
ans = [0] * (10 ** 4 + 1)
# 1..100 per variable is enough: the expression exceeds 10^4 beyond that.
for x in range(1, 101):
    for y in range(1, 101):
        for z in range(1, 101):
            calc = x ** 2 + y ** 2 + z ** 2 + x * y + y * z + z * x
            if calc <= N:
                ans[calc] += 1
for i in range(1,N + 1):
    print(ans[i])
| true |
d78592a212734dbe9d66a6e0e57bc2bb4c5a9413 | Python | agmezr/exercises | /snakes_and_ladders.py | UTF-8 | 3,096 | 4.5 | 4 | [] | no_license | """
Rules:
All players start at the 1 tile.
Each turn, the player rolls the die and advances their piece by whatever number was rolled.
If the tile you land on contains the base of a ladder, you instantly ascend to the top of the ladder.
If the tile contains the head of a snake, you instantly descend to the end of the snake's tail.
If you roll a 6, you get another turn after completing the current one (ie: another die roll and move)
The game ends when one of the players reaches the 100 tile.
You must land exactly on the tile; if you overshoot it, you'll 'bounce' back by however many tiles you exceeded it.
For example, if you're currently on the 97 tile and you roll a 5, you'll move 3 tiles forward to the 100, and then you'll travel the remaining 2 tiles back, landing on 98.
The game board in this case is represented by the matrix board, where each tile that has the base of a ladder or head of a snake on it simply contains the number of the tile that it leads to. All other tiles contain -1.
There's also an array diceRolls which contains the outcomes of all the die rolls in the game (for all players), in order.
There may have been extra die rolls after one of the players already won, so you'll need to check when someone wins.
Given board, dieRolls, and integer players (representing how many players are in the game),
your task is to return an array of each player's final position when the game ends.
"""
def snakesAndLadders(board, dieRolls, players):
    """Simulate a game of snakes and ladders from pre-rolled dice.

    Parameters
    ----------
    board : 10x10 matrix. A cell holding a ladder base / snake head stores
        the destination tile number; every other cell holds -1.
    dieRolls : all die outcomes in order (may include extra rolls after
        the game was already won).
    players : number of players; everyone starts on tile 1.

    Returns the list of final tile numbers, one per player.
    """
    positions = [1] * players
    player_turn = 0
    for roll in dieRolls:
        current = player_turn % players
        pos = positions[current] + roll
        # Overshooting tile 100 bounces the piece back by the excess
        # (e.g. 97 + 5 -> 98).
        if pos > 100:
            pos = 100 - (pos - 100)
        positions[current] = pos
        # Landing exactly on 100 ends the game immediately.
        if pos == 100:
            break
        # Follow a ladder base / snake head to its destination tile.
        row, col = warp(pos)
        dest = board[row][col]
        if dest > 0:
            positions[current] = dest
        # Rolling a 6 grants the same player an extra turn.
        if roll < 6:
            player_turn += 1
    return positions

def warp(n):
    """Map a tile number (1-100) to its (row, col) in the board matrix.

    Tile 1 is at the bottom-left (row 9, col 0) and the numbering snakes
    upward boustrophedon-style, so even-numbered rows run right-to-left.
    """
    n -= 1
    row = 9 - n // 10
    col = n % 10
    if row % 2 == 0:
        col = 9 - col
    return row, col
# Example game: 10x10 board where a cell holds the destination tile of a
# ladder base / snake head, or -1 for a plain tile.
board = [[-1,28,-1,-1,-1,-1,82,-1,-1,-1],
        [-1,-1,-1,67,-1,-1,-1,-1,-1,-1],
        [56,-1,-1,-1,-1,-1,-1,55,-1,36],
        [-1,-1,-1,-1,87,-1,-1,-1,-1,-1],
        [75,-1,-1,-1,-1,-1,-1,-1,-1,-1],
        [-1,-1,30,-1,-1,-1,61,-1,-1,-1],
        [-1,-1,-1,-1,-1,18,-1,-1,-1,-1],
        [-1,44,-1,32,-1,-1,-1,-1,45,-1],
        [-1,-1,-1,-1,-1,-1,26,-1,-1,-1],
        [-1,-1,-1,12,-1,-1,-1,-1,-1,-1]]
dieRolls = [2, 1, 3]
players = 4
r = snakesAndLadders(board, dieRolls, players)
print(r)  # player 2 lands on tile 4, a ladder base leading to 12 -> [3, 2, 12, 1]
| true |
2978018b6e1d6de90ed29cfce08a9905ad5f1cc5 | Python | Ethic41/codes | /python/oop/intSet.py | UTF-8 | 973 | 4.15625 | 4 | [
"MIT"
] | permissive |
class IntSet(object):
    """An intSet is a set of integers, backed by a list of unique values."""
    def __init__(self):
        # create an empty set of integers
        self.vals = []
    def insert(self, e):
        # Assumes e is an integer; inserts e only if it is not already present.
        if not e in self.vals:
            self.vals.append(e)
    def member(self, e):
        # Assumes e is an integer. Returns True if e is in the set.
        return e in self.vals
    def remove(self, e):
        # Removes e; raises ValueError when e is not in the set.
        try:
            self.vals.remove(e)
        # list.remove only raises ValueError; the previous bare `except:`
        # also swallowed unrelated exceptions, and the message lacked a space
        # ("4not found").
        except ValueError:
            raise ValueError(str(e) + " not found")
    def getMembers(self):
        # Return a copy so callers cannot mutate the internal list.
        return self.vals[:]
    def __str__(self):
        # Sorted, comma-separated rendering, e.g. "{1,2,3}".
        self.vals.sort()
        result = ""
        for val in self.vals:
            result += str(val) + ","
        return "{" + result[:-1] + "}"
# Demo (NOTE: Python 2 -- `print` is a statement here).
s = IntSet()
s.insert(3)
s.insert(4)
s.insert(12)
print s.getMembers()
s.remove(4)
print s.getMembers()
| true |
886f888e8cae90499cf383b17e2db27cf9a4603c | Python | Senethys/5DA000 | /MemberList.py | UTF-8 | 2,361 | 2.71875 | 3 | [] | no_license | # -*- coding: latin-1 -*-
from WebPage import WebPage
from PersonReg import PersonReg
import sqlite3
import cgitb
cgitb.enable()
class MemberList(WebPage):
'''
The "Main" page of the site. Creates a table of all the members
in the databasewith checkboxes in order to delete checked members.
Also includes a form toadd more members and a note with a title.
'''
def printBody(self):
print """<form class = "delAgent" action="deleteAgent.cgi"
method = "post" id = "checkvals"> <input type="submit" value="Delete">
</form>"""
print """<table class = "memberlist" >"""
register=PersonReg("database.db")
agentlist = register.search("")
for agent_id in agentlist:
agent = register.getAgent(agent_id)
print "<tr><td>"
print '<input type= "checkbox" name="checkid" form = "checkvals" value = %d>' % agent.getId()
print "</td><td>"
print "<a href='agent_info.cgi?agentid=%d'>" % agent.getId()
print agent.getName().encode("Latin-1")
print "</a>"
print "</td><td>"
print "<a href='agent_info.cgi?agentid=%d'>" % agent.getId()
print agent.getLastname()
print "</a>"
print "</td><td>"
print "<a href='agent_info.cgi?agentid=%d'>" % agent.getId()
print agent.getEmail()
print "</a>"
print "</td></tr>"
print "</table>"
print "<br>"
print
print "<form class = 'addAgent' action='addAgent.cgi' method='post'>"
print "<h2 class= 'NewMemb'>Add new member:</h2>"
print ("<label>First name: </label> <input type='text' name='fname'>")
print ("<label>Last name:</label> <input type='text' name='lname'>")
print ("<label>Email adress:</label><input type='text' name='mail'>")
print "<br> </br> <br> </br>"
print ("<label>Title:</label> <input type='text' name='note_title'>")
| true |
e415efdaff8b33f191b0cac0b6844f12963c043d | Python | asrp/pyfileconsole | /client.py | UTF-8 | 520 | 2.78125 | 3 | [] | no_license | import time
in_file = "/tmp/pyconsole-out.txt"
out_file = "/tmp/pyconsole-in.txt"
f_in = open(in_file)
f_out = open(out_file, "a")
sep = "===Done===\n"
line = True
while line:
line = f_in.readline()
print(line, end='')
while True:
out_line = input()
f_out.write(f"{out_line}\n" if out_line.startswith("eval") else f"exec {out_line}\n")
f_out.flush()
line = None
while line != sep:
line = f_in.readline()
if not line:
time.sleep(0.1)
print(line, end='')
| true |
1fc6d74c59934a4e05abbf146694259f7ee4fb66 | Python | gsznaier/codeforce_parser | /cfspider/spiders/cf.py | UTF-8 | 3,887 | 2.578125 | 3 | [] | no_license | '''
Scrapy spider to scrape codeforces successful submissions.
NOTE: Robots.txt is set to False
How to use
1. Change the PAGE_LIMIT according to your need
2. Change LANGUAGE to desired one by checking with codeforces language options
'''
from scrapy.http import FormRequest
from urllib.request import urlopen
import scrapy
import json
'''possible language choices:
"c.gcc":GNU C
"c.gcc11": GNU C11
"cpp.clang++-diagnose": Clang++17 Diagnostics
"cpp.g++": GNU C++
"cpp.g++11": GNU C++11
"cpp.g++14": GNU C++14
"cpp.g++17": GNU C++17
"cpp.g++17-drmemory": GNU C++17 Diagnostics
"cpp.ms": MS C++
"csharp.mono": Mono C#
"d": D
"go":Go
"haskell.ghc": Haskell
"java8": Java 8
"kotlin": Kotlin
"ocaml": Ocaml
"pas.dpr": Delphi
"pas.fpc": FPC
"pas.pascalabc": PascalABC.NET
"perl.5": Perl
"php.5":PHP
"python.2":Python 2
"python.3": Python 3
"python.pypy2": PyPy 2
"python.pypy3": PyPy 3
"ruby.1": Ruby
"rust": Rust
"scala": Scala
"v8.3": JavaScript
"v8.nodejs": Node.js
'''
class QuotesSpider(scrapy.Spider):
PAGE_LIMIT = 5 # Change Page limit to get more successful submission
LANGUAGE = 'c.gcc' # Get solutions from the given Language
name = "cfSpider"
def __init__(self, lang = 'python.3', **kwargs):
LANGUAGE = lang
def start_requests(self):
# Make API call to codeforces server
html = urlopen('http://codeforces.com/api/problemset.problems').read()
# Convert given data into JSON format
jsonData = json.loads(html.decode('utf-8'))
for data in jsonData['result']['problems']:
tags = data['tags']
index = data['index']
contestId = data['contestId']
name = data['name']
yield scrapy.Request(url='http://codeforces.com/problemset/status/'+str(contestId)+'/problem/'+str(index),
callback=self.parse,
meta = {'tags': tags, 'index': index, 'contestId': contestId, 'name': name})
def get_details(self, response):
tags = response.meta['tags']
index = response.meta['index']
contestId = response.meta['contestId']
name = response.meta['name']
tem = response.meta['tem']
url = 'http://codeforces.com/problemset/status/'+ str(contestId) +'/problem/'+ str(index) +'/page/'+str(tem)+'?order=BY_PROGRAM_LENGTH_ASC'
data = response.meta['data']
for i in response.css('tr::attr(data-submission-id)').extract():
data.add(i)
tem += 1 # Takes care of solution pages
if tem >= self.PAGE_LIMIT:
yield {
"contestId": contestId,
"index": index,
"name": name,
"tags": tags,
"language": self.LANGUAGE,
"Submissions": data,
}
else:
yield scrapy.Request(url = url,
callback= self.get_details,
meta = {'tags': tags, 'index': index, 'contestId': contestId, 'name': name, 'data': data, 'tem': tem})
def parse(self, response):
data = set() # Empty data used to append successfully submissions in one set
tags = response.meta['tags']
index = response.meta['index']
contestId = response.meta['contestId']
name = response.meta['name']
return scrapy.FormRequest.from_response(
response,
formxpath='//*[@id="sidebar"]/div/div[4]/form',
dont_filter = True,
meta = {'tags': tags, 'index': index, 'contestId': contestId, 'name': name, 'data': data, 'tem': 2},
callback=self.get_details,
formdata={'programTypeForInvoker': self.LANGUAGE,
'verdictName': 'OK'})
| true |
e653a6ce6dc5c836ae537a15f45e826b898038a3 | Python | oliviapangkiey/Student-Administration-Server | /grad_student.py | UTF-8 | 2,692 | 3.390625 | 3 | [] | no_license | #! /usr/bin/env python3
from sqlalchemy import Column, Integer, String, DateTime
from abstract_student import AbstractStudent
class GraduateStudent(AbstractStudent):
    """SQLAlchemy-mapped model for a graduate student.

    Extends AbstractStudent with the supervising professor and the
    undergraduate degree held on entry to the program.
    """

    TYPE = 'Graduate'  # discriminator value stored in the ``type`` field

    supervisor = Column(String(250))
    undergrad_degree = Column(String(300))

    def __init__(self, first_name, last_name, program, classification, enroll_status, enroll_date, type, supervisor, undergrad_degree):
        """Initialize a graduate student; validates supervisor and degree lengths."""
        super().__init__(first_name, last_name, program, classification, enroll_status, enroll_date, type)
        self.type = GraduateStudent.TYPE
        AbstractStudent._validate_parameter("Supervisor", supervisor, 250)
        self.supervisor = supervisor
        AbstractStudent._validate_parameter("Undergraduate degree", undergrad_degree, 300)
        self.undergrad_degree = undergrad_degree

    def to_dict(self):
        """Return a dictionary of the graduate student's details (base fields + extras)."""
        details = super().to_dict()  # renamed from ``dict`` to avoid shadowing the builtin
        details['supervisor'] = self.supervisor
        details['undergraduate_degree'] = self.undergrad_degree
        details['type'] = self.type
        return details

    def get_details(self):
        """Return a human-readable summary of the graduate student."""
        # Bug fix: program/classification/supervisor/enroll_status are plain
        # attribute values, not callables -- calling them raised TypeError.
        # Also insert a space between the first and last name.
        detail = 'Name: {} \nProgram: {} \nClassification: {} \nSupervisor: {}\nEnroll status: {}\nEnroll period: {} years'.format(
            self.first_name + ' ' + self.last_name,
            self.program,
            self.classification,
            self.supervisor,
            self.enroll_status,
            self.get_enroll_length_in_years())
        return detail

    def copy(self, object):
        """Copy all fields from another GraduateStudent into this one.

        Silently does nothing when ``object`` is not a GraduateStudent
        (preserves the original best-effort behaviour).
        """
        if isinstance(object, GraduateStudent):
            self.first_name = object.first_name
            self.last_name = object.last_name
            self.program = object.program
            self.classification = object.classification
            self.enroll_status = object.enroll_status
            self.enroll_date = object.enroll_date
            self.type = object.type
            self.supervisor = object.supervisor
            self.undergrad_degree = object.undergrad_degree
| true |
a7780df683cafc406208721a532eccbedcc0551d | Python | Posnet/textbook | /figure-generating-scripts/square_wave_fft.py | UTF-8 | 1,158 | 3.109375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Build a +/-1 square wave: 10 alternating half-periods of 20 samples each
# (200 samples total), then take its zero-padded FFT magnitude spectrum.
y = np.repeat(np.arange(10)%2, 20)*2 - 1
y = y.astype(float)
# Real FFT zero-padded to 10000 points for a smooth-looking spectrum.
Y = np.abs(np.fft.rfft(y, 10000))
print(Y)
# Two side-by-side panels: time domain (left) and frequency domain (right).
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10, 3))
plt.subplots_adjust(wspace=0.4)
# Plot the waveform and its spectrum
ax1.plot(y)
ax1.text(len(y)+10, -0.05, 'Time')
ax1.set_ylabel("Amplitude")
ax2.plot(Y)
ax2.set_xlabel("Frequency")
ax2.set_ylabel("Magnitude")
# Anchor the left (y-axis) spine at x = 0 on both panels.
ax1.spines['left'].set_position('zero')
ax2.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax1.spines['right'].set_color('none')
ax1.yaxis.tick_left()
ax2.spines['right'].set_color('none')
ax2.yaxis.tick_left()
# Anchor the bottom (x-axis) spine at y = 0 on both panels.
ax1.spines['bottom'].set_position('zero')
ax2.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax1.spines['top'].set_color('none')
ax1.xaxis.tick_bottom()
ax2.spines['top'].set_color('none')
ax2.xaxis.tick_bottom()
# Turn off tick numbering/labels
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax2.set_xticklabels([])
ax2.set_yticklabels([])
# Show interactively, then save the figure for the textbook's static assets.
plt.show()
fig.savefig('../_static/square-wave.svg', bbox_inches='tight')
c383e5da4a779fb17388b00166510c6f025048c0 | Python | Onebigbera/Daily_Practice | /test/unit_test_g/unittest_simple/parameterization_file.py | UTF-8 | 1,369 | 3.15625 | 3 | [] | no_license | # -*-coding:utf-8 -*-
# File :parameterlization_file.py
# Author:George
# Date : 2018/12/2
"""
参数化一般分两种情况: 一种是需要大量数据还有一种是有唯一性校验的情况下都需要参数化那么在unittest中是如何实现的呢
"""
import unittest, HTMLTestRunner
# 自己扩展的
from parameterized import parameterized
def login(username, password):
    """Toy credential check used by the parameterized tests.

    Returns True only for the single hard-coded account
    ('george' / '123456'); any other combination is rejected.
    """
    return username == 'george' and password == '123456'
class LoginG(unittest.TestCase):
    """Parameterized unit tests for the login() helper."""
    @parameterized.expand([
        # Each row may be a list or a tuple: (username, password, expected)
        ['george', '123456', True],
        [' ', '123456', True],
        ['steven', '', False],
        ['hello', '123456', False]
    ]
    )
    # The parameters of test_login map onto the columns of the rows above;
    # the runner calls test_login once per row until all rows are consumed.
    # NOTE(review): the [' ', '123456', True] row expects True although
    # login() only accepts 'george' -- presumably a deliberately failing
    # case for demonstration; confirm.
    def test_login(self, username, password, exception):
        """Login: the actual result must equal the expected value."""
        result = login(username, password)
        self.assertEqual(result, exception)
if __name__ == "__main__":
    # Build a suite containing every test method of LoginG.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(LoginG))
    # HTML report destination (hard-coded absolute Windows path).
    # NOTE(review): the handle is never closed explicitly; presumably
    # released at interpreter exit -- confirm this is acceptable.
    fw = open(r'F:\Python_guide\Daily_Practice\test\unit_test_g\unittest_simple\test_report\param.html', 'wb')
    runner = HTMLTestRunner.HTMLTestRunner(stream=fw, title='parameterization', description='how')
    runner.run(suite)
| true |
a089124b023ebfd2139d10086b604892d7d99187 | Python | rajjoan/PythonSamples | /Chapter 11/list3.py | UTF-8 | 213 | 4.03125 | 4 | [] | no_license | #Print the last item on the list
# Start with a list of fruit and overwrite the third item by index.
food=["cherry","strawberry","bannana","blueberries","apple"]
food[2]="orange"
# Negative index -1 reads the last item on the list.
print(food[-1])
# Slice from index 2 up to (but not including) index 5.
print(food[2:5])
# append() adds an item to the end of the list.
food.append("carrot")
print(food)
# insert() places an item at a specific index, shifting later items right.
food.insert(2,"pear")
print(food)
| true |
5e1232fc24df0d5047e5f1521feb9a25dcc184de | Python | renjunxiang/Text_Generate | /generate_text/generate_tensorflow_correct.py | UTF-8 | 4,453 | 2.78125 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import tensorflow as tf
from .rnn import model_tensorflow
import re
import pickle
DIR = os.path.dirname(os.path.abspath(__file__))
def generate_tensorflow(process_path=DIR + '/model/poem/poem.pkl',
                        model_path=DIR + '/model/poem/train',
                        maxlen=80,
                        correct=True
                        ):
    '''
    Interactive loop: restore the trained RNN and generate Chinese poems.

    :param process_path: path to the pickled preprocessing model (vocabulary, net sizes)
    :param model_path: directory holding the trained network checkpoints
    :param maxlen: maximum length (in characters) of a generated poem
    :param correct: when True, nudge punctuation so it lands every 4-7 characters
    :return: None -- each generated poem is printed to stdout
    '''
    with open(process_path, mode='rb') as f:
        data_process = pickle.load(f)
    word_index = data_process.word_index
    # Build the inference graph (batch size 1) and a saver for the checkpoint.
    input_data = tf.placeholder(tf.int32, [None, None])
    output_targets = tf.placeholder(tf.int32, [None, None])
    tensors = model_tensorflow(input_data=input_data,
                               output_targets=output_targets,
                               num_words=data_process.num_words,
                               num_units=data_process.num_units,
                               num_layers=data_process.num_layers,
                               batchsize=1)
    saver = tf.train.Saver(tf.global_variables())
    initializer = tf.global_variables_initializer()
    while True:
        with tf.Session() as sess:
            sess.run(initializer)
            checkpoint = tf.train.latest_checkpoint(model_path)
            saver.restore(sess, checkpoint)
            print('中文作诗,作诗前请确保有模型。输入开头,quit=离开;\n请输入命令:')
            start_word = input()
            if start_word == 'quit':
                break
            try:
                print('开始创作')
                input_index = []
                for i in start_word:
                    index_next = word_index[i]
                    input_index.append(index_next)
                input_index = input_index[:-1]
                # Used to correct punctuation placement
                punctuation = [word_index[','], word_index['。'], word_index['?']]
                punctuation_index = len(start_word)
                while index_next not in [0, word_index['E']]:
                    input_index.append(index_next)
                    [y_predict, last_state] = sess.run([tensors['prediction'], tensors['last_state']],
                                                       feed_dict={input_data: np.array([input_index])})
                    y_predict = y_predict[-1]
                    y_predict = {num: i for num, i in enumerate(y_predict)}
                    index_max = sorted(y_predict, key=lambda x: y_predict[x], reverse=True)[:10]
                    # p_max = [y_predict[i] for i in index_max]
                    index_next = np.random.choice(index_max)
                    punctuation_index += 1
                    if correct:
                        # Punctuation after [3,7] characters is normal; reset the counter
                        if index_next in punctuation and punctuation_index > 3 and punctuation_index < 8:
                            punctuation_index = 0
                        # More than 7 characters without punctuation and punctuation is a candidate: force punctuation
                        elif punctuation_index >= 8:
                            punctuation_index = 0
                            while (set(punctuation) & set(index_max)) and (index_next not in punctuation):
                                index_next = np.random.choice(index_max)
                        # Fewer than 3 characters since the last punctuation: pick a word instead
                        elif punctuation_index <= 3:
                            while index_next in punctuation:
                                index_next = np.random.choice(index_max)
                        else:
                            pass
                    if len(input_index) > maxlen:
                        break
                index_word = {word_index[i]: i for i in word_index}
                text = [index_word[i] for i in input_index]
                text = ''.join(text)
            except Exception as e:
                print(e)
                text = '不能识别%s' % start_word
            finally:
                text_list = re.findall(pattern='[^。?!]*[。?!]', string=text)
                print('作诗完成:\n')
                for i in text_list:
                    print(i)
                print('\n------------我是分隔符------------\n')
| true |
335d0b3f3b817b1e439c81adb7207da8029a4913 | Python | kevchengcodes/kcheng | /euler/euler_6.py | UTF-8 | 307 | 3.75 | 4 | [] | no_license | """
********* Euler Problem 6 *********
PROMPT:
Find the difference between the sum of the squares of the
first one hundred natural numbers and the square of the sum.
"""
# Project Euler #6: difference between (sum of 1..100)^2 and the
# sum of the squares of 1..100.
hundo = list(range(1, 101))
sums = 0
sumsq = 0
for value in hundo:
    sums += value
    sumsq += value * value
sqsum = sums * sums
answer = sqsum - sumsq
print(answer)
db3acc237a52f3d0c5f3585cc045ef039bb382ab | Python | rahuljmt/dlithe_internship | /avg_3values.py | UTF-8 | 221 | 3.8125 | 4 | [] | no_license | #program to find sum and average of 3 values
# Read three integers from the user and print their average.
a=int(input("Enter the first number:"))
b=int(input("Enter the second number:"))
c=int(input("Enter the third number:"))
# True division keeps the fractional part even for integer inputs.
avg=(a+b+c)/3
print("Average of three number is:",avg)
| true |
78ea35ac7a492711a548ea919733e5279fb6903b | Python | CMU-TBD/Spherical-Robot-Haptic | /SpheroLooper.py | UTF-8 | 16,834 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
from sphero_sprk.sphero import Sphero
import time
import threading
import copy
import numpy as np
import matplotlib.pyplot as plt
import tkinter as tk
from Tools import Tools
BREAK = Sphero.RAW_MOTOR_MODE_BRAKE
FORWARD = Sphero.RAW_MOTOR_MODE_FORWARD
REVERSE = Sphero.RAW_MOTOR_MODE_REVERSE
class SpheroLooper(object):
def _imu_callback(self,data):
time_now = time.time()
self._last_imu = data
#print("IMU Hz:{}".format(1/(time_now - self._imu_time)))
self._imu_time = time_now
def _accel_callback(self,data):
time_now = time.time()
if self._last_accel is None:
self._last_accel = data
else:
alpha = 0.5
self._last_accel['x'] = Tools.exponential_moving_average(self._last_accel['x'],data['x'],alpha)
self._last_accel['y'] = Tools.exponential_moving_average(self._last_accel['y'],data['y'],alpha)
self._last_accel['z'] = Tools.exponential_moving_average(self._last_accel['z'],data['z'],alpha)
self._accel_dt = time_now - self._accel_time
#print("Accel Hz:{}".format(1/(time_now - self._accel_time)))
self._accel_time = time_now
def __init__(self, orb, rate):
self._orb = orb
self._rate = rate
self._dir = [Sphero.RAW_MOTOR_MODE_FORWARD, Sphero.RAW_MOTOR_MODE_REVERSE]
self._power = [0,0]
self._pause_flag = False
self._stop_flag = False
self._control_thread = None
self._control_rate = 20 #20Hz is the maximum we can send to the system, seemed to be a limitation
self._last_imu = None
self._last_accel = None
self._accel_dt = 0
self._seq_flag = False
self._seq_thread = None
self._fo_heading = 0 #heading that is faked
self._imu_time = time.time()
self._orb.start_IMU_callback(20,self._imu_callback)
self._accel_time = time.time()
self._orb.start_accel_callback(20,self._accel_callback)
def change_power(self, new_power_setting):
for i, power in enumerate(new_power_setting):
if power == 0:
self._dir[i] = Sphero.RAW_MOTOR_MODE_BRAKE
self._power[i] = 0
elif power < 0:
self._dir[i] = Sphero.RAW_MOTOR_MODE_REVERSE
self._power[i] = -1 * power
else:
self._dir[i] = Sphero.RAW_MOTOR_MODE_FORWARD
self._power[i] = power
def start_control_loop(self):
self._stop_flag = False
self._control_thread = threading.Thread(target=self._control_loop)
self._control_thread.start()
print("control loop started")
def stop_control_loop(self):
self._stop_flag = True
self._control_thread.join()
self.breaking()
print("control loop stop")
    def breaking(self):
        """Hard-stop both raw motors immediately (brake mode, zero power)."""
        self._orb.set_raw_motor_values(Sphero.RAW_MOTOR_MODE_BRAKE,0,Sphero.RAW_MOTOR_MODE_BRAKE,0)
def break_control_loop(self):
#stop the sequence looper
self._stop_seq_loop()
#break the control loop
self._dir[0] = Sphero.RAW_MOTOR_MODE_BRAKE
self._dir[1] = Sphero.RAW_MOTOR_MODE_BRAKE
self._power[0] = 0
self._power[1] = 0
print(self._power)
def up_down_back(self):
while np.abs(self._last_imu["pitch"]) > 5:
start_time = time.time()
direction = Sphero.RAW_MOTOR_MODE_FORWARD if self._last_imu["pitch"] < 0 else Sphero.RAW_MOTOR_MODE_REVERSE
pwd_val = 35#np.max([35,np.min([40,np.abs(self._last_imu["pitch"]) + 10])])
print(pwd_val)
self._dir[0] = direction
self._dir[1] = direction
self._power[0] = pwd_val
self._power[1] = pwd_val
sleep_time = 1/self._rate - (time.time() - start_time)
if(sleep_time > 0):
time.sleep(sleep_time)
#self._orb.set_raw_motor_values(direction,pwd_val,direction,pwd_val)
print("DONE")
self.break_control_loop()
#self.breaking()
def stabilization(self, final_heading=0):
#first pause the control loop
self._pause_flag = True
#run the stabalization code
self._orb.set_stabilization(True)
#we roll a bit because it shown to help stabalization
print((180-final_heading)%359)
self._orb.roll(0,(final_heading+180)%359)
time.sleep(1)
#print(final_heading)
self._orb.roll(0,final_heading)
#wait for a 1 second
time.sleep(1)
#self._orb.set_heading(0)
self._pause_flag = False
def acknowledgement_vibe(self):
#vibe to show that we get it
self.change_power([70,-70])
time.sleep(0.1)
self.change_power([-70,70])
time.sleep(0.1)
self.change_power([70,-70])
time.sleep(0.1)
self.change_power([-70,70])
time.sleep(0.1)
self.change_power([0,0])
time.sleep(0.2)
def push_calibration_action(self):
#action sequence that help calibrating the yaw by acclerating/push in certain direction
#store the current diff:
init_x = self._last_accel['x']
init_y = self._last_accel['y']
limit = 2000
#wait until either of them pass the limit
while True:
if np.abs(self._last_accel['x'] - init_x) > limit:
break
if np.abs( self._last_accel['y'] - init_y) > limit:
break
val_x = self._last_accel['x']
val_y = self._last_accel['y']
#send to backend
self._accel_based_calibration_backend(val_x, val_y)
def movement_calibration_action(self):
#action sequence that help calibrating the yaw by moving in certain direction
#store the current diff:
# vel_x = 0#self._last_accel['x']
# vel_y = 0#self._last_accel['y']
init_x = self._last_accel['x']
init_y = self._last_accel['y']
limit = 1000
val_x = 0
val_y = 0
while True:
#calculate the travelled distance
#store the difference between accelerations
val_x += (self._last_accel['x'] - init_x)
init_x = self._last_accel['x']
val_y += (self._last_accel['y'] - init_y)
init_y = self._last_accel['y']
if(np.abs(val_x) > limit or np.abs(val_y) > limit):
break
self._accel_based_calibration_backend(val_x, val_y)
def _accel_based_calibration_backend(self, val_x, val_y):
#normalize vector
vec = [val_x, val_y,0]
print(vec)
vec = vec/np.linalg.norm(vec)
#figure out how much to turn back
base_vec = np.array([0,-1,0]) #because we are trying to align the negative y-axis to the front
print(vec)
roll_val = np.rad2deg(Tools.get_2D_rotation(vec, base_vec))
roll_val = int(roll_val)
print(roll_val)
if(roll_val < 0):
#roll_val = roll_val*-1
roll_val = 360 + roll_val
print(roll_val)
#print("accel x:{}, accel y:{}".format(val_x, val_y))
#send the calibration to the backend
self._calibration_backend(roll_val)
def add_heading_offset(self, offsets):
self._fo_heading += offsets
print(self._fo_heading)
def hand_calibration_action(self):
#action sequence that help with the calibration
#first store the current diff
init_roll = self._last_imu['roll']
init_pitch = self._last_imu['pitch']
limit = 20
#wait until either of them is pass the limit
while True:
if np.abs(self._last_imu['roll'] - init_roll) > limit:
break
if np.abs(self._last_imu['pitch'] - init_pitch) > limit:
break
#save those two values
roll_x = self._last_imu['roll']
pitch_y = self._last_imu['pitch']
#calculate how much to rotate from current plane
point, normal = Tools.create_plane_from_roll_pitch(roll_x,pitch_y)
vec = Tools.project_to_plane(normal,np.array([0,0,1]))
vec = vec/np.linalg.norm(vec)
base_vec = np.array([1,0,0])
roll_val = np.rad2deg(Tools.get_2D_rotation(base_vec, vec))
roll_val = int(roll_val)
if(roll_val < 0):
roll_val = 360 + roll_val
#print("roll:{}, pitch:{}".format(roll_x, pitch_y))
self._calibration_backend(roll_val)
def _calibration_backend(self, roll_val):
#first do the acknowledge vibe
self.acknowledgement_vibe()
#print roll value for debugging
print("roll_val:{}".format(roll_val))
#pause control loop
self._pause_flag = True
#start stabalization
self._orb.set_stabilization(True)
self._orb.set_heading(0)
self._orb.roll(0,roll_val)
time.sleep(0.5)
#self.stabilization()
self._orb.set_heading(0)
self._fo_heading = 0
self._orb.set_stabilization(False)
#resume control loop
self._pause_flag = False
def reset_heading(self, cur_heading):
#first pause the control loop
self._pause_flag = True
#run the stabalization code
self._orb.set_stabilization(True)
#set the current heading as 0
self._orb.set_heading(0,True)
#roll to the 0 possition
# roll_heading = 360 - cur_heading
# self._orb.roll(0, roll_heading)
# time.sleep(0.5)
# self._orb.set_heading(0,True)
# self._orb.roll(0, cur_heading)
# time.sleep(0.5)
#calculate the difference from the current position to the expected heading
self._fo_heading = cur_heading
self._orb.set_stabilization(False)
self._pause_flag = False
def move_to_heading(self, heading):
#move to the heading
self._pause_flag = True
self._orb.set_stabilization(True)
#old code
#self._orb.roll(0,heading)
#time.sleep(1)
#instead of
new_heading = int(np.round(heading - self._fo_heading))
while new_heading < 0:
new_heading = new_heading + 360
while new_heading >= 360:
new_heading = new_heading - 360
print("True heading:{}".format(new_heading))
self._orb.roll(0,new_heading)
time.sleep(0.5)
self._orb.set_stabilization(False)
self._pause_flag = False
def stabilization_fast(self, final_heading=0):
#first pause the control loop
self._pause_flag = True
#run the stabalization code
self._orb.set_stabilization(True)
#we roll a bit because it shown to help stabalization
self._orb.roll(0,final_heading)
#wait for a 1 second
time.sleep(1)
#self._orb.set_heading(0)
self._pause_flag = False
def calibrate(self, new_heading):
#first pause the control loop
self._pause_flag = True
#run the stabalization code
self._orb.set_stabilization(True)
self._orb.roll(0,(new_heading+180)%359)
time.sleep(1)
self._orb.roll(0,new_heading)
time.sleep(1)
self._orb.set_heading(0)
self._pause_flag = False
    def _control_loop(self):
        """Background loop: push the current (direction, power) settings to
        the robot at roughly ``self._control_rate`` Hz until ``_stop_flag``
        is set.

        While ``_pause_flag`` is set the loop keeps running but sends no
        motor command (used by the stabilization/calibration routines).
        """
        while(not self._stop_flag):
            start_time = time.time()
            if(not self._pause_flag):
                self._orb.set_raw_motor_values(self._dir[0],self._power[0],self._dir[1],self._power[1])
            # Sleep off the remainder of the period to hold the target rate.
            action_time = time.time() - start_time
            sleep_time = 1/self._control_rate - action_time
            #print("control Hz:{}".format(1/action_time))
            if(sleep_time > 0):
                time.sleep(sleep_time)
def _sequence_loop(self,seq_tuple,total_time,max_times=0):
total_start_time = time.time()
index = 0
counter = 0
#print("max:{}".format(max_times))
while(self._seq_flag and ( (max_times == 0 or counter < max_times) and (time.time() - total_start_time < total_time))):
start_time = time.time()
seq = seq_tuple[index]
self.change_power([seq[0], seq[1]])
sleep_time = 1/self._rate - (time.time() - start_time)
if(sleep_time > 0):
time.sleep(sleep_time)
index = (index + 1)%np.size(seq_tuple,0)
if(index == 0):
counter += 1
#print("update{}".format(counter))
#stop the thing from moving
self._dir[0] = Sphero.RAW_MOTOR_MODE_BRAKE
self._dir[1] = Sphero.RAW_MOTOR_MODE_BRAKE
self._power[0] = 0
self._power[1] = 0
def _start_seq_loop(self, seq_tuple,total_time,max_times=5):
if(self._seq_thread != None and self._seq_flag == True):
self._stop_seq_loop()
self._seq_flag = True
self._seq_thread = threading.Thread(target=self._sequence_loop,args=(seq_tuple,total_time,max_times))
self._seq_thread.start()
print("sequence loop started:{}".format(seq_tuple))
def _stop_seq_loop(self):
if(self._seq_thread != None and self._seq_flag == True):
self._seq_flag = False
self._seq_thread.join()
self._seq_thread = None
print("sequence loop stopped")
def sequence_pulsing(self, seq, total_time):
seq_tuple = []
#convert the sequence to the looping nature
for s in seq:
seq_tuple.append((s,-1*s))
self._start_seq_loop(seq_tuple,total_time)
def unbalanced_seq(self,seq_tuple, total_time, rate=-1, max_times=0):
self._start_seq_loop(seq_tuple,total_time,max_times)
# total_start_time = time.time()
# index = 0
# if(rate == -1):
# rate = self._rate
# while(time.time() - total_start_time < total_time):
# print(index)
# start_time = time.time()
# seq = seq_tuple[index]
# self.change_power([seq[0], seq[1]])
# sleep_time = 1/rate - (time.time() - start_time)
# if(sleep_time > 0):
# time.sleep(sleep_time)
# index = (index + 1)%len(seq_tuple)
def wait_for_unbalanced_seq(self,timeout=0):
self._seq_thread.join()
def manual_control(self, seq_list, total_time):
#first stop the control loop
self.stop_control_loop()
total_start_time = time.time()
index = 0
while(time.time() - total_start_time < total_time):
start_time = time.time()
dir1, power1, dir2, power2 = seq_list[index]
self._orb.set_raw_motor_values(dir1, power1, dir2, power2)
sleep_time = 1/self._rate - (time.time() - start_time)
if(sleep_time > 0):
time.sleep(sleep_time)
index = (index + 1)%len(seq_list)
def tick(self,interval,total_time):
#first stop the control loop
#self.stop_control_loop()
self.change_power([0,0])
total_start_time = time.time()
while(time.time() - total_start_time < total_time):
start_time = time.time()
self.change_power([50,-50])
time.sleep(0.1)
self.change_power([-50,50])
time.sleep(0.1)
self.change_power([0,0])
# self._orb.set_raw_motor_values(FORWARD, 100, REVERSE, 100)
# time.sleep(200)
# self._orb.set_raw_motor_values(REVERSE, 100, FORWARD, 100)
# time.sleep(200)
# self._orb.set_raw_motor_values(BREAK, 100, BREAK, 100)
sleep_time = interval
if(sleep_time > 0):
time.sleep(sleep_time)
def vibe(self,interval,total_time):
#first stop the control loop
#self.stop_control_loop()
self.change_power([0,0])
total_start_time = time.time()
index = 0
while(time.time() - total_start_time < total_time):
start_time = time.time()
if(index%2 == 0):
self.change_power([50,-50])
else:
self.change_power([-50,50])
time.sleep(0.1)
self.change_power([0,0])
# self._orb.set_raw_motor_values(FORWARD, 100, REVERSE, 100)
# time.sleep(200)
# self._orb.set_raw_motor_values(REVERSE, 100, FORWARD, 100)
# time.sleep(200)
# self._orb.set_raw_motor_values(BREAK, 100, BREAK, 100)
index = (index +1)%2
sleep_time = interval
if(sleep_time > 0):
time.sleep(sleep_time) | true |
4b2ab1595c26a6e12c5bdb122d18308377de835e | Python | Cassini-4B/Korean_OCR | /source/get_letter_features.py | UTF-8 | 2,495 | 2.953125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import cv2 as cv
import glob
import os
import pickle
import matplotlib.pyplot as plt
# NOTE: '%matplotlib inline' is an IPython/Jupyter magic, not valid Python
# syntax; left here as a comment so this script can run outside a notebook.
# %matplotlib inline
# Shared accumulator: maps each image filename to its tuple of contour features.
contour_vals = {}
def get_img_mts(png):
    """Extract contour-based shape features from one letter image.

    Reads ``png``, takes the first contour of the grayscale image and
    records centroid, area, perimeter, enclosing-circle, aspect-ratio,
    extent, orientation and extreme-point features in the module-level
    ``contour_vals`` dict, keyed by the file name.

    Returns the (shared, mutated) ``contour_vals`` dict.
    """
    # read image
    img = cv.imread(png)
    # transform image to grayscale; contours are computed on this image
    # (the original also computed a resized copy and a thresholded copy,
    # but neither was ever used -- removed as dead code)
    grayimg = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # find contours (OpenCV 3.x returns image, contours, hierarchy)
    im2, contours, hierarchy = cv.findContours(grayimg, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # get moments of the first contour
    cnt = contours[0]
    M = cv.moments(cnt)
    # Feature 1: centroid (m00 == 0 for degenerate contours -> fall back to origin)
    try:
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
    except ZeroDivisionError:
        cx, cy = 0,0
    # Feature 2: area of contour
    area = cv.contourArea(cnt)
    # Feature 3: perimeter of contour
    perimeter = cv.arcLength(cnt,True)
    # Feature 4: center/radius of minimal enclosing circle
    (x1,y1),radius = cv.minEnclosingCircle(cnt)
    center = (int(x1),int(y1))
    radius = int(radius)
    # Feature 5: aspect ratio (ratio of width to height of bounding rectangle)
    x2,y2,w,h = cv.boundingRect(cnt)
    aspect_ratio = float(w)/h
    # Feature 6: extent (ratio of contour area to bounding rectangle area)
    rect_area = w*h
    extent = float(area)/rect_area
    # Feature 7: orientation (angle) and major/minor axis lengths;
    # fitEllipse needs at least 5 contour points, otherwise raises cv.error
    try:
        (x3,y3), (MA, ma), angle = cv.fitEllipse(cnt)
    except cv.error:
        (MA, ma), angle = (0,0),0
    # Feature 8: extreme points of the contour
    leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
    rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
    topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
    bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
    # store values in the shared accumulator
    contour_vals[str(png)]= (cx, cy, area, perimeter, center, radius, aspect_ratio, w, h, extent, MA, ma, angle,
                            leftmost, rightmost, topmost, bottommost)
    return contour_vals
# Placeholder: set this to the root directory that holds the letter images.
img_directory = ('Directory_where_images_are_stored')
# Walk every subdirectory and extract features for each image file.
for dirpath, dirname, filenames in os.walk(img_directory):
    for f in sorted(filenames):
        # NOTE(review): passes the bare filename rather than
        # os.path.join(dirpath, f); cv.imread can only find the file when
        # the script runs from that directory -- confirm intended cwd.
        get_img_mts(f)
# Persist the accumulated feature dict for downstream processing.
with open('letter_features.pickle', 'wb') as l:
    pickle.dump(contour_vals, l)
| true |
27a8c3a15456cc3365e9ad11014812df3c36d027 | Python | vamsiX1/reimagined-winner | /Examples/replaceString.py | UTF-8 | 349 | 3.96875 | 4 | [
"MIT"
] | permissive | # This is to verify the string
# Demonstrate str.replace(): swap the whole sentence for a different one.
stg = 'this is some the first string'
stgs= stg.replace('this is some the first string','this is the second string')
print(stgs)
# Demonstrate substring membership testing with the `in` operator.
stg3 ="Vamsi,Vamsi,Vamsi,Vamsi,"
if "Vamsi" in stg3:
    print("found it")
# Same membership test against interactive user input.
stg4 = input("Enter a string\n")
if "Vamsi" in stg4:
    print("Welcome Vamsi")
| true |
9f88849037a06909c580e8350b937422de8903db | Python | Tokoy/MyPythonProject | /NetReptile.py | UTF-8 | 410 | 2.6875 | 3 | [] | no_license | # coding=utf-8
# Python 2 crawler exercise (heibanke crawler_ex00): each page shows a
# number which is appended to the base URL to reach the next page; the
# chain ends when a page contains no number.
import re
import urllib
url = r'http://www.heibanke.com/lesson/crawler_ex00/'
# Capture the digits inside the page's <h3> heading.
reg = re.compile(r"<h3>[^\d<]*?(\d+)[^\d<]*?</h3>")
while True:
    print '正在读取网址', url
    html = urllib.urlopen(url).read()
    num = reg.findall(html)
    if len(num) == 0:
        # No number on the page: the chain is finished.
        break
    else:
        # Rebuild the URL from the base plus the number just scraped.
        url = r'http://www.heibanke.com/lesson/crawler_ex00/'
        url = url+num[0]
print '结束'
| true |
cafdb1a9bb5674b47c0325628d312298c25c4c7a | Python | mattlyne94/Ansible | /ec2python.py | UTF-8 | 885 | 2.578125 | 3 | [] | no_license | # Creating an EC2 instance again but this time using Python
# Create an EC2 instance with the boto3 Python SDK and attach an Elastic IP.
import boto3
dryRun = False; # variable to put the script into dry run mode where the function allows it
ec2Client = boto3.client('ec2')
ec2Resource = boto3.resource('ec2')
# Launch a single t2.micro instance from the hard-coded AMI/key/subnet.
instanceDict = ec2Resource.create_instances(
    DryRun = dryRun,
    ImageId = 'ami-0f75cb5a4a1ca2993',
    MinCount = 1,
    MaxCount = 1,
    InstanceType = 't2.micro',
    KeyName = 'studentT00164220',
    SubnetId = 'subnet-2972a665')
# Wait for the instance to reach 'running' before assigning the Elastic IP.
instanceDict[0].wait_until_running();
# Allocate an Elastic IP in the VPC domain.
eip = ec2Client.allocate_address(DryRun=dryRun, Domain='vpc')
# Associate the Elastic IP address with the instance launched above.
ec2Client.associate_address(
    DryRun = dryRun,
    InstanceId = instanceDict[0].id,
    AllocationId = eip["AllocationId"])
| true |
88e60e83bdc7b3a9f03444234bb8e59b8a9fb86c | Python | rioforce/creationlab | /ble730/script.py | UTF-8 | 631 | 2.859375 | 3 | [] | no_license | import os
# Static HTML fragments wrapped around the generated list of links.
openHTML = """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Creation Lab</title>
</head>
<body>
    <h1>Creation Lab Index</h1>
    <ul>
"""
closeHTML = """</ul>
</body>
</html>
"""
# Build index.html: one list entry per subdirectory of the current folder.
with open("index.html", "wt", encoding="utf-8") as f:
    f.write(openHTML)
    for dir in os.listdir():
        if os.path.isdir(dir):
            print(dir[10])
            # Link text drops the first 10 characters (11 when char 10 is '-').
            # NOTE(review): assumes every directory name has at least 11
            # characters; shorter names raise IndexError on dir[10] --
            # confirm the directory naming scheme.
            fName = (dir[11:] if dir[10] == "-" else dir[10:])
            f.write('    <li><a href="{0}/{1}.html">{0}</a></li>\n'.format(dir, fName))
    f.write(closeHTML)
| true |
1fafa8132c05f53ceca5bb7a1f55356db9302977 | Python | acislab/HuMaIN_Text_Extraction | /src/get_lines_google.py | UTF-8 | 10,362 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
##########################################################################################
# Developer: Icaro Alzuru Project: HuMaIN (http://humain.acis.ufl.edu)
# Description:
# Given an image, it uses the Google Cloud Vision API output to extract the text in lines.
# In a folder, the cropped lines in jpg format, as well as their correspond extracted text.
# In a summary output file, the list of cropped lines and their coordinated are stored.
# PRE-REQUISITE: (Google Credentials). Run something like the following to indicate the user
# and project that will be used in the Google Cloud:
# export GOOGLE_APPLICATION_CREDENTIALS="/home/user/Google/credential_file.json"
# (Install the Google Cloud Vision python libraries)
#
##########################################################################################
# Copyright 2019 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################################
import argparse, io, os, sys
from enum import Enum
from google.cloud import vision
from google.cloud.vision import types
from PIL import Image
breaks = vision.enums.TextAnnotation.DetectedBreak.BreakType
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
def crop_save( img_path_filename, lines_boxes, lines_texts, lines_probs, filename, basename, output_dir_name ):
    """Crop every detected text line out of the page image and persist it.

    For line i (1-based, zero-padded), writes three files under
    ``output_dir_name``: <basename>_NNN.jpg (the padded crop),
    <basename>_NNN.txt (the line's text) and <basename>_NNN.prob
    (one symbol + confidence per row).

    Returns (text_local, text_global): tab-separated one-row-per-line
    summaries of name, box coordinates and text; the global variant is
    additionally prefixed with the page's filename.
    """
    page_image = Image.open(img_path_filename)
    width, height = page_image.size
    text_local = ""
    text_global = ""
    for line_no, box in enumerate(lines_boxes):
        # Pad the bounding box slightly and clamp it to the image bounds.
        x1 = max(box[0] - 8, 0)
        y1 = max(box[1] - 1, 0)
        x2 = min(box[2] + 10, width - 1)
        y2 = min(box[3] + 1, height - 1)
        # Crop the line region and save it as a max-quality JPEG.
        n_line = "%03d" % (line_no + 1)
        line_filename = output_dir_name + "/" + basename + "_" + n_line + ".jpg"
        page_image.crop((x1, y1, x2, y2)).save(line_filename, 'JPEG', quality=100)
        # Summary row: crop name, box coordinates and the line's text.
        text_line = (basename + "_" + n_line + ".jpg\t" + str(x1) + "\t" + str(y1) + "\t" +
                     str(x2) + "\t" + str(y2) + "\t" + ''.join(lines_texts[line_no]) + "\n")
        text_local += text_line
        text_global += filename + "\t" + text_line
        # Per-line text file and symbol/confidence file (parallel lists).
        symbols = lines_texts[line_no]
        confidences = lines_probs[line_no]
        content_text_file = ''.join(symbols)
        content_prob_file = ''.join(sym + '\t' + str(conf) + '\n'
                                    for sym, conf in zip(symbols, confidences))
        text_filename = output_dir_name + "/" + basename + "_" + n_line + ".txt"
        with open(text_filename, "w+") as f_text:
            f_text.write(content_text_file)
        prob_filename = output_dir_name + "/" + basename + "_" + n_line + ".prob"
        with open(prob_filename, "w+") as f_prob:
            f_prob.write(content_prob_file)
    return (text_local, text_global)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
def process_paragraph( paragraph ):
    """ The function will return lists of bounding boxes, lines' text, and lines' probabilities

    Walks the Vision API paragraph symbol by symbol, growing one line at a
    time; a line is closed when a symbol carries an end-of-line detected
    break (EOL_SURE_SPACE, HYPHEN or LINE_BREAK).
    Returns three parallel lists: [x1, y1, x2, y2] pixel boxes, per-line
    symbol-text lists, and per-symbol confidence lists.
    """
    # Lists of bounding boxes, text, and probabilities
    line_box_list = []
    line_text_list = []
    line_prob_list = []
    # Line under processing
    current_line_text = []
    current_line_prob = []
    # Bounding box temporary variables: running min for the upper-left
    # corner, running max for the lower-right corner.
    x1 = 100000
    y1 = 100000
    x2 = 0
    y2 = 0
    for word in paragraph.words:
        for symbol in word.symbols:
            # x1, y1 (Left upper corner)
            # NOTE(review): vertices are assumed ordered UL, UR, LR, LL as
            # the Vision API usually emits them -- confirm for rotated text.
            if symbol.bounding_box.vertices[0].x < x1:
                x1 = symbol.bounding_box.vertices[0].x
            if symbol.bounding_box.vertices[0].y < y1:
                y1 = symbol.bounding_box.vertices[0].y
            if symbol.bounding_box.vertices[1].y < y1:
                y1 = symbol.bounding_box.vertices[1].y
            if symbol.bounding_box.vertices[3].x < x1:
                x1 = symbol.bounding_box.vertices[3].x
            # x2, y2 (right lower corner)
            if symbol.bounding_box.vertices[2].x > x2:
                x2 = symbol.bounding_box.vertices[2].x
            if symbol.bounding_box.vertices[2].y > y2:
                y2 = symbol.bounding_box.vertices[2].y
            if symbol.bounding_box.vertices[1].x > x2:
                x2 = symbol.bounding_box.vertices[1].x
            if symbol.bounding_box.vertices[3].y > y2:
                y2 = symbol.bounding_box.vertices[3].y
            current_line_text.append( symbol.text )
            current_line_prob.append( symbol.confidence )
            # Check for blank spaces: the API does not score spaces, so a
            # fixed 0.95 pseudo-confidence is recorded for them.
            if symbol.property.detected_break.type in [ breaks.SPACE, breaks.SURE_SPACE ]:
                current_line_text.append( ' ' )
                current_line_prob.append( 0.95 )
            # Check for new lines: close the current line and reset the
            # accumulators for the next one.
            if symbol.property.detected_break.type in [ breaks.EOL_SURE_SPACE, breaks.HYPHEN, breaks.LINE_BREAK ]:
                line_box_list.append( [x1, y1, x2, y2] )
                line_text_list.append( current_line_text )
                line_prob_list.append( current_line_prob )
                # Line under processing
                current_line_text = []
                current_line_prob = []
                # Bounding box temporary variables
                x1 = 100000
                y1 = 100000
                x2 = 0
                y2 = 0
    return( line_box_list, line_text_list, line_prob_list )
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
def process_image( img_path_filename, output_dir_name, output_path_filename):
    """ Crop the text paragraphs and save the information about the cropped files

    Runs Google Cloud document text detection on the image, collects every
    recognized line (via process_paragraph), then crops/saves each line and
    appends the line coordinates to a local CSV and a global text file.
    """
    ########################### Google OCR #############################
    client = vision.ImageAnnotatorClient()
    lines_boxes_img = []
    lines_texts_img = []
    lines_probs_img = []
    # Path + Base name for the block files
    # NOTE(review): '/'-splitting assumes POSIX paths -- not Windows-safe.
    filename = img_path_filename.split('/')[-1]
    basename = filename.split('.')[0]
    content = None
    with io.open( img_path_filename, 'rb' ) as image_file:
        content = image_file.read()
    try:
        # Process image and recognize its parts and text
        image = types.Image( content=content )
        response = client.document_text_detection(image=image)
        document = response.full_text_annotation
        fulltext_path_filename = output_dir_name + "/" + basename + ".txt"
        # Save all the extracted text in a text file
        with open( fulltext_path_filename,'w') as f:
            f.write( response.full_text_annotation.text )
        # Collect the lines, their probabilities, and their bounding boxes
        for page in document.pages:
            for block in page.blocks:
                for paragraph in block.paragraphs:
                    # Divide the paragraph in lines and get its lines, bounding boxes, and symbols' probabilities
                    lines_boxes_par, lines_texts_par, lines_probs_par = process_paragraph( paragraph )
                    # Extend the line lists
                    lines_boxes_img.extend( lines_boxes_par )
                    lines_texts_img.extend( lines_texts_par )
                    lines_probs_img.extend( lines_probs_par )
    # Broad catch is deliberate here: a single bad image should not abort a
    # batch run; the error is reported and the image is skipped.
    except Exception as e:
        print("Error: " + img_path_filename + ", " + str(e))
        return
    # Crop and save the image for each paragraph, its text files, and its probabilities files. It also returns the bbox statistics.
    text_local, text_global = "", ""
    text_local, text_global = crop_save( img_path_filename, lines_boxes_img, lines_texts_img, lines_probs_img, filename, basename, output_dir_name )
    # Save the bounding box information in the local and in the global file
    if text_global != "":
        # Save the data of the lines in the local text file
        with open(output_dir_name + "/" + basename + "_lines.csv", "w+") as f:
            f.write( text_local )
        # Save the data of the lines in the global text file (append mode)
        with open(output_path_filename, "a+") as f:
            f.write( text_global )
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    """ Extract the lines from a image (jpg) file using Google Cloud Text Detection.
    """
    parser = argparse.ArgumentParser("Extract the lines from a image (jpg) file using Google Cloud Text Detection.")
    parser.add_argument('-if', '--input_file', action="store", required=True, help="Path + Filename of the jpg image to crop in blocks.")
    parser.add_argument('-od', '--output_dir', action="store", required=True, help="Directory where the images of the cropped blocks will be saved.")
    parser.add_argument('-of', '--output_file', action="store", required=True, help="Path + Filename of the text file which will save the coordinates of the cropped lines.")
    args = parser.parse_args()
    # Arguments Validations
    if ( not os.path.isfile( args.input_file ) ):
        print("Error: The image (" + args.input_file + ") file was not found.\n")
        parser.print_help()
        sys.exit(1)
    if not os.path.exists( args.output_dir ):
        try:
            os.makedirs( args.output_dir )
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; os.makedirs failures raise OSError.
        except OSError:
            print('Error: The destination directory was not found and could not be created.\n')
            parser.print_help()
            sys.exit(2)
    # Crop the blocks and save the information about the cropped files
    process_image(args.input_file, args.output_dir, args.output_file)
| true |
3650aba3ab7da85ba56e14ad90047b123557859b | Python | Umbriona/Homework-4 | /FourierAnalys.py | UTF-8 | 2,265 | 2.71875 | 3 | [] | no_license |
import numpy as np
from scipy.fftpack import rfft, irfft, fftfreq
def BandFilter(signal, time=1, lowerBound=0, higherBound=100):
    """Zero every spectral bin outside [lowerBound, higherBound] and
    transform back to the time domain.

    ``signal`` is a 1-D sample array; ``time`` is the sample spacing passed
    to fftfreq.  NOTE(review): the bin frequencies come from fftfreq
    (complex-FFT layout) while the spectrum comes from scipy.fftpack.rfft
    (packed real layout); this mirrors the original implementation exactly
    rather than "correcting" the pairing.
    """
    freqs = fftfreq(signal.size, d=time)
    spectrum = rfft(signal).copy()
    # Combined band mask is equivalent to the original's two assignments.
    out_of_band = (freqs < lowerBound) | (freqs > higherBound)
    spectrum[out_of_band] = 0
    return irfft(spectrum)
def FilterdSignals(rawSignals, lB=0, hB=100):
    """Band-pass filter the eight raw signal rows of ``rawSignals``.

    Row layout (kept from the original code): rows 0-2 are the aa/AA/Aa
    signals of population 1, rows 3-5 the same for population 2, and rows
    6-7 the two A signals.  Each row is filtered with BandFilter over the
    [lB, hB] band.

    NOTE(review): each row is indexed again with ``[0, :]`` before
    filtering, so ``rawSignals`` is assumed to be 3-D with shape
    (8, 1, n_samples) -- confirm against the caller.

    Returns a (8, 1, n_samples) array with the filtered rows in the same
    order as the input.
    """
    # One loop replaces eight verbatim copies of the same filtering code.
    filtered = [
        np.asarray(BandFilter(rawSignals[row, :][0, :], time=1,
                              lowerBound=lB, higherBound=hB))
        for row in range(8)
    ]
    returnData = np.array([[row_data] for row_data in filtered])
    return returnData
5e4f317ed095dd7e728bb4241ffe07fa78b0aa12 | Python | pevandenburie/micropython-aes-ccm | /main.py | UTF-8 | 1,301 | 2.8125 | 3 | [] | no_license | import json
import binascii
import AesCcm
print("Hello AES-CCM")
# Known-answer test vector used by this demo:
# Key: 1AE1CC81F39199114EB794C944E655DF
# Timestamp: 5c52184b
# Plain payload: 7036A81BC8DF9CBF1542CDFBCE427E1EB996DD935C9E38E269AE82E85CF7245B
# Init vector: 3438343373369509000001
# Header: 34383433733695095C52184B
# Clear tag: 3AA2DCC6
# Crypted payload: BC9305C19CEF104CE6ABC6CEEACFA92073A39AF9A5F55F83DEF8A539D262F231
# R: 34383433733695095C52184B000001BC9305C19CEF104CE6ABC6CEEACFA92073A39AF9A5F55F83DEF8A539D262F2317EFE87F9
# Decode the hex test vector into raw bytes (.replace strips any spaces).
header = binascii.unhexlify('34383433733695095C52184B'.replace(' ',''))
data = binascii.unhexlify('7036A81BC8DF9CBF1542CDFBCE427E1EB996DD935C9E38E269AE82E85CF7245B'.replace(' ',''))
key = binascii.unhexlify('1AE1CC81F39199114EB794C944E655DF'.replace(' ',''))
nonce = binascii.unhexlify('3438343373369509000001'.replace(' ',''))
# Encrypt...
# AES-CCM with a 4-byte MAC; the header is authenticated but not encrypted.
cipher = AesCcm.new(key, nonce=nonce, mac_len=4)
cipher.update(header)
ciphertext, tag = cipher.encrypt_and_digest(data)
json_k = [ 'nonce', 'header', 'ciphertext', 'tag' ]
json_v = [ binascii.hexlify(x) for x in [cipher.nonce, header, ciphertext, tag] ]
# NOTE(review): hexlify returns bytes; CPython's json.dumps rejects bytes
# values -- this presumably targets MicroPython, confirm before porting.
result = json.dumps(dict(zip(json_k, json_v)))
print(result)
# Decrypt...
# Round-trip: decrypting with the same key/nonce/header must verify the tag.
cipher = AesCcm.new(key, nonce=nonce, mac_len=4)
cipher.update(header)
result = cipher.decrypt_and_verify(ciphertext, tag)
print( "Plaintext: {}".format(binascii.hexlify(result) ))
| true |
0acd5b9e0a12399076a9a985362584615476472f | Python | steve4w4/new-hire-test-stephen | /src/handler.py | UTF-8 | 4,185 | 2.71875 | 3 | [] | no_license | import json
import os
import dateparser
from pymongo import MongoClient
db_uri = os.environ.get("MONGO_DB_URI", "localhost")
db_name = os.environ.get("MONGO_DB_NAME", "new_hire_test")
db = MongoClient(db_uri)[db_name]
expected_columns = ["Name", "Email", "Manager", "Salary", "Hire Date"]
def handle_csv_upload(event, context):
    """Ingest a CSV of employees (``event`` is the raw CSV text) and upsert
    each row into the ``user`` collection, maintaining chain_of_command.

    Returns a dict with ``statusCode`` and a JSON ``body`` reporting how
    many records were created/updated plus any per-row errors.
    """
    response_body = {
        "numCreated": 0,
        "numUpdated": 0,
        "errors": [],
    }
    # Split out the input into separate rows
    input_rows = event.split('\n')
    # Check that the input columns are correct
    columns = input_rows[0].split(',')
    if not validate_input_columns(columns):
        exp_cols = str(expected_columns)
        response_body["errors"].append("Input columns must match: " + exp_cols)
        # BUG FIX: the original built this 400 response but fell through and
        # kept processing the malformed file; reject it immediately.
        return {
            "statusCode": 400,
            "body": json.dumps(response_body)
        }
    # Separate out the employees
    employees = input_rows[1:]
    for i in range(0, len(employees)):
        # parse employee data and ignore the entry if there are missing columns
        employee = employees[i].split(',')
        if not employee or len(employee) != 5:
            continue
        # get a reference to the employee's manager (column 2 holds the
        # manager's normalized e-mail; it may be empty or unknown)
        mgr_id = None
        mgr_match = None
        if employee[2]:
            manager_query = {"normalized_email": employee[2]}
            mgr_match = db.user.find_one(manager_query)
            # BUG FIX: find_one returns None for an unknown manager; the
            # original crashed with AttributeError here.
            if mgr_match:
                mgr_id = mgr_match.get("_id")
        # generate a new record for the employee
        emp_record = {"name": employee[0], "manager_id": mgr_id}
        try:
            emp_record["salary"] = int(employee[3])
        # BUG FIX: was a bare `except:`; int() on a string raises ValueError.
        except ValueError:
            response_body["errors"].append(
                "Salary must be a valid number, recieved: " + employee[3])
        hire_date = dateparser.parse(employee[4])
        if hire_date:
            emp_record["hire_date"] = hire_date
        else:
            response_body["errors"].append(
                "Hire date must be a valid date, recieved: " + employee[4])
        # check if the employee is already in the db, update or insert
        employee_query = {"normalized_email": employee[1]}
        emp_match = db.user.find_one(employee_query)
        emp_id = None
        if emp_match:
            db.user.update_one(employee_query, {
                "$set": emp_record
            })
            emp_id = emp_match.get("_id")
            response_body["numUpdated"] = response_body["numUpdated"] + 1
        else:
            emp_record["normalized_email"] = employee[1]
            emp_record["is_active"] = False
            emp_record["hashed_password"] = None
            # NOTE(review): Collection.insert is deprecated/removed in newer
            # pymongo; migrate to insert_one(...).inserted_id when possible.
            emp_id = db.user.insert(emp_record)
            response_body["numCreated"] = response_body["numCreated"] + 1
        # update the chain of command
        manager_coc_query = {"user_id": mgr_id}
        mgr_coc_obj = db.chain_of_command.find_one(manager_coc_query)
        # if there is a manager, update or insert as needed
        # otherwise, insert the records with no chain of command
        if mgr_coc_obj:
            mgr_coc = mgr_coc_obj.get("chain_of_command")
            new_coc = None
            if mgr_id:
                new_coc = [mgr_id] + mgr_coc
            if emp_match:
                # NOTE(review): the update wraps new_coc in an extra list
                # ([new_coc]) while the insert below stores it bare --
                # looks inconsistent; confirm the intended document shape.
                db.chain_of_command.update_one({"user_id": emp_id}, {
                    "$set": {"chain_of_command": [new_coc]}
                })
            else:
                coc_record = {
                    "user_id": emp_id,
                    "chain_of_command": new_coc
                }
                db.chain_of_command.insert(coc_record)
        else:
            if not emp_match:
                coc_record = {
                    "user_id": emp_id,
                    "chain_of_command": []
                }
                db.chain_of_command.insert(coc_record)
    response = {
        "statusCode": 200,
        "body": json.dumps(response_body)
    }
    return response
def validate_input_columns(columns):
    """Return True when ``columns`` matches the module-level
    ``expected_columns`` exactly: same length, same names, same order."""
    if len(columns) != len(expected_columns):
        return False
    return all(got == want for got, want in zip(columns, expected_columns))
| true |
def lastLetter(w):
    """Return the last character of the (non-empty) string ``w``."""
    # Idiomatic negative indexing replaces w[len(w)-1].
    return w[-1]
print (lastLetter("alex"))
70cd369947ef1954d4b9fc2195fab8d30d61b37f | Python | YizhuZhan/Intern-Life | /mission 4/fibonacci.py | UTF-8 | 540 | 3.40625 | 3 | [] | no_license | #!/usr/bin/python
# coding:utf-8
# Intern-Life - fibonacci.py
# 2017/11/16 11:15
#
__author__ = 'Benny <benny@bennythink.com>'
def fi(x):
    """Return the x-th Fibonacci number (fi(0)=0, fi(1)=fi(2)=1).

    Rewritten iteratively: the original double recursion was O(phi^n) and
    became unusable for x beyond roughly 35.
    """
    if x == 0:
        return 0
    prev, cur = 0, 1
    for _ in range(x - 1):
        prev, cur = cur, prev + cur
    return cur
# Pytest-style unit tests pinning the base cases and two larger values of
# the sequence (note: there is no test_f3 in the original numbering).
def test_f1():
    assert 0 == fi(0)
def test_f2():
    assert 1 == fi(1)
def test_f4():
    assert 1 == fi(2)
def test_f5():
    assert 2 == fi(3)
def test_f6():
    assert 55 == fi(10)
def test_f7():
    assert 6765 == fi(20)
if __name__ == '__main__':
    # BUG FIX: `print fi(10)` is Python-2-only syntax and is a SyntaxError
    # on Python 3; the call form works on both for a single argument.
    print(fi(10))
| true |
b4867b856a35afc2f5850f01377867a5513dde2f | Python | doublethinklab/ccp-alignment-public | /ccpalign/entity/topfreq.py | UTF-8 | 1,200 | 2.734375 | 3 | [] | no_license | """Top frequent entities."""
from collections import Counter
from typing import Dict, List, Set
from ccpalign.util import aggregate_counts
def filter_docs(docs: List[Counter], entities: List[str]) -> List[Dict]:
    """Keep, per document, only the counts of entities listed in ``entities``."""
    result = []
    for doc in docs:
        kept = {entity: count
                for entity, count in doc.items()
                if entity in entities}
        result.append(kept)
    return result
def make_entity_set(label_to_docs: Dict[str, List[Counter]], k: int) \
        -> List[str]:
    """Union of each label's top-k most frequent entities, as a list."""
    collected = set()
    for docs in label_to_docs.values():
        collected |= take_top_k_frequent(aggregate_counts(docs), k)
    # return a list, since that is what downstream code works with
    return list(collected)
def take_top_k_frequent(counts: Counter, k: int) -> Set[str]:
    """Entities holding the k highest counts.

    Ties are resolved exactly as in the original: stable ascending sort by
    count, then reversed, then the first k taken.
    """
    ranked = sorted(counts.items(), key=lambda item: item[1])
    top = ranked[::-1][:k]
    return {entity for entity, _ in top}
| true |
4f34fc3c565b3f0c3ec8f459a084e969f40bdfd5 | Python | bathcat/pyOiler | /src/pyoiler/problems/euler052.py | UTF-8 | 1,162 | 3.65625 | 4 | [] | no_license | from ..shared.digits import to_digits
import itertools
from ..shared.solver import Solver
max_inclusive=6
def have_same_digits(left: int, right: int) -> bool:
    """True when both numbers are written with exactly the same set of digits."""
    return set(to_digits(left)) == set(to_digits(right))
def hits_target(n: int) -> bool:
    """True when n, 2n, ..., max_inclusive*n all use exactly the same digits."""
    digits = set(to_digits(n))
    return all(set(to_digits(n * factor)) == digits
               for factor in range(2, max_inclusive + 1))
def get_thing():
    """Smallest positive integer whose multiples up to 6x permute its digits."""
    return next(i for i in itertools.count(1) if hits_target(i))
def _solve(print=print):
    """Solve problem 52 and report the result through the injected ``print``
    (the parameter shadows the builtin on purpose so tests can capture output)."""
    target = get_thing()
    # BUG FIX: the original wrote 'here''s...' -- two adjacent string
    # literals that concatenate to "heres", not a quoted apostrophe.
    print("here's the number: " + str(target))
    for i in range(1, max_inclusive + 1):
        print('n x ' + str(i) + ' = ' + str(target * i))
    return True
# Problem statement shown by the CLI; kept verbatim from Project Euler 52.
description = '''It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.
'''
# Module-level registration: problem number, title, statement, solver fn.
solver = Solver(52,
                'Permuted multiples',
                description,
                _solve
                )
| true |
3dcd6fdd2410f7219b40733ed6f06fd79c949b45 | Python | Strashiloff/appwithlabs | /backend/DH.py | UTF-8 | 938 | 2.796875 | 3 | [] | no_license | import math, random, os, sys, utils
# Diffie-Hellman key-exchange state, kept in module-level globals and
# populated by getDH(): generator/modulus, private exponents, public
# values, and the two (equal) shared secrets.
g, p = 0, 0 # public
a, b = 0, 0 # private
A, B = 0, 0 # public
K1, K2 = 0, 0 # private
def getDH():
    """Populate the module-level Diffie-Hellman parameters and return them.

    Draws g/p from a small prime pool and a/b from a 4096-related pool via
    the project's ``utils`` helpers, then computes the public values
    A = g^a mod p, B = g^b mod p and the shared secrets K1 = B^a mod p,
    K2 = A^b mod p (mathematically K1 == K2).

    NOTE(review): randint(1, len(arr) - 1) never selects index 0 -- looks
    like an off-by-one; confirm the intent against utils.getIndex.
    NOTE(review): the ``random`` module is not cryptographically secure;
    use ``secrets`` for real key material.
    """
    global g, p, a, b, A, B, K1, K2
    # Small primes for the public generator and modulus.
    simpes = utils.getPrime(0)
    arr = utils.getIndex(simpes, 0)
    i = random.randint(1, len(arr) - 1)
    g = arr[i]
    # Remove g's entry so p is guaranteed to differ from g.
    arr.pop(i)
    p = arr[random.randint(1, len(arr) - 1)]
    # Larger pool for the private exponents.
    simpes = utils.getPrime(4096)
    arr = utils.getIndex(simpes, 4096)
    index = random.randint(1, len(arr) - 1)
    a = arr[index]
    arr.pop(index)
    b = arr[random.randint(1, len(arr) - 1)]
    A = pow(g, a, p)
    B = pow(g, b, p)
    K1 = pow(B, a, p)
    K2 = pow(A, b, p)
    return g, p, a, b, A, B, K1, K2
def encrypt(text):
    """Shift every code point of ``text`` up by the shared secret K1
    (a simple Caesar-style transform over the module-level key)."""
    shift = K1
    shifted = [chr(ord(symbol) + shift) for symbol in text]
    return ''.join(shifted)
def decrypt(text):
    """Reverse ``encrypt``: shift every code point down by the shared
    secret K2 (equal to K1 after a completed exchange)."""
    shift = K2
    restored = [chr(ord(symbol) - shift) for symbol in text]
    return ''.join(restored)
2a361b4bf97e4c513f66eab1bd6fc1397814ecfd | Python | naimulhq/YubiHSM2GUI | /Checkbox.py | UTF-8 | 4,722 | 2.859375 | 3 | [] | no_license | import tkinter as tk
def getChecboxNames(filePath):
    """Return the lines of ``filePath`` with their trailing newlines kept
    (identical to file.readlines())."""
    with open(filePath) as source:
        return list(source)
class Checkbox:
    """Builds the domain/capabilities checkbox grids on a tkinter page and
    converts the checked boxes into YubiHSM-style bitmask integers.

    Each checkbox name maps to a power of two (bit position = line number
    in its backing text file), so summing the checked names' values yields
    the device bitmask.
    """

    def __init__(self, page):
        # page: the tkinter container the checkboxes are placed on.
        self.guiPage = page
        # NOTE(review): absolute, user-specific paths -- these should come
        # from configuration or be resolved relative to the project root.
        self.DOMAINS_PATH = "/home/naimul/NaimulRepo/YubiHSM2Tool/textfiles/domains.txt"
        self.CAPABILITIES_PATH = "/home/naimul/NaimulRepo/YubiHSM2Tool/textfiles/capabilities.txt"
        self.domainDictionary = {}
        self.capabilitiesDictionary = {}
        self.configureCheckboxesAndValues()

    def configureCheckboxesAndValues(self):
        """Load the checkbox labels and build their name -> bit-value maps."""
        self.listOfDomains = getChecboxNames(self.DOMAINS_PATH)
        self.listOfCapabilities = getChecboxNames(self.CAPABILITIES_PATH)
        self.convertCheckboxEntriesToDictionary(self.listOfDomains, self.domainDictionary)
        self.convertCheckboxEntriesToDictionary(self.listOfCapabilities, self.capabilitiesDictionary)

    def convertCheckboxEntriesToDictionary(self, checkboxEntry, dictionary):
        """Map each (stripped) entry name to 2**position, its mask bit."""
        for position, entry in enumerate(checkboxEntry):
            dictionary[entry.strip()] = 2 ** position

    def _placeCheckboxColumns(self, names, startx, starty, incrementx, incrementy, numRows):
        """Lay out one checkbox per name, top-to-bottom in columns of
        ``numRows`` boxes, and return the tk.IntVar bound to each box.

        Shared by the three generate* methods below, which previously
        carried three verbatim copies of this loop.
        """
        variables = []
        xposition, yposition = startx, starty
        rows_in_column = 0
        for name in names:
            var = tk.IntVar()
            box = tk.Checkbutton(self.guiPage, text=name.strip(), variable=var)
            box.place(x=xposition, y=yposition)
            rows_in_column += 1
            if rows_in_column == numRows:
                # Column full: move right and restart at the top.
                xposition += incrementx
                yposition = starty
                rows_in_column = 0
            else:
                yposition += incrementy
            variables.append(var)
        return variables

    def generateDomainCheckboxes(self, startx, starty, incrementx, incrementy, numRows):
        self.variableForDomainsCheckbox = self._placeCheckboxColumns(
            self.listOfDomains, startx, starty, incrementx, incrementy, numRows)

    def generateCapabilitiesCheckboxes(self, startx, starty, incrementx, incrementy, numRows):
        self.variablesForCapabilitiesCheckbox = self._placeCheckboxColumns(
            self.listOfCapabilities, startx, starty, incrementx, incrementy, numRows)

    def generateDelegatedCapabilitiesCheckboxes(self, startx, starty, incrementx, incrementy, numRows):
        self.variablesForDelegatedCapabilitiesCheckbox = self._placeCheckboxColumns(
            self.listOfCapabilities, startx, starty, incrementx, incrementy, numRows)

    def _sumChecked(self, variables, names, dictionary):
        """Sum the bit values of every checked box (IntVar value == 1)."""
        total = 0
        for var, name in zip(variables, names):
            if var.get() == 1:
                total += dictionary[name.strip()]
        return total

    def convertDomainCheckboxToInt(self):
        return self._sumChecked(self.variableForDomainsCheckbox,
                                self.listOfDomains, self.domainDictionary)

    def convertCapabilitiesCheckboxToInt(self):
        return self._sumChecked(self.variablesForCapabilitiesCheckbox,
                                self.listOfCapabilities, self.capabilitiesDictionary)

    def convertDelegatedCapabilitiesCheckboxToInt(self):
        return self._sumChecked(self.variablesForDelegatedCapabilitiesCheckbox,
                                self.listOfCapabilities, self.capabilitiesDictionary)
| true |
2c2b06013c3d907ef1706407d30b1846cb59838f | Python | yoojunwoong/python_review01 | /test2_04.py | UTF-8 | 386 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #생각해보기
# Exercise: read two numbers, print their sum, and report whether the sum
# is even or odd (original comments were in Korean).
# Uses an if/else with the % operator to test evenness.
num1 =input('input num1...?');
num2 =input('input num2...?');
# NOTE(review): `sum` shadows the builtin of the same name.
sum = int(num1) + int(num2);
print(sum);
if sum % 2 == 0 :
    print('짝수');  # prints "even"
else:
    print('홀수');  # prints "odd"
| true |
36a12cc4404bfd9b7b5ddbfb40a7407342cf0dea | Python | etmorefish/leetcode | /349.py | UTF-8 | 575 | 3.65625 | 4 | [] | no_license | """
349 两个数组的交集,不用set
"""
class Solution(object):
    """LeetCode 349: intersection of two arrays, tracked via a plain dict
    instead of set intersection."""

    def intersection(self, nums1, nums2):
        """Return the distinct values present in both lists.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        lookup = {value: value for value in nums1}
        return [candidate for candidate in set(nums2) if candidate in lookup]
if __name__ == '__main__':
    # Ad-hoc smoke test: the common values of l1 and l2 are 1 and 8.
    l1 = [1, 2,3 ,4,5,7,8]
    l2 = [1,8, 12 , 21]
    res = Solution()
    r = res.intersection(l1, l2)
print(r) | true |
4af16c6b2797f07ecac0616139e0e607cfc43d3e | Python | hhongker/java | /笔记本电脑的东西/暑期线上培训/作业/1/2.py | UTF-8 | 805 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 09:11:35 2020
@author: hhr
"""
# In[Exercise 5] Perform the following operations on the iris data set
# (1) Load the data (feature matrix only)
from sklearn.datasets import load_iris
from sklearn.cluster import KMeans
iris = load_iris()['data']
# (2) Standardise the features (zero mean, unit variance)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(iris)
iris_std = scaler.transform(iris)# the standardised data
# (3) Build the clustering model and fit it (k = 3, one cluster per species)
model = KMeans(n_clusters=3).fit(iris_std)
# (4) Visualise the clustering result (first two standardised features)
import matplotlib.pyplot as plt
for i in range(3):
    plt.scatter(iris_std[model.labels_ == i, 0],iris_std[model.labels_ == i, 1])
plt.show()
# (5) Evaluate the model (silhouette score over the fitted labels)
from sklearn.metrics import silhouette_score
silhouette_score(iris_std, model.labels_) | true |
5d77f7fdd1f8a89e1380533a02e636f368f40e0e | Python | gverebl6/work_time_tracker | /workHours/commands.py | UTF-8 | 9,708 | 2.75 | 3 | [] | no_license | from os import sep
import click
from click.termui import prompt
from tabulate import tabulate
#import uuid
from workHours.models import Hour
from workHours.services import HourService
@click.group()
def hour():
    """Manages the hour commands"""
    # Click group entry point: show/add/delete/update/count/uuid/current
    # attach themselves below via @hour.command().
    pass
# Show
@hour.command()
@click.option('--all', is_flag=True)
@click.option('--prev', is_flag=True)
@click.option('--custom', is_flag=True)
@click.option(
    '-y', '--year',
    type=int,
    help='Year to insert hours in'
)
@click.option(
    '-w', '--week',
    type=int,
    help='Week to insert hours in'
)
@click.option('--complete', is_flag=True)
@click.pass_context
def show(ctx, all, prev, custom, year, week, complete):
    """
    Shows all hours
    default: Current week's hours
    with options:
        --prev: Prints previous week,
        --custom: Prompts to ask week and year
        --year=j: jth year
        --week=n : nth weeks hours
        if --year and/or --week are used, --custom is skipped
    """
    # Translate the mutually-exclusive flags into the service query params.
    params = {'all': False, 'prev': False, 'week': None, 'year': None}
    if all:
        params['all'] = True
    else:
        if prev:
            params['prev'] = True
        elif year or week:
            if year and not week:
                # BUG FIX: corrected the typos "recieved"/"paramater" in
                # this user-facing error message.
                click.echo('Error: Year parameter received, week parameter also needed...')
                click.echo('\tTo send week parameter with year use <tracker hours show -y [n] -w [m]>')
                ctx.abort()
            else:
                params['week'] = week
                params['year'] = year
        elif custom:
            params['week'] = click.prompt('Which week? ', type=int, default=None)
            params['year'] = click.prompt('Which year? ', type=int, default=None)
    hour_service = HourService(ctx.obj['work_hours_table'])
    hour_table = hour_service.get_hours(**params)
    if hour_table:
        # Normal (all columns) vs. short print (8-char id, no date_created)
        if complete:
            print(tabulate(hour_table, headers=Hour.schema()))
        else:
            hour_table = map(
                lambda x: [x[0][:8]] + x[1:-1],  # Shortens id and eliminates date_created
                hour_table)
            hour_table = list(hour_table)
            print(tabulate(hour_table, headers=Hour.schema()[:-1]))
    else:
        print('-'*50 + '\nThere\'s no record for the date range requested')
#Add hour
@hour.command()
@click.option(
    '-y', '--year',
    type=int,
    help='Year to insert hours in'
)
@click.option(
    '-w', '--week',
    type=int,
    help='Week to insert hours in'
)
@click.option(
    '-d', '--day',
    type=int,
    help='Day of the week to insert hours in'
)
@click.argument('hours', type=int)
@click.argument('minutes', type=int)
@click.argument('description', type=str)
@click.pass_context
def add(ctx, year, week, day, hours, minutes, description):
    """
    Adds a new hour for a day\n
    default: Adds on current day\n
    with options:\n
    --week=n, --day=m, --year=j: to insert as
    """
    # Build the model object; unset options stay None so the model/service
    # can default them to the current date.
    work_hour = Hour(
        hours=hours,
        minutes=minutes,
        description=description,
        week=week,
        day=day,
        year=year
    )
    # Persist through the service bound to the table stored in the context.
    hour_service = HourService(ctx.obj['work_hours_table'])
    hour_service.add_hour(work_hour)
#delete
@hour.command()
@click.argument('record_id', type=str)
@click.pass_context
def delete(ctx, record_id):
    """
    Delete an hour record matching the given id (full or partial)
    """
    service = HourService(ctx.obj['work_hours_table'])
    matches = service.delete_hour(record_id)
    if matches and len(matches) == 1:
        # Exactly one match: the deletion went through.
        click.echo(click.style('SUCCESS', bg='green'))
        click.echo(f'The record with id {matches[0]} was successfully deleted.')
    elif not matches:
        click.echo(click.style('ERROR', bg='red'))
        click.echo('Operation error:')
        click.echo('No record found with the id you input.')
    else:
        # Ambiguous prefix: more than one candidate record.
        click.echo(click.style('ERROR', bg='red'))
        click.echo('Operation error:')
        click.echo('There are more than one record that match your input.')
        click.echo(f'Try using the command <tracker hours uuid {record_id}> to get the specific id you want')
#update
@hour.command()
@click.argument('record_id', type=str)
@click.option(
    '-y', '--year',
    type=int,
    help='Year to update hours in'
)
@click.option(
    '-w', '--week',
    type=int,
    help='Week to update hours in'
)
@click.option(
    '-d', '--day',
    type=int,
    help='Day to update hours in'
)
@click.option(
    '-h', '--hours',
    type=int,
    help='Hour to update hours in'
)
@click.option(
    '-m', '--minutes',
    type=int,
    help='Minutes to update hours in'
)
@click.option(
    '-l', '--description',
    type=str,
    help='Minutes to update hours in'
)
@click.pass_context
def update(ctx, record_id, year=None, month=None, day=None, week=None, hours=None, minutes=None, description=None ):
    """
    Updates a specific record based on id
    """
    # NOTE(review): there is no --month option, so `month` is always None
    # here; it is still forwarded below -- confirm whether it is needed.
    hour_service = HourService(ctx.obj['work_hours_table'])
    update_options = {
        'year': year,
        'month': month,
        'week': week,
        'day': day,
        'hours': hours,
        # NOTE(review): key is 'minute' (singular) while the option is
        # --minutes -- verify against HourService.update_hour's schema.
        'minute': minutes,
        'description': description
    }
    updated = hour_service.update_hour(record_id, update_options)
    if not updated:
        # No record matched the (possibly partial) id.
        click.echo(click.style('ERROR', bg='red'))
        click.echo('Operation error:')
        click.echo('No record found with the id you input.')
    elif len(updated) == 1:
        click.echo(click.style('SUCCESS', bg='green'))
        click.echo(f'The record with id {updated[0]} was successfully updated.')
    else:
        # Ambiguous prefix: more than one candidate record.
        click.echo(click.style('ERROR', bg='red'))
        click.echo('Operation error:')
        click.echo('There are more than one record that match your input.')
        click.echo(f'Try using the command <tracker hours uuid {record_id}> to get the specific id you want')
# count
@hour.command()
@click.option('--all', is_flag=True)
@click.option(
    '-w', '--week',
    type = int,
    help='Week to count the hours.'
)
@click.option(
    '-i', '--start',
    type = int,
    help='From which week to start counting'
)
@click.option(
    '-f', '--stop',
    type = int,
    help='Until which week to count'
)
@click.pass_context
def count(ctx, all, week, start, stop):
    """
    Prints the sum of all hours in a week and generates
    a report based on the description of all days
    default: current
    options:
        --week=n : For the nth week
        --all : For all the records
        --start : from what week to start.
                (If no --to , then from that week to end)
        --stop : to what week to count
                (If no --from, then from begining to --to)
    """
    # A single --week collapses to an identical start/stop range.
    params = {'all': False, 'start': None, 'stop': None}
    if all:
        params['all'] = True
    else:
        if week:
            params['start'] = week
            params['stop'] = week
        else:
            if start:
                params['start'] = start
            if stop:
                params['stop'] = stop
    hour_service = HourService(ctx.obj['work_hours_table'])
    # Service returns the raw rows, an (hours, minutes) pair, and the
    # list of activity descriptions for the period.
    table, work_time, report = hour_service.count_hours(**params)
    print('Period of work')
    hour_table = map(
        lambda x: [x[0][:8]] + x[1:-1], # Shotens id and eliminates date_created
        table)
    hour_table = list(hour_table)
    print(tabulate(hour_table, headers=Hour.schema()[:-1]))
    print('\nTotal time worked: ')
    print(f'\t\t\t\tHours: {work_time[0]}\tMinutes: {work_time[1]}')
    print('\nActivities performed')
    print(*report, sep=', ')
# get uuid
@hour.command()
@click.argument('uuid_segment', type=str)
@click.pass_context
def uuid(ctx, uuid_segment):
    """ Returns the complete uuid from the first 4 digits """
    # (The stdlib `uuid` import at the top of the file is commented out,
    # so this command name does not shadow an imported module.)
    hour_service = HourService(ctx.obj['work_hours_table'])
    ids = hour_service.get_uuid(uuid_segment)
    if ids:
        if len(ids) > 1:
            # Ambiguous segment: let the user pick, then report the
            # shortest prefix that uniquely identifies the choice.
            click.echo('There are multiple possible ids, choose one to pipe with update or delete: ')
            for i, idx in enumerate(ids):
                click.echo(f'{i}. {idx}')
            option=click.prompt('Option', type=int)
            shortest_id = _get_short_uuid(option, ids)
            click.echo(f'The shortest id possible is {shortest_id}')
        else:
            click.echo(ids[0])
    else:
        click.echo('No id found to match that pattern....')
def _get_short_uuid(option, uuids):
"""Returns the shortes possible uuid when more than one is possible"""
top_len = 0
for elems in zip(*uuids):
if len(set(elems)) > 1:
return uuids[option][:top_len+1]
else:
top_len += 1
return uuids[option]
# Current time
@hour.command()
@click.pass_context
def current(ctx):
    """
    Prints the current year day and week
    """
    current_time = HourService.get_current_time()
    print('_'*50)
    # The backslash continuations keep this one f-string; note the source
    # indentation after each continuation becomes part of the output.
    print(f"\tCurrent year: {current_time['year']}\n\
        Current week: {current_time['week']}\n\
        Current day: {current_time['day']}")
#Maybe un get id porque el id ta muy largo a partir de un short
# Expose the `hour` group under the alias `all` as well.
# NOTE(review): this shadows the builtin `all` at module level.
all = hour
| true |
934d05e7f8372e5ed9dcd37f4ab7f81475e21ecc | Python | vvscode/py--notes | /selenium-webdriver-with-python3/examples/oop/oop.py | UTF-8 | 1,082 | 4.40625 | 4 | [] | no_license | """
OOP
"""
s = "this is a string"
a = "one more string"
# Strings are immutable: these calls return new strings that are discarded;
# s itself is unchanged.
s.upper()
s.lower()
print(type('s')) # <class 'str'>
print(type('a')) # <class 'str'>
print(type([1, 2, 3])) # <class 'list'>
"""
Class
"""
class Car(object):
    """Minimal demo class: a make plus a model defaulting to "550i"."""

    def __init__(self, make, model="550i"):
        self.make, self.model = make, model
# Instantiate with and without the default model argument.
c1 = Car('bmw', 'xxl')
print(c1.make) # bmw
# print(c1['make']) - won't work
print(c1.model) # xxl
c2 = Car('benz')
print(c2.make) # benz
print(c2.model) # 550i
"""
Class Ext
"""
class CarExt(object):
    """Demo class extending Car's idea with a class attribute and a method."""

    wheels = 4  # class attribute, shared by all instances

    def __init__(self, make, model):
        self.make, self.model = make, model

    def info(self):
        """Print a two-line summary of this car."""
        print("Make of the car: " + self.make)
        print("Model of the car: " + self.model)
# Exercise CarExt: attribute access, the info() method, class attribute,
# and runtime type inspection.
c1 = CarExt('bmw', '550i')
print(c1.make) # bmw
c1.info()
# Make of the car: bmw
# Model of the car: 550i
c2 = CarExt('benz', 'E350')
print(c2.make) # benz
c2.info()
# Make of the car: benz
# Model of the car: E350
print(CarExt.wheels) # 4
print(type(c1)) # <class '__main__.CarExt'>
print(isinstance(c1, CarExt)) # True
3cc4de61788918c9c151689c6b0e3a1bb3d1092e | Python | akarp0v/herokuapp_sel_test | /herokuapp/dynamic_page.py | UTF-8 | 799 | 2.578125 | 3 | [] | no_license | from selenium.webdriver.common.by import By
from .base_page import BasePage
from .const import Const
class Locators:
    # Selenium locators for the dynamic-loading example page:
    # the Start button (by visible text) and the finish banner (#finish).
    START_BTN = (By.XPATH, "//button[contains(text(), 'Start')]")
    FINISH_TEXT = (By.CSS_SELECTOR, "#finish")
class DynamicPage(BasePage):
    """Page object for the herokuapp dynamic-loading example page."""

    def should_be_dynamic_url(self):
        """Sanity check: the browser must be on a dynamic_loading URL."""
        assert 'dynamic_loading' in self.url

    def click_start_btn(self):
        """Press the Start button that triggers the asynchronous load."""
        self.browser.find_element(*Locators.START_BTN).click()

    def should_present_finish_text(self):
        """Assert that the finish banner became visible."""
        assert self.is_element_visible(*Locators.FINISH_TEXT), \
            "Finish text is not presented"

    def print_finish_text(self):
        """Echo the banner text (green) to stdout."""
        finish = self.browser.find_element(*Locators.FINISH_TEXT)
        print(f'\nDisplayed text: {Const.GREEN}{finish.text}{Const.DEFAULT}')
| true |
6cb897e14327f9fdb129579e971abc2d6b151154 | Python | jmery24/python | /ippython/polinomios.py | UTF-8 | 3,460 | 3.875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 06:34:48 2013
@author: daniel
"""
#definir funcion <muestra>
def muestra(a):
    # Python 2: print the polynomial with coefficient list a as
    # "a0 + a1 X ** 1 + a2 X ** 2 ...". Trailing commas keep the output on
    # one line; the bare "print" emits the final newline.
    # NOTE(review): assumes a is non-empty (a[0] raises IndexError on []).
    print a[0],
    for i in range(1, len(a)):
        print '+', a[i], 'X **', i,
    print
#definir funcion <evalua>
def evalua(a, x):
    # Evaluate the polynomial with coefficient list a at the point x:
    # a[0] + a[1]*x + a[2]*x**2 + ...
    total = 0
    for power, coef in enumerate(a):
        total += coef * x ** power
    return total
#definir funcion <suma>
def suma(a, b):
    """Return the coefficient list of the polynomial sum a + b,
    with trailing zero coefficients removed (via normaliza)."""
    corto, largo = (a, b) if len(a) <= len(b) else (b, a)
    # Pairwise sums over the shared prefix, then the longer tail verbatim.
    c = [x + y for x, y in zip(a, b)]
    c.extend(largo[len(corto):])
    normaliza(c)
    return c
#definir funcion <sumatorio> alternativa a funcion <suma>
def sumatorio(a, b):
    """Alternative to suma(): pairwise sums plus whichever tail remains.

    Only one of a[m:] / b[m:] is non-empty, so concatenating both is safe.
    """
    m = min(len(a), len(b))
    c = [a[i] + b[i] for i in range(m)] + a[m:] + b[m:]
    normaliza(c)
    return c
#definir funcion <normaliza>
def normaliza(a):
    # Drop trailing zero coefficients in place, so the last entry of the
    # list (when any remains) is the leading non-zero coefficient.
    while a and a[-1] == 0:
        a.pop()
#definir funcion <resta>
def resta(a, b):
    """Return the coefficient list of the polynomial difference a - b,
    trimmed of trailing zero coefficients.

    Bug fix: when b has more coefficients than a, its extra coefficients
    belong to the subtrahend and must be NEGATED; the original copied
    them with their sign unchanged.
    """
    c = [0] * max(len(a), len(b))
    for i in range(min(len(a), len(b))):
        c[i] = a[i] - b[i]
    if len(a) > len(b):
        for i in range(len(b), len(c)):
            c[i] = a[i]
    else:
        for i in range(len(a), len(c)):
            c[i] = -b[i]  # fix: negate the subtrahend's tail
    # Trim trailing zeros (same effect as normaliza(c), inlined so the
    # function is self-contained).
    while c and c[-1] == 0:
        c.pop()
    return c
#definir funcion <sustraccion> alternativa a funcion <resta>
def sustraccion(a, b):
    """Alternative implementation of resta(): return a - b, trimmed.

    Bug fix: the original appended b's leftover tail unchanged; those
    coefficients belong to the subtrahend and must be negated.
    """
    m = min(len(a), len(b))
    c = [a[i] - b[i] for i in range(m)]
    c += a[m:]
    c += [-coef for coef in b[m:]]  # fix: negate the subtrahend's tail
    # Trim trailing zeros (equivalent to normaliza(c)).
    while c and c[-1] == 0:
        c.pop()
    return c
#definir funcion <multiplica>
def multiplica(a, b):
    """Return the coefficient list of the polynomial product a * b.

    Bug fixes versus the original:
    - ``c[i] == s`` was a no-op comparison, so every coefficient stayed 0;
      it is now an assignment.
    - the inner index ran over ``range(i + 1)`` unconditionally, so
      ``a[j]`` / ``b[i - j]`` went out of bounds (IndexError even for the
      module's own demo inputs); the convolution window is now clamped to
      the valid index ranges of both lists.
    - empty inputs now yield the empty (zero) polynomial instead of crashing.
    """
    if not a or not b:
        return []
    orden = len(a) + len(b) - 2  # degree of the product
    c = [0] * (orden + 1)
    for i in range(orden + 1):
        s = 0
        # Convolution: only j with 0 <= j < len(a) and 0 <= i-j < len(b).
        for j in range(max(0, i - len(b) + 1), min(i + 1, len(a))):
            s += a[j] * b[i - j]
        c[i] = s
    return c
#prueba de las funciones del modulo, no funciona cuando se lo invoca
if __name__ == '__main__':
    # Manual smoke test of every function in this module (Python 2 prints).
    polinomio_a = [1, 2, 4, 5, 0, 6]
    polinomio_b = [1, 3, 6, 7, 8, 6]
    # exercise <muestra>
    print 'Polinomio A = ',
    muestra(polinomio_a)
    # exercise <evalua>
    variable = 2
    print 'Polinomio A = ',
    print evalua(polinomio_a, variable)
    # exercise <suma>
    print 'polinomio A + polinomio B = ',
    print suma(polinomio_a, polinomio_b)
    muestra(suma(polinomio_a, polinomio_b))
    # exercise <sumatorio>
    print 'polinomio A + polinomio B = ',
    print sumatorio(polinomio_a, polinomio_b)
    muestra(sumatorio(polinomio_a, polinomio_b))
    # exercise <resta>
    print 'polinomio A - polinomio B = ',
    print resta(polinomio_a, polinomio_b)
    muestra(resta(polinomio_a, polinomio_b))
    # exercise <sustraccion>
    print 'polinomio A - polinomio B = ',
    print sustraccion(polinomio_a, polinomio_b)
    muestra(sustraccion(polinomio_a, polinomio_b))
    # exercise <multiplica>
    print 'polinomio A x polinomio B = ',
    print multiplica(polinomio_a, polinomio_b)
muestra(multiplica(polinomio_a, polinomio_b)) | true |
e3a2c5c934f21056de4bc46ce4c1fc708094c8e1 | Python | JamesTurman/practicum | /scrape/FedExScraper.py | UTF-8 | 4,612 | 2.765625 | 3 | [] | no_license | """
Spyder Editor
This is a script to scrape shipping data from FedEx website via seleium server
Start up isntructions
1. download latest chrome driver at link below
https://sites.google.com/a/chromium.org/chromedriver/downloads
2. unzip and copy chromedriver.exe into directory below
'~\Python27\Scripts'
3. navigate to Python directory on command line then run commands below
pip install pandas
pip install selenium
pip install bs4
pip install html5lib
You're good to run the script after all 3 steps are completed
"""
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
from bs4 import BeautifulSoup
import html5lib as hlib
from selenium.common.exceptions import NoSuchElementException
# 15 origin zip codes from research
Orgin = [14513,90815,94520,77017,75235,33304,63110,
         38116,15203,81003,98108,73179,30307,94128,92101]
# Destination zips come from a sampled CSV with a 'zip' column.
Dest = pd.read_csv('destSample1.csv')
Dest = Dest['zip'].values.astype(str).tolist()
trans = []  # accumulates one single-row DataFrame per origin/destination pair
driver = webdriver.Chrome()
#from selenium.common.exceptions import NoSuchElementException
#def check_exists_by_xpath(xpath):
# try:
# webdriver.find_element_by_xpath(xpath)
# except NoSuchElementException:
# return False
#return True
def _xpath_html(driver, xpath):
    """Return the innerHTML of the element at *xpath*, or 0 when the
    element is missing from the page (mirrors the original fallback)."""
    try:
        return driver.find_element_by_xpath(xpath).get_attribute('innerHTML')
    except NoSuchElementException:
        return 0


# Request a quote for every origin/destination pair and collect the
# service commit times; absent page elements are recorded as 0.
for i in range(len(Orgin)):
    for j in range(len(Dest)):
        # navigate to the application home page
        driver.get("https://www.fedex.com/ratefinder/home?cc=US&language=en&locId=express")
        driver.find_element_by_name("origZip").send_keys(Orgin[i])
        driver.find_element_by_name("destZip").send_keys(Dest[j])
        driver.find_element_by_id("totalPackageWeight").send_keys(1)  # 1 lbs package
        driver.find_element_by_xpath("//*[@id='raCodeId']/option[3]").click()  # set option for pick up
        driver.find_element_by_id("quickQuote").click()  # click to advance

        # read results after moving to quote page
        ship_info = _xpath_html(driver, "//*[@id='content']/div/div/form/table[2]/tbody/tr/td/table[1]/tbody/tr[4]/td[2]/table/tbody/tr[1]/td/table/tbody/tr[4]/td[2]/b")
        first_overNight = _xpath_html(driver, "//*[@id='FIRST_OVERNIGHT_dateTime0']")
        priority_overNight = _xpath_html(driver, "//*[@id='PRIORITY_OVERNIGHT_dateTime1']")
        # Bug fix: the original's except branch assigned "standard_overnight"
        # (lower-case n), leaving standard_overNight undefined — a NameError
        # at the DataFrame build whenever this element was missing.
        standard_overNight = _xpath_html(driver, "//*[@id='STANDARD_OVERNIGHT_dateTime2']")
        twoday_AM = _xpath_html(driver, "//*[@id='FEDEX_2_DAY_AM_dateTime3']")
        twoday = _xpath_html(driver, "//*[@id='FEDEX_2_DAY_dateTime4']")
        express_saver = _xpath_html(driver, "//*[@id='FEDEX_EXPRESS_SAVER_dateTime5']")
        ground = _xpath_html(driver, "//*[@id='FEDEX_GROUND_dateTime6']")

        origin = Orgin[i]
        destination = Dest[j]
        # append to data frame "trans"
        df = pd.DataFrame([[origin, destination, ship_info, first_overNight,
                            priority_overNight, standard_overNight, twoday_AM,
                            twoday, express_saver, ground]],
                          columns=['origin', 'destination', 'ship_info',
                                   'first_overnight', 'priority_overnight', 'standard_overnight',
                                   'twoday_AM', 'twoday', 'express_saver', 'ground'])
        trans.append(df)

driver.close()
pd.concat(trans).to_csv('FedEx.csv')
| true |
d4cd5bb15409d858bb6c821d4971922cd6a63b51 | Python | Aasthaengg/IBMdataset | /Python_codes/p02681/s878557824.py | UTF-8 | 138 | 3.296875 | 3 | [] | no_license | s=input()
# Compare s (read on the previous line) with t position by position and
# count matching characters; t is expected to be at least as long as s.
t=input()
ans=0
for i in range(len(s)):
    if s[i]==t[i]:
        ans+=1
# s is a prefix of t exactly when every position matched.
if ans==len(s):
    print("Yes")
else:
print("No") | true |
4877d79665853ab39fb45808b9a29691d4cecf5f | Python | woliverhl/d-minute-phyton | /meetingmanagement/models.py | UTF-8 | 4,002 | 2.546875 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Usuario(models.Model):
    # Thin profile wrapper: one row per Django auth User; extra profile
    # fields (e.g. an avatar image) can be added here later.
    user = models.OneToOneField(User)
    #imagen_usuario = models.ImageField("imagen",upload_to = 'productos/',)
class Proyecto(models.Model):
    # A project whose meetings are recorded as Acta rows.
    nombre_proyecto = models.CharField("Nombre Proyecto", max_length=100)
    descripcion_proyecto = models.TextField("Descripción")
    fecha_inicio_proyecto = models.DateField("Fecha Inicio", null=True, blank=True)
    fecha_fin_proyecto = models.DateField("Fecha Finalización", null=True, blank=True)
    # TODO: an ACTIVO (active) flag field is still missing
    def __str__(self):
        return self.nombre_proyecto
class Acta(models.Model):
    # Meeting minutes ("acta") belonging to a project.
    proyecto_acta = models.ForeignKey(Proyecto, verbose_name="proyecto del acta")
    fecha_acta = models.DateTimeField("Fecha")
    resumen_acta = models.TextField("Resumen del Acta")
    # Class-level counter placeholder (was the redundant int(0)).
    correlativo = 0
    def __str__(self):
        # Bug fix: __str__ must return a str; self.id is an int (or None
        # before the first save), which made str(acta) raise TypeError.
        return str(self.id)
class Usuario_Acta(models.Model):
    # Join table between Usuario and Acta: records attendance and whether
    # the user acted as secretary for that meeting.
    usuario = models.ForeignKey(Usuario, verbose_name="usuarios participantes")
    acta = models.ForeignKey(Acta, verbose_name="acta")
    presente = models.BooleanField("¿Asistió?", default=False)
    secretario = models.BooleanField(default=False)
class Usuario_Proyecto(models.Model):
    # Join table between Usuario and Proyecto carrying the user's role.
    ROLES = (
        ('Miembro Regular', 'Miembro Regular'),
        ('Secretario', 'Secretario'),
        ('Jefe', 'Jefe')
    )
    usuario = models.ForeignKey(Usuario, verbose_name="usuario del proyecto")
    # NOTE(review): verbose_name below looks copy-pasted from the usuario
    # field — confirm it should not read "proyecto del usuario".
    proyecto = models.ForeignKey(Proyecto, verbose_name="usuario del proyecto")
    rol_proyecto = models.CharField("Rol en el Proyecto", choices=ROLES, max_length=30)
    def __str__(self):
        return 'Usuario: '+self.usuario.user.username+'. Proyecto: '+self.proyecto.nombre_proyecto
class Tema(models.Model):
    # Agenda topic discussed in a meeting (Acta); the HTML rendering of the
    # description is cached alongside the plain text.
    acta_tema = models.ForeignKey(Acta, verbose_name="acta del tema")
    titulo_tema = models.CharField(max_length=100)
    descripcion_tema = models.TextField("Descripción del Tema")
    descripcion_tema_html = models.TextField("Descripción del Tema en codigo HTML", blank=True, null=True)
    def __str__(self):
        return self.titulo_tema
class Elemento(models.Model):
    # Dialogic element recorded under a topic: agreement, individual
    # commitment, doubt, disagreement, or common norm. Elements can nest
    # through the self-referencing elemento_padre foreign key.
    TIPOS = (
        ('AC', 'Acuerdo de Coordinación'),
        ('CI', 'Compromiso Individual'),
        ('DU', 'Duda o Busqueda'),
        ('DE', 'Desacuerdo o Brecha'),
        ('NC', 'Normas Comunes')
    )
    tipo_elemento = models.CharField("Tipo Elemento", choices=TIPOS, max_length=10)
    elemento_padre = models.ForeignKey('self',null=True, blank=True, verbose_name="elemento padre")
    usuario_responsable = models.ForeignKey(Usuario, null=True, blank=True, verbose_name="usuario responsable")
    tema = models.ForeignKey(Tema, null=True, blank=True, verbose_name="tema al que pertenece")
    fecha_inicio = models.DateField("Fecha inicio", null=True, blank=True)
    fecha_termino = models.DateField("Fecha termino", null=True, blank=True)
    estado_elemento = models.CharField("Estado", max_length=50)
    titulo_elemento = models.CharField("Titulo", max_length=100)
    descripcion_elemento = models.CharField("Descripcion", max_length=1200)
    fecha_asignado = models.DateField("Fecha asignado", default=timezone.now)
"""
class Checklist_Kanban(models.Model):
item = models.CharField(max_length="50")
def __str__(self):
return self.item
class Tarea_Kanban(models.Model):
elemento_dialogico = models.ForeignKey(Elemento, null=True)
nombre_tarea = models.CharField(max_length=50)
fecha_inicio_tarea = models.DateField(null=True, blank=True)
fecha_vencimiento_tarea = models.DateField(null=True, blank=True)
descripcion_tarea = models.CharField(max_length=600, null=True, blank=True)
#checklist_tarea = models.ForeignKey(Checklist_Kanban, null=True, blank=True)
class Usuario_Tarea(models.Model):
usuario = models.ForeignKey(Usuario)
tarea = models.ForeignKey(Tarea_Kanban)
"""
| true |
ad4d9e3107cc7733c8f0d460a3751432d64bb3af | Python | LEEYongKyung/ch1.2 | /ch1.7/mymodule.py | UTF-8 | 378 | 3.421875 | 3 | [] | no_license | #module name: mymod
# Runs on import as well as on direct execution; __name__ tells us which.
print('mymodule.py의 모듈이름: '+__name__)
def main():
    """Entry point used only when this module runs as the top-level script."""
    print('최상의 모듈(실행모듈, 독립실행)시 출력 합니다. ')
def add(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
def subtract(a, b):
    """Return a minus b."""
    difference = a - b
    return difference
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def divide(a, b):
    """Return a divided by b (true division; raises ZeroDivisionError for b == 0)."""
    quotient = a / b
    return quotient
if __name__ == '__main__':
    # Executed directly: run the demo entry point.
    main()
else:
    # Imported: just report the module name.
    print('모듈이름: '+__name__)
| true |
aeed2b7833b6fa92afdfd87f8d78c4dec0329831 | Python | dataquestio/unite | /test.py | UTF-8 | 2,920 | 3.046875 | 3 | [] | no_license | """
Run tests on the data. Used when submitting the answer.
Usage -- python test.py TRAINING_FILE_PATH PREDICTION_FILE_PATH
"""
import StringIO
import argparse
import json
import math
import sys
import time
from pep8 import StyleGuide
from sklearn.metrics import mean_squared_error
import algo
import settings
# Parse input arguments.
parser = argparse.ArgumentParser(description='Test code to see if it works.')
parser.add_argument('train_file', type=str, help='The training file to use.')
parser.add_argument('prediction_file', type=str, help='The file to make predictions on.')
parser.add_argument('--write', default=False, help='Whether to write results to a file.', action="store_const", const=True, dest="write")
if __name__ == "__main__":
args = parser.parse_args()
# Read the training file.
with open(args.train_file) as f:
train_data = f.read()
with open(args.prediction_file) as f:
prediction_data = f.read()
start = time.time()
# Initialize the algorithm class.
alg = algo.Algorithm()
# Generate a dataframe from the train text.
train_df = alg.generate_df(train_data)
# Get the features from the dataframe
train_features = alg.generate_features(train_df, type="train")
# Train the algorithm using the training features.
alg.train(train_features, train_df["score"])
# Generate a prediction dataframe.
prediction_df = alg.generate_df(prediction_data)
# Generate features from the dataframe
prediction_features = alg.generate_features(prediction_df, type="test")
# Make predictions using the prediction dataframe.
predictions = alg.predict(prediction_features)
# Find how long it took to execute.
execution_time = time.time() - start
print("Execution time was {0} seconds.\n".format(execution_time))
# We're using RMSE as a metric.
error = math.sqrt(mean_squared_error(predictions, prediction_df[settings.PREDICTION_COLUMN]))
print("Found root mean squared error of: {0}\n".format(error))
# Setup a buffer to capture pep8 output.
buffer = StringIO.StringIO()
sys.stdout = buffer
# Initialize and run a pep8 style checker.
pep8style = StyleGuide(ignore="E121,E123,E126,E226,E24,E704,E501")
pep8style.input_dir(settings.BASE_DIR)
report = pep8style.check_files()
# Change stdout back to the original version.
sys.stdout = sys.__stdout__
pep8_results = buffer.getvalue()
if report.total_errors > 0:
print("Pep8 violations found! They are shown below.")
print("----------------------")
print(pep8_results)
# Write all the results to a file if needed.
if args.write:
write_data = {
"error": error,
"execution_time": execution_time,
"pep8_results": pep8_results
}
with open(settings.RESULTS_FILE, "w+") as f:
json.dump(write_data, f)
| true |
1c13b66016a2b0ae684424d9ef011e20663c0d17 | Python | xxNB/sword-offer | /leetcode/数组/merge_intevals.py | UTF-8 | 1,165 | 3.921875 | 4 | [] | no_license | """
156. Merge Intervals
Given a collection of intervals, merge all overlapping intervals.
Example
Given intervals => merged intervals:
[ [
(1, 3), (1, 6),
(2, 6), => (8, 10),
(8, 10), (15, 18)
(15, 18) ]
]
Challenge
O(n log n) time and O(1) extra space.
"""
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
"""
@param intervals: interval list.
@return: A new interval list.
"""
def merge(self, intervals):
intervals.sort(key=lambda x: x.start)
length = len(intervals)
res = []
for i in range(length):
if res == []:
res.append(intervals[i])
else:
size = len(res)
if res[size - 1].start <= intervals[i].start <= res[
size - 1].end:
res[size - 1].end = max(intervals[i].end, res[size - 1].end)
else:
res.append(intervals[i])
return res
r = Solution()
print(r.merge([(1, 3), (2, 6), (8, 10), (15, 18)]))
| true |
8a96f5d08b3490a0e664e7ddaff3403246be7297 | Python | Alcanforero/semiautomatic-chat | /semiautomatic-chat/main/rasa_client.py | UTF-8 | 783 | 2.609375 | 3 | [] | no_license | from threading import Event
import socketio
RASA_URL = 'http://localhost:5005'
# En este fichero se hace uso de un objeto de tipo Event para esperar
# que se resuelva la consulta al servidor de rasa.
class Aux(socketio.ClientNamespace):
ev = Event()
def on_bot_uttered(self, data):
self.result = data
self.ev.set()
class RasaClient:
def __init__(self):
self.sio = socketio.Client()
self.sio.connect(RASA_URL)
self.temp = Aux()
self.sio.register_namespace(self.temp)
def message(self, msg):
self.temp.ev.clear()
self.sio.emit("user_uttered", {"session_id": self.sio.sid, "message": msg})
self.temp.ev.wait()
return self.temp.result['text'] | true |
36d5323dfd25c4213f60432f77e0a56bd78ff72f | Python | caiyefan/2019_DeepLearning | /neural-network-model/pavlov_fusion.py | UTF-8 | 3,593 | 2.828125 | 3 | [] | no_license | from nn_model import *
plt.style.use('seaborn-whitegrid')
# input data: [food, ring]
# target2: [nothing, sail]
input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
target1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
target2 = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
model_1 = NN_Model()
model_1.load_dataset(input, target1)
# model_1.add_layer(input_nodes=2, output_nodes=4, activation_type="sigmoid")
model_1.add_layer(input_nodes=2, output_nodes=4, activation_type="softmax")
model_1.weights = np.array([[[-3.57917628, -4.45802165, 5.51264774, 4.25348689], [-3.49516325, 5.45501402, -4.28458211, 4.33399178]]])
model_1.bias = np.array([[[3.95903399, -0.27024269, -0.41458959, -3.3507687]]])
# model_1.learning(epochs=1000, lr=0.1, regularization=False)
model_2 = NN_Model()
model_2.load_dataset(target1, target2)
model_2.add_layer(input_nodes=4, output_nodes=4, activation_type="sigmoid")
model_2.add_layer(input_nodes=4, output_nodes=2, activation_type="softmax")
model_2.learning(epochs=1000, lr=0.1, regularization=False)
p1 = model_1.prediction(input)
p2 = model_2.prediction(p1)
print("output:")
print(p1)
print(p2)
print("")
model_1.weights = np.array([[[-3.57917628, -4.45802165, 5.51264774, 4.25348689], [-3.49516325, 2.92702158, -0.73537574, 4.33399178]]])
model_1.bias = np.array([[[3.95903399, -2.79823512, 3.13461677, -3.3507687]]])
p1 = model_1.prediction(input)
p2 = model_2.prediction(p1)
print("output:")
print(p1)
print(p2)
print("")
#
# print("")
# print("Result (Before Learning): ")
# model_1.check(weights=True, bias=True)
# p1 = model_1.prediction(input)
# p2 = model_2.prediction(p1)
# print("output:")
# print(p1)
# print(p2)
# print("")
#
# # ----------------------------------------------------------------------------------------
# # Learning Process
# w = np.array([])
# for i in range(50):
# # model_1.train_dog(input[0], target1[0], lr=0.1, regularization=True)
# # model_1.train_dog(input[2], target1[2], lr=0.1, regularization=True)
# # model_1.train_dog(input[3], target1[3], lr=0.1, regularization=True)
# output = model_1.train_dog(input[1], target1[2], lr=0.1, regularization=False)
# res = model_2.prediction(output)
# w = np.append(w, res[0][1])
# # print(res)
# print()
#
# # Learning Result Print
# print("")
# print("Result (After Learning): ")
# model_1.check(weights=True, bias=True)
# p1 = model_1.prediction(input)
# p2 = model_2.prediction(p1)
# print("output:")
# print(p2)
# print("")
# # ----------------------------------------------------------------------------------------
#
#
# # ----------------------------------------------------------------------------------------
# # Forgetting Process
# for i in range(50):
# # model_1.train_dog(input[0], target1[0], lr=0.05, regularization=True)
# # model_1.train_dog(input[2], target1[2], lr=0.05, regularization=True)
# # model_1.train_dog(input[3], target1[3], lr=0.1, regularization=True)
# output = model_1.train_dog(input[1], target1[1], lr=0.05, regularization=False)
# res = model_2.prediction(output)
# w = np.append(w, res[0][1])
# # print(res)
#
# # Forgetting Result Print
# print("")
# print("Result (After Forgetting): ")
# model_1.check(weights=True, bias=True)
# p1 = model_1.prediction(input)
# p2 = model_2.prediction(p1)
# print("output:")
# print(p2)
# print("")
# # ----------------------------------------------------------------------------------------
#
# # Result: Probability of Saliva
# plt.plot(w)
# plt.xlabel("Epochs")
# plt.ylabel("Probability(saliva)")
# plt.show()
#
#
#
| true |
b181a395a505bd055bcc16acd2a629c64b8af5dc | Python | liujingling920216/api_frame_rewrite | /demo/计算文件内容.py | UTF-8 | 734 | 3.390625 | 3 | [] | no_license | """
将文件中的内容计算出来并且将值显示到文件中
15+34=
15-7=
43+59=
"""
file = open('.\cal_data',mode= 'r')
data_list = file.readlines()
all_data_list = []
for i in data_list:
if '-' in i:
list1 = i.split('-')
sub_value = "%s-%s=%d"%(list1[0],list1[1].split('=')[0],int(list1[0])-int(list1[1].split('=')[0]))
all_data_list.append(sub_value)
elif '+' in i:
list2 = i.split('+')
add_value = "%s+%s=%d" % (list2[0], list2[1].split('=')[0], int(list2[0]) + int(list2[1].split('=')[0]))
all_data_list.append(add_value)
# print(all_data_list)
file = open('.\cal_data',mode='w')
for line in all_data_list:
file.write(line)
file.write('\n')
file.close()
| true |
5ee1a36c5c29a1ef53975285a1ea909f7c36ad3d | Python | ashar-sarwar/python-works | /python_assignment/password.py | UTF-8 | 552 | 3.171875 | 3 | [] | no_license | import re
def _is_valid_password(password):
    """Return True when *password* is 6-12 characters long and contains a
    lowercase letter, a digit, an uppercase letter, and one of $#@, with
    no whitespace anywhere — the same rules the original inline chain applied."""
    if len(password) < 6 or len(password) > 12:
        return False
    if not re.search("[a-z]", password):
        return False
    if not re.search("[0-9]", password):
        return False
    if not re.search("[A-Z]", password):
        return False
    if not re.search("[$#@]", password):
        return False
    if re.search(r"\s", password):  # raw string: avoids the invalid-escape warning
        return False
    return True


# Read a comma-separated list of candidate passwords from the user and
# echo back only the valid ones, comma separated.
items = input("enter Password").split(',')
value = [password for password in items if _is_valid_password(password)]
print(",".join(value))
| true |
543e8327b08dbaf619539f92f55bfb93066a54b2 | Python | Geeky-har/Python-Files | /coroutines1.py | UTF-8 | 982 | 4.125 | 4 | [] | no_license | import time
def find(n):
    # Coroutine: reads "cour1.txt" once up front, then repeatedly receives
    # a name via send() and reports whether it appears in the file text.
    # NOTE(review): the parameter n is immediately shadowed by the value
    # received from (yield), so the argument passed to find() is unused.
    f = open("cour1.txt", "r")
    file = f.read()
    time.sleep(2) # lets assume it takes 2 seconds to read the above file from the system
    while True:
        n = (yield) # will use find() as a coroutine
        if n in file:
            print(f"Yes {n} you are my friend!! Congratulations")
        else:
            print(f"Sorry {n} we aren't friends yet!!")
if __name__ == '__main__':
    # Collect names interactively, then feed each one to the coroutine.
    lst = []
    i = int(input("Please enter the number of friends you want to enter: "))
    print(f"Now enter {i} names of student: ")
    for _ in range(i): # will input the list of names
        name = input()
        lst.append(name)
    print("Please wait for a second.....")
    # NOTE(review): lst[0] is passed but ignored — find() only reacts to send().
    c = find(lst[0]) # instantiate the coroutine
    next(c) # will start the coroutine
    for horah in lst:
        c.send(horah) # will send the name to the coroutine
    c.close() # will close the coroutine
| true |
7b28e5a0031b0aae357ed04e92dafeed745c4500 | Python | anaxronik/Eulers-tasks-python | /9.py | UTF-8 | 792 | 3.84375 | 4 | [] | no_license | # Тройка Пифагора - три натуральных числа a < b < c, для которых выполняется равенство
#
# a^2 + b^2 = c^2
# Например, 32 + 42 = 9 + 16 = 25 = 52.
#
# Существует только одна тройка Пифагора, для которой a + b + c = 1000.
# Найдите произведение abc.
# Search bound for the brute-force loops and the target perimeter a+b+c.
max_number = 1000
sum_numbers = 1000
def find_triple_pifagor(sum_numbers):
    """Return a*b*c for the Pythagorean triple a < b < c with
    a + b + c == sum_numbers, printing it when found; return None when
    no such triple exists.

    Improvement: c is fully determined by a and b (c = sum - a - b), so
    the original accidental O(n^3) triple loop is reduced to O(n^2)
    without changing which triple is found first.
    """
    for a in range(1, sum_numbers):
        for b in range(a + 1, sum_numbers - a):
            c = sum_numbers - a - b
            if b < c and a * a + b * b == c * c:
                print('Find it = ', a, b, c)
                return a * b * c
    return None
# Prints 31875000, the product of the unique triple (200, 375, 425).
print(find_triple_pifagor(sum_numbers))
| true |
03e9da004b0cd9673e8b3424095708b64f6b3829 | Python | Alan-FLX/code | /luogu/02_分支结构/P_5714.py | UTF-8 | 309 | 3.140625 | 3 | [] | no_license | import math
m, h = map(float, input().split())  # presumably weight (kg) and height (m) — per the problem statement
BMI = m / (h ** 2)
if BMI < 18.5:
    print("Underweight")
elif BMI >= 18.5 and BMI < 24:
    print("Normal")
else:
    # Overweight: print the BMI formatted per the problem's output rules.
    BMI = format(BMI, ".4f")
    # strip("0") removes trailing zeros (a leading zero cannot occur here
    # since BMI >= 24); [:7] caps the printed width.
    B = str(BMI).strip("0")[:7]
    if B[len(B) - 1] == ".":
        B = B + "0"  # re-append one zero if stripping left a bare decimal point
    print(B)
    print("Overweight")