blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7187d15865ed5bd6ee5b897bef86e8eaff3d3f2f | Python | SecretMG/Laplacian-Mesh-Deformation | /source/testbench/learn_bld.py | UTF-8 | 2,331 | 3.09375 | 3 | [
"MIT"
] | permissive | import bpy
import bmesh
'--- 生成cube阵列'
# for k in range(5):
# for j in range(5):
# for i in range(5):
# bpy.ops.mesh.primitive_cube_add(size=0.5, location=[i, j, k])
# # size is the length, and location belongs to the center
'--- 查看对象'
# objs = bpy.context.selected_objects # 获取所选对象列表
# print(objs)
# for obs in objs:
# print(obs.name, obs.location) # 查看某对象的名字和位置
'--- 选择对象'
def select(name, additive=True):
    """Select the scene object called *name*.

    With ``additive=False`` the current selection is cleared first, so the
    named object ends up as the only selected one.
    """
    if not additive:
        # select_all actions: 'TOGGLE' flips everything (or selects all when
        # nothing is selected), 'SELECT' selects all, 'DESELECT' clears the
        # selection, 'INVERT' swaps selected/unselected.
        bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects[name].select_set(True)
# select('Sphere')
# bpy.ops.transform.translate(value=[-1, -1, 0])
'--- 查看激活对象'
# # 若有多个选中对象,则激活对象为最后被选中的对象
# print(bpy.context.object)
# print(bpy.context.active_object) # 二者等效
def activate(name):
    """Make the object called *name* the active object of the view layer.

    With several selected objects, the active one is normally the last
    selected; this sets it explicitly by name.
    """
    target = bpy.data.objects[name]
    bpy.context.view_layer.objects.active = target
# activate('Sphere')
# print(bpy.context.object.name)
# print(bpy.context.selected_objects)
'--- 切换模式'
def mode_set(mode):
    """Switch the interaction mode (e.g. 'OBJECT' or 'EDIT').

    On entering edit mode, all mesh elements of the active object are
    deselected first -- a safer starting point for subsequent edits.
    """
    bpy.ops.object.mode_set(mode=mode)
    if mode != 'EDIT':
        return
    bpy.ops.mesh.select_all(action='DESELECT')
# mode_set('EDIT')
'--- 尝试使用bmesh'
# bpy.ops.mesh.primitive_cube_add(size=2, location=[0, 0, 0])
# bpy.ops.object.mode_set(mode='EDIT') # 添加一个cube并进入编辑模式
'--- 轻微形变'
def clear():
    """Delete every object in the scene.

    NOTE(review): per the original author's comment, calling this on an
    already-empty scene raises -- confirm before relying on it.
    """
    # Deletion is only permitted from object mode, so switch there first.
    bpy.ops.object.mode_set(mode="OBJECT")
    bpy.ops.object.select_all(action="SELECT")
    bpy.ops.object.delete()
# clear()
# bpy.ops.mesh.primitive_cube_add(size=1, location=[3, 0, 0])
# bpy.ops.object.mode_set(mode='EDIT')
# bpy.ops.mesh.select_all(action='SELECT')
# bpy.ops.transform.vertex_random(offset=0.5) # 对所有顶点进行随机偏移
# bpy.ops.object.mode_set(mode='OBJECT')
| true |
dfefe0c66fc8f68b1f039de649f47a80e707258f | Python | averbukhs/tesxam | /test_second.py | UTF-8 | 1,219 | 2.578125 | 3 | [] | no_license | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
hostname = 'https://demo.b2b-center.ru/personal/'
login = '51370'
password = 'Password8'
"""
Тест на заполнение формы корректными данными. Тест является пройденным при проверке на наличии фразы "Личный кабинет"
в заголовке Title после нажатия кнопки Войти.
"""
def test_input_login_password_correct(driver):
    """Log in with valid credentials and verify the personal cabinet opens.

    Passes when the page title contains "Личный кабинет" ("personal
    cabinet") after the login form is submitted.
    """
    driver.get(hostname)
    wait = WebDriverWait(driver, 10)
    # Sanity check that the login page itself actually loaded.
    assert "B2B-Center" in driver.title
    login_field = driver.find_element(By.ID, value="login_control")
    password_field = driver.find_element(By.ID, value="password_control")
    submit_button = driver.find_element(By.ID, value="enter_button")
    login_field.send_keys(login)
    password_field.send_keys(password)
    submit_button.click()
    try:
        # Wait up to 10 s for the post-login redirect to change the title.
        element = wait.until(EC.title_contains("Личный кабинет"))
    finally:
        # NOTE(review): if wait.until times out, this assert runs first and
        # its failure masks the TimeoutException from the wait.
        assert "Личный кабинет" in driver.title
| true |
f4c5f5b1c80c059acf738ba388c24828c7dd5614 | Python | 5l1v3r1/RuleGeneratorSCADA | /state-manager/TestUtilities.py | UTF-8 | 3,253 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This file offeres generalized test functions for scenario testing
'''
import logging
import os
from LoggerUtilities import logAllChecksDescription, logAllChecksPassed, logError
from ValueStore import ValueStore
logger = logging.getLogger(__name__)
SCENARIO_PATH = "../../state-manager/Scenarios/"
def playScenarios(caseName, topology, rtusToTest=None, filterFunction=lambda filename: True):
    """
    Load and test all or some cases of a scenario.
    :param caseName: Name of case
    :param topology: Topology list of RTUs
    :param rtusToTest: RTUs which should be tested
    :param filterFunction: Lambda filter function for filenames (e.g. for testing specific cases only)
    """
    scenarioFiles = []
    basicCaseFilename = "%s_%s.state" % (caseName, "BasicCase")
    basicCaseFound = False
    # Collect every *.state file of this case; the BasicCase file is the
    # mandatory baseline state and is tracked separately from the variants.
    for filename in os.listdir(SCENARIO_PATH):
        if filename == basicCaseFilename:
            basicCaseFound = True
        else:
            if filename.startswith(caseName) and filename.endswith(".state"):
                scenarioFiles.append(filename)
    assert basicCaseFound
    # NOTE(review): len(filter(...)) only works on Python 2, where filter()
    # returns a list -- this module targets Python 2 (see the except syntax
    # in checkTopology below).
    logger.warn("Testing %d scenarios of case %s." % (len(filter(filterFunction, sorted(scenarioFiles))), caseName))
    for scenarioFilename in filter(filterFunction, sorted(scenarioFiles)):
        # Each scenario state = baseline BasicCase overlaid with its own deltas.
        stateScenario = ValueStore("T_{o}")
        stateScenario.loadFromFile("%s%s" % (SCENARIO_PATH, basicCaseFilename))
        stateScenario.loadFromFile("%s%s" % (SCENARIO_PATH, scenarioFilename))
        logger.warn("")
        logger.warn(stateScenario.description)
        checkTopology(topology, stateScenario, rtusToTest)
def checkTopology(topology, state, rtusToTest=None):
    """
    Evaluate all consistency and safety rules on the topology with the given state information
    :param topology: Topology list of RTUs
    :param state: State object with stateful information
    :param rtusToTest: RTUs which should be tested
    :return: (T,T) If all tests are successful, (F,T) if consistency violation, (T,F) if safety violation, (F,F) if violation in both
    """
    logAllChecksDescription("ALL CHECKS", "TOPOLOGY", indentation=0)
    checkStatusConsistency = dict()
    checkStatusSafety = dict()
    try:
        # A set/list restricts the run to the named RTUs; anything else
        # (including None) means "test the whole topology".
        if type(rtusToTest) == set or type(rtusToTest) == list:
            relevantRTUs = [rtu for rtu in topology if rtu.name in rtusToTest]
        else:
            relevantRTUs = topology
        for rtu in relevantRTUs:
            checkStatusConsistency[rtu.name] = all(rtu.executeFullConsistencyCheck(state).values())
            checkStatusSafety[rtu.name] = all(rtu.executeFullSafetyCheck(state).values())
        logAllChecksPassed("ALL CHECKS", "TOPOLOGY", all(checkStatusConsistency.values()) and all(checkStatusSafety.values()), indentation=0)
    # Python 2 syntax -- this module cannot run unmodified on Python 3.
    # NOTE(review): if the exception fires before any RTU is checked, both
    # dicts are empty and all() on them is True, so (True, True) is returned
    # despite the error.
    except Exception, e:
        logError("Unknown exception or error: %s" % e.message, indentation=0)
    return (all(checkStatusConsistency.values()), all(checkStatusSafety.values()))
def generateRules(topology):
    """Emit Bro consistency and safety rules for every RTU in *topology*.

    :param topology: Topology list of RTUs
    """
    for remote_unit in topology:
        # Each RTU generates its own rules for both check families.
        remote_unit.generateFullBroConsistencyCheck()
        remote_unit.generateFullBroSafetyCheck()
| true |
d9bbf0eb1acb3c0606ffdc2f81a01c4948bc567a | Python | rautla/programmable-web-project | /test_unittest.py | UTF-8 | 349 | 2.953125 | 3 | [] | no_license | #import random
from myclass import MyClass
import unittest
class TestSequenceFunctions(unittest.TestCase):
    """Checks that MyClass stores and returns the name it was given."""

    def setUp(self):
        # Fresh fixture for every test method.
        self.something = MyClass("pekka")

    def test_return(self):
        self.assertEqual(self.something.get_name(), "pekka")
if __name__ == '__main__':
unittest.main() | true |
8e6af9def4afa61182d4623a4df9d1e57f14c795 | Python | chongin12/Problem_Solving | /acmicpc.net/2547.py | UTF-8 | 145 | 3.0625 | 3 | [] | no_license | for _ in range(int(input())):
input()
n=int(input())
r=0
for i in range(n):
r+=int(input())
if r%n==0:
print("YES")
else:
print("NO") | true |
426809fbfe4074bd2e876f1c3e0a458bd082acd9 | Python | Jagrmi-C/jagrmitest | /jagrmi_logging.py | UTF-8 | 231 | 2.640625 | 3 | [
"MIT"
] | permissive | import logging
# add filemode="w" to overwrite
# Route all records of level INFO and above to sample.log (appended by default).
logging.basicConfig(filename="sample.log", level=logging.INFO)
# NOTE: this DEBUG record is below the INFO threshold configured above,
# so it is filtered out and never reaches the file.
logging.debug("This is a debug message")
logging.info("Informational message")
logging.error("An error has happened!")
| true |
7fc0ba004c8b4394792300a47a0d8fe818afe9f9 | Python | rikkarikka/nn_math_solver | /sni/data/preprocess.py | UTF-8 | 5,850 | 2.765625 | 3 | [] | no_license | import json
import numpy as np
import random
import math
import re
import sys
def main():
    """Drive the SNI preprocessing pipeline for the Math23K data set.

    Loads the raw JSON problems, derives per-number classification examples
    for each problem, reuses the existing cross-validation split files on
    disk and writes train/val/test TSV files.
    """
    # LOAD DATA
    data = json.loads(open('../../tencent/data/Math23K.json').read())
    # PREPROCESS DATA
    for d in data:
        d['examples'] = preprocess(d['segmented_text'], d['equation'])
    # 5 FOLD CROSS VALIDATION
    print('Using existing cross validation splits')
    #print('Preforming cross validation splits...')
    #crossValidation(data, k = 5, k_test=5)
    # SAVE SPLIT INDICES
    split('./Math23K-train.txt', './Math23K-dev.txt', './Math23K-test.txt', k_test=5)
    # SAVE SRC/TGT files
    # Each split file holds one problem id per line.
    train_indices = np.genfromtxt('./Math23K-train.txt').astype(int)
    dev_indices = np.genfromtxt('./Math23K-dev.txt').astype(int)
    test_indices = np.genfromtxt('./Math23K-test.txt').astype(int)
    json2txt(train_indices, data, './train.tsv')
    json2txt(dev_indices, data, './val.tsv')
    json2txt(test_indices, data, './test.tsv')
def crossValidation(data, k = 5, k_test=5):
    """Shuffle the data set in place and write k cross-validation folds.

    Each file ``fold<i>.txt`` (i = 1..k) receives the ids of one fold, one
    id per line.  ``k_test`` is accepted for interface symmetry with
    ``split`` but is not used here; the caller decides which fold is the
    test fold.
    """
    random.shuffle(data)
    per_fold = math.floor(np.shape(data)[0] / k)
    for fold_no in range(1, k + 1):
        chunk = data[(fold_no - 1) * per_fold: fold_no * per_fold]
        with open('fold' + str(fold_no) + '.txt', 'w') as handle:
            for record in chunk:
                handle.write(record['id'] + '\n')
        print('fold' + str(fold_no) + '.txt' + ' saved')
def split(train_path, dev_path, test_path, k_test=5):
    """Build train/dev/test id files from the fold files on disk.

    All folds except fold ``k_test`` are concatenated into one pool; the
    last 1000 ids of the pool become the dev set and everything before
    them the training set.  Fold ``k_test`` is copied verbatim as the
    test set.

    :param train_path: output path for the training ids
    :param dev_path: output path for the dev ids
    :param test_path: output path for the test ids
    :param k_test: 1-based index of the fold reserved for testing
    """
    train_dev = []
    for i in range(1, 6):
        if i != k_test:
            # Fix: close each fold file promptly instead of leaking the
            # handle from a bare open(...).readlines().
            with open('fold' + str(i) + '.txt') as fold_file:
                train_dev.extend(fold_file.readlines())
    with open('fold' + str(k_test) + '.txt') as fold_file:
        test = fold_file.readlines()

    def _write(path, lines):
        # Helper: dump the given id lines to *path* and report it.
        with open(path, 'w') as output:
            output.writelines(lines)
        print(path + ' saved')

    _write(train_path, train_dev[:-1000])  # everything before the dev tail
    _write(dev_path, train_dev[-1000:])    # last 1000 ids -> dev set
    _write(test_path, test)                # held-out fold, unchanged
def mostCommon(data, percent):
    """Trim *data* down to roughly *percent* of it by equation frequency.

    Equations are removed in order of ascending occurrence count (all
    singletons first, then those occurring twice, ...) until at least
    (1 - percent) of the problems are gone.  Returns the surviving
    problems and an array of the removed ones.
    """
    # returns PERCENT of data by # of equation occurences
    equation, count= np.unique([d['equation'] for d in data], return_counts=True)
    # NOTE(review): stacking (equation, count) produces a string array, so
    # the count column must be cast back to int before argsort; `result`
    # rows are ordered by ascending occurrence count but hold strings.
    indices = np.asarray((equation, count)).T[:,1].astype(int).argsort()
    result = np.asarray([[equation[i], count[i]] for i in indices])
    removed = np.array([])
    # Sum of all counts == total number of problems.
    total_eqs = np.sum(np.asarray(result[:,1]).astype(int))
    occurences = 1
    while len(removed) < total_eqs * (1 - percent):
        print('Removing equations with', occurences, 'occurences...')
        equations_to_remove = result[:,0][np.asarray(result[:,1]).astype(int) == occurences]
        for eq in equations_to_remove:
            eq = eq.strip()
            # Move every problem with this equation from `data` to `removed`.
            removed = np.append(removed, [d for d in data if d['equation'].strip() == eq])
            data = [d for d in data if not d['equation'].strip() == eq]
        print('total # equations removed:', len(removed))
        occurences += 1
    return data, removed
def preprocess(question, equation):
    """Build SNI classification examples for one math word problem.

    For each distinct number token in *question*, emit a line
    ``"<3-word left context> <number> <3-word right context>\t<yes|no>"``
    where the label says whether that number appears in *equation*.
    Fractions are first replaced by unique sentinel tokens derived from
    sys.maxsize so they survive tokenization as single "numbers".
    """
    #handle fractions and % and numbers with units
    question = question.replace('%', ' % ')
    fractions = re.findall('\(\d+\)/\(\d+\)', question)
    fractions = np.append(fractions, re.findall('\(\d+/\d+\)', question))
    for i,fraction in enumerate(fractions):
        # Sentinel: sys.maxsize - i is numeric, unique per fraction, and is
        # substituted consistently in both question and equation.
        question = question.replace(fraction, str(sys.maxsize - i))
        equation = equation.replace(fraction, str(sys.maxsize - i))
    # Pad every operator/bracket with spaces so split() tokenizes cleanly.
    equation = equation.replace('+', ' + ')
    equation = equation.replace('-', ' - ')
    equation = equation.replace('*', ' * ')
    equation = equation.replace('/', ' / ')
    equation = equation.replace('(', ' ( ')
    equation = equation.replace(')', ' ) ')
    equation = equation.replace('=', ' = ')
    equation = equation.replace('^', ' ^ ')
    equation = equation.replace('%', ' % ')
    equation = equation.split()
    # Separate a number from a short trailing unit, e.g. "5km" -> "5 km".
    question = re.sub(r'(\d+)([A-z]{1,2})', r'\1 \2', question)
    # Preprocess Question
    question = question.split()
    # Pad both ends with 'null' so the +-3 context window never underflows.
    question = np.append(['null', 'null', 'null'], question)
    question = np.append(question, ['null', 'null', 'null'])
    numbers = np.array([token for token in question if isFloat(token)])# or float(token) == 2)])
    # Deduplicate while keeping first-occurrence order.
    _, indices = np.unique(numbers, return_index=True)
    numbers = numbers[np.sort(indices)]
    equation = np.array([token.strip() for token in equation])
    examples = []
    for i,number in enumerate(numbers):
        if number.strip() in equation:
            # np.where gives the first position of this number token; take
            # a 7-token window centred on it (3 words each side).
            index = np.where(question == number)[0][0]
            src = question[index-3:index+4]
            src = ' '.join(src)
            #print('np.shape(examples):', np.shape(examples))
            examples = np.append(examples, [src + '\t' + 'yes'])
        else:
            index = np.where(question == number)[0][0]
            src = question[index-3:index+4]
            src = ' '.join(src)
            examples = np.append(examples, [src + '\t' + 'no'])
    #print(examples)
    return examples
def json2txt(json_indices, data, output_path):
    """Write the SNI example lines of the selected problems to *output_path*.

    A problem is selected when its integer id appears in *json_indices*;
    every example line of a selected problem is echoed to stdout and
    appended to the output file.
    """
    output = open(output_path, 'w')
    selected = (record for record in data if int(record['id']) in json_indices)
    for record in selected:
        print(record['examples'])
        for line in record['examples']:
            print(line)
            output.write(line + '\n')
    output.close()
def isFloat(value):
    """Return True when *value* can be parsed as a float, else False."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def txt2tsv(src_path, tgt_path, tsv_path):
    """Merge parallel src/tgt text files into one tab-separated file.

    Output line i is ``<src line i>\\t<tgt line i>`` with surrounding
    whitespace stripped from both sides.

    Fix: the original never closed any of the three files, so the final
    buffered lines of *tsv_path* could be lost; ``with`` guarantees the
    flush and close.
    """
    with open(src_path) as src_file, open(tgt_path) as tgt_file:
        src_lines = src_file.readlines()
        tgt_lines = tgt_file.readlines()
    with open(tsv_path, 'w') as tsv:
        # zip pairs the files line by line (stops at the shorter file).
        for src_line, tgt_line in zip(src_lines, tgt_lines):
            tsv.write(src_line.strip() + '\t' + tgt_line.strip() + '\n')
if __name__ == '__main__':
main()
| true |
9756f915539c5fa4ec577b0f4eb93e2c53f42e55 | Python | uog-mai/automated-bar | /Software/Server/index.py | UTF-8 | 1,054 | 2.578125 | 3 | [
"MIT"
] | permissive | from flask import Flask, request, jsonify, render_template
import requests
import database
import libextension
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page of the bar UI."""
    return render_template('index.html')
@app.route('/get_drink', methods=['POST'])
def get_drink():
    """Dialogflow webhook: dispense the requested mixer and alcohol.

    Expects the Dialogflow fulfillment JSON with ``mixer`` and ``alcohol``
    parameters; triggers the dispensers and answers with a randomly
    chosen fulfillment text.
    """
    data = request.get_json(silent=True)
    # NOTE(review): no validation here -- a payload missing these keys
    # raises a KeyError (or TypeError if the body was not JSON).
    mixer = data['queryResult']['parameters']['mixer']
    alcohol = data['queryResult']['parameters']['alcohol']
    # Get alcohol name
    # Dialogflow delivers the parameters as lists; join them into strings.
    mixerString = ''.join(mixer)
    alcString = ''.join(alcohol)
    # Calculate required drink weights
    # 200 here is the target drink size used by the weight lookups.
    mix_weight = database.get_mixer_weight(mixerString, 200)
    alc_weight = database.get_alcohol_weight(alcString, 200);
    response = database.random_response()
    reply = {
        "fulfillmentText": response,
    }
    # Side effect: these calls actually command the hardware to pour.
    print("Server recieved ", libextension.dispense_drink(mixerString, mix_weight))
    print("Server recieved ", libextension.dispense_drink(alcString, alc_weight))
    return jsonify(reply)
# run Flask app
if __name__ == "__main__":
app.run()
| true |
921dca6453a99bbd7ce455e7522ff334709c61f2 | Python | dhanesh47/sentiment-analysis | /sentiment-analysis.py | UTF-8 | 1,390 | 2.828125 | 3 | [] | no_license | import pandas as pd
import string
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report,confusion_matrix
from sklearn import metrics
data = pd.read_csv('C:/Users/HP/Desktop/dataset.csv', encoding='latin-1')
def process_text(text):
    """Strip punctuation from *text* and drop English stop words.

    Returns the surviving tokens as a list of words (original casing kept;
    the stop-word comparison is done on the lowercased token).
    """
    depunctuated = ''.join(ch for ch in text if ch not in string.punctuation)
    stop_list = stopwords.words('english')
    return [tok for tok in depunctuated.split() if tok.lower() not in stop_list]
# NOTE(review): the results of the next two lines are discarded -- head()
# only returns a preview and apply() returns a new Series, so neither has
# any effect on the pipeline below.
data.head()
data['SentimentText'].apply(process_text)
# 80/20 split; no random_state is set, so the split differs between runs.
x_train, x_test, y_train, y_test = train_test_split(data['SentimentText'],data['Sentiment'],test_size=0.2)
# The actual text cleaning happens inside CountVectorizer via
# analyzer=process_text, applied consistently at fit and predict time.
pipeline = Pipeline([
    ('bow',CountVectorizer(analyzer=process_text)), # converts strings to integer counts
    ('tfidf',TfidfTransformer()), # converts integer counts to weighted TF-IDF scores
    ('classifier',MultinomialNB()) # train on TF-IDF vectors with Naive Bayes classifier
    ])
pipeline.fit(x_train,y_train)
predictions = pipeline.predict(x_test)
print(metrics.accuracy_score(y_test,predictions))
448a56b7722b28f6f5c47ab153e0bb9f72bb9a3a | Python | cck0504/ML | /perceptron.py | UTF-8 | 1,597 | 2.984375 | 3 | [] | no_license | from __future__ import division, print_function
from typing import List, Tuple, Callable
import numpy as np
import scipy
import matplotlib.pyplot as plt
import random
class Perceptron:
    """Binary linear classifier trained with a margin perceptron rule.

    The weight vector has ``nb_features + 1`` entries; feature vectors are
    expected to already carry their bias component, so no separate bias
    term is maintained.
    """

    def __init__(self, nb_features=2, max_iteration=10, margin=1e-4):
        self.nb_features = nb_features
        self.w = [0] * (nb_features + 1)
        self.margin = margin
        self.max_iteration = max_iteration

    def train(self, features, labels):
        """Fit the weights on (features, labels) with labels in {-1, +1}.

        Returns True when an epoch completes with every sample classified
        correctly and outside the margin band; False if ``max_iteration``
        epochs pass without convergence.
        """
        eps = 0.00000001  # guards the division when the weight norm is zero
        for _ in range(self.max_iteration):
            converged = True
            # Norm is taken once per epoch, as in the classic formulation.
            norm = np.linalg.norm(self.w) + eps
            order = list(range(len(features)))
            np.random.shuffle(order)
            for idx in order:
                sample = features[idx]
                activation = np.dot(self.w, sample)
                normalized = activation / norm
                misclassified = labels[idx] * activation < 0
                inside_margin = -self.margin / 2 < normalized < self.margin / 2
                if misclassified or inside_margin:
                    converged = False
                    self.w = np.add(self.w, labels[idx] * np.array(sample))
            if converged:
                return True
        return False

    def reset(self):
        """Zero the weights so the model can be retrained from scratch."""
        self.w = [0] * (self.nb_features + 1)

    def predict(self, features: List[List[float]]) -> List[int]:
        """Return +1/-1 for each row of *features* under the current weights."""
        scores = np.dot(features, np.array(self.w).T).flatten().tolist()
        return [1 if score > 0 else -1 for score in scores]

    def get_weights(self):
        """Expose the current weight vector (list, or ndarray after updates)."""
        return self.w
| true |
3885a125f24ff5f4badfa6a60499dd9f5d5ed0b3 | Python | virginiah894/python_codewars | /7KYU/line_numbering.py | UTF-8 | 173 | 3.65625 | 4 | [
"MIT"
def number(lines: list) -> list:
    """Prefix each line with its 1-based position, as 'n: line'.

    An empty input yields an empty list.
    """
    return [f'{idx}: {text}' for idx, text in enumerate(lines, start=1)]
| true |
8183961f507c44e33f16ad918722f25c214b582f | Python | DYSM/hangman2 | /unitTest2.py | UTF-8 | 678 | 2.6875 | 3 | [] | no_license | __author__ = 'KMK'
import unittest
import hangman
class hangmanTestCase(unittest.TestCase):
    """Unit tests for the answer-checking helpers of the hangman module."""

    def test_CheckCorrectAnswerTrue(self):
        # Same letters as the target word, reordered -> correct.
        self.assertTrue(hangman.checkCorrectAnswer('tac', 'cat'))

    def test_CheckCorrectAnswerFalse(self):
        self.assertFalse(hangman.checkCorrectAnswer('ito', 'cat'))

    def test_CheckWrongAnswerTrue(self):
        self.assertTrue(hangman.checkWrongAnswer('rioooi', 'ourt'))

    def test_CheckWrongAnswerFalse(self):
        self.assertFalse(hangman.checkWrongAnswer('tac', 'cat'))
if __name__ == '__main__':
unittest.main()
| true |
2c32e2aa7d52621cc3d0f44cebd02ea2104040e1 | Python | partofthething/ace | /ace/validation/validate_smoothers.py | UTF-8 | 6,082 | 2.609375 | 3 | [
"MIT"
] | permissive | """
A few validation problems to make sure the smoothers are working as expected.
These depend on the supsmu module, which was created using f2py from Breiman's supsmu.f
"""
import matplotlib.pyplot as plt
import numpy
from ace.samples import smoother_friedman82
import ace.smoother as smoother
import ace.supersmoother as supersmoother
# pylint: disable=protected-access, missing-docstring
try:
import mace
except ImportError:
print("WARNING: An F2Pyd version of Breiman's supsmu is not available. "
"Validations will not work")
raise
def validate_basic_smoother():
    """Run Friedman's test from Figure 2b."""
    # Reference problem from Friedman (1982); sorting keeps line plots sane.
    x, y = sort_data(*smoother_friedman82.build_sample_smoother_problem_friedman82())
    plt.figure()
    # plt.plot(x, y, '.', label='Data')
    for span in smoother.DEFAULT_SPANS:
        # Overlay the pure-python smoother and Breiman's FORTRAN SMOOTH
        # at every default span so discrepancies are visible by eye.
        my_smoother = smoother.perform_smooth(x, y, span)
        friedman_smooth, _resids = run_friedman_smooth(x, y, span)
        plt.plot(x, my_smoother.smooth_result, '.-', label='pyace span = {0}'.format(span))
        plt.plot(x, friedman_smooth, '.-', label='Friedman span = {0}'.format(span))
    finish_plot()
def validate_basic_smoother_resid():
    """Compare cross-validated residuals against the FORTRAN reference."""
    x, y = sort_data(*smoother_friedman82.build_sample_smoother_problem_friedman82())
    plt.figure()
    for span in smoother.DEFAULT_SPANS:
        my_smoother = smoother.perform_smooth(x, y, span)
        # Only the residuals from the FORTRAN run are plotted here.
        _friedman_smooth, resids = run_friedman_smooth(x, y, span)  # pylint: disable=unused-variable
        plt.plot(x, my_smoother.cross_validated_residual, '.-',
                 label='pyace span = {0}'.format(span))
        plt.plot(x, resids, '.-', label='Friedman span = {0}'.format(span))
    finish_plot()
def validate_supersmoother():
    """Validate the supersmoother against FORTRAN SUPSMU and SMOTHR.

    Saves the comparison figure to supersmoother_validation.png rather
    than showing it interactively.
    """
    x, y = smoother_friedman82.build_sample_smoother_problem_friedman82()
    x, y = sort_data(x, y)
    my_smoother = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmootherWithPlots)
    # smoother.DEFAULT_BASIC_SMOOTHER = BasicFixedSpanSmootherBreiman
    supsmu_result = run_freidman_supsmu(x, y, bass_enhancement=0.0)
    mace_result = run_mace_smothr(x, y, bass_enhancement=0.0)
    plt.plot(x, y, '.', label='Data')
    plt.plot(x, my_smoother.smooth_result, '-', label='pyace')
    plt.plot(x, supsmu_result, '--', label='SUPSMU')
    plt.plot(x, mace_result, ':', label='SMOOTH')
    plt.legend()
    plt.savefig('supersmoother_validation.png')
def validate_supersmoother_bass():
    """Validate the supersmoother with extra bass enhancement.

    Plots the smooth for bass values 0, 3, 6 and 9 over the same data.
    """
    x, y = smoother_friedman82.build_sample_smoother_problem_friedman82()
    plt.figure()
    plt.plot(x, y, '.', label='Data')
    for bass in range(0, 10, 3):
        smooth = supersmoother.SuperSmoother()
        smooth.set_bass_enhancement(bass)
        smooth.specify_data_set(x, y)
        smooth.compute()
        plt.plot(x, smooth.smooth_result, '.', label='Bass = {0}'.format(bass))
    # pylab.plot(self.x, smoother.smooth_result, label='Bass = {0}'.format(bass))
    finish_plot()
def validate_average_best_span():
    """Figure 2d? from Friedman.

    Averages the smoothed best-span estimate over many random draws of
    the N=200 sample problem.
    """
    N = 200
    num_trials = 400
    avg = numpy.zeros(N)
    for i in range(num_trials):
        x, y = smoother_friedman82.build_sample_smoother_problem_friedman82(N=N)
        my_smoother = smoother.perform_smooth(
            x, y, smoother_cls=supersmoother.SuperSmoother
        )
        avg += my_smoother._smoothed_best_spans.smooth_result
        # Progress report every 20 trials.
        if not (i + 1) % 20:
            print(i + 1)
    avg /= num_trials
    # NOTE(review): the x axis comes from the *last* trial's smoother only.
    plt.plot(my_smoother.x, avg, '.', label='Average JCV')
    finish_plot()
def validate_known_curve():
    """Validate on a sin function.

    Runs the supersmoother twice -- once with the slow-update basic
    smoother, once with the fast one -- and overlays both on sin(4x).
    NOTE(review): this mutates the module-global DEFAULT_BASIC_SMOOTHER
    and leaves the fast implementation installed afterwards.
    """
    plt.figure()
    N = 100
    x = numpy.linspace(-1, 1, N)
    y = numpy.sin(4 * x)
    smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmootherSlowUpdate
    smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother)
    plt.plot(x, smooth.smooth_result, label='Slow')
    smoother.DEFAULT_BASIC_SMOOTHER = smoother.BasicFixedSpanSmoother
    smooth = smoother.perform_smooth(x, y, smoother_cls=supersmoother.SuperSmoother)
    plt.plot(x, smooth.smooth_result, label='Fast')
    plt.plot(x, y, '.', label='data')
    plt.legend()
    plt.show()
def finish_plot():
    """Attach legend, grid and x/y axis labels, then display the figure."""
    plt.legend()
    plt.grid(color='0.7')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
def run_freidman_supsmu(x, y, bass_enhancement=0.0):
    """Run the FORTRAN supersmoother.

    Calls Breiman's SUPSMU through the f2py `mace` bridge with uniform
    weights; the smooth is written into `results` in place and returned.
    (Function name keeps the historical misspelling for compatibility.)
    """
    N = len(x)
    weight = numpy.ones(N)
    results = numpy.zeros(N)
    # SUPSMU scratch/flag workspace: 7 columns per data point.
    flags = numpy.zeros((N, 7))
    mace.supsmu(x, y, weight, 1, 0.0, bass_enhancement, results, flags)
    return results
def run_friedman_smooth(x, y, span):
    """Run the FORTRAN fixed-span smoother.

    Returns (smooth, cross-validated residuals); both arrays are filled
    in place by the f2py `mace.smooth` call using uniform weights.
    """
    N = len(x)
    weight = numpy.ones(N)
    results = numpy.zeros(N)
    residuals = numpy.zeros(N)
    mace.smooth(x, y, weight, span, 1, 1e-7, results, residuals)
    return results, residuals
def run_mace_smothr(x, y, bass_enhancement=0.0):  # pylint: disable=unused-argument
    """Run the FORTRAN SMOTHR.

    `bass_enhancement` is accepted only for signature symmetry with the
    other runners; SMOTHR does not take it.
    """
    N = len(x)
    weight = numpy.ones(N)
    results = numpy.zeros(N)
    flags = numpy.zeros((N, 7))
    mace.smothr(1, x, y, weight, results, flags)
    return results
class BasicFixedSpanSmootherBreiman(smoother.Smoother):
    """Smoother backed by Breiman's FORTRAN SMOOTH (via the f2py bridge)."""
    def compute(self):
        """Run smoother.

        Requires x, y and the span (self._span) to be set beforehand.
        """
        self.smooth_result, self.cross_validated_residual = run_friedman_smooth(
            self.x, self.y, self._span
        )
class SuperSmootherBreiman(smoother.Smoother):
    """Smoother backed by Breiman's FORTRAN SUPSMU (via the f2py bridge)."""
    def compute(self):
        """Run SuperSmoother."""
        self.smooth_result = run_freidman_supsmu(self.x, self.y)
        # SUPSMU does not produce residuals, so store zeros alongside.
        self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result)))
def sort_data(x, y):
    """Return *x* and *y* as tuples, reordered so x is ascending.

    Ties in x fall back to comparing the paired y values (tuple order).
    """
    pairs = sorted(zip(x, y))
    xs, ys = zip(*pairs)
    return xs, ys
if __name__ == '__main__':
validate_basic_smoother()
# validate_basic_smoother_resid()
#validate_supersmoother()
| true |
06d3b158ffacd21d90b402b700605c5117753f6a | Python | tahe-ba/Programmation-Python | /serie/serie 1 I-O/code/first.py | UTF-8 | 895 | 3.265625 | 3 | [] | no_license | # Calcule PrixTTC
# Accepting only valid input
# Asking before quit
exit = 'F'
while exit != 'T' :
    # Keep prompting until a parseable price is entered.
    while True:
        try:
            prix = float(input("prix HT="))
            break
        except ValueError:
            print("Wrong input")
    # Keep prompting until a valid VAT choice (1 or 2) is made.
    tva = None
    while tva is None:
        try:
            option = int(input("if tva = 15% press 1 or press 2 if tva is 19% : "))
        except ValueError:
            print("Wrong input")
            continue
        if option == 1:
            tva = 0.15
        elif option == 2:
            tva = 0.19
        else:
            print("only 1 or 2 are acceptable")
    prixttc = prix + (tva * prix)
    print("Le prix ttc de votre prix ht est : %.2f est %.2f" % (prix, prixttc))
    # print("Le prix ttc de votre prix ht est : {:.2f} est {:.2f}".format(prix, prixttc))
    exit = input("if you want tou quit type T: ")
| true |
007d9bbcd0d40e9e5075e969cdaff5cb002410c3 | Python | zlodiak/lessons2 | /js/iterators_generators/doklad/python/custom_iterator.py | UTF-8 | 661 | 4.03125 | 4 | [] | no_license | class Obj(): # Итерируемый объект
def __init__(self, word):
self.word = word
def __iter__(self):
return Iterator(self.word)
class Iterator: # Итератор
def __init__(self, word):
self.word = word
self.index = 0
def __next__(self):
try:
letter = self.word[self.index]
self.index += 1
return letter
except IndexError:
raise StopIteration()
def __iter__(self):
return self
obj = Obj('sergey')
it = iter(obj)
print(it.__next__())
print(it.__next__())
print(it.__next__())
print(it.__next__())
print(it.__next__())
print(it.__next__())
print(it.__iter__()) | true |
5e3e36a062adcac30994b21b02f5ca245f8deb79 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_3/dbzphi002/question3.py | UTF-8 | 684 | 2.953125 | 3 | [] | no_license | #Thembekile Dubazana
#DBZPHI002
word=input('Enter the message:\n')
# NOTE(review): eval() on raw user input executes arbitrary code -- int()
# would be the safe choice for these two counts.
r=eval(input('Enter the message repeat count:\n'))
w=eval(input('Enter the frame thickness:\n'))
i=0
j=0
# l tracks the current border width; it shrinks by 2 per top row and
# grows back by 2 per bottom row so the frame nests correctly.
l=len(word)+2*w
k=w-1
word=" %s "%word #spacing of word
while i < w:#while loop for top
    print('|'*i,'+','-'*l,'+','|'*i,sep="")
    l=l-2
    i=i+1
    if i == w:#while loop for message and repeat
        # Innermost frame reached: print the message r times.
        while j < r:
            print('|'*w +word+'|'*w)
            j=j+1
            if j == r:
                l=l+2
                while k > -1:#while loop for bottom
                    # Mirror of the top: widen the dashes as k shrinks.
                    print('|'*k,"+",'-'*l,'+','|'*k,sep="")
                    l=l+2
                    k=k-1
| true |
e85caa76ded07fdcb857695d39d9ab27acbac75d | Python | netgroup-polito/when-latency-matters-datasets | /11-ResidentialCloudDetail/plotter.py | UTF-8 | 753 | 2.828125 | 3 | [] | no_license | import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def cdf(data):
    """Return (x, y) arrays describing the empirical CDF of *data*.

    x holds the sorted samples; y holds evenly spaced fractions in [0, 1],
    one per sample.
    """
    ordered = np.sort(data)
    fractions = np.linspace(0, 1, len(ordered))
    return (ordered, fractions)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_files", help="Input files", nargs='+')
parser.add_argument("--xmax", help="X Max", type=int, default=50)
args = parser.parse_args()
for i, file in enumerate(sorted(args.input_files)):
print(f"Processing file: {file}")
data = pd.read_csv(file)
plt.boxplot(data['e2e-rtt'], whis=[1, 99], positions=[i, ], vert=False, sym="", widths=0.5, labels=[file.split('/')[-1], ])
plt.xlim(0, args.xmax)
plt.xlabel("FCT (ms)")
plt.show()
| true |
499290a64309997a8323a44e315a888956ccad5a | Python | NEHAISRANI/Python_Programs | /Debugging-Questions-master/users.py | UTF-8 | 528 | 3.109375 | 3 | [] | no_license | import json
data_file=open("users.json","r")
data = json.load(data_file)
print data
print type(data)
# users = data["users"]
# print users
# counter=0
# while counter<len(users):
# print ("users full name is " + users[counter]['firstName'] + ' ' + users[counter]['lastName'])
# print ("users mobile number is " + str(users[counter]['details']['mobileNo']))
# print ("users age is " + str(users[counter]['details']['age']))
# print ("users city is " + users[counter]['details']['City'])
# counter=counter+1 | true |
177d77e7f50bff8c954b3855dabdc5ddc2392563 | Python | yerkesobservatory/techcamp | /users/huber/Test Programs/flask_test.py | UTF-8 | 247 | 2.53125 | 3 | [] | no_license | from flask import Flask, Response
import time
app = Flask(__name__)
x=0
@app.route('/')
def index():
    # NOTE(review): Response's second positional argument is the HTTP
    # status, so passing gen(x) there is almost certainly a bug -- the
    # generator was presumably meant to be the (streaming) body.
    return Response("x is ", gen(x))
def gen(x):
    """Generator yielding exactly one value: *x* incremented by one."""
    yield x + 1
if (__name__=='__main__'):
app.run(debug=True, host='0.0.0.0')
| true |
2c7d17284d5f8cd3bda48938174124d15021f3cf | Python | sejal-varu/visualization_matplotlib_project | /q04_plot_runs_by_balls/build.py | UTF-8 | 506 | 2.84375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
ipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)
# Solution
def plot_runs_by_balls():
    """Scatter runs scored vs balls faced per (match, inning, batsman)."""
    # NOTE(review): pd_df1 is computed but never used afterwards.
    pd_df1 = ipl_df.pivot_table(index="delivery",values="runs", columns=["match_code"], aggfunc="count")
    # Balls faced = number of delivery rows per batsman innings;
    # runs scored = sum of runs over the same grouping.
    balls_played = ipl_df.groupby(["match_code","inning","batsman"])["delivery"].count()
    runs_scored = ipl_df.groupby(["match_code","inning","batsman"])["runs"].sum()
    plt.scatter(runs_scored,balls_played)
    plt.show()
| true |
cfa1505536574f83bbcb412cdd53f63ddd8846fe | Python | DimaMirana/Udemy-Machine-Learning-A-Z | /4-Clustering/1.kmeans_clustering.py | UTF-8 | 2,092 | 3.5 | 4 | [] | no_license | # Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
# Importing the dataset
# Columns 3 and 4 are Annual Income (k$) and Spending Score (1-100).
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3,4]].values
#y = dataset.iloc[:, 3].values we don't know what to look for
# Splitting the dataset into the Training set and Test set
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Using the elbow method to find the optimal number of clusters
# (fixed random_state keeps each fit reproducible)
from sklearn.cluster import KMeans
wcss = []
for i in range(1,11):
    kmeans = KMeans(n_clusters= i, init='k-means++',max_iter=300,n_init=10,random_state=0)
    kmeans.fit(X)
    # inertia_ is the within-cluster sum of squares for this k.
    wcss.append(kmeans.inertia_)
plt.plot(range(1,11),wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of cluster')
plt.ylabel('wcss')
plt.show()
# Applying data means to the mall dataset
# k = 5 was chosen from the elbow in the plot above.
kmeans = KMeans(n_clusters = 5, init='k-means++',max_iter=300,n_init=10,random_state = 0)
y_kmneans = kmeans.fit_predict(X) #fit the data and set the data into the cluster it belongs
# Visualising the clusters
# The segment names below are interpretations of the income/spending mix
# of each cluster, tied to this random_state.
plt.scatter(X[y_kmneans ==0, 0],X[y_kmneans ==0 , 1],s= 100,c = 'red', label = 'careful')
plt.scatter(X[y_kmneans ==1, 0],X[y_kmneans ==1 , 1],s= 100,c = 'blue', label = 'standard')
plt.scatter(X[y_kmneans ==2, 0],X[y_kmneans ==2 , 1],s= 100,c = 'green', label = 'Target')
plt.scatter(X[y_kmneans ==3, 0],X[y_kmneans ==3 , 1],s= 100,c = 'cyan', label = 'careless')
plt.scatter(X[y_kmneans ==4, 0],X[y_kmneans ==4 , 1],s= 100,c = 'magenta', label = 'sensible')
plt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1], s = 300, c = 'yellow', label = 'centroids')
plt.title('Cluster of clients')
plt.xlabel('Annual Income(k$)')
plt.ylabel('Spending Score(1-100)')
plt.legend()
plt.show()
5d9dce2d57244daed843530e3e687e17380e02c2 | Python | peterhogan/python | /pidigits.py | UTF-8 | 525 | 3.15625 | 3 | [
"MIT"
] | permissive | from math import factorial
from math import pi
from decimal import *
def pisum(n):
    """Return the n-th term of a Bellard-style BBP series for pi."""
    sign = (-1)**n / 2**(10*n)
    inner = (
        -(2**5) / (4*n + 1)
        - (1 / (4*n + 3))
        + ((2**8) / (10*n + 1))
        - ((2**6) / (10*n + 3))
        - ((2**2) / (10*n + 5))
        - ((2**2) / (10*n + 7))
        + (1 / (10*n + 9))
    )
    return sign * inner
# NOTE(review): Python 2 code (print statements). Under Python 2,
# 1/(2**6) is integer division, so const is 0; it is never used below.
const = 1/(2**6)
print pisum(0)
def pi_nth_term(n):
    # Term of the series pi = sum 2^(n+1) * (n!)^2 / (2n+1)!  The float
    # intermediates limit accuracy despite the Decimal wrapper.
    # NOTE(review): factorial(2.*n + 1.) passes a float -- math.factorial
    # rejects floats on modern Python 3, so this is Python-2-era code.
    term =Decimal( ( ( (factorial(n))**2. ) * (2.**(n+1.)) )/(factorial(2.*n + 1.)))
    return term
# Accumulate the first 89 series terms; the sum converges toward pi.
pi_me = Decimal(0.)
for i in range(89):
    pi_me += pi_nth_term(i)
# Python 2 print statements: compare the computed sum with math.pi.
print Decimal(pi_me)
print Decimal(pi)
| true |
d3b888a891303ddfc50050bb92cda031ed1bbf9a | Python | KKisly/Python_Files | /model/contact.py | UTF-8 | 2,032 | 2.578125 | 3 | [] | no_license | #helper class for add contact page
from sys import maxsize
class Contact:
def __init__(self, name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None, address=None, telephone_home=None, telephone_mobile=None,
telephone_work=None, fax=None, email_1=None, email_2=None, email_3=None, homepage=None, secondary_address=None, home=None, notes=None, id=None,
all_telephones_from_home_page=None, all_emails_from_home_page=None):
self.name = name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.telephone_home = telephone_home
self.telephone_mobile = telephone_mobile
self.telephone_work = telephone_work
self.fax = fax
self.email_1 = email_1
self.email_2 = email_2
self.email_3 = email_3
self.homepage = homepage
# self.birthday_option_1 = birthday_option_1
# self.birthday_option_2 = birthday_option_2
# self.birthday_year = birthday_year
# self.anniversary_option_1 = anniversary_option_1
# self.anniversary_option_2 = anniversary_option_2
# self.anniversary_year = anniversary_year
self.secondary_address = secondary_address
self.home = home
self.notes = notes
self.id = id
self.all_telephones_from_home_page = all_telephones_from_home_page
self.all_emails_from_home_page=all_emails_from_home_page
def __repr__(self):
return "%s:%s%s:%s:%s" % (self.id, self.name, self.last_name, self.address, self.email_1)
def __eq__(self, other):
return ((self.id is None or other.id is None or self.id == other.id) or self.name == other.name or self.last_name == other.last_name, self.address == other.address)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| true |
42814b8b2f76121fdf030c67e445ecd2a09de615 | Python | Cuiqingyao/course-exercise | /login_demo/main.py | UTF-8 | 1,929 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | """
@Time: 2018/5/8 19:03
@Author: qingyaocui
"""
import os
import sys
from data_helper import load_userdata
# Project root (two directories above this file); put it on sys.path so that
# sibling packages such as data_helper are importable when run as a script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
# Module-level single-sign-on flag: flipped to True after the first successful login.
auth_status = False
def menu():
    """Interactive top-level menu: dispatch to a page, or exit on q/Q.

    BUG FIX / hardening: the original dispatched via eval() on strings like
    'home()'.  A dict of callables is safer, faster, and fails loudly on typos.
    """
    # Choices map to the page functions; names resolve at call time, so the
    # pages may be defined later in the module.
    user_operation = {
        '1': home,
        '2': finance,
        '3': book,
    }
    while True:
        print('welcome to the JD!'.center(30, '-'))
        print('1. home page')
        print('2. finance page')
        print('3. book page')
        print('Q/q. exit')
        user_input = input('>>:')
        if user_input in ('q', 'Q'):
            exit(0)  # the original mapped q/Q to exit code 0
        if user_input in user_operation:
            user_operation[user_input]()
        else:
            print('please input correct options ~')
def login(auth_type):
    '''
    Decorator factory that guards a page function behind credential verification.
    :param auth_type: key identifying the credential set to check (e.g. 'jd_user', 'wx_user')
    :return: decorator wrapping a page function with an interactive login check
    '''
    def check(f):
        # Credentials are loaded once, at decoration (import) time.
        data = load_userdata(auth_type)
        def inner():
            global auth_status
            # auth_status is a module-level flag shared by all pages: once any
            # page logs in successfully, every page is considered logged in.
            if auth_status == False:
                while True:
                    username = input('please input %s name >>: ' % (auth_type))
                    password = input('please input %s password >>: ' % (auth_type))
                    if username.strip() == data['username'] and password.strip() == data['password']:
                        auth_status = True
                        f()
                        return
                    else:
                        print('incorrect username or password!')
            else:
                f()
        return inner
    return check
@login('jd_user') # home = check(home)
def home():
    """Home page; guarded by 'jd_user' credentials."""
    print('home page!!')
@login('wx_user')
def finance():
    """Finance page; guarded by 'wx_user' credentials."""
    print('finance page!!')
@login('jd_user')
def book():
    """Book page; guarded by 'jd_user' credentials."""
    print('book page!!')
if __name__ == '__main__':
    menu()
8ea97c4faa0f1aa0637136faab8bc1b644e25e40 | Python | ul-gaul/Avionique_Software | /BaseStation/src/data_processing/apogee.py | UTF-8 | 952 | 3.28125 | 3 | [
"MIT"
] | permissive | class Apogee:
    def __init__(self, timestamp: float, altitude: float):
        """Record the apogee's time and altitude; a freshly built apogee is marked reached."""
        self._timestamp = timestamp
        self._altitude = altitude
        # Directly constructed instances represent a real apogee; see unreached().
        self._is_reached = True
    @property
    def timestamp(self) -> float:
        """Time at which the apogee occurred."""
        return self._timestamp
    @property
    def altitude(self) -> float:
        """Altitude reached at apogee."""
        return self._altitude
    @property
    def is_reached(self) -> bool:
        """False only for the sentinel built by Apogee.unreached()."""
        return self._is_reached
    @staticmethod
    def unreached():
        """Factory for a sentinel Apogee meaning 'apogee not reached yet' (zeroed fields)."""
        apogee = Apogee(0, 0)
        apogee._is_reached = False
        return apogee
def __eq__(self, other):
if not isinstance(other, Apogee):
return False
return (self._timestamp == other.timestamp and self._altitude == other.altitude and
self._is_reached == other.is_reached)
def __ne__(self, other):
return not self.__eq__(other)
    def __hash__(self):
        # Hash over the same triple used by __eq__, keeping the eq/hash contract.
        return hash((self._timestamp, self._altitude, self._is_reached))
| true |
9d3f78b0ff41e27d3a3975a2df2a5401835b1e0f | Python | kirilcvetkov92/continuous_actions_rl | /PPO_models.py | UTF-8 | 5,131 | 2.90625 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def weights_init_lim(layer):
    """Return the symmetric uniform init range (-1/sqrt(fan), +1/sqrt(fan)) for a layer.

    The fan is taken from the first dimension of the layer's weight tensor.
    """
    fan = layer.weight.data.size()[0]
    bound = 1. / np.sqrt(fan)
    return (-bound, bound)
class PPO_ActorCritic(nn.Module):
    """
    PPO Actor Critic Network.
    2 Parts:
    1) Actor: input state (array), convert into action. Based on that
    action create a prob distribution. Based on that distribution
    resample another action. Output the resampled action and prob dist
    2) Critic: input a state and output a Q value (action is implicit)
    The Q value is used to calculate advantage score and td value.
    """
    def __init__(self, state_size, action_size, device, seed=0,
                 hidden_layer1=512, hidden_layer2=64, hidden_layer3=0):
        """Initialize parameters and build model.
        Key Params
        ======
        inputs:
            input_channel (int): Dimension of input state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_layer1(int): number of neurons in first hidden layer
            hidden_layer2(int): number of neurons in second hidden layer
        outputs:
            probability distribution (float) range 0:+1
        """
        super(PPO_ActorCritic, self).__init__()
        self.seed = torch.manual_seed(seed)
        # input size: batch_size, state_size
        # common shared network (trunk used by both actor and critic heads)
        self.bn_0c = nn.BatchNorm1d(state_size) #batch norm
        self.fc_1c = nn.Linear(state_size, hidden_layer1) #then relu
        self.bn_1c = nn.BatchNorm1d(hidden_layer1) #batch norm
        self.fc_2c = nn.Linear(hidden_layer1, hidden_layer2) #then relu
        #self.bn_2c = nn.BatchNorm1d(hidden_layer2) #batch norm
        #self.fc_3c = nn.Linear(hidden_layer2, hidden_layer3) #then relu
        # common connecting layer
        self.bn_3c = nn.BatchNorm1d(hidden_layer2) #batch norm for stability
        # one extra layer for action
        #self.fc_3a = nn.Linear(hidden_layer2, hidden_layer3)
        #self.bn_3a = nn.BatchNorm1d(hidden_layer3) #batch norm for stability
        #self.fc_4a = nn.Linear(hidden_layer3, action_size)
        self.fc_4a = nn.Linear(hidden_layer2, action_size)
        # for critic network (state->V)
        self.fc_3v = nn.Linear(hidden_layer2, 1)
        # for converting tanh value to prob
        # learnable per-action std for the Gaussian policy, initialised at 0.15
        #self.std = nn.Parameter(torch.zeros(action_size))
        self.std = nn.Parameter(torch.ones(1, action_size)*0.15)
        self.to(device)
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialise the live linear layers uniformly within weights_init_lim bounds."""
        # initialize the values
        self.fc_1c.weight.data.uniform_(*weights_init_lim(self.fc_1c))
        self.fc_2c.weight.data.uniform_(*weights_init_lim(self.fc_2c))
        #self.fc_3c.weight.data.uniform_(*weights_init_lim(self.fc_3c))
        #self.fc_3a.weight.data.uniform_(*weights_init_lim(self.fc_3a))
        self.fc_4a.weight.data.uniform_(*weights_init_lim(self.fc_4a))
        self.fc_3v.weight.data.uniform_(*weights_init_lim(self.fc_3v))
    def forward(self, s, resampled_action=None, std_scale=1.0):
        """Build a network that maps state -> actions.

        s: batch of states — assumes shape (batch, state_size); TODO confirm.
        resampled_action: if given, evaluate its log-prob instead of sampling.
        std_scale: scales the clamp range of the policy's std (exploration knob).
        Returns a dict with keys 'log_prob', 'a', 'ent', 'v'.
        """
        # state, apply batch norm BEFORE activation
        # common network
        s = F.relu(self.fc_1c(self.bn_0c(s)))
        s = F.relu(self.fc_2c(self.bn_1c(s)))
        #s = F.relu(self.fc_3c(self.bn_2c(s)))
        sc = self.bn_3c(s) # -> Q and action branch
        # td Q branch
        # NOTE(review): comment said "no activation" but relu IS applied, which
        # clamps the value estimate to >= 0 — confirm intent.
        v = F.relu(self.fc_3v(sc)) # no activation
        # action branch
        #a = F.relu(self.fc_3a(sc))
        #a = self.fc_4a(self.bn_3a(a)) # then tanh
        a = self.fc_4a(sc) # then tanh
        # proposed action, we will then use this action as mean to generate
        # a prob distribution to output log_prob
        a_mean = torch.tanh(a)
        # base on the action as mean create a distribution with zero std...
        #dist = torch.distributions.Normal(a_mean, F.softplus(self.std)*std_scale)
        dist = torch.distributions.Normal(a_mean, F.hardtanh(self.std, min_val=0.05*std_scale, max_val=0.5*std_scale))
        # sample from the prob distribution just generated again
        if resampled_action is None:
            resampled_action = dist.sample()
        #handle nan value
        #resampled_action[resampled_action != resampled_action] = 0.0
        #v[v != v] = 0.0
        # then we have log( p(resampled_action | state) ): batchsize, 1
        log_prob = dist.log_prob(resampled_action).sum(-1).unsqueeze(-1)
        # sum(-p * log(p))
        entropy = dist.entropy().sum(-1).unsqueeze(-1) #entropy for noise
        pred = {'log_prob': log_prob, # prob dist based on actions generated, grad true, (num_agents, 1)
                'a': resampled_action.detach().cpu().numpy(), #sampled action based on prob dist (num_agents,action_size)
                'ent': entropy, #for noise, grad true, (num_agents or m, 1)
                'v': v #Q score, state's V value (num_agents or m,1)
                }
        # final output
        return pred
| true |
a426a78565bf8cd2a93b6f043be1f4e153aff85b | Python | davidic2ofu/COMP-7712-Algorithms | /pa3.py | UTF-8 | 3,135 | 4.09375 | 4 | [] | no_license |
instructions = '''
David Rosenberg
U00063482
COMP 7712
Programming Assignment 3
NOTE: This dynamic programming solution runs in O( X Y n ) time, which
is the upper bound of time it takes to populate the value array. It
works basically like a 2-d n-knapsack problem.
CLOTH CUTTING PROBLEM...
6.14. Cutting cloth. You are given a rectangular piece of cloth with
dimensions X x Y , where X and Y are positive integers, and a list of
n products that can be made using the cloth. For each product i exists [1,
n] you know that a rectangle of cloth of dimensions ai x bi is needed
and that the final selling price of the product is ci. Assume the ai,
bi, and ci are all positive integers. You have a machine that can cut
any rectangular piece of cloth into two pieces either horizontally or
vertically. Design an algorithm that determines the best return on
the X x Y piece of cloth, that is, a strategy for cutting the cloth
so that the products made from the resulting pieces give the maximum
sum of selling prices. You are free to make as many copies of a given
product as you wish, or none if desired.
Assume the input is given as follows. The first line will be X and Y
separated by a space symbol. Then, comes n on the next line, and the
remaining n lines will be ai, bi, ci separated by spaces. The output
must contain the best return you can get.
'''
def cloth(X, Y, abc):
    """Return the best total selling price obtainable from an X x Y piece of cloth.

    abc maps product index i (1..n) to a tuple (a_i, b_i, c_i): a rectangle of
    a_i x b_i cloth sells for c_i.  Only guillotine (edge-to-edge) cuts are
    allowed; any number of copies of each product may be made.
    Runs in O(X * Y * n) via memoized recursion (a 2-D knapsack).
    """
    def _cloth(n, x, y):
        # check the memo first so each (n, x, y) state is computed once
        if value[n][x][y] is not None:
            return value[n][x][y]
        # no products left to consider, or the cloth has zero area
        if n == 0 or x == 0 or y == 0:
            result = 0
        # rotate the piece if product n fits only in the other orientation
        elif (x < abc[n][0] and x >= abc[n][1] and y >= abc[n][0]) or (y < abc[n][1] and y >= abc[n][0] and x >= abc[n][1]):
            result = _cloth(n, y, x)
        # product n does not fit at all: fall through to the next product
        elif x < abc[n][0] or y < abc[n][1]:
            result = _cloth(n - 1, x, y)
        else:
            # choose the best of: skipping product n, or cutting it out with a
            # vertical-first or horizontal-first pair of guillotine cuts
            skip = _cloth(n - 1, x, y)
            cut_v = abc[n][2] + _cloth(len(abc), x - abc[n][0], y) + _cloth(len(abc), abc[n][0], y - abc[n][1])
            cut_h = abc[n][2] + _cloth(len(abc), x, y - abc[n][1]) + _cloth(len(abc), x - abc[n][0], abc[n][1])
            result = max(skip, cut_v, cut_h)
        value[n][x][y] = result
        return result

    # BUG FIX: _cloth may swap x and y (rotation), so BOTH indices can range up
    # to max(X, Y).  The original table was sized [Y+1][X+1] but indexed
    # [x][y], raising IndexError whenever X != Y.
    side = max(X, Y)
    value = [[[None] * (side + 1) for _ in range(side + 1)] for _ in range(len(abc) + 1)]
    return _cloth(len(abc), X, Y)
if __name__ == '__main__':
    print(instructions)
    # BUG FIX: removed a leftover debugger breakpoint (`import pdb;pdb.set_trace()`)
    # that froze the program before the first prompt.
    user_input = input('\nEnter X and Y separated by a space symbol: ')
    X, Y = tuple(int(i) for i in user_input.split())
    n = int(input('\nEnter value for n: '))
    abc = {}
    # Read the n product descriptions (a_i, b_i, c_i), keyed 1..n.
    for i in range(n):
        user_input = input('\nEnter a{i}, b{i}, c{i}, separated by spaces: '.format(i=i+1))
        user_tuple = tuple(int(i) for i in user_input.split())
        abc.update({i+1: user_tuple})
    print('\nBest return: $' + str(cloth(X, Y, abc)) + '\n')
13cee3117f43f97f95696ab0e901c566d1f7ce2d | Python | devilhtc/leetcode-solutions | /0x03a3_931.Minimum_Falling_Path_Sum/solution.py | UTF-8 | 616 | 2.515625 | 3 | [] | no_license | class Solution(object):
def minFallingPathSum(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
m, n = len(A), len(A[0])
dp = [[0] * n for _ in range(m)]
for j in range(n):
dp[0][j] = A[0][j]
for i in range(1, m):
for j in range(n):
dp[i][j] = dp[i - 1][j]
if j > 0:
dp[i][j] = min(dp[i][j], dp[i - 1][j - 1])
if j < n - 1:
dp[i][j] = min(dp[i][j], dp[i - 1][j + 1])
dp[i][j] += A[i][j]
return min(dp[-1])
| true |
76d75b00ed36bb9b6e1ad8631410b0e074c557bd | Python | Anwaydeep2000/Andy-Desktop-Assistant | /Andy.py | UTF-8 | 6,293 | 2.875 | 3 | [
"MIT"
] | permissive | import speech_recognition as sr #pip install SpeechRecognition
import pywhatkit #pip install pywhatkit
from requests import get #pip install requests
import cv2 #pip install opencv-python
import random
import webbrowser #pip install webrowser
import datetime
import os
import wikipedia #pip install wikipedia
import pyttsx3 #pip install pyttsx3
# Text-to-speech engine using the Windows SAPI5 backend.
engn = pyttsx3.init('sapi5')
voices = engn.getProperty('voices')
# NOTE(review): assumes at least two installed voices; index 1 selection — confirm.
engn.setProperty('voice', voices[1].id)
#string to speech
def say_it(audio):
    """Speak the given text aloud via the module-level pyttsx3 engine (blocking)."""
    engn.say(audio)
    engn.runAndWait()
#speech to string
def takeCommand():
    """Listen on the microphone and return the recognized phrase as a string.

    Returns the literal string "None" (not the None object) when recognition fails.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        #Performs speech recognition on an AudioData instance using the Google Speech Recognition API.
        mycommand = r.recognize_google(audio, language='en-in')
        print(f"User said: {mycommand}\n")
    except Exception as e:
        # print(e)
        print("Say that again please...")
        return "None"
    return mycommand
# greetings
def welcome():
    """Greet the user according to the current local time of day, then introduce Andy."""
    current_hour = datetime.datetime.now().hour
    if 0 <= current_hour < 12:
        greeting = "Good Morning!"
    elif 12 <= current_hour < 18:
        greeting = "Good Afternoon!"
    else:
        greeting = "Good Evening!"
    say_it(greeting)
    say_it("I am Andy. Please tell me how may I help you")
if __name__ == "__main__":
    welcome()
    # Main interaction loop: listen for a phrase, match keywords, act.
    while True:
        mycommand = takeCommand().lower()
        # Offline tasks
        if 'open camera' in mycommand or 'webcam' in mycommand:
            # defining a video capture object
            vid = cv2.VideoCapture(0)
            while(True):
                # Capture the video frame by frame
                ret, frame = vid.read()
                # Display the resulting frame
                cv2.imshow('frame', frame)
                # quitting button 'q'
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            # After the loop release the cap object
            vid.release()
            cv2.destroyAllWindows()
        elif 'command prompt' in mycommand:
            say_it("Okay")
            os.system('start cmd')
        elif 'the time' in mycommand:
            # BUG FIX: the format string used "&S" instead of "%S", so seconds
            # were rendered literally; "%H:%M:%S" is the correct strftime form.
            strtime = datetime.datetime.now().strftime("%H:%M:%S")
            say_it(f"yea ,The time is {strtime}")
        elif 'music' in mycommand :
            say_it("just a second, playing a good music from your device")
            musicdir = 'D:\Music\English Songs'
            songs = os.listdir(musicdir)
            print(songs)
            os.startfile(os.path.join(musicdir,random.choice(songs)))
        elif 'open vs code' in mycommand:
            cpath = "C:\\Users\\natha\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
            os.startfile(cpath)
        elif 'shut down my device' in mycommand:
            os.system('shutdown /s /t 5')
            say_it('Shutting down. have a nice day ahead')
        elif 'restart my device' in mycommand:
            os.system('shutdown /r /t 5')
            say_it('Restarting your device. Hold on')
        # CHATBOT small talk
        elif 'thank you' in mycommand:
            say_it("you're welcome:)")
            say_it("can i do anything else for you?")
        elif 'about yourself' in mycommand or 'who are you' in mycommand or 'what is your name' in mycommand:
            say_it("my name is Andy")
            say_it("I am an intelligent bot just like siri or google assistant\nI was developed by mr. Anwaydeep Nath ")
            say_it("what can i do for you?")
        elif 'who made you' in mycommand or 'who built you' in mycommand or 'who developed you' in mycommand:
            say_it("I was developed by mr. Anwaydeep Nath ")
        elif 'what are you doing' in mycommand:
            say_it('Just doing my thing!')
        elif 'bixbi' in mycommand or 'siri' in mycommand or 'google assistant' in mycommand or 'alexa' in mycommand:
            say_it("Yeahh they all are good\n but I am also a sweat one like them")
        elif 'exit' in mycommand or 'quit' in mycommand:
            say_it('Okay quiting, have a nice day ahead')
            exit()
        # Online tasks
        # NOTE(review): mycommand is lower-cased, so the 'IP address' variant can
        # never match; the lowercase alternative keeps the branch reachable.
        elif 'IP address' in mycommand or 'ip address' in mycommand:
            ip = get("https://api.ipify.org").text
            say_it(f"your ip adress is {ip}")
        elif 'send message' in mycommand or 'whatsapp' in mycommand:
            say_it("Okay ,what message should i send")
            msg=takeCommand().lower()
            # NOTE(review): placeholder phone number and hard-coded 00:00 send time — confirm.
            pywhatkit.sendwhatmsg("+91number",f"{msg}",00,00)
        elif 'open google' in mycommand:
            say_it("Okay ,what should i search on google")
            srch=takeCommand().lower()
            webbrowser.open(f"{srch}")
            say_it("Done")
        elif 'wikipedia' in mycommand:
            say_it('Searching Wikipedia...')
            try:
                query = mycommand.replace("wikipedia", "")
                results = wikipedia.summary(query, sentences=1)
                say_it("According to Wikipedia")
                print(results)
                say_it(results)
                say_it("do you want me to do anything else?")
            except Exception as e:
                say_it("sorry, i'm unable to search at this moment")
        elif 'open youtube' in mycommand:
            webbrowser.open("youtube.com")
            say_it("Okay , I am Opening Youtube")
        elif 'open facebook' in mycommand:
            webbrowser.open("facebook.com")
            say_it("Sure , I am Opening facebook")
        elif 'open linkedin' in mycommand:
            webbrowser.open("linkedin.com")
            say_it("Okay , I am Opening linkedin")
        elif 'open instagram' in mycommand:
            webbrowser.open("instagram.com")
            say_it("Yea!, Opening instagram")
        else:
            say_it("sorry,I did not get you!!")
579b38cd740f728ce45bb44e663918ed2d925586 | Python | sermoacidus/TCost_project | /main.py | UTF-8 | 1,584 | 2.5625 | 3 | [] | no_license | import apiprep
import config
from loguru import logger
import requests
import pandas as pd
URL = "https://api.jde.ru/vD/calculator/PriceAddress?"
def main_code(resdict):
"""
возвращает город и цену доставки полученных на выход данных
"""
for deliverytarget, boxes_amount in resdict.items():
params = {"addr_from": "Тверь",
"addr_to": deliverytarget,
"type": "1",
"weight": config.BOXWEIGHT,
"volume": config.BOXVOLUME,
"quantity": boxes_amount,
"pickup": '1',
"delivery": '1',
"user": config.USER,
"token": config.TOKEN}
r = requests.get(URL, params=params)
logger.info((r.json()['price'], type(r.json()['price'])))
yield int(r.json()['price'])
logger.debug('activating generator of requests to API')
a = main_code(apiprep.result_of_module)
new_column = []
logger.debug('uploading consumer.xls')
df = pd.read_excel('data_set_for_TCost.xlsx')
logger.debug('Filling column in the table with delivery prices')
for gp in range(0, len(df.values)):
for deltarget, boxes in apiprep.result_of_module.items():
i = 0
if deltarget == df.values[gp][2]:
new_column.append(int(next(a)))
i = 1
break
if i == 0:
new_column.append(None)
df['price'] = new_column
df.to_excel('data_set_for_TCost.xlsx', sheet_name='with_price', index=False)
logger.info('DONE')
| true |
8a40fd67ef11b3d19a2e0f2e69f533f4aa02bf41 | Python | shimoishiryusei/school_programing_backup | /Code_Py/check_QuickSort.py | UTF-8 | 1,227 | 3.8125 | 4 | [] | no_license | import time
import random
def performance_check(method, data, num=3):
s = time.time()
for i in range(num):
method(data)
e=time.time()
return e-s
def quick_sort4_7(array):
if not array:
return array
pivot = random.choice(array)
left = []
right = []
pivots = []
for v in array:
if v < pivot:
left.append(v)
elif v == pivot:
pivots.append(v)
else:
right.append(v)
return quick_sort4_7(left) + pivots + quick_sort4_7(right)
def quick_sort4_5(array):
if array == []:
return array
P = array[-1]
left = []
right = []
pivots = []
for v in array:
if v < P:
left.append(v)
elif v == P:
pivots.append(v)
else:
right.append(v)
return quick_sort4_5(left) + pivots + quick_sort4_5(right)
my_array = [random.randint(0, 100) for i in range(15)]
print("Code4.7の実行結果:", quick_sort4_7(my_array))
print("Code4.5の実行結果:", quick_sort4_5(my_array))
print("Code4.7の実行時間:", performance_check(quick_sort4_7, my_array))
print("Code4.5の実行時間:", performance_check(quick_sort4_5, my_array))
| true |
083778794af0940aac3f7446519f63a8a87e19aa | Python | Hirni-Meshram2/pants | /src/python/pants/build_graph/address.py | UTF-8 | 14,775 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional, Sequence
from pants.engine.engine_aware import EngineAwareParameter
from pants.util.dirutil import fast_relpath, longest_dir_prefix
from pants.util.strutil import strip_prefix
# Currently unused, but reserved for possible future needs.
BANNED_CHARS_IN_TARGET_NAME = frozenset(r"@!?/\:=")
class InvalidSpecPath(ValueError):
    """Indicate an invalid spec path for `Address` (absolute, un-normalized, or reserved)."""
class InvalidTargetName(ValueError):
    """Indicate an invalid target name for `Address` (missing, banned characters, or misplaced)."""
@dataclass(frozen=True)
class AddressInput:
    """A string that has been parsed and normalized using the Address syntax.

    An AddressInput must be resolved into an Address using the engine (which involves inspecting
    disk to determine the types of its components).
    """

    path_component: str
    target_component: Optional[str] = None

    def __post_init__(self):
        # A trailing ':' (empty target) or a bare root spec must still carry a name.
        if self.target_component is not None or self.path_component == "":
            if not self.target_component:
                raise InvalidTargetName(
                    f"Address spec {self.path_component}:{self.target_component} has no name part."
                )

        # A root is okay.
        if self.path_component == "":
            return
        components = self.path_component.split(os.sep)
        if any(component in (".", "..", "") for component in components):
            raise InvalidSpecPath(
                f"Address spec has un-normalized path part '{self.path_component}'"
            )
        if os.path.isabs(self.path_component):
            raise InvalidSpecPath(
                f"Address spec has absolute path {self.path_component}; expected a path relative "
                "to the build root."
            )

    @classmethod
    def parse(
        cls,
        spec: str,
        relative_to: Optional[str] = None,
        subproject_roots: Optional[Sequence[str]] = None,
    ) -> AddressInput:
        """Parse a string into an AddressInput.

        :param spec: Target address spec.
        :param relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
          interprets the missing spec_path part as `relative_to`.
        :param subproject_roots: Paths that correspond with embedded build roots under
          the current build root.

        For example:

            some_target(
                name='mytarget',
                dependencies=['path/to/buildfile:targetname'],
            )

        Where `path/to/buildfile:targetname` is the dependent target address spec.

        In there is no target name component, it defaults the default target in the resulting
        Address's spec_path.

        Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is
        normally not significant except when a spec referring to a root level target is needed
        from deeper in the tree. For example, in `path/to/buildfile/BUILD`:

            some_target(
                name='mytarget',
                dependencies=[':targetname'],
            )

        The `targetname` spec refers to a target defined in `path/to/buildfile/BUILD*`. If instead
        you want to reference `targetname` in a root level BUILD file, use the absolute form.
        For example:

            some_target(
                name='mytarget',
                dependencies=['//:targetname'],
            )

        The spec may be a file, such as `a/b/c.txt`. It may include a relative address spec at the
        end, such as `a/b/c.txt:original` or `a/b/c.txt:../original`, to disambiguate which target
        the file comes from; otherwise, it will be assumed to come from the default target in the
        directory, i.e. a target which leaves off `name`.
        """
        subproject = (
            longest_dir_prefix(relative_to, subproject_roots)
            if relative_to and subproject_roots
            else None
        )

        def prefix_subproject(spec_path: str) -> str:
            if not subproject:
                return spec_path
            if spec_path:
                return os.path.join(subproject, spec_path)
            return os.path.normpath(subproject)

        # Only the LAST ':' separates path from target, so target names may not contain ':'.
        spec_parts = spec.rsplit(":", 1)
        path_component = spec_parts[0]
        target_component = None if len(spec_parts) == 1 else spec_parts[1]

        normalized_relative_to = None
        if relative_to:
            normalized_relative_to = (
                fast_relpath(relative_to, subproject) if subproject else relative_to
            )

        if path_component.startswith("./") and normalized_relative_to:
            path_component = os.path.join(normalized_relative_to, path_component[2:])
        if not path_component and normalized_relative_to:
            path_component = normalized_relative_to
        path_component = prefix_subproject(strip_prefix(path_component, "//"))

        return cls(path_component, target_component)

    def file_to_address(self) -> Address:
        """Converts to an Address by assuming that the path_component is a file on disk."""
        if self.target_component is None:
            # Use the default target in the same directory as the file.
            spec_path, relative_file_path = os.path.split(self.path_component)
            # We validate that this is not a top-level file. We couldn't do this earlier in the
            # AddressSpec constructor because we weren't sure if the path_spec referred to a file
            # vs. a directory.
            if not spec_path:
                raise InvalidTargetName(
                    "Top-level file specs must include which target they come from, such as "
                    f"`{self.path_component}:original_target`, but {self.path_component} did not "
                    f"have an address."
                )
            return Address(spec_path=spec_path, relative_file_path=relative_file_path)

        # The target component may be "above" (but not below) the file in the filesystem.
        # Determine how many levels above the file it is, and validate that the path is relative.
        parent_count = self.target_component.count(os.path.sep)
        if parent_count == 0:
            spec_path, relative_file_path = os.path.split(self.path_component)
            return Address(
                spec_path=spec_path,
                relative_file_path=relative_file_path,
                target_name=self.target_component,
            )

        # Every path segment before the final name must be exactly "..".
        expected_prefix = f"..{os.path.sep}" * parent_count
        if self.target_component[: self.target_component.rfind(os.path.sep) + 1] != expected_prefix:
            raise InvalidTargetName(
                "A target may only be defined in a directory containing a file that it owns in "
                f"the filesystem: `{self.target_component}` is not at-or-above the file "
                f"`{self.path_component}`."
            )
        # Split the path_component into a spec_path and relative_file_path at the appropriate
        # position.
        path_components = self.path_component.split(os.path.sep)
        if len(path_components) <= parent_count:
            raise InvalidTargetName(
                "Targets are addressed relative to the files that they own: "
                f"`{self.target_component}` is too far above the file `{self.path_component}` to "
                "be valid."
            )
        offset = -1 * (parent_count + 1)
        spec_path = os.path.join(*path_components[:offset]) if path_components[:offset] else ""
        relative_file_path = os.path.join(*path_components[offset:])
        target_name = os.path.basename(self.target_component)
        return Address(spec_path, relative_file_path=relative_file_path, target_name=target_name)

    def dir_to_address(self) -> Address:
        """Converts to an Address by assuming that the path_component is a directory on disk."""
        return Address(spec_path=self.path_component, target_name=self.target_component)
class Address(EngineAwareParameter):
    """A target address.

    An address is a unique name for a `pants.engine.target.Target`, and optionally a particular file
    that it owns.

    While not their only use, a noteworthy use of addresses is specifying
    target dependencies. For example:

        some_target(
            name='mytarget',
            dependencies=['path/to/buildfile:targetname'],
        )

    Where `path/to/buildfile:targetname` is the dependent target address.
    """

    def __init__(
        self,
        spec_path: str,
        *,
        relative_file_path: Optional[str] = None,
        target_name: Optional[str] = None,
    ) -> None:
        """
        :param spec_path: The path from the build root to the directory containing the BUILD file
          for the target.
        :param relative_file_path: The relative path from the spec_path to an addressed file,
          if any. Because files must always be located below targets that apply metadata to
          them, this will always be relative.
        :param target_name: The name of the target applying metadata to the file, defined in a
          BUILD file in the spec_path directory, or None if this path refers to the default
          target in that directory.
        """
        self.spec_path = spec_path
        self._relative_file_path = relative_file_path

        # If the target_name is the same as the default name would be, we normalize to None.
        self._target_name: Optional[str]
        if target_name and target_name != os.path.basename(self.spec_path):
            banned_chars = BANNED_CHARS_IN_TARGET_NAME & set(target_name)
            if banned_chars:
                raise InvalidTargetName(
                    f"The target name {target_name} (defined in directory {self.spec_path}) "
                    f"contains banned characters (`{'`,`'.join(banned_chars)}`). Please replace "
                    "these characters with another separator character like `_` or `-`."
                )
            self._target_name = target_name
        else:
            self._target_name = None

        # Precomputed: Address is immutable after construction, so hash once here.
        self._hash = hash((self.spec_path, self._relative_file_path, self._target_name))
        if PurePath(spec_path).name.startswith("BUILD"):
            raise InvalidSpecPath(
                f"The address {self.spec} has {PurePath(spec_path).name} as the last part of its "
                f"path, but BUILD is a reserved name. Please make sure that you did not name any "
                f"directories BUILD."
            )

    @property
    def is_file_target(self) -> bool:
        """True when this address refers to a specific file, not just a BUILD target."""
        return self._relative_file_path is not None

    @property
    def is_default_target(self) -> bool:
        """True if this is address refers to the "default" target in the spec_path.

        The default target has a target name equal to the directory name.
        """
        return self._target_name is None

    @property
    def filename(self) -> str:
        """The build-root-relative path of the owned file; only valid for file targets."""
        if self._relative_file_path is None:
            raise ValueError("Only a file Address (`self.is_file_target`) has a filename.")
        return os.path.join(self.spec_path, self._relative_file_path)

    @property
    def target_name(self) -> str:
        """The effective target name (falls back to the spec_path's directory name)."""
        if self._target_name is None:
            return os.path.basename(self.spec_path)
        return self._target_name

    @property
    def spec(self) -> str:
        """The canonical string representation of the Address.

        Prepends '//' if the target is at the root, to disambiguate root-level targets
        from "relative" spec notation.

        :API: public
        """
        prefix = "//" if not self.spec_path else ""
        file_portion = f"{prefix}{self.spec_path}"
        if self._relative_file_path is not None:
            file_portion = os.path.join(file_portion, self._relative_file_path)

        # Relativize the target name to the dirname of the file.
        parent_prefix = (
            "../" * self._relative_file_path.count(os.path.sep) if self._relative_file_path else ""
        )
        if self._target_name is None and not parent_prefix:
            return file_portion
        target_name = self._target_name or os.path.basename(self.spec_path)
        return f"{file_portion}:{parent_prefix}{target_name}"

    @property
    def path_safe_spec(self) -> str:
        """A variant of `spec` using only filesystem-safe characters ('.' and '@').

        :API: public
        """
        if self._relative_file_path:
            parent_count = self._relative_file_path.count(os.path.sep)
            parent_prefix = "@" * parent_count if parent_count else "."
            file_portion = f".{self._relative_file_path.replace(os.path.sep, '.')}"
        else:
            parent_prefix = "."
            file_portion = ""
        if parent_prefix == ".":
            target_portion = f"{parent_prefix}{self._target_name}" if self._target_name else ""
        else:
            target_name = self._target_name or os.path.basename(self.spec_path)
            target_portion = f"{parent_prefix}{target_name}"
        return f"{self.spec_path.replace(os.path.sep, '.')}{file_portion}{target_portion}"

    def maybe_convert_to_build_target(self) -> Address:
        """If this address is for a file target, convert it back into its BUILD target.

        Otherwise, return itself unmodified.
        """
        if not self.is_file_target:
            return self
        return self.__class__(self.spec_path, relative_file_path=None, target_name=self.target_name)

    def __eq__(self, other):
        if not isinstance(other, Address):
            return False
        return (
            self.spec_path == other.spec_path
            and self._relative_file_path == other._relative_file_path
            and self._target_name == other._target_name
        )

    def __hash__(self):
        return self._hash

    def __repr__(self) -> str:
        return f"Address({self.spec})"

    def __str__(self) -> str:
        return self.spec

    def __lt__(self, other):
        # Sort by (spec_path, file path, target name); None fields compare as "".
        return (self.spec_path, (self._relative_file_path or ""), (self._target_name or "")) < (
            other.spec_path,
            (other._relative_file_path or ""),
            (other._target_name or ""),
        )

    def debug_hint(self) -> str:
        return self.spec
@dataclass(frozen=True)
class BuildFileAddress:
    """Represents the address of a type materialized from a BUILD file.

    TODO: This type should likely be removed in favor of storing this information on Target.
    """

    # The resolved Address of the entry the BUILD file declared.
    address: Address
    # The relative path of the BUILD file this Address came from.
    rel_path: str
| true |
e05e6fedfe675dc4714ddae965cf599afa629130 | Python | umeshkhaniya/Finding-Hidden-Messages-in-DNA-Bioinformatics-I- | /freq_mismatch.py | UTF-8 | 749 | 3.578125 | 4 | [] | no_license |
# Frequent Words with Mismatches Problem: Find the most frequent k-mers with mismatches in a string.
# Input: A string Text as well as integers k and d. (You may assume k ≤ 12 and d ≤ 3.)
# Output: All most frequent k-mers with up to d mismatches in Text.
#Code Challenge: Solve the Frequent Words with Mismatches Problem.
def FrequentWordsWithMismatches(Text, k, d):
    """Count approximate occurrences of every k-mer over Text.

    Returns a dict mapping each candidate k-mer to the number of length-k
    windows of Text within Hamming distance d of it.  The most frequent
    k-mers with mismatches are the keys with the maximal value.

    BUG FIX: the previous version ignored `d` entirely and only counted
    exact window occurrences, contradicting the problem statement above.
    """
    def _neighbors(pattern, dist):
        # All DNA strings within Hamming distance `dist` of `pattern`.
        if dist == 0:
            return {pattern}
        if len(pattern) == 1:
            return {"A", "C", "G", "T"}
        out = set()
        for suffix in _neighbors(pattern[1:], dist):
            # Hamming distance already spent on the suffix
            used = sum(a != b for a, b in zip(pattern[1:], suffix))
            if used < dist:
                for base in "ACGT":
                    out.add(base + suffix)
            else:
                out.add(pattern[0] + suffix)
        return out

    frequency_mis = {}
    for i in range(len(Text) - k + 1):
        # Every neighbor of this window gains one approximate occurrence.
        for pattern in _neighbors(Text[i:i + k], d):
            frequency_mis[pattern] = frequency_mis.get(pattern, 0) + 1
    return frequency_mis
def Neighbors(Pattern, d):
    """Return the set of DNA strings within Hamming distance d of Pattern.

    BUG FIX: the previous version did not parse (`if d = 0:` and a dangling
    `if`) and was left unfinished; this is the standard recursive
    neighborhood construction.
    """
    if d == 0:
        return {Pattern}
    if len(Pattern) == 1:
        return {"A", "C", "G", "T"}
    neighborhood = set()
    for text in Neighbors(Pattern[1:], d):
        # mismatches already used up in the suffix
        mismatches = sum(a != b for a, b in zip(Pattern[1:], text))
        if mismatches < d:
            # budget remains: the first position may be anything
            for nucleotide in "ACGT":
                neighborhood.add(nucleotide + text)
        else:
            # budget exhausted: the first position must match
            neighborhood.add(Pattern[0] + text)
    return neighborhood
# Sample run on the course's example dataset (k-mers of length 4, up to 1 mismatch).
Text = 'ACGTTGCATGTCGCATGATGCATGAGAGCT'
k = 4
d = 1
print(FrequentWordsWithMismatches(Text, k, d))
485f9a46daaa6d063d4a5499a23095cd27309368 | Python | barnesjake/python-stuff | /python_101/lists.py | UTF-8 | 1,321 | 4.71875 | 5 | [] | no_license | ### lists
# Worked examples of list basics; the bare expressions mirror an interactive
# session (their commented values show what the REPL would print).
squares = [1, 4, 9, 16, 25]
squares[0] # indexing returns the item
squares[-1] #25
squares[-3:] # slicing returns a new list
# [9, 16, 25]
# All slice operations return a new list containing the requested elements. This means that the following slice returns a shallow copy of the list:
squares[:]
# [1, 4, 9, 16, 25]
# A shallow copy constructs a new compound object and then (to the extent possible) inserts references into it to the objects found in the original.
# A deep copy constructs a new compound object and then, recursively, inserts copies into it of the objects found in the original.
# support concatenation
squares + [36, 49, 64, 81, 100]
#[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
# Unlike strings, which are immutable, lists are a mutable type, i.e. it is possible to change their content:
cubes = [1, 8, 27, 65, 125] # something's wrong here
# >>> 4 ** 3 # the cube of 4 is 64, not 65!
#64
cubes[3] = 64 # replace the wrong value
cubes
#[1, 8, 27, 64, 125]
# appending to end
cubes.append(216) # add the cube of 6
cubes.append(7 ** 3) # and the cube of 7
cubes
# [1, 8, 27, 64, 125, 216, 343]
# length:
letters = ['a', 'b', 'c', 'd']
len(letters)
# 4
# nested lists
a = ['a', 'b', 'c']
n = [1, 2, 3]
x = [a, n]
x
# [['a', 'b', 'c'], [1, 2, 3]]
x[0]
# ['a', 'b', 'c']
x[0][1]
# 'b'
cf4847e657da044ddaaf5ff323cbee43f7af19da | Python | JimVargas5/Sort-Alphabet-other- | /Sort Alphabetical + some other stuff.py | UTF-8 | 1,551 | 3.921875 | 4 | [] | no_license | import string
import random
def stretch(string, factor=2, letters=(string.ascii_lowercase)+(string.ascii_uppercase)):
    """Repeat each character of *string* `factor` times.

    Only characters contained in *letters* are repeated; everything else
    passes through unchanged.  A non-positive *factor* drops the matched
    characters (n * str is empty for n <= 0).

    Improvements: membership test via `in` instead of `count(...) > 0`,
    and the result is assembled with str.join instead of quadratic `+=`.
    (The first parameter shadows the `string` module inside the body, but
    the default argument is evaluated at def time, so it still works.)
    """
    pieces = []
    for character in string:
        if character in letters:
            pieces.append(factor * character)
        else:
            pieces.append(character)
    return "".join(pieces)
# Build a random set of letters, deduplicate it, sort it by character code,
# then stretch a name using only those letters.
flip = random.randrange(0,2)
randamount = random.randrange(0,52+1)
randfactor = random.randrange(-1,11)
randletter = 0
randlist = ""
for amount in range(0, randamount+1):
    # 65-122 spans 'A'..'z'; codes 91-96 are the punctuation between the
    # uppercase and lowercase ranges, so re-roll those as pure upper/lower
    # depending on the coin flip.
    randletter = chr(random.randrange(65,123))
    if (ord(randletter)>=91) and (ord(randletter)<=96):
        if flip == 0:
            randletter = chr(random.randrange(65,91))
        else:
            randletter = chr(random.randrange(97,123))
    randlist = randlist+randletter
# Deduplicate: drop the first occurrence of any character that appears more
# than once (keeps the later occurrence).
for character in randlist:
    if randlist.count(character) >1:
        index = randlist.find(character)
        randlist = randlist[:index] + randlist[index+1:]
# Selection-style sort: move characters to the end of the string in
# ascending order of their character code.
minord = 125
maxord = 60
for character in randlist:
    if ord(character) >= maxord:
        maxord = ord(character)
    elif ord(character) <= minord:
        minord = ord(character)
for i in range(maxord+1):
    for character in randlist:
        if ord(character) == (minord+i):
            index = randlist.find(character)
            randlist = randlist[:index]+randlist[index+1:]+character
print("Of these letters:", randlist, "("+str(len(randlist)), "letter(s))")
print("There will be this many of each letter chosen:", randfactor)
print(stretch("Jim Vargas", randfactor, randlist))
| true |
c552fe686ac250df3d9bbc80e2694c7fcf2ab802 | Python | Mike543/GB_python_homework_1 | /Lesson1/homework1_3.py | UTF-8 | 88 | 3.390625 | 3 | [] | no_license | n = input("Число: ")
nn = n+n
nnn = n+n+n
sum = int(n)+int(nn)+int(nnn)
print(sum)
| true |
9425c1b1608bf281ca422315516dbafa855185d0 | Python | SunithaE/python_ml | /numpystart.py | UTF-8 | 3,041 | 3.625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 29 15:25:16 2017
@author: seswaraiah
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
#import tensorflow as tf
from tensorflow import tensorflow
# A list
lst = [1,2,3]
# numpy list
a = np.array([1,2,3])
#iterate through the list
for i in a:
print(i)
#Cant append elements to the numpy list
#a = a.append([5]) # This will throw error. But default list will do append.
''' 2*lst will give [1,2,3,1,2,3]
2*a (numpy list) will give [2,4,6]
2**lst -> error
a**2 -> will do the squaring. (numpy does the squaring whereas the list doesnt)
'''
#squareroot and log
np_log = np.log(a)
np_sqrt = np.sqrt(a)
np_exp = np.exp(a)
print(np_log)
print(np_sqrt)
print(np_exp)
#dot product of two array
x = np.array([1,2])
y = np.array([2,1])
print(x*y)
print("Numpy multiplication is ", np.sum(x*y))
dot_prod = np.dot(x,y)
print("dot prod is ", dot_prod)
#magnitude
print(np.sqrt((x*x).sum()))
#linearalgebra
print(np.linalg.norm(x))
cosangle = np.dot(x,y)/(np.linalg.norm(x) * np.linalg.norm(y))
print(cosangle)
angle = np.arccos(cosangle)
print(angle)
mat = np.matrix([[1,2],[2,2]])
print(mat)
print(np.array([[1,2],[2,2]]))
#matrix with zeros
zero_mat = np.zeros([5])
print(zero_mat)
#matrix with ones
one_mat = np.ones([5])
print(one_mat)
# random numbers
rand_mat = np.random.random([5])
print(rand_mat)
gaussian_mat = np.random.randn(5)
print(gaussian_mat)
print("gaussion mean is ", gaussian_mat.mean())
print("gaussian variance is ", gaussian_mat.var())
''' problem solve: a event fair conducted where entry fee for childern per head
is $1.5 and for adults it is $4. $5050 was collected from the entire event.
2200 people attended. Find how many children and adult attended.
1.5x1 + 4x2 = 5050
x1 + x2 = 2200 '''
x1 = np.array([[1,1],[1.5,4]])
x2 = np.array([2200,5050])
lin = np.linalg.solve(x1, x2)
print(lin)
df_list = []
for line in open('data_2d.csv'):
row = line.split(',')
#print(row)
#sample = map(float,row)
df_list.append(row)
#print(df_list)
df_np = np.array(df_list)
print(df_np.shape)
# Pandas
pd_input = pd.read_csv("data_2d.csv", header=None)
print(type(pd_input))
print(pd_input.info())
print(pd_input.head(10))
pd_mat = pd_input.as_matrix()
print(type(pd_mat))
print(pd_input[0])
#matplot
x_axis = np.linspace(0,10,50)
y_axis = np.sin(x_axis)
plt.plot(x_axis,y_axis)
plt.show()
data_1d = pd.read_csv('data_1d.csv', header=None).as_matrix()
x_a = data_1d[:,0]
y_a = data_1d[:,1]
plt.scatter(x_a,y_a)
plt.show()
# Scipy
print(norm.pdf(0))
print(norm.pdf(0, loc=5, scale=10))
r = np.random.randn(10)
print(norm.pdf(r))
print(norm.logpdf(r))
r1 = np.random.randn(1000)
plt.hist(r1, bins=100)
plt.show()
r2 = np.random.randn(1000 , 2)
plt.scatter(r2[:,0], r2[:,1])
plt.show()
l1 = np.linspace(0,10,100)
print(l1)
s1 = np.sin(l1) + np.sin(3*l1)
plt.plot(s1)
plt.show()
#tensorflow
x = tf.constants(35, name='x')
y = tf.Variable(x+5, name='y')
| true |
0e998b0399caa9ad0c66ff1a540d11b07c559872 | Python | Imperdius/bpy | /I love colonel sanders.py | UTF-8 | 1,182 | 3.328125 | 3 | [] | no_license | import time
input("Welcome to: I love you Colonel Sanders!")
input("The finger licking good dating sim!")
input("Soon you will meet our handsome friend")
print("But first, what is your name?")
name = input(">")
input("...")
time.sleep(0.8)
print(name, "?")
time.sleep(0.8)
print(name.upper(), "???")
time.sleep(0.8)
input("No no no that just wont do!")
input("From now on, you will be called Joe")
input("Yes, Joe - a wonderful name")
input("So, Joe Swanson, do you think that you are ready to meet Colonel Sanders?")
input("Because I fucking don't - look at you, you are a mess, you can't meet him like this")
print("At least make some effort to prepare - you could do your (h)air, put on some (c)lothes")
check = False
while check == False:
choice1 = input(">").lower()
if choice1 == ("h") or choice1 == ("c"):
check = True
else:
print("Please enter h or c to choose an option")
if choice1 == ("h"):
prep = 0
input("You wash, blow dry and comb your head. You then put your hair on.")
elif choice1 == ("c"):
prep = 1
input("You put on your limited edition gucci off white yeezy 350s in diamond blue. You have no other clothes.")
| true |
7792f7c7f2ca18114780a932b65889824ccc447d | Python | 007xiaoliang/WeChat-For-BookStore | /wechat/utils/CreateSign.py | UTF-8 | 2,982 | 2.59375 | 3 | [] | no_license | import json
import string
import hashlib
import random
import time
import urllib.request
import redis
import requests
from utils.config import APP_ID, SECRET, REDISIP, REDISPORT
r = redis.Redis(host=REDISIP, port=REDISPORT)  # module-level Redis client, used as the token/ticket cache
class Sign:
    """Builds the WeChat JS-SDK signature payload for a jsapi_ticket and url.

    Calling sign() computes a SHA-1 over the sorted, lowercased key=value
    query string and stores it under the 'signature' key of the payload.
    """

    def __init__(self, jsapi_ticket, url):
        self.ret = {
            'nonceStr': self.__create_nonce_str(),
            'jsapi_ticket': jsapi_ticket,
            'timestamp': self.__create_timestamp(),
            'url': url
        }

    def __create_nonce_str(self):
        # 15 random alphanumeric characters
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(15))

    def __create_timestamp(self):
        return int(time.time())

    def sign(self):
        query = '&'.join('%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret))
        self.ret['signature'] = hashlib.sha1(query.encode('utf-8')).hexdigest()
        return self.ret
def get__token():
    """Return the WeChat access token, preferring the Redis cache.

    The token is cached under 'ACCESS_TOKEN' with a 7200-second TTL.
    On failure the caught exception object is returned (NOTE(review):
    callers format the result straight into URLs — consider raising
    instead of returning the exception).
    """
    ACCESS_TOKEN = r.get('ACCESS_TOKEN')  # cached token (bytes) or None
    if ACCESS_TOKEN:
        # BUG FIX: redis returns bytes; decode so the cached path yields a
        # str just like the freshly fetched path below (get_ticket already
        # decodes its cached 'TICKET' the same way).
        return str(ACCESS_TOKEN, encoding='utf-8')
    try:
        # build the token endpoint URL
        token_url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={}&secret={}".format(
            APP_ID, SECRET)
        response = urllib.request.urlopen(token_url)
        b = response.read().decode('utf-8')
        token = json.loads(b)
        ACCESS_TOKEN = token.get("access_token")
        # cache for 2 hours
        r.set('ACCESS_TOKEN', ACCESS_TOKEN, ex=7200)
        return ACCESS_TOKEN
    except Exception as e:
        return e
def get_ticket():
    """Return the jsapi ticket, preferring the Redis cache ('TICKET', 7200s TTL)."""
    ticket = r.get('TICKET')  # fetch the cached ticket from Redis (bytes or None)
    if ticket:
        tic = str(ticket, encoding='utf-8')
        return tic
    else:
        try:
            token = get__token()
            # NOTE(review): the URL literal below begins with a space —
            # confirm this request actually succeeds as written.
            ticket_url = " https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token={}&type=jsapi".format(token)
            # NOTE(review): this local name shadows the enclosing function.
            get_ticket = urllib.request.urlopen(ticket_url)
            c = get_ticket.read().decode("utf-8")
            js_ticket = json.loads(c)
            ticket = js_ticket.get("ticket")
            r.set('TICKET', ticket, ex=7200)  # cache for 2 hours
            return ticket
        except Exception as e:
            return e
def wx_config(url):
    """Return (app_id, timestamp, signature, nonceStr) for the WeChat JS-SDK config."""
    payload = Sign(get_ticket(), url).sign()
    return APP_ID, payload['timestamp'], payload['signature'], payload['nonceStr']
def get_openId(code):
    """Exchange an OAuth code for the access_token/openid payload (as a dict)."""
    url = ('https://api.weixin.qq.com/sns/oauth2/access_token?appid={}&secret={}'
           '&code={}&grant_type=authorization_code').format(APP_ID, SECRET, code)
    return json.loads(requests.get(url).text)
# Fetch the WeChat user's profile information
def get_userInfo(code):
    """Look up the user profile for the given OAuth code (openid + token)."""
    token_info = get_openId(code)
    url = ("https://api.weixin.qq.com/sns/userinfo?access_token=" + token_info["access_token"]
           + "&openid=" + token_info["openid"] + "&lang=zh_CN")
    user_info = requests.get(url).content.decode("utf-8")
    return json.loads(user_info)
| true |
8d44308826db27076e03c5eab524aa68a8902512 | Python | pedbcrespo/api_python | /api.py | UTF-8 | 2,826 | 2.53125 | 3 | [] | no_license | import json
from flask import Flask, request
from flask_restful import Resource, Api
from flask_cors import CORS
import data_bd
app = Flask(__name__)
api = Api(app)
CORS(app)
# se um metodo da classe pede outro parametro alem do self, todas as outras tambem vao ter que pedir, senao da erro
class User(Resource):
def get(self):
return data_bd.get_data('usuario')
def post(self):
data_local = json.loads(request.data)
try:
data_bd.post_data('usuario', data_local)
return {"status": "sucesso", "info": data_local}
except Exception:
return {"status": "ERRO", "info": Exception.__class__}
class UserAlt(Resource):
    """Item endpoint for one 'usuario' record: fetch, update, delete by id."""

    def get(self, id):
        list_bd = data_bd.get_data('usuario')
        try:
            return [elem for elem in list_bd if elem['id'] == id][0]
        except Exception as exc:
            # BUG FIX: Exception.__name__ is the literal 'Exception'; report the
            # raised exception's own type (e.g. IndexError when no match).
            return f"ERRO {type(exc).__name__}"

    def put(self, id):
        data_local = json.loads(request.data)
        try:
            data_bd.put_data('usuario', id, data_local)
            return {"status": "sucesso", "info": data_local}
        except Exception as exc:
            return {"status": "ERRO", "info": type(exc).__name__}

    def delete(self, id):
        try:
            data_bd.delete_data('usuario', id)
            return {"status": "sucesso", "info": "dado deletado"}
        except Exception as exc:
            return {"status": "ERRO", "info": type(exc).__name__}
class Lotes(Resource):
    """Collection endpoint for 'lotes' records: list all (GET), create (POST)."""

    def get(self):
        return data_bd.get_data('lotes')

    def post(self):
        data_local = json.loads(request.data)
        try:
            data_bd.post_data('lotes', data_local)
            return {"status": "sucesso", "info": data_local}
        except Exception as exc:
            # BUG FIX: Exception.__class__ is always <class 'type'>; report the
            # name of the exception that was actually raised instead.
            return {"status": "ERRO", "info": type(exc).__name__}
class LotesAlt(Resource):
    """Item endpoint for one 'lotes' record: fetch, update, delete by id."""

    def get(self, id):
        list_bd = data_bd.get_data('lotes')
        try:
            return [elem for elem in list_bd if elem['id'] == id][0]
        except Exception as exc:
            # BUG FIX: Exception.__name__ is the literal 'Exception'; report the
            # raised exception's own type (e.g. IndexError when no match).
            return f"ERRO {type(exc).__name__}"

    def put(self, id):
        data_local = json.loads(request.data)
        try:
            data_bd.put_data('lotes', id, data_local)
            return {"status": "sucesso", "info": data_local}
        except Exception as exc:
            return {"status": "ERRO", "info": type(exc).__name__}

    def delete(self, id):
        try:
            data_bd.delete_data('lotes', id)
            return {"status": "sucesso", "info": "dado deletado"}
        except Exception as exc:
            return {"status": "ERRO", "info": type(exc).__name__}
# Route registration: collection endpoints and per-id item endpoints.
api.add_resource(User, '/users/')
api.add_resource(UserAlt, '/users/<int:id>/')
api.add_resource(Lotes, '/lotes/')
api.add_resource(LotesAlt, '/lotes/<int:id>/')
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger — development only.
    app.run(debug=True)
| true |
35409fe24dd0f35ff03cff4307f48cb188b60079 | Python | nenaoj227/Ojukwu_Nena_SnakeGame | /NenasSnakeGame/NenasSnakeGame/nenasslither.py | UTF-8 | 8,423 | 3.640625 | 4 | [] | no_license | #MUST HAVE PYGAME TO RUN THIS GAME!!!!!
#used SENTDEX SNAKE GAME TUTORIAL
#to change the colors I used the ADOBE COLOR WHEEL
#to customize the title I asked William for help.
#to create a smoother and easier frame rate I used trial and error
import pygame
import random
pygame.init()
#1.Changed colors of the Background
# NOTE(review): after the recolor the names no longer match the values —
# `white` is a light purple and `black` is actually white.
#background colors
white = (132, 112, 255)
black = (255, 255, 255)
red = (255, 0, 0)
green = (0, 155, 0)
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Slither")
#2.custom snake icon
#this is the image of the apple
icon = pygame.image.load("apple.png")
pygame.display.set_icon(icon)
#this is the image of the snake
img = pygame.image.load('snakehead.png')
img2 = pygame.image.load('apple.png')
pygame.display.flip()
clock = pygame.time.Clock()
# gameplay constants: snake segment size, apple sprite size, frame rate
block_size = 20
apple_thickness = 30
FPS = 30
direction = "right"  # global heading, updated by game_loop's key handler
small_font = pygame.font.SysFont("comicsansms", 25)
med_font = pygame.font.SysFont("comicsansms", 50)
large_font = pygame.font.SysFont("comicsansms", 80)
#3.created new pause screen
def pause():
    """Freeze the game until the player presses C (resume) or Q/close (quit)."""
    paused = True
    message_to_screen("Paused", black, -100, "large")
    message_to_screen("Press C to continue or Q to quit.", black, 25)
    pygame.display.update()
    while paused:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # BUG FIX: pygame.QUIT is an event-type constant (an int),
                # not a function — calling it raised TypeError.  Shut the
                # library down with pygame.quit() instead.
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    paused = False
                elif event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        clock.tick(5)  # low frame rate while paused
# this shows the users score
def score(score):
    """Render the current score in the display's top-left corner."""
    label = small_font.render("Score: {}".format(score), True, black)
    gameDisplay.blit(label, [0, 0])
#generates an apple in a random place for the snake to catch
def rand_apple_gen():
    """Pick a random (x, y) keeping the apple sprite fully on screen.

    Cleanup: randrange already returns an int, so the round() wrappers
    were redundant and have been removed (same values, same types).
    """
    rand_apple_x = random.randrange(0, display_width - apple_thickness)
    rand_apple_y = random.randrange(0, display_height - apple_thickness)
    return rand_apple_x, rand_apple_y
rand_apple_x, rand_apple_y = rand_apple_gen()  # initial apple position (game_loop re-seeds its own copy)
#introduction to the game
def game_intro():
    """Show the title/instructions screen until the player presses C (play) or Q (quit)."""
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    intro = False  # fall through to the game
                if event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        #custom title
        gameDisplay.fill(white)
        message_to_screen("Welcome to Slither By Nena", green, -100, "medium")
        message_to_screen("The objective of the game is to eat red apples",
                          black, -30)
        message_to_screen("The more apples you eat, the longer you get", black,
                          10)
        message_to_screen("If you run into yourself or the edges, you die!",
                          black, 50)
        message_to_screen("Press C to play, P to pause, or Q to quit.", black,
                          180)
        pygame.display.update()
        clock.tick(5)  # the static screen only needs a low frame rate
def snake(block_size, snake_list):
    """Draw the snake: a rotated head sprite plus green body rectangles.

    Reads the module-level `direction` global; the head sprite is treated as
    facing up by default, so it is rotated to match the current heading.
    The last element of snake_list is the head.
    """
    if direction == "right":
        head = pygame.transform.rotate(img, 270)
    if direction == "left":
        head = pygame.transform.rotate(img, 90)
    if direction == "up":
        head = img
    if direction == "down":
        head = pygame.transform.rotate(img, 180)
    gameDisplay.blit(head, (snake_list[-1][0], snake_list[-1][1]))
    # every segment except the head is a plain rectangle
    for XnY in snake_list[:-1]:
        pygame.draw.rect(gameDisplay, green,
                         [XnY[0], XnY[1], block_size, block_size])
def text_objects(text, color, size):
    """Render *text* in the named font size; return (surface, bounding rect)."""
    fonts = {"small": small_font, "medium": med_font, "large": large_font}
    text_surface = fonts[size].render(text, True, color)
    return text_surface, text_surface.get_rect()
def message_to_screen(msg, color, y_displace=0, size="small"):
    """Blit *msg* horizontally centered, vertically offset by *y_displace* pixels."""
    surface, rect = text_objects(msg, color, size)
    rect.center = (display_width / 2), (display_height / 2) + y_displace
    gameDisplay.blit(surface, rect)
def game_loop():
    """Main play loop: handle input, move the snake, detect collisions and apple pickups."""
    global direction
    direction = "right"
    game_exit = False
    game_over = False
    # start in the middle of the screen, moving right
    lead_x = display_width / 2
    lead_y = display_height / 2
    lead_x_change = 10
    lead_y_change = 0
    snake_list = []
    snake_length = 1
    rand_apple_x, rand_apple_y = rand_apple_gen()
    #4.customized gameover and gamenew screens
    while not game_exit:
        if game_over is True:
            message_to_screen("Game Over", red, y_displace=-50, size="large")
            message_to_screen("Press C to play again or Q to quit", black,
                              y_displace=50, size="medium")
            pygame.display.update()
            while game_over is True:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        game_exit = True
                        game_over = False
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_q:
                            game_exit = True
                            game_over = False
                        if event.key == pygame.K_c:
                            # NOTE(review): restart by recursion — each replay
                            # nests another game_loop frame on the stack.
                            game_loop()
        # arrow keys steer; P pauses
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_exit = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    direction = "left"
                    lead_x_change = -block_size
                    lead_y_change = 0
                elif event.key == pygame.K_RIGHT:
                    direction = "right"
                    lead_x_change = block_size
                    lead_y_change = 0
                elif event.key == pygame.K_UP:
                    direction = "up"
                    lead_y_change = -block_size
                    lead_x_change = 0
                elif event.key == pygame.K_DOWN:
                    direction = "down"
                    lead_y_change = block_size
                    lead_x_change = 0
                elif event.key == pygame.K_p:
                    pause()
        # hitting any screen edge ends the game
        if lead_x >= display_width or lead_x < 0 or lead_y >= display_height\
                or lead_y < 0:
            game_over = True
        lead_x += lead_x_change
        lead_y += lead_y_change
        gameDisplay.fill(white)
        apple = img2
        gameDisplay.blit(apple, [rand_apple_x, rand_apple_y, apple_thickness,
                                 apple_thickness])
        snake_head = []
        snake_head.append(lead_x)
        snake_head.append(lead_y)
        snake_list.append(snake_head)
        # keep only the most recent snake_length segments
        if len(snake_list) > snake_length:
            del snake_list[0]
        # running into your own body ends the game
        for each_segment in snake_list[:-1]:
            if each_segment == snake_head:
                game_over = True
        snake(block_size, snake_list)
        score(snake_length - 1)
        pygame.display.update()
        # apple pickup: either edge of the head overlapping the apple box
        if lead_x > rand_apple_x and lead_x < rand_apple_x + apple_thickness\
                or \
                lead_x + block_size > rand_apple_x \
                and lead_x + block_size < rand_apple_x + \
                apple_thickness:
            if lead_y > rand_apple_y and lead_y < rand_apple_y + \
                    apple_thickness:
                rand_apple_x, rand_apple_y = rand_apple_gen()
                snake_length += 1
            elif lead_y + block_size > rand_apple_y and lead_y + block_size \
                    < rand_apple_y + apple_thickness:
                rand_apple_x, rand_apple_y = rand_apple_gen()
                snake_length += 1
        #5.created a smoother and easier frame rate
        clock.tick(FPS)
# NOTE(review): pygame.quit()/quit() appear here at module level, *before*
# game_intro() — as written the script would exit before the intro runs.
# In the original tutorial these two lines sit at the end of game_loop();
# the indentation was likely lost — confirm and re-indent if so.
pygame.quit()
quit()
game_intro()
game_loop()
| true |
163ac723ba7df276ac5f037fc3e1712fa5c7ab7d | Python | Stegallo/adventofcode | /tests/y_2020/test_2020_day19.py | UTF-8 | 4,013 | 2.875 | 3 | [
"MIT"
] | permissive | from unittest.mock import mock_open, patch
from y_2020.day19 import Day
# Shared fixture: construct Day against a stubbed input file ("0: 0") so no
# real puzzle input is read at import time.
with patch("builtins.open", mock_open(read_data="0: 0")):
    day = Day()
def test__preprocess_input():
    """Rules before the blank line are parsed into a dict; lines after are messages."""
    print()
    day._input_data = ["0: 1 | 2", '1: "a"', '2: "b"', "", "a"]
    day._preprocess_input()
    # name-mangled private attributes of Day
    assert day._Day__rules == {0: ["1", "2"], 1: ["a"], 2: ["b"]}
    assert day._Day__messages == ["a"]
def test_calculate_1():
    """Part 1 on two small grammars: count messages matching rule 0."""
    print()
    day._input_data = [
        "0: 4 1 5",
        "1: 2 3",
        "2: 4 4",
        "3: 4 5",
        '4: "a"',
        '5: "b"',
        "",
        "aaaabb",
        "aaaabb",
    ]
    day._preprocess_input()
    assert day._calculate_1() == 2
    # the AoC 2020 day 19 part-1 sample (expected answer 2)
    day._input_data = [
        "0: 4 1 5",
        "1: 2 3 | 3 2",
        "2: 4 4 | 5 5",
        "3: 4 5 | 5 4",
        '4: "a"',
        '5: "b"',
        "",
        "ababbb",
        "bababa",
        "abbbab",
        "aaabbb",
        "aaaabbb",
    ]
    day._preprocess_input()
    assert day._calculate_1() == 2
def test_calculate_2_logic1():
    """Part-2 sample data run through the part-1 logic (expected answer 3)."""
    print()
    day._input_data = [
        "42: 9 14 | 10 1",
        "9: 14 27 | 1 26",
        "10: 23 14 | 28 1",
        '1: "a"',
        "11: 42 31",
        "5: 1 14 | 15 1",
        "19: 14 1 | 14 14",
        "12: 24 14 | 19 1",
        "16: 15 1 | 14 14",
        "31: 14 17 | 1 13",
        "6: 14 14 | 1 14",
        "2: 1 24 | 14 4",
        "0: 8 11",
        "13: 14 3 | 1 12",
        "15: 1 | 14",
        "17: 14 2 | 1 7",
        "23: 25 1 | 22 14",
        "28: 16 1",
        "4: 1 1",
        "20: 14 14 | 1 15",
        "3: 5 14 | 16 1",
        "27: 1 6 | 14 18",
        '14: "b"',
        "21: 14 1 | 1 14",
        "25: 1 1 | 1 14",
        "22: 14 14",
        "8: 42",
        "26: 14 22 | 1 20",
        "18: 15 15",
        "7: 14 5 | 1 21",
        "24: 14 1",
        "",
        "abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa",
        "bbabbbbaabaabba",
        "babbbbaabbbbbabbbbbbaabaaabaaa",
        "aaabbbbbbaaaabaababaabababbabaaabbababababaaa",
        "bbbbbbbaaaabbbbaaabbabaaa",
        "bbbababbbbaaaaaaaabbababaaababaabab",
        "ababaaaaaabaaab",
        "ababaaaaabbbaba",
        "baabbaaaabbaaaababbaababb",
        "abbbbabbbbaaaababbbbbbaaaababb",
        "aaaaabbaabaaaaababaa",
        "aaaabbaaaabbaaa",
        "aaaabbaabbaaaaaaabbbabbbaaabbaabaaa",
        "babaaabbbaaabaababbaabababaaab",
        "aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba",
    ]
    day._preprocess_input()
    assert day._calculate_1() == 3
def test_calculate_2():
    """Part 2 on the same sample data, with the looping rules 8/11 (expected 12)."""
    print()
    day._input_data = [
        "42: 9 14 | 10 1",
        "9: 14 27 | 1 26",
        "10: 23 14 | 28 1",
        '1: "a"',
        "11: 42 31",
        "5: 1 14 | 15 1",
        "19: 14 1 | 14 14",
        "12: 24 14 | 19 1",
        "16: 15 1 | 14 14",
        "31: 14 17 | 1 13",
        "6: 14 14 | 1 14",
        "2: 1 24 | 14 4",
        "0: 8 11",
        "13: 14 3 | 1 12",
        "15: 1 | 14",
        "17: 14 2 | 1 7",
        "23: 25 1 | 22 14",
        "28: 16 1",
        "4: 1 1",
        "20: 14 14 | 1 15",
        "3: 5 14 | 16 1",
        "27: 1 6 | 14 18",
        '14: "b"',
        "21: 14 1 | 1 14",
        "25: 1 1 | 1 14",
        "22: 14 14",
        "8: 42",
        "26: 14 22 | 1 20",
        "18: 15 15",
        "7: 14 5 | 1 21",
        "24: 14 1",
        "",
        "abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa",
        "bbabbbbaabaabba",
        "babbbbaabbbbbabbbbbbaabaaabaaa",
        "aaabbbbbbaaaabaababaabababbabaaabbababababaaa",
        "bbbbbbbaaaabbbbaaabbabaaa",
        "bbbababbbbaaaaaaaabbababaaababaabab",
        "ababaaaaaabaaab",
        "ababaaaaabbbaba",
        "baabbaaaabbaaaababbaababb",
        "abbbbabbbbaaaababbbbbbaaaababb",
        "aaaaabbaabaaaaababaa",
        "aaaabbaaaabbaaa",
        "aaaabbaabbaaaaaaabbbabbbaaabbaabaaa",
        "babaaabbbaaabaababbaabababaaab",
        "aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba",
    ]
    day._preprocess_input()
    assert day._calculate_2() == 12
| true |
70b5dafda0bcae2e186cb460905db11a6beced9f | Python | SuperKuooo/Gibberish-HackDavis-2019 | /Object_detection_webcam.py | UTF-8 | 2,845 | 2.5625 | 3 | [] | no_license | # Import packages
import os
import numpy as np
#import tensorflow as tf
import math
import time
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# NOTE(review): `import tensorflow as tf` is commented out at the top of this
# file, so every `tf.` reference below raises NameError at import time —
# restore the import to run this module.
MODEL_NAME = 'inference_graph/saved_model_0207/'
CWD_PATH = os.getcwd()
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH, 'Output Files/labelmap.pbtxt')
NUM_CLASSES = 1
# Label map: maps class indices to category names for visualization.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                            use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the frozen detection graph into memory and open one shared session.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)
# Handles to the graph's input and output tensors.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
def arm_detect(frame, prev_time, prev_avg_cood):
    """Run the detector on one frame; estimate speed of the tracked object.

    Returns (annotated frame, current clock time, current center coordinate).
    `prev_time`/`prev_avg_cood` are the values returned by the previous call.
    """
    current_avg_cood = None
    frame_expanded = np.expand_dims(frame, axis=0)
    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})
    if scores[0][0] > 0.7:
        # Convert the top box from normalized coords to pixels and take its center.
        height, width = frame.shape[:2]
        ymin = int(boxes[0][0][0] * height)
        xmin = int(boxes[0][0][1] * width)
        ymax = int(boxes[0][0][2] * height)
        xmax = int(boxes[0][0][3] * width)
        current_avg_cood = [(xmin + xmax) / 2, (ymin + ymax) / 2]
        # Pixel displacement scaled by 2.54/96 — assumes 96 dpi so this is
        # pixels -> cm (TODO confirm the intended unit and screen dpi).
        delta = math.sqrt(
            (current_avg_cood[0] - prev_avg_cood[0]) ** 2 + (current_avg_cood[1] - prev_avg_cood[1]) ** 2) * 2.54 / 96
        speed = delta / (time.clock() - prev_time)
        print(speed)
    else:
        # Low confidence: keep the previous position so speed stays continuous.
        current_avg_cood = prev_avg_cood
    # Draw the results of the detection (aka 'visulaize the results')
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.60)
    return frame, time.clock(), current_avg_cood
| true |
ca1821c25d1d4311cc1ff0123024f06bf26a2565 | Python | HailongZeng/mypython_learning | /24、网络爬虫/线程/1、线程.py | UTF-8 | 638 | 2.765625 | 3 | [] | no_license | '''
在一个进程的内部,要同时干多件事,就需要同时运行多个'子任务',我们把进程内的这些'子任务'叫做线程
线程通常叫做轻型的进程。线程是共享内存空间的并发执行的多任务,每一个线程都共享一个进程的资源
线程是最小的执行单元,而进程由至少一个线程组成。如何调度进程和线程,完全由操作系统决定,程序自己不能决定什么时候执行,执行多长时间。
线程应用模块
1、_thread模块 低级模块,比较接近于底层
2、threading模块 高级模块,对_thread进行了封装
''' | true |
a89c50b862205c7a933788fed204bdabf1608864 | Python | brunolmarques/DirectFeedbackAlignment | /network/layers/convolution_im2col.py | UTF-8 | 5,048 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
from network.activation import Activation
from network.layer import Layer
from network.utils.im2col_cython import im2col_cython, col2im_cython
class Convolution(Layer):
    """2D convolution layer using im2col, supporting backprop and DFA training.

    W holds the filters (f, c, h_f, w_f); B holds the fixed random feedback
    weights used by Direct Feedback Alignment (dfa); b is the per-filter bias.
    """

    def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,
                 last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:
        """Store hyperparameters; weights are allocated later in initialize().

        filter_shape is (num_filters, channels, filter_height, filter_width).
        """
        assert len(filter_shape) == 4, \
            "invalid filter shape: 4-tuple required, {}-tuple given".format(len(filter_shape))
        super().__init__()
        self.filter_shape = filter_shape
        self.stride = stride
        self.padding = padding
        self.dropout_rate = dropout_rate
        self.activation = activation
        self.last_layer = last_layer
        self.weight_initializer = weight_initializer
        self.fb_weight_initializer = fb_weight_initializer

    def initialize(self, input_size, num_classes, train_method) -> tuple:
        """Validate shapes and allocate W, B and b; return the output size.

        input_size is (channels, height, width); returns (f, h_out, w_out).
        """
        assert np.size(input_size) == 3, \
            "invalid input size: 3-tuple required for convolution layer"
        c_in, h_in, w_in = input_size
        f, c_f, h_f, w_f = self.filter_shape
        assert c_in == c_f, \
            "input channel dimension ({}) not compatible with filter channel dimension ({})".format(c_in, c_f)
        # NOTE(review): the next two assertion messages appear swapped
        # (width message on the height check and vice versa, and both format
        # h_f/h_in) — the checks themselves are correct.
        assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \
            "filter width ({}) not compatible with input width ({})".format(h_f, h_in)
        assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \
            "filter height ({}) not compatible with input height ({})".format(h_f, h_in)
        # standard conv output size formula
        self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1
        self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1
        # initialize weights
        if self.weight_initializer is None:
            # uniform in +-1/sqrt(fan_in) by default
            sqrt_fan_in = np.sqrt(c_in * h_in * w_in)
            self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)
        else:
            self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))
        # initialize feedback weights (fixed random projection from the error)
        if self.fb_weight_initializer is None:
            sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)
            # self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out))
            self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))
        else:
            # self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out))
            self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))
        # initialize bias units
        self.b = np.zeros(f)
        return f, self.h_out, self.w_out

    def forward(self, X, mode='predict') -> np.ndarray:
        """Convolve X (n, c, h, w) with W; apply activation and, in train mode, dropout."""
        n_in, c, h_in, w_in = X.shape
        n_f, c, h_f, w_f = self.W.shape
        # unfold input patches into columns so the convolution is one matmul
        self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride)
        z = self.W.reshape((n_f, -1)).dot(self.x_cols)
        z += self.b.reshape(-1, 1)  # add the per-filter bias
        z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)
        self.a_in = X
        if self.activation is None:
            self.a_out = z
        else:
            self.a_out = self.activation.forward(z)
        if mode == 'train' and self.dropout_rate > 0:
            # self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate)
            self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)
            self.a_out *= self.dropout_mask
        return self.a_out

    def dfa(self, E: np.ndarray) -> tuple:
        """Direct Feedback Alignment: project the global error E through fixed B
        onto this layer's output shape and return (dW, db)."""
        # E = np.einsum('ij,jklm->iklm', E, self.B)
        n_f, c_f, h_f, w_f = self.W.shape
        E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dW, db

    def back_prob(self, E: np.ndarray) -> tuple:
        """Standard backpropagation step: return (dX, dW, db) for upstream error E."""
        if self.dropout_rate > 0:
            E *= self.dropout_mask
        n_in, c_in, h_in, w_in = self.a_in.shape
        n_f, c_f, h_f, w_f = self.W.shape
        if self.activation is None:
            E *= self.a_out
        else:
            E *= self.activation.gradient(self.a_out)
        delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)
        # gradient w.r.t. the input, folded back from column form
        dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)
        dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)
        dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)
        db = np.sum(E, axis=(0, 2, 3))
        return dX, dW, db

    def has_weights(self) -> bool:
        """This layer has trainable weights (W, b)."""
        return True
| true |
d40a20201d2a0fb5d828569b6408156b72665134 | Python | shooler/FOSSCards | /lib/tk_windows.py | UTF-8 | 16,601 | 2.703125 | 3 | [] | no_license | import os
import sys
import db_functions
import fs_functions
import dropbox
import random
import platform
import tk_functions as tkfuncs
import keybinds
import textwrap
import json
# Python 2/3 compatibility shim: alias the Tkinter modules under common names
# so the rest of the file can use tk / tkFileDialog / messagebox uniformly.
if sys.version.startswith('2'):
    # NOTE(review): `pyver` is only assigned on this branch — confirm nothing
    # reads it under Python 3 (that would raise NameError).
    pyver = 2
    import Tkinter as tk
    import tkFileDialog
    import tkMessageBox as messagebox
else:
    import tkinter as tk
    from tkinter import filedialog
    tkFileDialog = filedialog
    from tkinter import messagebox
class spawnWindows:
intro_string = """
It looks like you haven't linked your DropBox account to the app yet.
In order to set up the program, you will need to link it via
a developer access token from Dropbox. I can't make a login box
as far as I'm aware without Dropbox Business API, and this is a free app.
To get your specific key:
"""
setup_string = """\
2: Click on "Create App"
3: Select Dropbox API (Not Dropbox Business API)
4: Choose Full Dropbox as the access type (User data isn't sent anywhere external)
5: Name it whatever you want(nonsense works, it wont be actively used)
6: Agree to the DropBox terms and conditions, and click "Create App"
7: On the next page, under OAuth2, click "Generate Access Token"
8: Paste said access token into the token box below and click done
(Do not share this token with anyone)
9: Enter your root folder for the program to use in the course box
e.g. /FOSSCards/ - The root will hold all course folders e.g.
/FOSSCards/Math/, /FOSSCards/English/, etc...
"""
    def __init__(self, frame):
        """Load saved Dropbox credentials (if any) and either start the
        app proper or launch the first-time setup flow.

        frame -- the root Tk window; hidden until setup is known-good.
        """
        self.intro_string = textwrap.dedent(self.intro_string)
        self.setup_string = textwrap.dedent(self.setup_string)
        self.frame = frame
        frame.withdraw()  # hide the main window until setup completes
        # Small JSON file holding {"token": ..., "root": ...}.
        if platform.system() != 'Windows':
            self.datFilePath = (os.getcwd()+'/lib/dat')
        else:
            self.datFilePath = (os.getcwd()+'\\lib\\dat')
        if os.path.exists(self.datFilePath):
            with open(self.datFilePath, 'r') as f:
                self.data = json.load(f)
            self.accessToken = self.data["token"]
            self.db_root = self.data["root"]
            if self.accessToken != '':
                # Usable credentials on disk -- go straight to the app.
                self.initialize()
                return
        # Missing or empty credentials: ask the user to link Dropbox.
        self.first_time_setup()
    def first_time_setup(self):
        """
        Used for setting up the users dropbox folder, spawns a window
        and prints the instructions to create the access token and main folder
        """
        self.setup_frame = tk.Toplevel(self.frame)
        self.setup_frame.wm_title("Set Up Dropbox")
        self.setup_frame.resizable(False, False)
        # Instruction text: intro, clickable link (step 1), then steps 2-9.
        upperLabel = tk.Label(self.setup_frame, text=self.intro_string)
        iLabel = tk.Label(self.setup_frame, text="1: Follow this link",
                          justify=tk.LEFT, fg="blue", cursor="hand2")
        lowerLabel = tk.Label(self.setup_frame,
                              text=self.setup_string, justify=tk.LEFT)
        ok_button = tk.Button(self.setup_frame, text="OK",
                              command = lambda: self.setup_done(course_entry.get(),
                                                                token_entry.get()))
        # Row for the Dropbox access token.
        self.setup_token_frame = tk.Frame(self.setup_frame)
        token_label = tk.Label(self.setup_token_frame, text="Token: ")
        token_entry = tk.Entry(self.setup_token_frame)
        token_label.pack(side=tk.LEFT)
        token_entry.pack()
        # Row for the Dropbox root folder path.
        self.setup_course_frame = tk.Frame(self.setup_frame)
        course_label = tk.Label(self.setup_course_frame, text="Path: ")
        course_entry = tk.Entry(self.setup_course_frame)
        course_label.pack(side=tk.LEFT)
        course_entry.pack()
        upperLabel.pack()
        iLabel.pack()
        lowerLabel.pack()
        self.setup_token_frame.pack()
        self.setup_course_frame.pack()
        ok_button.pack()
        # Clicking the step-1 label is handled by keybinds.Funcs.hyperLink
        # (presumably opens the Dropbox developer page -- see keybinds).
        bkbs = keybinds.Funcs('pass')
        iLabel.bind("<Button-1>", lambda e: bkbs.hyperLink())
    def setup_done(self, course_folder, accessToken):
        """
        Finalizes first time setup, normally functions like this would occur
        in the tk_functions class/file, but we have not initialized it since
        we have no dropbox string.
        This should probably be changed later as I'm sure
        people wont like having to link db to use it, but for
        current purposes its fine.
        """
        # Ignore the click until both fields are filled in.
        if accessToken == "" or course_folder == "":
            return
        self.db_root = course_folder
        self.accessToken = accessToken
        self.setup_frame.withdraw()
        # Persist the credentials so setup is skipped on the next launch.
        with open(self.datFilePath, 'w') as f:
            data = {"token":self.accessToken, "root":self.db_root}
            json.dump(data, f)
        self.initialize()
    def initialize(self):
        """
        In all my programming jenyus I had to split the __init__ function
        to accomodate for checking the accessToken (as breaking out into a new
        window will not stop the initalize function from running). So this is
        the init function that should technically be __init__()

        Builds the whole main window: helper objects, menus, flash card
        label, buttons and the bottom info bar, then opens the course
        selector.
        """
        self.frame.deiconify()#bring the main window back up after first init
        transferData = tkfuncs.TransferData(self.accessToken)
        # StringVars backing the flash card text and the two status labels.
        self.flashText = tk.StringVar()
        self.progressText = tk.StringVar()
        self.q_text = tk.StringVar()
        self.progressText.set("0 Left")
        self.q_text.set("From: N/A")
        self.frame.configure(bg = 'white')
        self.dbx = dropbox.Dropbox(self.accessToken)
        self.root_fs_folder = os.getcwd()
        #initiate the fs_functions class here
        self.fs_functions = fs_functions.Funcs(self.root_fs_folder)
        #initiate the db_functions class here
        self.db_functions = db_functions.Funcs(self.dbx, self.fs_functions,
                                               self.db_root, self.root_fs_folder)
        #initiate the tkFuncs class, because sharing is caring
        self.funcs = tkfuncs.tkFuncs(self.db_functions, self.fs_functions,
                                     self.flashText, self.progressText, self.q_text,
                                     self.frame, transferData)
        #initiate the keybinds class
        self.kbs = keybinds.Funcs(self.funcs)
        #Get the preferences from the saved preferences file
        self.dl_o, self.sync_o = self.fs_functions.retrieve_prefs()
        self.funcs.save_prefs("pass", self.sync_o, self.dl_o)
        #creating a menu system for items that dont need to always be shown
        menu = tk.Menu(self.frame)
        self.frame.config(menu=menu)
        filemenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label="File", menu=filemenu)
        filemenu.add_command(label="Select Course",
                             command = lambda: self.select_course_window())
        filemenu.add_command(label="Create/Edit Notecards",
                             command = lambda: self.edit_notecards_window())
        filemenu.add_command(label="Sync Course Files", command =
                             lambda: self.db_functions.sync(transferData,
                                                            self.funcs.course_folder))
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command= self.frame.quit)
        settings_menu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label="Settings", menu=settings_menu)
        settings_menu.add_command(label="Preferences",
                                  command = lambda: self.preferences_window())
        #Setting up the Label that the flashcard data goes into
        flashFrame = tk.Frame(self.frame)
        self.flashCard = tk.Label(flashFrame, textvariable=self.flashText,
                                  font=("Helvetica", 32), bg='white',
                                  justify=tk.LEFT, wraplength=1000)
        self.funcs.init_fcard( self.flashCard)
        #Setting up buttons that go along the top of the frame
        top_button_frame = tk.Frame(self.frame)
        restart_button = tk.Button(top_button_frame, padx=75, text="Restart",
                                   command = lambda: self.funcs.restart())
        shuffle_button = tk.Button(top_button_frame, padx=75, text="Shuffle",
                                   command = lambda: self.funcs.shuffle_set
                                   ())
        #Flip button along the bottom
        flip_button = tk.Button(self.frame, text="Flip", padx=700,
                                command = lambda: self.funcs.flip_card())
        #Setting up right and wrong answer buttons
        right_button = tk.Button(self.frame, text="Right", pady=220, padx=15,
                                 command = lambda: self.funcs.call_right())
        wrong_button = tk.Button(self.frame, text="Wrong", pady=220,
                                 command = lambda: self.funcs.call_wrong())
        #Setting up the info bar on the bottom
        info_bar_frame = tk.Frame(self.frame)
        progress_label = tk.Label(info_bar_frame,
                                  textvariable = self.progressText, font=("Helvetica", 8),
                                  justify = tk.RIGHT, padx=300)
        q_from_label = tk.Label(info_bar_frame,
                                textvariable = self.q_text, font=("Helvetica", 8),
                                justify = tk.LEFT, padx=300)
        q_from_label.pack(side=tk.LEFT)
        progress_label.pack(side=tk.RIGHT)
        #pack everything in a super specific order
        info_bar_frame.pack(side=tk.BOTTOM)
        right_button.pack(side=tk.RIGHT)
        wrong_button.pack(side=tk.LEFT)
        restart_button.pack(side=tk.LEFT)
        shuffle_button.pack(side=tk.LEFT)
        top_button_frame.pack(side=tk.TOP)
        flip_button.pack(side=tk.BOTTOM)
        flashFrame.pack(side=tk.TOP, expand=True)
        self.flashCard.pack(expand=True)
        self.center(self.frame)
        #Calls the select course window, because no course is selected by default
        self.select_course_window()
    def center(self, win):
        """Center *win* on the screen and raise it above other windows."""
        win.update()  # make sure requested widget sizes are up to date
        w_req, h_req = win.winfo_width(), win.winfo_height()
        # Window-manager frame (border) width: outer x minus inner x.
        w_form = win.winfo_rootx() - win.winfo_x()
        w = w_req + w_form*2
        # Height adds the title-bar offset plus one border width.
        h = h_req + (win.winfo_rooty() - win.winfo_y()) + w_form
        x = (win.winfo_screenwidth() // 2) - (w // 2)
        y = (win.winfo_screenheight() // 2) - (h // 2)
        win.geometry('{0}x{1}+{2}+{3}'.format(w_req, h_req, x, y))
        win.lift()
    def select_course_window(self):
        """
        Spawns a window that contains a list of all the folders available
        in the db account (from root, at this point in time) and allows the
        user to select a course(folder) from the list so that they can go
        over the flash cards from that folder
        """
        self.c_sel_frame = tk.Toplevel(self.frame)
        self.c_sel_frame.wm_title("Select A Course")
        self.c_sel_frame.resizable(False, False)
        l = tk.Label(self.c_sel_frame, text="Select A Course")
        #setting up the listbox, with a default value to suppress empty selection errors
        self.course_listbox = tk.Listbox(self.c_sel_frame, selectmode = tk.SINGLE)
        # One row per Dropbox course folder.
        course_list = self.db_functions.get_all_folders()
        for item in course_list:
            self.course_listbox.insert(tk.END, item)
        # Pre-select the first row so curselection() is never empty.
        self.course_listbox.select_set(0)
        self.course_listbox.event_generate("<<ListboxSelect>>")
        self.scrollbar = tk.Scrollbar(self.course_listbox, orient=tk.VERTICAL)
        self.scrollbar.config(command=self.course_listbox.yview)
        self.course_listbox.config(yscrollcommand=self.scrollbar.set)
        ok_button = tk.Button(self.c_sel_frame, text="OK",
                              command = lambda: self.funcs.select_new_course(
                                  self.course_listbox.get(
                                      self.course_listbox.curselection()),
                                  self.c_sel_frame))
        add_button = tk.Button(self.c_sel_frame, text="Add Course",
                               command = lambda: self.add_new_course_window())
        l.pack()
        self.course_listbox.pack()
        ok_button.pack(side=tk.RIGHT)
        add_button.pack(side=tk.LEFT)
        self.center(self.c_sel_frame)
    def add_new_course_window(self):
        """
        Spawn a window with a label to enter the name of the course to add,
        also provides a cancel option
        """
        self.c_add_frame = tk.Toplevel(self.c_sel_frame)
        self.c_add_frame.wm_title("Name the Course")
        self.c_add_frame.resizable(False, False)
        c_entry = tk.Entry(self.c_add_frame)
        c_entry.pack()
        # OK forwards the typed name plus both windows so
        # tkFuncs.add_new_course can update/close them.
        ok_button = tk.Button(self.c_add_frame,
                              text="OK", command = lambda: self.funcs.add_new_course(c_entry.get(), self.c_add_frame, self.c_sel_frame))
        cancel_button = tk.Button(self.c_add_frame, text="Cancel",
                                  command = lambda: self.c_add_frame.withdraw())
        cancel_button.pack(side=tk.LEFT)
        ok_button.pack(side=tk.RIGHT)
        self.center(self.c_add_frame)
    def edit_notecards_window(self):
        """
        Spawns a window used for editing flashcards
        """
        # Parallel lists: questions[i] pairs with answers[i].
        self.questions = []
        self.answers = []
        self.edit_cards_frame = tk.Toplevel(self.frame, width=500, height=400)
        self.edit_cards_frame.wm_title("Edit Notecards")
        self.edit_cards_frame.resizable(False, False)
        question_label = tk.Label(self.edit_cards_frame, text="Question:",
                                  font=("Helvetica", 12))
        answer_label = tk.Label(self.edit_cards_frame, text="Answer:",
                                font=("Helvetica", 12))
        card_answer_box = tk.Text(self.edit_cards_frame, bd = 30,
                                  font=("Helvetica", 16), height = 7, width = 40,
                                  padx=20, pady=20, relief=tk.SUNKEN, wrap=tk.WORD)
        card_question_box = tk.Text(self.edit_cards_frame, bd = 30,
                                    font=("Helvetica", 16), height = 7, width = 40,
                                    padx=20, pady=20, relief=tk.SUNKEN, wrap=tk.WORD)
        card_listbox = tk.Listbox(self.edit_cards_frame,bd=5, height=42,
                                  width=25, selectmode = tk.SINGLE)
        card_scrollbar = tk.Scrollbar(card_listbox, orient=tk.VERTICAL)
        card_scrollbar.config(command=card_listbox.yview)
        card_listbox.config(yscrollcommand=card_scrollbar.set)
        # File menu with the course-file picker.
        menu = tk.Menu(self.edit_cards_frame)
        self.edit_cards_frame.config(menu=menu)
        filemenu = tk.Menu(menu, tearoff=0)
        menu.add_cascade(label="File", menu=filemenu)
        filemenu.add_command(label="Select File",
                             command = lambda: self.init_edit_window(
                                 card_question_box,card_answer_box,card_listbox))
        # ">" pushes the typed card into the list; "<" pulls the selected
        # card back into the edit boxes.
        add_card_button = tk.Button(self.edit_cards_frame, text=">",
                                    command = lambda: self.funcs.add_new_card(
                                        self.questions, self.answers,
                                        card_question_box, card_answer_box, card_listbox))
        edit_card_button = tk.Button(self.edit_cards_frame, text="<",
                                     command = lambda: self.funcs.retrieve_card(
                                         self.questions, self.answers,
                                         card_question_box, card_answer_box, card_listbox))
        done_button = tk.Button(self.edit_cards_frame, text="Save",
                                command = lambda: self.funcs.save_notecard(card_question_box,
                                    card_answer_box, self.questions, self.answers,
                                    self.funcs.course))
        delete_button = tk.Button(self.edit_cards_frame, text="Delete Selected",
                                  command = lambda: self.funcs.delete_selected_lb_item(
                                      self.questions, self.answers, card_listbox))
        cancel_button = tk.Button(self.edit_cards_frame, text="Cancel",
                                  command = lambda: self.edit_cards_frame.withdraw())
        #keybinds to switch between writing questions and answers
        #Used a seperate method instead of just x.focus
        #in order to stop Tab putting in a tab character
        card_question_box.bind('<Tab>', lambda e: self.kbs.switchFocus(card_answer_box))
        card_answer_box.bind('<Tab>', lambda e: self.kbs.switchFocus(card_question_box))
        # Ctrl+Return adds the current card from either text box.
        card_question_box.bind('<Control-Return>', lambda e:
                               self.kbs.addCard(
                                   self.questions, self.answers,
                                   card_question_box, card_answer_box, card_listbox))
        card_answer_box.bind('<Control-Return>', lambda e:
                             self.kbs.addCard(
                                 self.questions, self.answers,
                                 card_question_box, card_answer_box, card_listbox))
        card_listbox.pack(side=tk.RIGHT)
        add_card_button.pack(side=tk.RIGHT)
        edit_card_button.pack(side=tk.RIGHT)
        question_label.pack(side=tk.TOP)
        card_question_box.pack(side=tk.TOP)
        answer_label.pack(side=tk.TOP)
        card_answer_box.pack(side=tk.TOP)
        done_button.pack(side=tk.LEFT)
        delete_button.pack(side=tk.RIGHT)
        cancel_button.pack(side=tk.BOTTOM)
        self.center(self.edit_cards_frame)
    def init_edit_window(self, qb, ab, lb):
        """
        breakout method for the file select lambda to pass
        the question and answer lists back up the chain to populate
        the self.questions and self.answers lists here.

        qb/ab -- the question and answer Text widgets
        lb -- the card Listbox
        """
        self.questions, self.answers = self.funcs.populate_edit_window(
            self.questions, self.answers, qb,
            ab, lb)
        self.edit_cards_frame.lift()
def preferences_window(self):
"""
Spawns a window for editing preferences on the functionality
of the program
"""
sync_o_option = tk.IntVar().set(self.sync_o)
dl_o_option = tk.IntVar().set(self.dl_o)
p_frame = tk.Toplevel(self.frame, width=300, height=300)
p_frame.winfo_toplevel().title("Preferences")
self.center(p_frame)
sync_o_frame = tk.Frame(p_frame, width=300, height = 200,
highlightbackground = "black",
highlightcolor="black",
highlightthickness=1, bd=1)
a_label = tk.Label(sync_o_frame, text="Sync Options")
a_label.pack(anchor=tk.W)
tk.Radiobutton(sync_o_frame, text="Overrides Dropbox files",
variable = sync_o_option, value = 0).pack(anchor=tk.W)
tk.Radiobutton(sync_o_frame, text="Asks to override Dropbox files",
variable = sync_o_option, value = 1).pack(anchor=tk.W)
sync_o_frame.pack(anchor=tk.W, fill=tk.X)
dl_o_frame = tk.Frame(p_frame, width=300, height = 200,
highlightbackground = "black",
highlightcolor="black",
highlightthickness=1, bd=1)
a_label = tk.Label(dl_o_frame, text="Download Options")
a_label.pack(anchor=tk.W)
tk.Radiobutton(dl_o_frame, text="Overrides Local files",
variable = dl_o_option, value = 0).pack(anchor=tk.W)
tk.Radiobutton(dl_o_frame, text="Asks to override Local files",
variable = dl_o_option, value = 1).pack(anchor=tk.W)
dl_o_frame.pack(anchor=tk.W, fill=tk.X)
done_button = tk.Button(p_frame, text="Save",
command = lambda: self.funcs.save_prefs(p_frame,
sync_o_option.get(), dl_o_option.get()))
done_button.pack(side=tk.BOTTOM)
| true |
def solution():
    """BOJ 1668: read N guard heights and print how many guards are
    visible looking from the front, then from the back.

    Reads from stdin: N followed by N integers, one per line.
    """
    def count_visible(heights):
        # A guard is visible when strictly taller than everyone before it.
        tallest = 0
        seen = 0
        for h in heights:
            if h > tallest:
                tallest = h
                seen += 1
        return seen

    n = int(input())
    heights = [int(input()) for _ in range(n)]
    # Same scan from both directions; the original duplicated this loop.
    print(count_visible(heights))
    print(count_visible(reversed(heights)))
solution() | true |
39d9fa7929d6b72206f6b58b0b998156df72a8c1 | Python | AlejandroArbelaez21/holbertonschool-web_back_end | /0x04-pagination/0-simple_helper_function.py | UTF-8 | 312 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python3
"""
0. Simple helper function
"""
def index_range(page: int, page_size: int) -> tuple:
    """
    Compute the half-open index window for a 1-indexed page.

    Returns a (start, end) tuple such that items[start:end] yields
    exactly the requested page of *page_size* items.
    """
    start = (page - 1) * page_size
    return (start, start + page_size)
| true |
d4f6644091ed395c57884eab47e3369c42d27e70 | Python | zompi2/HouseOffersCrawler | /soupmaker.py | UTF-8 | 562 | 2.921875 | 3 | [] | no_license | # Request a page from url and create a proper soup.
# It must pass proper user_agent, otherwise no server will
# trust us.
import urllib.request
from bs4 import BeautifulSoup
def makesoup(url):
    """Download *url* and return a BeautifulSoup parse tree of the page.

    A real browser User-Agent header is sent because many servers refuse
    requests carrying urllib's default agent string.
    """
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
    headers = {'User-Agent':user_agent}
    request = urllib.request.Request(url, None, headers)
    # BUG FIX: the response object was never closed, leaking the
    # connection; the context manager closes it after the body is read.
    with urllib.request.urlopen(request) as response:
        data = response.read()
    return BeautifulSoup(data, features="html.parser")
afeb0ccf980e36e473ff92f507ba6504ac53f22f | Python | herrBez/utilities | /license_adder.py | UTF-8 | 2,106 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
#MIT License
#
#Copyright (c) 2017 Mirko Bez
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
# This script adds the content of a given input file (e.g., a license)
# at the beginning of each file matching a given file extension (e.g., .java).
# It starts from the specified root directory (e.g., '.') and performs this
# recursively.
#
import sys
import os
# Parse CLI arguments: the license file is required; root directory and
# target extension are optional (defaults: current dir, .java).
if len(sys.argv) < 2:
    print("Usage: %s license_file [root_dir] [file_extension]" % sys.argv[0])
    sys.exit(1)
file_a = sys.argv[1]
rootdir = '.'
file_extension = '.java'
if len(sys.argv) > 2:
    rootdir = sys.argv[2]
if len(sys.argv) > 3:
    file_extension = sys.argv[3]
# Read the license text once.  FIX: the original called .close() inside
# each `with` block (redundant -- the context manager already closes the
# handle) and rebuilt the text via readlines()+join instead of read().
with open(file_a, "r") as a:
    string_a = a.read()
# Walk the tree and prepend the license to every matching file.
# NOTE(review): files already carrying the license get it prepended again.
for root, subdirs, files in os.walk(rootdir):
    for f in files:
        if f.endswith(file_extension):
            file_path = os.path.join(root, f)
            print(file_path)
            with open(file_path, "r") as b:
                string_b = b.read()
            with open(file_path, "w") as out:
                out.write(string_a + "\n" + string_b)
085d6c21e26325ab9e02c8a31523c6118c26ba20 | Python | Parkyunhwan/BaekJoon | /21_06/9655_돌게임.py | UTF-8 | 192 | 3.015625 | 3 | [] | no_license | n = int(input())
# dp[i] = minimum number of turns needed to take i stones when a turn
# removes either 1 or 3 stones (see the i-1 / i-3 recurrence).
# BUG FIX: pad the table to index 3 -- the base-case writes dp[2]=2 and
# dp[3]=1 raised IndexError for n = 1 or n = 2.
dp = [0] * (max(n, 3) + 1)
dp[1] = 1
dp[2] = 2
dp[3] = 1
for i in range(4, n + 1):
    dp[i] = min(dp[i - 3], dp[i - 1]) + 1
# Players alternate and the last mover wins (BOJ 9655): an odd optimal
# turn count means the first player, SK, takes the final stones.
if dp[n] % 2:
    print("SK")
else:
    print("CY")
8335cf3c9bc072be46458e283f1eb73103b40f20 | Python | DonVito1982/SprayHydraulic | /hydraulics/physics.py | UTF-8 | 2,889 | 3.171875 | 3 | [] | no_license | WMETER_TO_PSI = 1.4219702
# GAL_TO_LT = 3.785411
PSI_TO_KPA = 6.894757  # 1 psi in kilopascals
IN_TO_MM = 25.4  # 1 inch in millimetres
class Measure(object):
    """Base class for unit-convertible physical quantities.

    Subclasses declare parallel `units` / `conversion` class attributes,
    where conversion[i][j] is the factor turning a value expressed in
    units[i] into units[j].  Instances keep `values`, a dict mapping
    every known unit to the corresponding converted value.
    """
    units = []
    conversion = [[]]

    def __init__(self, value=None, unit=None):
        """Optionally seed the measure with *value* expressed in *unit*."""
        self.values = {}
        if unit:
            self.set_single_value(value, unit)

    def set_single_value(self, value, unit):
        """Populate `values` for every known unit from one reference value.

        :param value: the referential value
        :type value: float
        :param unit: the referential unit (must appear in `units`)
        :type unit: str
        """
        cls = self.__class__
        factors = cls.conversion[cls.units.index(unit)]
        for target_unit, factor in zip(cls.units, factors):
            self.values[target_unit] = value * factor
class Pressure(Measure):
    """
    This class will serve to instantiate _pressure measures
    """
    units = ['psi', 'Pa', 'kPa', 'mH2O']
    # conversion[i][j] converts a value in units[i] to units[j]
    # (row = source unit, column = target; see Measure.set_single_value).
    # BUG FIX: the Pa->mH2O and kPa->mH2O factors were inverted/mis-scaled
    # (e.g. kPa->mH2O was PSI_TO_KPA / WMETER_TO_PSI ~= 4.85 instead of
    # ~0.102; 10 mH2O ~= 100 kPa).  Both are now derived consistently via
    # psi: X -> mH2O == (X -> psi) / WMETER_TO_PSI.
    conversion = [[1, 1000 * PSI_TO_KPA, PSI_TO_KPA, 1 / WMETER_TO_PSI],
                  [0.001 / PSI_TO_KPA, 1, 1e-3,
                   0.001 / (PSI_TO_KPA * WMETER_TO_PSI)],
                  [1 / PSI_TO_KPA, 1e3, 1,
                   1 / (PSI_TO_KPA * WMETER_TO_PSI)],
                  [WMETER_TO_PSI, 9806.38, 9.80638, 1]]
class Length(Measure):
    """
    Serves to instantiate elevation or length measures
    """
    units = ['m', 'ft', 'in', 'mm']
    ft_to_mm = IN_TO_MM * 12  # millimetres per foot
    # conversion[i][j] converts a value in units[i] to units[j].
    conversion = [[1, 1000 / ft_to_mm, 1000 / IN_TO_MM, 1000],
                  [ft_to_mm / 1000, 1, 12.0, ft_to_mm],
                  [IN_TO_MM / 1000, 1 / 12.0, 1, IN_TO_MM],
                  [0.001, 1 / ft_to_mm, 1 / IN_TO_MM, 1]]
class Volume(Measure):
    """Volume measures: litres, US gallons, cubic metres."""
    units = ['lt', 'gal', 'm3']
    GAL_TO_LT = 3.785411  # litres per US gallon
    # conversion[i][j] converts a value in units[i] to units[j].
    conversion = [[1, 1 / GAL_TO_LT, 0.001],
                  [GAL_TO_LT, 1, GAL_TO_LT / 1000],
                  [1000, 1000 / GAL_TO_LT, 1]]
class Time(Measure):
    """Time measures: minutes and hours."""
    units = ['min', 'hr']
    conversion = [[1, 1 / 60.0],
                  [60.0, 1]]
class VolFlow(Measure):
    """
    Serves to instantiate volumetric flows
    """
    units = ['gpm', 'm3/H', 'lpm']
    # Pull the gal -> m3 factor out of the Volume table.
    gal_index = Volume.units.index('gal')
    m3_index = Volume.units.index('m3')
    gal_to_m3 = Volume.conversion[gal_index][m3_index]
    # conversion[i][j] converts a flow in units[i] to units[j]; factors
    # are composed from the Volume and Time tables.
    conversion = [[1, gal_to_m3 * 60, Volume.conversion[1][0]],
                  [Volume.conversion[2][1] * Time.conversion[0][1], 1,
                   1000 / 60.0],
                  [1 / Volume.GAL_TO_LT, 60 / 1000.0, 1]]
class NozzleK(Measure):
    """Nozzle discharge coefficients K (flow per square root of
    pressure, as the unit names indicate)."""
    units = ['gpm/psi^0.5', 'lpm/bar^0.5']
    psi_index = Pressure.units.index('psi')
    kPa_index = Pressure.units.index('kPa')
    psi_to_kPa = Pressure.conversion[psi_index][kPa_index]
    # conversion[i][j] converts K in units[i] to units[j]: the flow
    # factor (GAL_TO_LT) combined with the square root of the pressure
    # factor (1 bar = 100 kPa).
    conversion = [[1, Volume.GAL_TO_LT * (100 / psi_to_kPa) ** .5],
                  [((psi_to_kPa / 100) ** .5) / Volume.GAL_TO_LT, 1]]
| true |
34faed873865b69e9b1aaf07d0f46fc2cf71ed53 | Python | BoxHen/ABLS-Autonomous-Beacon-Location-System | /Rover/test/import/ultrasonic_sensor.py | UTF-8 | 1,031 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env python2
import RPi.GPIO as GPIO
import time
GPIO.setmode (GPIO.BCM)
class Distance:
    """Ultrasonic distance sensor driver (Raspberry Pi GPIO).

    Fires a trigger pulse, times the echo pulse and converts the elapsed
    time into a distance (centimetres, given speed_of_sonic in cm/s).
    """
    #set time vars -- timestamps of the last echo edges, overwritten per
    # measurement via the instance in receive_echo_pulse.
    Start_time = time.time()
    Stop_time = time.time()
    #set speed of ultrasonic -- speed of sound, in cm/s
    speed_of_sonic = 34300
    # The pulse travels to the object and back, hence the division by 2.
    distance_there_and_back = 2

    def __init__(self, trigger_pulse, echo_pulse):
        """trigger_pulse / echo_pulse are pin numbers in BCM numbering
        (GPIO.setmode(GPIO.BCM) is called at module level)."""
        self.trigger_pulse = trigger_pulse
        self.echo_pulse = echo_pulse
        #setup GPIO in/out direction
        GPIO.setup(trigger_pulse, GPIO.OUT)
        GPIO.setup(echo_pulse, GPIO.IN)

    def create_trigger_pulse(self):
        """Emit a 10-microsecond pulse on the trigger pin."""
        GPIO.output(self.trigger_pulse, True)
        time.sleep(0.00001)
        GPIO.output(self.trigger_pulse, False)

    def receive_echo_pulse(self):
        """Record the rising- and falling-edge times of the echo pulse.

        NOTE(review): both loops busy-wait with no timeout, so this hangs
        if the sensor never answers.
        """
        while GPIO.input(self.echo_pulse) == 0:
            self.Start_time = time.time()
        while GPIO.input(self.echo_pulse) == 1:
            self.Stop_time = time.time()

    def distance_from_obj(self):
        """Trigger one measurement and return the measured distance."""
        self.create_trigger_pulse()
        self.receive_echo_pulse()
        Time_elapsed = self.Stop_time - self.Start_time
        distance = (Time_elapsed * self.speed_of_sonic) / self.distance_there_and_back
        return distance
| true |
fe7a74e7e8cc299d5ab8fcbb4034aa63fa3ea574 | Python | seintaridis/newsMining | /runClassification.py | UTF-8 | 10,776 | 2.765625 | 3 | [] | no_license | import pandas as pd
import os
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.model_selection import KFold
from sklearn.metrics import classification_report
from csvReader import writeStats
from csvReader import createTestSetCategoryCSV
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import euclidean_distances
from collections import Counter
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from scipy import interp
style.use("ggplot")
def preprocessData(data):
    """TF-IDF-vectorize the documents in *data* (English stop words plus
    a few domain-specific ones) and compress the matrix to 5 LSI
    dimensions with TruncatedSVD."""
    extra_stops = ['said', 'th', 'month', 'much', 'thing', 'say', 'says']
    vectorizer = TfidfVectorizer(stop_words=ENGLISH_STOP_WORDS.union(extra_stops))
    # First build the sparse TF-IDF matrix ...
    tfidf_matrix = vectorizer.fit_transform(data)
    # ... then reduce it to a dense 5-component representation.
    return TruncatedSVD(n_components=5).fit_transform(tfidf_matrix)
def knn(X_train_counts,X_test_counts,categories):
    """Hand-rolled 5-nearest-neighbour classifier.

    For each test row, compute Euclidean distances to all training rows,
    take the 5 closest and predict their majority category.  Returns the
    list of predicted categories, one per test row.

    NOTE(review): this file is Python 2 -- zip() returns a list here, so
    distances.sort() is valid; under Python 3 it would fail.
    """
    yPred = []
    # enumerate yields (index, row): 'test' is the row index actually
    # used below, 'i' is the (unused) row itself.
    for test ,i in enumerate(X_test_counts):
        # create list for distances and targets
        distances = euclidean_distances(X_train_counts, [X_test_counts[test]])
        distances = zip(distances,categories)
        # sort the list (ascending distance first)
        distances.sort()
        # make a list of the k neighbors' targets
        targets = [distances[x][1] for x in range(5)]
        # print targets
        c = Counter(targets)
        # print c.most_common(1)
        # print targets
        yPred.append(c.most_common(1)[0][0])
    return yPred
def classificationMethod(method,X_train_counts,X_test_counts,categories,train_index,test_index):
    """Fit the requested classifier on the training matrix and return the
    predicted labels for the test matrix.

    method -- one of 'naiveBayes', 'RandomForest', 'SVM', 'KNN'
    categories -- training labels aligned with X_train_counts
    train_index/test_index -- kept for interface compatibility (unused)

    Raises ValueError for an unknown method name.
    """
    C = 2.0  # SVM regularization strength
    if method == 'naiveBayes':
        clf_cv = GaussianNB().fit(X_train_counts,categories)
    elif method == 'RandomForest':
        # 128 trees: the best result found for random forest
        clf_cv = RandomForestClassifier(n_estimators=128).fit(X_train_counts,categories)
    elif method == 'SVM':
        clf_cv = svm.SVC(kernel='linear', C=C,gamma=0.7).fit(X_train_counts,categories)
    elif method == 'KNN':
        # KNN is implemented by hand and needs no fitted estimator.
        return knn(X_train_counts,X_test_counts,categories)
    else:
        # BUG FIX: an unknown method used to fall through to an
        # unbound-variable NameError; fail fast with a clear message.
        raise ValueError("unknown classification method: %r" % (method,))
    yPred = clf_cv.predict(X_test_counts)
    return yPred
#find categories for the test dataset
def findCategories(df,test_df):
    """Train a linear SVM on the labelled *df* and write a predicted
    Category for every row of the unlabelled *test_df* to the output CSV
    (via createTestSetCategoryCSV)."""
    my_additional_stop_words=['said','th','month','much','thing','say','says']
    stop_words = ENGLISH_STOP_WORDS.union(my_additional_stop_words)
    count_vect = TfidfVectorizer(stop_words=stop_words)
    #count_vect = CountVectorizer(stop_words=stop_words)
    count_vect.fit(df['Content'])
    # LSI: compress the TF-IDF space to 400 components.
    svd = TruncatedSVD(n_components=400)
    svd.fit(count_vect.transform(df['Content']))
    # Title vectors are added on top of the content vectors, so title
    # words contribute a second time.
    X_train_counts = count_vect.transform(df['Content'])
    X_train_counts = np.add(X_train_counts, count_vect.transform(df['Title']))
    X_test_counts = count_vect.transform(test_df['Content'])
    X_test_counts = np.add(X_test_counts, count_vect.transform(test_df['Title']))
    X_train_counts = svd.transform(X_train_counts)
    X_test_counts = svd.transform(X_test_counts)
    # The SVM branch ignores the index arguments; 44 is a dummy value.
    yPred = classificationMethod('SVM',X_train_counts,X_test_counts,df['Category'],44,44)
    print yPred
    createTestSetCategoryCSV(test_df['Id'],yPred)
def crossValidation(df,method,n_components,titleWeight):
    """10-fold cross-validation driver.

    method -- a single classifier name, or 'ALL' to accumulate stats for
              every method into the module-level average* arrays
    n_components -- TruncatedSVD (LSI) dimensionality
    titleWeight -- multiplier applied to the title vectors before they
                   are added onto the content vectors
    Returns the mean accuracy (only meaningful for a single method; for
    'ALL' the stats are written out by produceStats instead).
    """
    avgAccuracy=0
    nFolds=10
    kf = KFold(n_splits=nFolds)
    fold = 0
    my_additional_stop_words=['said','th','month','much','thing','say','says']
    stop_words = ENGLISH_STOP_WORDS.union(my_additional_stop_words)
    count_vect = TfidfVectorizer(stop_words=stop_words)
    #count_vect = CountVectorizer(stop_words=stop_words)
    # Fit the vectorizer and the LSI reduction once on the full corpus.
    count_vect.fit(df['Content']+df['Title'])
    svd = TruncatedSVD(n_components=n_components)
    svd.fit(count_vect.transform(df['Content']+df['Title']))
    for train_index, test_index in kf.split(df):
        # Title vectors are scaled by titleWeight and added to content
        # vectors so title words can count more than body words.
        X_train_counts = count_vect.transform(df['Content'].iloc[train_index])
        X_train_counts = np.add(X_train_counts, count_vect.transform(df['Title'].iloc[train_index])*titleWeight)
        X_test_counts = count_vect.transform(df['Content'].iloc[test_index])
        X_test_counts = np.add(X_test_counts, count_vect.transform(df['Title'].iloc[test_index])*titleWeight)
        X_train_counts = svd.transform(X_train_counts)
        X_test_counts = svd.transform(X_test_counts)
        print "Fold " + str(fold)
        if method=='ALL':
            # Accumulates metrics into the module-level average* arrays.
            runAllClassificationMethods(df,nFolds,X_train_counts,X_test_counts,train_index,test_index)
        else:
            yPred = classificationMethod(method,X_train_counts,X_test_counts,df['Category'].iloc[train_index],train_index,test_index)
            print(classification_report(yPred,df['Category'].iloc[test_index], target_names=df.Category.unique()))
            avgAccuracy+=accuracy_score(df['Category'].iloc[test_index],yPred)
        fold += 1
    if method=='ALL':
        produceStats(nFolds)
    avgAccuracy=avgAccuracy/nFolds
    print "the average accuracy of method "+ method
    print avgAccuracy
    return avgAccuracy
def runAllClassificationMethods(df,nFolds,X_train_counts,X_test_counts,train_index,test_index):
    """Run every classifier on one fold and add its metrics into the
    module-level average* arrays (divided by nFolds later, in
    produceStats).  nFolds itself is unused here."""
    classification_method_array=['naiveBayes','RandomForest','SVM','KNN']
    for idx,value in enumerate(classification_method_array):
        yPred = classificationMethod(value,X_train_counts,X_test_counts,df['Category'].iloc[train_index],train_index,test_index)
        averageAccurracyArray[idx] += accuracy_score(df['Category'].iloc[test_index],yPred)
        averagePrecisionArray[idx] += precision_score(df['Category'].iloc[test_index], yPred, average='macro')
        averageRecallArray[idx] += recall_score(df['Category'].iloc[test_index], yPred, average='macro')
        averageFmeasureArray[idx]+= f1_score(df['Category'].iloc[test_index], yPred, average='macro')
def produceStats(nFolds):
    """Turn the accumulated per-fold metric sums into averages (mutated
    in place) and hand everything to writeStats for output."""
    for idx in range(len(averageAccurracyArray)):
        averageAccurracyArray[idx] /= nFolds
        averagePrecisionArray[idx] /= nFolds
        averageRecallArray[idx] /= nFolds
        averageFmeasureArray[idx] /= nFolds
    writeStats(averageAccurracyArray, averagePrecisionArray,
               averageRecallArray, averageFmeasureArray, averageAUCarray)
def produceSVMstats(df):
    """Plot SVM cross-validation accuracy as a function of the number of
    LSI (TruncatedSVD) components and save the bar chart."""
    componentsList = [2,3,4,5,6,10,20,30,40,50,60,70,80,90,100,300,400] #componentsList = [100,110,120,130]
    accuracyList=[]
    for value in componentsList:
        # BUG FIX: crossValidation takes a mandatory titleWeight argument;
        # the original call omitted it and raised a TypeError.  Weight 1
        # counts each title once, i.e. plain TF-IDF behaviour.
        accuracyList.append(crossValidation(df,'SVM',value,titleWeight=1))
    print(accuracyList)
    plt.ylim([0.5, 1.0])
    plt.xlim([0.0,120.0])
    plt.xlabel('Components')
    plt.ylabel('Accuracy')
    width = 1
    plt.bar(componentsList,accuracyList, width, color="blue")
    plt.savefig('output/LSIcomponentsAccuracy1')
    plt.show()
def crossValidationRoc(df,method,n_components,category):
    """Plot a 10-fold ROC curve (one-vs-rest) for *category* using a
    linear SVM and save it under output/<category>_roc.

    NOTE(review): the *method* parameter is unused -- the classifier is
    always the SVC built below; the returned avgAccuracy is always 0.
    """
    # Add noisy features
    random_state = np.random.RandomState(0)
    # probability=True is required for predict_proba below.
    classifier = svm.SVC(kernel='linear', probability=True,
                         random_state=random_state)
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)
    all_tpr = []
    avgAccuracy=0
    nFolds=10
    kf = KFold(n_splits=nFolds)
    fold = 0
    my_additional_stop_words=['said','th','month','much','thing','say','says']
    stop_words = ENGLISH_STOP_WORDS.union(my_additional_stop_words)
    count_vect = TfidfVectorizer(stop_words=stop_words)
    #count_vect = CountVectorizer(stop_words=stop_words)
    count_vect.fit(df['Content']+df['Title'])
    svd = TruncatedSVD(n_components=n_components)
    svd.fit(count_vect.transform(df['Content']+df['Title']))
    for train_index, test_index in kf.split(df):
        # Title vectors weighted x2 on top of the content vectors.
        X_train_counts = count_vect.transform(df['Content'].iloc[train_index])
        X_train_counts = np.add(X_train_counts, count_vect.transform(df['Title'].iloc[train_index])*2)
        X_test_counts = count_vect.transform(df['Content'].iloc[test_index])
        X_test_counts = np.add(X_test_counts, count_vect.transform(df['Title'].iloc[test_index])*2)
        X_train_counts = svd.transform(X_train_counts)
        X_test_counts = svd.transform(X_test_counts)
        probas_ = classifier.fit(X_train_counts,df['Category'].iloc[train_index]).predict_proba(X_test_counts)
        # Compute ROC curve and area the curve
        # (binarize the labels, then pick the column for *category*).
        test1 = label_binarize(df['Category'].iloc[test_index], classes=["Business","Film","Football","Politics","Technology"])
        fpr, tpr, thresholds = roc_curve(test1[:,categories_map[category]], probas_[:, categories_map[category]])
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (fold, roc_auc))
        print "Fold " + str(fold)
        fold += 1
    # Diagonal = random-guess baseline.
    plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
    mean_tpr /= 10  # NOTE: hard-coded fold count (== nFolds)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, 'k--',
             label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic '+category)
    plt.legend(loc="lower right")
    plt.savefig('output/'+category+'_roc')
    plt.close()
    return avgAccuracy
# Category name -> column index produced by label_binarize (sorted order).
categories_map={
    'Business': 0,
    'Film': 1,
    'Football': 2,
    'Politics': 3,
    'Technology': 4
}
# Per-method accumulators (order: naiveBayes, RandomForest, SVM, KNN),
# filled by runAllClassificationMethods and averaged by produceStats.
averageAccurracyArray=[0,0,0,0]
averagePrecisionArray=[0,0,0,0]
averageRecallArray=[0,0,0,0]
averageFmeasureArray=[0,0,0,0]
averageAUCarray=[0,0,0,0]
# Make sure the plot/stats output directory exists.
outputDir = "output/"
if not os.path.exists(outputDir):
    os.makedirs(outputDir)
df = pd.read_csv('dataSets/train_set.csv', sep='\t')
crossValidation(df,'RandomForest',40,titleWeight=1.1) #ALL TO RUN ALL METHODS OTHERWIRSE PUT ONE METHOD OF THESE classification_method_array=['naiveBayes','RandomForest','SVM','KNN']
#produceSVMstats(df)
#testdf =pd.read_csv('dataSets/test_set.csv', sep='\t')
#findCategories(df,testdf)
#crossValidationRoc(df,'SVM',40,'Business')
#crossValidationRoc(df,'SVM',40,'Film')
#crossValidationRoc(df,'SVM',40,'Football')
#crossValidationRoc(df,'SVM',40,'Politics')
#crossValidationRoc(df,'SVM',40,'Technology')
| true |
f3912075084327ce61bc53a1dc12283f09366d2a | Python | hachmannlab/chemml | /tests/published/RI/test_lorentz_lorenz.py | UTF-8 | 1,317 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | import pytest
from chemml.published.RI import LorentzLorenz
@pytest.fixture()
def data():
    """Fixture: four random 50x100 feature frames plus the 10k-sample targets."""
    from chemml.published.RI import load_small_organic_data_10k
    import numpy as np
    import pandas as pd

    _, targets = load_small_organic_data_10k()
    frames = [pd.DataFrame(np.random.random((50, 100))) for _ in range(4)]
    return frames, targets
def test_feature_sizes(data):
    """LorentzLorenz should accept 2, 3 or 4 feature blocks end-to-end."""
    features, t = data
    targets = [t[t.columns[0]], t[t.columns[1]], t[t.columns[2]]]
    # Vary the number of input feature blocks from 2 up to 4.
    for n_feat in range(2, 5):
        ll_model = LorentzLorenz(n_features=n_feat)
        X_train, X_test, y_train, y_test, scaler_y = ll_model.preprocessing(
            features=features[4 - n_feat:], targets=targets, return_scaler=True)
        ll_model = ll_model.fit(X_train, y_train[:50])
        y_pred = ll_model.predict(X_test)
def test_exceptions(data):
    """preprocessing must reject a target list whose length is not 3."""
    features, t = data
    targets = [t[t.columns[0]], t[t.columns[1]], t[t.columns[2]]]
    # Passing only two targets must raise a ValueError with a clear message.
    ll_model = LorentzLorenz(n_features=2)
    with pytest.raises(ValueError) as exc_info:
        X_train, X_test, y_train, y_test, scaler_y = ll_model.preprocessing(
            features=features[2:], targets=targets[:-1], return_scaler=True)
    assert 'length of the target list should be 3' in str(exc_info.value)
| true |
6ff1722242f48aa15ef55c08c03c5be6fa272d61 | Python | vaporwavefm/PythonNotes | /AttendanceScript.py | UTF-8 | 1,055 | 3.984375 | 4 | [] | no_license | # George Juarez
'''
Attendance Script:
We use a python script to take attendance. Your goal is to write a script that will
take down a user's name and the time they entered their information and output the
data into a CSV file. The datetime and time modules will be useful to you. Feel
free to output the time in any time format you'd like. Feel free to input the user's name
in any format you'd like.
ex:
2015-02-06 01:56:11, Nasir Memon
'''
from datetime import datetime
import csv
userFile = 'attend_script.csv'
def main():
    """Prompt for a name and append "name,timestamp" to the attendance CSV.

    Side effects: reads one line from stdin, appends one row to the module-level
    `userFile`. The timestamp format is '%Y-%m-%d %I:%M:%S' (NOTE: %I is a
    12-hour clock; the example in the module docstring shows 24-hour — use %H
    if that matters; kept as-is for compatibility).
    """
    print("Hello, welcome to this lil' sign-in sheet.")
    userName = input("Please enter your name to sign in: ")
    userDate = datetime.now().strftime('%Y-%m-%d %I:%M:%S')
    # BUG FIX: the original called csv.writer(open(...)) inside a loop and never
    # closed the handle, leaking a file descriptor per sign-in. Open once and
    # let the context manager close (and flush) it deterministically.
    with open(userFile, 'a', newline='') as csv_file:
        csv.writer(csv_file).writerow([userName, userDate])
'''
current_time = datetime.datetime.now().time()
print(current_time)
'''
# call main
main()
| true |
ea91f5e6b01a56b2a3a52e1cbed784a97f759638 | Python | djatlantic/atomic-queue | /atomicqueue/barrier.py | UTF-8 | 715 | 2.921875 | 3 | [
"MIT"
] | permissive | class SequenceBarrier:
"""
Track sequence for smallest index number being processed by event processor or
group of event processors
"""
def __init__(self, sequencer, dependent_sequences, wait_strategy):
self.sequencer = sequencer
self.dependent_sequences = dependent_sequences
self.wait_strategy = wait_strategy
def wait_for(self, sequence):
""" Return the next highest available number in the
buffer after the consumer asks for the next event"""
available_sequence = self.wait_strategy.wait_for(
self.dependent_sequences, sequence
)
return self.sequencer.get_highest_published(sequence, available_sequence)
| true |
ce53c0f15e37d0ad71fc0810e3461a679c39f0a7 | Python | muhtarudinsiregar/codingbat-exercises | /list-1/max_end3.py | UTF-8 | 336 | 3.203125 | 3 | [] | no_license | def max_end3(nums):
max = 0
if nums[0] <= nums[len(nums) - 1]:
max = nums[len(nums) - 1]
else:
max = nums[0]
return [max] * 3
# or
# max_value = max(nums[0], nums[len(nums) - 1])
# return [max_value] * 3
print(max_end3([1, 2, 3]))
print(max_end3([11, 5, 9]))
print(max_end3([2, 11, 3]))
| true |
4e49defaf34355b13ce2fdabd498e931442bdac2 | Python | cserpell/param_prob_forec | /modules/util_test.py | UTF-8 | 731 | 2.640625 | 3 | [] | no_license | # coding=utf-8
"""Tests for util module."""
import unittest
import numpy as np
from numpy import testing
from modules import util
class UtilTest(unittest.TestCase):
    """Unit test for util module."""

    @staticmethod
    def test_mean_squared_loss_sigma():
        """mean_squared_loss_with_sigma matches a hand-computed reference."""
        # Second inner row holds log-sigmas corresponding to sigma = 0.5, 1.0, 1.5.
        predictions = np.array([[[1.0, 2.0, -1.0],
                                 [-1.386, 0.0, 0.811]]])
        ground_truth = np.array([[[1.2, 2.5, -1.2], [0.0, 0.0, 0.0]]])
        loss = util.mean_squared_loss_with_sigma(ground_truth, predictions)
        testing.assert_allclose(
            loss.numpy(), (-1.226047 + 0.25 + 0.828777) / 3.0, rtol=0.00001)
| true |
00b7532292dc2e2a39e00fc9e0641c73444488e6 | Python | serimj98/introtoml | /decisionTree.py | UTF-8 | 10,841 | 2.75 | 3 | [] | no_license | from __future__ import print_function
import sys
import csv
import math
import copy
if __name__ == '__main__':
train_input = sys.argv[1]
test_input = sys.argv[2]
max_depth = sys.argv[3]
train_out = sys.argv[4]
test_out = sys.argv[5]
metrics_out = sys.argv[6]
def readFile(path):
    """Load a tab-separated file and return its rows as a list of lists of strings."""
    with open(path) as tsv_handle:
        reader = csv.reader(tsv_handle, dialect='excel-tab')
        return [row for row in reader]
train = readFile(train_input)
train_copy1 = copy.deepcopy(train)
train_copy2 = copy.deepcopy(train)
train_copy3 = copy.deepcopy(train)
train_copy4 = copy.deepcopy(train)
train_copy5 = copy.deepcopy(train)
test = readFile(test_input)
test_copy1 = copy.deepcopy(test)
test_copy2 = copy.deepcopy(test)
test_copy3 = copy.deepcopy(test)
test_copy4 = copy.deepcopy(test)
test_copy5 = copy.deepcopy(test)
def data_info(data):
    """Per column, return the distinct values in first-seen order.

    Because the header row is still present when this is called, each entry
    comes back as [attribute_name, value1, value2, ...].
    """
    data_list = []
    for col in range(len(data[0])):
        seen = []
        for row in data:
            if row[col] not in seen:
                seen.append(row[col])
        data_list.append(seen)
    return data_list
train_data_info_list = data_info(train) #[[attribute, category 1, category 2], ...]
test_data_info_list = data_info(test)
train.pop(0)
test.pop(0)
def entropy_calc(data, data_info_list):
    """Shannon entropy of the (binary) label column of `data`.

    `data_info_list[-1]` is [label_name, label_value_1, label_value_2].
    A pure node (either class count is zero) has entropy 0.
    """
    y_data = data_info_list[-1]
    labels = [row[-1] for row in data]
    count1 = labels.count(y_data[1])
    count2 = labels.count(y_data[2])
    if count1 == 0 or count2 == 0:
        return 0
    p1 = count1 / len(data)
    p2 = count2 / len(data)
    return -p1 * math.log2(p1) - p2 * math.log2(p2)
def cond_entropy_calc(data, x_index, data_info_list):
    """Conditional entropy H(Y | X) for the binary attribute at `x_index`.

    Rows are partitioned by the attribute's two values; each partition's
    label entropy is weighted by the partition's share of the data.
    """
    x_data = data_info_list[x_index]
    y_data = data_info_list[-1]

    # Split the rows by the two possible values of attribute X.
    part1 = [row for row in data if row[x_index] == x_data[1]]
    part2 = [row for row in data if row[x_index] == x_data[2]]

    n = len(data)
    if n == 0:
        prob1 = 0
        prob2 = 0
    else:
        prob1 = len(part1) / n
        prob2 = len(part2) / n

    def _branch_entropy(rows):
        # Entropy of the label distribution within one partition; 0 when pure/empty.
        c1 = sum(1 for row in rows if row[-1] == y_data[1])
        c2 = sum(1 for row in rows if row[-1] == y_data[2])
        if c1 == 0 or c2 == 0:
            return 0
        total = len(rows)
        return (-(c1 / total) * math.log2(c1 / total)
                - (c2 / total) * math.log2(c2 / total))

    return prob1 * _branch_entropy(part1) + prob2 * _branch_entropy(part2)
class Node:
    """A decision-tree node; leaves carry `decision`, internal nodes a split."""

    def __init__(self):
        self.left = None       # child where the split attribute takes the left category
        self.right = None      # child for the other category
        self.split = None      # attribute name the children split on
        self.category = None   # attribute value that led to this node
        self.decision = None   # predicted label (set on leaves only)
        self.data = None       # training rows that reached this node
        self.depth = 0         # depth within the tree (root = 0)
train_tree = Node()
test_tree = Node()
def train_stump(node, data, depth, data_info_list):
node.data = data
depth = int(depth)
y_data = data_info_list[-1]
#majority vote classifier
y_list = [] #all instances in y column
for instance in data:
y_list.append(instance[-1])
ycount1, ycount2 = 0, 0
for instance in y_list:
if (instance == y_data[1]):
ycount1 += 1
if (instance == y_data[2]):
ycount2 += 1
#base case
if (depth == 0 or (len(data_info_list)-1) == 0 or ycount1 == 0 or ycount2 == 0):
if (ycount1 >= ycount2):
node.decision = y_data[1]
else:
node.decision = y_data[2]
return (node)
if (len(data_info_list)-1 == 1):
node.left = Node()
node.right = Node()
node.left.depth = node.depth+1
node.right.depth = node.depth+1
x_data = data_info_list[0]
node.left.split = x_data[0]
node.right.split = x_data[0]
data_info_list.pop(0)
info1 = copy.deepcopy(data_info_list)
info2 = copy.deepcopy(data_info_list)
x_list = [] #all instances in x column with maximum mutual information
for instance in data:
x_list.append(instance[0])
xcount1, xcount2 = 0, 0
x_list1, x_list2 = [], [] #all instances of x = 0, x = 1
for i in range(len(x_list)): #counting number of particular instance in x column of max_ind
if (x_list[i] == x_data[1]):
xcount1 += 1
x_list1.append(data[i])
if (x_list[i] == x_data[2]):
xcount2 += 1
x_list2.append(data[i])
if (xcount1 >= xcount2):
node.left.category = x_data[1]
node.right.category = x_data[2]
else:
node.left.category = x_data[2]
node.right.category = x_data[1]
train_stump(node.left, x_list1, depth-1, info1)
train_stump(node.right, x_list2, depth-1, info2)
return (node)
else:
node.left = Node()
node.right = Node()
node.left.depth = node.depth+1
node.right.depth = node.depth+1
#calculate mutual information for all x columns
mutual_info = []
for x in range(len(data_info_list)-1):
mutual_info.append(entropy_calc(data, data_info_list) - cond_entropy_calc(data, x, data_info_list))
#find maximum mutual information index (ex. x1, x2..., xi)
max_val = 0
for i in range(len(mutual_info)):
if (mutual_info[i] >= max_val):
max_ind = i
max_val = mutual_info[i]
if (max_val == 0):
#majority vote classifier
y_list = [] #all instances in y column
for instance in data:
y_list.append(instance[-1])
ycount1, ycount2 = 0, 0
for instance in y_list:
if (instance == y_data[1]):
ycount1 += 1
if (instance == y_data[2]):
ycount2 += 1
if (ycount1 >= ycount2):
node.decision = y_data[1]
else:
node.decision = y_data[2]
return (node)
else:
x_data = data_info_list[max_ind]
node.left.split = x_data[0]
node.right.split = x_data[0]
data_info_list.pop(max_ind)
info1 = copy.deepcopy(data_info_list)
info2 = copy.deepcopy(data_info_list)
#split based on attribute with maximum mutual information
x_list = [] #all instances in x column with maximum mutual information
for instance in data:
x_list.append(instance[max_ind])
xcount1, xcount2 = 0, 0
x_list1, x_list2 = [], [] #all instances of x = 0, x = 1
for i in range(len(x_list)): #counting number of particular instance in x column of max_ind
if (x_list[i] == x_data[1]):
xcount1 += 1
x_list1.append(data[i])
if (x_list[i] == x_data[2]):
xcount2 += 1
x_list2.append(data[i])
if (xcount1 >= xcount2):
node.left.category = x_data[1]
node.right.category = x_data[2]
else:
node.left.category = x_data[2]
node.right.category = x_data[1]
x_new_list1, x_new_list2 = [], [] #x_list1 and x_list2 with the splitting attribute removed
for instance in x_list1:
instance.pop(max_ind)
x_new_list1.append(instance) #get rid of x attribute chosen for x = 0
for instance in x_list2:
instance.pop(max_ind)
x_new_list2.append(instance) #get rid of x attribute chosen for x = 1
train_stump(node.left, x_new_list1, depth-1, info1)
train_stump(node.right, x_new_list2, depth-1, info2)
return (node)
def num_category(data, data_info_list):
    """Render a node's label counts as "[n1<label1>/n2<label2>]" ("" when data is None)."""
    if data is None:
        return ""
    y_data = data_info_list[-1]
    count1 = sum(1 for row in data if row[-1] == y_data[1])
    count2 = sum(1 for row in data if row[-1] == y_data[2])
    return "[" + str(count1) + str(y_data[1]) + "/" + str(count2) + str(y_data[2]) + "]"
# A function to do preorder tree traversal
def printPreorder(root, data_info_list, depth = 0):
    """Pretty-print the decision tree in preorder.

    The root prints only its label counts; interior/leaf nodes print their
    split condition, decision (if any) and label counts, indented by depth.
    Nodes with no data are silently skipped.
    """
    if not root:
        return
    if depth == 0:
        print(num_category(root.data, data_info_list))
    elif root.data is not None:
        print('| '*int(root.depth), str(root.split), '=', str(root.category), ":",
              root.decision, num_category(root.data, data_info_list), "\n", end = "")
    # Recurse left first, then right (preorder).
    printPreorder(root.left, data_info_list, depth + 1)
    printPreorder(root.right, data_info_list, depth + 1)
train_predict_list = [] #creating dataset without y category for train dataset
for instance in train_copy3:
instance.pop(-1)
train_predict_list.append(instance)
train_col_names = train_predict_list.pop(0)
test_predict_list = [] #creating dataset without y category for test dataset
for instance in test_copy3:
instance.pop(-1)
test_predict_list.append(instance)
test_col_names = test_predict_list.pop(0)
def predict_label(predict_elem, col_names, tree):
    """Walk the tree guided by the row's attribute values; return the leaf decision."""
    if tree.decision is not None:
        return tree.decision
    # Both children record the attribute this level splits on.
    attribute = tree.left.split
    for idx, name in enumerate(col_names):
        if name == attribute:
            value = predict_elem[idx]
    if value == tree.left.category:
        return predict_label(predict_elem, col_names, tree.left)
    if value == tree.right.category:
        return predict_label(predict_elem, col_names, tree.right)
def error_rate(predict_list, actual_list):
    """Fraction of positions where prediction and actual label disagree."""
    wrong = sum(1 for i in range(len(predict_list))
                if predict_list[i] != actual_list[i])
    return wrong / len(predict_list)
train_y_list = [] #all instances in y column
for instance in train:
train_y_list.append(instance[-1])
test_y_list = [] #all instances in y column
for instance in test:
test_y_list.append(instance[-1])
train_tree_comp = train_stump(train_tree, train, max_depth, train_data_info_list)
printPreorder(train_tree_comp, train_data_info_list)
train_labels = []
for i in range(len(train_predict_list)):
train_labels.append(predict_label(train_predict_list[i], train_col_names, train_tree_comp))
# test_tree_comp = train_stump(test_tree, test, max_depth, test_data_info_list)
# printPreorder(train_tree_comp, test_data_info_list)
test_labels = []
for i in range(len(test_predict_list)):
test_labels.append(predict_label(test_predict_list[i], test_col_names, train_tree_comp))
with open(train_out, "w") as f:
for i in range(len(train_labels)):
f.writelines(train_labels[i] + "\n")
with open(test_out, "w") as f:
for i in range(len(test_labels)):
f.writelines(test_labels[i] + "\n")
with open(metrics_out, "w") as f:
f.writelines("error(train): " + str(error_rate(train_labels, train_y_list)) + "\n" + \
"error(test): " + str(error_rate(test_labels, test_y_list)))
| true |
3887bfa560ea6a0c718455a9284b27d45c7c5ef0 | Python | victor-qin/cs124-pa2-cw-vq | /triangles.py | UTF-8 | 1,122 | 3.546875 | 4 | [] | no_license | from random import choices
from strassen_v1 import strassen
def graphGen(dim, p):
    """Generate a dim x dim 0/1 adjacency matrix with edge probability p.

    The diagonal is forced to zero (no self-loops).
    """
    matrix = [choices([0, 1], [1 - p, p], k=dim) for _ in range(dim)]
    for node in range(dim):
        matrix[node][node] = 0
    return matrix
def triangles(dim, p):
    """Expected-triangle count of one random graph: trace(A^3) / 6.

    A^3 is computed with Strassen multiplication; each triangle contributes
    six times to the diagonal of A^3 (3 vertices x 2 directions).
    """
    graph = graphGen(dim, p)
    cubed = strassen(strassen(graph, graph), graph)
    trace = 0
    for i in range(dim):
        trace += cubed[i][i]
    return trace / 6
if __name__ == "__main__":
# Edit dims, probabilities, numTrials here
dim = 1024
probs = [0.01, 0.02, 0.03, 0.04, 0.05]
numTrials = 5
# Compute average number of triangles for given parameters
for p in probs:
avg = 0
for i in range(numTrials):
avg += triangles(dim, p)
avg = avg / numTrials
print("Average number of triangles, probability %f: %f" % (p, avg))
| true |
d5d93b646ef25431b1792ebb52acc407ca32b322 | Python | tmeteorj/ncbi-analysis | /src/unittest/test_utils/test_str_util.py | UTF-8 | 460 | 2.828125 | 3 | [] | no_license | import unittest
from utils.str_util import StrConverter
class TestStrUtil(unittest.TestCase):
    """Unit test for str_util module."""

    def test_extract_filename(self):
        """extract_file_name normalizes separators and strips extensions."""
        cases = [
            ('18 rna utr', '18_rna_utr'),
            ('18_rna_utr.txt', '18_rna_utr'),
        ]
        for in_name, expect_name in cases:
            out_name = StrConverter.extract_file_name(in_name)
            self.assertEqual(expect_name, out_name, 'Extract file name failed: %s' % in_name)
| true |
41678879be6d0c051fbc4eebc62560f95056fd34 | Python | ruizhang84/LeetCode-OJ | /largestNumber.py | UTF-8 | 608 | 3.21875 | 3 | [] | no_license | class Solution:
def largestNumber(self, nums):
    """Arrange `nums` (non-negative ints) to form the largest number, as a string.

    Digit strings are ordered so that for any pair, a + b >= b + a.
    BUG FIX: the original used Python 2's `list.sort(cmp_func)`, which was
    removed in Python 3 — the comparator is adapted via functools.cmp_to_key.
    """
    from functools import cmp_to_key
    digits = [str(num) for num in nums]
    # a precedes b exactly when concatenating a first yields the bigger string.
    digits.sort(key=cmp_to_key(
        lambda a, b: -1 if a + b > b + a else (1 if a + b < b + a else 0)))
    if digits[0] == '0':
        # Largest element is "0" => every element is zero; collapse to "0".
        return '0'
    return ''.join(digits)
def cmp_items(a, b):
    """Old-style comparator ordering digit strings for largest concatenation.

    Returns -1 when `a` should come first, 1 when `b` should, 0 on a tie.
    When the leading characters differ, plain descending lexicographic order
    suffices; otherwise the two concatenation orders are compared.
    """
    if a[0] != b[0]:
        if a > b:
            return -1
        return 0 if a == b else 1
    ab, ba = a + b, b + a
    if ab > ba:
        return -1
    return 0 if ab == ba else 1
| true |
731fb7e4c5be60fa355d58071c4a333c8fd120e5 | Python | ChaseCondon/world-model-experiments | /models/ReplayBuffer.py | UTF-8 | 736 | 3.375 | 3 | [] | no_license | from collections import deque
from random import sample
class ReplayBuffer(object):
    """Bounded FIFO experience buffer for DQN-style experience replay."""

    def __init__(self, buffer_max=10000):
        self.buffer = deque()        # (s, a, r, d, s2) tuples, oldest first
        self.buffer_max = buffer_max
        self.size = 0                # number of experiences currently held

    def add(self, s, a, r, d, s2):
        """Store one transition, evicting the oldest when the buffer is full."""
        self.buffer.append((s, a, r, d, s2))
        if len(self.buffer) > self.buffer_max:
            self.buffer.popleft()
        else:
            self.size += 1

    def sample(self, batch_size):
        """Return a uniform random batch (the whole buffer if it holds fewer items)."""
        take = self.size if len(self.buffer) < batch_size else batch_size
        return sample(self.buffer, take)
| true |
fc826781d50f58bc01e8ece351ad38086145751d | Python | ghdus4185/SWEXPERT | /N5521.py | UTF-8 | 778 | 3.015625 | 3 | [] | no_license | import sys
sys.stdin = open('input.txt')
def dfs(n):
    """Add every yet-uninvited friend adjacent to `n` to the invite list.

    Despite the name this is a single-level neighbor scan, not a full DFS:
    it appends new neighbors to the module-level `check` list and bumps the
    module-level counter `cnt` once per new invitee.
    """
    global cnt
    for friend in range(2, N + 1):
        if adj[n][friend] == 1 and friend not in check:
            cnt += 1
            check.append(friend)
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split()) # 사람수, 친한관계수
adj = [[0] * (N+1) for _ in range(N+1)]
visited = [0] * (N+1)
check = []
# 직접 초대된 친구들 저장
for i in range(M):
a, b = map(int, input().split())
if a == 1:
check.append(b)
adj[a][b] = 1
adj[b][a] = 1
# 직접 초대된 친구들이랑 인접한 친구들 찾기
cnt = 0
for i in range(len(check)):
dfs(check[i])
# print(check)
print('#{} {}'.format(tc, len(check))) | true |
7e94cfada3586b0491f6ede48be9ee4d1ca04f6e | Python | alexoah/PythonPlayground | /W3School-PyExercises/PY-IfElse/pyIfElseE4.py | UTF-8 | 426 | 4.125 | 4 | [] | no_license | """
from PYTHON If...Else: Exercise 4 ( https://www.w3schools.com/python/exercise.asp?filename=exercise_ifelse4 )
question:
Print "1" if a is equal to b, print "2" if a is greater than b, otherwise print "3".
a = 50
b = 10
__ a __ b_
print("1")
____ a _ b_
print("2")
_____
print("3")
"""
a = 50
b = 10
if a == b:
print("1")
elif a > b:
print("2")
else:
print("3") | true |
9c482ced86ed17574eadf9882c5be4207d575b4f | Python | AlderaminCph/PythonDataStructure | /LinkedLists/SinglyLinkedLists/linked_list.py | UTF-8 | 11,847 | 4.28125 | 4 | [] | no_license | '''Singly Linked List Data Structure
'''
class Node:
    """A singly linked list node: payload plus a pointer to its successor."""

    def __init__(self, data):
        self.data = data
        self.next = None  # successor node; None marks the end of the list
class LinkedList:
def __init__(self):
self.head = None
def print_list(self):
current_node = self.head
while current_node:
print(current_node.data)
current_node = current_node.next
# Here are 3 ways how to insert(add) a new node to the list:
def append(self,data):
'''adds a node to the end of the list
'''
new_node = Node(data) #create a new node
if self.head is None: #if our list is empty
self.head = new_node
return
last_node = self.head # if our list is not empty we start from head and go to the end
while last_node.next: # while next of node pointer is not equal to None
last_node = last_node.next #go to the next node
last_node.next = new_node # here we put the new node
def prepend(self,data):
'''adds a node to the head of the list
'''
new_node = Node(data) #create new node
new_node.next = self.head #make it point to the current head of the list
self.head = new_node #change the head of the list
def insert_after_node(self, prev_node, data):
'''inserts a new node after given node
'''
if not prev_node:
print('Previous node is not on the list')
return
new_node = Node(data) #create a new node
new_node.next = prev_node.next # change a next pointer of new node
prev_node.next = new_node # change a next pointer of prev node
#Delete methods
def delete_node(self,key):
'''deletes a node from a list by its key
'''
current_node = self.head
#if we want to delete node that is a head node
if current_node and current_node.data == key: #if the list isn't empty and current_node is that what we seek
self.head = current_node.next #change the head of list
current_node = None #removes this node from list
return
prev_node = None #if node to be deleted is not head
while current_node and current_node.data != key:#iterate over list while current_node isn't None and its data isn't key
prev_node = current_node
current_node = current_node.next # move the head pointer along
if current_node is None: #this means that our element isn't present on the list
return
prev_node.next = current_node.next
current_node = None #delete this node
def delete_node_at_pos(self,pos):
'''deletes node at given position
'''
current_node = self.head
if pos == 0: #if we want to delete the head node
self.head = current_node.next
current_node = None #removes the head node
return
prev_node = None #if we want to delete not a head node we should iterate over the list
count = 0
while current_node and count != pos:
prev_node = current_node
current_node = current_node.next
count += 1
if current_node is None: # the position was greater than the number of elements in the list
return
prev_node.next = current_node.next
current_node = None # deletes node
def len_iterative(self):
'''returns the length of the list
'''
count = 0
current_node = self.head
while current_node: #while current node is valid
count += 1
current_node = current_node.next
return count
def len_recursive(self,node):
'''calculate the list length recursively
'''
if node is None:
return 0
return 1 + self.len_recursive(node.next)
def swap_nodes(self,key1,key2):
'''change places two nodes by their keys
'''
if key1 == key2:
return
prev_1 = None
curr_1 = self.head
while curr_1 and curr_1.data != key1:
prev_1 = curr_1
curr_1 = curr_1.next
prev_2 = None
curr_2 = self.head
while curr_2 and curr_2.data != key2:
prev_2 = curr_2
curr_2 = curr_2.next
if not curr_1 or not curr_2: # if any of these nodes is None we can't swap them
return
# case1: Neither of them are not a head node. This means that they have previous nodes.
if prev_1: #if node1 has previous node => it's not a head node
prev_1.next = curr_2
else: #node1 is the head node
self.head = curr_2
if prev_2:
prev_2.next = curr_1
else: #node2 is the head node
self.head = curr_1
curr_1.next,curr_2.next = curr_2.next,curr_1.next #swapping them
def print_helper(self,node,name):
if node is None:
print(name + ': None')
else:
print(name + ': ' + node.data)
#reverse list
# A->B->C->D->0
# D->C->B->A->0
# A<-B<-C<-D<-0
def reverse_iterative(self):
prev_node = None
current_node = self.head
while current_node:
nxt = current_node.next #temporary variable with pointer to the next node
current_node.next = prev_node
self.print_helper(prev_node,'PREV')
self.print_helper(current_node,'CURR')
self.print_helper(nxt,'NEXT')
print('\n')
prev_node = current_node
current_node = nxt
self.head = prev_node
def reverse_recursive(self):
def _reverse_recursive(current_node,prev_node):
if not current_node: #if we rich the end of the list
return prev_node
nxt = current_node.next
current_node.next = prev_node
prev_node = current_node
current_node = nxt
return _reverse_recursive(current_node,prev_node)
self.head = _reverse_recursive(current_node = self.head,prev_node = None)
def merge_sorted(self,llist):
'''merges two sorted lists
'''
p = self.head
q = llist.head
s = None
if not p: #this means that the first list doesn't exists
return q #we return the second sorted list
if not q:
return p
if p and q:
if p.data <= q.data:
s = p
p = s.next
else:
s = q
q = s.next
new_head = s #update list head
while p and q:
if p.data <= q.data:
s.next = p
s = p
p = s.next
else:
s.next = q
s = q
q = s.next
if not p:
s.next = q
if not q:
s.next = p
return new_head
def remove_duplicates(self):
'''deletes duplicating values from a list
'''
current_node = self.head
prev_node = None
dupl_values = {}
while current_node:
if current_node.data in dupl_values:
#Remove node
prev_node.next = current_node.next
current_node = None
else:
#Have not encountered element before
dupl_values[current_node.data] =1
prev_node = current_node
current_node = prev_node.next
def print_nth_from_last_1(self,n):
# Method 1:
total_length = self.len_iterative()
current_node = self.head
while current_node:
if total_length == n:
print(current_node.data)
return current_node
total_length -= 1
current_node = current_node.next
if current_node is None:
return
def print_nth_from_last2(self,n):
# Method 2:
p = self.head
q = self.head
count = 0
while q and count < n:
q = q.next
count += 1
if not q:
print(str(n) + ' is greater than the number of nodes in list')
while p and q:
p = p.next
q = q.next
return p.data
def count_occurances_iterative(self, data):
# 1->2->1->3->4->1->1
# Number of ones is 4
current_node = self.head
count = 0
while current_node:
if current_node.data == data:
count+=1
current_node = current_node.next
return count
def count_occurances_recursivly(self, node, data):
if not node:
return 0
if node.data == data:
return 1 + self.count_occurances_recursivly(node.next, data)
else:
return self.count_occurances_recursivly(node.next, data)
def rotate(self, k):
# 1->2->3->4->5->6->none
# p q
# k = 4
# 5 -> 6 -> 1 -> 2 -> 3 -> 4 -> None
#pnext q ->head-> p -> None
p = self.head
q = self.head
count = 0
prev_node = None
while p and count < k:
prev_node = p
p = p.next
q = q.next
count += 1
p = prev_node
while q:
prev_node = q
q = q.next
q = prev_node
q.next = self.head
self.head = p.next
p.next = None
def is_palindrome_1(self):
    """Return True when the node data, read head to tail, spells a palindrome.

    Example palindromes: RACECAR, RADAR. Non-palindromes: TEST, ABC, HELLO.
    Method 1: collect every fragment into one string and compare it with its
    reverse. Assumes each node's data is a string.
    """
    fragments = []
    node = self.head
    while node:
        fragments.append(node.data)
        node = node.next
    joined = "".join(fragments)
    return joined == joined[::-1]
def is_palindrome_2(self):
    """Return True when the linked list's data sequence is a palindrome.

    Method 2, using a stack: push every element, then re-walk the list from
    the head while popping — the stack yields the elements in reverse order.
    """
    p = self.head
    s = []
    while p:
        s.append(p.data)
        p = p.next
    # BUG FIX: the original reset with `p = p.head`, but p is None here
    # (and Nodes have no .head), so the method always crashed.
    p = self.head
    while p:
        data = s.pop()
        if p.data != data:
            return False
        p = p.next
    return True
def move_tail_to_head(self):
    """Rotate the list so its last node becomes the head.

    A -> B -> C -> D -> None   becomes   D -> A -> B -> C -> None.
    No-op on an empty or single-node list (the original raised
    AttributeError on an empty list by dereferencing None.next).
    """
    if self.head is None or self.head.next is None:
        return
    last = self.head
    second_to_the_last = None
    while last.next:
        second_to_the_last = last
        last = last.next
    # Re-link: tail points at the old head, the node before it becomes the tail.
    last.next = self.head
    second_to_the_last.next = None
    self.head = last
def sum_two_lists(self, llist):
    """Print the digit-wise sum of this list and `llist`.

    Digits are stored least-significant first, e.g. 365 is 5 -> 6 -> 3, so
    addition walks both lists in parallel carrying overflow forward.
    BUG FIX: the original dropped a final carry (e.g. 5 + 5 printed just 0
    instead of 0 -> 1); the trailing carry digit is now appended.
    """
    p = self.head
    q = llist.head
    sum_list = LinkedList()
    carry = 0
    while p or q:
        i = p.data if p else 0
        j = q.data if q else 0
        s = i + j + carry
        if s >= 10:
            carry = 1
            sum_list.append(s % 10)
        else:
            carry = 0
            sum_list.append(s)
        if p:
            p = p.next
        if q:
            q = q.next
    if carry:
        sum_list.append(carry)
    sum_list.print_list()
'''
# 3 6 5
# 4 2
# ------
#
llist1 = LinkedList()
llist1.append(5)
llist1.append(6)
llist1.append(3)
llist2 = LinkedList()
llist2.append(8)
llist2.append(4)
llist2.append(2)
print(365 + 248)
llist1.sum_two_lists(llist2)
'''
| true |
d1639b17af1015c75f500046b712e9bd350f370d | Python | mahela37/CLGbot | /html_parser.py | UTF-8 | 1,111 | 3.328125 | 3 | [] | no_license | def parseMainPage():
from bs4 import BeautifulSoup
import re
import requests
r=requests.get("https://www.clg.org/Class-Action/List-of-Class-Actions")
soup=BeautifulSoup(r.text,'html.parser')
mydivs=soup.find("div",{"class":'classactions'})
div_array=[]
category=""
for div in mydivs:
try:
class_type=div.attrs['class'][0]+div.attrs['class'][1] #it's a lawsuit entry
if(class_type=="actionwith_image"):
#the regex is for french characters that the website has used for a few entries. We wanna strip ito out.
#for each lawsuit, make a dict entry with the name, href and the category
temp_entry={'name':re.sub(r'[^a-zA-Z]', "", div.contents[5].contents[0].contents[0].strip()),'href':div.contents[5].contents[0].attrs['href'],'category':category}
div_array.append(temp_entry)
except:
try:
if('cat' in class_type): #it's a new category
category=class_type
except:
""""""
return div_array
| true |
e0cb5879216aa86dc23cd783686d1446b5c70645 | Python | ivanma9/transfer-HaCK | /python/data_mapping_final.py | UTF-8 | 4,607 | 3.296875 | 3 | [] | no_license | import serial
import matplotlib.pyplot as plt
import numpy as np
import csv
import time
baud = 9600
port = "/dev/tty.HC-05-DevB"
def main():
try:
ser = serial.Serial(port, baud)
print("Connected to Arduino port:" + port)
except:
print("Error connecting to Arduino port")
exit()
ser.flushInput()
distancesFL = []
distancesFR = []
distancesSL = []
distancesSR = []
plt.ion()
#fig = plt.figure()
#x = 0
turns = 0
fieldnames = ["x_valueCar", "y_valueCar", "x_valueObject", "y_valueObject"]
x_valueCar = 0
y_valueCar = 0
x_valueObject = 0
y_valueObject = 0
#open data.csv to write
with open('data.csv', 'w') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
csv_writer.writeheader()
while True:
ser_bytes = ser.readline() # Get a line from the serial monitor
data = ser_bytes.decode().split() # Split each line by whitespace
if (len(data) != 4): # A complete line must have 4 numbers
continue
FL = float(data[0])
FR = float(data[1])
L = float(data[2])
R = float(data[3])
print (data)
if (FL > 150) or (FR > 150) or (L > 150) or (R > 150):
continue
distancesFL.append(FL)
distancesFR.append(FR)
distancesSL.append(L)
distancesSR.append(R)
w = 7
v = 12
with open('data.csv', 'a') as csv_file:
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
info = {
"x_valueCar": x_valueCar,
"y_valueCar": y_valueCar,
"x_valueObject": x_valueObject,
"y_valueObject": y_valueObject,
}
csv_writer.writerow(info)
print(x_valueCar, y_valueCar)
print(x_valueObject,y_valueObject)
time.sleep(1)
if turns % 4 == 0:
if (FL > 10.00) or (FR > 10.00):
##plt.scatter(150 - FL - v, L + w)
##plt.scatter(150 - FL - v, R)
x_valueCar = 150 - FL - v
y_valueCar = L + w
x_valueObject = 150 - FL - v
y_valueObject = R + (w*2)
else:
turns += 1
#plt.scatter(150 - FL - v, L + w)
#plt.scatter(150 - FL - v, R)
x_valueCar = 150 - FL - v
y_valueCar = L + w
x_valueObject = 150 - FL - v
y_valueObject = R + (w*2)
elif turns % 4 == 1:
if (FL > 10.00) or (FR > 10.00):
#plt.scatter(150 - L - w, FL - v)
#plt.scatter(150 - R, FL - v)
x_valueCar = 150 - L - w
y_valueCar = FL - v
x_valueObject = 150 - R - (w*2)
y_valueObject = FL - v
else:
turns += 1
#plt.scatter(150 - L - w, FL - v)
#plt.scatter(150 - R, FL - v)
x_valueCar = 150 - L - w
y_valueCar = FL - v
x_valueObject = 150 - R - (w*2)
y_valueObject = FL - v
elif turns % 4 == 2:
if (FL > 10.00) or (FR > 10.00):
#plt.scatter(FL + v, 150 - L)
#plt.scatter(FL + v, R)
x_valueCar = FL + v
y_valueCar = 150 - L
x_valueObject = FL + v
y_valueObject = R
else:
turns += 1
#plt.scatter(FL + v, 150 - L)
#plt.scatter(FL + v, R)
x_valueCar = FL + v
y_valueCar = 150 - L
x_valueObject = FL + v
y_valueObject = R
elif turns % 4 == 3:
if (FL > 10.00) or (FR > 10.00):
#plt.scatter(L + w, FL + v)
#plt.scatter(L + w, R)
x_valueCar = L + w
y_valueCar = FL + v
x_valueObject = R + (w*2)
y_valueObject = FL + v
else:
turns += 1
#plt.scatter(L + w, FL + v)
#plt.scatter(L + w, R)
x_valueCar = L + w
y_valueCar = FL + v
x_valueObject = R + (w*2)
y_valueObject = FL + v
#ax.set_xlim(-2, 2)
#ax.set_ylim(-1.5, 1.5)
plt.show()
plt.pause(0.0001)
main()
| true |
6c90f2d16297967c6b573bb51568f7f297a309d9 | Python | dengpeiyou/python-selenium | /python的无头模式演示.py | UTF-8 | 424 | 2.9375 | 3 | [] | no_license | #本示例演示如果用CHrome无头模式打开并抓取百度首页标题
#coding:utf-8
from selenium import webdriver
opt=webdriver.ChromeOptions()
opt.add_argument('disable-infobars')
opt.add_argument('--start-maximized')
opt.add_argument('--headless')
opt.add_argument('--disable-gpu') # 谷歌文档加它避免bug
b=webdriver.Chrome(options=opt)
b.get("http://www.baidu.com")
print(b.title)
b.quit()
| true |
ffceff44f9896ae97bbe0e1e0b5fbfd0709d0fba | Python | heeewo/Python | /Practice/Datatypes_practice.py | UTF-8 | 3,151 | 3.71875 | 4 | [] | no_license | # 더러운 학업주의자들을 위한 프로그램 만들기
# Read ten student scores, then print the average score and the number of
# students who scored 90 or above.
Student_score = []
Sum = 0
Studend_highscore = []
for i in range(10):
    Student_score.append(int(input("학생의 성적을 입력하시오 :")))
    Sum += Student_score[i]
    if Student_score[i] >= 90:
        Studend_highscore.append(Student_score[i])
print(Sum/len(Student_score))
print(len(Studend_highscore))
######################################################################
# Find all integer triangles (a, b, c) with sides in [1, 50] that satisfy
# the Pythagorean theorem a**2 + b**2 == c**2.
triangle90 = []
for a in range(1, 51):
    for b in range(1, 51):
        for c in range(1, 51):
            if a**2 + b**2 == c**2:
                triangle90.append([a, b, c])
print(tuple(triangle90))
######################################################################
# Randomly fill two lecture rosters (10 and 15 unique names) from a shared
# name table, then print the students who attended both talks and their count.
import random
first_class = set()
second_class = set()
name_tableA = ["김철수", "박철수", "최철수", "이철수", "김맹구", "박맹구", "최맹구", "이맹구"]
name_tableB = ["김훈", "박훈", "최훈", "이훈", "김유리", "박유리", "최유리", "이유리"]
name_tableC = ["김짱구", "박짱구", "최짱구", "이짱구", "김영희", "박영희", "최영희", "이영희"]
name_tableD = ["김철희", "박철희", "최철희", "이철희", "김영수", "박영수", "최영수", "이영수"]
name_table = name_tableA + name_tableB + name_tableC + name_tableD
# Sets ignore duplicate picks, so keep sampling until the roster is full.
while(1):
    if len(first_class) == 10:
        break
    else:
        first_class.add(name_table[random.randint(0, 31)])
while(1):
    if len(second_class) == 15:
        break
    else:
        second_class.add(name_table[random.randint(0, 31)])
# Set intersection keeps only the names present on both rosters.
both = first_class & second_class
print(both, len(both))
#####################################################################
# Compute a photo's memory footprint from a 'width*height*bit-depth' string.
def Picxel(s):
    """Parse a 'width*height*depth' string and return width * height * depth (bits)."""
    dims = [int(part) for part in s.split('*')]
    return dims[0] * dims[1] * dims[2]
picxel = input("가로*세로*심도 순으로 입력하시오 :")
result = int(Picxel(picxel))
# 2**3 bits per byte, 2**13 bits per kilobyte, 2**23 bits per megabyte.
print(result, "bit\n", result/2**3, "byte\n", result/2**13, "kbyte")
print(result/2**23, "Mbyte")
#####################################################################
# Count the digits, spaces, and other characters in a long input string.
A = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
number = []
empty = []
String = []
def appenddata(num):
    """Classify the first character of num into the module-level buckets."""
    if num[0] in A:
        number.append(num[0])
    elif num[0] == ' ':
        empty.append(num[0])
    else:
        String.append(num[0])
def count_string(num):
    """Recursively walk the character list, classifying one character per call."""
    # Recurse while more than one character remains; classify the head each time.
    if len(num) > 1:
        appenddata(num)
        count_string(num[1:])
    else:
        appenddata(num)
string = list(input("장문의 문자열을 입력하시오"))
count_string(string)
print(number, len(number), empty, len(empty), String, len(String))
###############################################################
| true |
e0ac7b059e255765bb882f61d6b9f337782b61be | Python | KonovalenkoS/Go-Bot | /dlgo/agent/helpers.py | UTF-8 | 936 | 3.125 | 3 | [] | no_license | from dlgo.gotypes import Point
def is_point_an_eye(board, point, color):
    """Return True if `point` is an eye for `color` on `board`.

    An eye is an empty point whose four neighbors all hold friendly stones
    and which controls enough diagonal corners: when the point touches the
    board edge, all four corners (counting off-board ones) must be friendly
    or off-board; in the middle, at least three of four must be friendly.
    """
    # Eyes must be empty points.
    if board.get(point) is not None:
        return False
    # All adjacent points must contain friendly stones.
    for neighbor in point.neighbors():
        neighbor_color = board.get(neighbor)
        if neighbor_color != color:
            return False
    # Count control of the four diagonal corners.
    friendly_corners = 0
    off_board_corners = 0
    corners = [
        Point(point.row - 1, point.col - 1),
        Point(point.row - 1, point.col + 1),
        Point(point.row + 1, point.col - 1),
        Point(point.row + 1, point.col + 1)
    ]
    for corner in corners:
        if board.is_on_grid(corner):
            corner_color = board.get(corner)
            if corner_color == color:
                friendly_corners += 1
        else:
            # BUGFIX: this branch must pair with the `is_on_grid` check. In the
            # chunk reviewed it hung off the inner `corner_color == color`
            # test, so off-board corners were never counted and
            # `off_board_corners` instead counted on-grid enemy corners,
            # breaking eye detection on edges and in board corners.
            off_board_corners += 1
    if off_board_corners > 0:
        # Point is on the edge or in a corner of the board.
        return off_board_corners + friendly_corners == 4
    # Point is in the middle of the board.
    return friendly_corners >= 3
a7705639f108485ac7925dd993839d0b05e322bd | Python | mcfletch/pyconca-tictactoe | /test_training.py | UTF-8 | 2,431 | 3.03125 | 3 | [] | no_license | import unittest
import main
import numpy as np
class env:
    # Minimal stub mimicking a gym-style environment spec; passed to
    # main.build_model/train_model by the tests below.
    class observation_space:
        # 4-dimensional observation vector
        shape = (4,)
    class action_space:
        # 2 discrete actions
        n = 2
class TestNumerics(unittest.TestCase):
    """Exercise the training pipeline in `main` on tiny hand-built transition sets."""

    def test_reinforce_correct(self):
        """Training on records that reward action 0 should make the model prefer action 0."""
        records = [
            {
                'state':[0,0,0,0],
                'new_state':[0,0,0,0],
                'reward': 0,
                'action': 1,
                'done': False,
            },
        ]*5 + [
            {
                'state':[0,0,0,0],
                'new_state':[0,0,0,1],
                'reward': 1,
                'action': 0,
                'done': False,
            },
        ]* 5
        model = main.build_model(env)
        main.train_model( model, records, env, batch_size=64)
        prediction = main.predict(model,[0,0,0,0])
        assert np.argmax(prediction) == 0, prediction

    def test_predict_future_reward(self):
        """When predicting future rewards, we want to see the network give correct directions"""
        good_sequence = [
            ([0,0,0,0],1,[0,0,0,1]),
            ([0,0,0,1],0,[1,0,1,0]),
            ([1,0,1,0],1,[1,1,1,1]),
        ]
        bad_sequence = [
            ([0,0,0,0],0,[1,0,0,1]),
            ([1,0,0,1],1,[0,0,1,0]),
            ([0,0,1,0],1,[0,1,1,1]),
        ]
        def expand(r, final_reward):
            """Turn (state, action, new_state) triples into training records."""
            results = []
            for i,(state,action,new_state) in enumerate(r):
                record = {
                    'state': np.array(state,'f'),
                    'new_state': np.array(new_state,'f'),
                    'action': action,
                    # BUGFIX: was `i >= len(r)`, which is never True for an
                    # enumerate index, so no transition was ever marked
                    # terminal; the final step of each episode should be.
                    'done': i == len(r) - 1,
                    'reward': final_reward
                }
                results.append(record)
            assert results[-1]['reward'] == final_reward
            return results
        records = expand(good_sequence,1.0) + expand(bad_sequence,-1.0)
        print(records)
        records = records * 256
        model = main.build_model(env)
        main.train_model( model, records, env, batch_size=8)
        for (state,action,new_state) in good_sequence:
            prediction = main.predict(model,state)
            assert np.argmax(prediction) == action, (state,action,prediction)
        for (state,action,new_state) in bad_sequence:
            prediction = main.predict(model,state)
            assert np.argmax(prediction) != action, (state,action,prediction)
| true |
2cef1bf0fcb1be86be8ecd4366be73f155c21105 | Python | AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges | /Challenges/dailyTemperature.py | UTF-8 | 5,183 | 4.6875 | 5 | [] | no_license | """
Given a list of daily temperatures T, return a list such that, for each day in the input, tells you how many days you would have to wait until a warmer temperature. If there is no future day for which this is possible, put 0 instead.
For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
Note: The length of temperatures will be in the range [1, 30000]. Each temperature will be an integer in the range [30, 100].
Solution
Intuition
The problem statement asks us to find the next occurrence of a warmer temperature. Because temperatures can only be in [30, 100], if the temperature right now is say, T[i] = 50, we only need to check for the next occurrence of 51, 52, ..., 100 and take the one that occurs soonest.
Algorithm
Let's process each i in reverse (decreasing order). At each T[i], to know when the next occurrence of say, temperature 100 is, we should just remember the last one we've seen, next[100].
Then, the first occurrence of a warmer value occurs at warmer_index, the minimum of next[T[i]+1], next[T[i]+2], ..., next[100].
Stack
Intuition
Consider trying to find the next warmer occurrence at T[i]. What information (about T[j] for j > i) must we remember?
Say we are trying to find T[0]. If we remembered T[10] = 50, knowing T[20] = 50 wouldn't help us, as any T[i] that has its next warmer occurrence at T[20] would have it at T[10] instead. However, T[20] = 100 would help us, since if T[0] were 80, then T[20] might be its next warmest occurrence, while T[10] couldn't.
Thus, we should remember a list of indices representing a strictly increasing list of temperatures. For example, [10, 20, 30] corresponding to temperatures [50, 80, 100]. When we get a new temperature like T[i] = 90, we will have [5, 30] as our list of indices (corresponding to temperatures [90, 100]). The most basic structure that will satisfy our requirements is a stack, where the top of the stack is the first value in the list, and so on.
Algorithm
As in Approach #1, process indices i in descending order. We'll keep a stack of indices such that T[stack[-1]] < T[stack[-2]] < ..., where stack[-1] is the top of the stack, stack[-2] is second from the top, and so on; and where stack[-1] > stack[-2] > ...; and we will maintain this invariant as we process each temperature.
After, it is easy to know the next occurrence of a warmer temperature: it's simply the top index in the stack.
Here is a worked example of the contents of the stack as we work through T = [73, 74, 75, 71, 69, 72, 76, 73] in reverse order, at the end of the loop (after we add T[i]). For clarity, stack only contains indices i, but we will write the value of T[i] beside it in brackets, such as 0 (73).
When i = 7, stack = [7 (73)]. ans[i] = 0.
When i = 6, stack = [6 (76)]. ans[i] = 0.
When i = 5, stack = [5 (72), 6 (76)]. ans[i] = 1.
When i = 4, stack = [4 (69), 5 (72), 6 (76)]. ans[i] = 1.
When i = 3, stack = [3 (71), 5 (72), 6 (76)]. ans[i] = 2.
When i = 2, stack = [2 (75), 6 (76)]. ans[i] = 4.
When i = 1, stack = [1 (74), 2 (75), 6 (76)]. ans[i] = 1.
When i = 0, stack = [0 (73), 1 (74), 2 (75), 6 (76)]. ans[i] = 1.
Complexity Analysis
Time Complexity: O(N), where NN is the length of T and WW is the number of allowed values for T[i]. Each index gets pushed and popped at most once from the stack.
Space Complexity: O(W). The size of the stack is bounded as it represents strictly increasing temperatures.
"""
class Solution:
    """Monotonic-stack solution to LeetCode 739 "Daily Temperatures"."""

    def dailyTemperatures(self, T: list[int]) -> list[int]:
        """For each day, return how many days until a strictly warmer temperature (0 if none).

        Scans right-to-left keeping a stack of indices whose temperatures
        strictly increase toward the bottom; the stack top is always the
        nearest warmer day to the right. O(n) time, O(n) space.
        """
        # BUGFIX: the original annotated with typing.List without importing
        # it, which raises NameError when the class body executes; builtin
        # generics (list[int], Python 3.9+) need no import.
        result = [0]*len(T)
        stack = []  # indices, temperatures strictly increasing toward the bottom
        for i in range(len(T)-1, -1, -1):
            # Drop days that are not warmer than today; they can never be the answer.
            while stack and T[i] >= T[stack[-1]]:
                stack.pop()
            if stack:
                result[i] = stack[-1] - i
            stack.append(i)
        return result
"""
Given a list of daily temperatures T, return a list such that, for each day in the input, tells you how many days you would have to wait until a warmer temperature. If there is no future day for which this is possible, put 0 instead.
For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73], your output should be [1, 1, 4, 2, 1, 1, 0, 0].
Note: The length of temperatures will be in the range [1, 30000]. Each temperature will be an integer in the range [30, 100].
"""
"""
Stack Approach
Time: O(N) where NN is the length of T and WW is the number of allowed values for T[i]. Each index gets pushed and popped at most once from the stack.
Space: O(W). The size of the stack is bounded as it represents strictly increasing temperatures.
Intuition:
Remember a list of indices representing a strictly increasing list of temperatures.
"""
class Solution(object):
    """Monotonic-stack solution for the daily-temperatures problem."""

    def dailyTemperatures(self, T):
        """Return, for each day in T, the wait (in days) until a strictly warmer day, or 0."""
        if not T:
            return []
        answer = [0] * len(T)
        pending = []  # indices whose temperatures strictly increase toward the bottom
        for idx in reversed(range(len(T))):
            # Remove days no warmer than today; they can never be the answer.
            while pending and T[pending[-1]] <= T[idx]:
                pending.pop()
            if pending:
                answer[idx] = pending[-1] - idx
            pending.append(idx)
        return answer
| true |
d822e57d093836d58e777438d8589f4ab43a7c0e | Python | felixdittrich92/fastapi_parts | /part_1_Basics/05_inbound_data/services/openweather_service.py | UTF-8 | 2,067 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | from typing import Optional, Tuple
import httpx
from httpx import Response
from infrastructure import weather_cache
from models.validation_error import ValidationError
api_key: Optional[str] = None
async def get_report_async(city: str, state: Optional[str], country: str, units: str) -> dict:
    """Return the 'main' weather block for a location, using the cache when possible.

    Validates/normalizes the inputs, checks the in-process weather cache, and
    otherwise queries the OpenWeatherMap API (raising ValidationError carrying
    the upstream status code on a non-200 response) and caches the result.
    """
    city, state, country, units = validate_units(city, state, country, units)
    # Serve from cache when this exact query has been fetched before.
    if forecast := weather_cache.get_weather(city, state, country, units):
        return forecast
    # OpenWeatherMap expects 'city,state,country' or 'city,country'.
    if state:
        q = f'{city},{state},{country}'
    else:
        q = f'{city},{country}'
    url = f'https://api.openweathermap.org/data/2.5/weather?q={q}&appid={api_key}&units={units}'
    async with httpx.AsyncClient() as client:
        resp: Response = await client.get(url)
    if resp.status_code != 200:
        raise ValidationError(resp.text, status_code=resp.status_code)
    data = resp.json()
    forecast = data['main']
    weather_cache.set_weather(city, state, country, units, forecast)
    return forecast
def validate_units(city: str, state: Optional[str], country: Optional[str], units: str) -> \
        Tuple[str, Optional[str], str, str]:
    """Normalize the weather-query parameters and reject malformed ones.

    Every value is trimmed and lower-cased; a missing country defaults to
    'us'. Raises ValidationError (HTTP 400) for a country or state that is
    not a two-letter code, or for units outside standard/metric/imperial.
    """
    city = city.lower().strip()

    # Country: default to the US, otherwise normalize and require two letters.
    country = country.lower().strip() if country else "us"
    if len(country) != 2:
        error = f"Invalid country: {country}. It must be a two letter abbreviation such as US or GB."
        raise ValidationError(status_code=400, error_msg=error)

    # State is optional (US only): normalize first, then validate length.
    if state:
        state = state.strip().lower()
    if state and len(state) != 2:
        error = f"Invalid state: {state}. It must be a two letter abbreviation such as CA or KS (use for US only)."
        raise ValidationError(status_code=400, error_msg=error)

    # Units: normalize, then check membership in the allowed set.
    if units:
        units = units.strip().lower()
    valid_units = {'standard', 'metric', 'imperial'}
    if units not in valid_units:
        error = f"Invalid units '{units}', it must be one of {valid_units}."
        raise ValidationError(status_code=400, error_msg=error)

    return city, state, country, units
| true |
58a3ae1aa927bf70c3f87682ee90450c9cfb40c3 | Python | Sever80/Zadachi | /12.py | UTF-8 | 460 | 3.40625 | 3 | [] | no_license | # Создать множество.
# Create an immutable set (frozenset).
# Perform the union of the created sets.
# Perform the intersection of the created sets.
# (Task list translated from Russian; the first task -- create a set -- is next.)
set_1= {3, 783, 'fine', 67, 3}
print(set_1)
frozenset1=frozenset([23,674,'line',783])
print(frozenset1)
# Union of a set and a frozenset yields a plain set.
print(set_1|frozenset1)
# Intersection: only 783 appears in both.
print(set_1&frozenset1)
| true |
81b6ec461dca1659629f5830bfc45274e665e82f | Python | dev-fahim/retail_app | /owners/products/api/utils.py | UTF-8 | 450 | 2.78125 | 3 | [] | no_license | import random
import time
def get_random_int_id():
    """Generate a pseudo-random integer ID from random digits plus the current time.

    The ID is int('<5 random digits>' + '<day><year - min - sec - month - hour>')
    minus 50,000,000, matching the original scheme. Not guaranteed unique.
    """
    now = time.localtime()
    # Time-derived suffix; the year term dominates, so the subtraction stays positive.
    time_part = f'{now.tm_mday}{now.tm_year - now.tm_min - now.tm_sec - now.tm_mon - now.tm_hour}'
    # BUGFIX: the old code sliced digits out of str(random.random()), which
    # raises ValueError when the float prints in scientific notation (e.g.
    # '9.8e-05') and produces variable-width IDs when the fraction has fewer
    # than five digits. Draw exactly five random digits instead.
    random_part = f'{random.randrange(100000):05d}'
    return int(random_part + time_part) - 50000000
| true |
11c424559555e17a28b9e6913ba81957702b9b78 | Python | CasAndreu/legex | /memberParser.py | UTF-8 | 2,351 | 2.890625 | 3 | [] | no_license | ##################
# Member Parser
##################
# Takes a .yaml file containing info for a MoC
# converts it into a congress-by-congress record and includes various ids and
# attributes about the person
# returns a dictionary, ready to be loaded into db
def memberParser(leg):
    """Flatten one legislator's YAML record into a list of per-congress dicts.

    Each dict merges the member's ids/bio with one House term or one Senate
    congress, keeping only congresses after the 92nd. Python 2 code: note the
    print statement and dict(a.items() + b.items()) merging.
    congPicker/congPickerY are defined elsewhere in this module -- presumably
    they map a start date / year to a congress number (TODO confirm).
    """
    memb = []
    if int(leg['terms'][-1]['start'][0:4]) > 1965: # skip all members who finished before 1973
        print leg['name']['last']
        # Default the THOMAS id to 0 when absent.
        if 'thomas' not in leg['id']:
            leg['id']['thomas'] = 0
        if 'icpsr' in leg['id']: # pull out basic member bio information
            if 'gender' in leg['bio']:
                m = {'icpsr':leg['id']['icpsr'],'thomas':leg['id']['thomas'],'govtrack':leg['id']['govtrack'],'first':leg['name']['first'],
                'last':leg['name']['last'],'gender':leg['bio']['gender'] }
            else:
                m = {'icpsr':leg['id']['icpsr'],'thomas':leg['id']['thomas'],'govtrack':leg['id']['govtrack'],'first':leg['name']['first'],
                'last':leg['name']['last'],'gender':'M' }
        else:
            # No ICPSR id: record 0 as a placeholder.
            if 'gender' in leg['bio']:
                m = {'icpsr':0,'thomas':leg['id']['thomas'],'govtrack':leg['id']['govtrack'],'first':leg['name']['first'],
                'last':leg['name']['last'],'gender':leg['bio']['gender'] }
            else:
                m = {'icpsr':0,'thomas':leg['id']['thomas'],'govtrack':leg['id']['govtrack'],'first':leg['name']['first'],
                'last':leg['name']['last'],'gender':'M' }
        # Build one record per House term, or per congress for Senate terms.
        for term in leg['terms']:
            if term['type'] == 'rep':
                if 'district' in term:
                    t = {'type': term['type'],'state':term['state'],'start':term['start'],'end':term['end'],
                    'district': term['district'],'class':'NA','party':term['party'],'cong': congPicker(term['start'])}
                else:
                    t = {'type': term['type'],'state':term['state'],'start':term['start'],'end':term['end'],
                    'district': 0,'class':'NA','party':term['party'],'cong': congPicker(term['start'])}
                if t['cong']>92:
                    tt = dict(m.items() + t.items())
                    memb.append(tt)
            elif term['type'] == 'sen':
                s = int(term['start'][0:4]) # starting year
                e = int(term['end'][0:4]) # ending year
                # A six-year Senate term spans several congresses; dedupe them.
                c = list(set([congPickerY(year) for year in range(s,e)]))
                for no in c:
                    t = {'type': term['type'],'state':term['state'],'start':term['start'],'end':term['end'],
                    'district': 0,'class':term['class'],'party':term['party'],'cong':no}
                    if t['cong']>92:
                        tt = dict(m.items() + t.items())
                        memb.append(tt)
    return(memb)
| true |
2b4b10c628e1da1c67ed583d880ca96db55324f2 | Python | nishanthrs/WallStreetBetTendies | /tendies/analytics/get_moving_volatility.py | UTF-8 | 922 | 2.53125 | 3 | [] | no_license | import collections
import datetime
import json
import requests
from string import printable
import sys
import urllib
sys.path.append('..')
import libs.db_helpers as db_helpers
def get_moving_volatility_res(stock_symbol, start_date, end_date):
    """Fetch moving-volatility rows for stock_symbol between two 'YYYY-MM-DD' dates.

    Runs the calVolatility2 stored procedure, then filters its rows by symbol
    and timestamp in Python. Returns a list of row dicts.
    """
    conn = db_helpers.connect_to_postgres()
    cur = conn.cursor()
    # NOTE(review): the procedure is invoked with hard-coded dates instead of
    # the start_date/end_date arguments -- confirm whether that is intentional.
    cur.callproc('calVolatility2', ['1999-11-01 00:00:00', '2001-12-07 00:00:00'])
    conn.commit()
    # Parse the caller-supplied bounds for the in-Python filtering below.
    start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    res = cur.fetchall()
    final_res = []
    for row in res:
        # Each fetched row wraps the payload dict in a 1-tuple.
        row = row[0]
        curr_stock_symbol = row['stock_symbol']
        timestamp = datetime.datetime.strptime(row['ts'], '%Y-%m-%d %H:%M:%S')
        if curr_stock_symbol == stock_symbol and timestamp >= start_date and timestamp <= end_date:
            final_res.append(row)
    return final_res
| true |
c262f274e61373607654e1d36d53c960e1d15969 | Python | MethodJiao/VsProjectRename | /change_word.py | UTF-8 | 2,265 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@Author : JiaoJingWei
@File : change_word.py
@Time : 2020/5/19 9:54 上午
@desc :
"""
import os
import chardet
def iter_files(old_kw, new_kw, root_dir):
    """
    Walk the tree rooted at root_dir, replacing old_kw with new_kw in every
    file's contents, in file names, and in directory names.
    :param old_kw: old keyword (the project name being replaced)
    :param new_kw: new keyword (the replacement project name)
    :param root_dir: absolute path of the root directory
    :return: None
    """
    for root, dirs, files in os.walk(root_dir, topdown=False):
        # Walk bottom-up so renaming a directory never invalidates deeper paths.
        for file_name in files:
            old_file_path = os.path.join(root, file_name)
            file_data = ""
            # Detect the file's encoding so it can be re-read and re-written faithfully.
            with open(old_file_path, 'rb')as file:
                curr_encode = chardet.detect(file.read())['encoding']
            # Read the file line by line, substituting the keyword in each line.
            with open(old_file_path, 'r', encoding=curr_encode, errors='ignore') as f:
                for line in f.readlines():
                    new_line = line.replace(old_kw, new_kw)
                    file_data += new_line
            # Rewrite the file (note: rewritten even when nothing changed).
            with open(old_file_path, 'w', encoding=curr_encode, errors='ignore') as f:
                f.write(file_data)
            # If the keyword appears in the file name, rename the file too.
            if old_kw in file_name:
                new_file_name = file_name.replace(old_kw, new_kw)
                new_file_path = os.path.join(root, new_file_name)
                os.rename(old_file_path, new_file_path)
        for dir_name in dirs:
            old_dir_path = os.path.join(root, dir_name)
            # If the keyword appears in the directory name, rename the directory.
            if old_kw in dir_name:
                new_dir_name = dir_name.replace(old_kw, new_kw)
                new_dir_path = os.path.join(root, new_dir_name)
                os.rename(old_dir_path, new_dir_path)
def run():
    """Prompt for the project root and the old/new project names, then rewrite the tree."""
    print('输入项目文件夹绝对路径《会修改此文件夹层级之下所有匹配关键字》')
    root_dir = input().replace('\\', '/')
    print('输入原项目名称')
    old_name = input()
    print('输入新项目名称')
    new_name = input()
    iter_files(old_name, new_name, root_dir)


if __name__ == '__main__':
    run()
| true |
283a47abba6a16b9bb577cf2ed41a7e12da47960 | Python | oisincar/cnp-5 | /cnp/util.py | UTF-8 | 1,251 | 3.1875 | 3 | [] | no_license | import math
import cmath
def dist(c1, c2):
    """
    Euclidean distance between two points of the complex plane.
    """
    delta = c2 - c1
    return math.sqrt(delta.imag * delta.imag + delta.real * delta.real)
def allPairs(lst):
    """
    All unordered pairs of a list, ordered by the first element's position.
    """
    pairs = []
    for i, first in enumerate(lst):
        for second in lst[i + 1:]:
            pairs.append((first, second))
    return pairs
def allSumPairs(lst):
    """
    Sums of all unordered pairs in a list.
    """
    sums = []
    for i in range(len(lst)):
        for j in range(i + 1, len(lst)):
            sums.append(lst[i] + lst[j])
    return sums
def allProd(ws):
    """
    Products of every non-empty subset of ws, in the original recursive order.
    """
    subsetProds = []
    for w in reversed(ws):
        subsetProds = [w] + subsetProds + [w * p for p in subsetProds]
    return subsetProds
def cont(pp, lst, approx=True):
    """
    Check if a point is contained in a list (naively).
    Set approx=False to check sympy values simplify to exactly 0.
    `pp` and each entry of `lst` are (formula, complex value) pairs.
    """
    a,aV = pp
    for (b, bV) in lst:
        # Exact value match first.
        if aV == bV:
            return True
        # Otherwise treat values within 1e-9 of each other as a match.
        if dist(aV, bV) < 0.000000001:
            # To speed up code... Disable exact checking.
            # Check formulas simplify to exactly 0
            # NOTE(review): `sp` (sympy) is not imported in this chunk, so the
            # approx=False path raises NameError unless it is imported
            # elsewhere -- confirm.
            if approx or sp.simplify(sp.Abs(a-b)) == 0:
                return True
    return False
def csToPnts(cs):
    """
    Convert complex numbers to (x, y) coordinate pairs.
    """
    pnts = []
    for c in cs:
        pnts.append((c.real, c.imag))
    return pnts
| true |
c0a0d366b032754e0e4b5b86a7f2e75bed46ed9a | Python | christian-mann/My-Little-Tower-Defense | /Pony.py | UTF-8 | 325 | 2.75 | 3 | [] | no_license |
import pygame
class Pony(pygame.sprite.Sprite):
    """Base sprite for tower-defense ponies.

    NOTE(review): __init__ reads `self.__class__.cost`, which Pony itself
    never defines -- each concrete subclass must declare a class attribute
    `cost`, or instantiation raises AttributeError.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.ghost = False
        self.cost = self.__class__.cost
    def makeGhost(self):
        """Set the ghost flag."""
        self.ghost = True
    def makeNotGhost(self):
        """Clear the ghost flag."""
        self.ghost = False
| true |
e7ec8cddf40fed874de77830634526ad789646be | Python | BlazZupan/uozp-zapiski | /gradivo/gradient-descent-one.py | UTF-8 | 705 | 3.75 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def J(x):
    """Quadratic objective with minimum value 3 at x = 6.5."""
    shifted = x - 6.5
    return shifted ** 2 + 3
def derivative(f, x, eps=1e-3):
    """Central-difference approximation of f'(x) with half-step eps."""
    forward = f(x + eps)
    backward = f(x - eps)
    return (forward - backward) / (2 * eps)
def find_min(f, x0=0, alpha=0.1, cond=1e-3, verbose=True):
    """Minimize f by gradient descent, stopping when the step size drops below cond."""
    current = x0
    steps = 0
    while True:
        steps += 1
        proposed = current - alpha * derivative(f, current)
        if abs(proposed - current) < cond:
            break
        current = proposed
    if verbose:
        print(f"Iterations {steps}")
    return proposed
# Plot the objective J over [0, 10) and save the curve to f.pdf.
a = np.arange(0, 10, 0.1)
y = J(a)
plt.plot(a, y, "k-")
plt.savefig("f.pdf")
# Find the minimizer by gradient descent (expected near x = 6.5).
theta = find_min(J, cond=1e-3)
print(theta)
| true |
c90943ac4f8bf4e7690fdd9fdc963f521111eaa9 | Python | getterk96/python-spider | /getPic.py | UTF-8 | 3,054 | 2.71875 | 3 | [] | no_license | # coding=utf-8
import urllib
import urllib2
import re
import sys
import os
import requests
from pyquery import PyQuery as Pq
class PicSpider(object):
    """Python 2 crawler: Baidu-search a keyword, resolve each result site, and
    download every <img src=...> it finds into a per-site directory."""
    def __init__(self, searchText):
        self.url = "http://www.baidu.com/baidu?wd=%s&tn=monline_4_dg" % searchText
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/600.5.17 (KHTML, like Gecko) Version/8.0.5 Safari/600.5.17"}
        self._page = None
        reload(sys)
        sys.setdefaultencoding('utf8')
    def baiduSearch(self):
        """Fetch and cache the Baidu result page; return the result links."""
        if not self._page:
            r = requests.get(self.url, headers=self.headers)
            self._page = Pq(r.text)
        return [site.attr('href') for site in self._page('div.result.c-container h3.t a').items()]
    def getUrls(self):
        """Resolve Baidu redirect links into real site URLs in self.siteUrls."""
        tmpUrls = self.baiduSearch()
        self.siteUrls = []
        for tmpUrl in tmpUrls:
            tmpPage = requests.get(tmpUrl, allow_redirects=False)
            if tmpPage.status_code == 200:
                # Target URL is embedded in a meta-refresh style URL='...' snippet.
                urlMatch = re.search(r'URL=\'(.*?)\'', tmpPage.text.encode('utf-8'), re.S)
                self.siteUrls.append(urlMatch.group(1))
            elif tmpPage.status_code == 302:
                self.siteUrls.append(tmpPage.headers.get('location'))
            else:
                print('An invalid url in search result.')
    def getContents(self,siteUrl):
        """Return every <img src="..."> URL found in the page at siteUrl."""
        response = requests.get(siteUrl, headers=self.headers)
        response.encoding = 'utf-8'
        pattern = re.compile(r'<img src="(.*?)"',re.S)
        items = re.findall(pattern,response.text)
        contents = []
        for item in items:
            contents.append(item)
        return contents
    def saveImgs(self,images,name):
        """Save each image URL into directory `name`, numbering files 1..n."""
        number = 1
        print u"发现",name,u"共有",len(images),u"张图片"
        for imageURL in images:
            # Keep the original file extension.
            splitPath = imageURL.split('.')
            fileName = name + "/" + str(number) + "." + splitPath[-1]
            self.saveImg(imageURL,fileName)
            number += 1
    def saveImg(self,imageUrl,fileName):
        """Download one absolute http(s) image URL to fileName (best effort)."""
        if re.match(r'^https?:/{2}\w.+$', imageUrl):
            req = urllib2.Request(imageUrl)
            req.add_header("User-Agent",self.headers["User-Agent"])
            req.add_header("Referer", imageUrl)
            try:
                u = urllib2.urlopen(req)
            except:
                # Best effort: silently skip images that fail to download.
                return
            data = u.read()
            f = open(fileName, 'wb')
            f.write(data)
            f.close()
            print(imageUrl+' ok.')
    def mkdir(self,path):
        """Create directory `path` if it does not already exist."""
        isExists=os.path.exists(path)
        if not isExists:
            print u"为\"",path,u"\"新建文件夹:"
            os.makedirs(path)
    def getPics(self):
        """Run the whole pipeline: search, resolve sites, scrape and save images."""
        self.getUrls()
        for siteUrl in self.siteUrls:
            print(siteUrl+":")
            contents = self.getContents(siteUrl)
            # siteUrl.strip()[8:-1] drops 'https://' and a trailing slash to name the folder.
            self.mkdir(siteUrl.strip()[8:-1])
            self.saveImgs(contents,siteUrl.strip()[8:-1])
# Restore the default codec and run the spider (here with an empty query).
sys.setdefaultencoding('ascii')
s=PicSpider("")
s.getPics()
| true |
c128d7a056b137474ec08625c3ed2b1f208d42b7 | Python | garrettdieckmann/timekeepingapi | /hourglass.py | UTF-8 | 5,209 | 2.640625 | 3 | [] | no_license | # Flask imports
from flask import Flask, request, render_template
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.security import check_password_hash, generate_password_hash
from functools import wraps
# Database imports
# DB creation
from db.createdevdb import Database_creation
# SQL tables
from db.createdevdb import User, Customer_Information, Category, Tag, Category_Tag, Time_Event, Time_Tag
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# JSON import
from flask import json, jsonify
# Create the database engine
dbc = Database_creation()
# SQLAlchemy engine for the dev database (echo=True logs every SQL statement).
engine = create_engine(dbc.create_engine_string(), echo=True)
app = Flask(__name__)
### BASIC SITE ###
# Index/home page
@app.route('/')
def index():
    """Render the home page."""
    return render_template('index.html')
### END BASIC SITE ###
### BEGIN AUTH SECTION ###
# Authorization - source: http://blog.luisrei.com/articles/flaskrest.html
def check_auth(username, password):
    """Return True if `username` exists and `password` matches its stored hash."""
    # Check if the user is in the database
    Session = sessionmaker(bind=engine)
    session = Session()
    # Returns on the first matching row; falls through to an implicit None
    # (falsy, i.e. auth failure) when no user matches.
    for user in session.query(User).filter_by(user_name=username):
        return (user.user_name == username and check_password_hash(user.password, password))
# Return error for bad authentication or bad authorization
def auth_failure(failure_type):
    """Build a 401 JSON response for a failed authentication/authorization check.

    `failure_type` is 'Authorization' or 'Authenticate'; any other value gets
    a generic message.
    """
    if failure_type == 'Authorization':
        message = {'message': "Requires Authorization"}
    elif failure_type == 'Authenticate':
        message = {'message': "Requires Authentication"}
    else:
        # BUGFIX: `message` was left unbound for unexpected values, raising
        # UnboundLocalError at the jsonify call below.
        message = {'message': "Unauthorized"}
    resp = jsonify(message)
    resp.status_code = 401
    return resp
# Basic admin validation
def requires_admin(username, password):
    """True only for the hard-coded admin credential pair."""
    return (username, password) == ('admin', 'secret')
def requires_auth(f):
    """Decorator: reject the request with 401 unless valid Basic credentials are supplied."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        # No credentials supplied at all.
        if not auth:
            # BUGFIX: was `auth_failure(Authenicate)` -- an undefined (and
            # misspelled) bare name raising NameError; pass the string key.
            return auth_failure('Authenticate')
        # Credentials supplied but invalid.
        elif not check_auth(auth.username, auth.password):
            # BUGFIX: was the undefined bare name `Authenticate`.
            return auth_failure('Authenticate')
        return f(*args, **kwargs)
    return decorated
# Check if user is requesting their own data
def check_privs(userid, username):
    """Return True when `username`'s user_id equals `userid` (users may only read their own data)."""
    Session = sessionmaker(bind=engine)
    session = Session()
    # Returns on the first matching row; falls through to an implicit None
    # (falsy, i.e. not privileged) when the user is unknown.
    for user in session.query(User).filter_by(user_name=username):
        return (user.user_id == int(userid))
### END AUTH SECTION ###
### BEGIN API ROUTES ###
# Get all users in the database
@app.route('/api/users/', methods = ['GET'])
@requires_auth
def db_users():
    """Return the first/last names of every customer as JSON."""
    Session = sessionmaker(bind=engine)
    session = Session()
    builder = []
    for user in session.query(Customer_Information).all():
        data = {'firstname':user.first_name,
                'lastname':user.last_name}
        builder.append(data)
    return jsonify(users=builder)
# Show Categories for a user
@app.route('/api/category/user/<userid>/', methods = ['GET'])
@requires_auth
def db_user_categories(userid):
    """Return the authenticated user's categories; 401 when requesting someone else's."""
    # Check if user is accessing their own data
    if check_privs(userid, request.authorization.username):
        Session = sessionmaker(bind=engine)
        session = Session()
        builder = []
        # All categories for a specific user
        for category in session.query(Category).filter_by(user_id=userid):
            data = {'category_name':category.category_name,
                    'category_description':category.category_description}
            builder.append(data)
        return jsonify(categories=builder)
    # User doesn't have correct privileges.
    # BUGFIX: was `auth_failure(Authorize)` -- an undefined bare name raising
    # NameError; pass the string key auth_failure() understands.
    return auth_failure('Authorization')
# Create user/password combo
@app.route('/api/user/login/', methods = ['POST'])
def db_user_pw():
    """Create a new user/password pair; only the hard-coded admin may call this."""
    #TODO: Create this check as a decorator
    auth = request.authorization
    if not auth:
        # BUGFIX: was `auth_failure(Authenticate)` -- an undefined bare name --
        # and the response was never returned, so the view returned None.
        return auth_failure('Authenticate')
    else:
        if requires_admin(auth.username, auth.password):
            # Auth successful - Insert user into DB
            Session = sessionmaker(bind=engine)
            session = Session()
            # Mimic'ing a FORM
            new_username = request.form['username']
            new_pass = request.form['password']
            # Create the user object; only the password hash is stored.
            new_user = User(new_username, generate_password_hash(new_pass))
            # Save new user to the database
            session.add(new_user)
            session.flush()
            session.commit()
            # Creation successful
            resp = jsonify({'creation': "Successful"})
            resp.status_code = 200
            return resp
        else:
            # BUGFIX: undefined bare name `Authorize` and missing return, as above.
            return auth_failure('Authorization')
# User specifics
@app.route('/api/user/<userid>/', methods = ['GET'])
@requires_auth
def db_user(userid):
    """Return one customer's profile as JSON; 404 if unknown, 401 if not the caller's own."""
    if check_privs(userid, request.authorization.username):
        Session = sessionmaker(bind=engine)
        session = Session()
        # Using 'get' because only fetching 1 row for a specific user
        user = session.query(Customer_Information).get(userid)
        # Check if anything returned, if not 404
        if(user):
            return jsonify(user={'firstname':user.first_name, 'lastname':user.last_name, 'age':user.age, 'email':user.email, 'city':user.city, 'state':user.state, 'joined':str(user.date_joined)})
        else:
            return not_found()
    # BUGFIX: was `auth_failure(Authorize)` -- an undefined bare name raising
    # NameError; pass the string key auth_failure() understands.
    return auth_failure('Authorization')
### END API ROUTES ###
# Internal 404 to API
@app.errorhandler(404)
def not_found(error=None):
    """JSON 404 response; also called directly for missing API resources."""
    message = {
        'status' : 404,
        'message' : 'Not Found: ' + request.url,
    }
    resp = jsonify(message)
    resp.status_code = 404
    return resp
# Respect X-Forwarded-* headers when running behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
    app.run()
| true |
ab75f39c57de5a18f5482d3ec32628096d0b3229 | Python | mnk343/Automate-the-Boring-Stuff-with-Python | /partI/ch1/hello.py | UTF-8 | 228 | 4.1875 | 4 | [] | no_license | print("Hello World!!")
print("Whats your name?")
name = input()
print("Good to meet you {0}".format(name))
print(len(name))
print("Your age?")
age = input()
print("You will be {:d} years old next year ".format(int(age)+1)) | true |
98b86aeef50913ac9ccd0c8dff8396d87ae4c98f | Python | JIEunnnnn/Backjoon_Notes | /정렬/좌표압축.py | UTF-8 | 750 | 3.453125 | 3 | [] | no_license | #백준18870 좌표압축
# For each input value, print how many distinct values are smaller than it
# (coordinate compression; Baekjoon 18870).
#
# The first attempt (below the separator) timed out because of the nested
# for loops -- use a dictionary lookup instead!
#
num = input()
list_num = list(map(int, input().split()))
sort_list = sorted(set(list_num))
# Map each distinct value to its rank among the sorted distinct values.
answer = {x : y for y, x in enumerate(sort_list)}
#print(answer)
for i in list_num :
    print(answer[i], end=" ") # print space-separated instead of one value per line!
=========================================
#1차시도 시간초과발생
num = input()
list_num = list(map(int, input().split()))
sort_list = sorted(set(list_num))
answer = []
for i in list_num :
for idx, j in enumerate(sort_list) :
if j >= i :
answer.append(str(idx))
break
print(' '.join(answer))
| true |
7f144c5b1e49b4164efd1af9c008b2727a074451 | Python | rajansaini691/gtzan-beat-tracking | /clean_data.py | UTF-8 | 3,043 | 2.609375 | 3 | [] | no_license | """
Parse the json beat annotation records and convert to tfrecord
"""
from pathlib import Path
import os
from scipy.io import wavfile
import numpy as np
import json
import tensorflow as tf
from cfg import *
# FIXME Need to refactor into proper functions and stuff
Path(numpy_annotations_root).mkdir(exist_ok=True, parents=True)
def json_to_output_vector(json_file_path, wav_file_path):
    """Convert one JAMS-style beat-annotation file into a per-frame target vector.

    Returns an array with one entry per Fourier frame of the (resampled)
    audio: 1 at frames containing a beat, with the immediate neighbours
    intended to carry half weight. Raises ValueError when the JSON has no
    'beat' annotation.
    """
    fs, audio_data = wavfile.read(wav_file_path)
    # Number of samples after resampling to required SAMPLE_RATE
    num_samples = int(SAMPLE_RATE / fs * len(audio_data))
    # Input vector length
    num_fourier_samples = int(num_samples / SAMPLE_SIZE)
    # Ground truth needs same length as input, since we're doing seq2seq
    # NOTE(review): with dtype uint8 the 0.5 neighbour weights below truncate
    # to 0; downstream class counting assumes binary labels, so switching to
    # float32 would need a coordinated change -- confirm intent.
    output_annotations = np.zeros(num_fourier_samples, dtype=np.uint8)
    with open(json_file_path) as f:
        json_data = json.load(f)
        # There should also be a downbeat annotation type
        # TODO Refactor to use filter
        for annotation in json_data['annotations']:
            if annotation['sandbox']['annotation_type'] == 'beat':
                for datapoint in annotation['data']:
                    seconds_from_start = float(datapoint['time'])
                    samples_from_start = seconds_from_start * SAMPLE_RATE
                    fourier_samples_from_start = int(samples_from_start / SAMPLE_SIZE)
                    if fourier_samples_from_start < len(output_annotations):
                        output_annotations[fourier_samples_from_start] = 1
                    if fourier_samples_from_start < len(output_annotations) - 1:
                        output_annotations[fourier_samples_from_start + 1] = 0.5
                    if fourier_samples_from_start > 0 \
                            and fourier_samples_from_start < len(output_annotations):
                        output_annotations[fourier_samples_from_start - 1] = 0.5
                # Done after the first 'beat' annotation.
                return output_annotations
    # BUGFIX: `raise("...")` raises TypeError (a str is not an exception);
    # raise a real exception carrying the offending path instead.
    raise ValueError("No 'beat' annotation found in " + json_file_path)
def update_metadata(metadata, beat_annotation_array):
    """Accumulate corpus-wide statistics for one track's label vector.

    Updates *metadata* in place: the longest sequence length seen so far
    and the running count of each label class (assumed to be 0 and 1).

    Args:
        metadata: Dict with keys ``'max_sequence_length'`` (int) and
            ``'class_count'`` (mapping from class label to running count).
        beat_annotation_array: 1-D numpy array of per-frame labels.
    """
    metadata['max_sequence_length'] = \
        max(metadata['max_sequence_length'], len(beat_annotation_array))
    classes, current_class_counts = np.unique(beat_annotation_array,
                                              return_counts=True)
    # Map each class to its count explicitly.  The original indexed
    # current_class_counts by position, which raises IndexError (or
    # attributes the count to the wrong class) whenever a track contains
    # only one of the two classes, e.g. a vector with no beats at all.
    for cls, count in zip(classes, current_class_counts):
        metadata['class_count'][int(cls)] += int(count)
# Walk the wav corpus, convert each annotation file into a numpy label
# vector, save it, and gather corpus-wide statistics along the way.
metadata = {"max_sequence_length": 0, "class_count": {0: 0, 1: 0}}
for root, dirs, files in os.walk(wav_data_root):
    for filename in files:
        wav_file_path = os.path.join(root, filename)
        json_file_path = os.path.join(json_annotations_root, filename + ".jams")
        annotation_path = os.path.join(numpy_annotations_root, filename + ".npy")
        labels = json_to_output_vector(json_file_path, wav_file_path)
        update_metadata(metadata, labels)
        np.save(annotation_path, labels)
        print(wav_file_path)

# TODO Put schema into cfg.py
with open(dataset_metadata_path, 'w') as metadata_file:
    json.dump(metadata, metadata_file)
| true |
d0627501d59f82c862009f6cc68f08e3caeb6a0a | Python | OceanMetSEPA/extractfromeft | /EFT_Tests/prepareToExtract.py | UTF-8 | 1,923 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Tests for the extractEFT programmes.
Created on Mon Apr 23 10:55:37 2018
@author: edward.barratt
"""
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
import unittest
from extractfromeft import EFT_Tools as tools
# Directory containing the (empty) EFT workbook fixtures, and the fixture
# file expected for each supported EFT major version.
inputDir = os.path.normpath('C:/Users/edward.barratt/Documents/Development/Python/extractfromeft/input')
inputFiles = {6: 'EFT2014_v6.0.2_empty.xls', 7: 'EFT2016_v7.0_empty.xlsb', 8: 'EFT2017_v8.0_emptyAlternativeTech.xlsb'}
class extractVersion_TestCases(unittest.TestCase):
    """Tests for EFT_Tools.extractVersion."""

    def test_finds_correct_versions(self):
        # Every fixture workbook must be recognised as its own version.
        for version, filename in inputFiles.items():
            fixture_path = os.path.join(inputDir, filename)
            detected, detected_output = tools.extractVersion(fixture_path, verbose=False)
            # Version numbers should be identical and correct.
            self.assertEqual(version, detected)
            self.assertEqual(version, detected_output)

    def test_fails_with_nonsense_in(self):
        # A non-existent path must raise rather than guess a version.
        self.assertRaises(ValueError, tools.extractVersion,
                          'ThisIsNotAFileThisIsNotAFileThisIsNot', verbose=False)

    def test_assumes_most_recent(self):
        # With existence checking disabled, the newest available version
        # should be assumed.
        detected, detected_output = tools.extractVersion(
            'ThisIsNotAFileThisIsNotAFileThisIsNot', checkExist=False,
            verbose=False, availableVersions=[6.0, 7.0, 7.4, 8.0])
        self.assertEqual(8.0, detected)
        self.assertEqual('Unknown Version as 8.0', detected_output)
class prepareToExtract_TestCases(unittest.TestCase):
    """Tests for EFT_Tools.prepareToExtract."""

    def test_returns_correct(self):
        for version, filename in inputFiles.items():
            fixture_path = os.path.join(inputDir, filename)
            ahk_path, file_names, version_nos, versions_for_output = \
                tools.prepareToExtract(fixture_path, verbose=False)
            # ahk path should exist.
            self.assertTrue(os.path.exists(ahk_path))
            # should be only one filename, and it should exist.
            self.assertEqual(len(file_names), 1)
            self.assertTrue(os.path.exists(file_names[0]))
#class splitSourceNameTestCases(unittest.TestCase):
# testname = '54 - Car -
#
# def test_return
# Run the whole suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
fdc8f4263f5900924dd9794df7f0e182f40f8d5b | Python | WFRT/Comps | /graphics/files/VerifData.py | UTF-8 | 7,648 | 2.75 | 3 | [] | no_license | import numpy as np
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter, DayLocator, HourLocator, WeekdayLocator
from matplotlib.ticker import ScalarFormatter
import matplotlib.pyplot as mpl
import sys
import Common
# Wrapper on file to only return a subset of the data
class Data:
    """Filtered view of a verification file (Python 2 code).

    Wraps a verification-file object and restricts the scores it exposes to a
    subset of offsets, locations and dates.  The ``by`` option selects which
    dimension (offset / date / location* / threshold) plot x-axes iterate over.
    If a climatology file is given, "fcst"/"obs" scores are returned as
    anomalies relative to it.
    """

    def __init__(self, file, offsets=None, locations=None, dates=None, clim=None, by="offset"):
        self.file = file
        self.offsets = offsets
        self.locations = locations
        self.clim = clim
        self.by = by
        # Reject unknown axis choices up front (hard exit: CLI-style tool).
        if(self.by != "offset" and self.by != "date" and self.by != "location" and self.by != "locationId" and self.by != "locationElev" and self.by != "locationLat" and self.by != "locationLon" and self.by != "threshold"):
            print "Invalid '-x' option"
            sys.exit(1)
        # Translate the requested dates into an index (or boolean mask) into
        # the file's own date list.
        allDates = self.file.getDates()
        if(dates == None):
            self.dateIndices = range(0,len(allDates))
        else:
            self.dateIndices = np.in1d(allDates, dates)
        # Same mask for the optional climatology file.
        if(clim != None):
            climDates = self.clim.getDates()
            self.climIndices = np.in1d(climDates, dates);

    # Return the recommended x-values (if you want to abide by the user-requested dimension)
    def getX(self):
        if(self.by == "offset"):
            return self.getOffsets()
        elif(self.by == "date"):
            return Common.convertDates(self.getDates())
        elif(self.by == "location"):
            # Plain 0..N-1 position index, not the station ids.
            return np.array(range(0, len(self.getLocations())))
        elif(self.by == "locationId"):
            return self.getLocations()
        elif(self.by == "locationElev"):
            return self.getElevs()
        elif(self.by == "locationLat"):
            return self.getLats()
        elif(self.by == "locationLon"):
            return self.getLons()
        else:
            print "Invalid 'by' option in Data"
            sys.exit(1)

    def getXHeader(self):
        # Column header string matching getXHuman()'s formatting.
        if(self.by == "offset"):
            return "Offset (h)"
        elif(self.by == "date"):
            return "Date"
        elif(self.by == "location" or self.by == "locationId" or self.by == "locationElev" or self.by == "locationLat" or self.by == "locationLon"):
            return "%6s %5s %5s %5s" % ("id", "lat", "lon", "elev")
        else:
            print "Invalid 'by' option in Data"
            sys.exit(1)

    # Get human readable x-values
    def getXHuman(self):
        if(self.by == "offset"):
            return self.getOffsets()
        elif(self.by == "date"):
            return self.getDates()
        elif(self.by == "location" or self.by == "locationId" or self.by == "locationElev" or self.by == "locationLat" or self.by == "locationLon"):
            lats = self.getLats()
            lons = self.getLons()
            elevs = self.getElevs()
            ids = self.getLocations()
            x = list()
            for i in range(0, len(ids)):
                x.append("%6d %5.2f %5.2f %5.0f" % (ids[i], lats[i], lons[i], elevs[i]))
            return x
        else:
            print "Invalid 'by' option in Data"
            sys.exit(1)

    # Return the recommended y-values (if you want to abide by the user-requested dimension)
    def getY(self, metric):
        # Average the score over the two dimensions not selected by `by`,
        # ignoring NaNs via a masked array; scores are [date, offset, location].
        values = self.getScores(metric)
        mvalues = np.ma.masked_array(values,np.isnan(values))
        mvalues.set_fill_value(np.nan)
        N = mvalues.count()
        if(self.by == "offset"):
            N = np.ma.sum(mvalues.count(axis=2), axis=0)
            r = np.ma.sum(np.ma.sum(mvalues,axis=2), axis=0)/N
        elif(self.by == "date"):
            N = np.ma.sum(mvalues.count(axis=2), axis=1)
            r = np.ma.sum(np.ma.sum(mvalues,axis=2), axis=1)/N
        elif(self.by == "location" or self.by == "locationId" or self.by == "locationElev" or self.by == "locationLat" or self.by == "locationLon"):
            N = np.ma.sum(mvalues.count(axis=1), axis=0)
            r = np.ma.sum(np.ma.sum(mvalues,axis=1), axis=0)/N
        else:
            print "Invalid 'by' option in Data"
            sys.exit(1)
        return np.ma.filled(r, np.nan)

    def getXFormatter(self):
        # Matplotlib tick formatter appropriate for the chosen x dimension.
        if(self.by == "date"):
            return DateFormatter('\n%Y-%m-%d')
        else:
            return ScalarFormatter()

    def getXLabel(self):
        if(self.by == "offset"):
            return "Offset (h)"
        else:
            return self.by.capitalize()

    def getByAxis(self):
        # Axis index of the `by` dimension in the [date, offset, location]
        # score array; falls through silently for "threshold".
        if(self.by == "offset"):
            return 1
        elif(self.by == "date"):
            return 0
        elif(self.by == "location" or self.by == "locationId" or self.by == "locationElev" or self.by == "locationLat" or self.by == "locationLon"):
            return 2
        else:
            pass
            #print "Invalid 'by' option in Data (2)"
            #sys.exit(1)

    def getLength(self):
        # Number of x-values along the chosen dimension.
        if(self.by == "offset"):
            return len(self.getOffsets())
        elif(self.by == "date"):
            return len(self.getDates())
        elif(self.by == "location" or self.by == "locationId" or self.by == "locationElev" or self.by == "locationLat" or self.by == "locationLon"):
            return len(self.getLocations())
        else:
            return 1

    def getDates(self):
        # Dates restricted to the subset requested at construction time.
        dates = self.file.getDates()
        return dates[self.dateIndices]

    def getLocations(self):
        locations = self.file.getLocations()
        if(self.locations != None):
            locations = locations[self.locations]
        return locations

    def getLats(self):
        lats = self.file.getLats()
        return lats

    def getLons(self):
        lons = self.file.getLons()
        return lons

    def getElevs(self):
        elevs = self.file.getElevs()
        return elevs

    def hasScore(self, metric):
        return self.file.hasScore(metric)

    def getUnitsString(self):
        # Units wrapped in parentheses for axis labels; empty if unknown.
        units = self.file.getUnits()
        if(units == ""):
            return ""
        return "(" + units + ")"

    def getScores(self, metrics):
        # Score array [date, offset, location] restricted to the requested
        # subsets; fcst/obs become anomalies when a climatology is attached.
        data = self.file.getScores(metrics)
        data = data[self.dateIndices,:,:]
        if(self.clim != None and (metrics == "fcst" or metrics == "obs")):
            clim = self.clim.getScores("fcst")
            data = data - clim[self.climIndices,:,:]
        if(self.locations is not None):
            data = data[:,:,self.locations]
        if(self.offsets is not None):
            data = data[:,self.offsets,:]
        return data

    def getClimScores(self, metrics):
        # NOTE(review): `data` is never assigned in this method, so reaching
        # the return raises NameError; presumably it should return
        # self.clim.getScores(metrics) -- confirm before fixing.
        if(self.clim == None):
            Common.error("Climatology file not specified");
        return data

    def getFlatScores(self, metrics):
        # Flatten score array(s) and drop NaNs.  For a list of metrics, keep
        # only positions where *every* metric is non-NaN (common mask).
        if(not isinstance(metrics, list)):
            data = self.getScores(metrics)
            data = data.flatten()
            mask = np.where(np.isnan(data) == 0)
            data = data[mask]
            return data
        else:
            data = list()
            curr = self.getScores(metrics[0]).flatten()
            data.append(curr)
            I = np.where(np.isnan(data[0]) == 0)[0]
            for i in range(1, len(metrics)):
                curr = self.getScores(metrics[i]).flatten()
                data.append(curr)
                Icurr = np.where(np.isnan(curr) == 0)[0]
                I = np.intersect1d(I, Icurr)
            for i in range(0, len(metrics)):
                data[i] = data[i][I]
            return data

    def getOffsets(self):
        if(self.offsets == None):
            return self.file.getOffsets()
        else:
            return self.offsets

    def getFilename(self):
        return self.file.getFilename()

    def getUnits(self):
        return self.file.getUnits()

    def getPvar(self, threshold):
        # Build the threshold-probability variable name, e.g. 1.5 -> "p15",
        # -3 -> "pm3" (the "m" marks a negative threshold, "." is dropped).
        minus = ""
        if(threshold < 0):
            # Negative thresholds
            minus = "m"
        if(abs(threshold - int(threshold)) > 0.01):
            var = "p" + minus + str(abs(threshold)).replace(".", "")
        else:
            var = "p" + minus + str(int(abs(threshold)))
        return var

    def getVariable(self):
        return self.file.getVariable()
d75c4ef7c634352a1727b8c10bb7ce3f08c78cb6 | Python | nxm0206/PCL_codes | /OPA_simulation/PSO_sparse_array.py | UTF-8 | 1,840 | 2.53125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import random
from scipy import signal
from OneDCalculation import array_factor, sparse_array_sidepeak, array_plot
from PSO import PSO
def fitness_func(xn_sparse):
    """PSO fitness: power of the highest side peak of a uniform-amplitude,
    zero-phase sparse array with element positions *xn_sparse* (lower is
    better)."""
    resolution = 10000
    wav = 1.55
    positions = np.asarray(xn_sparse)
    amplitudes = np.ones_like(positions)
    phases = np.zeros_like(positions)
    phi, power = array_factor(positions, phases, amplitudes, wav, resolution)
    peak_angle, peak_power = sparse_array_sidepeak(power, phi)
    return peak_power
if __name__ == '__main__':
    # Optimise the element positions of a 64-element sparse array (total
    # aperture 768 length units) so the largest side peak is minimised.
    N_sparse = 64
    ArraySize = 256.0*3.0
    average_gap = ArraySize/N_sparse
    min_gap = 4  # minimum allowed spacing between neighbouring elements

    # Random initial layout: pin the last element at the aperture edge, then
    # grow half of the elements inwards from each end with random gaps in
    # [min_gap, min_gap + 2*(average_gap - min_gap)].
    # (Removed the unused `xn`/`initial` locals from the original.)
    xn_sparse = np.zeros(N_sparse)
    xn_sparse[-1] = ArraySize
    for i in range(int(N_sparse/2-1)):
        xn_sparse[i+1] = xn_sparse[i]+(min_gap + np.random.rand() * (average_gap-min_gap)*2.0)
    for i in range(int(N_sparse/2-1)):
        xn_sparse[N_sparse-i-2] = xn_sparse[N_sparse-i-1]-(min_gap + np.random.rand() * (average_gap-min_gap)*2.0)
    xn_sparse = np.around(xn_sparse, decimals=1)

    # initial = [x1,x2...]
    # bounds = [(x1_min,x1_max),(x2_min,x2_max)...]
    N_p = 50       # number of particles
    Maxiter = 200  # PSO iterations
    best_xn_sparse = np.asarray(
        PSO(fitness=fitness_func, x0=xn_sparse, bounds=min_gap,
            num_particles=N_p, maxiter=Maxiter).run_PSO())

    # Evaluate the optimised layout and plot its array factor in dB.
    resolution = 10000
    wav = 1.55
    An_sparse = np.ones_like(best_xn_sparse)
    varphin_sparse = np.zeros_like(best_xn_sparse)
    best_phi_sparse, best_p_sparse = array_factor(
        best_xn_sparse, varphin_sparse, An_sparse, wav, resolution)
    best_p_sparse_log = 10 * np.log10(best_p_sparse)
    array_plot(best_phi_sparse, best_p_sparse_log)
    plt.ylim([-15, 0])
    plt.show()
| true |
520f12f7d5b0a91e95d7851dbe2c1ca3dfd632e4 | Python | harshit987/captcha_breaker | /rotate.py | UTF-8 | 2,402 | 2.765625 | 3 | [] | no_license | import cv2
import numpy as np
from scipy import ndimage
def rotateImage(image, angle):
    """Rotate *image* about its centre by *angle* degrees (border pixels
    are replicated so the canvas stays filled)."""
    centre = tuple(np.array(image.shape[1::-1]) / 2)
    rotation = cv2.getRotationMatrix2D(centre, angle, 1.0)
    rotated = cv2.warpAffine(
        image, rotation, image.shape[1::-1], flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REPLICATE)
    return rotated
def rotateAndSave(image, angle, name):
    """Rotate *image*, crop the first detected glyph contour (with a 5 px
    margin), resize to 100x100 and save as RotatedData/<name>.jpg.

    NOTE(review): `l[1]` picks the second bounding box returned by
    findContours -- presumably the glyph itself rather than the full-image
    contour; confirm this holds for all reference images.
    """
    rot_img = rotateImage(image, angle)
    # rot_img = image
    gray_img = cv2.cvtColor(rot_img, cv2.COLOR_BGR2GRAY)
    # Binarise before contour detection (threshold 127, max value 255).
    ret, thresh = cv2.threshold(gray_img, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh,
                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Collect the bounding rectangle of every contour.
    l = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        l.append((x, y, w, h))
        # x, y, w, h = cv2.boundingRect(cnt)
        # rect = cv2.minAreaRect(cnt)
        # box = cv2.boxPoints(rect)
        # box = np.int0(box)
        # cv2.drawContours(rot_img, [box], 0, (0, 0, 255), 2)
        # cv2.imshow('after rotation',rot_img)
    x, y, w, h = l[1]
    # cv2.rectangle(rot_img, (x-5, y-5), (x+w+5, y+h+5), (255, 255, 255), 3)
    # Crop with a 5-pixel margin around the box, then normalise the size.
    new_img = rot_img[y-5:y+h+5, x-5: x+w+5]
    resized_image = cv2.resize(new_img,(int(100),int(100)))
    cv2.imwrite('RotatedData/' + name + ".jpg", resized_image)
    # fl = []
    # for item1 in l:
    #     flag = 0
    #     for item2 in l:
    #         if ((item1[0] > item2[0]) and ((item1[0] + item1[2]) < (item2[0] + item2[2])) and (item1[1] > item2[1]) and ((item1[1] + item1[3]) < (item2[1] + item2[3]))):
    #             flag = 1
    #             break
    #     if flag == 0:
    #         fl.append(item1)
    #         x, y, w, h = item1
    #         cv2.rectangle(rot_img, (x-5, y-5), (x+w+5, y+h+5), (0, 255, 0), 3)
    #         cv2.imwrite('RotatedData/' + name + ".jpg", rot_img[y-5:y+h+5, x-5: x+w+5])
# Generate rotated training crops for every reference letter A-Z: each
# letter image is rotated by each angle and saved as e.g. "B3.jpg".
# img = cv2.imread('reference/B.png')
angles = [-30, -20, -10, 0, 10, 20, 30]
for code in range(ord('A'), ord('Z') + 1):
    letter = chr(code)
    img = cv2.imread('reference/' + letter + '.png')
    for index, ang in enumerate(angles, start=1):
        rotateAndSave(img, ang, letter + str(index))

# rotation angle in degree
# rotated = ndimage.rotate(img, 45)
# rotateAndSave(img, 40, 'a new')
# cv2.rectangle(rot_img, (x-5, y-5), (x+w+10, y+h+10), (255,0, 0), 3)
# print(fl)
# cv2.imshow('before rotation', img)
# cv2.imshow('after rotation', rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
def holamundo(nombre):
    """Print a Spanish greeting for *nombre*."""
    print('Hola: {}'.format(nombre))


holamundo('erick')
9d833fee5a01f2d6f217fa5ef2b12f8df0b62be6 | Python | nlesc-nano/Nano-Utils | /nanoutils/testing_utils.py | UTF-8 | 3,232 | 3 | 3 | [
"Apache-2.0"
] | permissive | """Utility functions related to unit-testing.
Index
-----
.. currentmodule:: nanoutils
.. autosummary::
{autosummary}
API
---
{autofunction}
"""
from __future__ import annotations
import os
import shutil
import warnings
from typing import TypeVar, Callable, Any, AnyStr, Tuple, cast
from os.path import isdir, isfile, join
from functools import wraps
from .typing_utils import PathType
from .utils import construct_api_doc
__all__ = ['FileNotFoundWarning', 'delete_finally']
_FT = TypeVar('_FT', bound=Callable[..., Any])
# Issued (as a warning, not an error) when a path scheduled for deletion
# turns out not to exist.
class FileNotFoundWarning(ResourceWarning):
    """A :exc:`ResourceWarning` subclass for when a file or directory is requested but doesn’t exist."""  # noqa: E501
def _delete_finally(path: PathType, warn: bool = True) -> None:
"""Helper function for :func:`delete_finally`."""
try:
if isdir(path):
shutil.rmtree(path)
elif isfile(path):
os.remove(path)
elif warn:
_warning = FileNotFoundWarning(f'No such file or directory: {path!r}')
warnings.warn(_warning, stacklevel=3)
# In case an unexpected exception is encountered
except Exception as ex:
_warning2 = RuntimeWarning(str(ex))
_warning2.__cause__ = ex
warnings.warn(_warning2, stacklevel=3)
def delete_finally(
    *paths: AnyStr | os.PathLike[AnyStr],
    prefix: None | AnyStr = None,
    warn: bool = True,
) -> Callable[[_FT], _FT]:
    r"""Decorator factory: delete *paths* after the decorated function runs.

    The files and/or directories are removed in a ``finally`` block, so
    deletion also happens when the decorated function raises.

    Examples
    --------
    .. code:: python

        >>> from nanoutils import delete_finally

        >>> @delete_finally('file1.txt', 'dir1/')
        ... def func():
        ...     pass

    Parameters
    ----------
    \*paths : :class:`str`, :class:`bytes` or :class:`os.PathLike`
        Path-like objects naming the to-be deleted files and/or directories.
    prefix : :class:`str`, :class:`bytes` or :class:`os.PathLike`, optional
        Directory that every entry of **paths** is located in.
        If :data:`None`, the paths are used as given (absolute or relative
        to the current working directory).
    warn : :class:`bool`
        If :data:`True`, issue a :exc:`~nanoutils.FileNotFoundWarning` when
        a to-be deleted file or directory cannot be found.

    """  # noqa: E501
    # Resolve the full path list once, at decoration time.
    if prefix is not None:
        path_tup: Tuple[PathType, ...] = tuple(join(prefix, p) for p in paths)
    else:
        path_tup = tuple(paths)

    def decorator(func: _FT) -> _FT:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return func(*args, **kwargs)
            finally:
                for p in path_tup:
                    _delete_finally(p, warn)
        return cast(_FT, wrapper)
    return decorator
__doc__ = construct_api_doc(globals(), decorators={'delete_finally'})
| true |
e5b61c6fd2e8f62bdc9784cfb70f6335ac4ebbbd | Python | prashravoor/cfis-sql-injection | /create_db.py | UTF-8 | 1,667 | 3.453125 | 3 | [] | no_license | import sqlite3
from sqlite3 import Error
def create_db(db_file='db/users.db'):
    """Open (creating if necessary) the SQLite database at *db_file*.

    :param db_file: database file path
    :return: an open :class:`sqlite3.Connection`, or ``None`` when the
        connection attempt fails (the error is printed, not raised)
    """
    connection = None
    try:
        connection = sqlite3.connect(db_file)
    except Error as e:
        print(e)
    return connection
def create_table(conn, db_name):
    """Create the credentials table named *db_name* if possible.

    The original implementation ignored *db_name* and always created a
    table called ``users``; the parameter is now honoured (the visible
    caller passes ``'users'``, so behavior there is unchanged).  Errors
    (e.g. "table already exists") are printed rather than raised.

    :param conn: an open :class:`sqlite3.Connection`
    :param db_name: name of the table to create
    """
    try:
        c = conn.cursor()
        # NOTE: the table name is interpolated directly into the DDL; SQLite
        # cannot bind identifiers.  Acceptable for this local setup script
        # with hard-coded callers -- never pass untrusted input here.
        c.execute(
            '''
            CREATE TABLE {}
            (
                userid INTEGER PRIMARY KEY AUTOINCREMENT,
                user TEXT NOT NULL,
                password VARCHAR(32) NOT NULL
            );
            '''.format(db_name)
        )
        c.close()
    except Error as e:
        print(e)
def insert_user(conn, username, password):
    """Insert a (username, password) row into the ``users`` table.

    WARNING: the query is built with ``str.format`` instead of bound
    parameters, so it is vulnerable to SQL injection -- which appears to
    be deliberate, as this script belongs to an SQL-injection teaching
    repo.  Do not reuse as-is; real code should use
    ``c.execute("INSERT ... VALUES (?, ?)", (username, password))``.
    Passwords are also stored in plain text.

    :param conn: an open :class:`sqlite3.Connection`
    :param username: user name to insert (unsanitised)
    :param password: password to insert (unsanitised)
    """
    try:
        c = conn.cursor()
        q = "INSERT INTO users (user,password) VALUES ('{}','{}')".format(
            username, password)
        print(q)
        c.execute(q)
        print('User {} added!'.format(username))
        conn.commit()
        c.close()
    except Error as e:
        print('Error, user may already exist, or invalid username / password: {}'.format(e))
def read_entry():
    """Prompt the operator for a username and password; return them as a tuple."""
    username = input('Enter a username: ')
    password = input('Enter a password: ')
    return (username, password)
if __name__ == '__main__':
    # Interactive smoke test: set up the database, insert a user typed in by
    # the operator, then dump the whole users table.
    database = 'db/users.db'
    conn = create_db(database)
    create_table(conn, 'users')
    # insert_user(conn, 'user', 'password')
    username, password = read_entry()
    insert_user(conn, username, password)
    for row in conn.cursor().execute('SELECT * from users').fetchall():
        print(row)
    conn.commit()
    conn.close()
1ee5cc3ee28d3f45c8eb916bfa42f019bdbd3c4e | Python | leonardoalvesprodepa/python_para_zumbis | /Lista de Exercicios/Lista_de_Exercicios_2/exercicio6.py | UTF-8 | 511 | 3.375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
valor_hora = int(input("Insira o valor da sua hora: "))
horas_mes = int(input("Insira quantas horas trabalha por mês: "))
salario_bruto = valor_hora * horas_mes
ir = salario_bruto * 0.11
inss = salario_bruto * 0.08
sindicato = salario_bruto * 0.05
salario_liquido = salario_bruto - ir - inss - sindicato
print '+ Salário Bruto : R$', salario_bruto
print '-IR : R$', ir
print '-INSS : R$', inss
print '-Sindicato : R$', sindicato
print '= Salário liquido : R$', salario_liquido
| true |
241a0e800bc002a4700e7e1be16c784c116bf885 | Python | ShubhamGulia/ShubhamsRoom | /Finding Largest and Smallest.py | UTF-8 | 339 | 3.328125 | 3 | [] | no_license | import heapq
# Demo of heapq.nlargest: on plain numbers, and on dicts via a key function.
grades = [10, 20, 30, 80, 40]
print(heapq.nlargest(3, grades))

stocks = [
    {'ticker': 'AAPL', 'price': 201},
    {'ticker': 'GOOG', 'price': 801},
    {'ticker': 'FB', 'price': 52},
    {'ticker': 'MSFT', 'price': 312},
    {'ticker': 'TUNA', 'price': 68},
]
print(heapq.nlargest(3, stocks, key=lambda stock: stock['price']))
03865fc3ab052a1074cb4ee2a68f301e42a24640 | Python | mtlynch/sia_load_tester | /sia_load_tester/sia_conditions.py | UTF-8 | 2,937 | 2.875 | 3 | [
"MIT"
] | permissive | import logging
import time
import sia_client as sc
# Throttling policy: cap on simultaneous uploads, and the poll interval
# (seconds) used while waiting for Sia to catch up.
_MAX_CONCURRENT_UPLOADS = 5
_SLEEP_SECONDS = 15

logger = logging.getLogger(__name__)
class Error(Exception):
    """Base class for all errors raised by this module."""
    pass


class WaitInterruptedError(Error):
    """Raised when a wait is aborted because the exit event was set."""
    pass
def make_waiter(exit_event):
    """Factory for creating a Waiter using production settings.

    Wires in the real Sia client and :func:`time.sleep`; *exit_event*
    lets the caller interrupt any wait.
    """
    return Waiter(sc.make_sia_client(), time.sleep, exit_event)
class Waiter(object):
    """Blocks until conditions on a Sia node become true, polling
    periodically and honouring an external exit event."""

    def __init__(self, sia_client, sleep_fn, exit_event):
        """Creates a new Waiter instance.

        Args:
            sia_client: An implementation of the Sia client API.
            sleep_fn: Callback that puts the thread to sleep for a given
                number of seconds (injected so tests can skip real sleeps).
            exit_event: An event that, when set, makes the Waiter stop
                waiting and raise WaitInterruptedError.
        """
        self._sia_client = sia_client
        self._sleep_fn = sleep_fn
        self._exit_event = exit_event

    def wait_for_available_upload_slot(self):
        """Waits until the number of concurrent uploads drops below the
        maximum.

        Raises:
            WaitInterruptedError if the exit event is set during function
                execution.
        """
        while True:
            upload_count = self._count_uploads_in_progress()
            if not self._too_many_uploads_in_progress(upload_count):
                return
            logger.info(('Too many uploads in progress: %d >= %d.'
                         ' Sleeping for %d seconds'), upload_count,
                        _MAX_CONCURRENT_UPLOADS, _SLEEP_SECONDS)
            self._sleep_fn(_SLEEP_SECONDS)

    def wait_for_all_uploads_to_complete(self):
        """Waits until no upload is in progress any more.

        Raises:
            WaitInterruptedError if the exit event is set during function
                execution.
        """
        while True:
            upload_count = self._count_uploads_in_progress()
            if upload_count <= 0:
                return
            logger.info(
                ('Waiting for remaining uploads to complete.'
                 ' %d uploads still in progress. Sleeping for %d seconds'),
                upload_count, _SLEEP_SECONDS)
            self._sleep_fn(_SLEEP_SECONDS)

    def _count_uploads_in_progress(self):
        # A file whose uploadprogress is below 100 is still uploading.
        self._check_exit_event()
        return sum(1 for sia_file in self._sia_client.renter_files()
                   if sia_file[u'uploadprogress'] < 100)

    def _too_many_uploads_in_progress(self, concurrent_uploads):
        return concurrent_uploads >= _MAX_CONCURRENT_UPLOADS

    def _check_exit_event(self):
        # Abort the wait as soon as the caller signals shutdown.
        if self._exit_event.is_set():
            logger.critical('Exit event is set. Stopping wait.')
            raise WaitInterruptedError(
                'Sia condition wait has been interrupted')
| true |
421f86c021265c9c6ac08ab69aaa9693bb2c2620 | Python | heliumdatacommons/PIVOT | /swagger/base.py | UTF-8 | 7,308 | 2.796875 | 3 | [] | no_license | import datetime
import swagger
class Path:
    """A single OpenAPI path item: a URL path plus its operations."""

    def __init__(self, path, ops=[]):
        # `ops` is copied immediately, so the mutable default is harmless.
        self.__path = path
        self.__ops = list(ops)

    @property
    def path(self):
        """The URL path string."""
        return self.__path

    def add_operation(self, op):
        """Attach another operation (must expose .method and .to_dict())."""
        self.__ops.append(op)

    def to_dict(self):
        """Render as ``{http_method: operation_dict}``."""
        return {operation.method: operation.to_dict() for operation in self.__ops}
class Operation:
    """One HTTP operation (verb) under an OpenAPI path."""

    def __init__(self, tag, method, summary='', request_body=None, params=[], responses=[]):
        # params/responses are copied immediately, so the mutable defaults
        # are harmless.
        self.__tag = tag
        self.__method = method
        self.__summary = summary
        self.__request_body = request_body
        self.__params = list(params)
        self.__responses = list(responses)

    @property
    def method(self):
        # HTTP verb, e.g. 'get' / 'post'.
        return self.__method

    @property
    def summary(self):
        return self.__summary

    @property
    def request_body(self):
        return self.__request_body

    @summary.setter
    def summary(self, summary):
        self.__summary = summary

    @request_body.setter
    def request_body(self, request_body):
        self.__request_body = request_body

    def add_parameter(self, p):
        """Append a parameter object (must expose .to_dict())."""
        self.__params.append(p)

    def add_response(self, r):
        """Append a response object (must expose .code and .to_dict())."""
        self.__responses.append(r)

    def to_dict(self):
        """Render the operation; summary/requestBody are emitted only when set."""
        op = dict(tags=[self.__tag],
                  parameters=[p.to_dict() for p in self.__params],
                  responses={r.code: r.to_dict() for r in self.__responses})
        if self.__summary:
            op.update(summary=self.__summary)
        if self.__request_body:
            op.update(requestBody=self.__request_body.to_dict())
        return op
class RequestBody:
    """An OpenAPI request body wrapping a content object."""

    def __init__(self, content):
        self.__content = content

    def to_dict(self):
        """Render as ``{'content': <content dict>}``."""
        return {'content': self.__content.to_dict()}
class Parameter:
    """An OpenAPI operation parameter (query/path/header/cookie)."""

    def __init__(self, name, type, show_in, description='', items=None, required=False,
                 *args, **kwargs):
        # NOTE(review): extra *args/**kwargs are accepted and silently
        # discarded -- confirm that is intentional.
        self.__name = name
        self.__type = type
        self.__in = show_in
        self.__description = description
        self.__items = items
        self.__required = required

    @property
    def name(self):
        return self.__name

    @property
    def show_in(self):
        # Maps to the OpenAPI 'in' field ('in' is a Python keyword).
        return self.__in

    @show_in.setter
    def show_in(self, show_in):
        self.__in = show_in

    def to_dict(self):
        # required/description/items are emitted only when set.
        res = {'name': self.__name, 'in': self.__in,
               'schema': swagger._convert_data_type(self.__type)}
        if self.__required:
            res.update(required=self.__required)
        if self.__description:
            res.update(description=self.__description)
        if self.__items:
            res.update(items=swagger._convert_data_type(self.__items))
        return res
class Response:
    """An OpenAPI response: a status code plus optional content/description."""

    def __init__(self, code, content=None, description=''):
        self.__code = code
        self.__content = content
        self.__description = description

    @property
    def code(self):
        """HTTP status code of this response."""
        return self.__code

    @property
    def content(self):
        return self.__content

    @content.setter
    def content(self, content):
        self.__content = content

    def to_dict(self):
        """Render the response; unset fields are omitted entirely."""
        rendered = {}
        if self.__content:
            rendered['content'] = self.__content.to_dict()
        if self.__description:
            rendered['description'] = self.__description
        return rendered
class Content:
    """A media-type -> schema mapping for request/response bodies."""

    def __init__(self, schemas):
        self.__schemas = dict(schemas)

    def _parse_schema(self, schema):
        """Normalise one stored schema entry into OpenAPI form.

        Fix: the original ``pop``-ed the 'type'/'items' keys out of the
        stored dict, mutating caller state and making a second
        ``to_dict()`` call return a different, emptier result.  Using
        ``get`` keeps to_dict idempotent.
        """
        if isinstance(schema, dict):
            scm = {}
            type_ = schema.get('type')
            if type_:
                scm.update(**swagger._convert_data_type(type_))
            items = schema.get('items')
            if items:
                scm.update(items=swagger._convert_data_type(items))
            return scm
        return swagger._convert_data_type(schema)

    def to_dict(self):
        """Render as ``{media_type: {'schema': ...}}``."""
        return {fmt: dict(schema=self._parse_schema(scm['schema']))
                for fmt, scm in self.__schemas.items()}
class Enum:
    """A named enumeration schema with a base type and allowed values."""

    def __init__(self, name, type, values=[]):
        # `values` is copied immediately, so the mutable default is harmless.
        self.__name = name
        self.__type = type
        self.__values = list(values)

    def to_dict(self):
        """Render as ``{name: {'type': ..., 'enum': [...]}}``."""
        return {self.__name: {'type': self.__type, 'enum': list(self.__values)}}
class Model:
    """An OpenAPI schema object (component model).

    A ``type`` other than 'object' is treated as the name of a parent
    schema, and the rendered dict is wrapped in ``allOf`` with a ``$ref``
    to that parent.
    """

    def __init__(self, name, type='object', description='', properties=[], ref=None):
        # `properties` is copied immediately, so the mutable default is
        # harmless.
        self.__name = name
        self.__type = type
        self.__description = description
        self.__properties = list(properties)
        self.__ref = ref

    @property
    def name(self):
        return self.__name

    @property
    def description(self):
        return self.__description

    @property
    def type(self):
        # 'object', or the name of a parent schema to extend via allOf.
        return self.__type

    @description.setter
    def description(self, description):
        self.__description = description

    @type.setter
    def type(self, type):
        self.__type = type

    def add_property(self, p):
        """Attach a Property; only Property instances are accepted."""
        assert isinstance(p, Property)
        self.__properties.append(p)

    def to_dict(self):
        res = dict(type='object')
        if self.__description:
            res.update(description=self.description)
        if self.__ref:
            # NOTE(review): emitted under the key 'ref' -- OpenAPI uses
            # '$ref'; confirm downstream consumers before changing.
            res.update(ref=self.__ref)
        if self.__properties:
            res.update(required=[p.name for p in self.__properties if p.required],
                       properties={p.name: p.to_dict() for p in self.__properties})
        if self.type != 'object':
            # Inheriting model: combine the parent $ref with this schema.
            res = {'allOf': [{'$ref': '#/components/schemas/%s'%self.type}, res]}
        return res
class Property:
    """A single schema property, rendering only the attributes that are set."""

    def __init__(self, name, **kwargs):
        self.__name = name
        self.update(**kwargs)

    @property
    def name(self):
        return self.__name

    @property
    def type(self):
        # Already converted to the OpenAPI schema-dict form (or None).
        return self.__type

    @property
    def description(self):
        return self.__description

    @property
    def required(self):
        return self.__required

    @type.setter
    def type(self, type):
        self.__type = type and swagger._convert_data_type(type, self.__additional_properties)

    @description.setter
    def description(self, description):
        self.__description = description

    def update(self, type=None, description=None, items=None, required=False,
               nullable=False, additional_properties=None, default=None,
               read_only=False, write_only=False, example=None,
               maximum=None, minimum=None, **kwargs):
        # Bulk-assign all property attributes; unrecognised **kwargs are
        # silently dropped.  A property cannot be both read- and write-only.
        assert not (read_only and write_only)
        self.__type = type and swagger._convert_data_type(type, additional_properties)
        self.__description = description
        self.__items = items and swagger._convert_data_type(items, additional_properties)
        self.__required = required
        self.__nullable = nullable
        self.__additional_properties = additional_properties
        self.__default = default
        self.__read_only = read_only
        self.__write_only = write_only
        self.__example = example
        self.__maximum = maximum
        self.__minimum = minimum

    def to_dict(self):
        # Emit only the attributes that were actually set; datetime examples
        # are formatted as "YYYY-mm-dd HH:MM:SS".
        res = {}
        if self.__type:
            res.update(**self.__type)
        if self.__description:
            res.update(description=self.__description)
        if self.__nullable:
            res.update(nullable=self.__nullable)
        if self.__items:
            res.update(items=self.__items)
        if self.__default is not None:
            res.update(default=self.__default)
        if self.__read_only:
            res.update(readOnly=self.__read_only)
        if self.__write_only:
            res.update(writeOnly=self.__write_only)
        if self.__example:
            res.update(example=self.__example.strftime("%Y-%m-%d %H:%M:%S")
                       if isinstance(self.__example, datetime.datetime)
                       else self.__example)
        if self.__maximum is not None:
            res.update(maximum=self.__maximum)
        if self.__minimum is not None:
            res.update(minimum=self.__minimum)
        return res
| true |
def mul_num(*args):
    """Print the product of all positional arguments."""
    product = 1
    for value in args:
        product *= value
    print(product)


mul_num(5, 2)
mul_num(5, 2, 4)
numbers = [5, 4, 3, 2, 1]
mul_num(*numbers)
| true |