blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7f166fc88eb1bdde3f58e6711e1b420d9070c498 | Python | DenisFeoktistov/CasinoProject | /pythonProject/Состовляющие класса Casino/Interface.py | UTF-8 | 1,129 | 2.53125 | 3 | [] | no_license | from RegistrationWindow import RegistrationWindow
from CasinoWindow import CasinoWindow
from LoginWindow import LoginWindow
class Interface:
    """Aggregates the casino's three windows (login, registration, casino)
    and forwards responder wiring plus show/close requests to them."""

    def __init__(self):
        self.login_window = LoginWindow(self)
        self.registration_window = RegistrationWindow(self)
        self.casino_window = CasinoWindow(self)

    def set_responder(self, responder):
        """Attach `responder` and fan its per-window sub-responders out."""
        self.responder = responder
        for prefix in ("login", "registration", "casino"):
            window = getattr(self, prefix + "_window")
            window.set_responder(getattr(responder, prefix + "_window_responder"))

    def show_login_window(self):
        self.login_window.show()

    def close_login_window(self):
        self.login_window.close()

    def show_registration_window(self):
        self.registration_window.show()

    def close_registration_window(self):
        self.registration_window.close()

    def show_casino_window(self):
        self.casino_window.show()

    def close_casino_window(self):
        self.casino_window.close()

    def show(self):
        """The login window is the application's entry screen."""
        self.show_login_window()
| true |
c698ea5dc43e2f61d1636ff3fbb8582ad966f97d | Python | 3207-Rhims/100days-of-code-challenge | /codechef/sum.py | UTF-8 | 227 | 2.8125 | 3 | [] | no_license | t=int(input())
# CodeChef "Add Two Numbers": for each of t test cases, read two integers
# A and B on one line and print their sum.
# Constraints: 1 <= T <= 1000, 0 <= A, B <= 10000.
if 1 <= t <= 1000:  # constraint check hoisted out of the loop (was re-tested every pass)
    for _ in range(t):
        a, b = (int(x) for x in input().split(" "))
        total = a + b  # renamed from `sum`, which shadowed the builtin
        # Bug fix: the original tested `a` twice and never validated `b`.
        if 0 <= a <= 10000 and 0 <= b <= 10000:
            print(total)
8a8ca7923a0ec1a027a9e07e8ae42448ab94c105 | Python | fredford/maze-generator | /src/maze.py | UTF-8 | 3,308 | 3.6875 | 4 | [] | no_license | import random
from src import cell
RED = (255, 0, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
class Maze:
    """Object used to represent a maze: its dimensions, the grid of cells,
    and the start/finish cells.
    """

    def __init__(self, size, scale):
        # (dx, dy) offsets of the four cardinal neighbours of a cell.
        self.directions = {"above": (0, -1), "below": (0, 1), "left": (-1, 0), "right": (1, 0)}
        self.size = size
        self.scale = scale
        self.start = None
        self.end = None
        self.grid = []
        self.create_cells()
        self.mesh_cells()
        self.set_start_end()

    def create_cells(self):
        """Populate ``self.grid`` with a size x size matrix of cells."""
        for i in range(self.size):
            temp = []
            for j in range(self.size):
                temp.append(cell.Cell(i, j, self.scale))
            self.grid.append(temp)

    def mesh_cells(self):
        """Link every cell to its four neighbours (None beyond the edge).

        Fix: the loop variable was named ``cell``, shadowing the imported
        ``cell`` module inside this method; renamed to ``grid_cell``.
        """
        for row in self.grid:
            for grid_cell in row:
                for direction, (x, y) in self.directions.items():
                    nx, ny = grid_cell.x + x, grid_cell.y + y
                    # Offsets are +-1, so a single bounds check replaces the
                    # original <0 / ==size comparisons with identical effect.
                    if 0 <= nx < self.size and 0 <= ny < self.size:
                        grid_cell.neighbors[direction] = self.grid[nx][ny]
                    else:
                        grid_cell.neighbors[direction] = None

    def set_start_end(self):
        """Randomly pick two distinct cells as start and end, and mark them."""
        start = (random.randint(0, self.size - 1), random.randint(0, self.size - 1))
        end = start
        while start == end:  # re-roll until the end differs from the start
            end = (random.randint(0, self.size - 1), random.randint(0, self.size - 1))
        self.start = self.grid[start[0]][start[1]]
        self.end = self.grid[end[0]][end[1]]
        self.start.isStart = True
        self.end.isEnd = True
        self.start.background = BLUE
        self.end.background = RED

    def update_start_end(self):
        """Re-scan the grid and refresh the self.start / self.end references."""
        for row in self.grid:
            for grid_cell in row:  # renamed from ``cell`` (module shadowing)
                if grid_cell.isStart:
                    self.start = grid_cell
                elif grid_cell.isEnd:
                    self.end = grid_cell

    def change_start(self, new_start):
        """Move the maze start to `new_start`, clearing the old marker.

        Arguments:
            new_start {Cell} -- The new starting location for the maze.
        """
        self.start.isStart = False
        self.start.background = WHITE
        self.start = new_start
        self.start.isStart = True
        self.start.background = BLUE

    def change_end(self, new_end):
        """Move the maze end to `new_end`, clearing the old marker.

        Arguments:
            new_end {Cell} -- The new ending location for the maze.
        """
        self.end.isEnd = False
        self.end.background = WHITE
        self.end = new_end
        self.end.isEnd = True
        self.end.background = RED
| true |
d9f99e25b778b0bbca89c7c30fa998de65c9c1ac | Python | nswarner/poker | /hand.py | UTF-8 | 1,061 | 3.859375 | 4 | [] | no_license | #!/usr/bin/python3
from card import Card
from logger import Logger
class Hand:
    """A two-card poker hand; wraps a list of Card objects."""

    # Class-level default kept for backward compatibility with any code
    # that inspects Hand.hand before construction.
    hand = None

    def __init__(self, num_cards=2):
        """Create a hand pre-filled with `num_cards` random cards (max 2)."""
        Logger.log("Hand: Creating a new hand with cards: " + str(num_cards))
        self.hand = []
        for i in range(0, num_cards):
            Logger.log("Hand: Calling add_card(Card()).")
            self.add_card(Card())

    def __del__(self):
        pass

    def hand_to_string(self):
        """Return the hand formatted like "[ c1 ], [c2 ]".

        Bug fix: the original iterated a hard-coded range(0, 2), so it
        crashed with IndexError whenever the hand held fewer than 2 cards.
        An empty hand now yields "".
        """
        Logger.log("Hand: Converting hand to string.")
        results = "[ "
        for card in self.hand:
            results += Card.translate(card) + " ], ["
        return results[:-3]  # drop the trailing ", ["

    def add_card(self, g_card):
        """Append a card; raise if the two-card limit is already reached."""
        Logger.log("Hand: add_card(Card) called.")
        if (len(self.hand) < 2):
            self.hand.append(g_card)
        else:
            raise Exception('Hand already has two cards.')

    def end_hand(self, g_card=None):
        """Discard all cards.

        `g_card` was never used; it is now optional so existing callers
        that passed an argument keep working.
        """
        self.hand.clear()
if (__name__ == '__main__'):
    # Smoke test. Start with an empty hand: the default constructor already
    # fills both slots, so the two add_card() calls below would raise.
    one_hand = Hand(0)
    one_hand.add_card("1:9")
    one_hand.add_card("1:12")
    # Bug fix: Hand has no display_hand() method; print the string form.
    print(one_hand.hand_to_string())
| true |
db0fc827e3e427abd38335ad0d1addf75aa5820d | Python | Kr0n0/tensorflow-metal-osx | /mnist.py | UTF-8 | 1,055 | 2.921875 | 3 | [] | no_license | import tensorflow as tf
from tensorflow import keras
# Load MNIST and keep only the first 1000 examples of each split to speed
# up this demo run.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

y_train = y_train[:1000]
y_test = y_test[:1000]

# Scale pixel values to [0, 1] and flatten each 28x28 image to 784 floats.
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train[:1000].reshape(-1, 28*28)
x_test = x_test[:1000].reshape(-1, 28*28)
def create_model():
    """Build and compile a small dense classifier for flattened 28x28
    MNIST digits (784 inputs, 10 logit outputs)."""
    layers = [
        keras.layers.Dense(512, activation='relu', input_shape=(784,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(1000, activation='relu'),
        keras.layers.Dense(10),
    ]
    model = tf.keras.models.Sequential(layers)
    model.compile(
        optimizer='adam',
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.metrics.SparseCategoricalAccuracy()],
    )
    return model
# Create a basic model instance
model = create_model()

# Display the model's architecture
model.summary()

# Single-sample forward pass (demonstration; result unused).
predictions = model(x_train[:1]).numpy()

model.fit(x_train, y_train, epochs=10)

loss, acc = model.evaluate(x_test, y_test, verbose=2)
# Bug fix: "{:5.2}" is the *general* float format and printed values such
# as "9.6e+01"; "{:5.2f}" prints a readable percentage like "96.30".
print("Accuracy: {:5.2f}%".format(100*acc))
| true |
e0d7373dee703dc6a82273d0494fe8845fdac89f | Python | dejori/this-and-that | /NaiveBayes/bayes.py | UTF-8 | 5,220 | 3.25 | 3 | [] | no_license | import sys, getopt
import re
import pickle
from sets import Set
from os import listdir
from os.path import isfile, join
class Bayes(object):
    """Naive-Bayes text classifier over two classes, 'pos' and 'neg'.

    Tokens are stored as Token objects (which hash/compare like their
    underlying string); counts start at 1 for Laplace smoothing.
    NOTE(review): the `th` constructor parameter is accepted but never used.
    Python 2 code (print statements, integer-safe float divisions).
    """

    def __init__(self, th=.9):
        self.tokens = {}      # word -> Token (per-class occurrence counts)
        self.pos_count = 0    # number of positive documents seen
        self.neg_count = 0    # number of negative documents seen

    def _train_token(self, token, pos):
        """Record one occurrence of `token` in a pos (True) / neg document."""
        if token in self.tokens:
            token = self.tokens[token]
        else:
            token = Token(token)
            self.tokens[token] = token
        if pos:
            token.pos_appear += 1
        else:
            token.neg_appear += 1

    def train_on_text(self, words, pos):
        """Train on one document given as an iterable of (unique) words."""
        for word in words:
            self._train_token(word, pos)
        if pos:
            self.pos_count += 1
        else:
            self.neg_count += 1

    def _get_p(self, var):
        """Class prior: P(pos) when var == 'pos', else P(neg)."""
        total = self.pos_count + self.neg_count
        if var == 'pos':
            return self.pos_count / float(total)
        else:
            return self.neg_count / float(total)

    # returns p(s|w)
    # score p(s|w) based on
    # p(s|w) = p(w|s) / p(w|s) + p(w|h)
    # s - spam, w - word, h - ham (non-spam)
    def _score_token(self, tk, klass):
        """Likelihood estimate for token `tk` under class `klass`;
        words never seen in training get a neutral 0.5."""
        if tk in self.tokens:
            return self._p_token_given(tk, klass)
        else: # word not seen before
            return 0.5

    def _p_token_given(self, token, klass):
        """P(token | klass): smoothed class count over the token's total."""
        tk = self.tokens[token]
        # +1 for laplacian smoothing
        if klass == 'pos':
            appear = tk.pos_appear
        if klass == 'neg':
            appear = tk.neg_appear
        count = tk.pos_appear + tk.neg_appear
        return (appear + 1) / float(count)

    def train(self, pos_dir, neg_dir):
        """Train on every file in pos_dir (positive) and neg_dir (negative).

        NOTE(review): the opened files are never closed explicitly.
        """
        pos_files = [f for f in listdir(pos_dir) if isfile(join(pos_dir, f))]
        for f in pos_files:
            pos_file = open(join(pos_dir, f))
            payload = pos_file.read()
            self.train_on_text(clean_text(payload), True)
        neg_files = [f for f in listdir(neg_dir) if isfile(join(neg_dir, f))]
        for f in neg_files:
            neg_file = open(join(neg_dir, f))
            payload = neg_file.read()
            self.train_on_text(clean_text(payload), False)

    def test(self, pos_dir, neg_dir):
        """Classify every file in both dirs; print a confusion matrix and
        overall accuracy."""
        tp = 0  # true positives
        fp = 0  # false positives
        tn = 0  # true negatives
        fn = 0  # false negatives
        pos_files = [f for f in listdir(pos_dir) if isfile(join(pos_dir, f))]
        for f in pos_files:
            pos_file = open(join(pos_dir, f))
            klass = self.classify_text(clean_text(pos_file.read()))
            if klass == 'pos':
                tp +=1
            else:
                fn +=1
        neg_files = [f for f in listdir(neg_dir) if isfile(join(neg_dir, f))]
        for f in neg_files:
            neg_file = open(join(neg_dir, f))
            klass = self.classify_text(clean_text(neg_file.read()))
            if klass == 'neg':
                tn +=1
            else:
                fp +=1
        print "--------------------"
        print "\t\tpred pos\tpred neg"
        print "true pos\t%s\t\t%s" % (tp,fn)
        print "true neg\t%s\t\t%s" % (fp,tn)
        print "--------------------"
        print "accuracy %0.2f" % (float(tp+tn)/float(tp+tn+fp+fn))

    # returns most likely klass label
    def classify_text(self, words):
        """Return 'pos' or 'neg' for a document given as a set of words.

        NOTE(review): multiplies raw probabilities, so long documents can
        underflow to 0.0 for both classes; summing log-probabilities would
        be numerically safer.
        """
        p_pos = 1
        p_neg = 1
        for word in words:
            p_pos *= self._score_token(word, 'pos')
            p_neg *= self._score_token(word, 'neg')
        # multiply by klass prior
        p_pos *= self._get_p('pos')
        p_neg *= self._get_p('neg')
        if p_pos > p_neg:
            return 'pos'
        else:
            return 'neg'
class Token(object):
    """A vocabulary token with per-class occurrence counts.

    Hashes and compares equal to its underlying string so it can be
    looked up in a dict by the raw word itself.
    """

    def __init__(self, tk):
        self.token = tk
        # Laplace estimation: both class counts start at 1, not 0.
        self.pos_appear = self.neg_appear = 1

    def __hash__(self):
        # Delegate to the wrapped string so dict lookups by raw word work.
        return hash(self.token)

    def __eq__(self, other):
        return self.token == other
def clean_text(text):
    """Strip HTML-like tags, URLs and punctuation from `text`; return the
    set of remaining lowercase words."""
    # Raw string avoids the \s escape ambiguity; builtin set replaces the
    # long-deprecated sets.Set with identical behaviour.
    regex = re.compile(r'<.*>|[^a-zA-Z0-9_\s]|http://.*')
    return set(regex.sub('', text).lower().split())
if __name__ == '__main__':
    # CLI: bayes.py -m <train|test> -p <posdir> -n <negdir>
    #   "train" fits a model on the two directories and pickles it to model.p;
    #   "test" loads model.p and prints a confusion matrix for the dirs.
    mode = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hm:p:n:", ["mode=", "posdir=", "negdir="])
    except getopt.GetoptError:
        print 'bayes.py -p <posdir> -n <negdir>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'bayes.py -m <mode> -p <posdir> -n <negdir>'
            sys.exit()
        elif opt in ("-m", "--mode"):
            mode = arg
        elif opt in ("-p", "--posdir"):
            pos_dir = arg
        elif opt in ("-n", "--negdir"):
            neg_dir = arg
    if not mode:
        print 'bayes.py -m <mode> -p <posdir> -n <negdir>'
        sys.exit()
    if mode == "train":
        print "training ..."
        b = Bayes()
        b.train(pos_dir, neg_dir)
        pickle.dump(b, open("model.p", "wb"))
    elif mode == "test":
        print "testing ..."
        # NOTE(review): unpickling is only safe for model.p files this
        # script itself produced; never load an untrusted pickle.
        b = pickle.load( open("model.p", "rb"))
        b.test(pos_dir, neg_dir)
        # print 'is pos w/ probability', b.classify_text(clean_text("I do love this great movie"))
        # print 'is pos w/ probability', b.classify_text(clean_text("I do not like this movie"))
    else:
        print "mode unknown"
        sys.exit(2)
| true |
5a75b9d0d143945b0ce3c726977797b319ee538a | Python | lspence40/engineering4notebook | /python/LEDblinkPython.py | UTF-8 | 215 | 2.8125 | 3 | [] | no_license | import RPi.GPIO as GPIO
from time import sleep
# Blink the LED on BCM pin 4 five times (0.5 s on / 0.5 s off).
LED_PIN = 4

GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN, GPIO.OUT)
sleep(1)  # settle before starting the blink sequence

for _ in range(5):
    for level in (1, 0):  # on, then off
        GPIO.output(LED_PIN, level)
        sleep(.5)

GPIO.cleanup()
| true |
5ddfc82dfe44aa966b9c0c01b1913a7b1343f525 | Python | unknownboyy/GUVI | /code16.py | UTF-8 | 174 | 3.203125 | 3 | [] | no_license | for _ in range(int(input())):
n = int(input())
x = int((2*n)**0.5)
if x*(x+1)//2==n:
print('Go On Bob',x)
else:
print('Better Luck Next Time') | true |
2630e21c0ae1861b8a642960c8297b0ebe5c1ce0 | Python | domingoesteban/robolearn | /robolearn/torch/models/transitions/linear_regression.py | UTF-8 | 2,470 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | import torch
import torch.nn as nn
from robolearn.torch.core import PyTorchModule
from robolearn.utils.serializable import Serializable
import robolearn.torch.utils.pytorch_util as ptu
from robolearn.models import Transition
from robolearn.torch.utils.ops.gauss_fit_joint_prior import gauss_fit_joint_prior
class TVLGDynamics(PyTorchModule, Transition):
    """Time-Varying Linear-Gaussian dynamics model.

    For each timestep t of the horizon it stores a linear model of the
    next observation:  s' ~ N(Fm[t] @ [s; a] + fv[t], dyn_covar[t]).
    """

    def __init__(self, horizon, obs_dim, action_dim):
        # horizon: number of timesteps T covered by the model.
        self._T = horizon
        Transition.__init__(self, obs_dim=obs_dim, action_dim=action_dim)
        self._serializable_initialized = False
        Serializable.quick_init(self, locals())
        super(TVLGDynamics, self).__init__()
        # Per-timestep linear term (T x dS x (dS+dA)), bias (T x dS) and
        # process-noise covariance (T x dS x dS).
        self.Fm = nn.Parameter(ptu.zeros(horizon, obs_dim, obs_dim+action_dim))
        self.fv = nn.Parameter(ptu.ones(horizon, obs_dim))
        self.dyn_covar = nn.Parameter(ptu.zeros(horizon, obs_dim, obs_dim))
        # Prior
        self._prior = None

    def get_next(self, observation, action):
        # Not implemented; use forward() with an explicit timestep instead.
        pass

    def forward(self, obs, act, time=None, stochastic=False):
        """Predict the next observation at timestep `time`.

        NOTE(review): only the deterministic mean is returned; the
        `stochastic` flag and the covariance (`cov`) are currently unused.
        """
        if time is None:
            raise NotImplementedError
        obs_and_act = torch.cat((obs, act), dim=-1)
        batch = obs.shape[:-1]  # unused in the current implementation
        mean = obs_and_act.mm(torch.t(self.Fm[time])) + self.fv[time]
        cov = self.dyn_covar[time]
        next_obs = mean
        return next_obs

    def get_prior(self):
        """Return the prior object set via set_prior() (or None)."""
        return self._prior

    def fit(self, States, Actions, regularization=1e-6):
        """ Fit dynamics.

        States is an (N, T, dS) tensor and Actions an (N, T, dA) tensor of
        N sampled trajectories. For each t, fits Fm/fv/dyn_covar by
        weighted least squares under the prior supplied via set_prior().
        """
        N, T, dS = States.shape
        dA = Actions.shape[2]
        if N == 1:
            raise ValueError("Cannot fit dynamics on 1 sample")
        it = slice(dS+dA)
        # Fit dynamics with least squares regression.
        dwts = (1.0 / N) * ptu.ones(N)  # uniform per-sample weights
        for t in range(T - 1):
            # Joint samples Ys = [s_t, a_t, s_{t+1}] for every trajectory.
            Ys = torch.cat((States[:, t, :], Actions[:, t, :],
                            States[:, t + 1, :]),
                           dim=-1)
            # Obtain Normal-inverse-Wishart prior.
            mu0, Phi, mm, n0 = self._prior.eval(dS, dA, Ys)
            # Regularization applied to the (s, a) block of the covariance.
            sig_reg = ptu.zeros((dS+dA+dS, dS+dA+dS))
            sig_reg[it, it] = regularization
            Fm, fv, dyn_covar = \
                gauss_fit_joint_prior(Ys, mu0, Phi, mm, n0,
                                      dwts, dS+dA, dS, sig_reg)
            self.Fm[t, :, :] = Fm
            self.fv[t, :] = fv
            self.dyn_covar[t, :, :] = dyn_covar

    def set_prior(self, prior):
        """Set the prior used by fit(); must provide .eval(dS, dA, Ys)."""
        self._prior = prior
| true |
16d97479b965679a0577708e215d05d34ac07311 | Python | donzucchero/homework_week_2 | /hw_week2_exc2.py | UTF-8 | 848 | 4 | 4 | [] | no_license | def get_grades():
while True:
try:
grade = (int(input('Enter grade(2/3/4/5/6): ')))
if grade in [2,3,4,5,6]:
grades.append(grade)
answer = input("Would you like to add another grade?(y/n): ")
if answer == "n":
break
else:
print("Invalid input.")
except:
print("Bro, What the fuck?")
# Collect three students. get_grades() (defined above) appends validated
# marks to the module-global `grades` list, so it must be rebound to a
# fresh list before each call.
students = {}
for i in range(0, 3):
    name = input("Enter name: ")
    grades = []
    get_grades()
    students[i] = {'name': name, 'grades': grades}

# Report each student's average mark.
for i in range(0, len(students)):
    student = students[i]
    print(student["name"] + "'s grade average")
    marks = student["grades"]
    # Fixes: `sum` shadowed the builtin, and an empty mark list would have
    # crashed with ZeroDivisionError - report 0 instead.
    average = sum(marks) / len(marks) if marks else 0
    print(average)
| true |
83c00da578e7bf8cc3d12c623f3f72f304ca4f5b | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_201/2167.py | UTF-8 | 1,014 | 3.546875 | 4 | [] | no_license | def construct(n):
right = 0
left = 0
if (n % 2 == 0):
# even
right, left = (n//2), max(0, (n//2 - 1))
else:
right, left = (n//2), (n//2)
return right, left
def get_stall(n, m):
    """Return (max, min) neighbouring free stalls seen by the m-th person
    entering a row of n stalls (Code Jam 2017 "Bathroom Stalls")."""

    def _split(k):
        # Occupy the middle of a k-stall gap; return the remaining halves.
        if k % 2:
            return k // 2, k // 2
        return k // 2, max(0, k // 2 - 1)

    if n == m:
        return 0, 0
    if m == 1:
        return _split(n)
    # Expand the first m levels of gap splits, then the m-th largest pair
    # (sorted descending) is the gap the m-th person ends up taking.
    pairs = [_split(n)]
    idx = 0
    while idx != m:
        bigger, smaller = pairs[idx]
        pairs.append(_split(bigger))
        pairs.append(_split(smaller))
        idx += 1
    return sorted(pairs, reverse=True)[m - 1]
def main():
    """Read T test cases of "N M" from stdin and print one Code Jam
    result line per case."""
    case_count = int(input())
    for case_no in range(1, case_count + 1):
        n, m = (int(tok) for tok in input().split(" "))
        max_free, min_free = get_stall(n, m)
        print("Case #{}: {} {}".format(case_no, max_free, min_free))
if __name__ == '__main__':
main() | true |
f1b7429c6e376820133ce069e1bd9e04f74caa6e | Python | THUMNLab/AutoGL | /autogl/datasets/utils/conversion/_to_pyg_dataset.py | UTF-8 | 1,435 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import typing as _typing
import torch
import torch_geometric
from autogl.data import Dataset, InMemoryDataset
from autogl.data.graph import GeneralStaticGraph
from autogl.data.graph.utils import conversion
def to_pyg_dataset(
    dataset: _typing.Union[Dataset, _typing.Iterable[GeneralStaticGraph]]
) -> Dataset[torch_geometric.data.Data]:
    """Convert an AutoGL dataset (or iterable of graphs) into a Dataset of
    PyTorch-Geometric ``Data`` objects.

    Each item may be: a ``torch_geometric.data.Data`` (kept as-is), a
    ``GeneralStaticGraph`` (converted), or a mapping of str -> Tensor
    (unpacked as ``Data`` kwargs). Anything else raises
    NotImplementedError. Train/val/test indices and the schema are
    preserved when the input is an ``InMemoryDataset``.
    """
    transformed_datalist: _typing.MutableSequence[torch_geometric.data.Data] = []
    for item in dataset:
        if isinstance(item, torch_geometric.data.Data):
            # Already in PyG format - passed through unchanged.
            transformed_datalist.append(item)
        elif isinstance(item, GeneralStaticGraph):
            transformed_datalist.append(conversion.static_graph_to_pyg_data(item))
        elif (
            isinstance(item, _typing.Mapping) and
            all([
                (isinstance(k, str) and isinstance(v, torch.Tensor))
                for k, v in item.items()
            ])
        ):
            # Plain {name: tensor} mapping - forwarded as Data kwargs.
            transformed_datalist.append(torch_geometric.data.Data(**item))
        else:
            raise NotImplementedError(
                f"Unsupported data item {type(item)}<{item}> to convert as "
                f"{torch_geometric.data.Data}"
            )
    return (
        InMemoryDataset(transformed_datalist, dataset.train_index, dataset.val_index, dataset.test_index, dataset.schema)
        if isinstance(dataset, InMemoryDataset)
        else InMemoryDataset(transformed_datalist)
    )
| true |
14321c1252e55e3b5f2ca03c6cc86c92da9f8795 | Python | DeepikaSampangi/Addtnl | /minesweeper.py | UTF-8 | 553 | 3.359375 | 3 | [] | no_license | def mine_sweeper(bombs , n_rows , n_cols):
fields = [[0 for i in range (n_cols)] for j in range (n_rows)]
for bomb_loc in bombs:
(b_rows , b_cols) = bomb_loc
fields[b_rows][b_cols] = -1
r_range = range (b_rows - 1 , b_rows + 2)
c_range = range (b_cols - 1 , b_cols + 2)
for i in (r_range):
for j in (c_range):
if( 0 <= i < n_rows and 0 <= j < n_cols and fields[i][j] != -1):
fields[i][j] += 1
return fields
print(mine_sweeper([[0,0] , [1,2]], 3, 4)) | true |
8af62d2a3e958d39ecd1612dd211a6e71dbb5a35 | Python | Krystiano8686/python_studia | /Zad_cw2/zad5.py | UTF-8 | 247 | 3.984375 | 4 | [] | no_license | # ZAD5
# Read three numbers; the first call carries the (Polish) prompt for all
# three, exactly as in the original exercise.
a = float(input('Podaj 3 liczby: '))
b = float(input())
c = float(input())

# Conditions: 0 <= a <= 10 and the three values strictly decrease.
if 0 <= a <= 10 and a > b > c:
    print("Wszystkie warunki zostały spełnione")
else:
    print("Warunki nie zostały spełnione")
| true |
fcc68c10fe0694f29db5c638069f0a57d40cde9f | Python | Furricane/Camera | /on_motion_script.py | UTF-8 | 790 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
#!/usr/bin/env python
import os, sys
sys.path.append('/home/pi/PythonUtilities')
import socketcomm
os.chdir('/home/pi/Camera/') # Change working directory
HostAddress = '192.168.1.92'
HostPort = 44444
def notify_host(host_address, host_port, message):
    """Open a socketcomm client to host_address:host_port and, when the
    connection succeeds, send `message` over it."""
    client, connected = socketcomm.create_client(host_address, host_port)
    print('connectedstatus = %s' % (str(connected)))
    if not connected:
        print("unconnected")
        return
    print('connection established')
    client.write(message)
print("# of arguments= ", len(sys.argv))
if len(sys.argv) == 1:
Message = 'MotionDetected'
else:
Message = 'MotionDetectedZone'+str(sys.argv[1])
notify_host(HostAddress, HostPort, Message)
| true |
49a1e99006cdf9db11cde3e11626e18a44521700 | Python | omazhary/dm-oscars | /OscarDataset/dataLoader.py | UTF-8 | 1,654 | 3.203125 | 3 | [
"MIT"
] | permissive | import csv
import numpy as np
from sklearn import preprocessing
#
# converts a csv file to 2D array
def csvToArray(filename):
    """Load a comma-separated file and return its rows as a list of
    lists of strings (2-D array)."""
    with open(filename) as handle:
        return list(csv.reader(handle, delimiter=','))
# Load the pre-split Oscars feature/label CSVs (Python 2 script).
feat_train = csvToArray('feat_train.csv')
feat_test = csvToArray('feat_test.csv')
label_train = csvToArray('label_train.csv')
label_test = csvToArray('label_test.csv')
#
# The first rows are names of columns
feature_names = feat_train[0]
label_names = label_train[0]
#
# These lines remove the column names and turn lists to numpy arrays
# If you need lists, use for example data_train.tolist() to convert them back to lists
feat_train = np.array(feat_train)[1:, :].astype(float)
feat_test = np.array(feat_test)[1:, :].astype(float)
label_train = np.array(label_train)[1:, :].astype(int)
label_test = np.array(label_test)[1:, :].astype(int)
####################### Save important features before scaling ###########
# np.copy so the in-place scaling below cannot alter the saved years.
train_years = np.copy(feat_train[:, feature_names.index('title_year')])
test_years = np.copy(feat_test[:, feature_names.index('title_year')])
print train_years, test_years
################# Do scaling if you need it ###################
# scaling the feature columns
# Don't scale 'original row' feature
# (column 0 is skipped below because it holds the original row index)
for i in range(1, len(feat_train[0])):
    feat_train[:, i] = preprocessing.scale(feat_train[:, i])
for i in range(1, len(feat_test[0])):
    feat_test[:, i] = preprocessing.scale(feat_test[:, i])
# Just printing
print '-> ', feature_names
print '-> ', label_names
################## Your code goes here #########################
4166f89ca12e70242ed11f1f03ee78fbab1d371d | Python | kantmp/CAmodule | /getTick.py | UTF-8 | 1,716 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
get the option tick
格式为
gettick tick.csv
'''
import tables as tbl
import os
import tsData
import pandas as pd
import sys
import getopt
#
__version__ = '0.1'
# Working directory; all input/output paths are resolved relative to it.
baseurl = os.getcwd()
def openHDFfile(hdf_file):
    '''
    open the hdf5 file
    need in the cwd

    Returns the open PyTables handle (read-only). On failure the
    exception is printed and swallowed, so the implicit return is None.
    '''
    try:
        fileh = tbl.open_file(baseurl+'\\' + hdf_file, mode='r')
        return fileh
    except Exception, e:
        print Exception, ":", e
def readOPfile(fileh, input_file):
    '''
    read op file ***.csv
    csv is column op_num | start_date | end_date
    use pandas
    read hdf5
    put it to csv

    For each row of the option list, reads that option's tick range from
    the open HDF5 handle and writes it to "<op_num>_<row index>.csv".
    NOTE(review): if read_csv fails, `df` is undefined and the code below
    raises NameError anyway.
    '''
    try:
        baseurl = os.getcwd()
        df = pd.read_csv(baseurl+'\\' + input_file)
    except Exception, e:
        print Exception, ":", e
    print df
    for index, row in df.iterrows():
        DD = tsData.readRange(fileh, row.op_num, row.start_date, row.end_date, 'tick')
        # print DD
        DD.to_csv(baseurl + '\\' + str(row.op_num)+'_' + str(index) + '.csv')
def main(argv):
    '''
    Parse argv and run the extraction.
    Usage: getTick.py -i <option list csv> -f <hdf5 database>
    NOTE(review): the HDF5 file handle is never closed (original comment:
    "need to close hdf5").
    '''
    try:
        opts, args = getopt.getopt(argv[1:], "i:f:")
        # print args
    except getopt.GetoptError:
        sys.exit()
    # Paths are relative to the working directory (translated comment).
    for name, value in opts:
        if name in ("-i"):  # NOTE(review): ("-i") is a plain string, not a tuple
            print "option list is " + baseurl + "\\" + value
            input_file = value
        if name in ("-f"):
            print "database is " + baseurl + "\\" + value
            hdf_file = value
    fileh = openHDFfile(hdf_file)
    readOPfile(fileh, input_file)
if __name__ == '__main__':
    # Entry point: announce start and end around the main run.
    print 'getTick is start'
    main(sys.argv)
    print "getTick is end "
| true |
e432d08da4dfa64cedb982b46c48c3b47977e55d | Python | turovod/Otus | /8_Lesson8/oop/example2-mro-newstyle2.py | UTF-8 | 506 | 3.90625 | 4 | [
"MIT"
] | permissive | """
In Python 2, search path is F, A, X, Y, B.
With Python 3, search path should be : F, A, X, Y, B, Y, X and after removing « bad heads » : F, A, B, Y, X.
"""
# Diamond-shaped hierarchy used to demonstrate Python's C3 method
# resolution order: F inherits from A and B, which both inherit (Y, X).
class X():
    def who_am_i(self):
        print("I am a X")

class Y():
    def who_am_i(self):
        print("I am a Y")

class A(Y, X):
    def who_am_i(self):
        print("I am a A")

class B(Y, X):
    def who_am_i(self):
        print("I am a B")

class F(A, B):
    def who_am_i(self):
        print("I am a F")

# C3 linearization keeps each class before its bases and preserves the
# base order of every class, yielding [F, A, B, Y, X, object].
print(F.mro())
| true |
dea2f65b6d7baedf2cbff2c73646c677df54d338 | Python | C2SM-RCM/emiproc | /tests/test_country_mask.py | UTF-8 | 501 | 2.59375 | 3 | [
"CC-BY-4.0"
] | permissive | import numpy as np
from emiproc.utilities import compute_country_mask
from emiproc.grids import RegularGrid
def test_create_simple_mask():
    """compute_country_mask over a small regular grid should detect at
    least one country, i.e. the mask must not be a single uniform value."""
    # NOTE(review): xmin/xmax (47.5-58.5) look like latitudes while
    # ymin/ymax (7.5-12.5) look like longitudes - confirm RegularGrid's
    # axis convention.
    arr = compute_country_mask(
        RegularGrid(
            xmin=47.5,
            xmax=58.5,
            ymin=7.5,
            ymax=12.5,
            nx=10,
            ny=10,
        ),
        # Coarse 110m Natural Earth borders keep the test fast.
        resolution="110m",
    )
    # check that there are some countries in there
    # Not just -1 values
    assert len(np.unique(arr)) > 1
| true |
c35343c6a2b725a53966247c10dd080809990177 | Python | mvabf/URI_python | /ex_1061.py | UTF-8 | 745 | 3.390625 | 3 | [] | no_license |
# URI 1061 - event duration. Input is four lines:
#   "Dia D"  and  "HH : MM : SS"  for the start, then the same for the end.
diaInicial = int(input()[4:])
horaInicial, minutoInicial, segundoInicial = map(int, input().split(':'))
diaFinal = int(input()[4:])
horaFinal, minutoFinal, segundoFinal = map(int, input().split(':'))

# Bug fix: the original borrowed hours, then minutes, then seconds in a
# fixed order, so a borrow triggered late (e.g. by the seconds) could
# leave minutes or hours at -1. Working in total seconds makes the whole
# carry chain correct by construction.
inicio = ((diaInicial * 24 + horaInicial) * 60 + minutoInicial) * 60 + segundoInicial
fim = ((diaFinal * 24 + horaFinal) * 60 + minutoFinal) * 60 + segundoFinal
restante = max(0, fim - inicio)  # the problem guarantees end >= start

diaTotal, restante = divmod(restante, 24 * 3600)
horaTotal, restante = divmod(restante, 3600)
minutoTotal, segundoTotal = divmod(restante, 60)

print("{} dia(s)".format(diaTotal))
print("{} hora(s)".format(horaTotal))
print("{} minuto(s)".format(minutoTotal))
print("{} segundo(s)".format(segundoTotal))
| true |
e99a9e53abe8f0329a95508d607c840883abb218 | Python | igenic/deep-rl-ofc-poker | /rlofc/ofc_agent.py | UTF-8 | 1,507 | 3.625 | 4 | [] | no_license | import numpy as np
from treys import Card
# Maps an OFC street index to its row name on the board.
street_to_row = {
    0: 'front',
    1: 'mid',
    2: 'back'
}
class OFCAgent(object):
    """An OFC decision maker (abstract base: subclasses implement
    place_new_card)."""

    def place_new_card(self, card, board):
        """Return 0, 1, 2 for front, mid, back."""
        pass
class OFCRandomAgent(OFCAgent):
    """Place each card on a uniformly random street among those still
    open on the board."""

    def place_new_card(self, card, board):
        """Return 0/1/2 (front/mid/back); occupied streets score 0 so
        argmax only ever selects a free one."""
        weights = np.random.uniform(0, 1, 3) * board.get_free_streets()
        return np.argmax(weights)
class OFCRLAgent(OFCAgent):
    """Insert neural network here.

    Placeholder for a reinforcement-learning agent; not implemented yet.
    """
    pass
class OFCHumanAgent(OFCAgent):
    """Interactive agent: shows the drawn card and asks a human player
    (via stdin/stdout) which free street to place it on."""

    def __init__(self, name):
        self.name = name  # display name used in the prompts

    def place_new_card(self, card, board):
        """Prompt until a valid free street number is entered; return it.

        Returns -1 immediately when the board has no free streets.
        """
        free_streets = board.get_free_street_indices()
        if len(free_streets) == 0:
            return -1
        print ('Current card:'),  # NOTE(review): trailing comma builds a tuple (Python 2 leftover)
        print(Card.print_pretty_card(Card.new(card)))
        print("Available streets:")
        for street in free_streets:
            print ('%s: %s' % (street, street_to_row[street]))
        print ("%s, please enter the street (number) to play your card." % (self.name))
        street = int(input())
        while street not in free_streets:
            print("Please enter a valid street: " + str(free_streets))
            street = int(input())
        print('')
        return street
class OFCComputerAgent(OFCAgent):
    """Insert computer player with some strategy here.

    Placeholder for a scripted strategy agent; not implemented yet.
    """
    pass
3560f874cdf6bbddde48a3d8d59e7eb3d4ce7dc7 | Python | starrrr1/traveltimeprediction | /traveltimecalc.py | UTF-8 | 2,332 | 2.75 | 3 | [] | no_license | import sys
import pandas as pd
import datetime
if __name__ == '__main__':
    # Python 2 script. Reads a traffic CSV (path from argv[1]) with columns
    # V1 (timestamp "MM/DD/YYYY HH:MM:SS"), section, traveltime.
    # Dates treated as weekends, to be excluded from the analysis:
    weekend = ['03/07/2015','03/14/2015','03/21/2015','03/28/2015','04/04/2015']
    df = pd.read_csv(sys.argv[1])
    # NOTE(review): DataFrame.sort() is the removed legacy pandas API
    # (sort_values in modern pandas).
    sortdf = df.sort(['V1','section'])
    # First 10 characters of the timestamp are the date.
    sortdf['date'] = sortdf['V1'].apply(lambda x: x[:10])
    xsortdf = sortdf
    # Drop the weekend dates (same values as `weekend` above, inlined).
    xsortdf = xsortdf.loc[xsortdf['date'] != '03/07/2015']
    xsortdf = xsortdf.loc[xsortdf['date'] != '03/14/2015']
    xsortdf = xsortdf.loc[xsortdf['date'] != '03/21/2015']
    xsortdf = xsortdf.loc[xsortdf['date'] != '03/28/2015']
    xsortdf = xsortdf.loc[xsortdf['date'] != '04/04/2015']
    output = []
    # timestamp = sortdf['V1'].values.tolist()
    # section = sortdf['section'].values.tolist()
    # traveltime = sortdf['traveltime'].values.tolist()
    # Group data, take only weekdays
    groupbytime = xsortdf.groupby('V1')
    for name, group in groupbytime:
        # print name
        cumtime = 0        # cumulative travel time across sections
        time_window = 5    # advance the lookup timestamp every 5 units
        currentdatetime = datetime.datetime.strptime(name,'%m/%d/%Y %H:%M:%S')
        currentname = name
        for st in group['section'].values.tolist():
            # Once the accumulated time passes the window, look the next
            # section's travel time up 5 minutes later in the data.
            if cumtime > time_window:
                currentdatetime = currentdatetime + datetime.timedelta(minutes = 5)
                currentname = str(currentdatetime.strftime('%m/%d/%Y %H:%M:%S'))
                time_window = time_window + 5
            ttime = sortdf['traveltime'].loc[(sortdf['V1'] == currentname) & (sortdf['section'] == st)].values.tolist()
            cumtime = cumtime + ttime[0]
        # Naive total (all sections at the departure time) for comparison.
        simpletime = group['traveltime'].sum()
        output.append([name,simpletime,cumtime])
        print name
    # Write one "timestamp,simple_total,cumulative_total" row per group.
    with open('traffic_cumulative_time.csv', 'w') as f:
        for n in output:
            #print n[0], n[1]
            f.write(str(n[0]) + ',' + str(n[1]) + ',' + str(n[2]) + '\n')
    # f.closed
    # traveltime = group['traveltime'].values.tolist()
    # for st in station:
    #     cultime = cultime + time
    #     if cultime >= time_window:
    #         print name
    #         print sortgroup['section']
    #         print sortgroup['traveltime']
    # cultime = group['traveltime'].sum()
    # output[name] = cultime
    # for i,v in output.iteritems():
    #     print i, v
| true |
1e4be08abc6d7687af8e2009642b2108a46f524c | Python | chrishefele/kaggle-sample-code | /SemiSupervised/analysis/src/col_vals.py | UTF-8 | 2,329 | 2.828125 | 3 | [] | no_license | import sys
# When True, very dense columns are stored as their inverted complement.
INVERT_FLAG = False
INVERT_THRESHOLD = 500000 # if more than this, use nonzero(1+(data+zeros)) vs the data
# svmlight-format input file and the total number of rows it contains
# (used to pad implicit zeros when inverting).
TRAIN = "/home/chefele/SemiSupervised/download/competition_data/unlabeled_data.svmlight.dat"
TRAIN_LINES = 1000000
line_counter = 0
col_vals = {}   # column index -> list of the non-zero values seen in it
print "Reading:", TRAIN
print "Reading line:",
# Each svmlight line holds space-separated "col:value" fields; collect
# every value per column (zeros are implicit and not stored).
for line in open(TRAIN,"r"):
    line_counter+=1
    if line_counter % 10000 ==0:
        # Progress indicator every 10k lines (Python 2 print-with-comma).
        print line_counter,
        sys.stdout.flush()
    for field in line.split(" "):
        if ":" in field:
            tokens = field.split(":")
            col = int(tokens[0])
            col_val = float(tokens[1])
            if col not in col_vals:
                col_vals[col]=[]
            col_vals[col].append(col_val)
def invert_data(data):
    """Return 1-x for every entry of `data` after padding it with implicit
    zeros up to TRAIN_LINES rows, keeping only the non-zero results."""
    implicit_zeros = TRAIN_LINES - len(data)
    padded = data + [0] * implicit_zeros
    return [1 - value for value in padded if (1 - value) != 0]
print "Checking for columns requiring inverted values..."
for col in col_vals:
if INVERT_FLAG and (len(col_vals[col]) > INVERT_THRESHOLD) :
old_len = len(col_vals[col])
col_vals[col] = invert_data( col_vals[col] )
if col_vals[col]==[]:
col_vals[col]=[0]
new_len = len(col_vals[col])
print "Inverted Col:", col,"Length before:", old_len, "After inverted:", new_len
sys.stdout.flush()
# count_stats = sorted([(vals[val], val) for val in vals])
# print count_stats
tot_cnt = 0
print "Sorting..."
sys.stdout.flush()
col_count_stats = sorted([(len(col_vals[col]), col) for col in col_vals])
for n, (cnt, col) in enumerate(col_count_stats):
tot_cnt += cnt
print n,
print "Col:", col, "NumVals:", len(col_vals[col]),
uniques=len(set(col_vals[col]))
print "Uniques:", uniques,
print "CumPts:", tot_cnt,
print "Max:", max(col_vals[col]), "Min:",min(col_vals[col]),
print "Avg:", sum(col_vals[col])*1.0/len(col_vals[col]),
sys.stdout.flush()
# only write non-binary column values (or potentiall thresholded to 0/1)
# (seems ok if min values ==1, then <1-->0 & >=1-->1 )
if uniques>1 :
print "Writing...",
fout=open("col_vals_"+str(col)+".csv","w")
for aVal in col_vals[col]:
fout.write(str(aVal)+"\n")
fout.close()
print
| true |
1e96763245ab65c1568641a3395421d5d778a65f | Python | olber027/AdventOfCode2020 | /Day_16/Part2.py | UTF-8 | 3,018 | 3.734375 | 4 | [] | no_license | '''
Now that you've identified which tickets contain invalid values, discard those tickets entirely. Use the remaining valid tickets to determine which field is which.
Using the valid ranges for each field, determine what order the fields appear on the tickets. The order is consistent between all tickets: if seat is the third field, it is the third field on every ticket, including your ticket.
For example, suppose you have the following notes:
class: 0-1 or 4-19
row: 0-5 or 8-19
seat: 0-13 or 16-19
your ticket:
11,12,13
nearby tickets:
3,9,18
15,1,5
5,14,9
Based on the nearby tickets in the above example, the first position must be row, the second position must be class, and the third position must be seat; you can conclude that in your ticket, class is 12, row is 11, and seat is 13.
Once you work out which field is which, look for the six fields on your ticket that start with the word departure. What do you get if you multiply those six values together?
'''
def findValidRules(num, ruleDict):
    """Return the names of every rule that has at least one inclusive
    (low, high) range containing `num`, preserving dict order."""
    return [
        name
        for name, ranges in ruleDict.items()
        if any(low <= num <= high for low, high in ranges)
    ]
ticketRules = {}    # rule name -> list of inclusive (low, high) ranges
myTicket = []
otherTickets = []
inputSections = []
# The puzzle input has three blank-line-separated sections.
# NOTE(review): "Data\input.txt" carries a literal backslash; an r"..."
# raw string would avoid the invalid-escape deprecation warning.
with open("Data\input.txt", "r") as inputFile:
    inputSections = inputFile.read().split("\n\n")
# Section 1: field rules such as "class: 0-1 or 4-19".
for rule in inputSections[0].splitlines():
    ruleName, ranges = rule.split(":")
    ticketRules[ruleName] = []
    ranges = [x.strip() for x in ranges.split("or")]
    for r in ranges:
        parsedRange = [int(x) for x in r.split("-")]
        ticketRules[ruleName].append(tuple(parsedRange))
# Section 2: my own ticket (values on the section's second line).
myTicket = [int(x) for x in inputSections[1].splitlines()[1].split(",")]
# Section 3: nearby tickets (the header line has no comma and is skipped).
for line in inputSections[2].splitlines():
    if "," in line:
        otherTickets.append([int(x) for x in line.split(",")])
validTickets = []
# Every field name starts out possible for every ticket position.
possibleFields = [set(ticketRules.keys()) for _ in range(len(myTicket))]
# Discard tickets holding any value that satisfies no rule at all.
for ticket in otherTickets:
    isValid = True
    for field in ticket:
        if len(findValidRules(field, ticketRules)) == 0:
            isValid = False
            break
    if isValid:
        validTickets.append(ticket)
# Constraint propagation: each position keeps only the rules that are
# valid for that position on every valid ticket.
for ticket in validTickets:
    for index in range(len(ticket)):
        possibleRules = findValidRules(ticket[index], ticketRules)
        possibleFields[index].intersection_update(set(possibleRules))
# Positions already narrowed to one candidate are certain.
certainFields = [(x[0], str(list(x[1])[0])) for x in enumerate(possibleFields) if len(x[1]) == 1]
# Remove certain fields from the other positions until every position is
# resolved (assumes the puzzle input admits a unique assignment).
while len(certainFields) < len(possibleFields):
    for field in certainFields:
        for fields in possibleFields:
            if field[1] in fields:
                fields.remove(field[1])
    for index in range(len(possibleFields)):
        if len(possibleFields[index]) == 1:
            certainFields.append((index, list(possibleFields[index])[0]))
# Answer: product of my ticket's values for fields named "departure ...".
total = 1
for field in certainFields:
    if "departure" in field[1]:
        total *= myTicket[field[0]]
print(total)
| true |
4a17aa26e827154a3f137dfc3c08159a6723825d | Python | Da1anna/Data-Structed-and-Algorithm_python | /基础知识/动态规划/贪心算法/20.3.17.py | UTF-8 | 5,094 | 4.1875 | 4 | [] | no_license | '''
有一堆石头,每块石头的重量都是正整数。
每一回合,从中选出两块最重的石头,然后将它们一起粉碎。假设石头的重量分别为 x 和 y,且 x <= y。那么粉碎的可能结果如下:
如果 x == y,那么两块石头都会被完全粉碎;
如果 x != y,那么重量为 x 的石头将会完全粉碎,而重量为 y 的石头新重量为 y-x。
最后,最多只会剩下一块石头。返回此石头的重量。如果没有石头剩下,就返回 0。
提示:
1 <= stones.length <= 30
1 <= stones[i] <= 1000
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/last-stone-weight
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
import heapq
# class Solution:
def lastStoneWeight(stones) -> int:
    """Smash the two heaviest stones each round; return the last stone's weight.

    When the two heaviest weigh x <= y, both are replaced by y - x (possibly 0).
    """
    # heapq is a min-heap, so store negated weights to pop the heaviest first
    heap = [-weight for weight in stones]
    heapq.heapify(heap)
    while len(heap) > 1:
        heaviest = heapq.heappop(heap)
        second = heapq.heappop(heap)
        heapq.heappush(heap, heaviest - second)
    return -heap[0]
# res = lastStoneWeight([3,5,7])
# print(res)
'''
2.跳跃游戏
给定一个非负整数数组,你最初位于数组的第一个位置。
数组中的每个元素代表你在该位置可以跳跃的最大长度。
判断你是否能够到达最后一个位置。
示例 1:
输入: [2,3,1,1,4]
输出: true
解释: 我们可以先跳 1 步,从位置 0 到达 位置 1, 然后再从位置 1 跳 3 步到达最后一个位置。
示例 2:
输入: [3,2,1,0,4]
输出: false
解释: 无论怎样,你总会到达索引为 3 的位置。但该位置的最大跳跃长度是 0 , 所以你永远不可能到达最后一个位置。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/jump-game
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
def keepJump(lst: list) -> bool:
    """Jump Game: return True when the last index of ``lst`` is reachable.

    ``lst[i]`` is the maximum jump length from index i.

    Fix: the original fell off the end and implicitly returned None for an
    empty list; the function now always returns a bool.
    """
    if len(lst) == 1:
        return True
    furthest = 0  # furthest index reachable so far
    for i in range(len(lst)):
        if i > furthest:
            return False  # index i can never be reached
        furthest = max(furthest, i + lst[i])
        if furthest >= len(lst) - 1:
            return True
    return False  # only reached when lst is empty
# res = keepJump([3,2,1,1,4])
# print(res)
'''
3.跳跃游戏2
给定一个非负整数数组,你最初位于数组的第一个位置。
数组中的每个元素代表你在该位置可以跳跃的最大长度。
你的目标是使用最少的跳跃次数到达数组的最后一个位置。
示例:
输入: [2,3,1,1,4]
输出: 2
解释: 跳到最后一个位置的最小跳跃数是 2。
从下标为 0 跳到下标为 1 的位置,跳 1 步,然后跳 3 步到达数组的最后一个位置。
说明:
假设你总是可以到达数组的最后一个位置。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/jump-game-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
def jump(lst:list) -> int:
step = 0
end = 0 #表示起跳的最远距离,若遍历的i==end了,说明该起跳了
max_position = 0
for i in range(len(lst)-1):
max_position = max(max_position,i + lst[i])
if i == end: #这里虽然step不应该在i=3这个位置+1,但是steps是相同的
step += 1
end = max_position
return step
# res = jump([3,1,4,1,1,1,0])
# print(res)
'''
4.判断子序列
给定字符串 s 和 t ,判断 s 是否为 t 的子序列。
你可以认为 s 和 t 中仅包含英文小写字母。字符串 t 可能会很长(长度 ~= 500,000),而 s 是个短字符串(长度 <=100)。
字符串的一个子序列是原始字符串删除一些(也可以不删除)字符而不改变剩余字符相对位置形成的新字符串。(例如,"ace"是"abcde"的一个子序列,而"aec"不是)。
示例 1:
s = "abc", t = "ahbgdc"
返回 true.
示例 2:
s = "axc", t = "ahbgdc"
返回 false.
'''
def is_Subsequence(s: str, t: str) -> bool:
    """Two-pointer scan: True when ``t`` is a subsequence of ``s``.

    Parameter roles follow the original code: ``s`` is the long string,
    ``t`` is the candidate subsequence.
    """
    i = j = 0
    while j < len(s) and i < len(t):
        if s[j] == t[i]:
            i += 1
        j += 1
    return i == len(t)
# res = is_Subsequence('abcde','ace')
# print(res)
'''
给你一个仅包含小写字母的字符串,请你去除字符串中重复的字母,使得每个字母只出现一次。
需保证返回结果的字典序????最小(要求不能打乱其他字符的相对位置)。
示例 1:
输入: "bcabc"
输出: "abc"
示例 2:
输入: "cbacdcbc"
输出: "acdb"
'''
def str_quchong(s: str) -> str:
    """Print and return the distinct characters of ``s`` in sorted order.

    Note kept from the original author: this does NOT solve the LeetCode
    "remove duplicate letters" problem, because it sorts the characters
    instead of keeping the lexicographically smallest subsequence.
    """
    res = ''.join(sorted(set(s)))
    print(res)
    return res
# str_quchong('ddcabdca')
| true |
7211772918339234af209e4cb3785d55b2b57ee5 | Python | NaveenKudari/Assignment_6 | /Assignment_6.2.py | UTF-8 | 228 | 3.265625 | 3 | [] | no_license |
# coding: utf-8
# In[38]:
# Sample variance (Bessel-corrected: divide by n - 1) of a fixed data set.
list1 = [3, 21, 98, 203, 17, 9]
mean = sum(list1) / len(list1)  # was sum([1 for i in list1]); len() is the idiom
value = 0
for i in list1:
    value += (i - mean) ** 2
variance = value / (len(list1) - 1)
print("variance is:" + " " + str(variance))
| true |
5ca048990621fc4c4d3872a071665349439c583d | Python | yaohongyi/identify_ui_test | /operate/operate_tool.py | UTF-8 | 12,637 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 都君丨大魔王
import time
from selenium.webdriver import ActionChains
from public import api
from page_object.tool_page import ToolPage
from page_object.case_page import CasePage
class OperateTool:
    """Page-operation wrapper for the application toolbar, the inspection-record
    (检验记录) window and the identify-opinion (鉴定意见) window.

    Drives a Selenium browser through the ToolPage / CasePage page objects;
    window lookups go through the project-level ``api`` helpers.  All button
    and field labels are the Chinese captions shown by the UI.
    """
    def __init__(self, browser):
        self.browser = browser
        self.tool_page = ToolPage(self.browser)
        self.case_page = CasePage(self.browser)
    def click_tool_button(self, button_name):
        """
        Click a toolbar button by its visible label.

        :param button_name: button caption text
        :return: None
        """
        # the play (播放) and report (报告) buttons live in a second button group
        if button_name in ('播放', '报告'):
            self.tool_page.tool_button2(button_name).click()
        else:
            self.tool_page.tool_button(button_name).click()
    def open_inspection_record(self):
        """
        Open the inspection-record window via the report menu.

        :return: the inspection-record window element
        """
        # click the report toolbar button
        self.click_tool_button('报告')
        # choose "inspection record" from the report menu
        self.tool_page.report(report_type='检验记录').click()
        # locate the newly opened window
        inspection_record_window = api.level_2_window(self.browser)
        return inspection_record_window
    def click_inspection_record_button(self, button_name):
        """Click one of the inspection-record window buttons
        (insert picture / preview / save / export)."""
        inspection_record_window = api.level_2_window(self.browser)
        self.tool_page.inspection_record_button(inspection_record_window, button_name).click()
    def add_inspection_record(self, **data):
        """Fill in every section of an inspection record, then save it.

        Expects keyword data: title, join_no, page_header, page_footer and the
        title_N / textarea_N pairs used below; missing keys are sent as None.
        """
        # open the inspection-record window
        inspection_record_window = self.open_inspection_record()
        time.sleep(1)
        # "report title" tab
        title = self.tool_page.inspection_record_input(inspection_record_window, field_name='标题')
        title.clear()
        title.send_keys(data.get('title'))
        join_no = self.tool_page.inspection_record_input(inspection_record_window, field_name='参加编号')
        join_no.clear()
        join_no.send_keys(data.get('join_no'))
        page_header = self.tool_page.inspection_record_input(inspection_record_window, field_name='页眉')
        page_header.clear()
        page_header.send_keys(data.get('page_header'))
        page_footer = self.tool_page.inspection_record_input(inspection_record_window, field_name='页脚')
        page_footer.clear()
        page_footer.send_keys(data.get('page_footer'))
        time.sleep(1)
        # "submitted materials" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='送检材料情况').click()
        time.sleep(1)
        title_1 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_1.clear()
        title_1.send_keys(data.get('title_1'))
        textarea_1 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_1.clear()
        textarea_1.send_keys(data.get('textarea_1'))
        # "collection of samples" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='检材、样本的采集').click()
        time.sleep(1)
        title_2 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_2.clear()
        title_2.send_keys(data.get('title_2'))
        textarea_2 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_2.clear()
        textarea_2.send_keys(data.get('textarea_2'))
        # "overall situation" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='总体情况').click()
        time.sleep(1)
        title_3 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_3.clear()
        title_3.send_keys(data.get('title_3'))
        textarea_3 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_3.clear()
        textarea_3.send_keys(data.get('textarea_3'))
        # "inspection analysis" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='检验分析').click()
        time.sleep(1)
        title_4 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_4.clear()
        title_4.send_keys(data.get('title_4'))
        textarea_4 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_4.clear()
        textarea_4.send_keys(data.get('textarea_4'))
        time.sleep(1)
        # listening-analysis sub-section (left panel)
        self.tool_page.inspection_record_left_common(inspection_record_window, '听辩分析').click()
        title_4_1 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_4_1.clear()
        title_4_1.send_keys(data.get('title_4_1'))
        textarea_4_1 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_4_1.clear()
        textarea_4_1.send_keys(data.get('textarea_4_1'))
        time.sleep(1)
        # analysis-and-inspection sub-section (left panel)
        self.tool_page.inspection_record_left_common(inspection_record_window, '分析及检验').click()
        title_4_2 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_4_2.clear()
        title_4_2.send_keys(data.get('title_4_2'))
        textarea_4_2 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_4_2.clear()
        textarea_4_2.send_keys(data.get('textarea_4_2'))
        time.sleep(1)
        # comparison-result sub-section (left panel)
        self.tool_page.inspection_record_left_common(inspection_record_window, '比对检验结果').click()
        title_4_2_1 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_4_2_1.clear()
        title_4_2_1.send_keys(data.get('title_4_2_1'))
        textarea_4_2_1 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_4_2_1.clear()
        textarea_4_2_1.send_keys(data.get('textarea_4_2_1'))
        time.sleep(1)
        # "comprehensive evaluation" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='综合评断').click()
        time.sleep(1)
        title_5 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_5.clear()
        title_5.send_keys(data.get('title_5'))
        textarea_5 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_5.clear()
        textarea_5.send_keys(data.get('textarea_5'))
        # "appraisal conclusion" tab
        self.tool_page.inspection_record_tab(inspection_record_window, tab_name='鉴定结论').click()
        time.sleep(1)
        title_6 = self.tool_page.inspection_record_title_input(inspection_record_window)
        title_6.clear()
        title_6.send_keys(data.get('title_6'))
        textarea_6 = self.tool_page.inspection_record_textarea(inspection_record_window)
        textarea_6.clear()
        textarea_6.send_keys(data.get('textarea_6'))
        time.sleep(1)
        self.click_inspection_record_button('保存')
    def upload_picture(self, file_path, insert_window_name):
        """Upload a picture through the insert-picture dialog (OS file dialog)."""
        self.click_inspection_record_button("插入图片")
        insert_picture_window = self.tool_page.insert_picture_window()
        self.tool_page.upload_picture_button(insert_picture_window).click()
        api.import_file(file_path, insert_window_name)
        time.sleep(3)
    def del_picture(self, picture_name):
        """Delete a picture from the "my pictures" folder via its context menu."""
        self.case_page.unfold_and_hide_button(folder_name='我的图片').click()
        time.sleep(0.5)
        picture_file = self.case_page.find_file_by_name(picture_name)
        picture_file.click()
        time.sleep(0.5)
        ActionChains(self.browser).context_click(picture_file).perform()
        self.case_page.file_context_menu('删除').click()
        time.sleep(0.5)
        # confirm the deletion prompt
        prompt_window = api.level_2_window(self.browser)
        self.case_page.window_button(prompt_window, '确定').click()
    def export_inspection_record(self, file_path, window_name):
        """
        Export the inspection record to a file.

        :param file_path: destination path including file name
        :param window_name: title of the OS export dialog
        :return: None
        """
        # locate the inspection-record window
        inspection_record_window = api.level_2_window(self.browser)
        self.tool_page.inspection_record_button(inspection_record_window, '导出').click()
        # NOTE(review): the next call clicks the export button a second time via
        # click_inspection_record_button -- confirm both clicks are intended.
        self.click_inspection_record_button("导出")
        api.export_file(file_path, window_name)
        time.sleep(5)
    def open_identify_opinion(self):
        """
        Open the identify-opinion report from the report menu.

        :return: None
        """
        # click the report toolbar button
        self.click_tool_button('报告')
        # choose "identify opinion" from the report menu
        self.tool_page.report(report_type='鉴定意见').click()
    def switch_inspection_record_tab(self, window, tab_name):
        """Switch to the named tab of an inspection-record window."""
        self.tool_page.inspection_record_tab(window, tab_name).click()
    def add_identify_opinion(self, **data):
        """Fill in and save an identify-opinion report.

        NOTE(review): only sections '一' and '三' are filled here; no section
        '二' input exists in this flow -- confirm that is intended.
        """
        # locate the identify-opinion window
        identify_opinion_window = api.level_2_window(self.browser)
        # title
        title = self.tool_page.identify_opinion_input(identify_opinion_window, '标题')
        title.clear()
        title.send_keys(data.get('title'))
        # page header
        page_header = self.tool_page.identify_opinion_input(identify_opinion_window, '页眉')
        page_header.clear()
        page_header.send_keys(data.get('page_header'))
        # page footer
        page_footer = self.tool_page.identify_opinion_input(identify_opinion_window, '页脚')
        page_footer.clear()
        page_footer.send_keys(data.get('page_footer'))
        time.sleep(1)
        # "case brief" tab
        self.tool_page.identify_opinion_tab(identify_opinion_window, '案件简介').click()
        title_1 = self.tool_page.identify_opinion_input(identify_opinion_window, '一')
        title_1.clear()
        title_1.send_keys(data.get('title_1'))
        textarea_1 = self.tool_page.identify_opinion_textarea(identify_opinion_window)
        textarea_1.clear()
        textarea_1.send_keys(data.get('textarea_1'))
        time.sleep(1)
        # "comparison result" tab
        self.tool_page.identify_opinion_tab(identify_opinion_window, '比对结果').click()
        title_3 = self.tool_page.identify_opinion_input(identify_opinion_window, '三')
        title_3.clear()
        title_3.send_keys(data.get('title_3'))
        textarea_3 = self.tool_page.identify_opinion_textarea(identify_opinion_window)
        textarea_3.clear()
        textarea_3.send_keys(data.get('textarea_3'))
        identifier = self.tool_page.identify_opinion_input(identify_opinion_window, '鉴定人')
        identifier.clear()
        identifier.send_keys(data.get('identifier'))
        identify_data = self.tool_page.identify_opinion_input(identify_opinion_window, '鉴定日期')
        identify_data.clear()
        identify_data.send_keys(data.get('identify_data'))
        time.sleep(1)
        self.tool_page.identify_opinion_button(identify_opinion_window, '保存').click()
        time.sleep(1)
    def export_identify_opinion(self, file_path, window_name):
        """
        Export the identify-opinion report to a file.

        :param file_path: destination path including file name
        :param window_name: title of the OS export dialog
        :return: None
        """
        # locate the identify-opinion window
        identify_opinion_window = api.level_2_window(self.browser)
        self.tool_page.identify_opinion_button(identify_opinion_window, '导出').click()
        api.export_file(file_path, window_name)
        time.sleep(5)
    def insert_picture_to_text(self):
        """
        Insert the selected picture into the inspection-record body text.

        :return: None
        """
        picture_window = self.tool_page.insert_picture_window()
        self.tool_page.import_picture_button(picture_window).click()
    def click_confirm_button(self):
        """Click the OK (确定) button of the prompt window."""
        self.tool_page.prompt_message_button('确定').click()
    def close_level_2_window(self):
        """Close the top-most level-2 window."""
        level_2_window = api.level_2_window(self.browser)
        api.close_window_button(level_2_window).click()
    def close_level_3_window(self):
        """Close the top-most level-3 window."""
        level_3_window = api.level_3_window(self.browser)
        api.close_window_button(level_3_window).click()
# Library module: nothing to execute when run directly.
if __name__ == '__main__':
    ...
| true |
3a9f8e1c8f83185ce82b022d74d09428e6078fc3 | Python | liliangqi/person_search_triplet | /__init__.py | UTF-8 | 579 | 2.671875 | 3 | [
"MIT"
] | permissive | # -----------------------------------------------------
# Initial Settings for Taining and Testing SIPN
#
# Author: Liangqi Li
# Creating Date: Apr 14, 2018
# Latest rectified: Apr 14, 2018
# -----------------------------------------------------
import time
import functools
def clock_non_return(func):
    """Decorator: run *func*, discard its return value, and print the elapsed
    wall-clock time in hours (intended for long training/testing runs)."""
    @functools.wraps(func)
    def clocked(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        elapsed = time.time() - start
        print('\n' + '*' * 40)
        print('Entire process costs {:.2f} hours.'.format(elapsed / 3600))
    return clocked
| true |
35cf46b77bf570c28ae3edcdc42615112bcec1d5 | Python | atharrison/python-adventofcode2020 | /day15/day15.py | UTF-8 | 944 | 3.53125 | 4 | [
"MIT"
] | permissive | import copy
class Day15:
    """AoC 2020 day 15 "memory game" (Van Eck-style sequence).

    Fixes/improvements over the original: membership test uses the dict
    directly instead of ``.keys()``, and solve_part1 now *returns* the number
    spoken on turn ``self.iterations - 1`` instead of only printing progress.
    """
    # started 0:12 after
    def __init__(self, data):
        self.data = data          # starting numbers, one per opening turn
        self.iterations = 2021    # simulate turns up to iterations - 1 (part 1: 2020)
    def solve_part1(self):
        """Play the game; return the number spoken on turn self.iterations - 1."""
        # value -> turn on which it was last spoken (starting numbers included)
        turn_lookup = {val: idx + 1 for idx, val in enumerate(self.data)}
        print(turn_lookup)
        # first turn after reading list:
        first_turn = len(self.data) + 1
        spoken = 0
        last = self.data[-1] if self.data else None
        for turn in range(first_turn, self.iterations):
            if turn % 100000 == 1 or turn > self.iterations - 5:
                print(f"Turn {turn}, Spoken: {spoken}")
            # next number: age of the current one, or 0 if it is new
            if spoken in turn_lookup:
                next_spoken = turn - turn_lookup[spoken]
            else:
                next_spoken = 0
            turn_lookup[spoken] = turn
            last = spoken
            spoken = next_spoken
        return last
    def solve_part2(self):
        """Part 2: same game, run out to turn 30,000,000."""
        self.iterations = 30000001
        return self.solve_part1()
| true |
c483b4457c9cc8807e03406750b987d727c517aa | Python | saiprasadvk/pythonprogram | /workout/perimeter of a circle.py | UTF-8 | 591 | 4.4375 | 4 | [] | no_license | Write a Python class named Circle constructed by a radius and two methods which will compute the area and the perimeter of a circle
Ans::
class circle:
    """A circle defined by its radius (pi approximated as 3.14, as in the original).

    Bug fix: in the original, ``perimeter`` computed and printed the *area*
    while ``area`` computed and printed the *perimeter*; the bodies were
    swapped so each method now matches its name.  Both methods also return
    the value they print so callers can use the result.
    """
    def __init__(self, radius):
        self.radius = radius  # radius of the circle
    def perimeter(self):
        # circumference = 2 * pi * r
        b = 2 * 3.14 * self.radius
        print("perimeter of a circle", b)
        return b
    def area(self):
        # area = pi * r^2
        a = 3.14 * (self.radius) ** 2
        print("Area of a circle", a)
        return a
def main():
    """Demo: build a radius-3 circle and print its area and perimeter."""
    shape = circle(3)
    shape.area()
    shape.perimeter()
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main()
O/P::
perimeter of a circle 18.84
Area of a circle 28.26
| true |
389b4cc1a23e9c787a31e515472804751350aa16 | Python | nathanielanozie/anozie_tools | /py/na_addToLayer.py | UTF-8 | 2,688 | 2.953125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | ##@file na_addToLayer.py Tools to find Maya scene transforms and put them into a display Layer.
#@note ex put all the transforms in group1 into layer1.
#@code import na_addToLayer as na @endcode
#@code na.addToLayer( 'group1', ['transform'], 'layer1' ) @endcode
#
#@author Nathaniel Anozie
import maya.cmds as cmds
import maya.mel as mel
##add all objects in hierarchy of specified type to an existing layer chosen
#
##
##Add every node of the requested types under a hierarchy to an existing
##display layer (Maya / Python 2).
#
#@param hierarchyParent root node whose hierarchy is scanned
#@param types list of Maya node types to keep, e.g. ['transform']
#@param layer name of an existing displayLayer node
#@return 0 on invalid input, None on success
##
def addToLayer( hierarchyParent = "group1", types = [], layer = "layer1" ):
    # -- validate inputs before touching the scene --
    if cmds.objExists(layer) == 0:
        print('Requires '+layer+' Exist')
        return 0
    if cmds.objectType(layer) != 'displayLayer':
        print('Requires '+layer+' type displayLayer')
        return 0
    originalSelection = cmds.ls(sl = True)
    if cmds.objExists(hierarchyParent) == 0:
        print('Requires '+hierarchyParent+' exists')
        return 0
    # select the whole hierarchy so ls() reports every node under the parent
    cmds.select(clear=True)
    cmds.select(hierarchyParent, hierarchy=True)
    hierarchyNodes = cmds.ls(sl = True)
    # keep only the nodes whose type was requested
    nodesToAdd = [node for node in hierarchyNodes if getIsTypeSupported( node, types ) == 1]
    # layerEditorAddObjects is only exposed through MEL, so drive it via mel.eval
    layerCommand = "layerEditorAddObjects %s" %layer
    for node in nodesToAdd:
        cmds.select(node,replace = True)
        print(node)
        mel.eval(layerCommand)
    # restore whatever the user had selected
    if len(originalSelection) > 0:
        cmds.select(originalSelection,replace = True)
##
#
##
def addToLayer_unitTest():
    # smoke test: a fresh cube's transform should land in a brand new layer
    testCube = cmds.polyCube()
    newLayer = cmds.createDisplayLayer(number=1, empty = True)
    addToLayer( hierarchyParent = testCube[0], types = ['transform'], layer = newLayer )
def addToLayer_unitTest_1():
    # negative path: a layer name that does not exist should be rejected
    testCube = cmds.polyCube()
    unusedLayer = cmds.createDisplayLayer(number=1, empty = True)
    addToLayer( hierarchyParent = testCube[0], types = ['transform'], layer = 'idontExist' )
##get 1 if type of single object input is in specified type list zero otherwise
#
##
def getIsTypeSupported( obj = "", supportedTypes = []):
    """Return 1 when obj's Maya node type appears in supportedTypes, else 0."""
    for nodeType in supportedTypes:
        if cmds.objectType(obj) == nodeType:
            return 1
    return 0
def getIsTypeSupported_unitTest():
    # a polyCube transform matches 'transform' (1) but not 'joint' (0)
    sample = cmds.polyCube()
    print(getIsTypeSupported( sample[0], ['transform'] ))
    print("\n")
    print(getIsTypeSupported( sample[0], ['joint'] ))
    print("\n")
| true |
d3b37b9c6fe2a92e7936560463b4035cd335f410 | Python | ideaqiwang/leetcode | /Array/39_CombinationSum.py | UTF-8 | 1,516 | 3.78125 | 4 | [] | no_license | '''
39. Combination Sum
Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.
The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the frequency of at least one of the chosen numbers is different.
It is guaranteed that the number of unique combinations that sum up to target is less than 150 combinations for the given input.
Example 1:
Input: candidates = [2,3,6,7], target = 7
Output: [[2,2,3],[7]]
Explanation:
2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.
7 is a candidate, and 7 = 7.
These are the only two combinations.
'''
class Solution:
# Backtracking
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every unique combination of candidates (reuse allowed) that sums to target."""
        if not candidates:
            return []
        # accumulator shared with the dfs() helper, which appends finished combinations
        self.res: List[List[int]] = []
        self.dfs(candidates, target, [], 0)
        return self.res
def dfs(self, candidates, target, combination, startIdx):
if target == 0:
self.res.append(list(combination))
return
if target < 0:
return
for i in range(startIdx, len(candidates)):
combination.append(candidates[i])
self.dfs(candidates, target-candidates[i], combination, i)
# backtrack, remove the number from the combination
combination.pop() | true |
5d26ac68e18555fecc365914261f24fc61a9567e | Python | imaginechen/EzaPython | /Email/SendEmail.py | UTF-8 | 838 | 2.890625 | 3 | [] | no_license | import smtplib
from email.mime.text import MIMEText
# Third-party SMTP service configuration.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to an
# environment variable or a config file before sharing this script.
mail_host = "applesmtp.126.com" # SMTP server
mail_user = "eric_python_auto@126.com" # user name
mail_pass = "qijzxcqj00838488" # password / authorization code
sender = 'eric_python_auto@126.com' # sender address
receivers = ['imaginechen@126.com', 'eric_python_auto@126.com'] # receiver addresses
content = 'Python Send Mail !'  # plain-text body
title = 'Python SMTP Mail' # subject line
message = MIMEText(content, 'plain', 'utf-8') # build the MIME message
message['From'] = "{}".format(sender)
message['To'] = ",".join(receivers)
message['Subject'] = title
try:
smtpObj = smtplib.SMTP_SSL(mail_host, 465) # Launch SSL, general port 465
smtpObj.login(mail_user, mail_pass) # login authorization
smtpObj.sendmail(sender, receivers, message.as_string()) # send out the message
print("mail has been send successfully.")
except smtplib.SMTPException as e:
print(e) | true |
252d7096d8b2d216e97b575de5f52c522c57f50d | Python | dung-ngviet/LeetD | /LeetCode/DP/55/55.py | UTF-8 | 1,722 | 3.625 | 4 | [] | no_license | from typing import List
# class Solution:
# def canJump(self, nums: List[int]) -> bool:
# max = 0
# for i in range(0, len(nums)):
# if i > max: return False
# num = nums[i]
# if i + num > max: max = i + num
# if max > len(nums): return True
# return max >= len(nums) - 1
# Backtracking solution: try every possible step
# class Solution:
# def canJump(self, nums: List[int]) -> bool:
# return self.canJumpFromPosition(0, nums)
# def canJumpFromPosition(self, pos: int, nums: List[int]) -> bool:
# if pos == len(nums) - 1: return True
# maxPos = min(pos + nums[pos], len(nums) - 1)
# for i in range(pos + 1, maxPos + 1):
# if self.canJumpFromPosition(i, nums): return True
# return False
# # Backtracking solution with memoization
# class Solution:
# def canJump(self, nums: List[int]) -> bool:
# return self.canJumpFromPosition(0, nums)
# def canJumpFromPosition(self, pos: int, nums: List[int]) -> bool:
# if pos == len(nums) - 1: return True
# maxPos = min(pos + nums[pos], len(nums) - 1)
# for i in range(pos + 1, maxPos + 1):
# if self.canJumpFromPosition(i, nums): return True
# return False
# Greedy solution
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """Greedy right-to-left: track the left-most index from which the end
        is reachable; the answer is whether that index is 0."""
        target = len(nums) - 1
        for pos in reversed(range(len(nums) - 1)):
            if pos + nums[pos] >= target:
                target = pos
        return target == 0
# Quick smoke checks (expected output: True, False, True, True).
print(Solution().canJump([2,3,1,1,4]))
print(Solution().canJump([3,2,1,0,4]))
print(Solution().canJump([0]))
print(Solution().canJump([2,0,0]))
| true |
b6903c86f398377444a9bb1812e662a7dac24f96 | Python | linxumelon/examplifier | /netStat.py | UTF-8 | 3,464 | 2.765625 | 3 | [] | no_license | import psutil
import time
import socket
def get_global_stat():
    """Return system-wide (all NICs aggregated) network I/O counters as a dict.

    Fix: the original only assigned the values to local variables and
    returned None, so callers could never use the numbers.
    """
    stats = psutil.net_io_counters(pernic=False, nowrap=True)
    return {
        'bytes_sent': stats.bytes_sent,
        'bytes_recv': stats.bytes_recv,
        'packets_sent': stats.packets_sent,
        'packets_recv': stats.packets_recv,
        'errin': stats.errin,      # total number of errors while receiving
        'errout': stats.errout,    # total number of errors while sending
        'dropin': stats.dropin,    # incoming packets which were dropped
        'dropout': stats.dropout,  # outgoing packets dropped (always 0 on macOS and BSD)
    }
def get_socket_connections(kind="all"):
    """Return the current socket connections as a list of dicts.

    Fix: the original discarded every field into local variables (one of
    which shadowed the built-in ``type``) and returned None.
    """
    connections = psutil.net_connections(kind=kind)
    result = []
    for c in connections:
        result.append({
            'fd': c.fd,          # socket file descriptor
            'family': c.family,  # AF_INET, AF_INET6 or AF_UNIX
            'type': c.type,      # SOCK_STREAM, SOCK_DGRAM or SOCK_SEQPACKET
            'laddr': c.laddr,    # local address (ip, port) or AF_UNIX path
            'raddr': c.raddr,    # remote address (ip, port) or AF_UNIX path
            'status': c.status,  # status of a TCP connection
            'pid': c.pid,        # PID of the process which opened the socket
        })
    return result
def get_NIC_address():
    """Return {interface_name: [address-info dicts]} for every NIC.

    Bug fix: psutil.net_if_addrs() returns a dict, so iterating it directly
    yields only the interface-name keys; unpacking ``name, addresses`` over
    those strings raised ValueError.  Iterate .items() instead, and return
    the collected data instead of discarding it.
    """
    nic_addresses = psutil.net_if_addrs()
    result = {}
    for interface_name, addresses in nic_addresses.items():
        result[interface_name] = [
            {
                'family': a.family,        # address family
                'address': a.address,      # primary NIC address
                'netmask': a.netmask,      # netmask address
                'broadcast': a.broadcast,  # broadcast address
                'ptp': a.ptp,              # peer address on point-to-point links
            }
            for a in addresses
        ]
    return result
def get_NIC_stats():
    """Return {interface_name: stats dict} for every NIC.

    Bug fix: psutil.net_if_stats() returns a dict; the original's 2-tuple
    unpack over the raw dict iterated only the keys and raised ValueError.
    Results are now returned instead of discarded.
    """
    nic_stats = psutil.net_if_stats()
    result = {}
    for interface_name, stat in nic_stats.items():
        result[interface_name] = {
            'isup': stat.isup,      # whether the interface is up
            'duplex': stat.duplex,  # duplex communication type
            'speed': stat.speed,    # link speed (megabits per psutil docs -- confirm)
            'mtu': stat.mtu,        # MTU in bytes
        }
    return result
def socket_monitoring():
    """Poll the socket table forever, reporting remote addresses that were not
    present when monitoring started.  Runs until interrupted."""
    # snapshot of the remote addresses seen at startup
    remote_ips = set()
    sockets = psutil.net_connections(kind='all')
    num_of_socks = len(sockets)
    for s in sockets:
        remote_ips.add(s.raddr)
    print("**************BEGINNING STATISTICS*****************")
    print(("number of sockets connections: {}").format(num_of_socks))
    print(remote_ips)
    step = 1
    sleep_time = 60  # seconds between polls (original comment said "2 minutes"; it is 1 minute)
    while True:
        time.sleep(sleep_time)
        current_sockets = psutil.net_connections(kind='all')
        new_connection = set()
        for current_s in current_sockets:
            remote_ip = current_s.raddr
            if remote_ip in remote_ips:
                continue
            else:
                # NOTE(review): raddr can be an empty tuple for listening
                # sockets, in which case .ip/.port would fail -- confirm.
                ip = remote_ip.ip
                port = remote_ip.port
                try:
                    # resolve a hostname for nicer reporting
                    addr = socket.gethostbyaddr(ip)
                    new_connection.add((addr[0], ip, port))
                except socket.herror:
                    # no reverse-DNS entry: fall back to the raw address
                    new_connection.add(remote_ip)
                    pass
        print(("**************TIME={} min STATISTICS*****************").format(step * sleep_time / 60))
        print(("number of sockets connections: {}").format(len(current_sockets)))
        print("new connections not found in at the beginning: ")
        print(new_connection)
        step += 1
# Start the (infinite) monitoring loop when run as a script.
if __name__ == '__main__':
    socket_monitoring()
| true |
b3d743e915b7e2909f8260a7fa08fc556f8d14a5 | Python | poke53280/ml_mercari | /Train_Index_Group.py | UTF-8 | 1,388 | 2.984375 | 3 | [] | no_license |
import pandas as pd
import numpy as np
# Toy frame; NOTE(review): `id`, `s` and `t` shadow Python built-ins (kept as-is).
id = [0,0,1,1,3, 0]
d = [3,3,4,5,6, 3]
s = [4,4,4,4,4, 4]
ix = [7,2, 0, 3, 1, 4]
t = ['A', 'B', 'C', 'D', 'E', 'C']
df = pd.DataFrame({'id': id, 'd' : d, 's': s, 'idx': ix, 't':t})
df  # bare expression: no effect outside a REPL/notebook
# Group by id, d, s and inspect the ordering of t within each group.
df_grouped = df.groupby(['id', 'd', 's'])
for group_key, item in df_grouped:
    print(group_key)
    print(item)
"""c"""
# Rebuild a frame with one row per (id, d, s) group and the group's t values
# joined with ".".
g_id = []
g_d = []
g_s = []
g_t = []
for key, item in df_grouped:
    print (key)
    g = df_grouped.get_group(key)
    l = list (g.t)
    s = ".".join(l)  # NOTE(review): rebinds the earlier list `s`
    g_id.append(key[0])
    g_d.append(key[1])
    g_s.append(key[2])
    g_t.append(s)
"""c"""
df_g = pd.DataFrame({'id': g_id, 'd':g_d, 's': g_s, 't' : g_t})
df_g
# NOTE(review): `g` below is the last group DataFrame left over from the loop;
# iterating a DataFrame yields column labels, and `x.names` on a string will
# raise AttributeError -- this scratch loop looks broken.
for x in g:
    print (x.index)
    print (x.names)
def full_sum(df):
    """Return the rows of columns ``b`` and ``c`` paired up as a list of tuples."""
    b_values = list(df.b)
    c_values = list(df.c)
    return list(zip(b_values, c_values))
"""c"""
# Larger random frame: 1000 rows spread over 90 ids, for groupby experiments.
num_objects = 90
df = pd.DataFrame({'id': np.random.choice(range(num_objects), 1000),
                   'a': np.random.randn(1000),
                   'b': np.random.randn(1000),
                   'c': np.random.randn(1000),
                   'd': np.random.randn(1000),
                   'N': np.random.randint(100, 1000, (1000))})
g = df.groupby('id')
g.agg(['sum'])  # bare expression: result is discarded outside a REPL
q = df[df.id == 89]
| true |
921d77f26ae0b97b936a7fbba071677d281dfcae | Python | Felienne/spea | /Python files/39 Week 7 - About Sets/06 test_set_have_arithmetic_operators/78855_01_code.step.py | UTF-8 | 500 | 3.5 | 4 | [] | no_license | #
class AboutSets(unittest.TestCase):
def test_set_have_arithmetic_operators(self):
beatles = {'John', 'Ringo', 'George', 'Paul'}
dead_musicians = {'John', 'George', 'Elvis', 'Tupac', 'Bowie'}
great_musicians = beatles | dead_musicians
self.assertEqual(__, great_musicians)
living_beatles = beatles - dead_musicians
self.assertEqual(__, living_beatles)
dead_beatles = beatles & dead_musicians
self.assertEqual(__, dead_beatles) | true |
2dfdab9376b8740407e15a91d1987f8adaa5ede9 | Python | adamr2/dhutil | /dhutil/mongo_utils.py | UTF-8 | 1,077 | 2.515625 | 3 | [
"MIT"
] | permissive | """Python based utilities for the registration system."""
import os
import json
from urllib.parse import quote_plus
from functools import lru_cache
import pymongo
# Location of the JSON file holding the MongoDB credentials.
CRED_DIR_PATH = os.path.expanduser('~/.datahack/')
CRED_FNAME = 'mongodb_credentials.json'
def _get_credentials():
    """Load and return the MongoDB credentials dict from the user's config file."""
    cred_path = os.path.join(CRED_DIR_PATH, CRED_FNAME)
    with open(cred_path, 'r') as cred_file:
        return json.load(cred_file)
# URI template; filled in by _get_mongodb_uri() with percent-encoded credentials.
MONGODB_URI = "mongodb://{usr}:{pwd}@{host}:{port}"
def _get_mongodb_uri():
    """Build the mongodb:// connection URI from the stored credentials."""
    cred = _get_credentials()
    # user and password must be percent-encoded per the MongoDB URI format
    return MONGODB_URI.format(usr=quote_plus(cred['usr']),
                              pwd=quote_plus(cred['pwd']),
                              host=cred['host'],
                              port=cred['port'])
@lru_cache(maxsize=2)
def _get_mongodb_client():
    """Return a (cached) pymongo client authenticated against the configured source."""
    cred = _get_credentials()
    client = pymongo.MongoClient(host=_get_mongodb_uri(),
                                 authSource=cred['authSource'])
    return client
def _get_mongo_database():
    """Return the 'datahack-reg' database handle."""
    client = _get_mongodb_client()
    return client['datahack-reg']
def get_users_collection():
    """Returns the DataHack users collection."""
    database = _get_mongo_database()
    return database['users']
| true |
4d1426383b3bb6382e8245a214c533032ea84e64 | Python | Kaynelua/SUSH-SpectralSensor | /SpectralSensor.py | UTF-8 | 2,404 | 2.765625 | 3 | [] | no_license | import smbus
from I2C import write,read
import time
import math
import numpy as np
import bitstring as bs
class SpectralSensor:
    """I2C driver for a 6-channel (V/B/G/Y/O/R) spectral sensor.

    Register access goes through the project-level ``read``/``write`` I2C
    helpers.  Register addresses/bit positions documented below are read off
    this code; confirm their meaning against the sensor datasheet.
    """
    def __init__(self):
        # bus 1 is the default user-facing I2C bus on a Raspberry Pi
        self.bus = smbus.SMBus(1)
        self.gain(2)
    # Set the sensor gain: writes level (0-3) into bits 4-5 of register 0x07.
    def gain(self,level):
        if(level >=0 and level <=3):
            reg = read(self.bus,0x07)
            reg = reg & 0xCF
            write(self.bus,0x07,reg|level<<4)
    # Switch the LED indicator light on/off (bit 0 of register 0x07).
    def ledInd(self,state:bool):
        reg = read(self.bus,0x07)
        if(state):
            write(self.bus,0x07,reg | 0x01)
        else:
            write(self.bus,0x07,reg & 0xFE)
    # Driver LED: level 1 enables it (bit 3 set, drive-level bits cleared);
    # any other level disables it (bit 3 cleared).
    def ledDrv(self,level):
        reg = read(self.bus,0x07)
        if(level == 1):
            reg = reg & 0xCF
            level = (level -1) << 4
            write(self.bus,0x07,reg|0x08|level)
        else :
            write(self.bus,0x07,reg& 0xF7)
    # Soft-reset the sensor (bit 7 of register 0x04).
    def reset(self):
        reg = read(self.bus,0x04)
        write(self.bus,0x04,reg|0x80)
    # Select the measurement bank (one-shot vs. continuous reading):
    # writes `bank` into bits 2-3 of register 0x04 (bit 1 is cleared too).
    def setBank(self,bank:int):
        reg = read(self.bus,0x04)
        reg = reg & 0xF1
        write(self.bus,0x04, reg|bank<<2)
    # True when the data-ready flag (bit 1 of register 0x04) is set.
    def dataReady(self):
        reg = read(self.bus,0x04)
        return bool(reg & 0x02)
    # Read the raw 16-bit value of one channel ('V','B','G','Y','O' or 'R'),
    # polling every 10 ms until data is ready.
    def readChan(self,chan):
        while( not self.dataReady()):
            time.sleep(0.01)
            pass
        # high/low byte register pair per channel
        addr = {'V' : [0x08,0x09], 'B' : [0x0A,0x0B], 'G' : [0x0C,0x0D], 'Y' : [0x0E,0x0F], 'O' : [0x10,0x11], 'R' : [0x12,0x13] }
        hi = read(self.bus,addr[chan][0])
        lo = read(self.bus,addr[chan][1])
        return (hi << 8 | lo)
    # Read the raw values of all 6 channels, violet through red.
    def readAll(self):
        colors =['V','B','G','Y','O','R']
        listSpectrum=[]
        for color in colors :
            val = self.readChan(color)
            listSpectrum.append(val)
        return listSpectrum
    # Read the calibrated (floating-point) values of all 6 channels.
    def readAllCal(self):
        # four consecutive registers per channel, most significant byte first
        colors_add =[(0x14,0x15,0x16,0x17),(0x18,0x19,0x1A,0x1B),
                     (0x1C,0x1D,0x1E,0x1F),(0x20,0x21,0x22,0x23),
                     (0x24,0x25,0x26,0x27),(0x28,0x29,0x2A,0x2B)]
        calSpectrum=[]
        while(not self.dataReady()):
            time.sleep(0.01)
        for color in colors_add :
            b3 = read(self.bus,color[0])
            b2 = read(self.bus,color[1])
            b1 = read(self.bus,color[2])
            b0 = read(self.bus,color[3])
            # reassemble the 32-bit word and reinterpret it as an IEEE-754 float
            val = (b3<<24) | (b2<<16) | (b1<<8) |b0
            bin_val = np.binary_repr(val,width = 32)
            c = bs.BitArray(bin=bin_val)
            calSpectrum.append(c.float)
        return np.array(calSpectrum)
| true |
b076a489b8bda48899060faa8a055a05bea1da6a | Python | pthorn/eor-filestore | /eor_filestore/images/image_ops.py | UTF-8 | 3,162 | 2.765625 | 3 | [] | no_license | # coding: utf-8
import os
import errno
import math
from io import BytesIO
from PIL import Image
from ..exceptions import FileException, NotAnImageException
import logging
log = logging.getLogger(__name__)
def get_image_format(file_obj):
    """Pick the output format from the upload's extension: .gif/.png map to
    'png', anything else to 'jpg'."""
    extension = os.path.splitext(file_obj.filename)[1].lower()
    return 'png' if extension in ('.gif', '.png') else 'jpg'
def open_image(parsed_id, source_file):
    """Open *source_file* with PIL and normalize it to RGB.

    PIL failures are wrapped in project exceptions: NotAnImageException when
    the data is not a recognizable image, FileException for any other I/O
    error.  (Idiom fix: ``'x' in s`` replaces ``s.find('x') != -1``.)
    """
    try:
        image = Image.open(source_file)
    except IOError as e:
        # PIL reports unrecognized data as "cannot identify image file";
        # the leading letter is dropped to tolerate case differences.
        if 'annot identify image file' in str(e):
            raise NotAnImageException(id=parsed_id, exc=e)
        else:
            raise FileException(id=parsed_id, exc=e)
    if image.mode != 'RGB':
        image = image.convert('RGB')
    return image
# FILL mode:
# Thumbnail size is exactly the specified size. Either top/bottom or the
# left/right sides are cropped away to preserve proportions.
def make_thumbnail_crop_to_size(image, size):
    """Return a copy of *image* cropped and resized to exactly *size* (w, h)."""
    image = image.copy()
    # crop window centered on the image, with the target aspect ratio
    # TODO!!! won't work if original is smaller than thumbnail
    factor = min(float(image.size[0]) / size[0], float(image.size[1]) / size[1])
    crop_size = (size[0] * factor, size[1] * factor)
    crop_window = (
        math.trunc((image.size[0] - crop_size[0]) / 2),  # left
        math.trunc((image.size[1] - crop_size[1]) / 2),  # upper
        math.trunc((image.size[0] + crop_size[0]) / 2),  # right
        math.trunc((image.size[1] + crop_size[1]) / 2)   # lower
    )
    image = image.crop(crop_window)
    image.thumbnail(size, Image.ANTIALIAS)
    return image
# TODO FIT
# Thumbnail fits into the specified maximum size while keeping proportions. Resulting
# thumbnail size may be smaller on one of the dimensions
def make_thumbnail_keep_proportions(image, size):
    """Shrink a copy of `image` to fit inside `size`, keeping proportions.

    Images already within the bounds are returned unscaled (still a copy).
    """
    thumb = image.copy()
    max_w, max_h = size
    if thumb.size[0] > max_w or thumb.size[1] > max_h:
        thumb.thumbnail(size, Image.ANTIALIAS)
    return thumb
# TODO FIT WIDTH
# Thumbnail width is exactly the specified width, height is calculated to keep
# original image proportions. Height is ignored.
def make_thumbnail_fit_width(image, size):
    """Scale a copy of `image` down to exactly size[0] pixels wide.

    Height follows from the original aspect ratio; size[1] is ignored.
    Images no wider than size[0] are returned unchanged (not copied).
    """
    target_width = size[0]
    if image.size[0] <= target_width:
        return image
    scaled = image.copy()
    factor = scaled.size[0] / target_width
    scaled.thumbnail((target_width, scaled.size[1] / factor), Image.ANTIALIAS)
    return scaled
def save_image(image, save_path, quality, progressive=False):
    """Write `image` to `save_path` via Pillow.

    An existing file at `save_path` is silently overwritten (a warning is
    logged first).  `quality` and `progressive` are passed straight to
    Pillow's encoder.
    """
    if os.path.exists(save_path):
        # FIX: Logger.warn() is a deprecated alias; use warning().
        log.warning('overwriting existing image: %s', save_path)
    image.save(save_path, quality=quality, progressive=progressive)
def save_image_to_buffer(image, extension, quality, progressive=False):
    """Serialize `image` into an in-memory BytesIO buffer.

    Returns (buffer, size_in_bytes) with the buffer rewound to position 0.
    """
    buf = BytesIO()
    # Pillow uses name attribute to infer image format
    buf.name = 'foo.{}'.format(extension)
    image.save(buf, quality=quality, progressive=progressive)
    size = buf.seek(0, os.SEEK_END)
    buf.seek(0)
    return buf, size
| true |
d28617d064b72c5690830654683948626338d579 | Python | anastasia1002/my-labs | /lab8/lab8.1(2).py | UTF-8 | 288 | 3.140625 | 3 | [] | no_license | x=float(input("x="))
y=float(input("y="))
z=float(input("z="))
# NOTE(review): this helper returns the *global* y (not z) in the else
# branch -- almost certainly a bug.  It is also shadowed by the second
# get_max below and never called (the builtin max() is used instead).
def get_max(x,z):
    if x>z:
        return x
    else:
        return y
sum=x+y
dob=x*y
# NOTE(review): redefines (shadows) the get_max above; also never called.
# `sum` as a parameter name shadows the builtin sum().
def get_max(sum,dob):
    if sum>dob:
        return sum
    else:
        return dob
# u uses the *builtin* max(), so both helpers above are dead code.
u=max(x,z)+max(x+y,x*y)/max(x+y,x*y)**2
print(u)
| true |
a3c1d031f338227dd66c79bd2fc8c694275a139a | Python | georgetown-cset/ai-definitions-for-policymaking | /tests/test_query.py | UTF-8 | 2,386 | 2.53125 | 3 | [] | no_license | import pytest
from google.api_core.exceptions import NotFound
from google.cloud import bigquery
from bq import query, create_client
from settings import DATASET, PROJECT_ID
# Tiny inline queries used as fixtures: each yields two (x, y) rows.
TOY_QUERY = """select * from unnest(array<struct<x int64, y string>>[(1, 'foo'), (3, 'bar')])"""
ALT_TOY_QUERY = """select * from unnest(array<struct<x int64, y string>>[(2, 'baz'), (4, 'bam')])"""
@pytest.fixture
def cleanup_test_table():
    """Drop the {DATASET}.test table after the test body has run."""
    yield
    client = create_client()
    try:
        # not_found_ok=True already suppresses missing-table errors; the
        # except NotFound below is belt-and-braces.
        client.delete_table(f'{DATASET}.test', not_found_ok=True)
    except NotFound:
        pass
@pytest.fixture
def client():
    """A fresh BigQuery client for the configured project."""
    return create_client()
def test_create_client():
    # We shouldn't get a UserWarning about using GCP user credentials
    with pytest.warns(None) as warnings:
        client = create_client()
    assert len(warnings) == 0
    assert client.project == PROJECT_ID
def test_create_table(cleanup_test_table, client):
    # query() with table= should materialize a result table without errors.
    result = query(TOY_QUERY, table='test', dataset=DATASET)
    assert result.errors is None
    # Check the result
    job = client.query(TOY_QUERY)
    assert isinstance(job, bigquery.QueryJob)
    job_result = job.result()
    assert isinstance(job_result, bigquery.table.RowIterator)
    rows = [row for row in job_result]
    assert len(rows) == 2
    assert list(rows[0].keys()) == ['x', 'y']
def test_recreate_table(cleanup_test_table, client):
    """If the cleanup fixture works, creating the test table a second time won't raise NotFound.
    Keep this test below test_create_table().
    """
    # Create the test table a second time
    job_2 = query(TOY_QUERY, table='test', dataset=DATASET)
    assert job_2.state == 'DONE'
    table_2_rows = [row for row in job_2.result()]
    assert table_2_rows[0]['x'] == 1 and table_2_rows[0]['y'] == 'foo'
    table_2 = client.get_table(f'{DATASET}.test')
    # Trying to create the table a third time and passing truncate=True should replace the contents of the table
    job_3 = query(ALT_TOY_QUERY, table='test', dataset=DATASET, truncate=True)
    assert job_3.state == 'DONE'
    table_3 = client.get_table(f'{DATASET}.test')
    # The table isn't recreated
    assert table_3.created == table_2.created
    # Its contents are replaced
    assert table_3.modified > table_2.created
    table_3_rows = [row for row in job_3.result()]
    assert table_3_rows[0]['x'] == 2 and table_3_rows[0]['y'] == 'baz'
| true |
0e6bc8babd9029fa1737979a87c7e8fdf99fa068 | Python | covid-maps/covid-maps | /scripts/database_helper.py | UTF-8 | 292 | 2.828125 | 3 | [] | no_license | from sqlalchemy import create_engine
def load_engine(db_url):
    """Create and return a SQLAlchemy engine for `db_url` (echo disabled)."""
    print('Connecting to the PostgreSQL database...')
    engine = create_engine(db_url, echo=False)
    return engine
def close_connection(session):
    """Close `session` if one is open; a None session is accepted silently."""
    if session is None:
        return
    session.close()
    print('Database connection closed.')
| true |
2cb83902d515adff0d8599e261579aa60d507e13 | Python | ahmedhussiien/Disaster-Response-NLP-Pipeline | /data/process_data.py | UTF-8 | 3,693 | 3.0625 | 3 | [] | no_license | # load, clean and save the datasets
import pandas as pd
from sqlalchemy import create_engine
import argparse
# Default input/output locations; each can be overridden with a CLI flag
# (see parse_arguments below).
CATEGORIES_DEFAULT_FILENAME = './data/categories.csv'
MESSAGES_DEFAULT_FILENAME = './data/messages.csv'
DATABASE_DEFAULT_FILENAME = './data/labeled_messages_db.sqlite3'
# Name of the table written into the SQLite database.
TABLE_NAME = 'labeled_messages'
def load_data(messages_filename, categories_filename):
    """Read the messages and categories CSV files and merge them on 'id'.

    Args:
        messages_filename (str): path to the messages CSV
        categories_filename (str): path to the categories CSV
    Returns:
        pandas.DataFrame: one row per message, carrying the raw
        semicolon-separated category string.
    """
    messages = pd.read_csv(messages_filename)
    categories = pd.read_csv(categories_filename)
    return messages.merge(categories, on='id')
def clean_data(df):
    """Expand the 'categories' column into one 0/1 column per category.

    Args:
        df (pandas.DataFrame): merged frame with a raw 'categories' column
    Returns:
        pandas.DataFrame: deduplicated frame with integer category columns.
    Note: the 'categories' column is dropped from the *caller's* frame in
    place (same as the original implementation).
    """
    # One column per category; cell values look like "related-1".
    split_cats = df.categories.str.split(';', expand=True)
    # Column names come from the first row, minus the trailing "-<digit>".
    names = split_cats[:1].applymap(lambda s: s[:-2]).iloc[0, :].tolist()
    split_cats.columns = names
    # Keep only the trailing digit as an int.
    split_cats = split_cats.applymap(lambda s: int(s[-1]))
    # A handful of rows carry related == 2; fold them into 0.
    split_cats['related'].replace(2, 0, inplace=True)
    df.drop('categories', axis=1, inplace=True)
    cleaned = pd.concat([df, split_cats], axis=1)
    cleaned.drop_duplicates(inplace=True)
    cleaned.dropna(subset=names, inplace=True)
    return cleaned
def save_data(df, database_filename):
    """Write `df` to the labeled-messages table of a SQLite database.

    Args:
        df (pandas.DataFrame): cleaned dataframe to persist
        database_filename (str): path of the SQLite file to create/update
    ROBUSTNESS FIX: if_exists='replace' makes the pipeline re-runnable --
    previously a second run raised ValueError because the table existed.
    """
    engine = create_engine('sqlite:///' + database_filename)
    df.to_sql(TABLE_NAME, engine, index=False, if_exists='replace')
    engine.dispose()
def parse_arguments():
    """Parse the command line arguments.

    Returns:
        tuple[str, str, str]: (messages_filename, categories_filename,
        database_filename), falling back to the module-level defaults.
    """
    parser = argparse.ArgumentParser(description="Disaster Response Pipeline Process Data", prefix_chars='-+')
    option_specs = (
        (('-m', '--messages-filename'), MESSAGES_DEFAULT_FILENAME, 'Messages dataset filename'),
        (('-c', '--categories-filename'), CATEGORIES_DEFAULT_FILENAME, 'Categories dataset filename'),
        (('-d', '--database-filename'), DATABASE_DEFAULT_FILENAME, 'Database filename'),
    )
    for flags, default, help_text in option_specs:
        parser.add_argument(*flags, type=str, default=default, help=help_text)
    args = parser.parse_args()
    return args.messages_filename, args.categories_filename, args.database_filename
def process_data(messages_filename, categories_filename, database_filename):
    '''
    Process the data and save it in a database
    Args:
        categories_filename (str)
        messages_filename (str)
        database_filename (str)
    '''
    # ETL pipeline: load -> clean -> persist, with progress messages.
    print('\nLoading data ⌛...')
    df = load_data(messages_filename, categories_filename)
    print('\nCleaning data 🧹...')
    df = clean_data(df)
    print('\nSaving the database as {} 💾...'.format(database_filename))
    save_data(df, database_filename)
    print('\nDone processing ✅')
if __name__ == '__main__':
    # CLI entry point: resolve filenames from flags/defaults, then run.
    messages_filename, categories_filename, database_filename = parse_arguments()
    process_data(messages_filename, categories_filename, database_filename)
| true |
432eda01889a1c14e18a28e61fecb57a1b84dbd9 | Python | sedasugur/homeworks | /Learning from Data/HW1/lfd_1.py | UTF-8 | 4,716 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#Seda SUGUR 150160130
import random
iter_num=1000
learning_rate=0.01
m=[]
sum_x=0
sum_y=0
m.append([])
m.append([])
# Read whitespace-separated numbers from the data file (header line is
# skipped); values alternate x, y, x, y, ... and are scaled down by 1000
# into m[0] (x values) and m[1] (y values).
with open('./regression_data.txt','r') as file:
    file.readline()
    a=0
    lines=file.readlines()
    for line in lines:
        for word in line.split():
            word=float(word)/1000
            if a==0:
                m[0].append(word)
                a=1
            else:
                m[1].append(word)
                a=0
# Re-pack the two columns into [x, y] rows for the fold() helper below.
lines=[]
for i in range(len(m[0])):
    lines.append([m[0][i],m[1][i]])
for i in range(len(m[0])):
    sum_x += m[0][i]
    sum_y +=m[1][i]
av_x=sum_x/len(m[0])#for init w
av_y=sum_y/len(m[1])#for init b
def cost_func(x,y,w,b):
    """Half mean squared error of the line y = w*x + b over the data."""
    total = sum((w * xi + b - yi) ** 2 for xi, yi in zip(x, y))
    return total / (2 * len(x))
def deriv_cost_func(x,y,w_current,b_current):
    """Gradient of cost_func at (w_current, b_current).

    Returns [d/db, d/dw], each averaged over the data points.
    """
    n = len(x)
    w_der = 0
    b_der = 0
    for xi, yi in zip(x, y):
        residual = (w_current * xi + b_current) - yi
        w_der += residual * xi / n
        b_der += residual / n
    return [b_der, w_der]
def gradient_descent(x,y,w,b,learning_rate,num_iter):
    """Run `num_iter` steps of batch gradient descent; returns [b, w]."""
    for _ in range(num_iter):
        delta_b, delta_w = deriv_cost_func(x, y, w, b)
        w -= learning_rate * delta_w
        b -= learning_rate * delta_b
    return [b, w]
def fold(lines, k, data_size, size=0):
    """Shuffle a copy of `lines` and split it into k folds.

    The first k-1 folds hold round(data_size / k) rows each; the last
    fold holds whatever remains.  Only the first two columns of each row
    are kept, every returned row is a fresh list, and the input list is
    left untouched.
    """
    size = round(data_size / k)
    shuffled = [row[:] for row in lines]
    random.shuffle(shuffled)
    parts = []
    for part_idx in range(k):
        start = size * part_idx
        count = size if part_idx < k - 1 else data_size - (k - 1) * size
        parts.append([[shuffled[start + off][col] for col in range(2)]
                      for off in range(count)])
    return parts
def function_for_take_list_from_folds(fold_al, train1, train2, train3, train4):
    """Concatenate the x and y columns of four training folds.

    Args:
        fold_al: list of folds, each a list of [x, y] rows
        train1..train4: indices of the folds to use for training
    Returns:
        [x_list, y_list] with rows appended in the order the fold indices
        were passed.
    DRY fix: the original copied the same append loop four times; a
    single loop over the chosen indices is behavior-identical.
    """
    x_list = []
    y_list = []
    for idx in (train1, train2, train3, train4):
        for row in fold_al[idx]:
            x_list.append(row[0])
            y_list.append(row[1])
    return [x_list, y_list]
# Build the shuffled 5-fold split once for all five models below.
# NOTE(review): the empty-list assignment is dead code -- fold_al is
# immediately rebound on the next line.
fold_al=[]
fold_al=fold(lines,5,199)
def list_for_test(fold_al, test_index):
    """Split the selected fold into parallel x and y value lists."""
    test_rows = fold_al[test_index]
    xlist = [row[0] for row in test_rows]
    ylist = [row[1] for row in test_rows]
    return [xlist, ylist]
def overall_mse(error):
    """Average the per-model validation errors into one overall MSE.

    Idiom fix: uses the builtin sum() instead of a manual accumulator
    loop -- same result, less code.
    """
    return sum(error) / len(error)
#for model1 folds:0,1,2,3 train set, fold 5. is test set
# 5-fold cross-validation: each "model" trains on four folds and is
# scored (half-MSE via cost_func) on the held-out fold.  The fold-index
# order passed for models 4 and 5 is unusual but still the correct
# complement of the held-out fold.
error=[]
x_list,y_list=function_for_take_list_from_folds(fold_al,0,1,2,3)
b,w= gradient_descent(x_list,y_list,av_x,av_y,learning_rate,iter_num)
x_test,y_test=list_for_test(fold_al,4)
error.insert(0,cost_func(x_test,y_test,w,b))
#for model2
x_list,y_list=function_for_take_list_from_folds(fold_al,0,1,2,4)
b,w= gradient_descent(x_list,y_list,av_x,av_y,learning_rate,iter_num)
x_test,y_test=list_for_test(fold_al,3)
error.insert(1,cost_func(x_test,y_test,w,b))
#for model3
x_list,y_list=function_for_take_list_from_folds(fold_al,0,1,3,4)
b,w= gradient_descent(x_list,y_list,av_x,av_y,learning_rate,iter_num)
x_test,y_test=list_for_test(fold_al,2)
error.insert(2,cost_func(x_test,y_test,w,b))
#for model4
x_list,y_list=function_for_take_list_from_folds(fold_al,0,3,2,4)
b,w= gradient_descent(x_list,y_list,av_x,av_y,learning_rate,iter_num)
x_test,y_test=list_for_test(fold_al,1)
error.insert(3,cost_func(x_test,y_test,w,b))
#for model5
x_list,y_list=function_for_take_list_from_folds(fold_al,3,1,2,4)
b,w= gradient_descent(x_list,y_list,av_x,av_y,learning_rate,iter_num)
x_test,y_test=list_for_test(fold_al,0)
error.insert(4,cost_func(x_test,y_test,w,b))
# Report the average held-out error across the five folds.
print(overall_mse(error))
| true |
c3647b2d8d7b5716d148060078d9f7ee5481c324 | Python | ArtskydJ/project-euler | /020_FactorialDigitSum.py | UTF-8 | 188 | 2.875 | 3 | [] | no_license | from math import *
import string
#from string import *
# Project Euler 20: sum of the decimal digits of 100!.
# BUG FIX: the original did `sTemp = str.index(s, i)` in the loop, which
# raises TypeError (str.index needs a substring, not an int) and was the
# wrong operation anyway -- the intent was the i-th digit, s[i].  The
# unused `sTemp = "hi"` variable is removed.  (`import string` above is
# unused but kept: it is a file-level import.)
n=factorial(100)
s=format(n)
x=0
for i in range(len(s)):
    x+=int(s[i])
print(x)
| true |
5ede18c830281d936688bc7f3ff8f8e721b92607 | Python | torenunez/ud120-projects | /datasets_questions/explore_enron_data.py | UTF-8 | 2,080 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
# NOTE(review): Python 2 source (print statements, dict.iterkeys) -- it
# will not run under Python 3 without conversion.
import os
# abspath = os.path.abspath(__file__)
# dname = os.path.dirname(abspath)
# Hard-coded absolute path to the author's checkout -- TODO make relative.
dname = '/home/torenunez/Projects/ud120-projects/datasets_questions/'
os.chdir(dname)
import pickle
enron_data = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))
# Dataset size: number of people, number of features per person.
print len(enron_data.keys())
print len(enron_data['METTS MARK'].keys())
# Count persons of interest (poi flag == 1).
poi = 0
for key in enron_data.iterkeys():
    if enron_data[key]["poi"] == 1:
        poi += 1
print poi
# Find James Prentice's key and his stock-related feature names.
for key in enron_data.iterkeys():
    if 'prentice' in key.lower():
        print key
        for key2 in enron_data[key].iterkeys():
            if 'stock' in key2.lower():
                print key2
print enron_data['PRENTICE JAMES']['total_stock_value']
for key in enron_data.iterkeys():
    if 'colwell' in key.lower():
        print key
        for key2 in enron_data[key].iterkeys():
            print key2
print enron_data['COLWELL WESLEY']['from_this_person_to_poi']
for key in enron_data.iterkeys():
    if 'skilling' in key.lower():
        print key
        for key2 in enron_data[key].iterkeys():
            print key2
print enron_data['SKILLING JEFFREY K']['exercised_stock_options']
# Compare total payments for Lay / Skilling / Fastow.
for key in enron_data.iterkeys():
    if 'lay' in key.lower() or 'skilling' in key.lower() or 'fastow' in key.lower():
        print key
        print enron_data[key]['total_payments']
import pandas as pd
import numpy as np
# Load everything into a DataFrame; the pickled data spells missing
# values as the string 'NaN', so normalize them to real NaN first.
df = pd.DataFrame.from_dict(enron_data, orient ='index')
df = df.replace('NaN', np.nan)
print df.info()
print df.shape
print df.isnull().sum()
print df[df.poi]['total_payments'].isnull().sum()
| true |
abbf03ffe895b458704d292d01142d6e09504d2c | Python | Ackermannn/MyLeetcode | /src/edu/neu/xsz/leetcode/lcof/lcof37/Main.py | UTF-8 | 2,089 | 3.875 | 4 | [] | no_license | #! usr/bin/env python3
from queue import Queue
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: integer value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Codec:
    """Level-order (BFS) serializer/deserializer for binary trees.

    Format example: "[1,2,3,null,null,4,5]"; trailing nulls are stripped.
    """
    def serialize(self, root):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        if root is None:
            return "[]"
        ans = []
        q = Queue()
        q.put(root)
        while not q.empty():
            tmp = q.get()
            if tmp is None:
                ans.append('null')
            else:
                ans.append(str(tmp.val))
                q.put(tmp.left)
                q.put(tmp.right)
        # Drop the trailing run of 'null' placeholders.
        # BUG FIX: the original compared with `is`, an identity check that
        # only works by CPython string-interning accident; use equality.
        while ans[-1] == 'null':
            ans.pop()
        return "[" + ",".join(ans) + "]"
    def deserialize(self, data):
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        if data == "[]":
            return None
        # Reverse the token list so l[-1]/l.pop() consume from the front.
        l = data[1:-1].split(',')[::-1]
        q = Queue()
        root = TreeNode(int(l[-1]))
        q.put(root)
        l.pop()
        while len(l) > 0:
            tmp = q.get()
            if l[-1] != 'null':
                # BUG FIX: convert to int -- the original stored the raw
                # string for every non-root node, so round-tripped nodes
                # had str values while the root had an int.
                tmp.left = TreeNode(int(l[-1]))
                q.put(tmp.left)
            else:
                tmp.left = None
            l.pop()
            if len(l) == 0:
                break
            if l[-1] != 'null':
                tmp.right = TreeNode(int(l[-1]))
                q.put(tmp.right)
            else:
                tmp.right = None
            l.pop()
        return root
if __name__ == '__main__':
    # Your Codec object will be instantiated and called as such:
    # "[1,2,3,null,null,4,5]"
    # Build the sample tree 1 -> (2, 3 -> (4, 5)) and round-trip it.
    root1 = TreeNode(1)
    root2 = TreeNode(2)
    root3 = TreeNode(3)
    root4 = TreeNode(4)
    root5 = TreeNode(5)
    root1.left = root2
    root1.right = root3
    root3.left = root4
    root3.right = root5
    codec = Codec()
    serialized = codec.serialize(root1)
    deserialized = codec.deserialize(serialized)
    # NOTE(review): this prints the TreeNode's default repr, not the tree
    # contents -- printing `serialized` would be more informative.
    print(deserialized)
| true |
002af52aaabfbf3869840413fae4dcb4d9dc39fd | Python | LamThanhNguyen/HackerEarth-Solutions | /Fitting-Circles.py | UTF-8 | 142 | 3.46875 | 3 | [] | no_license | t = int(input())
# t test cases (read on the line above); each case gives two radii a, b.
for i in range(t):
    a,b = map(int,input().split())
    # Print how many of the smaller value fits into the larger (floor div).
    if(a >= b):
        print(a//b)
    # NOTE(review): this elif is the exact complement of the `if` above,
    # so a plain `else:` would be equivalent.
    elif(a<b):
        print(b//a) | true
f83143318388cad3273a0eb9cf962565c96da736 | Python | thomasgauvin/LeetcodePractice | /leetcode-reverse-integer.py | UTF-8 | 446 | 2.859375 | 3 | [] | no_license | def reverse(x: int) -> int:
    # Track the sign separately and work on the absolute value.
    negative = False
    if x < 0:
        negative = True
        x = 0 - x
    # Reverse the decimal digits by prepending characters one at a time.
    x = str(x)
    result = ""
    for i in x:
        result = i+result
    result = int(result)
    if negative:
        result = -result
    # Clamp to the signed 32-bit range per the LeetCode problem statement.
    # NOTE(review): the upper bound looks off by one -- INT_MAX is
    # 2**31 - 1, so the check should probably be `result > 2**31 - 1`.
    # NOTE(review): the signature is annotated `-> int` but the function
    # prints and implicitly returns None, so callers cannot use the value.
    if result > 2**31-2 or result < -2**31:
        result = 0
    print(result)
reverse(-111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111119999994) | true |
c0eba7eb46ded6dc70201840d15bfd63b86e06b4 | Python | onaio/tasking | /tests/models/test_locations.py | UTF-8 | 1,092 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | """
Test for Location model
"""
from django.test import TestCase
from model_mommy import mommy
class TestLocations(TestCase):
    """
    Test class for Location models
    """

    def test_location_model_str(self):
        """
        Test the str method on Location model with Country Defined
        """
        nairobi = mommy.make("tasking.Location", name="Nairobi", country="KE")
        expected = "Kenya - Nairobi"
        # Idiom fix: str(obj) instead of calling obj.__str__() directly.
        self.assertEqual(expected, str(nairobi))

    def test_location_model_str_no_country(self):
        """
        Test the str method on Location model without Country Defined
        """
        nairobi = mommy.make("tasking.Location", name="Nairobi")
        expected = "Nairobi"
        self.assertEqual(expected, str(nairobi))

    def test_location_parent_link(self):
        """
        Test the parent link between Locations
        """
        nairobi = mommy.make("tasking.Location", name="Nairobi")
        hurlingham = mommy.make("tasking.Location", name="Hurlingham", parent=nairobi)
        self.assertEqual(nairobi, hurlingham.parent)
| true |
74ba6bab32b06a88d5c6f938b47627a808154a96 | Python | Chandan-CV/school-lab-programs | /Program2.py | UTF-8 | 542 | 4.59375 | 5 | [] | no_license | #Program 2
#Write a program to accept 2 numbers and interchange the values without using a temporary variable
#Name : Adeesh Devanand
#Date of Execution: July 17, 2020
#Class 11
a = int(input("Enter first number"))
b = int(input("Enter second number"))
a = a + b
b = a - b
a = a - b
print("Interchanged value of the first number is", a)
print("Interchanged value of the second number is", b)
'''Output of Program 2
Enter first number3
Enter second number5
Interchanged value of the first number is 5
Interchanged value of the second number is 3'''
| true |
8169cca045bb096d8d290bae52a99ed0f07b93e0 | Python | posuna19/pythonBasicCourse | /course2/week5/W5_01_arrange_name_test.py | UTF-8 | 881 | 3.859375 | 4 | [] | no_license | import unittest
from W5_01_arrange_name import rearrange_name
class TestRearrange(unittest.TestCase):
def test_basic(self):
#Arrange
username = "Lovecale, Ada"
expectedName = "Ada Lovecale"
#Act
resultName = rearrange_name(username)
#Assert
self.assertEqual(resultName, expectedName)
def test_empty(self):
username = ""
expectedName = ""
resultName = rearrange_name(username)
self.assertEqual(resultName, expectedName)
def test_double_name(self):
username = "Hopper, Grace M."
expectedName = "Grace M. Hopper"
resultName = rearrange_name(username)
self.assertEqual(resultName, expectedName)
def test_one_name(self):
testcase = "Voltaire"
expected = "Voltaire"
self.assertEqual(rearrange_name(testcase), expected)
#unittest.main() | true |
9fd8e94a89212556fce1cb300a2627e72d611c5f | Python | dfarache/hackerrank | /loveLetterMistery/loveLetter.py | UTF-8 | 460 | 3.71875 | 4 | [] | no_license | def apply_changes(string):
    # HackerRank "Love Letter Mystery": the minimum number of single-step
    # letter reductions to make the string a palindrome is the sum of the
    # alphabet distances of each mismatched end-pair.
    number_of_changes = 0
    length = len(string)
    low = 0
    high = length-1
    for index in range(int(length/2)):
        number_of_changes += abs(ord(string[low]) - ord(string[high]))
        high -= 1
        low += 1
    # NOTE(review): the result is printed rather than returned, so it
    # cannot be reused programmatically.
    print(number_of_changes)
def calculate_answers():
    # Reads one string per test case from stdin and scores it; relies on
    # the module-level `number_of_tests` global defined below.
    for i in range(number_of_tests):
        string = str(input())
        apply_changes(string)
# Number of test cases, read from stdin before processing starts.
number_of_tests = int(input())
calculate_answers() | true |
ef0dfd16468612e3f77bd995b10213dc22e43d3f | Python | anuragvij264/covid-social-distancing-scoring | /api/api_utils.py | UTF-8 | 1,086 | 2.53125 | 3 | [] | no_license | from torchvision import transforms
from PIL import Image
import numpy as np
import torch
from model import CSRNet
# Standard ImageNet preprocessing (to-tensor + per-channel normalization).
# NOTE(review): presumably these are the statistics the CSRNet backbone
# was trained with -- confirm against the training pipeline.
transform = transforms.Compose([
    transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                std=[0.229, 0.224, 0.225]),
])
def gen_img_counts(img_path, model):
    """
    given an image return the head count in the image

    Loads the image, applies the module-level `transform`, runs the model
    on a single-image batch and sums the predicted density map into an
    integer count.
    """
    img = transform(Image.open(img_path).convert('RGB'))
    # CLEANUP: removed the leftover debug `print(type(img))`.
    output = model(img.unsqueeze(0))
    pred_count = int(output.detach().cpu().sum().numpy())
    return pred_count
def score(output):
    """Map a head count to a [0, 1] score; saturates to 1.0 at 400+."""
    if output >= 400:
        return 1.
    return np.tanh(152 * output + 0.1578)
def load_model(model_path, device):
    """
    model_path: saved model (.pth or .pth.tar)
    #TODO: map_location
    """
    # CSRNet is the project's crowd-counting network (imported from model).
    model = CSRNet()
    # NOTE(review): torch.load unpickles arbitrary objects -- only load
    # checkpoints from trusted sources (consider weights_only=True).
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    return model
def debug_pred(img_path, model, orig_img_path):
    """
    debug
    """
    # Unimplemented stub kept as a placeholder for visual debugging.
    pass
| true |
4caf73f8a532f644161ac8ed84aa0f7bef99093a | Python | fanonwue/ScannerTool | /SmtpConfig.py | UTF-8 | 738 | 2.625 | 3 | [
"MIT"
] | permissive | class SmtpConfig:
    def __init__(self, host: str, port: int, username: str, password: str, starttls: bool, mail_from: str):
        """Store SMTP connection settings; mail_from falls back to username."""
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.starttls = starttls
        # An empty/None From address defaults to the login username.
        if not mail_from:
            mail_from = self.username
        self.mail_from = mail_from
    @staticmethod
    def get_default():
        """creates a default object for testing purposes"""
        config = {
            "host": "smtp.domain.com",
            "port": 587,
            "username": "user",
            "password": "password",
            "starttls": True,
            "mail_from": None
        }
        return SmtpConfig(**config) | true
83bd2e6837094aa9cb7eeeb6261058ea5c0c7dc3 | Python | YaojieLu/LAI_optimization | /MDP_class.py | UTF-8 | 4,541 | 3.015625 | 3 | [] | no_license |
"""
We define an Markov Decision Process.
We represent a policy as a dictionary of {state: action} pairs.
"""
import numpy as np
def Ef(dL, gs, L, slope, dt):
    """ Given leaf area and stomatal conductance, return whole-plant transpiration """
    leaf_area = L + dL
    return slope * leaf_area * gs * dt
def gsmax_sf(dL, L, s, slope, dt):
    """ Given soil moisture and leaf area, return maximum stomatal conductance """
    leaf_area = L + dL
    return s / slope / leaf_area / dt
# Farquhar model
def Af(gs, ca,
       T=25, I=430,
       Kc=460, q=0.3, R=8.314, Jmax=48, Vcmax=31, z1=0.9, z2=0.9999, tau=43):
    """Leaf photosynthesis rate from stomatal conductance gs and CO2 ca.

    Farquhar-type model: the smoothed minimum of the Rubisco-limited (Ac)
    and the RuBP/light-limited (Aj) rates.  NOTE(review): parameters T
    and R are accepted but never used in the body.
    """
    Km = Kc+tau/0.105
    # Rubisco limitation
    Ac = 1/2*(Vcmax+(Km+ca)*gs-(Vcmax**2+2*Vcmax*(Km-ca+2*tau)*gs+((ca+Km)*gs)**2)**(1/2))
    J = (q*I+Jmax-((q*I+Jmax)**2-4*z1*q*I*Jmax)**0.5)/(2*z1)
    # RuBP limitation
    Aj = 1/2*(J+(2*tau+ca)*gs-(J**2+2*J*(2*tau-ca+2*tau)*gs+((ca+2*tau)*gs)**2)**(1/2))
    # Am = min(Ac, Aj)
    Am = (Ac+Aj-((Ac+Aj)**2-4*z2*Ac*Aj)**0.5)/(2*z2)
    return Am
def Anf(dL, gs, ca, L, mL, cL, dt):
    """ Whole-plant photosynthesis rate """
    # self-shading
    # Effective leaf area saturates at 10 as total leaf area L+dL grows.
    L_effective = 10*(L+dL)/(L+dL+10)
    A = L_effective*Af(gs, ca)*dt
    """ Leaf maintenance cost and leaf construction cost """
    # NOTE(review): the triple-quoted string above is a no-op expression
    # statement (not a docstring) -- it was probably meant to be a comment.
    C = mL*(L+dL)*dt+cL*max(0, dL)
    return A-C
def rd_prob(rain_prob, mean_rd, rd, delta_rd):
    """Return rain_prob * (exp(-rd/mean_rd) - exp(-(rd+delta_rd)/mean_rd)).

    The difference of exponential tails is the probability mass an
    exponential(mean_rd) rain depth assigns to [rd, rd + delta_rd),
    scaled by the per-step rain probability.
    """
    rate = 1.0 / mean_rd
    return rain_prob * (np.exp(-rate * rd) - np.exp(-rate * (rd + delta_rd)))
class MDP:
    """ An MDP is defined by a transition model and a reward function. """
    def __init__(self,
                 gamma=0.99, dt=1.0,
                 k=0.1, mean_rd=0.2,
                 slope=0.05, ca=400, mL=1, cL=10,
                 dL_unit=0.1,
                 gs_min=0, gs_max=1.0, gs_unit=0.02,
                 L_min=0, L_max=10.0,
                 s_min=0, s_max=1.0):
        # gamma: discount factor; dt: time-step length; k: rain frequency;
        # mean_rd: mean rain depth.  The remaining arguments bound and
        # discretize the leaf-area (L), stomatal-conductance (gs) and
        # soil-moisture (s) grids.
        if not (0 < gamma <= 1.0):
            raise ValueError("An MDP must have 0 < gamma <= 1")
        self.gamma = gamma
        self.dt = dt
        self.k = k
        self.mean_rd = mean_rd
        self.slope = slope
        self.ca = ca
        self.mL = mL
        self.cL = cL
        self.dL_unit = dL_unit
        self.gs_min = gs_min
        self.gs_max = gs_max
        self.gs_unit = gs_unit
        self.L_min = L_min
        self.L_max = L_max
        # The leaf-area grid step is tied to the action step dL_unit.
        self.L_unit = dL_unit
        self.L_space = np.round(np.arange(L_min, L_max+self.L_unit, self.L_unit), 6)
        self.s_min = s_min
        self.s_max = s_max
        # Soil-moisture step: the smallest non-zero transpiration amount.
        self.s_unit = np.round(Ef(self.L_unit, gs_unit, 0, slope, dt), 6)
        self.s_space = np.round(np.arange(s_min, s_max+self.s_unit, self.s_unit), 6)
    def R(self, dL, gs, L):
        """ Return the current reward for the state and action. """
        return Anf(dL, gs, self.ca, L, self.mL, self.cL, self.dt)
    def T(self, dL, gs, L, s):
        """
        Transition model.  From a state and an action,
        return a list of (probability, result-state) pairs.
        """
        # Deterministic water loss (transpiration) this step.
        E = Ef(dL, gs, L, self.slope, self.dt)
        sE = s-E
        # Number of discrete rain depths between the post-loss moisture
        # and saturation.
        rd_sE_len = int(round((self.s_max-sE) / self.s_unit+1))
        rd_sE = np.linspace(0, 1-sE, rd_sE_len)
        if rd_sE_len >= 2:
            # No rain, or rain too small to move one grid step: stay at sE.
            p_sE1 = 1.0-self.k*self.dt
            p_sE2 = rd_prob(self.k*self.dt, self.mean_rd, 0, self.s_unit)
            p_sE = p_sE1+p_sE2
            p = [(p_sE, np.round(sE, 6))]
            # Rain large enough to saturate the soil (s -> 1.0): tail mass.
            p_1 = self.k*self.dt*(np.exp(-(1.0/self.mean_rd)*(1.0-sE)))
            if rd_sE_len >= 3:
                for r in rd_sE[1:-1]:
                    p_si = rd_prob(self.k*self.dt, self.mean_rd, r, self.s_unit)
                    p.append((p_si, np.round(r+sE, 6)))
                p.append((p_1, 1.0))
            else:
                p.append((p_1, 1.0))
        else:
            # Soil is already (numerically) at saturation: stay there.
            p = [(1.0, 1.0)]
        return p
    def dL_space(self, L):
        """ Return a list of dL values that can be performed in this state. """
        return np.arange(self.L_min-L, self.L_max-L+self.L_unit, self.L_unit)
    def gs_space(self, dL, L, s):
        """ Return a list of gs values that can be performed in this state. """
        if L+dL == 0:
            # No leaves left: no transpiration possible.
            return [0]
        else:
            gs_full_space_len = int((self.gs_max-self.gs_min) / self.gs_unit+1)
            gs_full_space = np.linspace(self.gs_min, self.gs_max, gs_full_space_len)
            # Cap conductance so transpiration cannot exceed available water.
            gsmax_s = gsmax_sf(dL, L, s, self.slope, self.dt)
            if gsmax_s > self.gs_max:
                return gs_full_space
            else:
                boundary = int(gsmax_s / self.gs_unit)+1
                return gs_full_space[:boundary]
| true |
b1808f3ebe9420e737934a10f91b39d09a152a5a | Python | LuizaM21/Learn_python | /Python_server_testing/Genios_threads.py | UTF-8 | 1,306 | 2.828125 | 3 | [] | no_license | from timeit import default_timer as timer
import bs4
import urllib.request
import ConfigData as config_data
from multiprocessing import Process
from Python_files_manipulation.CSVManipulation import CSVManipulation as csv_manipulation
conf_data = config_data.ConfigData.get_instance()
cube_types_file = conf_data.get_value(config_data.CUBE_TYPES_CSV)
# Column 1 of the configured CSV holds the URLs to fetch.
URL_lists = csv_manipulation(cube_types_file).read_csv_specific_column(1)
def get_page_content(site_url):
    """Loads html content of the site"""
    # Returns the element with id="header" (None if absent); raises
    # ValueError for responses that are not text/html.
    with urllib.request.urlopen(site_url) as response:
        if 'text/html' in response.headers['Content-Type']:
            current_header = bs4.BeautifulSoup(response.read(), features="html.parser").find(id="header")
            return current_header
    raise ValueError('Not a valid HTML page.')
if __name__ == '__main__':
    # Fetch every URL in its own process, then wait for all of them.
    process_list = []
    for site in URL_lists:
        start_time = timer()
        process = Process(target=get_page_content, args=(site,))
        process_list.append(process)
        process.start()
        end_time = timer()
        # NOTE(review): this measures only how long Process.start() took,
        # not how long the page fetch ran in the child process.
        duration = end_time - start_time
        print("\ncalled link: {0}\nprocess name: {1}\nprocess duration: {2:.3f}"
              .format(site, process.name, duration))
    for process_ in process_list:
        process_.join()
| true |
b842b98dbbaabbd5a0f1a66ffe0246e88d5e1255 | Python | Charlie-Ren/ML5525 | /hw1-logistic.py | UTF-8 | 3,524 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[39]:
import numpy as np, pandas as pd
from matplotlib import pyplot as pl
# Load the IRIS features/labels (no header row) and shuffle both frames
# with the same random permutation so rows stay aligned.
feat=pd.read_csv("IRISFeat.csv",header=None)
label=pd.read_csv("IRISlabel.csv",header=None)
idx=np.random.permutation(feat.index)# shuffle
X_shuffle=feat.reindex(idx).to_numpy()
y_shuffle=label.reindex(idx).to_numpy()
X_shuffle=np.c_[X_shuffle,np.ones(len(X_shuffle))] #add one extra column with only 1 exists.
# Per-fold error slots, pre-filled with a -1 sentinel.
train_err=[-1,-1,-1,-1,-1]
valid_err=[-1,-1,-1,-1,-1]
#data process
# In[40]:
def sigmoid(para):
    """Element-wise logistic function 1 / (1 + exp(-para))."""
    return 1.0 / (1 + np.exp(-para))
# In[41]:
def y_predict_class(X_valid, model_weights, model_intercept):
    """Hard 0/1 predictions: sigmoid(X @ w) thresholded at 0.5.

    NOTE(review): model_intercept is unused -- the intercept rides along
    inside model_weights via the appended all-ones column of X.
    """
    probabilities = sigmoid(np.dot(X_valid, model_weights))
    probabilities[probabilities >= 0.5] = 1
    probabilities[probabilities < 0.5] = 0
    return probabilities
# In[42]:
def cost(y_pred, y_train):
    """Misclassification rate: fraction of positions where labels differ."""
    mismatches = sum(1 for i in range(len(y_pred)) if y_pred[i] != y_train[i])
    return mismatches / len(y_pred)
# In[46]:
def train(X_train,y_train):
    """Fit logistic-regression weights by batch gradient descent.

    Returns (weights, intercept); the intercept is weights[2] because the
    bias rides on the appended all-ones third column of X.  Weights start
    from an unseeded np.random draw, so results vary run to run.
    """
    weights=np.random.random((3,1)) # get the intercept into the weights so we can do multipy directly.
    for i in range(500):
        y_pred=y_predict_class(X_train,weights,weights[2]) #get 2 rows
        err=cost(y_pred,y_train)
        # Early stop once training error is (near) zero.
        if err<0.001:
            break
        lr=0.005
        hout=sigmoid(np.dot(X_train,weights)) #pred-ground_truth
        gradient=np.dot(np.transpose(X_train),(hout-y_train))
        weights=weights-lr*gradient
        #print(err)
    return weights,weights[2]
# In[47]:
def get_next_train_valid(X_shuffle, y_shuffle, itr):
    """Return (X_train, y_train, X_valid, y_valid) for fold `itr`.

    5-fold split over 150 rows: 30 validation rows per fold, with the
    last fold absorbing any remainder.  DRY fix: the original had the
    same four lines copy-pasted five times in an if/elif chain; one
    computed slice is behavior-identical for itr in 0..4.
    """
    fold_size = 30
    start = itr * fold_size
    # The last fold runs to the end so no row is ever dropped.
    stop = start + fold_size if itr < 4 else len(X_shuffle)
    X_valid = X_shuffle[start:stop]
    y_valid = y_shuffle[start:stop]
    X_train = np.delete(X_shuffle, range(start, stop), 0)
    y_train = np.delete(y_shuffle, range(start, stop), 0)
    return X_train, y_train, X_valid, y_valid
# In[66]:
# 5-fold cross-validation: train on 4 folds, score on the held-out one.
for i in range(5):
    X_train,y_train,X_valid,y_valid=get_next_train_valid(X_shuffle,y_shuffle,i)
    weight,intercept=train(X_train,y_train)
    y_pred0=y_predict_class(X_train,weight,intercept)
    train_err[i]=cost(y_pred0,y_train)
    y_pred1=y_predict_class(X_valid,weight,intercept)
    #pos1,neg1=count(y_valid) #for confusion matrix
    #pos2,neg2=count(y_pred1)
    #print(",valid,",pos1,neg1)
    #print(",pred",pos2,neg2)
    valid_err[i]=cost(y_pred1,y_valid)
# Plot training vs validation error per fold.
iteration=[1,2,3,4,5]
pl.plot(iteration,train_err,label="training error")
pl.plot(iteration,valid_err,label="validation error")
pl.xlabel("iteration time")
pl.ylabel("error rate")
pl.grid(True)
pl.legend()
pl.show()
# In[63]:
def count(y_train):
    """Return (positives, negatives): labels equal to 1 vs everything else."""
    pos = sum(1 for i in range(len(y_train)) if y_train[i] == 1)
    neg = len(y_train) - pos
    return pos, neg
# In[ ]:
| true |
3503bb1b4bf94a92d8df3bae82e7f3ad34eed40a | Python | 0xfirefist/cryptopals | /l1-basics/chal4.py | UTF-8 | 718 | 3.484375 | 3 | [] | no_license | # Detect single-character XOR
from pprint import pprint
from chal3 import decrypt
# filter list based on printable character
def filter(decryptedList):
    """Return True if any candidate plaintext contains a byte > 126.

    Idiom fix: any() over a generator replaces the manual nested loops.
    NOTE(review): this shadows the builtin filter() for the rest of the
    module; the name is kept only because detect() below calls it --
    renaming (e.g. has_non_printable) would be safer.
    """
    return any(c > 126 for decryptedString in decryptedList for c in decryptedString)
# this will return a list of possible ciphers
def detect(ciphers):
    """Keep only the hex cipher strings whose decryption candidates are
    all printable (no byte above 126, per filter())."""
    possibleCiphers = []
    for cipher in ciphers:
        # decrypt() (from chal3) yields candidate plaintexts per 1-byte key.
        possPlain = decrypt(bytes.fromhex(cipher))
        if not filter(possPlain):
            possibleCiphers = possibleCiphers + [cipher]
    return possibleCiphers
if __name__ == "__main__":
    # read file input
    # NOTE(review): the file handle is never closed -- a `with` block
    # would be tidier.
    ciphers = open("files/4.txt").read().split("\n")
    pprint(detect(ciphers)) | true
da07b99cb2bf9fd29fbd1ca213723ed82149c405 | Python | m-niemiec/space-impact | /ship.py | UTF-8 | 2,189 | 3.546875 | 4 | [] | no_license | import pygame
from settings import Settings
class Ship:
    """Manage the player's ship: sprite, position and movement state."""

    def __init__(self, si_game):
        """Load the ship sprite and place it at the left-center of the screen."""
        self.screen = si_game.screen
        self.screen_rect = si_game.screen.get_rect()
        self.settings = Settings()
        # Load the sprite and scale it with the screen resolution.
        scale = int(self.settings.screen_width * 0.0019)
        self.image = pygame.image.load("images/ship_1.png")
        self.image = pygame.transform.scale(self.image, (80 * scale, 56 * scale))
        self.rect = self.image.get_rect()
        # Start each new ship at the left-center of the screen.
        self.rect.center = (int(self.settings.screen_width * 0.07),
                            int(self.settings.screen_height * 0.5))
        # Movement flags: set by the input handler, consumed by update().
        self.moving_right = False
        self.moving_left = False
        self.moving_up = False
        self.moving_down = False
        self.ship_speed = self.settings.screen_width * 0.005

    def update(self):
        """Move one step in the single active direction, clamped to the screen."""
        if self.moving_right and self.rect.x < self.settings.screen_width * 0.85:
            self.rect.x += self.ship_speed
        elif self.moving_left and self.rect.x > 0:
            self.rect.x -= self.ship_speed
        elif self.moving_up and self.rect.y > 0:
            self.rect.y -= self.ship_speed
        elif self.moving_down and self.rect.y < self.settings.screen_height * 0.84:
            self.rect.y += self.ship_speed

    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)

    def center_ship(self):
        """Snap the ship back to the left-center of the screen."""
        self.rect.center = (int(self.settings.screen_width * 0.07),
                            int(self.settings.screen_height * 0.5))
        self.x = float(self.rect.x)
| true |
456de6a70ade0e55c29d1c631b1dd33447409582 | Python | dapr/python-sdk | /dapr/actor/runtime/context.py | UTF-8 | 4,721 | 2.546875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | # -*- coding: utf-8 -*-
"""
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from dapr.actor.id import ActorId
from dapr.actor.runtime._state_provider import StateProvider
from dapr.clients.base import DaprActorClientBase
from dapr.serializers import Serializer
from typing import Callable, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from dapr.actor.runtime.actor import Actor
from dapr.actor.runtime._type_information import ActorTypeInformation
class ActorRuntimeContext:
    """Per-actor-type runtime context.

    Bundles what the runtime needs to host one actor type: the actor's
    type information, the serializers used for invocation payloads and
    persisted state, and the client used to talk to the Dapr runtime.

    Attributes:
        actor_type_info(:class:`ActorTypeInformation`): type information
            used to instantiate actor objects.
        message_serializer(:class:`Serializer`): serializer for invocation
            request and response bodies.
        state_serializer(:class:`Serializer`): serializer for state values.
        state_provider(:class:`StateProvider`): adapter used by the actor
            state manager.
        dapr_client(:class:`DaprActorClientBase`): actor client for the
            Dapr runtime.
    """

    def __init__(
            self, actor_type_info: 'ActorTypeInformation',
            message_serializer: Serializer, state_serializer: Serializer,
            actor_client: DaprActorClientBase,
            actor_factory: Optional[Callable[['ActorRuntimeContext', ActorId], 'Actor']] = None):
        """Creates a new :class:`ActorRuntimeContext`.

        Args:
            actor_type_info(:class:`ActorTypeInformation`): type information
                used to instantiate actor objects.
            message_serializer(:class:`Serializer`): serializer for
                invocation request and response bodies.
            state_serializer(:class:`Serializer`): serializer for state
                values.
            actor_client(:class:`DaprActorClientBase`): actor client for
                the Dapr runtime.
            actor_factory(Callable, optional): factory producing an Actor
                from this context and an :class:`ActorId`; defaults to
                calling the implementation type's constructor.
        """
        self._actor_type_info = actor_type_info
        # Fall back to the default factory when none is supplied.
        self._actor_factory = actor_factory or self._default_actor_factory
        self._message_serializer = message_serializer
        self._state_serializer = state_serializer
        self._dapr_client = actor_client
        # The state provider adapts the Dapr client for the state manager.
        self._provider: StateProvider = StateProvider(self._dapr_client, state_serializer)

    @property
    def actor_type_info(self) -> 'ActorTypeInformation':
        """The :class:`ActorTypeInformation` of the hosted actor type."""
        return self._actor_type_info

    @property
    def message_serializer(self) -> Serializer:
        """The serializer used for actor invocation bodies."""
        return self._message_serializer

    @property
    def state_serializer(self) -> Serializer:
        """The serializer used for persisted state values."""
        return self._state_serializer

    @property
    def state_provider(self) -> StateProvider:
        """The provider that manages this actor type's state."""
        return self._provider

    @property
    def dapr_client(self) -> DaprActorClientBase:
        """The Dapr actor client."""
        return self._dapr_client

    def create_actor(self, actor_id: ActorId) -> 'Actor':
        """Instantiates an :class:`Actor` for *actor_id* via the factory.

        Args:
            actor_id (:class:`ActorId`): identity of the actor to create.

        Returns:
            :class:`Actor`: the newly created actor.
        """
        return self._actor_factory(self, actor_id)

    def _default_actor_factory(
            self, ctx: 'ActorRuntimeContext', actor_id: ActorId) -> 'Actor':
        """Default factory: calls the implementation type's constructor.

        Args:
            ctx (:class:`ActorRuntimeContext`): runtime context for the
                new actor.
            actor_id (:class:`ActorId`): identity of the actor to create.

        Returns:
            :class:`Actor`: the newly created actor.
        """
        return self.actor_type_info.implementation_type(ctx, actor_id)
| true |
269996c00984e84d54e9f0dd8f40e42de2e75cc2 | Python | tehzeebb1/Project104 | /read.py | UTF-8 | 136 | 2.6875 | 3 | [] | no_license | import csv
# Read every row of the CSV (header included) into a list of lists of
# strings, then print the whole table.
with open('height-weight.csv',newline='') as f:
    reader=csv.reader(f)
    file_data=list(reader)
print(file_data)
869b66d8ac7825c08fa0c22ecf20cf0753744dab | Python | wills201/Challenge-Probs | /mergeindex.py | UTF-8 | 535 | 3.28125 | 3 | [] | no_license | l1 = [1,7,3,4,9,3,8,6,8,9]
l2 = [0,3,8,6,8,4,7,6,8,9]
def mergeindex(l1, l2):
    """Return the smallest index ``idx`` such that ``l1[idx:] == l2[idx:]``.

    Scans candidate split points from the front; because empty slices are
    equal, ``len(l1)`` is returned when the lists share no common suffix.
    Fixes an off-by-one in the original, which incremented before
    comparing and therefore could never return 0 for identical lists.
    """
    idx = 0
    while idx <= len(l1):
        if l1[idx:] == l2[idx:]:
            return idx
        idx += 1
def mergeindex2(l1, l2):
    """Return the first index from which l1 and l2 are element-wise equal.

    Walks backwards from the end while elements match, so it is O(n)
    without slice comparisons. Returns ``len(l1)`` when the lists share
    no common suffix and 0 when they are fully identical.
    Replaces the original, which compared single elements (not suffixes)
    and could raise IndexError when the index reached ``len(l1)``.
    """
    idx = len(l1)
    while idx > 0 and l1[idx - 1] == l2[idx - 1]:
        idx -= 1
    return idx
def mergeindex3(l1, l2):
    """Backward-scan variant: return the start index of the common suffix.

    Fixes two defects in the original: the last element was never
    compared (the index was decremented before the first comparison),
    and fully matching lists fell off the loop returning None instead
    of 0.
    """
    idx = len(l1) - 1
    while idx >= 0:
        if l1[idx] != l2[idx]:
            return idx + 1
        idx -= 1
    return 0
# For the sample lists above the shared suffix starts at index 7.
print(mergeindex3(l1,l2))
| true |
79ea7ac1fa5080ff2626a47bdbcad600254570c3 | Python | TemistoclesZwang/HackerRank_e_URI | /URI judgeOnline/1771.py | UTF-8 | 3,119 | 3.40625 | 3 | [] | no_license | class Numero:
CLASSEB = list (range(1,16))
CLASSEI = list (range(16,31))
CLASSEN = list (range(31,46))
CLASSEG = list (range(46,61))
CLASSEO = list (range(61,76))
    def __init__(self, numero, classe):
        """Store the number and its column letter ('B'/'I'/'N'/'G'/'O')."""
        self.numero = numero
        self.classe = classe
def valido(self):
if (self.classe == 'B'):
return self.numero in Numero.CLASSEB
if (self.classe == 'I'):
return self.numero in Numero.CLASSEI
if (self.classe == 'N'):
return self.numero in Numero.CLASSEN
if (self.classe == 'G'):
return self.numero in Numero.CLASSEG
if (self.classe == 'O'):
return self.numero in Numero.CLASSEO
def mudancas_possiveis(self):
numeros = self.numero
list_classe = ['B','I','N','G','O']
for i in range(5):
if (self.classe == list_classe[i]) and (self.valido() == False):
if numeros in self.CLASSEB:
return (Numero(numeros, 'B'))
elif numeros in self.CLASSEI:
return (Numero(numeros, 'I'))
elif numeros in self.CLASSEN:
return (Numero(numeros, 'N'))
elif numeros in self.CLASSEG:
return (Numero(numeros, 'G'))
elif numeros in self.CLASSEO:
return (Numero(numeros, 'O'))
    def __str__(self):
        # Debug format: [number, letter, is-valid].
        return '[{}, {}, {}]'.format(self.numero, self.classe, self.valido())
    def __repr__(self):
        # Reuse __str__ so containers of Numero print readably.
        return str(self)
def main():
    """Read bingo cards from stdin until EOF and classify each as OK,
    RECICLAVEL (reusable) or DESCARTAVEL (discardable).

    Each input line holds the card's numbers; a '*' free-space marker is
    inserted at position 12. The star is never inside any column range,
    so it always counts as one invalid entry -- compensated by the
    ``quantidade_falso - 1`` at the end.
    """
    while True:
        try:
            #entradas = input()
            cartela = list(map(int, input().split()))
            cartela.insert(12, '*')
            numeros = []
            # Entries are grouped with stride 5: indices 0,5,10,... get
            # letter B, 1,6,11,... letter I, and so on.
            for i in range(0, len(cartela), 5):
                numeros.append(Numero(cartela[i], 'B'))
            for i in range(1, len(cartela), 5):
                numeros.append(Numero(cartela[i], 'I'))
            for i in range(2, len(cartela), 5):
                numeros.append(Numero(cartela[i], 'N'))
            for i in range(3, len(cartela), 5):
                numeros.append(Numero(cartela[i], 'G'))
            for i in range(4, len(cartela), 5):
                numeros.append(Numero(cartela[i], 'O'))
            # Count numbers that could be fixed by moving to another column.
            quantidade_trocaveis = 0
            for i in range(len(numeros)):
                trocado = numeros[i].mudancas_possiveis()
                if trocado != None:
                    quantidade_trocaveis += 1
            # Count entries that are invalid where they currently sit.
            quantidade_falso = 0
            for i in range(len(numeros)):
                if not numeros[i].valido():
                    quantidade_falso += 1
            # NOTE(review): with quantidade_falso > 0 the second condition
            # always fires, so "RECICLAVEL" looks unreachable -- confirm
            # against the judge's expected output.
            def tipo_tabela(quantidade_falso):
                if quantidade_falso == 0:
                    return "OK"
                if quantidade_trocaveis/2 < 1 or quantidade_falso > 0:
                    return "DESCARTAVEL"
                else:
                    return "RECICLAVEL"
            print(tipo_tabela(quantidade_falso - 1))
        except EOFError:
            break
main()
27dabaf21b0d96fb3ed72f62a63167969d8ce239 | Python | john-hewitt/cs229-head-tracking | /util.py | UTF-8 | 13,590 | 2.828125 | 3 | [] | no_license | import csv
import json
import os
import numpy as np
import sklearn as sk
import re
import cnn
# globals: study time points (months; 0 == baseline) and the five
# experience types (Relaxing, Negative 1/2, Positive 1/2 -- per file names).
mos = [0, 2, 6, 12]
exps = ['R', 'N1', 'N2', 'P1', 'P2']
# file naming conventions: participant id is 2 letters + 5 digits; the
# month infix is optional; experience code is one of r/n1/n2/p1/p2.
id_reg = '[a-z]{2}[0-9]{5}'
mo_reg = '(((2)|(6)|(12))mo)?'
exp_reg = '((n1)|(n2)|(r)|(p1)|(p2))'
tfname_reg = r'tracking_{}{}{}\.txt'.format(id_reg,
                                            mo_reg,
                                            exp_reg)
def valid_tfname(fname):
    """Return True when *fname* matches the tracking-file naming
    convention (case-insensitive), e.g. ``tracking_ab12345r.txt``."""
    return re.match(tfname_reg, fname.lower()) is not None
def tfname_parts(fname):
    """Parse a valid tracking filename into (participant id, month, exp).

    Relies on fixed offsets: 'tracking_' is 9 characters, the id is 7,
    an optional single digit + 'mo' (or '12mo') encodes the month, and
    the experience code sits just before the 4-character '.txt' suffix.
    """
    assert valid_tfname(fname)
    fname = fname.lower()
    # Participant id: 7 characters right after the 'tracking_' prefix.
    Id = fname[9:16]
    if fname[17:19] == 'mo':
        # Single-digit month (2 or 6), e.g. 'tracking_ab123452mo...'.
        Mo = int(fname[16])
    elif fname[18:20] == 'mo':
        # Two-digit month; the regex only permits '12mo'.
        Mo = 12
    else:
        # No month infix: baseline session.
        Mo = 0
    # Experience code: 'r' is one char, the others two ('n1', 'p2', ...).
    if fname[-5] == 'r':
        Exp = 'r'
    else:
        Exp = fname[-6:-4]
    assert Exp.upper() in exps
    assert Mo in mos
    return Id,Mo,Exp
def tracking_file(part, mo, exp):
    """ Helper function.
        For a given participant PART, time frame (month number) MO,
        and experience type EXP, returns the string associated with
        the relevant .txt filename
    """
    # "Good enough" participant-id check: exactly 7 characters.
    valid_pId = lambda x : len(x) == 7 # good enough
    assert valid_pId(part)
    assert mo in mos
    assert exp.upper() in exps
    # Month 0 is the baseline session and has no '<n>mo' infix.
    # NOTE(review): .upper() uppercases the entire filename (including
    # 'TRACKING_'); this matters on case-sensitive filesystems.
    if mo > 0:
        tfilename = 'tracking_{}{}mo{}.txt'.format(part, mo, exp).upper()
    else:
        tfilename = 'tracking_{}{}.txt'.format(part, exp).upper()
    base = os.path.relpath('../data/Tracking/')
    tfile = os.path.join(base, tfilename)
    return tfile
def which_months(part):
    """ Helper function
        Returns all of the months for which we have the head tracking
        data of participant PART for all experience types.

        Note: returns a lazy ``filter`` object (Python 3); wrap it in
        ``list`` if you need to iterate it more than once.
    """
    return filter(lambda mo: have_part_mo(part, mo), mos)
def have_part_mo(part, mo):
    """Return True when tracking files exist on disk for participant
    *part* at month *mo* for every experience type."""
    return all(
        os.path.isfile(tracking_file(part, mo, exp)) for exp in exps
    )
# SARAH
def load_participant_scores(csvfile):
    """ Load participant data (GAD7 and SCL20 scores) from CSVFILE.
        Only load a participant's data if we have their head tracking
        data (baseline month). Useful helper function: have_part_mo.
        Returns a dictionary mapping participant ID string to
        a tuple (GAD7 score, SCL20 score) -- both kept as the raw CSV
        strings, not converted to numbers.
    """
    scores_dict = {}
    # load labels from file
    with open(csvfile,'rt', encoding = "utf8") as csvfile:
        reader = csv.DictReader(csvfile)
        # NOTE(review): DictReader already consumed the header, so this
        # next() discards the first data row -- confirm the CSV really
        # has an extra heading line.
        next(reader) # skip headings
        for row in reader:
            part = row["subNum"]
            if have_part_mo(part, 0):
                gad7 = row["GAD7_score"]
                scl20 = row["SCL_20"]
                # only add labels if both scores are valid
                if (gad7 != "NA") and (scl20 != "NA" ):
                    scores_dict[part] = (gad7, scl20)
    return scores_dict
# Column names of the two score types in the participant-scores CSV.
gad7 = 'GAD7_score'
scl20 = 'SCL_20'
def load_scores(csvfile, pid_mos, score_type):
    """ Given a list of tuples PID_MOS (the first element is participant
        id, the second is the month), load the SCORE_TYPE score for
        each from CSVFILE.

        SCORE_TYPE is the name of the column that contains the score.

        Note: this function should only be passed pid_mo pairs for
        which we have the given score for (use which_parts_have_score).

        Returns a list of int scores aligned with PID_MOS (the original
        docstring claimed a numpy array; it is a plain list).
    """
    scores_dict = {}
    with open(csvfile,'rt', encoding = "utf8") as csvfile:
        reader = csv.DictReader(csvfile)
        # NOTE(review): DictReader already consumed the header; this
        # next() skips the first data row as well -- confirm intended.
        next(reader) # skip headings
        for row in reader:
            pid = row["subNum"].lower()
            mo = int(row["time"])
            if (pid,mo) in pid_mos:
                score = row[score_type]
                assert score != "NA"
                scores_dict[(pid,mo)] = int(score)
    # Re-order to match the caller's pid_mos ordering.
    scores = [scores_dict[pid_mo] for pid_mo in pid_mos]
    return scores
def which_parts_have_score(csvfile, score_type):
    """ For scoring metric SCORE_TYPE, return all tuples
        (lowercase participant id, month) for which we have that score
        in CSVFILE.

        SCORE_TYPE is the name of the column that contains the score.

        Returns a list of unique tuples.
    """
    pid_mos = []
    with open(csvfile,'rt', encoding = "utf8") as csvfile:
        reader = csv.DictReader(csvfile)
        # NOTE(review): DictReader already consumed the header; this
        # next() skips the first data row as well -- confirm intended.
        next(reader) # skip headings
        for row in reader:
            pid = row['subNum'].lower()
            mo = int(row['time'])
            score = row[score_type]
            if score != "NA":
                pid_mos.append((pid, mo))
    # ensure each element of pid_mos is unique
    assert len(set(pid_mos)) == len(pid_mos)
    return pid_mos
def which_parts_have_tracking_data(folder, verbose=False):
    """ Returns all tuples (lowercase participant id, month) for which
        we have tracking data in FOLDER, keeping only pairs whose files
        exist for every experience type.
    """
    # No-op printer unless verbose was requested.
    vprint = print if verbose else lambda x : x
    # get lowercase names of all files
    tfiles = [f.lower() for f in os.listdir(folder)]
    vprint('number of tracking files found: {}'.format(len(tfiles)))
    # regex-filter for all "usable" filenames
    val_tfiles = filter(valid_tfname, tfiles)
    # parse filenames into (pid, month) tuples
    pid_mos = [tfname_parts(f)[0:2] for f in val_tfiles]
    vprint('number of VALID tracking files found: {}'.format(len(pid_mos)))
    pid_mos_uniq = list(set(pid_mos))
    vprint('number of (pid,mo) pairs found: {}'.format(len(pid_mos_uniq)))
    # make sure all returned pairs have all experience types
    pid_mos_filt = list(filter(lambda pm : have_part_mo(*pm), pid_mos_uniq))
    return pid_mos_filt
# SARAH
def GAD7_labels(parts, scoresDict):
    """For each of the N participants in *parts*, look up the GAD7 score
    (the first element of the tuple in *scoresDict*).

    Returns an N-element 1-D numpy float array.
    """
    labels = np.zeros(len(parts))
    for idx, part in enumerate(parts):
        labels[idx] = scoresDict[part][0]
    return labels
# SARAH
def SCL20_labels(parts, scoresDict):
    """For each participant in *parts*, look up the SCL20 score (the
    second tuple element in *scoresDict*) and binarize it at 0.5 via
    SCL20_threshold.

    Returns an N-element numpy array of {0, 1} labels.
    """
    raw = np.zeros(len(parts))
    for idx, part in enumerate(parts):
        raw[idx] = scoresDict[part][1]
    # Convert continuous scores to binary labels (threshold at 0.5).
    return SCL20_threshold(raw)
def SCL20_threshold(scores):
    """Binarize *scores* in place: values >= 0.5 become 1, the rest 0.

    Mutates and returns the same numpy array.
    """
    positive = scores >= 0.5
    scores[~positive] = 0
    scores[positive] = 1
    return scores
def compute_fvec_magnitude_freqs(tfile):
    """ Takes in a tracking file path, and computes the feature vector
        corresponding to head movement data for each experience type.
        The featurization is described in the README.

        Builds, per channel, a 6-bin histogram of |frame-to-frame delta|
        magnitudes (log-spaced bins) normalized to frequencies, and
        returns it flattened as a 1-by-36 numpy array.
    """
    with open(tfile) as tsvin:
        tsvin = csv.reader(tsvin, delimiter='\t')
        rot = []
        for index, row in enumerate(tsvin):
            # Columns 1 and 2 each hold a JSON list of 3 channel values.
            ch1 = row[1]
            ch2 = row[2]
            roti = json.loads(ch1) + json.loads(ch2)
            rot.append(roti)
    # compute features
    rot = np.array(rot)
    delta = np.absolute(np.diff(rot, axis=0))
    # NOTE(review): strict inequalities mean deltas exactly equal to a
    # bin boundary (10, 1, .1, .01, .001) fall into no bin -- confirm.
    sums = []
    sums.append(np.sum((delta > 10), 0))
    sums.append(np.sum((delta < 10) & (delta >1), 0))
    sums.append(np.sum((delta < 1) & (delta >.1), 0))
    sums.append(np.sum((delta < .1) & (delta >.01), 0))
    sums.append(np.sum((delta < .01) & (delta >.001), 0))
    sums.append(np.sum((delta < .001) , 0))
    # Normalize counts to per-channel frequencies, then flatten.
    fvec = (np.array(sums) / np.sum(sums, axis=0)).flatten()
    fvec = np.expand_dims(fvec, 0)
    return fvec
def load_channels(tfile):
    """ Returns (data, deltas) for one tracking file: an N-by-6 numpy
        array of 6-channel samples and the (N-1)-by-6 array of absolute
        frame-to-frame differences.

        Note: prints both shapes as a debug side effect.
    """
    # load data from file
    with open(tfile) as tsvin:
        tsvin = csv.reader(tsvin, delimiter='\t')
        rot = []
        for index, row in enumerate(tsvin):
            # Columns 1 and 2 each hold a JSON list of 3 channel values.
            ch1 = row[1]
            ch2 = row[2]
            roti = json.loads(ch1) + json.loads(ch2)
            rot.append(roti)
    delta = np.absolute(np.diff(rot, axis=0))
    rot = np.array(rot)
    delta = np.array(delta)
    print(rot.shape)
    print(delta.shape)
    return rot, delta
def compute_freq_fvec(tfile, N=20):
    """ Takes in a tracking file path, and computes the feature vector
        corresponding to the DFT of the head movement data for each
        experience type.

        Takes a N-pt DFT of each of the 6 raw channels and of the 6
        delta channels; with N magnitudes per series that gives
        12 series * N = 12N features.

        Returns the feature vector as a 1-by-12N numpy array.
    """
    # load data from file
    with open(tfile) as tsvin:
        tsvin = csv.reader(tsvin, delimiter='\t')
        rot = []
        for index, row in enumerate(tsvin):
            ch1 = row[1]
            ch2 = row[2]
            roti = json.loads(ch1) + json.loads(ch2)
            rot.append(roti)
    # compute features
    rot = np.array(rot)
    delta = np.absolute(np.diff(rot, axis=0))
    # N-point FFT down each channel (time axis); column-major flatten
    # keeps each channel's spectrum contiguous.
    rot_f = np.fft.fft(rot, n=N, axis=0)
    delta_f = np.fft.fft(delta, n=N, axis=0)
    fvec = np.abs(np.concatenate([rot_f.flatten('F'), delta_f.flatten('F')]))
    fvec = np.expand_dims(fvec, 0)
    assert fvec.shape == (1, 12*N)
    return fvec
def compute_fvec(tfile):
    """ Takes in a tracking file path, and computes the feature vector
        corresponding to head movement data for each experience type.

        Per channel: variance of the raw signal, mean absolute
        frame-to-frame delta, and variance of the deltas.

        NOTE(review): with 6 channels this yields 3 * 6 = 18 features,
        not the 24 claimed by the original docstring -- reconcile with
        get_experience_indices, which assumes 24 per experience.

        Returns the feature vector as a 1-by-18 numpy array.
    """
    # load data from file
    with open(tfile) as tsvin:
        tsvin = csv.reader(tsvin, delimiter='\t')
        rot = []
        for index, row in enumerate(tsvin):
            ch1 = row[1]
            ch2 = row[2]
            roti = json.loads(ch1) + json.loads(ch2)
            rot.append(roti)
    # compute features
    rot = np.array(rot)
    rotsigmas = np.var(rot, axis=0)
    delta = np.absolute(np.diff(rot, axis=0))
    deltasums = np.sum(delta, axis=0) / rot.shape[0]
    deltasigmas = np.var(delta, axis=0)
    # shape output
    fvec = np.concatenate([rotsigmas, deltasums, deltasigmas])
    fvec = np.expand_dims(fvec, 0)
    return fvec
def compute_fvecs_for_parts(pid_mos, featurization):
    """ For each (pid, month) given by PID_MOS, compute features for
        each of the experience types and concatenate them to form
        one feature vector per participant.

        FEATURIZATION selects the per-file featurizer:
        'summary_stats' -> compute_fvec, 'norm_hist' ->
        compute_fvec_magnitude_freqs, 'dft' -> compute_freq_fvec (N=30).

        Note: We must have tracking data across all 5 experience types
        for each (pid, month) pair.

        Returns the training matrix as an N-by-D numpy array (D depends
        on the featurization), or None when PID_MOS is empty (matching
        the original behavior).

        Raises:
            ValueError: if FEATURIZATION is not a known method (now
            raised up front instead of on the first loop iteration).
    """
    # Dispatch table replaces the repeated if/elif chain in the loop.
    featurizers = {
        'summary_stats': compute_fvec,
        'norm_hist': compute_fvec_magnitude_freqs,
        'dft': lambda tfile: compute_freq_fvec(tfile, 30),
    }
    if featurization not in featurizers:
        raise ValueError("Unknown featurization method: {}".format(featurization))
    featurize = featurizers[featurization]
    rows = []
    for index, (pid, mo) in enumerate(pid_mos):
        print(index, pid, mo)
        tfiles = [tracking_file(pid, mo, exp) for exp in exps]
        expvecs = [featurize(tfile) for tfile in tfiles]
        # One 1-by-D row per participant: experience vectors side by side.
        rows.append(np.concatenate(expvecs, axis=1))
    if not rows:
        return None
    # Single stack at the end is O(total) instead of the original
    # O(N^2) repeated np.concatenate inside the loop.
    return np.concatenate(rows, axis=0)
def load_raw_hm_data(tfile):
    """ Takes in a tracking file path, and loads raw head movement data
        for use in CNN featurization learning.

        Returns the samples as an n-by-6 numpy array (one row per frame,
        one column per channel; the original docstring said 6-by-n).
    """
    # load data from file
    with open(tfile) as tsvin:
        tsvin = csv.reader(tsvin, delimiter='\t')
        rot = []
        for index, row in enumerate(tsvin):
            # Columns 1 and 2 each hold a JSON list of 3 channel values.
            ch1 = row[1]
            ch2 = row[2]
            roti = json.loads(ch1) + json.loads(ch2)
            rot.append(roti)
    rot = np.array(rot)
    #print("rot shape is {} \n".format(rot.shape))
    return rot
def compute_CNN_featurization():
    ''' Should only be done once to learn model, then just read from disk when want to use'''
    # TODO: not implemented -- intended to learn a CNN-based
    # featurization once and persist it for later runs.
    return
def get_experience_indices(experience):
    """Given an experience type in ['R', 'N1', 'N2', 'P1', 'P2'],
    return the (start, end) half-open index range that the experience's
    24 features occupy in the concatenated per-participant feature
    vector.

    Raises:
        ValueError: if *experience* is not a known experience type
        (the original raised an opaque UnboundLocalError instead).
    """
    order = ('R', 'N1', 'N2', 'P1', 'P2')
    if experience not in order:
        raise ValueError("Unknown experience type: {}".format(experience))
    start = order.index(experience) * 24
    return (start, start + 24)
| true |
7a88241a3037fbdf8e159972c08c6162dda7e3ca | Python | stefanct/avr-lib | /scripts/timers.py | UTF-8 | 5,130 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, math, argparse, time
from cdecimal import Decimal
from prettytable import PrettyTable
from common import *
def main(*args):
    """Parse command-line options and print C '#define' constants for an
    AVR timer: a "short" prescaler/top pair whose period equals exactly
    one resolution unit (RES), and a "long" pair whose period is a whole
    multiple of RES within the short timer's reach.

    Returns 0 on success, 1 on invalid arguments or when no exact
    prescaler/top combination exists.

    NOTE: Python 2 script (print statements, xrange, cdecimal).
    """
    global verbose, long_width, timer_width, param_width, timer
    # docs: http://docs.python.org/dev/library/argparse.html#argparse.ArgumentParser.add_argument
    parser = argparse.ArgumentParser(description='Script to calculate AVR timer constants.')
    parser.add_argument('-v', action='count', dest='verbose', help='How much output is printed')
    parser.add_argument('-f', action='store', dest='freq', default='-1', help='Frequency in Hz')
    parser.add_argument('-r', '--res', action='store', dest='res', default='-1', help='Resolution in seconds. The smallest period the timer should support.')
    parser.add_argument('-tw', '--timer_width', action='store', dest='timer_width', default='8', help='Width of timer counter in bits')
    parser.add_argument('-lw', '--long_width', action='store', dest='long_width', default='8', help='Width of the variable storing the long count')
    parser.add_argument('-pw', '--param_width', action='store', dest='param_width', default='8', help='Width of the parameter configuring the duration')
    parser.add_argument('-t', '--timer', action='store', dest='timer', default='0', help='Timer number')
    # Keep the raw invocation for the generated documentation comment.
    orig_args = " ".join(args)
    args = parser.parse_args()
    verbose = args.verbose
    timer = int(args.timer)
    freq = int(args.freq)
    res = Decimal(args.res)
    timer_width = int(args.timer_width)
    long_width = int(args.long_width)
    param_width = int(args.param_width)
    # The duration parameter must be at least as wide as the long count.
    if param_width < long_width:
        param_width = long_width
    if \
        freq < 0 or \
        freq > 100 * pow(10, 6) or \
        res < 0 or \
        not isPowOfTwo(timer_width) or \
        not isPowOfTwo(param_width) or \
        not isPowOfTwo(long_width) \
        :
        printerr("Invalid arguments")
        print("args: %s" % str(args))
        return 1
    if verbose:
        print("args: %s" % str(args))
        print
        print("RES [s]:\t%f" % res)
        print("RES [ms]:\t%f" % (res * pow(10, 3)))
        print("RES [us]:\t%f" % (res * pow(10, 6)))
        print("RES [ns]:\t%f" % (res * pow(10, 9)))
        print
    min_res = "min period [RES]"
    min_ns = "min period [µs]"
    max_res = "max period [RES]"
    max_s = "max period [s]"
    presc_tbl = PrettyTable(["prescaler", min_res, min_ns, max_res, max_s])
    presc_tbl.align = "r"
    clk_period = (1 / Decimal(freq))
    timer_max = pow(2, timer_width) - 1
    long_max = pow(2, long_width) - 1
    ocr_short = timer_max + 1
    presc_short = 0
    # Search for a prescaler/top pair whose period is exactly one RES.
    for presc in (1, 8, 64, 256, 1024):
        if verbose:
            presc_tbl.add_row([
                presc,
                (presc * clk_period / res).quantize(Decimal("0.0001")),
                (presc * clk_period * pow(10, 6)).quantize(Decimal("0.000001")),
                (presc * clk_period * timer_max / res).quantize(Decimal("0.001")),
                (presc * clk_period * timer_max).quantize(Decimal("0.000001")),
            ])
        for i in xrange (1, timer_max + 1):
            val = Decimal(clk_period * presc * i / res)
            if val == 1:
                ocr_short = i
                presc_short = presc
                continue
    if verbose:
        print presc_tbl
    if presc_short == 0:
        print "No perfect match of prescaler and top value found for this resolution (%f), frequency and word lengths" % res
        return 1
    short_max_time = Decimal(clk_period * presc_short * ocr_short * 255)
    short_max_res = Decimal(short_max_time / res)
    if verbose:
        print "OCR_SHORT: %d, PRESC_SHORT: %d" % (ocr_short, presc_short)
        print "maximum interval reachable with the short prescaler (%d) alone: %fs (%d RES)" % (presc_short, short_max_time, short_max_res)
    ocr_long = timer_max + 1
    presc_long = 0
    # Search for a long pair: period a whole number of RES, within the
    # short timer's reach; the last (largest) match found wins.
    for presc in (1, 8, 64, 256, 1024):
        if presc < presc_short:
            continue
        for i in xrange (1, timer_max + 1):
            val = Decimal(clk_period * presc * i / res)
            if val <= short_max_res and Decimal.remainder(val, 1) == 0:
                presc_long = presc
                ocr_long = i
    long_dur = clk_period * presc_long * ocr_long
    long_max_time = Decimal(long_dur * long_max)
    long_max_res = int(long_max_time / res)
    if verbose:
        print "OCR_LONG: %d, PRESC_LONG: %d" % (ocr_long, presc_long)
        print "long interval is %fs (%d RES)" % (long_dur, long_dur / res)
        print "maximum interval reachable with the long prescaler (%d) alone: %fs (%d RES)" % (presc_long, long_max_time, long_max_res)
        print
    # Emit the C header constants (template lines stay flush-left so the
    # generated output is not indented).
    print """\
#define PRESC{timer}_SHORT PRESC_{presc_short}
#define PRESC{timer}_LONG PRESC_{presc_long}
#define OCR{timer}_SHORT {ocr_short}
#define OCR{timer}_LONG {ocr_long}
#define DUR{timer}_LONG {dur_long}
#define DUR{timer}_LONG_WIDTH {long_width}
#define DUR{timer}_PARAM_WIDTH {param_width}
/* for documentation only:
created by '{orig_args}' */
#define DUR{timer}_MAX {max_time}f /* [s] */
#define F_TIMER{timer} {freq} /* [Hz] */
#define RES{timer} {res}f /* [s] */""".format(timer=timer, presc_short=presc_short, presc_long=presc_long, ocr_short=ocr_short, ocr_long=ocr_long, dur_long=int(long_dur/res), long_width=long_width, param_width=param_width, orig_args=orig_args, max_time=long_max_time + short_max_time, freq=freq, res=res)
    return 0
if __name__ == '__main__':
    # Passes sys.argv including argv[0]; main() only joins the args
    # verbatim into the generated documentation comment.
    sys.exit(main(*sys.argv))
| true |
1f39440ee996093565fb96452af0b457958c451b | Python | jasonyu0100/General-Programs | /2018 Programs/Dynamic Programming/WoodCutter/test.py | UTF-8 | 859 | 2.828125 | 3 | [] | no_license | with open('input.txt') as f:
length = float(f.readline())
positions = list(map(float,f.readline().strip().split()))
cache = {}
def woodCutter(positions, cost, start, end, sequence):
    """Memoized minimum-cost board cutting: returns (min total cost,
    cut order) for the segment (start, end), where each cut costs the
    length of the segment it is made in.

    NOTE(review): the `cost` and `sequence` parameters are accumulated
    on recursive calls but never used in the result; `allCuts` is keyed
    by cost, so distinct sequences with equal cost overwrite each other
    (only the min-cost entry is read, so the returned cost is correct).
    """
    if (start,end) in cache:
        return cache[(start,end)]
    allCuts = {}
    # Any cut inside (start, end) costs the full segment length.
    cutCost = (end - start)
    for cut in positions:
        if start < cut and cut < end:
            left,leftSequence = woodCutter(positions,cost + cutCost,start,cut, sequence + [cut])
            right,rightSequence = woodCutter(positions,cost + cutCost,cut, end, sequence + [cut])
            totalCost = left + right + cutCost
            newSequence = [cut] + leftSequence + rightSequence
            allCuts[totalCost] = newSequence
    if len(allCuts) == 0:
        # No cut position lies strictly inside this segment: nothing to cut.
        return 0, []
    else:
        minCost = min(allCuts)
        minSequence = allCuts[minCost]
        cache[(start,end)] = (minCost, minSequence)
        return cache[(start,end)]
# Minimum total cutting cost and one optimal cut order for the whole board.
print(woodCutter(positions,0,0,length,[]))
21846fd3aeac2362aa4e46ea2dda5052997aef38 | Python | bagherhussaini/matrix-multiplication-algorithms-runtime-comparison-python | /src/main.py | UTF-8 | 5,310 | 3.171875 | 3 | [] | no_license | from time import time
import numpy as np
import pandas as pd
import xlsxwriter
def main():
    """Benchmark the three matrix-multiplication algorithms on random
    n-by-n matrices for n = 4, 8, ..., 512; save timings to
    'time-complexity.csv' and a line chart to 'chart.xlsx'.

    Returns the timing log as a pandas DataFrame.
    """
    n = [2 ** i for i in range(2, 10)]
    log = pd.DataFrame(index=[],
                       columns=['N', 'Normal_Multiplication_Time', 'Divide_and_Conquer_Time', 'Strassen_Time'])
    normal_multiplication_duration_db = []
    divide_and_conquer_duration_db = []
    strassen_duration_db = []
    for size in n:
        a = np.random.uniform(low=-500000, high=500000, size=(size, size))
        b = np.random.uniform(low=-500000, high=500000, size=(size, size))
        # Time each algorithm on the same pair of matrices.
        start_time = 0
        start_time = time()
        result = normal_multiplication(a, b)
        normal_multiplication_duration = time() - start_time
        normal_multiplication_duration_db.append(normal_multiplication_duration)
        start_time = 0
        start_time = time()
        result = divide_and_conquer(a, b)
        divide_and_conquer_duration = time() - start_time
        divide_and_conquer_duration_db.append(divide_and_conquer_duration)
        start_time = 0
        start_time = time()
        result = strassen(a, b)
        strassen_duration = time() - start_time
        strassen_duration_db.append(strassen_duration)
        tmp = pd.Series([size, normal_multiplication_duration, divide_and_conquer_duration, strassen_duration],
                        index=['N', 'Normal_Multiplication_Time', 'Divide_and_Conquer_Time', 'Strassen_Time'])
        # NOTE(review): DataFrame.append is deprecated/removed in modern
        # pandas; pd.concat is the replacement.
        log = log.append(tmp, ignore_index=True)
    log.to_csv('time-complexity.csv', index=False)
    # Build the Excel chart of the three timing series.
    workbook = xlsxwriter.Workbook('chart.xlsx')
    worksheet = workbook.add_worksheet()
    chart = workbook.add_chart({'type': 'line'})
    bold = workbook.add_format({'bold': True})
    headings = ['N', 'Normal_Multiplication', 'Divide_and_Conquer', 'Strassen']
    worksheet.write_row('A1', headings, bold)
    worksheet.write_column('A2', n)
    worksheet.write_column('B2', normal_multiplication_duration_db)
    worksheet.write_column('C2', divide_and_conquer_duration_db)
    worksheet.write_column('D2', strassen_duration_db)
    chart.add_series({
        'name': '=Sheet1!$B$1',
        'categories': '=Sheet1!$A$2:$A$9',
        'values': '=Sheet1!$B$2:$B$9',
    })
    chart.add_series({
        'name': '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$9',
        'values': '=Sheet1!$C$2:$C$9',
    })
    chart.add_series({
        'name': '=Sheet1!$D$1',
        'categories': '=Sheet1!$A$2:$A$9',
        'values': '=Sheet1!$D$2:$D$9',
    })
    worksheet.insert_chart('G10', chart)
    workbook.close()
    return log
def normal_multiplication(a, b):
    """Multiply two n-by-n matrices with the textbook O(n^3) triple loop.

    Returns a new n-by-n float numpy array; inputs are not modified.
    """
    n = len(a)
    product = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            # Dot product of row `row` of a with column `col` of b.
            product[row, col] = sum(a[row, k] * b[k, col] for k in range(n))
    return product
def divide_and_conquer(a, b):
    """Recursive block (divide-and-conquer) matrix multiplication.

    Splits each n-by-n matrix (n a power of two) into four n/2-by-n/2
    quadrants, multiplies them with 8 recursive calls, and reassembles
    the result.
    """
    n = len(a)
    if n == 1:
        return a * b
    half = n // 2
    # Quadrants of a and b.
    a11, a12 = a[:half, :half], a[:half, half:]
    a21, a22 = a[half:, :half], a[half:, half:]
    b11, b12 = b[:half, :half], b[:half, half:]
    b21, b22 = b[half:, :half], b[half:, half:]
    # Standard block-product formulas.
    c11 = divide_and_conquer(a11, b11) + divide_and_conquer(a12, b21)
    c12 = divide_and_conquer(a11, b12) + divide_and_conquer(a12, b22)
    c21 = divide_and_conquer(a21, b11) + divide_and_conquer(a22, b21)
    c22 = divide_and_conquer(a21, b12) + divide_and_conquer(a22, b22)
    result = np.zeros((n, n))
    result[:half, :half] = c11
    result[:half, half:] = c12
    result[half:, :half] = c21
    result[half:, half:] = c22
    return result
def strassen(a, b):
    """Strassen's matrix multiplication (7 recursive half-size products
    instead of 8).

    Falls back to the naive algorithm for n <= 4; n must be a power of
    two.
    """
    n = len(a)
    if n <= 4:
        return normal_multiplication(a, b)
    half = n // 2
    a11, a12 = a[:half, :half], a[:half, half:]
    a21, a22 = a[half:, :half], a[half:, half:]
    b11, b12 = b[:half, :half], b[:half, half:]
    b21, b22 = b[half:, :half], b[half:, half:]
    # The seven Strassen products.
    p1 = strassen(a11, b12 - b22)
    p2 = strassen(a11 + a12, b22)
    p3 = strassen(a21 + a22, b11)
    p4 = strassen(a22, b21 - b11)
    p5 = strassen(a11 + a22, b11 + b22)
    p6 = strassen(a12 - a22, b21 + b22)
    p7 = strassen(a11 - a21, b11 + b12)
    result = np.zeros((n, n))
    result[:half, :half] = p5 + p4 - p2 + p6
    result[:half, half:] = p1 + p2
    result[half:, :half] = p3 + p4
    result[half:, half:] = p5 + p1 - p3 - p7
    return result
if __name__ == '__main__':
    # Run the benchmark; keep the returned timing DataFrame around.
    log = main()
| true |
ba48a58445f113708babc1f9247d9b8d50b38921 | Python | Vincent105/python | /04_The_Path_of_Python/05_if/ch5_01_if.py | UTF-8 | 85 | 3.953125 | 4 | [] | no_license | age = input('請輸入年齡:')
# Warn when the entered age is below the adult threshold of 18.
if (int(age) < 18):
    print('You are too young.')
97d62992639a42ee5dbd7effd7c610e406635a04 | Python | glasnt/emojificate | /tests/test_graphemes.py | UTF-8 | 530 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | import pytest
from emojificate.filter import emojificate
def valid(emoji, title, fuzzy=False):
    """Assert that emojificate() output embeds *emoji* (also as the img
    alt text) and *title*; unless *fuzzy*, additionally require the
    exact 'aria-label="Emoji: <title>' attribute."""
    parsed = emojificate(emoji)
    assert emoji in parsed
    assert 'alt="{}'.format(emoji) in parsed
    assert title in parsed
    if not fuzzy:
        assert 'aria-label="Emoji: {}'.format(title) in parsed
def test_flag():
    # fuzzy=True: only substring checks, no exact aria-label match.
    valid("🇦🇺", "Australia", fuzzy=True)
def test_pride():
    # Multi-codepoint sequence emoji with an exact aria-label check.
    valid("🏳️🌈", "Rainbow Flag")
def test_farmer():
    # Expected title includes the skin-tone modifier's name.
    valid("👩🏼🌾", "Woman Farmer Medium-Light Skin Tone")
| true |
916768ecaf5ee6442a9c206c7c5557f2817bf2a7 | Python | auretsky1/BasicPuzzleGame | /PuzzleGraphics.py | UTF-8 | 7,969 | 3.546875 | 4 | [] | no_license | """ This class will be responsible for drawing the cubes to the game screen and updating the highlighting in accordance
with which ones are on and off as well as where the user's mouse is located. These changes can be called as functions
by an outside module or class with the relevant data needed to make a change"""
import pygame
from GlobalConstants import *
class PuzzleGraphics(object):
# Constructor
def __init__(self, size=3):
# Get the size of the puzzle board
self.size = size
# Sets size of puzzle board image
self.width = int(SCREEN_SIZE[0] / 1.75)
self.length = int(SCREEN_SIZE[1] / 1.75)
self.puzzle_area = (self.width, self.length)
# Set the unit for each cube
self.cube_width = self.puzzle_area[0] // self.size
self.cube_length = self.puzzle_area[1] // self.size
# Reset puzzle area based on cube width and length
self.width = self.cube_width * self.size
self.length = self.cube_length * self.size
self.puzzle_area = (self.width, self.length)
# Get the position of the puzzle board that is drawn to the screen
self.board_position = self.set_position()
# Creates a surface for the puzzle board
self.puzzle_image = pygame.Surface([self.puzzle_area[0], self.puzzle_area[1]])
self.puzzle_image.fill(BLACK)
# Get the maximum number that is possible for this grid size
self.n_max = self.size ** 2
# Creates the images of the cube
self.on_cube = self.create_image('Images/lightblue.jpg')
self.off_cube = self.create_image('Images/gray.jpg')
# Create a list of surfaces that will hold each cube
self.puzzle_board = self.create_cube_surface()
# Gets the position of the mouse
self.mouse_pos = (0, 0)
# Draw Board
self.draw_board = self.draw_puzzle_board()
self.trigger_list = []
self.activations = []
# sets the trigger list
def set_trigger_list(self, trigger_list):
self.trigger_list = trigger_list
def set_activation_list(self, activation_list):
self.activations = activation_list
# Highlights cubes that are being hovered by mouse
def highlight(self, x):
for y in range(len(self.trigger_list[x])):
element = self.trigger_list[x][y] - 1
current_puzzle_row = element // self.size
current_puzzle_column = element % self.size
trigger = self.trigger_list[x][0] - 1
trigger_puzzle_row = trigger // self.size
trigger_puzzle_column = trigger % self.size
highlight_top = pygame.Surface([self.cube_width, 2])
highlight_sides = pygame.Surface([2, self.cube_length])
if self.activations[current_puzzle_row][current_puzzle_column] == 0:
highlight_top.fill(BLUE)
highlight_sides.fill(BLUE)
self.puzzle_board[element].blit(highlight_top, [0, 0])
self.puzzle_board[element].blit(highlight_top, [0, self.cube_length - 2])
self.puzzle_board[element].blit(highlight_sides, [self.cube_width - 2, 0])
self.puzzle_board[element].blit(highlight_sides, [0, 0])
if self.activations[current_puzzle_row][current_puzzle_column] == 1:
highlight_top.fill(RED)
highlight_sides.fill(RED)
self.puzzle_board[element].blit(highlight_top, [0, 0])
self.puzzle_board[element].blit(highlight_top, [0, self.cube_length - 2])
self.puzzle_board[element].blit(highlight_sides, [self.cube_width - 2, 0])
self.puzzle_board[element].blit(highlight_sides, [0, 0])
# if self.activations[trigger_puzzle_row][trigger_puzzle_column] == 0:
# self.puzzle_board[element].blit(self.on_cube, [0, 0])
# if self.activations[current_puzzle_row][current_puzzle_column] == 1:
# self.puzzle_board[element].blit(self.off_cube, [0, 0])
# if self.activations[trigger_puzzle_row][trigger_puzzle_column] == 1:
# self.puzzle_board[element].blit(self.off_cube, [0, 0])
# if self.activations[current_puzzle_row][current_puzzle_column] == 0:
# self.puzzle_board[element].blit(self.on_cube, [0, 0])
# Sets all cubes to off depending on there activation status
def change_to_off(self):
for x in range(len(self.activations)):
for y in range(len(self.activations[x])):
element = (x * self.size) + y
if self.activations[x][y] == 1:
self.puzzle_board[element].blit(self.on_cube, [0, 0])
else:
self.puzzle_board[element].blit(self.off_cube, [0, 0])
# converts the mouse position to a location in a list
def convert_to_list_position(self):
y_pos = self.mouse_pos[0] // self.cube_width
x_pos = self.mouse_pos[1] // self.cube_length
list_location = (x_pos * self.size) + y_pos
return list_location
# Checks to see if mouse is hovering over a cube
def is_highlighted(self):
if self.mouse_pos[0] < 0 or self.mouse_pos[0] > self.puzzle_area[0] - 1\
or self.mouse_pos[1] < 0 or self.mouse_pos[1] > self.puzzle_area[1] - 1:
return False
else:
return True
# Loads and creates an image
def create_image(self, name):
# Loads the image
image = pygame.image.load(name).convert()
# Resizes image to fit grid unit
new_image = pygame.transform.scale(image, (self.cube_width, self.cube_length))
return new_image
# Sets the position of the puzzle board image on the game screen
def set_position(self):
screen_center_x = SCREEN_SIZE[0] // 2
screen_center_y = SCREEN_SIZE[1] // 2
position_x = screen_center_x - (self.puzzle_area[0]//2)
position_y = screen_center_y - (self.puzzle_area[1]//2)
return position_x, position_y
# Creates a list of cube surfaces
def create_cube_surface(self):
cube_surface = []
for x in range(self.n_max):
cube = pygame.Surface([self.cube_width, self.cube_length])
cube.blit(self.off_cube, [0, 0])
cube_surface.append(cube)
return cube_surface
# Updates the position of the mouse in relation to the board position
def update_mouse_pos(self, mouse_pos):
mouse_x = mouse_pos[0] - self.board_position[0]
mouse_y = mouse_pos[1] - self.board_position[1]
self.mouse_pos = (mouse_x, mouse_y)
# Check if highlighted
if self.is_highlighted():
# Call convert to list position
list_location = self.convert_to_list_position()
# Call turn_off_cubes
self.change_to_off()
# Turn on cubes for the list position
self.highlight(list_location)
self.draw_puzzle_board()
else:
self.change_to_off()
self.draw_puzzle_board()
def activate_cube(self, location):
mouse_x = location[0] - self.board_position[0]
mouse_y = location[1] - self.board_position[1]
self.mouse_pos = (mouse_x, mouse_y)
if self.is_highlighted():
list_location = self.convert_to_list_position()
list_location += 1
return list_location
else:
return False
def draw_puzzle_board(self):
# Draws cubes to puzzle board surface
for x in range(self.n_max):
puzzle_row = x // self.size
puzzle_column = x % self.size
cube_x = puzzle_column * self.cube_length
cube_y = puzzle_row * self.cube_width
self.puzzle_image.blit(self.puzzle_board[x], [cube_x, cube_y])
return self.puzzle_image
| true |
ff5c4826288f3f2ad76efbde8ba7a3aacbba34f9 | Python | Kamilos1337/pp1 | /03-FileHandling/18.py | UTF-8 | 181 | 3.53125 | 4 | [] | no_license | tablica = []
with open("03-FileHandling/numbers.txt", 'r') as tekst:
for line in tekst:
tablica.append(int(line))
tablica.sort()
for n in tablica:
print(n, end=' ')
| true |
df1f8bfde2f60756578ee51d21c69cd47e07e9b7 | Python | dolphingarlic/seniorrobotics2018 | /test.py | UTF-8 | 831 | 2.625 | 3 | [] | no_license | from src.robot import Robot
from time import sleep
ROBOT = Robot()
print("Started")
ROBOT.follow_until_next_node()
sleep(10)
ROBOT.stop()
print("Stopped")
'''
for i in range(40):
print("L:"+str(ROBOT.left_colour_sensor.reflected_light_intensity))
print("R:"+str(ROBOT.right_colour_sensor.reflected_light_intensity))
'''
'''
print("Left: "+str(ROBOT.left_colour_sensor.reflected_light_intensity))
print("Right: "+str(ROBOT.right_colour_sensor.reflected_light_intensity))
print("Inner: "+str(ROBOT.inner_colour_sensor.reflected_light_intensity))
print("Outer: "+str(ROBOT.outer_colour_sensor.reflected_light_intensity))
'''
'''
for i in range(1):
print("Left: " + str(ROBOT.left_colour_sensor.reflected_light_intensity))
print("Right: " + str(ROBOT.right_colour_sensor.reflected_light_intensity))
sleep(0.2)
''' | true |
a2e14e94527a98bfa95424959cfbd5111e4ad6c5 | Python | pirobtumen/pymediator | /test/test_mediator.py | UTF-8 | 1,598 | 2.90625 | 3 | [
"BSD-3-Clause"
] | permissive | from pymediator import Event, EventHandler, Mediator
def test_base_event():
assert Event.EVENT_NAME is ''
def test_base_event_handler():
handler = EventHandler()
res = handler.handle(Event())
assert res is None
def test_mediator_register_event():
test_event_name = 'test_event'
test_mediator = Mediator()
test_mediator.on(test_event_name)(EventHandler)
assert test_event_name in test_mediator.handlers
assert test_mediator.handlers[test_event_name] is not None
def test_mediator_emit_event():
test_event_name = 'test_event'
test_mediator = Mediator()
test_mediator.on(test_event_name)(EventHandler)
assert test_event_name in test_mediator.handlers
assert test_mediator.handlers[test_event_name] is not None
def test_mediator_handle_event():
mediator = Mediator()
@mediator.on(Event.EVENT_NAME)
class BaseEventHandler(EventHandler):
def handle(self, event: Event):
return None
event = Event()
res = mediator.emit(event)
assert res is None
def test_mediator_simple_event():
mediator = Mediator()
class SimpleEvent(Event):
EVENT_NAME = 'simple_event'
def __init__(self, text):
self._text = text
def get_text(self):
return self._text
@mediator.on(SimpleEvent.EVENT_NAME)
class SimpleEventHandler(EventHandler):
def handle(self, event: SimpleEvent):
return event.get_text()
event = SimpleEvent('some text')
res = mediator.emit(event)
assert res is not None
assert res == 'some text'
| true |
1d774ffbb52a1629ea6a0d97d74b2672973e5865 | Python | iCodeIN/competitive-programming-5 | /leetcode/Two-Pointers/permutation-in-string.py | UTF-8 | 1,134 | 3.046875 | 3 | [] | no_license | from itertools import permutations
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
if len(s1) > len(s2):
print('here')
return False
di = {}
for i in s1:
di[i] = di.get(i, 0) + 1
ls1 = len(s1)
di_sliding = {}
for i in range(ls1):
di_sliding[s2[i]] = di_sliding.get(s2[i], 0) + 1
if di_sliding == di:
return True
for i in range(ls1, len(s2)):
di_sliding[s2[i]] = di_sliding.get(s2[i], 0) + 1
di_sliding[s2[i - ls1]] -= 1
if di_sliding[s2[i - ls1]] == 0:
del di_sliding[s2[i - ls1]]
if di_sliding == di:
return True
return False
def TimeLimitExceeded_checkInclusion((self, s1: str, s2: str) -> bool:
"""
:type s1: str
:type s2: str
:rtype: bool
"""
S1 = list(permutations(s1))
for i in range(len(S1)):
S1[i] = ''.join(S1[i])
for i in S1:
if i in s2:
return True
return False
| true |
dd15cc73c67fcc5a97e2ee77a7339d6f866dffa1 | Python | CatalystOfNostalgia/hoot | /server/hoot/emotion_processing/compound_emotions.py | UTF-8 | 427 | 2.53125 | 3 | [
"MIT"
] | permissive | from enum import Enum
from enum import unique
@unique
class CompoundEmotion(Enum):
"""
Represents all possible compound emotions.
"""
optimism = 1
frustration = 2
aggressiveness = 3
anxiety = 4
frivolity = 5
disapproval = 6
rejection = 7
awe = 8
love = 9
envy = 10
rivalry = 11
submission = 12
gloat = 13
remorse = 14
contempt = 15
coercion = 16
| true |
882973f64b9e2f292aaa052a78d88e4e744a4f34 | Python | alan-yjzhang/AIProjectExamples1 | /HW2/part1-convnet/modules/max_pool.py | UTF-8 | 3,554 | 2.796875 | 3 | [] | no_license | import numpy as np
class MaxPooling:
'''
Max Pooling of input
'''
def __init__(self, kernel_size, stride):
self.kernel_size = kernel_size
self.stride = stride
self.cache = None
self.dx = None
self.mask = None
def forward(self, x):
'''
Forward pass of max pooling
:param x: input, (N, C, H, W)
:return: The output by max pooling with kernel_size and stride
'''
out = None
#############################################################################
# TODO: Implement the max pooling forward pass. #
# Hint: #
# 1) You may implement the process with loops #
#############################################################################
N = x.shape[0]
in_channels = x.shape[1]
H_out = int((x.shape[2] - self.kernel_size)/self.stride + 1)
W_out = int((x.shape[3] - self.kernel_size)/self.stride + 1)
out = np.zeros((N, in_channels, H_out, W_out))
for k in range(N):
for i in range(in_channels):
for j in range(H_out):
for l in range(W_out):
out[k,:,j,l] = np.amax(x[k,:,j*self.stride:j*self.stride+self.kernel_size,l*self.stride:l*self.stride+self.kernel_size], axis = (1,2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
self.cache = (x, H_out, W_out)
return out
def backward(self, dout):
'''
Backward pass of max pooling
:param dout: Upstream derivatives
:return:
'''
x, H_out, W_out = self.cache
#############################################################################
# TODO: Implement the max pooling backward pass. #
# Hint: #
# 1) You may implement the process with loops #
# 2) You may find np.unravel_index useful #
#############################################################################
self.dx = np.zeros(x.shape)
N = x.shape[0]
in_channels = x.shape[1]
H_out = int((x.shape[2] - self.kernel_size)/self.stride + 1)
W_out = int((x.shape[3] - self.kernel_size)/self.stride + 1)
for k in range(N):
for i in range(in_channels):
for j in range(H_out):
for l in range(W_out):
res = np.amax(x[k,i,j*self.stride:j*self.stride+self.kernel_size,l*self.stride:l*self.stride+self.kernel_size], axis = (0,1))
self.dx[k,i,j*self.stride:j*self.stride+self.kernel_size,l*self.stride:l*self.stride+self.kernel_size] += np.multiply((x[k,i,j*self.stride:j*self.stride+self.kernel_size,l*self.stride:l*self.stride+self.kernel_size] >= res), dout[k, i, j, l])
#############################################################################
# END OF YOUR CODE #
#############################################################################
| true |
024d3e1d7a83f8a2f7e10ad7e349e892e100b646 | Python | vdrhtc/Two-qubit-AT-paper | /Pictures/Plotting/StationaryPlot.py | UTF-8 | 7,639 | 2.59375 | 3 | [] | no_license | import pickle
from numpy import *
import matplotlib
from matplotlib import ticker, colorbar as clb, patches
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
class StationaryPlot:
def __init__(self):
with open("stationary.pkl", "rb") as f:
currs, freqs, population10, energies = pickle.load(f)
fig, ax = plt.subplots(1, 1, figsize=(5*0.9, 3*0.9), sharey=True)
# mappable = axes[0].pcolormesh(currs, freqs, log10(array(population10)), rasterized=True,
# cmap="Spectral_r")
rect1 = patches.Rectangle((3.5, 5.25), .3, .04,
linewidth=1, edgecolor='black',
facecolor='none', linestyle=":")
ax.add_patch(rect1)
rect2 = patches.Rectangle((2.925, 5.2275),
3.225-2.925,
5.2675-5.2275,
linewidth=1, edgecolor='black',
facecolor='none', linestyle=":")
ax.add_patch(rect2)
rect3 = patches.Rectangle((3.16, 5.3), .3, .04,
linewidth=1, edgecolor='black',
facecolor='none', linestyle=":")
ax.add_patch(rect3)
ax.annotate("1", xy=(3.48, 5.23), xytext=(3.575, 5.205), ha="center", fontsize=10,
arrowprops=dict(facecolor='black', width=.5, headwidth=3, headlength=3.5,
shrink=0.05))
ax.annotate('2', xy=(5.080001, 5.21), xytext=(5.08, 5.18), ha="center", fontsize=10,
arrowprops=dict(facecolor='black', width=.5, headwidth=3, headlength=3.5,
shrink=0.05))
ax.annotate('3', xy=(3.31, 5.25), xytext=(3.31, 5.265), ha="center", fontsize=10,
arrowprops=dict(facecolor='black', width=.5, headwidth=3, headlength=3.5,
shrink=0.05))
ax.annotate('4', xy=(4.9, 5.195), xytext=(4.76, 5.195), ha="center", va="top", fontsize=10,
arrowprops=dict(facecolor='black', width=.5, headwidth=3, headlength=3.5,
shrink=0.05))
ax.annotate("5", xy=(3.5, 5.175), xytext=(3.63, 5.18), ha="center", va="center",
fontsize=10,
arrowprops=dict(facecolor='black', width=.5, headwidth=3, headlength=3.5,
shrink=0.05))
mappable = ax.pcolormesh(currs, freqs, log10(array(population10)), rasterized=True,
cmap="Spectral_r")
m = 1
t = .5
typ1 = (0, (5, 0))
typ2 = "-" #(0, (1, 2))
secondary_colour = (0.5, 0.5, 0.5, 0.25)
energies = energies[len(currs)//2+12:, :]
currs = currs[len(currs)//2+12:]
plt.plot(currs, ((energies[:, 5].T - energies[:, 2]).T), linewidth=t, linestyle=typ2,
color=secondary_colour); # 01->11
plt.plot(currs, ((energies[:, 5].T - energies[:, 1]).T), linewidth=t, linestyle=typ2,
color=secondary_colour); # 10-> 11
plt.plot(currs, ((energies[:, 6].T - energies[:, 1]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour); # 01->21
plt.plot(currs, ((energies[:, 6].T - energies[:, 2]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour); # 01->21
plt.plot(currs, ((energies[:, 6].T - energies[:, 3]).T), linestyle=typ2, linewidth=t,
color=secondary_colour); # 20->21
plt.plot(currs, ((energies[:, 6].T - energies[:, 4]).T), linestyle=typ2, linewidth=t,
color=secondary_colour); # 02->21
plt.plot(currs, ((energies[:, 7].T - energies[:, 1]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour, label='others'); # 10->12
plt.plot(currs, ((energies[:, 7].T - energies[:, 2]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour); # 01->12
plt.plot(currs, ((energies[:, 7].T - energies[:, 3]).T), linestyle=typ2, linewidth=t,
color=secondary_colour); # 20->12
plt.plot(currs, ((energies[:, 7].T - energies[:, 4]).T), linestyle=typ2, linewidth=t,
color=secondary_colour); # 02->12
plt.plot(currs, ((energies[:, 7].T - energies[:, 5]).T), linestyle=typ2, linewidth=t,
color=secondary_colour) # 11->12
plt.plot(currs, ((energies[:, 8].T - energies[:, 1]).T / 3), linestyle=typ2, linewidth=t,
color=secondary_colour); # 10->22
plt.plot(currs, ((energies[:, 8].T - energies[:, 2]).T / 3), linestyle=typ2, linewidth=t,
color=secondary_colour); # 01->22
plt.plot(currs, ((energies[:, 8].T - energies[:, 3]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour); # 20->22
plt.plot(currs, ((energies[:, 8].T - energies[:, 4]).T / 2), linestyle=typ2, linewidth=t,
color=secondary_colour); # 02->22
plt.plot(currs, ((energies[:, 8].T - energies[:, 6]).T), linestyle=typ2, linewidth=t,
color=secondary_colour); # 21->22
plt.plot(currs, ((energies[:, 8].T - energies[:, 0]).T / 4), linestyle=typ2, linewidth=t,
color=secondary_colour); # 00->22
plt.plot(currs, ((energies[:, 2].T - energies[:, 0]).T), label=r"01",
linewidth=m,
linestyle=typ1); # 00->01
plt.plot(currs, ((energies[:, 1].T - energies[:, 0]).T), label=r"10",
linewidth=m,
linestyle=typ1); # 00->10
plt.plot(currs, ((energies[:, 3].T - energies[:, 0]).T / 2),
label=r"20/2", color = "C1",
linewidth=m, linestyle=":"); # 00->20
plt.plot(currs, ((energies[:, 4].T - energies[:, 0]).T / 2),
label=r"02/2",color = "C0",
linewidth=m, linestyle=":"); # 00->02
plt.plot(currs, ((energies[:, 4].T - energies[:, 1]).T),
label=r"10-02", linewidth=m,
linestyle="--"); # 10->02
plt.plot(currs, ((energies[:, 5].T - energies[:, 0]).T / 2), label=r"11/2",
linewidth=m, linestyle="--", color="C5") # , color='black'); # 00->11
plt.plot(currs, ((energies[:, 6].T - energies[:, 0]).T / 3), label=r"21/3",
linestyle="-.", linewidth=m , color='C7'); # 00->21/3
plt.plot(currs, ((energies[:, 7].T - energies[:, 0]).T / 3), label=r"12/3",
linestyle="-.", linewidth=m, color="C9"); # 00->12
plt.ylim(5.1, 5.5)
plt.xlim(2,6)
ax.set_ylabel("$\omega_{d}/2\pi$ (GHz)")
ax.set_xlabel("Current ($10^{-4}$ A)")
plt.legend(ncol=3, fontsize=7)
cbaxes1 = fig.add_axes([0.15, .975, 0.6, .02])
# clb.make_axes(axes[0, 0], location="top", shrink=0.8,
# aspect=50, pad=0.075, anchor=(0, 1))[0]
cb = plt.colorbar(mappable, ax=ax, cax=cbaxes1, orientation="horizontal")
cb.set_ticks([-3, -2, -1, -0.523])
cb.ax.set_xticklabels([.001, 0.01, 0.1, 0.3])
cb.ax.set_title(r"$ P_{\left|10\right\rangle}$", position=(1.125, -3.5))
plt.text(-0.15, 1.1, "(d)", fontdict={"name": "STIX"}, fontsize=17,
transform=ax.transAxes)
plt.savefig("../stationary.pdf", bbox_inches="tight", dpi=600)
StationaryPlot()
| true |
8fe8f2b0f369781c4ce0b68602c09eb5f39eb147 | Python | abhijit26110709/python | /decsitree_iris.py | UTF-8 | 1,564 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
# In[3]:
# now loading IRIS data only
iris=load_iris()
# In[4]:
dir(iris) #exploring variable
# In[ ]:
# In[7]:
iris.feature_names #this are feature names
# In[9]:
# labels or answer
iris.target_names
# In[12]:
# actual data with attributes is
features=iris.data
features.shape
# In[13]:
type(features)
# In[17]:
# now time for label data that will be exactly same as number of features data
label=iris.target
label.shape
# In[21]:
SL=features[0:,0]
# In[22]:
SW=features[0:,1]
# In[31]:
plt.xlabel("length")
plt.ylabel("width")
plt.scatter(SL,SW)
plt.scatter(features[0:,2],features[0:,3],label="petal_data",marker='x')
plt.legend()
# In[ ]:
# now time for seprating data nto 2 category
#1.------training data
#2.------ testing data---questions
from sklearn.model_selection import train_test_split
train_data,test_data,label,label_test=train_test_split(features,label,test_size=0.1)
# In[ ]:
# calling decisiontree classiier
clf=DecisionTreeClassifier()
# In[ ]:
# now time for training clf
trained=clf.fit(train_data,label_train)
# In[ ]:
# now predicting flowers
predicted_flowers=trained.predict(test_data)
# In[ ]:
predicted_flowers # algo answer
# In[ ]:
label_test # actual answer
# In[ ]:
# find accuracy score
accuracy_score(label_test,predicted_flowers)
# In[ ]:
| true |
3ac7a45f1c93cbd04c8aa60fa845a050a822f114 | Python | NilsJPWerner/autoDocstring | /src/test/integration/python_test_files/file_2_output.py | UTF-8 | 524 | 2.96875 | 3 | [
"MIT"
] | permissive | from typing import Union, List, Generator, Tuple, Dict
def function(
arg1: int,
arg2: Union[List[str], Dict[str, int], Thing],
kwarg1: int = 1
) -> Generator[Tuple[str, str]]:
"""_summary_
:param arg1: _description_
:type arg1: int
:param arg2: _description_
:type arg2: Union[List[str], Dict[str, int], Thing]
:param kwarg1: _description_, defaults to 1
:type kwarg1: int, optional
:yield: _description_
:rtype: Generator[Tuple[str, str]]
"""
yield ("abc", "def")
| true |
bc712597c75c5f664f6a5c323f5aa183087917dd | Python | uniqxh/tensorflow | /pdes.py | UTF-8 | 1,714 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python
import tensorflow as tf
import numpy as np
from PIL import Image
from cStringIO import StringIO
import images2gif
#from IPython.display import clear_output, Image, display
images = []
def DisplayArray(a, fmt='jpeg', rng=[0,1]):
a = (a-rng[0])/float(rng[1] - rng[0])*255
a = np.uint8(np.clip(a, 0 , 255))
f = StringIO()
m = Image.fromarray(a)
#clear_output(wait = True)
images.append(m)
sess = tf.InteractiveSession()
def make_kernel(a):
a = np.asarray(a)
a = a.reshape(list(a.shape) + [1,1])
return tf.constant(a, dtype=1)
def simple_conv(x, k):
x = tf.expand_dims(tf.expand_dims(x, 0), -1)
y = tf.nn.depthwise_conv2d(x, k, [1,1,1,1], padding='SAME')
return y[0, :, :, 0]
def laplace(x):
laplace_k = make_kernel([[0.5, 1.0, 0.5],
[1.0, -6., 1.0],
[0.5, 1.0, 0.5]])
return simple_conv(x, laplace_k)
N = 500
u_init = np.zeros([N, N], dtype=np.float32)
ut_init = np.zeros([N, N], dtype=np.float32)
for n in range(40):
a,b = np.random.randint(0, N, 2)
u_init[a,b] = np.random.uniform()
DisplayArray(u_init, rng=[-0.1, 0.1])
eps = tf.placeholder(tf.float32, shape=())
damping = tf.placeholder(tf.float32, shape=())
U = tf.Variable(u_init)
Ut = tf.Variable(ut_init)
U_ = U + eps*Ut
Ut_ = Ut + eps*(laplace(U) - damping*Ut)
step = tf.group(U.assign(U_), Ut.assign(Ut_))
tf.global_variables_initializer().run()
for i in range(10):
step.run({eps: 0.1, damping: 0.1})
DisplayArray(U.eval(), rng=[-0.1, 0.1])
size = (500,500)
for im in images:
im.thumbnail(size, Image.ANTIALIAS)
images2gif.writeGif('test.gif', images, duration=0.1, subRectangles=False)
| true |
68328f531f700bbf47394c47ef61c901de83237b | Python | achalddave/maskrcnn-benchmark | /maskrcnn_benchmark/utils/parallel/pool_context.py | UTF-8 | 1,860 | 3.015625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import multiprocessing as mp
from collections.abc import Iterable
_PoolWithContext_context = None
def _PoolWithContext_init(initializer, init_args):
global _PoolWithContext_context
_PoolWithContext_context = {}
if init_args is None:
initializer(context=_PoolWithContext_context)
else:
initializer(init_args, context=_PoolWithContext_context)
def _PoolWithContext_run(args):
task_fn, task_args = args
return task_fn(task_args, context=_PoolWithContext_context)
class PoolWithContext:
"""Like multiprocessing.Pool, but pass output of initializer to map fn.
Usage:
def init(context):
context['init_return'] = 'init'
def run(args, context):
return (context['init_return'], args)
p = PoolWithContext(4, init)
print(p.map(run, ['task1', 'task2', 'task3']))
# [('init', 'task1'), ('init', 'task2'), ('init', 'task3')]
# NOTE: GPUs may be in different order
"""
def __init__(self, num_workers, initializer, initargs=None):
self.pool = mp.Pool(
num_workers,
initializer=_PoolWithContext_init,
initargs=(initializer, initargs))
def map(self, task_fn, tasks):
return self.pool.map(_PoolWithContext_run,
((task_fn, task) for task in tasks))
def close(self):
self.pool.close()
def imap_unordered(self, task_fn, tasks):
return self.pool.imap_unordered(_PoolWithContext_run,
((task_fn, task) for task in tasks))
if __name__ == "__main__":
def _test_init(context):
context['init_return'] = 'hi'
def _test_run(args, context):
return (args, context['init_return'])
p = PoolWithContext(4, _test_init)
print(p.map(_test_run, ['task1', 'task2', 'task3']))
| true |
aed2c42eb01432869b16ea8df0495672658a1b46 | Python | linhuaxin93/LearnPython | /matplotilb/matplotlib_05.py | UTF-8 | 557 | 3.484375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
#随机x,y各十个散点图
plt.subplot(2,2,1)
x = np.random.rand(10)
y = np.random.rand(10)
plt.scatter(x, y)
#柱形图
plt.subplot(2,2,2)
x = np.arange(1, 6)
y = np.array([13, 15, 14, 17, 16])
plt.bar(x, y,color="green")
#饼状图
plt.subplot(2,2,3)
labels = ['1','2','3','4']
values = [10,30,45,15]
colors = ['red','green','blue','yellow']
plt.pie(values,labels=labels,colors=colors)
plt.show() | true |
1af27c27ffeee246f5ec3b69728e96329eeb31f6 | Python | gilReyes/SaphireSQL | /Programming Languages/syntaxAnalyzerworking.py | UTF-8 | 954 | 2.546875 | 3 | [] | no_license | import ply.yacc as yacc
#Getting the token
from lexicalAnalyzer import tokens
var = [[]]
resultQueries = []
tracker = 0
def p_expression_table(p):
'expression : ID ASSIGNMENT LB ID RB LP IDS RP EOL'
var[tracker].insert( 0, p[4])
#print('entering table')
def p_expression_ids(p):
'''IDS : IDS SEPARATOR IDS
| ID'''
if(not(p[1] is None)):
var[tracker].append(p[1])
#print('entering ids')
def p_expression_display(p):
'expression : DISPLAY LP ID CONCATENATION QGEN RP EOL'
#print('entering display')
for elem in range(1, len(var[0])):
resultQueries.append('SELECT ' + var[0][elem] + ' FROM ' + var[0][0])
#Error rule
def p_error(p):
print("Syntax error in input!")
# Build the parser
parser = yacc.yacc()
while True:
try:
s = input('syntax > ')
except EOFError:
break
if not s: continue
result = parser.parse(s)
print(resultQueries)
| true |
93cab0e0b143889fefbf1875811a0afc84383416 | Python | khawajaosama/Algebra_Python | /algebra_4.py | UTF-8 | 2,376 | 3.1875 | 3 | [] | no_license | #Dot Product
from collections import defaultdict
def dot(v,w):
return sum([v_i*w_i
for v_i,w_i in zip(v,w)])
print (dot([1,2,3],[4,5,6]))
#Vector Product
def vector_product(v,w):
adder = defaultdict(int)
for n_1,v_i in enumerate(v):
for n_2,w_i in enumerate(w):
if (n_1!=n_2):
if (n_1==0 and n_2==1):
rectangular_component="k"
num = 1
elif (n_1==1 and n_2==2):
rectangular_component="i"
num = 1
elif (n_1==2 and n_2==0):
rectangular_component="j"
num=1
elif (n_1==1 and n_2==0):
rectangular_component="k"
num = -1
elif (n_1==0 and n_2==2):
rectangular_component="j"
num = -1
else:
rectangular_component="i"
num = -1
adder[rectangular_component]+= v_i*w_i*num
#list.append(str(v_i*w_i*num)+(rectangular_component))
return (adder)
print(vector_product([1,2,3],[4,5,6]))#Dot Product
from collections import defaultdict
def dot(v,w):
return sum([v_i*w_i
for v_i,w_i in zip(v,w)])
print (dot([1,2,3],[4,5,6]))
#Vector Product
def vector_product(v,w):
adder = defaultdict(int)
for n_1,v_i in enumerate(v):
for n_2,w_i in enumerate(w):
if (n_1!=n_2):
if (n_1==0 and n_2==1):
rectangular_component="k"
num = 1
elif (n_1==1 and n_2==2):
rectangular_component="i"
num = 1
elif (n_1==2 and n_2==0):
rectangular_component="j"
num=1
elif (n_1==1 and n_2==0):
rectangular_component="k"
num = -1
elif (n_1==0 and n_2==2):
rectangular_component="j"
num = -1
else:
rectangular_component="i"
num = -1
adder[rectangular_component]+= v_i*w_i*num
#list.append(str(v_i*w_i*num)+(rectangular_component))
return (adder)
print(vector_product([1,2,3],[4,5,6])) | true |
9f8ef20552481f3811825e97136d7f15fcd30567 | Python | dawnonme/Eureka | /main/leetcode/466.py | UTF-8 | 1,897 | 3.609375 | 4 | [] | no_license | class Solution:
def getMaxRepetitions(self, s1: str, n1: int, s2: str, n2: int) -> int:
# hashtable to store the patterns
patterns = {}
# pointers on s1 and s2
p1, p2 = 0, 0
# number of occurance of s1 and s2 so far
c1, c2 = 1, 0
# execute the loop when number of occurance of s1 has not been used up
while c1 <= n1:
# if a character match is found, move p2 forward
if s1[p1] == s2[p2]:
p2 += 1
# p2 reaches the end of s2, meaning 1 occurance of s2
if p2 == len(s2):
c2 += 1
p2 = 0
# store the pattern if not exists
if p1 not in patterns:
patterns[p1] = (c1, c2)
# a repeat has been found, handle the repeat
else:
# previous occurance of s1 and s2
prev_c1, prev_c2 = patterns[p1]
# number of occurance of s1 and s2 in a single repeat
n_s1_repeat, n_s2_repeat = c1 - prev_c1, c2 - prev_c2
# number of repeats
n_repeats = (n1 - prev_c1) // n_s1_repeat
# number of s2 occurances in the repeats
c2_repeats = n_repeats * n_s2_repeat
# the remain available occurances of s1
remain_c1 = (n1 - prev_c1) % n_s1_repeat
# update c1 and c2
c1 = n1 - remain_c1
c2 = c2_repeats + prev_c2
# move forward p1 every iteration
p1 += 1
# p1 reaches the end of s1, move it back to 0 and mark 1 occurance of s1
if p1 == len(s1):
c1 += 1
p1 = 0
# divide c2 by n2 to get the result
return c2 // n2
| true |
752a66e25c86d705c8267dff31778a729bdea21f | Python | iotrusina/M-Eco-WP3-package | /xsafar13/locations/filters/filter_allc_forload | UTF-8 | 563 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
f1 = open("allCountries","r")
while True:
line = f1.readline()
if line == '':
f1.close()
break
sp = line.split(" ")
if (sp[6] == "P"):
print sp[2] + "\t" + sp[4] + "\t" + sp[5] + "\t" + sp[0] + "\t" + sp[7] + "\t" + sp[8] + "\t" + sp[14]
if (sp[6] == "L") and (sp[7] == "RGN"):
print sp[2] + "\t" + sp[4] + "\t" + sp[5] + "\t" + sp[0] + "\t" + sp[7] + "\t" + sp[8] + "\t" + sp[14]
if (sp[6] == "A"):
print sp[2] + "\t" + sp[4] + "\t" + sp[5] + "\t" + sp[0] + "\t" + sp[7] + "\t" + sp[8] + "\t" + sp[14]
| true |
d2466f2cd469e19567cafcc62b34b5cd32aacd37 | Python | DDR7707/Final-450-with-Python | /Dynamic Programming/453.Longest Alternating Subsequence.py | UTF-8 | 678 | 4.125 | 4 | [] | no_license | def LAS(arr, n):
# "inc" and "dec" initialized as 1
# as single element is still LAS
inc = 1
dec = 1
# Iterate from second element
for i in range(1,n):
if (arr[i] > arr[i-1]):
# "inc" changes iff "dec"
# changes
inc = dec + 1
elif (arr[i] < arr[i-1]):
# "dec" changes iff "inc"
# changes
dec = inc + 1
# Return the maximum length
return max(inc, dec)
# Driver Code
if __name__ == "__main__":
arr = [10, 22, 9, 33, 49, 50, 31, 60]
n = len(arr)
# Function Call
print(LAS(arr, n))
| true |
50c12377804a67e387707bb55931766e8aaa591f | Python | SaurabhThube/Competitive-Programming-Templates | /FastExpo.py | UTF-8 | 141 | 3.21875 | 3 | [] | no_license | def FastExpo(x,y,mod):
res=1
while(y>0):
if y&1:
res=(res*x)%mod
x=(x*x)%mod
y/=2
return res
| true |
61ba73b0485e119a918e282f88308ee7704e51ef | Python | tonmoy50/Bangla-Sign-Language-Detection | /Model/d.py | UTF-8 | 721 | 3.96875 | 4 | [] | no_license | import math
# Function to check
# palindrome
def isPalindrome(s):
left = 0
right = len(s) - 1
while (left <= right):
if (s[left] != s[right]):
return False
left = left + 1
right = right - 1
return True
# Function to calculate
# the sum of n-digit
# palindrome
def getSum(n):
    """Return the sum of all n-digit palindromic numbers.

    Bug fixes relative to the original:
    * the ``n`` parameter was ignored -- the range was hard-coded to
      100..990 regardless of the requested digit count;
    * the hard-coded upper bound 990 excluded the 3-digit
      palindrome 999.
    """
    # n-digit numbers span [10**(n-1), 10**n - 1]; for n == 1 that is 1..9.
    start = 10 ** (n - 1)
    end = 10 ** n - 1
    total = 0
    for i in range(start, end + 1):
        s = str(i)
        # A palindrome reads the same reversed.
        if s == s[::-1]:
            total = total + i
    return total
# Driver code
# Demo run: print the palindrome sum for the chosen digit count.
n = 1
ans = getSum(n)
print(ans)
5d96e42ed110d4d14db6d9513d4f7ec19fa08509 | Python | programmer-666/Codes | /Python/Tensorflow/tnf1.py | UTF-8 | 2,295 | 3.09375 | 3 | [
"MIT"
] | permissive | import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
import tensorflow as wtf
from tensorflow.keras.models import Sequential # defines the stack of layers to train
from tensorflow.keras.layers import Dense # for adding layers to the model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error # error metrics, to see the real error
from tensorflow.keras.models import load_model # for loading a saved model
# Load the dataset and predict 'price' from two feature columns.
data = pd.read_csv('btk_data.csv')
#sb.pairplot(data)
# data import
y_price = data['price'].values
xps = data[['prop1', 'prop2']].values
# Hold out a third of the rows for evaluation.
xtr, xte, ytr, yte = train_test_split(xps, y_price, test_size=0.33)
# create test side
# Fit the scaler on the training split only, then apply to both splits
# (avoids leaking test statistics into training).
scaler = MinMaxScaler()
scaler.fit(xtr)
xtr = scaler.transform(xtr)
xte = scaler.transform(xte)
"""scaling/fitting"""
# A small fully-connected regression network: 6 hidden ReLU layers of
# width 8 and a single linear output neuron.
model = Sequential()
model.add(Dense(8, activation='relu')) # neuron num, actv func (adds layer)
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1)) # output neuron
model.compile(optimizer = 'rmsprop', loss = 'mse')
# creating model
model.fit(xtr, ytr, epochs=150)
loss = model.history.history['loss'] # loss
##sb.lineplot(x = range(len(loss)), y = loss)
trloss = model.evaluate(xtr, ytr, verbose = 0)
teloss = model.evaluate(xte, yte, verbose = 0)
print(trloss, teloss)
# training
# NOTE(review): the reshape(330,) hard-codes the test-set size; this
# breaks if the CSV row count changes -- reshape(-1) would be safer.
testPredicts = pd.Series(model.predict(xte).reshape(330, ))
predictDf = pd.DataFrame(yte, columns = ['Reals Y'])
concDf = pd.concat([predictDf, testPredicts], axis = 1)
concDf.columns = ['Real Y', 'Predict Y']
print(concDf)
sb.scatterplot(x = "Real Y", y = "Predict Y", data = concDf)
mae = mean_absolute_error(concDf['Real Y'], concDf['Predict Y'])
mse = mean_squared_error(concDf['Real Y'], concDf['Predict Y'])
print(mae, mse)
# for MAE: if the average price is around 870, then 870 +/- MAE is an acceptable spread
# model evaluation
newDataProps = [[1760, 1758]]
newDataProps = scaler.transform(newDataProps)
print(model.predict(newDataProps)) # price prediction from raw features
model.save('btk_model.h5')
#loaded_model = load_model('btk_model.h5')
# model predicts
6a5c06800c91495b5fe95c83d72abac65fa3afd9 | Python | Shobhit05/Hackerranksolutions | /Python/mobileno.py | UTF-8 | 161 | 2.96875 | 3 | [] | no_license | N=int(input())
# Python 2 script (uses raw_input): normalise N phone numbers to the
# last 10 digits, sort them, and print each as "+91 XXXXX XXXXX".
a=[]
for i in range(0,N):
    c=raw_input()
    # Keep only the trailing 10 digits, dropping any prefix such as
    # "+91" or "0".
    c=c[-10:]
    a.append(c)
# Lexicographic sort of equal-length digit strings == numeric order.
a.sort()
for j in a:
    print("+91"+" "+j[:5]+" " +j[-5:])
| true |
19fb562b91c7094da27b52527eeb2a5cc5773677 | Python | jesusalvador2911/AdmonODatos | /2.13/2.13.py | UTF-8 | 242 | 2.53125 | 3 | [] | no_license | import pickle
# Serialise one person record to disk with pickle.
nombre = "Bartolo"
apellido = "Andropolis"
edad = 20
soltero = False
salario = 8523.20
registro = [nombre, apellido, edad, soltero, salario]
# Bug fix: the original called the misspelled ``archivo.colse()``,
# which raised AttributeError and left the file handle open.  A context
# manager guarantees the file is closed even on error.
with open("ArchivoX.txt", "wb") as archivo:
    pickle.dump(registro, archivo)
| true |
51de96aff7508305f260cf886de56c2f5d33a9c0 | Python | RevansChen/online-judge | /Codewars/8kyu/5-without-numbers/Python/test.py | UTF-8 | 118 | 2.5625 | 3 | [
"MIT"
] | permissive | # Python - 3.6.0
# Codewars kata test snippet: ``test`` (the Codewars assertion
# framework) and ``unusual_five`` (the kata solution) are injected by
# the Codewars runner -- neither is defined in this file.
test.describe('Basic test')
test.it('Should return 5')
test.assert_equals(unusual_five(), 5, 'lol')
| true |
9d19104c37108c01e35bf007c449fd0a5033cedf | Python | geyang/plan2vec | /plan2vec/scratch/td_lambda.py | UTF-8 | 2,449 | 2.921875 | 3 | [] | no_license | import numpy as np
from params_proto.neo_proto import ParamsProto
class Args(ParamsProto):
    # Hyper-parameters for the TD(lambda) eligibility computation.
    # ParamsProto presumably exposes these as overridable CLI options --
    # verify against the params_proto package.
    gamma = 0.9  # discount factor
    lam = 0.9  # lambda, the n-step return mixing coefficient
    T = 20  # horizon: length of the eligibility vectors
    N = 20 # truncation for TD(λ)
def td_lambda():
    """Accumulate (unnormalized) TD(lambda) eligibility weights.

    Fixing the return G_t on the left-hand side, this sums the
    lambda-weighted n-step targets on the right-hand side: each n-step
    return contributes ``lam**(n-1) * gamma**t`` to reward t < n and
    ``lam**(n-1) * gamma**n`` to its bootstrap state.

    Returns a pair ``(reward_weights, state_weights)`` of length Args.T.
    """
    reward_weights = np.zeros(Args.T)
    state_weights = np.zeros(Args.T)
    for n_step in range(1, Args.N - 1):
        decay = Args.lam ** (n_step - 1)
        for t in range(n_step):
            reward_weights[t] += Args.gamma ** t * decay
        state_weights[n_step] += Args.gamma ** n_step * decay
    return reward_weights, state_weights
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    colors = ['#49b8ff', '#66c56c', '#f4b247', '#ff7575']
    xticks = [0, 1, 2, 3, 4, 5, 10, 15, 20]

    # Figure 1: raw (unnormalized) state eligibilities for several lambdas.
    # Note the unusual but valid for-target: each iteration assigns
    # directly to the class attribute Args.lam.
    plt.figure(figsize=(3, 2), dpi=200)
    plt.title('TD-λ (Unnormalized)')
    for i, Args.lam in enumerate([0.99, 0.9, 0.5, 0]):
        _, el = td_lambda()
        plt.plot(range(1, len(el)), el[1:], 'o-', markeredgecolor='white', markersize=4,
                 color=colors[i % 4], alpha=0.8, label=f'{Args.lam}')
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().xaxis.set_ticks(xticks)
    plt.ylim(None, 1.1)
    plt.xlim(None, 20)
    plt.legend(frameon=False)
    plt.show()

    # Figure 2: the same state eligibilities normalized to sum to 1.
    plt.figure(figsize=(3, 2), dpi=200)
    plt.title('TD-λ (Normalized)')
    for i, Args.lam in enumerate([0.99, 0.9, 0.5, 0]):
        _, el = td_lambda()
        plt.plot(range(1, len(el)), el[1:] / el[1:].sum(), 'o-', markeredgecolor='white',
                 markersize=4, color=colors[i % 4], alpha=0.8, label=f'{Args.lam}')
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().xaxis.set_ticks(xticks)
    plt.ylim(None, 1.1)
    plt.xlim(None, 20)
    plt.legend(frameon=False)
    plt.show()

    # Figure 3: the per-reward eligibility weights.
    plt.figure(figsize=(3, 2), dpi=200)
    plt.title('TD-λ (Reward Eligibility)')
    for i, Args.lam in enumerate([0.99, 0.9, 0.5, 0]):
        el_r, _ = td_lambda()
        plt.plot(range(len(el_r)), el_r, 'o-', markeredgecolor='white',
                 markersize=4, color=colors[i % 4], alpha=0.8, label=f'{Args.lam}')
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().xaxis.set_ticks(xticks)
    # plt.ylim(None, 1.1)
    plt.xlim(None, 19)
    plt.legend(frameon=False)
    plt.show()
effd2e6a01eb9a715eb2ca287d00fa134fdf2474 | Python | CCALITA/CNNthings | /week10/10_3.py | UTF-8 | 1,982 | 3.015625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
boston_housing=tf.keras.datasets.boston_housing
(train_x,train_y),(test_x,test_y)=boston_housing.load_data()
#数据归一化处理
x_train=(train_x-train_x.min(axis=0))/(train_x.max(axis=0)-train_x.min(axis=0))
x_test=(test_x-test_x.min(axis=0))/(test_x.max(axis=0)-test_x.min(axis=0))
#数据选择
x1_train=x_train[:,7]
x2_train=x_train[:,12]
y_train=train_y
x1_test=x_test[:,7]
x2_test=x_test[:,12]
y_test=test_y
#整理数据
X_train=tf.cast(tf.stack([x1_train,x2_train],axis=1),tf.float32)
X_test=tf.cast(tf.stack([x1_test,x2_test],axis=1),tf.float32)
Y_train=tf.constant(y_train.reshape(-1,1),tf.float32)
Y_test=tf.constant(y_test.reshape(-1,1),tf.float32)
learn_rate=0.01
iter=2000
display_step=80
np.random.seed(612)
W=tf.Variable(np.random.randn(2,1),dtype=tf.float32)
mse_train=[]
mse_test=[]
for i in range(0,iter+1):
with tf.GradientTape() as tape:
pred_train=tf.matmul(X_train,W)
loss_train=0.5*tf.reduce_mean(tf.square(Y_train-pred_train))
pred_test=tf.matmul(X_test,W)
loss_test=0.5*tf.reduce_mean(tf.square(Y_test-pred_test))
mse_train.append(loss_train)
mse_test.append(loss_test)
dL_dW=tape.gradient(loss_train,W)
W.assign_sub(learn_rate*dL_dW)
if i % display_step==0:
print("i:%i,Train Loss:%f,Test Loss:%f" %(i,loss_train,loss_test))
plt.figure(figsize=(8,4))
plt.suptitle("RM && LSTAT———— Price")
plt.subplot(131)
plt.ylabel('MSE')
plt.plot(mse_train,color='blue',linewidth=3)
plt.plot(mse_train,color='red',linewidth=1.5)
plt.subplot(132)
plt.ylabel('Price')
plt.plot(Y_train,color='blue',marker='o',label="true_price")
plt.plot(pred_train,color='red',marker='.',label="predicrt")
plt.legend()
plt.subplot(133)
plt.ylabel('Price')
plt.plot(Y_test,color='blue',marker='o',label="true_price")
plt.plot(pred_test,color='red',marker='.',label="predicrt")
plt.legend()
plt.show()
| true |
42e0bbba8ed554a6b5f49904383bc057e6c7d6c5 | Python | PaulB99/Tessa | /new/lines.py | UTF-8 | 7,707 | 3.390625 | 3 | [] | no_license | # Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage
import scipy.stats
# Line class
class Line(object):
    """A 2-D line fitted to a cluster of pixels.

    A slope greater than ``vertical_threshold`` marks the line as
    vertical: ``y(x)`` is then undefined (returns None) and ``x(y)``
    always answers with the cluster's horizontal centre.
    """

    vertical_threshold = 30

    def __init__(self, m, b, center, min_x, max_x, min_y, max_y):
        """Store slope ``m``, intercept ``b``, centre point (tuple) and
        the bounding-box extents of the pixel cluster."""
        self.m = m
        self.b = b
        self.center = center
        self.min_x = min_x
        self.max_x = max_x
        self.min_y = min_y
        self.max_y = max_y

    def _is_vertical(self):
        # Slopes above the threshold are treated as effectively infinite.
        return self.m > self.vertical_threshold

    def y(self, x):
        """Return the y-value at ``x``, or None for a vertical line."""
        return None if self._is_vertical() else self.m * x + self.b

    def x(self, y):
        """Return the x-value at ``y``; the centre x for a vertical line."""
        if self._is_vertical():
            return self.center[0]
        return (y - self.b) / self.m
# Show the image (for debugging)
def plot_img(img, show = True):
    """Display ``img`` in grey-scale on a large figure with no ticks.

    With ``show=False`` the figure is created but not shown, so callers
    can keep drawing overlays on top of it before showing/saving.
    """
    plt.figure(figsize=(16, 12))
    plt.imshow(img, cmap='gray', interpolation='none')
    plt.xticks([])
    plt.yticks([])
    if show:
        plt.show()
# Apply gaussian blur with kernel length sigma
def gaussian_blur(img, sigma):
    """Return ``img`` smoothed with an isotropic Gaussian of width ``sigma``.

    Uses ``scipy.ndimage.gaussian_filter`` directly: the
    ``scipy.ndimage.filters`` sub-namespace is deprecated and has been
    removed from recent SciPy releases.
    """
    return scipy.ndimage.gaussian_filter(img, sigma=(sigma, sigma))
# Reduce size given times
def downsample(img, num_downsamples):
    """Return a copy of ``img`` halved in size ``num_downsamples`` times.

    Uses ``scipy.ndimage.zoom`` directly: the
    ``scipy.ndimage.interpolation`` sub-namespace is deprecated and has
    been removed from recent SciPy releases.
    """
    proc_img = np.copy(img)
    for _ in range(num_downsamples):
        proc_img = scipy.ndimage.zoom(proc_img, .5)
    return proc_img
# Calculate sobel x^2
def sobel_x_squared(img):
    """Return the squared horizontal Sobel gradient of ``img``.

    Squaring makes the response sign-free, so vertical edges light up
    regardless of polarity.
    """
    grad_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
    return grad_x ** 2.
# Sobel y gradient
def sobel_y_squared(img):
    """Return the squared vertical Sobel gradient of ``img``.

    Squaring makes the response sign-free, so horizontal edges light up
    regardless of polarity.
    """
    grad_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
    return grad_y ** 2.
# Laplace transformation
def laplace_squared(img):
    """Return the Sobel gradient magnitude sqrt(gx**2 + gy**2) of ``img``.

    Bug fix: the helpers already return *squared* gradients, but the
    original squared them again, yielding sqrt(gx**4 + gy**4) instead
    of the gradient magnitude.
    """
    sobel_x_img = sobel_x_squared(img)
    sobel_y_img = sobel_y_squared(img)
    # The inputs are gx**2 and gy**2 already, so summing and taking the
    # square root gives the magnitude directly.
    return (sobel_x_img + sobel_y_img) ** .5
# Standardise the image intensity
def standardise(img):
    """Linearly rescale image intensities to the range [0, 1].

    Robustness fix: a constant image made the original divide zero by
    zero and return an all-NaN array (with a runtime warning); it now
    returns all zeros instead.
    """
    lo = np.min(img)
    value_range = np.max(img) - lo
    if value_range == 0:
        # Flat image: nothing to normalise.
        return np.zeros_like(img, dtype=float)
    return (img - lo) / value_range
# Binarise by setting any value > 0 to 1
def dynamic_binarise(img, cutoff):
    """Binarise ``img`` in place with an adaptively chosen threshold.

    The threshold is swept upward in 0.01 steps (0.00 .. 0.19) until at
    most 40% of the pixels are above it; everything above becomes 1 and
    everything else 0.  NOTE: the incoming ``cutoff`` value is
    immediately overwritten by the sweep; the parameter is kept only
    for interface compatibility.  Returns the mutated ``img``.
    """
    n_pixels = img.shape[0] * img.shape[1]
    for step in range(20):
        cutoff = step * .01
        bright_ratio = len(np.where(img > cutoff)[0]) / n_pixels
        if bright_ratio <= 0.4:
            break
    img[img > cutoff] = 1
    img[img <= cutoff] = 0
    return img
# Get rid of horizontal lines
def vertical_erode(img, structure_length, iterations):
    """Erode ``img`` with a vertical 3x3 structuring element.

    Repeated vertical erosion trims short/horizontal strokes while
    tall vertical lines survive.  Uses ``scipy.ndimage.binary_erosion``
    directly: the ``scipy.ndimage.morphology`` sub-namespace is
    deprecated and removed from recent SciPy releases.

    Note: scaling the structure by ``structure_length`` does not change
    its footprint (any non-zero entry counts); it is kept for
    compatibility with the original call sites.
    """
    structure = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) * structure_length
    return scipy.ndimage.binary_erosion(img, structure, iterations=iterations)
# Connect close lines
def vertical_dilate(img, structure_length, iterations):
    """Dilate ``img`` with a vertical 3x3 structuring element.

    Vertical dilation reconnects nearby fragments of the same vertical
    line.  Uses ``scipy.ndimage.binary_dilation`` directly: the
    ``scipy.ndimage.morphology`` sub-namespace is deprecated and
    removed from recent SciPy releases.

    Note: scaling the structure by ``structure_length`` does not change
    its footprint (any non-zero entry counts); it is kept for
    compatibility with the original call sites.
    """
    structure = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) * structure_length
    return scipy.ndimage.binary_dilation(img, structure, iterations=iterations)
# Find connected components and assign values to component
def connected_components(img):
    """Label the 8-connected components of a binary image.

    Returns ``(labelled_img, levels)`` where ``labelled_img`` holds a
    distinct integer per component and ``levels`` is the list
    ``[1 .. number_of_components]``.
    """
    eight_connectivity = np.ones((3, 3))
    labelled, n_components = scipy.ndimage.label(img, structure=eight_connectivity)
    return labelled, list(range(1, n_components + 1))
# Remove lines that are too short
def remove_short_clusters_vertical(img, levels, threshold_fraction):
    """Zero out labelled clusters whose vertical extent is too small.

    A cluster survives only when its peak-to-peak row extent is at
    least half of the tallest cluster's.  ``img`` is modified in place
    and returned.  NOTE: ``threshold_fraction`` is accepted for
    interface compatibility but unused; the cut-off is hard-coded to
    max(height) / 2.
    """
    heights = []
    for level in levels:
        rows = np.where(img == level)[0]
        heights.append(np.ptp(rows))
    cutoff = np.max(heights) / 2.
    for level, height in zip(levels, heights):
        if height < cutoff:
            img[img == level] = 0
    return img
# Restore image to overall size
def upsample(img, upsample_factor):
    """Expand ``img`` by repeating every pixel ``upsample_factor`` times
    along both axes (nearest-neighbour upsampling)."""
    return img.repeat(upsample_factor, axis=0).repeat(upsample_factor, axis=1)
# Get lines in a binary image and return Line objects
def get_lines_from_img(img, levels):
    """Fit one Line object per labelled pixel cluster in ``img``.

    For each label in ``levels``, the cluster's pixels are collected;
    very tall-and-narrow clusters (vertical spread / horizontal spread
    > 10) are treated as vertical lines, everything else gets a least
    squares fit via ``scipy.stats.linregress``.  The resulting Lines
    are returned sorted by centre x.
    """
    lines = []
    for level in levels:
        # np.where returns (row_indices, col_indices): rows are y, cols are x.
        line = np.where(img == level)
        xs = line[1]
        ys = line[0]
        center = [np.mean(xs), np.mean(ys)]
        min_x = np.min(xs)
        max_x = np.max(xs)
        min_y = np.min(ys)
        max_y = np.max(ys)
        # NOTE(review): a cluster exactly one pixel wide divides by
        # zero here; with numpy operands this yields inf/nan plus a
        # runtime warning rather than an exception, and inf > 10 then
        # routes it into the vertical branch -- confirm this is intended.
        spread = (np.max(ys) - np.min(ys))/(np.max(xs) - np.min(xs))
        # Line is vertical: use a sentinel slope well above
        # Line.vertical_threshold.
        if spread > 10:
            line = Line(1000, 0, center, min_x, max_x, min_y, max_y)
        # Line is not vertical: least-squares fit y = m*x + b.
        else:
            m, b, r, p, std = scipy.stats.linregress(xs,ys)
            line = Line(m, b, center, min_x, max_x, min_y, max_y)
        lines.append(line)
    # Sort the lines by their centre x positions
    lines.sort(key = lambda line: line.center[0])
    return lines
# Get edges of spines in image
def get_book_lines(img, angles = [0], spaces = ['h']):
    """Detect book-spine edges in a shelf photo and return Line objects.

    For each rotation angle the pipeline is: grey-scale -> blur ->
    horizontal Sobel -> downsample -> standardise -> binarise ->
    vertical erode/dilate -> connected components -> drop short
    clusters -> upsample -> un-rotate.  The per-angle masks are OR-ed
    together, re-labelled, fitted to Lines, and drawn over the image
    (saved as 'proc_img.png' as a side effect).

    NOTE(review): ``angles`` uses a mutable default (harmless here
    since it is never mutated, but worth fixing); ``spaces`` is unused.
    """
    # Collapse RGB channels to a single grey-scale intensity image.
    gs_img = np.mean(img, axis = 2)
    final_img = np.zeros((gs_img.shape[0], gs_img.shape[1]))
    lines = []
    for angle in angles:
        # Rotate (reshape=False keeps the original frame size)
        proc_img = scipy.ndimage.rotate(gs_img, angle = angle, reshape = False)
        # Blur
        sigma = 3
        proc_img = gaussian_blur(proc_img, sigma = sigma)
        # Sobel x: respond to vertical edges (book spines)
        proc_img = sobel_x_squared(proc_img)
        # Down sample
        num_downsamples = 2
        proc_img = downsample(proc_img, num_downsamples)
        # Standardise
        proc_img = standardise(proc_img)
        # Binarize (dynamic_binarise re-derives its own cutoff internally)
        cutoff = np.max(proc_img)/12.
        proc_img = dynamic_binarise(proc_img, cutoff)
        # Vertical erode: remove short / horizontal strokes
        structure_length = 200
        iterations = 8
        proc_img = vertical_erode(proc_img, structure_length, iterations)
        # Vertical dilate: reconnect fragments of the same line
        structure_length = 500
        iterations = 10
        proc_img = vertical_dilate(proc_img, structure_length, iterations)
        # Connected components
        proc_img, levels = connected_components(proc_img)
        # Remove short clusters
        threshold_fraction = 0.10
        proc_img = remove_short_clusters_vertical(proc_img, levels, threshold_fraction)
        # Up sample back to (roughly) the original resolution
        upsample_factor = 2**num_downsamples
        proc_img = upsample(proc_img, upsample_factor)
        # Un-rotate image and pad/crop in place to the original frame
        proc_img = scipy.ndimage.rotate(proc_img, angle = -1*angle, reshape = False)
        proc_img.resize((img.shape[0], img.shape[1]))
        # Accumulate detections across all angles
        final_img = final_img + proc_img
    # Convert the final image to binary
    final_img[final_img > 0] = 1
    # Connect components label
    final_img, levels = connected_components(final_img)
    # Get the lines from the label
    lines = get_lines_from_img(final_img, levels)
    # Plot the result: overlay each detected line in green and save to disk
    new_img = np.copy(img)
    plot_img(new_img, show = False)
    for line in lines:
        y0 = line.min_y
        y1 = line.max_y
        x0 = line.x(y0)
        x1 = line.x(y1)
        plt.plot([x0, x1], [y0, y1], color = np.array([0,169,55])/255., lw = 6)
    plt.xlim(0, img.shape[1])
    plt.ylim(img.shape[0], 0)
    plt.xticks([])
    plt.yticks([])
    plt.savefig('proc_img.png', bbox_inches = 'tight', dpi = 300)
    return lines
| true |
4e5369318ad362551632d48abfa4e4f0f23782d3 | Python | 401-python-final/wheres_my_bus_backend | /api_caller/views.py | UTF-8 | 11,512 | 2.875 | 3 | [] | no_license | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.views import APIView
#import speech_recognition as sr
import requests
import time
import json
# Module-level initialisation: load the route-name -> OneBusAway
# route-id mapping once at import time.  NOTE(review): the print runs
# on every import/worker start -- probably debug output left in.
with open('bus_routes/finalRoutesAndIds.json') as all_routes:
    route_data = json.load(all_routes)
print(route_data)
# Commented-out speech-to-text prototype (depends on the
# speech_recognition import, also commented out above).
#def voice_to_text(path):
# sound = path
# r = sr.Recognizer()
# with sr.AudioFile(sound) as source:
# r.adjust_for_ambient_noise(source)
# print("Converting Audio To Text ..... ")
# audio = r.listen(source)
# try:
# print("Converted Audio Is : \n" + r.recognize_google(audio))
#except Exception as e:
# print("Error {} : ".format(e) )
class show_me_the_request(APIView):
    """DRF endpoint: accept an audio POST and answer with arrival info.

    The uploaded body is intended to be a .wav converted to a route
    name; the conversion is not implemented yet, so the route is
    hard-coded to '8' and ``theFile`` is unused.
    """
    def post(self, request, lat, lon, format=None):
        # NOTE(review): request.body is read but never used -- the
        # speech-to-text step is a stub.
        theFile = request.body
        # 1st .wav to text
        theBusRoute = '8' #the ana-leo function (.wav to text)
        return get_a_routes_closest_stop_and_arrival_time(request, lat, lon, theBusRoute)
    #2. Gets the two closest stops (both directions)
    # 3. Finds the soonest arrival time of the requested bus at both stops
    # 4. Returns (for each direction): [bus_id, direction, stop_name, arrival time (in minutes)]
    # return HttpResponse()
def get_a_routes_closest_stop_and_arrival_time(request, lat, lon, bus_route):
    """
    1. Cleans Data
    2. Gets the two closest stops (both directions)
    3. Finds the soonest arrival time of the requested bus at both stops
    4. Returns (for each direction): [bus_id, direction, stop_name, arrival time (in minutes)]

    Returns a JsonResponse on success, or a plain-text HttpResponse
    error when the route is unknown or no arrival time is available.
    """
    # 1 -- normalise the spoken query into a canonical route key.
    clean_data = clean_route_data(lat,lon,bus_route)
    if not clean_data:
        return HttpResponse(f'ERROR! {bus_route} is not a valid route.')
    bus_id = clean_data['bus_id']
    bus_route = clean_data['bus_route']
    user_lat = clean_data['user_lat']
    user_lon = clean_data['user_lon']
    # 2 -- nearest stop in each travel direction.
    closest_stops = find_closest_stops(user_lat,user_lon,bus_id)
    name_of_closest = closest_stops['name_of_closest']
    name_of_next_closest = closest_stops['name_of_next_closest']
    closest_stop_id = closest_stops['closest_stop_id']
    next_closest_stop_id = closest_stops['next_closest_stop_id']
    closest_direction = closest_stops['closest_direction']
    next_closest_direction = closest_stops['next_closest_direction']
    closest_lat = closest_stops['closest_stop_lat']
    next_closest_lat = closest_stops['next_closest_stop_lat']
    closest_lon = closest_stops['closest_stop_lon']
    next_closest_lon = closest_stops['next_closest_stop_lon']
    # 3
    # Sequential API calls - Finding estimated Arrival Time of: the specific_bus at the nearest_stop
    closest_arrival = find_estimated_arrival(closest_stops['closest_stop_id'], bus_id)
    next_closest_arrival = find_estimated_arrival(closest_stops['next_closest_stop_id'], bus_id)
    # 4
    # Check that a valid time was returned from find_estimated_arrival
    # NOTE(review): a legitimate 0-minute ("arriving now") result is
    # falsy, so a bus due right now at both stops is reported as
    # unavailable -- an explicit `is not None` check was probably meant.
    # print('NC: ', name_of_closest, 'cArrival: ', closest_arrival, 'NNC: ', name_of_next_closest, 'nCArrival', next_closest_arrival)
    if closest_arrival or next_closest_arrival:
        return JsonResponse({
            'route': bus_route,
            'closest_stop': {
                'closest_name': name_of_closest,
                'closest_direction': closest_direction,
                'closest_stop_id': closest_stop_id,
                'closest_minutes': closest_arrival,
                'closest_lat': closest_lat,
                'closest_lon': closest_lon,
            },
            'next_closest_stop': {
                'next_closest_name': name_of_next_closest,
                'next_closest_direction': next_closest_direction,
                'next_closest_stop_id': next_closest_stop_id,
                'next_closest_minutes': next_closest_arrival,
                'next_closest_lat': next_closest_lat,
                'next_closest_lon': next_closest_lon,
            }
        })
    return HttpResponse(f'ERROR! We\'re sorry, route {bus_route} is not available at this time.')
def clean_route_data(lat, lon, bus_route):
    """Parse a free-form spoken query into a canonical route key.

    Scans the tokenised query for a single letter (mapped to
    "X-Line"), a digit-bearing token, the key words line/route/bus, or
    one of the named special-case services; the first match wins.
    Returns a dict with the OneBusAway bus_id, the parsed route name
    and the user's coordinates, or None when the parsed route is not in
    ``route_data``.
    """
    query = bus_route.lower().split()
    # Fall back to default coordinates when lat/lon parse to 0.0.
    user_lat = float(lat) or 47.9365944
    user_lon = float(lon) or -122.219628
    result=''
    alphabet = set(['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'])
    num_chars = set('1234567890')
    key_words = set(['line', 'route', 'bus'])
    # Services whose canonical names are multi-word: the first token is
    # the dict key, the value lists the full names it may expand to.
    special_cases = {
        'link': ['link'],
        'sounder':['sounder south', 'sounder north'],
        'amtrak':['amtrak'],
        'tlink': ['tlink'],
        'swift':['swift blue', 'swift green'],
        'duvall':['duvall monroe shuttle'],
        'trailhead': ['trailhead direct mt. si','trailhead direct mailbox peak','trailhead direct cougar mt.','trailhead direct issaquah alps']
    }
    for word in range(len(query)):
        # A bare single letter means a lettered route like "C-Line".
        if query[word] in alphabet:
            result += query[word].upper()
            result +='-Line'
            break
        # is a number or leter
        if query[word] in key_words:
            # print('found key word: ', query[word])
            #if the word is a key word, grab the first word before the key if it
            # NOTE(review): when the key word is the FIRST token,
            # ``query[word - 1]`` is ``query[-1]`` -- Python wraps to
            # the LAST token rather than failing; also this truthiness
            # test is always True for non-empty tokens (a bounds check
            # like ``word > 0`` was probably intended).
            if query[word -1]:
                if query[word-1] in alphabet:
                    result += query[word-1].upper()
                    result +='-Line'
                    break
                if any(char in query[word-1] for char in num_chars):
                    result += query[word-1]
                    break
            # grabs the first word after the key if it is a letter or number
            # NOTE(review): ``query[word + 1]`` raises IndexError when
            # the key word is the last token -- TODO confirm / guard.
            if query[word +1]:
                if query[word+1] in alphabet:
                    result += query[word +1].upper()
                    result +='-Line'
                    break
                if any(char in query[word+1] for char in num_chars):
                    result += query[word+1]
                    break
        else:
            # if no key words are found, grab the first number found
            if any(char in query[word] for char in num_chars):
                result += query[word]
                break
            # checks if word is a key in special case, and returns a value inside
            # that key's list
            if query[word] in special_cases:
                if len(special_cases[query[word]]) == 1:
                    result += special_cases[query[word]][0]
                    break
                else:
                    # Ambiguous service: disambiguate with the next token.
                    if query[word +1] in special_cases[query[word]][0]:
                        result += special_cases[query[word]][0]
                        break
                    if query[word +1] in special_cases[query[word]][1]:
                        result += special_cases[query[word]][1]
                        break
    bus_route = result
    # print(bus_route)
    # Check our dictionary of Puget Sound Area Routes
    if bus_route not in route_data:
        return None
    # TODO: elif bus_route+'o' in route_data:
    # handle 20 cases where there are repeated routes (Northern)
    return {'bus_id':route_data[bus_route], 'user_lat':user_lat, 'user_lon':user_lon, 'bus_route': bus_route}
def clean_route_data_deprecated(lat, lon, bus_route):
    """Older, simpler version of clean_route_data (kept for reference).

    Assumes the whole query is already a route name; only normalises a
    leading letter to "X-Line" unless it is a known special-case
    service.  Superseded by clean_route_data above.
    """
    # Clean input
    bus_route = bus_route.lower()
    # Fall back to default coordinates when lat/lon parse to 0.0.
    user_lat = float(lat) or 47.9365944
    user_lon = float(lon) or -122.219628
    alphabet = set(['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'])
    special_cases = ['link', 'sounder south','amtrak','sounder north','tlink','swift blue','swift green','duvall monroe shuttle','trailhead direct mt. si','trailhead direct mailbox peak','trailhead direct cougar mt.','trailhead direct issaquah alps']
    # Check for non-integer route numbers. Format them to be "<capitol letter> -Line"
    if bus_route[0] in alphabet and bus_route not in special_cases:
        temp = bus_route[0].upper()
        temp += '-Line'
        bus_route = temp
    # Check our dictionary of Puget Sound Area Routes
    if bus_route not in route_data:
        return None
    # TODO: elif bus_route+'o' in route_data:
    # handle 20 cases where there are repeated routes (Northern)
    return {'bus_id':route_data[bus_route], 'user_lat':user_lat, 'user_lon':user_lon, 'bus_route': bus_route}
def find_closest_stops(user_lat, user_lon, bus_id):
    """Return the two stops on ``bus_id``'s route nearest to the user.

    Fetches the route's stops from the OneBusAway API and ranks them by
    L1 distance on raw lat/lon degrees (a rough proxy; longitude
    degrees shrink with latitude).  Returns a dict with id, name,
    direction and coordinates for the closest and next-closest stop --
    in practice the two travel directions of the route.
    """
    response = requests.get(f'http://api.pugetsound.onebusaway.org/api/where/stops-for-route/{bus_id}.json?key=TEST&version=2')
    bus_data = response.json()
    bus_stops = bus_data['data']['references']['stops']
    closest, next_closest = None, None
    closest_stop_id, next_closest_stop_id = 0,0
    closest_stop_lat, next_closest_stop_lat = 0,0
    closest_stop_lon, next_closest_stop_lon = 0,0
    name_of_closest, name_of_next_closest = 'a', 'b'
    closest_direction, next_closest_direction = 'n', 's'
    for stop in bus_stops:
        difference_lat = abs(user_lat - stop['lat'])
        difference_lon = abs(user_lon - stop['lon'])
        difference = difference_lat + difference_lon
        # Seed both trackers with the first stop's distance.
        if not closest:
            closest, next_closest = difference, difference
        if difference < closest:
            # updating closest
            closest = difference
            name_of_closest = stop['name']
            closest_direction = stop['direction']
            closest_stop_id = stop['id']
            closest_stop_lat = stop['lat']
            closest_stop_lon = stop['lon']
        # NOTE(review): when a new closest is found above, the previous
        # closest is never demoted into next_closest, and ``difference
        # != closest`` compares against the just-updated value -- so
        # next_closest may not actually be the second-nearest stop.
        if difference < next_closest and difference != closest:
            #change next closest
            next_closest = difference
            name_of_next_closest = stop['name']
            next_closest_direction = stop['direction']
            next_closest_stop_id = stop['id']
            next_closest_stop_lat = stop['lat']
            next_closest_stop_lon = stop['lon']
    return {
        'closest_stop_id':closest_stop_id,
        'next_closest_stop_id':next_closest_stop_id,
        'name_of_closest':name_of_closest,
        'name_of_next_closest':name_of_next_closest,
        'closest_direction':closest_direction,
        'next_closest_direction':next_closest_direction,
        'closest_stop_lon':closest_stop_lon,
        'closest_stop_lat':closest_stop_lat,
        'next_closest_stop_lat':next_closest_stop_lat,
        'next_closest_stop_lon':next_closest_stop_lon
    }
def find_estimated_arrival(stop_id, bus_id):
    """Return minutes until ``bus_id`` next arrives at ``stop_id``.

    Queries the OneBusAway arrivals-and-departures endpoint, preferring
    real-time predicted times over scheduled ones.  Returns the number
    of whole minutes until the first future arrival of the route, or
    None when no future arrival is listed.
    """
    response = requests.get(f'http://api.pugetsound.onebusaway.org/api/where/arrivals-and-departures-for-stop/{stop_id}.json?key=TEST')
    stop_data = response.json()
    list_of_arrivals = stop_data['data']['entry']['arrivalsAndDepartures']
    arrival_time = 0
    # API timestamps are epoch milliseconds; convert our clock to match.
    current_time = ((time.time()) *1000) # convert to epoch time
    for arrival in list_of_arrivals: #check all arrivals
        if arrival['routeId'] == bus_id: # find the correct arrival listing
            if arrival['predictedArrivalTime'] != 0: # predicted time is available (it is not always)
                arrival_time = arrival['predictedArrivalTime']
            else: # predicted time is not available
                arrival_time = arrival['scheduledArrivalTime']
            # make sure to only show busses that have NOT arrived yet. (arriving in the future)(Arrivaltime > current_time -- i.e in the future)
            if arrival_time > current_time:
                return ((arrival_time - current_time)//60000) # time in minutes (rounded)
    return None
| true |
bce12d9ab605840040a770164a47bc653c32c599 | Python | samuxiii/prototypes | /aigym/cartpole/cartpole.py | UTF-8 | 4,104 | 3.359375 | 3 | [
"MIT"
] | permissive | import os
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from time import sleep
class Agent:
    """A DQN-style agent for CartPole: an epsilon-greedy policy backed
    by a small Keras Q-network and an experience-replay buffer."""
    def __init__(self):
        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = []
        self.epsilon = 1.0 #exploration rate
        self.model = self.__model()
    def __model(self):
        """Build the Q-network: 4 inputs (CartPole observation) -> two
        tanh hidden layers -> 2 linear outputs (Q-value per action)."""
        features = 4
        learning_rate = 0.01
        model = Sequential()
        model.add(Dense(8, input_dim=features, activation='tanh'))
        model.add(Dense(16, activation='tanh'))
        model.add(Dense(2, activation='linear'))
        #the loss function will be MSE between the action-value Q
        model.compile(loss='mse', optimizer=Adam(lr=learning_rate, decay=0.01))
        return model
    def remember(self, state, action, reward, next_state, done):
        #store in memory the different states, actions, rewards...
        # NOTE(review): the buffer is unbounded -- a deque(maxlen=...)
        # would cap memory growth over long runs.
        self.memory.append( (state, action, reward, next_state, done) )
    def replay(self):
        """Sample a minibatch from memory, build Q-learning targets and
        fit the network, then decay the exploration rate."""
        #fit model from memory
        gamma = 1.0 #importance of the next reward
        max_batch_size = 64
        #take care the memory could be big, so using minibatch
        minibatch = random.sample(self.memory, min(max_batch_size, len(self.memory)))
        list_x_batch, list_y_batch = [], []
        for state, action, reward, next_state, done in minibatch:
            # Start from the network's current prediction so only the
            # taken action's Q-value is adjusted.
            target = self.model.predict(state)[0]
            if not done: #calculate discounted reward
                action_values = self.model.predict(next_state)[0]
                #following the formula of action-value expectation
                reward = reward + gamma * np.amax(action_values)
            #customize the obtained reward with the calculated
            target[action] = reward
            #append
            list_x_batch.append(state)
            list_y_batch.append(target)
        #train the model
        x_batch = np.vstack(list_x_batch)
        y_batch = np.vstack(list_y_batch)
        self.model.fit(x_batch, y_batch, verbose=0)
        #decrease exploration rate
        if self.epsilon > 0.01:
            self.epsilon *= 0.997
    def act(self, state):
        """Epsilon-greedy action: explore uniformly with probability
        epsilon, otherwise pick the argmax Q action (0 or 1)."""
        if self.epsilon > np.random.rand():
            return random.randint(0,1)
        #predict the action to do
        action_values = self.model.predict(state)[0]
        return np.argmax(action_values)
if __name__ == "__main__":
    # Training driver: run CartPole episodes, replay-train after each
    # episode, and stop after 100 consecutive solved episodes.
    backupfile = 'weights.hdf5'
    trained = False
    env = gym.make('CartPole-v1')
    agent = Agent()
    # Resume from saved weights if a previous run completed; skip
    # further training in that case.
    if os.path.isfile(backupfile):
        print("Already trained. Recovering weights from backup")
        agent.model.load_weights(backupfile)
        trained = True
    total_wins = 0
    for episode in range(1000):
        state = env.reset()
        #row vector
        state = state.reshape(1, -1)
        for step in range(1, 700):
            #env.render()
            #perform the action
            # 0->left, 1->right
            action = agent.act(state)
            #print("action: {}".format(action))
            next_state, reward, done, info = env.step(action)
            #row vector
            next_state = next_state.reshape(1, -1)
            #save the current observation
            agent.remember(state, action, reward, next_state, done)
            #update state
            state = next_state
            #evaluate
            if done:
                #solved when reward >= 195 before 100 episodes
                if step > 195:
                    solved = 'SOLVED'
                    total_wins += 1
                else:
                    solved = ''
                    total_wins = 0
                print("Episode: {} Step: {} Epsilon: {:.3f} {}".format(episode, step, agent.epsilon, solved))
                break
        #at the end of episode, train the model
        if not trained:
            agent.replay()
        #end game when 100 wins in a row
        if total_wins == 100:
            print("You win!!")
            agent.model.save_weights(backupfile)
            break
    #before exit
    sleep(2)
| true |