blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7f55d17f03a023013d5e21b4b641190311e019c2 | Python | Bruno81930/scripts | /jsoncat | UTF-8 | 789 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python3
import tarfile
import tempfile
import os
import json
import click
@click.command()
@click.option('-i', '--input', help="File of type tar.gz with a set of json files.",
              type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True), required=True)
def jsoncat(input):
    """Concatenate every JSON array found inside a tar.gz archive and echo the result.

    Each member file must contain a JSON list; the lists are concatenated in
    os.walk order and printed as a single JSON array on stdout.
    """
    json_out = list()
    with tempfile.TemporaryDirectory() as tmpdir:
        # Context manager guarantees the archive handle is closed
        # (the original opened it and never closed it).
        with tarfile.open(input, "r:gz") as tar:
            tar.extractall(path=str(tmpdir))
        for root, _, files in os.walk(tmpdir):
            for file in files:
                # Join against the directory currently being walked; joining
                # against tmpdir (as before) broke for archives that contain
                # subdirectories.
                path = os.path.join(root, file)
                with open(path, 'r') as fobj:
                    json_out = json_out + json.load(fobj)
    click.echo(json.dumps(json_out))


if __name__ == '__main__':
    jsoncat()
| true |
5d053b279c599901b3b6d496ed1c13debf73d0dd | Python | robertoweller/tra_nuvem | /entropia.py | UTF-8 | 713 | 2.9375 | 3 | [] | no_license | from data.generate_datasets import make_point_clouds
from gtda.homology import VietorisRipsPersistence
from gtda.plotting import plot_diagram
import pandas as pd
# Number of point clouds generated per topological class.
n_samples_per_class = 10
point_clouds, labels = make_point_clouds(n_samples_per_class, 10, 0.1)
# (Removed a no-op bare `point_clouds.shape` expression statement here.)
print(f"There are {point_clouds.shape[0]} point clouds in {point_clouds.shape[2]} dimensions, "
      f"each with {point_clouds.shape[1]} points.")
# Here there are 30 point clouds in 3 dimensions, each with 100 points.
print(point_clouds)
# Track connected components, loops and voids (homology dimensions 0, 1, 2).
VR = VietorisRipsPersistence(homology_dimensions=[0, 1, 2])  # Parameter explained in the text
diagrams = VR.fit_transform(point_clouds)
print(diagrams.shape)
# Plot the persistence diagram of the first point cloud.
i = 0
plot_diagram(diagrams[i])
| true |
3ecf23cc00189083c71e13566442994876f26c0e | Python | carlosvega/convert_ts | /convert_ts.py | UTF-8 | 5,871 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
from __future__ import print_function
import time, datetime, sys, argparse, logging, os, fileinput
from argparse import RawTextHelpFormatter
os.environ['TZ'] = 'GMT' #oddly faster with GMT :-O
date_format = '%d/%m/%Y:%H.%M.%S'
def parse_args():
    """Parse command-line options and return the argparse namespace.

    Side effects: exits the process when --version is given, and sets the
    TZ environment variable to --where before returning.
    """
    global date_format
    description = """
	_~
 _~ )_)_~
)_))_))_)
_!__!__!_
\_______/
~~~~~~ ~~~~~~
~~ ~~~~ ~~~~
Carlos Vega
14/09/17
v1.0
Timestamp Converter.
Execute with pypy for better performance
"""
    parser = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
    parser.add_argument('-i', '--input', dest='input', required=False, default='-', help='Input file. Default: stdin')
    parser.add_argument('-o', '--output', dest='output', required=False, type=argparse.FileType('w'), default=sys.stdout, help='Output file. It will use the same separator. Default stdout')
    parser.add_argument('-s', '--separator', dest='separator', required=False, default=';', help='File Separator. Default: Semicolon ";"')
    parser.add_argument('-t', '--ts_column', dest='ts_column', nargs='+', required=False, type=int, default=[0], help='Number or list of numbers of the columns with timestamps. Default 0.')
    parser.add_argument('-x', '--exclude', dest='exclude', nargs='+', required=False, type=int, default=[], help='List of numbers starting in 0 of the columns to be excluded. Default: none. If a column appears in both include and exclude it will be excluded')
    parser.add_argument('-z', '--include', dest='include', nargs='+', required=False, type=int, default=[], help='List of numbers starting in 0 of the columns to be included. Default all, except those excluded with the parameter -x.')
    parser.add_argument('--start', dest='start', type=int, required=False, default=None, help='Filter output by time range. This parameter indicates the min timestamp IN SECONDS. Default: None')
    parser.add_argument('--main_ts', dest='main_ts', type=int, required=False, default=0, help='Which of the given timestamp columns is the main ts? By the fault it will be the first number provided in option -t.')
    parser.add_argument('--end', dest='end', type=int, required=False, default=None, help='Filter output by time range. This parameter indicates the min timestamp IN SECONDS. Default: None')
    # -f with nargs='+' yields a list when given on the CLI, but the default
    # is a plain string; the caller uses isinstance(..., list) to tell them apart.
    parser.add_argument('-f', '--ts_format', dest='ts_format', nargs='+', default=date_format, required=False, help="Indicate the date format. Default: %s" % date_format.replace(r"%", r"%%"))
    parser.add_argument('--ms', dest='ms', default=False, action='store_true', help='Prints the converted timestamps in miliseconds, otherwise are printed in seconds.')
    parser.add_argument('--version', dest='version', default=False, action='store_true', help="Prints the program version.")
    parser.add_argument('--where', dest='where', default='GMT', help="Where the timestamps come from? By default we assume GMT.")
    args = parser.parse_args()
    if args.version:
        logging.info('Timestamp Converter v0.1')
        sys.exit()
    # NOTE(review): time.tzset() is not called after changing TZ, so whether
    # this affects time.mktime depends on the platform's C library — confirm.
    os.environ['TZ'] = args.where
    return args
# Initialization of parameters
# Configure timestamped log lines, then parse the CLI once at import time.
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
args = parse_args()
def column_is_included(i, included, excluded):
    """Decide whether column *i* should be kept in the output.

    A non-empty *excluded* list wins: any column listed there is dropped,
    even if it also appears in *included*.  A non-empty *included* list
    keeps only the columns it names.  When both lists are empty, every
    column is kept.
    """
    if excluded and i in excluded:
        return False
    if included and i not in included:
        return False
    return True
# Inclusive range check; a None bound means that side is unbounded.
def timestamp_in_range(ts, start, end):
    lower_ok = start is None or ts >= start
    upper_ok = end is None or ts <= end
    return lower_ok and upper_ok
# Parse *string* with *date_format* and return the epoch time in seconds
# (the original comment said milliseconds, but mktime yields seconds).
def convert_ts_single(string, date_format):
    parsed = datetime.datetime.strptime(string, date_format)
    return int(time.mktime(parsed.timetuple()))
# Parse *string* against each candidate format and return the epoch time in
# seconds; raises ValueError when no format matches.
def convert_ts_multi(string, date_formats):
    for fmt in date_formats:
        try:
            return int(time.mktime(datetime.datetime.strptime(string, fmt).timetuple()))
        except ValueError:
            # strptime raises ValueError on a format mismatch; a bare
            # `except:` (as before) would also hide real errors such as
            # KeyboardInterrupt.  Try the next format instead.
            continue
    raise ValueError("no date format matched: {!r}".format(string))
# Pick the single- or multi-format parser: -f with nargs='+' produces a list
# when given on the CLI, while its default is a plain string.
convert_ts = convert_ts_multi if isinstance(args.ts_format, list) else convert_ts_single

# MAIN PROGRAM: stream lines, convert timestamp columns, filter and reprint.
ctr = 0
for line in fileinput.input(args.input):
    next_line = False
    o_line = line  # keep the raw line for error reporting
    ctr += 1
    line = line.rstrip().split(args.separator)
    # Convert the columns in args.ts_column to epoch timestamps.
    try:
        for column in args.ts_column:
            line[column] = convert_ts(line[column], args.ts_format)
            # Only the main timestamp column decides whether the line is
            # filtered out by --start/--end.
            if column == args.ts_column[args.main_ts] and not timestamp_in_range(line[column], args.start, args.end):
                next_line = True
            elif args.ms:
                line[column] = line[column] * 1000
    except Exception as e:
        # BUG FIX: logging.warn is a deprecated alias removed in Python 3.13;
        # use logging.warning.
        # NOTE(review): referencing line[column] here re-raises if the
        # original failure was an out-of-range column index — confirm
        # whether that case can occur with real inputs.
        logging.warning('Error converting column {} ({}) from line number {}. Ignoring entire line: {}'.format(column, line[column], ctr, o_line))
        continue
    if next_line:
        continue
    # Remove the columns the user wants to exclude and convert to string.
    line = [str(c) for i, c in enumerate(line) if column_is_included(i, args.include, args.exclude)]
    line = args.separator.join(line)
    print(line, file=args.output)
| true |
03f45d4fa06b404735260ec7f880fbc6062f8bc9 | Python | johncoleman83/bootcampschool-higher_level_programming | /0x11-python-network_1/102-starwars.py | UTF-8 | 1,502 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
"""
102-starwars.py
"""
import requests
import sys
def request_to_star_wars(the_url, payload):
    """Search SWAPI people with *payload* and print each match's films.

    Follows the paginated `next` links, then resolves every film URL to its
    title, and finally prints the result count, each character name and an
    indented list of film titles.
    """
    res = requests.get(the_url, params=payload).json()
    results_dict = {}
    name_list = []
    count = res.get('count')
    # `count` is None when the API returns no count field; the original
    # `count > 0` comparison crashed in that case.
    if count:
        # Single loop over all result pages (the original duplicated the
        # per-page processing before and inside the pagination loop).
        while True:
            for character in res.get('results'):
                name = character.get('name')
                name_list.append(name)
                results_dict[name] = character.get('films')
            next_page = res.get('next')
            if not next_page:
                break
            res = requests.get(next_page).json()
    # Replace each stored film URL with the film's title.
    for k, v in results_dict.items():
        films_list = []
        for film in v:
            film_res = requests.get(film).json()
            films_list.append(film_res.get('title'))
        results_dict[k] = films_list
    print("Number of results: {}".format(count))
    for name in name_list:
        print(name)
        for title in results_dict[name]:
            print('\t{}'.format(title))
if __name__ == "__main__":
"""MAIN APP"""
the_url = "https://swapi.co/api/people/"
payload = {'search': sys.argv[1]}
request_to_star_wars(the_url, payload)
| true |
f39c5835db8d7f0bc1b882e35a898502052c4285 | Python | arivolispark/datastructuresandalgorithms | /leetcode/30_day_leetcoding_challenge/202007/20200701_arranging_coins/arranging_coins_solution_1.py | UTF-8 | 3,268 | 4.0625 | 4 | [] | no_license | """
Title: Arranging Coins
You have a total of n coins that you want to form in a
staircase shape, where every k-th row must have exactly k coins.
Given n, find the total number of full staircase rows that
can be formed.
n is a non-negative integer and fits within the range of
a 32-bit signed integer.
Example 1:
n = 5
The coins can form the following rows:
¤
¤ ¤
¤ ¤
Because the 3rd row is incomplete, we return 2.
Example 2:
n = 8
The coins can form the following rows:
¤
¤ ¤
¤ ¤ ¤
¤ ¤
Because the 4th row is incomplete, we return 3.
"""
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return the number of complete staircase rows formed by n coins.

        Row k holds k coins, so k full rows need k*(k+1)/2 coins; we want
        the largest k with k*(k+1)/2 <= n.  Solving the quadratic gives
        k = floor((sqrt(8n + 1) - 1) / 2), computed with exact integer
        arithmetic so it is O(1) and precise for arbitrarily large n
        (the original loop was O(sqrt(n))).
        """
        from math import isqrt  # local import keeps the module self-contained
        return (isqrt(8 * n + 1) - 1) // 2
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('{} got: {} expected: {}'.format(prefix, repr(got), repr(expected)))
if __name__ == "__main__":
solution = Solution()
test(solution.arrangeCoins(0), 0)
test(solution.arrangeCoins(1), 1)
test(solution.arrangeCoins(2), 1)
test(solution.arrangeCoins(3), 2)
test(solution.arrangeCoins(4), 2)
test(solution.arrangeCoins(5), 2)
test(solution.arrangeCoins(6), 3)
test(solution.arrangeCoins(7), 3)
test(solution.arrangeCoins(8), 3)
test(solution.arrangeCoins(9), 3)
test(solution.arrangeCoins(10), 4)
test(solution.arrangeCoins(11), 4)
test(solution.arrangeCoins(12), 4)
test(solution.arrangeCoins(13), 4)
test(solution.arrangeCoins(14), 4)
test(solution.arrangeCoins(15), 5)
test(solution.arrangeCoins(16), 5)
test(solution.arrangeCoins(17), 5)
test(solution.arrangeCoins(18), 5)
test(solution.arrangeCoins(19), 5)
test(solution.arrangeCoins(20), 5)
test(solution.arrangeCoins(21), 6)
test(solution.arrangeCoins(22), 6)
test(solution.arrangeCoins(23), 6)
test(solution.arrangeCoins(24), 6)
test(solution.arrangeCoins(25), 6)
test(solution.arrangeCoins(26), 6)
test(solution.arrangeCoins(27), 6)
test(solution.arrangeCoins(28), 7)
test(solution.arrangeCoins(29), 7)
test(solution.arrangeCoins(30), 7)
test(solution.arrangeCoins(31), 7)
test(solution.arrangeCoins(32), 7)
test(solution.arrangeCoins(33), 7)
test(solution.arrangeCoins(34), 7)
test(solution.arrangeCoins(35), 7)
test(solution.arrangeCoins(36), 8)
test(solution.arrangeCoins(37), 8)
test(solution.arrangeCoins(38), 8)
test(solution.arrangeCoins(39), 8)
test(solution.arrangeCoins(40), 8)
test(solution.arrangeCoins(41), 8)
test(solution.arrangeCoins(42), 8)
test(solution.arrangeCoins(43), 8)
test(solution.arrangeCoins(44), 8)
test(solution.arrangeCoins(45), 9)
test(solution.arrangeCoins(46), 9)
test(solution.arrangeCoins(47), 9)
test(solution.arrangeCoins(48), 9)
test(solution.arrangeCoins(49), 9)
test(solution.arrangeCoins(50), 9)
test(solution.arrangeCoins(51), 9)
test(solution.arrangeCoins(52), 9)
test(solution.arrangeCoins(53), 9)
test(solution.arrangeCoins(54), 9)
test(solution.arrangeCoins(55), 10)
| true |
459779467972ac31d0d87dc5df52e30c34b78e8a | Python | username014/R.o.b.o.t | /main.py | UTF-8 | 1,283 | 3.21875 | 3 | [] | no_license | from tkinter import *
root = Tk()
root.title("R.o.b.o.t")

WIDTH = 800
HEIGHT = 600
Z = 1
IN_GAME = True  # NOTE(review): defined but never read below — confirm intent

c = Canvas(root, width=WIDTH, height=HEIGHT, bg="#003300")
c.grid()
c.focus_set()


class Object(object):
    """A white rectangle on the shared canvas, anchored at (x, y)."""

    def __init__(self, x, y):
        # NOTE(review): both corners are (x, y), so the rectangle starts
        # with zero size until it is moved/resized elsewhere.
        self.instance = c.create_rectangle(x, y,
                                           x, y,
                                           fill="white")


class Robot(object):
    """Snake-style chain of Objects steered with the arrow keys."""

    def __init__(self, objects):
        self.objects = objects
        self.mapping = {"Down": (0, 1), "Up": (0, -1), "Left": (-1, 0), "Right": (1, 0)}
        self.vector = self.mapping["Right"]

    def move(self):
        # Shift every segment into the position of the segment after it,
        # then advance the head by the current direction vector.
        for index in range(len(self.objects) - 1):
            segment = self.objects[index].instance
            x1, y1, x2, y2 = c.coords(self.objects[index + 1].instance)
            c.coords(segment, x1, y1, x2, y2)
        x1, y1, x2, y2 = c.coords(self.objects[-2].instance)
        c.coords(self.objects[-1].instance,
                 x1 + self.vector[0] * Z,
                 y1 + self.vector[1] * Z,
                 x2 + self.vector[0] * Z,
                 y2 + self.vector[1] * Z)

    def change_direction(self, event):
        if event.keysym in self.mapping:
            self.vector = self.mapping[event.keysym]


objects = [Object(Z, Z)]
s = Robot(objects)

# BUG FIX: mainloop() blocks until the window is closed, so it must run
# *after* all widgets and game objects exist.  The original called it right
# after Tk(), which froze the script before the canvas was created and then
# crashed on the destroyed root once the window closed.
root.mainloop()
| true |
693c157230918d78bbecbf2cc05a05b5478b7e8c | Python | janeon/automatedLabHelper | /testFiles/match/match69.py | UTF-8 | 1,046 | 3.578125 | 4 | [] | no_license |
def bestSeq(p, s):
    """Slide *s* along *p* and return (fewest mismatches, best start offset).

    Compares s against every alignment of p and keeps the position with the
    lowest Hamming mismatch count (ties keep the earliest position).
    """
    bestStart = 0
    bestMis = len(s)
    # BUG FIX: valid start offsets run 0 .. len(p)-len(s) inclusive; the
    # original range(0, len(p)-len(s)) skipped the final alignment, so an
    # exact match at the end of p was never found.
    for j in range(0, len(p) - len(s) + 1):
        mismatch = 0
        for i in range(0, len(s)):
            if s[i] != p[i + j]:
                mismatch = mismatch + 1
        if mismatch < bestMis:
            bestStart = j
            bestMis = mismatch
    return (bestMis, bestStart)
def main():
    """Prompt for a sequence file and report each sequence's best alignment.

    File layout (inferred from the reads below): line 1 is the protein,
    the remaining lines are sequences to align against it.
    """
    x = True
    # Keep prompting until a file opens successfully.
    while x == True:
        try:
            fileName = input("Please enter a file name: ")
            inputFile = open(fileName,"r")
            x = False
            protein = inputFile.readline()
            # NOTE(review): this second readline's value is immediately
            # shadowed by the loop variable below, so the file's second
            # line is never scored — confirm whether that is intentional.
            sequence = inputFile.readline()
            sequenceList = inputFile.readlines()
            num = 1
            for sequence in sequenceList:
                # [:-1] strips the trailing newline from both strings.
                bestMis,bestStart=bestSeq(protein[:-1],sequence[:-1])
                print("Sequence", num, "has", bestMis, "errors at position", bestStart)
                num = num+1
        except IOError:
            print("Looks like you entered the wrong file name, Try again!")
main()
| true |
5e01a7eb90930a3e0fbf170e50e5dd19a9d8fdd2 | Python | CIDARLAB/MIT-BroadFoundry | /dynamic_circuits/DynamicCircuits/Examples/simple_activator_repressor_circuit.py | UTF-8 | 1,463 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 15:30:30 2015
@author: Alex Lim
"""
#Circuit Models: Simple Activator and Repressor
# cell transcription circuit modeling
import numpy as np
import matplotlib.pyplot as plt
import inputs
from scipy.integrate import odeint
plt.ion()
#Simple cases where K = 1, n = 1 for Hill Equations
xMax = 200  # simulated time span / x-axis limit
yMax = 55  # y-axis limit for both concentration plots
B = 1 # rate of production of Y
a = 0.02 # rate of degradation/dilution
Yst = B/a # steady state
Tr = np.log(2)/a # response time (computed for reference; not used below)
init = [0,Yst] # initial concentrations
t = np.linspace(0, xMax, 1000) # time grid (x-axis)
# solve the system of DEs. X is the input signal given by 'inputs' file
def f(ini, t):
    """ODE right-hand side: activator (Y) and repressor (Z) driven by X.

    ini[0] is Y, ini[1] is Z; returns [dY/dt, dZ/dt] using Hill terms
    with K = 1, n = 1.
    """
    Xi = inputs.linInput(t,0,1)
    Yi = ini[0]
    Zi = ini[1]
    # the model equations: activation X/(1+X), repression 1/(1+X)
    f0 = B*Xi/(1+Xi) - a*Yi
    f1 = B/(1+Xi) - a*Zi
    return [f0,f1]
# solve the DE
soln = odeint(f, init, t)
# NOTE(review): X is computed but never used below, and its amplitude (10)
# differs from the amplitude 1 used inside f — confirm which was intended.
X =inputs.linInput(t,0,10)
Y = soln[:,0]
Z = soln[:,1]
# Simple Activator, K=1, n=1
plt.figure()
plt.axis([0, xMax, 0, yMax])
plt.plot(t,Y)
# Dashed red line marks the analytic steady state B/a.
plt.axhline(y=Yst, color='r', ls='dashed')
plt.title('Simple Activator')
plt.xlabel('Time')
plt.ylabel('Concentration Y')
# Simple Repressor, K=1, n=1
plt.figure()
plt.axis([0, xMax, 0, yMax])
plt.plot(t,Z)
plt.axhline(y=Yst, color='r', ls='dashed')
plt.title('Simple Repressor')
plt.xlabel('Time')
plt.ylabel('Concentration Z')
| true |
def solution(n, a, b):
    """Return the tournament round in which players a and b can first meet.

    Each round pairs (1,2), (3,4), ... and the winner advances with number
    ceil(x/2); the answer is how many halvings until both numbers coincide.
    (n, the bracket size, is accepted for the problem's signature but the
    result does not depend on it.)
    """
    rounds = 0
    while a != b:
        a, b = (a + 1) // 2, (b + 1) // 2
        rounds += 1
    return rounds

# Example:
# print(solution(8, 4, 7))  # -> 3
243b9298c839a85d8202e3f4e1972ecf70fbf458 | Python | SeisSol/SeisSol | /postprocessing/science/compute_diff_seissol_data.py | UTF-8 | 8,453 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
import h5py
import numpy as np
import argparse
import os
import seissolxdmf as sx
import seissolxdmfwriter as sw
# These 2 latter modules are on pypi (e.g. pip install seissolxdmf)
class seissolxdmfExtended(sx.seissolxdmf):
    # Thin wrapper that makes ReadTimeStep total: files without a stored
    # output interval yield 0.0 instead of raising.
    def ReadTimeStep(self):
        """Return the file's output time step, or 0.0 when undefined."""
        try:
            return super().ReadTimeStep()
        except NameError:
            # NOTE(review): catching NameError presumably mirrors how the
            # installed seissolxdmf fails when no time step is stored —
            # confirm against the library version in use.
            return 0.0
def read_reshape2d(sx, dataname):
    """Read a seissol dataset, promoting a 1-D result to shape (1, n).

    Guarantees the caller always sees a 2-D (time step, cell) array, even
    for outputs that store only a single time stamp.
    """
    data = sx.ReadData(dataname)
    if data.ndim == 1:
        data = data[np.newaxis, :]
    return data
def fuzzysort(arr, idx, dim=0, tol=1e-6):
    """
    return indexes of sorted points robust to small perturbations of individual components.
    https://stackoverflow.com/questions/19072110/numpy-np-lexsort-with-fuzzy-tolerant-comparisons
    note that I added dim<arr.shape[0]-1 in some if statement (else it will crash sometimes)
    """
    # Sort the candidate indices by the current coordinate (row `dim` of arr).
    arrd = arr[dim]
    # NOTE(review): srtdidx[0] raises IndexError and `j` below is unbound
    # when idx is empty or has a single element — callers appear to always
    # pass >= 2 vertices; confirm.
    srtdidx = sorted(idx, key=arrd.__getitem__)
    i, ix = 0, srtdidx[0]
    for j, jx in enumerate(srtdidx[1:], start=1):
        if arrd[jx] - arrd[ix] >= tol:
            # A group of near-equal values [i:j] ended; order it by the
            # next coordinate before moving on.
            if j - i > 1 and dim < arr.shape[0] - 1:
                srtdidx[i:j] = fuzzysort(arr, srtdidx[i:j], dim + 1, tol)
            i, ix = j, jx
    # Handle the trailing group of near-equal values.
    if i != j and dim < arr.shape[0] - 1:
        srtdidx[i:] = fuzzysort(arr, srtdidx[i:], dim + 1, tol)
    return srtdidx
def lookup_sorted_geom(geom, atol):
    """Return vertex indices sorted by x, then y, then z, plus the inverse map.

    ``order[k]`` is the original index of the k-th sorted vertex, and
    ``inverse[old] == new`` maps an original vertex index to its position
    in the sorted geometry.
    """
    order = fuzzysort(geom.T, list(range(geom.shape[0])), tol=atol)
    # Build the inverse permutation directly.
    inverse = np.zeros_like(order)
    for new_pos, old_pos in enumerate(order):
        inverse[old_pos] = new_pos
    return order, inverse
def read_geom_connect(sx):
    """Fetch the (geometry, connectivity) pair from a seissolxdmf reader."""
    geometry = sx.ReadGeometry()
    connectivity = sx.ReadConnect()
    return geometry, connectivity
def return_sorted_geom_connect(sx, atol):
    """sort geom array and reindex connect array to match the new geom array"""
    geom, connect = read_geom_connect(sx)
    nv = geom.shape[0]
    # Merge vertices closer than atol; prefer pymesh, fall back to trimesh.
    try:
        import pymesh

        geom, connect, inf = pymesh.remove_duplicated_vertices_raw(
            geom, connect, tol=atol
        )
        print(f"removed {inf['num_vertex_merged']} duplicates out of {nv}")
    except ModuleNotFoundError:
        print("pymesh not found, trying trimesh...")
        import trimesh

        trimesh.tol.merge = atol
        mesh = trimesh.Trimesh(geom, connect)
        mesh.merge_vertices()
        geom = mesh.vertices
        connect = mesh.faces
        print(f"removed {nv-geom.shape[0]} duplicates out of {nv}")
    # Reorder vertices deterministically (fuzzy lexicographic sort) and
    # rewrite the connectivity through the inverse permutation.
    ind, ind_inv = lookup_sorted_geom(geom, atol)
    geom = geom[ind, :]
    connect = np.array([ind_inv[x] for x in connect.flatten()]).reshape(connect.shape)
    # sort along line (then we can use multidim_intersect)
    connect = np.sort(connect, axis=1)
    return geom, connect
def multidim_intersect(arr1, arr2):
    """find indexes of same triangles in 2 connect arrays
    (associated with the same geom array)
    generate 1D arrays of tuples and use numpy function
    https://stackoverflow.com/questions/9269681/intersection-of-2d-numpy-ndarrays
    """
    # View each row as one structured scalar so intersect1d compares whole
    # triangles instead of individual vertex indices.
    arr1_view = arr1.view([("", arr1.dtype)] * arr1.shape[1])
    arr2_view = arr2.view([("", arr2.dtype)] * arr2.shape[1])
    intersected, ind1, ind2 = np.intersect1d(arr1_view, arr2_view, return_indices=True)
    ni, n1, n2 = intersected.shape[0], arr1.shape[0], arr2.shape[0]
    print(
        f"{ni} faces in common, n faces connect 1:{n1}, 2:{n2} (diff: {n1-ni}, {n2-ni})"
    )
    return ind1, ind2
def same_geometry(sx1, sx2, atol):
    """True when both files have the same vertex count and (close) coordinates."""
    coords_a = sx1.ReadGeometry()
    coords_b = sx2.ReadGeometry()
    if coords_a.shape[0] == coords_b.shape[0]:
        return np.all(np.isclose(coords_a, coords_b, rtol=1e-3, atol=atol))
    return False
def compute_areas(geom, connect):
    """Area of each triangle: half the norm of the edge cross product."""
    verts = geom[connect, :]
    edge1 = verts[:, 1, :] - verts[:, 0, :]
    edge2 = verts[:, 2, :] - verts[:, 0, :]
    return np.linalg.norm(np.cross(edge1, edge2), axis=1) / 2.0
def l1_norm(areas, q):
    """Area-weighted L1 norm of the cell-wise quantity q."""
    weighted = areas * np.abs(q)
    return weighted.sum()
def l2_norm(areas, q):
    """Area-weighted sum of squares of the cell-wise quantity q."""
    weighted = areas * np.square(q)
    return weighted.sum()
# CLI: two xdmf files to difference, the time steps and variables to use,
# and the vertex-merge tolerance.
parser = argparse.ArgumentParser(
    description="make difference between 2 (paraview) output files: f2-f1. \
The output must be from the same mesh, but the partionning may differ."
)
parser.add_argument("xdmf_filename1", help="filename1")
parser.add_argument("xdmf_filename2", help="filename2")
parser.add_argument(
    "--idt",
    nargs="+",
    required=True,
    help="list of time step to differenciate (1st = 0); -1 = all",
    type=int,
)
parser.add_argument(
    "--Data",
    nargs="+",
    required=True,
    metavar=("variable"),
    help="Data to differenciate (example SRs); all for all stored quantities",
)
parser.add_argument(
    "--atol",
    nargs=1,
    metavar=("atol"),
    help="absolute tolerance to merge vertices",
    type=float,
    default=[1e-3],
)
args = parser.parse_args()
atol = args.atol[0]
sx1 = seissolxdmfExtended(args.xdmf_filename1)
sx2 = seissolxdmfExtended(args.xdmf_filename2)
# When sampling rates differ by an integer factor, subsample the finer file
# (step1/step2) so both series line up in time.
dt1 = sx1.ReadTimeStep()
dt2 = sx2.ReadTimeStep()
step1, step2 = 1, 1
if min(dt1, dt2) > 0:
    # allows comparison a differently sampled data
    eps = 1e-6
    if abs(round(dt2 / dt1) - dt2 / dt1) < eps:
        step1 = round(dt2 / dt1)
    if abs(round(dt1 / dt2) - dt1 / dt2) < eps:
        step2 = round(dt1 / dt2)
    if abs(dt2 - dt1) > eps and (max(step1, step2) == 1):
        raise ValueError("dt1 != dt2 and not multiples")
# If both meshes are already indexed identically, skip the expensive
# dedup/sort/intersection and index with full slices.
same_geom = same_geometry(sx1, sx2, atol)
if same_geom:
    print("same indexing detected, no need to reindex arrays")
    geom1, connect1 = read_geom_connect(sx1)
    geom2, connect2 = read_geom_connect(sx2)
    ind1 = slice(None, None, None)
    ind2 = slice(None, None, None)
else:
    geom1, connect1 = return_sorted_geom_connect(sx1, atol)
    geom2, connect2 = return_sorted_geom_connect(sx2, atol)
    if not np.all(np.isclose(geom1, geom2, rtol=1e-3, atol=atol)):
        raise ValueError("geometry arrays differ")
    # Keep only faces present in both files, in a matching order.
    ind1, ind2 = multidim_intersect(connect1, connect2)
    connect1 = connect1[ind1, :]
areas = compute_areas(geom1, connect1)
# --idt -1 expands to every time step available in both files.
if args.idt[0] == -1:
    args.idt = list(range(0, min(sx1.ndt // step1, sx2.ndt // step2)))
aData = []
# --Data all: take the variables common to both xdmf trees, minus
# bookkeeping arrays.
if args.Data == ["all"]:
    variable_names = set()
    for elem in sx1.tree.iter():
        if elem.tag == "Attribute":
            variable_names.add(elem.get("Name"))
    variable_names2 = set()
    for elem in sx2.tree.iter():
        if elem.tag == "Attribute":
            variable_names2.add(elem.get("Name"))
    # return only variables in common
    variable_names = variable_names.intersection(variable_names2)
    for to_remove in ["partition", "locationFlag"]:
        if to_remove in variable_names:
            variable_names.remove(to_remove)
else:
    variable_names = args.Data
print("#idt relative_error_l2 relative_error_l1 min_abs_error max_abs_error")
for dataname in variable_names:
    print(dataname)
    myData1 = read_reshape2d(sx1, dataname)
    myData2 = read_reshape2d(sx2, dataname)
    ndt = min(myData1.shape[0] // step1, myData2.shape[0] // step2)
    # Difference f1 - f2 on the aligned time steps and common faces.
    myData = (
        myData1[0 : ndt * step1 : step1, ind1] - myData2[0 : ndt * step2 : step2, ind2]
    )
    for idt in args.idt:
        if idt < ndt:
            # Relative errors are normalized by file 1's norms; NaN when
            # the reference norm is zero.
            ref_norm1 = l1_norm(areas, myData1[idt * step1, ind1])
            ref_norm2 = l2_norm(areas, myData1[idt * step1, ind1])
            relative_error_l1 = (
                l1_norm(areas, myData[idt, :]) / ref_norm1 if ref_norm1 else np.nan
            )
            relative_error_l2 = (
                l2_norm(areas, myData[idt, :]) / ref_norm2 if ref_norm2 else np.nan
            )
            min_error, max_error = np.amin(myData[idt, :]), np.amax(myData[idt, :])
            print(
                f"{idt} {relative_error_l2} {relative_error_l1} {min_error} {max_error}"
            )
        else:
            # NOTE(review): this pops *by index* while iterating args.idt,
            # which both misinterprets idt as a position and mutates the
            # list mid-iteration — likely skips entries or raises; confirm
            # the intended behavior is args.idt.remove(idt) on a copy.
            print(f"removing idt={idt}>{ndt} from args.idt")
            args.idt.pop(idt)
    aData.append(myData)
# Write the per-variable differences as a new seissol output set.
prefix, ext = os.path.splitext(args.xdmf_filename1)
fname = f"diff_{os.path.basename(prefix)}"
out_names = ["diff_" + name for name in variable_names]
sw.write_seissol_output(
    fname, geom1, connect1, out_names, aData, max(dt1, dt2), args.idt
)
| true |
8037d58cb99fe86441940dc248c625f14fc5d582 | Python | Dashlane/uprime | /tests/uprime_tests.py | UTF-8 | 4,021 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from uprime import Uprime
import pandas as pd
# Load the shared CSV fixture from the directory containing this test file,
# so the tests work regardless of the current working directory.
directory = os.path.dirname(os.path.realpath(__file__))
relative_file_path = 'uprime_test_data.csv'
full_path = os.path.join(directory, relative_file_path)
df = pd.read_csv(full_path)
def ooc_stats(chart_dataframe):
    """Count flagged rows for each out-of-control indicator column."""
    flag_columns = ['ooc', 'ooc_low', 'ooc_high', 'potential_alert',
                    'suppress_realert', 'alert']
    return {name: len(chart_dataframe[chart_dataframe[name] == True])
            for name in flag_columns}
class UprimeTester(unittest.TestCase):
    """Unit tests for the Uprime u'-chart implementation.

    Each test builds a chart from the shared CSV fixture `df` and checks
    the resulting frame against hand-verified expectations.
    """

    def setUp(self):
        pass

    def test_rolling_periods(self):
        # A rolling baseline drops the first `periods` rows plus any rows
        # excluded for missing data.
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='rolling', periods=41)
        up_df = up.frame()
        self.assertEqual(len(up_df), len(df) - up.periods - len(up.index_na_list))

    def test_initial_periods(self):
        # An initial baseline keeps every usable row.
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='initial', periods=22)
        up_df = up.frame()
        self.assertEqual(len(up_df), len(df) - len(up.index_na_list))

    def test_all_periods(self):
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='all', periods=1000)
        up_df = up.frame()
        self.assertEqual(len(up_df), len(df) - len(up.index_na_list))

    def test_exclusions(self):
        # Rows with missing data in the fixture must be excluded by date.
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='all')
        up.frame()
        self.assertEqual(['2019-06-10', '2019-06-12', '2019-06-14', '2019-06-16', '2019-06-27', '2019-06-28'], up.index_na_list)

    def test_sort(self):
        # The chart frame must come back sorted by its index.
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size')
        up.frame()
        pd.testing.assert_frame_equal(up.chart_df, up.chart_df.sort_index())

    def test_sd_sensitivity(self):
        # Control-limit width must scale linearly with sd_sensitivity.
        up_adjusted_sd_sensitivity = Uprime(df, 'date', 'occurrences', 'subgroup_size', sd_sensitivity=4.23)
        up_adjusted_sd_sensitivity.frame()
        up = Uprime(df, 'date', 'occurrences', 'subgroup_size')
        up.frame()
        pd.testing.assert_series_equal((up.chart_df['ucl'] - up.chart_df['ubar'])*(4.23/3.0),
                                       (up_adjusted_sd_sensitivity.chart_df['ucl'] - up_adjusted_sd_sensitivity.chart_df['ubar']))
        pd.testing.assert_series_equal((up.chart_df['ubar'] - up.chart_df['lcl'])*(4.23/3.0),
                                       (up_adjusted_sd_sensitivity.chart_df['ubar'] - up_adjusted_sd_sensitivity.chart_df['lcl']))

    def test_ooc(self):
        # All three baseline methods must flag the same out-of-control rows.
        up_rolling = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='rolling', periods=30, realert_interval=4, ooc_rule='high')
        up_rolling.frame()
        ooc_rolling = ooc_stats(up_rolling.chart_df)

        up_initial = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='initial', periods=30, realert_interval=4, ooc_rule='high')
        up_initial.frame()
        ooc_initial = ooc_stats(up_initial.chart_df)

        up_all = Uprime(df, 'date', 'occurrences', 'subgroup_size', method='all', realert_interval=4, ooc_rule='high')
        up_all.frame()
        ooc_all = ooc_stats(up_all.chart_df)

        def ooc_assertions(oocs):
            # BUG FIX: the original passed a third positional argument to
            # assertEqual, which unittest treats as the failure *message*,
            # so `ooc == ooc_low + ooc_high` was never actually checked.
            # Split the intended chained comparison into two assertions.
            self.assertEqual(9, oocs['ooc'])
            self.assertEqual(oocs['ooc'], oocs['ooc_low'] + oocs['ooc_high'])
            self.assertEqual(8, oocs['potential_alert'])
            self.assertEqual(6, oocs['suppress_realert'])
            self.assertEqual(2, oocs['alert'])

        ooc_assertions(ooc_rolling)
        ooc_assertions(ooc_initial)
        ooc_assertions(ooc_all)


if __name__ == '__main__':
    unittest.main()
b777c26ece53d4969966026a25ab74b8a9e2bd3a | Python | defgsus/solstice | /dev/merge_time_zones.py | UTF-8 | 1,393 | 2.75 | 3 | [] | no_license | from __future__ import print_function
import dbf
# Load the Natural Earth time-zone attribute table.
t = dbf.Table("ne_10m_time_zones.dbf")
# zones maps UTC offset (float, from the record's `name` field) to the
# list of record/polygon indices sharing that offset.
zones = dict()
k = 0
for i in t:
    key = float(i.name);
    try:
        a = zones[key]
        a.append(k)
    except KeyError:
        a = [k]
        zones[key] = a
    k += 1
print("num: " + str(len(zones)))
# zoneSorted: offsets in ascending order; zoneIndex: offset -> rank.
zoneSorted = sorted(zones)
zoneIndex = dict()
k = 0
for z in zoneSorted:
    zoneIndex[z] = k
    k += 1;
def printMerged():
    """Dump each offset followed by the polygon indices merged into it."""
    for offset in zoneSorted:
        print("{} {}".format(offset, zones[offset]))
def printCode():
    """Emit a C-style if-chain mapping polygon index to zone offset/rank."""
    print("//---")
    for z in zoneSorted:
        checks = " || ".join("idx == " + str(i) for i in zones[z])
        print("if (" + checks + ") { name = " + str(z) + "; timeZone = " + str(zoneIndex[z]) + "; }")
def utcString(z):
    """Format a float UTC offset as 'UTC+/-H[:MM]' ('UTC(0)' for zero)."""
    if z == 0:
        return "UTC(0)"
    hours = int(z)
    label = "UTC" + ("+" if z >= 0 else "") + str(hours)
    if hours == z:
        return label
    minutes = int((abs(z) - abs(hours)) * 60)
    return label + ":" + str(minutes)
def printHtmlSelect():
    """Print an HTML <select> listing every zone as a UTC+/-x option."""
    markup = ["<select>"]
    for z in zoneSorted:
        markup.append("<option>" + utcString(z) + "</option>")
    markup.append("</select>")
    for line in markup:
        print(line)
def printFloatArray():
    """Print the sorted offsets as a JavaScript array literal."""
    print("times = [" + ", ".join(str(z) for z in zoneSorted) + "];")

# Alternative outputs, kept for reference:
# printMerged()
# printCode()
# printHtmlSelect()
printFloatArray()
| true |
1c43d056236f44e24d0693455f9296e545e9de99 | Python | rubenros1795/dhl-ads | /extract_occupations.py | UTF-8 | 4,369 | 2.640625 | 3 | [] | no_license | import os, glob, pandas as pd
import string, re
import numpy as np
from collections import Counter
import numbers
from string import ascii_lowercase
import pandas as pd
import time
from operator import itemgetter
from functions import ExtractOccupations
from functions import hasNumbers
from functions import ExtractQual
from functions import GetNum
from functions import NumberCandidateClass
from functions import ExtractNum
from functions import NonNumbClass
from functions import NormalizeNumbers
from functions import Subset
import pickle
from nltk import ngrams
from flashtext import KeywordProcessor
from tqdm import tqdm
path_base = os.getcwd()
##### Import resources
# All resource word lists live under ./resources relative to the launch dir.
path_resources = path_base + '//resources'
os.chdir(path_resources)
## Create List of Occupations
with open('list_occupations.txt', encoding = 'utf-8') as f:
    list_words = list(set(f.read().splitlines()))
with open('stopwords-nl.txt', encoding = 'utf-8') as f:
    stopwords = f.read().splitlines()
with open('qualitative_indicators.txt', encoding = 'utf-8') as f:
    qual_words = f.read().splitlines()
with open('wage_indicators.txt', encoding = 'utf-8') as f:
    wage_words = f.read().splitlines()
with open('negatives.txt', encoding = 'latin1') as f:
    negatives = f.read().splitlines()
print("resources imported: occupations, stopwords, qualitative indicators, wage indicators & negatives")
############# DATA PROCESSING #####
# After importing the resources, we loop over de data and extract occupations for every ad
# Then, we subset the dataframe and discard all ads that do not contain advertisements
# set path_data to 'data' folder where raw csvs are stored
path_data = path_base + '/data'
os.chdir(path_data)
list_csv = glob.glob('*.csv')
for csv in list_csv:
    df = pd.read_csv(csv,sep = '\t')
    # NOTE(review): sampling 10 rows looks like leftover debugging — confirm
    # before running on the full corpus.
    df = df.sample(n=10)
    df = df.reset_index(drop=True)
    lines_count = len(df)
    print("dataframe imported: {} lines (ads)".format(lines_count))
    ## Clean Raw OCR and split, bind again for subsetting, subset based on job ad indicators
    df = Subset(df)
    ## Extract Occupations from ads and subset only ads w/occupations
    vocab = set([item for sublist in df['clean'] for item in sublist])
    # NOTE(review): list_words is narrowed in place here, so every later CSV
    # in the loop sees an already-reduced keyword list — confirm intent.
    list_words = [w for w in list_words if w not in negatives + stopwords]
    list_words = [w for w in list_words if len(w.split(' ')) == 1]
    list_words = list(set(list_words).intersection(vocab))
    print('{} keywords used'.format(len(list_words)))
    df['oc'] = ""
    for c,i in tqdm(enumerate(df['clean'])):
        # NOTE(review): chained assignment (df['oc'][c] = ...) triggers
        # pandas' SettingWithCopy pitfall; df.loc[c, 'oc'] is the safe form.
        df['oc'][c] = ExtractOccupations(i,list_words)
    df = df[(df['oc'] != "na")]
    df = df[df.oc.map(len)>0]
    df = df.reset_index(drop=True)
    print("occupation-containing ads subsetted: {} ads = {}% of subsetted ads".format(len(df),round(len(df) / lines_count * 100, 3)))
    #df.to_csv('test.csv',index=False)
    ## Loop over subsetted ads and extract wage information
    ## The windows are stored in a new dataframe
    lid = []
    ldate = []
    limg = []
    lpapt = []
    lwindow = []
    loccupation = []
    lqual = []
    lquan = []
    for c,list_oc_ind in enumerate(df['oc']):
        for c2,occupation_index in enumerate(list_oc_ind):
            id = df['id'][c].replace(":","") + '_' + str(c2)
            string = df['clean'][c]
            # NOTE(review): processed_occupations is re-created on every
            # inner iteration, so the membership check below can never
            # succeed and duplicates are not actually skipped — confirm it
            # should be initialized once per ad (outside this loop).
            processed_occupations = []
            if occupation_index in processed_occupations:
                continue
            processed_occupations.append(occupation_index)
            # occupation_index encodes "<word>_<token index>".
            occupation = occupation_index.split('_')[0]
            index = int(occupation_index.split('_')[1])
            # Context window: 12 tokens before to 40 tokens after the hit.
            window = string[index-12:index+40]
            qual = ExtractQual(" ".join(window), qual_words)
            quan = ExtractNum(" ".join(window), qual_words)
            lid.append(id)
            ldate.append(df['date'][c])
            limg.append(df['image_url'][c])
            lpapt.append(df['paper_title'][c])
            lwindow.append(" ".join(window))
            loccupation.append(occupation)
            lqual.append(qual)
            lquan.append(quan)
    df_windows = {"id":lid,"date":ldate,"image_url":limg,"paper_title":lpapt,"window":lwindow,'occupation':loccupation, "extracted_qual":lqual,"extracted_quan":lquan}
    df_windows = pd.DataFrame(df_windows)
    fn = csv[:-4] + "_windows.csv"
    df_windows.to_csv(fn,index=False,sep = "\t")
| true |
a4dab5b3cc5974cb4e7cd374bba3b4407dc74967 | Python | lilyfofa/python-exercises | /exercicio103.py | UTF-8 | 634 | 3.875 | 4 | [] | no_license | # def jogador(nome, gols):
# if nome == '':
# nome = '<desconhecido>'
# if gols == '':
# gols = 0
# else:
# int(gols)
# return f'O jogador {nome} fez {gols} gols no campeonato.'
#
#
# player = str(input('Jogador: '))
# goals = str(input('Númro de gols: '))
# print(jogador(player, goals))
def jogador(nome='<desconhecido>', gol=0):
    """Return a summary sentence for a player's goal count.

    Defaults mirror the exercise statement: a missing name reads as
    '<desconhecido>' (unknown) and a missing goal count as 0.
    Fixes the misspelled default '<desconhcido>' (the commented reference
    implementation above uses '<desconhecido>').
    """
    return f'O jogador {nome} fez {gol} gols.'
name = input('Nome: ')
goals = str(input('Número de gols: '))
if goals.isnumeric():
goals = int(goals)
else:
goals = 0
if name == '':
print(jogador(gol=goals))
else:
print(jogador(name, goals)) | true |
f3a7d0f801d0256e6a8480f06ab77d07a6add304 | Python | ShreyasDobhal/FaceLens | /sort_files.py | UTF-8 | 4,541 | 2.6875 | 3 | [] | no_license | from recognize_faces import recognizeFacesInImage as dnn
from knn_train import recognizeFacesInImage as knn
from svm_train import recognizeFacesInImage as svm
from threading import Thread
from shutil import copyfile
import os
from glob import glob
from accepted_extensions import IMAGE_FILES
from paths import *
from time import sleep
# Module-level state shared between sortIntoFolders(), startSorting()
# and handleConflictFiles().
foldernames = []  # target folder names to sort images into
currentfolderpath = ''  # source folder path (stored with a trailing '/')
conflictFiles = []  # source paths whose destination file already existed
conflictWithFiles = []  # the pre-existing destination path for each conflict
faceRecognitionMethod = knn  # default recognizer; startSorting() may override
total = 0  # item count of the current progress loop (for percentage display)
files = []  # image files discovered in the source folder
def sortIntoFolders(currentfolder, folders):
    """Record the source folder and target names in module globals, then
    create one sub-directory per target name under the source folder.

    :param currentfolder: folder holding the unsorted images (a trailing
        '/' is appended when missing)
    :param folders: iterable of person/folder names to sort images into
    """
    global foldernames, currentfolderpath
    foldernames = folders
    currentfolderpath = currentfolder
    if currentfolderpath[-1] != '/':
        currentfolderpath += '/'
    for folder in foldernames:
        try:
            os.mkdir(currentfolderpath + folder)
        # Only swallow "already exists"; the original bare except also hid
        # permission and I/O errors behind the same message.
        except FileExistsError:
            print("Folder " + folder + " already exists")
def handleConflictFiles(UI=None):
    """Resolve filename conflicts recorded by startSorting().

    For each (source, destination) pair in conflictFiles/conflictWithFiles,
    ask the user (via the UI, or stdin when UI is None) whether to REPLACE
    the existing destination file or SKIP it.

    :param UI: optional GUI object; when given, progress and conflict images
        are displayed and the answer is polled from it.
    """
    global total
    if UI!=None:
        if UI.isApplicationClosed():
            exit()
        UI.setConflictHandleFrame()
    total = len(conflictFiles)
    count = 0
    userResponse = 'UNDEFINED'
    for i in range(0,total):
        print ("Process %.2f %%"%(count/total*100))
        print ("Waiting for user response")
        if UI!=None:
            if UI.isApplicationClosed():
                exit()
            UI.setProgress(count/total*100)
            UI.setConflictImages(conflictWithFiles[i],conflictFiles[i])
            # Re-ask unless "apply same response to all" is ticked and we
            # already have an answer from a previous conflict.
            if UI.getApplySameResponseValue()==False or userResponse=='UNDEFINED':
                UI.setUserResponse(value=None)
                # Busy-wait (0.1 s polls) until the user clicks a button.
                while UI.getUserResponse()==None:
                    sleep(0.1)
                if UI.getUserResponse()==UI.REPLACE:
                    userResponse='REPLACE'
                else:
                    userResponse='SKIP'
        else:
            # No UI available
            print(conflictWithFiles[i],conflictFiles[i])
            userResponse = input('Enter REPLACE or SKIP : ')
        if userResponse=='REPLACE':
            print("Replacing file")
            copyfile(conflictFiles[i],conflictWithFiles[i])
            # Keep the source file while later conflicts still reference it.
            if conflictFiles[i] not in conflictFiles[i+1:]:
                os.remove(conflictFiles[i])
        else:
            print("Skipping this file")
        count+=1
    print("Finished")
def startSorting(method='knn',allowMultipleCopy=True,UI=None):
    """Sort every image in `currentfolderpath` into the per-person folders.

    Runs the chosen face recognizer over each image; an image is copied into
    the folder of every recognized person (or only the first when
    allowMultipleCopy is False). Name clashes are queued for
    handleConflictFiles(). Terminates the process via exit() when done.

    :param method: 'knn', 'dnn' or 'svm' recognizer selector
    :param allowMultipleCopy: copy into every matching folder, not just one
    :param UI: optional GUI object used for progress/preview updates
    """
    global faceRecognitionMethod
    global total, files, conflictFiles, conflictWithFiles
    if method=='knn':
        faceRecognitionMethod=knn
    elif method=='dnn':
        faceRecognitionMethod=dnn
    elif method=='svm':
        faceRecognitionMethod=svm
    files = []
    conflictFiles = []
    conflictWithFiles = []
    # Collect all files matching the accepted image extensions.
    for ext in IMAGE_FILES:
        files.extend(glob(currentfolderpath+ext))
    total = len(files)
    count = 0
    for filename in files:
        print("Process %.2f %%"%(count/total*100))
        if UI!=None:
            if UI.isApplicationClosed():
                exit()
            UI.setProgress(count/total*100)
            UI.setDisplayImage(filename)
        faces = faceRecognitionMethod(filename)
        imgname = filename[filename.rfind('/')+1:]
        isCopied = False
        for foldername in foldernames:
            if foldername in faces:
                # Save this image there
                if len(glob(currentfolderpath+foldername+'/'+imgname))==0:
                    # Copy the image
                    copyfile(filename, currentfolderpath+foldername+'/'+imgname)
                    isCopied = True
                else:
                    # File with same name already exists
                    conflictFiles.append(filename)
                    conflictWithFiles.append(currentfolderpath+foldername+'/'+imgname)
                if allowMultipleCopy==False:
                    break
        if UI!=None:
            if UI.isApplicationClosed():
                exit()
            UI.setProgress(100)
        if isCopied:
            # File is copied somewhere delete the original file
            os.remove(filename)
        count+=1
    print ("Copying finished")
    print ("Handling conflicts")
    handleConflictFiles(UI)
    print ("Exiting Application")
    if UI!=None:
        if UI.isApplicationClosed():
            exit()
        UI.exitApplication()
        exit()
    exit()
    # NOTE(review): unreachable — exit() above raises SystemExit first.
    print ("Done")
# To run in non UI mode
# sortingOrder = ['Shreyas']
# sortIntoFolders('/home/shreyas/Desktop/FaceRecog/Test/',sortingOrder)
# startSorting(allowMultipleCopy=False)
| true |
88646152fc095f7be7fe69485b9a484ae4d65ff9 | Python | kyleellefsen/UCI_Programming_Club | /2015_05_04_Mouse_Tracking/kyles_solution.py | UTF-8 | 3,941 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:04:53 2015
@author: kyle
"""
from __future__ import division
import matplotlib.pyplot as plt #this is the package that reads in images and can display them
import os
import numpy as np
os.chdir('/Users/kyle/Github/ParkerLab/FLIKA/PyFLIKA2/')
from FLIKA import *
app = QApplication(sys.argv)
initializeMainGui()
image_file_path='/Users/kyle/Github/UCI_Programming_Club/2015_spring_week5/mouse video/' #Replace this with your own path to the image directory
os.chdir(image_file_path) #now we are inside the directory which contains the images
file_names=os.listdir('.') #This creates a list containing all the files in the current directory. The dot '.' represents the current directory
images=[]
# Load every frame and collapse the color channel to grayscale.
for file_name in file_names:
    image=plt.imread(file_name)
    image=np.mean(image,2) # There are 3 dimensions, [x,y,color]. This averages over the third dimension.
    images.append(image)
images=np.array(images)
''' Now we have an array of images. The array has dimensions [t,x,y]. We can pull out the first image and play with it. '''
image=images[0]
plt.gray()
plt.imshow(image)
''' Lets find the background by taking the median image '''
# The per-pixel median over time gives the static background; subtracting
# it leaves the moving mouse, which is darker than the background.
background=np.median(images,0)
background_subtracted=images-background
thresholded=background_subtracted<-40
import numpy as np
import scipy
from scipy.ndimage.measurements import label
from process.file_ import open_file
from window import Window
def keepLargestBinary(image): # replace with skimage.morphology.remove_small_objects
    """For each 2-D frame in the stack, keep only the largest 8-connected
    component (in-place) and return the stack.

    Frames with zero or one component are left untouched.
    """
    struct = scipy.ndimage.generate_binary_structure(2, 2)
    for frame_idx in np.arange(len(image)):
        labeled, n_components = label(image[frame_idx], structure=struct)
        if n_components > 1:
            # Pixel count per label; label 0 is background, so ignore it.
            sizes = np.bincount(labeled.ravel())
            sizes[0] = 0
            biggest = np.argmax(sizes)
            image[frame_idx] = labeled == biggest
    return image
# FLIKA pipeline: subtract the median (background) projection, blur,
# threshold with a dark foreground, then keep only the largest blob.
original=g.m.currentWindow
nFrames=len(original.image)
median=zproject(0,nFrames,'Median',keepSourceWindow=True)
original.image-=median.image
g.m.currentWindow=original
gaussian_blur(2)
threshold(-10,darkBackground=True)
image=keepLargestBinary(g.m.currentWindow.image)
Window(image)
#filename="D:/Old/Software/2014.05 motiontracking/t1_1.tif"
#original=open_file(g.m.settings['filename'])
"""
from PyQt4.QtCore import pyqtSlot as Slot
@Slot(int)
def updateTime(time):
x0,y0,x1,y1=bbox[time]
roi=g.m.currentWindow.currentROI
roi.draw_from_points([(x0,y0),(x0,y1),(x1,y1),(x1,y0),(x0,y0)])
roi.translate_done.emit()
print(time)
bbox=np.zeros((len(image),4))
for i in np.arange(len(image)):
B = np.argwhere(image[i])
bbox[i,:]=np.concatenate((B.min(0),B.max(0)))
original.imageview.sigTimeChanged.connect(updateTime)
background=np.copy(g.m.currentWindow.image)
for i in np.arange(len(background)):
x0,y0,x1,y1=bbox[i]
for j in np.arange(i,len(background)): #j is the first index where the bounding boxes don't collide
left,top,right,bottom=bbox[j]
if (left>x1 or top>y1 or bottom<y0 or right<x0): #if the bounding boxes to not intersect
break
if j==len(background)-1:
for j in np.arange(i,0,-1): #j is the first index where the bounding boxes don't collide
left,top,right,bottom=bbox[j]
if (left>x1 or top>y1 or bottom<y0 or right<x0): #if the bounding boxes to not intersect
break
background[i,x0:x1,y0:y1]=background[j,x0:x1,y0:y1]
show(background)
image=np.copy(g.m.currentWindow.image)
g.m.currentWindow.image=image-background; g.m.currentWindow.reset()
threshold(-27,darkBackground=True)
binary_dilation(2,1,1,keepSourceWindow=True)
g.m.currentWindow.image=keepLargestBinary(g.m.currentWindow.image)
g.m.currentWindow.reset()
"""
| true |
b5810cdd2ccbe1a792c9a0d1ef6f49680d684f2b | Python | gmelodie/Apresentacoes | /Aulas/Frentes/Criptografia/One-time pad/ataque_manytimepad.py | UTF-8 | 3,368 | 3.390625 | 3 | [
"MIT"
] | permissive | # deve existir, no mesmo diretório, um arquivo 'ciphertexts.txt' com os ciphertexts
# em hexadecimal separados por uma quebra de linha
# calcula a frequencia em que foram encontrados possíveis espaços em cada posição
# da string cipher1 xor cipher2
def encontraEspacos(cipher1, cipher2, frequencias):
    """XOR two hex-encoded ciphertexts and bump `frequencias[pos]` for every
    position whose XOR byte is consistent with a space in one plaintext
    (mutates `frequencias` in place)."""
    xored = xorBytes(bytes.fromhex(cipher1), bytes.fromhex(cipher2))
    for pos, byte in enumerate(xored):
        if temEspaco(byte):
            frequencias[pos] += 1
def temEspaco(numAscii):
    """True when the XOR byte suggests a space: space^letter gives a letter
    of the opposite case, and space^space gives 0."""
    is_letter = 65 <= numAscii <= 90 or 97 <= numAscii <= 122
    return is_letter or numAscii == 0
def xorBytes(msg1, msg2):
    """Byte-wise XOR of two byte strings, truncated to the shorter one."""
    out = bytearray()
    for left, right in zip(msg1, msg2):
        out.append(left ^ right)
    return bytes(out)
def calculaFrequencias(ciphertexts):
    """For every ciphertext, count per byte position how often XOR against
    the other ciphertexts looks like a space (one counter list each)."""
    frequencias = tuple([0] * (len(msg) // 2) for msg in ciphertexts)
    for idx, cipher1 in enumerate(ciphertexts):
        for cipher2 in ciphertexts:
            if cipher2 != cipher1:
                encontraEspacos(cipher1, cipher2, frequencias[idx])
    return frequencias
def calculaChave(ciphertexts, frequencias):
    """Derive the key byte for each position: where a ciphertext's space
    score is close to the maximum possible (within 25% tolerance) and beats
    the best seen so far, assume that plaintext byte was ' ' and recover
    key[pos] = cipher[pos] ^ ord(' ').

    Positions never claimed stay None.
    """
    tamChave = max(int(len(msg)/2) for msg in ciphertexts)
    chave = [None]*tamChave
    maximaFrequencia = [0]*tamChave
    for cipher, freqs in zip(ciphertexts, frequencias):
        cipher = bytes.fromhex(cipher)
        for pos, freq in zip(range(len(freqs)), freqs):
            # limite = number of OTHER ciphertexts that reach this position.
            limite = calculaLimite(ciphertexts, pos) - 1
            tolerancia = int(limite/4)
            if freq >= limite-tolerancia and freq > maximaFrequencia[pos]:
                maximaFrequencia[pos] = freq
                chave[pos] = cipher[pos]^ord(' ')
    return chave
# quantidade de mensagens que possuem tamanho maior do que pos (2*pos em hexa)
def calculaLimite(ciphertexts, pos):
    """Number of ciphertexts long enough to contain byte position `pos`
    (positions are bytes; each byte is 2 hex characters)."""
    hex_pos = pos * 2
    count = 0
    for msg in ciphertexts:
        if len(msg) > hex_pos:
            count += 1
    return count
def calculaPlaintext(ciphertext, chave):
    """Decrypt the hex `ciphertext` with the partial key `chave`;
    unknown key bytes (None) are rendered as '_'."""
    raw = bytes.fromhex(ciphertext)
    chars = [chr(c ^ k) if k is not None else "_" for c, k in zip(raw, chave)]
    return "".join(chars)
def calculaPlaintexts(ciphertexts, chave):
    """Decrypt every ciphertext with the same (partial) key."""
    return [calculaPlaintext(msg, chave) for msg in ciphertexts]
def imprimePlaintexts(ciphertexts, chave):
    """Decrypt every ciphertext, print each one numbered, return the list."""
    plaintexts = []
    for i, msg in enumerate(ciphertexts):
        decoded = calculaPlaintext(msg, chave)
        plaintexts.append(decoded)
        print("{}.".format(i), decoded)
    return plaintexts
def imprimeLista(lista):
    """Print every item of `lista`, numbered from 0."""
    for i, item in enumerate(lista):
        print("{}.".format(i), item)
def alteraChave(ciphertexts, chave, string, pos, msg):
    """Patch key bytes [pos, pos+len(string)) assuming ciphertext number
    `msg` decrypts to `string` at that offset (mutates `chave` in place)."""
    hexdump = ciphertexts[msg]
    for offset, ch in enumerate(string):
        i = pos + offset
        chave[i] = ord(ch) ^ int(hexdump[2 * i:2 * i + 2], 16)
def leCiphertexts(arquivo):
    """Read the ciphertext file and return its lines (newlines kept)."""
    with open(arquivo) as arq:
        return list(arq)
def main():
    """Full many-time-pad attack: load ciphertexts from ciphertexts.txt,
    score candidate space positions, derive the partial key and print the
    recovered (partial) plaintexts."""
    ciphertexts = leCiphertexts('ciphertexts.txt')
    frequencias = calculaFrequencias(ciphertexts)
    chave = calculaChave(ciphertexts, frequencias)
    plaintexts = calculaPlaintexts(ciphertexts, chave)
    imprimeLista(plaintexts)
if __name__ == '__main__':
main()
| true |
385cb39d8e1b2dabc4f93f602e763391ae80f743 | Python | TIM245-W16/twitter-sentiment | /PySpark.py | UTF-8 | 1,289 | 3.21875 | 3 | [] | no_license | """
Notice that this program is for PySpark which handle 1 million tweets.
1.Read train set and data set from txt files.
2.Put data set into Spark system, and transform them into RDD.
3.Run the bayse algorithm from MLlib.
"""
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
def parseLine(line):
    """Parse a 'label, #f1#f2#...' text row into an MLlib LabeledPoint."""
    parts = line.split(', #')
    feature_values = [float(tok) for tok in parts[1].split('#')]
    return LabeledPoint(float(parts[0]), Vectors.dense(feature_values))
# Load the four training shards and two test shards as RDDs of LabeledPoint.
tr1 = sc.textFile('/Users/yuanjun/Desktop/train1.txt').map(parseLine)
tr2 = sc.textFile('/Users/yuanjun/Desktop/train2.txt').map(parseLine)
tr3 = sc.textFile('/Users/yuanjun/Desktop/train3.txt').map(parseLine)
tr4 = sc.textFile('/Users/yuanjun/Desktop/train4.txt').map(parseLine)
te1 = sc.textFile('/Users/yuanjun/Desktop/test1.txt').map(parseLine)
te2 = sc.textFile('/Users/yuanjun/Desktop/test2.txt').map(parseLine)
tr1 = tr1.union(tr2)
tr3 = tr3.union(tr4)
train = tr1.union(tr3)
test = te1.union(te2)
# Train Naive Bayes (smoothing=1.0) and measure test-set accuracy.
model = NaiveBayes.train(train, 1.0)
predictionAndLabel = test.map(lambda p : (model.predict(p.features), p.label))
# NOTE: tuple-unpacking lambda and the print statement below are
# Python-2-only syntax; this script will not parse under Python 3.
accuracy = 1.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / test.count()
print accuracy
| true |
0bef7aebedde9cab7326cdefe564c23215ff3b66 | Python | Pavel-rud/hotel_system | /code/make_and_read_documents.py | UTF-8 | 2,159 | 3.046875 | 3 | [] | no_license | from docxtpl import DocxTemplate
import xlsxwriter
import xlrd, xlwt
import csv
import pandas as pd
def admin_and_guest(surname_guest, name_guest, name_guest_father, hotel,
                    room, surname_admin, name_admin, name_guest_admin, date, action, location):
    """Fill the check-in/check-out docx template with guest and admin data
    and save it as '<location>.docx'.

    Reads 'example_document_for_admin_and_guest.docx' from the working
    directory; placeholder names in the template match the context keys.
    """
    doc = DocxTemplate("example_document_for_admin_and_guest.docx")
    context = {'surname_guest': surname_guest,
               'name_guest': name_guest,
               'name_guest_father': name_guest_father,
               'hotel': hotel,
               'room': room,
               'surname_admin': surname_admin,
               'name_admin': name_admin,
               'name_guest_admin': name_guest_admin,
               'date': date,
               'action': action}
    doc.render(context)
    doc.save(f"{location}.docx")
def info_about_guests(guests, location):
    """Export guest records to '<location>.csv'.

    :param guests: iterable of sequences with at least 7 fields in the order
        surname, name, patronymic, birth date, sex, phone, passport data
    :param location: output file name without the '.csv' extension
    """
    rows = [
        {"фамилия": g[0],
         "имя": g[1],
         "отчество": g[2],
         "дата рождения": g[3],
         "пол": g[4],
         "номер телефона": g[5],
         "паспортные данные": g[6]}
        for g in guests
    ]
    pd.DataFrame(rows).to_csv(f'{location}.csv', index=False)
def write_time(time, chance):
    """Persist the block time and remaining chances to time_block.txt.

    Writes `time` on the first line and `chance` on the second (no trailing
    newline), the exact layout read_time() expects.
    Fixed: the original leaked the file handle; a context manager now
    guarantees flush/close even on error.
    """
    with open('time_block.txt', 'w') as f:
        f.write(time + '\n')
        f.write(chance)
def read_time():
    """Read (time, chance) back from time_block.txt as written by
    write_time(): first line without its trailing newline, second as-is.

    Fixed: the original never closed the file and carried an unused
    `first` flag; both removed.
    """
    with open('time_block.txt') as f:
        lines = f.readlines()
    # lines[0] ends with the '\n' that write_time() appended; strip it.
    return lines[0][:-1], lines[1]
def read_info_about_administrators(location):
    """Read administrator rows from the first sheet of an .xls workbook.

    :param location: path to the Excel file
    :return: list of row value lists, skipping the header row (row 0)
    """
    rb = xlrd.open_workbook(location)
    page = rb.sheet_by_index(0)
    admistrators = [page.row_values(rownum) for rownum in range(1, page.nrows)]
    return admistrators
# info_about_guests([["ad", "dada", "ad", "dada", "ad", "dada", "ad", "dada"], ["dfdad", "dadaadda", "adaadd", "daddada", "addads", "dada", "ad", "dada"]], "g")
# write_time("09:55", "2")
# print(read_time())
# read_info_about_administrators()
# admin_and_guest("Rudnik", "Pavel", "Alekseevich", "grand", "7", "rey", "maks", "sergeevich", "10.20.10", "выселение") | true |
41c921d23e013c0b472403e58e783019c565b741 | Python | SAGESAlgorithm/SAGES | /util/utils.py | UTF-8 | 6,761 | 2.640625 | 3 | [] | no_license |
import torch
import numpy as np
import scipy.sparse as sp
from preprocess.normalization import fetch_normalization, row_normalize
from time import perf_counter
def construct_adj(data):
    """Build a sparse adjacency matrix from a PyG-style graph object.

    :param data: object with `x` (N x F node features) and `edge_index`
        (2 x E LongTensor of source/target indices)
    :return: scipy.sparse.coo_matrix of shape (N, N) with 1.0 per edge
        (may be directed, depending on edge_index)
    """
    num_nodes = data.x.size(0)
    rows = data.edge_index[0].numpy()
    cols = data.edge_index[1].numpy()
    weights = np.ones(data.edge_index.size(1))
    return sp.coo_matrix((weights, (rows, cols)), shape=(num_nodes, num_nodes))
def convert_undirec(adj):
    """Return the undirected version of `adj` with self-loops added; every
    stored entry's weight is reset to 1.

    :param adj: scipy sparse adjacency matrix
    """
    sym = adj.tocoo()
    sym = sym + sym.T + sp.eye(sym.shape[0])
    sym.data = np.ones(len(sym.data))
    return sym
def prepare_graph_data(adj):
    """Add self-loops, binarize the weights, and return the graph both as a
    torch sparse tensor and as (row, col) index tensors.

    :param adj: scipy sparse adjacency matrix
    :return: (torch sparse adjacency, LongTensor rows, LongTensor cols)
    """
    with_loops = adj + sp.eye(adj.shape[0])  # self-loop
    with_loops[with_loops > 0.0] = 1.0  # binarize edge weights
    g_torch = sparse_mx_to_torch_sparse_tensor(with_loops)
    coo = with_loops.tocoo()
    return g_torch, torch.LongTensor(coo.row), torch.LongTensor(coo.col)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix into a torch sparse float tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    row_col = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(
        torch.from_numpy(row_col),
        torch.from_numpy(coo.data),
        torch.Size(coo.shape),
    )
def preprocess_citation(adj, features, normalization="FirstOrderGCN"):
    """Normalize the adjacency matrix (via the named normalization scheme
    from fetch_normalization) and row-normalize the feature matrix.

    :return: (normalized adj, row-normalized features)
    """
    adj_normalizer = fetch_normalization(normalization)
    adj = adj_normalizer(adj)
    features = row_normalize(features)
    return adj, features
def lgc_process_citation(adj, features, normalization="AugNormAdj"):
    """Symmetrize and normalize a citation graph, returning torch tensors.

    :param adj: sparse adjacency matrix, undirected with self-loops
    :param features: numpy.array of node features
    :param normalization: normalization scheme name for preprocess_citation
    :return: (adj as torch sparse float tensor, features as FloatTensor)
    """
    # Convert the feature matrix to sparse CSR.
    features = sp.csr_matrix(features)
    # Symmetrize: keep the larger of adj[i,j] and adj[j,i] in both cells.
    # (Original reviewer note said its purpose was unclear.)
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    # adj is now a sparse, undirected matrix (self-loops preserved).
    adj, features = preprocess_citation(adj, features, normalization)
    # porting to pytorch
    features = torch.FloatTensor(np.array(features.todense())).float()
    adj = sparse_mx_to_torch_sparse_tensor(adj).float()
    return adj, features
def set_seed(seed, cuda):
    """Seed the numpy and torch RNGs (and CUDA's when `cuda` is truthy)
    for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda: torch.cuda.manual_seed(seed)
def lgc_precompute(features, adj, degree):
    """Propagate `features` through the (normalized) adjacency `degree`
    times (SGC-style precomputation).

    :return: (propagated features, elapsed seconds)
    """
    start = perf_counter()
    for _ in range(degree):
        features = torch.spmm(adj, features)
    return features, perf_counter() - start
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into ((row, col) coords, values,
    shape), converting to COO first when needed."""
    if not sp.isspmatrix_coo(sparse_mx):
        sparse_mx = sparse_mx.tocoo()
    coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
    return coords, sparse_mx.data, sparse_mx.shape
def mask_test_edges(adj):
    """Split the edges of `adj` into train/val/test sets for link prediction.

    10% of the undirected edges go to the test set and 5% to validation;
    an equal number of sampled non-edges ("false" edges) accompanies each.
    The training adjacency matrix is rebuilt without the held-out edges.

    Fixes: removed a leftover `import pdb; pdb.set_trace()` breakpoint that
    froze execution, and inlined the coordinate extraction (previously via
    sparse_to_tuple) with equivalent COO attribute access.

    NOTE: splits are randomized via np.random; seed for reproducibility.

    :param adj: scipy sparse adjacency matrix (symmetric; diagonal removed)
    :return: (adj_train, train_edges, val_edges, val_edges_false,
              test_edges, test_edges_false)
    """
    # Remove diagonal elements (self-loops) before splitting.
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0

    # Upper triangle lists each undirected edge exactly once.
    adj_triu = sp.triu(adj).tocoo()
    edges = np.vstack((adj_triu.row, adj_triu.col)).transpose()
    # edges_all contains both directions (a, b) and (b, a).
    adj_coo = adj.tocoo()
    edges_all = np.vstack((adj_coo.row, adj_coo.col)).transpose()
    num_test = int(np.floor(edges.shape[0] / 10.))
    num_val = int(np.floor(edges.shape[0] / 20.))

    all_edge_idx = list(range(edges.shape[0]))
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    test_edges = edges[test_edge_idx]
    val_edges = edges[val_edge_idx]
    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)

    def ismember(a, b, tol=5):
        # True when row `a` appears in 2-D array `b` (within rounding tol).
        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
        return np.any(rows_close)

    # Rejection-sample non-edges for the test set.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], edges_all):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])

    # Rejection-sample non-edges for the validation set.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], train_edges):
            continue
        if ismember([idx_j, idx_i], train_edges):
            continue
        if ismember([idx_i, idx_j], val_edges):
            continue
        if ismember([idx_j, idx_i], val_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        val_edges_false.append([idx_i, idx_j])

    #assert ~ismember(test_edges_false, edges_all)
    #assert ~ismember(val_edges_false, edges_all)
    #assert ~ismember(val_edges, train_edges)
    #assert ~ismember(test_edges, train_edges)
    #assert ~ismember(val_edges, test_edges)

    data = np.ones(train_edges.shape[0])
    # Re-build the (symmetrized) adjacency from the kept training edges.
    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
    adj_train = adj_train + adj_train.T
    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
8605bd7ee8415cc9c3cec9968ff13dc8cdb01f5e | Python | cghnassia/Roki_Rakat | /ZygBee/VirtualEnv/scripts/lireKeyboard.py | UTF-8 | 515 | 2.796875 | 3 | [] | no_license | import sys
import requests
import json
import time
# Interactive loop: read one measurement from the keyboard and POST it as
# JSON to the local sensors API until the process is interrupted.
infos = {}
while 1:
    print("-------Creation d'une nouvelle mesure----------")
    infos['nArduino'] = input("Numero Arduino : ")
    infos['nSensor'] = input("Numero Sensor : ")
    infos['value'] = input("Valeur : ")
    infos['measureTime'] = time.time()  # server expects a unix timestamp
    r=requests.post('http://localhost:6543/sensors', data=json.dumps(infos), headers={'content-type': 'application/json'})
    # Abort the loop with an exception if the API rejected the measurement.
    r.raise_for_status()
    print("-----Fin de creation--------\n")
| true |
74da8acaef434b21e0b5326fa07429e5df1e278c | Python | YoonSungLee/Algorithm_python | /이것이 취업을 위한 코딩테스트다 with 파이썬/DFS_BFS/인구 이동.py | UTF-8 | 1,890 | 3.046875 | 3 | [] | no_license | # Recursion Error
# input 1
# 2 20 50
# 50 30
# 20 40
# input 2
# 2 40 50
# 50 30
# 20 40
# input 3
# 2 20 50
# 50 30
# 30 40
# input 4
# 3 5 10
# 10 15 20
# 20 30 25
# 40 22 10
# input 5
# 4 10 50
# 10 100 20 90
# 80 100 60 70
# 70 20 30 40
# 50 20 100 10
from collections import deque
import copy
# BOJ "population movement": n x n grid of city populations; cities whose
# population difference is within [l, r] open their shared border, every
# connected union averages its population, and the day counter increases
# until no border opens.
n, l, r = map(int, input().split())
all_map = []
for _ in range(n):
    all_map.append(list(map(int, input().split())))
count = 0
# 4-neighborhood offsets (down, up, right, left).
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
while True:
    after_map = copy.deepcopy(all_map)
    visited = [[False] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            if visited[i][j] == False:
                visited[i][j] = True
                # BFS over cities reachable through open borders from (i, j).
                queue = deque()
                population = all_map[i][j]
                queue.append([i, j, all_map[i][j]])
                union_city_list = [[i, j]]
                while queue:
                    x, y, before_pop = queue.popleft()
                    for idx in range(4):
                        nx = x + dx[idx]
                        ny = y + dy[idx]
                        if nx < 0 or nx >= n or ny < 0 or ny >=n:
                            continue
                        if visited[nx][ny] == True:
                            continue
                        current_pop = all_map[nx][ny]
                        # Border opens when the difference lies in [l, r].
                        if l <= abs(before_pop - current_pop) <= r:
                            population += current_pop
                            union_city_list.append([nx, ny])
                            queue.append([nx, ny, current_pop])
                            visited[nx][ny] = True
                # Everyone in the union gets the (floored) average.
                mean_pop = int(population / len(union_city_list))
                for x, y in union_city_list:
                    after_map[x][y] = mean_pop
    # Stop when a full pass changed nothing; otherwise count one more day.
    if after_map == all_map:
        break
    else:
        count += 1
        all_map = after_map
print(count) | true |
4f1e8d2df5cc74bd39261c6130ac3774f67a061e | Python | koboyiv/ethereum-ee | /get_top_eth_account.py | UTF-8 | 3,439 | 2.640625 | 3 | [] | no_license | import asyncio
import json
import math
import time
import csv
from operator import itemgetter
import aiohttp
import requests
import web3
import numpy as np
from bs4 import BeautifulSoup
from sqlalchemy import create_engine, text
from tqdm import tqdm
import config
engine = create_engine(config.DB.ethereum(), echo=False)
async def http_v1_get(address_list,block_number):
    """Fetch ETH balances for `address_list` at `block_number` over the
    geth websocket JSON-RPC endpoint and persist them to the database.

    Each request id is the address's index in `address_list`, which is how
    responses are matched back to addresses.

    :return: list of {'address': ..., 'balance': int} dicts
    """
    msg_list = []
    result = []
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("ws://x.stockradars.co:8546/") as ws:
            data = json_rpc_block(block_number)
            for index, address in enumerate(tqdm(address_list)):
                await ws.send_str(data % (address, index))
                msg_list.append(await ws.receive())
            for msg in msg_list:
                res = msg.json()
                if 'result' in res:
                    result.append({
                        'address': address_list[int(res['id'])],
                        'balance': web3.Web3.toInt(hexstr=res['result'])
                    })
                else:
                    # Error responses are only logged, not retried.
                    print(res)
    engine.execute(text("""
        INSERT IGNORE INTO ethereum.balance (address, token, blockNumber, balance)
        VALUES (:address, 'ETH', {}, :balance)
    """.format(block_number)), result)
    return result
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    return (l[start:start + n] for start in range(0, len(l), n))
def get_address():
    """Scrape the top 1000 account addresses from etherscan.io.

    Fetches pages 1-10 (100 accounts each) with one shared HTTP session and
    pulls the address text out of the fixed-width table cells.

    :return: list of address strings
    """
    result = []
    s = requests.Session()
    for page in tqdm(range(1,11)):
        r = s.get('https://etherscan.io/accounts/{}?ps=100'.format(page))
        html = r.text
        soup = BeautifulSoup(html, 'html.parser')
        # The address column is identified by its 330px cell width.
        td = soup.select('td[width="330px"]')
        for t in td:
            result.append(t.text)
    return result
def get_numblocks():
    """Read block numbers (one per line) from 'numblocks.csv' as ints."""
    with open('numblocks.csv') as fh:
        reader = csv.reader(fh)
        print(reader)
        values = [row[0] for row in reader]
    return [int(v) for v in values]
def json_rpc_block(numblock):
    """Build an eth_getBalance JSON-RPC request template pinned to block
    `numblock`; '%s' placeholders remain for the address and request id."""
    prefix = '{"jsonrpc": "2.0", "method": "eth_getBalance", "params": ["%s", "'
    suffix = '"], "id": %s}'
    return prefix + hex(numblock) + suffix
if __name__ == '__main__':
all_address = get_address()
numblock = get_numblocks()
numblock = np.asarray(numblock)
start = time.time()
print(start)
loop = asyncio.get_event_loop()
result = []
for i in numblock:
if i == 2:
break
else:
return_result = loop.run_until_complete(asyncio.gather(
*[http_v1_get(part_address,i) for part_address in chunks(all_address, math.ceil(len(all_address) / 4.0))]
))
for r in return_result:
result += r
toplist = sorted(result, key=itemgetter('balance'), reverse=True)
print(time.time() - start)
print("block number:", i)
#print(toplist[0:101])
result = []
loop.close()
"""
result = []
for r in return_result:
result += r
toplist = sorted(result, key=itemgetter('balance'), reverse=True)
print(toplist[0:101])
"""
| true |
af63cb9464a082e6ae07e06584aa761e0c4a4ccb | Python | Le-Stroika/skedge-suggestion-service | /generator.py | UTF-8 | 1,918 | 2.921875 | 3 | [] | no_license | def get_schedules(course_data: list) -> list:
"""get a list of courses and return back the same thing
but the option map is replaced by the actual choice and return
all of them"""
def is_conflict(c1: tuple, c2: tuple) -> bool:
    """True when the closed intervals c1=(start, end) and c2=(start, end)
    overlap (sharing an endpoint counts as a conflict)."""
    return c1[0] <= c2[1] and c2[0] <= c1[1]
# ----------------------------------------
from collections import defaultdict
def enemyDicc(dislikes):
    """Build a symmetric adjacency map from dislike pairs:
    person -> set of everyone they clash with."""
    enemies = defaultdict(set)
    for first, second in dislikes:
        enemies[first].add(second)
        enemies[second].add(first)
    return enemies
def formGroups(dislikes, max_):
    """Assign everyone appearing in `dislikes` to at most `max_` groups such
    that no two mutual dislikers share a group.

    :param dislikes: iterable of (a, b) pairs of people who clash
    :param max_: maximum number of groups allowed
    :return: dict person -> group id (1-based), or None when impossible
    """
    enemiesOf = enemyDicc(dislikes)
    ppl = list(enemiesOf.keys())
    # sort by most to least hated this is a heuristic
    ppl = sorted(ppl, key=lambda x: len(enemiesOf[x]), reverse=True)
    # you want to deal with the most problematic people first
    # if you leave them for last it will be very bad
    return findFrom(ppl, {}, max_, enemiesOf)
def findFrom(people, grouping, max_, enemiesOf):
    """Backtracking search: place each person from `people` into an existing
    group free of their enemies, opening a new group (up to `max_`) when
    needed.

    Mutates `grouping` during the search and undoes failed placements.

    :param people: remaining people to place, most-constrained first
    :param grouping: partial assignment person -> group id (mutated)
    :param max_: maximum number of groups
    :param enemiesOf: person -> set of enemies
    :return: a complete grouping dict, or None when no assignment exists
    """
    if not people:
        # everyone is in this grouping and we are done
        # NOTE(review): an empty grouping {} returned here is falsy and would
        # read as failure in the `if next_:` checks below — only reachable
        # when `people` started empty, where it is the top-level return.
        return grouping
    grp_ids = set(grouping.values())
    # extra param for edge case at start
    num_grps = max(grp_ids, default=0)
    person, rest = people[0], people[1:]
    # find a group that can take this person
    for id_ in grp_ids:
        hater_in_grp = any(enemy in grouping and grouping[enemy] == id_
                           for enemy in enemiesOf[person])
        if not hater_in_grp:
            grouping[person] = id_
            next_ = findFrom(rest, grouping, max_, enemiesOf)
            if next_:
                return next_
            else:
                # Backtrack: this placement made the rest unsolvable.
                del grouping[person]
    # if no one can, try to make a new one
    if num_grps < max_:
        grouping[person] = num_grps + 1
        next_ = findFrom(rest, grouping, max_, enemiesOf)
        if next_:
            return next_
        else:
            del grouping[person]
    return None
| true |
9a5c0f438b8cb8a8907a308feda746f86d9ff42a | Python | firdavsxon/morning_news_telegram_bot | /hnews.py | UTF-8 | 2,898 | 2.59375 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import telegram
import schedule
from threading import Thread
from time import sleep
from datetime import date
import datetime
def create_custom_hnews():
    """Scrape the Hacker News front page and format up to 26 stories.

    Pairs each '.storylink' anchor with its '.athing' row to read the rank,
    then renders lines of the form ' <rank>. <title>,\\n<url>'.

    :return: the formatted digest as a single string
    """
    file = requests.get('https://news.ycombinator.com/').text
    soup = BeautifulSoup(file, 'html.parser')
    links = soup.select('.storylink')
    athing = soup.select('.athing')
    hnews = []
    string =[]
    for idx, val in enumerate(links):
        title_text = links[idx].getText()
        href_url = links[idx].get('href', None)
        rnk = athing[idx].select('.rank')
        # Rank cell text looks like '1.'; strip the dot before parsing.
        r = int(rnk[0].getText().replace('.', ''))
        hnews.append({'rank': r, 'title': title_text, 'url': href_url})
    for idx, i in enumerate(range(len(hnews))):
        title = hnews[i]['title']
        link = hnews[i]['url']
        rank = hnews[i]['rank']
        string.append(f'\n {rank}. {title},\n{link}\n')
        # Cap the digest at 26 entries (indices 0..25).
        if idx == 25:
            break
    return ''.join(string)
# telegram bot configuration
my_token = 'your token'
# Fixed NameError: the digest builder defined above is create_custom_hnews;
# the original called the undefined name create_custom_hn().
msg = create_custom_hnews()
chat_id = 'telegram chat id'
# NOTE(review): Updater (and the handler classes used below) are normally
# imported from telegram.ext, which this file never imports — confirm the
# intended imports before running.
updater = Updater(token=my_token, use_context=True)
dispatcher = updater.dispatcher
def start(update, context):
    """/start command handler: reply with a greeting in the user's chat."""
    context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!")
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
updater.start_polling()
# inline caps for telegram channel for opening youtube, wikipedia .etc in bot chat
def inline_caps(update, context):
    """Inline-query handler: echo the query text back in upper case.

    Empty queries are ignored.
    """
    query = update.inline_query.query
    if not query:
        return
    results = list()
    results.append(
        InlineQueryResultArticle(
            id=query.upper(),
            title='Caps',
            input_message_content=InputTextMessageContent(query.upper())
        )
    )
    context.bot.answer_inline_query(update.inline_query.id, results)
inline_caps_handler = InlineQueryHandler(inline_caps)
dispatcher.add_handler(inline_caps_handler)
def send(msg, chat_id, token=my_token):
    """Send `msg` to the Telegram chat identified by `chat_id`.

    :param msg: message text
    :param chat_id: numeric Telegram chat id
    :param token: bot API token (defaults to the module-level my_token)
    :return: the sent telegram Message object
    """
    bot = telegram.Bot(token=token)
    return bot.sendMessage(chat_id=chat_id, text=msg)
def date_string():
    """Return today's date formatted like 'Mon, 01 January, 2021'."""
    return date.today().strftime("%a, %d %B, %Y")
# Fixed NameError: the module defines chat_id above, not chat_id_me.
send(f'Top news for {date_string()} \n'+msg, chat_id, my_token)
def calling():
    """Send today's top-news digest; invoked daily by the schedule job.

    Fixed NameError: the module defines chat_id, not chat_id_me.
    """
    return send(f'Top news for {date_string()} \n'+msg, chat_id, my_token)
def schedule_checker():
    """Run pending `schedule` jobs forever, polling once per second.

    Intended to run on a background thread so it doesn't block the bot.
    """
    while True:
        schedule.run_pending()
        sleep(1)
if __name__ == "__main__":
# Create the job in schedule.
# schedule.every().hour.do(calling)
schedule.every().day.at("10:00").do(calling)
#
# Spin up a thread to run the schedule check so it doesn't block your bot.
# This will take the function schedule_checker which will check every second
# to see if the scheduled job needs to be ran.
Thread(target=schedule_checker).start()
#
# And then of course, start your server.
# server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
| true |
ca33d23c50e64390c8ea77c4af2ebe4223d458ef | Python | yangbo111111/ceshi | /project/test_function/information/unit_channel_function.py | UTF-8 | 2,109 | 2.6875 | 3 | [] | no_license | """
created: 20210201 by yb
description: 单位频道函数库
Modification History:
1、2021-02-01 yb创建了此文件
"""
from page.information.information_page import Information_page
from test_function.office_function import Office_method, Mycsv, WindowsOption
import time
class Unit_channel(object):
    """
    Page-object helper wrapping the admin-console "unit channel" operations.
    """
    def __init__(self, browser):
        # Page element locators and shared office helpers for this browser.
        self.page = Information_page(browser)
        self.office_menthod = Office_method(browser)
    def star_unit_channel(self, browser):
        """
        Log in, then navigate to admin console / content center / unit channel.
        """
        mycsv = Mycsv()
        # Log into the system with credentials from the CSV fixture.
        self.office_menthod.login(mycsv.read(1, 0), mycsv.read(1, 1))
        # Click the avatar.
        self.page.head_portrait.click()
        # Click "enter admin console".
        self.page.enter_base.click()
        # Switch to the newly opened admin-console window.
        now_handle = browser.current_window_handle
        change_widows = WindowsOption(browser)
        change_widows.swtich(now_handle)
        change_widows.closeA()
        time.sleep(2)
        # Click "content center".
        self.page.content_center.click()
        # Click "unit channel".
        self.page.unit_channel.click()
    def add_unit_channel(self, status):
        """
        Create unit channels in bulk. status: 0 = only open the add dialog,
        1 = add and save.
        """
        # Click the batch-create button.
        self.page.unit_channel_batch_add_button.click()
        time.sleep(2)
        # Select the default (largest) organization.
        self.page.largest_organization.click()
        # Click the OK button.
        self.page.unit_channel_chose_user_ok_button.click()
        time.sleep(1)
        if status == 1:
            # Click the save button.
            self.page.add_unit_channel_save_buton.click()
    def refresh_unit_channel(self, browser):
        """
        Refresh the page and navigate back to content center / unit channel.
        """
        # Refresh the page.
        browser.refresh()
        time.sleep(1)
        # Click "content center".
        self.page.content_center.click()
        # Click "unit channel".
        self.page.unit_channel.click()
| true |
d2538dff192597eee755e379aec9bccc62ee70d0 | Python | RosaToral/algoritmos_distribuidos | /Fuentes/event.py | UTF-8 | 1,281 | 3.71875 | 4 | [] | no_license | # Este archivo contiene la implementacion de la clase Event (11.11.10)
""" Un objeto de la clase Event encapsula la informacion que se intercambia
entre las entidades activas de un sistema distribuido """
# ----------------------------------------------------------------------------------------
class Event:
    """Message exchanged between the active entities of a distributed system.

    Attributes ``name``, ``time``, ``target`` and ``source`` are set once at
    construction; the ``getX`` accessors are kept for existing callers.
    """

    def __init__(self, name, time, target, source):
        """Build an instance with all attributes initialised."""
        self.name = name
        self.time = time
        self.target = target
        self.source = source

    def getName(self):
        """Return the event's name."""
        return self.name

    def getTime(self):
        """Return the time at which the event must occur."""
        return self.time

    def getTarget(self):
        """Return the identity of the process the event is addressed to."""
        return self.target

    def getSource(self):
        """Return the identity of the process that originated the event."""
        return self.source

    def __repr__(self):
        """Unambiguous representation, useful when tracing simulations."""
        return (f"{type(self).__name__}(name={self.name!r}, time={self.time!r}, "
                f"target={self.target!r}, source={self.source!r})")
| true |
f40c659fc3bbaa84fe8d57c55d2457df53a90d96 | Python | annaship/python-scripts | /cut_barcodes.py | UTF-8 | 3,143 | 2.8125 | 3 | [] | no_license | #! /usr/bin/env python
import IlluminaUtils.lib.fastqlib as fq
import os
import sys
import argparse
# from argparse import RawTextHelpFormatter
def get_files(walk_dir_name, ext = ""):
    """Recursively collect files under *walk_dir_name* (symlinks followed).

    Args:
        walk_dir_name: root directory to walk.
        ext: when non-empty, keep only file names ending with this suffix.

    Returns:
        dict mapping each file's full path to the tuple
        (directory, path-without-extension, extension).
    """
    files = {}
    # os.walk yields (dir, subdirs, filenames); subdirs are not needed here.
    for dirname, _subdirs, filenames in os.walk(walk_dir_name, followlinks=True):
        if ext:
            filenames = [f for f in filenames if f.endswith(ext)]
        for file_name in filenames:
            full_name = os.path.join(dirname, file_name)
            file_base, file_extension = os.path.splitext(full_name)
            files[full_name] = (dirname, file_base, file_extension)
    return files
def parse_args():
    """Collect, echo and return the command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="""Cuts the first 5 characters from sequences and quality lines.
    Input: gzipped fastq files.
    Output: the new shortened fastq entries and a log file with original file name and the cut 5nt sequences.""")
    arg_parser.add_argument('-d', '--dir_name',
                            required=True, action='store', dest='start_dir',
                            help="Start directory name")
    arg_parser.add_argument("-ve", "--verbatim",
                            required=False, action="store_true", dest="is_verbatim",
                            help="Print an additional information")
    arg_parser.add_argument("-e", "--extension",
                            required=False, action="store", dest="extension",
                            help="File(s) extension")
    parsed = arg_parser.parse_args()
    print('args = ')
    print(parsed)
    return parsed
def go_trhough_fastq():
    """Trim the 5-nt barcode from every entry of every collected fastq file.

    Reads the module-level globals ``fq_files`` (paths from get_files) and
    ``is_verbatim``; writes an "<input>.out" file next to each input and a
    combined barcode log at the end.

    NOTE(review): the function name has a typo ("trhough"); renaming it would
    break the call at the bottom of the script, so it is kept as-is.
    """
    barcode_log = set()
    for file_name in fq_files:
        if (is_verbatim):
            print(file_name)
        try:
            # IlluminaUtils fastq reader/writer; the second argument
            # presumably flags gzipped input — confirm against IlluminaUtils.
            f_input = fq.FastQSource(file_name, True)
            f_output = fq.FastQOutput(file_name + ".out")
            while f_input.next(raw = True):
                e = f_input.entry
                # Remember which 5-nt prefix was cut from which file.
                barcode_log.add("%s: %s\n" % (file_name, e.sequence[0:5]))
                cut_barcodes(e, f_output)
        except RuntimeError:
            if (is_verbatim):
                print(sys.exc_info()[0])
        except:
            # NOTE(review): bare except hides real errors; the message below
            # hints at the usual cause (stale .out files from a prior run).
            print("Unexpected error:", sys.exc_info())
            print("Check if there are no '.out' files and remove if any.")
            # NOTE(review): bare `next` below is a no-op — the builtin is only
            # referenced, never called.
            next
    print_barcode_log(barcode_log)
def cut_barcodes(e, f_output):
    """Drop the 5-nt barcode prefix from a fastq entry and write it out."""
    e.sequence, e.qual_scores = e.sequence[5:], e.qual_scores[5:]
    f_output.store_entry(e)
def print_barcode_log(barcode_log):
    """Write the collected "<file>: <barcode>" entries to barcode_files.log.

    Fixes the original's leaked file handle (open() without close on the
    success path was never guaranteed) by using a context manager; the
    redundant list()/str() conversions are dropped.
    """
    log_f_name = "barcode_files.log"
    with open(log_f_name, "w") as log_f:
        log_f.write("".join(barcode_log))
if __name__ == '__main__':
    args = parse_args()
    # These module-level names are read as globals by go_trhough_fastq().
    is_verbatim = args.is_verbatim
    start_dir = args.start_dir
    print("Start from %s" % start_dir)
    # Default extension used when -e/--extension is not given.
    ext = args.extension if args.extension else "test4_2.fastq.gz"
    print("Getting file names")
    fq_files = get_files(start_dir, ext)
    print("Found %s fastq.gz files" % (len(fq_files)))
    go_trhough_fastq()
03198f84a1206d9d79b9e2cc8c2f723c24496321 | Python | aasanov/Python_Classes | /3.py | UTF-8 | 432 | 3.796875 | 4 | [] | no_license | weekday = input("Please enter random number(1-7):")
weekday ==int(weekday)
if weekday == '1':
print("monday")
elif weekday =='2':
print("Tuesday")
elif weekday == '3':
print("Wednesday")
elif weekday == '4':
print("Thursday")
elif weekday == '5':
print("Friday yyyyassss")
elif weekday == '6':
print("Saturday")
elif weekday == '7':
print("Sunday yaaas")
else:
print("please enter a valid number")
| true |
674419c1d9b6cba433498219124604d890051c67 | Python | nccs-neduet/ML_on-_cuckoo_reports | /code/random_forest_for_cuckoo.py | UTF-8 | 518 | 2.703125 | 3 | [] | no_license | import pandas as pd
import csv
import os
from sklearn.ensemble import RandomForestClassifier
path = "./resources/feature_frame.csv"
# save extracted features in a CSV file
cuckoo_report_attributes = pd.read_csv(path)
print( cuckoo_report_attributes.head() )
# # Create the model with 100 trees
# model = RandomForestClassifier(n_estimators=100,
# bootstrap = True,
# max_features = 'sqrt')
# # Fit on training data
# model.fit(train, train_labels) | true |
11eadce15cb0db4a48c029e1cd09a22ef5058077 | Python | SatarupaChak/AOC_2020Challenges | /day6/day6.py | UTF-8 | 1,330 | 2.703125 | 3 | [] | no_license | uniquekey = list()
uniquekeylength = 0
somelengtg = 0
finalcnt = 0
string = ''
somelist= list()
newline =False
counterLine = 0
with open('test.txt') as input:
for x in input.readlines():
if x != '\n':
counterLine += 1
string = x.rstrip('\n')
for y in string:
if y not in uniquekey:
uniquekey.append(y)
else:
somelist.append(y)
line = 0
elif x == '\n':
if len(uniquekey) != 0 and len(somelist) == 0 and counterLine >= 2:
finalcnt += len(somelist)
elif len(uniquekey) != 0 and len(somelist) == 0 and counterLine ==1:
finalcnt += len(uniquekey)
elif len(uniquekey) != 0 and len(somelist) != 0 and counterLine >1:
if len(somelist) == 0 :
finalcnt += len(somelist)
else:
newlist = list()
for x in somelist:
if x not in newlist:
newlist.append(x)
finalcnt += len(newlist)
counterLine = 0
uniquekeylength += len(uniquekey)
somelengtge = len(uniquekey)
uniquekey.clear()
somelist.clear()
print(finalcnt) | true |
d5e32ebcf069a1b8b27302a2069ce4732c243c2a | Python | ybwork/tips | /python/new.py | UTF-8 | 36,480 | 3.390625 | 3 | [] | no_license | '''
Оглавление:
История
О языке
Среда разработки IDLE
Синтаксис
Типы данных:
Числа
Строки
Списки (list)
Кортежи (tuple)
Словари (dict)
Множества (set)
Структуры данных:
Стек
Очередь
Условные операторы if, else, elif
Циклы:
while
for
Функции
lambda функции
Замыкания
Рекурсия
Функции map, filter, reduce, enumerate, zip
Итерируемые
Итераторы
Генераторы
ООП
Исключения
Менеджеры контекста
Регулярные выражения
Метаклассы
Многопоточность
Метаклассы
'''
# История
'''
Создал голандец Гвидо ван Россум в 1991 году.
'''
# О языке
'''
Это язык общего назначения.
Python поддерживает объектно-ориентированное, функциональное, структурное, императивное и аспектно-ориентированное.
Основные черты языка это динамическая типизация, автоматическое управление памятью, поддержка многопоточных вычислений.
Python это интерпретируемый язык программирования, то есть нтерпретатор на ходу исполняет текстовый файл с кодом.
Эталонной реализацией Python является интерпретатор CPython.
В Python всё объект.
'''
# Среда разработки IDLE
'''
IDLE - среда разработки на языке Python, поставляемая вместе с дистрибутивом
'''
# Синтаксис
'''
Конец строки является концом инструкции.
Вложенные инструкции объединяются в блоки по величине отступов. Для отступов лучше использовать 4 пробела.
Иногда возможно записать несколько инструкций в одной строке, разделяя их точкой с запятой.
Допустимо записывать одну инструкцию в нескольких строках. Достаточно ее заключить в пару круглых, квадратных или фигурных скобок.
'''
# Типы данных:
# Числа
'''
Бывают целые, вещественные, комплексные.
Целые называются int, вещественные float, комплексные complex.
Вещественные числа дают неточный результат при округлении, поэтому лучше использовать встроенный модуль decimal. Ключевым компонентом для работы с числами в этом модуле является класс Decimal. При это с Decimal нельзя смешивать float, но можно int.
Комплексное число — это выражение вида a + bi, где a, b — действительные числа, а i — так называемая мнимая единица. Коплексные числа нельзя сравнить.
Для сложных операций над числами используется модель math.
Для работы с комплексными числами используется также модуль cmath.
'''
a = int('19')
b = 1
float('1.23')
f = 12.9
d = Decimal('0.1')
d + 2
c = complex(1, 2) # (1+2j)
abs(12.9 - 150) # позволяет получить модуль числа, то есть отбрасывает знак и получает абсолютную велечину (137.1)
bin(19) # преобразует в двоичную строку ('0b10011')
oct(19) # преобразует в восьмеричную строку ('0o23')
hex(19) # преобразует в шестнадцатеричную строку ('0x13')
round(12.9) # округляет число до ближайшего целого (13)
# Строки
'''
Не изменяемы.
Все функции и методы могут лишь создавать новую строку.
У строки очень медленно работает оператор прибавления, потому что она не изменяемая и интерпретатор удаляет обе строки и создаёт новую, поэтому лучше собрать куски строки в список, а потом сделать join.
Существует возможность использовать два варианта кавычек. Это позволяет не использовать экранирование.
При использовании сырой строки механизм экранирования отключается.
'''
r'test' # сырая строка
'test' + 'new'
'test' * 3
t = 'test'
t[0] # взятие значения по индексу (t)
a = 'testimo'
a[5:2] # mo (срез, начало не будет включенно)
a[2:2:1] # (срез с шагом 1)
'hello {}'.format('Ilya') # подставляет значение на место фигурных скобок (форматирование)
'{0} - {1}'.format('index', 1)
'{text} - {number}'.format(text='index', number=1)
'%s - %d' % ('text', 1)
len('test') # определяет длинну строки
'test'.split(',') # разбивает строчку по символу, при этом если между строками ничего нет, то будет пустая строка
parts = [i for i in 'test']
''.join(parts) # собирает элементы листа в строку
rstrip(), lstrip(), strip() # удалют пробелы в строке (справа, слева, все)
'aga4'.isalpha() # проверяет все ли символы являются буквами
'3434bcg'.isdigits() # проверяет все ли символы являются цифрами
# Списки (list)
'''
Список - это коллекция объектов произвольных типов.
Список можно изменять.
Если вставлять элемент в список сначала, то это медленно. Если в конец, то быстро.
Со списками можно делать операции извлечения, вставки, удаления, слияния, среза.
Есть такое понятие, как список списков. Это матрица.
'''
a = list('test') # ['t', 'e', 's', 't']
b = ['a', 'b', 1]
c = [i for i in 'test'] # создание с помощью генератора ['t', 'e', 's', 't']
a[START:STOP:STEP]
a[1:4:2]
b.append('g') # добавляет элемент в конец списка
b.extend(a) # расширяет список, добавляя в конец все элементы другого списка
b.insert('c', 0) # вставляет значение на место индекса, который указан вторым параметром
b.remove('t') # удаляет первый элемент в списке, имеющий значение 't'. ValueError, если такого элемента не существует
b.pop(0) # удаляет указанный элемент и возвращает его, если индекс не указан, удаляется последний элемент
b.count('e') # возвращает количество элементов со значением 'e'
def my_func(e):
return len(e)
cars = ['Ford', 'Mitsubishi', 'BMW', 'VW']
cars.sort(key=my_func) # сортирует список на основе функции my_func
b.reverse() # разворачивает список и элементы идут в обратном порядке
b.copy() # делает копию списка
b.clear() # очищает список
# Кортежи (tuple)
'''
Кортеж - это неизменяемый список.
Имеет меньший размер чем список.
Можно использовать в качестве ключа для словаря.
'''
a = tuple('a', 'b', 'c')
b = ('a',) # если не поставить запятую, то интерпретатор не поймёт, что это кортеж и тип данных будет строка
c = 'a',
a.reverse()
a.copy()
a.clear()
b.count('e')
# Словари (dictionaries)
'''
Словари - это неупорядочная коллекция произвольных объектов с доступом по ключу.
Иногда их называют ассоциативный массив или хэш-таблица.
Это изменяемый тип данных.
При изменении значения словаря оно меняется, но при этом новая пара ключ-значение не создаётся.
Перебор словаря может идти не в том порядке, как в нём лежат элементы. Это исправляет функция order dict из модуля collections.
'''
a = dict(name='ilya', surname='kaduk')
b = {'name': 'ilya', 'surname': 'kaduk'}
c = dict.fromkeys(['a', 'b'], 100) # создание словаря из ключей со значениями 100
d = {a: a for a in range(7)} # создание с помощью генератора со значениями от 0 до 6
b['name']
if 'jack' in data # проверка наличия значения в словаре
del b['name']
for key, value in data.items(): # возвращает пары (ключ, значение)
d.clear()
d.copy()
d.keys()
d.values()
d.pop('name') # удаляет ключ и возвращает значение
d.popitem() # удаляет и возвращает пару (ключ, значение), если словарь пуст, бросает исключение KeyError, помнить что словарь неупорядочен
d.items() # возвращает пары (ключ, значение)
d.setdefault('phone', 111) # возвращает значение ключа, если его нет, то создает ключ с значением default (по умолчанию None).
# Множества (set)
'''
Это контейнер, содержащий не повторяющиеся элементы в случайном порядке.
Изменяемый тип данных.
Не может быть ключом словаря.
Множества удобно использовать для удаления повторяющихся элементов.
'''
a = set('hello')
a = {'a', 'b', 'c', 'd'}
a = {i for i in range(10)}
words = ['hello', 'daddy', 'hello', 'mum']
set(words) # удалили повторяющиеся элементы
len(a)
if 'a' in a: # принадлежить ли элемент 'a' множеству a
a.copy()
a.union(b) # объединение множеств
a.add('f')
a.remove('f') # KeyError, если такого элемента не существует
a.discard('f') # удаляет элемент, если он находится в множестве
a.pop('f') # удаляет первый элемент из множества. Так как множества не упорядочены, нельзя точно сказать, какой элемент будет первым
# Замороженные множества (frozenset)
'''
Это контейнер, содержащий не повторяющиеся элементы в случайном порядке.
Неизменяемый тип данных.
'''
b = frozenset('qwerty')
len(b)
b.copy()
# Структуры данных
'''
Структуры данных - это способы хранить и организовывать данные.
Структуры данных позволяют производить 4 основных типа действий: доступ, поиск, вставку и удаление.
Структуры данных реализованы с помощью алгоритмов, алгоритмы — с помощью структур данных.
Алгоритм — последовательность совершаемых действий.
'''
# Стек
'''
Это структура данных в которой последний вошедший элемент выходит первым. (последний вошёл - первый вышел)
Стек может переполниться.
'''
stack = [3, 4, 5]
stack.append(6)
stack.pop()
# Очередь
'''
Это структура данных в которой первый вошедший элемент выходит первым.
Списки не эффективны для реализации очереди, потому что извлечение с помощью pop() из начала списка происходит медленно из-за того, что все другие элементы должны быть сдвинуты на один.
Для реализации очереди лучше использовать функцию deque из встроенного модуля collections
'''
queue = deque(["Eric", "John", "Michael"])
queue.append("Terry") # вставляет в конец очереди
queue.popleft() # Первый прибывший теперь покинул
# Условные операторы if, else, elif
'''
Условные операторы выбирают, какое действие следует выполнить, в зависимости от значения переменных в момент проверки условия.
Числа равные 0, пустые объекты и значение None это False.
'''
if a == b:
print('good')
elif a < b:
print('bad')
else:
print('normal')
if (a == 1 and b == 2 and
c == 3 and d == 4):
print('spam' * 3)
if x > y: print(x)
# while
'''
Выполняет тело цикла до тех пор, пока условие цикла истинно.
Работает медленно.
'''
i = 5
while i < 15:
print(i)
i = i + 2
if i == 9:
break # выходит из цикла
if i == 10:
continue # заканчивает текущую ветку исполнения и переходит к новой итерации цикла
else:
print('all worked') # сработает если выход из цикла произошел без помощи break
# for
'''
Этот цикл проходится по любому итерируемому объекту и во время каждого прохода выполняет тело цикла.
Работает быстрее while.
'''
for i in 'hello world':
print(i)
if i == 'c':
break # выходит из цикла
if i == 'o':
continue # заканчивает текущую ветку исполнения и переходит к новой итерации цикла
else:
print('all worked') # сработает если выход из цикла произошел без помощи break
for user in users:
print(user)
[i for i in data]
{i for i in range(10)}
for key, value in data.items():
print(key, value)
# Функции
'''
Функция в python - объект, который может принимать аргументы и возвращать значение.
Именованные аргументы должны идти позже чем порядковые.
Нельзя создавать функции с одинаковыми именами.
Если функция ничего не возвращает, то она возвращает объект None.
Если передаём изменяемые аргументы: list, dict, set в функцию и изменяем их, то их значения меняются не только внтури функции, но и снаружи.
Если передаём не изменяемые аргументы: tuple, str, float, complex, bool, int, frozenset, то значения меняются только внутри функции и создаётся новый объект.
'''
def all():
return 'all users'
def calculate(x, y):
return x + y
def get():
pass
def set(*args): # *args используется для передачи произвольного числа неименованных аргументов функции
return args
def unset(**kwargs): # **kwargs позволяет вам передавать произвольное число именованных аргументов в функцию
return kwargs
all()
calculate(1, 2)
get() # вернёт None
set(1, 2, 3)
kwargs = {"arg3": 3, "arg2": "two", "arg1": 5}
unset(**kwargs)
# Lambda функции
'''
Лямбда функции - это функции анонимные функции или функции без имени.
Анонимные функции могут содержать лишь одно выражение.
Выполняются быстрее обычных.
Не рекомендуется использовать функцию map вместе с лямбда.
'''
lambda x: return x + 1
func = lambda x: return x + 1
func(1)
(lambda x, y: x + y)(1, 2)
# Замыкание
'''
Замыкание - это функция, в теле которой присутствуют ссылки на переменные, объявленные вне тела этой функции и не являющиеся ее аргументами.
'''
def make_adder(x):
    """Return a closure that adds the captured value x to its argument."""
    def _adder(y):
        # x is read from the enclosing scope — this is the closure.
        return x + y
    return _adder
make_adder = lambda x: (
lambda n: x + n
)
f = make_adder(10)
f(5) # 15
# Рекурсия
'''
Это фукнция, которая вызвает саму себя.
Обязательно нужно оставливать рекурсию, потому что она будет продолжаться бесконечно, пока не сожрёт всю память компьютера.
'''
def factorial(n):
    """Recursively compute n! — the base case factorial(0) is 1."""
    return 1 if n == 0 else n * factorial(n - 1)
print(factorial(5))
# Функции map, filter, reduce, enumerate, zip
'''
Это функции для работы с итерируемыми.
'''
'''
map - берёт какую то функцию и применяет для каждого итерируемого, возвращает новый объект, не рекомендуется использовать функцию map вместе с Лямбда.
'''
def func(x):
return x + x
map(func, [1, 2, 3])
print(list(map(func, [1, 2, 3]))) # [2, 4, 6]
'''
filter - возвращает новый набор данных, отфильтрованный по какому то параметру.
'''
a = [1, -4, 6, 8, -10]
def func(x):
if x > 0:
return 1
else:
return 0
b = filter(func, a)
b = list(b)
print(b) # [1, 6, 8]
'''
reduce - применяет переданную функцию сначала к первым двум элементам, потом к результату первых двух элементов и третьему, потом к резльтату первых двух, третьего и четвёртого элемета и т.д.
'''
v = [0, 1, 2, 3, 4]
r = reduce(lambda x, y: x + y, v)
print(r) # 10
'''
enumerate - нумерует каждое итерируемое и возращает новый, пронумерованный объект типа enumerate.
'''
choices = ['pizza', 'pasta', 'salad', 'nachos']
list(enumerate(choices)) # [(0, 'pizza'), (1, 'pasta'), (2, 'salad'), (3, 'nachos')]
'''
zip - позволяет пройтись одновременно по нескольким итерируемым объектам, создает объект-итератор, из которого при каждом обороте цикла извлекается кортеж, состоящий из двух элементов. Первый берется из списка a, второй - из b.
'''
list(zip([1, 2], [5, 7])) # [(1, 5), (2, 7)]
# Итерируемые
'''
Итерируемые - это набор объектов, которые можно по очереди извлекать.
Итерируемым объектом является любая коллекция: список, кортеж, словарь, множество, замороженное множество.
'''
# Итератор
'''
Итератор - это объект, который позволяет двигаться по итерируемым и получать очередной элемент.
Если мы хотим получить все элементы итерируемого, то мы создаем итератор и вызываем у него функцию next пока не закончятся элементы.
Когда элементы заканчиваются выбрасывается исключение StopIteration.
Итератор должен быть итерируемым и возвращать самого себя чтобы его можно было использовать с for.
'''
items = [1, 2, 3]
item = iter(items)
next(item) # 1
next(item) # 2
next(item) # 3
# Генератор
'''
Генератор - функция, которая генерирует значения.
Могут истощаться, то есть если один раз вызвали функцию генератор и прошлись по всем элементам, то при втором вызове она вернёт пустой список.
Для решения проблемы истощения нужно обернуть генератор в список.
'''
def generator():
for i in range(3):
yield i # наличие ключевого слова yield в функции говорит, что это функция генератор при этом return в конце функции не доступен
# ООП
'''
ООП - объектное ориентированное программирование.
Основными состовляющими являются классы, объекты, методы, атрибуты (св-ва).
Класс это модель какой то сущности.
Объект это экземпляр класса.
Методы описывают действия сущности. Компьютер умеет включаться, выключаться, выполнять какие то действия. У каждого метода класса должен быть обязательный аргумент self в который передаётся экземпляр текущего класса. Это позволяет иметь доступ к другим методам и свойствам класса внутри метода.
Атрибуты (св-ва) хранят какие то дополнительные характеристики сущности. Например тип корпуса, размер оперативной памяти и т.д. Нет возможности заранее перечислить какие атрибуты будут в классе. Они создаются на лету в момент выполнения конструктора или метода. Смотря где определены.
Основные постулаты ООП это наследование, инкапсуляция, полиморфизм.
Наследование - это возможность получить в дочернем классе все атрибуты и методы класса родителя.
Инкапсуляция - это ограничение доступа к составляющим объект компонентам (методам или атрибутам).
Полиморфизм - разное поведение одного и того же метода в разных классах.
'''
class Animal():
    def __init__(self): # constructor: runs automatically when an instance is created
        self.type = 'type'
        self.speed = 200
    def eat(self, eat):
        # Echo back whatever food was passed in.
        return eat
    @staticmethod # static method: called on the class itself, no instance is created
    def sleep():
        return 'sleep'
class Dog(Animal): # Наследование - это механизм, которые позволяет получить в одном классе св-ва и методы другого
pass
class A(B, C): # Множественное наследование
pass
class D:
def make_material(self):
return 'make_material'
class F(D):
def create(self):
material = super().make_material() # позволяет обратиться к методу класса родителя
return 'create' + material
class G:
def _show(self): # защищённый метод, который доступен в классе родителе и его наследниках
pass
def __set(self): # приватный метод, который доступен только в классе родителе
pass
# данные модификаторы действуют на уровне соглашения, потому что доступ к этим методам и св-ва возможен.
# Исключения
'''
Исключения - это тип данных необходимый для сообщения об ошибках.
При ошибке интерпретатор генерирует исключение и это тоже объект.
Исключения можно выбрасывать с помощью ключевого слова raise.
Исключение можно перехватывать с помощью try/except.
Делать ошибки своего типа это хорошая практика, потому что таким образом мы их изолируем от системных ошибок. Обычно все классы-исключения создаются в начале модуля. Собственные классы исключений лучше заканчивать словом Error.
Есть два подхода обработки ошибок:
Перед тем как прыгать, нужно посмотреть.
Пишем код в функции, а потом оборачиваем её вызов в if. Если все хорошо, то идём дальше, если нет, то выбрасываем исключение.
Проще попросить прощения, чем разрешения.
Пишем всё что мы хотим сделать, а потом перехватываем все возможные исключения. Питонисты за этот способ.
'''
def get_arg(a):
if a == 0:
raise ValueError() # выбрасываем исключение
try:
get_arg(0)
except ValueError as e:
print(e.message) # выполнится если было перехваченно исключение и выведет сообщение об ошибке
else:
print('all rigth') # выполняется в том случае если исключения не было
finally:
print('need show') # выполняется в любом случае
class MyError(Exception): # создание собственного исключения
'''
Внутри кастомного класса исключения можно использовать две функции:
__init__ для создания исключения
__str__ для вывода на экран
Но можно просто отнаследоваться от базового класса исключения и завершить свой класс оператором pass.
'''
pass
# Менеджеры контекста
'''
Менеджер контекста - это объект, который создает контекст выполнения внутри себя.
Нужны для гарантии того, что критические функции выполнятся в любом случае.
Используются для закрытия файлов и с транзакциями.
'''
# в этом случае файл в любом случае будет закрыт
with open('newfile.txt', 'w', encoding='utf-8') as g:
d = int(input())
with transaction.atomic():
do_something()
# Многопоточность
'''
Процесс – это часть виртуальной памяти и ресурсов, которую ОС выделяет для выполнения программы. Если открыть несколько экземпляров одного приложения, под каждый система выделит по процессу.
В современных браузерах за каждую вкладку может отвечать отдельный процесс.
Тяжёлый процесс делят на потоки, которые занимают меньше ресурсов и скорее доносят код до вычислителя.
У каждого приложения есть как минимум один процесс, а у каждого процесса минимум один поток, который называют главным и из которого при необходимости запускают новые.
Потоки используют память, выделенную под процесс, а процессы требуют себе отдельное место в памяти. Поэтому потоки создаются и завершаются быстрее.
Процессы работают каждый со своими данными — обмениваться чем-то они могут только через механизм межпроцессного взаимодействия. Потоки обращаются к данным и ресурсам друг друга напрямую: что изменил один — сразу доступно всем.
Если вам нужно как можно быстрее обработать большой объём данных, разбейте его на куски, которые можно обрабатывать отдельными потоками, а затем соберите результат воедино.
Многопоточность — это когда процесс приложения разбит на потоки, которые параллельно — в одну единицу времени — обрабатываются процессором.
Вычислительная нагрузка распределяется между двумя или более ядрами, так что интерфейс и другие компоненты программы не замедляют работу друг друга.
Многопоточные приложения можно запускать и на одноядерных процессорах, но тогда потоки выполняются по очереди: первый поработал, его состояние сохранили — дали поработать второму, сохранили — вернулись к первому или запустили третий.
Представьте, что несколько потоков пытаются одновременно изменить одну и ту же область данных. Чьи изменения будут в итоге приняты, а чьи — отменены? Чтобы работа с общими ресурсами не приводила к путанице, потокам нужно координировать свои действия. Для этого они обмениваются информацией с помощью сигналов. Каждый поток сообщает другим, что он сейчас делает и каких изменений ждать. Этот процесс называется синхронизацией.
Основные средства синхронизации:
Взаимоисключение - флажок, переходящий к потоку, который в данный момент имеет право работать с общими ресурсами. Исключает доступ остальных потоков к занятому участку памяти. Мьютексов в приложении может быть несколько, и они могут разделяться между процессами. Есть подвох: mutex заставляет приложение каждый раз обращаться к ядру операционной системы, что накладно.
Семафор — позволяет вам ограничить число потоков, имеющих доступ к ресурсу в конкретный момент. Так вы снизите нагрузку на процессор при выполнении кода, где есть узкие места. Проблема в том, что оптимальное число потоков зависит от машины пользователя.
Событие — вы определяете условие, при наступлении которого управление передаётся нужному потоку.
Организовать параллельные вычисления в Python без внешних библиотек можно с помощью модулей:
threading (для управления потоками)
queue (для работы с очередями)
multiprocessing (для управления процессами)
'''
# Метаклассы
'''
Это объект создающий другие объекты.
В django работа с базой данный реализованна с помощью метаклассов.
type(name, base, attrs) - самый главный метакласс, name - имя класса, bases - классы родителей, attrs - атрибуты
Класс является объектом для метакласса.
Перед тем как создастся класс он попадёт в функцию type()
Выгода метаклассов в том, что мы можем что то сделать до создания экземпляра класса. В django так сделана работа с БД. То есть в классе модель мы определяем поля нашей базы и они создаются до создания экземпляра класса модели.
'''
class TestClass(object):
pass
TestClass = type('TestClass', (), {}) # функция type позволяет создать классы на ходу | true |
d4aa25d10ea1cf248c56ab14b7c0fc31ccdc125c | Python | AmyShackles/algo-practice | /LeetCode/Easy/Python3/tests/test_longestpalindrome.py | UTF-8 | 734 | 3.125 | 3 | [] | no_license | import unittest
from Python3.longestpalindrome import Solution
class TestlongestPalindrome(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1(self):
# For sanity checking:
# Input: s = "abccccdd"
# Output: 7
self.assertEqual(Solution.longestPalindrome("abccccdd"), 7)
def test_2(self):
# For sanity checking:
# Input: s = "a"
# Output: 1
self.assertEqual(Solution.longestPalindrome("a"), 1)
def test_3(self):
# For sanity checking:
# Input: s = "bb"
# Output: 2
self.assertEqual(Solution.longestPalindrome("bb"), 2)
if __name__ == "__main__":
unittest.main()
| true |
b0f2bb89e1bbfacd95df9147d48e30934883181e | Python | ankitk50/AMS | /part3_simstudy.py | UTF-8 | 774 | 3.34375 | 3 | [] | no_license | """
This file should be used to keep all necessary code that is used for the verification section in part 3 of the
programming assignment. It contains tasks 3.2.1 and 3.2.2.
"""
def task_3_2_1():
    """
    This function plots two histograms for verification of the random distributions.
    One histogram is plotted for a uniform distribution, the other one for an exponential distribution.
    """
    # TODO Task 3.2.1: Your code goes here
    # Placeholder: intentionally empty until the task is implemented.
    pass
def task_3_2_2():
    """
    Here, we execute task 3.2.2 and print the results to the console.
    The first result string keeps the results for 100s, the second one for 1000s simulation time.
    """
    # TODO Task 3.2.2: Your code goes here
    # Placeholder: intentionally empty until the task is implemented.
    pass
if __name__ == '__main__':
task_3_2_1()
task_3_2_2()
| true |
36abcda20570d6ea45819978ab4dc4f53599b4aa | Python | thushanp/CodingPuzzles | /binarysearch2.py | UTF-8 | 673 | 4.25 | 4 | [] | no_license | # better binary search
# uses two indices instead of recursion so you can return the index value
# uses less memory
def binary_search(array, target):
    """Return an index of *target* in the sorted *array*, or None if absent.

    Iterative binary search over the half-open range [lower, upper), so the
    matching index can be returned directly without recursion.

    Fix: the original advanced with ``lower = x`` (never excluding the probed
    midpoint) and patched the resulting stall with a ``lower == x: break``
    hack; advancing to ``mid + 1`` removes the special case, and the miss
    case now returns None explicitly.
    """
    lower = 0
    upper = len(array)
    while lower < upper:
        mid = lower + (upper - lower) // 2
        val = array[mid]
        if target == val:
            return mid
        if target > val:
            lower = mid + 1   # search the upper half, excluding mid
        else:
            upper = mid       # search the lower half
    return None
print(binary_search([1,2,3,4,5,5,6,7,8,8,9,10,11,13,15,15,16,17,18,18,20,21,23,23,23,25,26], 4)) | true |
2b5cda96a12ed664203ef73b8f0919f9485b3216 | Python | amirf147/IoT_Battery_Management_System | /settings.py | UTF-8 | 5,119 | 3.25 | 3 | [] | no_license | import sys
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == "__main__":
print("I prefer to be a module.")
sys.exit()
class Csv:
    """Cleans raw CSV datalogs whose decimal points were exported as commas.

    Reads a raw file from ``rawfiles_path`` and writes a cleaned copy into
    ``cleanfiles_path`` with every "X,XXXX" value turned back into "X.XXXX".

    Fixes vs. the original: file handles in to_lines()/write_to_file() are
    now closed via context managers, and the character-splitting /
    comma-counting passes are expressed directly instead of via index loops.
    """

    # Class-level configuration shared by all instances.
    rawfiles_path = 'datalogs\\raw\\'
    cleanfiles_path = 'datalogs\\cleaned\\'
    column_names = []  # replaced per-instance by to_lines()

    def __init__(self, file_name):
        """Remember the name of the raw file to clean (no I/O yet)."""
        self.file_name = file_name

    def to_lines(self):
        """Read the raw file and return only its data lines.

        Side effect: stores the table's column-name row (line 5 of the file)
        in ``self.column_names``. The first 6 and last 4 lines of the raw
        file carry no data and are dropped.
        """
        with open(self.rawfiles_path + self.file_name, "r") as raw_file:
            lines = raw_file.readlines()
        self.column_names = lines[4]  # holds the column names of the table
        return lines[6:-4]

    def to_chars(self, lines):
        """Split every line into a list of its single characters."""
        return [list(line) for line in lines]

    def fix_decimals(self, divided_lines):
        """Turn "X,XXXX" values back into "X.XXXX" and rebuild the lines.

        In each line the first nine commas are real field separators; from
        the 10th comma on, every even-numbered comma is actually a decimal
        point and is replaced in place (the input lists are mutated, as in
        the original implementation).
        """
        lines = []
        for chars in divided_lines:
            comma_count = 0
            for index, char in enumerate(chars):
                if char == ',':
                    comma_count += 1
                    if comma_count >= 10 and comma_count % 2 == 0:
                        chars[index] = '.'
            lines.append(''.join(chars))
        return lines

    def write_to_file(self, lines, new_file_suffix='_clean.csv'):
        """Write column names plus the cleaned lines under cleanfiles_path.

        Requires to_lines() to have run first so ``self.column_names`` holds
        the header row string.
        """
        out_name = self.cleanfiles_path + self.file_name[:-4] + new_file_suffix
        with open(out_name, 'w+') as out_file:
            out_file.write(self.column_names)  # column names on the first line
            out_file.writelines(lines)
class Dataframe:
def __init__(self, file_name, row_interval = 1):
self.file_name = 'datalogs\\cleaned\\' + file_name
self.row_interval = row_interval
def clean(self):
df = pd.read_csv(self.file_name)
df = df.rename(columns = {'Elapsed Time ': 'Elapsed Time',
' CELL01': 'Cell 1', ' CELL02': 'Cell 2',
' CELL03': 'Cell 3', ' CELL04': 'Cell 4',
' CELL05': 'Cell 5', ' CELL06': 'Cell 6',
' SOC ': 'SoC'})
df = df.iloc[1::self.row_interval] #only keep the first and every interval after
df = df.reset_index()
df = df.drop(['index'], axis = 1)
return df
def add_time_column(self, data_frame, hms):
'''call with dataframe object and specify if want time in hours
minutes or seconds by passing 'h', 'm' or 's' to hms parameter'''
times_list = [] #holder for the conversion of elapsed time to time in hours
A, B, C = 0, 0, 0 #Time conversion coefficients
if hms == 'h':
A, B, C = 1, 1 / 60, 1 / 3600
column_title = 'Time (hours)'
elif hms == 'm':
A, B, C = 60 , 1, 1 / 60
column_title = 'Time (minutes)'
elif hms == 's':
A, B, C = 3600, 60, 1
column_title = 'Time (seconds)'
else:
return 'Error: hms should be string "h" or "m" or "s"'
for row_number in range(len(data_frame)):
times_list.append(data_frame.loc[row_number, 'Elapsed Time'])
hours = int(times_list[row_number][3])
minutes = int(times_list[row_number][5:7])
seconds = int(times_list[row_number][8:10])
times_list[row_number] = (hours * A) + (minutes * B) + (seconds * C)
hours_column = pd.Series(times_list)
data_frame.insert(loc = 0, column = column_title, value = hours_column)
class Plot:
def __init__(self, title, x_values, y_values):
self.title = title
self.x_values = x_values
self.y_values = y_values
def set_options(self, df, nx = 20, ny = 20, show_plot = False):
df.plot(x = self.x_values, y = self.y_values)
plt.title(self.title)
plt.ylabel('Voltage (V)')
ax = plt.axes()
ax.xaxis.set_major_locator(plt.MaxNLocator(nx))
ax.yaxis.set_major_locator(plt.MaxNLocator(ny))
plt.minorticks_on()
plt.grid(axis = 'both', which='both', color = 'gainsboro',
linestyle = '-', linewidth = 0.7)
if show_plot:
plt.show()
| true |
5c5f8a7c1eb58d49982efc974e8e90753928c662 | Python | sjhonny03/inferential_stats_project | /q01_cond_prob/build.py | UTF-8 | 332 | 2.765625 | 3 | [] | no_license | # So that float division is by default in python 2.7
from __future__ import division
import pandas as pd
df = pd.read_csv('data/house_pricing.csv')
# Enter Code Here
def cond_prob(df):
cnt=len(df)
cnt1=len(df.loc[df['Neighborhood']=='OldTown'])
res=( (cnt1*(cnt1-1)*(cnt1-2))/ (cnt*(cnt-1)*(cnt-2)) )
return res
| true |
17e64b805f9cdf96fc7a9ca25728a60c0d947ffd | Python | slipsnip/bitesofpy | /91/anyall.py | UTF-8 | 722 | 3.6875 | 4 | [] | no_license | import re
VOWELS = 'aeiou'
PYTHON = 'python'
def contains_only_vowels(input_str):
"""Receives input string and checks if all chars are
VOWELS. Match is case insensitive."""
# use regex for this one
vowels_found = re.findall(r'[aeiou]', input_str, flags=re.I)
return len(vowels_found) == len(input_str)
def contains_any_py_chars(input_str):
"""Receives input string and checks if any of the PYTHON
chars are in it. Match is case insensitive."""
return any([True for char in input_str if char.upper() in PYTHON.upper()])
def contains_digits(input_str):
"""Receives input string and checks if it contains
one or more digits."""
return re.search(r'\d+', input_str)
| true |
cf5b39f78a07b0f9d6a063fccbb9a286178c0e02 | Python | tom-uchida/brightness-adjustment | /src/detect_quartile.py | UTF-8 | 3,998 | 3.078125 | 3 | [] | no_license | #
# 四分位数を検出するプログラム
#
import cv2, matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sys
args = sys.argv
plt.style.use('seaborn-white')
from matplotlib import cycler
colors = cycler('color', ['#EE6666', '#3388BB', '#9988DD', '#EECC55', '#88BB44', '#FFBBBB'])
plt.rc('axes', facecolor='#E6E6E6', edgecolor='none', axisbelow=True, grid=False, prop_cycle=colors)
plt.rc('grid', color='w', linestyle='solid')
plt.rc('patch', edgecolor='#E6E6E6')
plt.rc('lines', linewidth=2)
# ------------------------------
# ----- Placement settings -----
# ------------------------------
fig, ax = plt.subplots(3, figsize=(9, 8)) # figsize(width, height)
fig.subplots_adjust(hspace=0.4, wspace=0.4) # interval
ax[0] = plt.subplot2grid((2,2), (0,0))
ax[1] = plt.subplot2grid((2,2), (0,1))
ax[2] = plt.subplot2grid((2,2), (1,0), colspan=2)
# ----------------------------
# ----- Read input image -----
# ----------------------------
def read_img(_img_name):
# read input image
img = cv2.imread(_img_name)
# convert color (BGR → RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
img_1 = read_img(args[1])
img_2 = read_img(args[2])
# image information(height × width × 色数)
# print("img_origin : ", img_1.shape)
# print("img_noised : ", img_2.shape)
# print("\n")
# ----------------------------
# ----- Show input image -----
# ----------------------------
def show_img(_i, _img, _img_name):
ax[_i].set_title(_img_name)
# show image
ax[_i].imshow(_img)
return
show_img(0, img_1, "Input image")
show_img(1, img_2, "Input image (LR=1)")
# -------------------------------
# ----- Convert RGB to Gray -----
# -------------------------------
img_1_gray = cv2.cvtColor(img_1, cv2.COLOR_RGB2GRAY)
img_2_gray = cv2.cvtColor(img_2, cv2.COLOR_RGB2GRAY)
# exclude pixel value == 0
img_1_gray_nonzero = img_1_gray[img_1_gray > 0]
img_2_gray_nonzero = img_2_gray[img_2_gray > 0]
print("gray_img_1 : ", img_1_gray.shape)
print("gray_img_2 : ", img_2_gray.shape)
print("\n")
# -----------------------------------------------
# ----- Get statistical data of pixel value -----
# -----------------------------------------------
from scipy import stats
def get_data_of_pixel_value(_pixel_values):
print("===== Statistical Data of Pixel Values =====")
#最大値
print ("Max : ", np.max(_pixel_values))
#最小値
print ("Min : ", np.min(_pixel_values))
#平均値
mean = np.mean(_pixel_values)
print ("Mean : ", mean)
#第1四分位
first_quater = stats.scoreatpercentile(_pixel_values, 25)
print ("1Q : ", first_quater)
#中央値
median = np.median(_pixel_values)
print ("Median : ", median)
#第3四分位
third_quater = stats.scoreatpercentile(_pixel_values, 75)
print ("3Q : ", third_quater)
#標準偏差
print ("SD : " + str(np.std(_pixel_values)))
print("\n")
return mean, first_quater, median, third_quater
mean1, first_quater1, median1, third_quater1 = get_data_of_pixel_value(img_1_gray_nonzero)
mean2, first_quater2, median2, third_quater2 = get_data_of_pixel_value(img_2_gray_nonzero)
# ----------------------
# ----- Matplotlib -----
# ----------------------
ax[2].hist(img_1_gray_nonzero.ravel(), bins=255, color='r', alpha=0.5, label="Input image")
ax[2].hist(img_2_gray_nonzero.ravel(), bins=255, color='b', alpha=0.5, label="Input image (LR=1)")
# draw line
ax[2].axvline(mean1, color='black')
ax[2].axvline(median1, color='r')
ax[2].axvline(first_quater1, color='r')
ax[2].axvline(third_quater1, color='r')
ax[2].axvline(mean2, color='black')
ax[2].axvline(median2, color='b')
ax[2].axvline(first_quater2, color='b')
ax[2].axvline(third_quater2, color='b')
ax[2].set_title("Comparative histograms", fontsize=12)
ax[2].set_xlabel("Pixel value", fontsize=12)
ax[2].set_ylabel("Number of pixels", fontsize=12)
ax[2].set_xlim([-5, 260])
ax[2].legend(fontsize=12)
plt.show() | true |
54cc975dbfe20f006b295f16161fd5647d6e7b5a | Python | ayush1301/Martingale-Betting-System-in-Blackjack | /blackjack.py | UTF-8 | 13,678 | 3.078125 | 3 | [] | no_license | import csv
import sys
import random
import matplotlib.pyplot as plt
#for efficiency, consider using lists instead of dictionaries
#for efficiency, consider addinf the indeces of all 11s in a list to make searching easier
#"Variable constants"
#number of decks of cards played with
decks = 6
#n such that (1/n) of total money represents min bet --- (Most important)
fraction = 1000
max_money = 1000
#maximum_splits_allowed = 3
num_players = 2
rounds = 1000
currency = '$'
show_plot = True
print_result = True
#'£' '₹' '€' '¥' '$'
#Other important global variables
shoe = []
players = []
game_type = None
soft_data = None
hard_data = None
split_data = None
dealer = None
decks_played = 0
#all data is a dictionary in form -- variable['player number']['dealer number']
def main():
game()
def game():
global game_type, soft_data, hard_data, split_data, dealer, rounds, decks_played
if len(sys.argv) == 1 or sys.argv[1].upper() == 'H17':
game_type = 'H17'
get_data()
elif sys.argv[1].upper() == 'S17':
game_type = 'S17'
get_data()
else:
print("Enter correct command line arguments")
sys.exit(1)
#pre-trial setup
for i in range(num_players):
players.append(Player(max_money))
dealer = Dealer()
#setup for plotting
x = [0]
y = {f'Player {i + 1}': [max_money] for i in range(len(players))}
round_num = 1
while round_num <= rounds:
#dealing two cards to each player and 1 to dealer
for player in players:
if player.paused:
continue
player.add_hand(None)
player.hands[0].deal()
dealer.hand.deal()
for player in players:
if player.paused:
continue
player.hands[0].deal()
#players playing the basic strategy
for player in players:
if player.paused:
continue
for hand in player.hands:
hand.action()
dealer.hand.action()
#calculating results of all hands
for n, player in enumerate(players):
if player.paused:
continue
for hand in player.hands:
hand.result()
if print_result:
print(f'Round {round_num}: Player {n + 1} ', end="")
hand.print_all()
if print_result:
print(f'Round {round_num}: ', end="")
dealer.hand.print_all()
#plotting updates
x.append(round_num)
for i in range(len(players)):
y[f'Player {i + 1}'].append(players[i].money_left)
#preparing for next round
for player in players:
if player.paused:
continue
player.reset()
dealer.reset()
#pausing players if they're out of money
for player in players:
if player.money_left == 0:
player.paused = True
#ending game if all players are out
if all(player.money_left == 0 for player in players):
print('All players lost')
break
#extending rounds until the player wins the current martingale game
if round_num == rounds:
for player in players:
if player.current_loss <= 0:
player.paused = True
rounds += 1
#finishing game if no players have a loss in the current martingale game
if all(player.paused for player in players):
print('Game successfully over')
break
# while loop continuation
if round_num < rounds:
round_num += 1
#Printing Final stats
print(f'Rounds played = {round_num}, Decks finished <= {decks_played}')
#Plotting graph
if show_plot:
for player, f in sorted(y.items()):
fmax = max(f)
fmax_indeces = [index for index, item in enumerate(f) if item == fmax]
fmin = min(f)
if fmin == 0:
fmin_indeces = [f.index(fmin)]
else:
fmin_indeces = [index for index, value in enumerate(f) if value == fmin]
#Critical in order [start, end, max, min]
critical = [x[0], x[-1], x[fmax_indeces[-1]], x[fmin_indeces[-1]]]
plt.plot(x, f, '-o', markevery=critical, label=player)
plt.annotate(f'({x[0]}, {f[0]:.2f})', (x[0], f[0]), ha='center', xytext=(0, 5), textcoords='offset points')
plt.annotate(f'({x[-1]}, {f[-1]:.2f})', (x[-1], f[-1]), ha='center', xytext=(0, 5), textcoords='offset points')
plt.annotate(f'({critical[2]}, {fmax:.2f})', (critical[2], fmax), ha='center', xytext=(0, 5), textcoords='offset points')
plt.annotate(f'({critical[3]}, {fmin:.2f})', (critical[3], fmin), ha='center', xytext=(0, 5), textcoords='offset points')
plt.title(f'Martingale Betting Strategy Applied to {game_type} Blackjack Game')
plt.xlabel('Number of Rounds')
plt.ylabel(f'Money in {currency} with Player')
plt.legend()
plt.show()
def get_data():
global soft_data, hard_data, split_data
soft_data = form_dict(f'{game_type} Soft.csv')
hard_data = form_dict(f'{game_type} Hard.csv')
split_data = form_dict(f'{game_type} Split.csv')
def form_dict(file_name):
file = open(file_name, encoding='utf-8-sig')
return {row['Input'] : row for row in csv.DictReader(file)}
class Player:
def __init__(self, money):
self.hands = []
self.money_left = money
self.current_loss = 0
self.min_bet = money / fraction
self.paused = False
self.dying = False
def add_hand(self, bet):
if bet == None:
bet = self.decide_bet()
self.hands.append(Hand(self, bet))
self.money_left -= bet
self.current_loss += bet
return self.hands[-1]
def decide_bet(self):
if self.current_loss < 0:
self.current_loss = 0
bet = self.current_loss + self.min_bet
if bet > self.money_left:
self.dying = True
return self.money_left
else:
return bet
def reset(self):
self.hands.clear()
self.dying = False
class Dealer(Player):
def __init__(self):
self.net = 0
self.hand = DealerHand(self)
def reset(self):
self.hand = DealerHand(self)
class Hand:
def __init__(self, player, bet):
self.bet = bet
self.player = player
self.hand_setup()
def hand_setup(self):
self.cards = []
self.aces_dealt = 0
self.type = 'hard'
self.value = 0
self.is_split = False
def deal(self):
while True:
if len(shoe) == 0:
global decks_played
decks_played += decks
for i in range(decks * 4):
shoe[len(shoe):] = [2,3,4,5,6,7,8,9,10,10,10,10,11]
else:
new_card = shoe.pop(random.randint(0, len(shoe) - 1))
if new_card == 11:
self.aces_dealt += 1
self.cards.append(new_card)
if self.aces_dealt:
self.hand_type()
self.hand_value()
break
#only for testig purposes
###############
def deal_particular_card(self, card):
self.cards.append(card)
if card == 11:
self.aces_dealt += 1
self.update()
##############
def print_all(self):
print(f'cards = {self.cards}, value = {self.value}, bet = {self.bet:.2f}{currency}, ', end="")
print(f'money_left = {self.player.money_left:.2f}{currency}, current_loss = {self.player.current_loss:.2f}{currency}')
def hand_type(self):
if self.aces_dealt:
self.type = 'soft'
else:
self.type = 'hard'
def hand_value(self):
value = sum(self.cards)
if len(self.cards) == 2 and value == 21 and not self.is_split: #len(self.player.hands) == 1
self.value = 'blackjack'
elif value <= 21:
self.value = value
else:
if self.type == 'hard':
self.value = 'bust'
else:
if self.aces_dealt:
for i in range(len(self.cards)):
if self.cards[i] == 11:
self.cards[i] = 1
self.aces_dealt -= 1
self.update()
break
def update(self):
#efficiency improvement opportunity
self.hand_type()
self.hand_value()
def response_val(self, data):
return data[str(self.value)][str(dealer.hand.value)]
def response(self, data_type):
if self.response_val(data_type) == 'D':
if self.player.money_left < self.bet: #check logic
if not self.value in range(18, 20):
self.deal()
self.action()
return
else:
return
self.player.money_left -= self.bet
self.player.current_loss += self.bet
self.bet *= 2
self.deal()
elif self.response_val(data_type) == 'H':
self.deal()
self.action()
elif self.response_val(data_type) == 'S':
pass
else:
print('Error in response')
sys.exit(3)
def split_max_check(self):
try:
if len(self.player.hands) > maximum_splits_allowed:
self.response(hard_data)
#return
else:
self.split()
self.action()
#return
except NameError:
self.split()
self.action()
def action(self):
if isinstance(self.value, str):
return
if self.is_split:
self.deal()
self.is_split = False
if len(self.cards) == 2:
if self.cards[0] == self.cards[1]:
if self.response_val(split_data) == 'Y':
#add max split error block
if self.player.money_left < self.bet: #or len(self.player.hands) > maximum_splits_allowed:
self.response(hard_data)
#return
else:
self.split_max_check()
return
elif self.cards[0] == 1 and self.cards[1] == 11:
if self.player.money_left < self.bet: #or len(self.player.hands) > maximum_splits_allowed: # check logic
self.response(hard_data)
#return
else:
self.split_max_check()
return
if self.type == 'soft':
self.response(soft_data)
elif self.type == 'hard':
self.response(hard_data)
def split(self):
new_hand = self.player.add_hand(self.bet)
new_hand.cards.append(self.cards.pop(1))
if 1 in self.cards:
self.cards[0] = 11
self.aces_dealt = 1
new_hand.aces_dealt = 1
self.update()
self.is_split = True
new_hand.is_split = True
new_hand.update()
#need to check
def result(self):
#efficiency improvement opportunity
if self.value == 'bust':
self.lose()
elif self.value == 'blackjack':
if dealer.hand.value == 'blackjack':
self.push()
else:
self.win()
elif isinstance(dealer.hand.value, str):
if dealer.hand.value == 'blackjack':
self.lose()
else:
self.win()
elif dealer.hand.value > self.value:
self.lose()
elif dealer.hand.value < self.value:
self.win()
elif dealer.hand.value == self.value:
self.push()
else:
print('Logic error in result function')
sys.exit(2)
def win(self):
if self.value == 'blackjack':
self.player.money_left += 2.5 * self.bet
self.player.current_loss -= 2.5 * self.bet
dealer.net -= 1.5 * self.bet
else:
self.player.money_left += 2 * self.bet
self.player.current_loss -= 2 * self.bet
dealer.net -= self.bet
if self.player.dying:
self.player.current_loss = 0
#add change to betting
def lose(self):
dealer.net += self.bet
def push(self):
self.player.money_left += self.bet
self.player.current_loss -= self.bet
class DealerHand(Hand):
def __init__(self, player): #think if is_split affects dealer
self.hand_setup()
self.dealer = player
def action(self):
while True:
if isinstance(self.value, str):
break
elif self.value < 17:
self.deal()
elif self.value > 17:
break
elif self.value == 17:
if self.type == 'hard':
break
else:
if game_type == 'H17':
self.deal()
else:
break
def print_all(self):
print(f'Dealer cards = {self.cards}, value = {self.value}, net = {self.dealer.net:.2f}{currency}')
main() | true |
059f9fcb4c2fb35ff0f4298abb94360df699f8b4 | Python | zackbaker/space_invaders | /projectiles/laser.py | UTF-8 | 656 | 3.21875 | 3 | [] | no_license | import os
import pygame
class Laser:
def __init__(self, x, y, img):
self.x = x
self.y = y
self.velocity = 2
dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.img = pygame.image.load(os.path.join(dirname, 'assets', img))
self.mask = pygame.mask.from_surface(self.img)
def move(self, direction):
if direction == 'up':
self.y -= self.velocity
elif direction == 'down':
self.y += self.velocity
def draw(self, window):
window.blit(self.img, (self.x, self.y))
def get_height(self):
return self.img.get_height()
| true |
58a204e9cba10b4cb53a363854cb11de8abad684 | Python | DeseanCarter/wiki-search | /test_wiki.py | UTF-8 | 1,064 | 2.625 | 3 | [] | no_license | import requests
from flask import Flask, jsonify
app = Flask(__name__)
API = ''
#GET request
@app.route('/')
def hello_world():
payload = {
"action": "query",
"format": "json",
"titles": "dog",
"prop": "info|iwlinks",
"limit": 500,
"iwprop": "url",
}
search_results = requests.get('http://en.wikipedia.org/w/api.php?', params= payload)
search_results.status_code
DATA = search_results.json()
PAGES = DATA["query"]["pages"]
links = []
new_dict = {"links":links}
for key, value in PAGES.items():
print(key)
singleurl = PAGES[key]["iwlinks"]
singleurl2 = PAGES[key]["iwlinks"][0]["url"]
print(singleurl2)
# length of point in dict
print(len(PAGES[key]))
for x in singleurl:
links.append(x["url"])
print(new_dict)
import json
return jsonify(new_dict)
import pdb; pdb.set_trace()
if __name__ == '__main__':
# run app in debug mode
app.config['SERVER_NAME'] = 'wiki-search.com:5000'
app.run(debug=True)
| true |
392abfa4c90fdb35c6057f1e617669e90ccb6e4f | Python | kteja95/CSPP1 | /cspp1-assignments/m18exam4final/assignment3/tokenize.py | UTF-8 | 652 | 4.28125 | 4 | [] | no_license | '''
Write a function to tokenize a given string and return a dictionary with the frequency of
each word
'''
import re
def tokenize(string):
'''THIS FUNCTION RETURNS THE DICTIONARY'''
dict1 = {}
text = re.sub(r'[^a-zA-Z0-9]', " ", string)
words = text.split()
for items in words:
if items in dict1:
dict1[items] += 1
else:
dict1[items] = 1
return dict1
def main():
'''THE MAIN FUNCTION HERE PASSES THE STRING TO THE FUCNTION'''
lines = int(input())
for _ in range(lines):
text = ''.join(input())
print(tokenize(text))
if __name__ == '__main__':
main()
| true |
74c05705ed2615721bacd560fb2ff803997d53c7 | Python | yyw13/wang_yuanyuan_Assignment1 | /question 5/problem 5.py | UTF-8 | 668 | 3.734375 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
start = -14 ## the start number is 10^-14
stop = -4 ## the stop number is 10^-4
## cut the total interval to n small intervals
## k is the array of every start point of small intervals
d = np.logspace(start, stop, num=100, base=10, endpoint=True)
## computer the derivative of f(x), when x = 1
x = 1
def f(d):
return (((x+d)*(x+d-1)) - (x*(x-1)))/d
plt.plot(d,f(d))
plt.ylabel('Derivative of f(x)') ## it's the plot of frequency vs chi
plt.xlabel('Delta')
plt.show()
## We found that the plot is a straight line
## the smaller the k we choose, the closer the derivative of f(x) is close to the true value | true |
9adb8ee2fa2e678540ce697e1ddd0a336d8fe262 | Python | jasdiq/Data-Science-Assignments | /Forecasting/cocacola.py | UTF-8 | 3,590 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat May 30 12:09:41 2020
@author: HP
"""
import pandas as pd
coca = pd.read_csv("C:\\Users\\HP\\Desktop\\ABubakar Files\\abu_Data_Science\Assignments\\Forecasting\\CocaCola_Sales_Rawdata.csv")
quarter =["Q1", "Q2", "Q3", "Q4"]
import numpy as np
p = coca["Quarter"][0]
p[0:3]
coca['quarter']= 0
for i in range(42):
p = coca["Quarter"][i]
coca['quarter'][i]= p[0:3]
coca.head()
q_dummies = pd.DataFrame(pd.get_dummies(coca['quarter']))
coca1 = pd.concat([coca,q_dummies],axis = 1)
coca1["t"] = np.arange(1,43)
coca1["t_squared"] = coca1["t"]*coca1["t"]
coca1.columns
coca1["log_sales"] = np.log(coca1["Sales"])
#airline1.rename(columns={"Ridership ": 'Ridership'}, inplace=True)
coca1.Sales.plot()
Train = coca1.head(30)
Test = coca1.tail(13)
# to change the index value in pandas data frame
# Test.set_index(np.arange(1,13))
####################### L I N E A R ##########################
import statsmodels.formula.api as smf
linear_model = smf.ols('Sales~t',data=Train).fit()
pred_linear = pd.Series(linear_model.predict(pd.DataFrame(Test['t'])))
rmse_linear = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(pred_linear))**2))
rmse_linear
##################### Exponential ##############################
Exp = smf.ols('log_sales~t',data=Train).fit()
pred_Exp = pd.Series(Exp.predict(pd.DataFrame(Test['t'])))
rmse_Exp = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(np.exp(pred_Exp)))**2))
rmse_Exp
#################### Quadratic ###############################
Quad = smf.ols('Sales~t+t_squared',data=Train).fit()
pred_Quad = pd.Series(Quad.predict(Test[["t","t_squared"]]))
rmse_Quad = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(pred_Quad))**2))
rmse_Quad
################### Additive seasonality ########################
add_sea = smf.ols('Sales~Q1_+Q2_+Q3_+Q4_',data=Train).fit()
pred_add_sea = pd.Series(add_sea.predict(Test[['Q1_','Q2_','Q3_','Q4_']]))
rmse_add_sea = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(pred_add_sea))**2))
rmse_add_sea
################## Additive Seasonality Quadratic ############################
add_sea_Quad = smf.ols('Sales~t+t_squared+Q1_+Q2_+Q3_+Q4_',data=Train).fit()
pred_add_sea_quad = pd.Series(add_sea_Quad.predict(Test[['Q1_','Q2_','Q3_','Q4_','t','t_squared']]))
rmse_add_sea_quad = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(pred_add_sea_quad))**2))
rmse_add_sea_quad
################## Multiplicative Seasonality ##################
Mul_sea = smf.ols('log_sales~Q1_+Q2_+Q3_+Q4_',data = Train).fit()
pred_Mult_sea = pd.Series(Mul_sea.predict(Test))
rmse_Mult_sea = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(np.exp(pred_Mult_sea)))**2))
rmse_Mult_sea
##################Multiplicative Additive Seasonality ###########
Mul_Add_sea = smf.ols('log_sales~t+Q1_+Q2_+Q3_+Q4_',data = Train).fit()
pred_Mult_add_sea = pd.Series(Mul_Add_sea.predict(Test))
rmse_Mult_add_sea = np.sqrt(np.mean((np.array(Test['Sales'])-np.array(np.exp(pred_Mult_add_sea)))**2))
rmse_Mult_add_sea
################## Testing #######################################
data = {"MODEL":pd.Series(["rmse_linear","rmse_Exp","rmse_Quad","rmse_add_sea","rmse_add_sea_quad","rmse_Mult_sea","rmse_Mult_add_sea"]),"RMSE_Values":pd.Series([rmse_linear,rmse_Exp,rmse_Quad,rmse_add_sea,rmse_add_sea_quad,rmse_Mult_sea,rmse_Mult_add_sea])}
table_rmse=pd.DataFrame(data)
table_rmse
# so rmse_add_sea has the least value among the models prepared so far
# Predicting new values
| true |
5754de3722c223e41c6f265f46ac60281dfb69cf | Python | xiaomengyu/test | /两种计算比较.py | UTF-8 | 158 | 3.421875 | 3 | [] | no_license | x=5
y=6
num=x*y
print('num=%d' %num)
x=5
y=6
num=x*y
print('num=',num)
print("3乘以5=%d" %(3*5))
x=5
y=6
print('x乘以y=',x*y)
| true |
6e119595e909382e80f02e5a9836efece656e3d4 | Python | mbc7867/EE4371 | /basic.py | UTF-8 | 512 | 4.375 | 4 | [] | no_license | #Basic
#Q1a Sum of squares of numbers less than n
na=int(input("Enter any integer n:"))
s=0
for i in range(1,na):
s+=i**2;
#print("Sum of squares of integers less than n is",s)
#Q1b Sum of squares of odd integers less than n
nb=int(input("Enter any integer n:"))
s=0
for i in range(1,nb,2):
s+=i**2;
#print("Sum squares of odd integers less than n is",s)
#Q2a
list_1=[]
for i in range(60,90,10):
list_1.append(i)
print(list_1)
#Q2b
list_2=[]
for i in range(4,-6,-2):
list_2.append(i)
print(list_2)
| true |
4c56749eab94b09c1dddefa3b42d5ef7975bc1e8 | Python | nBidari/Year9DesignCS4-PythonNB | /ContestQuestions/CCCJ2015/CCCJ12015.py | UTF-8 | 196 | 3.234375 | 3 | [] | no_license | m = int(input(""))
d = int(input(""))
if m != 2:
if m < 7:
print("Before")
else:
print("After")
else:
if d == 18:
print("Special")
elif d < 18:
print("Before")
else:
print("After") | true |
da722c590801da84c3f8840a80b4607a8162f22b | Python | oortj/origin | /Python/Courses/Coursera/DDA_old/archive/CalcStats.py | UTF-8 | 410 | 3.109375 | 3 | [] | no_license | import numpy as np
def calc_stats(file):
data = np.loadtxt(file, delimiter=',')
return(np.round(np.mean(data),decimals=1), np.round(np.median(data), decimals=1))
# You can use this to test your function.
# Any code inside this `if` statement will be ignored by the automarker.
if __name__ == '__main__':
# Run your `calculate_mean` function with examples:
print(calc_stats('../csv/data.csv')) | true |
18e55004e2afa25c6285c6685b90bd823ad05135 | Python | JeromeGodin/Chess | /source/animations/MoveAnimation.py | UTF-8 | 1,692 | 2.75 | 3 | [] | no_license | from source.animations.Animation import Animation
import math
class MoveAnimation(Animation):
def __init__(self, element, element_type, original_position, target_position, duration_in_frames,
target_image=None, target_image_position=None, target_image_remove_timing=0):
super().__init__(element, element_type)
self.original_position = original_position
self.target_position = target_position
self.duration = duration_in_frames
self.target_image = target_image
self.target_image_position = target_image_position if target_image_position is not None else target_position
self.target_image_remove_timing = target_image_remove_timing
self.display_target_image = self.target_image is not None
self.__frame_counter = 0
self.__pixel_per_frame = ((original_position[0] - target_position[0]) / duration_in_frames,
(original_position[1] - target_position[1]) / duration_in_frames)
def animate(self):
if self.__frame_counter < self.duration:
self.element.display_position = (self.element.display_position[0] - self.__pixel_per_frame[0],
self.element.display_position[1] - self.__pixel_per_frame[1])
if self.__frame_counter == (self.duration - self.target_image_remove_timing):
self.display_target_image = False
self.__frame_counter = self.__frame_counter + 1
else:
self.element.display_position = self.target_position
self.is_over = True
self.element.currently_animated = False
| true |
dd996b2502360c3295b7df84a9cdc018db794d92 | Python | EliottNys/IA_Abalone | /IA.py | UTF-8 | 13,175 | 3.4375 | 3 | [] | no_license | import random
import copy
#---------------------- légendes ----------------------
print
symbols = ['B', 'W']
def opponent(color): #inverse la couleur
if color == 'W':
return 'B'
return 'W'
directions = {
'NE': (-1, 0),
'SW': ( 1, 0),
'NW': (-1, -1),
'SE': ( 1, 1),
'E': ( 0, 1),
'W': ( 0, -1)
}
opposite = {
'NE': 'SW',
'SW': 'NE',
'NW': 'SE',
'SE': 'NW',
'E': 'W',
'W': 'E'
}
posValues = [ #tableau de valorisation des postions sur le plateau
[1.6, 1.6, 1.6, 1.6, 1.6, "X", "X", "X", "X"],
[1.6, 2.5, 2.5, 2.5, 2.5, 1.6, "X", "X", "X"],
[1.6, 2.5, 3.0, 3.0, 3.0, 2.5, 1.6, "X", "X"],
[1.6, 2.5, 3.0, 3.35, 3.35, 3.0, 2.5, 1.6, "X"],
[1.6, 2.5, 3.0, 3.35, 3.5, 3.35, 3.0, 2.5, 1.6],
["X", 1.6, 2.5, 3.0, 3.35, 3.35, 3.0, 2.5, 1.6],
["X", "X", 1.6, 2.5, 3.0, 3.0, 3.0, 2.5, 1.6],
["X", "X", "X", 1.6, 2.5, 2.5, 2.5, 2.5, 1.6],
["X", "X", "X", "X", 1.6, 1.6, 1.6, 1.6, 1.6]
]
#---------------------- fonctions basiques ----------------------
def count(state,letter): #compte le nombre de fois qu'un symbole apparait sur le plateau
board = state["board"]
cnt = 0
for line in board:
for elem in line:
if elem == letter:
cnt += 1
return cnt
def getStatus(state, pos): #renvoie l'etat d'une case (W, B, E ou X)
return state['board'][pos[0]][pos[1]]
def winner(state): #donne le gagnant d'un etat du jeu (ou None si il n'y en a pas)
black, white = count(state,"B"), count(state,"W")
if black < 9:
return 1
if white < 9:
return 0
return None
def gameOver(state): #renvoie si la partie est terminée pour cet état du jeu
if winner(state) is not None:
return True
return False
def score(state, color):
points = 0.0
board = state["board"]
for line in range(0,9):
for column in range(0,9):
if board[line][column] == color:
points += posValues[line][column]
return points
def utility(state): #utilité d'un noeud final
player = symbols[state["current"]]
winner = winner(state)
if player == winner:
return 25
if winner == None:
return 0
return -25
def heuristic(state): #heuristique (écart de points avec l'adversaire en fonction des positions sur le plateau)
player = state["current"]
if gameOver(state):
theWinner = winner(state)
if theWinner == player:
return 25
return -25
color = symbols[player]
res = score(state,color) - score(state,opponent(color))
return res
def positions(state, color): #renvoie une liste des positions des pions d'une certaine couleur
board = state["board"]
pos = []
for line in range(0,9):
for column in range(0,9):
if board[line][column] == color:
pos.append([line, column])
return pos
def newPos(pos, direction):
    """Translate `pos` one step along the named `direction`."""
    dl, dc = directions[direction]
    return [pos[0] + dl, pos[1] + dc]
def sameLine(direction1, direction2):
    """True when the two directions lie on the same axis (equal or opposite)."""
    return direction1 == direction2 or direction1 == opposite[direction2]
def getDirectionName(directionTuple):
    """Map a (dl, dc) offset back to its direction name (implicitly None if absent)."""
    for name, offset in directions.items():
        if offset == directionTuple:
            return name
def computeAlignement(marbles):
    """Direction name of the line formed by `marbles` (assumed consecutive/aligned)."""
    deltas = {
        (b[0] - a[0], b[1] - a[1])
        for a, b in zip(marbles, marbles[1:])
    }
    return getDirectionName(deltas.pop())
def computeAlignementSort(marbles): #returns the direction of a line of marbles
    """Sort `marbles` in board order, then return the direction name of the
    line they form, or None when they are not consecutive along one axis."""
    marbles = sorted(marbles, key=lambda L: L[0]*9+L[1])
    D = set()
    for i in range(len(marbles)-1):
        direction = (marbles[i+1][0]-marbles[i][0], marbles[i+1][1]-marbles[i][1])
        # a gap or non-board offset means the marbles are not a legal line
        if direction not in directions.values():
            return None
        D.add(direction)
    return getDirectionName(D.pop()) if len(D) == 1 else None
def isOnBoard(pos):
    """True for coordinates inside the hexagonal Abalone board (9x9 grid
    with the two corner triangles cut off by the |c - l| < 5 constraint)."""
    line, column = pos
    return min(pos) >= 0 and max(pos) <= 8 and abs(column - line) < 5
def Out(position, posOut):
    """True when `position` is off the board: listed in `posOut` or outside 0..8."""
    return position in posOut or max(position) > 8 or min(position) < 0
def isEmpty(state, pos): #True when the cell at pos is empty ('E')
    return getStatus(state, pos) == 'E'
def isFree(state, pos):
    """True when `pos` is an empty on-board cell or lies outside the board."""
    if not isOnBoard(pos):
        return True
    return isEmpty(state, pos)
#---------------------- Détermination des coups possibles ----------------------
def TwoAlign(positions): #returns the pairs of marbles that are aligned
    """List every aligned pair [p1, p2] among `positions`.

    Only one direction per axis (NE, E, NW) is scanned, so each pair is
    reported once, ordered along that direction.
    """
    lines = []
    dir = {'NE': (-1, 0),'E': (0, 1),'NW': (-1, -1)}
    for direction in dir:
        for position1 in positions:
            for position2 in positions:
                if newPos(position1, direction) == position2:
                    lines.append([position1,position2])
    return lines
def ThreeAlign(positions): #returns the triples of marbles that are aligned
    """List every aligned triple [p1, p2, p3] among `positions`.

    As in TwoAlign, only one direction per axis is scanned so each triple
    appears once, ordered along that direction.
    """
    lines = []
    dir = {'NE': (-1, 0),'E': (0, 1),'NW': (-1, -1)}
    for direction in dir:
        for position1 in positions:
            for position2 in positions:
                for position3 in positions:
                    if newPos(position1, direction) == position2 and newPos(position2, direction) == position3:
                        lines.append([position1,position2,position3])
    return lines
def MoveOne(positions, freePos): #moves of a single marble into an empty cell
    """List [[pos], direction] for every single-marble move into a free cell."""
    moves = []
    for position in positions:
        for direction in directions:
            if newPos(position, direction) in freePos:
                moves.append([[position],direction])
    return moves
def MoveTwo(positions, freePos): #moves of an aligned pair of marbles
    """List [pair, direction] moves where both destination cells are free
    or occupied by the moving pair itself (in-line slide)."""
    moves = []
    for position in positions:
        for direction in directions:
            if newPos(position[0], direction) in freePos or newPos(position[0], direction) in position:
                if newPos(position[1], direction) in freePos or newPos(position[1], direction) in position:
                    moves.append([position,direction])
    return moves
def MoveThree(positions, freePos): #moves of an aligned triple of marbles
    """List [triple, direction] moves where all three destination cells are
    free or occupied by the moving triple itself (in-line slide)."""
    moves = []
    for position in positions:
        for direction in directions:
            if newPos(position[0], direction) in freePos or newPos(position[0], direction) in position:
                if newPos(position[1], direction) in freePos or newPos(position[1], direction) in position:
                    if newPos(position[2], direction) in freePos or newPos(position[2], direction) in position:
                        moves.append([position,direction])
    return moves
def TwoPushOne(pos, posAdv, freePos, posOut): #moves where 2 marbles push 1 opponent marble
    """2-vs-1 sumito: for each aligned pair, push the single opposing marble
    sitting in front of either end when the cell behind it is free or off-board."""
    moves = []
    for position in pos:
        dir = computeAlignement(position)
        # push towards the pair's own direction (front end)
        if newPos(position[1], dir) in posAdv:
            if newPos(newPos(position[1], dir), dir) in freePos or Out(newPos(newPos(position[1], dir), dir), posOut) == True:
                moves.append([position,dir])
        # push towards the opposite direction (back end)
        dirOpp = opposite[dir]
        if newPos(position[0], dirOpp) in posAdv:
            if newPos(newPos(position[0], dirOpp), dirOpp) in freePos or Out(newPos(newPos(position[0], dirOpp), dirOpp), posOut) == True:
                moves.append([position,dirOpp])
    return moves
def ThreePushOne(pos, posAdv, freePos, posOut): #moves where 3 marbles push 1 opponent marble
    """3-vs-1 sumito: same rule as TwoPushOne but for aligned triples."""
    moves = []
    for position in pos:
        dir = computeAlignement(position)
        if newPos(position[2], dir) in posAdv:
            if newPos(newPos(position[2], dir), dir) in freePos or Out(newPos(newPos(position[2], dir), dir), posOut) == True:
                moves.append([position,dir])
        dirOpp = opposite[dir]
        if newPos(position[0], dirOpp) in posAdv:
            if newPos(newPos(position[0], dirOpp), dirOpp) in freePos or Out(newPos(newPos(position[0], dirOpp), dirOpp), posOut) == True:
                moves.append([position,dirOpp])
    return moves
def ThreePushTwo(pos, posAdv, freePos, posOut): #moves where 3 marbles push 2 opponent marbles
    """3-vs-2 sumito: two consecutive opposing marbles in front of either end
    of the triple, with the cell behind them free or off-board."""
    moves = []
    for position in pos:
        dir = computeAlignement(position)
        if newPos(position[2], dir) in posAdv and newPos(newPos(position[2], dir), dir) in posAdv:
            if newPos(newPos(newPos(position[2], dir), dir), dir) in freePos or Out(newPos(newPos(newPos(position[2], dir), dir), dir), posOut) == True:
                moves.append([position,dir])
        dirOpp = opposite[dir]
        if newPos(position[0], dirOpp) in posAdv and newPos(newPos(position[0], dirOpp), dirOpp) in posAdv:
            if newPos(newPos(newPos(position[0], dirOpp), dirOpp), dirOpp) in freePos or Out(newPos(newPos(newPos(position[0], dirOpp), dirOpp), dirOpp), posOut) == True:
                moves.append([position,dirOpp])
    return moves
def moves(state): #returns the possible moves
    """Enumerate every legal move for the side to move.

    Moves are grouped by category (1/2/3-marble slides, then the three
    sumito pushes) and each category is shuffled independently.
    """
    color = symbols[state["current"]]
    pos = positions(state,color)
    posAdv = positions(state,opponent(color))
    freePos = positions(state,"E")
    posOut = positions(state,"X")
    res = []
    one = MoveOne(pos, freePos)
    random.shuffle(one)
    res.extend(one)
    two = MoveTwo(TwoAlign(pos), freePos)
    random.shuffle(two)
    res.extend(two)
    three = MoveThree(ThreeAlign(pos), freePos)
    random.shuffle(three)
    res.extend(three)
    twoone = TwoPushOne(TwoAlign(pos), posAdv, freePos, posOut)
    random.shuffle(twoone)
    res.extend(twoone)
    threeone = ThreePushOne(ThreeAlign(pos), posAdv, freePos, posOut)
    random.shuffle(threeone)
    res.extend(threeone)
    threetwo = ThreePushTwo(ThreeAlign(pos), posAdv, freePos, posOut)
    random.shuffle(threetwo)
    res.extend(threetwo)
    return res
#---------------------- Applique un coup au plateau de jeu ----------------------
def moveOneMarble(state, pos, direction): #update the state after moving one marble into an empty cell
    """Move the marble at `pos` one step along `direction`.

    Copy-on-write: only the touched rows of the board are copied, so the
    input state is left untouched.  A marble pushed onto a non-'E' cell
    (or off the grid, caught by the bare except) simply disappears.
    """
    li, ci = pos #initial line and column
    ld, cd = newPos(pos, direction)
    color = getStatus(state, pos)
    try:
        destStatus = getStatus(state, (ld, cd))
    except:
        # destination is outside the 9x9 grid -> treat as off-board
        destStatus = 'X'
    res = copy.copy(state) #empty the starting cell
    res['board'] = copy.copy(res['board'])
    res['board'][li] = copy.copy(res['board'][li])
    res['board'][li][ci] = 'E'
    if destStatus == 'E': #fill the destination cell
        res['board'][ld] = copy.copy(res['board'][ld])
        res['board'][ld][cd] = color
    return res
def moveMarbles(state, marbles, direction): #update the state after moving several marbles
    """Apply moveOneMarble to each marble in order, threading the new state."""
    for pos in marbles:
        state = moveOneMarble(state, pos, direction)
    return state
def moveMarblesTrain(state, marbles, direction): #update the state after pushing opponent marbles
    """In-line move: sort the moving marbles front-first, collect any opposing
    marbles in front of the train, and move the whole train one step."""
    # sort so the marble closest to the destination moves first
    if direction in ['E', 'SE', 'SW']:
        marbles = sorted(marbles, key=lambda L: -(L[0]*9+L[1]))
    else:
        marbles = sorted(marbles, key=lambda L: L[0]*9+L[1])
    color = getStatus(state, marbles[0])  # NOTE(review): unused local
    pos = newPos(marbles[0], direction)
    toPush = []
    while not isFree(state, pos):
        toPush.append(pos)
        pos = newPos(pos, direction)
    # pushed marbles move first (farthest-first), then the train itself
    state = moveMarbles(state, list(reversed(toPush)) + marbles, direction)
    return state
def apply(state, move): #update the game state (board, turn...)
    """Apply `move` ({'marbles': [...], 'direction': name}) and flip the turn."""
    marbles = move['marbles']
    if len(marbles) != 0:
        marblesDir = computeAlignementSort(marbles)
        if len(marbles) == 1:
            state = moveOneMarble(state, marbles[0], move['direction'])
        elif sameLine(move['direction'], marblesDir):
            # moving along the marbles' own axis may push opposing marbles
            state = moveMarblesTrain(state, marbles, move['direction'])
        else:
            state = moveMarbles(state, marbles, move['direction'])
    state['current'] = (state['current'] + 1) % 2
    return state
#---------------------- Détermine le meilleur coup ----------------------
def NegamaxWithPruningLimitedDepth(state, depth=2, alpha=float('-inf'), beta=float('+inf')):
    """Depth-limited negamax with alpha-beta pruning; returns (value, move).

    NOTE(review): the value is negated both at the leaf (-heuristic) and in
    the final return (-theValue) -- the sign convention looks unusual but is
    self-consistent with the recursive comparison; confirm against next().
    """
    if gameOver(state) or depth==0:
        return -heuristic(state), None
    theValue, theMove = float('-inf'), None
    for move in reversed(moves(state)):
        mov = {"marbles":move[0],"direction":move[1]}
        newState = apply(state, mov)
        value, _ = NegamaxWithPruningLimitedDepth(newState, depth-1, -beta, -alpha)
        if value > theValue:
            theValue, theMove = value, mov
        alpha = max(alpha, theValue)
        if alpha >= beta:
            # beta cutoff: the opponent will never allow this line
            break
    return -theValue, theMove
#---------------------- Renvoie le coup ----------------------
def next(state): #returns the move to play
    """Entry point called by the game server.

    NOTE(review): shadows the builtin next(); kept for API compatibility.
    """
    _, move = NegamaxWithPruningLimitedDepth(state)
    return move
ca74e7bd27e4f26e95b0d48966e7fd4431cbc5a7 | Python | danglingmind/remote-file-editor | /server/host2hostFileSend.py | UTF-8 | 812 | 2.703125 | 3 | [] | no_license | #!/usr/local/bin/python3
import os
import socket
import sys
if __name__ == '__main__':
    # Send a file's name/path/size to a local service so it can open the file.
    if len(sys.argv) < 2:
        print('No file given !!!')
        sys.exit(0)
    server_ip = socket.gethostname()
    port = 5003
    buffer_size = 1024  # NOTE(review): currently unused -- presumably reserved for recv
    separator = '<SEPARATOR>'
    try:
        # Fix: the original never closed the socket; the context manager
        # guarantees it is closed on both the success and error paths.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((server_ip, port))
            full_path = os.path.abspath(sys.argv[1].strip())
            print(f'[+] File : {full_path}')
            file_path = full_path
            file_name = file_path.split('/')[-1]
            file_size = os.path.getsize(file_path)
            sock.send(f'{file_name}{separator}{file_path}{separator}{file_size}'.encode())
            print('[+] Opening !!!')
    except socket.error as e:
        print(f'[+] Service is not running !!!')
| true |
3c703d3b52989f0712dcf895983e13566b19e6d4 | Python | qihualiang/PanViz | /linkNodes.py | UTF-8 | 3,037 | 2.859375 | 3 | [] | no_license | from getTop2 import getTop2
def linkNodes(unlink):
    """Greedily stitch nodes into paths based on their most frequent neighbors.

    `unlink` maps node -> neighbor frequency structure (consumed by getTop2);
    nodes are popped from it and extended left/right into paths.  Each finished
    path is stored in `linked` under both of its endpoints.
    NOTE(review): `unlink` is destructively emptied by this function.
    """
    linked = dict()
    while bool(unlink):
        curNode, neighbors = unlink.popitem()
        ## two most common neighbor nodes without ties
        top2c = getTop2(neighbors)
        top2 = []
        for node in top2c:
            if node in unlink:
                top2.append(node)
        curPath = [curNode]
        extendLeft = False
        extendRight = False
        if len(top2) == 1:
            curPath = [top2[0], curNode]
            extendLeft = True
        elif len(top2) == 2:
            curPath = [top2[0], curNode, top2[1]]
            extendLeft = True
            extendRight = True
        while extendLeft:
            leftNode = curPath[0]
            if leftNode in unlink:
                top2 = getTop2(unlink[leftNode])
                unlink.pop(leftNode)
            else:
                top2 = []
            ## pick the next one to append to the left
            for node in top2:
                if node not in curPath:
                    leftNode = node
            ## could not find the next leftNode
            if leftNode == curPath[0]:
                extendLeft = False
            else:
                ## next leftNode is unlink --> just add it and continue
                if leftNode in unlink:
                    curPath = [leftNode] + curPath
                ## next leftNode is linked --> append the whole path
                elif leftNode in linked:
                    extendPath = linked[leftNode]
                    linked.pop(extendPath[0])
                    if extendPath[-1] in linked:
                        linked.pop(extendPath[-1])
                    if leftNode == extendPath[0]:
                        extendPath.reverse()
                    curPath = extendPath + curPath
                    extendLeft = False
                else:
                    extendLeft = False ##add May13
        while extendRight:
            rightNode = curPath[-1]
            if rightNode in unlink:
                top2 = getTop2(unlink[rightNode])
                unlink.pop(rightNode)
            else:
                top2 = []
            for node in top2:
                if node not in curPath:
                    rightNode = node
            if rightNode == curPath[-1]:
                extendRight = False
            else:
                if rightNode in unlink:
                    curPath = curPath + [rightNode]
                elif rightNode in linked:
                    extendPath = linked[rightNode]
                    linked.pop(extendPath[0])
                    if extendPath[-1] in linked:
                        linked.pop(extendPath[-1])
                    if rightNode == extendPath[-1]:
                        extendPath.reverse()
                    curPath = curPath + extendPath
                    extendRight = False
                else:
                    extendRight = False ##add May13
        # register the finished path under both endpoints
        linked[curPath[0]] = curPath
        linked[curPath[-1]] = curPath
    return linked
| true |
2a2877d19dc9eea4530c17ac2bdefe8d18d58861 | Python | athenarc/smpc-local-driver | /scripts/global_mapping.py | UTF-8 | 1,048 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
import argparse
from utils import read_json, write_json
def convert_to_type(value):
    """Coerce `value` to int if possible, else float, else return it unchanged."""
    for caster in (int, float):
        try:
            return caster(value)
        except Exception:
            pass
    return value
def map_mesh_terms(args):
    """Build {term_id: {child_id: index}} from the mesh JSON and write it out."""
    global_map = {}
    for term in read_json(args.mesh):
        children = {}
        for position, child in enumerate(term['children']):
            children[child['id']] = position
        global_map[term['id']] = children
    write_json(args.output, global_map)
def main():
    """CLI entry point: parse arguments and generate the global mapping."""
    parser = argparse.ArgumentParser(
        description='SMPC global mapping generator')
    parser.add_argument('mesh', help='File containing mesh terms with their childer. See catalogue.py --help.')
    parser.add_argument('output', help='Output file (JSON)')
    parser.add_argument('--version', action='version', version='%(prog)s 0.2')
    map_mesh_terms(parser.parse_args())
main()
| true |
61ea010d993a6058dc8a8bcda25dd826204791c4 | Python | ignasiusiswara0672000/Monitor_NODE | /Perhitungan.py | UTF-8 | 1,073 | 3.78125 | 4 | [] | no_license | def Pilih():
Pilihan = (input())
if Pilihan == "1":
Segitiga()
elif Pilihan == "2":
Lingkaran()
elif Pilihan == "3":
Persegi()
def Segitiga():
    """Read base, height and three side lengths; print triangle area and perimeter."""
    base = float(input())
    height = float(input())
    side_a = float(input())
    side_b = float(input())
    side_c = float(input())
    print("\nNilai diambil dari Node ke-1")
    print("Luas Segitiga\t\t:", 0.5 * base * height)
    print("Keliling Segitiga\t:", side_a + side_b + side_c)
def Lingkaran():
    """Read pi and radius; print circle area and circumference."""
    pi = float(input())
    radius = float(input())
    print("\nNilai diambil dari Node ke-1")
    print("Luas Lingkaran\t\t:", pi * (radius * radius))
    print("Keliling Lingkaran\t:", 2 * pi * radius)
def Persegi():
    """Read a side length; print square area and perimeter."""
    side = float(input())
    print("\nNilai diambil dari Node ke-1")
    print("Luas Persegi\t\t:", side * side)
    print("Keliling Persegi\t:", 4 * side)
Pilih()
| true |
cd779e50f84d8f9ae5ab446b95ee2a57dd4a4a28 | Python | shakcho/LangScrap | /datadictionary/total_count.py | UTF-8 | 866 | 3.140625 | 3 | [] | no_license | import sys
import nltk
from nltk.util import ngrams
from nltk.tokenize import word_tokenize
import csv
'''
Requierd argument
1. Text file consists of linugstic data
Filename should be in the form of year.txt where year can be any 4 digit integr
2. N value of N-gram
N can be 1 to 4 .
'''
file_name = sys.argv[1]  # corpus file, expected name "<year>.txt"
gram_value = int(sys.argv[2])  # n-gram order, 1..4 per the module docstring
year_value = file_name[:4]  # the year is the first four characters of the filename
def gen_ngram(data, gram_value):
    """Count the n-grams of order `gram_value` over the whitespace tokens of `data`.

    Bug fix: the original only appended unigrams to the list, so for any
    gram_value > 1 the list stayed empty and the function returned 0.
    Counting the generator directly gives the correct count for every order
    (and matches the original result for gram_value == 1).
    """
    ngram = ngrams(data.split(), gram_value)
    return sum(1 for _ in ngram)
# Read the corpus, count its n-grams, and append one "<year>,<count>" row
# to total_count.csv.
f = open(file_name,'r',encoding = 'utf-8')
data = f.read()
total_count = gen_ngram(data,gram_value)
year = year_value
new_dict = {year:total_count}
with open('total_count.csv','a',encoding='utf-8') as f:
    csv.writer(f).writerows((k,v) for k,v in new_dict.items())
| true |
a96cf55b843673581de7496e51f4814ae99caea4 | Python | yuecong/tools | /data_intrgraty_check_mysql.py | UTF-8 | 6,654 | 3 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python -tt
"""A tiny Python program to make mysql can write data into db with current time
Before using this tool, you need to intall mysql server, and configured db_user and db_pwd.
Also you need to inall mysql.connector, which is a python package to handle mysql.
"""
import sys
import mysql.connector
from mysql.connector import errorcode
import time
import datetime
#Connect to mysql server without indicating database name
def db_connect():
    """Connect to the local MySQL server (no database selected) and return the
    connection; exits the process on authentication or connection failure.

    NOTE(review): root/root credentials are hardcoded -- move to config/env.
    """
    config = {
        'user': 'root',
        'password': 'root',
        'host': '127.0.0.1',
        #'database': 'test',
        'raise_on_warnings': True,
    }
    try: cnx = mysql.connector.connect(**config)
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        else:
            print(err)
        exit(1)
    return cnx
#Create Database with indicated database name
def db_create(db_cursor, db_name):
    """Create database `db_name` with the utf8 charset; exit the process on failure."""
    ddl = "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(db_name)
    try:
        db_cursor.execute(ddl)
    except mysql.connector.Error as err:
        print("Failed creating database: {}".format(err))
        exit(1)
#Remove Databse with indicated database name
def db_remove(db_cursor, db_name):
    """Drop database `db_name` if it exists; exit the process on failure."""
    ddl = "DROP DATABASE IF EXISTS {}".format(db_name)
    try:
        db_cursor.execute(ddl)
    except mysql.connector.Error as err:
        print("Failed removing database: {}".format(err))
        exit(1)
#Create db tables with indicated table definition
def db_create_tables(cursor,tables):
    """Create each table from its DDL; already-existing tables are tolerated,
    any other error is printed and aborts the process.

    NOTE(review): dict.iteritems() is Python 2 only -- consistent with the
    Python 2 print statements elsewhere in this file.
    """
    for name, ddl in tables.iteritems():
        try:
            cursor.execute(ddl)
        except mysql.connector.Error as err:
            if err.errno != errorcode.ER_TABLE_EXISTS_ERROR:
                print(err.msg)
                exit(1)
#Insert one record with current timestamp
def insert_one_record(cursor):
    """Insert one video_records row stamped with the current local time.

    Fix: the timestamp is now bound as a query parameter instead of being
    concatenated into the SQL string (the original passed '' as the params
    argument and built the statement by string interpolation).
    """
    now = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    add_record = ("INSERT INTO video_records"
                  "(rec_time) "
                  "VALUES (%s)")
    cursor.execute(add_record, (now,))
#check all timestamp whether there are some neiborhood timestamp are more than indicated value
def check_all_records(cursor, interval):
    """Scan all video_records rows and report gaps where the average time
    between consecutive rows exceeds `interval` seconds.

    NOTE(review): the bare print statements below are Python 2 syntax,
    consistent with the rest of this file.
    """
    query = ("SELECT rec_no, rec_time FROM test.video_records ")
    cursor.execute(query,'')
    first_flag = 0
    result_flag = 'true'
    #Check whether there are some neighbor records are with the interval more than indicated values
    for (rec_no, rec_time) in cursor:
        if first_flag == 0:
            #record the first record for later comparison
            (rec_no_before,rec_time_before) = (rec_no,rec_time)
            first_flag = 1
            continue
        time_delta =rec_time - rec_time_before
        if time_delta.total_seconds()/ (rec_no - rec_no_before ) > interval :
            result_flag = 'false'
            print "Some timestamp is lost in rec_no:",rec_no,"Time interval is",time_delta,"Time stampe is ",rec_time
        #update it for the compasison target for future comparason
        (rec_no_before, rec_time_before) = (rec_no, rec_time)
    if result_flag == 'true' :
        print 'All records are OK.'
    else :
        print 'There are some data needs to be confirmed.'
#Usage print function
def print_help():
    """Print command-line usage for the data integrity check tool."""
    usage_lines = [
        "Usage simple data intergrity check tool.",
        "--help \t print this information",
        "--startWrite [interval] \t write a timestamp for the excution time into databse continously per [interval] seconds",
        "--stopWrite \t stop writing a timestamp into database table. ",
        "--reset \t Database, table and the data will be removed forcely",
        "--checkData [interval] \t Read the data from dabase to check how many data in neibourhood are with larger interval than the indicidated value. ",
    ]
    for line in usage_lines:
        print(line)
#update write flag
def update_write_flag(filename, flag):
    """Persist the writer on/off `flag` ('true'/'false') as "write:<flag>"."""
    with open(filename, 'w') as f:
        f.write('write:' + flag)
#check write flag to jusitify whethere database wrting should continue or not
def check_write_flag(filename):
    """Read back the flag value ('true'/'false') stored by update_write_flag."""
    with open(filename, 'r') as f:
        content = f.read()
    return content.split(':')[1]
#Main function
def main():
    """Dispatch on the command-line option: --help, --stopWrite, --reset,
    --startWrite [interval] or --checkData [interval]."""
    DB_NAME = 'test'
    TABLES = {}
    TABLES['video_records'] = (
        "CREATE TABLE `video_records` ("
        " `rec_no` int(11) NOT NULL AUTO_INCREMENT,"
        " `rec_time` timestamp NOT NULL,"
        " PRIMARY KEY (`rec_no`)"
        ") ENGINE=InnoDB")
    #conf file to record write flag to control database wrting continue/stop
    write_flag_conf_file = sys.argv[0]+'.conf'
    #get command option to decide which command need to be excuted
    if len(sys.argv) >= 2:
        cmd = sys.argv[1]
    else:
        cmd = '--help'
    if cmd == '--help' : # print help information
        print_help()
        exit(0)
    #Stop the database writing..
    if cmd == '--stopWrite':
        update_write_flag(write_flag_conf_file,'false')
        exit(0)
    #Connect to mydql without indicating database name
    db_cnx = db_connect()
    db_cursor= db_cnx.cursor()
    #Remove databse to reset test enviroment
    if cmd =='--reset' :
        update_write_flag(write_flag_conf_file,'false')
        time.sleep(0.5)
        db_remove(db_cursor,'test')
        exit(0)
    #Create test database if it does not exist
    try:
        db_cnx.database = DB_NAME
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_BAD_DB_ERROR:
            db_create(db_cursor,DB_NAME)
            db_cnx.database = DB_NAME
        else:
            print(err)
            exit(1)
    #create tables if they do not exist
    db_create_tables(db_cursor, TABLES)
    #Start write data into database continously until it is stoped by --stopWrite
    if cmd == '--startWrite' :
        if len(sys.argv)>2 and int(sys.argv[2]) >0 :
            update_write_flag(write_flag_conf_file,'true')
            print ('writing data....')
            while check_write_flag(write_flag_conf_file) == 'true' :
                #insert one record with current timestamp continously untill write_flag is set to false by --stopWrite per [interval] seconds
                insert_one_record(db_cursor)
                # Make sure data is committed to the database
                db_cnx.commit()
                time.sleep(int(sys.argv[2]))
            print('writing data is stopped.')
        else : #[inerval] is missed in the command line
            print('Usage --startWrite [interval]')
    #check record with indicated value
    if cmd == '--checkData' :
        if len(sys.argv)>2 and int(sys.argv[2]) >0 :
            check_all_records(db_cursor,int(sys.argv[2]))
        else : #[interval] is missed in the command line
            print('Usage --checkData [interval]')
    #close handling
    db_cursor.close()
    db_cnx.close()
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    main()
| true |
40bb1afd1293977d6983f8aed7d40832974ed8f6 | Python | ortizmj12/drone | /get-drone-agent.py | UTF-8 | 1,472 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
'''
This script is used to find the Drone agent that executed a past build/deploy.
Example:
python get-drone-agent.py https://drone.example.net/SERVICE/frontend-service/173
'''
import argparse
import yaml
import os
import requests
import json
args = ''
drone_build = ''
headers = ''
DRONE_API_URL = 'https://drone.example.net/api/repos/{}/{}/builds/{}'
def get_args():
    """Parse the single positional argument: the Drone build page URL."""
    parser = argparse.ArgumentParser()
    parser.add_argument('drone_build',
                        help='URL of the Drone build',
                        type=str)
    return parser.parse_args()
def get_token():
    """Return the Drone API token, from ~/.credentials if present, else prompt.

    Security fix: yaml.safe_load replaces yaml.load, which can construct
    arbitrary Python objects from the file; safe_load parses the same plain
    key/value YAML this file contains.
    """
    home = os.path.expanduser('~')
    credentials_file = home + '/.credentials'
    if os.path.isfile(credentials_file):
        credentials = yaml.safe_load(open(credentials_file))
        token = credentials['drone']['token']
        return token
    else:
        token = raw_input('Enter Drone Token: ')
        return token
def create_api_url(url):
    """Build the Drone builds API endpoint from a build page URL
    (https://host/<project>/<repo>/<build>)."""
    _scheme, _empty, _host, project, repo, build = url.split('/')[:6]
    return DRONE_API_URL.format(project, repo, build)
def main():
    """Resolve the build URL to the API endpoint, fetch the build JSON with a
    bearer token, and print the agent machine that ran the first proc."""
    args = get_args()
    token = get_token()
    headers = {'Authorization': 'Bearer ' + token}
    api_url = create_api_url(args.drone_build)
    r = requests.get(api_url, headers=headers)
    data = r.json()
    print('Build agent: ' + data['procs'][0]['machine'])
if __name__ == '__main__':
    main()
| true |
9d92ca819acf59e070bb27da06d5f21c20554ea4 | Python | tking8994/my-isc-work | /dictionary1.py | UTF-8 | 222 | 2.828125 | 3 | [] | no_license | band=["mel","geri","victoria","mel","emma"]
counts={}
for member in band:
if member not in counts:
counts[member]=1
else:
counts[member]+=1
for member in counts:
print(member, counts[member])
| true |
b0560b35ea954023d743915485fd8ca932e78606 | Python | hajlaouiyakin/GP_DRF | /models/DRF.py | UTF-8 | 5,704 | 2.515625 | 3 | [] | no_license | import torch
import numpy as np
from torch.autograd import Variable
from torch import nn
import utils as ut
class DRF(nn.Module):
    """Deep Random Feature network: stacked random-Fourier-feature layers with
    variational (mean / log-variance) parameters for the spectral frequencies
    Omega and the output weights W.  Based on [1] (reference at end of file).

    NOTE(review): fit() uses Python 2 print statements; this module targets
    the legacy torch.autograd.Variable API.
    """
    def __init__(self, n_features, n_outputs,
                 kernel_name="rbf", n_layers=1, n_rff=10, df=10):
        """Set up per-layer dimensions and variational parameters.

        n_rff: random Fourier features per layer; df: hidden width per layer.
        """
        super(DRF, self).__init__()
        d_in = n_features
        d_out = n_outputs
        dtype = torch.FloatTensor
        if kernel_name == "rbf":
            self.K = ut.rbf
        self.n_layers = n_layers
        n_rff = n_rff
        # per-layer sizes: layer i maps d_in[i] -> n_rff[i] random features,
        # then the 2*n_rff cos/sin features -> dhat_out[i] outputs
        self.n_rff = n_rff * np.ones(n_layers, dtype = np.int64)
        self.df = df * np.ones(n_layers, dtype=np.int64)
        self.d_in = np.concatenate([[d_in], self.df[:(n_layers - 1)]])
        self.d_out = self.n_rff
        self.dhat_in = self.n_rff * 2
        self.dhat_out = np.concatenate([self.df[:-1], [d_out]])
        # SET UP PARAMETERS
        self.theta_logsigma = Variable(torch.randn(self.n_layers).type(dtype),
                                       requires_grad=True)
        self.theta_loglength = Variable(torch.ones(self.n_layers).type(dtype),
                                        requires_grad=False)
        # prior and variational mean/log-variance for W (one pair per layer)
        self.W_mean_prior = [Variable(torch.zeros(self.dhat_in[i], self.dhat_out[i]),
                             requires_grad=False) for i in range(self.n_layers)]
        self.W_mean= [Variable(torch.zeros(self.dhat_in[i], self.dhat_out[i]),
                      requires_grad=True) for i in range(self.n_layers)]
        self.W_logsigma_prior = [Variable(torch.zeros(self.dhat_in[i], self.dhat_out[i]),
                                 requires_grad=False) for i in range(self.n_layers)]
        self.W_logsigma = [Variable(torch.zeros(self.dhat_in[i], self.dhat_out[i]),
                           requires_grad=True) for i in range(self.n_layers)]
        # prior and variational mean/log-variance for Omega
        self.Omega_mean_prior = [Variable(torch.zeros(self.d_in[i], self.d_out[i]),
                                 requires_grad=False) for i in range(self.n_layers)]
        self.Omega_mean= [Variable(torch.zeros(self.d_in[i], self.d_out[i]),
                          requires_grad=False) for i in range(self.n_layers)]
        self.Omega_logsigma_prior = [self.theta_loglength[i].expand(self.d_in[i], self.d_out[i]) * Variable(-2. * torch.ones(self.d_in[i], self.d_out[i]), requires_grad=False) for i in range(self.n_layers)]
        self.Omega_logsigma = [Variable(
            self.Omega_logsigma_prior[i].data.clone(), requires_grad=True) for i in range(self.n_layers)]
        # fixed reparameterization noise (sampled once at construction)
        self.Omega_eps = [Variable(torch.randn(self.d_in[i], self.d_out[i]),
                          requires_grad=False) for i in range(self.n_layers)]
        self.W_eps = [Variable(torch.randn(self.dhat_in[i], self.dhat_out[i]),
                      requires_grad=False) for i in range(self.n_layers)]
    def forward(self, x):
        """Propagate x through all layers and return the final features F.

        NOTE(review): assumes x has shape (N, n_features) -- confirm with caller.
        """
        F = x
        N = x.size(0)
        for i in range(self.n_layers):
            N_rf = self.n_rff[i]
            # TODO: FeedForward Approach - add X input at each hiden layer
            # OMEGA - Reparametrization (Section 3.3 in [1])
            Omega_approx = (self.Omega_mean[i] +
                            torch.exp(self.Omega_logsigma[i] / 2.) *
                            self.Omega_eps[i])
            # Equation 6 in [1]
            phi_half = torch.mm(F, Omega_approx)
            phi = torch.exp(0.5 * self.theta_logsigma[i]).expand(N, N_rf)
            phi = phi / np.sqrt(1. * N_rf)
            A = phi * torch.cos(phi_half)
            B = phi * torch.sin(phi_half)
            phi = torch.cat([A,B], 1)
            # W - Reparametrization (Equation 12 in [1])
            W_approx = (self.W_mean[i] +
                        torch.exp(self.W_logsigma[i] / 2.) *
                        self.W_eps[i])
            # First line under Equation 6 in [1]
            F = torch.mm(phi, W_approx)
        return F
    def compute_objective(self, X, y):
        """Return (loss, class probabilities): NLL of the softmax outputs plus
        KL regularization of W and Omega against their priors."""
        F = self.forward(X)
        # SOFTMAX LOSS
        y_logprob = nn.LogSoftmax()(F)
        loss = nn.NLLLoss()(y_logprob, y)
        # ADD KLL regularization on the parameters
        for i in range(self.n_layers):
            # REGULARIZE W with KLL
            loss += 0.001 * ut.KL_diagLog(self.W_mean[i], self.W_mean_prior[i], self.W_logsigma[i], self.W_logsigma_prior[i])
            #
            # REGULARIZE Omega with KLL
            loss += 0.001 * ut.KL_diagLog(self.Omega_mean[i], self.Omega_mean_prior[i],
                                          self.Omega_logsigma[i],
                                          self.Omega_logsigma_prior[i])
        return loss, torch.exp(y_logprob)
    def fit(self, X, y, verbose=0, lr=1e-3, epochs=100):
        """Manual SGD loop over `epochs` full-batch steps.

        NOTE(review): the layer loop below rebinds `i`, so the final prints
        report the last layer index, not the last epoch.
        """
        for i in range(epochs):
            loss, y_prob = self.compute_objective(X, y)
            loss.backward()
            print "\n%d - loss: %.3f" % (i, loss.data[0])
            acc = np.mean(np.argmax(ut.var2numpy(y_prob), 1) == ut.var2numpy(y))
            print "%d - acc: %.3f" % (i, acc)
            self.theta_logsigma.data -= lr * self.theta_logsigma.grad.data
            self.theta_logsigma.grad.data.zero_()
            # self.theta_loglength.data -= lr * self.theta_loglength.grad.data
            # self.theta_loglength.grad.data.zero_()
            for i in range(self.n_layers):
                self.W_mean[i].data -= lr * self.W_mean[i].grad.data
                self.W_mean[i].grad.data.zero_()
                self.W_logsigma[i].data -= lr * self.W_logsigma[i].grad.data
                self.W_logsigma[i].grad.data.zero_()
        print y_prob
        print "%d - loss: %.3f" % (i, loss.data[0])
        print "%d - acc: %.3f" % (i, acc)
# [1] Random Feature Expansions for Deep Gaussian Processes | true |
927e75f0bbc64b16c1610cfbe86f55bc7b584142 | Python | smuazzam21/python | /input.py | UTF-8 | 713 | 4.1875 | 4 | [] | no_license | # WAP in python
# =========================
# read a student name
# read marks in 3 subjects
# Calculate sum of 3 subjects and average of 3 subjects
# using single print() - display name , subject marks
# total and average
# dont use \n
# Read a student's name and three subject marks, then print a summary report.
name = input("Enter student's name : ")
marks = [float(input("Enter marks in Subject {}: ".format(i))) for i in range(1, 4)]
total = sum(marks)
average = total / 3
print(''' -----------------------
Name of the student : {}
Marks in Subject 1 : {}
Marks in Subject 2 : {}
Marks in Subject 3 : {}
-----------------------------
Total Marks :{}
Average : {}'''.format(name, marks[0], marks[1], marks[2], total, average))
55c60fbb41f6861a42f80da8a9ec53d3d6f8acca | Python | potgieterbt/sms2 | /Untitled_Folder/Untitled.py | UTF-8 | 470 | 3.34375 | 3 | [] | no_license | from tkinter import *
# Build a small window: a label row plus two text areas stacked below it.
master = Tk()
Label(master, text="This is a test").grid(row=0, column=0)
mytext1 = Text(master, width=30,height=5)
mytext1.grid(row=1, column=0, sticky="nsew")
mytext2 = Text(master, width=30,height=5)
mytext2.grid(row=2, column=0, sticky="nsew")
# Let the single column and the two text rows absorb resize space.
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=0) # not needed, this is the default behavior
master.rowconfigure(1, weight=1)
master.rowconfigure(2, weight=1)
master.mainloop()
f640b7ec71c23dddd4de8beecc7724667d1ec3e2 | Python | unaigarciarena/Discrete | /Escritorio/MultilabelNN/multiLabel2.0.py | UTF-8 | 3,885 | 2.984375 | 3 | [] | no_license | '''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
import os
import warnings
import shutil
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Import MNIST data
warnings.filterwarnings("ignore")
tf.logging.set_verbosity(tf.logging.ERROR)
def batch(x, y, size, i):
    """
    Return the next mini-batch of `size` rows starting at index `i`,
    wrapping around to the start of the data when the end is reached.

    :param x: 2d array of all rows/features
    :param y: matching labels
    :param size: batch size
    :param i: start index of this batch
    :return: (next start index, x_batch, y_batch)
    """
    end = i + size
    if end > x.shape[0]:
        end -= x.shape[0]
        x_batch = np.concatenate((x[i:, :], x[:end, :]))
        y_batch = np.concatenate((y[i:], y[:end]))
        return end, x_batch, y_batch
    return end, x[i:end, :], y[i:end]
LOGDIR = "Board/" # Folder where log is stored
# Remove previous log.
if os.path.exists(LOGDIR):
    shutil.rmtree(LOGDIR)
#
data = np.loadtxt("emotions.dat", delimiter=",") # Load data
# Initialize tensors (TF1 graph mode)
x = tf.placeholder(tf.float32, [None, 72], name="x") # x will contain the data batches
y = tf.placeholder(tf.float32, [None, 6], name ="y") # y will contain the label batches
w = tf.Variable(tf.truncated_normal([72, 6]), name="w") # w will contain the weights
b = tf.Variable(tf.truncated_normal([6]), name="b") # b will contain the biases
logit = tf.matmul(x, w) + b # logit function
# Threshold each logit at 0.3 to get 0/1 multi-label predictions
# NOTE(review): 0.3 is applied to raw logits, not sigmoid outputs -- confirm intent
aux = tf.map_fn(lambda arg: tf.map_fn(lambda arg1: tf.cond(tf.less(0.3, arg1), lambda: tf.constant(1, dtype=tf.float32, name='one'), lambda: tf.constant(0, dtype=tf.float32, name='one')), arg), logit)
hamming_loss = tf.divide(tf.divide(tf.reduce_sum(tf.abs(tf.subtract(aux, y))), tf.cast(tf.shape(y)[0], tf.float32)), tf.cast(tf.shape(y)[0], tf.float32))
tf.summary.scalar("Hamming", hamming_loss)
# Minimize error using cross entropy
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=y))
# Values to be logged
tf.summary.histogram("Weights", w)
tf.summary.histogram("Biases", b)
tf.summary.histogram("Logit", logit)
tf.summary.scalar("XEntropy", cost)
# Gradient Descent
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
summ = tf.summary.merge_all()
fold = 0
index = 0
# Launch the graph
sess = tf.Session()
saver = tf.train.Saver()
sess.run(init)
learning_rate = 0.01
training_epochs = 20000
batch_size = 100
display_step = 100
saver = tf.train.Saver()
# 5-fold cross-validation; one TensorBoard log directory per fold
for x1, y1 in KFold(n_splits=5).split(data[:,-6:], data[:,:-6]):
    train_labels = data[x1,-6:]
    train_features = data[x1,:-6]
    test_labels = data[y1,-6:]
    test_features = data[y1,:-6]
    if fold == 0:
        writer = tf.summary.FileWriter(LOGDIR + "Graph")
        writer.add_graph(sess.graph)
    writer = tf.summary.FileWriter(LOGDIR + "Fold" + str(fold))
    for epoch in range(training_epochs):
        avg_cost = 0.
        index, batch_xs, batch_ys = batch(train_features, train_labels, batch_size, index)
        _, c, weights, bias = sess.run([optimizer, cost, w, b], feed_dict={x: batch_xs, y: batch_ys})
        avg_cost += c / batch_size
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % epoch, "cost =", "{:.9f}".format(avg_cost))
            s = sess.run(summ, feed_dict={x: batch_xs, y: batch_ys, w: weights, b: bias})
            writer.add_summary(s, epoch)
            saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), epoch)
    fold += 1
    print("Optimization Finished!")
    print("Hamming loss: ", hamming_loss.eval({x: test_features, y: test_labels}, session=sess))
| true |
18a2919f5ffdea5a87f76522f5167934ec8d3727 | Python | SaItFish/PySundries | /algorithm_questions/LeetCode/剑指Offer/09用两个栈实现队列.py | UTF-8 | 1,273 | 4.03125 | 4 | [] | no_license | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: SaltFish
# @file: 09用两个栈实现队列.py
# @date: 2020/06/28
"""
用两个栈实现一个队列。队列的声明如下,请实现它的两个函数 appendTail 和 deleteHead ,分别完成在队列尾部插入整数和在队列头部删除整数的功能。(若队列中没有元素,deleteHead 操作返回 -1 )
示例 1:
输入:
执行的方法:["CQueue","appendTail","deleteHead","deleteHead"]
使用的参数:[[],[3],[],[]]
输出:[null,null,3,-1]
示例 2:
输入:
执行的方法:["CQueue","deleteHead","appendTail","appendTail","deleteHead","deleteHead"]
使用的参数:[[],[],[5],[2],[],[]]
输出:[null,-1,null,null,5,2]
"""
class CQueue:
    """FIFO queue built from two stacks.

    Stack A receives every push; stack B holds elements in reversed order,
    so its top is always the oldest element still queued.
    """

    def __init__(self):
        self.A, self.B = [], []

    def appendTail(self, value: int) -> None:
        """Enqueue `value` at the tail of the queue."""
        self.A.append(value)

    def deleteHead(self) -> int:
        """Dequeue and return the head element, or -1 when the queue is empty."""
        if not self.B:
            # Out-stack exhausted: reverse the in-stack into it.
            while self.A:
                self.B.append(self.A.pop())
        return self.B.pop() if self.B else -1
| true |
04f5d094914bc853430f8146ab96093c2a46f584 | Python | abeersalam/Algorithms--TSP-and-Longest-Common-Substring | /Version 1/Python Files/TSP/tsp.py | UTF-8 | 2,501 | 3.515625 | 4 | [] | no_license | #Brian Mitzel 893038547
#Sorasit Wanichpan 897260477
#Abeer Salam 899423594
#CPSC 335
#Project 2 v1
#python3 tsp.py <weighted_graph.xml.zip>
import sys
import time
import tsplib
import candidate
#TSP Algorithm from the lecture notes, modified to use lazy permutation over eager
def tsp_algo(weighted_graph):
    """Exhaustive TSP solver.

    Lazily enumerates every permutation of the vertices, closes each one into
    a cycle, and keeps the cheapest verified Hamiltonian cycle.  Returns None
    when no valid cycle exists.
    """
    vertices = list(range(weighted_graph.vertex_count()))
    # Lazy permutation generation avoids materializing all n! candidates.
    factory = candidate.PermutationFactory(vertices)
    best_cycle = None
    best_cost = None
    while factory.has_next():
        order = factory.next()
        # Close the tour by returning to its starting vertex.
        tour = order + [order[0]]
        if not verify_tsp(weighted_graph, tour):
            continue
        cost = cycle_weight(weighted_graph, tour)
        if best_cost is None or cost < best_cost:
            best_cycle, best_cost = tour, cost
    return best_cycle
#From the lecture notes
def cycle_weight(graph, cycle):
    """Return the total weight of the edges along consecutive cycle vertices."""
    return sum(graph.distance(a, b) for a, b in zip(cycle, cycle[1:]))
#From the lecture notes
def verify_tsp(graph, cycle):
    """Return True iff every consecutive vertex pair in `cycle` is a graph edge."""
    return all(graph.is_edge(a, b) for a, b in zip(cycle, cycle[1:]))
def main():
    """Command-line driver: load a TSPLIB instance from argv[1], run the
    brute-force solver, and print the optimal cycle, its cost, and timing."""
    # Verify the correct number of command line arguments were used
    if len(sys.argv) != 2:
        print('error: you must supply exactly one arguments\n\n' +
              'usage: python3 tsp.py <weighted_graph.xml.zip file>')
        sys.exit(1)
    # Capture the command line arguments
    weighted_graph = sys.argv[1]
    print('TSP Instance:')
    tspfile = tsplib.load(weighted_graph)  # load the TSP file named by the user
    print("n = ", tspfile.vertex_count())  # number of vertices
    start = time.perf_counter()
    result = tsp_algo(tspfile)  # Hamiltonian cycle of minimum total weight
    end = time.perf_counter()
    # NOTE(review): if no Hamiltonian cycle exists, result is None and
    # cycle_weight() below will raise — confirm inputs always admit a cycle.
    cost = cycle_weight(tspfile, result)
    # Print the results
    print('Elapsed time: ' + str(end - start))
    print('Optimal Cycle: ' + str(result))
    print('Optimal Cost: ' + str(cost))
# Standard script entry point.
if __name__ == "__main__":
    main()
e4099f2e17082774318a4ecf9b14d4b3c64a2260 | Python | Samih754/NSI-CS-2021-2022 | /skool/turtle/7.py | UTF-8 | 298 | 4.34375 | 4 | [] | no_license | #creates a function to make the square.
import turtle
import math

# Start up turtle: cursor shape, canvas size, and a clean slate.
turtle.shape("turtle")
turtle.screensize(1000, 1000)
turtle.reset()
turtle.clear()


def square(size):
    """Trace a square whose sides have the given length."""
    for _ in range(4):
        turtle.forward(size)
        turtle.right(90)


square(30)
turtle.mainloop()
| true |
cb53eaecdd241c0c632f64e9a0e776715095416f | Python | npkhanhh/codeforces | /python/1216D.py | UTF-8 | 191 | 2.890625 | 3 | [] | no_license | import math
# Codeforces 1216D: z is the gcd of every element's gap to the maximum,
# y is the total number of z-sized increments needed to level all elements.
n = int(input())
a = list(map(int, input().split()))
m = max(a)
z = m - a[0]
for value in a[1:]:
    z = math.gcd(z, m - value)
y = sum((m - value) // z for value in a)
print(y, z)
| true |
f4d698f38bff5bc06529b9881a3d5cdd96493401 | Python | ikaroswyc/NLTK | /NLTK_Chapter3b_practice.py | UTF-8 | 4,552 | 3.453125 | 3 | [] | no_license | from __future__ import division
import nltk, re, pprint
from urllib.request import urlopen
from nltk.book import *
from nltk.corpus import gutenberg
from nltk.corpus import brown
from nltk.corpus import wordnet as wn
SimpleText = 'One day, his horse ran away. The neighbors came to express their concern: "Oh, that\'s too bad. How are you going to work the fields now?" The farmer replied: "Good thing, Bad thing, Who knows?" In a few days, his horse came back and brought another horse with her. Now, the neighbors were glad: "Oh, how lucky! Now you can do twice as much work as before!" The farmer replied: "Good thing, Bad thing, Who knows?"'
def exercise1():
    """Exercise 1: count tokens of text2 that contain a wh-word
    (what/where/when/which/who/whom/whose/why) and print the total."""
    wh_pattern = re.compile(r'what|where|when|which|who|whom|whose|why')
    size2 = sum(1 for w in text2 if wh_pattern.findall(w))
    print("text2: ", size2)
def avg_letter(text, category):
    """Average number of letters per word in the given corpus category."""
    words = text.words(categories=category)
    total_letters = sum(len(w) for w in words)
    return total_letters / len(words)
def avg_word(text, category):
    """Average number of words per sentence in the given corpus category."""
    sentence_count = len(text.sents(categories=category))
    word_count = len(text.words(categories=category))
    return word_count / sentence_count
def ari(text, category):
    """Automated Readability Index for one corpus category:
    4.71 * letters-per-word + 0.5 * words-per-sentence - 21.43."""
    return 4.71 * avg_letter(text, category) + 0.5 * avg_word(text, category) - 21.43
def exercise29():
    """Exercise 29: print the ARI readability score of every Brown
    corpus category, one 'category:score' line each."""
    for category in brown.categories():
        score = ari(brown, category)
        print(category + ':' + str(score))
def exercise30():
    """Exercise 30: normalize SimpleText with the Porter and Lancaster
    stemmers and print each stemmed token list for comparison."""
    tokens = nltk.word_tokenize(SimpleText)
    for label, stemmer in (("porter: ", nltk.PorterStemmer()),
                           ("lancaster: ", nltk.LancasterStemmer())):
        print(label, [stemmer.stem(t) for t in tokens])
def ARI(raw):
    """Automated Readability Index of a raw text string.

    Sentences are segmented with the pre-trained Punkt tokenizer, words
    with nltk.word_tokenize, then
    ARI = 4.71 * (letters/word) + 0.5 * (words/sentence) - 21.43.
    """
    sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    sents = [nltk.word_tokenize(s) for s in sent_tokenizer.tokenize(raw)]
    words = nltk.word_tokenize(raw)
    # Fix: removed a leftover debug print of the full token list, which
    # flooded stdout when this ran over entire corpora (see exercise40).
    letters_per_word = len(''.join(words)) / len(words)
    words_per_sentence = len(words) / len(sents)
    return (4.71 * letters_per_word) + (0.5 * words_per_sentence) - 21.43
def exercise40():
    """Exercise 40: compute and print the ARI reading-difficulty score
    of the ABC Rural News corpus, using Punkt sentence segmentation."""
    rural_raw = nltk.corpus.abc.raw('rural.txt')
    print("rural: ", ARI(rural_raw))
def exercise(exNum):
    """Announce exercise `exNum`, then dispatch to the module-level
    function named exercise<exNum>."""
    print("Exercise {}".format(exNum))
    runner = globals()["exercise{}".format(exNum)]
    runner()
    print("")
def main():
    # Uncomment individual calls below to run the corresponding exercise.
    # exercise(1)
    # exercise(29)
    # exercise(30)
    exercise(40)
# Standard script entry point.
if __name__ == "__main__":
    main()
| true |
06afb20980cf81a69683732a10253def9f4e7454 | Python | kshen91/sortAlgorithms | /heapSort.py | UTF-8 | 1,332 | 3.40625 | 3 | [] | no_license | ##!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: cylisery@outlook.com
# sort algorithm: heap sort
# Time Complexity: n*log(n)
# Space Complexity: 1
# class Node(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def Runner(lst):
    """Heap sort: return the elements of `lst` in ascending order.

    Matches the original contract: the input list is consumed (left empty)
    because elements are popped off as they are extracted.

    Fixes over the original:
    * `range(len(lst)/2, 0, -1)` produced a float under Python 3 and made
      range() raise TypeError; integer arithmetic is used throughout.
    * each extraction now performs a full sift-down, giving the advertised
      O(n log n) instead of a full O(n) repair pass per extracted element.
    """
    def _sift_down(heap, root, size):
        # Restore the max-heap property for the subtree rooted at `root`
        # (0-based indexing; children live at 2*root+1 and 2*root+2).
        while True:
            child = 2 * root + 1
            if child >= size:
                return
            if child + 1 < size and heap[child + 1] > heap[child]:
                child += 1  # pick the larger child
            if heap[child] <= heap[root]:
                return
            heap[root], heap[child] = heap[child], heap[root]
            root = child

    # Build a max-heap by sifting down every internal node, bottom-up.
    for i in range(len(lst) // 2 - 1, -1, -1):
        _sift_down(lst, i, len(lst))

    ret = []
    while lst:
        # Swap the maximum to the end, pop it, and repair the shrunken heap.
        lst[0], lst[-1] = lst[-1], lst[0]
        ret.append(lst.pop())
        _sift_down(lst, 0, len(lst))
    ret.reverse()  # collected in descending order; callers expect ascending
    return ret
| true |
bef803e2740a08daa6cf96dffd11b06f3809999d | Python | kushagraagarwal19/HackerRank-Interview-Preparation-Kit | /String Manipulation/Making Anagrams.py | UTF-8 | 382 | 3.078125 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/ctci-making-anagrams/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=strings
def makeAnagram(a, b):
    """Minimum number of character deletions needed to make a and b anagrams.

    Fix: the original tested per-character membership with `in`, which
    ignores multiplicity — e.g. makeAnagram("aab", "ab") returned 0 instead
    of 1.  Counting per-character frequency differences is the correct
    solution to the HackerRank problem (the stray debug prints are gone too).
    """
    from collections import Counter
    freq_a, freq_b = Counter(a), Counter(b)
    # Counter subtraction keeps only positive surpluses on each side.
    surplus = (freq_a - freq_b) + (freq_b - freq_a)
    return sum(surplus.values())
6ad8def832585394f2fe18c7afe518b112a03797 | Python | ASA11599/DataStructures | /Python/binary_tree.py | UTF-8 | 1,403 | 3.640625 | 4 | [] | no_license | class Binary_tree:
class Node:
def __init__(self,value,left=None,right=None):
self.value = value
self.left = left
self.right = right
def __repr__(self):
return "Node(" + str(self.value) + ")"
    def __init__(self):
        # Empty tree: no root node yet.
        self.root = None
def add(self,value):
if self.root == None:
self.root = Binary_tree.Node(value)
return
current = self.root
while current != None:
if current.value == value:
return
elif value > current.value:
if current.right != None:
current = current.right
else:
current.right = Binary_tree.Node(value)
return
elif value < current.value:
if current.left != None:
current = current.left
else:
current.left = Binary_tree.Node(value)
return
def contains(self,value):
current = self.root
while current != None:
if current.value == value:
return True
elif value > current.value:
current = current.right
elif value < current.value:
current = current.left
return False | true |
c2e938c7a4ba1e115285f43e8352bf827223a89a | Python | arturUkr/common | /tests_practice/task_test.py | UTF-8 | 3,379 | 3.25 | 3 | [] | no_license | import unittest
from unittest.mock import patch
from tests_practice.task_list import (
task_1, task_2, task_3, task_4, task_5, task_6, task_7, task_8, task_9, task_10,
task_11, task_12, task_13, task_14, task_15, task_16, task_17, task_18, task_19,
task_20
)
class TestTask(unittest.TestCase):
    """Unit tests for the task_1..task_20 practice functions.

    Input-driven tasks (13, 14) mock builtins.input so they run headless.
    """
    def test_task_1(self):
        # Intersection of Fibonacci numbers with 1..13.
        l1, l2 = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        self.assertListEqual(task_1(l1, l2), [1, 2, 3, 5, 8, 13])
    def test_task_2(self):
        self.assertEqual(task_2("I am a good developer. I am also a writer"), 5)
    def test_task_3(self):
        # Non-integer input must raise, not return a boolean.
        self.assertTrue(task_3(27))
        self.assertFalse(task_3(4))
        self.assertRaises(ValueError, task_3, 9.1)
    def test_task_4_equal(self):
        self.assertEqual(task_4(59), 5)
        self.assertEqual(task_4(48), 3)
    def test_task_4_notequal(self):
        self.assertNotEqual(task_4(11), 3)
        self.assertNotEqual(task_4(12), 4)
    def test_task_5_equal(self):
        # Zeros must be moved to the tail, preserving the other elements' order.
        self.assertEqual(task_5([0, 2, 3, 0, 4, 6, 7, 10]), [2, 3, 4, 6, 7, 10, 0, 0])
        self.assertEqual(task_5([2, 3, 4, 6, 7, 10]), [2, 3, 4, 6, 7, 10])
    def test_task_6_true(self):
        self.assertTrue(task_6([5, 7, 9, 11]))
    def test_task_6_false(self):
        self.assertFalse(task_6([5, 7, 9, 999]))
    def test_task_7_equal(self):
        self.assertListEqual(task_7([5, 3, 4, 3, 4]), [5])
        self.assertListEqual(task_7([5, 1, 3, 4, 3, 4]), [1, 5])
    def test_task_8_equal(self):
        self.assertListEqual(task_8([1, 2, 3, 4, 6, 7, 8]), [5])
    def test_task_9_equal(self):
        self.assertEqual(task_9([1, 2, 3, (1, 2), 3]), 3)
        self.assertEqual(task_9([1, 2, 3, 0, (1, 2), 3, (1, )]), 4)
    def test_task_10_equal(self):
        self.assertEqual(task_10("Hello World and Coders"), "sredoC dna dlroW olleH")
    def test_task_10_notequal(self):
        self.assertNotEqual(task_10("Artur"), "rutr")
    def test_task_11_equal(self):
        self.assertEqual(task_11(63), "1:3")
        self.assertEqual(task_11(57), "0:57")
    def test_task_12_equal(self):
        self.assertEqual(task_12("fun&!! time"), "time")
        self.assertEqual(task_12("I love dogs"), "love")
    @patch('builtins.input', return_value="My name is Michele")
    # @patch('tests_practice.task_list.task_13', return_value="My name is Michele")
    def test_task_13_equal(self, mock):
        # task_13 reads from stdin; the patched input supplies the sentence.
        self.assertEqual(task_13(), "Michele is name My")
    @patch("builtins.input", return_value=9)
    def test_task_14_equal(self, mock):
        self.assertListEqual(task_14(), [1, 1, 2, 3, 5, 8, 13, 21, 34])
    def test_task_15_equal(self):
        self.assertListEqual(task_15([1, 4, 9, 16, 25, 36, 49, 64, 81, 100]), [4, 16, 36, 64, 100])
    def test_task_16_equal(self):
        self.assertEqual(task_16(4), 10)
    def test_task_17_equal(self):
        self.assertEqual(task_17(4), 24)
        self.assertEqual(task_17(5), 120)
    def test_task_18_equal(self):
        self.assertEqual(task_18("abcd"), "bcdE")
    def test_task_19_equal(self):
        self.assertEqual(task_19("edcba"), "abcde")
    def test_task_20_equal(self):
        self.assertTrue(task_20(1, 2))
        self.assertFalse(task_20(2, 1))
        self.assertEqual(task_20(1, 1), "-1")
# Allow running this test module directly with `python task_test.py`.
if __name__ == '__main__':
    unittest.main()
| true |
cbef4f16c30bf167db276740f4e7b024d021767e | Python | jkulhanek/mnist-recognition-pytorch | /src/train.py | UTF-8 | 1,517 | 3.09375 | 3 | [
"MIT"
] | permissive | import torch
import torch.optim as optim
import torch.nn as nn
from model import Net
import dataset
# Hyperparameters for the optimizer and training loop.
learningRate = 0.01
epochs = 2
net = Net()
optimizer = optim.SGD(net.parameters(), lr=learningRate)
criterion = nn.CrossEntropyLoss()  # classification loss over class logits
# Fix: the epoch count was hard-coded as range(2), silently ignoring the
# `epochs` variable defined above; they are now kept in sync.
for epoch in range(epochs):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(dataset.trainloader, 0):
        # get the inputs
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# Store weights
torch.save(net.state_dict(), 'net.pth')
# Run the test
def test():
    """Evaluate the trained network on the test set and print its accuracy."""
    hits, seen = 0, 0
    with torch.no_grad():  # inference only: skip gradient bookkeeping
        for images, labels in dataset.testloader:
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            seen += labels.size(0)
            hits += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * hits / seen))
test() | true |
859c4e473fe4f1093e4bd77107c3fae70769c3de | Python | kobaltkween/python3 | /Lesson 05 - More on Regular Expressions/sentenceSplit.py | UTF-8 | 74 | 2.609375 | 3 | [] | no_license | import re
def sentenceSplit(text):
    """Split `text` into sentences at '.', '?' or '!' followed by whitespace.

    The terminating punctuation (and any trailing punctuation on the last
    sentence) is not included in the pieces that precede it.
    """
    boundary = re.compile(r"[?.!]\s+")
    return boundary.split(text)
d8aa61dd922615942f6c659ff1f580d0381c0911 | Python | Nikitashukla01/random_project | /92.py | UTF-8 | 490 | 4.25 | 4 | [] | no_license | # Reverse string
# Using a while loop
# Fix: the original bound the name `str`, shadowing the builtin string type
# for the rest of the module; the variable is renamed to `text`.
text = "JavaTpoint"  # string to reverse
print("The original string is : ", text)
reverse_String = ""  # accumulates the characters back-to-front
count = len(text)  # 1-based index of the next character to copy
while count > 0:
    reverse_String += text[count - 1]  # append characters from the end
    count = count - 1  # decrement index
print("The reversed string using a while loop is : ", reverse_String)  # reversed string
| true |
f5d75b905a6c02b628c156ddc8681f27b763d190 | Python | krtk0/job-bot | /jobs-vub-bot.py | UTF-8 | 10,707 | 2.625 | 3 | [] | no_license | import requests
from lxml import html
import re
import psycopg2
import psycopg2.extras
from telegram.ext import Updater, CommandHandler, MessageHandler, Job, Filters
import logging
from datetime import datetime
"Enable logging"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
"Get a telegram bot token"
TOKEN = ''
with open(TOKENPATH, 'r') as token:
for line in token:
TOKEN += line
class Connection:
    """Thin wrapper around one PostgreSQL connection with dict-style rows."""

    def __init__(self):
        """Open the database connection and a DictCursor."""
        self.dbc = psycopg2.connect(CONNECTION_PARAMS)
        self.c = self.dbc.cursor(cursor_factory=psycopg2.extras.DictCursor)

    def do(self, sql, *args):
        """Execute a parameterized statement, commit, and return its rows.

        Returns None for statements that produce no result set.
        """
        self.c.execute(sql, tuple(args))
        self.dbc.commit()
        try:
            rows = self.c.fetchall()
        except psycopg2.ProgrammingError:
            # fetchall() raises when the statement produced no result set.
            return None
        return rows

    def close(self):
        """Close the underlying database connection."""
        self.dbc.close()
class Query:
    """
    SQL query helpers; each takes an open Connection `dbc` as its first
    argument.  Consistency fix: the previous code mixed `dbc.do(...)` with
    the unbound `Connection.do(dbc, ...)` spelling — both are semantically
    identical, so everything now uses the bound form.
    """
    @classmethod
    def is_job_new(cls, dbc, job_id):
        """
        Return True when job_id is not yet stored in the jobs table.
        """
        sql = """
        SELECT job_id
        FROM jobs
        WHERE job_id=%s
        """
        result = dbc.do(sql, job_id)
        return len(result) == 0
    @classmethod
    def add_job(cls, dbc, job_id, job_title):
        """
        Insert a newly discovered job and log the addition.
        """
        sql = """
        INSERT INTO jobs (job_id, job_title) VALUES (%s, %s)
        """
        dbc.do(sql, job_id, job_title)
        logging.info('{0:%Y-%b-%d %H:%M:%S} Job {1} added'.format(datetime.now(), job_id))
    @classmethod
    def add_to_sublist(cls, dbc, chat_id, username=None, first_name=None,
                       last_name=None):
        """
        Add a user to the subscriber list.
        """
        sql = """
        INSERT INTO subscribers (chat_id, username, first_name, last_name)
        VALUES (%s, %s, %s, %s)
        """
        return dbc.do(sql, chat_id, username, first_name, last_name)
    @classmethod
    def get_subs(cls, dbc, status=None):
        """
        Fetch all subscribers, optionally filtered by subscription status.
        """
        sql = """
        SELECT *
        FROM subscribers
        """
        if status:
            sql += """
            WHERE status = %s
            """
            return dbc.do(sql, status)
        return dbc.do(sql)
    @classmethod
    def get_sub_one(cls, dbc, chat_id):
        """
        Fetch one subscriber row by chat_id (empty list when absent).
        """
        sql = """
        SELECT *
        FROM subscribers
        WHERE chat_id = %s
        """
        return dbc.do(sql, chat_id)
    @classmethod
    def count_subs(cls, dbc):
        """
        Return the total number of subscribers (one row, key 'total').
        """
        sql = """
        SELECT COUNT(*) as total
        FROM subscribers
        """
        return dbc.do(sql)
    @classmethod
    def set_subscription(cls, dbc, chat_id, subscription):
        """
        Update a subscriber's status ('active' / 'inactive').
        """
        sql = """
        UPDATE subscribers
        SET status = %s
        WHERE chat_id = %s
        """
        return dbc.do(sql, subscription, chat_id)
def parse_jobs(bot, job):
    """
    Parse the VUB student jobs website to find new posted jobs
    and push them to Telegram.  Runs periodically from the bot's job queue
    (`bot` is the Telegram bot instance; `job` is the scheduler's job object,
    unused here).
    """
    dbc = Connection()
    # logging.info('Connection with DB established')
    url = 'http://jobs.vub.ac.be/jobs'
    page = requests.get(url).text
    doc = html.document_fromstring(page)
    "Get jobs' ids from the first page"
    # Job ids are the first 4-digit run inside each nid cell.
    ids = []
    for spam in doc.xpath("//*[contains(@class, 'views-field views-field-nid')]"):
        job_id = re.search(re.compile(r'\d\d\d\d'), spam.text)
        if job_id:
            ids.append(job_id.group(0))
    "Get jobs' titles from the first page"
    titles = []
    for spam in doc.xpath("//*[contains(@class, 'views-field views-field-title')]/a"):
        titles.append(spam.text)
    # Drop the first anchor, which is not a job title row.
    titles = titles[1:]
    "Create jobs dictionary: {job_id: job:title}"
    # jobs_dict = dict(zip(ids, titles))
    _job_new = True
    i = 0
    # Walk the list and stop at the first job already in the DB — this
    # assumes the site lists newest jobs first (TODO confirm).
    while i < len(ids) and _job_new:
        url_job = 'http://jobs.vub.ac.be/node/'
        _job_new = Query.is_job_new(dbc, ids[i])
        if _job_new:
            Query.add_job(dbc, ids[i], titles[i])
            url_job += ids[i]
            logging.info('{0:%Y-%b-%d %H:%M:%S} New job is posted: {1}'.format(datetime.now(), url_job))
            subs = Query.get_subs(dbc, status='active')
            if subs:
                _count = 0
                for sub in subs:
                    "Push to Telegram"
                    bot.send_message(chat_id=sub['chat_id'],
                                     text='New job "{0}" is posted at {1}'.format(titles[i], url_job))
                    _count += 1
                logging.info(
                    '{2:%Y-%b-%d %H:%M:%S} Notification about the job {0} is sent to {1} user(s)'.format(ids[i],
                                                                                                        _count,
                                                                                                        datetime.now()))
        i += 1
    i = None
    _job_new = None
    dbc.close()
    # logging.info('Connection with DB closed')
def start_com(bot, update):
    """
    COMMAND /start
    Subscribe user for job updates: insert a new subscriber row, or
    reactivate one that previously unsubscribed.
    """
    dbc = Connection()
    on_sublist = Query.get_sub_one(dbc, str(update.message.chat_id))
    if not on_sublist:
        # Unknown user: record whatever profile fields Telegram provides.
        user = update.message.from_user
        # NOTE(review): assumes `user` supports dict-style item access and
        # raises KeyError for missing fields — confirm against the installed
        # python-telegram-bot version.
        try:
            un = user['username']
        except KeyError:
            un = None
        try:
            fn = user['first_name']
        except KeyError:
            fn = None
        try:
            ln = user['last_name']
        except KeyError:
            ln = None
        Query.add_to_sublist(dbc, str(update.message.chat_id),
                             username=un,
                             first_name=fn,
                             last_name=ln)
        logging.info('{1:%Y-%b-%d %H:%M:%S} User {0} is added on the sublist.'.format(update.message.chat_id,
                                                                                      datetime.now()))
        bot.send_message(chat_id=update.message.chat_id,
                         text='You will be notified about newposted student '
                              'jobs at VUB.')
    elif on_sublist[0]['status'] == 'inactive':
        # Known but unsubscribed user: flip the status back to active.
        Query.set_subscription(dbc, update.message.chat_id, 'active')
        bot.send_message(chat_id=update.message.chat_id,
                         text='You will be notified about newposted student '
                              'jobs at VUB.')
        logging.info('{1:%Y-%b-%d %H:%M:%S} User {0} is added on the sublist.'.format(update.message.chat_id,
                                                                                      datetime.now()))
    else:
        bot.send_message(chat_id=update.message.chat_id,
                         text='You are already subscribed.')
    dbc.close()
def stop_com(bot, update):
    """
    COMMAND /stop
    Unsubscribe the user from job updates (status -> 'inactive').
    """
    chat_id = update.message.chat_id
    dbc = Connection()
    record = Query.get_sub_one(dbc, chat_id)
    is_active = bool(record) and record[0]['status'] != 'inactive'
    if is_active:
        Query.set_subscription(dbc, chat_id, 'inactive')
        logging.info('{1:%Y-%b-%d %H:%M:%S} User {0} is removed from the sublist.'.format(chat_id,
                                                                                          datetime.now()))
        bot.send_message(chat_id=chat_id,
                         text='You canceled your subscription successfully.'
                              '\nSend me /start to subscribe again.')
    else:
        bot.send_message(chat_id=chat_id, text='You are not subscribed.')
    dbc.close()
def help_com(bot, update):
    """
    COMMAND /help
    Reply with the list of supported commands.
    """
    help_text = ('Something helpful should be here.'
                 '\n/start — subscribe for updates'
                 '\n/stop – unsubscribe from updates'
                 '\n/help – obviously')
    bot.send_message(chat_id=update.message.chat_id, text=help_text)
def sub_com(bot, update):
    """
    COMMAND /sub
    For admin's private usage. Get list and total number of subscribers;
    anyone else gets 'Access denied.' and the attempt is reported to the admin.
    """
    # NOTE(review): ID_ADMIN is not defined anywhere in this module — like
    # TOKENPATH it must be injected before use, or this raises NameError.
    if update.message.chat_id == ID_ADMIN:
        dbc = Connection()
        subs = Query.get_subs(dbc)
        # One formatted line per subscriber.
        sub_list = ''
        for sub in subs:
            sub_list += str_dict(sub)
            sub_list += '\n'
        bot.send_message(chat_id=ID_ADMIN,
                         text=sub_list)
        total = Query.count_subs(dbc)[0]['total']
        bot.send_message(chat_id=ID_ADMIN,
                         text='Total: {0} sub(s).'.format(total))
        dbc.close()
    else:
        guest = update.message.from_user
        bot.send_message(chat_id=update.message.chat_id,
                         text='Access denied.')
        bot.send_message(chat_id=ID_ADMIN,
                         text='{0} tried to get subs'.format(guest))
def reply(bot, update):
    """
    Handle all non-command messages with a hint pointing at /help.
    """
    fallback = ('Seems like it is not a command, I cannot understand you.'
                '\nUse /help to get the full command-list.')
    bot.send_message(chat_id=update.message.chat_id, text=fallback)
def str_dict(dictio):
    """
    Format a subscriber record as one readable line, e.g.
    "id: 7 username: BOB status: ACTIVE ".

    Empty-string and None fields are skipped; string values are upper-cased,
    other values are formatted as-is.  Fix: the original guarded with
    `key != 'chat_id'`, but 'chat_id' is not in the key list below, so the
    check was dead code and has been removed.
    """
    result = ''
    keys = ['id', 'username', 'first_name', 'last_name', 'status']
    for key in keys:
        value = dictio[key]  # KeyError here means the record lacks the column
        if value in ['', None]:
            continue
        try:
            result += '{0}: {1} '.format(key, value.upper())
        except AttributeError:
            # Non-string values (ints, etc.) have no .upper().
            result += '{0}: {1} '.format(key, value)
    return result
def main():
    """Wire up the bot: register command handlers, schedule the scraping
    job, and start long polling until interrupted."""
    updater = Updater(TOKEN)
    dp = updater.dispatcher
    j = updater.job_queue
    dp.add_handler(CommandHandler('start', start_com))
    dp.add_handler(CommandHandler('help', help_com))
    dp.add_handler(CommandHandler('stop', stop_com))
    dp.add_handler(CommandHandler('sub', sub_com))
    # Fallback for plain-text messages, registered after the command handlers.
    dp.add_handler(MessageHandler(Filters.text, reply))
    # Scrape the jobs site every 60 s; next_t=0 runs the first check at once.
    j.put(Job(parse_jobs, 60.0), next_t=0.00)
    updater.start_polling()
    updater.idle()
# Standard script entry point.
if __name__ == '__main__':
    main()
| true |
80d3d8ec27e0f11862b7267c830b73c6b53aad91 | Python | ResidentMario/gtfs-tripify | /tests/util_tests.py | UTF-8 | 7,356 | 2.875 | 3 | [
"MIT"
] | permissive | """
`gtfs-tripify` utilities test module. Asserts that utility functions are correct.
"""
import unittest
import pandas as pd
import gtfs_tripify as gt
from gtfs_tripify.ops import cut_cancellations, discard_partial_logs
class TestCutCancellations(unittest.TestCase):
    """
    Tests the cut-cancellation heuristic.

    Each test builds a minimal single-log logbook ({'uuid': DataFrame}) and
    checks whether cut_cancellations drops the log, trims its tail, or leaves
    it untouched.  '_' marks fields the heuristic does not inspect.
    """
    def setUp(self):
        # Column layout shared by every fixture DataFrame below.
        self.log_columns = [
            'trip_id', 'route_id', 'action', 'minimum_time', 'maximum_time', 'stop_id',
            'latest_information_time'
        ]
    def test_no_op(self):
        """
        The heuristic remove already-empty entries.
        """
        log = pd.DataFrame(columns=self.log_columns)
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result) == 0
    def test_zero_confirmed(self):
        """
        The heuristic should return an empty log if there are zero confirmed stops in the log.
        """
        log = pd.DataFrame(
            columns=self.log_columns,
            data=[['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', '_']]
        )
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result) == 0
    def test_zero_tailing_unconfirmed(self):
        """
        The heuristic should return an unmodified log if there are no tailing
        `STOPPED_OR_SKIPPED` records.
        """
        log = pd.DataFrame(
            columns=self.log_columns,
            data=[['_', '_', 'STOPPED_AT', '_', '_', '_', '_']]
        )
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result['uuid']) == 1
    def test_one_tailing_unconfirmed(self):
        """
        The heuristic should return an unmodified log if there is one tailing
        `STOPPED_OR_SKIPPED` record.
        """
        log = pd.DataFrame(columns=self.log_columns,
                           data=[
                               ['_', '_', 'STOPPED_AT', '_', '_', '_', '_'],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', '_']
                           ])
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result['uuid']) == 2
    def test_many_unique_tailing_unconfirmed(self):
        """
        The heuristic should return an unmodified log if there is at least one `STOPPED_AT`
        record and many tailing `STOPPED_OR_SKIPPED` records, but the logs have two or more
        unique `LATEST_INFORMATION_TIME` values.
        """
        log = pd.DataFrame(columns=self.log_columns,
                           data=[
                               ['_', '_', 'STOPPED_AT', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 1]
                           ])
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result['uuid']) == 3
    def test_many_nonunique_tailing_unconfirmed(self):
        """
        The heuristic should return a block-cleaned log if there is at least one `STOPPED_AT`
        record and many tailing `STOPPED_OR_SKIPPED` records, but the logs have just one unique
        `LATEST_INFORMATION_TIME` values.
        """
        log = pd.DataFrame(columns=self.log_columns,
                           data=[
                               ['_', '_', 'STOPPED_AT', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 1],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 1]
                           ])
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result['uuid']) == 1
    def test_many_nonunique_tailing_unconfirmed_stop_skip(self):
        """
        Sometimes the last record before the trip is cut off is a `STOPPED_OR_SKIPPED` record
        (or multiple such records). The heuristic should account for this and only cut
        `STOPPED_OR_SKIPPED` trips with multiple copies of the same timestamp.
        """
        log = pd.DataFrame(columns=self.log_columns,
                           data=[
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 1],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 1]
                           ])
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result['uuid']) == 1
    def test_many_unconfirmed_stop_skip(self):
        """
        The heuristic should return an empty log in cases when there is no information in the log.
        """
        log = pd.DataFrame(columns=self.log_columns,
                           data=[
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 0],
                               ['_', '_', 'STOPPED_OR_SKIPPED', '_', '_', '_', 0]
                           ])
        logbook = {'uuid': log}
        result = cut_cancellations(logbook)
        assert len(result) == 0
class TestDiscardPartialLogs(unittest.TestCase):
    """
    Tests the partial log heuristic.
    """

    def setUp(self):
        self.log_columns = [
            'trip_id', 'route_id', 'action', 'minimum_time', 'maximum_time', 'stop_id',
            'latest_information_time'
        ]

    def _log(self, times):
        """Build a log DataFrame whose rows carry placeholder fields and the
        given latest_information_time values."""
        rows = [['_', '_', '_', '_', '_', '_', t] for t in times]
        return pd.DataFrame(columns=self.log_columns, data=rows)

    def test_single_discard(self):
        """
        If there's just one record matching the first-or-last
        `LATEST_INFORMATION_TIME` condition, discard that one.
        """
        logbook = {'_0': self._log([0, 2]), '_1': self._log([1])}
        result = discard_partial_logs(logbook)
        assert len(result) == 1

    def test_multiple_discard(self):
        """
        If there's more than one record matching the first-or-last
        `LATEST_INFORMATION_TIME` condition, discard them all.
        """
        logbook = {
            '_0': self._log([0, 1]),
            '_1': self._log([1]),
            '_2': self._log([0, 2]),
        }
        result = discard_partial_logs(logbook)
        assert len(result) == 1
| true |
c0af6cd730d624d5b75471d5eb18caae9d756ba8 | Python | jl2199/MechVibEP2Q10 | /interactive.py | UTF-8 | 1,443 | 3.125 | 3 | [] | no_license | # note: this is defo not the most efficent way to make
# and interactive graph, its just a quick solution
from math import pi
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
init_f_signal = 80  # Hz — starting frequency of the input sine wave
# Constants
f_n = 100  # Hz — natural frequency
w_n = 2 * pi * f_n  # rad s^-1
zeta = 0.03  # damping ratio
T = 0.4  # s — total simulated duration
delta_t = 0.0003  # s — sampling step
t = np.arange(0, T, delta_t)
# Damped sinusoid used as the system's unit impulse response.
y_unit_imp_response = np.exp(-1 * zeta * w_n * t) * np.sin(w_n * t)
plt.style.use("seaborn")
# Two stacked axes: small input plot on top, larger response plot below.
fig, axs = plt.subplots(2, 1, figsize=(
    14, 7), gridspec_kw={'height_ratios': [1, 2]})
def plot_input(f):
    """Draw the sinusoidal input signal at frequency f on the top axes."""
    signal = np.sin(2 * pi * f * t)
    top = axs[0]
    top.plot(t, signal)
    top.set(ylabel="$X$")
    top.set_title(f"Input signal with $f={round(f,2)}Hz$")
def plot_response(f):
    """Convolve the unit impulse response with a sinusoid of frequency f
    and draw the truncated result on the bottom axes."""
    signal = np.sin(2 * pi * f * t)
    response = np.convolve(y_unit_imp_response, signal)
    bottom = axs[1]
    bottom.plot(t, response[:len(t)])
    bottom.set(xlabel="$time/ s$", ylabel="$Y$")
    bottom.set_title(f"Response to input signal")
# Draw the initial plots before handing control to the slider.
plot_input(init_f_signal)
plot_response(init_f_signal)
# Dedicated strip near the top of the figure for the frequency slider.
axfreq = plt.axes([0.2, .95, 0.65, 0.03])
freq_slider = Slider(
    ax=axfreq,
    label='Input Frequency/Hz',
    valmin=50,
    valmax=200,
    valinit=init_f_signal,
)
def update(val):
    """Slider callback: clear both axes and redraw at the slider frequency."""
    for ax in axs:
        ax.clear()
    plot_input(freq_slider.val)
    plot_response(freq_slider.val)
    fig.canvas.draw_idle()
# Re-render whenever the slider moves, then enter the GUI event loop.
freq_slider.on_changed(update)
plt.show()
| true |
d3ba22a2bf889a61e88c5b071f5f73b4daf74ac9 | Python | silky/bell-ppls | /env/lib/python2.7/site-packages/observations/r/benefits.py | UTF-8 | 2,269 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def benefits(path):
  """Unemployment of Blue Collar Workers
  a cross-section from 1972
  *number of observations* : 4877
  *observation* : individuals
  *country* : United States
  A time serie containing :
  stateur
  state unemployment rate (in %)
  statemb
  state maximum benefit level
  state
  state of residence code
  age
  age in years
  tenure
  years of tenure in job lost
  joblost
  a factor with levels
  (slack\\\_work,position\\\_abolished,seasonal\\\_job\\\_ended,other)
  nwhite
  non-white ?
  school12
  more than 12 years of school ?
  sex
  a factor with levels (male,female)
  bluecol
  blue collar worker ?
  smsa
  lives is smsa ?
  married
  married ?
  dkids
  has kids ?
  dykids
  has young kids (0-5 yrs) ?
  yrdispl
  year of job displacement (1982=1,..., 1991=10)
  rr
  replacement rate
  head
  is head of household ?
  ui
  applied for (and received) UI benefits ?
  McCall, B.P. (1995) “The impact of unemployment insurance benefit levels
  on recipiency”, *Journal of Business and Economic Statistics*, **13**,
  189–198.
  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `benefits.csv`.
  Returns:
    Tuple of np.ndarray `x_train` with 4877 rows and 18 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'benefits.csv'
  # Download the CSV once; subsequent calls reuse the cached copy on disk.
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/Ecdat/Benefits.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='benefits.csv',
                               resume=False)
  # First CSV column becomes the row index; remaining columns are features.
  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
| true |
63e636c105ab6610c53c0e0e6d82ea013f1dd59b | Python | artyom-beilis/dlprimitives | /docs/table_to_md.py | UTF-8 | 1,322 | 2.796875 | 3 | [
"MIT",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ###############################################################################
###
### Copyright (c) 2021-2022 Artyom Beilis <artyomtnk@yahoo.com>
###
### MIT License, see LICENSE.TXT
###
###############################################################################
import sys
def print_sep(w):
    """Print a markdown-style separator row for columns of widths *w*."""
    dashes = ('-' * width for width in w)
    print('|' + '|'.join(dashes) + '|')
def print_row(r, w):
    """Print one table row, right-aligning each cell to its column width."""
    cells = []
    for idx, cell in enumerate(r):
        cells.append('%*s' % (w[idx], cell))
    print('|' + '|'.join(cells) + '|')
def make_table(t):
    """Render rows *t* (header row first) as a pipe-delimited table on stdout.

    Short rows are padded in place with empty cells up to the widest row
    (callers observe the mutation, as before), every column is sized to its
    longest cell, and a blank line follows the table.
    """
    widths = []
    for row in t:
        while len(widths) < len(row):
            widths.append(0)
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(cell))
    for row in t:
        row.extend([''] * (len(widths) - len(row)))

    def emit_row(row):
        print('|%s|' % '|'.join('%*s' % (widths[i], c) for i, c in enumerate(row)))

    emit_row(t[0])
    print('|%s|' % '|'.join('-' * wd for wd in widths))
    for row in t[1:]:
        emit_row(row)
    print("")
# Read the tab-separated file named on the command line and emit each
# blank-line-delimited section as a table via make_table().
with open(sys.argv[1],'r') as f:
    table=[]
    for line in f.readlines():
        line = line.strip()
        # A blank line terminates the table collected so far.
        if line=='' and len(table) > 0:
            make_table(table)
            table=[]
        else:
            # Escape underscores so they survive markdown rendering.
            # NOTE(review): a blank line before any table starts is appended
            # here as a row [''] (the len(table) > 0 guard only skips blanks
            # once a table has begun) -- confirm this is intended.
            table.append([v.replace('_','\\_') for v in line.split('\t')])
    # Flush the final table, which may not be followed by a blank line.
    if table:
        make_table(table)
| true |
a690c94c16851a2815143486148be50e3c586cb7 | Python | w0L-g0R/bio-cluster | /backend/bio_cluster/src/data/DEVELOPMENT/create_radial_tree_datastructure_v3.py | UTF-8 | 7,626 | 3.03125 | 3 | [] | no_license |
# %%
import pandas as pd
from pathlib import Path
from anytree import Node, NodeMixin, RenderTree
# import anytree
# import itertools
from anytree.exporter import JsonExporter
from anytree.search import find as find_nodes
from anytree.search import find_by_attr
import json
import numpy as np
class TreeNode(NodeMixin):
    """Named tree node (anytree NodeMixin) carrying an optional payload."""

    def __init__(self, name="Bio", flag_LT=None, item=None, parent=None,
                 children=None):
        super(TreeNode, self).__init__()
        self.name = name
        self.flag_LT = flag_LT
        self.item = item
        self.parent = parent
        # anytree only accepts an iterable of children, so the default
        # None is simply not assigned.
        if children:
            self.children = children

    def __repr__(self):
        return self.name
def create_root_node(name: str) -> TreeNode:
    # Build the root of the radial tree.
    # NOTE(review): this returns an anytree.Node, not a TreeNode, despite the
    # annotation -- downstream code only relies on the shared anytree node
    # interface, but confirm which class is actually intended.
    return Node(name=name)
# def add_node(
# node_name: str,
# parent: TreeNode,
# to_root: bool = False,
# **kwargs
# ):
# already_existing_node = find_nodes(
# node=parent.root,
# filter_=lambda node: node.name == node_name)
# # print()
# # print('already_existing_node: ', already_existing_node)
# # print()
# # If node does not exist in tree ..
# if already_existing_node is None:
# # .. add new node, which is also the new parent
# return Node(
# name=node_name, parent=parent, value=333)
# else:
# # Don't add new node, and keep current parent
# return already_existing_node.path[-1]
def create_radial_tree_datastructure_from_df(
        df: pd.DataFrame,
        root: TreeNode
) -> TreeNode:
    '''
    Creates a tree-like datastructure, using the anytree-library and an input dataframe with single index and several columns. Each column of the dataframe represents a set of nodes, starting with the first set of children nodes in the most left column. Hence, the most right column only consists of leaf nodes, without further descendents.
    Parameter
    -------
    df: pd.DataFrame
        Inherits the tree data structure
    root: TreeNode
        Named root node
    Returns
    -------
    TreeNode
        Tree structure.
    '''
    # NOTE(review): development-state code -- mark_nodes_and_leafs is defined
    # but its only caller is commented out below, and create_nodes_and_leafs
    # still prints debug output on every column.
    tree = root
    def mark_nodes_and_leafs(column):
        # Position of this column within the ordered "layer" columns.
        column_idx = columns_with_layers.index(column.name)
        # In the last layer a cell either has value and is a LEAF or it remains
        # NaN
        if column_idx == len(columns_with_layers) - 1:
            return pd.Series(
                data=np.where(
                    column.isna(),
                    column,
                    "LEAF"),
                index=column.index)
        else:
            # Extract next column to the right to lookup rows with NaN values
            next_column_to_the_right = df[[
                columns_with_layers[column_idx + 1]]].squeeze()
            # If there's a NaN value in the cell of the next right column, then
            # mark the cell in this column as LEAF, otherwise it's a NODE
            return pd.Series(
                data=np.where(
                    next_column_to_the_right.isna(),
                    "LEAF",
                    "NODE",
                ),
                index=column.index)
    def create_nodes_and_leafs(column):
        # Applied column-by-column (left to right) to attach each unique cell
        # value as a node under its parent found in the column to the left.
        print('column: ', column)
        print()
        # print('row: ', row.name)
        # print('row: ', row)
        # tree_blueprint = df_tree_blueprint[column.name]
        # tree.children = [Node(name=)
        # row.where(tree_blueprint == "LEAF")
        # print()
        # last_added_node = Node(
        #     name=row[0], parent=root, value=333)
        # print('last_added_node.children: ', last_added_node.children)
        # parent = root
        # def fill_branch_recursively(cell):
        #     # print('parent: ', parent.children)
        #     # BASE CASE:
        #     if isinstance(cell, str):
        #         if find_nodes(node=parent,
        #                       filter_=lambda node: node.name == cell) is None:
        #             # LEAF
        #             Node(name=cell, parent=parent, value=333)
        #     else:
        #         return fill_branch_recursively(
        #             cell=next(tree_blueprint.values))
        if column.name == columns_with_layers[0]:
            # First layer: every unique value becomes a direct child of root.
            _ = [Node(name=cell, parent=root, value=333)
                 for cell in column.unique()]
        else:
            # Column immediately to the left holds each cell's parent name.
            col_index = columns_with_layers.index(column.name) - 1
            # print('last_index: ', last_index)
            # fill_branch_recursively(cell=tree_blueprint[0])
            for cell in column.unique():  # .where(tree_blueprint == "NODE"):
                if isinstance(cell, str):
                    # NOTE(review): only the FIRST row containing this value is
                    # consulted, so duplicate values under different parents
                    # would all be attached to the first parent -- confirm the
                    # input guarantees unique values per layer.
                    row_index = column[column == cell].index[0]
                    parent = df.iloc[row_index, col_index]
                    f = find_by_attr(tree, parent)
                    print('f: ', f)
                    print('parent: ', parent)
                    # s = df[columns_with_layers[last_index]]
                    # df.query( == "foo"')
                    # print('s: ', s)
                    # print(s.loc[df[''].isin(['one','three'])])
                    Node(name=cell, parent=f, value=333)
                    # # print('node: ', node)
                    # if find_nodes(
                    #         node=root,
                    #         filter_=lambda node: node.name == cell) is None:
                    #     Node(name=cell, parent=, value=333)
                    #     # parent = Node(
                    #     name=cell, parent=parent, value=333)
                    # print('parent: ', parent)
                    # print()
                    # else:
                    # f = find_nodes(
                    #     node=tree,
                    #     filter_=lambda node: node.name == cell,
                    # )
                    # print('f: ', f)
                    # print('node: ', node)
                    # last_added_node = Node(
                    #     name=node, parent=last_added_node, value=333)
                    # print('last_added_node: ', last_added_node)
    # Extract the names of all columns that contains "layer" in
    # the name
    columns_with_layers = [l for l in df.columns if "layer" in l]
    # Create a df copy that contains only layer columns and write
    # into each cell wheter it is a NODE,LEAF or NaN cell ( -> cells in the
    # last layer that aren't leafs)
    # df_tree_blueprint = df[columns_with_layers].apply(
    #     mark_nodes_and_leafs
    # )
    # print('df_tree_blueprint: ', df_tree_blueprint)
    # Iter over rows in the original dataframe and build the tree from root to
    # leaf in each row
    df[columns_with_layers].apply(
        create_nodes_and_leafs
    )
    print('tree.children: ', tree.children)
    return tree
# //////////////////////////////////////////////////////////////////////////////
# Load excel data
# NOTE(review): absolute Windows path -- this script only runs on the
# author's machine as-is.
df = pd.read_excel(
    Path("C:/Code/bio-economy-cluster/backend/database/excel/Search_scheme/branchen_scheme_2.xlsx")
)
# Create radial tree structure
radial_tree_datastructure = create_radial_tree_datastructure_from_df(
    df=df,
    root=create_root_node(name="Bio")
)
# # Create anytree json export
# Serialise the tree to JSON (the file-write below is currently disabled).
exporter = JsonExporter(indent=4)
radial_tree_json = exporter.export(radial_tree_datastructure)
# print("-" * 100)
# # print('radial_tree_json: ', radial_tree_json[:1650])
# with open('tree_WKO.json', 'a') as f:
#     f.write(radial_tree_json)
# Print an ASCII rendering of the resulting tree for inspection.
print(RenderTree(radial_tree_datastructure))
# # print('radial_tree_datastructure: ', radial_tree_datastructure)
# print(exporter.export(radial_tree_datastructure))
# %%
| true |
c17d0d70bb53817f127f6119fbcafb1986240f2e | Python | kedgarnguyen298/nguyenquockhanh-fundamental-c4e24 | /session4/homework/turtleex1.py | UTF-8 | 245 | 3.453125 | 3 | [] | no_license | from turtle import *
# Palette cycled through while drawing.
colors = ['red', 'blue', 'brown', 'yellow', 'grey']
# For each side count i from 3 to 7, trace the regular i-gon once per
# colour (five overlapping polygons in different colours).
i = 3
while i < 8:
    for col in colors:
        color(col)
        for j in range(i):
            forward(100)
            left(360 / i)
    i += 1
# Keep the turtle window open until the user closes it.
mainloop()
| true |
97328dd8bb1646af7697181b14263cbb348d62b2 | Python | mathmed/the-best-dice | /src/rice.py | UTF-8 | 2,867 | 3.90625 | 4 | [
"MIT"
] | permissive |
import random
class Dice:
    """Interactive Monte-Carlo comparison of two dice strategies.

    Strategy A rolls several dice and adds them; strategy B rolls a single
    die and doubles the result.  Both are simulated and the per-result
    percentages plus the chance of landing above the midpoint of the
    possible range are reported.
    """
    def __init__(self):
        print("\n\n****************************************************")
        print("************** WELCOME TO BEST DICE ****************")
        print("****************************************************\n\n")
        self.main()
    def main(self):
        """Prompt for the dice configuration and run both simulations."""
        dices_ = int(input("Tell the amount of dice you will test:"))
        faces_ = int(input("Tell me how many faces the dice has:"))
        array_sum = self.calc_sum(dices_, faces_)
        array_mult = self.calc_mult(dices_, faces_)
        self.analyze(array_sum, array_mult)
    def calc_sum(self, dices_, faces_, trials=1000000):
        """Tally `trials` rolls of the sum of `dices_` dice with `faces_` faces.

        Returns a frequency list indexed by total (0 .. dices_ * faces_).
        Bug fix: the original looped over range(1, 1000000), i.e. only
        999,999 rolls, while analyze() divides every tally by 1,000,000.
        """
        max_sum = dices_ * faces_
        array = self.fill(max_sum + 1)
        for _ in range(trials):
            total = 0
            for _ in range(dices_):
                total += random.randint(1, faces_)
            array[total] += 1
        return array
    def calc_mult(self, dices_, faces_, trials=1000000):
        """Tally `trials` rolls of one die (with `faces_` faces) multiplied by 2.

        Returns a frequency list of length dices_ * faces_ + 1; as in the
        original, results only stay in bounds when dices_ >= 2.
        """
        max_sum = dices_ * faces_
        array = self.fill(max_sum + 1)
        for _ in range(trials):
            result = 2 * random.randint(1, faces_)
            array[result] += 1
        return array
    def fill(self, max_sum):
        """Return a zero-initialised frequency list of length `max_sum`."""
        return [0] * max_sum
    def analyze(self, array_sum, array_mult):
        """Print per-result percentages and compare the two strategies.

        Percentages assume the default 1,000,000 trials.
        """
        print("\n\nresults sum\n")
        for cont, count in enumerate(array_sum):
            print("result = "+str(cont)+": "+str(round((count/1000000)*100,2))+"%")
        print("\n\nresults mult\n")
        for cont, count in enumerate(array_mult):
            print("result = "+str(cont)+": "+str(round((count/1000000)*100,2))+"%")
        # Midpoint of the possible result range; "above" means index >= faces.
        faces = int(len(array_mult)/2)
        cont_mult = 0
        for i in range(faces, len(array_mult)):
            cont_mult += round((array_mult[i]/1000000)*100, 2)
        cont_sum = 0
        for i in range(faces, len(array_sum)):
            cont_sum += round((array_sum[i]/1000000)*100, 2)
        print("\nchance to take a number above "+str(faces)+" with added dice: "+str(round(cont_sum,2))+"%")
        print("\nchance to take a number below "+str(faces)+" with added dice: "+str(round(100-cont_sum, 2))+"%\n")
        print("\nchance to take a number above "+str(faces)+" with dice multiplied by 2: "+str(round(cont_mult,2))+"%")
        print("\nchance to take a number below "+str(faces)+" with dice multiplied by 2: "+str(round(100-cont_mult,2))+"%")
        if cont_sum > cont_mult:
            print("\nthe best choice is to play dices added\n")
        elif cont_mult > cont_sum:
            print("\nthe best choice is to play dice multiplied by 2\n")
        else:
            print("\nthe choice does not matter\n")
c = Dice()
| true |
a76a8ac375e707388205ca4aa7e569199ea35a6a | Python | rafaelassacconi/work-at-olist | /olistlibrary/books/management/commands/generate_huge_file.py | UTF-8 | 741 | 3.046875 | 3 | [] | no_license | import os
import csv
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that writes a large authors CSV fixture for tests."""

    help = "Generates a huge file with authors to test"

    def handle(self, *args, **options):
        # Number of data rows to emit and the fixture's destination path.
        rows = 2000000
        file_path = 'olistlibrary/books/tests/test_files/authors_huge.csv'

        with open(file_path, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['name'])
            for _ in range(rows):
                writer.writerow(['JustABigFirstname WithABigLastname'])

        size_mb = os.path.getsize(file_path) / (1024 * 1024)
        print("Finished! %d in %dMB file." % (rows, size_mb))
| true |
63a3ac7da55f1656a61f0334cc9f27c1371232fb | Python | ourobotics/RoboticsEngine | /Source/Library/Utils/ConfigLoader.py | UTF-8 | 1,059 | 2.515625 | 3 | [] | no_license | # ||=======================================================================||
# ||
# || Program/File: ConfigLoader.py
# ||
# || Description:
# ||
# || Author: Logan Wilkovich
# || Email: LWilkovich@gmail.com
# || Creation Date: 23 July 2018 | Logan Wilkovich
# ||=======================================================================||
# ||=======================||
# Premades
from time import strftime, localtime
import traceback
# ||=======================||
# Global Variables
# ||=======================||
# Notes
# ||=======================||
# ||=======================================================================||
class ConfigLoader(object):
    """Loads ``key=value`` config files from the ``../Settings/Config/`` directory."""

    def __init__(self):
        return

    def getConfig(self, filename):
        """Parse ``../Settings/Config/<filename>`` into a dict.

        A ``.conf`` suffix is appended when the name lacks one.  Each line is
        expected to be ``key=value``.  On any failure (missing file, malformed
        line, ...) a ``{"Error": exception}`` dict is returned instead of
        raising, preserving the original best-effort contract.
        """
        if '.conf' not in filename:
            filename = filename + '.conf'
        try:
            # Context manager closes the handle promptly; the original
            # generator expression leaked the open file.
            with open('../Settings/Config/' + filename) as fh:
                return dict(line.strip().split('=') for line in fh)
        except Exception as e:
            return {"Error": e}
# ||=======================================================================|| | true |
25ca333df788481b326aa79fe99813fa56303613 | Python | toodom02/alevel-coursework | /src/report.py | UTF-8 | 15,036 | 3 | 3 | [] | no_license | from prettytable import PrettyTable
from fpdf import FPDF
from tkinter import messagebox
from datetime import datetime
import subprocess
from . import config
# Using the records fetched from the previous report, which was output into a treeview,
# this puts the same records (i.e. exact same report) into a table, which is then pasted into
# a PDF document. The process is identical for donation reports, order and expenditure reports.
class donationReport:
def __init__(self, startDate, endDate, records, totalDonat):
self.startDate = startDate
self.endDate = endDate
records.reverse()
self.records = records
self.totalDonat = totalDonat
self.createPDF()
def createPDF(self):
self.table = PrettyTable()
self.table.field_names = ['DonationID', 'Amount', 'Cash/Bank', 'Reference No', 'Date', 'DonorID', 'Donor Name',
'StaffID']
for record in self.records:
self.table.add_row(record)
header, data = self.get_data_from_prettytable(self.table)
self.export_to_pdf(header, data)
def get_data_from_prettytable(self, data):
"""
Get a list of list from pretty_data table
Arguments:
:param data: data table to process
:type data: PrettyTable
"""
def remove_space(liste):
"""
Remove space for each word in a list
Arguments:
:param liste: list of strings
"""
list_without_space = []
for mot in liste: # For each word in list
list_without_space.append(mot) # list of word without space
return list_without_space
# Get each row of the table
string_x = str(self.table).split('\n') # Get a list of row
header = string_x[1].split('|')[1: -1] # Columns names
rows = string_x[3:len(string_x) - 1] # List of rows
list_word_per_row = []
for row in rows: # For each word in a row
# Remove first and last arguments
row_resize = row.split('|')[1:-1]
list_word_per_row.append(remove_space(row_resize)) # Remove spaces
return header, list_word_per_row
def export_to_pdf(self, header, data):
"""
Create a a table in PDF file from a list of row
:param header: columns name
:param data: List of row (a row = a list of cells)
:param spacing=1:
"""
pdf = FPDF() # New pdf object
pdf.set_font('Arial', size=10) # Font style
epw = pdf.w - 2 * pdf.l_margin # Width of document
col_width = pdf.w / 9 # Column width in table
row_height = pdf.font_size * 1.5 # Row height in table
spacing = 1.3 # Space in each cell
pdf.add_page() # add new page
pdf.image(config.logo, 20, 5, 12, 13)
pdf.image(config.logo, epw - 20, 5, 12, 13)
pdf.add_font('algerian', '', config.font, uni=True)
pdf.set_font('algerian', size=14)
pdf.set_text_color(35, 128, 183) # Trust logo colour
pdf.cell(epw, 0, 'Kingfisher Trust', align='C')
pdf.ln(row_height * spacing)
pdf.set_font('Arial', size=10)
pdf.set_text_color(0, 0, 0)
pdf.cell(epw, 0, 'Donation Report', align='C') # create title cell
pdf.ln(row_height * spacing) # Define title line style
pdf.cell(epw, 0, 'From ' + str(datetime.isoformat(self.startDate)[:10]) + ' to '
+ str(datetime.isoformat(self.endDate)[:10]) + ':', align='L')
pdf.ln(row_height * spacing)
pdf.cell(10, 0, 'Total Donated = £{:.2f}'.format(
self.totalDonat), align='L')
pdf.cell(epw - 10, 0, 'Report Created: ' +
str(datetime.isoformat(datetime.today())[:10]), align='R')
pdf.ln(row_height * spacing)
# Add header
for item in header: # for each column
pdf.cell(col_width, row_height * spacing, # Add a new cell
txt=item, border='B', align='C')
pdf.ln(row_height * spacing) # New line after header
for row in data: # For each row of the table
for item in row: # For each cell in row
pdf.cell(col_width, row_height * spacing, # Add cell
txt=item, border=0, align='C')
pdf.ln(row_height * spacing) # Add line at the end of row
title = ('reports/DONATION ' + str(datetime.isoformat(self.startDate)[:10]) + ' to ' +
str(datetime.isoformat(self.endDate)[:10]) + '.pdf')
try:
pdf.output(title) # Create pdf file
pdf.close()
openFile = messagebox.askyesno('Success',
'PDF File Created\n' + title + '\n\nOpen File?') # Outputs success dialogue
except OSError:
openFile = messagebox.askyesno(
'Error', 'Unable to Overwrite Existing File\n\nOpen File?', icon='error')
if openFile:
subprocess.Popen([title], shell=True) # Opens file
class orderReport:
    """Builds a PDF revenue report (orders with cost/revenue/profit totals).

    NOTE(review): `records` is reversed IN PLACE, so the caller's list is
    mutated.  The PDF is written under reports/ and the user is offered to
    open it via a tkinter dialog.
    """
    def __init__(self, startDate, endDate, records, totalCost, totalRevenue, totalProfit):
        self.startDate = startDate
        self.endDate = endDate
        records.reverse()
        self.records = records
        self.totalCost = totalCost
        self.totalRevenue = totalRevenue
        self.totalProfit = totalProfit
        self.createPDF()
    def createPDF(self):
        # Stage the records into a PrettyTable, then convert that table's
        # text rendering into header + rows for the PDF layout.
        self.table = PrettyTable()
        self.table.field_names = ['OrderID', 'CustomerID', 'Date', 'ItemID', 'ItemName', 'Quantity', 'SalePrice',
                                  'SupplierCost', 'SupplierID']
        for record in self.records:
            self.table.add_row(record)
        header, data = self.get_data_from_prettytable(self.table)
        self.export_to_pdf(header, data)
    def get_data_from_prettytable(self, data):
        """
        Get a list of list from pretty_data table
        Arguments:
            :param data: data table to process
            :type data: PrettyTable
        """
        # NOTE(review): the `data` parameter is ignored; self.table is used.
        def remove_space(liste):
            """
            Remove space for each word in a list
            Arguments:
                :param liste: list of strings
            """
            # NOTE(review): despite its name, this copies the list without
            # stripping anything -- the padded cell text is kept as-is.
            list_without_space = []
            for mot in liste:  # For each word in list
                list_without_space.append(mot)  # list of word without space
            return list_without_space
        # Get each row of the table
        string_x = str(self.table).split('\n')  # Get a list of row
        header = string_x[1].split('|')[1: -1]  # Columns names
        rows = string_x[3:len(string_x) - 1]  # List of rows
        list_word_per_row = []
        for row in rows:  # For each word in a row
            # Remove first and last arguments
            row_resize = row.split('|')[1:-1]
            list_word_per_row.append(remove_space(row_resize))  # Remove spaces
        return header, list_word_per_row
    def export_to_pdf(self, header, data):
        """
        Create a a table in PDF file from a list of row
        :param header: columns name
        :param data: List of row (a row = a list of cells)
        :param spacing=1:
        """
        pdf = FPDF()  # New pdf object
        pdf.set_font('Arial', size=10)  # Font style
        epw = pdf.w - 2 * pdf.l_margin  # Width of document
        col_width = pdf.w / 10  # Column width in table
        row_height = pdf.font_size * 1.5  # Row height in table
        spacing = 1.3  # Space in each cell
        pdf.add_page()  # add new page
        # Trust logo in both top corners.
        pdf.image(config.logo, 20, 5, 12, 13)
        pdf.image(config.logo, epw - 20, 5, 12, 13)
        pdf.add_font('algerian', '', config.font,
                     uni=True)  # Adds algerian font
        pdf.set_font('algerian', size=14)
        pdf.set_text_color(35, 128, 183)  # Trust logo colour
        pdf.cell(epw, 0, 'Kingfisher Trust', align='C')
        pdf.ln(row_height * spacing)
        pdf.set_font('Arial', size=10)
        pdf.set_text_color(0, 0, 0)
        pdf.cell(epw, 0, 'Revenue Report', align='C')  # create title cell
        pdf.ln(row_height * spacing)  # Define title line style
        pdf.cell(epw, 0, 'From ' + str(datetime.isoformat(self.startDate)[:10]) + ' to '
                 + str(datetime.isoformat(self.endDate)[:10]) + ':', align='L')
        pdf.ln(row_height * spacing)
        pdf.cell(10, 0, str('Total Cost = £{:.2f}'.format(
            self.totalCost)), align='L')
        pdf.cell(epw - 10, 0, 'Report Created: ' +
                 str(datetime.isoformat(datetime.today())[:10]), align='R')
        pdf.ln(row_height * spacing)
        pdf.cell(10, 0, str('Total Revenue = £{:.2f}'.format(
            self.totalRevenue)), align='L')
        pdf.ln(row_height * spacing)
        pdf.cell(10, 0, str('Total Profit = £{:.2f}'.format(
            self.totalProfit)), align='L')
        pdf.ln(row_height * spacing)
        # Add header
        for item in header:  # for each column
            pdf.cell(col_width, row_height * spacing,  # Add a new cell
                     txt=item, border='B', align='C')
        pdf.ln(row_height * spacing)  # New line after header
        for row in data:  # For each row of the table
            for item in row:  # For each cell in row
                pdf.cell(col_width, row_height * spacing,  # Add cell
                         txt=item, border=0, align='C')
            pdf.ln(row_height * spacing)  # Add line at the end of row
        title = ('reports/REVENUE ' + str(datetime.isoformat(self.startDate)[:10]) + ' to ' +
                 str(datetime.isoformat(self.endDate)[:10]) + '.pdf')
        try:
            # Will overwrite any existing file
            pdf.output(title)  # Create pdf file
            pdf.close()
            openFile = messagebox.askyesno('Success',
                                           'PDF File Created\n' + title + '\n\nOpen File?')  # Outputs success dialogue
        except OSError:
            openFile = messagebox.askyesno(
                'Error', 'Unable to Overwrite Existing File\n\nOpen File?', icon='error')
        if openFile:
            subprocess.Popen([title], shell=True)  # Opens file
class expenditureReport:
    """Builds a PDF expenditure report from pre-fetched records.

    NOTE(review): `records` is reversed IN PLACE, so the caller's list is
    mutated.  The PDF is written under reports/ and the user is offered to
    open it via a tkinter dialog.
    """
    def __init__(self, startDate, endDate, records, totalSpent):
        self.startDate = startDate
        self.endDate = endDate
        records.reverse()
        self.records = records
        self.totalSpent = totalSpent
        self.createPDF()
    def createPDF(self):
        # Stage the records into a PrettyTable, then convert that table's
        # text rendering into header + rows for the PDF layout.
        self.table = PrettyTable()
        self.table.field_names = ['ExpenditureID',
                                  'Amount', 'Details', 'Date', 'StaffID']
        for record in self.records:
            self.table.add_row(record)
        header, data = self.get_data_from_prettytable(self.table)
        self.export_to_pdf(header, data)
    def get_data_from_prettytable(self, data):
        """
        Get a list of list from pretty_data table
        Arguments:
            :param data: data table to process
            :type data: PrettyTable
        """
        # NOTE(review): the `data` parameter is ignored; self.table is used.
        def remove_space(liste):
            """
            Remove space for each word in a list
            Arguments:
                :param liste: list of strings
            """
            # NOTE(review): despite its name, this copies the list without
            # stripping anything -- the padded cell text is kept as-is.
            list_without_space = []
            for mot in liste:  # For each word in list
                list_without_space.append(mot)  # list of word without space
            return list_without_space
        # Get each row of the table
        string_x = str(self.table).split('\n')  # Get a list of row
        header = string_x[1].split('|')[1: -1]  # Columns names
        rows = string_x[3:len(string_x) - 1]  # List of rows
        list_word_per_row = []
        for row in rows:  # For each word in a row
            # Remove first and last arguments
            row_resize = row.split('|')[1:-1]
            list_word_per_row.append(remove_space(row_resize))  # Remove spaces
        return header, list_word_per_row
    def export_to_pdf(self, header, data):
        """
        Create a a table in PDF file from a list of row
        :param header: columns name
        :param data: List of row (a row = a list of cells)
        :param spacing=1:
        """
        pdf = FPDF()  # New pdf object
        pdf.set_font('Arial', size=10)  # Font style
        epw = pdf.w - 2 * pdf.l_margin  # Width of document
        col_width = pdf.w / 5.5  # Column width in table
        row_height = pdf.font_size * 1.5  # Row height in table
        spacing = 1.3  # Space in each cell
        pdf.add_page()  # add new page
        # Trust logo in both top corners.
        pdf.image(config.logo, 20, 5, 12, 13)
        pdf.image(config.logo, epw - 20, 5, 12, 13)
        pdf.add_font('algerian', '', config.font, uni=True)
        pdf.set_font('algerian', size=14)
        pdf.set_text_color(35, 128, 183)  # Trust logo colour
        pdf.cell(epw, 0, 'Kingfisher Trust', align='C')
        pdf.ln(row_height * spacing)
        pdf.set_font('Arial', size=10)
        pdf.set_text_color(0, 0, 0)
        pdf.cell(epw, 0, 'Expenditure Report', align='C')  # create title cell
        pdf.ln(row_height * spacing)  # Define title line style
        pdf.cell(epw, 0, 'From ' + str(datetime.isoformat(self.startDate)[:10]) + ' to '
                 + str(datetime.isoformat(self.endDate)[:10]) + ':', align='L')
        pdf.ln(row_height * spacing)
        pdf.cell(10, 0, 'Total Spent = £{:.2f}'.format(
            self.totalSpent), align='L')
        pdf.cell(epw - 10, 0, 'Report Created: ' +
                 str(datetime.isoformat(datetime.today())[:10]), align='R')
        pdf.ln(row_height * spacing)
        # Add header
        for item in header:  # for each column
            pdf.cell(col_width, row_height * spacing,  # Add a new cell
                     txt=item, border='B', align='C')
        pdf.ln(row_height * spacing)  # New line after header
        for row in data:  # For each row of the table
            for item in row:  # For each cell in row
                pdf.cell(col_width, row_height * spacing,  # Add cell
                         txt=item, border=0, align='C')
            pdf.ln(row_height * spacing)  # Add line at the end of row
        title = ('reports/EXPENDITURE ' + str(datetime.isoformat(self.startDate)[:10]) + ' to ' +
                 str(datetime.isoformat(self.endDate)[:10]) + '.pdf')
        try:
            pdf.output(title)  # Create pdf file
            pdf.close()
            openFile = messagebox.askyesno('Success',
                                           'PDF File Created\n' + title + '\n\nOpen File?')  # Outputs success dialogue
        except OSError:
            openFile = messagebox.askyesno(
                'Error', 'Unable to Overwrite Existing File\n\nOpen File?', icon='error')
        if openFile:
            subprocess.Popen([title], shell=True)  # Opens file
| true |
9959609f45571c92bbb82fdfb5041fc4907660c3 | Python | 82seongkyum/python_lecture | /functions/func_keyword.py | UTF-8 | 399 | 3.5625 | 4 | [] | no_license | # 함수를 여러번 순서 바꿔 호출 연습(예. v3 맨 앞으로 등)
def para_func(v1, v2, v3=0):
result = 0
result = v1 + v2 + v3
return result
hap = 0
hap = para_func(10, 20)
print("매개변수가 2개인 함수를 호출한 결과 ==> {hap}")
hap1 = para_func(10, 20, 30)
print("매개변수가 3개인 함수를 호출한 결과 ==> {hap1}")
# 교수님 블로그 참조 | true |
975c68d64d28724f53649cda294367fab5b88691 | Python | EllDy96/cpac_course_2021 | /labs/lab1_tool_world/solutions/ex3_human_readable/your_code.py | UTF-8 | 994 | 3.234375 | 3 | [] | no_license | import numpy as np
TEACHER_CODE = False
def sort_songs(audio_features):
    """Receive audio features and sort them into a danceability "ramp".

    The songs are ranked by their "danceability" score; the even-ranked
    songs form a rising ramp and the odd-ranked songs, reversed, form the
    falling ramp, so the playlist builds up and then winds back down.
    When TEACHER_CODE is True the original random shuffle is used instead.

    Args:
        audio_features (list of dictionaries): List of songs with audio features

    Returns:
        list of dict: the sorted list
    """
    if TEACHER_CODE:
        # Teacher baseline: a random shuffle of the input.
        shuffled_idxs = np.random.permutation(len(audio_features))
        return [audio_features[idx] for idx in shuffled_idxs]
    scores = [song["danceability"] for song in audio_features]
    ranked = np.argsort(scores)
    rising = ranked[0::2]
    falling = ranked[1::2][::-1]
    ordered = [audio_features[idx] for idx in rising]
    ordered.extend(audio_features[idx] for idx in falling)
    return ordered
68724be45881f899e10c29e44d70d8bdb8792d3e | Python | StephenNeville/BCS | /sample_code/func/square_nums_lambda.py | UTF-8 | 134 | 3.53125 | 4 | [] | no_license |
# The lambda function squares each item in the nums list.
nums = [1,2,4,5,7]
list(map((lambda item: item **2), nums))
| true |
e14ee00b3d1bc94463ec2bce4269fb4fde897b16 | Python | kalyc/project-inf-554-group-9 | /python/readConfig.py | UTF-8 | 583 | 2.546875 | 3 | [] | no_license | from collections import defaultdict
# Map: highway code -> set of sensor IDs seen for it.
my_dict = defaultdict(set)
# NOTE(review): absolute home-directory path -- this script only runs on the
# author's machine as-is.
with open('/home/abhi/Downloads/ADMS_DATA/hw_config_2014_Jan_Feb_Mar.txt', 'r') as f:
    count = 0
    for line in f:
        # Pipe-delimited record; column 6 is the highway, column 4 the sensor.
        ll = [x.strip() for x in line.split('|')]
        #key = (ll[6], ll[4])
        highway = ll[6]
        sensorID = ll[4]
        my_dict[highway].add(sensorID)
        #print "COUNT!!!!!", count
        count += 1
    #for w in sorted(my_dict, key=my_dict.get, reverse=True):
    # print w, my_dict[w]
# Write one pipe-delimited line per highway: code | sensor count | sensor set.
with open('outFile.csv', 'w') as f:
    for x in my_dict:
        s = x + "|" + str(len(my_dict[x])) + "|" + str(my_dict[x]) + "\n"
        f.write(s)
| true |
6b4e1ffe0744419e0ae18d8644a7925371efc36f | Python | ayushigarg097/Gender-Classification | /GenderClassification.py | UTF-8 | 226 | 2.65625 | 3 | [] | no_license | from sklearn import tree
# Train a decision tree on the feature matrix X with gender labels Y, then
# classify the training samples themselves and print the predictions.
clf = tree.DecisionTreeClassifier()
X = [['45', '56', 7], ['123', '54', '8'], ['76', '54', '5'], ['335', '65', '4']]
Y = ['female', 'male', 'male', 'female']
clf = clf.fit(X, Y)
# Bug fix: predict() takes only the samples; passing Y as a second
# positional argument raises a TypeError in scikit-learn.
prediction = clf.predict(X)
print(prediction)
| true |
6f058a539977032a0803989c694bcd08c89b2d55 | Python | via/log2json | /packetreader.py | UTF-8 | 2,799 | 2.8125 | 3 | [] | no_license | from io import FileIO
import struct
import yaml
import json
import sys
class PacketReader(FileIO):
    """Reads framed packets from a binary log file.

    Frames are delimited by START (0xAA) ... STOP (0xCC) bytes, with 0xBB
    as an escape prefix.  NOTE(review): this is Python 2 style code --
    packets are accumulated as str via chr()/ord(); under Python 3 the
    struct calls in decodeHeader would need bytes, not str.
    """
    # Frame delimiter / escape byte values.
    _START = 0xAA
    _STOP = 0xCC
    _ESCAPE = 0xBB
    def _skip_to_packet_start(self):
        # Consume bytes until a START marker is seen; returns the marker.
        inchar = ord(self.read(1))
        while inchar != self._START:
            inchar = ord(self.read(1))
        return inchar
    def _parse_input(self, inchar):
        # Unescape: an ESCAPE byte means the next byte was XOR'd with 0xBB.
        # NOTE(review): XOR-with-escape-value encoding -- confirm against the
        # protocol spec.
        if inchar == self._ESCAPE:
            return ord(self.read(1)) ^ inchar
        else:
            return inchar
    def readPacket(self):
        # Return the next packet's unescaped payload (without the STOP byte),
        # or None at end-of-file / on any read error (bare except).
        packet = ""
        try:
            inchar = self._skip_to_packet_start()
            while inchar != self._STOP:
                inchar = ord(self.read(1))
                packet += chr(self._parse_input(inchar))
        except:
            return None
        # The STOP byte itself was appended by the loop above; drop it.
        return packet[0:-1]
    def decodeHeader(self, packet):
        # Parse the big-endian header: 1 flag byte + 2-byte payload id,
        # optionally followed by a sequence byte (flag 0x4) and/or a
        # 2-byte length (flag 0x1), whose offset depends on both flags.
        res = {}
        (flags, payload_id) = struct.unpack(">BH", packet[0:3])
        res['flags'] = flags
        res['id'] = payload_id
        if flags & 0x4 == 0x4:
            res['seq'] = struct.unpack(">B", packet[3])[0]
        if flags & 0x5 == 0x5:
            res['len'] = struct.unpack(">H", packet[4:6])[0]
        elif flags & 0x1 == 0x1:
            res['len'] = struct.unpack(">H", packet[3:5])[0]
        return res
    def packetData(self, packet):
        # Return the payload: skip the variable-length header (see
        # decodeHeader) and drop the trailing checksum byte.
        flags = struct.unpack(">B", packet[0:1])[0]
        start = 3
        if flags & 0x1 == 0x1:
            start += 2
        if flags & 0x4 == 0x4:
            start += 1
        return packet[start:-1]
class FieldDecoder():
    """Decodes packet payloads using field definitions from a YAML metadata file.

    The YAML document is expected to contain a top-level 'fields' list; each
    field has 'name', 'type' (e.g. "UINT16") and 'divBy' (scale divisor).
    """
    def __init__(self, filename):
        # Bug fix: the constructor previously ignored `filename` and always
        # opened 'defaultFreeEMSMetaData.yaml', so the YAML path passed on
        # the command line had no effect.
        self.yml = yaml.load(open(filename, 'r').read())
    def _structName(self, type_name):
        """Map a field type name to (byte length, struct format string).

        Returns None for unknown type names, matching the original
        behaviour (callers then fail when unpacking the result).
        """
        if type_name == "UINT8":
            return (1, ">B")
        if type_name == "UINT16":
            return (2, ">H")
        if type_name == "SINT8":
            return (1, ">b")
        if type_name == "SINT16":
            return (2, ">h")
        if type_name == "BITS8":
            return (1, ">B")
        if type_name == "BITS16":
            return (2, ">H")
    def decodePacket(self, packet):
        """Unpack one payload into a {field name: scaled value} dict."""
        addr = 0
        res = {}
        for field in self.yml['fields']:
            # Renamed from (len, s): `len` shadowed the builtin.
            (size, fmt) = self._structName(field['type'])
            val = struct.unpack(fmt, packet[addr:addr + size])[0]
            val /= field['divBy']
            addr += size
            res[field['name']] = val
        return res
if __name__ == "__main__":
    # Usage: packetreader.py <binary log> <yaml field definitions>
    # NOTE(review): Python 2 script (print statement on the last line).
    binfile = sys.argv[1]
    yamlfile = sys.argv[2]
    x = PacketReader(binfile)
    fields = FieldDecoder(yamlfile)
    count = 0
    points = []
    # Collect every packet with payload id 401 until readPacket() returns
    # None (end of file / read error).
    while True:
        y = x.readPacket()
        if y is None:
            break
        if x.decodeHeader(y)['id'] == 401:
            points.append(fields.decodePacket(x.packetData(y)))
    print json.dumps(points)
| true |
55fe02d45fab38910cd967ffeb9dc61593b650c5 | Python | ilhamdwibakti/Python3.x_Object_Oriented_Programming | /Episode #12 - Pengenalan Inheritance/Main.py | UTF-8 | 309 | 3.34375 | 3 | [
"MIT"
] | permissive | class Hero:
    def __init__(self,name,health):
        # Basic attributes shared (via inheritance) by every hero type.
        self.name = name
        self.health = health
# Intelligence-type hero; inherits Hero's constructor unchanged.
class Hero_intelligent(Hero):
    pass
# Strength-type hero; inherits Hero's constructor unchanged.
class Hero_strength(Hero):
    pass
# Instantiate one hero of each type and show that the inherited
# constructor populated their names.
lina = Hero('lina',100)
techies = Hero_intelligent('techies',50)
axe = Hero_strength('axe',200)
print(lina.name)
print(techies.name)
print(axe.name)
89a5a090a80603cd9164e81e18f7cae1e4de70e1 | Python | MakeTheBrainHappy/Continued-Fraction-Encoders-Decoders | /Step 0 - Text Processing of the Riemann Zeta Zeroes/riemannZeroesInPython.py | UTF-8 | 578 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Takes the data from a txt file (1000 digits of the first hundred zeta zeroes) and inputs this into a python list.
"""
from decimal import *
import requests
def zetaZeroes():
    """Fetch Odlyzko's table of zeta zeros and return them as Decimals.

    Lines containing a '.' start a new zero; continuation lines (digits
    wrapped by the source file) are appended to the previous entry.  As a
    side effect the module-wide Decimal precision is raised to 1024 digits
    so the high-precision values survive conversion.
    """
    getcontext().prec = 1024
    response = requests.get('http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros2')
    zeros = []
    for line in response.text.splitlines():
        stripped = line.strip()
        if '.' in line:
            zeros.append(stripped)
        elif zeros:
            zeros[-1] += stripped
    return [Decimal(z) for z in zeros]
def main():
    """Download and print the list of zeta zeros."""
    print(zetaZeroes())
if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the network fetch.
    main()
b97fb88f5d454e69caf0ffb501937b06fe369a4b | Python | mmurnaneGitHub/DataUpdateAllPermits- | /Data_Download.py | UTF-8 | 1,718 | 2.515625 | 3 | [] | no_license | # Download the file from `url` and save it locally under `file_name`
# Tacoma Permits from CivicData (json)
# Don't download any records without a lat/long.
# Updated: 2019-8-6
# Author: mmurnane
import urllib
import logging
import os
# CivicData datastore resource id for the Tacoma permits dataset.
permitsResourceId = "11fac50d-5d1c-40e6-80b5-c339bd7b827c" #Changed 2019-8-6
# URL-encoded (%22 = ") list of columns requested from the datastore.
theFields = "%22Permit_Number%22,%22Applied_Date%22,%22Latitude%22,%22Longitude%22,%22Address_Line_1%22,%22Permit_Type_Description%22,%22Current_Status%22,%22Issued_Date%22,%22Fees_Paid%22,%22Valuation%22,%22Description%22,%22Link%22"
#ALL PERMITS - 9.5 seconds browser load time
# SQL query: select the fields from the resource where Latitude and
# Longitude are both non-empty (records without coordinates are skipped).
url = 'http://www.civicdata.com/api/3/action/datastore_search_sql?sql=SELECT%20' + theFields + '%20FROM%20%22' + permitsResourceId + '%22%20where%20%22Latitude%22%20%3C%3E%27%27%20and%20%22Longitude%22%20%3C%3E%20%27%27'
#Last 30 days - 7.5 seconds
#url = 'http://www.civicdata.com/api/3/action/datastore_search_sql?sql=SELECT%20' + theFields + '%20FROM%20%22' + permitsResourceId + '%22%20where%20%22Latitude%22%20%3C%3E%27%27%20and%20%22Longitude%22%20%3C%3E%20%27%27and%20%22Applied_Date%22%20%3E%20%272019-01-29%27'
#file_name = "\\\\wsitd01dev\\c$\\GADS\\website\\PDS\\Permits\\data\\Permits.json" #DEV machine
file_name = "\\\\wsitd01\\c$\\GADS\\website\\PDS\\Permits\\data\\Permits.json" #Production machine
# NOTE(review): Python 2 script (print statements, urllib.urlretrieve).
try:
    # Download file
    urllib.urlretrieve (url, file_name)
except:
    # Bare except: any failure (network, UNC path, ...) is logged and swallowed.
    logging.exception('\n Unexpected error with website, could not download file successfully: \n')
else:
    # A healthy full-dataset download is well over 10 MB; a smaller file
    # most likely contains an API error message instead of data.
    if os.path.getsize(file_name)> 10000000:
        print "File download successful!"
    else:
        print "CHECK JSON FILE FOR ERROR MESSAGE! File download successful, but file size appears too small!"
| true |
bae7f79efd3a1c309778a8fba40cc63be02dd7b8 | Python | guptapratham1012/Python-programming | /sadmin.py | UTF-8 | 1,079 | 3.515625 | 4 | [] | no_license | import csv
def write(list_):
with open('student_info.csv','a',newline='') as csv_file:
writer = csv.writer(csv_file)
if csv_file.tell()==0:
writer.writerow(['Name', 'Age', 'Contact_number', 'Email_ID'])
writer.writerow(list_)
if __name__ == '__main__':
    # Interactive loop: prompt for one student's details per iteration,
    # confirm them with the user, save confirmed records via write(),
    # and stop only when the user declines to continue.
    keep_going = True
    num = 1
    while keep_going:
        student = input('Enter Student info for student #{} in the following format(Name, Age, Contact_number, Email_ID):'.format(num))
        fields = student.split(' ')
        print('\nEntered information:\nName: {}\nAge: {}\nContact_Number: {}\nEmail_ID: {}'.format(fields[0], fields[1], fields[2], fields[3]))
        ch = input('\nIs the entered information correct?(yes/no): ')
        if ch == 'no':
            # Loop again with the same student number so the user can retry.
            print('User re-enter the values')
        elif ch == 'yes':
            write(fields)
            chk = input('Enter(yes/no)to continue with entering student details?: ')
            if chk == 'no':
                keep_going = False
            elif chk == 'yes':
                num = num + 1
| true |
8bcd5849cc4e252176944ead717be672360a7fa5 | Python | quirogaluistomas/PythonTest | /Manejos_Archivos.py | UTF-8 | 746 | 4.03125 | 4 | [] | no_license | from io import open
#Si existe lo abre y sino lo crea
#archivo_text = open("archivo.txt", "w")
#frase = "Estupendo dia para estudiar Python \n el miércoles"
#archivo_text.write(frase)
#archivo_text.close()
#Ahora ponemos en modo lectura como si el archivo ya existiera
archivo_text = open("archivo.txt", "r")
#texto = archivo_text.read()
#archivo_text.close()
#print(texto)
#Ahora guardamos en lista linea a linea
lineas_texto = archivo_text.readlines()
archivo_text.close()
print(lineas_texto)
print(lineas_texto[0])
print(lineas_texto[1])
#Ahora supongamos que quiero hacer un agregado de línea
archivo_text = open("archivo.txt", "a")
archivo_text.write("\nSiempre es buen momento para estudiar Python")
archivo_text.close() | true |
a11897fefa6688f03ad0749e892a6e022b39f51c | Python | aspineon/Facebook-Hacker-Cup-2018 | /2015 Round 2 Lazy Sort/lazy_sort.py | UTF-8 | 1,220 | 2.96875 | 3 | [] | no_license | # Written by Gerrit Schoettler, 2018
#f = open("lazy_sort_sample_input.txt", 'r')
#output = open("lazy_sort_sample_submissionTest.txt", 'w')
f = open("lazy_sort.txt", 'r')
output = open("lazy_sort_submission.txt", 'w')
# NOTE: rebinding `input` shadows the builtin; here it reads one line of the file.
input = f.readline
for cnum in range(1, int(input()) + 1):
    # n: number of elements in this test case's sequence.
    n = int(input())
    aOriginal = input().strip().split()
    ans = 'no'
    # answer[side] stays 1 if the sequence can be fully consumed starting
    # from that side's seed element; set to 0 on failure.
    answer = [1, 1]
    for side in range(2):
        b = []
        a = []
        # Shallow copy so each side works on a fresh sequence.
        a = aOriginal * 1
        # Seed element: side=0 pops the first element, side=1 pops the last.
        b.append(int(a.pop(-side)))
        # [B..A] is the contiguous integer run built so far (both start at the seed).
        A = b[0]
        B = b[0]
        for i in range(n-1):
            #print('i = ', i,'a = ', a,'A = ', A, 'B = ', B)
            # Consume from either end any value that extends the run by one
            # (A+1 above, or B-1 below); otherwise this side fails.
            if int(a[0]) == A + 1:
                A = A +1
                a.pop(0)
            elif int(a[-1]) == A + 1:
                A = A+1
                a.pop(-1)
            elif int(a[0]) == B-1:
                B=B-1
                a.pop(0)
            elif int(a[-1])==B-1:
                B=B-1
                a.pop(-1)
            else:
                answer[side] = 0
                break
    # 'yes' if either starting side consumed the whole sequence.
    if 1 in answer:
        ans = 'yes'
    print("Case #%d: %s" % (cnum, ans), file=output)
f.close()
output.close()
print('Input file closed:', f.closed)
print('Output file closed:', output.closed)
7b8fac0e9f883edd904e791387d80441b1383cab | Python | lliradev/Programacion-con-Python | /2. Conceptos avanzados POO/DemoTopLevelWindow.py | UTF-8 | 970 | 3.40625 | 3 | [] | no_license | # DemoTopLevelWindow.py
from tkinter import *
# Demo of the four flavors of Toplevel windows in Tk.
# Root window: destroying it destroys every child Toplevel below.
root = Tk()
root.title("Top Level Window")
root.geometry("300x300")
Label(root, text = "I am the main TopLevelWindow\n All other windows here are my children").pack()
# Plain child Toplevel: independent window, may drop behind its parent.
child_toplevel = Toplevel(root)
Label(child_toplevel, text = "I am a child of root\n If i loose focus, I may hide below the top level\nI am destroyed if root is destroy").pack()
child_toplevel.geometry("400x100+300+300")
# Transient Toplevel: always stays above its parent, minimizes with it.
transient_toplevel = Toplevel(root)
Label(transient_toplevel, text = "I am transient window of root \n I always stay on top of my parent \n I get hidden if my parent window is minimize").pack()
transient_toplevel.transient(root)
# overrideredirect(1) removes all window-manager decorations (no title bar,
# so the window cannot be moved or resized by the user).
no_window_decoration = Toplevel(root, bg = "black")
Label(no_window_decoration, text = "I am a toplevel with no window manager\n I cannot be resized or moved", bg = "black", fg = "white").pack()
no_window_decoration.overrideredirect(1)
no_window_decoration.geometry("250x100+700+500")
# NOTE(review): there is no root.mainloop() call in this chunk, so run as a
# script the windows would close immediately — confirm whether it was omitted
# intentionally (e.g. for an interactive shell).
| true |
def validate(code):
    """Return True if *code* is a valid zip code.

    Valid means: exactly 5 characters, all digits, and no digit repeats
    within a distance of two positions of itself.
    """
    text = str(code)
    if len(text) != 5 or not text.isdigit():
        return False
    # A digit may not reappear at offset 1 or 2 from its position.
    for start in range(len(text)):
        for offset in (1, 2):
            neighbor = start + offset
            if neighbor < len(text) and text[start] == text[neighbor]:
                return False
    return True
# Smoke-test calls; the trailing comment on each line is the expected result.
print(validate("24325")) # True
print(validate("hello")) # False
print(validate("22345")) # False
print(validate("23245")) # False
print(validate("23425")) # True
print(validate(12345)) # True
| true |