blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7d98bbed7c05ae1cedfbf6de49866aa4c3d9ac84 | Python | keelimeguy/Twitter-Public-Analysis | /tests/dozent/test_downloader_tools.py | UTF-8 | 1,802 | 2.671875 | 3 | [] | no_license | import unittest
from dozent.downloader_tools import DownloaderTools
from pySmartDL import SmartDL
from pySmartDL.control_thread import ControlThread
from collections import namedtuple
# Lightweight stand-in for pySmartDL's internal progress state: downloaded
# size and total size (in Mb, later shifted into bytes) plus transfer speed.
DownloadProgress = namedtuple('DownloadProgress', 'dl_size filesize speed')
class DownloaderToolsTestCase(unittest.TestCase):
    """Tests for the progress-line formatting of DownloaderTools."""

    def test_make_progress_status(self):
        # _make_progress_status renders a one-line status string from a
        # SmartDL download object; its internal counters are faked below.
        url = 'https://www.google.com/'
        # We want to initialize the SmartDL object without actually starting the download
        downloader_obj = SmartDL(url)
        control_thread = ControlThread(downloader_obj)
        downloader_obj.control_thread = control_thread
        for progress in [
            DownloadProgress(dl_size=0, filesize=0, speed=0),
            DownloadProgress(dl_size=1024, filesize=1048576, speed=42),
            DownloadProgress(dl_size=129864, filesize=129865, speed=777),
            DownloadProgress(dl_size=999999, filesize=999999, speed=999),
        ]:
            # The expected-output template below only covers speeds < 1000 B/s.
            assert(progress.speed < 1000)
            # We create faked download progress to test the output
            # (the Mb figures are shifted into byte counts).
            downloader_obj.shared_var.value = progress.dl_size << 20
            downloader_obj.filesize = progress.filesize << 20
            control_thread.dl_speed = progress.speed
            progress_percentage = int(100 * progress.dl_size / progress.filesize) if progress.filesize else 0
            expected_output = f"\r {url} [ready] {progress.dl_size} Mb / {progress.filesize} Mb " \
                              f"@ {progress.speed} {'B' if progress.speed else 'bytes'}/s " \
                              f"[{'#' if progress_percentage == 100 else '-'}] [{progress_percentage}%, 0 seconds left]"
            self.assertEqual(expected_output, DownloaderTools._make_progress_status(downloader_obj, 3))
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
| true |
adc66838ad45635cb5d5602e364ccf59010c45df | Python | JunYang1215/arithmetic | /sorts/bucket_sort.py | UTF-8 | 1,103 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
def bucket_sort(my_list: list) -> list:
    """Sort *my_list* by scattering values into buckets and sorting each one.

    Works on ints and floats, including negatives; returns a new list.

    >>> data = [-1, 2, -5, 0]
    >>> bucket_sort(data) == sorted(data)
    True
    >>> data = [9, 8, 7, 6, -12]
    >>> bucket_sort(data) == sorted(data)
    True
    >>> data = [.4, 1.2, .1, .2, -.9]
    >>> bucket_sort(data) == sorted(data)
    True
    >>> bucket_sort([]) == sorted([])
    True
    >>> import random
    >>> collection = random.sample(range(-50, 50), 50)
    >>> bucket_sort(collection) == sorted(collection)
    True
    """
    if not my_list:
        return []
    lowest, highest = min(my_list), max(my_list)
    n_buckets = int(highest - lowest) + 1
    buckets = [[] for _ in range(n_buckets)]
    # Bucket index grows monotonically with the value, so concatenating the
    # individually sorted buckets yields a fully sorted result.
    for value in my_list:
        buckets[int(value - lowest) // n_buckets].append(value)
    return [item for bucket in buckets for item in sorted(bucket)]
if __name__ == "__main__":
    # Run the doctests embedded in bucket_sort, then two extra smoke checks.
    from doctest import testmod
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| true |
201703f8ad0ecff8826c4968cc26b624f6782b56 | Python | bopopescu/welcomerain | /welcome_rain/common/testfile.py | UTF-8 | 1,171 | 2.953125 | 3 | [] | no_license | import datetime
import time
import math
def convertStringToDateTime(str):
    # Python 2 code (print statement).  Parses "YYYY:MM:DD:HH:MM" and returns
    # the corresponding local-time Unix timestamp in whole seconds; also
    # echoes the current time as a side effect.
    # NOTE(review): the parameter shadows the builtin `str`.
    print datetime.datetime.now().strftime('%Y:%m:%d %H:%M:%S')
    d = datetime.datetime.strptime(str+":00", "%Y:%m:%d:%H:%M:%S")
    return int(time.mktime(d.timetuple()))
def getPercent(max, value):
    """Return *value* as a percentage of *max*, rounded to the nearest integer.

    NOTE(review): the first parameter shadows the builtin ``max``; the name is
    kept for interface compatibility.
    """
    ratio = float(value) / float(max)
    return round(ratio * 100)
def getXAxisIndex(target_time, time_min, time_max, time_count):
    # Map target_time onto one of time_count equally sized slots between
    # time_min and time_max.
    # NOTE(review): despite the name, the success path returns the tuple
    # (time_diff, time_unit, time_index) while out-of-range or degenerate
    # inputs return the scalar -1 — callers must handle both shapes.
    if target_time > time_max:
        return -1
    if target_time < time_min:
        return -1
    time_diff = time_max - time_min
    if time_diff == 0:
        return -1
    # NOTE(review): this file is Python 2, so `/` below is integer division
    # for int inputs — confirm the intended semantics before porting to 3.
    time_unit = math.trunc(time_diff/time_count)
    time_index = round((target_time-time_min)/time_unit)
    return time_diff, time_unit, time_index
# Ad-hoc smoke checks for the helpers above (Python 2 print statements).
startTime = "2012:10:14:16:24"
endTime = "2012:10:14:17:53"
startTime2 = convertStringToDateTime(startTime)
endTime2 = convertStringToDateTime(endTime)
#print "start="+str(startTime2), " , end=",endTime2
temp = 286
max = 3000  # NOTE(review): shadows the builtin `max`
#print getPercent(3000,286)
#print getXAxisIndex(1348444800,1348444100,1348445800,10)
print "date=",datetime.datetime.fromtimestamp(1350523473)
#UTCDateTime(1240561632)
d3e490090fa17bd951967a1a7018b9d923186d24 | Python | stepanplaunov/BMSTU-2 | /Numerical methods of linear algebra/src/householder.py | UTF-8 | 1,977 | 2.515625 | 3 | [] | no_license | import numpy as np
import math
def householder_only_r(A, B):
    # Householder triangularisation applied to the transposed stack [A; B],
    # returning (triangularised left block, transformed right-hand block).
    # NOTE(review): the loop bounds mix `m` and `n` (e.g. AB[i:n, i:n] inside
    # `for i in range(0, m-1)`), which only lines up for particular shapes of
    # A and B — confirm against the call sites before reuse.
    AB = np.vstack([A, B]).transpose()
    m = AB.shape[0]
    n = AB.shape[1]
    for i in range(0, m-1):
        Ai = AB[i:n, i:n]                # trailing submatrix (a view into AB)
        ai = Ai[:, 0]                    # current pivot column
        ai_norm = np.linalg.norm(ai)
        # Sign chosen opposite to the pivot entry to avoid cancellation.
        di = - ai_norm if Ai[0, 0] > 0 else ai_norm
        wi = Ai[0, 0] - di
        fi = math.sqrt(-2 * wi * di)     # normalisation factor of the reflector
        h_ai = np.zeros(np.shape(ai)[0])
        h_ai[0] = di                     # pivot column becomes (di, 0, ..., 0)
        v = np.copy(ai)
        v[0] = wi
        v = v / fi                       # unit Householder vector
        Ai[:, 0] = h_ai
        # Reflect the remaining columns of the submatrix in place.
        for j in range(1, n-i):
            aj = Ai[:, j]
            fj = 2 * v.transpose().dot(aj)
            h_aj = aj - fj * v
            Ai[:, j] = h_aj
    return AB[0:m, 0:n-1], AB[0:, n-1:]
def householder_without_q(A):
    """QR factorisation via Householder reflections, without forming Q.

    Returns (W, R): column k of W holds the unit Householder vector applied
    at step k, and R is the triangularised copy of A.  Q can be rebuilt from
    W afterwards (see form_q).
    """
    rows, cols = np.shape(A)
    W = np.zeros((rows, cols))
    R = A.copy()
    for k in range(rows - 1):
        x = R[k:rows, k]
        e1 = np.zeros(len(x))
        e1[0] = 1
        # Sign of the leading entry scales alpha, matching the original
        # convention (positive when x[0] == 0).
        flip = -1 if x[0] < 0 else 1
        alpha = flip * np.linalg.norm(x, 2)
        u = x - alpha * e1
        v = u / np.linalg.norm(u, 2)
        # Rank-one update: reflect the trailing block of R.
        R[k:rows, k:cols] = R[k:rows, k:cols] - 2 * np.outer(v, np.dot(v.transpose(), R[k:rows, k:cols]))
        W[k:rows, k] = v
    return W, R
def form_q(W):
    """Accumulate the orthogonal factor Q from the Householder vectors in W."""
    rows, cols = np.shape(W)
    Q = np.identity(rows)
    # Apply the reflectors to every column of the identity, last reflector
    # first, which reconstructs Q column by column.
    for col in range(rows):
        for k in range(cols - 1, -1, -1):
            Q[k:rows, col] = Q[k:rows, col] - 2 * np.dot(np.outer(W[k:rows, k], W[k:rows, k]), Q[k:rows, col])
    return Q
def valid(Q, R):
    """Recombine the factors: returns the matrix product Q·R."""
    product = np.dot(Q, R)
    return product
def householder(A):
    # Full Householder QR: builds an explicit reflector H at each step and
    # accumulates P = H_{m-2} ... H_0, so that Q = P.T and A = Q R.
    def sign(x):
        # Sign convention with sign(0) == 1.
        return -1 if x < 0 else 1
    m, n = A.shape
    R = np.array(A).copy()
    P = np.eye(m)
    E = np.eye(m)          # columns of E serve as unit basis vectors e_i
    Q = None
    for i in range(m-1):
        # Householder vector for the current column, reshaped into an
        # explicit (m-i, 1) column so the outer product below works.
        u = R[i:, i]-sign(R[i:, i][0])*np.linalg.norm(R[i:, i])*E[i:, i]
        u = np.array([u])
        u = u.T
        H = np.eye(m)
        # norm(u)*norm(u.T) equals ||u||^2, so this is the standard
        # I - 2 u u^T / ||u||^2 reflector embedded in the trailing block.
        H[i:, i:] = np.eye(m-i)-2*np.dot(u, u.T)/(np.linalg.norm(u)*np.linalg.norm(u.T))
        R = np.dot(H, R)
        P = np.dot(H, P)
    Q = P.T
    return Q, R
| true |
91be1db6be445856179b1ef669b7430327adce27 | Python | Factumpro/HackerRank | /Python/Tutorials/30 Days of Code/Day_09.py | UTF-8 | 406 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python3
import os
import re
import sys
import math
import random
import operator
import functools as fun
def factorial(Num):
    """Return Num! (the factorial of Num) computed recursively.

    The base case covers Num <= 1, so factorial(0) == 1.  (The original base
    case was Num == 1 only, which recursed forever on input 0.)

    Raises
    ------
    ValueError
        If Num is negative, where the factorial is undefined.
    """
    if Num < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if Num <= 1:
        return 1
    return Num * factorial(Num - 1)
if __name__ == '__main__':
    # HackerRank harness: read N from stdin and write N! to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    Num = int(input())
    result = factorial(Num)
    fptr.write(str(result) + '\n')
    fptr.close()
| true |
a284d82eebb8d8fa3e4410064f6867b13d633525 | Python | kilohsakul/Noodler | /noodler/parser.py | UTF-8 | 12,419 | 2.65625 | 3 | [] | no_license | """
Parse smtlib (smt2) files with string constraints.
Relies on parser in Z3 and converts assertions into queries.
Classes:
--------
SmtlibParser
Parses smt2 files and creates corresponding queries.
The current implementation often fails due to Awali's
limited support of rich alphabets. If Awali is changed
for another library, this should work well (with corresponding
adjustments).
SmtlibParserHackAbc
Adjust SmtlibParser with translations of characters that
are not compatible with Awali (see ``translate_for_awali``)
into ASCI-control sequences.
"""
import itertools
from typing import Callable, Collection, Optional, Union
from .core import StringEquation, REConstraints, RE
from .sequery import MultiSEQuery
import awalipy
import z3
def awalipy_ratexp_plus(re: RE):
    """
    A wrapper that mimics the + operater in awalipy

    Parameters
    ----------
    re: RE

    Returns
    -------
    RE
        (re)+ = (re)(re)*
    """
    # Awalipy has no native `+`, so express it as concatenation with star.
    return awalipy.RatExp.mult(re, re.star())
# Symbol used to represent characters not included in alphabet of Awali REs
# (ASCII SUB, U+001A — appended to every alphabet string built below).
NONSPEC_SYMBOL = u"\x1A"
def translate_for_awali(string):
    """Rewrite characters Awali cannot digest into spare ASCII control codes.

    The empty string maps to Awali's epsilon literal.  The Z3 replacement
    character (appearing as "\xfffd") is first normalised to U+FFFD, and then
    every problematic character is substituted via a translation table.
    """
    # TODO \xff is in fact \xfffd in Z3.
    if string == "":
        return "\\e"
    if "\xfffd" in string:
        string = string.replace("\xfffd", "\ufffd")
    table = str.maketrans({
        " ": "\x19",
        "<": "\x18",
        ">": "\x17",
        "?": "\x16",
        "\n": "\x15",
        ")": "\x14",
        "(": "\x13",
        "{": "\x12",
        "}": "\x11",
        "*": "\x10",
        ".": "\x09",
        "\\": "\x08",
        "\ufffd": "\x07",
    })
    return string.translate(table)
def awalipy_allchar(alphabet: str) -> RE:
    """Build an Awalipy RE matching any single character of ``alphabet``.

    Parameters
    ----------
    alphabet : str
        The characters of Σ, one per position.

    Returns
    -------
    RE
        The union a+b+c+... over all characters of ``alphabet``.
    """
    return awalipy.RatExp("+".join(alphabet), alphabet=alphabet)
# Mapping from z3 RE operator kinds to the Awalipy callable implementing
# them.  Entries still commented out are not supported; z3_re_to_awali
# raises NotImplementedError when it meets one of those operators.
OPERATORS_Z3_TO_AWALIPY = {
    z3.Z3_OP_RE_PLUS: awalipy_ratexp_plus,
    z3.Z3_OP_RE_STAR: awalipy.RatExp.star,
    # Z3_OP_RE_OPTION : awalipy.RatExp.,
    z3.Z3_OP_RE_CONCAT: awalipy.RatExp.mult,
    z3.Z3_OP_RE_UNION: awalipy.RatExp.add,
    # Z3_OP_RE_RANGE : awalipy.RatExp.,
    # Z3_OP_RE_LOOP : awalipy.RatExp.,
    # Z3_OP_RE_INTERSECT : awalipy.RatExp.,
    # Z3_OP_RE_EMPTY_SET : awalipy.RatExp.,
    # Z3_OP_RE_FULL_SET : awalipy.RatExp.,
    # Z3_OP_RE_COMPLEMENT : awalipy.RatExp.,
}
def is_string_constant(ref: z3.SeqRef) -> bool:
    # Literal string term, e.g. "abc".
    return ref.is_string_value()


def is_string_variable(ref: z3.SeqRef) -> bool:
    # Uninterpreted string-sorted constant, i.e. a string variable.
    return ref.is_string() and ref.decl().kind() == z3.Z3_OP_UNINTERPRETED


def is_equation(ref):
    # Top-level equality node.
    return ref.decl().kind() == z3.Z3_OP_EQ


def is_inre(ref):
    # Membership constraint `str in RE`.
    return ref.decl().kind() == z3.Z3_OP_SEQ_IN_RE
def is_assignment(ref: z3.BoolRef) -> bool:
    """
    Detect assignment.

    Assignment is an equation of the form `var = str_const`.

    Parameters
    ----------
    ref: z3 reference

    Returns
    -------
    True if `ref` is an assignment.
    """
    if ref.decl().kind() != z3.Z3_OP_EQ:
        return False
    left, right = ref.children()
    # Only the orientation variable-on-the-left counts as an assignment.
    return is_string_variable(left) and is_string_constant(right)
class SmtlibParser:
    """
    Convert `.smt2` files into Queries.
    """
    def __init__(self, filename: str):
        # Parse the file with Z3 and pre-scan all assertions to collect
        # the alphabet and the set of string variables.
        self.filename: str = filename
        self.assertions: z3.z3.AstVector = z3.parse_smt2_file(filename)
        self.alphabet: Collection[str] = set()        # letters seen in string constants
        self.variables: Collection[str] = set()       # names of string variables
        self.constraints: REConstraints = dict()      # variable name -> Awali RE
        self.equations: Collection[StringEquation] = []
        # Gather alphabet
        for ref in self.assertions:
            self._gather_symbols(ref)
        # NONSPEC_SYMBOL stands in for any character outside the alphabet.
        self.alphabet_str: str = "".join(self.alphabet) + NONSPEC_SYMBOL
        # Fresh variables
        self.next_variable_id = 0

    def fresh_variable(self) -> str:
        """
        Introduce and return (name of) a fresh variable.

        Creates a new variable, adds it into `variables`, and
        ensures that the same name will not be used by subsequent
        calls.

        Returns
        -------
        Name of a fresh variable.
        """
        prefix = 'noodler_var_'
        self.next_variable_id += 1
        new_var = f'{prefix}{self.next_variable_id-1}'
        self.variables.add(new_var)
        return new_var

    def _gather_symbols(self,
                        ref: Union[z3.ReRef, z3.SeqRef, z3.BoolRef]):
        """
        Detect string variables and RE-alphabet used in a z3 reference.

        Parameters
        ----------
        ref : z3.ReRef or z3.SeqRef or BoolRef
            z3 reference to regular expression, string,
            equation, or RE-query
        """
        # Strings (can appear from equations)
        if ref.sort().kind() == z3.Z3_SEQ_SORT:
            if is_string_constant(ref):
                self._extract_letters(ref)
                return
            elif is_string_variable(ref):
                self.variables.add(ref.as_string())
                return
        # Regular expressions or equation or re-query
        for child in ref.children():
            self._gather_symbols(child)

    def _extract_letters(self, ref: z3.SeqRef) -> None:
        """
        Update alphabet with letters given by z3.SeqRef.

        Parameters
        ----------
        ref: z3.SeqRef
        """
        self.alphabet.update(set(ref.as_string()))

    def z3_re_to_awali(self, ref: z3.ReRef) -> RE:
        """
        Convert z3 regular expression(RE) to Awalipy RE.

        Parameters
        ----------
        ref : z3.ReRef
            reference to RE

        Returns
        -------
        RE
            Awalipy representation of ref
        """
        z3_operator = ref.decl().kind()
        alphabet = self.alphabet_str
        # Basic blocks (string constants)
        if z3_operator == z3.Z3_OP_SEQ_TO_RE:
            string = ref.children()[0].as_string()
            return self.create_awali_re(string)
        # Allchar
        if ref.decl().name() == 're.allchar':
            return awalipy_allchar(alphabet)
        # Otherwise recursively convert children and glue them
        # together using appropriate operator
        #
        # 1. dict z3.operator -> awalipy operator
        # 2. convert children
        # 3. apply awalipy operator and return
        # 1. get awalipy operator
        if z3_operator not in OPERATORS_Z3_TO_AWALIPY:
            name = ref.decl().name()
            raise NotImplementedError(f"Z3 operator {z3_operator} ({name}) is "
                                      f"not implemented yet!")
        awalipy_op: Callable = OPERATORS_Z3_TO_AWALIPY[z3_operator]
        # 2. convert children
        child_re = [self.z3_re_to_awali(child) for child in ref.children()]
        # 3. apply awalipy operator
        return awalipy_op(*child_re)

    def create_awali_re(self, string):
        """
        Create Awalipy RatExp recognizing `string`.

        Parameters
        ----------
        string: str
            string term to be converted to Awali RE.

        Returns
        -------
        RE
            Awalipy representation of RE string.
        """
        return awalipy.RatExp(string, alphabet=self.alphabet_str)

    def parse_equation(self, ref: z3.BoolRef) -> StringEquation:
        # Turn a z3 equality into a StringEquation: a single variable on the
        # left, a concatenation of variables on the right.
        left, right = ref.children()
        # TODO This restricts only to assignment-form of equations (like SSA-fragment)
        assert is_string_variable(left)
        assert right.sort_kind() == z3.Z3_SEQ_SORT
        res_left = [left.as_string()]

        def z3_concat_to_var_list(z3_ref: z3.SeqRef) -> Collection[str]:
            """
            Convert concatenation of string variables into list of vars.

            Parameters
            ----------
            z3_ref

            Returns
            -------
            List of variables from Z3_ref
            """
            if is_string_variable(z3_ref):
                return [z3_ref.as_string()]
            children = [z3_concat_to_var_list(child) for child in z3_ref.children()]
            return itertools.chain(*children)

        res_right = z3_concat_to_var_list(right)
        return StringEquation(res_left, list(res_right))

    def parse_re_constraint(self, ref: z3.BoolRef) -> REConstraints:
        """
        Translate one regular constraint into REConstraints dict.

        The reference should point to a Z3_OP_SEQ_IN_RE operator.

        Parameters
        ----------
        ref: z3.BoolRef to a in_re operator to translate

        Returns
        -------
        REConstraint
            Mapping `var -> RE`
        """
        assert is_inre(ref)
        left, right = ref.children()
        assert is_string_variable(left) and left.as_string() in self.variables
        return {left.as_string(): self.z3_re_to_awali(right)}

    def process_assignment(self, ref: z3.BoolRef) -> None:
        """
        Create a RE constraint or a fresh equation for literal assignment.

        Assignment is `var = str_cons`.

        If there is no RE constraint for `var`, we create one of the form
        `var → RE(str_cons)`. Otherwise we introduce a fresh variable (`x`)
        and create a new equation `var = x`, and introduce a constraint
        `x → RE(str_cons)`.

        Parameters
        ----------
        ref: z3.BoolRef of the form `var = string_const`

        Returns
        -------
        None
        """
        assert is_assignment(ref)
        var, const = (c.as_string() for c in ref.children())
        const_re: RE = self.create_awali_re(const)
        if var not in self.constraints:
            self.constraints[var] = const_re
        else:
            # Introduce a fresh variable and a new equation
            new_var = self.fresh_variable()
            self.constraints[new_var] = const_re
            self.equations.append(StringEquation([var], [new_var]))

    def parse_query(self) -> MultiSEQuery:
        # Assemble the full query: first collect all membership constraints,
        # then equations/assignments, finally default every unconstrained
        # variable to Σ*.
        # TODO might be or of equations
        for ref in self.assertions:
            if is_inre(ref):
                res = self.parse_re_constraint(ref)
                # Assert that the variable does not have a constraint yet.
                # TODO: Two constraints for one variable would represent intersection of the two.
                assert res.keys().isdisjoint(self.constraints)
                self.constraints.update(res)
        # We need first all in_re constraints before processing assignments
        # for cases where we have both RE-constraint for `x` and an assignment
        # for `x`. This can be used to check for example whether "abc" ∈ L(RE).
        for ref in self.assertions:
            if is_inre(ref):
                continue
            if is_equation(ref) and not is_assignment(ref):
                equation = self.parse_equation(ref)
                self.equations.append(equation)
            elif is_assignment(ref):
                # Assignments are only stored for later processing
                self.process_assignment(ref)
            # The rest should be `or`
            else:
                # assert ref.decl().kind() == z3.Z3_OP_OR
                z3_operator, name = ref.decl().kind(), ref.decl().name()
                raise NotImplementedError(f"Z3 operator {z3_operator} ({name}) is "
                                          f"not implemented yet!")
        sigma_star: RE = awalipy_allchar(self.alphabet_str).star()
        for var in self.variables:
            self.constraints.setdefault(var, sigma_star)
        return MultiSEQuery(self.equations, self.constraints)
class SmtlibParserHackAbc(SmtlibParser):
    """
    Extend `SmtlibParser` with encoding of `<`,`>`, ` `, and
    other problematic characters using fresh ASCI symbols.

    This is ensured mainly by calls to ``translate_for_awali``
    """
    def __init__(self, filename: str):
        super(SmtlibParserHackAbc, self).__init__(filename)
        # Re-encode the alphabet already gathered by the base constructor.
        self.alphabet_str = translate_for_awali(self.alphabet_str)

    def create_awali_re(self, string):
        # Encode the literal before handing it to the base implementation.
        string = translate_for_awali(string)
        return super().create_awali_re(string)

    def _extract_letters(self, ref: z3.SeqRef) -> None:
        # Collect letters from the *encoded* form so the alphabet matches
        # what the constructed REs will actually contain.
        orig_string = ref.as_string()
        fixed_string = translate_for_awali(orig_string)
        self.alphabet.update(set(fixed_string))
ed3c19cf715d2e0e0d53d63cf5e910d818203b2f | Python | fatalinha/b7 | /pp.py | UTF-8 | 1,495 | 2.546875 | 3 | [] | no_license | try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
import re
from os.path import isfile, join
def get_logps(orig, mix):
    """Find sentences for which the mixed LM does (much) better
    pp is actually logbase10 probability"""
    # NOTE(review): `ppo`/`ppm` below stay *strings*, so `ppm > ppo` is a
    # lexicographic comparison of the textual log-probs, not a numeric one —
    # verify this against the intended selection criterion before relying
    # on the returned line numbers.
    counter = 0
    mlines = list()
    with open(orig, 'r') as o, open(mix, 'r') as m:
        for lo, lm in zip(o, m):
            counter += 1
            # Field layout assumed: "<label>: <tok> <logprob> ..." — take
            # the token after the first space following the colon.
            ppo = lo.split(':')[1].split(' ')[1]
            ppm = lm.split(':')[1].split(' ')[1]
            print('Line ' + str(counter) + ':' + ppo, ppm)
            if ppm > ppo and abs(float(ppm)) - abs(float(ppo)) > abs(4):
                mlines.append(counter)  # , lo, lm))
            # Hard stop at the corpus size (Spanish: 141901; German: 167040).
            if counter == 141901:  # spanish141901:de 167040
                break
    print(len(mlines))
    return mlines
def get_sents(mlines, file_s):
    """Print every line of *file_s* whose 1-based line number is in *mlines*."""
    with open(file_s, 'r') as source:
        for lineno, text in enumerate(source, 1):
            if lineno not in mlines:
                continue
            print(text.rstrip())
# Hard-coded experiment paths for one run (compare original vs. mixed LM
# perplexity files, then print the selected source sentences).
outputs = '/home/alina/pCloudDrive/UniSaarland/01_Projects/B7/02_data/bilm/output_pos'
orig = join(outputs, 'epuds.orig.transall.es.pos.pp')
mix = join(outputs, 'epuds.mix1.transall.pos.es.pp')  #epuds.orig1.transall.pos.en.pp')
#mix = 'eno-eso.pp'orig.4.pp
mlines = get_logps(orig, mix)
get_sents(mlines, '/home/alina/pCloudDrive/UniSaarland/01_Projects/B7/02_data/bilm/DNU/epuds.en-es.es')  #de/epuds.en-de.de.train')
53df3c801e6259913c85e1764aa93152d95452ca | Python | krantirk/Algorithms-and-code-for-competitive-programming. | /online_judges/Codeforces/314-pi/A/code.py | UTF-8 | 284 | 2.78125 | 3 | [] | no_license | n = int(raw_input())
# Python 2 Codeforces solution body; `n` was read from stdin on the
# preceding (metadata-fused) line.  For every position it prints the
# distance to the nearest neighbour and to the farthest endpoint.
l = map(long,raw_input().split())
first = l[0]
last = l[n-1]
print abs(l[1] - l[0]), abs(last - first)
for i in xrange(1,n-1):
    print min(abs(l[i+1]-l[i]),abs(l[i]-l[i-1])), max(abs(l[i]-first),abs(last-l[i]))
print abs(last - l[n-2]), abs(last - first)
| true |
91dd1140aef3e9335c766a19c847a39ad6671e93 | Python | smararu/BIOL60201GroupProject | /3.massspec/outputFiles/task4_abi.py | UTF-8 | 2,072 | 3.328125 | 3 | [] | no_license | import argparse
# CLI definition for the mass-spec peptide analysis script.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', type=str, default='dummy.pepmasses',
                    help='Identify the path/location of your file containing peptide masses')
parser.add_argument('-a', '--analysis', type=str, default='range',
                    help='Choose your analysis: "full" to analyze whole m/z values, or "range" to define a range of values')
# NOTE(review): with type=int and no nargs=2, passing -r on the command line
# produces a single int, not a (min, max) pair like the default — confirm.
parser.add_argument('-r', '--range', type=int, default=(1000, 1500),
                    help='Choose your range of values min, max')
parser.add_argument('-o', '--output', type=str,
                    help='Designate the name of your output file', default='output')
args = parser.parse_args()
def read_masses(filename):
    """Read peptide m/z values from a whitespace-delimited pepmasses file.

    Parameters
    ----------
    filename : str
        Path to a file with seven whitespace-separated fields per line:
        name, number, mass_to_charge, z, p, enzyme, protein sequence.

    Returns
    -------
    list[float]
        The mass-to-charge value of every line, in file order.
    """
    masses = []
    # FIX: the original ignored its parameter and re-opened args.filename;
    # it also shadowed `filename` with the file object and never closed it.
    with open(filename, 'r') as handle:
        for line in handle:
            (name, number, mass_to_charge, z, p, enzyme, prot_sequence) = line.split()
            masses.append(float(mass_to_charge))
    return masses
# Load all m/z values once at import time from the CLI-selected file.
masses=read_masses(args.filename)
def analysis(analysis, range, values=None):
    """Summarise the peptide m/z values inside a (min, max) window.

    Parameters
    ----------
    analysis : str
        Analysis mode; only 'range' is implemented.  Any other mode falls
        through and returns None, matching the original behaviour.
    range : tuple
        (min, max) bounds of the m/z window; both bounds are exclusive,
        as in the original comparison.
    values : list[float] | None
        m/z values to analyse.  Defaults to the module-level ``masses``
        list for backward compatibility with the existing call site.

    Returns
    -------
    str | None
        A one-line summary, or None when *analysis* is not 'range'.
    """
    # FIX: the original ignored both of its parameters and read the global
    # ``args`` instead; the function now honours its own arguments.
    if analysis != 'range':
        return None
    data = masses if values is None else values
    low, high = range
    number_pep = 0
    total_mass = 0.0
    for mzf in data:
        if low < mzf < high:
            number_pep += 1
            total_mass += mzf
    if number_pep == 0:
        # Robustness fix: the original divided by zero for an empty window.
        return f'Range {range} has 0 peptides in range'
    meanmz_inrange = total_mass / number_pep
    return f'Range {range} has {number_pep} peptides and mean m/z value {meanmz_inrange}'
# Run the selected analysis and write the summary line to the output file.
# NOTE(review): the output file handle is never closed explicitly.
result=analysis(args.analysis, args.range)
output=open(f'{args.output}','w')
print(result, file=output)
| true |
f94155a5911cbdaa4a27c106163522244d27d176 | Python | akuchlous/leetcode | /atoi.py | UTF-8 | 1,497 | 3.03125 | 3 | [] | no_license | import pdb
class Solution(object):
    """LeetCode 8 — String to Integer (atoi)."""

    def myAtoi(self, str):
        """Parse *str* as a 32-bit signed integer, LeetCode-atoi style.

        :type str: str
        :rtype: int

        Leading spaces are skipped, at most one sign is consumed, digits are
        read until the first non-digit, and the result is clamped to
        [-2147483648, 2147483647].  Anything unparsable yields 0.
        """
        s = str.lstrip(" ")
        if s == "":
            return 0
        sign = 1
        if s[0] in "+-":
            sign = -1 if s[0] == "-" else 1
            s = s[1:]
        # Nothing (or a non-digit) after the optional sign: not a number.
        if s == "" or not ("0" <= s[0] <= "9"):
            return 0
        end = 0
        while end < len(s) and "0" <= s[end] <= "9":
            end += 1
        value = int(s[:end])
        # Clamp to the 32-bit signed range before applying the sign.
        if value >= 2147483648:
            return -2147483648 if sign == -1 else 2147483647
        return sign * value
# Ad-hoc smoke checks (Python 2 print statements) covering signs, leading
# spaces, embedded non-digits, empty input and 32-bit clamping.
s = Solution()
print s.myAtoi("12121")
print s.myAtoi("-12121")
print s.myAtoi("+12121")
print s.myAtoi(" +12121")
print s.myAtoi(" -12121")
print s.myAtoi("v -12121")
print s.myAtoi("v12121")
print s.myAtoi("12b121")
print s.myAtoi("work 2")
print s.myAtoi("wo k 2")
print s.myAtoi("")
print s.myAtoi("+")
print s.myAtoi("-")
print s.myAtoi("---")
print s.myAtoi("+ ")
print s.myAtoi(" ")
print s.myAtoi("-91283472332")
print s.myAtoi("-+2")
print s.myAtoi("--2")
print s.myAtoi("+-2")
| true |
521cda54fda9aade16fea8c3bfa5887cd0095dc5 | Python | crosstuck/bett | /model/preprocess.py | UTF-8 | 11,423 | 3.109375 | 3 | [] | no_license | import numpy as np
import pandas as pd
from datetime import datetime as dt
import itertools
%matplotlib inline
# Input: raw match results with betting odds, semicolon-separated CSV.
loc = "data/"
raw_data_all = pd.read_csv(loc + 'results.csv', sep=";")
def parse_date(date):
    """Turn a 'dd/mm/yyyy' string into a datetime.date; '' maps to None."""
    if date == '':
        return None
    parsed = dt.strptime(date, '%d/%m/%Y')
    return parsed.date()
# Normalise dates, order chronologically, then split the data per season.
raw_data_all.Date = raw_data_all.Date.apply(parse_date)
# sorts by date, oldest first
raw_data_all = raw_data_all.sort_values(by='Date')
# Splits seasons to individual DataFrames and assign them to array
season_names = raw_data_all.Season.unique()
n_seasons = raw_data_all.Season.unique().shape[0]
raw_data = []
for i in range(n_seasons):
    raw_data.append(raw_data_all.loc[raw_data_all['Season'] == season_names[i]])
# Gets all the statistics related to gameplay and assigns to variable split by season
columns_req = ['Season', 'Date', 'HomeTeam', 'AwayTeam',
               'FTHG', 'FTAG', 'FTR', 'Odds_1', 'Odds_x', 'Odds_2']
data_frames = []
for frame in range(n_seasons):
    data_frames.append(raw_data[frame][columns_req])
playing_statistics_1 = raw_data[1][columns_req]  # ERASE
def get_rounds(playing_stat):
    """Derive (TEAMS, ROUNDS, GAMES) from one season's fixtures table."""
    games = playing_stat.shape[0]
    teams = playing_stat.HomeTeam.unique().shape[0]
    # Every team plays every round, and each game involves two teams.
    rounds = int(games * 2 / teams)
    return teams, rounds, games
# -------------- #
# GOALS SCORED AND CONCEDED AT THE END OF MATCHWEEK, ARRANGED BY TEAMS AND MATCHWEEK ** #
# Gets the goals scored agg arranged by teams and matchweek
def get_goals_scored(playing_stat, ROUNDS):
    # Build a team -> goals-per-match mapping, then cumulate per matchweek.
    # Assumes every team appears as HomeTeam at least once (groupby keys).
    # Create a dictionary with team names as keys
    teams = {}
    for i in playing_stat.groupby('HomeTeam').mean().T.columns:
        teams[i] = []
    # the value corresponding to keys is a list containing the match location.
    for i in range(len(playing_stat)):
        HTGS = playing_stat.iloc[i]['FTHG']
        ATGS = playing_stat.iloc[i]['FTAG']
        teams[playing_stat.iloc[i].HomeTeam].append(HTGS)
        teams[playing_stat.iloc[i].AwayTeam].append(ATGS)
    # Create a dataframe for goals scored where rows are teams and cols are matchweek.
    GoalsScored = pd.DataFrame(data=teams, index=[i for i in range(1, ROUNDS + 1)]).T
    GoalsScored[0] = 0
    # Aggregate to get uptil that point
    for i in range(2, ROUNDS + 1):
        GoalsScored[i] = GoalsScored[i] + GoalsScored[i - 1]
    return GoalsScored
# Gets the goals conceded agg arranged by teams and matchweek
def get_goals_conceded(playing_stat, ROUNDS):
    # Mirror of get_goals_scored: the home side concedes the away goals
    # (FTAG) and vice versa.
    # Create a dictionary with team names as keys
    teams = {}
    for i in playing_stat.groupby('HomeTeam').mean().T.columns:
        teams[i] = []
    # the value corresponding to keys is a list containing the match location.
    for i in range(len(playing_stat)):
        ATGC = playing_stat.iloc[i]['FTHG']
        HTGC = playing_stat.iloc[i]['FTAG']
        teams[playing_stat.iloc[i].HomeTeam].append(HTGC)
        teams[playing_stat.iloc[i].AwayTeam].append(ATGC)
    # Create a dataframe for goals scored where rows are teams and cols are matchweek.
    GoalsConceded = pd.DataFrame(data=teams, index=[i for i in range(1, ROUNDS + 1)]).T
    GoalsConceded[0] = 0
    # Aggregate to get uptil that point
    for i in range(2, ROUNDS + 1):
        GoalsConceded[i] = GoalsConceded[i] + GoalsConceded[i - 1]
    return GoalsConceded
def get_gss(playing_stat):
    # Attach cumulative goals-scored/conceded columns (HTGS/ATGS/HTGC/ATGC)
    # holding each side's totals *before* the current match.
    TEAMS, ROUNDS, GAMES = get_rounds(playing_stat)
    GC = get_goals_conceded(playing_stat, ROUNDS)
    GS = get_goals_scored(playing_stat, ROUNDS)
    j = 0
    HTGS = []
    ATGS = []
    HTGC = []
    ATGC = []
    for i in range(GAMES):
        ht = playing_stat.iloc[i].HomeTeam
        at = playing_stat.iloc[i].AwayTeam
        HTGS.append(GS.loc[ht][j])
        ATGS.append(GS.loc[at][j])
        HTGC.append(GC.loc[ht][j])
        ATGC.append(GC.loc[at][j])
        # NOTE(review): hard-codes 10 fixtures per matchweek (20-team league).
        if ((i + 1) % 10) == 0:
            j = j + 1
    playing_stat['HTGS'] = HTGS
    playing_stat['ATGS'] = ATGS
    playing_stat['HTGC'] = HTGC
    playing_stat['ATGC'] = ATGC
    return playing_stat
# Apply to each dataset
playing_statistics_1 = get_gss(playing_statistics_1)  # ERASE
for frame in range(n_seasons):
    data_frames[frame] = get_gss(data_frames[frame])
# GET RESPECTIVE POINTS:
def get_points(result):
    """Form points for one result letter: W→2, D→1, anything else→0.

    NOTE(review): a win is worth 2 here, not the usual 3 — this matches the
    rest of the pipeline, so it is preserved as-is.
    """
    return {'W': 2, 'D': 1}.get(result, 0)
def get_cuml_points(matchres, ROUNDS, TEAMS):
    # Convert per-round result letters into cumulative points; column 0 is
    # an all-zero baseline so lookups "before round 1" work.
    matchres_points = matchres.applymap(get_points)
    for i in range(2, ROUNDS + 1):
        matchres_points[i] = matchres_points[i] + matchres_points[i-1]
    matchres_points.insert(column=0, loc=0, value=[0*i for i in range(TEAMS)])
    return matchres_points
def get_matchres(playing_stat, ROUNDS):
    # Per-team result letters (W/D/L) per matchweek, derived from FTR
    # ('H' = home win, 'A' = away win, anything else = draw).
    # Create a dictionary with team names as keys
    teams = {}
    for i in playing_stat.groupby('HomeTeam').mean().T.columns:
        teams[i] = []
    # the value corresponding to keys is a list containing the match result
    for i in range(len(playing_stat)):
        if playing_stat.iloc[i].FTR == 'H':
            teams[playing_stat.iloc[i].HomeTeam].append('W')
            teams[playing_stat.iloc[i].AwayTeam].append('L')
        elif playing_stat.iloc[i].FTR == 'A':
            teams[playing_stat.iloc[i].AwayTeam].append('W')
            teams[playing_stat.iloc[i].HomeTeam].append('L')
        else:
            teams[playing_stat.iloc[i].AwayTeam].append('D')
            teams[playing_stat.iloc[i].HomeTeam].append('D')
    return pd.DataFrame(data=teams, index=[i for i in range(1, ROUNDS + 1)]).T
def get_agg_points(playing_stat):
    # Attach HTP/ATP columns: cumulative points of each side *before* the
    # current match (column j of cum_pts, advanced every 10 fixtures).
    TEAMS, ROUNDS, GAMES = get_rounds(playing_stat)
    matchres = get_matchres(playing_stat, ROUNDS)
    cum_pts = get_cuml_points(matchres, ROUNDS, TEAMS)
    HTP = []
    ATP = []
    j = 0
    for i in range(GAMES):
        ht = playing_stat.iloc[i].HomeTeam
        at = playing_stat.iloc[i].AwayTeam
        HTP.append(cum_pts.loc[ht][j])
        ATP.append(cum_pts.loc[at][j])
        if ((i + 1) % 10) == 0:
            j = j + 1
    playing_stat['HTP'] = HTP
    playing_stat['ATP'] = ATP
    return playing_stat
# Apply to each dataset
for frame in range(n_seasons):
    data_frames[frame] = get_agg_points(data_frames[frame])
playing_statistics_1 = get_agg_points(playing_statistics_1)  # ERASE
# GET TEAM FORM:
def get_form(playing_stat, ROUNDS, num):
    # For each round i >= num, concatenate the last `num` result letters
    # (most recent first) into a single form string per team.
    form = get_matchres(playing_stat, ROUNDS)
    form_final = form.copy()
    for i in range(num, ROUNDS+1):
        form_final[i] = ''
        j = 0
        while j < num:
            form_final[i] += form[i-j]
            j += 1
    return form_final
def add_form(playing_stat, GAMES, ROUNDS, num):
    # Attach HM<num>/AM<num> columns: the result from `num` games ago for
    # the home/away side ('M' where not enough history exists yet).
    form = get_form(playing_stat, ROUNDS, num)
    h = ['M' for i in range(num * 10)]  # since form is not available for n MW (n*10)
    a = ['M' for i in range(num * 10)]
    j = num
    for i in range((num*10), GAMES):
        ht = playing_stat.iloc[i].HomeTeam
        at = playing_stat.iloc[i].AwayTeam
        past = form.loc[ht][j]  # get past n results
        h.append(past[num-1])  # 0 index is most recent
        past = form.loc[at][j]  # get past n results.
        a.append(past[num-1])  # 0 index is most recent
        # NOTE(review): again assumes 10 fixtures per matchweek.
        if ((i + 1) % 10) == 0:
            j = j + 1
    playing_stat['HM' + str(num)] = h
    playing_stat['AM' + str(num)] = a
    return playing_stat
def add_form_df(playing_stat):
    """Attach recent-form columns HM1..HM5 / AM1..AM5 (windows 1 to 5)."""
    TEAMS, ROUNDS, GAMES = get_rounds(playing_stat)
    for window in range(1, 6):
        playing_stat = add_form(playing_stat, GAMES, ROUNDS, window)
    return playing_stat
# Apply to each dataset
for frame in range(n_seasons):
    data_frames[frame] = add_form_df(data_frames[frame])
playing_statistics_1 = add_form_df(playing_statistics_1)  # ERASE
# REARRANGING COLUMNS
cols = ['Season', 'Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTGS', 'ATGS', 'HTGC', 'ATGC', 'HTP', 'ATP', 'HM1', 'HM2', 'HM3',
        'HM4', 'HM5', 'AM1', 'AM2', 'AM3', 'AM4', 'AM5', 'Odds_1', 'Odds_x', 'Odds_2']
# Apply to each dataset
for frame in range(n_seasons):
    data_frames[frame] = data_frames[frame][cols]
playing_statistics_1 = playing_statistics_1[cols]  # ERASE
# Get matchweek
def get_mw(playing_stat):
    """Add a 'MW' (matchweek) column, assuming 10 fixtures per week."""
    TEAMS, ROUNDS, GAMES = get_rounds(playing_stat)
    week = 1
    weeks = []
    for played in range(GAMES):
        weeks.append(week)
        if (played + 1) % 10 == 0:
            week += 1
    playing_stat['MW'] = weeks
    return playing_stat
# apply to all DataFrames
playing_statistics_1 = get_mw(playing_statistics_1)  # ERASE
for frame in range(n_seasons):
    data_frames[frame] = get_mw(data_frames[frame])
# CONCATENATE DATAFRAMES
playing_stat = pd.concat(data_frames, ignore_index=True)
# SOME PREPROCESS
# Gets the form points.
def get_form_points(string):
    """Total form points of a results string such as 'WWDLM'."""
    return sum(get_points(letter) for letter in string)
# Concatenate the five most recent result letters into one form string
# per side, then score them.
playing_stat['HTFormPtsStr'] = playing_stat['HM1'] + playing_stat['HM2'] + \
    playing_stat['HM3'] + playing_stat['HM4'] + playing_stat['HM5']
playing_stat['ATFormPtsStr'] = playing_stat['AM1'] + playing_stat['AM2'] + \
    playing_stat['AM3'] + playing_stat['AM4'] + playing_stat['AM5']
playing_stat['HTFormPts'] = playing_stat['HTFormPtsStr'].apply(get_form_points)
playing_stat['ATFormPts'] = playing_stat['ATFormPtsStr'].apply(get_form_points)
# Identify Win/Loss Streaks if any.
def get_3game_ws(string):
if string[-3:] == 'WWW':
return 1
else:
return 0
def get_5game_ws(string):
if string == 'WWWWW':
return 1
else:
return 0
def get_3game_ls(string):
if string[-3:] == 'LLL':
return 1
else:
return 0
def get_5game_ls(string):
    """1 if the form string is a perfect five-loss streak, else 0."""
    return int(string == 'LLLLL')
# Binary streak indicators for home (HT*) and away (AT*) teams.
playing_stat['HTWinStreak3'] = playing_stat['HTFormPtsStr'].apply(get_3game_ws)
playing_stat['HTWinStreak5'] = playing_stat['HTFormPtsStr'].apply(get_5game_ws)
playing_stat['HTLossStreak3'] = playing_stat['HTFormPtsStr'].apply(get_3game_ls)
playing_stat['HTLossStreak5'] = playing_stat['HTFormPtsStr'].apply(get_5game_ls)
playing_stat['ATWinStreak3'] = playing_stat['ATFormPtsStr'].apply(get_3game_ws)
playing_stat['ATWinStreak5'] = playing_stat['ATFormPtsStr'].apply(get_5game_ws)
playing_stat['ATLossStreak3'] = playing_stat['ATFormPtsStr'].apply(get_3game_ls)
playing_stat['ATLossStreak5'] = playing_stat['ATFormPtsStr'].apply(get_5game_ls)
# NOTE(review): the next line is a no-op (result discarded) — leftover debug?
playing_stat.keys()
# Get Goal Difference (goals scored minus goals conceded).
playing_stat['HTGD'] = playing_stat['HTGS'] - playing_stat['HTGC']
playing_stat['ATGD'] = playing_stat['ATGS'] - playing_stat['ATGC']
# Difference in points (cumulative and last-5 form).
playing_stat['DiffPts'] = playing_stat['HTP'] - playing_stat['ATP']
playing_stat['DiffFormPts'] = playing_stat['HTFormPts'] - playing_stat['ATFormPts']
# Diff in last year positions
# playing_stat['DiffLP'] = playing_stat['HomeTeamLP'] - playing_stat['AwayTeamLP']
# Scale cumulative features by matchweek to normalise across the season.
# NOTE(review): the comment above mentions DiffFormPts but it is NOT in
# `cols`, so it is not scaled — confirm whether that is intended.
cols = ['HTGD', 'ATGD', 'DiffPts', 'HTP', 'ATP']
playing_stat.MW = playing_stat.MW.astype(float)
for col in cols:
    playing_stat[col] = playing_stat[col] / playing_stat.MW
# SAVE TO CSV (loc is defined earlier in the file).
playing_stat.to_csv(loc + "final_dataset.csv")
| true |
c2f763f5da09ca26788c80c9cef4da6250589cdd | Python | Aasthaengg/IBMdataset | /Python_codes/p03496/s586749838.py | UTF-8 | 592 | 3.03125 | 3 | [] | no_license | n = int(input())
# Competitive-programming solution: output a sequence of "add a[x] to a[y]"
# operations that makes the array non-decreasing.  The array length `n` is
# read from stdin just above this line.
a = [int(item) for item in input().split()]
mina = min(a)
min_index = a.index(mina)
maxa = max(a)
max_index = a.index(maxa)
# Pick the element with the largest absolute value; its sign chooses the
# strategy below (everything is shifted to one sign first).
if abs(maxa) > abs(mina):
    sign = maxa
    sign_id = max_index
else:
    sign = mina
    sign_id = min_index
# Exactly n + (n-1) operations are printed: n "add dominant element" steps,
# then n-1 accumulation steps.
print(n + n-1)
if sign >= 0:
    # All elements become non-negative; a left-to-right prefix accumulation
    # then yields a non-decreasing sequence.
    for i in range(n):
        a[i] += sign
        print(sign_id+1, i+1)
    for i in range(n-1):
        a[i+1] += a[i]
        print(i+1, i+2)
else:
    # All elements become non-positive; a right-to-left suffix accumulation
    # then yields a non-decreasing sequence.
    for i in range(n):
        a[i] += sign
        print(sign_id+1, i+1)
    for i in range(n-1):
        a[n-i-2] += a[n-i-1]
        print(n-i, n-i-1)
7813c22185c44991d6cdee2bdbdfab674bd31743 | Python | reveriel/OpenPCDet | /pcdet/models/backbones_3d/timer.py | UTF-8 | 2,057 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | import time
import torch
def avg_onthefly(cnt, old_avg, new_val):
    """Running-mean update: fold new_val into old_avg as the cnt-th sample."""
    delta = new_val - old_avg
    return old_avg + delta / cnt
class dummyTimer():
    """No-op stand-in with the same interface as Timer, for disabling
    timing without touching call sites."""
    def __init__(self, cuda_sync=True):
        pass
    def start(self):
        pass
    def record(self, event):
        pass
    def end(self):
        pass
class Timer():
    """Collect running-average wall-clock timings for named events measured
    over repeated start()/record(...)/end() loops.

    With cuda_sync=True (default) torch.cuda.synchronize() is called before
    every timestamp so asynchronous GPU work is included in the timings.
    """

    def __init__(self, cuda_sync=True):
        self.cnt = 0            # number of loops started so far
        self.start_time = 0
        self.end_time = 0
        self.events = []        # event names in first-seen order (for printing)
        self.last_time = 0
        self.event_time = {}    # event name -> running-average duration [s]
        self.cuda_sync = cuda_sync

    @staticmethod
    def _running_avg(cnt, old_avg, new_val):
        # Incremental mean: fold new_val into old_avg as the cnt-th sample.
        return old_avg + (new_val - old_avg) / cnt

    def start(self):
        """Mark the beginning of a new timing loop."""
        if self.cuda_sync:
            torch.cuda.synchronize()
        self.cnt += 1
        self.start_time = self.last_time = time.time()

    def record(self, event):
        """Attribute the time since the last start()/record() to `event`.

        Assumes each event is recorded exactly once per loop, so the loop
        counter doubles as the per-event sample count.
        """
        if self.cuda_sync:
            torch.cuda.synchronize()
        now = time.time()
        elapsed = now - self.last_time
        self.last_time = now
        if event in self.event_time:
            # Seen before: fold into the running average.
            elapsed = self._running_avg(self.cnt, self.event_time[event], elapsed)
        else:
            # First occurrence of this event.  (Bug fix: the old code indexed
            # self.event_time[event] whenever cnt > 1, which raised KeyError
            # when a new event first appeared after the first loop.)
            self.events.append(event)
        self.event_time[event] = elapsed

    def end(self):
        """Close the loop, update the average total time and print a table."""
        if self.cuda_sync:
            torch.cuda.synchronize()
        now = time.time()
        total_time = now - self.start_time
        if self.cnt > 1:
            # self.total_time exists here because end() ran in earlier loops.
            total_time = self._running_avg(self.cnt, self.total_time, total_time)
        self.total_time = total_time
        print(" loop cnt = ", self.cnt)
        for e in self.events:
            print(e, "\t", end='')
        print("total")
        print()
        for e in self.events:
            print("{:5.5f}".format(self.event_time[e]), "\t", end='')
        print("{:5.5f}".format(self.total_time))
        print()
print()
if __name__ == '__main__':
    # Smoke test: four loops of three sleeps should report averages of
    # roughly 0.3s / 0.2s per event and ~1.0s total.
    timer = Timer(cuda_sync=False)
    for i in range(4):
        timer.start()
        time.sleep(0.3)
        timer.record("a")
        time.sleep(0.2)
        timer.record("b")
        time.sleep(0.5)
        timer.end()
| true |
00ebac7e2ba04f42bae5896749a52676c6cb1f75 | Python | pooja-pichad/ifelse | /show middle number.py | UTF-8 | 71 | 3.390625 | 3 | [] | no_license | num=int(input("enter a number"))
# Tens digit of `num`; for a 3-digit input this is the middle digit.
a=(num//10)%10
# Only non-negative inputs are answered.
# NOTE(review): for negative `num`, Python floor division makes (num//10)%10
# differ from the "middle digit" — presumably why negatives are guarded out.
if num>=0:
    print(a)
1123fedcff26fd5acd5785b308fd1214236188c3 | Python | qhuydtvt/C4E3 | /Assignment_Submission/thaonp/Scafe+Excel.py | UTF-8 | 1,302 | 3.046875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib.request
class DataRow:
    """One scraped table row: a label (`title`) plus its numeric cells."""
    def __init__(self, t, n):
        self.title = t
        self.numbers = n
    def print(self):
        # Dump the row to stdout for quick inspection.  The method name
        # shadows the builtin only as an attribute; calls below still
        # resolve to the builtin print().
        print("Title", self.title)
        print("Numbers", self.numbers)
# Scrape the income-statement table from cafef.vn and export it to an
# Excel sheet: one row per line item, label in column 0, numbers after it.
url = "http://s.cafef.vn/bao-cao-tai-chinh/VNM/IncSta/2015/3/0/0/ket-qua-hoat-dong-kinh-doanh-cong-ty-co-phan-sua-viet-nam.chn"
webpage = urllib.request.urlopen(url)
html = webpage.read()
html_string = html.decode("utf-8")
soup= BeautifulSoup(html_string,"html.parser")
table=soup.find("table",id="tableContent")
data_rows = []
# Only direct <tr>/<td> children are scanned (recursive=False) to skip
# nested tables.
tr_list= table.find_all("tr", recursive=False)
for tr in tr_list:
    tds = tr.find_all("td", recursive=False)
    title = str(tds[0].contents[0]).strip()
    numbers = [tds[i].contents[0] # cell payload to collect
               for i in range(1, len(tds) - 1) # all cells between label and last column
               if (len(tds[i].contents) > 0)] # skip empty cells
    data_row = DataRow(title, numbers)
    data_rows.append(data_row)
import xlwt
wb = xlwt.Workbook() #Create workbook object
ws = wb.add_sheet('cafef.xls')
for i in range(len(data_rows)):
    ws.write(i,0,data_rows[i].title)
    for j in range(len(data_rows[i].numbers)):
        ws.write(i,j+1,data_rows[i].numbers[j])
wb.save('scafef.xls')
7fe2888041555fd598b22ea76a65fe73c15427a4 | Python | midasgossye/A28_SVV | /main.py | UTF-8 | 17,032 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 19 14:07:00 2018
@author: A28
"""
# Imports
from math import *
import unittest
import scipy.integrate as integrate
import numpy as np
from matplotlib import pyplot as plt
from int_stress_and_defl import *
import internal_shear_and_moment as intsm
import total_shear_calc as totshear
# Global variables (geometry and loads of the aileron under analysis).
# NOTE: this file uses Python 2 syntax (xrange, print statements).
C_a = 0.515  # Chord length aileron [m]
l_a = 2.691  # Span of the aileron [m]
x_1 = 0.174  # x-location of hinge 1 [m]
x_2 = 1.051  # x-location of hinge 2 [m]
x_3 = 2.512  # x-location of hinge 3 [m]
x_a = 0.300  # Distance between actuator 1 and 2 [m]
h = 0.248  # Aileron height [m]
t_sk = 1.1 * 10 ** (-3)  # Skin thickness [m]
t_sp = 2.2 * 10 ** (-3)  # Spar thickness [m]
t_st = 1.2 * 10 ** (-3)  # Thickness of stiffener [m]
h_st = 1.5 * 10 ** (-2)  # Height of stiffener [m]
w_st = 3.0 * 10 ** (-2)  # Width of stiffener [m]
n_st = 11  # Number of stiffeners [-]
d_1 = 10.34 * 10 ** (-2)  # Vertical displacement hinge 1 [m]
d_3 = 20.66 * 10 ** (-2)  # Vertical displacement hinge 3 [m]
theta = 25  # Maximum upward deflection [deg]
P = 20.6 * 10 ** 3  # Load in actuator 2 [N]
q = 1.00 * 10 ** 3  # Net aerodynamic load [N/m]
G = 28 * 10 ** 9  # Shear modulus in Pa (28 GPa, source: http://asm.matweb.com/search/SpecificMaterial.asp?bassnum=ma2024t3)
n = 270  # sections to be analysed
datax = [0.175, 0.902, 1.052, 1.202, 2.513]  # spanwise x-stations of interest [m]
dataptx = []  # section indices corresponding to datax
for i in xrange(len(datax)):
    dataptx.append(int(datax[i] / l_a * n))
print dataptx, "<--- positions are from 1000 sections"
E = 71 * 10 ** 9  # Young's modulus [Pa]
# functions
# Material cross-sectional areas of the aileron components.
def cross_section(ha, ca, tskin, tspar, stiffener_amount, w_stiffener, t_stiffener, h_stiffener):
    """Return (cshape, spar, triangle, stiffeners) skin/wall areas in m^2.

    ha/ca are the aileron height and chord; tskin/tspar the skin and spar
    thickness; the remaining arguments describe the stiffener geometry.
    """
    # Semi-circular leading edge: outer half-disc minus inner half-disc.
    outer_r = ha / 2
    inner_r = (ha - 2 * tskin) / 2
    cshape = 0.5 * pi * outer_r ** 2 - 0.5 * pi * inner_r ** 2
    # Vertical spar web spanning between the two skins.
    spar = tspar * (ha - 2 * tskin)
    # Trailing-edge triangle: outer triangle minus the hollow inner triangle.
    triangle = (0.5 * ha * (ca - 0.5 * ha)
                - 0.5 * (ha - 2 * tskin) * (ca - 0.5 * ha - tskin))
    # L/Z-stiffeners: flange area plus web area, times the stiffener count.
    single_stiffener = w_stiffener * t_stiffener + (h_stiffener - t_stiffener) * t_stiffener
    stiffeners = stiffener_amount * single_stiffener
    return cshape, spar, triangle, stiffeners
# Enclosed cell areas of the two-cell aileron cross-section.
def enc_area(ha, ca, tskin):
    """Return (A_1, A_2) in m^2: the semi-circular (leading-edge) cell and
    the triangular (trailing-edge) cell enclosed areas."""
    semicircle_radius = (ha - 1 * tskin) / 2
    A_1 = 0.5 * pi * semicircle_radius ** 2                   # half-disc
    A_2 = 0.5 * (ha - 1 * tskin) * (ca - 0.5 * ha - tskin)    # triangle
    return A_1, A_2
# Stiffener placement along the cross-section perimeter.
# returns stiffener z,y locations and rotation
# return z_y_angle_coords # [(stringer0 z,y,rot),(stringer1 z,y,rot)] m,m,rad
def stif_loc(h, t_sk, n_st):
    """Place (n_st) stiffeners at equal perimeter spacing along the upper
    half of the cross-section and mirror them to the lower half.

    Walks from the leading edge: positions inside the semi-circular nose get
    an angle-based (z, y); positions past it lie on the straight tail skin.
    Returns [(z, y, rot_angle), ...] in m, m, rad.
    NOTE(review): reads the module-level chord C_a rather than a parameter,
    and iterates a hard-coded range(6) = (n_st + 1) / 2 upper-half stations.
    """
    circle_perim = 0.5 * pi * (0.5 * h - t_sk)
    total_perimeter = circle_perim + sqrt((0.5 * h - t_sk) ** 2 + (C_a - 0.5 * h - t_sk) ** 2)  # m
    spacing = total_perimeter / ((n_st + 1) / 2)
    z_y_angle_coords = []
    for i in xrange(6):
        local_spacing = i * spacing
        if local_spacing < circle_perim:
            # On the semi-circular nose: convert arc length to an angle.
            angle = (local_spacing / circle_perim) * radians(90)
            z_coordinate = -1 * (0.5 * h - (0.5 * h - t_sk + cos(angle) * (0.5 * h - t_sk)))
            y_coordinate = sin(angle) * (0.5 * h - t_sk)
            rot_angle = angle + radians(90)
        else:
            # On the straight trailing-edge skin.
            rot_angle = atan(0.5 * h / (C_a - 0.5 * h)) - radians(180)
            z_coordinate = (-1) * (local_spacing - circle_perim) * cos(atan(0.5 * h / (C_a - 0.5 * h)))
            y_coordinate = h / 2 - (local_spacing - circle_perim) * sin(atan(0.5 * h / (C_a - 0.5 * h)))
        apnd_itm = (z_coordinate, y_coordinate, rot_angle)
        z_y_angle_coords.append(apnd_itm)
        if i > 0:
            # Mirror to the lower half (leading-edge stiffener i == 0 lies
            # on the symmetry axis and is not duplicated).
            apnd_itm = (z_coordinate, -y_coordinate, -rot_angle)
            z_y_angle_coords.append(apnd_itm)
        # print "Stif.", i, "\t z:", z_coordinate, "\t y:", y_coordinate, "\t angle:", degrees(rot_angle)
    return z_y_angle_coords  # [(stringer0 z,y,rot),(stringer1 z,y,rot), ...]
# Thin-walled single-cell torsional constant.
def torsional_constant(h, t_sk, C_a):
    """Return J = 4*Ae^2*t / p for the closed cross-section (thin-walled
    approximation), with Ae the total enclosed area and p the wall mid-line
    perimeter."""
    # Mid-line perimeter of the semi-circular nose wall.
    nose_perimeter = pi * (0.5 * h - 0.5 * t_sk)
    # Mid-line perimeter of the two straight trailing-edge walls.
    tail_perimeter = 2 * (
        sqrt((0.5 * h - t_sk) ** 2 + (C_a - 0.5 * h - t_sk) ** 2) - 0.5 * t_sk)
    perimeter = nose_perimeter + tail_perimeter
    # Total enclosed area = nose cell + tail cell.
    area_nose, area_tail = enc_area(h, C_a, t_sk)
    enclosed = area_nose + area_tail
    return (4 * enclosed ** 2 * t_sk) / perimeter
# Rotate second moments of area into a rotated axis system (Mohr's circle).
def axis_transformation(I_zz, I_yy, I_zy, rot_angle):
    """Return (I_uu, I_vv, I_uv): the inertias about axes rotated by
    rot_angle [rad] with respect to the z-y axes."""
    mean = (I_zz + I_yy) * 0.5
    half_diff = (I_zz - I_yy) * 0.5
    c2 = cos(2 * rot_angle)
    s2 = sin(2 * rot_angle)
    I_uu = mean + half_diff * c2 - I_zy * s2
    I_vv = mean - half_diff * c2 + I_zy * s2
    I_uv = half_diff * s2 + I_zy * c2
    return I_uu, I_vv, I_uv
# function to calculate MMOI
# input stiffener data as z_y_angle_coords, thickness skin t_st, height stiffeners h_st, width stiffener w_st,
# thickness spar t_sp,aileron height h, maximum upward deflection theta
# returns IZZ in body ref: TOT_I_zz_br, IYY body ref: TOT_I_yy_br, IZZ: TOT_I_zz, IYY: TOT_I_yy, IZY: TOT_I_zy
def moment_of_inertia(z_y_angle_coords, t_st, h_st, w_st, t_sp, h, theta):
    """Total second moments of area (m^4) of the cross-section: stiffeners
    (rotated + parallel-axis), semi-circular skin, triangular skin and spar,
    summed in the body frame and then rotated by theta into the main frame.

    NOTE(review): theta is the global deflection constant (25, in DEGREES),
    but axis_transformation applies it as radians — confirm the units.
    Also reads the module globals C_a and t_sk in addition to its params.
    """
    #
    # Calculate Inertias for simple beam axis system
    # |
    # |  ^ (y)
    # ------- <--| (z)
    # === Determine base and height values of inv-T beam rectangles
    b_1 = w_st
    h_1 = t_st
    b_2 = t_st
    h_2 = h_st - t_st
    # ===
    # === Calculate individual I_zz and I_yy and sum steiner term
    I_zz_1 = (b_1 * (h_1 ** 3)) / 12 + b_1 * h_1 * ((t_st * 0.5) ** 2)
    I_yy_1 = ((b_1 ** 3) * h_1) / 12
    I_yy_2 = ((b_2 ** 3) * h_2) / 12
    I_zz_2 = (b_2 * (h_2 ** 3) / 12) + b_2 * h_2 * ((h_2 * 0.5 + t_st) ** 2)
    # ===
    # === BASE INERTIAS AND AREA FOR INVERSE-T BEAM
    I_zz = I_zz_1 + I_zz_2
    I_yy = I_yy_1 + I_yy_2
    I_zy = 0
    A_st = w_st * t_st + t_st * (h_st - t_st)
    # ===
    TOT_I_zz_br = 0
    TOT_I_yy_br = 0
    TOT_I_zy_br = 0
    for coords in z_y_angle_coords:
        z_coord, y_coord, rot_angle = coords  # Get z,y and rotation angle for each stiffener
        stiff_I_zz, stiff_I_yy, stiff_I_zy = axis_transformation(I_zz, I_yy, I_zy,
                                                                 rot_angle)  # perform inertia axis angle transformation
        I_zz_body_ref = stiff_I_zz + A_st * (y_coord ** 2)  # Apply parallel axis theorem
        I_yy_body_ref = stiff_I_yy + A_st * (z_coord ** 2)  # Apply parallel axis theorem
        I_zy_body_ref = stiff_I_zy + A_st * y_coord * z_coord  # Apply parallel axis theorem
        # === SUM ALL STIFFENER MOMENTS OF INERTIA's W.R.T. BODY REFERENCE SYSTEM
        # NOTE: TOTAL I_zy inertia should be zero, because total cross-section has an axis of symmetry
        # If calculated TOTAL I_zy is NOT equal to zero, there is an error in the computation
        TOT_I_zz_br += I_zz_body_ref
        TOT_I_yy_br += I_yy_body_ref
        TOT_I_zy_br += I_zy_body_ref  # Should be zero, if not => check values!
    # === Semi_circle Moment of inertia (numerical integration over the arc):
    I_zz_s_circ = integrate.quad(lambda x: t_sk * ((0.5 * h * sin(x)) ** 2) * 0.5 * h, -pi / 2, pi / 2)[0]
    I_yy_s_circ = I_zz_s_circ
    TOT_I_zz_br += I_zz_s_circ
    TOT_I_yy_br += I_yy_s_circ
    # ===
    # === Triangle skin moment of inertia (two inclined skin panels)
    a = sqrt((0.5 * h - t_sk) ** 2 + (C_a - 0.5 * h - t_sk) ** 2)
    angle = atan(0.5 * h / (C_a - 0.5 * h))
    I_zz_t = ((a ** 3 * t_sk * (sin(angle)) ** 2) / 12 + a * t_sk * (0.25 * (h - t_sk)) ** 2) * 2
    # print angle, I_zz_t
    I_yy_t = 2 * ((a ** 3 * t_sk * (cos(angle)) ** 2) / 12) + 2 * a * t_sk * ((C_a - 0.5 * h - t_sk) * 0.5) ** 2
    TOT_I_zz_br += I_zz_t
    TOT_I_yy_br += I_yy_t
    # ===
    # === Spar Moment of Inertia
    I_zz_spar = (t_sp * (h - 2 * t_sk) ** 3) / 12
    # I_yy of spar is negligible since you have a t^3 term if using the thin walled approx.
    # NOTE: t/h << 1
    TOT_I_zz_br += I_zz_spar
    # ===
    # === Transform Inertias from Body Reference system to Main Reference system
    TOT_I_zz, TOT_I_yy, TOT_I_zy = axis_transformation(TOT_I_zz_br, TOT_I_yy_br, TOT_I_zy_br, theta)
    # ===
    # Returns I_zz and I_yy in our OWN DEFINED BODY REFERENCE SYSTEM, followed by the I_zz, I_yy and I_zy in the main reference system
    # NOTE: All reported values are in m^4
    return TOT_I_zz_br, TOT_I_yy_br, TOT_I_zz, TOT_I_yy, TOT_I_zy
# calculates boom area
# input stiffener loaction, thickness stiffeners, height stiffeners,width stiffeners, thickness spar, height
# outputs boom_arear of booms in a list b_i_arr, b_i_spar
def boom_area_calc(stif_loc, t_st, h_st, w_st, t_sp, h):
    """Idealise the cross-section into booms: each stiffener area plus the
    adjacent-skin contribution weighted by the stress ratio of neighbouring
    booms (B = A_st + t*b/6 * (2 + sigma_other/sigma_self)).

    NOTE(review): the parameter `stif_loc` shadows the stif_loc() function;
    here it is the list of (z, y, rot) tuples that function returns.
    Also reads the module globals t_sk, C_a and n_st.
    """
    A_st = w_st * t_st + (h_st - t_st) * t_st
    circle_perim = 0.5 * pi * (0.5 * h - t_sk)
    total_perimeter = circle_perim + sqrt((0.5 * h - t_sk) ** 2 + (C_a - 0.5 * h - t_sk) ** 2)  # m
    spacing = total_perimeter / ((n_st + 1) / 2)
    B_i_arr = []
    for i in xrange(n_st):
        if i == 0:
            # Leading-edge boom: ratio against its single neighbour.
            sigma_ratio = (stif_loc[i][1]) / (stif_loc[i + 1][1])
            B_i = A_st + ((t_sk * spacing) / 6) * (2 + sigma_ratio)
            B_i_arr.append(B_i)
        elif i == (n_st - 2):
            # NOTE(review): appended twice — presumably to cover the last
            # symmetric pair in one step; confirm against the boom layout.
            sigma_ratio = (stif_loc[i - 2][1]) / (stif_loc[i][1])
            B_i = A_st + ((t_sk * spacing) / 6) * (2 + sigma_ratio)
            B_i_arr.append(B_i)
            B_i_arr.append(B_i)
        elif i < (n_st - 2):
            sigma_ratio = (stif_loc[i][1]) / (stif_loc[i + 2][1])
            B_i = A_st + ((t_sk * spacing) / 6) * (2 + sigma_ratio)
            B_i_arr.append(B_i)
    # Spar end-cap boom: opposite ends carry equal/opposite stress (ratio -1).
    sigma_ratio = -1
    B_spar_end = ((t_sp * (h - 2 * t_sk)) / 6) * (2 + sigma_ratio)
    # returns an array with all stiffener boom area's and the value of the spar end_cap area
    # 0-th boom is boom at LE, 1st
    return B_i_arr, B_spar_end
def plot_numerical_bending(Inertia_bend, loc_data, moment_data, E):
    """Numerically double-integrate -M/(E*I) (trapezoid rule) to obtain the
    bending deflection, plot it against x, and return (x, deflection).

    NOTE(review): several issues to confirm:
      * np.arange(-0.1, -0.1, 0.001) is EMPTY, so the boundary-condition
        search loops below never execute and best_value stays (0.0, 0.0);
      * c_1/c_2 are then overwritten with hard-coded values anyway;
      * the second integration pass uses the module globals I_bend and
        x_coor instead of the Inertia_bend/loc_data parameters;
      * hard-coded section counts 270/269 assume n == 270.
    """
    # This function can calculate the numerical bending defelction both in y and z directions
    # Parameters: Inertia refernced to bending direction, location data in x-direction, Moments [Nm], Young's Modulus [Pa]
    least_error = 100
    best_value = (0.0, 0.0)
    # The following for-loops will try to find the best fitting integration constants (c_1 and c_2) to match the deflection to the boundary conditions
    for c_1 in np.arange(-0.1, -0.1, 0.001):
        for c_2 in np.arange(-0.1, -0.1, 0.001):
            # === Initialise first integration
            Int = 0
            Int_arr = np.array([])
            # ===
            for i in xrange(270):
                x_1 = loc_data[i]  #
                x_2 = loc_data[i + 1]
                m_x_1 = (-1 * moment_data[i]) / (E * Inertia_bend)
                m_x_2 = (-1 * moment_data[i + 1]) / (E * Inertia_bend)
                dx = x_2 - x_1
                Int += dx * (m_x_1 + m_x_2) / 2 + c_1
                Int_arr = np.append(Int_arr, Int)
            Int_2 = 0
            Int_2_arr = np.array([])
            for i in xrange(269):
                x_1 = loc_data[i]
                x_2 = loc_data[i + 1]
                y_1 = Int_arr[i]
                y_2 = Int_arr[i + 1]
                dx = x_2 - x_1
                Int_2 += dx * (y_1 + y_2) / 2 + c_2
                Int_2_arr = np.append(Int_2_arr, Int_2)
            print "c1:", c_1, "\tc2:", c_2
            print Int_2_arr[16], "\t", Int_2_arr[105]
            max_defl = max(Int_2_arr)
            error = abs(Int_2_arr[16] + max_defl + (0.1034 / 2.54) / 2) + abs(
                Int_2_arr[251] + max_defl + (0.2066 / 2.54)) / 2  # + abs(Int_2_arr[105])/3
            # + abs(Int_2_arr[105])/3
            if error < least_error:
                best_value = (c_1, c_2)
                least_error = error
    print best_value
    c_1 = best_value[0]
    c_2 = best_value[1]
    # Hard-coded integration constants override the (never-run) search above.
    c_1 = 0.002
    c_2 = -0.003
    # c_1 = 0.0011
    # c_2 = -0.001
    Int = 0
    Int_arr = np.array([])
    for i in xrange(270):
        x_1 = loc_data[i]
        x_2 = loc_data[i + 1]
        m_x_1 = (-1 * moment_data[i]) / (float(E * I_bend))
        m_x_2 = (-1 * moment_data[i + 1]) / (float(E * I_bend))
        print I_bend
        dx = x_2 - x_1
        Int += dx * (m_x_1 + m_x_2) / 2 + c_1
        Int_arr = np.append(Int_arr, Int)
    Int_2 = 0
    Int_2_arr = np.array([])
    for i in xrange(269):
        x_1 = x_coor[i]
        x_2 = x_coor[i + 1]
        y_1 = Int_arr[i]
        y_2 = Int_arr[i + 1]
        dx = x_2 - x_1
        Int_2 += dx * (y_1 + y_2) / 2 + c_2
        Int_2_arr = np.append(Int_2_arr, Int_2)
    max_defl = max(Int_2_arr)
    print "c1:", c_1, "\tc2:", c_2
    print Int_2_arr[16], "\t", Int_2_arr[105]
    plt.grid()
    # Reverse the deflection array (plot runs from tip back to root).
    Int_2_arr_inv = np.array([])
    for i in xrange(len(Int_2_arr) - 1, -1, -1):
        Int_2_arr_inv = np.append(Int_2_arr_inv, Int_2_arr[i])
    print max_defl
    # Magic offsets below presumably align the curve with the hinge boundary
    # conditions — TODO confirm their origin.
    plot_arr = Int_2_arr_inv + max_defl + 0.088962 - 0.00077
    plt.plot(x_coor[:269], -Int_2_arr_inv + max_defl + 0.22479 - 0.359795)  # +1*max_defl+0.098)
    plt.xlabel("x-coordinate [m]")
    plt.ylabel("z-deflection [m]")
    plt.show()
    return x_coor[:269], plot_arr
# B_i_arr[0]
# Print the section inertias, then run the bending-deflection computation on
# the moment distribution loaded from M_y.txt and dump the result to disk.
print "Moments: (I_z'z', I_y'y', I_zz, I_yy, I_zy) All in m^4"
print moment_of_inertia(stif_loc(h, t_sk, n_st), t_st, h_st, w_st, t_sp, h, theta)
Moment_data = np.genfromtxt("M_y.txt")
x_coor = Moment_data[:, 0]
I_bend = 6.385385647322895e-05  # bending inertia used for the deflection [m^4]
M_x = Moment_data[:, 1]
x_coor, plot_arr = plot_numerical_bending(I_bend, x_coor, M_x, E)
file_n = open("defl_data_z.txt", "w")
for i in xrange(len(plot_arr)):
    file_n.write(str(plot_arr[i]))
    file_n.write("\n")
file_n.close()
# print "Moments: (I_z'z', I_y'y', I_zz, I_yy, I_zy) All in m^4"
# print moment_of_inertia(stif_loc(h, t_sk, n_st), t_st, h_st, w_st, t_sp, h, theta)
# main
stif_data = stif_loc(h, t_sk, n_st)  # initialize stiffener properties
# Boom idealization
b_r = []  # list for the resultant boom areas
# calculating the stiffeners' total boom area
b_sp = []
b_r, b_sp = boom_area_calc(stif_data, t_st, h_st, w_st, t_sp,
                           h)  # b_r is the list off stiffener boom areas, b_sp for spar(single value in m^2)
J = torsional_constant(h, t_sk, C_a)
# crosssection
A = sum(cross_section(h, C_a, t_sk, t_sp, n_st, w_st, t_st, h_st))  # A is sum of cross section
I_zz_br, I_yy_br, I_zz, I_yy, I_zy = moment_of_inertia(stif_data, t_st, h_st, w_st, t_sp, h,
                                                       theta)  # values for the MMOI
enclosed = sum(enc_area(h, C_a, t_sk))  # enclosed area size
model = []  # whole model
section_length = l_a / n  # spanwise length of one analysed section [m]
verifdata = []  # selected shear-flow values collected per section
qribdata = []
def iteration(section_number):
    """Analyse one spanwise section: evaluate the internal loads at the
    section midpoint and append selected shear-flow components to the
    module-level `verifdata` list.

    Relies on module globals (section_length, I_zz, I_zz_br, I_yy_br and
    the geometry constants).  Returns nothing.
    """
    x_start = section_number * section_length
    mid = x_start + section_length / 2
    M, V_y, V_z, V_ypr, V_zpr = intsm.internal(mid, I_zz)
    stif_data = stif_loc(h, t_sk, n_st)
    bir, bisp = boom_area_calc(stif_data, t_st, h_st, w_st, t_sp, h)
    totshearvalue, qrib = totshear.totalshear(stif_data, V_zpr, V_ypr, bir, bisp, I_zz_br, I_yy_br)
    # Keep only the shear-flow entries used for verification.
    verifdata.append(totshearvalue[7])
    verifdata.append(totshearvalue[9])
    verifdata.append(totshearvalue[0])
    verifdata.append(totshearvalue[5])
    # qribdata.append(qrib[0])
    # qribdata.append(qrib[1])
    # print "section: ",section_number,"at x: ", mid
    # print totshearvalue
    # print qrib
    return
# Run the verification at every station of interest, then once more at the
# mid-span section.
for i in xrange(len(dataptx)):
    iteration(dataptx[i])
verifdata += qribdata
print verifdata
# print I_zz_br, I_yy_br
# x_start = 0 * section_length
# mid = x_start + section_length / 2
# M, V_y, V_z, V_ypr, V_zpr = intsm.internal(mid, I_zz)
#
# stif_data = stif_loc(h, t_sk, n_st)
# bir, bisp = boom_area_calc(stif_data, t_st, h_st, w_st, t_sp, h)
# totshearvalue = totshear.totalshear(stif_data, V_zpr, V_ypr, bir, bisp, I_zz_br, I_yy_br)
# print totshearvalue
x_start = n / 2 * section_length
mid = x_start + section_length / 2
M, V_y, V_z, V_ypr, V_zpr = intsm.internal(mid, I_zz)
stif_data = stif_loc(h, t_sk, n_st)
bir, bisp = boom_area_calc(stif_data, t_st, h_st, w_st, t_sp, h)
# NOTE(review): iteration() unpacks TWO return values from totalshear but
# this call binds only one — one of the two usages must be wrong.
totshearvalue = totshear.totalshear(stif_data, V_zpr, V_ypr, bir, bisp, I_zz_br, I_yy_br)
# print totshearvalue
# for y in xrange(n):
#     model.append(iteration(y))
| true |
e12149b243f750c4c237b2d103ac664a7a40c35e | Python | mahadkhanleghari/corona-finance | /data_preprocessing/get_data.py | UTF-8 | 464 | 2.6875 | 3 | [] | no_license | import pandas as pd
from datetime import datetime
def daily_report(date):
    """Download the Johns Hopkins CSSE COVID-19 daily report for `date`
    (formatted 'MM-DD-YYYY') and return it as a pandas DataFrame."""
    base_url = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
                'csse_covid_19_data/csse_covid_19_daily_reports/{date}.csv')
    return pd.read_csv(base_url.format(date=date))
if __name__ == "__main__":
    # Smoke test: fetch one day's report and persist it locally.
    # NOTE(review): the output path is user-specific; consider a relative path.
    date = "03-23-2020"
    df = daily_report(date)
    df.to_csv("/Users/mahadafzal/corona/test_data.csv")
    print(df.describe())  # bug fix: the closing parenthesis was missing,
    # which made the whole file a SyntaxError.
    print(df.head())
| true |
2368633414d1489c071aa0cb3df0194490931773 | Python | HeHisHim/rocket_game | /game_functions.py | UTF-8 | 9,238 | 2.625 | 3 | [] | no_license | import sys
import pygame
from bullet import Bullet
from alien import Alien
from time import sleep
import json
def check_keydown_events(event, ship, alien_settings, screen, bullets, stats, play_button, aliens, sb):
    """Respond to key presses: arrows move, space fires, q saves the high
    score and quits, w/e toggle a super-wide/normal bullet, p starts a game."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = True
    elif event.key == pygame.K_LEFT:
        ship.moving_left = True
    elif event.key == pygame.K_SPACE:
        # Create a bullet and add it to the bullets group.
        fire_bullet(ship, alien_settings, screen, bullets)
    elif event.key == pygame.K_q:
        check_high_score(stats, sb)
        sys.exit()
    elif event.key == pygame.K_w:
        # Cheat: screen-wide bullet.
        alien_settings.bullet_width = 600
    elif event.key == pygame.K_e:
        # Restore the normal bullet width.
        alien_settings.bullet_width = 3
    elif event.key == pygame.K_p:
        start_game(alien_settings, screen, stats, play_button, ship, aliens, bullets, sb)
def check_keyup_events(event, ship, alien_settings, screen, bullets):
    """Respond to key releases: stop the ship's continuous movement."""
    if event.key == pygame.K_RIGHT:
        ship.moving_right = False
    elif event.key == pygame.K_LEFT:
        ship.moving_left = False
def check_events(alien_settings, screen, stats, play_button, ship, aliens, bullets, sb):
    """Respond to key presses and mouse events (the main event-loop pump)."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        # On key press, set the movement flag so the ship keeps moving.
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, ship, alien_settings, screen, bullets, stats, play_button, aliens, sb)
        # On key release, clear the movement flag so the ship stops.
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship, alien_settings, screen, bullets)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(alien_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y, sb)
def update_screen(alien_settings, screen, stats, sb, ship, aliens, bullets, play_button):
    """Redraw everything for the current frame and flip the display."""
    # Repaint the background on every pass through the loop.
    screen.fill(alien_settings.bg_color)
    # Draw all bullets.
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    # Draw the ship.
    ship.blitme()
    # Draw the whole alien fleet.
    for alien in aliens.sprites():
        alien.blitme()
    # Show the scoreboard.
    sb.show_score()
    # Draw the Play button (and re-show the cursor) only while inactive.
    if not stats.game_active:
        play_button.draw_button()
        pygame.mouse.set_visible(True)
    # Make the most recently drawn screen visible.
    pygame.display.flip()
def update_bullets(alien_settings, screen, stats, sb, ship, aliens, bullets):
    """Advance all bullets, drop the ones that left the screen, and handle
    bullet/alien collisions."""
    bullets.update()
    # Iterate over a copy so bullets can be removed while looping.
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    # Check whether any bullet hit an alien; if so, remove both.
    check_bullet_alien_collisions(alien_settings, screen, stats, sb, ship, aliens, bullets)
def fire_bullet(ship, alien_settings, screen, bullets):
    """Fire a new bullet if the on-screen bullet limit has not been reached."""
    if len(bullets) < alien_settings.bullets_allowed:
        new_bullet = Bullet(alien_settings, screen, ship)
        bullets.add(new_bullet)
def create_fleet(alien_settings, screen, ship, aliens):
    """Create the full fleet of aliens, spaced one alien-width apart."""
    # Create one alien to measure how many fit per row and how many rows fit.
    alien = Alien(alien_settings, screen)
    # alien_width = alien.rect.width
    # available_space_x = alien_settings.screen_width - 2 * alien_width
    # number_aliens_x = int(available_space_x / (2 * alien_width))
    number_aliens_x = get_number_aliens_x(alien_settings, alien.rect.width)
    number_rows = get_number_rows(alien_settings, ship.rect.height, alien.rect.height)
    # Build the fleet row by row.
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            # Create an alien and place it in the current row.
            # alien = Alien(alien_settings, screen)
            # alien.x = alien_width + 2 * alien_width * alien_number
            # alien.rect.x = alien.x
            # aliens.add(alien)
            create_alien(alien_settings, screen, aliens, alien_number, row_number)
def get_number_aliens_x(alien_settings, alien_width):
    """How many aliens fit in one row, leaving one alien-width margin on
    each side and one alien-width gap between neighbours."""
    usable_width = alien_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def create_alien(alien_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it at (alien_number, row_number) in the
    grid: one alien-width/height margin plus two per grid step."""
    alien = Alien(alien_settings, screen)
    alien_width = alien.rect.width
    alien.x = alien_width + 2 * alien_width * alien_number
    alien.rect.x = alien.x
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)
def get_number_rows(alien_settings, ship_height, alien_height):
    """How many alien rows fit vertically, reserving two alien heights of
    clearance above the ship plus a one-alien-height top margin."""
    usable_height = alien_settings.screen_height - (3 * alien_height) - ship_height
    return int(usable_height / (2 * alien_height))
def check_fleet_edges(alien_settings, aliens):
    """If any alien has reached a screen edge, drop and reverse the fleet."""
    for alien in aliens.sprites():
        if alien.check_edges():
            change_fleet_direction(alien_settings, aliens)
            break
def change_fleet_direction(alien_settings, aliens):
    """Drop the whole fleet by fleet_drop_speed pixels and flip its
    horizontal travel direction."""
    for member in aliens.sprites():
        member.rect.y += alien_settings.fleet_drop_speed
    alien_settings.fleet_direction *= -1
def update_aliens(alien_settings, stats, sb, screen, ship, aliens, bullets):
    """Check for fleet edge contact, advance all aliens, and handle
    alien-ship and alien-bottom collisions."""
    check_fleet_edges(alien_settings, aliens)
    aliens.update()
    # Detect alien/ship collisions.
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(alien_settings, stats, sb, screen, ship, aliens, bullets)
    check_aliens_bottom(alien_settings, stats, sb, screen, ship, aliens, bullets)
def check_bullet_alien_collisions(alien_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to bullet/alien collisions: score hits, and when the fleet is
    empty, level up and spawn a new fleet."""
    # Remove any bullets and aliens that have collided (both True flags).
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        # NOTE(review): this loop rebinds the `aliens` parameter to a list
        # of hit aliens, so the `len(aliens)` check below looks at the wrong
        # object on any frame with collisions — the fleet respawn is delayed
        # by one frame.  Consider a different loop variable name.
        for aliens in collisions.values():
            stats.score += alien_settings.alien_points * len(aliens)
            sb.prep_score()
        check_high_score(stats, sb)
    # Fleet destroyed: clear bullets, speed up the game, level up, respawn.
    if 0 == len(aliens):
        bullets.empty()
        alien_settings.increase_speed()
        # Advance the level.
        start_new_level(stats, sb)
        create_fleet(alien_settings, screen, ship, aliens)
def ship_hit(alien_settings, stats, sb, screen, ship, aliens, bullets):
    """Respond to the ship being hit: spend a life (ending the game on the
    last one), reset the fleet, recentre the ship and pause briefly."""
    # Decrement ships_left; the game ends when the last ship is lost.
    if stats.ships_left > 1:
        stats.ships_left -= 1
        # Refresh the scoreboard's ship counter.
        sb.prep_ships()
    else:
        stats.ships_left -= 1
        # Refresh the scoreboard's ship counter.
        sb.prep_ships()
        stats.game_active = False
    # Clear the alien and bullet groups.
    aliens.empty()
    bullets.empty()
    # Spawn a fresh fleet and recentre the ship at the bottom of the screen.
    create_fleet(alien_settings, screen, ship, aliens)
    ship.center_ship()
    # Short pause so the player registers the hit.
    sleep(0.5)
def check_aliens_bottom(alien_settings, stats, sb, screen, ship, aliens, bullets):
    """Check whether any alien reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat it the same as the ship being hit.
            ship_hit(alien_settings, stats, sb, screen, ship, aliens, bullets)
            break
def check_play_button(alien_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y, sb):
    """Start a new game when the player clicks Play while the game is idle."""
    if play_button.rect.collidepoint(mouse_x, mouse_y) and not stats.game_active:
        start_game(alien_settings, screen, stats, play_button, ship, aliens, bullets, sb)
def start_game(alien_settings, screen, stats, play_button, ship, aliens, bullets, sb):
    """Reset statistics, settings, scoreboard and sprites, then start play."""
    stats.game_active = True
    try:
        load_high_score(stats)
    except FileNotFoundError:
        # No saved high score yet — keep the default.
        pass
    stats.reset_stats()
    # Reset the scoreboard images.
    sb.prep_images()
    # sb.show_score()
    # Reset the dynamic game settings (speeds, points).
    alien_settings.initialize_dynamic_settings()
    # Hide the mouse cursor during play.
    pygame.mouse.set_visible(False)
    # Clear the alien and bullet groups.
    aliens.empty()
    bullets.empty()
    # Spawn a new fleet and recentre the ship.
    create_fleet(alien_settings, screen, ship, aliens)
    ship.center_ship()
def check_high_score(stats, sb):
    """Check for a new high score; if set, refresh the display and persist it."""
    if stats.score > stats.high_score:
        stats.high_score = stats.score
        sb.prep_high_score()
        dump_high_score(stats)
def start_new_level(stats, sb):
    """Advance to the next level and refresh the level readout."""
    stats.level = stats.level + 1
    sb.prep_level()
def load_high_score(stats):
    """Read the saved high score (a JSON value) from the configured file
    into stats.high_score."""
    with open(stats.json_high_score) as handle:
        stats.high_score = json.load(handle)
def dump_high_score(stats):
    """Persist stats.high_score as JSON to the configured scores file."""
    # Open in write mode so the previous score is overwritten.
    with open(stats.json_high_score, "w") as handle:
        json.dump(stats.high_score, handle)
def show_high_score_first(stats, sb):
    """At startup, load any saved high score and render it on the scoreboard."""
    try:
        load_high_score(stats)
    except FileNotFoundError:
        # No saved score yet — show the default.
        pass
    sb.prep_high_score()
| true |
073196b2431f88366f9b7be8475bf71da6769df6 | Python | Hadryan/Music-Player-with-Recommendation | /dataset.py | UTF-8 | 2,822 | 2.71875 | 3 | [] | no_license | #
# Original source code from:
# https://github.com/pytorch/text/blob/bcb9104680eb9dc978a6bbcc2b9ca46cf2bdbed9/torchtext/datasets/text_classification.py#L31
# is modified for this project.
#
import logging
import torch
import io
from torchtext.datasets import TextClassificationDataset
from torchtext.utils import download_from_url, extract_archive, unicode_csv_reader
from torchtext.data.utils import ngrams_iterator
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.vocab import Vocab
from tqdm import tqdm
def _csv_iterator(data_path, ngrams, yield_cls=False):
    """Tokenize column 1 of each CSV row into ngrams; with yield_cls also
    yield a binary label derived from column 4 (1 iff its value == 3)."""
    tokenizer = get_tokenizer("basic_english")
    with io.open(data_path, encoding="utf8") as f:
        reader = unicode_csv_reader(f)
        for row in reader:
            # Assumes the text lives in the second column — TODO confirm schema.
            tokens = row[1]
            tokens = tokenizer(tokens)
            if yield_cls:
                # Binarize the label: class 3 -> 1, everything else -> 0.
                yield 1 if int(row[4]) == 3 else 0, ngrams_iterator(tokens, ngrams)
            else:
                yield ngrams_iterator(tokens, ngrams)
def _create_data_from_iterator(vocab, iterator, include_unk):
    """Convert (label, token-iterator) pairs into (label, tensor of vocab
    ids); returns the list of pairs plus the set of labels seen."""
    data = []
    labels = []
    with tqdm(unit_scale=0, unit='lines') as t:
        for cls, tokens in iterator:
            if include_unk:
                tokens = torch.tensor([vocab[token] for token in tokens])
            else:
                # Drop out-of-vocabulary tokens instead of mapping to <unk>.
                # NOTE(review): comparing ids with `is not Vocab.UNK` mirrors
                # the upstream torchtext code — verify it filters as intended.
                token_ids = list(filter(lambda x: x is not Vocab.UNK, [vocab[token]
                                                                       for token in tokens]))
                tokens = torch.tensor(token_ids)
            if len(tokens) == 0:
                logging.info('Row contains no tokens.')
            data.append((cls, tokens))
            labels.append(cls)
            t.update(1)
    return data, set(labels)
def _setup_datasets(train_csv_path, test_csv_path, ngrams=1, vocab=None, include_unk=False):
    """Build (train, test) TextClassificationDataset objects from two CSV
    files, constructing the vocabulary from the training file unless one is
    supplied.  Raises ValueError if the two label sets differ."""
    if vocab is None:
        logging.info('Building Vocab based on {}'.format(train_csv_path))
        vocab = build_vocab_from_iterator(_csv_iterator(train_csv_path, ngrams))
    else:
        if not isinstance(vocab, Vocab):
            raise TypeError("Passed vocabulary is not of type Vocab")
    logging.info('Vocab has {} entries'.format(len(vocab)))
    logging.info('Creating training data')
    train_data, train_labels = _create_data_from_iterator(
        vocab, _csv_iterator(train_csv_path, ngrams, yield_cls=True), include_unk)
    logging.info('Creating testing data')
    test_data, test_labels = _create_data_from_iterator(
        vocab, _csv_iterator(test_csv_path, ngrams, yield_cls=True), include_unk)
    # Symmetric difference must be empty: both splits share one label set.
    if len(train_labels ^ test_labels) > 0:
        raise ValueError("Training and test labels don't match")
    return (TextClassificationDataset(vocab, train_data, train_labels),
            TextClassificationDataset(vocab, test_data, test_labels))
| true |
d5c6ea1649c71463c56ae7b482861c9bd31001af | Python | DL2021Spring/CourseProject | /data_files/622 Design Circular Queue.py | UTF-8 | 1,021 | 3.484375 | 3 | [] | no_license |
class MyCircularQueue:
    """Fixed-capacity FIFO ring buffer (LeetCode 622 interface).

    `head` and `tail` are monotonically increasing logical indices; the
    physical slot is obtained modulo the capacity `k`.  Empty slots hold
    None, which lets Front()/Rear() report -1 on an empty queue.
    """

    def __init__(self, k: int):
        self.head = 0            # logical index of the front element
        self.tail = -1           # logical index of the back element
        self.sz = 0              # number of stored elements
        self.k = k               # fixed capacity
        self.lst = [None] * k    # backing storage; None marks an empty slot

    def enQueue(self, value: int) -> bool:
        """Append `value` at the back; False when the queue is full."""
        if self.isFull():
            return False
        self.tail += 1
        self.lst[self.tail % self.k] = value
        self.sz += 1
        return True

    def deQueue(self) -> bool:
        """Drop the front element; False when the queue is empty."""
        if self.isEmpty():
            return False
        self.lst[self.head % self.k] = None
        self.head += 1
        self.sz -= 1
        return True

    def Front(self) -> int:
        """Front element, or -1 when the queue is empty."""
        value = self.lst[self.head % self.k]
        return -1 if value is None else value

    def Rear(self) -> int:
        """Back element, or -1 when the queue is empty."""
        value = self.lst[self.tail % self.k]
        return -1 if value is None else value

    def isEmpty(self) -> bool:
        return self.sz == 0

    def isFull(self) -> bool:
        return self.sz == self.k
| true |
55a60b2c91daa05adcd356f91a2b9a44447e0bb7 | Python | vahidito/mft-vanak-2020 | /S02/head_and_tail.py | UTF-8 | 152 | 3.296875 | 3 | [] | no_license | sz = 4
# Slicing demo; `sz` (defined just above this snippet) controls how many
# items are taken from each end of the list.
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 23, 2, 3, 99]
# First `sz` items followed by the last `sz` items.
res = lst[:sz] + lst[-sz:]
print(res)
# Every second item from index 2 up to (but not including) index 7.
res2 = lst[2:7:2]
print(res2)
# Walk backwards from the sz-th-from-last element, stopping before index -10.
print(lst[-sz:-10:-1])
93329fc0eabd9393a16fa4434f7e399d61291672 | Python | jhubar/PI | /Reboot_Q2/main3.py | UTF-8 | 3,821 | 2.609375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from SEIR import SEIR
if __name__ == "__main__":
    # NOTE(review): this value is overwritten with 200 below before it is
    # ever used — the initial 100 is dead.
    nb_sim = 100
    # --------------------------- Create the model --------------------------- #
    print('Phase 1: ')
    # Create the model:
    model = SEIR()
    # Load the dataset
    model.import_dataset()
    # Fit the model
    #model.fit(method='normal')
    # Fit starting state:
    #model.init_state_optimizer()
    # --------------------------- Load assistant realization --------------------------- #
    url = 'https://raw.githubusercontent.com/ADelau/proj0016-epidemic-data/main/Cov_invaders.csv'
    # Import the dataframe:
    delau_data = pd.read_csv(url, sep=',', header=0)
    # Get cumul positive test column (running sum of daily positives).
    cumul_positive = np.copy(delau_data['num_positive'].to_numpy())
    for i in range(1, len(cumul_positive)):
        cumul_positive[i] += cumul_positive[i - 1]
    delau_data.insert(7, 'cumul_positive', cumul_positive)
    delau_np = delau_data.to_numpy()
    # --------------------------- 20 days of mask + social d ----------------------------- #
    print('Phase 3: ')
    # Scenario: social distancing (level 6) and mask wearing from day 73 to 190.
    scenario_1 = {
        'duration': 191,
        'social_dist': [73, 190, 6],
        'wearing_mask': [73, 190]
    }
    # Put the scenario in the model:
    model.set_scenario(scenario_1)
    nb_sim = 200
    # Make stochastic predictions (one trajectory per simulation along axis 2):
    pred_sto = model.stochastic_predic(duration=delau_np.shape[0], nb_simul=nb_sim, scenar=True)
    # Get the mean
    pred_sto_mean = np.mean(pred_sto, axis=2)
    # Get std
    pred_sto_std = np.std(pred_sto, axis=2)
    # Get higher and lower bound of the ~95% confidence interval (mean +/- 2*std).
    sto_hq = pred_sto_mean + (2 * pred_sto_std)
    sto_lq = pred_sto_mean - (2 * pred_sto_std)
    # Make deterministic predictions:
    # NOTE(review): pred_det is computed but never used below — confirm intent.
    pred_det = model.predict(duration=delau_np.shape[0])
    # Plot results for cumulative conta (state column 7, scaled by model.t*model.s):
    time = np.arange(delau_np.shape[0])
    plt.fill_between(time, sto_hq[:, 7]*model.t*model.s, sto_lq[:, 7]*model.t*model.s, color='lavender', alpha=0.7)
    plt.plot(time, pred_sto_mean[:, 7]*model.t*model.s, color='black', label='Stochastic cumulative conta. Mean')
    plt.scatter(time, delau_np[:, 7], color='blue', label='Testing Cumulative data')
    plt.legend()
    plt.title('Cumulative testing: data vs predictions')
    plt.xlabel('Time in days')
    #plt.show()
    plt.savefig('fig/scenar_1_cum_test_190_confidence.png')
    plt.close()
    # For hospit (prediction column 4 vs data column 3)
    plt.fill_between(time, sto_hq[:, 4], sto_lq[:, 4], color='lavender', alpha=0.7)
    plt.plot(time, pred_sto_mean[:, 4], color='black', label='Hospit pred stocha')
    plt.scatter(time, delau_np[:, 3], color='blue', label='Hospit data')
    plt.legend()
    plt.title('Hospitalizations: data vs predictions')
    plt.xlabel('Time in days')
    #plt.show()
    plt.savefig('fig/scenar_1_hospit_190_confidence.png')
    plt.close()
    # For Criticals (column 5)
    plt.fill_between(time, sto_hq[:, 5], sto_lq[:, 5], color='lavender', alpha=0.7)
    plt.plot(time, pred_sto_mean[:, 5], color='green', label='Critical pred stocha')
    plt.scatter(time, delau_np[:, 5], color='blue', label='Critical data')
    plt.legend()
    plt.title('Critical: data vs prediction')
    plt.xlabel('Time in days')
    #plt.show()
    plt.savefig('fig/scenar_1_critical_190_confidence.png')
    plt.close()
    # For Fatalities (column 6)
    plt.fill_between(time, sto_hq[:, 6], sto_lq[:, 6], color='lavender', alpha=0.7)
    plt.plot(time, pred_sto_mean[:, 6], color='green', label='Fatalities pred stocha')
    plt.scatter(time, delau_np[:, 6], color='blue', label='Fatalities data')
    plt.legend()
    plt.title('Fatalities: data vs predictions')
    plt.xlabel('Time in days')
    # plt.show()
    plt.savefig('fig/scenar_1_fatal_190_confidence.png')
    plt.close()
| true |
40f3a2846caf3dba12158b377781acc5274d1704 | Python | s1p1v1/Stepik_selenium_course_1 | /step_2_2.py | UTF-8 | 1,008 | 3.328125 | 3 | [] | no_license | import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
# Задача 1
# Открыть страницу http://suninjuly.github.io/selects1.html
# Посчитать сумму заданных чисел
# Выбрать в выпадающем списке значение равное расчитанной сумме
# Нажать кнопку "Submit"
# Task: open the page, add the two displayed numbers, pick the matching
# value in the <select> dropdown and submit the form.
link = "http://suninjuly.github.io/selects2.html"
try:
    browser = webdriver.Firefox()
    browser.get(link)
    # Read the two numbers shown on the page.
    x = int(browser.find_element_by_id("num1").text)
    y = int(browser.find_element_by_id("num2").text)
    # Choose the option whose value equals their sum.
    sel = Select(browser.find_element_by_class_name("custom-select"))
    sel.select_by_value(str(x + y))
    button = browser.find_element_by_class_name("btn")
    button.click()
finally:
    # leave 30 seconds to copy the answer code from the result page
    time.sleep(30)
    # close the browser after all interactions
    browser.quit()
| true |
6ddca29d6aaa494d9f72a6d8df9988cebde51fad | Python | dinarrow/codecademy-python | /201301 - substrings/substrings.py | UTF-8 | 1,494 | 3.796875 | 4 | [] | no_license | from collections import deque
class FifoSet:
    """Hybrid container: a set of every value ever pushed plus a FIFO
    queue recording push order.

    Note the asymmetry: ``pop`` removes from the queue only — the set
    keeps the value forever — and ``push`` enqueues unconditionally, so
    the queue may contain duplicates even though the set cannot.
    Falsy values (e.g. the empty string) are silently ignored.
    """

    def __init__(self):
        self._set = set()
        self._queue = deque()

    def push(self, value):
        """Record ``value`` in both structures unless it is falsy."""
        if not value:
            return
        self._set.add(value)
        self._queue.append(value)

    def pop(self):
        """Dequeue and return the oldest pushed value (set untouched)."""
        return self._queue.popleft()

    def __bool__(self):
        # Truthiness tracks the queue, not the set.
        return len(self._queue) > 0

    def __len__(self):
        # Size tracks the set of distinct values ever pushed.
        return len(self._set)

    def __call__(self):
        return self._set
class SubstringGenerator:
    """Generates the set of all distinct non-empty substrings of ``word``.

    BUG FIX: the original breadth-first version only expanded the first
    string popped at each length (the ``l != previousL`` guard), so many
    substrings were silently dropped — e.g. for "abc" the substring "c"
    was never produced.  This version does a properly de-duplicated BFS:
    every unseen child (drop-last-char / drop-first-char) is enqueued
    exactly once, yielding all O(n^2) distinct substrings without the
    exponential blow-up the old guard was trying to avoid.
    """

    def __init__(self, word):
        self.word = word
        self._subList = self.generate()

    def generate(self):
        """Return the set of every distinct non-empty substring of ``self.word``."""
        seen = {self.word} if self.word else set()
        queue = deque(seen)
        while queue:
            current = queue.popleft()
            # Any substring word[i:j] is reachable by repeatedly chopping
            # one character off either end of the full word.
            for child in (current[:-1], current[1:]):
                if child and child not in seen:
                    seen.add(child)
                    queue.append(child)
        return seen

    def __str__(self):
        return str(self._subList)

    def __len__(self):
        return len(self._subList)

    def __iter__(self):
        return iter(self._subList)
class PrettySubstringGenerator(SubstringGenerator):
    """SubstringGenerator whose iterator left-pads each substring so it
    lines up under its first occurrence in the full word."""

    def __iter__(self):
        yield self.word
        for candidate in self._subList:
            if candidate != self.word:
                yield self.spacify(candidate)

    def spacify(self, sub):
        """Indent ``sub`` to the column of its first occurrence in the word."""
        offset = self.word.find(sub)
        return " " * offset + sub
# Demo: print every substring of "abracadabra" aligned under the word,
# followed by the number of distinct substrings.
subs = PrettySubstringGenerator("abracadabra")
print(*subs, sep='\n')
print("Count: %i" % len(subs))
| true |
71b9f65de201efc9051310db39abeda6f67c1dfa | Python | ejo034/RecOrder | /scripts/InOut/PrimeMover.py | UTF-8 | 456 | 3.015625 | 3 | [] | no_license | import os
import shutil
def moveOldAudioFiles(writer):
    """Move every generated .wav file into the 'Audio/generated/old/' archive.

    ``writer`` is any object exposing ``PrintAndWrite(msg, newline=True)``
    (the project's logging helper — TODO confirm exact signature against
    InOut writer).  Returns True immediately when only the 'old' subfolder
    itself remains (nothing to move); otherwise returns None after moving,
    matching the original behaviour.
    """
    writer.PrintAndWrite("")
    writer.PrintAndWrite("** ENTER PRIME MOVER! ** ", False)
    writer.PrintAndWrite("moving old files")
    path = "Audio/generated/"
    oldpath = "Audio/generated/old/"
    files = os.listdir(path)
    # BUG FIX: the original used `len(files) is 1` — identity comparison on
    # an int, which only works thanks to CPython small-int caching.
    if len(files) == 1:
        return True
    for file_name in files:  # renamed from `file` for clarity
        if file_name.endswith(".wav"):
            # os.path.join instead of raw string concatenation.
            shutil.move(os.path.join(path, file_name), oldpath)
    writer.PrintAndWrite("moving done")
| true |
ea4d9c86e876e78c773d3af9a053bd14092df4c2 | Python | td736/Blackjack | /Main.py | UTF-8 | 684 | 3.21875 | 3 | [] | no_license | from Table import Table
def main():
    """Run the interactive blackjack table loop.

    Players are added until 's' is entered; hands are then played until
    every player has left the table (or gone broke).
    """
    table = Table()
    while input("Add player or start(s): ") != 's':
        table.add_player()
    table.dealer.update_players(table.player_list)
    while True:
        table.start_hand()
        table.hit_loop()
        table.dealers_turn()
        # BUG FIX: iterate over a snapshot — remove_player mutates
        # table.player_list, and removing from the live list while
        # iterating it skips the entry that follows each removal.
        # (assumes remove_player drops the entry from player_list — TODO confirm)
        for player in list(table.player_list):
            if player.money <= 0:
                table.remove_player(player.name)
        while input("Add players or next round(s): ") != 's':
            table.add_player()
        if len(table.player_list) == 0:
            break
        else:
            table.dealer.update_players(table.player_list)


if __name__ == '__main__':
    main()
| true |
7dd95901e26d09a2280adf92e5cf646b187e45c3 | Python | freewifihk/freegovwifi2json | /geocode.py | UTF-8 | 2,131 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python2
# Copyright 2013 onarray <http://www.onarray.com>
'''Geocode govwifi locations.'''
import os.path
import json
import urllib
import urllib2
import pygeocoder
def google_v2(premise):
    '''Geocode the given premise using Google Maps API v2.

    Mutates ``premise`` in place, adding latitude/longitude/accuracy when
    the API returns at least one placemark, and returns it.
    '''
    base = 'http://maps.google.com.hk/maps/geo?'
    query = urllib.urlencode({'q': premise['name']})
    response = urllib2.urlopen(base + query)
    doc = json.loads(response.read())
    placemarks = doc.get('Placemark', [])
    if placemarks:
        first = placemarks[0]
        # Google v2 returns coordinates as [longitude, latitude, ...].
        longitude, latitude = first['Point']['coordinates'][:2]
        premise['latitude'] = latitude
        premise['longitude'] = longitude
        premise['accuracy'] = first['AddressDetails']['Accuracy']
    return premise
def google_v3(premise):
    '''Geocode a premise using the pygeocoder library.

    Tries "name, address, Hong Kong" first and falls back to
    "name, Hong Kong"; the second failure propagates to the caller.
    '''
    # Instantiated (rather than used statically) to work around pylint's
    # unsupported-static-method handling.
    geocoder = pygeocoder.Geocoder()
    full_address = "{0}, {1}, Hong Kong".format(premise['name'], premise['address'])
    try:
        results = geocoder.geocode(full_address)
    except pygeocoder.GeocoderError:
        fallback = "{0}, Hong Kong".format(premise['name'])
        try:
            results = geocoder.geocode(fallback)
        except pygeocoder.GeocoderError:
            raise
    return results.coordinates
def merge_google_v3(premise, coordinates):
    '''Merge Google geocoding API v3 results into a premise dict.

    If the premise already carries a (v2) latitude, the v3 result is
    stored under the 'google_v3' key without overwriting an existing one;
    otherwise the v3 coordinates become the premise's primary lat/long.
    Returns the (mutated) premise.
    '''
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
    # is the portable spelling (it works on Python 2 as well).
    if 'latitude' in premise:
        coords = {'latitude': coordinates[0], 'longitude': coordinates[1]}
        premise.setdefault('google_v3', coords)
    else:
        premise['latitude'] = coordinates[0]
        premise['longitude'] = coordinates[1]
    return premise
def read(path):
    '''Parse serialised json.'''
    # json.load reads straight from the file handle (same result as
    # loads(handle.read()) for a valid document).
    with open(path) as handle:
        return json.load(handle)
def main():
    '''Start execution of Geocode.'''
    import pprint
    premises = read(os.path.join('res', 'premises-list-geocoded.json'))
    pprint.pprint(premises)


if __name__ == '__main__':
    main()
| true |
81bbbb174013961f9665483ab3f8a1a4fbee1bf9 | Python | flyeyas/algorithm016 | /Week_03/50.pow-x-n.py | UTF-8 | 2,019 | 3.640625 | 4 | [] | no_license | # @before-stub-for-debug-begin
from python3problem50 import *
from typing import *
# @before-stub-for-debug-end
#
# @lc app=leetcode.cn id=50 lang=python3
#
# [50] Pow(x, n)
#
# https://leetcode-cn.com/problems/powx-n/description/
#
# algorithms
# Medium (36.66%)
# Likes: 499
# Dislikes: 0
# Total Accepted: 128.2K
# Total Submissions: 349.6K
# Testcase Example: '2.00000\n10'
#
# 实现 pow(x, n) ,即计算 x 的 n 次幂函数。
#
# 示例 1:
#
# 输入: 2.00000, 10
# 输出: 1024.00000
#
#
# 示例 2:
#
# 输入: 2.10000, 3
# 输出: 9.26100
#
#
# 示例 3:
#
# 输入: 2.00000, -2
# 输出: 0.25000
# 解释: 2^-2 = 1/2^2 = 1/4 = 0.25
#
# 说明:
#
#
# -100.0 < x < 100.0
# n 是 32 位有符号整数,其数值范围是 [−2^31, 2^31 − 1] 。
#
#
#
# @lc code=start
'''
暴力方式,循环遍历
'''
class Solution:
    """Brute-force O(|n|) power: multiply x into an accumulator |n| times.

    BUG FIX: the original looped over ``range(n)``, which is empty for
    negative n, so e.g. myPow(2.0, -2) wrongly returned 1 instead of 0.25
    (the problem allows n down to -2**31).  Negative exponents are now
    handled by computing the positive power and inverting the result.
    """

    def myPow(self, x: float, n: int) -> float:
        exponent = abs(n)
        num = 1
        for _ in range(exponent):
            num = x * num
        return 1 / num if n < 0 else num
'''
递归:
采用分治方式,进行拆分
思路:
1、计算x的n次方,先递归计算出y 等于 x的(n/2)次方,如果n小于0,就去当前数的倒数
2、根据计算结果,当n为偶数时,x的n次方等于y*y, 当n为基数时,x的n次方等于y*y*x
3、n=0时,x的n次方为1, 任何数的0次方,结果都为1
代码出错地方:递归n时, n/2需要向下取整
'''
class Solution:
    """Fast exponentiation by squaring — O(log n) multiplications.

    ``getNum`` recursively halves the exponent; a negative exponent is
    handled up front by inverting the positive power.
    """

    def myPow(self, x: float, n: int) -> float:
        if n >= 0:
            return self.getNum(x, n)
        return 1.0 / self.getNum(x, -n)

    def getNum(self, x, n):
        """Compute x**n for n >= 0 by squaring (x**n == (x**(n//2))**2 * x**(n%2))."""
        if n == 0:
            return 1.0
        half = self.getNum(x, n // 2)
        if n % 2 == 0:
            return half * half
        return half * half * x
'''
国际站中的一个递归解法
'''
class Solution:
    """Recursive squaring solution (originally written for Python 2).

    BUG FIX: the even branch used ``n / 2``, which under Python 3 is true
    division and silently turns the exponent into a float for the rest of
    the recursion; ``n // 2`` keeps it an int.
    """

    def myPow(self, x: float, n: int) -> float:
        if not n:
            return 1.0
        if n < 0:
            return 1.0 / self.myPow(x, -n)
        if n % 2:
            # Odd exponent: peel off one factor of x.
            return x * self.myPow(x, n - 1)
        # Even exponent: square the base and halve the exponent.
        return self.myPow(x * x, n // 2)
# @lc code=end
| true |
0a4950e2910b77709f2edb62f18c6764341f743d | Python | sandance/CodePrac | /EDUCATIVE/Sliding_Window/Longest_subarrray_with_ones_replacement.py | UTF-8 | 760 | 3.546875 | 4 | [] | no_license | from collections import defaultdict
def length_of_longest_substring(arr, k):
    """Length of the longest run of 1s obtainable by flipping at most
    ``k`` zeros in the binary array ``arr``.

    Sliding window that never shrinks: ``max_ones_count`` tracks the 1s
    inside the window, and whenever the window would need more than ``k``
    flips it slides right by one instead of growing — its final size is
    the answer.  O(n) time, O(1) space.

    (Removed: the original allocated a ``defaultdict`` named ``char_freq``
    that was never used.)
    """
    window_start = 0
    max_len = 0
    max_ones_count = 0
    for window_end in range(len(arr)):
        if arr[window_end] == 1:
            max_ones_count += 1
        # More zeros in the window than we may flip: slide, don't grow.
        if window_end - window_start + 1 - max_ones_count > k:
            if arr[window_start] == 1:
                max_ones_count -= 1
            window_start += 1
        max_len = max(max_len, window_end - window_start + 1)
    return max_len
def main():
    """Print the answers for the two sample cases."""
    cases = [
        ([0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1], 2),
        ([0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], 3),
    ]
    for arr, k in cases:
        print(length_of_longest_substring(arr, k))


main()
27b8cf7972a186a06089f95522ff61ce83094231 | Python | arkturw/empik | /pages/login.py | UTF-8 | 1,815 | 3.234375 | 3 | [] | no_license | from selenium.webdriver.common.by import By
class LoginPage:
    """Page Object for the login form: element locators plus user actions."""

    # Element locators
    USER_INPUT = (By.NAME, '_username')
    PASS_INPUT = (By.NAME, '_password')
    REMEMBER_ME_CHECKBOX = (By.NAME, '_remember_me')
    LOGIN_BTN = (By.ID, 'loginForm')
    ERROR_MSG = (By.XPATH, '//span[text()="Błędny użytkownik lub hasło."]')

    def __init__(self, browser):
        self.browser = browser

    def pass_user_data(self, username, password):
        """Type the given credentials into the login form.

        :param username: string, account name to enter
        :param password: string, account password to enter
        """
        self.browser.find_element(*self.USER_INPUT).send_keys(username)
        self.browser.find_element(*self.PASS_INPUT).send_keys(password)

    def uncheck_remember_me(self):
        """Click the 'remember me' checkbox."""
        self.browser.find_element(*self.REMEMBER_ME_CHECKBOX).click()

    def login(self):
        """Click the login/submit button."""
        self.browser.find_element(*self.LOGIN_BTN).click()

    # Assertions
    def is_error_message_displayed(self):
        """Return True when the login-failure message is visible on screen.

        Any lookup failure (e.g. the element is absent) is reported to
        stdout and treated as 'not displayed'.
        """
        try:
            return self.browser.find_element(*self.ERROR_MSG).is_displayed()
        except Exception as err:
            print('Wystąpił wyjątek:', err)
            return False
a39749e28604ea209f8e50a8039d0308ce22697c | Python | WildStriker/advent_of_code_2019 | /advent_of_code/day_23/part_02.py | UTF-8 | 767 | 3.03125 | 3 | [] | no_license | """Part 2 Module"""
import click
from day_23.network import communicate
from shared.opcodes import read_codes
@click.command()
@click.option("--input", "input_path", type=click.Path(exists=True), default="inputs\\day_23.txt")
def part_02(input_path):
    """Part 2"""
    # Load the intcode program and run the network with a NAT at 255.
    with open(input_path) as file_input:
        codes = read_codes(file_input)
    nat_address = 255
    address, x_portion, y_portion = communicate(codes, nat_address=nat_address)
    if address == nat_address:
        print(f"NAT address has sent y twice: {y_portion}")
    else:
        # A packet escaped to an address outside the expected range.
        print(f"{address} is outside address range")
        print(f"Tried to send packet x: {x_portion} and y: {y_portion}")


if __name__ == "__main__":
    part_02()  # pylint: disable=no-value-for-parameter
| true |
def solution(participant, completion):
    """Return the single participant who did not finish the race.

    ``completion`` equals ``participant`` minus exactly one entry (names
    may repeat).  Sorting copies of both lists and comparing them pairwise
    finds the first mismatch; if every pair matches, the missing runner is
    the last sorted participant.

    BUG FIX: the original called list.sort() / list.pop() on the
    arguments, mutating the caller's lists as a side effect; sorted()
    copies instead.
    """
    runners = sorted(participant)
    finishers = sorted(completion)
    for runner, finisher in zip(runners, finishers):
        if runner != finisher:
            return runner
    return runners[-1]
# Sample run from the problem statement (prints "vinko").
runners = ["marina", "josipa", "nikola", "vinko", "filipa"]
finishers = ["josipa", "filipa", "marina", "nikola"]
print(solution(runners, finishers))
| true |
2c01395fab9b4aa02282d6e7132b2d599c4b0ace | Python | DatHydroGuy/RayTracer | /test/test_tuples.py | UTF-8 | 7,501 | 3.4375 | 3 | [
"MIT"
] | permissive | import unittest
from tuples import *
from math import sqrt
class VectorTestCase(unittest.TestCase):
    """Unit tests for the Tuple/Point/Vector primitives from the `tuples`
    module (imported above with `from tuples import *`).

    Covers construction (w=1 -> point, w=0 -> vector), arithmetic,
    magnitude/normalisation, dot and cross products, scalar
    multiplication, tuple conversion and reflection.  Every test follows
    the Arrange / Act / Assert layout.
    """

    # --- construction and w-component semantics ---
    def test_tuple_with_w_value_of_1_is_a_point(self):
        # Arrange
        # Act
        a = Tuple(4.3, -4.2, 3.1, 1.0)
        # Assert
        self.assertEqual(a.x, 4.3)
        self.assertEqual(a.y, -4.2)
        self.assertEqual(a.z, 3.1)
        self.assertEqual(a.w, 1.0)
        self.assertTrue(a.is_point)
        self.assertFalse(a.is_vector)
    def test_tuple_with_w_value_of_0_is_a_vector(self):
        # Arrange
        # Act
        a = Tuple(4.3, -4.2, 3.1, 0.0)
        # Assert
        self.assertEqual(a.x, 4.3)
        self.assertEqual(a.y, -4.2)
        self.assertEqual(a.z, 3.1)
        self.assertEqual(a.w, 0.0)
        self.assertFalse(a.is_point)
        self.assertTrue(a.is_vector)
    def test_point_creates_tuple_with_w_value_of_1(self):
        # Arrange
        expected = Tuple(4, -4, 3, 1)
        # Act
        a = Point(4, -4, 3)
        # Assert
        self.assertEqual(a, expected)
    def test_vector_creates_tuple_with_w_value_of_0(self):
        # Arrange
        expected = Tuple(4, -4, 3, 0)
        # Act
        a = Vector(4, -4, 3)
        # Assert
        self.assertEqual(a, expected)
    # --- arithmetic: addition, subtraction, negation, scaling ---
    def test_adding_two_tuples_together_creates_a_tuple(self):
        # Arrange
        expected = Tuple(1, 1, 6, 1)
        # Act
        a1 = Tuple(3, -2, 5, 1)
        a2 = Tuple(-2, 3, 1, 0)
        # Assert
        self.assertEqual(a1 + a2, expected)
    def test_subtracting_two_points_creates_a_vector(self):
        # Arrange
        expected = Vector(-2, -4, -6)
        # Act
        a1 = Point(3, 2, 1)
        a2 = Point(5, 6, 7)
        # Assert
        self.assertEqual(a1 - a2, expected)
    def test_subtracting_a_vector_from_a_point_creates_a_point(self):
        # Arrange
        expected = Point(-2, -4, -6)
        # Act
        a1 = Point(3, 2, 1)
        a2 = Vector(5, 6, 7)
        # Assert
        self.assertEqual(a1 - a2, expected)
    def test_subtracting_two_vectors_creates_another_vector(self):
        # Arrange
        expected = Vector(-2, -4, -6)
        # Act
        a1 = Vector(3, 2, 1)
        a2 = Vector(5, 6, 7)
        # Assert
        self.assertEqual(a1 - a2, expected)
    def test_subtracting_a_vector_from_the_zero_vector(self):
        # Arrange
        expected = Vector(-1, 2, -3)
        # Act
        zero = Vector(0, 0, 0)
        a1 = Vector(1, -2, 3)
        # Assert
        self.assertEqual(zero - a1, expected)
    def test_negating_a_tuple_creates_a_componentwise_negative_tuple(self):
        # Arrange
        expected = Tuple(-1, 2, -3, 4)
        # Act
        a = Tuple(1, -2, 3, -4)
        # Assert
        self.assertEqual(-a, expected)
    def test_multiplying_a_tuple_by_a_scalar(self):
        # Arrange
        expected = Tuple(3.5, -7, 10.5, -14)
        # Act
        a = Tuple(1, -2, 3, -4)
        # Assert
        self.assertEqual(a * 3.5, expected)
    def test_multiplying_a_tuple_by_a_fraction(self):
        # Arrange
        expected = Tuple(0.5, -1, 1.5, -2)
        # Act
        a = Tuple(1, -2, 3, -4)
        # Assert
        self.assertEqual(a * 0.5, expected)
    def test_dividing_a_tuple_by_a_scalar(self):
        # Arrange
        expected = Tuple(0.5, -1, 1.5, -2)
        # Act
        a = Tuple(1, -2, 3, -4)
        # Assert
        self.assertEqual(a / 2, expected)
    # --- magnitude and normalisation ---
    def test_magnitude_of_x_vector(self):
        # Arrange
        expected = 1
        # Act
        a = Vector(1, 0, 0)
        # Assert
        self.assertEqual(a.mag, expected)
    def test_magnitude_of_y_vector(self):
        # Arrange
        expected = 1
        # Act
        a = Vector(0, 1, 0)
        # Assert
        self.assertEqual(a.mag, expected)
    def test_magnitude_of_z_vector(self):
        # Arrange
        expected = 1
        # Act
        a = Vector(0, 0, 1)
        # Assert
        self.assertEqual(a.mag, expected)
    def test_magnitude_of_positive_vector(self):
        # Arrange
        expected = sqrt(14)
        # Act
        a = Vector(1, 2, 3)
        # Assert
        self.assertEqual(a.mag, expected)
    def test_magnitude_of_negative_vector(self):
        # Arrange
        expected = sqrt(14)
        # Act
        a = Vector(-1, -2, -3)
        # Assert
        self.assertEqual(a.mag, expected)
    def test_normalising_an_axial_vector(self):
        # Arrange
        expected = Vector(1, 0, 0)
        # Act
        a = Vector(4, 0, 0)
        # Assert
        self.assertEqual(a.normalise(), expected)
    def test_normalising_a_vector(self):
        # Arrange
        expected = Vector(1 / sqrt(14), 2 / sqrt(14), 3 / sqrt(14))
        # Act
        a = Vector(1, 2, 3)
        # Assert
        self.assertEqual(a.normalise(), expected)
    def test_magnitude_of_a_normalised_vector_is_1(self):
        # Arrange
        expected = 1
        # Act
        a = Vector(1, 2, 3)
        # Assert
        self.assertEqual(a.normalise().mag, expected)
    # --- dot and cross products ---
    def test_dot_product_of_two_vectors(self):
        # Arrange
        expected = 20
        # Act
        a = Vector(1, 2, 3)
        b = Vector(2, 3, 4)
        # Assert
        self.assertEqual(a.dot(b), expected)
    def test_dot_product_of_two_tuples(self):
        # Arrange
        expected = 4
        # Act
        a = Tuple(1, 2, 3, 4)
        b = Tuple(2, 3, 4, -4)
        # Assert
        self.assertEqual(a.dot(b), expected)
    def test_cross_product_of_two_vectors(self):
        # Arrange
        # Cross product is anti-commutative: a x b == -(b x a).
        expected1 = Vector(-1, 2, -1)
        expected2 = Vector(1, -2, 1)
        # Act
        a = Vector(1, 2, 3)
        b = Vector(2, 3, 4)
        # Assert
        self.assertEqual(a.cross(b), expected1)
        self.assertEqual(b.cross(a), expected2)
    def test_multiplying_a_vector_by_a_scalar(self):
        # Arrange
        a = Vector(1, 2, 3)
        expected = Vector(2.5, 5, 7.5)
        # Act
        result = a * 2.5
        # Assert
        self.assertEqual(result, expected)
    def test_multiplying_a_scalar_by_a_vector(self):
        # Arrange (checks the reflected operand order, i.e. __rmul__)
        a = Vector(1, 2, 3)
        expected = Vector(2.5, 5, 7.5)
        # Act
        result = 2.5 * a
        # Assert
        self.assertEqual(result, expected)
    # --- conversions and reflection ---
    def test_create_a_point_from_a_tuple(self):
        # Arrange
        a = Point(1, 2, 3)
        b = Point(2, 5, 8)
        expected = Point(1, 3, 5)
        # Act
        result = Point.from_tuple(b - a)
        # Assert
        self.assertEqual(result, expected)
    def test_create_a_Vector_from_a_tuple(self):
        # Arrange
        a = Point(1, 2, 3)
        b = Point(2, 5, 8)
        expected = Vector(1, 3, 5)
        # Act
        result = Vector.from_tuple(b - a)
        # Assert
        self.assertEqual(result, expected)
    def test_reflecting_a_Vector_approaching_at_45_degrees(self):
        # Arrange
        v = Vector(1, -1, 0)
        n = Vector(0, 1, 0)
        expected = Vector(1, 1, 0)
        # Act
        result = v.reflect(n)
        # Assert
        self.assertEqual(result, expected)
    def test_reflecting_a_Vector_off_a_slanted_surface(self):
        # Arrange
        v = Vector(0, -1, 0)
        n = Vector(sqrt(2) / 2, sqrt(2) / 2, 0)
        expected = Vector(1, 0, 0)
        # Act
        result = v.reflect(n)
        # Assert
        self.assertEqual(result, expected)
if __name__ == '__main__':
    unittest.main()
| true |
ca4d605f94f0d3f59efc095782e60d9ab687d457 | Python | sstelss/python | /ccutting seam/avtomat.py | UTF-8 | 5,324 | 3.5625 | 4 | [] | no_license | from PIL import Image
def run(path):
# считывание изображения
img = Image.open(path)
obj = img.load()
# количество строк
width = img.size[1]
# количество столбцов
height = img.size[0]
print(img.size)
print(f"string -> {width} column -> {height}")
#I Создание матрици энергии
energy = []
# интенсивность по rgb
i_r = []
i_g = []
i_b = []
# считываем интенсивность в списки
for i in range(width):
i_r.append([])
i_g.append([])
i_b.append([])
for j in range(height):
i_r[i].append(obj[j, i][0])
i_g[i].append(obj[j, i][1])
i_b[i].append(obj[j, i][2])
print(i_r)
print(i_g)
print(i_b)
# вычислим энергию для пикселей исключая крайний правый столбец и нижнюю строку
for i in range(width-1):
energy.append([])
for j in range(height-1):
temp_e_r = round((abs(i_r[i][j] - i_r[i + 1][j]) + abs(i_r[i][j] - i_r[i][j + 1])) / 2)
temp_e_g = round((abs(i_g[i][j] - i_g[i + 1][j]) + abs(i_g[i][j] - i_g[i][j + 1])) / 2)
temp_e_b = round((abs(i_b[i][j] - i_b[i + 1][j]) + abs(i_b[i][j] - i_b[i][j + 1])) / 2)
energy[i].append(temp_e_r + temp_e_g + temp_e_b)
# посчитаем для крайнего правого столбца
for i in range(width-1):
temp_e_r = abs(i_r[i][height - 1] - i_r[i + 1][height - 1])
temp_e_g = abs(i_g[i][height - 1] - i_g[i + 1][height - 1])
temp_e_b = abs(i_b[i][height - 1] - i_b[i + 1][height - 1])
energy[i].append(temp_e_r + temp_e_g + temp_e_b)
# для нижней строки
mass = []
for i in range(height-1):
temp_e_r = abs(i_r[width - 1][i] - i_r[width - 1][i+1])
temp_e_g = abs(i_g[width - 1][i] - i_g[width - 1][i+1])
temp_e_b = abs(i_b[width - 1][i] - i_b[width - 1][i+1])
mass.append(temp_e_r + temp_e_g + temp_e_b)
energy.append(mass)
# элемент в правом нижнем углу равен 0
energy[width-1].append(0)
print("energy:", energy)
#II поиск минимальных швов
# создадим матрицу сумм
summ = []
# первая строка совпадает с первой строкой энергий
summ.append(energy[0])
# послудующие вычисляем по правилу: energy[i,j] + MIN ( sum[i-1, j-1], sum[i-1, j], sum[i-1, j+1])
for i in range(1, width):
summ.append([])
for j in range(height):
temp_list = []
# элемент сверху всегда есть
temp_list.append(summ[i-1][j])
# верхнего левого нет для крайнего левого столбца
if (j-1 >= 0):
temp_list.append(summ[i-1][j-1])
# верхнего правого нет для крайнего правого столбца
try:
temp_list.append(summ[i-1][j+1])
except:
pass
summ[i].append(energy[i][j] + min(temp_list))
print(f"summ: {summ}")
# обратный проход. Ищем найменьший шов
# координаты шва
path = []
# найдем минимальный элемент в нижней строке
m = min(summ[width-1])
# узнаем его индекс
index = summ[width-1].index(m)
# now_s = width-1
# now_c = index
print(m, index)
path.append([width-1, index])
for i in range(width-2, -1, -1):
temp_list = []
temp_list.append([summ[i][index], index])
if index - 1 >= 0:
temp_list.append([summ[i][index-1], index-1])
if index + 1 < height-1:
temp_list.append([summ[i][index+1], index+1])
m = min(temp_list, key=lambda x: x[0])
index = m[1]
path.append([i, index])
print(f"path: {path}")
for i in path:
obj[i[1], i[0]] = (255, 0, 0)
# img.show()
img.close()
#III удаление лишнего шва
# просто создадим новый файл на основе информации, которую имеем, но пропустим пиксели из шва
path = sorted(path, key=lambda x:x[0])
n_img = Image.new("RGB", (height-1, width))
for i in range(width):
i_r[i] = i_r[i][0:path[i][1]] + i_r[i][path[i][1] + 1:]
i_g[i] = i_g[i][0:path[i][1]] + i_g[i][path[i][1] + 1:]
i_b[i] = i_b[i][0:path[i][1]] + i_b[i][path[i][1] + 1:]
for i in range(width-1):
for j in range(height-1):
n_img.putpixel((j, i), (i_r[i][j], i_g[i][j], i_b[i][j]))
# n_img.show()
n_img.save("new_image.png")
return "new_image.png"
if __name__ == "__main__":
    # Carve `amount` seams by repeatedly re-running on the latest output.
    amount = 100
    temp = run("test3.png")
    for i in range(amount):
        # NOTE(review): `temp` is never reassigned inside the loop — this
        # only works because run() always saves to and returns the fixed
        # name "new_image.png", so each call re-reads the latest result.
        run(temp)
f1ab804fbc1af39b53b7ece1ba02e13cf50a9ae3 | Python | novayo/LeetCode | /1882_Process_Tasks_Using_Servers/try_2.py | UTF-8 | 1,358 | 2.71875 | 3 | [] | no_license | class Solution:
def assignTasks(self, servers: List[int], tasks: List[int]) -> List[int]:
'''
free server
busy server
enque task
if free server:
handle one task => busy server
cur_time += 1
else:
cur_time = busy server[0]
handle busy server
'''
n = len(tasks)
free_server = [] # (weight, idx)
busy_server = [] # (finish time, weight, idx)
for i, weight in enumerate(servers):
heapq.heappush(free_server, (weight, i))
ans = []
idx_task = 0
cur_time = 0
while idx_task < n:
if free_server and idx_task < n:
weight, server = heapq.heappop(free_server)
heapq.heappush(busy_server, (cur_time + tasks[idx_task], weight, server))
ans.append(server)
idx_task += 1
cur_time = max(cur_time, idx_task)
else:
cur_time = busy_server[0][0]
while busy_server and busy_server[0][0] <= cur_time:
_, weight, server = heapq.heappop(busy_server)
heapq.heappush(free_server, (weight, server))
return ans
| true |
81a6cba45e2f2cb6a43d9d0c2c2074af1b41a2e0 | Python | milinddalakoti/Python-Projects | /Password Manager Project/password_gen.py | UTF-8 | 853 | 3.65625 | 4 | [] | no_license |
from random import randint,choice,shuffle
# Character pools for password generation.
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']


class Passgen:
    """Random password generator: 8-10 letters, 2-4 symbols and 2-4
    digits, shuffled together (total length 12-18)."""

    @staticmethod
    def password_gen():
        """Return a new random password string.

        BUG FIX: the original ``def password_gen()`` had neither ``self``
        nor a decorator, so calling it on an instance
        (``Passgen().password_gen()``) raised TypeError; @staticmethod
        makes both call styles work.  A dead ``password_list = []``
        assignment was also removed.
        """
        letter_part = [choice(letters) for _ in range(randint(8, 10))]
        symbol_part = [choice(symbols) for _ in range(randint(2, 4))]
        number_part = [choice(numbers) for _ in range(randint(2, 4))]
        password_list = letter_part + symbol_part + number_part
        shuffle(password_list)
        return "".join(password_list)
| true |
544955982c4b5b5ac3fddc030c8c5c8c712ee00c | Python | shilin21/shadowdetection | /unet/models/warp_model.py | UTF-8 | 8,959 | 2.90625 | 3 | [] | no_license | """Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_dataset.py
The class name should be <Model>Dataset.py
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from torch.autograd import Variable
from .base_model import BaseModel
from . import networks
from . import pwclite
class WarpModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='visha', lr=5e-3, batch_size=8, preprocess='resize', load_size=512, no_epoch=True, save_by_iter=True, load_iter=50000, print_freq=1, display_ncols=10)
parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay for optimizer')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd optimizer')
parser.add_argument('--num_classes', type=int, default=1, help='number of classes')
parser.add_argument('--backbone', type=str, default='mobilenet', help='backbone net type')
parser.add_argument('--output_stride', type=int, default=16, help='number of output stride')
parser.add_argument('--sync_bn', default=None, help='synchronized batchnorm or not')
parser.add_argument('--freeze_bn', default=False, help='freeze bacthnorm or not')
parser.add_argument('--iter_num', type=int, default=50000, help='number of iterations')
parser.add_argument('--lr_decay', type=float, default=0.9, help='learning rate decay rate')
parser.add_argument('--pretrained_model', default='checkpoints/pwclite_ar.tar')
parser.add_argument('--test_shape', default=[448, 1024], type=int, nargs=2)
parser.add_argument('--n_frames', type=int, default=2)
parser.add_argument('--upsample', default=True)
parser.add_argument('--reduce_dense', default=True)
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['first', 'second', 'sum']
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['data_A1', 'data_A2', 'data_B1', 'data_B2', 'flow12', 'flow21', 'transflow12', 'transflow21', 'pred1', 'pred2']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
self.model_names = ['FW']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netFW = networks.define_fw(opt.num_classes, opt.backbone, opt.output_stride, opt.sync_bn, opt.freeze_bn, gpu_ids=self.gpu_ids)
self.netFG = pwclite.PWCLite(opt).to(self.device)
self.netFG = pwclite.restore_model(self.netFG, opt.pretrained_model)
self.netFG.eval()
if self.isTrain: # only defined during training time
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
self.criterionFlow = torch.nn.MSELoss()
# define and initialize optimizers. You can define one optimizer for each network.
self.train_params = [{'params': self.netFW.module.get_1x_lr_params(), 'lr': opt.lr},
{'params': self.netFW.module.get_10x_lr_params(), 'lr': opt.lr * 10}]
self.optimizer = torch.optim.SGD(self.train_params, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=False)
self.optimizers = [self.optimizer]
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
    """Unpack a dataloader batch onto the model's device.

    Parameters:
        input: dict with image tensors 'A1', 'A2', 'B1', 'B2' and the list
            of source file paths under 'A_paths'.
    """
    # Both input frames first, then both target frames, all on self.device.
    self.data_A1 = Variable(input['A1']).to(self.device)
    self.data_A2 = Variable(input['A2']).to(self.device)
    self.data_B1 = Variable(input['B1']).to(self.device)
    self.data_B2 = Variable(input['B2']).to(self.device)
    # Paths are kept so outputs can be saved next to their sources.
    self.image_paths = input['A_paths']
def forward(self):
    """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
    # Resize both input frames to the resolution the (frozen) flow network
    # expects.  assumes self.opt.test_shape is an (H, W) pair -- TODO confirm.
    flow_input1 = torch.nn.functional.interpolate(self.data_A1, size=self.opt.test_shape, mode='bilinear', align_corners=True)
    flow_input2 = torch.nn.functional.interpolate(self.data_A2, size=self.opt.test_shape, mode='bilinear', align_corners=True)
    # PWCLite consumes the two frames concatenated along the channel axis.
    flow_input = torch.cat([flow_input1, flow_input2], 1)
    flow = self.netFG(flow_input)
    # Rescale the forward/backward flows back to the working resolution.
    self.flow12 = pwclite.resize_flow(flow['flows_fw'][0], (self.opt.load_size, self.opt.load_size))
    self.flow21 = pwclite.resize_flow(flow['flows_bw'][0], (self.opt.load_size, self.opt.load_size))
    # The main network returns both frame predictions plus transformed flows.
    self.pred1, self.pred2, self.transflow12, self.transflow21 = self.netFW(self.data_A1, self.data_A2, self.flow12, self.flow21)
def backward(self):
    """Compute the per-frame reconstruction losses and back-propagate their sum."""
    loss_first = self.criterionFlow(self.pred1, self.data_B1)
    loss_second = self.criterionFlow(self.pred2, self.data_B2)
    # Expose the individual terms so get_current_losses() can report them.
    self.loss_first = loss_first
    self.loss_second = loss_second
    self.loss_sum = loss_first + loss_second
    self.loss_sum.backward()
def optimize_parameters(self):
    """Update network weights; it will be called in every training iteration.

    Order matters: gradients are cleared, then forward builds the graph,
    backward fills the gradients, and step applies the update.
    """
    self.optimizer.zero_grad() # clear network G's existing gradients
    self.forward()             # first call forward to calculate intermediate results
    self.backward()            # calculate gradients for network G
    self.optimizer.step()      # update gradients for network G
def update_learning_rate(self, curr_iter):
    """Update learning rates for all the networks; called at the end of every epoch.

    With opt.no_epoch a polynomial decay is applied directly (the second
    parameter group runs at 10x); otherwise the registered schedulers step.
    """
    optimizer = self.optimizers[0]
    old_lr = optimizer.param_groups[0]['lr']
    if self.opt.no_epoch:
        decayed = self.opt.lr * (1 - float(curr_iter) / self.opt.iter_num) ** self.opt.lr_decay
        optimizer.param_groups[0]['lr'] = 1 * decayed
        optimizer.param_groups[1]['lr'] = 10 * decayed
    else:
        for scheduler in self.schedulers:
            if self.opt.lr_policy == 'plateau':
                scheduler.step(self.metric)
            else:
                scheduler.step()
    lr = optimizer.param_groups[0]['lr']
    print('learning rate %.7f -> %.7f' % (old_lr, lr))
| true |
def cond1(l1, i):
    """Condition 1: the i-th entry strictly exceeds every earlier entry."""
    if i == 0:
        # No previous entries: keep the original fallback behaviour of
        # delegating to cond2.
        return cond2(l1, i)
    current = l1[i]
    return all(current > earlier for earlier in l1[:i])
def cond2(l1, i):
    """Condition 2: the i-th entry strictly exceeds its immediate successor."""
    if i == len(l1) - 1:
        # Last entry has no successor: keep the original fallback of
        # delegating to cond1.
        return cond1(l1, i)
    return l1[i] > l1[i + 1]
# For each test case, count the entries that "break the record": strictly
# above all previous values (cond1) and strictly above the next value (cond2).
tc = int(input())
for case in range(1, tc + 1):
    N = int(input())  # declared count; the actual list length is used below
    l1 = [int(x) for x in input().split(" ")]
    res = sum(1 for i in range(len(l1)) if cond1(l1, i) and cond2(l1, i))
    print("Case #{0}: {1}".format(case, res))
| true |
7c328523a9af4648dec0ee4fb9312bf5a3bf7500 | Python | EliasL/Happy-number-calculators | /Large number reverse calculator.py | UTF-8 | 789 | 3.75 | 4 | [
"MIT"
] | permissive | import math
# Note, this calculator does not compact
def square(x):
    """Return the square of *x* after coercing it to int (accepts digit strings)."""
    value = int(x)
    return value * value
def happy(number):
    """Return the sum of the squares of the decimal digits of *number*."""
    # Inlines the one-line square() helper the original mapped over the digits.
    return sum(int(digit) * int(digit) for digit in str(number))
def findD(n, number, d):
    """Remove as many d**2 chunks from *n* as possible and report the count.

    Returns the reduced remainder together with the (unmodified) *number*
    list, so calls can be chained.
    """
    square_value = d ** 2
    if n >= square_value:
        count = math.floor(n / square_value)
        print("There are/is "+str(count)+" \""+str(d)+"\"s")
        n -= square_value * count
    return n, number
def reverse(n):
    """Report, digit by digit (9 down to 1), how many of each digit a
    squared-digit sum *n* decomposes into."""
    number = []
    # Same fixed sequence of findD calls as before, expressed as a loop.
    for digit in range(9, 0, -1):
        n, number = findD(n, number, digit)
# Interactive driver: repeatedly prompt for a number and print its digit
# decomposition.  NOTE(review): the loop only ends via EOF/KeyboardInterrupt
# (int(input()) raising).
while True:
    n = int(input("Enter number: "))
    reverse(n)
| true |
2ec8185c26012c029ed869d7721f5118232480e8 | Python | ejeschke/pyasdf | /pyasdf/asdftypes.py | UTF-8 | 3,907 | 2.78125 | 3 | [] | no_license | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import os
from astropy.extern import six
from astropy.utils.misc import InheritDocstrings
from . import versioning
__all__ = ['format_tag', 'AsdfTypeIndex', 'AsdfType']
def format_tag(organization, standard, version, tag_name=None):
    """
    Format a YAML tag.

    Returns ``tag:<organization>:<standard>/<version>/`` with *tag_name*
    appended when one is given.
    """
    prefix = 'tag:{0}:{1}/{2}/'.format(organization, standard, version)
    if tag_name is None:
        return prefix
    return prefix + tag_name
class AsdfTypeIndex(object):
    """
    An index of the known `AsdfType`s.
    """
    # Maps custom Python classes -> the AsdfType subclass that handles them.
    _type_by_cls = {}
    # Maps fully-qualified YAML tags -> the AsdfType subclass that handles them.
    _type_by_name = {}

    @classmethod
    def get_asdftype_from_custom_type(cls, custom_type):
        # Returns None when the custom type has not been registered.
        return cls._type_by_cls.get(custom_type)

    @classmethod
    def get_asdftype_from_yaml_tag(cls, tag):
        # Returns None when the tag is unknown.
        return cls._type_by_name.get(tag)
class AsdfTypeMeta(type):
    """
    Keeps track of `AsdfType` subclasses that are created, and stores
    them in `AsdfTypeIndex`.
    """
    def __new__(mcls, name, bases, attrs):
        cls = super(AsdfTypeMeta, mcls).__new__(mcls, name, bases, attrs)

        # Any class exposing a ``name`` attribute is indexed (the AsdfType
        # base sets name = None, so it is registered under its bare prefix).
        if hasattr(cls, 'name'):
            # Derive the YAML tag from the class metadata unless the class
            # declared an explicit ``yaml_tag`` of its own.
            if 'yaml_tag' not in attrs:
                cls.yaml_tag = format_tag(
                    cls.organization,
                    cls.standard,
                    versioning.version_to_string(cls.version),
                    cls.name)

            # Register the type both under its own class and its YAML tag.
            AsdfTypeIndex._type_by_cls[cls] = cls
            AsdfTypeIndex._type_by_name[cls.yaml_tag] = cls

            # Also map each custom Python type the class can serialize back
            # to this AsdfType.
            for typ in cls.types:
                AsdfTypeIndex._type_by_cls[typ] = cls
        return cls
@six.add_metaclass(AsdfTypeMeta)
@six.add_metaclass(InheritDocstrings)
class AsdfType(object):
    """
    The base class of all custom types in the tree.

    Besides the attributes defined below, most subclasses will also
    override `to_tree` and `from_tree`.

    To customize how the type's schema is located, override `get_schema_path`.

    Attributes
    ----------
    name : str
        The name of the type.

    organization : str
        The organization responsible for the type.

    standard : str
        The standard the type is defined in. For built-in ASDF types,
        this is ``"asdf"``.

    version : 3-tuple of int
        The version of the standard the type is defined in.

    types : list of Python types
        Custom Python types that, when found in the tree, will be
        converted into basic types for YAML output.
    """
    name = None  # concrete subclasses override this with their tag name
    organization = 'stsci.edu'
    standard = 'asdf'
    version = (0, 1, 0)
    types = []

    @classmethod
    def get_schema_path(cls):
        # Schemas live at <organization>/<standard>/<version>/<name>.
        return os.path.join(
            cls.organization, cls.standard,
            versioning.version_to_string(cls.version),
            cls.name)

    @classmethod
    def get_schema(cls):
        # Imported lazily; presumably avoids a circular import -- confirm.
        from . import schema
        return schema.load_schema(cls.get_schema_path())

    @classmethod
    def validate(cls, tree):
        """
        Validate the given tree of basic data types against the schema
        for this type.
        """
        from . import schema
        schema.validate(tree, cls.get_schema())

    @classmethod
    def to_tree(cls, node, ctx):
        """
        Converts from a custom type to any of the basic types (dict,
        list, str, number) supported by YAML. In most cases, must be
        overridden by subclasses.
        """
        # Default: cast the node to its first base class (e.g. a dict
        # subclass becomes a plain dict).
        return node.__class__.__bases__[0](node)

    @classmethod
    def from_tree(cls, tree, ctx):
        """
        Converts from basic types to a custom type.
        """
        return cls(tree)

    @classmethod
    def assert_equal(cls, old, new):
        """
        Assert that two objects of this type are equal. Used for
        testing only.
        """
        return
| true |
00916f251de0dba87379918c331ba586e514a28f | Python | mehedi-shafi/SnakeGame-Python | /menu.py | UTF-8 | 1,059 | 3.203125 | 3 | [] | no_license | from vars import *
class MainMenu:
    """Title screen: renders the game title, the menu options and a credit line."""

    def __init__(self):
        pass

    def update(self):
        """No per-frame state to advance on the title screen."""
        pass

    def render(self, window):
        """Draw every menu element onto *window* (a pygame display surface)."""
        menu_font = pygame.font.SysFont('monaco', 30)

        title_surface = menu_font.render('Snake Game!!', True, black)
        title_rect = title_surface.get_rect()
        title_rect.midtop = (360, 200)

        play_surface = menu_font.render('1. Play', True, black)
        play_rect = play_surface.get_rect()
        play_rect.midtop = (360, 240)

        quit_surface = menu_font.render('2. Quit', True, black)
        # NOTE(review): the rect is taken from the Play surface, mirroring
        # the original code exactly -- confirm whether that was intentional.
        quit_rect = play_surface.get_rect()
        quit_rect.midtop = (360, 270)

        credit_font = pygame.font.SysFont('monaco', 18)
        credit_surface = credit_font.render('https://github.com/mehedi-shafi', True, black)
        credit_rect = credit_surface.get_rect()
        credit_rect.midtop = (610, 460)

        for surface, rect in ((title_surface, title_rect), (play_surface, play_rect),
                              (quit_surface, quit_rect), (credit_surface, credit_rect)):
            window.blit(surface, rect)
| true |
5df5ef2c6134542ba73c1a94fe12b2a744251864 | Python | mohammedterryjack/tts | /src/voice.py | UTF-8 | 468 | 2.9375 | 3 | [] | no_license | from os import system
from time import sleep
from pygame import mixer
class Voice:
    """Thin text-to-speech wrapper: shells out to the ``tts`` CLI to build a
    WAV file, then plays it through the pygame mixer."""

    def __init__(self, file_path: str = "audio/tts_output.wav") -> None:
        self.audio_path = file_path
        self.synthesiser = mixer
        self.synthesiser.init()

    def speak(self, text: str) -> None:
        """Synthesise *text* into self.audio_path and play it back."""
        system(f'tts --text "{text}" --out_path {self.audio_path}')
        player = self.synthesiser.music
        player.load(self.audio_path)
        player.play()
        sleep(5)  # crude wait so playback isn't cut off when the caller returns
dc69a55003f2c795fe67df114814016ba42efb5e | Python | martavillegas/ApertiumRDF | /New-data/calculateCycles.py | UTF-8 | 10,155 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
##################################
##
##
##
## usage: python calculateCycles.py dict.file lang (v) > output.file
##
##################################
from SPARQLWrapper import SPARQLWrapper, JSON
from collections import defaultdict
import sys
import urllib
args = sys.argv
dictionary = args[1]  # path to the pre-computed dictionary file
lang = args[2]        # language code of the source dictionary (e.g. 'en')
if len(args) == 4:
    verbose = args[3]  # a third argument ('v') enables verbose output
else:
    verbose = ""

# Open and read dictionary with format: {noun: [[trans-pairs],[uniq-words]]}
# WARNING(review): eval() executes arbitrary code from the input file; for a
# plain dict literal, ast.literal_eval would be the safe equivalent.
input_file = open(dictionary,'r')
Dict = eval(input_file.read())
def main():
    """Get root word, check if it has a graph and start computation.

    The input dictionary was created by getData.py and it contains
    graph/context data from Apertium RDF data for input words.
    """
    for word, data in Dict.items():
        translation_pairs = data[0]
        # Words without any translation-pair context cannot be processed.
        if translation_pairs:
            go(word, translation_pairs)
def go(word,graph):
    """Given a root word and its context/graph identify and evaluate potential targets.

    Context/graph contains the list of translation pairs computed
    by getData.py script.

    Potential targets are those words in the graph that:
    (i) occur in some cycle together with the root word and
    (ii) are not linked to the root.
    """
    root = str(word) + "-n-" + lang # Rewrite input word following 'Apertium format'.
    apertium = [] # Used to store already known translations for root.
    pila = [] # Used to control 'already visited nodes'.
    cycles = [] # Initiate cycles list.

    # Read graph and trigger findCycles for each X-Y pair where X=root.
    for pair in graph:
        if pair[0] == root:
            findCycles(pair[1],pila,graph,root,cycles)
            apertium.append(pair[1])

    # Flat cycles' list to identify distinct words (using set).
    flat_pairs = reduce(lambda x,y: x+y,graph)
    uniq_words = list(set(flat_pairs))

    # Calculate the density of the graph. Density = V / N*(N-1).
    graphD = graphDensity(len(uniq_words),len(graph))

    if len(cycles) > 0:
        # Remove 'duplicates' in cycles (abc=cba; abcd=abdc) and
        # require len(cycles) > 5 for big contexts (more than 5 known translations).
        cyclesClean = cleanCycles(cycles,root,len(apertium))
        if len(cyclesClean) > 0:
            # Identify potential Targets in cycles (nodes not linked to root).
            targets = findTargets(cyclesClean,root,graph)
            # Calculate the cycle density for each potential target.
            # For each cycle with a potential target we get: target, density and cycle.
            cyclesDensity = calculateDensity(cyclesClean,targets,graph)
            # Get 'already known' target languages.
            languages = getLanguages(apertium)
            # Compute the final score.
            cycles_dict = compute_results(cyclesDensity,graphD,root,languages)

            if verbose == 'v':
                # verbose: num of cycles, num of 'uniq' cycles, num words in context,
                # num translation pairs, num known translations, num potential targets
                info = [len(cycles),len(cyclesClean),len(uniq_words),len(graph),len(apertium),len(targets)]
                info_toprint = ', '.join(map(str, info))

            # Prints: root, target, language, score, graph_density, num_cycles, length_cycles.
            for target, value in cycles_dict.iteritems():
                l = computeLanguages(languages,target)
                graph_density = '%.3f'%(graphD)
                score = '%.4f'%(value[0])
                if verbose == 'v':
                    print "%s, %s, %s, %s, %s, %s, %s, %s" % (root,info_toprint, graph_density,target,l,score,len(value[1]),len(value[1][0]))
                else:
                    print "%s, %s, %s, %s, %s, %s, %s" % (root,graph_density,target,l,score,len(value[1]),len(value[1][0]))
def compute_results(cyclesDensity, graphD, root, languages):
    """Keep, per potential target, only its best-scoring cycles.

    *cyclesDensity* holds [target, score, cycle] records; a target can appear
    in several cycles.  Returns {target: [highest_score, [cycles...]]}.
    (graphD, root and languages are accepted but not used here.)
    """
    best = dict()
    # Reverse sorting guarantees the first record seen for a target carries
    # its highest score; later records only contribute cycles tying it.
    for target, score, cycle in sorted(cyclesDensity, reverse=True):
        if target not in best:
            best[target] = [score, [cycle]]
        elif best[target][0] == score:
            best[target][1].append(cycle)
    return best
### Warning:
# We restrict length of cycles to 8 maximum to avoid computation problems:
# bar-n-en: 730 translation pairs (passed)
# hit-n-en: 779 translation pairs (failed)
# leader-n-en: 764 translation pairs (failed)
def findCycles(node,pila,graph,root,cycles):
    """Find cycles in Graph containing root.

    This is a recursive function that starts with node in a 'root-node' pair
    and ends when root is reached (node-root pair).  Found cycles are
    appended to *cycles* as space-joined node strings.
    """
    pila2add = ""
    # Add node to potential cycle.
    pila.append(node)
    # Look for node/Y pairs in Graph.
    for pair in graph:
        # We had to limit len(cycles) to < 7 to avoid computation problems!
        if (pair[0] == node and len(pila) < 7):
            # Termination: When pair is "X->root", we reach the end of cycle.
            if pair[1] == root:
                # We want cycles bigger than 2 nodes.
                if len(pila) > 1:
                    pila2add = ' '.join(pila)
                    cycles.append(pila2add)
            else:
                # Check that node is not repeated in cycle.
                if not visited(pair[1],pila):
                    findCycles(pair[1],pila,graph,root,cycles)
    else:
        # NOTE(review): for-else -- the loop never breaks, so this pop always
        # runs once the scan finishes, undoing the append() at entry.
        pila.pop()
# for each cycle, removes inverse cycle (if exists).
#def cleanCycles(cycles,root):
# cyclesClean = []
# for cycle in cycles:
# raw = cycle.split()
# raw.append(root)
# raw.reverse()
# if not raw in cyclesClean:
# raw.reverse()
# cyclesClean.append(raw)
# return(cyclesClean)
# (see above for just removing inverse cycles).
def cleanCycles(cycles, root, apertium):
    """Drop redundant cycles.

    A cycle is dropped when another kept cycle already uses the same node
    set, or when the root has many (>5) known translations and the cycle is
    short (<= 5 nodes including the root).
    """
    kept = []
    seen_node_sets = []
    for cycle in cycles:
        nodes = cycle.split()
        nodes.append(root)
        # Big contexts (more than 5 known translations) only keep long cycles.
        if (apertium < 6) or (apertium > 5 and len(nodes) > 5):
            signature = sorted(nodes)
            if signature not in seen_node_sets:
                kept.append(nodes)
                seen_node_sets.append(signature)
    return kept
# Identifies 'potential' Targets in cycles (those not linked to root).
def findTargets(cycles,root,graph):
    """Return the cycle words that are not in the source language and have no
    direct root -> word link in the graph (i.e. candidate new translations)."""
    global lang
    exists = "no"
    targets = []
    flat = reduce(lambda x,y: x+y,cycles) # we just flat cycles list to easy
    reduced = list(set(flat)) # we use set to remove duplicates
    reduced.remove(root) # simply removes root (we don't want root being target)
    for word in reduced:
        items = word.split("-")
        # Entries look like 'word-pos-lang'; skip source-language words.
        if items[-1] != lang:
            for g in graph:
                if ((g[0] == root) & (g[1] == word)):
                    exists = "yes"
            # Keep the word only when no direct root -> word edge was found.
            if not exists == "yes":
                targets.append(word)
            exists = "no"
    return(targets)
def calculateDensity(cyclesClean, targets, graph):
    """For every cycle containing at least one potential target, compute the
    cycle density and emit one [target, density, cycle] record per target."""
    records = []
    for cycle in cyclesClean:
        hits = [target for target in targets if target in cycle]
        if hits:
            density = getDensity(cycle, graph)
            for target in hits:
                records.append([target, density, cycle])
    return records
def getDensity(cycle, graph):
    """Density of *cycle*: number of translation pairs linking two of its
    nodes, divided by the maximum possible n*(n-1) directed edges."""
    edge_count = 0
    for source in cycle:
        for destination in cycle:
            # Count every graph pair connecting two nodes of the cycle.
            for pair in graph:
                if pair[0] == source and pair[1] == destination:
                    edge_count += 1
    size = len(cycle)
    return edge_count / float(size * (size - 1))
def visited(node, path):
    """Return True when *node* already appears on *path*."""
    return any(step == node for step in path)
def graphDensity(nodes, edges):
    """Calculate the density of graph. Density = V / N*(N-1)."""
    max_edges = float(nodes * (nodes - 1))
    return edges / max_edges
def getLanguages(apertium):
    """Return the language code of every already-known translation.

    Each entry looks like 'word-pos-lang'; the language code is the last
    dash-separated field.
    """
    # Fix: removed the leftover 'OJO :' debug prints -- per the usage header
    # this script's stdout is redirected to the output file, so the debug
    # lines were corrupting the result stream.
    return [entry.split("-")[-1] for entry in apertium]
def computeLanguages(languages, target):
    """Return "1" when the potential target's language is already covered by
    the source dictionary, "0" otherwise."""
    target_language = target.split("-")[-1]
    return "1" if target_language in languages else "0"
# Script entry point: evaluate every word in the input dictionary.
main()
| true |
cdb615edb01b655400af0c77ed06cb8108a34adb | Python | rrmina/aim_fairness | /aim_fairness/datasets/health.py | UTF-8 | 10,541 | 2.671875 | 3 | [] | no_license | # Health Dataset
# Preprocessing is largely based on https://github.com/eth-sri/lcifr/blob/master/code/datasets/health.py
# which is also based on https://github.com/truongkhanhduy95/Heritage-Health-Prize
#
# Features : 100 Dimensions ( / {AgeAtFirstClaim, Charlson})
# Sensitive attribute : 1 Dimension (AgeAtFirstClaim)
# Ground Truth Label : 1 Dimension (Charlson Index)
#
# Please note that the features does not contain the sensitive attribute.
# This is in contrast to the assumptions of Ruoss et. al https://github.com/eth-sri/lcifr/issues/1
# which are by the way based on Data Producer - Data Consumer Framework of McNamara et. al
#
# We highly suggest that you concatenate the features and sensitive instead in cases where
# Data Producer is assumed to have access to every feature, including the sensitive attribute
import os
import urllib.request as ur
import zipfile
import torch
import pandas as pd
import numpy as np
# External File Info
health_url = "https://foreverdata.org/1015/content/"  # base URL of the HHP release
health_download_folder = "data/"                      # local cache directory

# File Names
health_zip_filename = "HHP_release3.zip"
health_filenames = {
    "claims": "Claims.csv",
    "drugs": "DrugCount.csv",
    "labs": "LabCount.csv",
    "members": "Members.csv"
}
health_processed_filename = "health_clean.csv"        # cached merged dataframe

# Column Names
column_names = ['MemberID', 'ProviderID', 'Sex', 'AgeAtFirstClaim']
claims_cat_names = ['PrimaryConditionGroup', 'Specialty', 'ProcedureGroup', 'PlaceSvc']

# Label
label_attribute = "max_CharlsonIndex"

# Sensitive Attribute
sensitive_attribute = "AgeAtFirstClaim"
def _download_one(filename):
    """
    Download a file if not present.
    Default save path is "data/" folder.
    """
    filepath = health_download_folder + filename
    if not os.path.exists(health_download_folder):
        os.makedirs(health_download_folder)
    if not os.path.exists(filepath):
        print("Downloading ", filename, " ...")
        # Fix: urllib.request.URLopener is deprecated since Python 3.3 and
        # removed in 3.11; urlretrieve performs the same fetch-to-file.
        ur.urlretrieve(health_url + filename, filepath)
    else:
        print("Found and verified ", filepath)
_download_one( health_zip_filename )
def _read_data_file(zip_path, device, transfer, normalize=True):
# Load Zip File
zf = zipfile.ZipFile(zip_path)
# Preprocess and Save or Load data frame
if not os.path.exists( health_download_folder + health_processed_filename ):
# Load the data frames from the Zip File
df_claims = _preprocess_claims( pd.read_csv( zf.open( health_filenames[ "claims" ] )))
df_drugs = _preprocess_drugs( pd.read_csv( zf.open( health_filenames[ "drugs" ] )))
df_labs = _preprocess_labs( pd.read_csv( zf.open( health_filenames[ "labs" ] )))
df_members = _preprocess_members( pd.read_csv( zf.open( health_filenames[ "members" ] )))
# Merge all the data frames
df_labs_drugs = pd.merge(df_labs, df_drugs, on=['MemberID', 'Year'], how='outer')
df_labs_drugs_claims = pd.merge(df_labs_drugs, df_claims, on=['MemberID', 'Year'], how='outer')
df_health = pd.merge(df_labs_drugs_claims, df_members, on=['MemberID'], how='outer')
# Drop Unnecessary columns
df_health.drop(['Year', 'MemberID'], axis=1, inplace=True)
df_health.fillna(0, inplace=True)
# Save Preprocessed Data Frame
df_health.to_csv( health_download_folder + health_processed_filename, index=False )
else:
# Load Preprocessed Data Frame
df_health = pd.read_csv( health_download_folder + health_processed_filename, sep=',')
# In case of transfer learning task
if (transfer):
drop_cols = [col for col in df_health.columns if col.startswith('PrimaryConditionGroup=')]
df_health.drop(drop_cols, axis=1, inplace=True)
# Divide Dataset
# Label
if (label_attribute == "max_CharlsonIndex"):
labels = 1 - df_health[label_attribute]
else:
labels = df_health[label_attribute]
# Sensitive Attribute
a = np.logical_or(
df_health['AgeAtFirstClaim=60-69'],
np.logical_or(df_health['AgeAtFirstClaim=70-79'], df_health['AgeAtFirstClaim=80+'])
)
# Features
# Drop Sensitive and Label
senstive_drop_cols = [col for col in df_health.columns if col.startswith('AgeAtFirstClaim')]
features = df_health.drop([label_attribute] + senstive_drop_cols, axis=1)
# Get the location of the continuous columns
# This will be used in normalizing their values
continuous_vars = [col for col in features.columns if '=' not in col]
continuous_columns = [features.columns.get_loc(var) for var in continuous_vars]
# one_hot_columns = {}
# for column_name in column_names:
# ids = [i for i, col in enumerate(features.columns) if col.startswith('{}='.format(column_name))]
# if len(ids) > 0:
# assert len(ids) == ids[-1] - ids[0] + 1
# one_hot_columns[column_name] = ids
# print('categorical features: ', one_hot_columns.keys())
# Convert data to torch tensor
x = torch.tensor( features.values.astype(np.float32), device=device)
y = torch.tensor( labels.values.astype(np.int64), device=device )
a = torch.tensor( a.values.astype(np.bool), device=device) * 1
# Normalize the values of continous columns
if (normalize):
columns = continuous_columns if continuous_columns is not None else np.arange(x.shape[1])
mean, std = x.mean(dim=0)[columns], x.std(dim=0)[columns]
x[:, columns] = (x[:, columns] - mean) / std
return x, y, a
def load_dataset(download=True, device="cpu", transfer=False):
    """Entry point: optionally download the raw data, then return the
    (features, labels, sensitive) tensors.

    Parameters:
        download: fetch the raw zip first if it is missing.
        device: torch device string; None selects CUDA when available.
        transfer: drop the PrimaryConditionGroup one-hot columns (transfer task).
    """
    # Fix: removed the no-op ``device = device`` self-assignment and use an
    # identity check against None instead of ``==``.
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    if download:
        _health_download()
    features, label, sensitive = _read_data_file(
        health_download_folder + health_zip_filename, transfer=transfer, device=device)
    return features, label, sensitive
def _preprocess_claims(df_claims):
    """Clean the raw claims table and aggregate it per (Year, MemberID).

    Improvement: the ~30 repetitive per-value ``.loc`` assignments are
    replaced by lookup tables applied in the same order, with identical
    results.  Categorical columns are one-hot encoded, then counts, maxima
    and sums are aggregated per member-year.
    """
    df_claims.loc[df_claims['PayDelay'] == '162+', 'PayDelay'] = 162
    df_claims['PayDelay'] = df_claims['PayDelay'].astype(int)

    # Textual code -> integer lookup tables.
    dsfs_values = {
        '0- 1 month': 1, '1- 2 months': 2, '2- 3 months': 3,
        '3- 4 months': 4, '4- 5 months': 5, '5- 6 months': 6,
        '6- 7 months': 7, '7- 8 months': 8, '8- 9 months': 9,
        '9-10 months': 10, '10-11 months': 11, '11-12 months': 12,
    }
    charlson_values = {'0': 0, '1-2': 1, '3-4': 2, '5+': 3}
    length_of_stay_values = {
        '1 day': 1, '2 days': 2, '3 days': 3, '4 days': 4, '5 days': 5,
        '6 days': 6, '1- 2 weeks': 11, '2- 4 weeks': 21, '4- 8 weeks': 42,
        '26+ weeks': 180,
    }
    for label, value in dsfs_values.items():
        df_claims.loc[df_claims['DSFS'] == label, 'DSFS'] = value
    for label, value in charlson_values.items():
        df_claims.loc[df_claims['CharlsonIndex'] == label, 'CharlsonIndex'] = value
    for label, value in length_of_stay_values.items():
        df_claims.loc[df_claims['LengthOfStay'] == label, 'LengthOfStay'] = value

    df_claims['LengthOfStay'].fillna(0, inplace=True)
    df_claims['LengthOfStay'] = df_claims['LengthOfStay'].astype(int)

    # Missing categories become an explicit "<column>_?" bucket.
    for cat_name in claims_cat_names:
        df_claims[cat_name].fillna(f'{cat_name}_?', inplace=True)

    df_claims = pd.get_dummies(df_claims, columns=claims_cat_names, prefix_sep='=')
    oh = [col for col in df_claims if '=' in col]

    agg = {
        'ProviderID': ['count', 'nunique'],
        'Vendor': 'nunique',
        'PCP': 'nunique',
        'CharlsonIndex': 'max',
        'PayDelay': ['sum', 'max', 'min']
    }
    for col in oh:
        agg[col] = 'sum'

    df_group = df_claims.groupby(['Year', 'MemberID'])
    df_claims = df_group.agg(agg).reset_index()
    df_claims.columns = [
        'Year', 'MemberID', 'no_Claims', 'no_Providers', 'no_Vendors', 'no_PCPs',
        'max_CharlsonIndex', 'PayDelay_total', 'PayDelay_max', 'PayDelay_min'
    ] + oh
    return df_claims
def _preprocess_drugs(df_drugs):
df_drugs.drop(columns=['DSFS'], inplace=True)
# df_drugs['DSFS'] = df_drugs['DSFS'].apply(lambda x: int(x.split('-')[0])+1)
df_drugs['DrugCount'] = df_drugs['DrugCount'].apply(lambda x: int(x.replace('+', '')))
df_drugs = df_drugs.groupby(['Year', 'MemberID']).agg({'DrugCount': ['sum', 'count']}).reset_index()
df_drugs.columns = ['Year', 'MemberID', 'DrugCount_total', 'DrugCount_months']
print('df_drugs.shape = ', df_drugs.shape)
return df_drugs
def _preprocess_labs(df_labs):
df_labs.drop(columns=['DSFS'], inplace=True)
# df_labs['DSFS'] = df_labs['DSFS'].apply(lambda x: int(x.split('-')[0])+1)
df_labs['LabCount'] = df_labs['LabCount'].apply(lambda x: int(x.replace('+', '')))
df_labs = df_labs.groupby(['Year', 'MemberID']).agg({'LabCount': ['sum', 'count']}).reset_index()
df_labs.columns = ['Year', 'MemberID', 'LabCount_total', 'LabCount_months']
print('df_labs.shape = ', df_labs.shape)
return df_labs
def _preprocess_members(df_members):
df_members['AgeAtFirstClaim'].fillna('?', inplace=True)
df_members['Sex'].fillna('?', inplace=True)
df_members = pd.get_dummies(
df_members, columns=['AgeAtFirstClaim', 'Sex'], prefix_sep='='
)
print('df_members.shape = ', df_members.shape)
return df_members
| true |
12107ea64e0b4fb868c29f245a2f4c1685869002 | Python | hehedahehe/quora_project | /quoraproject/quoraproject/test/message_process.py | UTF-8 | 468 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
def processMessage(message):
    """Extract the <div>...</div> fragments from *message*.

    The message is split on the literal tokens 'html', 'css' and 'js';
    each fragment is stripped of backslashes, and everything from its first
    '<' through its last '>' is kept when it contains a <div.
    """
    import re
    fragments = re.split("html|css|js", message)
    divs = []
    for fragment in fragments:
        cleaned = fragment.strip().replace("\\", "")
        if "<div" in cleaned:
            divs.append(cleaned[cleaned.find("<"):cleaned.rfind(">") + 1])
    return "".join(divs)
if __name__ == "__main__":
    # Read the raw dump and print only the extracted <div> fragments.
    with open("message") as mf:
        content = mf.read()
    content = processMessage(content)
    # Fix: parenthesized print so the script also runs under Python 3
    # (single-argument print behaves identically under Python 2).
    print(content)
| true |
45c6495f60aae67e3c4424aff98a468f62c36f33 | Python | prasanna-ranganathan/mypython | /CheckMyIp.py | UTF-8 | 226 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
import os,sys
import urllib
import re
url = "http://checkip.dyndns.org/"
print url
urlread = urllib.urlopen(url).read()
Ip = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1.3}",urlread)
print "My Ip: ",Ip
| true |
18215de64d8daba9a38d10ec2c2102287172a40f | Python | all1m-algorithm-study/2021-1-Algorithm-Study | /week1/Group6/boj1198_yongjoonseo.py | UTF-8 | 1,797 | 3.640625 | 4 | [] | no_license | # 3개의 연속된 점을 선택하여 삼각형 만든 후 다각형에서 제외
# 출력: 마지막 남은 삼각형의 넓이의 최댓값
# 만들 수 있는 모든 삼각형의 넓이 구해서 최댓값 갱신
# 1
def area(dot1, dot2, dot3):
    """Area of the triangle (dot1, dot2, dot3) via the shoelace formula."""
    (x1, y1), (x2, y2), (x3, y3) = dot1, dot2, dot3
    cross = (x1 * y2 + x2 * y3 + x3 * y1) - (x2 * y1 + x3 * y2 + x1 * y3)
    return abs(cross / 2)
def solution(N):
    """Read N points from stdin and print the largest triangle area over all
    triples (brute-force triple loop)."""
    dots = [list(map(int, input().split())) for _ in range(N)]
    best = -1
    for i in range(N - 2):
        for j in range(i + 1, N - 1):
            for k in range(j + 1, N):
                best = max(best, area(dots[i], dots[j], dots[k]))
    print(best)
if __name__ == '__main__':
    # Number of points on the first line, then one "x y" pair per line.
    solution(int(input()))
# 2
from itertools import combinations
def area(dot1, dot2, dot3):
    """Shoelace-formula area of the triangle spanned by the three points."""
    ax, ay = dot1
    bx, by = dot2
    cx, cy = dot3
    return abs(((ax * by + bx * cy + cx * ay) - (bx * ay + cx * by + ax * cy)) / 2)
def solution(N):
    """Read N points and report the maximum triangle area, iterating the
    triples via itertools.combinations."""
    dots = [list(map(int, input().split())) for _ in range(N)]
    best = -1
    for first, second, third in combinations(dots, 3):
        best = max(best, area(first, second, third))
    print(best)
if __name__ == '__main__':
    # Number of points on the first line, then one "x y" pair per line.
    solution(int(input()))
# 3
def combinations(arr, start, r):
    """Recursively yield every r-element combination of arr[start:] as a list.

    (Hand-rolled replacement for itertools.combinations, shadowing the import
    from the previous section.)
    """
    last_first = len(arr) - r + 1
    for index in range(start, last_first):
        head = [arr[index]]
        if r == 1:
            yield head
        else:
            # Recurse for the remaining r-1 elements to the right of index.
            for tail in combinations(arr, index + 1, r - 1):
                yield head + tail
def area(dot1, dot2, dot3):
    """Triangle area: half the absolute shoelace cross-sum of the vertices."""
    a, b = dot1
    c, d = dot2
    e, f = dot3
    positive = a * d + c * f + e * b
    negative = c * b + e * d + a * f
    return abs((positive - negative) / 2)
def solution(N):
    """Read N points and report the maximum triangle area, using the
    hand-rolled combinations generator above."""
    dots = [list(map(int, input().split())) for _ in range(N)]
    best = -1
    for p, q, r in combinations(dots, 0, 3):
        best = max(best, area(p, q, r))
    print(best)
if __name__ == '__main__':
    # Number of points on the first line, then one "x y" pair per line.
    solution(int(input()))
714af6a3acf7f9287d852926f32978c310d26641 | Python | benji011/minimal-probability-calculator | /backend/logger.py | UTF-8 | 1,326 | 2.890625 | 3 | [] | no_license | from flask_cors import CORS
from flask import request
import flask
import json
# Flask application plus CORS so the JS frontend on another origin can POST.
app = flask.Flask(__name__)
CORS(app)
# NOTE(review): CORS is applied twice -- globally above and resource-scoped
# here; the return value of this second call is never used.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
def log_entry(payload):
    """
    Creates a log entry.

    Args:
        payload: Data from the frontend (expects 'date', 'type', 'input'
            and 'result' keys).

    Returns:
        A string value for writing to a log file.
    """
    date = payload['date']
    calculation_type = payload['type']
    inputs = payload['input']
    result = payload['result']
    return (
        f"[{date}] - "
        f"type-of-calculation [{calculation_type}], "
        f"inputs-received [{inputs}], "
        f"result [{result}] \n"
    )
@app.route("/api/logger/", methods=["POST"])
def logger():
    """
    Persist one log entry received from the frontend.

    Args:
        a JSON request body with 'date', 'type', 'input' and 'result'
        keys (see log_entry for the formatting).

    Returns:
        {"status": 200} on success, {"status": 500} when writing fails.
    """
    payload = dict(json.loads(request.data))
    entry = log_entry(payload)
    try:
        with open("logs/log.txt", "a") as log_file:
            log_file.write(entry)
    except Exception:
        return {"status": 500}
    return {"status": 200}
if __name__ == "__main__":
    # Development entry point; production should use a WSGI server instead.
    app.run()
| true |
cb1112d6ecab8e115d3be0e75a2e7fd7bb165f94 | Python | hrjung/python_example | /fct_main.py | UTF-8 | 2,718 | 2.546875 | 3 | [] | no_license | # code: utf-8
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QTextEdit, QVBoxLayout
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtCore import QIODevice, QByteArray
from PyQt5.QtSerialPort import QSerialPort, QSerialPortInfo
# Baud rates offered for the serial link.
# NOTE(review): this tuple is not referenced anywhere in the visible code
# (the port is opened at a hard-coded 115200) -- confirm before removing.
BAUDRATES = (
    QSerialPort.Baud9600,
    QSerialPort.Baud19200,
    QSerialPort.Baud38400,
    QSerialPort.Baud115200,
)
class MyApp(QWidget):
    """Minimal FCT console: opens COM3 at 115200 baud and drives a single
    'PTEST 0' command over the serial link, echoing replies into a text box."""

    def __init__(self):
        super().__init__()
        # Serial-port handle plus a (currently unused) enumeration helper.
        self.serial = QSerialPort()
        self.serial_info = QSerialPortInfo()
        self.initUI()

    def initUI(self):
        """Build the widgets, wire the button callbacks and show the window."""
        self.lbl1 = QLabel('COM3 and 115200')
        self.te = QTextEdit()
        self.te.setAcceptRichText(False)
        self.pb_connect = QPushButton('연결')          # "connect"
        self.pb_ptest = QPushButton('테스트 시작')     # "start test"
        self.pb_connect.clicked.connect(self.open_comport)
        self.pb_ptest.clicked.connect(self.send_test_cmd)
        self.conn_status = False  # whether the port is currently open
        vbox = QVBoxLayout()
        vbox.addWidget(self.lbl1)
        vbox.addWidget(self.pb_connect)
        vbox.addWidget(self.pb_ptest)
        vbox.addWidget(self.te)
        self.setLayout(vbox)
        self.setWindowTitle('FCT Test')
        self.setGeometry(300, 300, 640, 720)
        self.show()

    def open_comport(self):
        """Toggle the COM3 connection: close when open, otherwise configure
        (115200 8N1, no flow control) and open for read/write."""
        if self.conn_status == True:
            self.serial.close()
            self.conn_status = False
            self.pb_connect.setText('연결')            # back to "connect"
        else:
            info = QSerialPortInfo('COM3')
            self.serial.setPort(info)
            self.serial.setBaudRate(115200)
            self.serial.setDataBits(QSerialPort.Data8)
            self.serial.setFlowControl(QSerialPort.NoFlowControl)
            self.serial.setParity(QSerialPort.NoParity)
            self.serial.setStopBits(QSerialPort.OneStop)
            # Incoming bytes are handled asynchronously via readyRead.
            self.serial.readyRead.connect(self.on_serial_read)
            status = self.serial.open(QIODevice.ReadWrite)
            if status == True:
                self.conn_status = True
                self.pb_connect.setText('끊기')        # "disconnect"
            else:
                self.conn_status = False
                self.pb_connect.setText('연결')
        print('connection status: ', self.conn_status)

    def send_test_cmd(self):
        """Send the 'PTEST 0' command over the open serial port."""
        cmd = 'PTEST 0\n'
        cmd_bytes = str.encode(cmd)
        self.serial.write(cmd_bytes)

    def on_serial_read(self):
        """Drain the serial buffer and append any ASCII text to the log box."""
        rcv_text = self.serial.readAll().data().decode(encoding='ascii').strip()
        if len(rcv_text) == 0:
            pass
        else:
            print(rcv_text)
            self.te.insertPlainText(rcv_text+'\n')
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop until the window closes.
    app = QApplication(sys.argv)
    ex = MyApp()
    sys.exit(app.exec_())
34f6b18e5ebe2180cb528059043820ee8d3e8e2b | Python | ravitshrivastav/project-euler | /2.py | UTF-8 | 150 | 3 | 3 | [] | no_license | x1 = 1
x2 = 2
temp = 0
sum = 0
while x2 < 4000000:
if x2%2 == 0:
sum = sum + x2
temp = x2
x2 = x2 + x1
x1 = temp
print sum
| true |
169b56f25ea59bcd4bbcdbe3dc05018f30fcb36f | Python | baewonje/iot_bigdata_- | /python_workspace/02_raspberry_pi/1_led/1_238.py | UTF-8 | 299 | 2.640625 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
# NOTE(review): cleanup() before setmode() normally emits a warning because
# no mode has been set yet — confirm this runs cleanly on the target Pi.
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.OUT)  # LED on BCM pin 4
GPIO.setup(9,GPIO.IN, pull_up_down=GPIO.PUD_UP)  # button on BCM pin 9, pulled up
while True:
    # Button is active-low (pull-up enabled), so a press reads 0.
    if (GPIO.input(9)==0):
        # Blink the LED once: 1 s on, 1 s off.
        GPIO.output(4, GPIO.HIGH)
        time.sleep(1)
        GPIO.output(4, GPIO.LOW)
        time.sleep(1)
| true |
1125735720b6218336e815b53f5dca41adea2386 | Python | mRs-/hosts | /tests/unit/test_hosts_tools.py | UTF-8 | 4,613 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | from HostsTools import hosts_tools
import os
class TestHostTools(object):
    """Unit tests for HostsTools.hosts_tools.

    setup_class writes a throwaway whitelist file and teardown_class removes
    every file the tests created, so a run leaves no artifacts behind.
    """

    TEST_FILE_NAME = 'test-write-domain-list.txt'
    TEST_WHITELIST_FILE_NAME = 'test-write-domain-list-whitelist.txt'
    TEST_DOMAINS = {'a.com', 'b.a.com', 'b.com', 'a.b.com'}
    TEST_WHITELIST = {'b.b.com', 'z.com'}

    def setup_class(self):
        # Create the whitelist fixture file once for the whole class.
        with open(self.TEST_WHITELIST_FILE_NAME, 'w') as file:
            for domain in self.TEST_WHITELIST:
                file.write(domain + '\n')

    # --- is_valid_domain: rejected inputs ---

    def test_none_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain(None)
        assert not is_valid

    def test_empty_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("")
        assert not is_valid

    def test_wildcard_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("*.example.com")
        assert not is_valid

    def test_percent_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("%example.com")
        assert not is_valid

    def test_double_quote_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("\"example.com")
        assert not is_valid

    def test_single_quote_is_not_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("'example.com")
        assert not is_valid

    # --- is_valid_domain: accepted inputs and length boundaries ---

    def test_unicode_is_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain(u"www.с\ud0b0.com")
        assert is_valid

    def test_too_long_is_not_a_valid_domain(self):
        # 255 label chars + ".com" exceeds the limit.
        domain = ("a" * 255) + ".com"
        is_valid = hosts_tools.is_valid_domain(domain)
        assert not is_valid

    def test_long_is_a_valid_domain(self):
        # 251 + 4 = 255 total characters: exactly at the accepted boundary.
        domain = "a" * 251 + ".com"
        is_valid = hosts_tools.is_valid_domain(domain)
        assert is_valid

    def test_naked_is_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("example.com")
        assert is_valid

    def test_www_is_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("www.example.com")
        assert is_valid

    def test_trailing_dot_is_a_valid_domain(self):
        is_valid = hosts_tools.is_valid_domain("www.example.com.")
        assert is_valid

    # --- extract_domain: hosts-file line parsing ---

    def test_extract_basic(self):
        extracted = hosts_tools.extract_domain("0.0.0.0 example.com")
        assert extracted == "example.com"

    def test_extract_trailing_comment(self):
        extracted = hosts_tools.extract_domain("0.0.0.0 example.com # comment")
        assert extracted == "example.com"

    def test_extract_empty_line(self):
        extracted = hosts_tools.extract_domain("")
        assert extracted == ""

    def test_extract_only_comment(self):
        extracted = hosts_tools.extract_domain("# comment")
        assert extracted == ""

    def test_extract_commented_out_entry(self):
        extracted = hosts_tools.extract_domain("# 0.0.0.0 example.com")
        assert extracted == ""

    # --- sort_domains: ordering by root domain, then subdomains ---

    def test_sort_root_domains(self):
        domains = ["y.a", "z.a", "x.a", "c.z", "b.z", "a.z"]
        sorted = hosts_tools.sort_domains(domains)
        assert sorted == ["x.a", "y.a", "z.a", "a.z", "b.z", "c.z"]

    def test_sort_sub_domains(self):
        domains = ["b.y.a", "a.y.a", "y.a", "c.z", "b.a.z", "a.z"]
        sorted = hosts_tools.sort_domains(domains)
        assert sorted == ["y.a", "a.y.a", "b.y.a", "a.z", "b.a.z", "c.z"]

    # --- file header / list round-trips ---

    def test_build_file_header(self):
        file_name = 'TgM&2BXKw0SWVvync@%Az1cN6.txt'
        count = 23235
        header = hosts_tools.build_file_header(file_name, count)
        assert str(count) in header
        assert file_name in header
        # Header must not leak template placeholder brackets.
        assert '[' not in header
        assert ']' not in header

    def test_write_domain_list(self):
        hosts_tools.write_domain_list(self.TEST_FILE_NAME, self.TEST_DOMAINS)
        assert os.path.isfile(self.TEST_FILE_NAME)

    def test_read_domains_list(self):
        # NOTE: relies on test_write_domain_list having created the file
        # earlier in the class run.
        domains = hosts_tools.load_domains_from_list(self.TEST_FILE_NAME)
        assert domains
        assert not self.TEST_DOMAINS.difference(domains)

    def test_missing_whitelist(self):
        whitelist = hosts_tools.load_domains_from_whitelist('not-a-real-file.txt')
        assert len(whitelist) == 0

    def test_reduce_domains(self):
        # Subdomains collapse into their parent domains.
        reduced = hosts_tools.reduce_domains(self.TEST_DOMAINS)
        assert reduced
        assert not {'a.com', 'b.com'}.difference(reduced)

    def teardown_class(self):
        # Remove any files created by setup_class or the write tests.
        if os.path.isfile(self.TEST_FILE_NAME):
            os.remove(self.TEST_FILE_NAME)
        if os.path.isfile(self.TEST_WHITELIST_FILE_NAME):
            os.remove(self.TEST_WHITELIST_FILE_NAME)
| true |
083fe80fa1e5c3e77f3fe77b6cea81c212dd189e | Python | luckyplusten/stencilflow | /stencilflow/compute_graph_nodes.py | UTF-8 | 13,901 | 3.21875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
"""
This file contains all compute graph nodes with their custom implementation of the BaseOperationNodeClass functions.
"""
__author__ = "Andreas Kuster (kustera@ethz.ch)"
__copyright__ = "BSD 3-Clause License"
import ast
import operator
from typing import List, Dict
import stencilflow
from stencilflow.base_node_class import BaseOperationNodeClass
from stencilflow.calculator import Calculator
class Name(BaseOperationNodeClass):
    """Compute-graph node wrapping a plain variable reference (``ast.Name``)."""
    def __init__(self, ast_node: ast, number: int) -> None:
        """Wrap *ast_node* together with its tree-walk position.

        :param ast_node: the ``ast.Name`` this graph node represents
        :param number: numbering assigned during the tree walk
        """
        super().__init__(ast_node, number)

    def generate_name(self, ast_node: ast) -> str:
        """Return the referenced identifier (``ast.Name.id``).

        :param ast_node: abstract syntax tree node of the computation
        :returns: the variable name
        """
        return ast_node.id
class Num(BaseOperationNodeClass):
    """Compute-graph node wrapping a numeric literal (``ast.Num``)."""
    def __init__(self, ast_node: ast, number: int) -> None:
        """Wrap *ast_node* together with its tree-walk position.

        :param ast_node: the ``ast.Num`` this graph node represents
        :param number: numbering assigned during the tree walk
        """
        super().__init__(ast_node, number)

    def generate_name(self, ast_node: ast) -> str:
        """Use the literal value itself as the node's name.

        :param ast_node: abstract syntax tree node of the computation
        :returns: the numeric value (note: not converted to ``str``)
        """
        return ast_node.n
class BinOp(BaseOperationNodeClass):
    """
    The BinOp class is a subclass of the BaseOperationNodeClass and represents the binary operation node in the
    computation tree.
    """
    def __init__(self, ast_node: ast, number: int) -> None:
        """
        Create new BinOp node with given initialization parameters.
        :param ast_node: abstract syntax tree node of the computation
        :param number: tree walk numbering
        """
        # initialize superclass
        super().__init__(ast_node, number)

    """
    Mapping between ast mathematical operations and the string name of the operation.
    """
    _OP_NAME_MAP: Dict[type(ast), str] = {
        ast.Add: "add",
        ast.Sub: "sub",
        ast.Mult: "mult",
        ast.Div: "div",
        # NOTE(review): ast.Invert is a unary operator and should not appear
        # as a BinOp's .op — confirm whether this entry is ever hit.
        ast.Invert: "neg"
    }

    """
    Mapping between the string name of the operation and its symbol.
    """
    _OP_SYM_MAP: Dict[str, str] = {
        "add": "+",
        "sub": "-",
        "mult": "*",
        "div": "/",
        "neg": "-"
    }

    def generate_name(self, ast_node: ast) -> str:
        """
        Binary operation implementation of generate_name: maps the node's
        operator type to its string name (e.g. ast.Add -> "add").
        :param ast_node: abstract syntax tree node of the computation
        :returns generated name
        """
        return self._OP_NAME_MAP[type(ast_node.op)]

    def generate_op_sym(self) -> str:
        """
        Generates the symbol of the mathematical operation out of the operation string (e.g. add, sub, ..).
        :returns generated symbol
        """
        return self._OP_SYM_MAP[self.name]
class Call(BaseOperationNodeClass):
    """Compute-graph node for a function call such as ``sin`` or ``cos``."""
    def __init__(self, ast_node: ast, number: int) -> None:
        """Wrap *ast_node* together with its tree-walk position.

        :param ast_node: the ``ast.Call`` this graph node represents
        :param number: numbering assigned during the tree walk
        """
        super().__init__(ast_node, number)

    def generate_name(self, ast_node: ast) -> str:
        """Use the called function's identifier as the node's name.

        :param ast_node: abstract syntax tree node of the computation
        :returns: the function name (``ast_node.func.id``)
        """
        return ast_node.func.id
class Output(BaseOperationNodeClass):
    """Compute-graph node for the assignment target of the computation."""
    def __init__(self, ast_node: ast, number: int) -> None:
        """Wrap *ast_node* together with its tree-walk position.

        :param ast_node: the ``ast.Assign`` this graph node represents
        :param number: numbering assigned during the tree walk
        """
        super().__init__(ast_node, number)

    def generate_name(self, ast_node: ast) -> str:
        """Use the first assignment target's identifier as the node's name.

        :param ast_node: abstract syntax tree node of the computation
        :returns: name of the assigned variable
        """
        return ast_node.targets[0].id
class Subscript(BaseOperationNodeClass):
    """
    The Subscript class is a subclass of the BaseOperationNodeClass and represents the array field access node in the
    computation tree.
    """
    def __init__(self, ast_node: ast, number: int, dimensions: int,
                 inputs) -> None:
        """
        Create new Subscript node with given initialization parameters.
        :param ast_node: abstract syntax tree node of the computation
        :param number: tree walk numbering
        :param dimensions: per-axis extents of the accessed field
            (NOTE(review): annotated ``int`` but iterated in create_index —
            presumably a sequence of ints; confirm and fix the annotation)
        :param inputs: mapping field name -> config dict; an optional
            'input_dims' entry lists which iterators the field spans
        """
        # initialize superclass
        super().__init__(ast_node, number)
        # initialize local fields
        self.dimensions = dimensions
        self.inputs = inputs
        self.index: List[int] = list()
        self.create_index(ast_node)

    """
    Mapping between the index of the operation and its position (actually always 0).
    """
    _VAR_MAP: Dict[str, int] = {i: 0 for i in stencilflow.ITERATORS}

    """
    Mapping between the operation and its symbol.
    """
    _OP_SYM_MAP: Dict[type(ast), str] = {ast.Add: "+", ast.Sub: "-"}

    def create_index(self, ast_node: ast) -> None:
        """
        Create the numerical index of the array field access e.g. convert [i+2, j-3, k] to [2,-3,0],
        stored in self.index as a 3-entry list padded/aligned with None.
        :param ast_node: abstract syntax tree node of the computation
        """
        # create index
        self.index = list()
        if hasattr(ast_node.slice.value, "elts"):
            # multi-axis subscript, e.g. a[i, j+1, k]
            for slice in ast_node.slice.value.elts:
                if isinstance(slice, ast.Name):
                    # bare iterator -> offset 0
                    self.index.append(self._VAR_MAP[slice.id])
                elif isinstance(slice, ast.BinOp):
                    # note: only support for index variations [i, j+3,..]
                    # read index expression
                    expression = str(slice.left.id) + self._OP_SYM_MAP[type(
                        slice.op)] + str(slice.right.n)
                    # convert [i+1,j, k-1] to [1, 0, -1]
                    calculator = Calculator()
                    self.index.append(
                        calculator.eval_expr(self._VAR_MAP, expression))
        else:
            # single-axis subscript, e.g. a[i] or a[i+1]
            slice = ast_node.slice.value
            if isinstance(slice, ast.Name):
                self.index.append(self._VAR_MAP[slice.id])
            elif isinstance(slice, ast.BinOp):
                # note: only support for index variations [i, j+3,..]
                # read index expression
                expression = str(slice.left.id) + self._OP_SYM_MAP[type(
                    slice.op)] + str(slice.right.n)
                # convert [i+1,j, k-1] to [1, 0, -1]
                calculator = Calculator()
                self.index.append(
                    calculator.eval_expr(self._VAR_MAP, expression))
        # Prune extra dimensions from index: keep only the trailing entries
        # for axes with extent > 1 (assumes degenerate axes come first —
        # NOTE(review): confirm this ordering assumption).
        num_dims = sum(d > 1 for d in self.dimensions)
        if len(self.index) > num_dims:
            self.index = self.index[len(self.index)-num_dims:]
        # Now use the specified input_dims to create a 3D index with Nones
        # for iterators the field does not span.
        if self.name in self.inputs:
            input_dims = self.inputs[self.name]["input_dims"]
            self.index = [
                self.index[input_dims.index(x)] if x in input_dims else None
                for x in stencilflow.ITERATORS
            ]
        # Finally prepend None so all indices are 3D
        if len(self.index) < len(stencilflow.ITERATORS):
            self.index = ([None] *
                          (len(stencilflow.ITERATORS) - len(self.index)) +
                          self.index)

    def generate_name(self, ast_node: ast) -> str:
        """
        Subscript (array field access) implementation of generate_name.
        :param ast_node: abstract syntax tree node of the computation
        :returns generated name (the accessed field's identifier)
        """
        return ast_node.value.id

    def generate_label(self) -> str:
        """
        Subscript (array field access) implementation of generate_label,
        e.g. "a[0, -1, None]".
        :returns generated label
        """
        return str(self.name) + str(self.index)
class Ternary(BaseOperationNodeClass):
    """Compute-graph node for a conditional expression.

    Represents ``expression_true if comparison else expression_false``.
    """
    def __init__(self, ast_node: ast, number: int) -> None:
        """Wrap *ast_node* together with its tree-walk position.

        :param ast_node: the conditional-expression AST node
        :param number: numbering assigned during the tree walk
        """
        super().__init__(ast_node, number)

    def generate_name(self, ast_node: ast) -> str:
        """All ternary nodes share the fixed name ``"?"``.

        :param ast_node: abstract syntax tree node of the computation
        :returns: the literal string ``"?"``
        """
        return "?"
class Compare(BaseOperationNodeClass):
    """
    The Compare class is a subclass of the BaseOperationNodeClass and represents the comparison of two
    expressions (only the first operator of a chained comparison is used).
    """
    def __init__(self, ast_node: ast, number: int) -> None:
        """
        Create new Compare node with given initialization parameters.
        :param ast_node: abstract syntax tree node of the computation
        :param number: tree walk numbering
        """
        # set comparison operator field; only ops[0] is honoured, so chained
        # comparisons such as ``a < b < c`` are not supported
        self.op: operator = self._COMP_MAP[type(ast_node.ops[0])]
        # initialize superclass
        super().__init__(ast_node, number)

    """
    Mapping between the abstract syntax tree (python) comparison operator and the operator comparison operator.
    """
    _COMP_MAP: Dict[type(ast), type(operator)] = {
        ast.Lt: operator.lt,
        ast.LtE: operator.le,
        ast.Gt: operator.gt,
        ast.GtE: operator.ge,
        ast.Eq: operator.eq
    }

    """
    Mapping between the operator comparison operator and its mathematical string symbol.
    """
    _COMP_SYM: Dict[type(operator), str] = {
        operator.lt: "<",
        operator.le: "<=",
        operator.gt: ">",
        operator.ge: ">=",
        operator.eq: "=="
    }

    def generate_name(self, ast_node: ast) -> str:
        """
        Comparison operator implementation of generate_name: the node's name
        is the operator's symbol (e.g. "<=").
        :param ast_node: abstract syntax tree node of the computation
        :returns generated name
        """
        return self._COMP_SYM[self.op]
class BoolOp(BaseOperationNodeClass):
    """
    The BoolOp class is a subclass of the BaseOperationNodeClass and represents
    a boolean conjunction/disjunction (``and`` / ``or``) in the computation tree.
    """
    def __init__(self, ast_node: ast, number: int) -> None:
        """
        Create new BoolOp node with given initialization parameters.
        :param ast_node: abstract syntax tree node of the computation
        :param number: tree walk numbering
        """
        # resolve the boolean operator before the superclass processes the node
        self.op: operator = self._BOOLOP_MAP[type(ast_node.op)]
        # initialize superclass
        super().__init__(ast_node, number)

    """
    Mapping between the ast boolean operation and the operator function.
    """
    _BOOLOP_MAP: Dict[type(ast), type(operator)] = {
        ast.And: operator.and_,
        ast.Or: operator.or_,
    }

    """
    Mapping between the operator function and its C-style symbol.
    """
    _BOOLOP_SYM: Dict[type(operator), str] = {
        operator.and_: "&&",
        operator.or_: "||",
    }

    def generate_name(self, ast_node: ast) -> str:
        """
        Boolean operator implementation of generate_name: the node's name is
        the operator's symbol ("&&" or "||").
        :param ast_node: abstract syntax tree node of the computation
        :returns generated name
        """
        return self._BOOLOP_SYM[self.op]
class UnaryOp(BaseOperationNodeClass):
    """
    The UnaryOp operator class is a subclass of the BaseOperationNodeClass and represents unary operations. In our
    case we only support negation (mathematical - sign) as unary operation.
    """
    def __init__(self, ast_node: ast, number: int) -> None:
        """
        Create new unary operation node with given initialization parameters.
        :param ast_node: abstract syntax tree node of the computation
        :param number: tree walk numbering
        """
        # set unary operator field (raises KeyError for anything but ast.USub)
        self.op: operator = self._UNARYOP_MAP[type(ast_node.op)]
        # initialize superclass
        super().__init__(ast_node, number)

    """
    Mapping between the ast unary operation and the operator operation.
    """
    _UNARYOP_MAP: Dict[type(ast), type(operator)] = {ast.USub: operator.sub}

    """
    Mapping between the operator unary operator and its mathematical string.
    """
    _UNARYOP_SYM: Dict[type(operator), str] = {operator.sub: "neg"}

    """
    Mapping between the mathematical string and its symbol.
    """
    _UNARYOP_SYM_NAME = {"neg": "-"}

    def generate_name(self, ast_node: ast) -> str:
        """
        Unary operator implementation of generate_name ("neg" for negation).
        :param ast_node: abstract syntax tree node of the computation
        :returns generated name
        """
        return self._UNARYOP_SYM[self.op]

    def generate_op_sym(self) -> str:
        """
        Generates the symbol of the mathematical operation out of the operation string.
        :returns generated symbol ("-")
        """
        return self._UNARYOP_SYM_NAME[self.name]
| true |
8535e4f4c9004f1ecd0fbf3d5c10e87f241efdd6 | Python | vaibhavmule/learning-python | /learn-python-hard-way/ex4.py | UTF-8 | 1,313 | 4.53125 | 5 | [] | no_license | # This is Exercise 4 : Variables and Names
# cars given value of 100
cars = 100
# assigning 4.0 value to space_in_a_car variable
space_in_a_car = 4
# There are 30 drivers
drivers = 30
# There are 90 Passengers
passengers = 90
# Calculate how many cars are not driven by subtracting "drivers" from "cars".
cars_not_driven = cars - drivers
# Creating new variable and assigning value of drivers.
cars_driven = drivers
# Calculate car capacity by multiplying cars driven into space in car.
carpool_capacity = cars_driven * space_in_a_car
# Calculate Average passengers by dividing passengers to cars driven.
average_passengers_per_car = passengers / cars_driven
# Print the cars which are avalable with sentence.
print "There are", cars, "cars avalable."
# Print how many drivers are there with sentence.
print "There are only", drivers, "drivers available."
# Print how many cars are empty today with sentence.
print "There will be", cars_not_driven, "empty cars today."
# Print carpool capacity with sentence.
print "We can transport", carpool_capacity, "people today."
# Print how many passengers to carpool today with sentence.
print "We have", passengers, "to carpool today."
# Print average passenger per car with sentence.
print "We need to put about", average_passengers_per_car, "in each car." | true |
1e3817cc09e5be62b0e576927b4fa6f1ff35cf5d | Python | Rosy-S/CTCI | /moderate_questions.py | UTF-8 | 2,612 | 4.09375 | 4 | [] | no_license |
# 16.6: Given two arrays of integers, compute the pair of values (one value in each array)
# with the smallest non-negative difference. return the difference.
from sys import maxint
def smallest_difference(array1, array2):
    """Return the smallest non-negative difference between any pair of values
    taken one from each array (CTCI 16.6).

    Fixes over the original: no longer mutates the caller's lists (sorts
    copies), drops the per-iteration debug print, and replaces the
    Python-2-only ``sys.maxint`` seed with ``float("inf")``.

    :param array1: first sequence of integers
    :param array2: second sequence of integers
    :return: minimum |a - b| over all pairs; ``inf`` if either input is empty
    """
    first = sorted(array1)
    second = sorted(array2)
    a = b = 0
    difference = float("inf")
    # Two-pointer walk over the sorted arrays: advance whichever side is
    # smaller, since that is the only move that can shrink the gap.
    while a < len(first) and b < len(second):
        difference = min(difference, abs(first[a] - second[b]))
        if first[a] < second[b]:
            a += 1
        else:
            b += 1
    return difference
# print smallest_difference([1, 3, 15, 11, 2], [23, 127, 235, 19, 8, 9, 20, 40])
# 16.7: Write a method that finds the maximum of two umbers. You should not use
# if-else or any other comparison opperator.
def find_max(a, b):
    """Return the maximum of two numbers without using if-else or comparison
    operators (CTCI 16.7) — the original stub was never implemented.

    Uses the identity max(a, b) == (a + b + |a - b|) / 2; ``abs`` hides the
    sign handling so no explicit comparison appears in this code.

    :param a: first number
    :param b: second number
    :return: the larger value, as a float (division by 2)
    """
    total = a + b
    spread = abs(a - b)
    return (total + spread) / 2
# 16.8 Given any integer, print an English phrase that describes the integer
def translate_num(num):
num = str(num)
results = []
place_dictionary = {3: 'hundred', 4: 'thousand', 6: 'mil'}
while len(num) > 3:
length = len(num)
if length > 9:
placeholder = length - 9
billon = num[:placeholder]
results.append(hundreds_breakdown(billon) + " billon,")
num = num[placeholder:]
elif length > 6:
placeholder = length - 6
millon = num[:placeholder]
results.append(hundreds_breakdown(millon) + " millon,")
num = num[placeholder:]
elif length > 3:
placeholder = length - 3
thousand = num[:placeholder]
results.append(hundreds_breakdown(thousand) + " thousand,")
num = num[placeholder:]
results.append(hundreds_breakdown(num))
return results
def hundreds_breakdown(num):
ones_dictionary = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
two_dictionary = {10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}
doubles_dictionary = {20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
results = []
num = str(int(num))
print "this is the hundreds num: ", num
if len(num) == 3:
results.append(ones_dictionary[int(num[0])] + " hundred ")
num = num[1:]
if len(num) == 2:
if int(num) in two_dictionary:
results.append(two_dictionary[int(num)])
if int(num) in doubles_dictionary:
results.append(doubles_dictionary[int(num)])
else:
results.append(doubles_dictionary[int(num[0])] + "-" + ones_dictionary[int(num[1])])
if len(num) == 1:
if int(num) == 0:
pass
else:
results.append(ones_dictionary[int(num)])
return "".join(results)
print (translate_num(1000))
| true |
8d41e8fba4beff7f943416567a362afcf31b59f4 | Python | bilgrami/pakguru | /project/pakguru_project/common_utils_app/management/commands/remove_dump.py | UTF-8 | 1,194 | 2.734375 | 3 | [] | no_license | import os
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Management command: delete every ``*.json`` fixture dump for each of
    the given apps (comma-separated app names as a single argument)."""

    def add_arguments(self, parser):
        # One positional argument holding a comma-separated list of app names.
        help_text = 'CSV list of app names \n'
        parser.add_argument('app_names', type=str, help=help_text)

    def handle(self, *args, **options):
        app_names = options['app_names']
        if not app_names:
            raise CommandError(f'App Names is required')
        apps = app_names.split(',')
        for app in apps:
            # Fixture directory resolved relative to THIS file, two package
            # levels up: ../../<app>/fixtures — TODO confirm layout matches.
            fixture_folder_path = f'../../{app}/fixtures'
            BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # noqa: E501
            root = os.path.join(BASE_DIR, fixture_folder_path)
            if not os.path.exists(root):
                raise CommandError('Folder "%s" does not exist' % root)
            else:
                print(f'[{app}] Found folder to remove files: {root}')
            # Only JSON dumps are removed; any other files are left in place.
            for filename in os.listdir(root):
                if filename.endswith(".json"):
                    input_file_path = os.path.join(root, filename)
                    os.remove(input_file_path)
                    print(f'Removed file: {input_file_path}')
| true |
7ef4606dd8abac239a87a94acf5dcebdaa9b892f | Python | tunealog/python-study | /ex/ex_033_Quiz_Exceptions.py | UTF-8 | 1,274 | 3.921875 | 4 | [] | no_license | # Basic of Python
# Title : Quiz(Exceptions)
# Date : 2020-07-04
# Creator : tunealog
# Quiz) Chicken order System
#
# Rule1 : When input under 1 or number, Return ValueError
# Output Message : "Wrong Number or Not Number"
# Rule2 : Order is limited to 10 Chiken by onetime
# Output Message : "Sorry, Today is over"
# Return SoldOutError
class SoldOutError(Exception):
    """Raised when the day's chicken stock reaches zero (quiz Rule 2)."""
    pass
# Daily stock and the next queue number handed out, shared by the order loop.
chicken = 10
waiting = 1
while(True):
    try:
        print("[Stock of Chicken : {0}]".format(chicken))
        order = int(input("How many chickens would you like to order?"))
        if order > 10:
            # NOTE(review): the quiz caps a single order at 10, but this
            # `break` ends the whole program instead of re-prompting —
            # confirm that is the intended behaviour.
            print("Sorry, Please order for under 10 Chikens")
            break
        elif order > chicken:
            print(
                "Not enough chickens. Please order under stock [Stock : {0}]".format(chicken))
        elif order < 1:
            # Rule 1: non-positive counts are rejected via ValueError
            # (int() above already raises ValueError for non-numeric input).
            raise ValueError
        else:
            print("[Waiting Number {0}] Order complete of {1} chickens"
                  .format(waiting, order))
            waiting += 1
            chicken -= order
            if chicken == 0:
                # Rule 2: selling the last chicken ends the day.
                raise SoldOutError
    except ValueError:
        print("Wrong Number or Not Number")
    except SoldOutError:
        print("Sorry, Today is over")
        break
| true |
dace2f31cb390a9855bcf41665d664ad1baa4db8 | Python | vahndi/probability | /tests/shared.py | UTF-8 | 1,449 | 2.953125 | 3 | [
"MIT"
] | permissive | from numpy import random
from pandas import ExcelFile, Series, read_excel, DataFrame
from tests.paths import FN_PANDAS_TESTS
xlsx = ExcelFile(str(FN_PANDAS_TESTS))
def read_distribution(sheet_name: str) -> Series:
    """Load one sheet of the shared test workbook as a probability Series.

    Every column except 'p' and '_'-prefixed helper columns becomes an
    index level; the returned Series holds the 'p' values.
    """
    frame = read_excel(xlsx, sheet_name)
    index_cols = [
        name for name in frame.columns
        if name != 'p' and not name.startswith('_')
    ]
    return frame.set_index(index_cols)['p']
def make_joint_data() -> DataFrame:
    """Return a reproducible 100x4 frame of random integers in {1, 2, 3}.

    The RNG is reseeded on every call, so repeated calls yield identical data.
    """
    random.seed(123)
    values = random.randint(1, 4, (100, 4))
    return DataFrame(values, columns=list('ABCD'))
def get_joint_distribution(data_set: DataFrame) -> Series:
    """Empirical joint distribution over all columns of *data_set*.

    Groups by every column, counts occurrences and normalizes by the number
    of rows; the result is named 'p'.
    """
    counts = data_set.groupby(data_set.columns.tolist()).size()
    return (counts / len(data_set)).rename('p')
def series_are_equivalent(series_1: Series, series_2: Series) -> bool:
    """
    Determine if the 2 series share the same items.

    N.B. where series 1 has p(0), the associated item may be missing from
    series 2.

    Fix: ``Series.iteritems`` was removed in pandas 2.0 — use ``.items()``.

    :param series_1: Series of probabilities indexed by variable values
    :param series_2: Series to compare against
    :return: True if every non-zero item of series_1 matches series_2
        (within 1e-10) and both share the same index levels
    """
    d1 = series_1.copy().reset_index()
    cols_1 = sorted([c for c in d1.columns if c != 'p'])
    cols_p = cols_1 + ['p']
    s1 = d1[cols_p].set_index(cols_1)['p']
    d2 = series_2.copy().reset_index()
    cols_2 = sorted([c for c in d2.columns if c != 'p'])
    if cols_1 != cols_2:
        # Different index levels cannot describe the same distribution.
        return False
    s2 = d2[cols_p].set_index(cols_2)['p']
    for k, v in s1.items():  # .items(): iteritems was removed in pandas 2.0
        if v == 0:
            # Zero-probability entries may legitimately be absent from s2.
            continue
        if k not in s2.keys() or abs(s2[k] - v) > 1e-10:
            return False
    return True
| true |
c4beadfcc4ebda118cb566174288635e87324df9 | Python | saptar007/Python | /nw_automation_nick_russo/Module3_edit/ssh_jinja_version.py | UTF-8 | 2,197 | 2.53125 | 3 | [] | no_license | import time
import paramiko
import yaml
import smtplib
import re
#from PyYAML import yaml
from jinja2 import Environment, FileSystemLoader
email = 'saptbane@cisco.com'
def send_email(email_address,email_body):
Email_Content = "Subject:Core Found!\n"+ email_body
smtpObj = smtplib.SMTP('outbound.cisco.com', 25)
smtpObj.ehlo()
smtpObj.sendmail('noreply@cisco.com',email_address, Email_Content )
smtpObj.quit()
def send_cmd(conn, command):
conn.send(command+ "\n")
time.sleep(2.0)
def get_output(conn):
return conn.recv(65535).decode("utf-8")
def is_core(output,host):
model_regex = re.compile(r"core_(?P<file>\S+)")
model_match = model_regex.search(output)
if model_match:
email_subject = "Core file "+(model_match.group("file"))+" found on "+host
print (email_subject)
send_email(email, email_subject)
def main():
with open("host_yaml/host.yml", "r") as hosts:
host_root = yaml.load(hosts, Loader=yaml.FullLoader)
for host in host_root["host_list"]:
# paramiko can be client or server, we are using client here
conn_param = paramiko.SSHClient()
# We dont want paramiko to refuse connection due to missing SSH Keys
conn_param.set_missing_host_key_policy(paramiko.AutoAddPolicy())
conn_param.connect (hostname=host["ip"], username="admin", password=host["passw"], look_for_keys=False, allow_agent=False)
conn = conn_param.invoke_shell()
time.sleep(2.0)
#print ("test")
send_cmd(conn, "expert")
# testing this using only 1 command and will move to a list when testing multiple commands
commands = [
#"show version",
#"expert",
"ls /ngfw/var/common | grep core",
"ls /ngfw/var/common | grep core"
]
for command in commands:
#time.sleep(1.0)
send_cmd(conn, command)
time.sleep(1.0)
result = (get_output(conn))
is_core(result,host["ip"])
conn.close()
if __name__ == "__main__":
while True:
#This will keep the script running for the time (in secs) in a loop
time.sleep(600)
main() | true |
5c3978a08426bf0a73d3163ba792e6908c3d919e | Python | compucorp/odoo_civicrm_sync | /models/res_partner.py | UTF-8 | 10,205 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import logging
import time
from collections import namedtuple
from datetime import datetime
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
_logger = logging.getLogger(__name__)

# Fallback text when no specific error message applies.
UNKNOWN_ERROR = _("Unknown error when updating res.partner data")

# Error texts keyed by symbolic name; '{}' placeholders are filled with
# str.format at the point of use.
ERROR_MESSAGE = {
    'title_error': _("This title doesn't exist in ODOO: {}"),
    'country_error': _("This country_iso_code doesn't exist in ODOO: {}"),
    'missed_required_parameter': _(
        "Wrong CiviCRM request - missed required field: {}"),
    'invalid_parameter_type': _(
        "Wrong CiviCRM request - invalid \"{}\" parameter "
        "data type: {} expected {}"),
    'duplicated_partner_with_contact_id': _(
        "You cannot have two partners with the same civicrm Id"),
}
class ResPartner(models.Model):
    """Odoo partner extended with a CiviCRM contact id and a JSON-RPC sync
    endpoint (see civicrm_sync below)."""

    _inherit = "res.partner"

    # CiviCRM contact id mirrored onto the Odoo partner; uniqueness is
    # enforced by the SQL constraint below.
    x_civicrm_id = fields.Integer(string='Civicrm Id', required=False,
                                  help='Civicrm Id')

    _sql_constraints = [
        ('x_civicrm_id', 'unique(x_civicrm_id)',
         ERROR_MESSAGE['duplicated_partner_with_contact_id'])
    ]
    @api.model
    def civicrm_sync(self, input_params):
        """Synchronizes CiviCRM contact to Odoo partner.
        Creates new partners if not exists and updates is it is
        present in Odoo. Returns back to CiviCRM assigned partner_id and
        update_date and data processing status.

         :param input_params: dict of data:{
                    'is_company': bool,
                    'x_civicrm_id': int,
                    'name': str,
                    'display_name': str,
                    'title': str,
                    'street': str,
                    'street2': str,
                    'city': str,
                    'zip': str,
                    'country_iso_code': str,
                    'website': str,
                    'phone': str,
                    'mobile': str,
                    'fax': str,
                    'email': str,
                    'create_date': int,
                    'write_date': int,
                    'active': bool,
                    'customer': bool
                    }
         :return: data in dictionary format: {
                    'is_error': int, value from list [0, 1]
                    'error_log': str, not empty when is_error = 1
                    'contact_id': int, CiviCRM contact_id
                    'partner_id': int, Odoo partner_id
                    'timestamp': float, respond timestamp
                    }
        """
        # NOTE(review): request state (error_log, response_data, vals) is kept
        # on `self` and shared between the helper methods below — confirm the
        # Odoo worker model keeps this safe across concurrent requests.
        self.error_log = []
        # Build response dictionary
        self.response_data = {'is_error': 0}
        # Validate CiviCRM input request structure and data
        if not self._validate_civicrm_sync_input_params(input_params):
            return self._get_civicrm_sync_response()
        # Check if CiviCRM contact id exists in ODOO
        # (active_test=False also matches archived partners)
        partner = self.with_context(active_test=False).search(
            [('x_civicrm_id', '=', self.vals.get('x_civicrm_id'))])
        # Assign ODOO partner_id if exists
        self.response_data.update(partner_id=partner.id)
        _logger.debug('partner = {}'.format(partner))
        # Create or update res.partner data
        self.save_partner(partner)
        return self._get_civicrm_sync_response()
    def _validate_civicrm_sync_input_params(self, input_params):
        """Validates input parameters structure and data type

         :param input_params: dictionary of input parameters
         :return: validation status True or False
        """
        self.vals = input_params
        # ParamType holds (expected type(s), required?, converter, default).
        # NOTE(review): the namedtuple is (mis)named 'Point' — rename candidate.
        ParamType = namedtuple('Point', ['type', 'required',
                                         'convert_method', 'default'])
        param_map = {
            'is_company': ParamType(bool, True, None, None),
            'x_civicrm_id': ParamType(int, False, None, None),
            'name': ParamType(str, True, None, None),
            'display_name': ParamType(str, True, None, None),
            'title': ParamType(str, False, None, None),
            'street': ParamType(str, False, None, None),
            'street2': ParamType(str, False, None, None),
            'city': ParamType(str, False, None, None),
            'zip': ParamType(str, False, None, None),
            'country_iso_code': ParamType(str, False, None, None),
            'website': ParamType(str, False, None, None),
            'phone': ParamType(str, False, None, None),
            'mobile': ParamType(str, False, None, None),
            'fax': ParamType(str, False, None, None),
            'email': ParamType(str, True, None, None),
            'create_date': ParamType((int, str), False,
                                     self.convert_timestamp_param, None),
            'write_date': ParamType((int, str), False,
                                    self.convert_timestamp_param, None),
            'active': ParamType(bool, True, None, None),
            'customer': ParamType(bool, True, None, True)
        }

        for key, param_type in param_map.items():
            value = self.vals.get(key, param_type.default)
            if param_type.required and value is None:
                self.error_log.append(ERROR_MESSAGE[
                                          'missed_required_parameter'].format(
                    key))
            # NOTE(review): an optional key that is absent (value None) still
            # fails this isinstance check and is logged as an error — confirm
            # that is intended.
            elif not isinstance(value, param_type.type):
                self.error_log.append(ERROR_MESSAGE['invalid_parameter_type']
                                      .format(key, type(value),
                                              param_type.type))

            # Assign CiviCRM contact_id (re-assigned every iteration; could
            # be hoisted out of the loop)
            x_civicrm_id = self.vals.get('x_civicrm_id')
            self.response_data.update(contact_id=x_civicrm_id)

            if value and param_type.convert_method:
                new_param = param_type.convert_method(key=key, value=value)
                _logger.debug(new_param)
                self.vals[key] = new_param

        # Check if CiviCMR contact's title and country_iso_code exists
        # and have appropriated ids in ODOO
        self.lookup_country_id()
        self.lookup_title_id()

        return False if self.error_log else True
def convert_timestamp_param(self, **kwargs):
"""Converts timestamp parameter into datetime string according
:param kwargs: dictionary with value for conversion
:return: str in DATETIME_FORMAT
"""
timestamp = kwargs.get('value')
try:
return datetime.fromtimestamp(timestamp).strftime(DATETIME_FORMAT)
except Exception as error:
_logger.error(error)
self.error_log.append(str(error))
self.error_handler()
    def _get_civicrm_sync_response(self):
        """Finalize and return the sync response dictionary.

        Runs the error handler first so that ``is_error``/``error_log`` are
        added to the response when any errors were collected.

        :return: response data in dictionary format
        """
        self.error_handler()
        return self.response_data
def error_handler(self):
"""Checks for errors and change response_data if exist
:return: True if error else False
"""
is_error = 1
if self.error_log:
self.response_data.update(is_error=is_error,
error_log=self.error_log)
return True
return False
def lookup_country_id(self):
""" Lookups the ODOO ids for contact country_iso_code
If id is present assign it to parent object
Else return error message
"""
country_iso_code = self.vals.get('country_iso_code')
if country_iso_code:
country_id = self.env['res.country'].search(
[('code', '=', str(country_iso_code))]).id
if not country_id:
self.error_log.append(
ERROR_MESSAGE.get('country_error', UNKNOWN_ERROR).format(
country_iso_code))
else:
self.vals.update(country_id=country_id)
def lookup_title_id(self):
""" Lookups the ODOO ids for contact title
If id is present assign it to parent object
Else return error message
"""
title = self.vals.get('title')
if title:
title_id = self.env['res.partner.title'].search(
['|', ('name', '=', str(title)),
('shortcut', '=', str(title))]).id
if not title_id:
self.error_log.append(
ERROR_MESSAGE.get('title_error', UNKNOWN_ERROR).format(
title))
else:
self.vals.update(title=title_id)
    def save_partner(self, partner):
        """Create or update a res.partner record from ``self.vals``.

        On success the partner id and its last-write timestamp are added to
        ``self.response_data``; on failure the error is logged and folded
        into the response via ``error_handler``.

        :param partner: existing res.partner recordset to update, or a
            falsy value to create a new partner instead
        """
        status = True
        try:
            # Update the existing record, or create a fresh one.
            if partner:
                status = partner.write(self.vals)
            else:
                partner = self.create(self.vals)
            # Report the Odoo partner id back to CiviCRM.
            self.response_data.update(partner_id=partner.id)
            if not (partner or status):
                # Neither create nor write succeeded and no exception was
                # raised — record a generic failure.
                self.error_log.append(UNKNOWN_ERROR)
                return
            # Report the record's last modification time as a unix timestamp.
            timestamp = self.timestamp_from_string(partner.write_date)
            self.response_data.update(timestamp=int(timestamp))
        except Exception as error:
            _logger.error(error)
            self.error_log.append(str(error))
            self.error_handler()
@staticmethod
def timestamp_from_string(date_time):
""" Converts string in datetime format to timestamp
:param date_time: str, string in datetime format
:return: float, timestamp
"""
dt = datetime.strptime(date_time, DATETIME_FORMAT)
return time.mktime(dt.timetuple())
| true |
764752afea1514abc8a05ab509ddb0bd536c87f1 | Python | m4qo5/rabbitmq-watermarks | /pika_abstraction.py | UTF-8 | 778 | 2.84375 | 3 | [] | no_license | '''Creating basic rabbitmq methods for an application'''
import pika
def establish_connection(host):
    """Open a blocking RabbitMQ connection to the given host.

    :param host: hostname or IP of the RabbitMQ broker
    :return: a pika.BlockingConnection instance
    """
    params = pika.ConnectionParameters(host=host)
    return pika.BlockingConnection(params)
def create_channel(connection):
    """Return a new channel on the given RabbitMQ connection."""
    return connection.channel()
def declare_mq_queue(channel, queue_name):
    """Declare (idempotently create) the named queue on the channel."""
    channel.queue_declare(queue=queue_name)
def basic_consume(channel, queue_name, callback_on_consume, auto_ack):
    """Register a consumer callback on the named queue.

    :param channel: open pika channel
    :param queue_name: queue to consume from
    :param callback_on_consume: called for each delivered message
    :param auto_ack: when True, messages are acknowledged automatically
    """
    channel.basic_consume(
        queue=queue_name,
        on_message_callback=callback_on_consume,
        auto_ack=auto_ack
    )
def publish_watermarks(channel, exchange, routing_key, body):
    """Publish a watermark message body to the given exchange/routing key."""
    channel.basic_publish(exchange=exchange,
                          routing_key=routing_key,
                          body=body)
a9005bc67c137b1d2d58fdc274c574bb5680b16f | Python | RoyBartolomeo/Tools | /daily_playlist.py | UTF-8 | 3,586 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
# Author: Roy B
# This searches youtube Channels for content posted in the
# past 24 hours and creates a playlist of the videos found
# to the authenticated users account.
import os
from pathlib import Path
from datetime import datetime, timedelta
import google.oauth2.credentials
import google_auth_oauthlib.flow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# Search window: only videos published in the last 24 hours are collected.
date = datetime.today() - timedelta(days = 1)
# Channel IDs for: WSJ, NYT, FT, Economist, Atlantic, Quartz, the Verge
channels = ['UCK7tptUDHh-RYDsdxO1-5QQ', 'UCqnbDFdCpuN8CMEg0VuEBqA', 'UCoUxsWakJucWg46KW5RsvPw',
            'UC0p5jTq6Xx_DosDFxVXnWaQ', 'UCK0z0_5uL7mb9IjntOKi5XQ', 'UC9f78Z5hgtDt0n8JWyfBk8Q',
            'UCddiUEpeqJcYeBxX1IVBKvQ']
# Google OAuth configuration. Requires the client secrets file downloaded
# from the developer API console.
CLIENT_SECRETS_FILE = Path('YOUR/PATH/client_secret.json')
DEVELOPER_KEY = 'YOUR DEVELOPER KEY'
SCOPES = ['https://www.googleapis.com/auth/youtube']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
videos = []  # video ids collected by youtube_search(); shared module state
# Call the Search API and return a list of video IDs
def youtube_search():
    """Search the current channel for videos published since ``date``.

    Reads the module-level ``channel`` (bound by the loop in ``__main__``)
    and appends each hit's video id to the module-level ``videos`` list.

    NOTE(review): ``videos`` is never cleared between calls, so results
    accumulate across channels — confirm this is intended.

    :return: the shared ``videos`` list
    """
    youtube = build(API_SERVICE_NAME, API_VERSION,
        developerKey=DEVELOPER_KEY)
    # Call the search.list method to retrieve results matching the specified
    # channel and publication window.
    search_response = youtube.search().list(
        part='id,snippet',
        channelId=channel,
        publishedAfter=str(date.isoformat('T') + 'Z'), #RFC 3339 format
        type='video',
        safeSearch='none'
    ).execute()
    # Append return values to the videos list
    for search_result in search_response.get('items', []):
        videos.append(search_result['id']['videoId'])
    return(videos)
# Authorize the request and store authorization credentials. If the credential file doesn't exist it will
# open a link to authorize the account.
def get_authenticated_service():
    """Return an authorized YouTube API client.

    Loads stored OAuth credentials from disk; if they are missing or invalid,
    runs the interactive OAuth flow (opens an authorization link) and stores
    the new credentials at the same path.
    """
    credential_path = Path('YOUR/PATH/credential_sample.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRETS_FILE, SCOPES)
        credentials = tools.run_flow(flow, store)
    return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
# Add a new playlist to the user account and return the playlist ID
def add_playlist(youtube):
    """Create a public playlist named after the search date.

    :param youtube: authenticated YouTube API client
    :return: id of the newly created playlist
    """
    body = dict(
        snippet=dict(
            title=date.strftime('%x'),
            description='Recent News'
        ),
        status=dict(
            privacyStatus='public'
        )
    )
    playlists_insert_response = youtube.playlists().insert(
        part='snippet,status',
        body=body
    ).execute()
    return(playlists_insert_response['id'])
# Add videos to the user playlist
def add_video(user):
    """Append every collected video id to the current playlist.

    Relies on the module-level ``videos`` list and ``playlistId`` bound in
    ``__main__`` — NOTE(review): global coupling; confirm before refactoring.

    :param user: authenticated YouTube API client
    """
    # enumerate() replaces the manual range(len(videos)) index loop; the
    # index doubles as the playlist position.
    for position, video_id in enumerate(videos):
        request = user.playlistItems().insert(
            part="snippet",
            body={
                "snippet": {
                    "playlistId": playlistId,
                    "position": position,
                    "resourceId": {
                        "kind": "youtube#video",
                        "videoId": video_id
                    }
                }
            }
        )
        request.execute()
if __name__ == '__main__':
    user = get_authenticated_service()
    try:
        # One playlist is created per channel.
        # NOTE(review): youtube_search() appends into the shared `videos`
        # list without clearing it, so each playlist also receives the
        # previous channels' videos — confirm whether that is intended.
        for channel in channels:
            videos = youtube_search()
            playlistId = add_playlist(user)
            add_video(user)
            print('Playlist Created')
    except HttpError as error:
        print(error)
| true |
90ad300b4fe69b978002dd9b001cb0b0153ea1c2 | Python | emilyjcosta5/machine_learning | /homework_3/question_2/generateMultiringDataset.py | UTF-8 | 1,480 | 3.046875 | 3 | [] | no_license | import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def generateMultiringDataset(numberOfClasses,numberOfSamples):
    """Generate N 2-D samples from C ring-shaped class-conditional pdfs.

    Priors are equal; each class l draws its radius from Gamma(a[l], b[l])
    and its angle uniformly in [0, 2*pi]. Also scatter-plots the samples,
    one color per class.

    :param numberOfClasses: number of rings / classes C
    :param numberOfSamples: total number of samples N
    :return: (data, labels) where data is 2xN and labels is 1xN
    """
    C = numberOfClasses
    N = numberOfSamples
    #Generates N samples from C ring-shaped
    #class-conditional pdfs with equal priors
    #Randomly determine class labels for each sample
    thr = np.linspace(0.0, 1.0, num=C+1) #split [0,1] into C equal length intervals
    u = np.random.rand(1, N) # generate N samples uniformly random in [0,1]
    labels = np.zeros((1,N))
    for l in range(C):
        ind_l = np.where((thr[l]<u) & (u<=thr[l+1]))
        labels[ind_l] = np.tile(l,(1,len(ind_l[0])))
    # parameters of the Gamma pdf needed later
    a = [pow(i, 2.5) for i in list(range(1,C+1))]
    b = np.tile(1.7, C)
    #Generate data from appropriate rings
    #radius is drawn from Gamma(a,b), angle is uniform in [0,2pi]
    angle = 2*math.pi*np.random.rand(1,N)
    radius = np.zeros((1,N)) # reserve space
    for l in range(C):
        ind_l = np.where(labels==l)
        radius[ind_l] = np.random.gamma(a[l], b[l], len(ind_l[0]))
    data = np.vstack((np.multiply(radius,np.cos(angle)), np.multiply(radius,np.sin(angle))))
    colors = iter(cm.rainbow(np.linspace(0, 1, C)))
    plt.figure()
    for l in range(C):
        ind_l = np.where(labels==l)[1]
        plt.scatter(data[0,ind_l], data[1,ind_l], color=next(colors), s=1)
    return data,labels
| true |
5b1516c9014287879024c60d2642b5d65964b21a | Python | malang/mumbo | /server/db.py | UTF-8 | 668 | 2.5625 | 3 | [] | no_license | # This is the email greeter service - you will need to get a message into here
# which is then parsed and inserted into the db as appopriate
# This is the db layer where we will make the db calls
import pymongo
from pymongo import Connection
def _return_conn():
    """Return a handle to the local 'mumbo' MongoDB database."""
    return pymongo.Connection().mumbo
def write_thread(key, msg):
    """Insert one message into the 'test' collection.

    :param key: normalized subject used as the thread key
    :param msg: the email message payload to store under that thread
    """
    db = _return_conn()
    db.test.insert({'thread': key, 'msg': msg})
def reset():
    """Drop the 'test' collection, discarding all stored threads."""
    db = _return_conn()
    db.test.drop()
if __name__ == '__main__':
    # BUGFIX: write_thread() requires both a key and a message; the original
    # call write_thread({}) raised TypeError before ever reaching MongoDB.
    write_thread('test-thread', {'subject': 'test'})
ad1e5d71eaef337c449b2835062e9df196e70910 | Python | Anudeepbn/171046039_project | /Project_code/Analysis/analysis1.py | UTF-8 | 702 | 2.890625 | 3 | [] | no_license | import read
import sys
'''import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
#### read excel sheet
sheet1= pd.read_excel('Placementsois.xlsx',sheetname='placementstats')
sheet2= pd.read_excel('Placementsois.xlsx',sheetname='enrollment')
sheet1_reindexing=sheet1.reset_index()
'''
#sheet2=sheet2.drop(sheet2.index[])
##Analysis
#1.Display all the list of companies of particular branch since five years?
def Companies_of_branch(branch):
    """Print every distinct company that recruited from the given branch.

    :param branch: branch name matched against read.sheet2['Branch']
    """
    # .unique() already de-duplicates while preserving order of appearance,
    # so the original extra membership loop was a no-op and is removed.
    companies = read.sheet2[read.sheet2.Branch == branch].Company.unique()
    for company in companies:
        print(company)
if __name__=='__main__':
    # Branch name comes from the command line, e.g. `python analysis1.py CSE`
    Companies_of_branch(sys.argv[1])
cbcc7b83b5f41aa5863e6e40305505595901dcc4 | Python | PierreVieira/EstruturaDeDados | /linked_list.py | UTF-8 | 4,725 | 4.1875 | 4 | [] | no_license | from node import Node
class LinkedList:
    """Singly linked list: a chain of nodes where each node knows its successor."""

    def __init__(self):
        self.head = None  # reference to the first node (None when empty)
        self._size = 0  # number of elements in the list

    def append(self, element):
        """
        Insert an element at the end of the list.
        Complexity: O(n)
        :param element: element to insert
        :return: None
        """
        no = Node(element)
        if self.head:  # the list already has a head node
            pointer = self.head
            while pointer.next:  # walk until the last node
                pointer = pointer.next
            pointer.next = no
        else:  # empty list: the new node becomes the head
            self.head = no
        self._size += 1

    def _getnode(self, index):
        # Walk `index` links from the head; raise if the chain ends early.
        pointer = self.head
        for i in range(index):
            if pointer:
                pointer = pointer.next
            else:
                raise IndexError('O índice não está disponível na lista.')
        return pointer

    def __len__(self):
        """
        :return: number of elements in the list
        """
        return self._size

    def __getitem__(self, index):
        """
        Complexity: O(n)
        :param index: index of the element to fetch
        :return: element stored at the given index
        """
        pointer = self._getnode(index)
        if pointer:
            return pointer.data
        raise IndexError("O índice não está disponível para a lista")

    def __setitem__(self, index, value):
        """
        Complexity: O(n)
        :param index: index of the element to overwrite
        :param value: new value stored at the given index
        :return: None
        """
        pointer = self._getnode(index)
        if pointer:
            pointer.data = value
        else:
            raise IndexError("O índice não está disponível para a lista")

    def index(self, element):
        """
        Complexity: O(n)
        :param element: element to search for
        :return: index of the first occurrence of the element
        """
        pointer = self.head
        i = 0
        while pointer:
            if pointer.data == element:
                return i
            pointer = pointer.next
            i += 1
        raise ValueError(f'O elemento {element} não está na lista')

    def insert(self, element, index):
        """
        Insert an element at the given index, shifting later elements right.
        Complexity: O(n)
        :param element: element to insert
        :param index: position where the element is inserted
        :return: None
        """
        node = Node(element)
        if index == 0:  # insert at the head
            node.next, self.head = self.head, node
        else:
            pointer = self._getnode(index - 1)
            # BUGFIX: the original rebuilt a second, redundant Node here.
            node.next = pointer.next
            pointer.next = node
        self._size += 1

    def remove(self, element):
        """
        Remove the first occurrence of an element.
        Complexity: O(n)
        :param element: element to remove from the list
        :return: True when the element was removed; raises ValueError otherwise
        """
        if self.head is None:
            raise ValueError('{} is not in list'.format(element))
        elif self.head.data == element:
            self.head = self.head.next
            self._size -= 1
            # BUGFIX: the original fell through and returned None here,
            # unlike every other successful removal path which returns True.
            return True
        else:
            ancestor, pointer = self.head, self.head.next
            while pointer:
                if pointer.data == element:
                    ancestor.next, pointer.next = pointer.next, None
                    break
                ancestor, pointer = pointer, pointer.next
            if pointer:
                self._size -= 1
                return True
            raise ValueError('{} is not in list'.format(element))

    def __repr__(self):
        # Renders elements in order as "a->b->c->".
        r = ''
        pointer = self.head
        while pointer:
            r += str(pointer.data) + '->'
            pointer = pointer.next
        return r
if __name__ == '__main__':
    # Quick manual exercise of the list operations.
    lista = LinkedList()
    lista.append(8)
    lista.append('pierre')
    lista.append(12)
    print(lista)
    lista.insert(15, 1)
    print(lista)
    print('Índice do número 8: {}'.format(lista.index(8)))
    lista.remove('pierre')
    print(lista)
| true |
1c3a8a6380fc36a9143730f6984358b052f814d6 | Python | schmidtdominik/df-autograd | /readme.py | UTF-8 | 1,427 | 3.875 | 4 | [] | no_license | from autograd import *
# Demo of the autograd package: build computation graphs, differentiate
# them symbolically, and evaluate both forward passes and gradients.
# NOTE(review): C(...) presumably wraps numeric constants as graph nodes —
# confirm against the autograd module.
# Declare variables
x = Variable('x')
# Define function f(x) = 4x^4 + 5x^3 + x^2 + 18
f1 = C(4)*x**C(4) + C(5)*x**C(3) + x**C(2) + C(18)
print('f1:', f1)
# Compute the gradient (this returns the computation graphs of each component of the gradient function)
g1 = f1.gradient()
print('g1:', g1)
# Differentiate in which point?
in_point = {x: 3}
# Evaluate the `x` component of the gradient at `in_point`
print('x component of gradient:', g1[x].forward(in_point))
# Verify with numerical gradient
print('numerical gradient:', f1.numerical_diff_grad(in_point))
# We can also just evaluate f1 at `in_point` (the forward pass)
print('f1 evaluated:', f1.forward(in_point), '\n\n')

# Multivariate example: each variable gets its own gradient component.
x = Variable('x')
y = Variable('y')
z = Variable('z')
f2 = Ln(x/y) + x**y + z**C(-3) + Exp(y/x)
print('f2:', f2)
in_point = {x: 6, y: 3, z: 5}
for v, k in f2.gradient().items():
    print(f'\tf2 {v}-component of gradient: {k}')
    print('\tevaluated at in_point: ', k.forward(in_point))
print('numerical gradient:', f2.numerical_diff_grad(in_point), '\n')

print('Higher order derivatives:')
# To compute higher order derivatives simply differentiate the resulting gradient
f = Exp(x*y)
dfdx = f.gradient()[x]
dfdy = f.gradient()[y]
dfdxx = dfdx.gradient()[x]
dfdxy = dfdx.gradient()[y]
dfdyx = dfdy.gradient()[x]
dfdyy = dfdy.gradient()[y]
print(dfdx)
print(dfdy)
print(dfdxx)
print(dfdxy)
print(dfdyx)
print(dfdyy)
a2fb416cf48323d4a68822251a30ac2d1fbfa63e | Python | a-r-j-u-n-s/CovidViz | /helpers.py | UTF-8 | 3,519 | 3.25 | 3 | [] | no_license | import pandas as pd
import geopandas as gpd
from sklearn.linear_model import LinearRegression
from io import StringIO
import numpy as np
__all__ = ['download_convert_csv', 'combine_state_data', 'update_geo', 'pre_floyd_state_plotter']
def download_convert_csv(url: str) -> pd.DataFrame:
    """Download a .csv file and parse it into a DataFrame.

    :param url: URL of a .csv download
    :return: DataFrame parsed from the downloaded text
    """
    import requests
    with requests.Session() as session:
        response = session.get(url)
        frame = pd.read_csv(StringIO(response.text))
    return frame
def combine_state_data(state_col: str, data_col: str, data: pd.DataFrame, mainland=True) -> gpd.GeoDataFrame:
    """
    Merges any data set with the US States geospatial data (map/states.json).

    :param state_col: column of the states GeoDataFrame to merge on
    :param data_col: column of `data` to merge on
    :param data: DataFrame to join with the state geometries
    :param mainland: 'False' to generate map including non-mainland states
        (Alaska, Hawaii, Puerto Rico are dropped when True)
    :return: GeoDataFrame of the inner-joined data
    """
    states_geo = gpd.read_file('map/states.json')
    if mainland:
        # Drop non-mainland entries so the map projection stays compact.
        states_geo = states_geo[
            (states_geo['NAME'] != 'Alaska') & (states_geo['NAME'] != 'Hawaii') & (states_geo['NAME'] != 'Puerto Rico')]
    merged_geo = states_geo.merge(data, left_on=state_col, right_on=data_col, how='inner')
    return merged_geo
def update_geo(df: gpd.GeoDataFrame, to_drop: str, latest=False):
    """Collapse time-series rows to the most recent entry per `to_drop` value.

    :param df: frame holding a 'date' column
    :param to_drop: column whose duplicate rows are collapsed, keeping the
        row with the latest date
    :param latest: when True, also convert 'date' to datetime in place and
        print the most recent date found
    :return: frame with one (latest) row per `to_drop` value
    """
    if latest:
        df['date'] = pd.to_datetime(df['date'])
        print(df['date'].max())
    return df.sort_values('date').drop_duplicates(to_drop, keep='last')
def pre_floyd_state_plotter(data: pd.DataFrame, state_name: str, axs, subplot: int):
    """
    Plots cases over time with both a linear and polynomial model trained
    with pre-May 25 (pre-George Floyd protests) case data.

    :param data: time-series frame with 'state', 'date' and 'cases' columns
    :param state_name: state to plot
    :param axs: axis array on which to plot
    :param subplot: indicates which subplot of the given axis to plot over
    """
    # Select this state's rows and index them by date (time series).
    mask = data['state'] == state_name
    state = data[mask]
    state.index = pd.to_datetime(state['date'])
    # Replace dates with numeric counter for use in regression model training
    state_numeric = state.assign(date=range(len(state)))
    # Linear regression fit on pre-May 25 data only, predicted over all dates.
    pre_floyd = state_numeric.loc[:'2020-05-25']
    pre_floyd_x, pre_floyd_y = pre_floyd[['date']], pre_floyd['cases']
    model = LinearRegression(fit_intercept=False)
    model.fit(pre_floyd_x, pre_floyd_y)
    pred = model.predict(state_numeric[['date']])
    pred_df = pd.DataFrame({'linear predictions': pred})
    pred_df.index = state.index
    # Exponential fit via a degree-1 polyfit in log space.
    # pred[0] == 0, so omit it from the exponential fit since ln(0) is undefined.
    # NOTE(review): np.polyfit returns coefficients highest degree first, so
    # `intercept` actually receives the slope and `slope` the intercept; the
    # usage below is consistent with that swap, but the names are misleading.
    [intercept, slope] = np.polyfit(range(len(state) - 1), np.log(pred[1:]), 1)
    y = np.exp(slope) * np.exp(intercept * range(len(state)))
    exp_df = pd.DataFrame({'polynomial predictions': y})
    exp_df.index = state.index
    # Overlay actual cases with both model predictions on the chosen subplot.
    state['cases'].plot(ax=axs[subplot], ylim=0, label='Cases')
    pred_df.plot(ax=axs[subplot], ylim=0, label='Linear Predictions')
    exp_df.plot(ax=axs[subplot], ylim=0, label='Polynomial Predictions')
    axs[subplot].legend(loc='upper left')
    axs[subplot].set_title(state_name + ' Cases')
846fe066699cc70b02e5d84d4628106f20d96378 | Python | RodrigoVite/ProyectoFinal-Admin-Redes | /multi.py | UTF-8 | 17,727 | 2.671875 | 3 | [] | no_license | from netmiko import ConnectHandler
import netifaces as ni
from detecta import *
import os
# Default device credentials used by the interactive flows below.
user = 'admin'
password = 'admin01'
secret = '12345678'
host = 'R1'
# Counters used by ospf() to derive per-router network/router-id octets.
sb=1
sbb=0
# Connection profile for SSH sessions (netmiko).
cisco = {
    "device_type":"cisco_ios",
    'ip': "",
    "username":"",
    "password":"",
    "secret":"12345678"
}
# Connection profile for telnet sessions (netmiko).
ciscot = {
    "device_type":"cisco_ios_telnet",
    'ip': "",
    "username":"",
    "password":"",
    "secret":"1234"
}
# Hostnames of routers already visited, to avoid re-configuring them.
known_routers = []
# List the local network interfaces and let the operator pick one.
interfaces=os.listdir("/sys/class/net/")
c=0
for i in range(len(interfaces)):
    print(f"{i+1}: {interfaces[i]}")
read=int(input("Ingresa el numero de interfaz: "))-1
interface_name = interfaces[read]
#cuando se elige configurar la red o agregar un nuevo usuario se ingresa a los routers mediante SSH
def init_configure(opcion,protocolo,opc,opt):
    """Entry point for the SSH-based workflows on the seed router.

    opcion 1: configure a routing protocol network-wide (RIP/OSPF; EIGRP stub)
    opcion 3: add/modify/delete a local user network-wide
    opcion 4: show version and uptime of every router
    opcion 5: interactively rename routers
    NOTE(review): opcion 2 is not handled here — presumably covered by
    init_configureSSH(); confirm with the caller.

    :param opcion: top-level menu choice
    :param protocolo: overwritten by an input() prompt when opcion == 1
    :param opc: overwritten by an input() prompt when opcion == 3
    :param opt: overwritten by an input() prompt when opcion == 5
    """
    con = ConnectHandler(**cisco)
    output = con.send_command("show running-config | i hostname")
    hostname = output.split()
    known_routers.append(hostname[1])
    # Option 1: configure the whole network with a routing protocol.
    if(opcion == 1):
        print ('¿Como quieres configurar la red?')
        protocolo=int(input('1 : RIP\n 2 : OSPF \n 3 : EIGRP\n'))
        if(protocolo==1):
            print(hostname[1]+":")
            rip(con)
            neighbors(hostname[1],con)
        elif(protocolo==2):
            print(hostname[1]+":")
            ospf(con)
            neighbors(hostname[1],con)
        elif(protocolo==3):
            print(hostname[1]+":")
            # EIGRP support is stubbed out for now.
            #eigrp(con)
            #neighbors(hostname[1],con)
    # Option 3: add/modify/delete a user on every router in the network.
    elif(opcion == 3):
        usuarioNuevo = input('Ingresa el nombre de usuario: ')
        passwordNuevo = input('Ingresa la contraseña: ')
        opc=int(input('1: Agregar Usuario\n 2: Modificar Usuario\n 3: Borrar Usuario\n'))
        print(hostname[1]+":")
        if(opc==1):
            agregarUsuario(con, usuarioNuevo, passwordNuevo)
            vecinos(hostname[1],con,usuarioNuevo,passwordNuevo,opc)
        elif(opc==2):
            modificarUsuario(con, usuarioNuevo, passwordNuevo)
            vecinos(hostname[1],con,usuarioNuevo,passwordNuevo,opc)
        elif(opc==3):
            borrarUsuario(con, usuarioNuevo, passwordNuevo)
            vecinos(hostname[1],con,usuarioNuevo,passwordNuevo,opc)
    # Option 4: report IOS version and uptime for every router.
    elif(opcion == 4):
        ver=con.send_command("show version | i Version ")
        tms=con.send_command("show version | i uptime ")
        rver=ver.split(",")
        rtms=tms.split("is")
        print(hostname[1]+ ":")
        print("El dispositivo posee la version: " + rver[2])
        print("El dispositivo ha estado encendido: " + rtms[1])
        viewr(hostname[1],con)
    # Option 5: interactively rename routers, one at a time.
    elif(opcion == 5):
        print(hostname[1]+ ":")
        opt=int(input('Desea cambiar el hostname?\n 1: si\n 2:no,ver siguiente router '))
        if(opt==1):
            newname=input('Ingresa el nuevo hostname: \n')
            con.write_channel('configure terminal\n')
            comandn= "hostname "+newname
            print("Se ingresa: " +comandn)
            con.write_channel(comandn+'\n')
            time.sleep(1)
            con.write_channel('exit\n')
            time.sleep(1)
            vnh=con.send_command("show running-config | i hostname")
            nhn=vnh.split()
            print("El router se llama ahora: " + nhn[1])
        elif(opt==2):
            editr(hostname[1],con)
    con.disconnect()
#cuando se elige configurar la red con SSH se ingresa mediante telnet
def init_configureSSH():
    """Connect to the seed router over telnet, gather SSH settings
    interactively, then configure SSH on it and on every CDP neighbor.
    """
    conT = ConnectHandler(**ciscot)
    output = conT.send_command("show running-config | i hostname")
    hostname = output.split()
    known_routers.append(hostname[1])
    # Ask for the data needed by the SSH configuration commands.
    username = input('Ingresa el usuario que se configurara en la conexion SSH?: ')
    contraseña = input('Ingresa una contraseña: ')
    nombreDominio = input('Ingresa el nombre del dominio: ')
    numIntentos = input('Ingresa el numero de intentos para la conexion SSH: ')
    print(hostname[1]+":")
    # BUGFIX: the original passed the undefined names `con` and `router`
    # (NameError at runtime); the telnet connection is `conT` and the
    # current device's hostname is hostname[1].
    ssh(conT, nombreDominio, username, contraseña, numIntentos)
    neighborsTelnet(hostname[1], conT, nombreDominio, username, contraseña, numIntentos)
    conT.disconnect()
def findRouter():
    """Scan the selected interface's subnet and return the first router's IP.

    Uses the helpers from the `detecta` module to derive the subnet from the
    local interface address/netmask and to scan the address range.
    NOTE(review): assumes scan_range() yields dicts mapping ip -> description
    and that routers are labeled with the substring "Router" — confirm in
    detecta.py.

    :return: IP address (str) of the first detected router, or None
    """
    dic_data=ni.ifaddresses(interface_name)
    dic_data=dic_data[2][0]
    addr=list(map(int,dic_data["addr"].split(".")))
    net=list(map(int,dic_data["netmask"].split(".")))
    c=determinate_prefix(net)
    idnet=get_id_net(addr,net)
    range_net=get_broadcast_ip(idnet,net)
    print(f"Escaneando subred {arr_to_ip(idnet)}/{c}\n")
    # Start scanning one address past the network id.
    ips=[idnet[0],idnet[1],idnet[2],idnet[3]+1]
    responde=scan_range(ips,range_net)
    for i in range(len(responde)):
        for k,v in responde[i].items():
            if "Router" in v:
                return k
def neighbors(hostname,con):
    """Discover CDP neighbors of the current device and recurse into new ones.

    :param hostname: hostname of the device `con` is currently attached to
    :param con: active netmiko connection
    """
    output = con.send_command('show cdp neighbors')
    routers = output.split()
    routers.pop()
    # NOTE(review): index 35 skips the command's header words and the stride
    # of 8 advances one neighbor row at a time — this assumes a fixed
    # `show cdp neighbors` column layout; confirm on the target IOS version.
    i = 35
    while i < len(routers):
        if routers[i] not in known_routers:
            print(routers[i]+":")
            known_routers.append(routers[i])
            configure_router(routers[i],hostname,con)
        i = i + 8
def configure_router(router,hostname,con):
    """Hop to a CDP neighbor over SSH, configure RIP on it, then recurse.

    :param router: device id of the neighbor (from `show cdp neighbors`)
    :param hostname: hostname of the device we hop from (used to detect its
        prompt when exiting the nested session)
    :param con: netmiko connection whose channel is reused for the nested hop
    """
    user = cisco['username']
    password = cisco['password']
    print(""+user)
    output = con.send_command(f'show cdp entry {router}')
    # NOTE(review): resp[8] is assumed to be the neighbor's IP address in the
    # `show cdp entry` output — confirm for the IOS version in use.
    resp = output.split()
    con.send_command('ssh -l '+user+' '+resp[8],expect_string=r'Password:')
    # write_channel is used (instead of send_command with an expect pattern)
    # so the nested SSH password prompt can be answered directly.
    con.write_channel(password+'\n')
    # Configure this neighbor and keep walking the topology until no new
    # router is found.
    rip(con)
    #ospf(con)
    neighbors(router,con)
    print("HOSTNAME CONFIGURE:", hostname)
    # Leave the nested session; expect the previous device's prompt.
    con.send_command('exit',expect_string=hostname.split(".")[0]+'#')
def viewr(hostname,con):
    """Walk CDP neighbors and print version/uptime info for each new router.

    :param hostname: hostname of the device `con` is currently attached to
    :param con: active netmiko connection
    """
    output = con.send_command('show cdp neighbors')
    routers = output.split()
    routers.pop()
    # Same fixed-layout parse as neighbors(): offset 35, stride 8.
    i = 35
    while i < len(routers):
        if routers[i] not in known_routers:
            nr=routers[i].split(".")
            print(nr[0]+":")
            known_routers.append(routers[i])
            configure_router_info(routers[i],hostname,con)
        i = i + 8
def editr(hostname,con):
    """Walk CDP neighbors, offering to rename each newly found router.

    :param hostname: hostname of the device `con` is currently attached to
    :param con: active netmiko connection
    """
    output = con.send_command('show cdp neighbors')
    routers = output.split()
    routers.pop()
    # Same fixed-layout parse as neighbors(): offset 35, stride 8.
    i = 35
    while i < len(routers):
        if routers[i] not in known_routers:
            nr=routers[i].split(".")
            print(nr[0]+":")
            known_routers.append(routers[i])
            edit_router_info(routers[i],hostname,con)
        i = i + 8
def vecinos(hostname,con,usuarioNuevo,passwordNuevo,opc):
    """Walk CDP neighbors, applying the user add/modify/delete to each one.

    :param hostname: hostname of the device `con` is currently attached to
    :param con: active netmiko connection
    :param usuarioNuevo: username being managed
    :param passwordNuevo: password for that user
    :param opc: 1 = add, 2 = modify, 3 = delete
    """
    output = con.send_command('show cdp neighbors')
    routers = output.split()
    routers.pop()
    # Same fixed-layout parse as neighbors(): offset 35, stride 8.
    i = 35
    while i < len(routers):
        if routers[i] not in known_routers:
            print(routers[i]+":")
            known_routers.append(routers[i])
            configure_router_usuarios(routers[i],hostname,con,usuarioNuevo,passwordNuevo,opc)
        i = i + 8
def configure_router_usuarios(router,hostname,con,usuarioNuevo,passwordNuevo,opc):
    """Hop to a CDP neighbor over SSH and apply the chosen user operation.

    :param router: device id of the neighbor (from `show cdp neighbors`)
    :param hostname: hostname of the device we hop from (prompt detection)
    :param con: netmiko connection whose channel is reused for the nested hop
    :param usuarioNuevo: username being managed
    :param passwordNuevo: password for that user
    :param opc: 1 = add, 2 = modify, 3 = delete
    """
    user = cisco['username']
    password = cisco['password']
    output = con.send_command(f'show cdp entry {router}')
    # NOTE(review): resp[8] is assumed to be the neighbor's IP address.
    resp = output.split()
    con.send_command('ssh -l '+user+' '+resp[8],expect_string=r'Password:')
    # Answer the nested SSH password prompt directly over the channel.
    con.write_channel(password+'\n')
    if(opc==1):
        # Add the user here, then keep walking until no new router is found.
        agregarUsuario(con,usuarioNuevo,passwordNuevo)
        vecinos(router,con,usuarioNuevo,passwordNuevo,opc)
    elif(opc==2):
        # Modify the user here, then keep walking the topology.
        modificarUsuario(con,usuarioNuevo,passwordNuevo)
        vecinos(router,con,usuarioNuevo,passwordNuevo,opc)
    elif(opc==3):
        # Delete the user here, then keep walking the topology.
        borrarUsuario(con,usuarioNuevo,passwordNuevo)
        vecinos(router,con,usuarioNuevo,passwordNuevo,opc)
    # Leave the nested session; expect the previous device's prompt.
    con.send_command('exit',expect_string=hostname.split(".")[0]+'#')
#funcion que obtiene los datos del router y los imprime
def configure_router_info(router,hostname,con):
    """Hop to a CDP neighbor over SSH, print its version/uptime, then recurse.

    :param router: device id of the neighbor (from `show cdp neighbors`)
    :param hostname: hostname of the device we hop from (prompt detection)
    :param con: netmiko connection whose channel is reused for the nested hop
    """
    user = cisco['username']
    password = cisco['password']
    output = con.send_command(f'show cdp entry {router}')
    # NOTE(review): resp[8] is assumed to be the neighbor's IP address.
    resp = output.split()
    con.send_command('ssh -l '+user+' '+resp[8],expect_string=r'Password:')
    # Answer the nested SSH password prompt directly over the channel.
    con.write_channel(password+'\n')
    host=con.send_command("show running-config | i hostname ")
    sn= host.split()
    ver=con.send_command("show version | i Version ")
    tms=con.send_command("show version | i uptime ")
    rver=ver.split(",")
    rtms=tms.split("is")
    print("El dispositivo posee la version: " + rver[2])
    # Trim the trailing hostname echo from the uptime line before printing.
    ftms= rtms[1].split(sn[1])
    print("El dispositivo ha estado encendido: " + ftms[0])
    # Keep walking the topology until no new router is found.
    viewr(router,con)
    # Leave the nested session; expect the previous device's prompt.
    con.send_command('exit',expect_string=hostname.split(".")[0]+'#')
def edit_router_info(router,hostname,con):
    """Hop to a CDP neighbor over SSH and optionally rename it, then recurse.

    :param router: device id of the neighbor (from `show cdp neighbors`)
    :param hostname: hostname of the device we hop from (prompt detection)
    :param con: netmiko connection whose channel is reused for the nested hop
    """
    user = cisco['username']
    password = cisco['password']
    output = con.send_command(f'show cdp entry {router}')
    # NOTE(review): resp[8] is assumed to be the neighbor's IP address.
    resp = output.split()
    con.send_command('ssh -l '+user+' '+resp[8],expect_string=r'Password:')
    # Answer the nested SSH password prompt directly over the channel.
    con.write_channel(password+'\n')
    opte=int(input('Desea cambiar el hostname?\n 1: si\n 2:no,ver siguiente router '))
    if(opte==1):
        newname=input('Ingresa el nuevo hostname: \n')
        con.write_channel('configure terminal\n')
        comandn= "hostname "+newname
        print("Se ingresa: " +comandn)
        con.write_channel(comandn+'\n')
        time.sleep(1)
        con.write_channel('exit\n')
        time.sleep(1)
        vnh=con.send_command("show running-config | i hostname")
        nhn=vnh.split()
        print("El router se llama ahora: " + nhn[1])
    elif(opte==2):
        # Skip this router and keep walking until no new router is found.
        editr(router,con)
    # Leave the nested session; expect the previous device's prompt.
    con.send_command('exit',expect_string=hostname.split(".")[0]+'#')
def neighborsTelnet(hostname,con,nombreDominio,username,contraseña,numIntentos):
    """Walk CDP neighbors over telnet, configuring SSH on each new router.

    :param hostname: hostname of the device `con` is currently attached to
    :param con: active netmiko connection
    :param nombreDominio: domain name for the SSH configuration
    :param username: local user to create for SSH access
    :param contraseña: password for that user
    :param numIntentos: SSH authentication retry count
    """
    output = con.send_command('show cdp neighbors')
    routers = output.split()
    routers.pop()
    # Same fixed-layout parse as neighbors(): offset 35, stride 8.
    i = 35
    while i < len(routers):
        if routers[i] not in known_routers:
            print(routers[i]+":")
            known_routers.append(routers[i])
            configure_router_telnet(routers[i],hostname,con,nombreDominio,username,contraseña,numIntentos)
        i = i + 8
def configure_router_telnet(router,hostname,con,nombreDominio,username,contraseña,numIntentos):
    """Hop to a CDP neighbor over telnet, configure SSH on it, then recurse.

    :param router: device id of the neighbor (from `show cdp neighbors`)
    :param hostname: hostname of the device we hop from (prompt detection)
    :param con: netmiko connection whose channel is reused for the nested hop
    :param nombreDominio: domain name for the SSH configuration
    :param username: local user to create for SSH access
    :param contraseña: password for that user
    :param numIntentos: SSH authentication retry count
    """
    user = ciscot['username']
    password = ciscot['password']
    output = con.send_command(f'show cdp entry {router}')
    # NOTE(review): resp[8] is assumed to be the neighbor's IP address.
    resp = output.split()
    # NOTE(review): unlike `ssh -l`, IOS telnet does not take a username this
    # way ('telnet <user> <ip>') — confirm this command on the target device.
    con.send_command('telnet '+user+' '+resp[8],expect_string=r'Password:')
    # Answer the nested login password prompt directly over the channel.
    con.write_channel(password+'\n')
    # Configure SSH on this neighbor, then keep walking the topology.
    ssh(con,nombreDominio,username,contraseña,numIntentos)
    neighborsTelnet(router,con,nombreDominio,username,contraseña,numIntentos)
    # Leave the nested session; expect the previous device's prompt.
    con.send_command('exit',expect_string=hostname.split(".")[0]+'#')
def findNetworkID(ip,con):
    """Return the network id (dotted string) of the subnet containing `ip`.

    Looks up the interface carrying `ip`, then reads its mask from the
    running config and combines them via the detecta helpers.
    NOTE(review): net[1] / mask[3] rely on fixed column positions in the
    `show` output — confirm for the IOS version in use.

    :param ip: address (or fragment) used to filter `show ip interface brief`
    :param con: active netmiko connection
    :return: network id as a dotted-quad string
    """
    output = con.send_command('show ip interface brief | i '+ip)
    net = output.split()
    output = con.send_command('show running-config | i '+net[1])
    mask = output.split()
    addr=list(map(int,net[1].split(".")))
    netmask=list(map(int,mask[3].split(".")))
    idnet=get_id_net(addr,netmask)
    return arr_to_ip(idnet)
#este es el metodo con los comando para configurar RIP
def rip(con):
    """Configure RIP v2 on the connected device, advertising every subnet
    attached to an interface that is up.

    :param con: active netmiko connection
    """
    output = con.send_command('show ip interface brief | i up')
    ip = output.split()
    ip_id = []
    # The brief output has 6 columns per interface row; column 1 is the IP.
    i = 1
    while i < len(ip):
        ip_id.append(findNetworkID(ip[i],con))
        i = i + 6
    con.write_channel('configure terminal\n')
    time.sleep(1)
    con.write_channel('router rip\n')
    time.sleep(1)
    con.write_channel('version 2\n')
    time.sleep(1)
    # Advertise each connected network.
    for i in ip_id:
        print('RIP Network '+i)
        con.write_channel('network '+i+'\n')
        time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
#metodo base para ospf no funciona :'v
def ospf(con):
    """Configure OSPF process 200 (area 0) on the connected device.

    Uses the module-level counters `sb`/`sbb` to derive per-router octets
    for the network statements, incrementing them after each call.
    NOTE(review): `ip_id` is computed from the device's up interfaces but
    never used — the network statements below are hard-coded instead;
    confirm whether that is intentional.

    :param con: active netmiko connection
    """
    output = con.send_command('show ip interface brief | i up')
    ip = output.split()
    ip_id = []
    # The brief output has 6 columns per interface row; column 1 is the IP.
    i = 1
    while i < len(ip):
        ip_id.append(findNetworkID(ip[i],con))
        i = i + 6
    global sb
    global sbb
    opa= str(sb)
    opb= str(sbb)
    con.write_channel('configure terminal\n')
    time.sleep(1)
    con.write_channel('router ospf 200\n')
    time.sleep(1)
    con.write_channel('network 200.0.0.'+opa + ' 255.255.255.255 area 0\n')
    time.sleep(1)
    con.write_channel('network 192.168.1.'+opb + ' 0.0.0.255 area 0\n')
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
    con.write_channel('exit\n')
    # Advance the per-router counters for the next device.
    sb=sb+1
    sbb=sbb+1
#este es el metodo con los comandos para configurar SSH
def ssh(con,nombreDominio,username,contraseña,numIntentos):
    """Configure SSH v2 access on the device over an existing connection.

    :param con: netmiko-style connection with write_channel()
    :param nombreDominio: domain name for the RSA key
    :param username: privileged local user to create
    :param contraseña: password for the user and the vty lines
    :param numIntentos: allowed authentication retries
    """
    print("Se está configurando la conexión SSH...")
    comandos = [
        "configure terminal",
        "ip domain-name " + nombreDominio,
        "username " + username + " privilege 15 password " + contraseña,
        "crypto key generate rsa",
        "1024",  # RSA modulus size answered to the key-generation prompt
        "ip ssh version 2",
        "ip ssh time-out 50",
        "ip ssh authentication-retries " + str(numIntentos),
        "service password-encryption",
        "line vty 0 15",
        "transport input ssh telnet",
        # BUG FIX: was `"password "+password`, referencing an undefined
        # global; the function parameter is `contraseña`.
        "password " + contraseña,
        "login local",
        "end",
        "write",
    ]
    for comando in comandos:
        # BUG FIX: every command is now terminated with '\n' so the device
        # CLI actually executes it (write_channel sends raw bytes; without
        # the newline the text just sits in the input buffer). This matches
        # how rip() and agregarUsuario() already send their commands.
        con.write_channel(comando + "\n")
        time.sleep(1)
    print("Se configuró la conexión SSH")
#este es el metodo con los comandos para agregar un nuevo usuario
def agregarUsuario(con, usuarioNuevo, passwordNuevo):
    """Add a new privilege-15 local user on the device."""
    print("Se está agregando un nuevo usuario...")
    con.write_channel('configure terminal\n')
    time.sleep(1)
    # BUG FIX: the username command was sent without a trailing '\n', so
    # the CLI never executed it; now terminated like every other command.
    con.write_channel("username "+usuarioNuevo+" privilege 15 password "+passwordNuevo+'\n')
    print("Se agregó el nuevo usuario")
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
#este es el metodo con los comandos para modificar un nuevo usuario
def modificarUsuario(con, usuarioNuevo, passwordNuevo):
    """Modify an existing local user (re-issues the username command)."""
    print("Se está modificando el usuario...")
    con.write_channel('configure terminal\n')
    time.sleep(1)
    # BUG FIX: the username command was sent without a trailing '\n', so
    # the CLI never executed it; now terminated like every other command.
    con.write_channel("username "+usuarioNuevo+" privilege 15 password "+passwordNuevo+'\n')
    print("Se modificó el usuario")
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
#este es el metodo con los comandos para borrar un nuevo usuario
def borrarUsuario(con, usuarioNuevo, passwordNuevo):
    """Delete a local user from the device configuration."""
    print("Se está eliminando el usuario...")
    con.write_channel('configure terminal\n')
    time.sleep(1)
    # BUG FIX: the `no username` command was sent without a trailing '\n',
    # so the CLI never executed it; now terminated like the other commands.
    con.write_channel("no username "+usuarioNuevo+" privilege 15 password "+passwordNuevo+'\n')
    print("Se eliminó el usuario")
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
    con.write_channel('exit\n')
    time.sleep(1)
print('Elige lo que deseas realizar?\n')
opcion = int(input('1 : Configurar la red\n 2 : Configurar la conexión SSH de la red \n 3 : Agregar, modificar o eliminar un nuevo usuario\n 4: Ver informacion del dispositivo\n 5: Cambiar hostname\n'))

def switch(opcion,usuario,password):
    """Dispatch the chosen menu option.

    BUG FIX: the original `def` line was missing the trailing colon and its
    body was not indented, which made the whole module a SyntaxError.

    :param opcion: menu option 1-5
    :param usuario: unused; kept for interface compatibility
    :param password: unused; kept for interface compatibility
    """
    # Option 1: configure the network with a routing protocol (RIP/OSPF/EIGRP).
    if(opcion==1):
        usuario = input('Ingresa tu nombre de usuario: ')
        cisco['username'] = usuario
        password = input('Ingresa tu contraseña: ')
        cisco['password'] = password
        ip = findRouter()
        print ('Router encontrado en '+ip)
        cisco['ip'] = ip
        init_configure(opcion)
    # Option 2: configure the SSH connection via telnet.
    elif(opcion==2):
        usuarioTelnet = input('Ingresa el nombre de usuario: ')
        ciscot['username'] = usuarioTelnet
        passwordTelnet = input('Ingresa tu contraseña: ')
        ciscot['password'] = passwordTelnet
        ipT = findRouter()
        print ('Router encontrado en '+ipT)
        ciscot['ip'] = ipT
        init_configureSSH()
    # Options 3 (user management), 4 (device info) and 5 (change hostname)
    # had three byte-identical branches in the original; they are merged
    # here — init_configure(opcion) already receives the option number.
    elif(opcion in (3, 4, 5)):
        usuarioAdmin = input('Ingresa tu nombre de usuario: ')
        passwordAdmin = input('Ingresa tu password: ')
        cisco['username'] = usuarioAdmin
        cisco['password'] = passwordAdmin
        ipU = findRouter()
        print ('Router encontrado en '+ipU)
        cisco['ip'] = ipU
        init_configure(opcion)
| true |
8437398b4c72a5e116f1e286bcbd014d678ad8b5 | Python | larry2020626/motifwalk | /src/utils.py | UTF-8 | 8,245 | 2.59375 | 3 | [] | no_license | import pickle as p
import numpy as np
import re
from sklearn.multiclass import OneVsRestClassifier
from scipy.sparse import lil_matrix, csr_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from time import time
try:
from graph_tool.clustering import motifs, motif_significance
from graph_tool.spectral import adjacency
from graph_tool import load_graph_from_csv
except ImportError:
print("Warning: graph_tool module is missing, motif analysis is not available.")
dataloc = './../data/'
def load_citeseer():
    """Load the citeseer dataset.

    Returns (networkx graph, CSR feature matrix, binarized label matrix).
    """
    with open(dataloc + 'citeseer.data', 'rb') as handle:
        payload = p.load(handle)
    raw_labels = payload['Labels']  # per-node class indices
    binarized = MultiLabelBinarizer().fit_transform(
        raw_labels.reshape(raw_labels.shape[0], 1))
    return payload['NXGraph'], payload['CSRFeatures'], binarized
def load_cora():
    """Load the cora dataset.

    Returns (networkx graph, CSR feature matrix, binarized label matrix).
    """
    with open(dataloc + 'cora.data', 'rb') as handle:
        payload = p.load(handle)
    raw_labels = payload['Labels']  # per-node class indices
    binarized = MultiLabelBinarizer().fit_transform(
        raw_labels.reshape(raw_labels.shape[0], 1))
    return payload['NXGraph'], payload['CSRFeatures'], binarized
def load_blogcatalog():
    """Load the blogcatalog dataset.

    Returns (networkx graph, None, LIL label matrix). There is no feature
    matrix for this dataset, and the labels are already in binary format.
    """
    with open(dataloc + 'blogcatalog.data', 'rb') as handle:
        payload = p.load(handle)
    return payload['NXGraph'], None, payload['LILLabels']
def load_data(dataset_name):
    """Load one of the supported datasets by name.

    :param dataset_name: 'blogcatalog', 'cora' or 'citeseer'
    :raises ValueError: for any other name
    """
    if dataset_name == "blogcatalog":
        return load_blogcatalog()
    if dataset_name == "cora":
        return load_cora()
    if dataset_name == "citeseer":
        return load_citeseer()
    raise ValueError("Dataset not found")
def load_embeddings(emb_file):
    """Load a deepwalk/node2vec embedding file into a numpy matrix.

    File format: a header line "num_nodes dim", then one line per node of
    the form "node_id v1 v2 ... vdim". Node ids index the rows of the
    returned (num_nodes, dim) float32 matrix.

    :param emb_file: path to the embedding file
    :return: numpy.ndarray of shape (num_nodes, dim), dtype float32
    """
    # BUG FIX: the original used `np.float`, an alias deprecated in NumPy
    # 1.20 and removed in 1.24 — the builtin float is used instead. The
    # file is now also closed deterministically via a context manager.
    with open(emb_file, 'rb') as efile:
        num_node, dim = map(int, efile.readline().split())
        emb_matrix = np.ndarray(shape=(num_node, dim), dtype=np.float32)
        for line in efile:
            if not line.strip():
                continue  # tolerate a trailing blank line
            node_id, *vector = line.split()
            emb_matrix[int(node_id), :] = [float(v) for v in vector]
    return emb_matrix
def write_motifs_results(output, motifs_list, z_scores,
                         n_shuf, model="uncorrelated"):
    """Write each motif's adjacency matrix and z-score to *output*.

    Naming convention for the file: e.g. blogcatalog_3um.motifslog

    :param output: path of the file to write
    :param motifs_list: list of graph-tool graph instances
    :param z_scores: z-score for each motif (same length as motifs_list)
    :param n_shuf: number of random graphs that were generated
    :param model: name of the random-graph generation algorithm
    :return: the path that was written
    """
    assert len(motifs_list) == len(z_scores)
    with open(output, 'w') as handle:
        handle.write("Number of shuffles: {} ({})\n".format(n_shuf, model))
        for idx, motif in enumerate(motifs_list):
            handle.write("Motif {} - z-score: {}\n".format(idx + 1, z_scores[idx]))
            for row in adjacency(motif).toarray():
                handle.write(str(row) + '\n')
            handle.write('\n')
    return output
def run_motif_significance(graph, directed=True, data_loc="../data/", motif_size=3,
                           n_shuffles=16, s_model='uncorrelated'):
    """Compute z-scores for every subgraph of size *motif_size* in *graph*
    and write the results to a .motifslog file.

    :param graph: dataset name; edges are read from <data_loc><graph>.edges
    :param directed: load the edge list as a directed graph
    :return: path of the log file written by write_motifs_results()
    """
    edge_file = data_loc + graph + ".edges"
    g = load_graph_from_csv(edge_file, directed,
                            csv_options={'quotechar': '"',
                                         'delimiter': ' '})
    found_motifs, scores = motif_significance(g, motif_size, n_shuffles,
                                              shuffle_model=s_model)
    # 'm' marks directed motifs, 'um' undirected ones.
    suffix = str(motif_size) + 'm' if directed else str(motif_size) + 'um'
    out_name = "{}{}_{}.{}".format(data_loc, graph, suffix, "motifslog")
    return write_motifs_results(out_name, found_motifs, scores, n_shuffles, s_model)
def get_top_k(labels):
    """Return the number of active labels in each row of a binary label
    matrix (works for both scipy CSR matrices and dense arrays)."""
    if isinstance(labels, csr_matrix):
        return [np.count_nonzero(row.toarray()) for row in labels]
    return [np.count_nonzero(row) for row in labels]
def run_embedding_classify_f1(dataset_name, emb_file, clf=LogisticRegression(),
                              splits_ratio=(0.5,), num_run=2, write_to_file=None):
    """Evaluate a learned node embedding via multi-label classification.

    For each run and each test-split ratio, fits a one-vs-rest ranker on
    top of *clf*, predicts the top-k labels per node (k taken from the
    true label counts) and reports f1 scores and accuracy.

    :param dataset_name: 'blogcatalog', 'cora' or 'citeseer'
    :param emb_file: path to a deepwalk/node2vec embedding file
    :param clf: base binary classifier
    :param splits_ratio: iterable of test-set fractions
                         (BUG FIX: was a mutable default list `[0.5]`;
                         now an immutable tuple with the same value)
    :param num_run: number of repetitions; the run index seeds the split
    :param write_to_file: optional path to also write the report to
    :return: write_to_file (possibly None)
    """
    _, _, labels = load_data(dataset_name)
    emb = load_embeddings(emb_file)
    results_str = []
    averages = ["micro", "macro", "samples", "weighted"]
    for run in range(num_run):
        results_str.append("\nRun number {}:\n".format(run+1))
        for sr in splits_ratio:
            # random_state=run makes each run reproducible but distinct.
            X_train, X_test, y_train, y_test = train_test_split(
                emb, labels, test_size=sr, random_state=run)
            top_k_list = get_top_k(y_test)
            mclf = TopKRanker(clf)
            mclf.fit(X_train, y_train)
            test_results = mclf.predict(X_test, top_k_list,
                                        num_classes=labels.shape[1])
            str_output = "Train ratio: {}\n".format(1.0 - sr)
            for avg in averages:
                str_output += avg + ': ' + str(f1_score(test_results, y_test,
                                                        average=avg)) + '\n'
            str_output += "Accuracy: " + str(accuracy_score(test_results, y_test)) + '\n'
            results_str.append(str_output)
    info = "Embedding dim: {}, graph: {}".format(emb.shape[1], dataset_name)
    if write_to_file:
        with open(write_to_file, 'w') as f:
            f.write(info)
            f.writelines(results_str)
    print(info)
    print(''.join(results_str))
    return write_to_file
class TopKRanker(OneVsRestClassifier):
    """One-vs-rest classifier that predicts a fixed number of labels per
    sample instead of thresholding probabilities.

    Python 3 / sklearn 0.18.1 compatible version of
    https://github.com/gear/deepwalk/blob/master/example_graphs/scoring.py
    """

    def predict(self, features, top_k_list, num_classes=39):
        """Predict the top-k labels for each sample.

        :param features: (n_samples, dim) feature matrix
        :param top_k_list: number of labels to assign per sample (from the
            ground-truth label counts); same length as features
        :param num_classes: width of the returned binary matrix
        :return: (n_samples, num_classes) binary prediction matrix
        """
        assert features.shape[0] == len(top_k_list)
        probs = np.asarray(super().predict_proba(features))
        all_labels = np.zeros(shape=(features.shape[0], num_classes))
        for row, k in enumerate(top_k_list):
            # Indices of the k highest-probability classes for this sample.
            ranked = self.classes_[probs[row, :].argsort()[-k:]].tolist()
            for label in ranked:
                all_labels[row][label] = 1.0
        return all_labels
def significant_graph(motifslog, z_thres=100, m_size=3):
    """Plot and return the motifs significant graph from a logfile.

    NOTE(review): this function looks unfinished — `motif_zscore` and
    `m_size` are never used, and the threshold branch only executes
    `pass`, so nothing is plotted or returned yet.
    """
    motif_zscore = dict()
    zscore = 0.0
    for line in open(motifslog):
        # Lines of the form "Motif N - z-score: X" carry the score as the
        # last whitespace-separated token.
        if re.match(r"Motif", line):
            zscore = float(line.split()[-1])
        if zscore > z_thres:
            pass
def tsne_visualization(emb_file, graph_labels, pdf_name, color_map=None, n_components=2):
    """Visualize the graph embedding with t-SNE.

    :param emb_file: path to a deepwalk/node2vec embedding file
    :param graph_labels: unused here — TODO wire into the plot or drop
    :param pdf_name: unused here — NOTE(review): the figure is never
        saved; presumably plt.savefig(pdf_name) was intended
    :param color_map: per-point colors passed to plt.scatter
    :param n_components: dimensionality of the t-SNE projection
    """
    emb = load_embeddings(emb_file)
    t0 = time()
    model = TSNE(n_components=n_components, init='pca', random_state=0)
    # BUG FIX: the original called fit_transform(embeddings_vectors), an
    # undefined name that raised NameError; the loaded matrix is `emb`.
    trans_data = model.fit_transform(emb).T
    t1 = time()
    print("t-SNE: %.2g sec" % (t1-t0))
    fig = plt.figure(figsize=(6.75,3))
    ax = fig.add_subplot(1, 1, 1)
    plt.scatter(trans_data[0], trans_data[1], c=color_map)
| true |
2c9b4b840772351487c7c75426ae77c9687f1c85 | Python | LEUNGUU/data-structure-algorithms-python | /Linked-List/_SinglyLinkedList.py | UTF-8 | 2,738 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Empty(Exception):
    """Raised when an operation is attempted on an empty linked list."""
    pass
class SinglyLinkedList:
    """A singly linked list keeping O(1) references to both ends."""

    class _Node:
        """Lightweight storage for one element plus the next-node link."""
        __slots__ = "_element", "_next"

        def __init__(self, element, next):
            self._element = element
            self._next = next

    def __init__(self):
        """Create an empty SinglyLinkedList."""
        self._head = None   # first node, or None when empty
        self._tail = None   # last node, or None when empty
        self._size = 0      # number of stored elements

    def __len__(self):
        """Return the number of elements in the list."""
        return self._size

    def is_empty(self):
        """Return True if the list holds no elements."""
        return self._size == 0

    # -------------------- accessors ------------------------------
    def head(self):
        """Return (but do not remove) the node at the front.

        :raises Empty: if the list is empty
        """
        if self.is_empty():
            raise Empty("SinglyLinkedList is empty")
        return self._head

    def tail(self):
        """Return (but do not remove) the node at the end.

        :raises Empty: if the list is empty
        """
        if self.is_empty():
            raise Empty("SinglyLinkedList is empty")
        return self._tail

    # ---------------------- mutators -----------------------------
    def add_first(self, e):
        """Insert element *e* at the front of the list."""
        newest = self._Node(e, None)
        if self.is_empty():
            self._tail = newest
        else:
            newest._next = self._head
        self._head = newest
        self._size += 1

    def add_last(self, e):
        """Insert element *e* at the end of the list."""
        newest = self._Node(e, None)
        if self.is_empty():
            self._head = newest
        else:
            self._tail._next = newest
        self._tail = newest
        self._size += 1

    def remove_first(self):
        """Remove and return the element at the front of the list.

        BUG FIX: the original computed the removed node but returned
        nothing, and it left self._tail pointing at the removed node when
        the list became empty.

        :raises Empty: if the list is empty
        """
        if self.is_empty():
            raise Empty("SinglyLinkedList is empty")
        answer = self._head
        self._head = self._head._next
        self._size -= 1
        if self._size == 0:
            self._tail = None
        return answer._element

    def index(self, p):
        """Return the element at (1-based) position *p*.

        Negative positions count from the end (-1 is the last element).

        :raises Empty: if the list is empty
        :raises IndexError: if p is 0 or out of range
        """
        if self.is_empty():
            raise Empty("SinglyLinkedList is empty")
        if not (1 <= abs(p) <= self._size):
            raise IndexError("p is not a valid position")
        # Support negative indexing by translating to a positive position.
        if p < 0:
            p = self._size + p + 1
        walk = self._head
        for _ in range(p - 1):
            walk = walk._next
        return walk._element
cursor += 1
if __name__ == "__main__":
    # No demo/self-test yet; the class is meant to be imported elsewhere.
    pass
| true |
8aecfc086ec27e6c1c38266698d3d2b0ebbaeb8b | Python | tchelmella/w3python | /string/reverse.py | UTF-8 | 231 | 3 | 3 | [] | no_license | def rev(data):
if len(data) % 4 == 0:
#return ''.join(reversed(data))
return data[::-1]
print(rev('string'))
print(rev('condition'))
print(rev('tangents'))
print(rev('Tulsidas'))
print(rev('Nemethos'))
print(rev('Paytonos'))
| true |
66c6792c2c47bba41f7889968c0959c564650fbd | Python | Vanimo/StockMarketForecast | /DataAnalyser_Graph.py | UTF-8 | 1,795 | 3.078125 | 3 | [] | no_license | '''
Created on 10 May 2013
@author: Floris
'''
import matplotlib.pyplot as plt
import Methods.PreProcessing as prpr
import csv
# Main
def main():
    """Run both analyses: overall tweet counts, then per-tag counts."""
    allTweets()
    perTag()
    return
# Version two
def perTag():
    """Count tweets per company tag per day and write the matrix to
    data/frequency3.csv (one row per day, one column per tag)."""
    tags, tweet_dict = prpr.countTweetTags("data/scrapeCompanies.txt", "byDay")
    days = sorted(tweet_dict.keys())
    with open("data/frequency3.csv", "wb") as csvfile:
        writer = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Year-month-day_hour'] + tags)
        for day in days:
            counts = tweet_dict[day]
            # Tags with no tweets that day are reported as the string "0".
            row = [counts[tag] if tag in counts else "0" for tag in tags]
            writer.writerow([day] + row)
# Version one
def allTweets(showGraph=False):
    """Count all tweets per hour, write them to data/frequency.csv and
    optionally plot the hourly tweet volume."""
    tweet_dict = prpr.countAllTweets("data/scrapeDJIA.txt")
    hours = sorted(tweet_dict.keys())
    # Write the csv file; the context manager closes it for us.
    with open("data/frequency.csv", "wb") as csvfile:
        writer = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Year-month-day_hour', 'tweetcount'])
        for hour in hours:
            writer.writerow([hour, tweet_dict[hour]])
    if showGraph:
        # One data point per hour, in chronological order.
        volumes = [tweet_dict[hour] for hour in hours]
        plt.plot(range(len(hours)), volumes)
        plt.ylabel('Tweet volume')
        plt.xlabel('Hour')
        plt.show()
if __name__ == '__main__':
    # Entry point: run both analyses.
    main()
| true |
5d980154da0b35e2aefc099d2613aac49420fd90 | Python | Lzffancy/Aid_study | /fancy_month02/day13_self_mprocessing/day13_teacher/exercise02.py | UTF-8 | 1,291 | 3.40625 | 3 | [] | no_license | from multiprocessing import Process,Queue
from time import time
q = Queue() # 消息队列
def timeis(func):
    """Decorator that prints the wall-clock runtime of each call and
    passes the wrapped function's return value through."""
    def wrapper(*args, **kwargs):
        started = time()
        outcome = func(*args, **kwargs)
        finished = time()
        print("执行时间:", finished - started)
        return outcome
    return wrapper
class Prime(Process):
    """Worker process that sums the primes in [begin, end) and puts the
    partial sum on the module-level queue `q`."""

    @staticmethod
    def is_prime(n):
        """Trial-division primality test (divisors up to n // 2)."""
        if n <= 1:
            return False
        for divisor in range(2, n // 2 + 1):
            if n % divisor == 0:
                return False
        return True

    def __init__(self, begin, end):
        # Store the half-open range before initializing the Process base.
        self.__begin = begin
        self.__end = end
        super().__init__()

    def run(self):
        # Sum every prime in the assigned range and report it via the queue.
        total = sum(value for value in range(self.__begin, self.__end)
                    if Prime.is_prime(value))
        q.put(total)
@timeis
def process_10():
    """Split 1..100000 into ten chunks, run one Prime worker per chunk,
    and sum the per-chunk prime sums collected from the queue."""
    for lo in range(1, 100001, 10000):
        Prime(lo, lo + 10000).start()
    # Collect exactly one partial result per worker (blocks until done).
    return sum(q.get() for _ in range(10))
if __name__ == '__main__':
    # Sample wall-clock timings from earlier runs:
    # execution time: 7.4214277267456055
    # execution time: 6.722904920578003
    result = process_10()
    print(result)
301400252da0831f407c9e68f2a2701ecc7d13b7 | Python | nshiroma/HotWheelRaceTrack | /HW6LRACE7.py | UTF-8 | 2,692 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python
# you have to include the spidev module
import spidev
import time
# put these functions and variables at the
# top of your python project or in another
# file and import it
# Identity map from logical lane index to ADC channel number (A0-A7).
# Replaces ten explicit assignments with an equivalent dict comprehension.
A = {i: i for i in range(8)}
# Debounce threshold: two triggers on the same line closer together than
# this many seconds count as a finish.
timegap = 0.001
def adcInit():
    """Open SPI bus 0, device 0 and configure it for the ADC chip.

    Call this once near the top of your project, before adcRead().
    """
    spi = spidev.SpiDev()
    spi.open(0, 0)
    spi.max_speed_hz = 1350000
    spi.mode = 0b00
    return spi
def adcRead(connection, channel):
    """Read one analog channel (0-7) from the SPI ADC.

    NOTE(review): MCP3008-style replies are usually assembled as
    (reply[1] & 3) << 8 | reply[2]; the << 9 here doubles the weight of
    the high bits — confirm against the actual chip's datasheet.
    """
    assert 0 <= channel <= 7, 'ADC number has to be 0-7'
    reply = connection.xfer2([0x01, (8 + channel) << 4, 0x00])
    value = (reply[1] & 0x03) << 9
    value |= (reply[2] & 0xFF)
    return value
def time_convert(sec):
    """Print *sec* seconds formatted as hours:minutes:seconds
    (the seconds part may be fractional)."""
    mins, sec = divmod(sec, 60)
    hours, mins = divmod(mins, 60)
    print("Time Lapsed = {0}:{1}:{2}".format(int(hours), int(mins), sec))
#-------------------------------------------------------
# only call adcInit() once in you project close to
# the top before calling adcRead()
# initiazie the dictionaries
adc = adcInit()
line ={}
last_triger_time={}
data ={}
start_time = time.time()
input("Press Enter to start")
for x in range(1,7):
print(x)
line[x] = True
last_triger_time[x] = start_time
looptrue = True
while looptrue:
# the first argument to adcRead() is the reurn from adcInit()
# the second argument is the analog pin to read A0-A7
for x in range(1, 7):
data[x] = adcRead(adc, A[x-1])
for y in range(1,7):
if line[y] :
if data[y] > 300:
end_time = time.time()
time_lapsed = end_time - start_time
time_lapsed_bitween_las_trigger = end_time - last_triger_time[y]
# print(time_lapsed_bitween_las_trigger)
last_triger_time[y] = end_time
# time_convert(time_lapsed_bitween_las_trigger)
if time_lapsed_bitween_las_trigger < timegap:
line[y] = False
print(data[y])
# print(time_lapsed_bitween_las_trigger)
linename ="LineNumber{}"
print(linename.format(y))
time_convert(time_lapsed)
#
race_time=time.time()
race_time_lasped = race_time-start_time
if race_time_lasped > 30:
looptrue = False
print("Race time out")
for y in range(1, 7):
if line[y]:
DNF ="Line number{} did not finish"
print(DNF.format(y))
# end of race if all six car reached goal line
if not any([line[1],line[2],line[3],line[4],line[5],line[6]]):
looptrue = False
print("All Hot Wheel Reach Goal")
| true |
902731874fb484c11653972ff372a789148da8bd | Python | lsst-camera-dh/pybench-ccd-reb | /analysis/stats.py | UTF-8 | 1,502 | 2.59375 | 3 | [] | no_license | #! /usr/bin/env python
#
# LSST
#
# Simple tools to analyze fits files
# Exists also now as part of the bench scripts
import sys
import time
import numpy as np
import astropy.io.fits as pyfits
def basic_stats(hdulist):
    """Compute basic statistics for each readout channel of a CCD frame.

    For every HDU named CHAN_0..CHAN_47 present in *hdulist*, splits the
    image into the illuminated region, the dark (pre/post-scan) pixels
    and the serial overscan, and reports mean/stddev for each.

    :param hdulist: mapping of extension name -> HDU with a .data array
    :return: multi-line report string, one line per channel found
    """
    out = ''
    # Geometry of the useful image area inside each channel segment.
    imgcols = 512
    colstart = 10
    imglines = 900
    linestart = 50
    for ichan in range(48):
        name = "CHAN_%d" % ichan
        try:
            img = hdulist[name].data
        except KeyError:
            # BUG FIX: was a bare `except:` that swallowed every error
            # (including KeyboardInterrupt); narrowed to the expected
            # missing-extension case so real failures surface.
            continue
        light = img[linestart:imglines, colstart:colstart+imgcols].flatten()
        dark = np.concatenate((img[linestart:imglines, :colstart].flatten(),
                               img[linestart:imglines, colstart+imgcols:].flatten(),
                               img[imglines:].flatten()))
        over = img[imglines+2:, colstart:]  # +2 rows to avoid bad CTE effects
        out += "{}\t{:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f}\n".format(
            name, light.mean(), light.std(), dark.mean(), dark.std(), over.mean(), over.std())
    return out
if __name__ == '__main__':
    # Usage: stats.py <fitsfile> — prints per-channel statistics and also
    # appends them to a dated log file.
    fitsfile = sys.argv[1]
    hdulist = pyfits.open(fitsfile)
    logfile = "log"+time.strftime("%Y%m%d",time.localtime())+".txt"
    logger = open(logfile, 'a')
    logger.write(fitsfile+'\n')
    print("Channel\t MeanLight SdevLight MeanDark SdevDark MeanOver SdevOver")
    out = basic_stats(hdulist)
    print(out)
    logger.write(out)
    # TODO: correlated-noise analysis, e.g. np.corrcoef(x, y)
    logger.close()
    hdulist.close()
| true |
332a24310bc3e140a77adf350185b6e9c377bdb0 | Python | verted/euler | /euler27.py | UTF-8 | 411 | 2.703125 | 3 | [] | no_license | import primeGenerator
bList = primeGenerator.gen(1000)
bList = [x for x in range(len(bList)) if bList[x] == 1]
primeTest = primeGenerator.gen(2000000)
nMax = 0
aMax = 0
bMax = 0
for a in range(-999,1000):
for b in bList:
n = 0
while primeTest[n**2 + a*n +b]:
n+=1
if n >nMax:
aMax = a
bMax = b
nMax = n
print(aMax,bMax,aMax*bMax,nMax)
| true |
49484762f5cf7f158299c5a6caf51fa3c57d0456 | Python | moritzbo/anfaenger_praktikum | /Das Geiger-Müllerzählrohr/python/plot.py | UTF-8 | 1,611 | 2.890625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from uncertainties import ufloat
from scipy.optimize import curve_fit
from scipy.stats import sem
import scipy.constants as const
import uncertainties.unumpy as unp
# Geiger-Mueller counter characteristic curve: load (voltage, counts) data,
# plot the measurements with Poisson (sqrt N) error bars, and fit the
# plateau region with a straight line.
U, N = np.genfromtxt("Daten/Kennlinie.dat", unpack=True)
Nerr = np.sqrt(N)  # Poisson counting error
print(Nerr)
plt.errorbar(U, N, xerr=None, yerr=Nerr, fmt='kx', markersize=3.5, label='Messwerte mit Fehlerbalken')
plt.xlabel(r'$U[\si{\volt}]$')
plt.ylabel(r'$N[\text{Imp} / {60}\si{\second}]$')
plt.grid()
# Fit calculation: select the 27 plateau points (indices 5..31).
Ulin = []
Nlin = []
Nlinerr = []
for j in range(27):
    Ulin.append(U[j+5])
    Nlin.append(N[j+5])
    Nlinerr.append(Nerr[j+5])
print(Ulin)
print(Nlin)
Nerrarray = unp.uarray(Nlin, Nlinerr)
# Linear least-squares fit with covariance for the parameter errors.
params, covariance_matrix = np.polyfit(Ulin, Nlin, deg=1, cov=True)
errors = np.sqrt(np.diag(covariance_matrix))
pande0 = ufloat(params[0], errors[0])  # slope with uncertainty
pande1 = ufloat(params[1], errors[1])  # intercept with uncertainty
for name, value, error in zip('ab', params, errors):
    print(f'{name} = {value:.3f} ± {error:.3f}')
x_plot = np.linspace(370, 630)
plt.plot(
    x_plot,
    params[0] * x_plot + params[1], "b--",
    label='Lineare Ausgleichsgerade',
    linewidth=1.5,
)
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig("build/plot1.pdf")
def percentfunction(k, j):
    # Plateau slope in percent per 100 V between voltages j and k
    # (2.6 is the plateau length in units of 100 V).
    return (100/2.6) * (pande0 * k + pande1 - pande0 * j - pande1)/(pande0* k + pande1)
print(f'{percentfunction(630, 370):.6f}')
print(f"{Nerrarray[0]:.6f}")
print(f"{Nerrarray[26]:.6f}")
dc79ee4162dfb375cdea2805860db351b9d2bda2 | Python | NiravModiRepo/Coding | /Invested/Rev3_NewMethod/NewMethodNvda.py | UTF-8 | 8,883 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 8, 2020
@author: Nirav
May Practice - Windage Rate
"""
def GetGrowthRate(x, arr):
    """Return the arithmetic mean of the first *x* entries of *arr*.

    Used to average the first x year-over-year growth percentages
    (e.g. 3-, 5- and 10-year growth rates).

    :param x: number of leading entries to average (must be > 0)
    :param arr: sequence of numbers, at least x long
    """
    # Idiom: built-in sum over a slice replaces the manual accumulator loop.
    return sum(arr[:x]) / x
def GetYearlyGrowth(arr):
    """Year-over-year percentage change between consecutive entries.

    *arr* is ordered newest-first, so result[i] compares arr[i] against
    arr[i+1]. A zero (or falsy) older value yields 0 for that entry to
    avoid division by zero. Returns a list of length len(arr) - 1.
    """
    growth = []
    for newer, older in zip(arr, arr[1:]):
        growth.append(((newer * 100) / older) - 100 if older else 0)
    return growth
# Fundamental data for NVIDIA, entered newest fiscal year first.
# All monetary figures are in millions of USD, copied from macrotrends.
dataNetIncome = []
dataTotalEquity = []
dataDividends = []
dataNetSales = []
dataCashGenerated = []
#DATA_ENTRY Net Income from Income Statement
#https://www.macrotrends.net/stocks/charts/nvda/nvidia/net-income
dataNetIncome.append(4141)
dataNetIncome.append(3047)
dataNetIncome.append(1666)
dataNetIncome.append(614)
dataNetIncome.append(631)
dataNetIncome.append(440)
dataNetIncome.append(563)
dataNetIncome.append(581)
dataNetIncome.append(253)
dataNetIncome.append(-68)
dataNetIncome.append(-30)
#DATA_ENTRY Equity from Balance Sheet
# NOTE(review): the URL below points at AAPL, likely a copy-paste
# leftover; the values themselves are NVIDIA's — confirm the source.
#https://www.macrotrends.net/stocks/charts/AAPL/apple/total-share-holder-equity
dataTotalEquity.append(9342)
dataTotalEquity.append(7471)
dataTotalEquity.append(5762)
dataTotalEquity.append(4469)
dataTotalEquity.append(4418)
dataTotalEquity.append(4456)
dataTotalEquity.append(4828)
dataTotalEquity.append(4146)
dataTotalEquity.append(3181)
dataTotalEquity.append(2665)
dataTotalEquity.append(2395)
#DATA_ENTRY Dividends from Cash Flow Statement (negative = cash paid out)
#https://www.macrotrends.net/stocks/charts/NVDA/nvidia/common-stock-dividends-paid
dataDividends.append(-371)
dataDividends.append(-341)
dataDividends.append(-261)
dataDividends.append(-213)
dataDividends.append(-186)
dataDividends.append(-181)
dataDividends.append(-47)
dataDividends.append(0)
dataDividends.append(0)
dataDividends.append(0)
dataDividends.append(0)
#DATA_ENTRY Sales from Income Statement
#https://www.macrotrends.net/stocks/charts/AAPL/apple/revenue
dataNetSales.append(11719)
dataNetSales.append(9714)
dataNetSales.append(6910)
dataNetSales.append(5010)
dataNetSales.append(4682)
dataNetSales.append(4130)
dataNetSales.append(4280)
dataNetSales.append(3998)
dataNetSales.append(3543)
dataNetSales.append(3326)
dataNetSales.append(3425)
#DATA_ENTRY Operating Cash from Cash Flow Statement
#https://www.macrotrends.net/stocks/charts/NVDA/nvidia/cash-flow-from-operating-activities
dataCashGenerated.append(3743)
dataCashGenerated.append(3502)
dataCashGenerated.append(1672)
dataCashGenerated.append(1175)
dataCashGenerated.append(906)
dataCashGenerated.append(835)
dataCashGenerated.append(824)
dataCashGenerated.append(909)
dataCashGenerated.append(676)
dataCashGenerated.append(488)
dataCashGenerated.append(249)
#DATA_ENTRY EPS (diluted, trailing twelve months)
#https://www.macrotrends.net/stocks/charts/NVDA/nvidia/eps-earnings-per-share-diluted
eps = 6.63
#DATA_ENTRY Highest PE Ratio from last 10 years
#https://www.macrotrends.net/stocks/charts/NVDA/nvidia/pe-ratio
highestPeRatio = 61.36
#DATA_ENTRY analyst consensus growth estimate, next 5 years (% per annum)
#https://finance.yahoo.com/quote/aapl/analysis/
#Growth Estimates - Next 5 Years (per annum)
analystWindageRate = 12.50
############################################################
# Windage-rate estimation (Rule #1 "big four" growth numbers): average the
# 3-, 5- and 10-year growth rates of each metric, then average the metrics.
yearlyGrowthNetIncome = GetYearlyGrowth(dataNetIncome)
growthRate3YearNetIncome = GetGrowthRate(3, yearlyGrowthNetIncome)
growthRate5YearNetIncome = GetGrowthRate(5, yearlyGrowthNetIncome)
growthRate10YearNetIncome = GetGrowthRate(10, yearlyGrowthNetIncome)
############################################################
# Equity growth is computed on equity plus dividends paid, so buybacks and
# payouts do not depress the growth figure.
dataEquityPlusDividends = []
for i in range(0,len(dataTotalEquity)):
    dataEquityPlusDividends.append(dataTotalEquity[i]+dataDividends[i])
yearlyGrowthEquityPlusDividends = GetYearlyGrowth(dataEquityPlusDividends)
growthRate3YearEquityPlusDividends = GetGrowthRate(3, yearlyGrowthEquityPlusDividends)
growthRate5YearEquityPlusDividends = GetGrowthRate(5, yearlyGrowthEquityPlusDividends)
growthRate10YearEquityPlusDividends = GetGrowthRate(10, yearlyGrowthEquityPlusDividends)
############################################################
yearlyGrowthNetSales = GetYearlyGrowth(dataNetSales)
growthRate3YearNetSales = GetGrowthRate(3, yearlyGrowthNetSales)
growthRate5YearNetSales = GetGrowthRate(5, yearlyGrowthNetSales)
growthRate10YearNetSales = GetGrowthRate(10, yearlyGrowthNetSales)
############################################################
yearlyGrowthCashGenerated = GetYearlyGrowth(dataCashGenerated)
growthRate3YearCashGenerated = GetGrowthRate(3, yearlyGrowthCashGenerated)
growthRate5YearCashGenerated = GetGrowthRate(5, yearlyGrowthCashGenerated)
growthRate10YearCashGenerated = GetGrowthRate(10, yearlyGrowthCashGenerated)
############################################################
# Per-metric windage rate: mean of the 3/5/10-year growth rates.
windageNetIncome = (growthRate3YearNetIncome + growthRate5YearNetIncome + growthRate10YearNetIncome)/3
windageEquityPlusDividends = (growthRate3YearEquityPlusDividends + growthRate5YearEquityPlusDividends + growthRate10YearEquityPlusDividends)/3
windageNetSales = (growthRate3YearNetSales + growthRate5YearNetSales + growthRate10YearNetSales)/3
windageCashGenerated = (growthRate3YearCashGenerated + growthRate5YearCashGenerated + growthRate10YearCashGenerated)/3
windageRate = (windageNetIncome + windageEquityPlusDividends + windageNetSales + windageCashGenerated)/4
print("Overall Windage Rate: ", windageRate)
print("\n")
# Be conservative: use the lower of our estimate and the analyst estimate.
windageGrowthRate = min(windageRate, analystWindageRate)
########################################################################################################################
futureEps = eps
windagePercent = windageGrowthRate/100
# Windage PE: the lower of twice the growth rate and the historical high PE.
print("Windage PE Ratio: Min of 2xWindage Rate:", 2*windageGrowthRate, ", Highest PE Ratio: ", highestPeRatio)
windagePeRatio = min(2*windageGrowthRate, highestPeRatio)
print("\n")
########################################################################################################################
print("windagePercent: ", windagePercent)
#Step One: compound EPS forward 10 years at the windage growth rate.
for i in range(0,10):
    futureEps *= (1+windagePercent)
    print("Year", i, "Price: ", futureEps)
print("\n")
print("Step One: Future EPS: ", futureEps)
#Step Two: projected share price in 10 years.
future10YearSharePrice = futureEps * windagePeRatio
print("Step Two: Future 10 Year Share Price: ", future10YearSharePrice)
#Step Three: sticker price = future price discounted at ~15%/yr (/4 over 10y).
stickerPrice = future10YearSharePrice / 4
print("Step Three: Sticker Price: ", stickerPrice)
#Step Four: buy price with a 50% margin of safety.
marginOfSafetyPrice = stickerPrice/2
print("Step Four: Margin Of Safety Price: ", marginOfSafetyPrice)
f0a11cd83b4f39925163cbc7dfdff2427b7eee27 | Python | JasoSalgado/algorithms | /sorting-by-insertion/exercises/exercise-01.py | UTF-8 | 339 | 4.375 | 4 | [] | no_license | """
Sorting by insertion
"""
list = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
print(f"List: {list}")
for i in range(1, len(list)):
aux = list[i] # i = 9
j = i - 1 # j = 10
while j >= 0 and aux < list[j]:
list[j + 1] = list[j]
list[j] = aux
j -= 1
print(f"Going through the list: {list}")
print(list)
| true |
76c11768345d5142537651abaa43ba41dfe3c9c7 | Python | usher-unlam/usher-api | /sync-py/request.py | UTF-8 | 3,770 | 3.296875 | 3 | [] | no_license | # importing the requests library
import requests
from enum import IntEnum
class Method(IntEnum):
    """HTTP verb selector for HTTPRequest."""
    GET = 1
    POST = 2
class ResponseType(IntEnum):
    """How HTTPRequest should decode the response body."""
    Text = 1
    JSON = 2
# class Synchro(IntEnum):
# Synchronous = 1
# Asynchronous = 2
class HTTPRequest():
    def __init__(self, url=None, params={}, method=Method.GET, apikey=None, respType=ResponseType.Text):
        """Store the default request settings.

        :param url: target endpoint
        :param params: dict of parameters sent to the API, e.g.
                       {'address': location}
                       (NOTE(review): the mutable default {} is shared
                       between instances; harmless here since it is only
                       replaced, never mutated — but worth confirming)
        :param method: Method.GET or Method.POST
        :param apikey: API token, sent to the server as 'token'
        :param respType: ResponseType for decoding the response body
        """
        self.url = url
        self.params = params
        self.method = method
        self.apikey = apikey
        self.respType = respType
def setup(self, url=None, params={}, method=None, apikey=None, respType=None):
if not url is None:
self.url = url
# definición de parametros de API: dict for the parameters to be sent to the API: params = {'address':location}
if not params == {}:
self.params = params
if not method is None:
self.method = method
if not apikey is None:
self.apikey = apikey
if not respType is None:
self.respType = respType
def get(self, url=None, dataDict=None, respType=None):
url = url if not url is None and url != "" else self.url
dataDict = dataDict if not dataDict is None and dataDict != {} else self.params
apikey = None if self.apikey == "" else self.apikey
respType = respType if not respType is None and respType != "" else self.respType
return HTTPRequest.sendRequest(url= url, dataDict= dataDict, method= Method.GET, apikey= apikey, respType= respType)
def post(self, url=None, dataDict=None, apikey=None, respType=None):
url = url if not url is None and url != "" else self.url
dataDict = dataDict if not dataDict is None and dataDict != {} else self.params
apikey = apikey if not apikey is None and apikey != "" else self.apikey
respType = respType if not respType is None and respType != "" else self.respType
return HTTPRequest.sendRequest(url= url, dataDict= dataDict, method= Method.POST, apikey= apikey, respType= respType)
def request(self):
if self.method == Method.GET:
return self.get()
if self.method == Method.POST:
return self.post()
@staticmethod
def sendRequest(url, apikey=None, dataDict={}, method=Method.GET, respType=ResponseType.Text):
# define dato token para api key
if not apikey is None:
dataDict['token'] = apikey
r = None
if method == Method.GET:
# sending get request and saving the response as response object
r = requests.get(url=url,
params=dataDict)
elif method == Method.POST:
# sending post request and saving response as response object
r = requests.post(url=url,
data=dataDict)
else:
r = None #TODO: excepcion "Metodo no soportado"
# respuesta de API
if r is None:
data = None
else:
if respType == ResponseType.Text:
data = r.text # extracting data as text
elif respType == ResponseType.JSON:
# si el formato no es compatible con JSON, devuelve el Texto
try:
data = r.json() # extracting data in json format
except BaseException as wrongFormat:
data = r.text # extracting data as text
else:
data = None #TODO: excepcion "tipo de respuesta no soportada"
# extracting val1
#val1 = data['results'][0]['val1']
# printing the output
print("Data:",data)
return data | true |
8cabff2e8b4edfe88dc23a09e811f602b65fbe2c | Python | namvu47/PYA0818E | /7 - OOP/Polymorphism/ex2 - quanlysach/sachgiaokhoa.py | UTF-8 | 698 | 2.875 | 3 | [] | no_license | from Polymorphism.ex2.sach import *
class sachgiaokhoa(sach):
    """Textbook (sach giao khoa): total price depends on the book's condition."""

    def __init__(self, masach, ngaynhap, dongia, soluong, nhaxuatban, tinhtrang):
        super().__init__(masach, ngaynhap, dongia, soluong, nhaxuatban)
        self.tinh_trang = tinhtrang

    def thanhtien(self):
        """Return the total as a 2-decimal string: full price when the
        condition (spaces stripped) is 'moi' (new), half price otherwise."""
        base = int(self.so_luong) * int(self.don_gia)
        if self.tinh_trang.replace(' ', '') == 'moi':
            total = base
        else:
            total = base * 0.5
        return f'{total:0.2f}'

    def __str__(self):
        return sach.__str__(self) + (f' \n\
                          6. Tinh trang: {self.tinh_trang} ')
| true |
f12726630c8116ead5a7966b04d961c5849a8be1 | Python | williancae/pythonGuanabara | /mundo03/Exercicios/ex102.py | UTF-8 | 365 | 3.484375 | 3 | [] | no_license | from datetime import date
# Today's date, captured once at start-up; only .year is used below.
ano = date.today()
def votacao(idade):
    """Return the voting status string for the given age."""
    # NOTE(review): the capitalisation differs between the 17-year-old branch
    # ('Opcional') and the fallback ('opcional') -- possibly unintended, but
    # preserved here.
    if 17 <= idade < 18:
        return 'Opcional'
    if 18 <= idade <= 60:
        return 'Obrigatorio'
    return 'opcional'
# Interactive part: read the birth year, derive the age from the current year
# (month/day are ignored, so the age can overshoot by one), and report it.
anoNascimento = int(input('Quando nasceu: '))
idade = ano.year - anoNascimento
info = votacao(idade)
print(f'Tem {idade} e é {info} a votação')
3ff02442c821ec05af0705906a986d881aa09f36 | Python | odalipa/alpine-python-getvendor | /getvendor.py | UTF-8 | 1,421 | 3.359375 | 3 | [] | no_license | import requests
import sys
# API URL
url = "https://api.macaddress.io/v1"
# SECURITY NOTE(review): a live API key is hard-coded in the source. It should
# be moved to an environment variable / config file and this one revoked.
apiKey = "at_nNvyTDHLOB91TF61rW7wLcDvLhq2z"
# Desired output format
output = "json"
# MAC address: first CLI argument if provided, otherwise prompt the user.
if len(sys.argv) == 1:
    macaddr = input("Enter MAC address: ")
else:
    macaddr = sys.argv[1]
# Form the complete API query. Example:
# https://api.macaddress.io/v1?apiKey=at_nNvyTDHLOB91TF61rW7wLcDvLhq2z&output=json&search=c8:d9:d2:a9:f5:7f
query = url + "?" + "apiKey=" + apiKey + "&" + "output=" + output + "&" + "search=" + macaddr
try:
    # Place request; data is returned as a python dictionary
    result = requests.get(query)
    data = result.json()
    # The API signals failures via an 'error' key in the body
    if 'error' in data:
        print(data['error'])
    else:
        print("API Query:", query)
        print("MAC Address:", macaddr)
        print("OUI:", data['vendorDetails']['oui'])
        # Bug fix: output label typo "Comapny" -> "Company"
        print("Company Name:", data['vendorDetails']['companyName'])
        print("Company Address:", data['vendorDetails']['companyAddress'])
        print("Company Country Code:", data['vendorDetails']['countryCode'])
except Exception:
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    # `except Exception` keeps the same user-facing message for real failures
    # (network errors, unexpected payload shape) while letting Ctrl-C work.
    print('Requested API is not reachable: check your network connection and api query')
| true |
502d5863b4f41c621a3e448756e37515342284a9 | Python | marcenavuc/python_listings | /11 serialization/pckl.py | UTF-8 | 293 | 3 | 3 | [] | no_license | import pickle
import pickletools
class Foo:
    """Minimal class used to demonstrate pickling below."""
    attr = "Some attribute"  # class attribute: resolved on the class, not stored in the pickle
    def __init__(self, name):
        self.name = name
# Round-trip demo: serialize a Foo instance, then restore it.
picklestring = pickle.dumps(Foo("boo"))
print(picklestring)
foo = pickle.loads(picklestring)
# Instance state (`name`) travels inside the pickle; the class attribute
# (`attr`) is looked up on the class again after unpickling.
print(foo.name)
print(foo.attr)
# pickletools.dis prints the opcode disassembly to stdout and returns None,
# so this final print also emits "None".
print(pickletools.dis(picklestring))
59fcbbd0e0819aab408ac23187c5877eaa29f3e6 | Python | hari-ushankar/pyiron_contrib | /tests_integration/continuum/fenics/tutorials/page_8.py | UTF-8 | 1,831 | 3.09375 | 3 | [
"BSD-3-Clause"
] | permissive | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from fenics import *
def poisson_nonlinear():
    """Solve a nonlinear Poisson problem -div(q(u) grad(u)) = f on a unit
    square with a manufactured solution u = 1 + x + 2y, and return the
    solution values at the mesh vertices."""
    # Warning: from fenics import * will import both `sym` and
    # `q` from FEniCS. We therefore import FEniCS first and then
    # overwrite these objects.
    def q(u):
        "Return nonlinear coefficient"
        return 1 + u ** 2
    # Use SymPy to compute f from the manufactured solution u
    import sympy as sym
    x, y = sym.symbols('x[0], x[1]')
    u = 1 + x + 2 * y
    f = - sym.diff(q(u) * sym.diff(u, x), x) - sym.diff(q(u) * sym.diff(u, y), y)
    f = sym.simplify(f)
    # Emit C code strings so FEniCS Expression() can compile them.
    u_code = sym.printing.ccode(u)
    f_code = sym.printing.ccode(f)
    # print('u =', u_code)
    # print('f =', f_code)
    # Create mesh and define function space (P1 elements on an 8x8 grid)
    mesh = UnitSquareMesh(8, 8)
    V = FunctionSpace(mesh, 'P', 1)
    # Define boundary condition: the exact solution on the whole boundary
    u_D = Expression(u_code, degree=2)
    def boundary(x, on_boundary):
        return on_boundary
    bc = DirichletBC(V, u_D, boundary)
    # Define variational problem
    u = Function(V) # Note: not TrialFunction!
    v = TestFunction(V)
    f = Expression(f_code, degree=2)
    F = q(u) * dot(grad(u), grad(v)) * dx - f * v * dx
    # Compute solution (F == 0 triggers FEniCS's nonlinear Newton solver)
    solve(F == 0, u, bc)
    # # Plot solution
    # plot(u)
    # # Compute maximum error at vertices. This computation illustrates
    # # an alternative to using compute_vertex_values as in poisson.py.
    # u_e = interpolate(u_D, V)
    # import numpy as np
    # error_max = np.abs(u_e.vector().array() - u.vector().array()).max()
    # print('error_max = ', error_max)
    # # Hold plot
    # interactive()
    return u.compute_vertex_values(mesh)
| true |
99d3ee161f1aef76e1219a9f74785faa5ad904bd | Python | sytsao/Python-ComputationalThinking-book | /ch6/e6-3-2.py | UTF-8 | 728 | 2.875 | 3 | [] | no_license | #e6-3-2上網路抓取股價資料
import pandas as pd
from pandas_datareader import data as web
import fix_yahoo_finance
import datetime
import time
# Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the documented replacement for elapsed-time timing.
starttime = time.perf_counter()
# Download window and output workbook for the daily price data.
start = "2000-01-01"
end = "2017-04-30"
writer = pd.ExcelWriter('stocprice.xlsx')
# Fetch each ticker from Yahoo in turn; each company gets its own worksheet
# named after its stock id.
stockid = ('2303', '2330', '3008', '2498', '2311', '2409', '2357', '2317')
for sid in stockid:
    df = web.get_data_yahoo(sid + '.tw', start, end)
    df.to_excel(writer, sid)
# Persist the workbook. NOTE(review): ExcelWriter.save() is deprecated in
# modern pandas in favour of close() -- confirm against the pinned version.
writer.save()
endtime = time.perf_counter()
print('程式執行時間 = %d %s' %(round(endtime - starttime), '秒'))
| true |
6daa661c39cffa842df725dc561a6adcb8c205a0 | Python | szaydel/pike | /src/pike/exceptions.py | UTF-8 | 1,908 | 2.8125 | 3 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
Exceptions used by multiple modules are defined here
"""
from . import core
class TimeoutError(Exception):
    """Raised when waiting for a future's completion times out."""

    # Default when the exception is raised without an associated future.
    future = None

    @classmethod
    def with_future(cls, future, *args):
        """Build a TimeoutError carrying the future that timed out.

        :param future: Future that timed out
        :param args: passed through to Exception.__init__
        :return: TimeoutError
        """
        instance = cls(*args)
        instance.future = future
        return instance

    def __str__(self):
        parts = [super(TimeoutError, self).__str__()]
        future = self.future
        if future is not None:
            request = future.request
            if request is not None:
                rendered = [str(request)]
                if not isinstance(request, (core.Frame, str, bytes)):
                    # attempt to recursively str format other iterables
                    try:
                        rendered = [str(item) for item in request]
                    except TypeError:
                        pass
                parts.append("Request: {}".format("\n".join(rendered)))
            if future.interim_response is not None:
                parts.append("Interim: {}".format(future.interim_response))
        return "\n".join(parts)
class StateError(Exception):
    """An operation was attempted while in an inappropriate state."""
    pass
class CreditError(Exception):
    """Raised on credit-accounting failures."""
    pass
class RequestError(Exception):
    """Raised when a request could not be sent."""

    def __init__(self, request, message=None):
        # Default message embeds the repr of the offending request.
        super().__init__(message if message is not None
                         else f"Could not send {request!r}")
        self.request = request
class CallbackError(Exception):
    """
    Raised when a supplied callback is not suitable.
    """
class ResponseError(Exception):
    """Raised when an SMB2 response contains an unexpected NTSTATUS."""

    def __init__(self, response):
        # args carry (command, status) so both show up in the traceback.
        super().__init__(response.command, response.status)
        self.response = response
| true |
d58d5e7d089f863c648e9afb33a3aefce0c504eb | Python | KagontleBooysen/AirBnB_clone_v3 | /api/v1/views/places.py | UTF-8 | 2,622 | 2.78125 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/python3
""" State view module"""
from flask import jsonify, abort, request
from api.v1.views import app_views
from models import storage
from models.place import Place
from models.city import City
from models.user import User
@app_views.route('/cities/<city_id>/places', strict_slashes=False)
def places_by_city(city_id):
    """Return every Place belonging to the given City as a JSON list."""
    if storage.get(City, city_id) is None:
        abort(404)
    result = []
    for place in storage.all(Place).values():
        if place.city_id == city_id:
            result.append(place.to_dict())
    return jsonify(result), 200
@app_views.route('/places/<place_id>', strict_slashes=False)
def get_place(place_id):
    """Return one Place as JSON; 404 when the id is unknown."""
    place = storage.get(Place, place_id)
    if place is not None:
        return jsonify(place.to_dict()), 200
    abort(404)
@app_views.route('/places/<place_id>', methods=['DELETE'],
                 strict_slashes=False)
def delete_place(place_id):
    """Delete a Place by id.

    Returns an empty JSON object on success; 404 when the id is unknown.
    """
    place = storage.get(Place, place_id)
    if place is None:
        abort(404)
    storage.delete(place)
    # Bug fix: the deletion was never committed. Every other mutating handler
    # in this module persists via save(); without this the delete was lost.
    storage.save()
    return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', methods=['POST'],
                 strict_slashes=False)
def add_place(city_id):
    """Create a Place under a City.

    The request body must be JSON containing at least 'name' and 'user_id';
    'user_id' must reference an existing User.

    Args:
        city_id (id): city id to add the place to
    Returns:
        place: the created place as JSON, with status 201
    """
    if storage.get(City, city_id) is None:
        abort(404)
    if not request.is_json:
        abort(400, description="Not a JSON")
    payload = request.get_json()
    # Both fields are mandatory; report the first one that is absent.
    for required in ('name', 'user_id'):
        if required not in payload:
            abort(400, description="Missing {}".format(required))
    # The user id in the request must be linked to an existing User.
    if storage.get(User, payload['user_id']) is None:
        abort(404)
    new_place = Place(**payload)
    new_place.city_id = city_id
    new_place.save()
    return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', methods=['PUT'], strict_slashes=False)
def update_place(place_id):
    """Update the mutable attributes of a Place from a JSON body."""
    place = storage.get(Place, place_id)
    if place is None:
        abort(404)
    if not request.is_json:
        abort(400, description="Not a JSON")
    # Identity and bookkeeping fields may never be overwritten by clients.
    immutable = ('id', 'user_id', 'city_id', 'created_at', 'updated_at')
    for attr, val in request.get_json().items():
        if attr not in immutable:
            setattr(place, attr, val)
    place.save()
    return jsonify(place.to_dict()), 200
| true |
657c620933e05d24e9ace46b27758c02e7fdc9c9 | Python | Anuradhapatel7/contact-management | /access_code.py | UTF-8 | 9,576 | 3.015625 | 3 | [] | no_license | #import libraries
from tkinter import *
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
import sqlite3
#function to define database
def Database():
    """Open (or create) contact.db and ensure the REGISTRATION table exists.

    Publishes the live connection and cursor through the module-level
    globals `conn` and `cursor`; the calling handlers close them.
    """
    global conn, cursor
    #creating contact database (file is created in the working directory)
    conn = sqlite3.connect("contact.db")
    cursor = conn.cursor()
    #creating REGISTRATION table on first run; no-op afterwards
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS REGISTRATION (RID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, FNAME TEXT, LNAME TEXT, GENDER TEXT, ADDRESS TEXT, CONTACT TEXT)")
#defining function for creating GUI Layout
def DisplayForm():
    """Build the whole GUI: window, registration form, search panel and the
    record table, then load all rows.

    Publishes the Treeview and the form StringVars as module globals so the
    other handlers (register, Update, Delete, ...) can reach them.
    """
    #creating window
    display_screen = Tk()
    #setting width and height for window
    display_screen.geometry("900x400")
    #setting title for window
    display_screen.title("krazyprogrammer.com presents")
    global tree
    global SEARCH
    global fname,lname,gender,address,contact
    SEARCH = StringVar()
    fname = StringVar()
    lname = StringVar()
    gender = StringVar()
    address = StringVar()
    contact = StringVar()
    #creating frames for layout
    #top frame for the heading
    TopViewForm = Frame(display_screen, width=600, bd=1, relief=SOLID)
    TopViewForm.pack(side=TOP, fill=X)
    #first left frame for the registration form
    LFrom = Frame(display_screen, width="350",bg="#15244C")
    LFrom.pack(side=LEFT, fill=Y)
    #second left frame for the search form
    LeftViewForm = Frame(display_screen, width=500,bg="#0B4670")
    LeftViewForm.pack(side=LEFT, fill=Y)
    #mid frame for displaying the records table
    MidViewForm = Frame(display_screen, width=600)
    MidViewForm.pack(side=RIGHT)
    #label for heading
    lbl_text = Label(TopViewForm, text="Contact Management System", font=('verdana', 18), width=600,bg="cyan")
    lbl_text.pack(fill=X)
    #registration form fields in the first left frame
    Label(LFrom, text="First Name ", font=("Arial", 12),bg="#15244C",fg="white").pack(side=TOP)
    Entry(LFrom,font=("Arial",10,"bold"),textvariable=fname).pack(side=TOP, padx=10, fill=X)
    Label(LFrom, text="Last Name ", font=("Arial", 12),bg="#15244C",fg="white").pack(side=TOP)
    Entry(LFrom, font=("Arial", 10, "bold"),textvariable=lname).pack(side=TOP, padx=10, fill=X)
    Label(LFrom, text="Gender ", font=("Arial", 12),bg="#15244C",fg="white").pack(side=TOP)
    #Entry(LFrom, font=("Arial", 10, "bold"),textvariable=gender).pack(side=TOP, padx=10, fill=X)
    # Gender is an OptionMenu bound to the same StringVar instead of an Entry.
    gender.set("Select Gender")
    content={'Male','Female'}
    OptionMenu(LFrom,gender,*content).pack(side=TOP, padx=10, fill=X)
    Label(LFrom, text="Address ", font=("Arial", 12),bg="#15244C",fg="white").pack(side=TOP)
    Entry(LFrom, font=("Arial", 10, "bold"),textvariable=address).pack(side=TOP, padx=10, fill=X)
    Label(LFrom, text="Contact ", font=("Arial", 12),bg="#15244C",fg="white").pack(side=TOP)
    Entry(LFrom, font=("Arial", 10, "bold"),textvariable=contact).pack(side=TOP, padx=10, fill=X)
    Button(LFrom,text="Submit",font=("Arial", 10, "bold"),command=register,bg="#15244C",fg="white").pack(side=TOP, padx=10,pady=5, fill=X)
    #search label and entry in the second frame
    lbl_txtsearch = Label(LeftViewForm, text="Enter fname to Search", font=('verdana', 10),bg="#0B4670")
    lbl_txtsearch.pack()
    #creating search entry
    search = Entry(LeftViewForm, textvariable=SEARCH, font=('verdana', 15), width=10)
    search.pack(side=TOP, padx=10, fill=X)
    #creating search button
    btn_search = Button(LeftViewForm, text="Search", command=SearchRecord,bg="cyan")
    btn_search.pack(side=TOP, padx=10, pady=10, fill=X)
    #creating view button
    btn_view = Button(LeftViewForm, text="View All", command=DisplayData,bg="cyan")
    btn_view.pack(side=TOP, padx=10, pady=10, fill=X)
    #creating reset button
    btn_reset = Button(LeftViewForm, text="Reset", command=Reset,bg="cyan")
    btn_reset.pack(side=TOP, padx=10, pady=10, fill=X)
    #creating delete button
    btn_delete = Button(LeftViewForm, text="Delete", command=Delete,bg="cyan")
    btn_delete.pack(side=TOP, padx=10, pady=10, fill=X)
    #create update button
    # NOTE(review): `btn_delete` is reused for the Update button; harmless
    # since both widgets are already packed, but the name is misleading.
    btn_delete = Button(LeftViewForm, text="Update", command=Update,bg="cyan")
    btn_delete.pack(side=TOP, padx=10, pady=10, fill=X)
    #scrollbars for the record table
    scrollbarx = Scrollbar(MidViewForm, orient=HORIZONTAL)
    scrollbary = Scrollbar(MidViewForm, orient=VERTICAL)
    # NOTE(review): column identifiers still carry student-record names
    # (Student Id, Email, ...) although the headings show contact fields.
    tree = ttk.Treeview(MidViewForm,columns=("Student Id", "Name", "Contact", "Email","Rollno","Branch"),
                        selectmode="extended", height=100, yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)
    scrollbary.config(command=tree.yview)
    scrollbary.pack(side=RIGHT, fill=Y)
    scrollbarx.config(command=tree.xview)
    scrollbarx.pack(side=BOTTOM, fill=X)
    #setting headings for the columns
    tree.heading('Student Id', text="Id", anchor=W)
    tree.heading('Name', text="FirstName", anchor=W)
    tree.heading('Contact', text="LastName", anchor=W)
    tree.heading('Email', text="Gender", anchor=W)
    tree.heading('Rollno', text="Address", anchor=W)
    tree.heading('Branch', text="Contact", anchor=W)
    #setting width of the columns
    tree.column('#0', stretch=NO, minwidth=0, width=0)
    tree.column('#1', stretch=NO, minwidth=0, width=100)
    tree.column('#2', stretch=NO, minwidth=0, width=150)
    tree.column('#3', stretch=NO, minwidth=0, width=80)
    tree.column('#4', stretch=NO, minwidth=0, width=120)
    tree.pack()
    DisplayData()
#function to update data into database
def Update():
    """Write the form fields back into the row currently selected in the tree."""
    Database()
    #getting form data
    fname1=fname.get()
    lname1=lname.get()
    gender1=gender.get()
    address1=address.get()
    contact1=contact.get()
    #applying empty validation: every field must be filled
    if fname1=='' or lname1==''or gender1=='' or address1==''or contact1=='':
        tkMessageBox.showinfo("Warning","fill the empty field!!!")
    else:
        #getting selected data (RID is the first value of the focused row)
        curItem = tree.focus()
        contents = (tree.item(curItem))
        selecteditem = contents['values']
        #update query (parameterized)
        conn.execute('UPDATE REGISTRATION SET FNAME=?,LNAME=?,GENDER=?,ADDRESS=?,CONTACT=? WHERE RID = ?',(fname1,lname1,gender1,address1,contact1, selecteditem[0]))
        conn.commit()
        tkMessageBox.showinfo("Message","Updated successfully")
        #reset form
        Reset()
        #refresh table data
        # NOTE(review): DisplayData()/Reset() reopen and close the global
        # connection themselves, so this close() hits an already-closed
        # connection (a harmless no-op in sqlite3).
        DisplayData()
        conn.close()
def register():
    """Insert a new contact built from the form fields into REGISTRATION."""
    Database()
    #getting form data
    fname1=fname.get()
    lname1=lname.get()
    gender1=gender.get()
    address1=address.get()
    contact1=contact.get()
    #applying empty validation: every field must be filled
    if fname1=='' or lname1==''or gender1=='' or address1==''or contact1=='':
        tkMessageBox.showinfo("Warning","fill the empty field!!!")
    else:
        #execute parameterized insert (RID auto-increments)
        conn.execute('INSERT INTO REGISTRATION (FNAME,LNAME,GENDER,ADDRESS,CONTACT) \
        VALUES (?,?,?,?,?)',(fname1,lname1,gender1,address1,contact1));
        conn.commit()
        tkMessageBox.showinfo("Message","Stored successfully")
        #refresh table data (DisplayData reopens/closes the global connection)
        DisplayData()
        conn.close()
def Reset():
    """Clear the table view, reload all rows, and blank every form field."""
    tree.delete(*tree.get_children())
    DisplayData()
    # Blank the search box and all form variables, in the original order.
    for field in (SEARCH, fname, lname, gender, address, contact):
        field.set("")
def Delete():
    """Delete the selected row from both the Treeview and the database,
    after a confirmation dialog."""
    #open database
    Database()
    if not tree.selection():
        tkMessageBox.showwarning("Warning","Select data to delete")
        # NOTE(review): this branch leaves the connection opened by
        # Database() unclosed (pre-existing behaviour, preserved here).
    else:
        result = tkMessageBox.askquestion('Confirm', 'Are you sure you want to delete this record?',
                                          icon="warning")
        if result == 'yes':
            curItem = tree.focus()
            contents = (tree.item(curItem))
            selecteditem = contents['values']
            tree.delete(curItem)
            # Bug fix: the RID was %-formatted straight into the SQL string;
            # use a parameterized query, consistent with the other handlers.
            cursor=conn.execute("DELETE FROM REGISTRATION WHERE RID = ?", (selecteditem[0],))
            conn.commit()
            cursor.close()
            conn.close()
#function to search data
def SearchRecord():
    """Filter the table to rows whose first name contains the search text."""
    #open database
    Database()
    #do nothing when the search box is empty
    if SEARCH.get() != "":
        #clearing current display data
        tree.delete(*tree.get_children())
        #parameterized LIKE query; %...% matches the text anywhere in FNAME
        cursor=conn.execute("SELECT * FROM REGISTRATION WHERE FNAME LIKE ?", ('%' + str(SEARCH.get()) + '%',))
        #fetch all matching records
        fetch = cursor.fetchall()
        #loop for displaying all records into GUI
        for data in fetch:
            tree.insert('', 'end', values=(data))
        cursor.close()
        conn.close()
#defining function to access data from SQLite database
def DisplayData():
    """Reload every REGISTRATION row into the Treeview and (re)bind
    double-click to OnDoubleClick."""
    #open database
    Database()
    #clear current data
    tree.delete(*tree.get_children())
    #select query
    cursor=conn.execute("SELECT * FROM REGISTRATION")
    #fetch all data from database
    fetch = cursor.fetchall()
    #loop for displaying all data in GUI
    for data in fetch:
        tree.insert('', 'end', values=(data))
    # Re-binding on every refresh is redundant but harmless.
    tree.bind("<Double-1>",OnDoubleClick)
    cursor.close()
    conn.close()
def OnDoubleClick(self):
    """Copy the double-clicked row's values into the form fields.

    NOTE(review): the parameter is actually the Tk event object passed by
    tree.bind(), not an instance -- the name `self` is misleading.
    """
    #getting focused item from treeview
    curItem = tree.focus()
    contents = (tree.item(curItem))
    selecteditem = contents['values']
    #set values in the fields (index 0 is the RID, which stays hidden)
    fname.set(selecteditem[1])
    lname.set(selecteditem[2])
    gender.set(selecteditem[3])
    address.set(selecteditem[4])
    contact.set(selecteditem[5])
# Build the GUI. NOTE(review): this runs on import too, not only as a script;
# only the event loop below is guarded by __main__.
DisplayForm()
if __name__=='__main__':
    #Running Application: enter the Tk event loop
    mainloop()
| true |
c5527654ae3bdd0c6c94272506b3258382dbc990 | Python | fenglihanxiao/Python | /Module01/ModulePackage/UseModule_196.py | UTF-8 | 394 | 2.59375 | 3 | [
"MIT"
] | permissive | """
1. 196_XXX -> Package
"""
#################################################
# Option 1 (commented out): import the module via its full package path and
# access attributes through that path.
# import ModulePackage.Res.Moduel_196
# print(ModulePackage.Res.Moduel_196.age)
#################################################
# Option 2 (active): import the `modules` name directly from the packaged module.
from Module01.ModulePackage.Res.Moduel_196 import modules
print(modules)
| true |
bbf243eee51f43440dbc9708b2fce5fbbff0105d | Python | thebillington/LMCompile | /nodes.py | UTF-8 | 1,025 | 3.265625 | 3 | [] | no_license | from tokens import *
class Statement:
    """AST node wrapping a single statement's child node."""
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "Statement({})".format(self.child)
class PrintI:
    """AST node for a print instruction whose child is presumably an integer expression."""
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "PrintI({})".format(self.child)
class PrintC:
    """AST node for a print instruction whose child is presumably a character expression."""
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "PrintC({})".format(self.child)
class IntCon:
    """AST node for an integer constant."""
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "IntCon({})".format(self.child)
class CharCon:
    """AST node for a character constant."""
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "CharCon({})".format(self.child)
class StrCon:
    """AST node for a string constant.

    eval() emits one "DAT <code>" line per character, terminated by "DAT 3".
    """
    def __init__(self, child):
        self.child = child
    def __repr__(self):
        return "StrCon({})".format(self.child)
    def eval(self):
        words = ["DAT {}\n".format(ord(ch)) for ch in self.child]
        words.append("DAT 3\n")
        return "".join(words)
| true |
781f5e8bd8842d73bfc2e3f1a672721a033f2dc4 | Python | WenzheLiu0829/Extreme_computing | /assignment2/task6/reservoir.py | UTF-8 | 378 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import random
import linecache
linecache.clearcache()
filename = "/afs/inf.ed.ac.uk/group/teaching/exc/ex2/part3/webLarge.txt"
# Bug fix: the file handle was opened and never closed, and readlines()
# materialized the whole (large) file just to count lines; count lazily
# inside a `with` block instead.
with open(filename) as myfile:
    lines = sum(1 for _ in myfile)
# NOTE(review): despite the script's name this samples WITH replacement
# (randint may repeat line numbers); true reservoir sampling would not.
for i in range(100):
    n = random.randint(1, lines)
    output_line = linecache.getline(filename, n)
    output_line = output_line.strip()
    print(output_line)
| true |
e5998804d0a9ae2a3e97811c70ee93f4111fdaaa | Python | k-zen/SigmaProject | /sigmaproject/data/data.py | UTF-8 | 2,441 | 2.640625 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Andreas Koenzen <akoenzen | uvic.ca>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from sklearn import *
class Data(object):
    """Utility class that generates small 2-D toy data sets."""

    DEBUG = False  # boolean: flag to enable debug mode

    def __init__(self):
        # (features, labels) tuple once generate_data() has run; None before.
        self.data = None

    def generate_data(self, type: int = 0) -> None:
        """Generate a 200-sample 2-D classification data set into self.data.

        :param type: 0 = moons, 1 = circles, 2 = XOR of Gaussian features.
                     Any other value leaves self.data unchanged.
        :returns: Void
        """
        if type == 0:
            self.data = datasets.make_moons(n_samples=200,
                                            shuffle=True,
                                            noise=0.2,
                                            random_state=42)
        elif type == 1:
            self.data = datasets.make_circles(n_samples=200,
                                              shuffle=True,
                                              noise=0.2,
                                              factor=0.5,
                                              random_state=1)
        elif type == 2:
            # Deterministic XOR-labelled Gaussian cloud (fixed seed 0).
            rng = np.random.RandomState(0)
            features = rng.randn(200, 2)
            labels = np.logical_xor(features[:, 0] > 0,
                                    features[:, 1] > 0).astype(int)
            self.data = (features, labels)
        return None
| true |
1461ef868c5962a6c0e4d752516142074300082c | Python | wangcaitao/pat | /pat-python/basic-level/1073.py | UTF-8 | 1,459 | 2.765625 | 3 | [] | no_license | nums = input().split()
# PAT B1073-style grader. NOTE(review): `nums` (problem count / per-problem
# counts) is read from stdin above this excerpt's first visible line.
# Per-problem answer keys; each line is:
# [score, option-count, correct-count, correct options...].
successResults = []
for i in range(int(nums[1])):
    successResults.append(input().split())
# errors[j][k]: how many students got option chr(k+97) of problem j wrong
# (selected it wrongly, or failed to select it). 5 = maximum option count.
errors = [[0 for i in range(5)] for i in range(int(nums[1]))]
maxCount = 0
for i in range(int(nums[0])):
    totalScore = 0.0
    # A student line looks like "(n a b) (n c) ..."; strip the outer parens
    # and split into one chunk per problem.
    studentAllResults = input()[1:-1].split(") (")
    for j, studentResult in enumerate(studentAllResults):
        studentResults = studentResult.split()
        count = 0
        flag = True
        for k in range(int(successResults[j][1])):
            result = chr(k + 97)  # option letter 'a', 'b', ...
            if result in successResults[j][3:] and result in studentResults[1:]:
                count += 1  # correctly selected
            elif result in successResults[j][3:] and result not in studentResults[1:]:
                errors[j][k] += 1  # correct option missed
            elif result not in successResults[j][3:] and result in studentResults[1:]:
                errors[j][k] += 1  # wrong option selected
                flag = False  # any wrong selection voids this problem's score
            if errors[j][k] > maxCount:
                maxCount = errors[j][k]
        if flag:
            if count == int(successResults[j][2]):
                totalScore += float(successResults[j][0])  # exact match: full score
            else:
                totalScore += float(successResults[j][0]) / 2  # subset: half score
    print("{:.1f}".format(totalScore))
if maxCount:
    # Report every (problem, option) pair tied for the highest error count.
    for i, error in enumerate(errors):
        for j, count in enumerate(error):
            if count == maxCount:
                print("{} {}-{}".format(maxCount, i + 1, chr(j + 97)))
else:
    print("Too simple")
| true |
ebbd0db321f2effcadeccebfa89f3e8ebcb9f0a5 | Python | danishsaeed2/finance-ml | /RNN.py | UTF-8 | 3,188 | 2.78125 | 3 | [] | no_license | import tensorflow as tf
import pandas as pd
import numpy as np
import os
import matplotlib; matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
def make_supervised(data, lag=1):
    """Frame a series as supervised learning.

    Returns a DataFrame whose columns are [t-lag, ..., t-1, t]; NaNs
    introduced by shifting are filled with 0.
    """
    frame = DataFrame(data)
    lagged = [frame.shift(offset) for offset in range(1, lag + 1)]
    lagged.append(frame)
    supervised = concat(lagged, axis=1)
    return supervised.fillna(0)
def difference(dataset, interval=1):
    """Return a Series of lag-`interval` differences of `dataset`."""
    deltas = [dataset[idx] - dataset[idx - interval]
              for idx in range(interval, len(dataset))]
    return Series(deltas)
def invert_diff(history, yhat, interval=1):
    """Undo differencing: add the value `interval` steps from the end of history."""
    base = history[-interval]
    return base + yhat
def scale(train, test):
    """Fit a [-1, 1] MinMaxScaler on `train` and transform both arrays.

    Returns (scaler, scaled_train, scaled_test).
    """
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    train_scaled = scaler.transform(train.reshape(train.shape[0], train.shape[1]))
    test_scaled = scaler.transform(test.reshape(test.shape[0], test.shape[1]))
    return scaler, train_scaled, test_scaled
def invert_scale(scale_f, X, value):
    """Invert scaling for one forecast: append `value` to the feature row,
    inverse-transform, and return the last column of the result."""
    row = list(X) + [value]
    matrix = np.array(row).reshape(1, len(row))
    return scale_f.inverse_transform(matrix)[0, -1]
def fit_lstm(train, batch_size, nb_epoch, neurons):
    """Build and train a stateful LSTM.

    train: 2-D array whose last column is the target y and whose preceding
    columns are the features X. Returns the fitted Keras model.
    """
    X, y = train[:, 0:-1], train[:,-1]
    # Keras LSTMs expect [samples, timesteps, features]; one timestep here.
    X = X.reshape(X.shape[0],1,X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # Manual epoch loop: a stateful LSTM carries state across batches, so the
    # state is reset explicitly between epochs and shuffling is disabled.
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
def lstm_forecast(model, batch_size, X):
    """Predict one step ahead for a single feature row X
    (reshaped to the LSTM's expected (1, 1, len(X)) layout)."""
    sample = X.reshape(1, 1, len(X))
    prediction = model.predict(sample, batch_size=batch_size)
    return prediction[0, 0]
# Walk-forward LSTM forecast of the AAPL adjusted close price.
df = pd.read_csv('data_files/WIKI-AAPL.csv',usecols=[11],engine='python', skipfooter=3)
df = df['Adj. Close']
df = df.astype('float32')
X = df.values
# Difference at lag 2, then frame as supervised (previous diff -> next diff).
diff_vals = difference(X,2)
supervised = make_supervised(diff_vals, 1)
supervised_vals = supervised.values
# Chronological 80/20 train/test split (no shuffling for time series).
train_len = int(len(supervised_vals)*0.80)
test_len = len(supervised_vals) - train_len
train, test = supervised_vals[0:train_len,:],supervised_vals[train_len:len(supervised_vals),:]
scaler, trains_s, test_s = scale(train,test)
lstm_model = fit_lstm(trains_s, 1, 1, 4)
# Prime the stateful LSTM's internal state by predicting across the train set.
train_r = trains_s[:, 0].reshape(len(trains_s),1, 1)
lstm_model.predict(train_r, batch_size=1)
predictionlist = list()
for i in range(len(test_s)):
    Z, y = test_s[i, 0:-1], test_s[i, -1]
    yhat = lstm_forecast(lstm_model,1,Z)
    # Undo scaling, then undo the lag-2 differencing to recover a price.
    yhat = invert_scale(scaler, Z, yhat)
    yhat = invert_diff(X, yhat, len(test_s)+1-i)
    predictionlist.append(yhat)
    expected = X[len(train)+i+1]
    print('Trading day=%d, Predicted=%f, Expected=%f'%(i+1, yhat, expected))
rmse = sqrt(mean_squared_error(X[-test_len:], predictionlist))
print('Test RMSE: %.3f' % rmse)
# Plot actual vs predicted; the pauses keep the interactive window visible.
plt.plot(X[-test_len:])
plt.ion()
plt.pause(10)
plt.plot(predictionlist)
plt.pause(10)
| true |
5aac90ac386cd5d3fd4315a09b6619e5441c6135 | Python | ENSIIE-2022/IA | /Python/2.py | UTF-8 | 215 | 3.171875 | 3 | [] | no_license | code = {'e':'a', 'l':'m', 'o':'e'}
s = 'Hello world!'
# Expected result: 'Hamme wermd!'
s = list(s)
# Idiom fix: enumerate() replaces the manual `counter = 0 ... counter += 1`
# bookkeeping. `code` is the substitution table defined above.
for counter, letter in enumerate(s):
    if letter in code:
        s[counter] = code[letter]
print(''.join(s))
027e3fb46cf4ddaa9389c18d31dd77150dce7a0d | Python | iiradia/LeetCode-Problems | /Python/Pramp/PairsWithSpecificDiff.py | UTF-8 | 536 | 3.40625 | 3 | [] | no_license | def find_pairs_with_given_difference(arr, k):
"""
k = 1
[0, -1, -2, 2, 1]
0
arr = [[1,0], [0, -1], [-1, -2], [2, 1]]
{
-1: 0,
-2: -1,
1: 2,
0: 1
}
"""
# loop to create hashmap
array_pairs = []
matching_pairs = {}
for num in arr:
pair = num - k
matching_pairs[pair] = num
# loop to create pairs
for num in arr:
if matching_pairs.get(num) is not None:
array_pairs.append([matching_pairs[num], num])
return array_pairs | true |
70d2f9534f07d7f4118c6f5dc762b85f7d64fa03 | Python | deemo1228/flask-mb | /seeds/production.py | UTF-8 | 999 | 3.453125 | 3 | [] | no_license | from flask_seeder import Seeder
from main import City #因為要加入的資料表為City所以要把它從idnex引進來
class DemoSeeder(Seeder):
    """Seeds the City table with a fixed list of Taiwanese city names."""

    def run(self):
        # Rows to insert into the City table.
        city_names = ('Taipei', 'New Taipei', 'Taoyuan',
                      'Taichung', 'Tainan', 'Kaohsiung')
        for name in city_names:
            # Echo each inserted name so the seed run shows what it added.
            print(name)
            # Stage the new City; flask_seeder commits the session itself.
            self.db.session.add(City(name=name))
46c39d532a1367aed6fdfb428a24b21361d7a095 | Python | wescleytorres/Python-Introducao | /Introducao/Fun01.100.py | UTF-8 | 463 | 3.890625 | 4 | [
"MIT"
] | permissive | from random import randint
# Five random values in [1, 10] used by the two exercise functions below.
numeros = [randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10)]
def sorteia(lista):
    """Print the drawn values on a single line (Portuguese UI text;
    the message assumes the list has 5 values)."""
    print('Sorteando 5 valores da lista: ', end='')
    for valor in lista:
        print(valor, end=' ')
    print('PRONTO!')
def somarpar(lista):
    """Sum the even values of `lista` and print the result.

    Bug fix: the message used to interpolate the global `numeros` instead of
    the `lista` parameter, so calling it with any other list reported the
    wrong values. Output is unchanged for the script's own call site, where
    lista is numeros.
    """
    cont = sum(v for v in lista if v % 2 == 0)
    print(f'Somando os valores pares de {lista}, temos {cont}')
# Exercise driver: show the drawn values, then report the sum of the even ones.
sorteia(numeros)
somarpar(numeros)
| true |