blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
74b3d7c3cf7d317dedd25ab03e4fb37ceb1d5406 | Python | shahariaazam/HelloWorld-Python | /cli_arguments.py | UTF-8 | 177 | 2.9375 | 3 | [] | no_license | import sys
# Number of command-line arguments, including the script name itself.
total = len(sys.argv)
# Printable form of the whole argv list (e.g. "['script.py', 'a', 'b']").
cmdargs = str(sys.argv)
print ("The total numbers of args passed to the script: %d %s" % (total, cmdargs))
print ("Args list: %s " % cmdargs)
6e349f849900a4fe97988d1cc0379ed2ad223827 | Python | LCAV/localization-icassp2018 | /plots.py | UTF-8 | 4,660 | 2.5625 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2018 Frederike Duembgen <frederike.duembgen@gmail.com>
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'DejaVu Sans', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
"""
plots.py: Plots for ICASSP paper on localization
"""
cmap = plt.get_cmap('Greys')
def create_plot():
    """Create a 4.5x4.5 inch figure with an RMSE y label; return (fig, ax)."""
    fig = plt.figure(figsize=(4.5, 4.5))
    ax = fig.add_subplot(111)
    plt.grid('on')
    plt.ylabel('RMSE')
    # left, bottom, width, height
    ax.set_position([0.1, 0.15, 0.8, 0.8])
    return fig, ax
def plot_against_distance(dict_methods, chosen_eps, epsilons, sigmas, saveas, title, legend=False):
    """Plot RMSE against distance noise for the chosen angle-noise levels.

    :param dict_methods: dict mapping method name to a dict with keys
        'rmses' (2-D array indexed [sigma, eps]), 'linestyle', 'marker'.
    :param chosen_eps: indices into epsilons to draw, one grey shade each.
    :param epsilons: angle-noise values; presumably radians (the title
        converts with 180/pi) -- confirm with caller.
    :param sigmas: distance-noise values used as the x axis.
    :param saveas: output path for the saved figure.
    :param title: format string receiving (radians, degrees).
    :param legend: whether to draw the per-method legend.
    """
    chosen_sig = np.arange(len(sigmas))
    colors = [cmap((j+1)/len(chosen_eps)) for j in range(len(chosen_eps))]
    fig, ax = create_plot()
    fig.set_size_inches(5, 4.8)
    for i, eps in enumerate(chosen_eps):
        for m in dict_methods.keys():
            # Label each method only once so the legend has one entry per method.
            label = m if i == 0 else None
            rmses = dict_methods[m]['rmses']
            ls = dict_methods[m]['linestyle']
            ms = dict_methods[m]['marker']
            plt.plot(sigmas[chosen_sig], rmses[chosen_sig, eps], color=colors[i],
                     label=label, linestyle=ls, marker=ms, fillstyle='none')
            #plt.plot(sigmas[chosen_sig],rmses[chosen_sig,eps], color=colors[i],
            #label='${}={:1.2f}$'.format(noise_label, epsilons[eps]), linestyle=ls, marker=ms,
            #fillstyle='none')
    # `eps` is the last chosen epsilon after the loop; it drives the title.
    angle = epsilons[eps]
    plt.title(title.format(angle, 180*angle/np.pi))
    plt.xlabel('$\sigma_d$[-]')
    #ax.xaxis.set_label_coords(0.94, -0.025)
    plt.tight_layout()
    if (legend):
        plt.legend(loc='upper left')
    plt.ylim([0, 0.8])
    fig.savefig(saveas)  # ,bbox_extra_artists=(lgd,),bbx_inches='tight'
def plot_against_angles(dict_methods, chosen_sig, sigmas, epsilons, saveas, title, legend=False, gaussian=False):
    """Plot RMSE against angle noise for the chosen distance-noise levels.

    :param dict_methods: dict mapping method name to a dict with keys
        'rmses' (2-D array indexed [sigma, eps]), 'linestyle', 'marker'.
    :param chosen_sig: indices into sigmas to draw, one grey shade each.
    :param sigmas: distance-noise values.
    :param epsilons: angle-noise values used as the x axis; presumably
        radians (a twin axis shows the same ticks in degrees).
    :param saveas: output path for the saved figure.
    :param title: format string receiving the chosen sigma value.
    :param legend: whether to draw the per-method legend.
    :param gaussian: if True use a linear x axis, otherwise log scale.
    """
    chosen_eps = range(len(epsilons))
    colors = [cmap((j+1)/len(chosen_sig)) for j in range(len(chosen_sig))]
    fig, ax = create_plot()
    fig.set_size_inches(5, 5)
    def tick_function(X):
        # Radians -> degree strings for the twin-axis tick labels.
        V = X * 180 / np.pi
        return ["%.1f" % z for z in V]
    if gaussian:
        plot = ax.plot
    else:
        plot = ax.semilogx
    for i, sig in enumerate(chosen_sig):
        for m in dict_methods.keys():
            # Label each method only once so the legend has one entry per method.
            label = m if i == 0 else None
            rmses = dict_methods[m]['rmses']
            ls = dict_methods[m]['linestyle']
            ms = dict_methods[m]['marker']
            plot(epsilons[chosen_eps], rmses[sig, chosen_eps], linestyle=ls, label=label,
                 marker=ms, color=colors[i], fillstyle='none')
    #plt.xlim([3,102])
    ax.set_ylim([0, 0.4])
    ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4])
    if legend:
        ax.legend(loc='upper left')
    ax.set_xlabel('$\sigma_\\alpha$[rad]')
    # Second x axis on top showing the same noise levels in degrees.
    ax_deg = ax.twiny()
    new_tick_locations = np.array([0, 0.2, 0.4])
    ax_deg.set_xlim(ax.get_xlim())
    ax_deg.set_xticks(new_tick_locations)
    ax_deg.set_xticklabels(tick_function(new_tick_locations))
    ax_deg.set_xlabel('$\sigma_\\alpha [^\circ]$')
    # adjust label and title positions
    #ax_deg.xaxis.set_label_coords(0.55, 1.08) #deg1
    ax_deg.xaxis.set_label_coords(0.5, 1.1)  # deg2
    #plt.title(title.format(sigmas[sig]), y=1.12) #deg1
    plt.title(title.format(sigmas[sig]), y=1.15)  # deg2
    #ax.xaxis.set_label_coords(0.55, -0.05) #deg1
    #ax.xaxis.set_label_coords(0.94, -0.025) #deg2
    plt.tight_layout()
    plt.savefig(saveas)
def plot_seaborn(dict_methods, options, method, folder='', matrix=None, figsize=None, ylabel=None, **kwargs):
    """Draw an annotated heatmap (sigma_d vs sigma_alpha) for one method.

    :param dict_methods: used to look up the method's 'rmses' matrix
        when no explicit matrix is given.
    :param options: dict with 'min_rho'/'max_rho'/'n_rhos' and
        'min_sigma'/'max_sigma'/'n_sigma' describing the grid axes.
    :param method: method name; also used for the title and file name.
    :param folder: output directory for the saved .eps file.
    :param matrix: optional matrix to plot instead of the stored rmses.
    :param figsize: optional figure size forwarded to plt.subplots.
    :param ylabel: when truthy, label and tick the sigma_d axis.
    :param kwargs: forwarded to seaborn.heatmap.
    """
    import pandas as pd
    import seaborn as sns
    if matrix is None:
        matrix = dict_methods[method]['rmses']
    # Column labels show rho with its degree equivalent in parentheses.
    rhos = np.round(np.linspace(
        options['min_rho'], options['max_rho'], options['n_rhos']), 2)
    rhos_ext = ['{} ({}$^\circ$)'.format(
        r, np.round(180*r/np.pi, 1)) for r in rhos]
    sigmas = np.round(np.linspace(
        options['min_sigma'], options['max_sigma'], options['n_sigma']), 2)
    data = pd.DataFrame(matrix, columns=rhos_ext, index=sigmas)
    f, ax = plt.subplots(figsize=figsize)
    n_ticklabels = 9 if ylabel else 0
    sns.heatmap(data, **kwargs, annot=True,  # fmt="2.2f",
                linewidths=.5, ax=ax,
                xticklabels=10, yticklabels=n_ticklabels)
    if ylabel:
        plt.ylabel('$\sigma_d$')
    plt.xlabel('$\sigma_\\alpha$')
    ax.invert_yaxis()
    title = method
    plt.title(title)
    method = method.replace(' ', '_')
    plt.savefig('{}/heatmap_{}.eps'.format(folder, method), transparent=True)
| true |
a55e8fa23d35cd6e0bdeda16278c6edd938a5688 | Python | kevin1024/vcrpy | /vcr/serializers/yamlserializer.py | UTF-8 | 363 | 2.53125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import yaml
# Use the libYAML versions if possible
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper, Loader
def deserialize(cassette_string):
    """Load a YAML cassette string into Python data structures."""
    return yaml.load(cassette_string, Loader=Loader)
def serialize(cassette_dict):
    """Dump a cassette dict to its YAML string representation."""
    return yaml.dump(cassette_dict, Dumper=Dumper)
| true |
82555a68843b5eb5f9931221b6f3a9c98e9855d8 | Python | CaizhiXu/LeetCode-Solutions-Python-Weimin | /0315. Count of Smaller Numbers After Self.py | UTF-8 | 1,782 | 3.5625 | 4 | [] | no_license | # solution 2: use merge sort, average and worst case time O(n*log(n))
# space O(n)
# ref: https://leetcode.com/problems/count-of-smaller-numbers-after-self
# /discuss/76584/Mergesort-solution
class Solution(object):
    """Count, for every element, the later elements that are smaller.

    Merge-sort over (index, value) pairs: whenever a left-half element is
    placed after remaining right-half elements, all of those right-half
    elements are strictly smaller values that appear later in the
    original order.  O(n log n) time, O(n) extra space.
    """
    def countSmaller(self, nums):
        """Return counts where counts[i] = #{j > i : nums[j] < nums[i]}."""
        if not nums:
            return []
        counts = [0] * len(nums)
        self.mergeSort(list(enumerate(nums)), counts)
        return counts
    def mergeSort(self, nums, smaller):
        """Sort (index, value) pairs by value, accumulating `smaller`."""
        if len(nums) < 2:
            return nums
        half = len(nums) // 2
        left = self.mergeSort(nums[:half], smaller)
        right = self.mergeSort(nums[half:], smaller)
        # Merge largest-first, writing back into `nums` to save space.
        pos = len(nums) - 1
        while pos >= 0:
            if not left:
                nums[:pos + 1] = right
                break
            if not right:
                nums[:pos + 1] = left
                break
            if left[-1][1] > right[-1][1]:
                # Every remaining right-half value is smaller than this
                # left element and follows it in the original array.
                smaller[left[-1][0]] += len(right)
                nums[pos] = left.pop()
            else:
                nums[pos] = right.pop()
            pos -= 1
        return nums
# solution 1: brute force, time O(n^2), space O(n)
# Time Limit Exceeded
class Solution1(object):
    """Brute-force counter: O(n^2) time, O(n) space (TLE on LeetCode)."""
    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        return [
            sum(1 for later in nums[pos + 1:] if later < current)
            for pos, current in enumerate(nums)
        ]
| true |
def priority(x):
    """Return the precedence level of operator/bracket ``x``.

    Raises KeyError/no-match behavior consistent with an unknown symbol.
    """
    if x in ("+", "-"):
        return 2
    if x in ("*", "/"):
        return 3
    return {"(": 0, ")": 1}[x]
def RPN(expression):
    """Convert an infix arithmetic expression to Reverse Polish Notation.

    Supports non-negative multi-digit integers, the operators + - * /,
    and parentheses.  Returns a list of int operands and str operators
    in postfix order.

    Fix: the original shunting-yard loop had ``stack.append(x)`` inside
    ``for i in range(len(stack))``, pushing the operator once per stack
    element (and indexing an emptied stack), which produced duplicated
    operators for inputs like "1*2+3*4".  This version pops and pushes
    each operator exactly once.
    """
    expression = expression.replace(" ", "")
    precedence = {"+": 2, "-": 2, "*": 3, "/": 3}
    result = []   # output queue (postfix)
    stack = []    # operator stack
    number = ""   # digits of the operand currently being read
    for ch in expression:
        if ch.isdigit():
            number += ch
            continue
        if number:                     # flush the pending operand
            result.append(int(number))
            number = ""
        if ch == "(":
            stack.append(ch)
        elif ch == ")":
            while stack and stack[-1] != "(":
                result.append(stack.pop())
            if stack:
                stack.pop()            # discard the matching "("
        else:
            # Pop operators of greater or equal precedence first.
            while (stack and stack[-1] != "(" and
                   precedence[stack[-1]] >= precedence[ch]):
                result.append(stack.pop())
            stack.append(ch)
    if number:                         # flush the final operand
        result.append(int(number))
    while stack:
        result.append(stack.pop())
    return result
a8f7a76f33130bfa739380fb73a20f139de9e646 | Python | ryanp538853/Sandbox | /password_entry.py | UTF-8 | 316 | 3.828125 | 4 | [] | no_license | """Ryan"""
MINIMUM_LENGTH = 6  # fewest characters an acceptable password may have

password = input("Please enter your password that has at least {} character: ".format(MINIMUM_LENGTH))
while len(password) < MINIMUM_LENGTH:
    # Fix: reassign the re-entered value.  The original discarded the
    # result of input(), so a short first password looped forever.
    password = input("Invalid password!\nPlease enter password that contains at least {} characters: ".format(MINIMUM_LENGTH))
print("*" * len(password))
| true |
54420cf1aa5b0db0292486cb0eee5325d16d77df | Python | nwaiting/wolf-ai | /wolf_outer/home_work_lunwen/paper_review.py | UTF-8 | 2,192 | 2.859375 | 3 | [] | no_license | #coding=utf-8
from tkinter import *
def main(paperfile, teacherfile):
    """Distribute papers to reviewers and show the result in a Tk window.

    paperfile: one paper identifier per line.
    teacherfile: lines of the form "teacher:slot" (colon separated).
    A paper is given to the currently least-loaded teacher whose first
    five characters differ from the paper's (presumably so nobody
    reviews work from their own group -- confirm the ID scheme).
    """
    paper_index = list()
    teacher_index = dict()
    # Read papers (binary mode, decoded per line).
    with open(paperfile, 'rb') as fd:
        for line in fd.readlines():
            if line:
                paper_index.append(line.strip().decode())
    # Read teachers; only "name:value" lines with exactly one colon count.
    with open(teacherfile, 'rb') as fd:
        for line in fd.readlines():
            if line:
                line = line.strip()
                find_index = line.decode().find(':')
                if find_index != -1:
                    res = line.decode().split(':')
                    if len(res) == 2:
                        teacher_index[res[0].strip()] = {res[1].strip():list()}
    for item in paper_index:
        # Re-sort every round so the least-loaded teacher is tried first.
        res = sorted(teacher_index.items(), key=lambda x:len(list(x[1].values())[0]), reverse=False)
        for i in range(len(teacher_index)):
            # Skip teachers sharing the paper's five-character prefix.
            if res[i][0][:5] == item[:5]:
                continue
            print(res[i][0][:5],item[:5])
            flag = False
            for itemi,itemj in res[i][1].items():
                if len(itemj) > 0:
                    tmp_list = itemj[:]
                    tmp_list.append(item)
                    teacher_index[res[i][0]] = {itemi:tmp_list}
                else:
                    teacher_index[res[i][0]] = {itemi:[item]}
                flag = True
            if flag:
                break
    print(teacher_index)
    # Two list boxes: teachers on the left, assigned papers on the right.
    root = Tk()
    root.title('研究生论文评阅')
    list_one = Listbox(root, height=20, width=30)
    list_two = Listbox(root, height=20, width=30)
    list_one.grid(row=1,column=1,padx=(10,5),pady=10)
    list_two.grid(row=1,column=2,padx=(5,10),pady=10)
    for i,j in teacher_index.items():
        first_list = i + '--'
        second_list = ''
        for m,n in j.items():
            first_list += m
            second_list += m + ':'
            list_two.insert(END, second_list)
            for nn in n:
                list_two.insert(END, nn)
        list_one.insert(END, first_list)
    root.mainloop()
if __name__ == '__main__':
    # Relative input paths: run from the directory that contains "outer/".
    paper_path = 'outer/home_work_lunwen/paper.txt'
    teacher_path = 'outer/home_work_lunwen/teacher.txt'
    main(paper_path, teacher_path)
| true |
c9a508d678a9a854db8010ca0409e18c401b39ee | Python | dlesignac/cg | /puzzle/the_last_crusade_1/python3/main.py | UTF-8 | 1,022 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | L_ = 1
# Side codes: left/right are +-1, top/bottom are +-2 (L_ = 1 is defined above).
R_ = -1
T_ = 2
B_ = -2
# (room_type, entry_side) -> exit side for every passable room shape.
# Room types absent for a given entry side are impassable from that side.
roomT = {
    ( 1, L_): B_, ( 1, R_): B_, ( 1, T_): B_,
    ( 2, L_): R_, ( 2, R_): L_, ( 3, T_): B_,
    ( 4, R_): B_, ( 4, T_): L_, ( 5, L_): B_,
    ( 5, T_): R_, ( 6, L_): R_, ( 6, R_): L_,
    ( 7, R_): B_, ( 7, T_): B_, ( 8, L_): B_,
    ( 8, R_): B_, ( 9, L_): B_, ( 9, T_): B_,
    (10, T_): L_, (11, T_): R_, (12, R_): B_,
    (13, L_): B_
}
def coords(x, y, rt, in_):
    """Next cell after crossing room type ``rt`` entered from side ``in_``."""
    exit_side = roomT[(rt, in_)]
    if exit_side == L_:
        return (x - 1, y)
    if exit_side == R_:
        return (x + 1, y)
    return (x, y + 1)
# Maze dimensions, then h rows of room-type codes flattened row-major.
w, h = [int(i) for i in input().split()]
maze = []
for j in range(h):
    line = [int(i) for i in input().split()]
    maze += line
# Exit position from the puzzle protocol; read but never used below.
exit = int(input())
while True:
    # Current cell and the side the tunnel is entered from.
    xi, yi, pos = input().split()
    xi = int(xi)
    yi = int(yi)
    if pos == "TOP":
        in_ = T_
    elif pos == "LEFT":
        in_ = L_
    else:
        in_ = R_
    # Cell (x, y) lives at maze[y * w + x] in the flattened grid.
    x, y = coords(xi, yi, maze[yi * w + xi], in_)
    print("{} {}".format(x, y))
e474f97ecb0ce06fb450d3c1dbbf048da48c3ba8 | Python | dnguyen0304/roomlistwatcher | /roomlistwatcher/common/automation/utility.py | UTF-8 | 801 | 2.921875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import selenium.common
from selenium.webdriver.support import expected_conditions
def find_button(wait_context, locator):
    """
    Look for the button specified by the locator.

    Parameters
    ----------
    wait_context : selenium.webdriver.support.ui.WebDriverWait
    locator : tuple
        Two-element tuple. The first element is the select strategy.
        The second element is the value.

    Returns
    -------
    selenium.webdriver.remote.webelement.WebElement
        If the button could be found. Otherwise None.
    """
    clickable = expected_conditions.element_to_be_clickable(locator=locator)
    try:
        return wait_context.until(clickable)
    except selenium.common.exceptions.TimeoutException:
        return None
| true |
ab4cc65bf7a0f8cb2fc57a68a6eee26938d23a66 | Python | ddraa/Algorithm | /String/KMP/7575.py | UTF-8 | 1,212 | 2.765625 | 3 | [] | no_license | import sys
def KMP(P, T):
    """Return True if pattern P occurs contiguously in sequence T.

    Standard Knuth-Morris-Pratt search using the failure table built by
    LIS().  Cleanup: removed the unused match-position list and the
    commented-out bookkeeping; added an empty-pattern guard (the
    original would raise IndexError on P == []).
    """
    lp = len(P)
    if lp == 0:
        return True  # an empty pattern trivially occurs everywhere
    table = LIS(P)
    i = 0  # length of the current partial match
    for j in range(len(T)):
        while i > 0 and P[i] != T[j]:
            i = table[i - 1]  # fall back to the longest proper border
        if P[i] == T[j]:
            if i == lp - 1:
                return True
            i += 1
    return False
def LIS(P):
    """Build the KMP failure table for pattern P.

    table[j] is the length of the longest proper prefix of P[:j+1]
    that is also a suffix of it.
    """
    table = [0] * len(P)
    matched = 0
    for j in range(1, len(P)):
        while matched > 0 and P[matched] != P[j]:
            matched = table[matched - 1]
        if P[matched] == P[j]:
            matched += 1
        table[j] = matched
    return table
string = []
# n sequences, pattern length k.
n, k = map(int, sys.stdin.readline().split())
for _ in range(n):
    input()  # discard the header line preceding each sequence -- assumed format
    string.append(sys.stdin.readline().split())
sample = string[0]
# Slide every length-k window of the first sequence as a candidate pattern.
for s in range(len(sample) - k + 1):
    pattern = sample[s:s + k]
    c = 0
    for ss in range(1, n):
        ans = KMP(pattern, string[ss])
        if not ans:
            # Also accept the reversed pattern.
            ans = KMP(list(reversed(pattern)), string[ss])
        if not ans:
            break
        else:
            c += 1
    else:
        c += 1
    # NOTE(review): when every sequence matches, the for-else adds one
    # extra increment, so c == n here, not n - 1 -- verify this check.
    if c == n - 1:
        print("YES")
        exit(0)
print("NO")
| true |
88c50a7ea502bf517e6a1e6bf3e11b8326bb56d9 | Python | TransactiveSCC/TRANSAX | /archive/code/RIAPSDemo/python/libs/cplex/_internal/_aux_functions.py | UTF-8 | 8,060 | 2.640625 | 3 | [] | no_license | # --------------------------------------------------------------------------
# File: _aux_functions.py
# ---------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2008, 2017. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ------------------------------------------------------------------------
"""
"""
import functools
import inspect
import warnings
from ..exceptions import CplexError, WrongNumberOfArgumentsError
from .. import six
from ..six.moves import (map, zip, range)
class deprecated(object):
    """A decorator that marks methods/functions as deprecated.

    Usage: ``@deprecated("V12.7")``.  Each call of the decorated
    function then emits a DeprecationWarning naming that version.
    """
    def __init__(self, version):
        # The release in which the decorated item became deprecated.
        self.version = version
    def __call__(self, cls_or_func):
        if (inspect.isfunction(cls_or_func) or
            inspect.ismethod(cls_or_func)):
            fmt = "{0} function or method"
        # NOTE: Doesn't work for classes .. haven't figured that out yet.
        #       Specifically, when a decorated class is used as a base
        #       class.
        # elif inspect.isclass(cls_or_func):
        #     fmt = "{0} class"
        else:
            raise TypeError(type(cls_or_func))
        msg = _getdeprecatedmsg(fmt.format(cls_or_func.__name__),
                                self.version)
        @functools.wraps(cls_or_func)
        def wrapped(*args, **kwargs):
            # stacklevel=2 points the warning at the caller, not here.
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return cls_or_func(*args, **kwargs)
        return wrapped
def deprecated_class(name, version, stacklevel=3):
    """Emits a warning for a deprecated class.

    This should be called in __init__.

    name - the name of the class (e.g., PresolveCallback).
    version - the version at which the class was deprecated (e.g.,
    "V12.7.1").
    stacklevel - indicates how many levels up the stack is the caller
    (3 so the warning points past __init__ to the instantiation site).
    """
    msg = _getdeprecatedmsg("{0} class".format(name), version)
    warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
def _getdeprecatedmsg(item, version):
return "the {0} is deprecated since {1}".format(item, version)
def validate_arg_lengths(arg_list, allow_empty=True):
    """non-public: check that all sequences in arg_list agree in length.

    Zero-length sequences are tolerated when allow_empty is True.
    Returns the maximum length; raises CplexError on any mismatch.
    """
    lengths = [len(arg) for arg in arg_list]
    longest = max(lengths)
    for length in lengths:
        if length == longest:
            continue
        if allow_empty and length == 0:
            continue
        raise CplexError("Inconsistent arguments")
    return longest
def make_ranges(indices):
    """non-public: compress indices into inclusive (begin, end) runs.

    Consecutive values (each exactly one more than its predecessor)
    collapse into a single pair; e.g. [0, 1, 2, 5] -> [(0, 2), (5, 5)].
    """
    ranges = []
    start = prev = None
    for idx in indices:
        if start is None:
            start = prev = idx
        elif idx == prev + 1:
            prev = idx
        else:
            ranges.append((start, prev))
            start = prev = idx
    if start is not None:
        ranges.append((start, prev))
    return ranges
def apply_freeform_two_args(fn, convert, args):
    """non-public: call a range-based function with flexible arguments.

    fn      -- callable accepting an inclusive (begin, end) index pair.
    convert -- converts a name into an index.
    args    -- one of: two indices/names (a range), a sequence of
               indices/names, a single index/name, or empty (meaning
               "everything", delegated as fn(0)).
    """
    def con(a):
        # Names are converted to indices; indices pass through unchanged.
        if isinstance(a, six.string_types):
            return convert(a)
        else:
            return a
    if len(args) == 2:
        conarg0, conarg1 = (con(args[0]), con(args[1]))
        if (isinstance(conarg0, six.integer_types) and
                isinstance(conarg1, six.integer_types)):
            return fn(conarg0, conarg1)
        else:
            raise TypeError("expecting names or indices")
    elif len(args) == 1:
        if isinstance(args[0], (list, tuple)):
            # Batch the indices into contiguous ranges and concatenate
            # the per-range results.
            retval = []
            for member in map(fn, *zip(*make_ranges(list(map(con, args[0]))))):
                retval.extend(member)
            return retval
        conarg0 = con(args[0])
        if isinstance(conarg0, six.integer_types):
            # Single item: query the one-element range, unwrap the result.
            return fn(conarg0, conarg0)[0]
        else:
            raise TypeError("expecting name or index")
    elif len(args) == 0:
        return fn(0)
    else:
        raise WrongNumberOfArgumentsError()
def apply_freeform_one_arg(fn, convert, maxval, args):
    """non-public: call a single-index function with flexible arguments.

    fn      -- callable accepting one index.
    convert -- converts a name into an index.
    maxval  -- total number of items; used when args is empty so the
               call expands to every index in range(maxval).
    args    -- one of: two indices/names (inclusive range), a sequence
               of indices/names, a single index/name, or empty.
    """
    def con(a):
        # Names are converted to indices; indices pass through unchanged.
        if isinstance(a, six.string_types):
            return convert(a)
        else:
            return a
    if len(args) == 2:
        conarg0, conarg1 = (con(args[0]), con(args[1]))
        if (isinstance(conarg0, six.integer_types) and
                isinstance(conarg1, six.integer_types)):
            return [fn(x) for x in range(conarg0, conarg1 + 1)]
        else:
            raise TypeError("expecting names or indices")
    elif len(args) == 1:
        if isinstance(args[0], (list, tuple)):
            return [fn(x) for x in map(con, args[0])]
        conarg0 = con(args[0])
        if isinstance(conarg0, six.integer_types):
            return fn(conarg0)
        else:
            raise TypeError("expecting name or index")
    elif len(args) == 0:
        # No arguments: apply to every index.
        return apply_freeform_one_arg(fn, convert, 0,
                                      (list(range(maxval)),))
    else:
        raise WrongNumberOfArgumentsError()
def apply_pairs(fn, convert, *args):
    """non-public: apply fn to parallel lists of items and values.

    Accepts either a single (item, value) pair as two positional
    arguments, or one sequence of (item, value) pairs.  Item names are
    converted to indices before fn(items, values) is called.
    """
    def con(a):
        # Names are converted to indices; indices pass through unchanged.
        if isinstance(a, six.string_types):
            return convert(a)
        else:
            return a
    if len(args) == 2:
        fn([con(args[0])], [args[1]])
    else:
        # Unzip the pair sequence into separate item and value lists.
        a1, a2 = zip(*args[0])
        fn(list(map(con, a1)), list(a2))
def delete_set_by_range(fn, convert, max_num, *args):
    """non-public: delete items given flexible range/sequence arguments.

    fn      -- callable deleting an inclusive (begin, end) index range.
    convert -- converts a name into an index.
    max_num -- total number of existing items (used for "delete all").
    args    -- empty (delete everything), a single index/name or a
               sequence of them, or a (begin, end) pair.
    """
    if len(args) == 0:
        # Delete All:
        if max_num > 0:
            fn(0, max_num-1)
    elif len(args) == 1:
        # Delete all items from a possibly unordered list of mixed types:
        if isinstance(convert(args[0]), six.integer_types):
            args = [convert(args[0])]
        else:
            args = [convert(i) for i in args[0]]
        # Highest index first, so earlier deletions cannot shift the
        # indices still pending deletion.
        for i in sorted(args, reverse=True):
            fn(i, i)
    elif len(args) == 2:
        # Delete range from arg[0] to arg[1]:
        fn(convert(args[0]), convert(args[1]))
    else:
        raise WrongNumberOfArgumentsError()
class _group:
    """Object to contain constraint groups."""
    def __init__(self, gp):
        """Constructor for the _group object.

        gp is a list of tuples of length two (the first entry of which
        is the preference for the group (a float), the second of which
        is a tuple of pairs (type, id), where type is an attribute of
        conflict.constraint_type and id is either an index or a valid
        name for the type).

        Example input: [(1.0, ((2, 0),)), (1.0, ((3, 0), (3, 1)))]
        """
        # Stored verbatim; consumers read the raw structure.
        self._gp = gp
def make_group(conv, max_num, c_type, *args):
    """Returns a _group object.

    input:
    conv    -- a function that will convert names to indices
    max_num -- number of existing constraints of a given type
    c_type  -- constraint type
    args    -- arbitrarily many arguments (see description below)

    If args is empty, every constraint/bound is assigned weight 1.0.
    If args is of length one or more, every constraint/bound is assigned
    a weight equal to the float passed in as the first item.
    If args contains additional items, they determine a subset of
    constraints/bounds to be included. If one index or name is
    specified, it is the only one that will be included. If two indices
    or names are specified, all constraints between the first and the
    second, inclusive, will be included. If a sequence of names or
    indices is passed in, all of their constraints/bounds will be
    included.

    See example usage in _subinterfaces.ConflictInterface.
    """
    if len(args) <= 1:
        # No subset given: include every constraint of this type.
        cons = list(range(max_num))
        if len(args) == 0:
            weight = 1.0
        else:
            weight = args[0]
    if len(args) == 2:
        weight = args[0]
        if isinstance(conv(args[1]), six.integer_types):
            cons = [conv(args[1])]
        else:
            cons = map(conv, args[1])
    elif len(args) == 3:
        # Inclusive range between the two converted endpoints.
        cons = list(range(conv(args[1]), conv(args[2]) + 1))
    return _group([(weight, ((c_type, i),)) for i in cons])
| true |
af09f1ffc4c0b1edb1324adcc81ffaa8b8492e0c | Python | jlarcila-code/IMPLEMENTACION-DE-PRINCIPIOS-SOLID | /Solid4.py | UTF-8 | 722 | 2.765625 | 3 | [] | no_license | """Principio de segregación de interface
Dividir la interface hasta el grado de granularidad mas pequeño posible"""
from abc import ABC, abstractmethod
class Celular(ABC):
    """Interface: a device that can place calls."""
    @abstractmethod
    def llamar(self):
        pass


class Texto(ABC):
    """Interface: a device that can send text messages."""
    @abstractmethod
    def mensaje_texto(self):
        pass


class Camara(ABC):
    """Interface: a device that can take photos."""
    @abstractmethod
    def foto(self):
        pass


class SmartPhone(Celular, Texto, Camara):
    """Smartphone: implements every capability."""
    def llamar(self):
        pass

    def mensaje_texto(self):
        pass

    def foto(self):
        pass


class CelularViejo(Celular, Texto):
    """Old phone: calls and texts, but no camera."""
    def llamar(self):
        pass

    def mensaje_texto(self):
        pass


class Celulardesechable(Texto):
    """Disposable phone: text messaging only.

    Fix: the original defined llamar() here but never implemented the
    abstract mensaje_texto() inherited from Texto, so the class could
    not be instantiated (TypeError).  llamar() is kept for backward
    compatibility.
    """
    def llamar(self):
        pass

    def mensaje_texto(self):
        pass
d38aa7c79cda3b2720fc57e69bcb3f899504e592 | Python | dobolicious/css-minify | /compile.py | UTF-8 | 1,169 | 3.140625 | 3 | [] | no_license | from os import listdir
from os.path import isfile, join
import os
def strip_lines(lines):
    """Concatenate all lines with surrounding whitespace removed."""
    return "".join(line.strip() for line in lines)
def stripper(text, index):
    """Remove whitespace around '{', ':' and ',' separators.

    Splits on options[index], strips every fragment, rejoins with the
    same separator, then recurses on the next separator until all three
    have been processed.
    """
    separators = ["{", ":", ","]
    sep = separators[index]
    pieces = [piece.strip() for piece in text.split(sep)]
    joined = sep.join(pieces)
    next_index = index + 1
    if next_index < len(separators):
        return stripper(joined, next_index)
    return joined
def main(cssFile=None, compileFile=None):
    """Minify a CSS file and report the size reduction.

    :param cssFile: source stylesheet; defaults to the original
        hard-coded path so existing callers are unaffected.
    :param compileFile: destination for the minified output.
    """
    if cssFile is None:
        cssFile = os.path.abspath("/Users/michael/Dropbox/python/css/main.css")
    if compileFile is None:
        compileFile = os.path.abspath("/Users/michael/Dropbox/python/css/main.min.css")
    # "with" guarantees both handles are closed (the original leaked
    # them) and that the output is flushed before it is stat()-ed --
    # the original measured the size while the write was still buffered.
    with open(cssFile, "r") as fr:
        stripped = strip_lines(fr)
    stripped = stripper(stripped, 0)
    with open(compileFile, "w+") as fw:
        fw.write(stripped)
    originalSize = os.stat(cssFile).st_size
    compiled = os.stat(compileFile).st_size
    print("Original size:", originalSize)
    print("Compiled size:", compiled)
| true |
d973a3a4467744cef99abae1e7c1a7d446e38a24 | Python | LucXyMan/starseeker | /Source/sprites/huds/gauge.py | UTF-8 | 10,681 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""gauge.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
ゲージモジュール。
"""
import pygame as _pygame
import hud as __hud
import material.string as _string
import utils.const as _const
import utils.image as _image
import utils.layouter as _layouter
class Gauge(__hud.HUD):
    u"""Gauge sprite.

    Base class for the bar gauges drawn beside a unit.  Subclasses
    define _FRONT_COLORS/_SCALE/_LAYER and their own update().
    """
    __ALPHA = None  # alpha for the final labelled surface (None = opaque)
    __GAUGE_SIZE = 42, 4  # bar width, height in pixels
    _BACK_COLORS = _const.GRAY, _const.BLACK
    _LAYER = 0  # offset added to the owning unit's sprite layer
    def __init__(self, unit, groups=None):
        u"""Constructor.

        unit: the sprite this gauge is attached to.
        groups: optional sprite groups forwarded to the HUD base class.
        """
        super(Gauge, self).__init__(groups)
        self._unit = unit
        self._layer = -1  # -1 forces a layer assignment on first update
        self._old = -1  # last observed source value, for change detection
        self._scale = self._dest = 0  # displayed and target strip index
        self._text = ""
        self._color = _string.CharColor()
        self._images = self._get_images()
        self.update()
    def _fluctuate(self):
        u"""Step the displayed scale one unit toward its target.
        """
        if self._scale < self._dest:
            self._scale += 1
        elif self._scale > self._dest:
            self._scale -= 1
    def update(self):
        u"""Update the sprite.

        Kills itself when the owning unit has been killed.
        """
        def __set_layer():
            u"""Keep the draw layer in sync with the unit's layer.
            """
            current_layer = self._unit.layer_of_sprite
            if self._layer != current_layer:
                self._layer = current_layer
                self.draw_group.change_layer(self, self._layer+self._LAYER)
        if self._unit.alive():
            __set_layer()
        else:
            self.kill()
    # ---- Getter ----
    def _get_gauge_images(self, front, back, scale):
        u"""Create one gauge image per possible fill length.

        front/back: (start, end) gradient color pairs for the filled
        and empty parts; scale: number of partition cells on the bar.
        """
        import utils.memoize as __memoize
        @__memoize.memoize()
        def __get_gauge_image(length, front, back, scale):
            u"""Create a single gauge frame with the given fill length.
            """
            def __draw_partition(surf, scale):
                u"""Draw the 1px lines that split the bar into cells.
                """
                w, h, = self.__GAUGE_SIZE
                if scale != 1:
                    for i in range(1, scale):
                        _pygame.draw.rect(
                            surf, (0, 0, 0), _pygame.Rect(w/scale*i, 0, 1, h))
            w, h, = self.__GAUGE_SIZE
            surf = _pygame.Surface((w, h)).convert()
            surf.fill((0, 0, 0))
            # Background gradient (or flat black when no back colors).
            if back:
                _image.draw_gradient_h(
                    surf, back, _pygame.Rect(1, 1, w-2, h-2))
            else:
                _pygame.draw.rect(
                    surf, _pygame.Color(_const.BLACK),
                    _pygame.Rect(1, 1, w-2, h-2))
            # Foreground gradient covering `length` pixels of the bar.
            if length != 0:
                _image.draw_gradient_h(
                    surf, front, _pygame.Rect(1, 1, length, h-2))
            __draw_partition(surf, scale)
            return surf
        w, _, = self.__GAUGE_SIZE
        return tuple(
            __get_gauge_image(x, front, back, scale) for x in range(0, w-1))
    def _get_images(self):
        u"""Build the full image strip used by this gauge.
        """
        def __get_multi_gauge_images(colors, scale):
            u"""Concatenate the gauge strips of several color pairs.
            """
            return reduce(lambda x, y: x+y, (
                self._get_gauge_images(color[0], color[1], scale) for
                color in colors))
        front = tuple(_pygame.Color(c) for c in self._FRONT_COLORS)
        # First strip is drawn over the background colors; each later
        # strip uses the previous color pair as its background, so the
        # gauge can "wrap around" through the color pairs.
        return __get_multi_gauge_images(((
            front[0:2], tuple(_pygame.Color(c) for c in self._BACK_COLORS)),) +
            tuple((
                front[(i << 1):(i << 1)+2], front[(i-1) << 1:i << 1]) for
                i in range(1, 4)), self._SCALE)
    # ---- Setter ----
    def _set_string(self):
        u"""Render the text label above the bar onto self.image.
        """
        gauge = _pygame.Surface(self.image.get_size())
        gauge.blit(self.image, (0, 0))
        gw, gh = gauge.get_size()
        char_size = 8
        # New surface tall enough for the bar plus one row of text;
        # white is used as the transparent color key.
        surf = _pygame.Surface((gw, gh+char_size))
        surf.fill((255, 255, 255))
        surf.set_colorkey((255, 255, 255))
        sw, _ = surf.get_size()
        surf.blit(gauge, ((sw-gw) >> 1, char_size))
        char = _string.get_string(self._text, char_size, self._color)
        cw, _ = char.get_size()
        surf.blit(char, ((sw-cw) >> 1, 0))
        surf.set_alpha(self.__ALPHA)
        self.image = surf
class Life(Gauge):
    u"""Life gauge.
    """
    __FULL_GAUGE = 500  # life value that maps to a completely full strip
    __LIFE_DISPLAY_LIMIT = 9999  # numeric label caps at this value
    _FRONT_COLORS = (
        _const.YELLOW, _const.RED, _const.YELLOW, _const.YELLOW,
        _const.YELLOW, _const.GREEN, _const.CYAN, _const.BLUE)
    _SCALE = 1
    _LAYER = 1
    def update(self):
        u"""Update the gauge.
        """
        def __set_parameter():
            u"""Recompute target length, label text and label color
            when the unit's life has changed.
            """
            life = self._unit.life
            if life != self._old:
                scale = int(
                    (self._unit.life/float(self.__FULL_GAUGE)) *
                    (len(self._images)-1))
                self._dest = (
                    scale if scale < len(self._images) else
                    len(self._images)-1)
                self._text = str(
                    life if life < self.__LIFE_DISPLAY_LIMIT else
                    self.__LIFE_DISPLAY_LIMIT)
                # Red label when is_quarter, yellow when is_half --
                # presumably low-life thresholds; confirm the unit API.
                self._color = (
                    _string.CharColor(_const.RED+"##") if
                    self._unit.is_quarter else
                    _string.CharColor(_const.YELLOW+"##") if
                    self._unit.is_half else
                    _string.CharColor())
                self._old = self._unit.life
        super(Life, self).update()
        __set_parameter()
        self._fluctuate()
        # Hide the gauge entirely while the unit is dead.
        if self._unit.is_dead:
            self.image = _image.get_clear(self.image)
        else:
            self.image = self._images[self._scale]
            self._set_string()
        self.rect = self.image.get_rect()
        _layouter.Game.set_gauge(self, self._unit)
class Charge(Gauge):
    u"""Charge gauge showing power as a fraction of the unit's packet.
    """
    _LAYER = 2
    def _get_images(self):
        u"""Build the cyan-to-magenta charge strip.
        """
        return self._get_gauge_images(
            (_pygame.Color(_const.CYAN), _pygame.Color(_const.MAGENTA)),
            (_pygame.Color(_const.GRAY), _pygame.Color(_const.BLACK)), 1)
    def update(self):
        u"""Update the gauge.
        """
        def __set_parameter():
            u"""Recompute target length and the percentage label when
            the unit's power has changed.
            """
            if self._unit.power != self._old:
                limit = len(self._images)-1
                ratio = self._unit.power/float(self._unit.packet)
                scale = int(ratio*limit)
                self._dest = scale if scale < limit else limit
                self._text = str(int(ratio*100))+"%"
                self._old = self._unit.power
        super(Charge, self).update()
        __set_parameter()
        self._fluctuate()
        # Hidden while the unit is dead or frozen.
        if self._unit.is_dead or self._unit.is_frozen:
            self.image = _image.get_clear(self.image)
        else:
            self.image = self._images[self._scale]
            self._set_string()
        self.rect = self.image.get_rect()
        _layouter.Game.set_charge_gauge(self, self._unit)
class Freeze(Gauge):
    u"""Freeze gauge, visible only while the unit is frozen.
    """
    _LAYER = 2
    def __init__(self, unit, groups=None):
        u"""Constructor.
        """
        super(Freeze, self).__init__(unit, groups)
        self._text = "Freeze"
    def _get_images(self):
        u"""Build the yellow-to-cyan freeze strip.
        """
        return self._get_gauge_images(
            (_pygame.Color(_const.YELLOW), _pygame.Color(_const.CYAN)),
            (_pygame.Color(_const.GRAY), _pygame.Color(_const.BLACK)), 1)
    def update(self):
        u"""Update the gauge.
        """
        def __set_parameter():
            u"""Recompute target length when the frozen time changes.
            """
            if self._unit.frozen_time != self._old:
                # Full strip corresponds to packet * 4 frozen time.
                scale = (
                    self._unit.frozen_time /
                    float(self._unit.packet << 2)*(len(self._images)-1))
                self._dest = int(
                    scale if scale < len(self._images) else
                    len(self._images)-1)
                self._old = self._unit.frozen_time
        super(Freeze, self).update()
        __set_parameter()
        self._fluctuate()
        self.image = self._images[self._scale]
        self._set_string()
        # Hidden when the unit is dead or no longer frozen.
        if self._unit.is_dead or not self._unit.is_frozen:
            self.image = _image.get_clear(self.image)
        self.rect = self.image.get_rect()
        _layouter.Game.set_charge_gauge(self, self._unit)
class Pressure(Gauge):
    u"""Pressure gauge driven by the system's accumulate object.
    """
    _FRONT_COLORS = (
        _const.YELLOW, _const.YELLOW, _const.YELLOW, _const.CYAN,
        _const.CYAN, _const.BLUE, _const.YELLOW, _const.MAGENTA)
    _SCALE = 4
    def __init__(self, unit, system, groups=None):
        u"""Constructor.

        system: provides the accumulate object whose pressure is shown.
        """
        self.__accumulate = system.accumulate
        super(Pressure, self).__init__(unit, groups)
    def update(self):
        u"""Update the gauge.
        """
        def __set_parameter():
            u"""Recompute bar length, the "level/effects" label and its
            color when the accumulated pressure changes.
            """
            def __get_string_color():
                u"""Label color by pressure threshold: red above the
                adamant level, yellow above the solid level.
                """
                adamant_lv = (_const.ADAMANT_PRESS_LEVEL+1)*_const.PRESS_POINT
                solid_lv = (_const.SOLID_PRESS_LEVEL+1)*_const.PRESS_POINT
                pressure = self.__accumulate.pressure
                return (
                    _string.CharColor(_const.RED+"##") if
                    adamant_lv < pressure else
                    _string.CharColor(_const.YELLOW+"##") if
                    solid_lv < pressure else _string.CharColor())
            if self.__accumulate.pressure != self._old:
                limit = len(self._images)-1
                value = int(
                    self.__accumulate.pressure/float(_const.PRESS_LIMIT)*limit)
                self._dest = value if value < limit else limit
                self._text = "{level}/{effects}".format(
                    level=self.__accumulate.level,
                    effects=self.__accumulate.effects)
                self._color = __get_string_color()
                self._old = self.__accumulate.pressure
        super(Pressure, self).update()
        __set_parameter()
        self._fluctuate()
        self.image = self._images[self._scale]
        self._set_string()
        self.rect = self.image.get_rect()
        _layouter.Game.set_gauge(self, self._unit)
| true |
09ca78be0ecd8034906ccf522b6a18c04f904308 | Python | sforrester23/Ticketing_System | /Python_Ticket_Sys.py | UTF-8 | 2,853 | 4 | 4 | [] | no_license | import sys
TICKET_PRICE = 10  # price per ticket
SERVICE_CHARGE = 2  # flat fee added to every order
tickets_remaining = 100  # tickets still available for sale


def calculate_price(number_of_tickets):
    """Return the order total: tickets at TICKET_PRICE plus the flat SERVICE_CHARGE."""
    return number_of_tickets * TICKET_PRICE + SERVICE_CHARGE
# Sell tickets continuously until none remain (tickets_remaining variable).
while tickets_remaining >= 1:
    print("There are {} tickets_remaining".format(tickets_remaining))
    # Gather the user's name and assign it to a new variable.
    name = input("What is your name? ")
    # Ask how many tickets they would like; int() may raise ValueError
    # on non-numeric input, handled below.
    try:
        number_tickets = input("Hey, {}, how many tickets would you like to purchase? ".format(name))
        number_tickets = int(number_tickets)
        # Reject orders larger than the remaining stock via the same
        # ValueError path as invalid input.
        if number_tickets > tickets_remaining:
            raise ValueError(
                "Sorry, we do not have enough tickets to complete that transaction. The number of tickets we have left is {}. You may only purchase that many.".format(
                    tickets_remaining))
    except ValueError as err:
        print(
            "Oh no: that's not a valid value for the number of tickets you'd like to purchase! Please enter a valid number.")
        print("{}".format(err))
    else:
        # Valid order: quote the total price.
        total_cost = calculate_price(number_tickets)
        print(
            "{}, you have selected {} tickets. Your total price for the amount of tickets you would like to buy is: ${}.".format(
                name, number_tickets, total_cost))
        # Ask if the user wants to proceed with the purchase. Yes/No.
        confirmation = input("Would you like to proceed with this purchase, {}? (Yes/No) ".format(name))
        confirmation = confirmation.lower()
        if confirmation == "yes":
            # TODO: gather credit card information and process.
            print("SOLD! Thank you for your purchase, {}.".format(name))
            # Reduce the number of tickets available by the amount purchased.
            tickets_remaining = tickets_remaining - number_tickets
        # Otherwise, thank them by name.
        else:
            print("Thank you anyway, {}. Come back soon to confirm your order!".format(name))
sys.exit(
    "Sorry, we have run out of tickets.")  # Stop once the loop exits, i.e. no tickets remain.
| true |
d159040bdf2219afc45d1fe92eb27aae50a27734 | Python | tanmay-09/demo | /iris ETL Logistic.py | UTF-8 | 1,137 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd

# Load the labelled iris data set.
A = pd.read_csv("iris-with-answers.csv")
copy_data = A.copy()  # untouched copy of the raw data

# Encode the species label as an integer class (0/1/2).
A['species'] = A['species'].map({'setosa': 0, 'versicolor': 1, 'virginica': 2})

# Feature matrix / target vector.
# Fix: the original selected "sepal_length" twice; the second column was
# almost certainly meant to be "sepal_width" (the four iris measurements).
X = A[["sepal_length", "sepal_width", "petal_length", "petal_width"]]
Y = A[["species"]]

from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(X, Y, test_size=0.3, random_state=35)
# repeat this statement if your model is having sampling bias
from sklearn.linear_model import LogisticRegression

lm = LogisticRegression()
model = lm.fit(xtrain, ytrain)
b0 = model.intercept_
b1 = model.coef_

pred = model.predict(xtest)
ytest = ytest.copy()  # work on a copy to avoid SettingWithCopyWarning
ytest['predict'] = pred
print(ytest)

from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score
# NOTE(review): these are regression metrics applied to class labels;
# classification metrics (accuracy, F1) would be more conventional here.
print(mean_absolute_error(ytest.species, pred))
print(mean_squared_error(ytest.species, pred))
print(explained_variance_score(ytest.species, pred))

# Persist the fitted model and reload it; context managers close the
# file handles deterministically (the original leaked both handles).
import pickle
with open("iris.pkl", "wb") as fh:
    pickle.dump(lm, fh)
with open('iris.pkl', 'rb') as fh:
    model = pickle.load(fh)
# In[ ]:
# In[ ]:
| true |
f421f291c2cefd99bd973c4705c9397d91aa0777 | Python | Huterox/FixPicTools | /Tools.py | UTF-8 | 6,858 | 2.859375 | 3 | [] | no_license | import sys
from PIL import Image
import os
from queue import Queue
if sys.platform=="win32":
from win32 import win32api, win32gui, win32print
from win32.lib import win32con
from win32.win32api import GetSystemMetrics
class ChangeRealSize(object):
    '''
    Adapts pixel values to the screen's DPI scaling: converts a pixel
    count specified at 100% display scale into the equivalent value for
    the current scaling factor.
    Example:
        RealSize = ChangeRealSize()
        x = RealSize.getreal_xy(500)
    ``x`` is then the pixel value matching the current screen.
    '''
    def get_real_resolution(self):
        """Return the physical (unscaled) screen resolution in pixels."""
        hDC = win32gui.GetDC(0)
        w = win32print.GetDeviceCaps(hDC, win32con.DESKTOPHORZRES)
        h = win32print.GetDeviceCaps(hDC, win32con.DESKTOPVERTRES)
        return w, h
    def get_screen_size(self):
        """Return the scaled (logical) screen resolution in pixels."""
        w = GetSystemMetrics (0)
        h = GetSystemMetrics (1)
        return w, h
    def getreal_xy(self,x):
        '''Return the real pixel value for ``x`` given at 100% scale.'''
        real_resolution = self.get_real_resolution()
        screen_size = self.get_screen_size()
        screen_scale_rate = round(real_resolution[0] / screen_size[0], 2)
        try:
            x = x/screen_scale_rate
        except:
            # Laptop fallback; on 100%-scale machines this normally never
            # triggers. NOTE(review): the bare except hides the real error
            # and the fallback yields int(1.25) == 1 — confirm intent.
            x=1.25
        return int(x)
class Tools(object):
    """PIL-based image helpers: resize, binarize, recolor and outline.

    Every method writes its result to ``save``; when ``save`` is left at
    its default, output goes to ``media/Out_Image.png`` next to this module.
    """

    def __init__(self):
        # DPI helper used to translate requested sizes to 100%-scale pixels.
        self.RealSize = ChangeRealSize()

    def _prepare_binarized(self, path, save):
        """Binarize ``path`` (honouring the single-image default) and return
        the file the binarized image was written to."""
        if save == ".\media\Out_Image.png":
            # Single-image mode: result lives next to this module.
            self.ERZHIHUA_One(path)
            return r'{}'.format(os.path.dirname((os.path.abspath(__file__)))) + '\media\Out_Image.png'
        self.ERZHIHUA_One(path, save)
        return save

    def CHANGESIZE_One(self, path, x, y, save=".\media\Out_Image.png"):
        """Resize the image at ``path`` to (x, y) pixels and save it.

        On Windows the requested size is converted to real (100%-DPI)
        pixels first; elsewhere it is used as-is. Omitting ``save`` selects
        single-image mode (default output file).
        """
        image = Image.open(path)
        if sys.platform == "win32":
            size = (self.RealSize.getreal_xy(x), self.RealSize.getreal_xy(y))
        else:
            size = (x, y)
        image = image.resize(size, Image.ANTIALIAS)
        image.save(save)

    def ERZHIHUA_One(self, path, save=".\media\Out_Image.png"):
        """Binarize the image: gray values below 120 map to black (0),
        everything else to white (1)."""
        image = Image.open(path)
        image = image.convert('L')
        # Threshold lookup table; a higher cut-off keeps more dark pixels
        # (darker, more pronounced outlines).
        table = [0 if value < 120 else 1 for value in range(256)]
        image = image.point(table, '1')
        image.save(save)

    def DANSHANGSE_One(self, path, RGB, save=".\media\Out_Image.png"):
        """Binarize the image, then repaint every black pixel with ``RGB``.

        (Deduplicated: the original repeated the identical pixel loop in
        both the default-save and explicit-save branches.)
        """
        target = self._prepare_binarized(path, save)
        image = Image.open(target).convert("RGB")
        width, height = image.size
        new_image = Image.new("RGB", (width, height))
        for x in range(width):
            for y in range(height):
                rgb = image.getpixel((x, y))
                if rgb == (0, 0, 0):
                    rgb = RGB
                new_image.putpixel((x, y), (int(rgb[0]), int(rgb[1]), int(rgb[2])))
        new_image.save(target)

    def LUNKUO_One(self, path, save=".\media\Out_Image.png"):
        """Binarize the image and thin solid black regions into outlines.

        A black pixel survives only when the pixel three rows above or
        below is white (a vertical edge); black pixels in the interior of
        a vertical run are turned white.
        """
        target = self._prepare_binarized(path, save)
        image = Image.open(target).convert("RGB")
        new_img = Image.new("RGB", (image.size[0], image.size[1]))
        for x in range(image.size[0]):
            for y in range(image.size[1]):
                rgb = image.getpixel((x, y))
                if rgb != (255, 255, 255) and 2 < y < image.size[1] - 3:
                    above = image.getpixel((x, y - 3))
                    below = image.getpixel((x, y + 3))
                    if above == (255, 255, 255) and rgb == (0, 0, 0) and below == (0, 0, 0):
                        rgb = (0, 0, 0)
                    elif above == (0, 0, 0) and rgb == (0, 0, 0) and below == (255, 255, 255):
                        rgb = (0, 0, 0)
                    if above == (0, 0, 0) and rgb == (0, 0, 0) and below == (0, 0, 0):
                        rgb = (255, 255, 255)
                new_img.putpixel((x, y), (int(rgb[0]), int(rgb[1]), int(rgb[2])))
        new_img.save(target)
if __name__=="__main__":
    # Smoke test: on Windows, convert 250 logical pixels to real pixels.
    if sys.platform=="win32":
        RealSize = ChangeRealSize()
        x=RealSize.getreal_xy(250)
        print(x)
    else:
        print("there is not windows can not run this code")
c91b094161aa8c8992b3b52bc610d54090376bbf | Python | mzhao15/mylearning | /algorithms/BackTracking/restoreIpAddresses.py | UTF-8 | 1,134 | 3.15625 | 3 | [] | no_license |
# backtracking
# def restoreIpAddresses(s):
# res = []
# if not s:
# return res
# temp = []
# helper(s,0,res,temp)
# return res
# def helper(s,pos,res,temp):
# if pos == 4:
# if not s:
# res.append('.'.join(temp[:]))
# return
# for i in range(1,4):
# if i<=len(s):
# if i==1:
# temp.append(''.join(s[:1]))
# helper(s[1:],pos+1,res,temp)
# temp.pop()
# elif i==2 and s[0]!='0':
# temp.append(''.join(s[:2]))
# helper(s[2:],pos+1,res,temp)
# temp.pop()
# elif i==3 and s[0]!='0' and int(''.join(s[:3]))<256:
# temp.append(''.join(s[:3]))
# helper(s[3:],pos+1,res,temp)
# temp.pop()
# brutal force
def restoreIpAddresses(s):
    """Return every valid dotted-quad IP address formed by inserting three
    dots into ``s`` (brute force over the first three segment lengths).

    Fixes over the original version:
    * the inner validation loop no longer shadows (and clobbers) the
      outer segment-length variable ``i``;
    * the fourth segment must be non-empty, so inputs such as "111" no
      longer yield malformed results like "1.1.1.".
    """
    res = []
    for i in range(1, 4):
        for j in range(1, 4):
            for k in range(1, 4):
                temp = [s[:i], s[i:i + j], s[i + j:i + j + k], s[i + j + k:]]
                # The remainder must hold 1-3 characters.
                if not temp[3] or len(temp[3]) > 3:
                    continue
                if all(_valid_segment(part) for part in temp):
                    res.append('.'.join(temp))
    return res


def _valid_segment(part):
    """A segment is valid when it has no leading zero (unless it is "0"
    itself) and its numeric value is at most 255."""
    if len(part) > 1 and part[0] == '0':
        return False
    if len(part) == 3 and int(part) > 255:
        return False
    return True


s = '25525511135'
print(restoreIpAddresses(s))
5357f5a0f1813cf6127dff8408aa25bb42f2d83d | Python | esilberberg/ReportsScraper | /reports_scraper.py | UTF-8 | 3,623 | 2.53125 | 3 | [] | no_license | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementNotInteractableException
from selenium.webdriver.chrome.options import Options
import pandas as pd
import os
import time
def LogManualDownload():
    """Record the report currently open in the module-global ``driver`` so
    it can be downloaded by hand later; the global ``link`` is its title."""
    for_manual_download["Title"].append(link)
    for_manual_download["URL"].append(driver.current_url)
# Page range to scrape (start inclusive, end exclusive).
start_pg = input("Enter the start page: ")
end_pg = input("Enter the end page: ")

driver = webdriver.Chrome(
    r".\chromedriver_win32\chromedriver.exe")

# Accumulators: the CSV manifest and the reports needing manual download.
reports_manifest = {"Title": [], "Date": [],
                    "pdf_Name": [], "Description": [], "Related_Issues": []}
for_manual_download = {"Title": [], "URL": []}

for x in range(int(start_pg), int(end_pg)):
    url = f'https://www.aclu.org/search/a?page={str(x)}&f%5B0%5D=type%3Aasset&f%5B1%5D=field_asset_type%3Areport'
    driver.get(url)
    driver.maximize_window()
    time.sleep(2)
    # Collect the report titles and dates shown on the listing page.
    titles = driver.find_elements_by_tag_name('h3.title')
    dates = driver.find_elements_by_tag_name('span.date')
    report_links = []
    for title in titles:
        report_links.append(title.text)
        reports_manifest["Title"].append(title.text)
    for date in dates:
        reports_manifest["Date"].append(date.text.title())
    # Visit each report page and scrape its metadata.
    for link in report_links:
        print(f"Page: {str(x)}")
        print(f"Now working on: {link}")
        time.sleep(2)
        driver.find_element_by_link_text(link).click()
        time.sleep(3)
        try:
            pdf_name = driver.find_element_by_class_name(
                'download-link').get_attribute('href')
            # NOTE(review): [56:] assumes a fixed-length URL prefix before
            # the file name — confirm it still matches the site's scheme.
            reports_manifest["pdf_Name"].append(pdf_name[56:])
        except NoSuchElementException:
            reports_manifest["pdf_Name"].append("NO PDF FOUND")
        try:
            description = driver.find_element_by_xpath(
                '/html/body/div[3]/div[2]/div/div[2]/div[2]/div/div/div[2]/p[1]')
            reports_manifest["Description"].append(description.text)
        except NoSuchElementException:
            reports_manifest["Description"].append(" ")
        try:
            related_issues = driver.find_element_by_class_name('item-list')
            reports_manifest["Related_Issues"].append(related_issues.text)
        except NoSuchElementException:
            reports_manifest["Related_Issues"].append(" ")
        # There is at least 1 without an iFrame. Throw up exception and place in PDF name "NO PDF FOUND"
        try:
            iframe = driver.find_element_by_xpath('//*[@id="iFrameResizer0"]')
            driver.switch_to.frame(iframe)
            time.sleep(2)
            driver.find_element_by_xpath('//*[@id="download"]').click()
        except NoSuchElementException:
            LogManualDownload()
        except ElementNotInteractableException:
            LogManualDownload()
        driver.switch_to.default_content()
        driver.back()
driver.quit()

# Print to CSV manifest and list of reports requiring manual download
output_folder = r"C:\Users\erics\Downloads"
df = pd.DataFrame.from_dict(reports_manifest)
csv_path = os.path.join(
    output_folder, f"reports_manifest_{start_pg}-{end_pg}.csv")
df.to_csv(csv_path, index=False, encoding='utf-8-sig')
df_for_manual_dl = pd.DataFrame.from_dict(for_manual_download)
csv_path_manual_dl = os.path.join(
    output_folder, f"for_manual_download_{start_pg}-{end_pg}.csv")
df_for_manual_dl.to_csv(csv_path_manual_dl, index=False, encoding='utf-8-sig')
| true |
575d0f45a108d64a5c7e9b399afde1e5a5baf665 | Python | ohhuola/Data-Mining-for-Cybersecurity | /Homework/2019/Task3/11/xss_pry.py | UTF-8 | 1,875 | 2.875 | 3 | [
"MIT"
] | permissive |
#coding: utf-8
import re
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn.externals import joblib
from sklearn.metrics import classification_report
from sklearn import metrics
x = []  # feature matrix: one [length, has_url, char_count, keyword_count] row per sample
y = []  # sample labels: 1 = XSS payload, 0 = benign
### Feature extraction
def get_len(url):
    """Feature: total character length of the sample."""
    return len(url)
def isURL(param):
    """Feature: 1 when the sample embeds an http:// or https:// URL, else 0."""
    # Case-insensitive search anywhere in the string.
    return 1 if re.search('(http://)|(https://)', param, re.IGNORECASE) else 0
def countChar(param):
    """Feature: number of suspicious punctuation characters <>()'"/ present."""
    matches = re.findall("[<>()\'\"/]", param, re.IGNORECASE)
    return len(matches)
def countWord(param):
    """Feature: count of typical XSS keywords/encodings in the sample.

    Fix: the original pattern was missing a '|' between '(scripts=)' and
    '(%3ac)', so those two tokens only matched when concatenated.
    """
    return len(re.findall('(alert)|(scripts=)|(%3ac)|(%3e)|(%20)|(onerror)|(onload)|(eval)|(src=)|(prompt)|(iframe)|(java)', param, re.IGNORECASE))
### Vectorization
def getMatrix(filename, data, isxss):
    """Append one feature row per line of ``filename`` to ``data`` and record
    the class (1 = XSS, 0 = benign) in the module-level label list ``y``."""
    label = 1 if isxss else 0
    with open(filename) as fd:
        for line in fd:
            features = [get_len(line), isURL(line), countChar(line), countWord(line)]
            data.append(features)
            y.append(label)
getMatrix('/Users/dqy/XSS/xssed.csv', x, 1)
getMatrix('/Users/dqy/XSS/dmzo_normal.csv', x, 0)

### Training
#### train/test split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
#### fit a linear SVM and evaluate on the held-out split
clf = svm.SVC(kernel='linear', C=1).fit(x_train, y_train)
y_pred = clf.predict(x_test)
print("metrics.accuracy_score:")
print(metrics.accuracy_score(y_test, y_pred))
print("metrics.recall_score:")
print(metrics.recall_score(y_test, y_pred))

### Interactive test
line = input("test: ")
test_x = [[get_len(line), isURL(line), countChar(line), countWord(line)]]
# Fix: predict() returns an array of the integer labels used in training
# (0 = benign, 1 = XSS). The original compared that array to the string
# "0", which is never equal, so every input was reported as malicious.
if clf.predict(test_x)[0] == 0:
    print("Benign")
else:
    print("Malicious XSS")
| true |
839181690f4ed28f3fab022916696fd752d0857c | Python | eggzotic/Hackerrank | /Pangrams/Pangrams.py | UTF-8 | 516 | 3.109375 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the pangrams function below.
def pangrams(s):
    """Return 'pangram' when ``s`` contains at least 26 distinct letters
    (case-insensitive), otherwise 'not pangram'."""
    seen = {ch for ch in s.lower() if ch.isalpha()}
    return 'pangram' if len(seen) >= 26 else 'not pangram'
if __name__ == '__main__':
    # Write to OUTPUT_PATH when provided (judge environment), else stdout.
    try:
        fptr = open(os.environ['OUTPUT_PATH'], 'w')
    except KeyError:
        fptr = sys.stdout

    s = input()

    result = pangrams(s)

    fptr.write(result + '\n')

    # Fix: only close the handle we opened ourselves — closing sys.stdout
    # would make any subsequent writes to standard output fail.
    if fptr is not sys.stdout:
        fptr.close()
| true |
f8d891e18e7464a655680e4586072b491d270cc3 | Python | vishalsodani/deal | /tests/test_marshmallow.py | UTF-8 | 2,286 | 3.109375 | 3 | [
"MIT"
] | permissive | import marshmallow
import vaa
import deal
import pytest
@pytest.fixture()
def scheme():
    """vaa-wrapped marshmallow schema with a single string field ``name``."""
    class _Scheme(marshmallow.Schema):
        name = marshmallow.fields.Str()
    yield vaa.marshmallow(_Scheme)
def test_scheme_string_validation_args_correct(scheme):
    """A schema precondition accepts valid strings and raises
    PreContractError (carrying the marshmallow errors) for other types."""
    @deal.pre(scheme)
    def func(name):
        return name * 2

    assert func('Chris') == 'ChrisChris'

    with pytest.raises(deal.PreContractError):
        func(123)

    try:
        func(123)
    except deal.PreContractError as e:
        assert e.args[0] == {'name': ['Not a valid string.']}
def test_method_chain_decorator_with_scheme_is_fulfilled(scheme):
    """Schema and lambda preconditions stack; violating either one raises."""
    @deal.pre(scheme)
    @deal.pre(lambda name: name != 'Oleg')
    def func(name):
        return name * 2

    assert func('Chris') == 'ChrisChris'

    with pytest.raises(deal.PreContractError):
        func(123)

    with pytest.raises(deal.PreContractError):
        func('Oleg')
def test_scheme_contract_is_satisfied_when_setting_arg(scheme):
    """A schema invariant validates attribute assignment on instances."""
    @deal.inv(scheme)
    class User:
        name = ''

    user = User()

    user.name = 'Chris'

    with pytest.raises(deal.InvContractError):
        user.name = 123

    try:
        user.name = 123
    except deal.InvContractError as e:
        assert e.args[0] == {'name': ['Not a valid string.']}
def test_scheme_contract_is_satisfied_within_chain(scheme):
    """Stacked invariants (lambda / schema / lambda) are all enforced
    independently on attribute assignment."""
    @deal.inv(lambda user: user.name != 'Oleg')
    @deal.inv(scheme)
    @deal.inv(lambda user: user.name != 'Chris')
    class User:
        name = ''

    user = User()
    user.name = 'Gram'

    user = User()
    with pytest.raises(deal.InvContractError):
        user.name = 'Oleg'

    user = User()
    with pytest.raises(deal.InvContractError):
        user.name = 123

    user = User()
    with pytest.raises(deal.InvContractError):
        user.name = 'Chris'
def test_scheme_contract_is_satisfied_when_passing_args(scheme):
    """Schema preconditions validate positional, keyword, **kwargs and
    defaulted arguments alike."""
    @deal.pre(scheme)
    def func(name):
        return name * 2

    assert func('Chris') == 'ChrisChris'
    assert func(name='Chris') == 'ChrisChris'

    @deal.pre(scheme)
    def func(**kwargs):
        return kwargs['name'] * 3

    assert func(name='Chris') == 'ChrisChrisChris'

    @deal.pre(scheme)
    def func(name='Max'):
        return name * 2

    assert func() == 'MaxMax'
| true |
abd1cbdd8833951d4e0544b5655cb0b96b278b75 | Python | lzxdale/MTH3300 | /HW6/quintic.py | UTF-8 | 1,763 | 3.546875 | 4 | [] | no_license | #******************************************************************************
# quintic.py
#******************************************************************************
# Name: Zexiang Lin
#******************************************************************************
# Collaborators/outside sources used
#(IMPORTANT! Write "NONE" if none were used):
#
#
#
# Reminder: you are to write your own code.
#******************************************************************************
# Overall notes (not to replace inline comments):
#
#
# Read the six polynomial coefficients c0..c5 (for c0 + c1*x + ... + c5*x^5).
coco = []
for i in range(6):
    coco.append(float(input("Enter x^{} coefficient:".format(i)))) # store all the coefficients
xi = float(input("take a guess plz:")) # initial guess x0 for Newton's method
# derivative = c5*5x^4+c4*4x^3+c3*3x^2+c2*2x+c1
def fuc(alist, x):
    """Evaluate the polynomial with coefficients ``alist`` (c0 first) at x."""
    return sum(coef * x ** power for power, coef in enumerate(alist))
def fuc_d(alist, x):
    """Evaluate the derivative of the polynomial with coefficients ``alist`` at x."""
    # The constant term (power 0) contributes nothing to the derivative.
    return sum(coef * power * x ** (power - 1)
               for power, coef in enumerate(alist) if power != 0)
def main():
    """Run ten Newton-Raphson iterations, refining the global guess ``xi``."""
    global xi
    for i in range(10): # running 10 fixed iterations
        # Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n).
        # NOTE(review): raises ZeroDivisionError when the derivative is 0.
        xi = xi- fuc(coco, xi)/fuc_d(coco, xi)
        print(xi)
main()
## challenge: keep iterating until |f(x)| is within the user's tolerance ##
ert = float(input("Enter a error tolerance"))
def challange():
    """Iterate Newton's method from the global guess ``xi`` until the
    residual |f(x)| is within the tolerance ``ert`` (or a safety cap).

    Fix: the original recomputed the same step from the unchanged global
    ``xi`` on every pass, so the loop could never make progress past one
    step; the iterate is now carried forward in the local ``x``.
    """
    x = xi
    i = 0
    while True: # running until break
        x = x - fuc(coco, x) / fuc_d(coco, x)
        i += 1 # safety counter so the loop cannot spin forever
        if abs(fuc(coco, x)) <= abs(ert):
            print(x)
            break
        if i == 10**30:
            print("exit the loop")
            break
challange()
| true |
54b0e7c9f206354afac6500dcadefa2d387c2b3e | Python | ducduyn31/ProgrammingAssignment3 | /message.py | UTF-8 | 1,872 | 2.90625 | 3 | [] | no_license | import struct
class Message:
    """Chat message with a fixed binary wire format.

    Layout: 2-byte big-endian username length, 4-byte big-endian message
    length, 1-byte command flag, then the UTF-8 username and message.
    """

    def __init__(self, usrname='', msg='', is_command=False, raw=None):
        if raw is None:
            # Build from the individual fields.
            self.u_name_l = len(usrname)
            self.message_l = len(msg)
            self.username = usrname
            self.message = msg
            self._is_command = is_command
        else:
            # Decode an already-serialized message.
            (self.u_name_l,
             self.message_l,
             self._is_command,
             self.username,
             self.message) = self._extract_raw(raw)

    def set_username(self, new_username):
        """Replace the username and keep its cached length in sync."""
        self.username = new_username
        self.u_name_l = len(new_username)

    def get_username(self):
        return self.username

    def is_command(self):
        return self._is_command

    def set_message(self, new_message):
        """Replace the message body and keep its cached length in sync."""
        self.message = new_message
        self.message_l = len(new_message)

    def get_message(self):
        return self.message

    @staticmethod
    def _extract_raw(raw_message):
        """Decode the 7-byte header and the two UTF-8 payload fields.

        Returns (0, 0, False, None, None) when the buffer is too short to
        hold even a header.
        """
        if len(raw_message) < 7:
            return 0, 0, False, None, None
        name_len, body_len, is_cmd = struct.unpack('>HI?', raw_message[:7])
        name = raw_message[7:7 + name_len].decode('utf-8')
        body = raw_message[7 + name_len:].decode('utf-8')
        return name_len, body_len, is_cmd, name, body

    def serialize(self):
        """Encode header + username + message into a bytes object."""
        header = struct.pack('>HI?', self.u_name_l, self.message_l, self._is_command)
        return header + str.encode(self.username) + str.encode(self.message)
| true |
e9b780fd796569838a3b3b9f9d112e5590638ee6 | Python | RobMor/LearningMachineLearning | /Linear Perceptron/main.py | UTF-8 | 2,111 | 3.578125 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from numpy import random, array
from perceptron import Perceptron
def check(a, b, inputs):
    """Label a 2-D point: 1 when it lies on/above the line y = a*x + b, else 0."""
    x_coord, y_coord = inputs[0], inputs[1]
    return 1 if a * x_coord + b <= y_coord else 0


def accuracy(results, correct):
    """Fraction of predictions matching the expected labels."""
    matches = (results == correct).sum()
    return matches / results.size


def create_set(size, a, b, point_range):
    """Build ``size`` random points in [-point_range, point_range]^2 together
    with their above/below-line labels."""
    points = [random.uniform(-point_range, point_range, 2) for _ in range(size)]
    labels = [check(a, b, point) for point in points]
    return array(points), array(labels)
def display(input, results, a, b, point_range):
    """Scatter the classified points and draw the target line y = a*x + b
    (black) plus the through-origin line y = a*x (red)."""
    x = np.array(range(-point_range, point_range))
    y = a * x + b
    y2 = a * x
    axes = plt.gca()
    axes.set_xlim([-point_range, point_range])
    axes.set_ylim([-point_range, point_range])
    # Split the points by their predicted class.
    above = input[np.where(results == 1)]
    below = input[np.where(results == 0)]
    plt.scatter(below[:, 0], below[:, 1], c='blue')
    plt.scatter(above[:, 0], above[:, 1], c='orange')
    plt.plot(x, y, c='black')
    plt.plot(x, y2, c='red')
    plt.show()
def error_display(errors):
    """Plot the per-iteration training error counts."""
    plt.plot(errors)
    plt.show()
if __name__ == "__main__":
    # The goal is to create a perceptron that can identify if a point is above or below the line (a * x + b)
    # Random line parameters and the sampling box for the points.
    a_range = 5
    b_range = 50
    point_range = 100

    a = random.randint(-a_range, a_range)
    b = random.randint(-b_range, b_range)

    print('a = ' + str(a))
    print('b = ' + str(b))

    num_iter = 5000
    train_size = 1000
    test_size = 500

    # Labelled train/test sets drawn from the same distribution.
    train_input, train_correct = create_set(train_size, a, b, point_range)
    test_input, test_correct = create_set(test_size, a, b, point_range)

    p = Perceptron(num_iter)
    errors = p.train(train_input, train_correct)

    print('Weights = ' + str(p.weights))
    print('Bias = ' + str(p.bias))

    test_results = p.test(test_input)
    print('Accuracy = ' + str(accuracy(test_results, test_correct)))

    display(test_input, test_results, a, b, point_range)
    plt.figure()
    print('Training Errors: ' + str(errors))
    # error_display(errors)
| true |
59774af00c6c9ec0a5832b6401c0e45eb1cc5e32 | Python | vito18/tstp | /tstp/Chaper6/Chapter6_challenge9.py | UTF-8 | 88 | 3.265625 | 3 | [] | no_license | a = " three"
# Build "three three three" by explicit concatenation; ``a`` starts with a
# leading space, which the [1:] slice strips off before printing.
b = a + a + a
c = b[1:]
print(c)
# Same result via the string repetition operator instead of repeated '+'.
b = a * 3
c = b[1:]
print(c)
| true |
d89c4e5c8c69e51fe96966d2dbb0813d9b240c12 | Python | fhan90521/algorithm | /leetcode/leetcode-53.py | UTF-8 | 260 | 2.875 | 3 | [] | no_license | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
max_sum=-1000000
s=0
for i in nums:
s+=i
if(s>max_sum):
max_sum=s
if(s<0):
s=0
return max_sum | true |
847bac5cc8fb716a82ee8b0a9589ef08d3e7eb8f | Python | ultra1971/btbot | /btbot/trainer.py | UTF-8 | 939 | 2.75 | 3 | [
"MIT"
] | permissive | import numpy as np
class Trainer(object):
    """Collects feature snapshots from a feeder and pairs them with labels
    from a labeler to build (features, labels) training arrays."""

    def __init__(self, feeder, labeler):
        self.feeder = feeder
        self.labeler = labeler
        # Only the raw feeds are stored; labels are resolved lazily in
        # get_data() so they can be computed with hindsight.
        self.store_feed = []

    def store(self):
        """Snapshot the feeder's current feed (empty/None feeds are ignored)."""
        snapshot = self.feeder.current_feed
        if snapshot is not None and len(snapshot) > 0:
            self.store_feed.append(snapshot)

    def get_data(self, num_data=None, side=None):
        """Return up to ``num_data`` (features, labels) pairs, newest first.

        Entries whose label is unavailable (None) are skipped; labels are
        returned as an int array.
        """
        limit = self.num_data if num_data is None else min(self.num_data, num_data)
        feeds, labels = [], []
        for offset in range(limit):
            label = self.labeler.get_label(-offset, side)
            if label is None:
                continue
            feeds.append(self.store_feed[-(offset + 1)])
            labels.append(label)
        return np.array(feeds), np.array(labels).astype(int)

    @property
    def num_data(self):
        return len(self.store_feed)
4eec7dce66ee380c61c8e0c1b5b680a03b6fa4ad | Python | ccaniano15/inClassWork | /text.py | UTF-8 | 338 | 4.1875 | 4 | [] | no_license | shape = input("triangle or rectangle?")
# Branch on the requested shape; both branches read integer dimensions.
if shape == "triangle":
    width = int(input("what is the length?"))
    height = int(input("what is the height?"))
    # Triangle area: base * height / 2.
    print(width * height / 2)
elif shape == "rectangle":
    width = int(input("what is the length?"))
    height = int(input("what is the height?"))
    print(width * height)
else:
    print("error")
| true |
199beb8cdc341e60b1b9bc256f789da786077e7d | Python | BiancaStoecker/complex-similarity-evaluation | /scripts/write_unique_complexes.py | UTF-8 | 3,651 | 2.984375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from collections import defaultdict
from os.path import dirname
import os
import networkx as nx
""" Given a set of simulation runs and a threshold graph (output from Tills tool
gml2tg) for a arbitrary threshold and weight, generate one gml file with
networkx for each unique complex = each node in the threshold graph (Tills
tool contains an isomorphism check and all occurring nodes are unique complexes
from the input files).
Further write a list containing all filenames and some basic stats.
"""
def get_numbers_from_name(name):
    """Extract (file_number, graph_number) from a threshold-graph node name.

    Expected format: ``output_<weight>_<threshold>_<file>.gml_<graph>``.
    """
    parts = name.split("_")
    file_number = int(parts[3][:-4])  # strip the ".gml" suffix
    graph_number = int(parts[-1])
    return (file_number, graph_number)
def parse_complexes(labels, path_input_graphs, prefix_for_output_gmls, output_file):
    """ Parse the complexes for each label and write a single gml file as well
    as some stats.
    """
    # Group the wanted graph indices by the input file they came from.
    filenames_to_numbers = defaultdict(list)
    for l in labels:
        filename = "_".join(l.split("_")[:4])
        graph_number = int(l.split("_")[-1])
        filenames_to_numbers[filename].append(graph_number)
    output = open(output_file, "w")
    for filename in filenames_to_numbers:
        current_file = open(path_input_graphs+filename[:-4]+".nx.gml", "r") # .nx.gml because of duplication for renaming, see below
        # ``count`` indexes the graph currently being streamed; every
        # "graph [" line starts a new graph. NOTE(review): current_file
        # is never closed — consider a ``with`` block.
        count = -1
        lines = []
        current_graphs = sorted(filenames_to_numbers[filename])
        i = 0
        current_graph = current_graphs[i]
        for line in current_file:
            if line.strip("\n") == "graph [":
                count += 1
            if count == current_graph:
                lines.append(line)
            else:
                if lines != []:
                    # A wanted graph has just ended: write it out as gml.
                    graph = nx.parse_gml(lines)
                    path = prefix_for_output_gmls+"{}_{}".format(filename, current_graph)
                    nx.write_gml(graph, path+".nx.gml")
                    # Rename the "name" attribute back to "label" (see the
                    # module-level note at the bottom of this file).
                    os.system("sed '/label/d' {0}.nx.gml | sed \"s/name/label/\" > {0}.gml".format(path))
                    proteinnames = sorted(list(nx.get_node_attributes(graph,'name').values()))
                    print("{}_{}".format(filename, current_graph), graph.number_of_nodes(), graph.number_of_edges(), proteinnames, sep="\t", file=output)
                    lines = []
                    i += 1
                    # Advance to the next wanted graph (or stop at the last).
                    if i < len(current_graphs):
                        current_graph = current_graphs[i]
                        if count == current_graph:
                            lines.append(line)
                    else:
                        break
    output.close()
if __name__ == "__main__":
    # Paths are injected by Snakemake when this script runs as a rule.
    path_threshold_graph = snakemake.input[0]
    path_input_graphs = snakemake.params.input_graphs
    prefix_for_output_gmls = dirname(snakemake.output[0])+"/"
    output_file = snakemake.output[0]
    threshold_graph = nx.read_gml(path_threshold_graph)
    labels = threshold_graph.nodes(data=False) # current format output_0.005_2.5_7.gml_870 with filenumber 7 and graphnumber 870
    parse_complexes(labels, path_input_graphs, prefix_for_output_gmls, output_file)
""" networkx does not accept multiple labels in gml format, so protein names
are stored in the attribute "name" and the label is a unique id. The
standard format demands them to be "label", so the following preprocessing
is required before the tools from Till and Nils can use the gml files:
for f in *.gml; do cp $f $f.bak; sed '/label/d' $f.bak | sed "s/name/label/" > $f; done
"""
| true |
e7f13f60dff0c6deddcd6bec08923ddc0f41a1ef | Python | Meowse/IntroToPython | /Students/imdavis/session04/mailroom/mailroom.py | UTF-8 | 1,657 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python2.7
from mailroomfunct import prompt1, whichdonor, newdonation, composemail, \
formattable, print_donor_row
# Hardcoded original group of donors and donation amounts as a dictionary
donors = {}
donors.update({ "Robert Plant" : [15.00, 25.32, 100.50] })
donors.update({ "Sandra Bullock" : [12.50, 2.25] })
donors.update({ "Richard D. James" : [1500.34, 2349.99] })
donors.update({ "Slash" : [1.00, 10.99] })
donors.update({ "Jessica Alba" : [13.49] })

# Initial greeting at startup
print "Welcome to Mailrooom, buddy!"

# Call the prompt function to ask the user what they would like to do
todo = ""

# keep looping with prompts until the user says to exit
while (todo != "exit"):

    # send a thank you block
    todo = prompt1()
    if(todo == "send a thank you"):
        # get the index of existing or new donor
        # NOTE(review): if whichdonor() can return a brand-new donor name,
        # donors[donor].append below would raise KeyError — confirm that
        # whichdonor() also inserts new donors into the dict.
        donor = whichdonor(donors)
        # get the donation amount
        donation = newdonation(donor)
        # add the new donation amount to the appropriate donor
        donors[donor].append(donation)
        # update the user of who the donor is and their updated donation history
        print "Donor:", donor
        print "Donation History:", donors[donor]
        # compose the email message thanking the donor for their recent donation
        composemail(donor, donation)

    # create a report block
    elif(todo == "create a report"):
        # print a header for the report table
        formattable("Donor Name", "Total Donations", "Number of Donations", "Average Donation ($)")
        # print each row of the table for each donor
        for donor, donations in donors.items():
            print_donor_row(donor, donations)
| true |
dc45de8470297db643684bc23855b6e76d1d4d4c | Python | KevinTorres03/Codigos-fuente-C-y-Python | /Proyectos Python (1)/promedio.py | UTF-8 | 349 | 4.03125 | 4 | [] | no_license | print ("Vamos a hallar el promedio de 5 numeros")
# Read the five numbers to average.
n1 = int( input (" Ingrese el primer numero: "))
n2 = int( input (" Ingrese el segundo numero: "))
n3 = int( input (" Ingrese el tercer numero: "))
n4 = int( input (" Ingrese el cuarto numero: "))
n5 = int( input (" Ingrese el quinto numero: "))
# Fix: parenthesize the sum before dividing. '/' binds tighter than '+',
# so the original printed n1+n2+n3+n4+(n5/5) instead of the average.
print("El promedio es: ", (n1 + n2 + n3 + n4 + n5) / 5)
| true |
e53f04309c2bd8170172e85a4b41ae884a92cc50 | Python | 1924zjy0835/D3 | /D3/utils/captcha/restful.py | UTF-8 | 1,032 | 2.53125 | 3 | [] | no_license | from django.http import JsonResponse
class httpCode(object):
    """HTTP-style status codes used in the JSON response envelope."""
    ok = 200
    paramserror = 400
    unauth = 401
    methoderror = 405
    servererror = 500
def httpResult(code=httpCode.ok, message="", data=None, kwargs=None):
    """Build the standard JSON envelope {code, message, data}, optionally
    merged with extra key/value pairs passed as a dict in ``kwargs``."""
    payload = {"code": code, "message": message, "data": data}
    # Merge the extras only when a non-empty dict was actually supplied.
    if kwargs and isinstance(kwargs, dict) and kwargs.keys():
        payload.update(kwargs)
    return JsonResponse(payload)
def ok():
    """200 success envelope with no payload."""
    return httpResult()
def params_error(message="", data=None):
    """400 bad-request envelope."""
    return httpResult(code=httpCode.paramserror, message=message, data=data)
def unauth_error(message="", data=None):
    """401 unauthorized envelope."""
    return httpResult(code=httpCode.unauth, message=message, data=data)
def methoderror(message="", data=None):
    """405 method-not-allowed envelope."""
    return httpResult(code=httpCode.methoderror, message=message, data=data)
def servererror(message="", data=None):
    """500 server-error envelope."""
    return httpResult(code=httpCode.servererror, message=message, data=data)
3b3a2b22000c7e57361a5f25124cecb785d2ea93 | Python | webclinic017/material-strategy | /EVENT_BAR_CANDIDATE_CHECK.py | UTF-8 | 5,557 | 2.875 | 3 | [] | no_license | import sys
import json
import logging
from pubsubKeys import PUBSUB_KEYS
from redisPubsub import RedisPublisher, RedisSubscriber
# StudyThreeBarsFilter
class StudyThreeBarsFilter:
    """Static pattern checks for the '3 bar play' setup.

    The setup is a sharp move from bar price2 to price1 followed by a
    retrace to price0 of roughly 30-70% of that move, e.g. prices [2, 4, 3].
    """

    # Minimum absolute price move for the initial thrust to count.
    _MinimumPriceJump = 0.2

    @staticmethod
    def _column(matrix, i):
        """Return column ``i`` of a row-major 2-D list."""
        return [row[i] for row in matrix]

    @staticmethod
    def _isFirstTwoBars(price0, price1, price2):
        """True when (price2 -> price1 -> price0) forms thrust + retrace.

        Only considered for prices in the $3..$20 range.
        """
        if price0 < 3 or price0 > 20:
            return False
        retraced = price0 - price2
        thrust = price1 - price2
        if abs(thrust) < StudyThreeBarsFilter._MinimumPriceJump:
            return False
        ratio = retraced / thrust if thrust != 0 else 0
        return 0.3 <= ratio < 0.7

    @staticmethod
    def barCandidate(firstPrice, secondPrice, timeframe, ts, op):
        """Stack entry describing a candidate (or its removal, op='DEL')."""
        return {"indicator": "price",
                "timeframe": timeframe,
                "filter": [firstPrice, secondPrice],
                "timestamp": ts,
                "operation": op
                }

    @staticmethod
    def potentialList(symbol, prices, timeframe):
        """Look for the pattern across 3 or 4 (timestamp, close) bars.

        Returns (True, ADD-candidate) on a match, else (False, DEL-candidate).
        """
        filt = StudyThreeBarsFilter
        if len(prices) > 2 and filt._isFirstTwoBars(prices[0][1], prices[1][1], prices[2][1]):
            return True, filt.barCandidate(prices[0][1], prices[1][1], timeframe, prices[0][0], 'ADD')
        if len(prices) > 3 and filt._isFirstTwoBars(prices[0][1], prices[2][1], prices[3][1]):
            return True, filt.barCandidate(prices[0][1], prices[2][1], timeframe, prices[0][0], 'ADD')
        return False, filt.barCandidate(0, 0, timeframe, prices[0][0], 'DEL')
#
# This class filters the Acitve Bars (stocks that are moving)
# and filter out the stocks that meets the 3 bar criteria.
# It is saved to a redis hash table. It is named STUDYTHREEBARSTACK
# or just stack.
# It also manages subscribe/unsubscribe table for Alpaca Stream.
# We subscribe/unsubscribe to real time data stream for the
# real-time live data. We subscribe to the trade stream of the
# stocks taht are in the Stack
#
class StudyThreeBarsCandidates:
    """Subscribes to candidate-check events, runs the 3-bar filter on the
    incoming bar data and publishes the resulting ADD/DEL action."""

    def __init__(self):
        # StoreStack: class to access the redis Stack.
        self.publisher = RedisPublisher(PUBSUB_KEYS.EVENT_BAR_STACK_ADD)
        self.publisherTrade = RedisPublisher(PUBSUB_KEYS.EVENT_BAR_TRADE_ADD)
        self.subscriber = RedisSubscriber(
            PUBSUB_KEYS.EVENT_BAR_CANDIDATE_CHECK, None, self.filterCheck)

    # return all symbols stored in the Stack (not used)
    # NOTE(review): ``self.stack`` is never assigned anywhere in this
    # class, so calling this would raise AttributeError — confirm before use.
    def getStacks(self):
        self.stack.getAll()

    def getPriceData(self, data):
        """Reduce the incoming bar dicts to (timestamp, close) tuples."""
        result = []
        for item in data:
            item = (item['t'], item['c'])
            result.append(item)
        return result

    def filterCheck(self, data):
        """Apply the 3-bar filter to one candidate message and publish the
        annotated message on both downstream channels."""
        try:
            symbol = data['symbol']
            logging.info(
                f'EVENT_BAR_CANDIDATE_CHECK.StudyThreeBarsCandidates.filterCheck {symbol}')
            timeframe = data['period']
            prices = self.getPriceData(data['data'])
            _, result = StudyThreeBarsFilter.potentialList(
                symbol, prices, timeframe)
            data['action'] = result
            self.publisher.publish(data)
            self.publisherTrade.publish(data)
            print('done')
        except Exception as e:
            logging.warning(
                f'Error EVENT_BAR_CANDIDATE_CHECK.StudyThreeBarsCandidates.filterCheck - {data} {e}')

    def start(self):
        """Block on the subscriber loop; Ctrl-C stops it cleanly."""
        try:
            self.subscriber.start()
        except KeyboardInterrupt:
            self.subscriber.stop()
        except Exception as e:
            logging.warning(
                f'Error EVENT_BAR_CANDIDATE_CHECK.StudyThreeBarsCandidates.start - {e}')

    @staticmethod
    def run():
        """Entry point: construct the service and start listening."""
        logging.info('EVENT_BAR_CANDIDATE_CHECK.StudyThreeBarsCandidates.run')
        app = StudyThreeBarsCandidates()
        app.start()
if __name__ == "__main__":
app: StudyThreeBarsCandidates = None
args = sys.argv[1:]
if len(args) > 0 and (args[0] == "-t" or args[0] == "-table"):
data = {"type": "threebars", "symbol": "FANG", "period": "2Min",
"data": [
{"t": 1635369840, "c": 10.4, "o": 10.6,
"h": 10.8, "l": 10.15, "v": 2000.0},
{"t": 1635369960, "c": 10.6, "o": 10.6,
"h": 10.8, "l": 10.25, "v": 2000.0},
{"t": 1635370080, "c": 10.2, "o": 10.3,
"h": 10.5, "l": 10.05, "v": 2000.0},
{"t": 1635370200, "c": 10.7, "o": 10.1,
"h": 10.8, "l": 10.05, "v": 2000.0},
{"t": 1635370320, "c": 10.7, "o": 10.1,
"h": 10.8, "l": 10.05, "v": 2000.0}
]}
app = StudyThreeBarsCandidates()
app.filterCheck(data)
| true |
949aae8ec875121498b8454d6720befaa5f6b8fa | Python | peter-tang2015/cplusplus | /PetersPyProjects/PetersPyProjects/Dictionary/Test_DictionaryTrie.py | UTF-8 | 2,447 | 3.21875 | 3 | [] | no_license | import unittest
from DictionaryTrie import DictionaryTrie
class Test_DictionaryTrie(unittest.TestCase):
    """Unit tests for DictionaryTrie: lookup, removal, ordered traversal
    and prefix queries over a small fixed word set."""
    def __init__(self, methodName = 'runTest'):
        # A fresh trie with 8 words is built for every test instance.
        super(Test_DictionaryTrie, self).__init__(methodName)
        self.m_Dict = DictionaryTrie()
        self.m_Dict.AddWord("apple")
        self.m_Dict.AddWord("orange")
        self.m_Dict.AddWord("pear")
        self.m_Dict.AddWords(("banana", "melon", "grape", "blueberry", "blue"))
    def test_Find(self):
        # FindWord / FindWordAndGetNode on present and absent words; the
        # children container appears to be indexed by character code
        # (ord('a')..ord('z')) -- confirm against DictionaryTrie.
        testDict = self.m_Dict
        self.assertTrue(testDict.FindWord("apple"))
        self.assertFalse(testDict.FindWord("sdfa"))
        pear = testDict.FindWordAndGetNode("pear")
        self.assertIsNotNone(pear)
        self.assertEqual(pear.GetValue(), "pear")
        for idx in range(0, 26):
            self.assertIsNone(pear.GetChildren()[ord('a')+idx])
        blue = testDict.FindWordAndGetNode("blue")
        self.assertIsNotNone(blue)
        self.assertEqual(blue.GetValue(), "blue")
        for idx in range(0, 26):
            if (ord('a') + idx) == ord('b'):
                self.assertIsNotNone(blue.GetChildren()[ord('a') + idx])
            else:
                # NOTE(review): this branch checks pear's children although
                # the loop is about "blue" -- likely meant blue.GetChildren().
                self.assertIsNone(pear.GetChildren()[ord('a') + idx])
    def test_Remove(self):
        # Removing a word hides it; re-adding restores it.
        testDict = self.m_Dict
        testDict.RemoveWord("apple")
        self.assertFalse(testDict.FindWord("apple"))
        testDict.AddWord("apple")
        self.assertTrue(testDict.FindWord("apple"))
    def test_Traverse(self):
        # Traverse must yield all 8 words in lexicographic order.
        testDict = self.m_Dict
        result = testDict.Traverse()
        self.assertEqual(result[0], "apple")
        self.assertEqual(result[1], "banana")
        self.assertEqual(result[2], "blue")
        self.assertEqual(result[3], "blueberry")
        self.assertEqual(result[4], "grape")
        self.assertEqual(result[5], "melon")
        self.assertEqual(result[6], "orange")
        self.assertEqual(result[7], "pear")
        self.assertEqual(len(result), 8)
    def test_QueryPrefix(self):
        # QueryPrefix returns matches for a known prefix and None otherwise.
        testDict = self.m_Dict
        result = testDict.QueryPrefix("app")
        self.assertEqual(result[0], "apple")
        self.assertEqual(len(result), 1)
        result = testDict.QueryPrefix("adj")
        self.assertIsNone(result)
        result = testDict.QueryPrefix("blu")
        self.assertEqual(result[0], "blue")
        self.assertEqual(result[1], "blueberry")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
c87d0a2d149cfba72a5f0ab66a53bffd4a4b68ad | Python | plipp/Python-Coding-Dojos | /katas/XX-Primers/decorator_sample.py | UTF-8 | 2,916 | 4.28125 | 4 | [
"MIT"
] | permissive | # --------------- 1. Explicit Logging
def info(msg):
    """Print *msg* to stdout prefixed with the "INFO - " level tag."""
    print("INFO - {}".format(msg))
# some business logic with logging
def do_something1(n):
    """Return n + 1, logging the call explicitly (no decorator)."""
    info("do_something1 called with: n={}".format(n))
    return n + 1
# --------------- 2 a) Logging with self-made decorator
def with_logging1(fun):
    """Naive logging decorator.

    Logs the wrapped function's name and arguments before each call, but
    does not preserve fun's metadata (__name__/__doc__).
    """
    def inner(*args, **kwargs):
        message = "{} called with : {},{}".format(fun.__name__, args, kwargs)
        info(message)
        return fun(*args, **kwargs)
    return inner
# -- hand-made
def do_something2(n):
    """Return n + 2; logging is added by the manual re-binding below."""
    return n + 2
do_something2 = with_logging1(do_something2)
# -- with @
@with_logging1 # just a short way of saying: do_something3 = with_logging(do_something3)
def do_something3(n):
    """
    some docstring for do_something3
    :param n: number
    :return: n + 3
    """
    # NOTE: with_logging1 does not copy metadata, so after decoration
    # __name__ is "wrapper" and __doc__ is None (demonstrated in __main__).
    return n + 3
# --------------- 2 b) Logging with self-made decorator, complete
def with_logging2(fun):
    """Logging decorator that manually copies the wrapped function's
    __name__ and __doc__ onto the wrapper."""
    def inner(*args, **kwargs):
        message = "{} called with : {},{}".format(fun.__name__, args, kwargs)
        info(message)
        return fun(*args, **kwargs)
    inner.__name__ = fun.__name__
    inner.__doc__ = fun.__doc__
    return inner
@with_logging2
def do_something4(n):
    """
    some docstring for do_something4
    :param n: number
    :return: n + 4
    """
    # with_logging2 hand-copies __name__ and __doc__ onto its wrapper.
    return n + 4
# --------------- 2 c) Logging with decorator and functools-support
from functools import wraps
def with_logging3(fun):
    """Logging decorator using functools.wraps to preserve metadata."""
    @wraps(fun)
    def inner(*args, **kwargs):
        message = "{} called with : {},{}".format(fun.__name__, args, kwargs)
        info(message)
        return fun(*args, **kwargs)
    return inner
@with_logging3
def do_something5(n):
    """
    some docstring for do_something5
    :param n: number
    :return: n + 5
    """
    # functools.wraps keeps __name__/__doc__ intact through decoration.
    return n + 5
# -------------------------------------------------------------------
if __name__ == '__main__':
    # Demo driver: only section 1 is active; uncomment the blocks below to
    # exercise the decorator variants and inspect their metadata.
    print('{} 1. Explicit Logging\n'.format('-' * 20))
    print(do_something1(4))
    # print('{} 2 a) Logging with self-made decorator\n'.format('-' * 20))
    #
    # print(do_something2(4))
    # print(do_something3(n=4))
    #
    # print("do_something3.__name__: {}".format(do_something3.__name__))
    # print("do_something3.__doc__ : {}".format(do_something3.__doc__))
    #
    # print('{} 2 b) Logging with self-made decorator, complete\n'.format('-' * 20))
    #
    # print(do_something4(n=4))
    # print("do_something4.__name__: {}".format(do_something4.__name__))
    # print("do_something4.__doc__ : {}".format(do_something4.__doc__))
    #
    # print('{} 2 c) Logging with decorator and functools-support\n'.format('-' * 20))
    #
    # print(do_something5(4))
    # print("do_something5.__name__: {}".format(do_something5.__name__))
    # print("do_something5.__doc__ : {}".format(do_something5.__doc__))
# ... further reading
# - http://book.pythontips.com/en/testing/decorators.html
# - http://jamescooke.info/things-to-remember-about-decorators.html
| true |
bd4543d1b56578d953ab154e23b05cb58d5ff14f | Python | MITLLRacecar/racecar-daniel-chuang | /library/racecar_core.py | UTF-8 | 5,135 | 3.015625 | 3 | [
"MIT"
] | permissive | """
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Contains the Racecar class, the top level of the racecar_core library
"""
import abc
import sys
from typing import Callable, Optional
import camera
import controller
import display
import drive
import lidar
import physics
import racecar_utils as rc_utils
class Racecar(abc.ABC):
    """
    The top level racecar module containing several submodules which interface
    with and control the different pieces of the RACECAR hardware.
    """
    def __init__(self) -> None:
        # These are annotation-only declarations (no values are assigned
        # here); concrete subclasses are expected to install the actual
        # submodule instances.
        self.camera: camera.Camera
        self.controller: controller.Controller
        self.display: display.Display
        self.drive: drive.Drive
        self.lidar: lidar.Lidar
        self.physics: physics.Physics
    @abc.abstractmethod
    def go(self) -> None:
        """
        Starts the RACECAR, beginning in default drive mode.
        Note:
            go idles blocks execution until the program is exited when START + END are
            pressed simultaneously.
        """
        pass
    @abc.abstractmethod
    def set_start_update(
        self,
        start: Callable[[], None],
        update: Callable[[], None],
        update_slow: Optional[Callable[[], None]] = None,
    ) -> None:
        """
        Sets the start and update functions used in user program mode.
        Args:
            start: A function called once when the car enters user program mode.
            update: A function called every frame in user program mode. Approximately
                60 frames occur per second.
            update_slow: A function called once per fixed time interval in user
                program mode (by default once per second).
        Note:
            The provided functions should not take any parameters.
        Example::
            # Create a racecar object
            rc = Racecar()
            # Define a start function
            def start():
                print("This function is called once")
            # Define an update function
            def update():
                print("This function is called every frame")
            # Provide the racecar with the start and update functions
            rc.set_start_update(start, update)
            # Tell the racecar to run until the program is exited
            rc.go()
        """
        pass
    @abc.abstractmethod
    def get_delta_time(self) -> float:
        """
        Returns the number of seconds elapsed in the previous frame.
        Returns:
            The number of seconds between the start of the previous frame and
            the start of the current frame.
        Example::
            # Increases counter by the number of seconds elapsed in the previous frame
            counter += rc.get_delta_time()
        """
        pass
    @abc.abstractmethod
    def set_update_slow_time(self, time: float = 1.0) -> None:
        """
        Changes the time between calls to update_slow.
        Args:
            time: The time in seconds between calls to update_slow.
        Example::
            # Sets the time between calls to update_slow to 2 seconds
            rc.set_update_slow_time(2)
        """
        pass
def create_racecar(isSimulation: Optional[bool] = None) -> Racecar:
    """
    Generates a racecar object based on the isSimulation argument or execution flags.
    Args:
        isSimulation: If True, create a RacecarSim, if False, create a RacecarReal,
            if None, decide based on the command line arguments
    Returns:
        A RacecarSim object (for use with the Unity simulation) or a RacecarReal object
        (for use on the physical car).
    Note:
        If isSimulation is None, this function will return a RacecarSim if the program
        was executed with the "-s" flag and a RacecarReal otherwise.
        If the program was executed with the "-d" flag, a display window is created.
        If the program was executed with the "-h" flag, it is run in headless mode,
        which disables the display module.
    """
    # Directory containing this library; used to locate the sim/real backends.
    library_path: str = __file__.replace("racecar_core.py", "")
    isHeadless: bool = "-h" in sys.argv
    initializeDisplay: bool = "-d" in sys.argv
    # If isSimulation was not specified, set it to True if the user ran the program with
    # the -s flag and false otherwise
    if isSimulation is None:
        isSimulation = "-s" in sys.argv
    racecar: Racecar
    if isSimulation:
        # The backend module is imported lazily after extending sys.path,
        # so only the selected backend's dependencies need to be present.
        sys.path.insert(1, library_path + "simulation")
        from racecar_core_sim import RacecarSim
        racecar = RacecarSim(isHeadless)
    else:
        sys.path.insert(1, library_path + "real")
        from racecar_core_real import RacecarReal
        racecar = RacecarReal(isHeadless)
    if initializeDisplay:
        racecar.display.create_window()
    rc_utils.print_colored(
        ">> Racecar created with the following options:"
        + f"\n    Simulation (-s): [{isSimulation}]"
        + f"\n    Headless (-h): [{isHeadless}]"
        + f"\n    Initialize with display (-d): [{initializeDisplay}]",
        rc_utils.TerminalColor.pink,
    )
    return racecar
| true |
0eecaa1357ff9dc01369a64ab2b1c123fd4a7e6a | Python | Lloyd-Pottiger/Influence-Maximization-Problem | /IMP.py | UTF-8 | 4,399 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import multiprocessing as mp
import time
import sys
import argparse
import os
import numpy as np
from numpy import random
import math
# Seed NumPy's legacy global RNG differently on each run.
random.seed(int(time.time()))
def Comb(x, y):
    """Binomial coefficient C(x, y) = x! // (y! * (x - y)!).

    A proper def instead of a lambda bound to a name (PEP 8 E731), so
    tracebacks and introspection show a useful name.
    """
    return math.factorial(x) // (math.factorial(y) * math.factorial(x - y))
# Number of worker processes used by the multiprocessing pool.
core = 8
class Graph(object):
    """Directed weighted graph stored as reverse adjacency lists.

    post_edge[b] holds (a, w) tuples for every edge a -> b with weight w;
    vertices are numbered 1..m (slot 0 is unused).
    """

    def __init__(self, m, n):
        self.vertex = m                 # number of vertices
        self.edge = n                   # number of edges
        self.p = []                     # kept for compatibility (unused here)
        self.post_edge = [[] for _ in range(m + 1)]

    def insert(self, a, b, w):
        """Record the directed edge a -> b with weight w."""
        self.post_edge[b].append((a, w))

    def post_to(self, b):
        """Return the list of (source, weight) pairs of in-edges of b."""
        return self.post_edge[b]
def read_graph(file):
    """Build a Graph from a text file.

    Format: first line "m n" (vertex and edge counts), then n lines of
    "a b w" describing an edge a -> b with weight w.
    """
    with open(file) as fh:
        header = fh.readline().split()
        num_nodes, num_edges = int(header[0]), int(header[1])
        g = Graph(num_nodes, num_edges)
        for _ in range(num_edges):
            a, b, w = (float(tok) for tok in fh.readline().split())
            g.insert(int(a), int(b), w)
    return g
def IMM(g, k, e, l):
    """Influence Maximization via Martingales: return a size-k seed set.

    Rescales the confidence parameter l, samples reverse-reachable sets,
    and greedily selects the seeds covering the most of them.
    """
    n = g.vertex
    l = l * (1 + math.log(2) / math.log(n))
    rr_sets = Sampling(g, k, e, l)
    seeds, _ = NodeSelection(rr_sets, k)
    return seeds
def Sampling(G: Graph, k, e, l):
    """Phase 1 of IMM: collect enough reverse-reachable (RR) sets.

    Halves the guess x = n/2^i for OPT until greedy coverage certifies a
    lower bound LB, then tops the collection up to lambda_star / LB sets.
    The alpha/beta/lambda formulas appear to follow the IMM paper
    (Tang et al.) -- confirm against that reference.
    """
    R = list()
    LB = 1
    e_ = math.sqrt(2) * e
    n = G.vertex
    alpha = math.sqrt(l * math.log(n) + math.log(2))
    beta = math.sqrt((1 - 1 / math.e) * (math.log(Comb(n, k)) + l * math.log(n) + math.log(2)))
    lambda_ = (2 + 2 * e_ / 3) * (math.log(Comb(n, k)) + l * math.log(n) + math.log(math.log2(n))) * n / (pow(e_, 2))
    for i in range(1, int(math.log2(n))):
        x = n / pow(2, i)
        theta = lambda_ / x
        # creat_mp spawns `core` workers that each generate `cnt` RR sets,
        # hence the division by `core` here.
        cnt = (theta - len(R)) // core
        R = creat_mp(R, cnt)
        # Second return value of NodeSelection is the covered fraction.
        F_R = NodeSelection(R, k)[1]
        if n * F_R >= (1 + e_) * x:
            LB = n * F_R / (1 + e_)
            break
    lambda_star = 2 * n * pow((1 - 1 / math.e) * alpha + beta, 2) * pow(e, -2)
    theta = lambda_star / LB
    # NOTE(review): unlike the loop above, this count is NOT divided by
    # `core`, so the final top-up generates roughly core * cnt sets.
    cnt = theta - len(R)
    if cnt > 0:
        R = creat_mp(R, cnt)
    return R
def creat_mp(R, cnt):
    """Extend R with RR sets produced by `core` worker processes.

    Each worker runs get_RR(G, cnt), so roughly core * cnt new sets are
    appended.  Relies on the module-level graph G (bound in __main__).
    """
    pool = mp.Pool(core)
    result = []
    for i in range(core):
        # NOTE: G here is the module-level global, not a parameter.
        result.append(pool.apply_async(get_RR, args=(G, cnt)))
    pool.close()
    pool.join()
    for res in result:
        R.extend(res.get())
    return R
def NodeSelection(R, k):
    """Greedy max-coverage over the RR sets in R.

    Returns (S, F): S is the chosen seed set of size k and F is the
    fraction of RR sets covered by S.  Uses the module-level graph G
    only for its vertex count.
    """
    S = set()
    rr_dict = {}      # node -> indices of RR sets containing it
    R_S_k = set()     # indices of RR sets covered by the seeds so far
    # cnt[u] = number of RR sets containing node u.
    cnt = [0 for i in range(G.vertex + 1)]
    for i in range (0, len(R)):
        rr = R[i]
        for u in rr:
            if u not in rr_dict:
                rr_dict[u] = set()
            rr_dict[u].add(i)
            cnt[u] += 1
    for i in range(k):
        # Pick the node with the highest (remaining) coverage count.
        v = cnt.index(max(cnt))
        S.add(v)
        R_S_k = R_S_k.union(rr_dict[v])
        cur_dict = rr_dict[v].copy()
        # Discount every node appearing in the sets that v covers.
        # NOTE(review): an RR set covered by more than one selected seed is
        # discounted multiple times here -- verify this matches the intended
        # greedy coverage update.
        for d in cur_dict:
            for n in R[d]:
                cnt[n] -= 1
    return S, len(R_S_k)/len(R)
def get_RR(G, cnt):
    """Generate `cnt` random reverse-reachable sets on G.

    Each set starts at a random root v and repeatedly walks in-edges,
    keeping each predecessor with probability equal to the edge weight w
    (independent-cascade style sampling).
    """
    RR = []
    while cnt > 0:
        n = G.vertex
        # NOTE(review): numpy's random.randint excludes the upper bound,
        # so vertex n can never be chosen as a root -- likely off by one.
        v = random.randint(1, n)
        all_activity_set = [v]    # every node reached so far
        activity_set = [v]        # frontier expanded this round
        while activity_set:
            new_activity_set = []
            for u in activity_set:
                for (v, w) in G.post_to(u):
                    if v not in all_activity_set:
                        # Keep the in-edge with probability w.
                        if random.random() <= w:
                            new_activity_set.append(v)
                            all_activity_set.append(v)
            activity_set = new_activity_set
        RR.append(all_activity_set)
        cnt -= 1
    return RR
if __name__ == '__main__':
    # (original note, in Chinese): read the parameters from the command line.
    '''
    从命令行读参数
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--file_name', type=str, default='Test/network.txt')
    parser.add_argument('-k', '--seedCount', type=int, default=5)
    parser.add_argument('-m', '--model', type=str, default='IC')
    parser.add_argument('-t', '--time_limit', type=int, default=60)
    args = parser.parse_args()
    file_name = args.file_name
    k = args.seedCount
    # NOTE(review): `model` is parsed but never used below.
    model = args.model
    time_limit = args.time_limit
    G = read_graph(file_name)
    l = 1
    # Heuristic accuracy parameter: bigger graphs / tighter time limits get
    # a larger epsilon (i.e. fewer RR sets), clamped below by graph size.
    e = math.sqrt((G.vertex + G.edge) * (k + l) * math.log(G.vertex)/(5e8 * time_limit))
    if G.vertex < 500 and k < 10:
        if e < 0.01:
            e = 0.01
    else:
        if e < 0.08:
            e = 0.08
        elif e < 0.1:
            e = 0.1
    S_k_star = IMM(G, k, e, l)
    for seed in S_k_star:
        print(seed)
    # (original note, in Chinese): force-exit when the program finishes to
    # skip garbage-collection time; without this the process needs a few
    # extra seconds to fully exit.
    '''
    程序结束后强制退出,跳过垃圾回收时间, 如果没有这个操作会额外需要几秒程序才能完全退出
    '''
    sys.stdout.flush()
| true |
537a2a502f80841382add998a06bf158678de25d | Python | ntabris/princeton-theses | /get_data.py | UTF-8 | 2,231 | 2.828125 | 3 | [] | no_license | import os
import re
import operator
import json
import urllib.request
import pandas as pd
import numpy as np
def get_data(filename, url):
    """Return JSON text for *filename*, using a local cache in ./data.

    Looks for data/<filename>.json first; on a cache miss, downloads *url*
    (requesting JSON) and stores the response for future calls.

    Fixes over the original: `exist_ok=True` replaces a bare-except around
    makedirs, file handles are managed with `with` (no leaks on error), and
    only OSError triggers the download fallback instead of any exception.
    """
    os.makedirs('data', exist_ok=True)
    cache_path = 'data/%s.json' % filename
    try:
        with open(cache_path, 'r') as f:
            j = f.read()
        print("Using saved %s." % filename)
    except OSError:
        # Cache miss (or unreadable cache): fetch and persist.
        req = urllib.request.Request(url, headers={'Accept': 'application/json'})
        res = urllib.request.urlopen(req)
        j = res.read().decode('utf-8')
        with open(cache_path, 'w') as f:
            f.write(j)
        print("Downloaded %s and saved for future use." % filename)
    return j
def item_process(json_string, id=''):
    """Flatten a DSpace metadata JSON array into a dict.

    Only the whitelisted Dublin Core keys are kept, each stored under its
    last dotted component (e.g. "dc.title" -> "title") and initialized to
    None.  The "extent" value is reduced to digits only.  When *id* is
    truthy it is stored first under 'id'.
    """
    wanted = ("dc.contributor.advisor", "dc.contributor.author",
              "dc.date.created", "dc.format.extent",
              "dc.title", "pu.date.classyear")
    data = {}
    if id:
        data['id'] = id
    for full_key in wanted:
        data[full_key.split('.')[-1]] = None
    for entry in json.loads(json_string):
        full_key = entry['key']
        short_key = full_key.split('.')[-1]
        value = entry['value']
        if short_key == 'extent':
            # remove non-numeric (" pages") from extent
            value = re.sub(r'[^\d]', '', value)
        if full_key in wanted:
            data[short_key] = value
    return data
def get_list():
    """Fetch (or load from cache) the listing of all items in collection 395."""
    return get_data(
        'list',
        'https://dataspace.princeton.edu/rest/collections/395/items?limit=3000')
if __name__ == '__main__':
    # Fetch the collection listing, then each item's metadata (cached on
    # disk by get_data), flatten with item_process, and export to CSV.
    j = get_list()
    print("data size:",len(j))
    list_json = json.loads(j)
    ids = [ i['id'] for i in list_json ]
    print('item count:',len(ids))
    data = dict()
    for id in ids:
        u = 'https://dataspace.princeton.edu/rest/items/%s/metadata' % id
        n = 'item_%s'%id
        json_string = get_data(n,u)
        data[id] = item_process(json_string,id)
        data[id]['id'] = id
    # Rows = items, columns = metadata fields.
    df = pd.DataFrame( data ).transpose()
    df['classyear'] = pd.to_numeric( df['classyear'] )
    df['extent'] = pd.to_numeric( df['extent'] )
    df.to_csv('data/senior_theses.csv')
495bc8b28cd857e753ac0f6a0a6deff6a3949439 | Python | tiagocoutinho/tc-python | /tcp_bridge.py | UTF-8 | 2,570 | 2.703125 | 3 | [
"MIT"
] | permissive | """
requirements:
$ pip install click
run with:
$ python tcp_proxy --listen=:5000 --connect=192.168.1.100:5000
"""
import socket
import select
import logging
import click
def address(addr):
    """Convert a "host:port" string into a (host, port-as-int) tuple."""
    host, _, port = addr.partition(':')
    return host, int(port)
@click.command()
@click.option('--listen', default=('', 5000), type=address)
@click.option('--connect', type=address, help='ex: 192.168.1.100:5000')
@click.option('--log-level', default='INFO')
def main(listen, connect, log_level):
    """Bridge a single TCP client to a hardware TCP endpoint.

    Listens on --listen, dials --connect once at startup, then shuttles
    bytes in both directions with select().  Only one client is served at
    a time; extra connections are refused.  If the hardware side drops,
    the process exits (no reconnection support).
    """
    logging.basicConfig(
        level=log_level, format="%(asctime)s:%(levelname)s:%(message)s"
    )
    serv = socket.socket()
    serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    serv.bind(listen)
    serv.listen(1)
    hardware = socket.create_connection(connect)
    socks = [serv, hardware]
    client = None
    try:
        while True:
            conns = socks + ([client] if client else [])
            readers, _, _ = select.select(conns, (), ())
            for reader in readers:
                if reader == serv:
                    sock, addr = reader.accept()
                    logging.info('NEW connection from %r', addr)
                    if client:
                        # don't accept more than one client (unpolite solution)
                        logging.info('REFUSE connection from %r', addr)
                        sock.close()
                    else:
                        client = sock
                elif reader is hardware:
                    data = reader.recv(2**12)
                    if data:
                        logging.info('HW -> CLIENT: %r', data)
                        if client:
                            client.sendall(data)
                        else:
                            logging.warning('Dropped HW -> CLIENT: %r', data)
                    else:
                        logging.error('Hardware disconnected!')
                        # we don't support reconnection, so just bail out
                        exit(1)
                else:  # must be a client socket
                    assert reader is client
                    data = client.recv(2**12)
                    if data:
                        logging.info('CLIENT -> HW: %r', data)
                        hardware.sendall(data)
                    else:
                        logging.info('CLIENT disconnected')
                        # BUG FIX: close the disconnected client's socket
                        # before dropping the reference (the fd was leaked).
                        client.close()
                        client = None
    finally:
        # BUG FIX: the active client socket was never closed on shutdown;
        # include it alongside the listener and hardware sockets.
        for sock in socks + ([client] if client else []):
            sock.close()
if __name__ == "__main__":
    main()
| true |
82fd92479afe9e5f2a8143d40225fa1cdad1d797 | Python | ericmanzi/underactuated_robotics_lqr | /minipset-underactuated-robotics/source/ps1/simulate.py | UTF-8 | 3,030 | 2.90625 | 3 | [] | no_license | import os
from math import sin, cos, pi
import matplotlib.pyplot as plt
import pendulum
from IPython.display import display, HTML, clear_output, display_html, Javascript
# --- Simulation parameters ---
dt = .001    # integration time step passed to pendulum.Pendulum
end = 10     # simulation length passed to pendulum.Pendulum
fps = 25.    # animation frame rate: a frame is saved every 1/fps sim seconds
# --- Initial conditions ---
x_0 = 0.       # cart position (plotted in meters, see xlabel below)
dx_0 = 0.      # cart velocity
th_0 = 2*pi/8  # pole angle (plotted in radians)
dth_0 = 0.     # pole angular velocity
def simulate(x_0, dx_0, th_0, dth_0, K, sim_name, use_swing_up):
    """Integrate the cart-pole, plot the trajectory, render per-frame PNGs
    into ./img and encode them with ffmpeg into
    ./media/pendulum_anim_<sim_name>.mp4.  Returns the final state row.

    Uses the module-level globals dt, end and fps.  Data columns (from the
    plots below): data[:,0] = time, data[:,1] = x, data[:,3] = theta.
    """
    p = pendulum.Pendulum(dt, [x_0, dx_0, th_0, dth_0], end, K, use_swing_up)
    data = p.integrate()
    # print(data[len(data)-1])
    fig = plt.figure(0)
    fig.suptitle("Cart Pole")
    # Bottom strip: time series of x and theta; top area: cart animation.
    cart_time_line = plt.subplot2grid(
        (12, 12),
        (9, 0),
        colspan=12,
        rowspan=3
    )
    cart_plot = plt.subplot2grid(
        (12,12),
        (0,0),
        rowspan=8,
        colspan=12
    )
    cart_time_line.axis([
        0,
        10,
        min(data[:,1])*1.1,
        max(data[:,1])*1.1+.1,
    ])
    cart_time_line.set_xlabel('time (s)')
    cart_time_line.set_ylabel('x (m)')
    cart_time_line.plot(data[:,0], data[:,1],'r-')
    # Second y-axis on the same strip for the pole angle.
    pendulum_time_line = cart_time_line.twinx()
    pendulum_time_line.axis([
        0,
        10,
        min(data[:,3])*1.1-.1,
        max(data[:,3])*1.1
    ])
    pendulum_time_line.set_ylabel('theta (rad)')
    pendulum_time_line.plot(data[:,0], data[:,3],'g-')
    cart_plot.axes.get_yaxis().set_visible(False)
    vid_path = './media/pendulum_anim_%s.mp4' % sim_name
    # Create image output directory if it doesn't exist
    # NOTE(review): this shells out to "rm -rf img/" -- destructive and
    # platform-specific; shutil.rmtree would be safer.
    os.system("rm -rf img/")
    try:
        os.makedirs('./img')
    except OSError:
        pass
    # Delete pendulum output video if it already exists
    try:
        os.remove(vid_path)
    except OSError:
        pass
    time_bar, = cart_time_line.plot([0,0], [10, -10], lw=3)
    t = 0
    frame_number = 1
    # Save a frame whenever simulated time has advanced by >= 1/fps
    # (and always for the first point, where t is still 0).
    for point in data:
        if point[0] >= t + 1./fps or not t:
            draw_point(point, t, cart_time_line, cart_plot, time_bar)
            t = point[0]
            fig.savefig('img/_tmp%03d.png' % frame_number)
            frame_number += 1
    os.system("ffmpeg -framerate 25 -i img/_tmp%03d.png -c:v libx264 -r 30 -pix_fmt yuv420p " + vid_path)
    return data[len(data)-1]
def draw_point(point, t, cart_time_line, cart_plot, time_bar):
    """Redraw one animation frame: move the time cursor to t and draw the
    cart at x = point[1] with the pole at angle point[3] on cart_plot."""
    time_bar.set_xdata([t, t])
    cart_plot.cla()
    cart_plot.axis([-1.1,1.1,-.8,.8])
    # Cart
    cart_plot.plot([point[1]-.1,point[1]+.1],[0,0],'r-',lw=15)
    # Wheels
    wc='#4e4a4a'
    cart_plot.scatter(point[1]-0.1, -0.13, s=150, facecolors=wc, edgecolors=wc)
    cart_plot.scatter(point[1]+0.1, -0.13, s=150, facecolors=wc, edgecolors=wc)
    # Floor
    cart_plot.plot([-1.1,1.1],[-0.215,-0.215],color='lightsteelblue',lw=5)
    # Pole (length 0.4, drawn from the cart center at the pole angle)
    cart_plot.plot([point[1],point[1]+.4*sin(point[3])],[0,.4*cos(point[3])],'g-', lw=4)
from numpy import matrix
# simulate(x_0, dx_0, 5*pi/16, dth_0, "boa_theta_5-16pi") # Fails
# Run one simulation with swing-up enabled; the 1x4 matrix is presumably
# the feedback gain K over [x, dx, th, dth] -- confirm in pendulum.py.
simulate(x_0, dx_0, 2*pi/8, dth_0, matrix([[ -1., -2., -40., -7.]]), "test_plot", True)
# simulate(x_0, 0.0, 1., dth_0, "freefall")
| true |
68ffd18f2c1534401db14f3faea09fa9be136fcf | Python | parkjh4550/PyTorch | /RNN/RNN_sentence_generation/utils.py | UTF-8 | 2,507 | 2.765625 | 3 | [] | no_license | import string
import torch
from torch import nn, optim
from statistics import mean
import tqdm
def build_vocab():
    """Return (characters, vocab size, char->index dict) over string.printable."""
    chars = string.printable
    lookup = {ch: idx for idx, ch in enumerate(chars)}
    return chars, len(chars), lookup
def str2ints(s, vocab_dict):
    """Map each character of *s* to its index in *vocab_dict*."""
    return list(map(vocab_dict.__getitem__, s))
def ints2str(x, vocab_array):
    """Map each index in *x* back to its character and join into a string."""
    return "".join(vocab_array[i] for i in x)
def generate_seq(net, text_dataset, start_phrase='The King said ',
                 length=200, temperature=0.8, device='cpu'):
    """Sample *length* characters from *net*, seeded with *start_phrase*.

    The start phrase is run through the network once to prime the hidden
    state; characters are then drawn one at a time from the softmax of the
    last output and fed back in.  Returns start_phrase plus the sampled text.

    NOTE(review): *temperature* is currently unused -- sampling always
    exponentiates the raw outputs; confirm whether temperature scaling was
    intended.  (Debug shape prints from the original were removed.)
    """
    net.to(device)
    net.eval()  # disable dropout etc. while sampling
    result = []
    # Encode the seed phrase as a (1, len(start_phrase)) int64 batch.
    start_tensor = torch.tensor(
        str2ints(start_phrase, text_dataset.vocab_dict),
        dtype=torch.int64
    ).to(device)
    x0 = start_tensor.unsqueeze(0)
    # Prime the hidden state with the whole seed phrase.
    o, h = net(x0)
    # Last time step's outputs -> sampling weights.
    out_dist = o[:, -1].view(-1).exp()
    top_i = torch.multinomial(out_dist, 1)[0]
    for _ in range(length):
        inp = torch.tensor([[top_i]], dtype=torch.int64).to(device)
        o, h = net(inp, h)
        out_dist = o.view(-1).exp()
        top_i = torch.multinomial(out_dist, 1)[0]
        result.append(top_i)
    return start_phrase + ints2str(result, text_dataset.char_arr)
def train_net(net, data_loader, dataset, n_iter=10, optimizer=optim.Adam,
              loss_f=nn.CrossEntropyLoss(), device='cpu'):
    """Train *net* for *n_iter* epochs on next-character prediction.

    Each batch is a (batch, seq) tensor of character ids; the input is the
    sequence without its last character and the target is the sequence
    shifted left by one.  After every epoch the mean loss is printed and a
    sample sequence is generated without gradients.

    NOTE(review): loss_f is created once at def time (shared default);
    harmless here, but pass an instance explicitly to override.
    """
    opt = optimizer(net.parameters())  # renamed: `optim` shadowed the module
    for epoch in range(n_iter):
        net.train()
        losses = []
        for data in tqdm.tqdm(data_loader):
            x = data[:, :-1]   # input: all but the last character
            y = data[:, 1:]    # target: all but the first character
            x, y = x.to(device), y.to(device)
            y_pred, _ = net(x)
            loss = loss_f(y_pred.view(-1, dataset.vocab_size), y.view(-1))
            net.zero_grad()
            loss.backward()
            opt.step()
            losses.append(loss.item())
        print(epoch, mean(losses))
        with torch.no_grad():
            # BUG FIX: `device` was passed positionally and silently became
            # the start_phrase argument of generate_seq.
            print(generate_seq(net, dataset, device=device))
ea54d18c6fc78fe46010ff7839bb0e8b583e768c | Python | JakeJaeHyongKim/I210 | /0210gp2.py | UTF-8 | 139 | 3.484375 | 3 | [] | no_license | def print_range(low, high, factor):
for number in range(low, high):
if num%factor==0:
print(num, divided from
| true |
a22e32286b703ee89cfd21bd70abdbab8b47e5fe | Python | cfm25/Geog489 | /scripttool_.py | UTF-8 | 5,128 | 2.640625 | 3 | [] | no_license | # Filename: scripttool.py
# Author: Charles Moser
# Source: Adapted from Lesson 1, Geography 489, Penn State
# Date: 2/27/2019
# Input 1: Environment (geodatabase)
# Input 2: polygon feature class for clipping
# Input 3: feature class(s) to be clipped
# Output: path to output folder
# Use: Input polygon feature class to clip multiple feature classes
# This file imports the "worker" function from the "muticode.py"
# Python script which calls the Clip tool (data management).
# Finally, the clipped features are added to an open project map.
import os, sys
import arcpy
import multiprocessing
from multicode_ import worker
from mapping import add_layers
import glob
import time
start_time = time.time()
# Input parameters
arcpy.env.workspace = arcpy.GetParameterAsText(0)
Path = arcpy.env.workspace
# Feature Class that will serve as clip feature
clipper = arcpy.GetParameterAsText(1)
# Target feature class(s) that will be clipped
tobeclipped = arcpy.GetParameterAsText(2)
# Folder for saving clipped shapefiles
outFolder = arcpy.GetParameterAsText(3)
# List of target feature classes
clipList = tobeclipped.split(';')
def get_install_path():
''' Return 64bit python install path from registry (if installed and registered),
otherwise fall back to current 32bit process install path.
'''
if sys.maxsize > 2**32: return sys.exec_prefix #We're running in a 64bit process
#We're 32 bit so see if there's a 64bit install
path = r'SOFTWARE\Python\PythonCore\2.7'
from _winreg import OpenKey, QueryValue
from _winreg import HKEY_LOCAL_MACHINE, KEY_READ, KEY_WOW64_64KEY
try:
with OpenKey(HKEY_LOCAL_MACHINE, path, 0, KEY_READ | KEY_WOW64_64KEY) as key:
return QueryValue(key, "InstallPath").strip(os.sep) #We have a 64bit install, so return that.
except: return sys.exec_prefix #No 64bit, so return 32bit path
def mp_handler():
for item in clipList:
print("here is the list: " + item)
try:
# Create a list of object IDs for clipper polygons
arcpy.AddMessage("Creating Polygon OID list...")
print("Creating Polygon OID list...")
clipperDescObj = arcpy.Describe(clipper)
field = clipperDescObj.OIDFieldName
idList = []
with arcpy.da.SearchCursor(clipper, [field]) as cursor:
for row in cursor:
id = row[0]
idList.append(id)
arcpy.AddMessage("There are " + str(len(idList)) + " object IDs (polygons) to process.")
print("There are " + str(len(idList)) + " object IDs (polygons) to process.")
# Create a task list with parameter tuples for each call of the worker function. Tuples consist of the clippper, tobeclipped, field, and oid values.
jobs = []
for item in clipList:
tobeclipped = Path + "\\" + item
for id in idList:
jobs.append((clipper,tobeclipped,field,id, outFolder)) # adds tuples of the parameters that need to be given to the worker function to the jobs list
arcpy.AddMessage("Job list has " + str(len(jobs)) + " elements.")
print("Job list has " + str(len(jobs)) + " elements.")
# Create and run multiprocessing pool.
multiprocessing.set_executable(os.path.join(get_install_path(), 'pythonw.exe')) # make sure Python environment is used for running processes, even when this is run as a script tool
arcpy.AddMessage("Sending to pool")
print("Sending to pool")
cpuNum = multiprocessing.cpu_count() # determine number of cores to use
print("there are: " + str(cpuNum) + " cpu cores on this machine")
with multiprocessing.Pool(processes=cpuNum) as pool: # Create the pool object
res = pool.starmap(worker, jobs) # run jobs in job list; res is a list with return values of the worker function
# If an error has occurred report it
failed = res.count(False) # count how many times False appears in the list with the return values
if failed > 0:
arcpy.AddError("{} workers failed!".format(failed))
print("{} workers failed!".format(failed))
arcpy.AddMessage("Finished multiprocessing!")
except arcpy.ExecuteError:
# Geoprocessor threw an error
arcpy.AddError(arcpy.GetMessages(2))
print("Execute Error:", arcpy.ExecuteError)
except Exception as e:
# Capture all other errors
arcpy.AddError(str(e))
print("Exception:", e)
# Get list of shapefiles in the output folder
list_layers = glob.glob(outFolder + "\\" + "*.shp")
# Call the function to add clipped shapefiles to open project
add_layers(outFolder, list_layers)
# Print out total processing time
arcpy.AddMessage("--- %s seconds ---" % (time.time() - start_time))
if __name__ == '__main__':
mp_handler()
| true |
e25128996fd74472eeae208cdc84583dc1afca51 | Python | ecaruyer/qspace | /qspace/bases/utils.py | UTF-8 | 226 | 3.3125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | from scipy.misc import factorial
def binomial(alpha, k):
    """Return the generalized binomial coefficient C(alpha, k).

    Computed as alpha * (alpha - 1) * ... * (alpha - k + 1) / k!, which is
    valid for real (non-integer) alpha as well.
    """
    result = 1.0
    for i in range(k):
        # BUG FIX: the factor must decrease with i; the original multiplied
        # by the constant (alpha - k) on every iteration.
        result = result * (alpha - i)
    return result / factorial(k)
| true |
12d9247e95b538b081294230d6c982e7b3a5d13e | Python | billyfung/tensorflow | /helloworld.py | UTF-8 | 764 | 2.703125 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
# NOTE: this script uses Python 2 syntax (print statements) and the
# TensorFlow 1.x Session/ConfigProto API; it will not run on Python 3 / TF 2.
#on core2duo laptop, set cores to 2
sess = tf.Session( config=tf.ConfigProto(inter_op_parallelism_threads=2,
                    intra_op_parallelism_threads=2))
hello = tf.constant('Hello World')
print sess.run(hello)
# Using the session as a context manager makes it the default session,
# so the .eval() calls below work without an explicit session argument.
with sess:
    input1 = tf.constant(1, shape = [4])
    input2 = tf.constant(2, shape = [4])
    output = (input1 + input2)
    result = output.eval()
    print result
    # 1x4 input row vector times a random 4x2 weight matrix.
    input_features = tf.constant(np.reshape([1, 0, 0, 1], (1,4)).astype(np.float32))
    weights = tf.constant(np.random.randn(4,2).astype(np.float32))
    output = tf.matmul(input_features, weights) #matrix multiplication
    print "Input:"
    print input_features.eval()
    print "Weights:"
    print weights.eval()
    print "Output"
    print output.eval()
| true |
e93a7a52b4cb3d3130ddbc846c375a8be1416635 | Python | yukiar/phrase_alignment_cted | /src/wordvec_based_phrase_sim.py | UTF-8 | 1,133 | 2.640625 | 3 | [
"MIT"
] | permissive | from phrase_sim import PhraseSim
from gensim.models.fasttext import FastText as FT_gensim
import numpy as np
class wordvec_sim(PhraseSim):
    """Phrase-similarity scorer backed by a FastText word-vector model.

    Phrase vectors are built by max- or mean-pooling the token vectors of
    a node; similarity is a scaled cosine (via the base class helper).
    """
    # Pooling mode constants (compared against self.pooling_method).
    MAXPOOLING = 0
    MEANPOOLING = 1
    model_name = 'FastText'
    def __init__(self, pooling, path_to_fasttext_model):
        ############ Hyper Paramer #############
        # NULL_SCORE evaluates to 250.0 (abs(1 - 0.5) * 500); used as the
        # fixed cost of aligning a node to nothing.
        self.NULL_SCORE = np.abs(1 - 0.5) * 500
        # set_pooling presumably comes from PhraseSim and stores
        # self.pooling_method -- confirm in the base class.
        self.set_pooling(pooling)
        ########################################
        self.model = FT_gensim.load_fasttext_format(path_to_fasttext_model)
    def align_score(self, n, m, sent_idx):
        # Scaled cosine similarity between the pooled vectors of nodes n
        # and m.  NOTE(review): sent_idx is unused here.
        n_vec = self._get_vec(n)
        m_vec = self._get_vec(m)
        cos_sim = self._scaled_cossim(n_vec, m_vec)
        return cos_sim
    def null_align_score(self, n):
        # Constant score for a null alignment, independent of the node.
        return self.NULL_SCORE
    def _get_vec(self, node):
        # Stack per-token vectors, then pool over the token axis.
        vecs = np.zeros((len(node.tokens), self.model.vector_size))
        for i, w in enumerate(node.tokens):
            vecs[i] = self.model[w]
        if self.pooling_method == self.MAXPOOLING:
            pooled_vec = vecs.max(axis=0)
        else:
            pooled_vec = vecs.mean(axis=0)
        return pooled_vec
| true |
4fcc55576d0ea8070a549114db95210738f6a281 | Python | Aasthaengg/IBMdataset | /Python_codes/p03626/s537991347.py | UTF-8 | 265 | 2.8125 | 3 | [] | no_license | N = int(input())
# Competitive-programming solution: reads a string, partitions it into
# blocks, and multiplies per-block factors modulo 1e9+7.
DS = list(input())
i = 0
DC = []   # widths of consecutive blocks of DS
# DS.count(DS[i]) counts ALL occurrences of the character at position i.
# NOTE(review): this equals the block's run length only if each character
# occurs in a single contiguous run -- presumably guaranteed by the input
# format of this problem; confirm against the task statement.
while i<N:
    cnt = DS.count(DS[i])
    DC.append(cnt)
    i += cnt
# Initial count: 3 times the first block's width (3 if width 1, 6 if 2).
ans = 3 * DC[0]
for i in range(1,len(DC)):
    # Per-block multiplier chosen from the previous/current block widths.
    if DC[i-1]==1:
        ans = (ans * 2)% 1000000007
    elif DC[i]==2:
        ans = (ans * 3)% 1000000007
print(ans)
c0ae7168d7cb3a86fd749eb9fe527806ff0c975a | Python | fdmxfarhan/Atwork | /python/Desk.py | UTF-8 | 449 | 3.28125 | 3 | [] | no_license | import pygame
class Desk():
    """A desk marker drawn as a 20x20 blue square labeled with its index."""
    def __init__(self, x, y, direction, index):
        # (x, y) is the square's center; direction is stored but not used
        # by show().
        self.index = index
        self.x = x
        self.y = y
        self.direction = direction
    def show(self, display):
        """Draw the desk square and its index number onto *display*."""
        pygame.draw.rect(display, (0,0,200),(self.x - 10, self.y - 10, 20, 20))
        font = pygame.font.SysFont("serif", 16)
        text = font.render(str(self.index), True, (255,255,255))
        display.blit(text, (self.x-4, self.y-10))
| true |
c15766701e9bae90524266009ca68d2acd2ed4e3 | Python | kim3163/kimjoon.github | /TextMining/bin/word2VecEng.py | UTF-8 | 1,055 | 2.71875 | 3 | [
"MIT"
] | permissive | import nltk
# Download the NLTK movie_reviews corpus at import time (network access).
nltk.download('movie_reviews')
import os
import sys
import io
from nltk.corpus import movie_reviews
# Materialize every tokenized sentence of the corpus in memory.
sentences = [list(s) for s in movie_reviews.sents()]
from gensim.models.word2vec import Word2Vec
# Train a Word2Vec model over the whole corpus at import time (slow).
model = Word2Vec(sentences)
class Word2VecModule():
    """For each comma-separated token of a line, look up its nearest
    neighbours in the module-level Word2Vec model and write the results
    to ../workd2vecFile/res_<argv[1]>."""
    def __init__(self):
        pass
    def run(self, line):
        # NOTE(review): `module` is computed but never used.
        module = os.path.basename(sys.argv[0])
        listB = line.split(",")
        resList = []
        # The last comma-separated field is skipped ([:-1]); presumably
        # the line ends with a trailing separator or newline -- confirm.
        for x in listB[:-1]:
            print (x)
            y = x.rstrip()
            resList.append(model.most_similar(y))
        print(resList)
        inputString = sys.argv[1]
        filename = '../workd2vecFile/res_%s' % inputString
        # NOTE(review): fout is never closed or flushed explicitly;
        # buffered output may be lost on abnormal exit -- use `with`.
        fout = open(filename, 'w')
        for t in enumerate(resList):
            a = 'index : {} value: {}'.format(*t)
            print(a)
            fout.write(a)
def main():
    """Read the first line of the file named by argv[1] and run the module.

    FIX: the input file is now closed via a context manager (the original
    leaked the handle).
    """
    with open(sys.argv[1], "r") as f:
        line = f.readline()
    wv = Word2VecModule()
    wv.run(line)
if __name__ == '__main__':
    try:
        main()
    except ValueError as err:
        # BUG FIX: print the caught exception instance, not the
        # ValueError class object itself.
        print(err)
| true |
7557fabdd9c290acd4e9aec34d275e411d303f9a | Python | j1o1h1n/despatches | /despatches/simple_client.py | UTF-8 | 929 | 2.75 | 3 | [] | no_license | """
Example memfd_create(2) client application.
"""
import mmap
import socket
import struct
LOCAL_SOCKET_NAME = "./unix_socket"
def recv_fd(sock):
    '''
    Receive a single file descriptor over a Unix-domain socket.

    One byte of regular data is read together with SCM_RIGHTS ancillary
    data; the kernel installs the passed descriptor into this process and
    its local number is unpacked from the ancillary payload.
    '''
    fd_size = struct.calcsize('i')
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(fd_size))
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    assert cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS
    return struct.unpack('i', cmsg_data)[0]
def connect_to_server_and_get_memfd_fd():
    """Connect to the local server socket and return the memfd descriptor it sends.

    The received descriptor stays valid after the connection is closed, so
    the socket is released as soon as the fd has been read (the original
    leaked the socket).
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(LOCAL_SOCKET_NAME)
        return recv_fd(sock)
    finally:
        sock.close()
def main():
    """Map the shared memory received from the server and print its message.

    The mapping is private and read-only; the message is the byte string up
    to the first NUL terminator.
    """
    shm_size = 1024
    fd = connect_to_server_and_get_memfd_fd()
    shm = mmap.mmap(fd, shm_size, flags=mmap.MAP_PRIVATE, prot=mmap.PROT_READ)
    try:
        idx = shm.find(b'\0')
        print("Message: %s\n" % (shm[:idx]))
    finally:
        # Release the mapping explicitly (the original left it to the GC).
        shm.close()
if __name__ == "__main__":
main()
| true |
e0d803aac2c817412e919109ffe8861dda84b0f8 | Python | Aasthaengg/IBMdataset | /Python_codes/p02409/s222993531.py | UTF-8 | 756 | 3.25 | 3 | [] | no_license | def main():
n = int(input())
n_b = 4
n_f = 3
n_r = 10
from collections import defaultdict
state = defaultdict(int)
for _ in range(n):
        # b, f, r identify the building, floor and room.
        # v is the signed change in the room's occupancy;
        # a negative v means that many tenants moved out.
b, f, r, v = map(int, input().split())
state[(b, f, r)] += v
for b in range(1, n_b + 1):
for f in range(1, n_f + 1):
room_state = []
for r in range(1, n_r + 1):
room_state.append(state[(b, f, r)])
print(" " + " ".join(map(str, room_state)))
if b != n_b:
print("#" * 20)
if __name__ == "__main__":
main() | true |
2adf716914dc80f7bd486236852c7b7ae2134062 | Python | liborutgers12/audiospeech | /utils.py | UTF-8 | 2,201 | 2.9375 | 3 | [
"BSD-2-Clause"
] | permissive | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pyaudio
from scipy.io import wavfile
import preprocessing
def plotWaveform(audio, samplingFreq, ax=None):
    '''Plot the audio waveform in time domain.

    A default of plt.figure().add_subplot(111) would be evaluated once at
    import time and shared by every call, so a fresh axes object is created
    per call when the caller does not supply one.
    '''
    if ax is None:
        ax = plt.figure().add_subplot(111)
    x_values = np.arange(0, audio.shape[0], 1) / samplingFreq
    x_values = x_values * 1000  # seconds -> milliseconds
    ax.plot(x_values, audio, 'k')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Amplitude')
    ax.set_title('Audio signal')
    plt.draw()
def plotWaveforms(audio, samplingFreq, fig=None):
    '''Plot the waveforms of in time domain for mono or stereo audio.

    The figure default is created per call; a fig=plt.figure() default
    would be evaluated once at import time and reused for every call.
    '''
    if fig is None:
        fig = plt.figure()
    if audio.ndim == 1:
        plotWaveform(audio, samplingFreq, ax=fig.add_subplot(111))
    elif audio.ndim == 2:
        # Stereo: one stacked subplot per channel.
        ax = fig.add_subplot(211)
        plotWaveform(audio[:, 0], samplingFreq, ax)
        ax = fig.add_subplot(212)
        plotWaveform(audio[:, 1], samplingFreq, ax)
    plt.show()
def plotPowerSpectrum(audio, samplingFreq):
    '''Plot the audio power spectrum (thin wrapper over preprocessing).'''
    preprocessing.powerSpectrum(audio, samplingFreq, plotEnabled=True)
def plotSpectrogram(audio, samplingFreq):
    '''Plot the audio spectrogram (thin wrapper over preprocessing).'''
    preprocessing.audioSpectrogram(audio, samplingFreq, plotEnabled=True)
def playBack(audio, samplingFreq):
    '''Play back audio from a numpy array via pyaudio (blocking).'''
    audio = audio.astype(np.float32)
    # instantiate PyAudio (1)
    p = pyaudio.PyAudio()
    try:
        # open stream (2); float32 itemsize is 4 bytes
        stream = p.open(format=p.get_format_from_width(audio.dtype.itemsize),
                        channels=audio.ndim,
                        rate=samplingFreq,
                        output=True)
        try:
            # play stream (3); tobytes() replaces tostring(), which was
            # deprecated and removed in NumPy 2.0
            stream.write(audio.tobytes())
        finally:
            # stop stream (4) even if the write fails
            stream.stop_stream()
            stream.close()
    finally:
        # close PyAudio (5)
        p.terminate()
def playBackFile(fileName):
    '''Play back an audio file and print summary information (pyaudio-based).'''
    samplingFreq, audio = wavfile.read(fileName, 'rb')
    # Normalise to [-1, 1]; skip for an all-zero (silent) file to avoid a
    # division by zero that would fill the buffer with NaNs.
    peak = np.amax(np.abs(audio))
    if peak > 0:
        audio = audio / peak
    audio = audio.astype(np.float32)
    print('=====Audio information=====')
    print('Shape:', audio.shape)
    print('Datatype:', audio.dtype)
    print('SamplingFrequency:', samplingFreq)
    print('Duration:', round(audio.shape[0] / samplingFreq, 3), 'seconds')
    playBack(audio, samplingFreq)
| true |
afab2fe8f815cda591267691c0667a75a6182296 | Python | Hrishikeshbele/Competitive-Programming_Python | /Leaf-Similar Trees.py | UTF-8 | 767 | 3.59375 | 4 | [] | no_license | '''
Two binary trees are considered leaf-similar if their leaf value sequence is the same.
Return true if and only if the two given trees with head nodes root1 and root2 are leaf-similar.
Input: root1 = [1,2], root2 = [2,2]
Output: true
approach : collect the leaf value sequence of each tree via DFS and compare them
'''
class Solution(object):
    def leafSimilar(self, root1, root2):
        """
        :type root1: TreeNode
        :type root2: TreeNode
        :rtype: bool
        Two trees are leaf-similar when their left-to-right leaf value
        sequences are identical.
        """
        def leaf_values(node):
            if not node:
                return []
            if not node.left and not node.right:
                return [node.val]
            return leaf_values(node.left) + leaf_values(node.right)

        return leaf_values(root1) == leaf_values(root2)
| true |
67ae4c62b8f3f151e4d2aad147e966f11d9f1eea | Python | teju85/programming | /rosalind/SIMS.py | UTF-8 | 1,644 | 2.765625 | 3 | [] | no_license | import sys
from common import readFasta
def backtrack(mat, s, t, i, j):
    """Recursively recover one optimal alignment (s1, t1) from the DP matrix.

    A match/mismatch consumes a character from both strings; a gap in s is
    rendered as '-' against the consumed t character.
    """
    if i <= 0 or j <= 0:
        return ('', '')
    if s[i-1] == t[j-1]:
        (s1, t1) = backtrack(mat, s, t, i-1, j-1)
        s1 += s[i-1]
        t1 += t[j-1]
    elif mat[i][j-1] > mat[i-1][j] and mat[i][j-1] > mat[i-1][j-1]:
        (s1, t1) = backtrack(mat, s, t, i, j-1)
        s1 += '-'
        t1 += t[j-1]
    elif mat[i-1][j] > mat[i-1][j-1]:
        (s1, t1) = backtrack(mat, s, t, i-1, j)
        s1 += s[i-1]
        t1 += '-'
    else:
        (s1, t1) = backtrack(mat, s, t, i-1, j-1)
        s1 += s[i-1]
        t1 += t[j-1]
    return (s1, t1)

def fittingAlignment(s, t):
    """Fitting alignment of t inside s: match +1, mismatch/gap -1.

    Returns (score, aligned_region_of_s, aligned_t).  Column 0 is free so t
    may start anywhere in s; row 0 is gap-penalised so t is consumed whole.
    """
    ls = len(s) + 1
    lt = len(t) + 1
    mat = [[0 for j in range(0, lt)] for i in range(0, ls)]
    for i in range(1, ls):
        mat[i][0] = 0
    for j in range(1, lt):
        mat[0][j] = -j
    for i in range(1, ls):
        sa = s[i-1]
        for j in range(1, lt):
            ta = t[j-1]
            if sa == ta:
                mat[i][j] = mat[i-1][j-1] + 1
            else:
                a = mat[i-1][j] - 1
                b = mat[i][j-1] - 1
                c = mat[i-1][j-1] - 1
                mat[i][j] = max(a, b, c)
    # The best score may end at any row of the last column (t fully
    # consumed; s may continue).  float('-inf') works on Python 2 and 3;
    # the original used sys.maxint, which no longer exists on Python 3.
    maxi = float('-inf')
    maxPos = -1
    for i in range(0, ls):
        if mat[i][-1] > maxi:
            maxi = mat[i][-1]
            maxPos = i
    sys.setrecursionlimit(10000)
    (s1, t1) = backtrack(mat, s, t, maxPos, lt-1)
    return (mat[maxPos][-1], s1, t1)
if __name__ == '__main__':
dnas = readFasta(sys.argv[1])
(score, s1, t1) = fittingAlignment(dnas[0][1], dnas[1][1])
print score
print s1
print t1
| true |
bf75a41478548f63e9465b2bef9099739da2e919 | Python | zhongshun/Leetcode | /877. Stone Game/solution1.py | UTF-8 | 455 | 3.453125 | 3 | [] | no_license | def Game(Alex,Lee,piles):
    # Recursive exploration: in each step Alex and Lee take opposite ends
    # simultaneously (first/last, then the mirrored choice).
    if piles:
        Alex1 = Alex + piles[0]
        Lee1 = Lee + piles[-1]
        if Game(Alex1,Lee1,piles[1:len(piles)-1]):
            return True
        Alex2 = Alex + piles[-1]
        Lee2 = Lee + piles[0]
        if Game(Alex2,Lee2,piles[1:len(piles)-1]):
            return True
        # NOTE(review): falls through returning None when neither choice
        # wins; callers rely only on truthiness, so None acts as False.
    else:
        # No piles left: Alex wins only with the strictly larger total.
        if Alex > Lee:
            return True
        else:
            return False
# Demo: on this pile configuration the first player can force a win.
piles = [5,3,4,5]
print(Game(0,0,piles))
| true |
b3948999b6550da60ee244c829f2542fd46f318a | Python | Psingh12354/GeeksPy | /WordCount.py | UTF-8 | 170 | 3.453125 | 3 | [] | no_license | from collections import Counter
# Count how often each whitespace-separated word occurs in the sample string.
test_str = 'Gfg is best . Geeks are good and Geeks like Gfg'
res=Counter(test_str.split())
print("The word frequency is : "+str(res))
| true |
a457507bcd050dcdff56c2c80dd32ab320345bd6 | Python | eladsnd/hw1statistics | /main.py | UTF-8 | 1,405 | 3.578125 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom
def Empirical_F(x):
    """Empirical CDF of the sample x.

    Returns an (n, 2) numpy matrix whose rows are (sorted sample value,
    cumulative relative frequency); duplicated values repeat the same
    cumulative height.
    """
    total = len(x)
    counts = {}
    for value in x:
        counts[value] = counts.get(value, 0) + 1
    # Per-value probability mass (the jump height of the CDF at that value).
    step = {value: count / total for value, count in counts.items()}
    ordered = np.array(x)
    ordered.sort()
    heights = []
    running = 0
    for i, value in enumerate(ordered):
        if i > 0 and ordered[i - 1] == value:
            # Duplicate: the CDF stays at the height of the first copy.
            heights.append(heights[i - 1])
        else:
            heights.append(running + step[value])
        running = heights[i]
    heights = np.array(heights)
    return np.matrix([ordered, heights]).T
def Q2_b(size):
    # Draw `size` samples from Binomial(n=5, p=1/6): five die rolls,
    # counting sixes.  Not seeded, so results vary between runs.
    return binom.rvs(n=5, p=1 / 6, size=size)
def Q2_c(X):
    # Empirical CDF of the sample drawn in Q2_b.
    return Empirical_F(X)
def Q2_d(xemp):
    """Draw the empirical CDF as scattered points plus a step curve."""
    values = xemp[:, 0]
    heights = xemp[:, 1]
    plt.scatter([values], [heights])
    plt.step(values, heights)
    plt.ylim(0, 1)
def Q2_e():
    """Overlay the theoretical Binomial(5, 1/6) CDF on the current axes."""
    support = list(range(6))
    cdf_values = binom.cdf(support, 5, 1 / 6)
    plt.scatter(support, cdf_values)
    plt.plot(support, cdf_values)
def Q2_g(figure, num):
    # Repeat the sample / empirical-CDF / theoretical-CDF comparison for a
    # sample of size `num`, drawn in its own matplotlib figure window.
    plt.figure(figure)
    plt.title("x = " + str(num))
    x = Q2_b(num)
    x_emp = Q2_c(x)
    Q2_d(x_emp)
    Q2_e()
if __name__ == '__main__':
    print("Elad is the King")
    # Figure 0: baseline comparison for a sample of size 20.
    plt.figure(0)
    plt.title("x = 20")
    # Q2.b: draw the binomial sample
    X = Q2_b(20)
    # Q2.c: compute its empirical CDF
    x_emp = Q2_c(X)
    # Q2.d: plot the empirical CDF
    Q2_d(x_emp)
    # Q2.e + f: overlay the theoretical CDF
    Q2_e()
    # Q2.g: repeat for growing sample sizes (the ECDF converges to the CDF)
    Q2_g(1, 100)
    Q2_g(2, 200)
    Q2_g(3, 1000)
    plt.show()
| true |
e2dc399fa649f16830c3099a59530ecf47cc4167 | Python | lilexuan/cs61a | /lab/lab07/lab07_extra.py | UTF-8 | 2,157 | 3.703125 | 4 | [] | no_license | """ Optional Questions for Lab 07 """
from lab07 import *
def has_cycle(link):
    """Return whether link contains a cycle.

    >>> s = Link(1, Link(2, Link(3)))
    >>> s.rest.rest.rest = s
    >>> has_cycle(s)
    True
    >>> t = Link(1, Link(2, Link(3)))
    >>> has_cycle(t)
    False
    >>> u = Link(2, Link(2, Link(2)))
    >>> has_cycle(u)
    False
    """
    # Walk the list, remembering every node seen so far; revisiting one
    # means there is a cycle.  The seen-list is threaded through the
    # recursion explicitly -- the original relied on a mutable default
    # argument, which hides the shared state and is fragile.
    def helper(link, have_seen):
        if link is Link.empty:
            return False
        elif link in have_seen:
            return True
        else:
            have_seen.append(link)
            return helper(link.rest, have_seen)
    return helper(link, [])
def has_cycle_constant(link):
    """Return whether link contains a cycle, using constant space.

    Floyd's tortoise-and-hare: `slow` advances one node per step while
    `fast` advances two; in a cyclic list the fast pointer eventually lands
    on the slow one, and in a finite list it reaches the end.

    >>> s = Link(1, Link(2, Link(3)))
    >>> s.rest.rest.rest = s
    >>> has_cycle_constant(s)
    True
    >>> t = Link(1, Link(2, Link(3)))
    >>> has_cycle_constant(t)
    False
    """
    "*** YOUR CODE HERE ***"
    if link is Link.empty:
        return False
    slow, fast = link, link.rest
    while fast is not Link.empty:
        # Fast pointer is about to fall off the end: the list terminates.
        if fast.rest == Link.empty:
            return False
        # Fast caught up with slow at either of its two hops: cycle found.
        elif fast == slow or fast.rest == slow:
            return True
        else:
            slow, fast = slow.rest, fast.rest.rest
    return False
def reverse_other(t):
    """Mutates the tree such that nodes on every other (odd-depth) level
    have the labels of their branches all reversed.

    >>> t = Tree(1, [Tree(2), Tree(3), Tree(4)])
    >>> reverse_other(t)
    >>> t
    Tree(1, [Tree(4), Tree(3), Tree(2)])
    >>> t = Tree(1, [Tree(2, [Tree(3, [Tree(4), Tree(5)]), Tree(6, [Tree(7)])]), Tree(8)])
    >>> reverse_other(t)
    >>> t
    Tree(1, [Tree(8, [Tree(3, [Tree(5), Tree(4)]), Tree(6, [Tree(7)])]), Tree(2)])
    """
    "*** YOUR CODE HERE ***"
    # need_reverse is True when t's children sit on an odd-depth level,
    # i.e. their labels must be reversed among themselves.
    def reverse_helper(t, need_reverse):
        if t.is_leaf():
            return
        # Snapshot the reversed label order before mutating anything.
        new_labs = [b.label for b in t.branches][::-1]
        for i in range(len(t.branches)):
            # Children one level down flip the reversal flag.
            reverse_helper(t.branches[i], not need_reverse)
            if need_reverse:
                t.branches[i].label = new_labs[i]
    reverse_helper(t, True)
| true |
aeeb8d3d77ac6bbe805ac9c416620ef8433b4f9e | Python | claireyegian/unit5 | /warmup13.py | UTF-8 | 239 | 3.765625 | 4 | [] | no_license | #Claire Yegian
#11/16/17
#warmup13.py - makes list and prints min, max, and sum
from random import randint
# Build a list of 20 random integers in [9, 99] (randint is inclusive on
# both ends), then report the smallest, largest and total.
numbers = [randint(9, 99) for _ in range(20)]
print(min(numbers))
print(max(numbers))
print(sum(numbers))
| true |
73d7fa8f03bd3d3cee56ec06dfe64d5a32c5a9e2 | Python | kveeramah/Lynch-PoolSeq-estimator | /LynchPool_caller.py | UTF-8 | 4,865 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: ASCII -*-
#####This python script applyies to allele frequency estimator for pool seq data from Lynch et al. 2014 GBE.
#####Please note it is slow, and is only designed for obtain allele frequencies at specific SNPs in smallish numbers (tens to hundreds) of individuals. I do not recommend using this as a general variant caller.
#####Pysam and Numpy must be installed in the version of python used.
#####Bam files must be indexed.
#####To guard against mis-mappings and CNVs, the program only outputs sites with coverage between a third and twice the mean at the set of SNPs considered
#####The SNP file must have the tab seperated fields in the following order: chromosome, position (one-based), reference allele, alternate allele
#####If you want to change things like mapping and base quality threshold, edit the python code under the section "Input arguments"
#####Written (poorly) by Krishna Veeramah (krishna.veeramah@stonybrook.edu)
#####usage is ./LynchPool_called.py <bamfile> <fileoutname> <target_SNP_file> <reference_genome>
###import libraries
import string
import numpy as np
import pysam
import gzip
import math
import copy
from sys import argv
import time
###Input arguments
BAMin=argv[1] #filename of bam with poolseq data
filenameout=argv[2] #creates a vcf
SNPfile=argv[3] #must have the tab seperated fields in the following order chromosome, position (one-based), reference allele, alternate allele
#ref_file=argv[4]
MQ_t=20 #mapping quality threshold
BQ_t=20 #base_qualitythreshold
###converts phred score to probability
def phred2prob(x):
    """Convert a Phred quality score to an error probability."""
    return pow(10.0, -x / 10.0)
###converts probability to phred score
def prob2phred(x):
    """Convert an error probability to a Phred quality score."""
    log_prob = math.log10(x)
    return -10 * log_prob
###extract base of reads for a give position in a bam.
def extract_bam_SNP_base_only(samfile,chromo,pos,BQ,MQ):
    # Collect the base called by every read covering one 1-based position,
    # applying mapping-quality (MQ) and base-quality (BQ) thresholds.
    var_list=[]
    # truncate=True restricts the pileup to exactly [pos-1, pos);
    # stepper='all' applies pysam's samtools-style read filtering.
    for pileupcolumn in samfile.pileup(chromo,pos-1,pos,truncate=True,stepper='all'):
        for pileupread in pileupcolumn.pileups:
            # Deletions and reference skips carry no query base to record.
            if not pileupread.is_del and not pileupread.is_refskip:
                # qual is a phred+33 ASCII string, hence ord(...) - 33.
                if (pileupread.alignment.mapping_quality>=MQ) and (ord(pileupread.alignment.qual[pileupread.query_position])-33>=BQ):
                    var_list.append(pileupread.alignment.query_sequence[pileupread.query_position])
    return var_list
#####open reference file
##ref=pysam.FastaFile(ref_file)
##chromos=ref.references
###Read SNPlist
file=open(SNPfile,'r')
SNPs=file.read()
SNPs=string.split(SNPs,'\n')
if SNPs[-1]=='':
del(SNPs[-1])
nb_SNPs=len(SNPs)
samfile = pysam.AlignmentFile(BAMin, "rb")
samp_name=samfile.header['RG'][0]['SM']
all_counts=np.zeros((nb_SNPs,3),dtype='float32') #major_allele, minor_allele, other_allele
Mm_allele=np.zeros((nb_SNPs),dtype='int32') #0=ref is major allele, 1=alt is major allele. In a tie, ref is chosen as major
###Iterate through SNPs
for g in range(len(SNPs)):
k=string.split(SNPs[g])
chromo=k[0]
k[1]=int(k[1])
SNPs[g]=k
ref_all=k[2]
alt_all=k[3]
read_list=extract_bam_SNP_base_only(samfile,k[0],k[1],BQ_t,MQ_t)
#all_counts[g]=[read_list.count('A'),read_list.count('C'),read_list.count('G'),read_list.count('T')]
if read_list.count(ref_all)>=read_list.count(alt_all):
all_counts[g]=[read_list.count(ref_all),read_list.count(alt_all),len(read_list)-read_list.count(ref_all)-read_list.count(alt_all)]
else:
all_counts[g]=[read_list.count(alt_all),read_list.count(ref_all),len(read_list)-read_list.count(ref_all)-read_list.count(alt_all)]
Mm_allele[g]=1
if g%100==0:
print g,k
### calculate per-SNP depth and the accepted coverage window (one third to
### twice the mean), guarding against mis-mappings and CNVs
depth=np.sum(all_counts,axis=1)
min_DP=round(np.mean(depth)/3)
max_DP=round(np.mean(depth)*2)
### first pass of the Lynch et al. 2014 estimator
# per-site error rate from the "other allele" read fraction
p_e=all_counts[:,2]/depth
E=3*p_e/2
# raw major-allele read fraction, then the error-corrected frequency
p_m=all_counts[:,0]/np.sum(all_counts[:,0:2],axis=1)
p_hat=p_m*(1.0-(2.0*E/3.0))-(E/3.0)/1-(4*E/3)
### second pass: near-fixed sites (p_hat > 0.9) receive the mean error rate
### of sites passing the frequency and depth filters, then frequencies are
### re-estimated with the smoothed error rates
a=p_hat>0.9
b=depth<min_DP
c=depth>max_DP
exclude=a+b+c
exclude_swap=exclude == False
E_mean=np.mean(E[exclude_swap])
for g in range(len(E)):
    if p_hat[g]>0.9:
        E[g]=E_mean
p_hat2=p_m*(1.0-(2.0*E/3.0))-(E/3.0)/1-(4*E/3)
outfile=open(filenameout,'w')
header='chrom\tpos\tref\talt\talt_AF\talt_AF_correct\tref:alt:other_dp\tDP\tDP_ok?\n'
outfile.write(header)
for g in range(len(SNPs)):
out=SNPs[g][0]+'\t'+str(SNPs[g][1])+'\t'+SNPs[g][2]+'\t'+SNPs[g][3]+'\t'
if Mm_allele[g]==0:
out=out+str(1-round(p_hat[g],3))+'\t'+str(1-round(p_hat2[g],3))+'\t'+str(int(all_counts[g][0]))+':'+str(int(all_counts[g][1]))
else:
out=out+str(round(p_hat[g],3))+'\t'+str(round(p_hat2[g],3))+'\t'+str(int(all_counts[g][1]))+':'+str(int(all_counts[g][0]))
out=out+':'+str(int(all_counts[g][2]))+'\t'+str(int(depth[g]))+'\t'
if min_DP<=depth[g]<=max_DP:
out=out+'Y\n'
else:
out=out+'N\n'
outfile.write(out)
outfile.close()
| true |
3e37d8de6446d4a99f78dfc89085eb91781550a2 | Python | Muhongfan/MACHINE-LEARNING-PRO | /Data_analysis.py | UTF-8 | 5,058 | 2.8125 | 3 | [] | no_license | # ---- coding:UTF-8 ----
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
## read files
from os import path
import pandas as pd
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.impute import SimpleImputer
from ast import literal_eval
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import seaborn as sns
## Read files to data frames
from sklearn.svm import LinearSVC
from sklearn.tree import tree
#ams_df= pd.read_csv(path.join('/Users/momo/Documents/mhf/CSI5155/PRO/dataset/ams_df_5000.csv'),index_col=0)
ams_df = pd.read_csv(path.join('/Users/momo/Documents/mhf/CSI5155/PRO/New_dataset/test_data.csv'), index_col=0)
ott_df= pd.read_csv(path.join('/Users/momo/Documents/mhf/CSI5155/PRO/dataset/ott_df.csv'),index_col=0)
ams_df_nostd= pd.read_csv(path.join('/Users/momo/Documents/mhf/CSI5155/PRO/New_dataset/ams_df_nostd_new.csv'),index_col=0)
ott_df_nostd= pd.read_csv(path.join('/Users/momo/Documents/mhf/CSI5155/PRO/dataset/ott_df_nostd.csv'),index_col=0)
ams_price = ams_df_nostd.price.copy()
ott_price = ott_df_nostd.price.copy()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ams_price.hist(bins=30, color='blue', alpha=0.4, ax=ax1)
ott_price.hist(bins=30, color='red', alpha=0.4, ax=ax2)
sns.despine(top=True, right=True, left=True)
# plot price
ax1.set_title('Amsterdam Price')
ax2.set_title('Ottawa Price')
ax1.set_ylabel('Frequency')
ax1.set_xlabel('Price')
ax2.set_xlabel('Price')
plt.show()
plt.clf()
ams_price_min = []
for item in ams_price:
if item != 0.0:
ams_price_min.append(item)
print('length of sea_price_min',len(ams_price_min))
ams_price_min = pd.Series(ams_price_min)
print('min sea_price', ams_price_min.min())
print('max sea_price', ams_price_min.max())
ott_price_min = []
for item in ott_price:
if item != 0.0:
ott_price_min.append(item)
print('length of bos_price_min',len(ott_price_min))
ott_price_min = pd.Series(ott_price_min)
print('num of bos_price=:',(ott_price == 0.0).sum())
## Transform price to log-price
ams_price_log = ams_price_min.map(lambda x: np.log(x))
ott_price_log = ott_price_min.map(lambda x: np.log(x))
## Visualize again
fig2, (ax3, ax4) = plt.subplots(1, 2, figsize=(12, 4))
ams_price_log.hist(bins=50, color='blue', alpha=0.4, ax=ax3)
ott_price_log.hist(bins=40, color='red', alpha=0.4, ax=ax4)
sns.despine(top=True, right=True, left=True)
ax3.set_title('Amsterdam Log-price')
ax4.set_title('Ottawa Log-price')
ax3.set_ylabel('Frequency')
ax3.set_xlabel('log( Price )')
ax4.set_xlabel('log( Price )')
plt.show()
plt.clf()
## display statistics
_ = pd.concat([ams_price.describe(), ott_price.describe()], axis=1)
_.columns = ['Amsterdam log-price', 'Ottawa log-price']
print(_)
#print(sea_df.shape, bos_df.shape)
colus = [i for i in ams_df_nostd.columns.values]
print(colus)
## correlation
#col = ['calculated_host_listings_count', 'minimum_nights', 'bathrooms', 'bedrooms', 'beds', 'price', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month']
sns.set(style="ticks", color_codes=True)
sns.pairplot(ams_df_nostd.loc[(ams_df_nostd.price <= 600) & (ams_df_nostd.price > 0)][colus].dropna())
plt.show()
plt.clf()
corr = ams_df_nostd.loc[(ams_df_nostd.price <= 600) & (ams_df_nostd.price > 0)][colus].dropna().corr()
plt.figure(figsize = (150,150))
sns.set(font_scale=1)
sns.heatmap(corr, cbar = True, annot=True, square = True, fmt = '.2f', xticklabels=colus, yticklabels=colus)
plt.show()
plt.clf()
'''
def binary_count_and_price_plot(col, figsize=(6,6)):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
fig.suptitle(col, fontsize=16, y=1)
plt.subplots_adjust(top=0.80) # So that the suptitle does not overlap with the ax plot titles
ams_cat.groupby(col).size().plot(kind='bar', ax=ax1, color=['pink', 'blue'])
ax1.set_xticklabels(labels=['false', 'true'], rotation=0)
ax1.set_title('Category count')
ax1.set_xlabel('')
ams_cat.groupby(col).price.median().plot(kind='bar', ax=ax2, color=['pink', 'blue'])
ax2.set_xticklabels(labels=['false', 'true'], rotation=0)
ax2.set_title('Median price ($)')
ax2.set_xlabel('')
#plt.savefig('/Users/momo/Documents/mhf/CSI5155/PRO/picture/%s.jpg' % (col))
plt.show()
# EDA of catigrical features
ams_cat = pd.concat([ams_df.id,ams_df.price, ams_cat], axis=1)
ams_cat_columns = ams_cat.iloc[:,:].columns
for col in ams_cat_columns:
binary_count_and_price_plot(col)
plt.savefig('/Users/momo/Documents/mhf/CSI5155/PRO/picture/%s.jpg' % (col))
colus = [i for i in ams_cat_ani.columns.values]
ams_cat_ani = pd.concat([ams_df.id,ams_df.price, ams_cat_ani], axis=1)
corr2 = ams_cat_ani.loc[(ams_cat_ani.price <= 600) & (ams_cat_ani.price > 0)][colus].dropna().corr()
plt.figure(figsize = (60,60))
sns.set(font_scale=1)
sns.heatmap(corr2, cbar = True, annot=True, square = True, fmt = '.2f', xticklabels=colus, yticklabels=colus)
plt.show()
plt.clf()
# Replacing columns with f/t with 0/1
#ams_df.replace({'f': 0, 't': 1}, inplace=True)
'''
| true |
767d460fc994ffc06b03b324aea193b141ae8b81 | Python | digitalladder/leetcode | /problem528.py | UTF-8 | 1,212 | 3.5625 | 4 | [] | no_license | #problem 528 / random pick with weight
class Solution(object):
    def __init__(self, w):
        """
        :type w: List[int]
        Build the prefix-sum table of the weights; self.num is the total.
        """
        self.n = len(w)
        prefix = [0] * self.n
        prefix[0] = w[0]
        for i in range(1, self.n):
            prefix[i] = prefix[i - 1] + w[i]
        self.pre = prefix
        self.num = prefix[-1]

    def pickIndex(self):
        """
        :rtype: int
        Draw idx uniformly from [0, total) and return the first bucket
        whose prefix sum exceeds it (linear scan; binary search would be
        faster).
        """
        idx = random.randint(0, self.num - 1)
        for i in range(self.n):
            if self.pre[i] > idx:
                return i
#二分法
class Solution(object):
    def __init__(self, w):
        """
        :type w: List[int]
        Precompute the running totals of the weights.
        """
        self.n = len(w)
        totals = [0] * self.n
        totals[0] = w[0]
        for i in range(1, self.n):
            totals[i] = totals[i - 1] + w[i]
        self.pre = totals
        self.num = totals[-1]

    def pickIndex(self):
        """
        :rtype: int
        bisect_right is required: when idx equals a prefix total the pick
        must fall into the next (positive-weight) bucket.
        """
        idx = random.randint(0, self.num - 1)
        return bisect.bisect_right(self.pre, idx)
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex() | true |
950f57ede4637799957a3254a1f3e0e7701f0dd7 | Python | krishnajalan/InterpretedLanguage | /lib/interpreter.py | UTF-8 | 4,833 | 3.125 | 3 | [] | no_license | from .tokens import *
from .errors import RTError
from .symbols import SymbolTable
class Context:
    """Runtime context: the name shown in tracebacks, a link to the parent
    context, the position where the parent entered this one, and this
    scope's symbol table (filled in later by the caller)."""

    def __init__(self, displayName, parent=None, parentEntry=None):
        self.symbolTable = None
        self.parentEntry = parentEntry
        self.parent = parent
        self.displayName = displayName
class RTResult:
    """Carries either a runtime value or an error out of a visit call."""

    def __init__(self):
        self.value = None
        self.error = None

    def success(self, value):
        """Record a successful value; returns self for chaining."""
        self.value = value
        return self

    def failure(self, error):
        """Record an error; returns self for chaining."""
        self.error = error
        return self

    def register(self, res):
        """Unwrap a nested RTResult, propagating its error; plain values
        pass through unchanged."""
        if not isinstance(res, RTResult):
            return res
        if res.error:
            self.error = res.error
        return res.value
class Number:
    """Wraps a numeric value together with its source span and context so
    runtime errors can point at the offending code."""

    def __init__(self, value):
        self.value = value
        self.setPos()
        self.setContext()

    def setContext(self, context=None):
        """Attach the execution context; returns self for chaining."""
        self.context = context
        return self

    def setPos(self, startPos=None, endPos=None):
        """Attach the source span; returns self for chaining."""
        self.startPos = startPos
        self.endPos = endPos
        return self

    def addedTo(self, other):
        """self + other -> (Number, error)."""
        if isinstance(other, Number):
            total = Number(self.value + other.value)
            return total.setContext(self.context), None

    def subbtractedBy(self, other):
        """self - other -> (Number, error)."""
        if isinstance(other, Number):
            diff = Number(self.value - other.value)
            return diff.setContext(self.context), None

    def multipliedBy(self, other):
        """self * other -> (Number, error)."""
        if isinstance(other, Number):
            prod = Number(self.value * other.value)
            return prod.setContext(self.context), None

    def poweredBy(self, other):
        """self ** other -> (Number, error)."""
        if isinstance(other, Number):
            powed = Number(self.value ** other.value)
            return powed.setContext(self.context), None

    def copy(self):
        """Fresh Number with the same value and context (position cleared)."""
        return Number(self.value).setContext(self.context)

    def dividedBy(self, other):
        """self / other -> (Number, error); dividing by zero yields an RTError."""
        if isinstance(other, Number):
            if other.value == 0:
                return None, RTError(
                    other.startPos, other.endPos,
                    "Division by Zero", self.context
                )
            quotient = Number(self.value / other.value)
            return quotient.setContext(self.context), None

    def __repr__(self):
        return f'{self.value}'
##################################
# Interpreter
##################################
class Interpreter:
    def visit(self, node, context):
        # Dispatch on the node's class name, e.g. NumberNode -> visitNumberNode;
        # unknown node types fall back to noVisitMethod.
        methodType = f'visit{type(node).__name__}'
        method = getattr(self, methodType, self.noVisitMethod)
        return method(node, context)
def noVisitMethod(self, node):
raise Exception(f'No visit{type(node).__name__} method defined')
    def visitNumberNode(self, node, context):
        # Literal: wrap the token's value, tagging its source span and context.
        return RTResult().success(
            Number(node.token.value).setPos(node.startPos, node.endPos).setContext(context)
        )
    def visitVarAssignNode(self, node, context):
        # Evaluate the right-hand side, then bind it in the symbol table.
        res = RTResult()
        varName = node.varNameToken.value
        value = res.register(self.visit(node.nodeValue, context))
        if res.error: return res
        context.symbolTable.set(varName, value)
        return res.success(value)
    def visitVarAccessNode(self, node, context):
        # Look the name up in the current symbol table; a copy is returned
        # so the stored value's recorded position is not clobbered.
        res = RTResult()
        varName = node.varNameToken.value
        value = context.symbolTable.get(varName)
        if not value:
            return res.failure(RTError(
                node.startPos, node.endPos,
                f"'{varName}' is not defined", context
            ))
        value = value.copy().setPos(node.startPos, node.endPos)
        return res.success(value)
    def visitBinaryOperationNode(self, node, context):
        # Evaluate both operands, then apply the operator selected by the
        # token type; arithmetic errors (e.g. division by zero) propagate
        # via the (result, error) pairs the Number methods return.
        res = RTResult()
        left = res.register(self.visit(node.leftNode, context))
        if res.error: return res
        right = res.register(self.visit(node.rightNode, context))
        if res.error: return res
        if node.opToken.type == TT_PLUS:
            result, error = left.addedTo(right)
        elif node.opToken.type == TT_MINUS:
            result, error = left.subbtractedBy(right)
        elif node.opToken.type == TT_MUL:
            result, error = left.multipliedBy(right)
        elif node.opToken.type == TT_DIV:
            result, error = left.dividedBy(right)
        elif node.opToken.type == TT_POW:
            result, error = left.poweredBy(right)
        if error: return res.failure(error)
        return res.success(result.setPos(node.startPos, node.endPos))
def visitUnaryOperationNode(self, node, context):
res = RTResult()
error = None
number = res.register(self.visit(node.node, context))
if res.error: return res
if node.opToken.type == TT_MINUS:
number, error = number.multipliedBy(Number(-1))
if error: return res.failure(error)
return res.success(number.setPos(node.startPos, node.endPos)) | true |
2e49ce618b71f360dc4770a2d64ca8612a5086ad | Python | amilyxy/Leecode_summary | /Bit_Manipulation.py | UTF-8 | 5,135 | 4.3125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: bit
Description :
Author : amilyxy
date: 2019/10/5
-------------------------------------------------
"""
'''
389. Find the Difference: 找不同
describe: 给定两个字符串 s 和 t,它们只包含小写字母。
字符串 t 由字符串 s 随机重排,然后在随机位置添加一个字母。
请找出在 t 中被添加的字母。
'''
import operator
class Solution:
    # Approach 1: remove each character of s from a copy of t; the single
    # leftover character is the answer.  O(n^2) because of list.remove.
    def findTheDifference(self, s: str, t: str) -> str:
        t = list(t)
        for i in s:
            t.remove(i)
        res = "".join(t)
        return res
        # str.replace works without converting to a list:
        # for i in s:
        #     t = t.replace(i, '', 1)

    # Approach 2: sort both strings and compare position by position.
    def findTheDifference(self, s: str, t: str) -> str:
        s = list(s)
        s.sort()
        s = "".join(s)
        t = list(t)
        t.sort()
        t = "".join(t)
        i = 0
        while i < len(s):
            if s[i] != t[i]:
                return t[i]
            i += 1
        if i == len(s):
            return t[i]

    # Approach 3: the added letter is the difference of the ASCII sums.
    # (The original called chr() with two arguments -- a TypeError; the
    # sums must be subtracted first.)
    def findTheDifference(self, s: str, t: str) -> str:
        res = chr(sum(map(ord, t)) - sum(map(ord, s)))
        return res

    # Approach 4: XOR of all code points; paired characters cancel out.
    # Each redefinition above shadows the previous one, so this variant is
    # the one actually bound on the class.
    def findTheDifference(self, s: str, t: str) -> str:
        res = 0
        for i in s:
            res ^= ord(i)
        for j in t:
            res ^= ord(j)
        return chr(res)
'''
136. Single Number: 找不同
describe: 给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现两次。找出那个只出现了一次的元素。
'''
class Solution:
    # After sorting, equal elements occupy consecutive slots; scanning the
    # even indices finds the first slot whose partner does not match.
    # O(n log n) from the sort -- the original comment noted this is not
    # optimal.
    def singleNumber(self, nums: list[int]) -> int:
        nums.sort()
        last = len(nums) - 1
        for i in range(0, len(nums), 2):
            if i == last or nums[i] != nums[i + 1]:
                return nums[i]
# 方法二:位操作
'''
⭐ 加精!
感觉这个才是题目所要求的的呀!
'''
class Solution(object):
    def singleNumber(self, nums):
        # XOR-fold the list: duplicates cancel out, leaving the lone element.
        # Equivalent to functools.reduce(lambda x, y: x ^ y, nums).
        acc = 0
        for value in nums:
            acc ^= value
        return acc
'''
318. Maximum Product of Word Lengths: 最大单词长度乘积
describe: 给定一个字符串数组 words,找到 length(word[i]) * length(word[j]) 的最大值,
并且这两个单词不含有公共字母。你可以认为每个单词只包含小写字母。
如果不存在这样的两个单词,返回 0。
'''
class Solution:
    # Encode each word as a 26-bit mask of the letters it uses; two words
    # share no letter exactly when their masks AND to zero.
    def maxProduct(self, words: list[str]) -> int:
        longest = {}
        for word in words:
            mask = 0
            for ch in word:
                mask |= 1 << (ord(ch) - 97)  # set the bit for this letter
            # Keep only the longest word for each distinct letter set.
            longest[mask] = max(longest.get(mask, 0), len(word))
        # Prepending [0] guards against max() over an empty candidate list;
        # the pairwise comparison is O(n^2) over distinct masks.
        candidates = [longest[a] * longest[b]
                      for a in longest for b in longest if a & b == 0]
        return max([0] + candidates)
'''
201.数字范围按位与
'''
# 移位操作
class Solution:
    def rangeBitwiseAnd(self, m: int, n: int) -> int:
        # Strip low bits until m and n share the same prefix; that common
        # prefix, shifted back, is the AND of every number in [m, n].
        shift = 0
        while m != n:
            m >>= 1
            n >>= 1
            shift += 1
        return m << shift
# 方法二:
class Solution:
    def rangeBitwiseAnd(self, m: int, n: int) -> int:
        # Clear the lowest set bit of n until n <= m; what remains is the
        # common prefix, i.e. the AND of the whole range.
        while n > m:
            n &= n - 1
        return n
'''
89.格雷编码
'''
# 根据格雷码的生成公式
class Solution:
    def grayCode(self, n: int) -> list[int]:
        # Direct formula: the i-th Gray code is i XOR (i >> 1).
        # Annotation uses the built-in list[int]; the original's List[int]
        # raised NameError because typing.List was never imported.
        count = pow(2, n)
        res = []
        for i in range(count):
            res.append(i ^ (i >> 1))
        return res
# 根据格雷码的镜像排列规则 @jyd
class Solution:
    def grayCode(self, n: int) -> list[int]:
        # Mirror construction: reflect the current sequence and add the new
        # high bit to the reflected half.  (list[int] replaces the
        # original's undefined typing.List annotation.)
        res, head = [0], 1
        for i in range(n):
            for j in range(len(res) - 1, -1, -1):
                res.append(head + res[j])
            head <<= 1
        return res
# 镜像排列的另一种实现 @powcai
# 都是大佬哈!我的位运算还不够熟练
class Solution:
    def grayCode(self, n: int) -> list[int]:
        # Mirror construction via XOR with the new high bit (1 << i).
        # (list[int] replaces the original's undefined typing.List.)
        res = [0]
        for i in range(n):
            for j in range(len(res) - 1, -1, -1):
                res.append(res[j] ^ (1 << i))
        return res
| true |
dae33a31462ab885c6c5057af6f3da4e049e0fcc | Python | stephmackenz/avalon | /common/python/http_client/http_jrpc_client.py | UTF-8 | 4,175 | 2.671875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"Zlib",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0"
] | permissive | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import urllib.request
import urllib.error
import logging
logger = logging.getLogger(__name__)
class MessageException(Exception):
    """
    Raised when HTTP JSON-RPC communication with a service fails:
    an HTTP error status, an unreachable URL, or no response at all.
    """
    pass
class HttpJrpcClient(object):
    """
    Class to handle HTTP JSON RPC communication by the client.
    """

    def __init__(self, url):
        self.ServiceURL = url
        # Empty mapping: bypass any environment-configured HTTP proxies.
        self.ProxyHandler = urllib.request.ProxyHandler({})

    def _postmsg(self, request, retries=0):
        """
        Post a request JSON RPC string and return the response.

        Parameters:
        @param request - JSON string request to post
        @param retries - Number of attempts to submit request

        Returns the decoded JSON response, or None when the server does
        not answer with application/json.  Raises MessageException on any
        HTTP, URL or transport failure.
        """
        data = request.encode('utf8')
        datalen = len(data)
        url = self.ServiceURL

        logger.debug('post request to %s with DATALEN=%d, DATA=<%s>',
                     url, datalen, data)

        try:
            request = urllib.request.Request(
                url, data,
                {'Content-Type': 'application/json',
                 'Content-Length': datalen})
            opener = urllib.request.build_opener(self.ProxyHandler)
            response = self._open_with_retries(
                opener, request, retries)
        except urllib.error.HTTPError as err:
            # logger.warning replaces the deprecated logger.warn alias;
            # 'from err' preserves the original cause for debugging.
            logger.warning('operation failed with response: %s', err.code)
            raise MessageException(
                'operation failed with response: {0}'.format(err.code)) from err
        except urllib.error.URLError as err:
            logger.warning('operation failed: %s', err.reason)
            raise MessageException(
                'operation failed: {0}'.format(err.reason)) from err
        except Exception as err:
            logger.exception('no response from server: %s', str(err))
            raise MessageException(
                'no response from server: {0}'.format(err)) from err

        content = response.read()
        headers = response.info()
        response.close()

        encoding = headers.get('Content-Type')
        if encoding != 'application/json':
            logger.info('server responds with message %s of type %s',
                        content, encoding)
            return None

        # Attempt to decode the content if it is not already a string
        try:
            content = content.decode('utf-8')
        except AttributeError:
            pass

        value = json.loads(content)
        return value

    def _open_with_retries(self, opener, request, retries):
        """
        Function to retry opening a given url/request if URLError is
        encountered. URLError for request would encompass HTTPError
        as well as Timeout.

        Parameters:
        @param opener - An instance of OpenerDirector
        @param request - Request to be sent to the url
        @param retries - Number of attempts to open
        Returns:
        @returns response - Response received
        """
        count = 0
        while count < retries:
            try:
                return opener.open(request, timeout=10)
            except urllib.error.URLError as err:
                logger.error("Connection error - %s", err.reason)
                # Back off before the next attempt.
                time.sleep(10)
            except Exception as err:
                raise err
            # Increment counter for each handled Exception
            count += 1
            if count < retries:
                logger.info("Will retry to connect.")
        # Make a final call after retries are exhausted
        return opener.open(request, timeout=10)
| true |
c15dae20add91b16d59c281db45567add039fd0b | Python | truongpt/warm_up | /practice/python/graph/maximum_product_of_splitted_binary_tree.py | UTF-8 | 854 | 3.6875 | 4 | [] | no_license | """
- Problem: https://leetcode.com/problems/maximum-product-of-splitted-binary-tree
"""
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
def dfs(root):
    """Rewrite every node's ``val`` to its subtree sum; return the total.

    Mutates the tree in place so later passes can read subtree sums
    directly from ``val``.
    """
    if root is None:
        return 0
    left_sum = dfs(root.left)
    right_sum = dfs(root.right)
    root.val = root.val + left_sum + right_sum
    return root.val
def product(root, total_sum):
    """Best split product over the subtree rooted at ``root``.

    Assumes every ``val`` already holds its subtree sum (see dfs); for each
    node the candidate product is subtree_sum * (total_sum - subtree_sum).
    """
    if root is None:
        return 0
    best = root.val * (total_sum - root.val)
    for child in (root.left, root.right):
        best = max(best, product(child, total_sum))
    return best
def maxProduct(root):
    """Max product of the two subtree sums obtained by removing one edge.

    Note: dfs() rewrites every node's ``val`` to its subtree sum, so the
    tree is mutated in place; the result is reported modulo 10**9 + 7.
    """
    dfs(root)
    max_val = product(root, root.val)
    return max_val % 1000000007
if __name__ == "__main__":
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(1)
print(maxProduct(root))
| true |
fa6116da2637055384946a308d41006cdcfaa2a4 | Python | uni51/python_tutorial | /pyq/25_container/fast_data_set/py3.py | UTF-8 | 783 | 4.53125 | 5 | [] | no_license | # 集合のメソッド(演算)
# Set methods (operations); the trailing comments show the printed results.
s1 = set('ab')
s2 = set('bc')
print("s1:", s1) # s1: {'b', 'a'}
print("s2:", s2) # s2: {'b', 'c'}
# Difference: elements of s1 that are not in s2
result1 = s1.difference(s2) # returns a set with s2's elements removed
print("s1.difference(s2):", result1) # s1.difference(s2): {'a'}
# Intersection: elements common to both sets
result2 = s1.intersection(s2) # returns the elements shared with s2
print("s1.intersection(s2):", result2) # s1.intersection(s2): {'b'}
# Symmetric difference: elements contained in exactly one of the two sets
result3 = s1.symmetric_difference(s2)
print("s1.symmetric_difference(s2):", result3) # s1.symmetric_difference(s2): {'a', 'c'}
# Union of both sets
result4 = s1.union(s2) # returns a set with s2's elements added
print("s1.union(s2):", result4) # s1.union(s2): {'b', 'a', 'c'}
| true |
c498c726bda6aa35a957b6745921705eb409972c | Python | AlexisDongMariano/miniFlaskApps | /Flask Rest Api Tutorial 1/main.py | UTF-8 | 3,208 | 2.671875 | 3 | [] | no_license | from flask import Flask, request
from flask_restful import Api, Resource, reqparse, abort, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
api = Api(app)
# SQLite database file created next to the application.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
db = SQLAlchemy(app)
class VideoModel(db.Model):
    """SQLAlchemy model for a video: name, view count and like count."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    views = db.Column(db.Integer, nullable=False)
    likes = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        # Bug fix: the original f-string referenced bare ``name``/``views``/
        # ``likes`` (NameError at runtime); they are instance attributes.
        return f'Video(name={self.name}, views={self.views}, likes={self.likes})'
# db.create_all() # creates the database but should be executed once
# Request parser: all three fields are mandatory on PUT.
video_put_args = reqparse.RequestParser()
video_put_args.add_argument('name', type=str, help='Name of the video is required', required=True)
video_put_args.add_argument('views', type=int, help='Views of the video is required', required=True)
video_put_args.add_argument('likes', type=int, help='Likes on the video is required', required=True)
# serialize the VideoModel instance so that it can be converted to JSON
resource_fields = {
    'id': fields.Integer,
    'name': fields.String,
    'views': fields.Integer,
    'likes': fields.Integer
}
class Video(Resource):
    """REST resource for a single video, addressed by its integer id."""
    @marshal_with(resource_fields) # the return values will be serialized with 'resource_fields'
    def get(self, video_id):
        # NOTE(review): a missing id serializes ``None`` with a 200 instead
        # of aborting with 404 — confirm that is intended.
        result = VideoModel.query.filter_by(id=video_id).first()
        return result
    @marshal_with(resource_fields)
    def put(self, video_id):
        args = video_put_args.parse_args()
        result = VideoModel.query.filter_by(id=video_id).first()
        if result:
            abort(409, message='Video id already exists...')
        video = VideoModel(id=video_id, name=args['name'], views=args['views'], likes=args['likes'])
        db.session.add(video) # adds the object to the current database session
        db.session.commit() # commit the changes made in the session and make it permanent
        return video, 201
# videos = {}
# def abort_if_dont_exists(video_id):
# '''abort the program if the video does not exists when GETting/DELETEing the video_id'''
# if video_id not in videos.keys():
# # returns error code 404 and the message
# abort(404, message=f'video id: {video_id} does not exists...')
# def abort_if_exists(video_id):
# '''abort the program if video does exists when PUTting the video_id'''
# if video_id in videos:
# # code 409 means already exists
# abort(409, message=f"video id: {video_id} already exists...")
# class Video(Resource):
# def get(self, video_id):
# abort_if_dont_exists(video_id)
# return videos[video_id]
# def put(self, video_id):
# abort_if_exists(video_id)
# args = video_put_args.parse_args()
# videos[video_id] = args
# return videos[video_id], 201 # 201 means created, 200 means OK
# def delete(self, video_id):
# abort_if_dont_exists(video_id)
# del videos[video_id]
# return '', 204 # 204 means deleted successfully
# Map the Video resource onto its REST endpoint.
api.add_resource(Video, '/video/<int:video_id>')
if __name__ == '__main__':
    # debug=True: development server with auto-reload; not for production.
    app.run(debug=True)
e44b960bdea06c64a7be286dbc05f3a5207676b7 | Python | Zerobitss/Python-101-ejercicios-proyectos | /practica74.py | UTF-8 | 588 | 4.15625 | 4 | [] | no_license | """
(1) Escribir un programa que almacene las asignaturas de un curso (por ejemplo Matemáticas, Física, Química, Historia y Lengua)
en una lista y la muestre por pantalla.
"""
def run():
    """Prompt for course subjects until five are stored, then list them."""
    cursos = []
    # NOTE(review): the ``< 6`` bound is never reached — the ``break`` below
    # fires as soon as five subjects are stored.
    while len(cursos) < 6:
        asignatura = str(input("Ingresa la asignatura de tus cursos: "))
        cursos.append(asignatura)
        if len(cursos) >= 5:
            print(f"Haz llegado al limite de asignaturas, para tu semestre")
            break
    for i in cursos:
        print(f"Las asignatura: {i}, Se ha agregado exitosamente")
if __name__ == "__main__":
    run()
b0662dc86c92b7d614e1080c669cfdadc0569272 | Python | egdinger/QuadCopterSim | /prototypes/linear_accel_calc.py | UTF-8 | 466 | 3.1875 | 3 | [
"MIT"
] | permissive | #acceleration and drag test
import matplotlib.pyplot as plt
# NOTE(review): 1.225 looks like air density in kg/m^3; presumably the other
# factors are drag coefficient and reference area — confirm.
K = 1.225*1.04*.25
m = 1 # in kg
def accl(force, velocity):
    """Net acceleration: applied force minus quadratic drag, over mass m."""
    drag = K * (velocity * velocity)
    return (force - drag) / m
# Forward-Euler integration of velocity under constant thrust with drag.
delta_t = .01 #seconds
f = 3
v = 0
v_plot = []
a_plot = []
samples = 1000
for i in range(samples):
    a = accl(f,v)
    a_plot.append(a)
    v += a*delta_t
    v_plot.append(v)
# Plot acceleration and velocity against the sample index.
plt.plot(list(range(samples)),a_plot)
plt.plot(list(range(samples)),v_plot)
plt.show()
| true |
fa62fda0cc2783d84f0c8fb7850f31cb2bae46ef | Python | hoanghapham/alien_invasion | /ship.py | UTF-8 | 1,906 | 3.765625 | 4 | [] | no_license | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """Player ship sprite: drawn at the bottom centre, moved by flags."""
    def __init__(self, screen, ai_settings):
        """
        Init the ship and its starting position.
        Parameters:
        ----------
        screen: Pass in screen object so the ship can be drawn on that screen.
        ai_settings: pass in setting object
        """
        super().__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load ship image and get rect
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = self.screen.get_rect()
        # Start each new ship at the bottom center of the screen
        # Set position of ship to center - bottom of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        self.center = float(self.rect.centerx)
        # Movement flag
        self.moving_right = False
        self.moving_left = False
    def blitme(self):
        # Draw the ship at its current position
        self.screen.blit(self.image, self.rect)
    def update(self):
        """Update the ship's position based on movement flag"""
        # self.rect.centerx can only hold integers, so to use speed factor
        # we have to update centerx in a roundabout way:
        # pass float centers to self.center then assign self.center to self.rect.centerx
        # The booleans also act as screen-edge guards, multiplying out to 0.
        cond_move_right = self.moving_right and self.rect.right < self.screen_rect.right
        cond_move_left = self.moving_left and self.rect.left > self.screen_rect.left
        self.center += 1 * cond_move_right * self.ai_settings.ship_speed_factor \
                -1 * cond_move_left * self.ai_settings.ship_speed_factor
        self.rect.centerx = self.center
    def center_ship(self):
        # Snap the float position back to the horizontal centre.
        self.center = self.screen_rect.centerx
| true |
ec837067c56c7b4e9f795b7cc6e995381d709586 | Python | descartesmbogning/bibliometric | /jaccard_sac.py | UTF-8 | 2,054 | 2.625 | 3 | [] | no_license | import pandas as pd
import scipy.spatial.distance as sd
import numpy as np
import scipy.spatial as sp, scipy.cluster.hierarchy as hc
import seaborn as sns
from matplotlib import pyplot as plt
# The dataset has to include binary variables SAC = ...
# which indicate if a document is classified to a particular SAC
df = pd.read_excel("binary_sac.xlsx")
# Keep only the binary "SAC = <name>" indicator columns (drop "Multi" ones).
df = df[[x for x in df.columns if ("SAC =" in x and "Multi" not in x)]]
# exclude other irrelevant variables for this analysis
names = [x.split("= ")[1] for x in df.columns]
# Map column position -> SAC name.
d_names = dict([(i, names[i]) for i in range(len(names))])
df_np0 = np.array(df)
def get_matrix(df_np, f=sd.jaccard):
    """Pairwise column-similarity matrix: entry (i, j) is 1 - f(col_i, col_j)."""
    n_cols = df_np.shape[1]
    sim = np.zeros((n_cols, n_cols))
    for col_a in range(n_cols):
        for col_b in range(col_a, n_cols):
            similarity = 1 - f(df_np[:, col_a], df_np[:, col_b])
            sim[col_a, col_b] = similarity
            sim[col_b, col_a] = similarity
    return sim
# first compute the order of SACs
mat0 = get_matrix(df_np0)
linkage = hc.linkage(sp.distance.squareform(1-mat0), method="ward")
cm0 = sns.clustermap(1-mat0, row_linkage=linkage, col_linkage=linkage)
# perm - permuted order of SACs
perm = cm0.data2d.index.tolist()
perm_names = ["SAC = " + d_names[p] for p in perm]
p_names = [d_names[p] for p in perm]
# Reorder the dataframe columns into the clustered order and recompute.
df = df[perm_names]
df_np = np.array(df)
f, ax = plt.subplots(figsize=(20,20), facecolor='w', edgecolor='k')
mat = get_matrix(df_np)
linkage = hc.linkage(sp.distance.squareform(1-mat), method="ward")
# Hide entries that np.tril zeroes out, i.e. everything above the diagonal.
mask = (np.tril(1-mat)==0)
cm = sns.clustermap(1-mat, mask=mask, row_linkage=linkage, col_linkage=linkage, row_cluster=False, vmin=0.5, vmax=1.0, annot=True, fmt='.2f', annot_kws={"fontsize":5}, xticklabels=p_names, yticklabels=p_names, cbar_kws={"shrink": .82})
ax = cm.ax_heatmap
bx = ax.twinx()
bx.set_yticklabels([])
# Draw a black frame around the heatmap.
ax.axhline(y=0, color='k',linewidth=2)
ax.axhline(y=mat.shape[1], color='k',linewidth=2)
ax.axvline(x=0, color='k',linewidth=2)
ax.axvline(x=mat.shape[0], color='k',linewidth=2)
plt.savefig("jaccard.png", dpi=300, bbox_inches = "tight")
b4621dd39469f0cbb23eaab91fe9de9585a44f95 | Python | sakiv93/NFEM_Assignment_Sem2 | /mesh_generation.py | UTF-8 | 899 | 2.625 | 3 | [] | no_license | import numpy as np
from material_parameters import *
#from main_Convergence import nElem
#Input parameters
radius_internal = ri
radius_external = ro
number_of_elements = nElem
#Mesh refinement factor
meshrefinement_factor= meshRefinementFactor
# Growth ratio between neighbouring element widths (geometric grading);
# meshRefinementFactor is the ratio of the last to the first element width.
q=meshrefinement_factor**(1/(number_of_elements-1))
# Width of the first (innermost) element so the series sums to ro - ri.
first_element=(radius_external-radius_internal)*(1-q)/(1-meshrefinement_factor*q)
rnode = radius_internal
#Function to extract co-ordinates of nodes in global system.
def COORDINATES_NODES_GLOBAL(number_of_elements,rnode,first_element,q):
    """Global nodal radii for a geometrically graded 1-D mesh.

    Starting at ``rnode``, appends one node per element; each element is
    ``q`` times as wide as the previous one.  Returns an array of
    number_of_elements + 1 coordinates.
    """
    positions = [rnode]
    width = first_element
    for _ in range(number_of_elements):
        rnode = rnode + width
        positions.append(rnode)
        width = width * q
    return np.array(positions)
# Build the node list, then reshape it into a column vector.
r=COORDINATES_NODES_GLOBAL(number_of_elements,rnode,first_element,q)
rnodes_Transpose=np.array([r])
rnodes=rnodes_Transpose.T
45aecb12b2c1cee95d31407823bd8f34a4d1243b | Python | tantalor/axiom | /axiom.py | UTF-8 | 4,758 | 3.4375 | 3 | [] | no_license | ## 0 (universe)
def compose(step, *args):
    """Yield arg, step(arg), step(step(arg)), ... until step returns None.

    With several initial arguments the whole argument tuple is the state:
    only its first element is yielded, and step receives the tuple unpacked.
    """
    if len(args) > 1:
        state = args
        while state is not None:
            yield state[0]
            state = step(*state)
    else:
        (current,) = args
        while current is not None:
            yield current
            current = step(current)
## 1 (axioms)
def zero():
    """A zero: numbers in this module are nested tuples; zero is ()."""
    return ()
def is_zero(arg):
    """True if the argument is the zero numeral (the empty tuple).

    Bug fix: the original tested ``arg is zero()`` — identity against a
    freshly built ``()``, which only works because CPython caches the empty
    tuple, an implementation detail rather than a language guarantee.
    Equality is the portable test and behaves the same for every numeral
    built in this module.
    """
    return arg == ()
def next(arg):
    """Next object from argument (the successor: wrap in a 1-tuple).

    NOTE: shadows the ``next`` builtin throughout this module; this is
    Python 2-era code and generators here are advanced with ``.next()``.
    """
    return (arg,)
def prev(arg):
    """Inverse of next; zero has no predecessor."""
    if is_zero(arg):
        raise Exception("Nothing is before zero.")
    return arg[0]
## 2
def counting():
    """Yields zero(), next(zero()), next(next(zero())), etc. — all naturals."""
    return compose(next, zero())
def at(g, to):
    """to-th object in the given generator, from zero.

    Consumes ``g`` up to that position; uses Python 2's ``.next()`` method.
    """
    for t in compose(prev, to):
        if is_zero(t): return g.next()
        g.next()
def minus(left, right):
    """Returns (left>right, |left-right|); equal arguments give (False, zero()).

    The tuple-parameter lambda below is Python 2-only syntax.
    """
    step = lambda (l, r): (prev(l), prev(r))
    for (left, right) in compose(step, (left, right)):
        if is_zero(left): return (False, right)
        if is_zero(right): return (True, left)
## 3
def dist(left, right):
    """Returns |left-right| (the magnitude half of minus())."""
    return minus(left, right)[1]
def gt(left, right):
    """Returns left > right (the comparison half of minus())."""
    return minus(left, right)[0]
def add(left, right):
    """left + right: the right-th successor of left."""
    return at(compose(next, left), right)
def up_to(to, g=None):
    """Yields the first ``to`` objects from the given generator.

    Defaults to counting(); advances ``g`` with Python 2's ``.next()``.
    """
    if not g: g = counting()
    for t in compose(prev, to):
        if is_zero(t): return
        yield g.next()
## 4
def eq(left, right):
    """True if left and right are the same values (their distance is zero)."""
    return is_zero(dist(left, right))
def fib():
    """Yields fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, 13, etc."""
    # Python 2-only tuple-parameter lambda; state is the sliding (a, b) pair.
    step = lambda (a, b): (b, add(a,b))
    yield zero()
    for (a, b) in compose(step, (zero(), next(zero()))):
        yield b
def multiples(n):
    """Yields n, 2n, 3n, 4n, etc. (lazy, infinite)."""
    return compose(lambda m: add(m,n), n)
def div(n, d):
    """Returns (q, r) such that n = q * d + r and r < d.

    (The original docstring had n and d swapped: this computes n divided by
    d, consistent with its uses in gcd(), catalan() and the pascal helpers.)
    """
    if is_zero(d):
        raise Exception("Cannot divide by zero")
    # Decrement n once per (q, r) pair; when n hits zero, (q, r) is the answer.
    for q in counting():
        for r in up_to(d):
            if is_zero(n):
                return (q, r)
            else:
                n = prev(n)
## 5
def gcd(a, b):
    """Greatest common divisor of a, b (Euclid's algorithm)."""
    # Python 2-only tuple-parameter lambda; div(a, b)[1] is a mod b.
    step = lambda (a,b): (b, div(a, b)[1])
    for (a, b) in compose(step, (a,b)):
        if is_zero(b):
            return a
def mult(left, right):
    """Left times right; zero when right is zero."""
    if is_zero(right): return zero()
    # The (right-1)-th element of left, 2*left, 3*left, ... is left*right.
    return at(multiples(left), prev(right))
def primes():
    """Yields prime numbers.

    Trial streams: ``known`` holds one (multiples-generator, last multiple)
    pair per discovered prime >= 5; a candidate is composite when some
    stream lands exactly on it.
    """
    known = list() # (generator, last), ...
    two = next(next(zero()))
    three = next(two)
    yield two
    yield three
    # test every 6n-1 and 6n+1
    for six in multiples(add(three, three)): # 6, 12, 18, etc...
        for candidate in (prev(six), next(six)):
            is_prime = True
            # TO DO: use a heap instead
            for (i, (generator, last)) in enumerate(known):
                # Bug fix: advance each prime's multiple stream until it
                # catches up with the candidate.  The original only advanced
                # on exact equality, so multiples that are not of the form
                # 6n+-1 (10, 15, 20, ...) stalled the stream and composites
                # such as 25 and 35 were yielded as primes.
                while gt(candidate, last):
                    last = generator.next()
                known[i] = (generator, last)
                if eq(candidate, last):
                    is_prime = False
            if is_prime:
                yield candidate
                generator = multiples(candidate)
                generator.next() # skip to 2n
                known.append((generator, generator.next()))
def catalan():
    """Yields catalan numbers: 1 1 2 5 14 42 132..."""
    c = next(zero())
    two = next(next(zero()))
    for n in compose(next, next(zero())):
        yield c
        # C(n) = C(n-1) * 2 * (2n - 1) / (n + 1)
        (c, _) = div(mult(c,mult(two,prev(mult(two,n)))), next(n))
## 6
def fact():
    """Yields factorial numbers: 1, 1, 2, 6, 24, 120, etc."""
    # Python 2-only tuple-parameter lambda; state is (n, (n-1)!).
    step = lambda (n, f): (next(n), mult(n, f))
    for (n, f) in compose(step, (next(zero()), next(zero()))):
        yield f
def powers(n):
    """Yields n^0, n^1, n^2, n^3, etc. (lazy, infinite)."""
    return compose(lambda p: mult(p,n), next(zero()))
def pascal_column(k):
    """Yields k-th column of pascal's triangle: C(k,k), C(k+1,k), ..."""
    p = next(zero())
    for n in compose(next, next(k)):
        yield p
        # C(n, k) = C(n-1, k) * n / (n - k)
        (p, _) = div(mult(p, n), dist(n, k))
def pascal_row(n):
    """Yields n-th row of pascal's triangle."""
    # Threads (term, k, n) through _pascal_row_step, starting at C(n, 0) = 1.
    return compose(_pascal_row_step, next(zero()), zero(), n)
def _pascal_row_step(t, k, n):
    # One step of pascal_row: from t == C(n, k) compute C(n, k+1) via
    # C(n, k+1) = C(n, k) * (n - k) / (k + 1); returning None (once the
    # next term would be zero) ends the row.
    k2 = next(k)
    t2, _ = div(mult(t, dist(n, k)), k2)
    if not is_zero(t2):
        return (t2, k2, n)
## 7
def exp(b, p):
    """b raised to the power p: the p-th element of 1, b, b^2, ..."""
    return at(powers(b), p)
def choose(n, k):
    """Returns n choose k; raises when k > n."""
    (positive, diff) = minus(n, k)
    if not positive and not is_zero(diff):
        raise Exception("Out of bounds")
    # C(n, k) is the (n-k)-th entry of Pascal's k-th column.
    return at(pascal_column(k), diff)
## 8
def root(n, p):
    """Returns (b, r) such that n = b ^ p + r, with the largest such b."""
    # Python 2-only tuple-parameter lambda; walks b upward while keeping the
    # previous power in ``last`` so the remainder can be computed on overshoot.
    step = lambda (b, bp, last): (next(b), exp(next(b), p), bp)
    for b, bp, last in compose(step, (zero(), zero(), zero())):
        if eq(bp, n):
            return b, zero()
        if gt(bp, n):
            return prev(b), dist(last, n)
| true |
ea68a3ec7d83892c9895c07e6c7676d5dcc3dd05 | Python | dreisers/python | /section02/06-tuple.py | UTF-8 | 525 | 4.25 | 4 | [] | no_license | # /section02/06-tuple.py
# Basically the same as a list, but the elements assigned at creation
# cannot be modified afterwards. (Lists are used more often in practice.)
grade = (12, 13, 14, 15, 16)
print(grade)
print(grade[0])
#grade[0] = 100 #error: tuples do not support item assignment
#print(grade(0)) #error: parentheses would be a call, not indexing
names = ("홍길동", "무궁화", "진달래", "개나리", "라일락")
print(names)
stud1 = ("봉선화", 90) #mixing element types is also allowed
print(stud1)
print(stud1[0]) # elements are accessed with square brackets
print(stud1[1])
83cceb58ba54d72d471bcaf50ca1190494f3439b | Python | gayathrihogwarts/ML-SMP-2019 | /Assignment 3/Gayathri/sieve.py | UTF-8 | 299 | 3.609375 | 4 | [] | no_license | import math
n = int(input("Enter the a number upto which primes have to be entered : "))
# Sieve of Eratosthenes with a boolean table.  The original kept a list of
# surviving candidates and used ``j in p`` / ``p.remove(j)``, both O(n) per
# composite, making the whole sieve roughly cubic; flag lookups are O(1).
flag = [True] * (n + 1)
i = 2
while (i <= int(math.sqrt(n))):
    if flag[i]:
        # Cross off every multiple of i (same start as the original: 2*i).
        for j in range(i * 2, n + 1, i):
            flag[j] = False
    i = i + 1
# Same output as before: the ascending list of primes in [2, n].
p = [i for i in range(2, n + 1) if flag[i]]
print(p)
| true |
ba375d8e76e8726d48f570bf9c2f86d6d294557c | Python | penelopeia/motion | /color.py | UTF-8 | 284 | 2.875 | 3 | [] | no_license | import cv2
def filter_green(image):
    # image in must be HSV color
    # NOTE(review): the lower and upper bounds share H=120 and S=100, so only
    # the V channel has any range — confirm these thresholds are intended.
    frame = cv2.inRange(image, (120.0000, 100.0000, 19.6078), (120.0000, 100.0000, 100.0000))
    return frame
def filter_white(image):
    # Binary mask of low-saturation, high-value pixels (white-ish regions);
    # expects the same HSV input as filter_green above.
    frame = cv2.inRange(image, (0, 0, 200), (145, 60, 255))
    return frame
| true |
6473e7c325dbe1218442bb6e89cdbe13709c513d | Python | SomethingRandom0768/PythonBeginnerProgramming2021 | /Chapter 4 ( Working With Lists )/Exercises/4-7threes.py | UTF-8 | 46 | 3.515625 | 4 | [] | no_license | for value in range(3, 33, 3):
print(value) | true |
95b28f5cb76d0906e6fa4624e1cd9613568d091f | Python | ninellekam/DataScience | /hw1/a.py | UTF-8 | 1,005 | 4.09375 | 4 | [] | no_license | # Оставить различные
# Дан массив a из n целых чисел. Напишите программу, которая выведет:
# 1. исходный список чисел без дубликатов с сохранением порядка;
# 2. количество чисел, которые были удалены из массива.
# Формат входных данных
# В первой строке число n (1 ≤ n ≤ 100000). Во второй строке записаны n целых чисел ai (1 ≤ ai ≤ 10000).
# Формат результата
# Выведите в первой строке список уникальных чисел. Во второй строке число удаленных элементов.
# Решение:
n = int(input())
b = input()
a = b.split(' ')
niko = 0        # how many duplicate values were dropped
list_tmp = []   # unique values in first-seen order
# Performance fix: ``i not in list_tmp`` scans the list (O(n) per element,
# quadratic overall with n up to 100000); a companion set gives O(1) lookups
# while list_tmp still preserves the original order.
seen = set()
for i in a:
    if i not in seen:
        seen.add(i)
        list_tmp.append(i)
    else:
        niko += 1
print(*list_tmp)
print(niko)
fa1bfd5d15d917dd3263eec5044836e262f10520 | Python | EyeoT/MachineLearning | /get_lightbox_color.py | UTF-8 | 8,948 | 2.890625 | 3 | [] | no_license | import time
import cv2
import numpy as np
kernel = np.ones((5, 5), np.uint8) # constant
# TODO: Implement test color thresholding (Gen?)
class NoBoxError(Exception):
    """Raised when no plausible light-switch bounding box is found.

    The original defined a no-op ``__init__(self)`` that also rejected any
    message arguments; inheriting Exception's constructor is strictly more
    permissive (``raise NoBoxError`` still works unchanged).
    """
    pass
def crop_image(img_full, gaze_data):
    """Crop a (height, width, channels) image to a window centred on the gaze.

    The window spans crop_to_x of the width and crop_to_y of the height,
    clamped to stay inside the frame.  ``gaze_data`` is an (x, y) pair of
    normalised coordinates with y measured from the bottom; if it is missing
    or malformed, the image centre is used instead.
    """
    height, width, channels = img_full.shape
    crop_to_x = .25  # Crop to a fourth of the image
    crop_to_y = .5
    try:
        x_gaze, y_gaze = gaze_data
    except (TypeError, ValueError):
        # Bug fix: was a bare ``except:``, which also swallowed unrelated
        # errors (including KeyboardInterrupt); only unpacking failures
        # should fall back to the centre.
        x_gaze = .5
        y_gaze = .5
    x1 = x_gaze - crop_to_x / 2
    x2 = x_gaze + crop_to_x / 2
    if x1 < 0:
        x1 = 0
        x2 = crop_to_x
    elif x2 > 1:
        x1 = 1 - crop_to_x
        x2 = 1
    y1 = y_gaze - crop_to_y / 2
    y2 = y_gaze + crop_to_y / 2
    if y1 < 0:
        y1 = 0
        y2 = crop_to_y
    elif y2 > 1:
        y1 = 1 - crop_to_y
        y2 = 1
    # Flip the y bounds: image rows run top-to-bottom, gaze y bottom-to-top.
    y1 = 1 - y1
    y2 = 1 - y2
    # Crop is [y1:y2, x1:x2]
    img_crop = img_full[int(height * y2):int(height * y1),
                        int(width * x1):int(width * x2)]
    return img_crop
def convert_to_binary_image(img_trans, K):
    """Binarise an image by K-means colour clustering.

    The majority cluster is painted black and the minority white, then the
    result is collapsed to a single-channel grayscale image.
    NOTE(review): the recolouring below only touches labels 0 and 1, so it
    assumes K == 2 (which is what every caller passes).
    """
    Z = img_trans.reshape((-1, 3))
    Z = np.float32(Z) # convert to np.float32
    # Define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS, 10, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_PP_CENTERS)
    #Find larger label and color it black
    if np.count_nonzero(label) > len(label)/2:
        center[1] = [0,0,0]
        center[0] = [255,255,255]
    else:
        center[0] = [0,0,0]
        center[1] = [255,255,255]
    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    img_bw = center[label.flatten()]
    img_bw_rect = img_bw.reshape((img_trans.shape))
    img_binary = cv2.cvtColor(img_bw_rect, cv2.COLOR_BGR2GRAY)
    return img_binary
def find_bounding_box(img_binary, img_crop):
    """Rotated-rectangle variant of the switch finder.

    Picks the largest min-area rectangle whose aspect ratio matches a light
    switch, then rotates/crops the image down to that rectangle.
    NOTE(review): the 3-value return of cv2.findContours is the OpenCV 3.x
    API; also draws debug windows via cv2.imshow.
    """
    img_contour, contours, hierarcy = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    max_area = 0
    max_dim = []
    max_rect = None
    switch_aspect_ratio = float(119)/75 # Aspect ratio of the lightswitch
    for cnt in contours:
        rect = cv2.minAreaRect(cnt)
        w = min(rect[1])
        h = max(rect[1])
        # Only consider bounding boxes that match our a priori knowledge of light switch dimensions
        if ( (h/w) < (switch_aspect_ratio * 1.27) and ((h/w) > (switch_aspect_ratio * 0.82))):
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(img_crop,[box],0,(0,0,255),2)
            # Keep the largest candidate by area.
            if w*h > max_area:
                max_area = w*h
                max_rect = rect
    cv2.imshow('boxes', img_crop)
    if not max_rect:
        raise NoBoxError
    box = cv2.boxPoints(max_rect)
    box = np.int0(box)
    width, height = max_rect[1]
    Xs = [i[0] for i in box]
    Ys = [i[1] for i in box]
    x1 = min(Xs)
    x2 = max(Xs)
    y1 = min(Ys)
    y2 = max(Ys)
    angle = max_rect[2]
    if angle < -45:
        angle += 90
    # Center of rectangle in source image
    center = ((x1+x2)/2,(y1+y2)/2)
    # Size of the upright rectangle bounding the rotated rectangle
    size = (x2-x1, y2-y1)
    M = cv2.getRotationMatrix2D((size[0]/2, size[1]/2), angle, 1.0)
    # Cropped upright rectangle
    cropped = cv2.getRectSubPix(img_crop, size, center)
    cropped = cv2.warpAffine(cropped, M, size)
    croppedW = min(width, height)
    croppedH = max(width, height)
    cv2.drawContours(img_crop,[box],0,(0,0,255),2)
    # cv2.imshow('box', img_crop)
    # Final cropped & rotated rectangle
    img_lightbox_crop = cv2.getRectSubPix(cropped, (int(croppedW),int(croppedH)), (size[0]/2, size[1]/2))
    # cv2.imshow('lightbox', img_lightbox_crop) # Plot what we are going to average the color of
    return img_lightbox_crop
def euclidean_distance(gaze_data, img_width, img_height, x, y, w, h):
    """Distance between the gaze point (mapped to pixels) and a box centroid.

    ``gaze_data`` is normalised (x, y) with y measured from the bottom, so
    y is flipped before scaling to pixel coordinates.
    """
    centroid_x = x + (w / 2.0)
    centroid_y = y + (h / 2.0)
    gaze_px = gaze_data[0] * img_width
    gaze_py = (1 - gaze_data[1]) * img_height
    dx = centroid_x - gaze_px
    dy = centroid_y - gaze_py
    return np.sqrt(dx ** 2 + dy ** 2)
def find_bounding_box_simple(img_binary, img_crop, gaze_data):
    """Pick the contour bounding box closest to the gaze point.

    Candidates must match the light-switch aspect ratio, have a plausible
    height, and stay clear of the frame edges; returns [x, y, w, h] or
    raises NoBoxError when nothing qualifies or the winner is far from
    the gaze x position.
    """
    img_contour, contours, hierarcy = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    img_height, img_width, img_col = img_crop.shape
    # Minimum distance threshold
    min_distance = np.sqrt(img_width ** 2 + img_height ** 2) * .15
    max_dim = []
    # ignore all bounding boxes found touching or very near the edge of the image frame
    img_width_bound_high = img_width * 0.99
    img_width_bound_low = img_width * 0.01
    img_height_bound_high = img_height * 0.99
    img_height_bound_low = img_height * 0.01
    switch_aspect_ratio = float(119) / 75  # Aspect ratio of the light switch
    img_boxes = img_crop.copy()
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # Only consider bounding boxes that match our a posteriori knowledge of light switch dimensions (w/ parallax)
        if (float(h)/w) < (switch_aspect_ratio * 1.68) and ((float(h)/w) > (switch_aspect_ratio * 0.77)):
            if (h > img_height * 0.065) and (h < img_height * 0.35):
                if (x > img_width_bound_low) and (y > img_height_bound_low) \
                        and (x + w < img_width_bound_high) and (y + h < img_height_bound_high):
                    cv2.rectangle(img_boxes, (x, y), (x + w, y + h), (255, 0, 255), 2)
                    # print('{0} {1} {2} {3}'.format(x, y, w, h))
                    distance = euclidean_distance(gaze_data, img_width, img_height, x, y, w, h)
                    # Keep the candidate nearest the gaze point.
                    if distance < min_distance:
                        min_distance = distance
                        max_dim = [x, y, w, h]
    if not max_dim:
        raise NoBoxError
    # x, y, w, h = max_dim
    # cv2.rectangle(img_crop,(x,y),(x+w,y+h),(255, 0, 255),2)
    # cv2.imshow('full image', img_boxes)
    #img_lightbox_crop = img_trans[int(y):int(y+h), int(x):int(x+w)] # Crop down to just the lightswtich
    #cv2.imshow('lightbox', img_lightbox_crop) # Plot what we are going to average the color of
    # Check if the min_distance is reasonably close to box
    if abs(img_width * gaze_data[0] - max_dim[0]) > (max_dim[2] * 2.0):
        raise NoBoxError
    return max_dim
def get_color(dims, img_trans, img_full):
    """Average the colour of the dark-cluster pixels inside ``dims``.

    ``dims`` is [x, y, w, h]; the region is re-binarised (K=2) and the
    mean BGR colour of the black-mask pixels is classified naively as
    blue/green/red/cream.  Returns (average_color, main_color).
    """
    bw_lightbox = convert_to_binary_image(img_trans[dims[1]:dims[1] + dims[3], dims[0]:dims[0] + dims[2]], 2)
    bw_lightbox = cv2.morphologyEx(bw_lightbox, cv2.MORPH_OPEN, kernel)
    # Pixels of the original image where the binary mask is black.
    color_slice = img_full[dims[1]:dims[1] + dims[3], dims[0]:dims[0] + dims[2]][bw_lightbox == 0]
    average_color = np.uint8(np.mean(color_slice, axis=0)) # Convert to whole RGB values
    color_swatch = np.zeros((bw_lightbox.shape[0], bw_lightbox.shape[1], 3), np.uint8)
    for height in range(0, bw_lightbox.shape[0]):
        for width in range(0, bw_lightbox.shape[1]):
            if bw_lightbox[height][width] == 0:
                color_swatch[height][width] = average_color
    average_color_swatch = np.array([[average_color] * 100] * 100, np.uint8) # Make a color swatch
    # cv2.imshow('bw lightbox', bw_lightbox) # binary version of region of interest (faceplate + switch)
    # cv2.imshow('average color swatch', average_color_swatch) # color swatch just displaying the average color
    # cv2.imshow('color swatch', color_swatch) # average color superimposed over the region of interest
    # naive color determination
    color_classification = {0: 'blue', 1: 'green', 2: 'red', 3: 'cream'} # BGR ordering due to OpenCV
    if abs(int(average_color[1]) - int(average_color[2])) < 10: # if green and red are within 10 of each other, cream
        main_color = color_classification[3]
    else:
        main_color = color_classification[np.argmax(average_color, axis=0)] # Index of max BGR color determines color
    print("Naive Lightbox guess: {0}, BGR: {1} ".format(main_color, average_color))
    return average_color, main_color
def get_box_color(img_full, gaze_data):
    """Locate the light switch nearest the gaze and report its colour.

    Returns (average_color, main_color); falls back to black / 'None'
    when no bounding box is found.
    """
    start_time = time.time()
    # Transform into CIELab colorspace
    img_trans = cv2.cvtColor(img_full, cv2.COLOR_BGR2LAB)
    img_binary = convert_to_binary_image(img_trans, 2)
    img_binary = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, kernel)
    # cv2.imshow('binary', img_binary)
    try:
        img_lightbox_dims = find_bounding_box_simple(img_binary, img_full, gaze_data)
    except NoBoxError:
        print('no box found')
        main_color = 'None'
        average_color = [0, 0, 0] # set to black, since None can cause trouble
        # cv2.waitKey(0)
        return average_color, main_color
    average_color, main_color = get_color(img_lightbox_dims, img_trans, img_full)
    time_taken = time.time() - start_time
    # print(time_taken)
    # cv2.waitKey(0)
    return average_color, main_color
# if __name__ == '__main__':
| true |
16eb28e050c6ea4323854613c8ae1df9632c9d79 | Python | HorizonRobotics/alf | /alf/algorithms/off_policy_algorithm.py | UTF-8 | 2,151 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for off policy algorithms."""
from alf.algorithms.rl_algorithm import RLAlgorithm
class OffPolicyAlgorithm(RLAlgorithm):
    """``OffPolicyAlgorithm`` implements basic off-policy training pipeline. User
    needs to implement ``rollout_step()`` and ``train_step()``.
    - ``rollout_step()`` is called to generate actions at every environment step.
    - ``train_step()`` is called to generate necessary information for training.
    The following is the pseudo code to illustrate how ``OffPolicyAlgorithm``
    is used:
    .. code-block:: python
        # (1) collect stage
        for _ in range(steps_per_collection):
            # collect experience and store to replay buffer
            policy_step = rollout_step(time_step, policy_step.state)
            experience = make_experience(time_step, policy_step)
            store experience to replay buffer
            action = sample action from policy_step.action
            time_step = env.step(action)
        # (2) train stage
        for _ in range(training_steps_per_collection):
            # sample experiences and perform training
            experiences = sample batch from replay_buffer
            batched_train_info = []
            for experience in experiences:
                policy_step = train_step(experience, state)
                add policy_step.info to batched_train_info
            loss = calc_loss(experiences, batched_train_info)
            update_with_gradient(loss)
    """
    @property
    def on_policy(self):
        """Whether this algorithm trains on-policy; always False here."""
        return False
| true |
b305fdd0b5e5dbeb8cd04891b6acc101bb84270d | Python | garnhold/ompnbox | /lang/language_ex.py | UTF-8 | 1,212 | 2.765625 | 3 | [] | no_license | import lang
class LanguageEX(lang.Language):
    """Convenience layer over lang.Language: wraps make_token/make_token_ignore
    with the three TokenDefinition kinds (literal, literal list, pattern)."""
    __slots__ = []
    def __init__(self):
        super(LanguageEX, self).__init__()
    def make_literal_token(self, name, literal, interpreter=None):
        return self.make_token(name, lang.TokenDefinition_Literal(literal), interpreter)
    def make_literal_list_token(self, name, literals, interpreter=None):
        return self.make_token(name, lang.TokenDefinition_LiteralList(literals), interpreter)
    def make_pattern_token(self, name, pattern, interpreter=None):
        return self.make_token(name, lang.TokenDefinition_Pattern(pattern), interpreter)
    def make_literal_token_ignore(self, name, literal):
        return self.make_token_ignore(name, lang.TokenDefinition_Literal(literal))
    def make_literal_list_token_ignore(self, name, literals):
        return self.make_token_ignore(name, lang.TokenDefinition_LiteralList(literals))
    def make_pattern_token_ignore(self, name, pattern):
        return self.make_token_ignore(name, lang.TokenDefinition_Pattern(pattern))
    def make_expression(self, name, term, operands):
        # Fold the operand list into nested repeating terms ("name0", "name1", ...).
        # NOTE(review): ``initilize`` is the (misspelled) API of the external
        # repeating term — do not "fix" the call without changing lang too.
        i = 0
        for operand in operands:
            term = self.create_repeating(name + str(i)).initilize(term, operand)
            i += 1
        return term
f99bcb7814308934159f2e62e73cc0a15985d8ec | Python | gabocode2907/django_reads | /dojo_reads/main/models.py | UTF-8 | 2,030 | 2.515625 | 3 | [] | no_license | from django.db import models
import re
# Create your models here.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9.+_-]+\.[a-zA-Z]')
# VALIDATIONS
class UserManager(models.Manager):
    """Custom manager carrying form-validation helpers for User."""
    def registration_validator(self,postData):
        """Validate a registration form; return a dict of field -> error.

        An empty dict means the data passed validation.
        """
        # ALL THE VALIDATION FOR THE FORM
        errors = {}
        if len(postData['name']) < 2:
            errors['name'] = "Invalid Name. Name must be at least 3 characters"
        if len(postData['alias']) < 2:
            errors['alias'] = "Invalid Alias. Alias must be at least 3 characters"
        if not EMAIL_REGEX.match(postData['email']):
            errors['email'] = "Ivalid Email"
        if len(postData['password']) < 5:
            # NOTE(review): key 'passwrod' is misspelled; kept as-is in case
            # a template already reads it — likely should be 'password'.
            errors['passwrod'] = "Password must be at leaset 6 characters"
        if postData['password'] != postData['confirm_password']:
            errors['pw_match'] = "Password does not match"
        # Bug fix: the original built ``errors`` but never returned it, so
        # every caller received None and validation was a no-op.
        return errors
# MODELS CREATION
class User(models.Model):
    # Registered user; passwords are stored as plain CharFields here.
    name = models.CharField(max_length=50)
    alias = models.CharField(max_length=50)
    email = models.CharField(max_length=50)
    password = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add freezes this at creation; an updated-at
    # column usually wants auto_now=True — confirm intent.
    updated_at = models.DateTimeField(auto_now_add=True)
    objects = UserManager()
class Book(models.Model):
    title = models.CharField(max_length=150)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add freezes this at creation — confirm intent.
    updated_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): reuses UserManager, whose validator is user-specific.
    objects = UserManager()
class Author(models.Model):
    name = models.CharField(max_length=75)
    # Many-to-many: an author writes many books, a book may have co-authors.
    books = models.ManyToManyField(Book,related_name="authors")
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add freezes this at creation — confirm intent.
    updated_at = models.DateTimeField(auto_now_add=True)
    objects = UserManager()
class Review(models.Model):
    # One user's rating and comments on one book.
    content = models.TextField()
    rating = models.IntegerField()
    user_review = models.ForeignKey(User, related_name="user_reviews", on_delete=models.CASCADE)
    book_reviewed = models.ForeignKey(Book, related_name="book_reviews", on_delete=models.CASCADE)
c6013052d702503fa62daf266a8e94fe46ee4a02 | Python | TimothySjiang/leetcodepy | /Solution_33.py | UTF-8 | 1,175 | 3.390625 | 3 | [] | no_license | class Solution:
def search(self, nums, target):
if not nums: return -1
if len(nums) == 1: return 0 if nums[0] == target else -1
pivot = self.findPivot(nums)
if target == nums[pivot]:
return pivot
if pivot == 0:
return self.Bsearch(nums, target)
if target < nums[0]:
ans = self.Bsearch(nums[pivot:], target)
return pivot + ans if ans != -1 else -1
else:
return self.Bsearch(nums[:pivot], target)
    def findPivot(self, nums):
        """Index of the smallest element (the rotation point) via binary search.

        Returns 0 for an unrotated array.  NOTE(review): if the loop ever
        exits without hitting ``nums[mid] > nums[mid + 1]`` it implicitly
        returns None — confirm that is unreachable for valid rotated input.
        """
        if nums[0] < nums[-1]:
            return 0
        l, r = 0, len(nums) - 1
        while l < r:
            mid = l + (r - l) // 2
            # The drop from nums[mid] to nums[mid + 1] marks the pivot.
            if nums[mid] > nums[mid + 1]:
                return mid + 1
            if nums[mid] > nums[l]:
                l = mid + 1
            else:
                r = mid
def Bsearch(self, nums, target):
l, r = 0, len(nums)
while l < r:
mid = l + (r - l) // 2
if nums[mid] == target:
return mid
if nums[mid] < target:
l = mid + 1
else:
r = mid
return -1 | true |
020878e59e62e6f215a4514d8c08ef967ddce946 | Python | MichaelWStein/Mission_to_Mars | /scrape_mars.py | UTF-8 | 3,360 | 2.8125 | 3 | [] | no_license | def scrape():
#Defining the Dictionary to be returned. All data will be stored / appended here.
mars_data = {}
#Importing the required functions:
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import pandas as pd
#News from Mars
url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
executable_path = {'executable_path' : 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
results = soup.find('div', class_='content_title')
results_des = soup.find('div', class_= 'rollover_description_inner')
results2 = results.a.text
results2_des = results_des.text
news_headline = results2
news_text = results2_des
mars_data = {"News": [news_headline, news_text]}
#The latest picture from Mars
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
subtitle = soup.find_all('div', class_='article_teaser_body')
pict_text = subtitle[1].text.strip()
picture = []
for link in soup.find_all('a', class_="fancybox", limit=2):
picture.append(link.get("data-fancybox-href"))
# The Mars image is the second 'fancybox' in the website.
pict_url = ("https://www.jpl.nasa.gov" + picture[1])
mars_data.update({"Picture": [pict_text, pict_url]})
browser.quit()
# Mars Weather
# Getting the Weather report from twitter
url = 'https://twitter.com/marswxreport?lang=en'
response = requests.get(url)
soup = bs(response.text, 'html.parser')
w_results = soup.find('p', class_ = "TweetTextSize TweetTextSize--normal js-tweet-text tweet-text")
# Storing the weather as a string
w_results2 = w_results.text
w_results2 = w_results2[:-26]
mars_weather = w_results2
mars_data.update({"Weather": mars_weather})
#Adding Mars facts (as html-table)
url = "https://space-facts.com/mars/"
tables = pd.read_html(url)
#Transform table into an html-table
df = tables[0]
df.columns=['Category', 'Data']
df.set_index('Category', inplace=True)
html_table = df.to_html()
html_table = html_table.replace('\n', '')
mars_data.update({"Facts": html_table})
#Adding Overview pictures
hemisphere_image_urls = [
{"title": "Cerberus Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg"},
{"title": "Schiapararelli Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg"},
{"title": "Syrtis Major Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg"},
{"title": "Valles Marineri Hemisphere", "img_url" : "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg"}
]
mars_data.update({"Hemispheres": hemisphere_image_urls})
return(mars_data) | true |
a06f8fbdc9072dd3f7e491e53ea9385b92bef5e1 | Python | MACmonster2/cs114 | /madlib.py | UTF-8 | 803 | 3.328125 | 3 | [] | no_license | """Madlib: Doctors Note"""
print('Fill in the blanks.')
print('Silly Word')
silly1=input()
print('Last Name')
name= input()
print('Illness')
illness=input()
print('Plural Noun')
noun=input()
print('Adjective')
adj1=input()
print('Adjective')
adj2=input()
print('Silly Word')
silly2=input()
print('Place')
place=input()
print('Number')
num=input()
print('Adjective')
adj3=input()
print('Dear School Nurse:')
print(silly1,name,'will not be attending school today.He/she has come down with a case of',illness,'and has horrible',noun,'and a/an',adj1,
'fever. We have made an appointment with the',adj2,'Dr.',silly2,', who studied for many years in',place,'and has',num,
'degrees in pediatrics. He will send you all the information you need. Thank you!')
print('Sincerely')
print('Mrs. Wholesale.')
| true |
a6fad1e31b190c45229daf877e887f30641ee0e7 | Python | Pawel095/PJ_Pygame_Prezentacja | /game/sprites/player.py | UTF-8 | 2,094 | 2.71875 | 3 | [
"MIT"
] | permissive | import pygame
import events
import global_vars as g
import loader
from .__base import Base
from .bullets import Bullet
from .bullets import get_bullets_for_shooter
class Player(Base):
def __init__(self, *args, **kwargs):
sprite = loader.assets.get("player")
sprite = pygame.transform.rotozoom(sprite, 180, 0.50)
self.shoot_cooldown = 0.5
self.shoot_cooldown_timer = 0
self.hp = 2
super().__init__(sprite, speed=500, position=(400, 300), *args, **kwargs)
def approach(self, current, target, step=0.1):
delta = target - current
return current + delta * step
def update_timers(self, deltaT):
self.shoot_cooldown_timer += deltaT
def shooting(self):
if events.SHOOT:
if self.shoot_cooldown_timer >= self.shoot_cooldown:
self.shoot_cooldown_timer = 0
Bullet(
"bullet",
self.position,
velocity=(0, -400),
shooter=g.PLAYER_SHOOTER_GROUP,
)
def movement(self):
vx, vy = self.velocity
dx, dy = (0, 0)
if events.LEFT:
dx += -1
if events.RIGHT:
dx += 1
if events.UP:
dy += -1
if events.DOWN:
dy += 1
dx *= self.speed
dy *= self.speed
vx = self.approach(vx, dx)
vy = self.approach(vy, dy)
self.velocity = (vx, vy)
def check_for_bullet_hits(self):
bullets = get_bullets_for_shooter(g.ENEMY_SHOOTER_GROUP)
for b in bullets:
if b.distance_from(self) <= self.hitbox_size:
b.on_hit()
self.hp -= 1
if self.hp <= 0:
self.alive = False
def update(self, deltaT):
self.update_timers(deltaT)
if self.alive:
self.movement()
self.shooting()
self.check_for_bullet_hits()
super().update(deltaT)
def draw(self):
if self.alive:
super().draw()
| true |
b9f11be7e0e1ab29cfbf8b97396a7abaf5555865 | Python | tju-ypan/graptolite | /accuracy/accuracy.py | UTF-8 | 647 | 2.734375 | 3 | [] | no_license | import tensorflow as tf
batch_size = 8
class_num = 10 # 类别数量
# 定义一个logits为神经网络预测的标签结果,shape:(batch_size, )
logits = tf.constant([0, 5, 9, 1, 7, 1, 0, 1])
# 定义一个labels为真实样本号,这里设为全1,shape:(batch_size, )
labels = tf.ones((batch_size, ), dtype=tf.int32)
# 使用tf.metrics.accuracy()计算分类准确率,返回的第一个值即为分类准确率
acc, acc_op = tf.metrics.accuracy(logits, labels)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
print(logits.eval())
print(labels.eval())
print("accuracy:{}".format(acc_op.eval())) | true |
f2782ec0cbc2c6e37ee08dfe5efba5c0496d0614 | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/abc088/B/4983839.py | UTF-8 | 289 | 3.15625 | 3 | [] | no_license | n = int(input())
a = []
a = list(map(int, input().split()))
b = sorted(a, reverse=True) # reverse = True???
# print(b)
# print(b[0])
alice = 0
bob = 0
for i in range(n):
if i % 2 == 0:
alice += b[i]
else:
bob += b[i]
dif = alice - bob
print(dif) | true |
45e28cc853f3d4b64b0764e52184ba6942a9f7fa | Python | bagusdharma/python-dasar | /Intermediate/set_DataStructure.py | UTF-8 | 1,026 | 4.25 | 4 | [] | no_license | # Set Data Structure berguna untuk check dalam list apakah ada yg duplikat atau tidak. bisa dengan 2 cara, yaitu:
# 1. Dengan Loop For
some_list = ['a','b','b','c','a','f']
duplicate = []
for value in some_list:
# jadi ketika ada value yg berjumlah lebih dari 1 / duplikat
if some_list.count(value) > 1:
# kemudian jika valuenya tidak ada sebelumnya pada list 'duplicate', maka element itu di append ke list
if value not in duplicate:
duplicate.append(value)
print duplicate
# 2. dengan Set
print '\n'
duplicates = set([x for x in some_list if some_list.count(x) > 1])
print duplicates
print '\n'
# Method lainnya pada Set -> Intersection = mencari yg valid / sama
valid = set(['yellow', 'red', 'green', 'black'])
input_set = set(['red', 'brown'])
print input_set.intersection(valid)
print '\n'
# Method lainnya pada Set -> Difference = mencari yg invalid
valid = set(['yellow', 'red', 'green', 'black'])
input_set = set(['red', 'brown'])
print input_set.difference(valid)
print '\n'
| true |
b649ffdc1b490cb5deb5a4dec326fc46a816c10e | Python | tom9744/Algorithms | /BOJ/Previous/SW 역량 테스트 준비/기초 (DFS,BFS)/미로 탐색.py | UTF-8 | 900 | 3.328125 | 3 | [] | no_license | # 2178: 미로 탐색
#
# `if node not in visited` 조건문을 사용하지 않고
# '단지 번호 붙이기' 문제와 같이 주어진 그래프의 값을 바꾸는 방법으로
# 방문 처리를 수행했더니 수행시간이 1208ms 에서 132ms 까지 단축되었다.
from collections import deque
N, M = map(int, input().split())
maze = [list(map(int, input())) for _ in range(N)]
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
def BFS(graph, node):
queue = deque()
queue.append(node)
graph[node[0]][node[1]] = -1
while queue:
curr = queue.popleft()
for idx in range(4):
nx = curr[0] + dx[idx]
ny = curr[1] + dy[idx]
if 0 <= nx < N and 0 <= ny < M and graph[nx][ny] == 1:
graph[nx][ny] = graph[curr[0]][curr[1]] - 1
queue.append((nx, ny))
BFS(maze, (0, 0))
print(abs(maze[N - 1][M - 1]))
| true |
63ad82587ea71921ddfd6ec56d9f582dad81cfed | Python | OOPMan/jormungand | /src/jormungand/api/postprocessing.py | UTF-8 | 1,251 | 2.53125 | 3 | [
"MIT"
] | permissive | from yapsy import IPlugin
from extraction import ExtractedDataItem
__author__ = 'adam.jorgensen.za@gmail.com'
class PostProcessedDataItem(ExtractedDataItem):
"""
Overrides the ExtractedDataItem class to provide an indication that an
ExtractedDataItem instance has undergone post-processing.
"""
def __init__(self, seq=None, **kwargs):
self.processed_by = []
self.processing_errors = []
super(PostProcessedDataItem, self).__init__(seq, **kwargs)
class PostProcessingPluginInterface(IPlugin.IPlugin):
"""
Defines an interface for a plugin that processes data extracted from a source and transforms it in some fashion.
"""
def can_process(self, data_model_name, data_model):
"""
Determines whether the plugin can process data associated with a given data model. Returns a bool.
"""
return False
def process(self, data_items, data_model_name, data_model):
"""
For a given data model, processes a list of (UID value, ExtractedDataItem instance) tuples and transforms each
ExtractedDataItem instance into a PostProcessedDataItem instance.
Returns a list of (UID value, PostProcessedDataItem instance) tuples.
"""
return []
| true |
b03b5ba5a82f2ada395157798d32947879b890dd | Python | rahilkhan2512/python_project | /autologin.py | UTF-8 | 314 | 3.171875 | 3 | [] | no_license | import webbrowser
import datetime
link= input("Enter Link:")
hr=int (input("Enter Hour:"))
min=int (input ("Enter mintute:"))
while True:
hour=int(datetime.datetime.now().hour)
minute=int(datetime.datetime.now().minute)
if hour==hr and minute==min:
webbrowser.open(link)
break | true |
2ceacfdfa08bfdadbeb63641716e77933ec7b553 | Python | sharvilkadam/hncs | /crawler.py | UTF-8 | 2,148 | 2.734375 | 3 | [] | no_license | # Date Created: 22-Mar-2017
import requests
from bs4 import BeautifulSoup
import time
from datetime import datetime
import json
start_time = time.time()
print(datetime.now())
# Crawler for navbharattimes : Hindi News Archive
cnt = 0
success = 0
MAX_SUCCESS = -1
DATA_DIR = '../data/json/'
URLS_FILE = '../data/urls.txt'
START = 201
print('{:<7} {:<7} {:<7} {:<10} {}'.format('S.No.', 'Success', 'Status', 'News ID', 'URL'))
with open(URLS_FILE) as urls:
with requests.Session() as s:
for url in urls:
if url:
cnt += 1
if cnt < START:
continue
id = url.split('/')[-1].strip().split('.')[0]
try:
r = s.get(url.strip())
if r.status_code == requests.codes.ok:
article = {}
soup = BeautifulSoup(r.text, 'html.parser')
article['category'] = soup.find('h2', class_='section_name').text.strip()
article['title'] = soup.find('h1').text.strip()
article['date'] = soup.find('div', class_='article_datetime').text.split(':')[-1].strip()
body = BeautifulSoup(str(soup.find('arttextxml')).replace('<br>', '\n').replace('</br>', ''),
'html.parser').text.strip()
# remove extra white spaces
article['body'] = '\n'.join([x.strip() for x in body.split('\n')])
json.dump(article, open('{}{}.json'.format(DATA_DIR, id), 'w', encoding='utf-8'),
ensure_ascii=False)
success += 1
print('{:<7} {:<7} {:<7} {:<10} {}'.format(cnt, success, r.status_code, id, r.url))
except Exception as e:
print('{:<7} {:<7} {:<7} {:<10} {}'.format(cnt, success, -1, id, url))
print(e)
pass
if success == MAX_SUCCESS:
break
print("Total time: %s seconds" % (time.time() - start_time))
| true |
fc1c2ef03a67adde05e0ff2887b0a080a947475c | Python | takakomatu/machineLearningInClass | /pythonProject/Lectures/MLUtilities/SplitClassifier3.py | UTF-8 | 5,611 | 3.28125 | 3 | [] | no_license | # Takaaki Komatsu
import numpy as np
class SplitClassifier:#class
def __init__(self, X, y):#constructor
self.data = np.array(X)
self.labels = np.array(y)
self.size = len(y) # equivalent to self.data.shape[0],
# self.size is number of observations = 20, ten 0s and ten 1s
# Find and order possible label categories
self.classes = sorted(set(self.labels)) #[0,1]
# Initialize the Training Accuracy to 0
self.accuracy = 0
# Iterate over each axis/feature
# range(self.data.shape[1]) gives us number of column
for i in range(self.data.shape[1]): #self.data.shape[1] =2
# Obtain sorted list of feature values
col_values = self.data[:,i].copy()
col_values.sort()
# Iterate over each observation
for j in range(self.size): #self.size =20, ten 0s and ten 1s
# Select values below the current observation
sel = self.data[:,i] <= col_values[j] # col_values has numbers from low to high
#sel = self.data.iloc[:,i].values <= col_values[j]
#self.labels[sel]=['a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'b' 'b' 'b' 'b' 'b' 'b' 'b' 'b' 'b' 'b']
# Determine the number correctly classified, assuming
# that the lower class is class[0]
n_correct = (np.sum(self.labels[sel] == self.classes[0]) +
np.sum(self.labels[~sel] == self.classes[1]))
# bool_array = np.array([True, True, False, True, False])
# my_array = np.array([1,2,3,4,5])
#
# sub_array = my_array[bool_array]
# print(sub_array) these return [1 2 4]
#Determine the accuracy of the current cut
temp_acc=n_correct / self.size # 11/20
# print("sadfdsaf",temp_acc)=print("sdflfdffsf" + str(temp_acc))
cur_acc=max(temp_acc, 1-temp_acc)
#If new cut is an improvement, update attributes
if cur_acc >= self.accuracy:
self.accuracy = cur_acc
self.feature = i# decide which axis we should draw a line along
if(j==len(col_values)-1) : # if j==19, j is the last one
self.threshold = col_values[j]
else:#these only happens if we changed the accuracy.
self.threshold=0.5*(col_values[j]+col_values[j+1])
if cur_acc==temp_acc: # we dont reverse the labels
self.lower_class=self.classes[0]
self.upper_class=self.classes[1]
else: # we reverse the labels
self.lower_class=self.classes[1]
self.upper_class=self.classes[0]
def predict(self, X):
# Create inner function to classify an individual observation
#classifyObject, row is from feature table
def classify_obs(row):
if row[self.feature] <= self.threshold:
return self.lower_class# return either one label such as a, b, 0, 1
else:
return self.upper_class# return either one label such as a, b, 0, 1
# Convert X to a NumPy array
X = np.array(X)
# Apply classify_obs to rows of X
return np.apply_along_axis(classify_obs, 1, X)#does classify_obs take row of X as an argument??
#1 means were applying classify_obs to every row, 0 would mean to every column
# np.apply_along_axis takes and apply classify_obs that to every row such as [0.94233555 0.72765208]
def score(self, X, y): # for test data
X = np.array(X)
y = np.array(y)
predictions = self.predict(X)
num_correct = np.sum(predictions == y)
return num_correct / len(y)
def summary(self):
print('+----------------------------+')
print('| Split Classifier Summary |')
print('+----------------------------+')
print('Number of training observations:', self.size)
print('Axis/Feature split:', self.feature)
print('Threshold value:', self.threshold)
print('Predicted class below threshold:', self.lower_class)
print('Predicted class above threshold:', self.upper_class)
print('Training accuracy:', self.accuracy, '\n')
#//////////////////
x=[[ 1, 1],
[ 2, 2],
[ 3 , 3],
[ 4 , 4],
[ 5 , 5],
[ 6 , 6],
[ 7 , 7],
[ 8 ,8],
[ 9 ,9],
[ 10 , 10],
[ 11 , 11],
[ 12 , 12],
[ 13 , 13],
[ 14 , 14 ],
[ 15 , 15],
[ 16 , 16],
[ 17 , 17],
[ 18 ,18 ],
[ 19 , 19],
[ 20 ,20]]
print(x)
y=["0"]*10 + ["1"]*10# 10 copies of label, outputs
split = SplitClassifier(x,y)
x2=[[ 2, 2],
[ 4, 4],
[ 6, 6],
[ 8 , 8],
[ 10 , 10],
[ 12, 12],
[ 14, 14],
[ 16,16],
[ 18 ,18],
[ 20, 20],
[ 22 ,22],
[ 24 , 24],
[ 26, 26],
[ 28, 28 ],
[ 30 ,30],
[ 32, 32],
[ 34,34],
[ 36,36 ],
[ 38 ,38],
[ 40,40]]
split2=SplitClassifier(x2,y)
split2.summary() #Training accuracy: 1.0 ????
print("What was predicted was: ",split2.predict(x))
#If we are using the same X, or training data, the machine will have 100% accuracy??
print(split2.score(x,y)) | true |
288c81bba11b1753ad38f219631a0cd67bf768d1 | Python | reach950/ynoteios-uitest | /testcase/audio/create_audio_test.py | UTF-8 | 1,285 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""测试创建语音速记"""
__author__ = 'kejie'
import unittest
from testcase import BaseCase
from time import sleep
class TestCreateAudio(BaseCase):
"""测试创建语音速记"""
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def test_create_audio_from_navigator(self):
"""从导航栏语音图标创建语音笔记"""
self.recent_page.open_create_audio()
self.record_page.start_record()
# 获取麦克风权限
self.record_page.accept_alert()
# 录音3s
sleep(3)
self.record_page.pause_record()
# 录音时间大于等于3s
self.assertTrue(self.record_page.get_record_time() >= 3, '语音录制失败')
self.record_page.complete_record()
# 录音完成后,返回到语音速记详情页面
self.assertTrue(self.audio_page.is_audio_page_display())
audio_title = self.audio_page.get_audio_title()
self.audio_page.tap_return_button()
self.recent_page.wait_first_file_sync_success()
self.assertTrue(self.recent_page.is_first_file_title_exist(audio_title), '语音速记创建失败')
if __name__ == '__main__':
unittest.main()
| true |
082de2eced9bb63f6bbb1778a927157bcc7a7a90 | Python | NDSU-CSCI313-Borchert/final-tak-1pm | /Scripts/test_Board.py | UTF-8 | 2,676 | 2.796875 | 3 | [] | no_license | import unittest
from board import *
class test_Board(unittest.TestCase):
def test_5x5_brown_board_can_be_created(self):
size = "5x5"
design = "Brown"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_4x4_brown_board_can_be_created(self):
size = "4x4"
design = "Brown"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_3x3_brown_board_can_be_created(self):
size = "3x3"
design = "Brown"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_5x5_space_board_can_be_created(self):
size = "5x5"
design = "Space"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_4x4_space_board_can_be_created(self):
size = "4x4"
design = "Space"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_3x3_space_board_can_be_created(self):
size = "3x3"
design = "Space"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_5x5_yellow_board_can_be_created(self):
size = "5x5"
design = "Yellow"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_4x4_yellow_board_can_be_created(self):
size = "4x4"
design = "Yellow"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_3x3_yellow_board_can_be_created(self):
size = "3x3"
design = "Yellow"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_5x5_summer_board_can_be_created(self):
size = "5x5"
design = "Summerbreeze"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_4x4_summer_board_can_be_created(self):
size = "4x4"
design = "Summerbreeze"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
def test_3x3_summer_board_can_be_created(self):
size = "3x3"
design = "Summerbreeze"
board_type = str(size) + str(design)
board = Board(board_type)
self.assertTrue(True)
| true |
5725faa12f862529f8fa67f717f8a6eb6fe897c7 | Python | snowflowersnowflake/cv2_project- | /CV/proForCV_test1/tem.py | UTF-8 | 1,107 | 3.828125 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# First create some toy data:
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Creates just a figure and only one subplot
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Creates two subplots and unpacks the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Creates four polar axes, and accesses them through the returned array
fig, axes = plt.subplots(2, 2, subplot_kw=dict(polar=True))
axes[0, 0].plot(x, y)
axes[1, 1].scatter(x, y)
# Share a X axis with each column of subplots
plt.subplots(2, 2, sharex='col')
# Share a Y axis with each row of subplots
plt.subplots(2, 2, sharey='row')
# Share both X and Y axes with all subplots
plt.subplots(2, 2, sharex='all', sharey='all')
# Note that this is the same as
plt.subplots(2, 2, sharex=True, sharey=True)
# Creates figure number 10 with a single subplot
# and clears it if it already exists.
fig, ax=plt.subplots(num=10, clear=True)
plt.show() | true |
4da58fdf9c7cc64d39d7f1aa5d04c9e0a894c265 | Python | subhamb123/Python-Projects | /Level 4/Flags 3.py | UTF-8 | 1,180 | 3.1875 | 3 | [
"MIT"
] | permissive | import pygame
pygame.init()
def circles(window, corner, flag):
if corner == 1:
x = 100
y = 100
modify_x = 100
modify_y = 100
elif corner == 2:
x = 400
y = 100
modify_x = -100
modify_y = 100
elif corner == 3:
x = 400
y = 400
modify_x = -100
modify_y = -100
elif corner == 4:
x = 100
y = 400
modify_x = 100
modify_y = -100
size = 120
for i in range(5):
if flag:
pygame.draw.circle(window, (0, 0, 255), (x, y), size, 3)
else:
pygame.draw.circle(window, (0, 0, 255), (x, y), size)
x += modify_x
y += modify_y
size -= 20
w = pygame.display.set_mode([500, 500])
start_val = 100
flag = True
start = 0
drawing = True
while drawing:
for event in pygame.event.get():
if event.type == pygame.QUIT:
drawing = False
w.fill((0, 0, 0))
circles(w, start + 1, flag)
flag = not flag
start = ((start + 1) % 4)
pygame.display.flip()
pygame.time.wait(1000)
| true |