seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71015513315 | # Title: 우수 마을
# Link: https://www.acmicpc.net/problem/1949
import sys
from collections import defaultdict
sys.setrecursionlimit(10 ** 6)
# PEP 8 (E731): use `def` instead of binding lambdas to names -- the
# functions keep their original names and call signatures.
def read_single_int():
    """Read one line from stdin and return it as an int."""
    return int(sys.stdin.readline().strip())


def read_list_int():
    """Read one line of space-separated ints from stdin as a list."""
    return list(map(int, sys.stdin.readline().strip().split(' ')))
def get_max(vil: int, select: int, dp: list, edges: defaultdict, popls: list, p_node: int):
    """Tree DP: best population sum for the subtree rooted at `vil`.

    `select` is 1 when `vil` itself is chosen (then no neighbor may be)
    and 0 otherwise (each child subtree picks its better option).
    Results are memoized in dp[vil][select]; -1 marks "not yet computed".
    `p_node` is the parent, skipped to avoid walking back up the tree.
    """
    cached = dp[vil][select]
    if cached != -1:
        return cached
    total = popls[vil] if select else 0
    for nxt in edges[vil]:
        if nxt == p_node:
            continue
        child_unselected = get_max(nxt, 0, dp, edges, popls, vil)
        if select:
            total += child_unselected
        else:
            child_selected = get_max(nxt, 1, dp, edges, popls, vil)
            total += max(child_unselected, child_selected)
    dp[vil][select] = total
    return total
def solution(n: int, popls: list, edges: defaultdict):
    """Best total population over the whole tree rooted (arbitrarily) at 1."""
    memo = [[-1, -1] for _ in range(n + 1)]
    root_selected = get_max(1, 1, memo, edges, popls, 0)
    root_skipped = get_max(1, 0, memo, edges, popls, 0)
    return max(root_selected, root_skipped)
def main():
    """Read the tree from stdin and print the best achievable population sum.

    Input format: n, then n populations on one line, then n-1 undirected
    edges (one `a b` pair per line).  Villages are 1-indexed, hence the
    padding zero at position 0 of `popls`.
    """
    n = read_single_int()
    popls = [0] + read_list_int()
    # `defaultdict(list)` is the idiomatic spelling of
    # `defaultdict(lambda: [])` -- identical behavior.
    edges = defaultdict(list)
    for _ in range(n-1):
        a, b = read_list_int()
        edges[a].append(b)
        edges[b].append(a)
    print(solution(n, popls, edges))
if __name__ == '__main__':
main() | yskang/AlgorithmPractice | baekjoon/python/best_vilage_1949.py | best_vilage_1949.py | py | 1,357 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.read... |
12282411056 | import ply.lex as lex
from ply.lex import TOKEN
class Lexer:
    """PLY lexer for polytonic Greek text.

    Each character-class string below enumerates the precomposed variants
    (breathings, accents, iota subscript, ...) of one Greek letter.  WORD
    matches a run of such letters, optionally containing one apostrophe
    (elision).  Newlines and all other characters are consumed silently.
    """
    # Token names PLY requires on the lexer object.
    tokens = (
        "WORD",
        "APOSTROPH",
        "NEWLINE",
        "OTHERS",
    )
    # Upper-case vowels (with all diacritic combinations), rho, consonants.
    large_alpha = "[ΑἈἌᾌἊᾊἎᾎᾈἉἍᾍἋᾋᾋἏᾉΆᾺᾼ]"
    large_epsilon = "[ΕἘἜἚἙἝἛΈῈ]"
    large_eta = "[ΗἨἬᾜἪᾚἮᾞᾘἩἭᾝἫᾛᾛἯᾙΉῊῌ]"
    large_iota = "[ΙἸἼἺἾἹἽἻἿΊῚΪ]"
    large_omicron = "[ΟὈὌὊὉὍὋΌῸ]"
    large_upsilon = "[ΥὙὝὛὟΎῪΫ]"
    large_omega = "[ΩὨὬᾬὪᾪὮᾮᾨὩὭᾭὫᾫᾫὯᾩΏῺῼ]"
    large_rho = "[ΡῬ]"
    large_consonant = "[ΒΓΔΖΘΚΛΜΝΞΠΣΤΦΧΨ]"
    # Lower-case counterparts.
    small_alpha = "[αἀἄᾄἂᾂἆᾆᾀἁἅᾅἃᾃᾃἇᾁάᾴὰᾲᾶᾷᾳ]"
    small_epsilon = "[εἐἔἒἑἕἓέὲ]"
    small_eta = "[ηἠἤᾔἢᾒἦᾖᾐἡἥᾕἣᾓᾓἧᾑήῄὴῂῆῇῃ]"
    small_iota = "[ιἰἴἲἶἱἵἳἷίὶῖϊΐῒῗ]"
    small_omicron = "[οὀὄὂὁὅὃόὸ]"
    small_upsilon = "[υὐὔὒὖὑὕὓὗύὺῦϋΰῢῧ]"
    small_omega = "[ωὠὤᾤὢᾢὦᾦᾠὡὥᾥὣᾣᾣὧᾡώῴὼῲῶῷῳ]"
    small_rho = "[ρῤῥ]"
    small_consonant = "[βγδζθκλμνξπσςτφχψ]"
    # Union of every class above: a regex matching exactly one Greek letter.
    alphabet = "(" + large_alpha + \
        "|" + large_epsilon + \
        "|" + large_eta + \
        "|" + large_iota + \
        "|" + large_omicron + \
        "|" + large_upsilon + \
        "|" + large_omega + \
        "|" + large_rho + \
        "|" + large_consonant + \
        "|" + small_alpha + \
        "|" + small_epsilon + \
        "|" + small_eta + \
        "|" + small_iota + \
        "|" + small_omicron + \
        "|" + small_upsilon + \
        "|" + small_omega + \
        "|" + small_rho + \
        "|" + small_consonant + ")"
    apostroph = "'"
    # One or more letters, optionally followed by apostrophe + more letters.
    word = alphabet + "+" \
        + "(" + apostroph + alphabet + "*" + ")?"

    # The regex for WORD comes from `word` via @TOKEN (too long for a docstring).
    @TOKEN(word)
    def t_WORD(self, t):
        return t

    def t_NEWLINE(self, t):
        "\\r?\\n"
        # NOTE: the docstring above IS the PLY rule regex -- do not edit it.
        # Returning nothing discards the token.
        pass

    def t_OTHERS(self, t):
        "."
        # Any single other character is matched and discarded.
        pass

    def t_error(self, t):
        # PLY error hook; silently ignore unmatchable input.
        pass

    def __init__(self):
        # The underlying PLY lexer; built lazily via build().
        self.lexer = None

    def build(self, **kwargs):
        """Construct the PLY lexer from this module's rules."""
        self.lexer = lex.lex(module=self, **kwargs)

    def test(self, data):
        """Tokenize `data` and print every token (manual debugging aid)."""
        self.lexer.input(data)
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            print(tok)
if __name__ == "__main__":
    import sys
    # Manual smoke test: tokenize the text given as the first CLI argument.
    lexer = Lexer()
    lexer.build()
    lexer.test(sys.argv[1])
| ohmin839/pyplgr | pyplgr/plgrcoll/lexer.py | lexer.py | py | 2,770 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ply.lex.TOKEN",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "ply.lex.lex",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 89,
... |
74863567394 | import time
import gql
from gql.transport.aiohttp import AIOHTTPTransport
from gql.transport.exceptions import TransportError
class MidosHouse:
    """Client for the Mido's House GraphQL API with a cached goal-name list."""

    def __init__(self):
        self.client = gql.Client(transport=AIOHTTPTransport(url='https://midos.house/api/v1/graphql'))
        # Cached list of goal names; None means "unknown / last fetch failed".
        self.cache = None
        # Cache is stale once time.monotonic() passes this value; seeding it
        # with the current time forces a fetch on the first call.
        self.cache_expires_at = time.monotonic()

    async def handles_custom_goal(self, goal_name):
        """Return True when Mido's House lists `goal_name` among its goals.

        The goal list is refetched at most once every 24 hours.  On a
        transport error the cache is cleared, retried after 60 seconds,
        and False is returned in the meantime (assume the site is down).
        """
        if time.monotonic() >= self.cache_expires_at:
            try:
                query = gql.gql("""
                    query {
                        goalNames
                    }
                """)
                response = await self.client.execute_async(query)
                self.cache_expires_at = time.monotonic() + 60 * 60 * 24
                self.cache = response['goalNames']
            except TransportError: # if anything goes wrong, assume Mido's House is down and we should handle the room
                self.cache_expires_at = time.monotonic() + 60
                self.cache = None
        if self.cache is None:
            return False
        return goal_name in self.cache
| deains/ootr-randobot | randobot/midos_house.py | midos_house.py | py | 1,113 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "gql.Client",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "gql.transport.aiohttp.AIOHTTPTransport",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.monotonic",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time... |
27576774668 | import numpy as np
import cv2 as cv
# Load the test image; cv.imread returns None (no exception) when the file
# is missing or unreadable, so fail loudly instead of crashing in cvtColor.
img = cv.imread("Resources/test.png")
if img is None:
    raise FileNotFoundError("Could not read image: Resources/test.png")

# Convert to single-channel grayscale before edge detection.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("Gray", gray)

# Laplacian highlights rapid intensity changes (edges).  CV_64F keeps the
# negative responses; rectify and cast back to uint8 for display.
lap = cv.Laplacian(gray, cv.CV_64F)
lap = np.uint8(np.absolute(lap))
cv.imshow("Laplacian", lap)
cv.waitKey(0) | SafirIqbal/Demo-repo | Laplacian.py | Laplacian.py | py | 261 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_n... |
14841507583 | #!/usr/bin/env python
import argparse
from termcolor import colored
from app import App
from constants import ERROR_COLOR, KEY_COLOR, INFO_COLOR
from utils import did_you_mean
def run():
    """CLI entry point: parse arguments and dispatch to an App action.

    The first positional token names the App method to invoke; remaining
    tokens are forwarded to it as arguments.  With no tokens the `help`
    action runs.  Unknown commands print a did-you-mean suggestion.
    """
    parser = argparse.ArgumentParser(description='Command line key-value store.',
                                     add_help=False, )
    parser.add_argument('action', metavar='action', nargs='*', help='action')
    args = parser.parse_args()
    # Default to the help action when nothing was supplied.
    command = args.action[0] if args.action else 'help'
    application = App()
    if not hasattr(application, command):
        # Unknown command: suggest the closest supported one.
        print(colored('Command `', ERROR_COLOR) +
              colored(command, INFO_COLOR) +
              colored('`not found, Did you mean `', ERROR_COLOR) +
              colored(did_you_mean(command, App.SUPPORTED_COMMANDS), KEY_COLOR) +
              colored('`', ERROR_COLOR))
    else:
        sub_commands = args.action[1:] if args.action else None
        if sub_commands:
            application.call(command, *sub_commands)
        else:
            application.call(command)
if __name__ == "__main__":
run()
| vinu76jsr/kaboom | kaboom/kaboom.py | kaboom.py | py | 1,158 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.App",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "constants.ERROR_COL... |
12834853666 | import json
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
    """ORM row for the `users` table: basic demographics plus one
    free-text field per level of Maslow's hierarchy of needs."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)  # autoincrementing primary key
    age = Column(Integer)
    income = Column(Integer)
    # One free-form text column per need level.
    physiological_need = Column(String)
    safety_need = Column(String)
    love_and_belonging_need = Column(String)
    esteem_need = Column(String)
    self_actualization_need = Column(String)
def create_user(db_file: str, age: int, income: int, physiological_need: str, safety_need: str, love_and_belonging_need: str, esteem_need: str, self_actualization_need: str):
    """Insert a new user row into the SQLite database at `db_file`.

    Creates the schema on first use.  Returns the new row's primary key.
    """
    engine = create_engine(f'sqlite:///{db_file}')
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        new_user = User(age=age,
                        income=income,
                        physiological_need=physiological_need,
                        safety_need=safety_need,
                        love_and_belonging_need=love_and_belonging_need,
                        esteem_need=esteem_need,
                        self_actualization_need=self_actualization_need)
        session.add(new_user)
        session.commit()
        # Read the autogenerated id before the session closes; afterwards
        # the instance is detached and the attribute access could fail.
        return new_user.id
    finally:
        # The original leaked the session (and its connection) every call.
        session.close()
def update_user(db_file="user.db", user_id="username", age=None, income=None, physiological_need=None, safety_need=None, love_and_belonging_need=None, esteem_need=None, self_actualization_need=None):
    """Update the supplied fields of user `user_id`; None values are skipped.

    Returns "SUCCESS", or "FAILURE" when no row with that id exists.
    """
    engine = create_engine(f'sqlite:///{db_file}')
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        user = session.query(User).filter_by(id=user_id).first()
        if user is None:
            return "FAILURE"
        # Apply only the fields the caller actually supplied (the original
        # repeated one `if x is not None` block per column).
        updates = {
            'age': age,
            'income': income,
            'physiological_need': physiological_need,
            'safety_need': safety_need,
            'love_and_belonging_need': love_and_belonging_need,
            'esteem_need': esteem_need,
            'self_actualization_need': self_actualization_need,
        }
        for field, value in updates.items():
            if value is not None:
                setattr(user, field, value)
        session.commit()
        return "SUCCESS"
    finally:
        # Close the session; the original leaked it on every call.
        session.close()
def get_user(db_file: str, user_id: int):
    """Fetch user `user_id` as a plain dict of column name -> value.

    Returns None when no such row exists (the original raised
    AttributeError when `first()` came back empty).
    """
    engine = create_engine(f'sqlite:///{db_file}')
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        user = session.query(User).filter_by(id=user_id).first()
        if user is None:
            return None
        # Convert user object to dictionary
        user_dict = {column.name: getattr(user, column.name)
                     for column in user.__table__.columns}
    finally:
        # Close the session; the original leaked it on every call.
        session.close()
return user_dict | nicholascgilpin/lifeman | database.py | database.py | py | 2,710 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 10,
"usage_type": "argument"
},
{
... |
2071335048 | import json
import logging
import operator
import os
import re
import sys
import time

import requests
def get_logger(name):
    """Build a logger configured from DISKU_LOG_FILE / DISKU_LOG_LEVEL.

    Output goes to the file named by DISKU_LOG_FILE when set, otherwise
    to stdout.  Repeated calls with the same name reuse the existing
    logger without stacking duplicate handlers (the original added a new
    handler on every call, duplicating each log line).
    """
    logger = logging.getLogger(name)
    logger.setLevel(os.getenv('DISKU_LOG_LEVEL', 'ERROR'))
    if logger.handlers:
        # Already configured for this name; adding another handler would
        # emit every message multiple times.
        return logger
    log_file = os.getenv('DISKU_LOG_FILE')
    if log_file:
        log_handler = logging.FileHandler(log_file)
    else:
        log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        '%Y-%m-%d %H:%M:%S %Z'
    ))
    logger.addHandler(log_handler)
    return logger
# Module-level logger, plus a breadcrumb confirming the module was imported.
logger = get_logger(__name__)
logger.debug('DISKU module loaded')
class CaseInsensitiveDict(dict):
    """A dict whose key lookups ignore case.

    Keys keep the casing they were first stored with; lookups, writes and
    deletes using any casing resolve to that stored key.  Every access
    scans the key list, so this is only suitable for small mappings
    (config/status dicts).
    """

    def __repr__(self):
        return 'CaseInsensitiveDict(%r)' % super().__repr__()

    def _find_key(self, key):
        # Return the stored key matching `key` case-insensitively, or
        # `key` itself when nothing matches (i.e. a brand-new entry).
        # XXX: iterate over self.keys() is slow
        lkey = key.lower()
        for k in self.keys():
            if k.lower() == lkey:
                return k
        return key

    def __lower_keys__(self):
        # FIXME: handle collisions
        return [k.lower() for k in self.keys()]

    def __contains__(self, key):
        # BUG FIX: the old implementation tested the *original-cased*
        # stored key against the lowercased key list, so membership
        # failed whenever a stored key contained an uppercase letter
        # (e.g. {'Free': 1} claimed 'free' was absent).
        return key.lower() in self.__lower_keys__()

    def __getitem__(self, key):
        return super().__getitem__(self._find_key(key))

    def __setitem__(self, key, val):
        return super().__setitem__(self._find_key(key), val)

    def __delitem__(self, key):
        return super().__delitem__(self._find_key(key))

    def get(self, key, defval=None):
        return super().get(self._find_key(key), defval)
class ConfigProxy:
    """Read-only view of a config mapping restricted to one namespace.

    A key `x` is looked up as '<namespace>.x' in the underlying
    (case-insensitive) source mapping.
    """
    # TODO: it's read-only, what about set() and __setitem__?
    # consider implement full MutableMapping iter
    def __init__(self, src, namespace):
        # XXX: namespace will become case-insensitive too
        self.src = CaseInsensitiveDict(src)
        self.namespace = namespace

    def key(self, k):
        # Fully-qualified key inside the source mapping.
        return '%s.%s' % (self.namespace, k)

    def __contains__(self, key):
        # BUG FIX: the original referenced the undefined name `k`, so any
        # `in` test raised NameError instead of answering the question.
        return self.key(key) in self.src

    def __getitem__(self, key):
        return self.src[self.key(key)]

    def get(self, key, defval=None):
        return self.src.get(self.key(key), defval)
def _find_subclass(parent, name):
name = (name + parent.__name__).lower()
for klass in parent.__subclasses__():
if klass.__name__.lower() == name:
return klass
raise KeyError('Can not find subclass of %s (name: %s)' % (parent.__name__, name))
class BinaryOperator:
    """Callable wrapper around a binary operator symbol.

    The symbol is resolved against an explicit whitelist from the
    `operator` module instead of being eval()'d into a lambda, closing
    the code-injection hole the old FIXME pointed out.  Unsupported
    symbols raise ValueError at construction time.
    """

    # Comparison operators (all that AlertCheck.parse can produce) plus
    # the basic arithmetic ones, for parity with the old eval-based form.
    _OPS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '==': operator.eq,
        '!=': operator.ne,
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '%': operator.mod,
    }

    def __init__(self, op):
        self.op = op
        try:
            self.f = self._OPS[op]
        except KeyError:
            raise ValueError('Unsupported operator: %r' % op)

    def __call__(self, l, r):
        return self.f(l, r)

    def __repr__(self):
        return 'BinaryOperator(%r)' % self.op
def parse_size_string(s, suffixes='KMGTPEZY'):
    """Parse '123', '5K', '2M', ... into a byte count (1024-based units)."""
    unit_pos = suffixes.find(s[-1].upper())
    if unit_pos < 0:
        # No recognized suffix: the whole string is a plain integer.
        return int(s)
    return int(s[:-1]) * 1024 ** (unit_pos + 1)
def parse_time_interval(s):
    """Parse '90', '10s', '5m', '2h1d', '1s2m3h4d', ... into seconds.

    A bare integer is taken as seconds; otherwise the string must be a
    sequence of <value><unit> pairs with units s/m/h/d (whitespace
    allowed between pairs).  Raises ValueError for anything else.
    """
    suffixes = {
        's': 1,
        'm': 60,
        'h': 60*60,
        'd': 60*60*24,
    }
    try:
        return int(s)
    except ValueError:
        pass
    # Raw strings throughout: '\s' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Pythons, error-prone in general).
    re_obj = r'(?P<val>[1-9]\d*)(?P<unit>[smhd])'
    if not re.fullmatch(r'(?:(?:{})+|\s)+'.format(re_obj), s):
        raise ValueError('Invalid time value', s)
    t = 0
    for m in re.finditer(re_obj, s):
        val, unit = m.groups()
        t += int(val) * suffixes[unit]
    return t
class AlertCheck:
    """Parses and evaluates disk-usage alert conditions.

    A condition string like "free < 5G, used > 95%" is compiled into
    (variable, operator, threshold, raw-text) tuples.  Calling the
    instance with a disk-usage mapping returns the raw text of the first
    matching condition, or False when none match.
    """

    def __init__(self, conditions):
        self.conditions = []
        if not self.parse(conditions):
            raise ValueError('Invalid condition(s): %r' % conditions)

    def parse(self, conditions):
        """Compile the comma-separated condition string into self.conditions.

        Returns True on success, False as soon as one clause fails to parse.
        """
        def parse_val(v):
            # '95%' -> 0.95 (fraction of total); '5G' / '123' -> byte count.
            if v[-1] == '%':
                return int(v[:-1]) / 100.0
            return parse_size_string(v)
        re_cmp = re.compile(
            r'(?P<var>\w+)' # variable name
            r'\s*' # spaces are allowed
            r'(?P<op>[<>]=?|==)' # compare operator
            r'\s*' # spaces are allowed
            r'(?P<val>' # begin value group
            r'100%|0%|[1-9]\d?%|' # percentage
            r'[1-9]\d*[KMGTP]?' # size
            r')' # end value group
        )
        for cond in re.split(r'\s*,\s*', conditions):
            m = re.fullmatch(re_cmp, cond)
            if not m:
                logger.error('Can not parse condition: %s', cond)
                return False
            raw = m.group(0)
            var, op, val = m.groups()
            self.conditions.append((var, BinaryOperator(op), parse_val(val), raw))
            logger.debug('Parsed condition: %s %s %s', var, op, val)
        return True

    @staticmethod
    def validate_params(disk_usage):
        # The three absolute fields every usage snapshot must provide.
        return 'used' in disk_usage and \
            'free' in disk_usage and \
            'total' in disk_usage

    def __call__(self, disk_usage):
        """Return the raw text of the first matching condition, else False.

        Percentage thresholds (floats) are compared against the derived
        used_p/free_p fractions.  NOTE(review): a condition on a variable
        missing from the mapping would compare None against a number and
        raise TypeError -- only used/free (and their _p forms) are safe.
        """
        du = CaseInsensitiveDict(disk_usage)
        if not self.validate_params(du):
            raise ValueError('Invalid disk usage status object')
        du['used_p'] = du['used'] / du['total']
        du['free_p'] = du['free'] / du['total']
        for var, op, val, raw in self.conditions:
            if isinstance(val, float):
                # Float threshold == percentage: switch to the fractional field.
                var += '_p'
            if op(du.get(var), val):
                return raw
        return False
class AlertChannel:
    '''
    Abstraction class for alert channels
    '''
    # Already-constructed channel instances, keyed by configured name.
    _channel_cache = {}

    @classmethod
    def load(cls, config):
        """Instantiate (or fetch from cache) the channel named in `config`.

        The `disku.alert_channel` value selects a subclass by name (e.g.
        'Webhook' -> WebhookAlertChannel); the instance is prepared once
        and reused on subsequent loads.
        """
        name = config['disku.alert_channel']
        cached = cls._channel_cache.get(name)
        if cached:
            return cached
        try:
            klass = _find_subclass(cls, name)
        except KeyError:
            logger.error('Can not find AlertChannel sublcass: %s', name)
            raise
        instance = klass(ConfigProxy(config, name.lower()))
        instance.prepare()
        return cls._channel_cache.setdefault(name, instance)

    def __init__(self, config):
        self.config = config

    def prepare(self):
        # Optional one-time setup hook for subclasses.
        pass

    def fire(self, message):
        # BUG FIX: `raise NotImplemented()` raised
        # "TypeError: 'NotImplementedType' object is not callable" --
        # NotImplemented is a comparison sentinel, not an exception.
        # NotImplementedError correctly marks this as abstract.
        raise NotImplementedError()
class WebhookAlertChannel(AlertChannel):
    '''
    Webhook alert channel provider, it's made for Slack/Mattermost compatible
    webhook interface
    '''
    def prepare(self):
        # Optional `mixin` config: a JSON object merged into every payload
        # (e.g. channel/username overrides).  Malformed JSON degrades to {}.
        try:
            self.mixin = json.loads(self.config.get('mixin', '{}'))
            logger.info('mixin: %r', self.mixin)
        except json.decoder.JSONDecodeError:
            self.mixin = {}

    def fire(self, message):
        """POST `message` to the configured webhook URL.

        Returns True on HTTP 200, False otherwise.  Network errors are
        logged and swallowed -- alerting must never crash the caller.
        """
        data = dict(self.mixin)
        data.update({'text': message})
        logger.debug('webhook sent message: %r', data)
        try:
            resp = requests.post(self.config['url'], json=data)
            logger.debug('response: %r', resp)
            return resp.status_code == 200
        except Exception as e:
            logger.exception('Error during sending http request: %r', e)
            return False
class AlertBuffer:
    """Collects alerts and flushes them at most once per `interval` seconds.

    Between flushes, alerts are deduplicated by identifier (the latest
    payload wins).  `fire` is the callback invoked with the buffered
    dict at flush time.
    """

    def __init__(self, interval, fire):
        self.interval = interval
        self.next_time = 0  # 0 => first push always flushes
        self.buffer = {}
        self.fire = fire

    def push(self, identifier, data):
        """Record an alert; flush the whole buffer if the interval elapsed."""
        self.buffer[identifier] = data
        if time.time() < self.next_time:
            return
        logger.info('Flushing buffer')
        self.fire(self.buffer)
        self.buffer = {}
        self.next_time = time.time() + self.interval
def test():
    """Ad-hoc smoke test: exercise condition parsing, evaluation against a
    few simulated usage snapshots, and time-interval parsing, logging all
    results for eyeballing."""
    try:
        checker = AlertCheck('FREE == 100G, FREE <\t 5G, USED >10G, USED>95%')
        logger.info('Success!')
    except ValueError as e:
        logger.exception('Can not parse condition, error: %r', e)
    for c in checker.conditions:
        logger.info(c)
    GB = 2 ** 30
    # Each call logs either the first matching condition's raw text or False.
    logger.info(checker({ 'total': 100 * GB, 'used': 96 * GB, 'free': 4 * GB }))
    logger.info(checker({ 'total': 100 * GB, 'used': 90 * GB, 'free': 10 * GB }))
    logger.info(checker({ 'total': 100 * GB, 'used': 9 * GB, 'free': 91 * GB }))
    logger.info(checker({ 'total': 100 * GB, 'used': 0 * GB, 'free': 100 * GB }))
    for t in '5 10s 10m 2h 1d 24h9d 1s1m1h 1s2m3h4d'.split():
        logger.info('%-10s %d', t, parse_time_interval(t))
if __name__ == '__main__':
test()
| Inndy/disku | disku.py | disku.py | py | 8,386 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
... |
72116071075 | """
Utilities for testing
"""
from collections import namedtuple
import numpy as np
from .callbacks import ConvergenceCallback
from .cpd import GaussianCPD
from .irt import BayesNetLearner
from .node import Node
# --- Shared sizes / counts for the tests that use these utilities ---
EPSILON = 1e-3
NUM_TRIALS = 3
NUM_ITEMS = 50
NUM_INSTR_ITEMS = 0
NUM_LATENT = 1
NUM_CHOICES = 2
NUM_RESPONSES = 500
NUM_STUDENTS = 20
PROB_CORRECT = 0.5
# --- Default distribution parameters for simulated IRT-style models ---
THETA_MU = 0.0
THETA_SIGMA = 1.0
NONOFFSET_COEFF_SHAPE_GEN = 100.0
NONOFFSET_COEFF_SCALE_GEN = 0.01
NONOFFSET_COEFF_SHAPE = 2.0
NONOFFSET_COEFF_SCALE = 0.5
OFFSET_COEFF_MU = 0.0
OFFSET_COEFF_SIGMA = 1.0
INSTR_COEFF_MU = 0.05
INSTR_COEFF_SIGMA = 0.025
THETA_OFFSETS_SIGMA = 0.05

# Container for simulated response data: per-response correctness flags plus
# the student index and item index of each response.
ResponseData = namedtuple('ResponseData', ['correct', 'student_idx', 'item_idx'])
def log_norm_pdf(x, mu=0.0, var=1.0):
    """ Evaluate the log normal pdf.

    :param np.ndarray|float x: point at which to evaluate the log norm pdf
    :param np.ndarray|float mu: mean of the normal distribution
    :param np.ndarray|float var: variance of the normal distribution.
    """
    residual = x - mu
    # log N(x; mu, var) = -1/2 [ log(2 pi var) + (x - mu)^2 / var ]
    return -0.5 * (np.log(2. * np.pi * var) + residual ** 2 / var)
# Half-step size for central differences, and tolerance for approximate
# equality checks in tests.
FINITE_DIFF_EPSILON = 1e-6
ALMOST_EQUAL_EPSILON = 1e-4
def finite_diff_grad(x, func, epsilon=FINITE_DIFF_EPSILON):
    """ Approximate the derivative of a function using finite difference.

    Central differences: (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps) per
    element, perturbing shared forward/backward copies of x in place.

    :param np.ndarray x: point at which to evaluate derivative
    :param function func: function with which to take finite differences.
    :param float epsilon: half-width of the central-difference step
    :return: array of partials, same shape as x
    """
    fwd_x = np.copy(x)
    bwd_x = np.copy(x)
    fwd_xx = fwd_x.ravel()
    bwd_xx = bwd_x.ravel()
    y = np.zeros(x.shape)
    yy = y.ravel()
    # `range` instead of the Python-2-only `xrange` (NameError under py3;
    # `range` behaves identically here on both versions).
    for i in range(x.size):
        fwd_xx[i] += epsilon
        bwd_xx[i] -= epsilon
        yy[i] = (func(fwd_x) - func(bwd_x)) / 2.0 / epsilon
        fwd_xx[i] -= epsilon
        bwd_xx[i] += epsilon
    return y
def finite_diff_hessian(x, grad, epsilon=FINITE_DIFF_EPSILON):
    """ Approximate the Hessian of a function using finite difference in the partial gradient.

    :param np.ndarray x: point at which to evaluate derivative
    :param function grad: function that returns the gradient
    :param float epsilon: half-width of the central-difference step
    :return: (x.size, x.size) array of second partials
    """
    fwd_x = np.copy(x)
    bwd_x = np.copy(x)
    fwd_xx = fwd_x.ravel()
    bwd_xx = bwd_x.ravel()
    y = np.zeros((x.size, x.size))
    # `range` instead of the Python-2-only `xrange` (NameError under py3).
    for i in range(x.size):
        for j in range(x.size):
            fwd_xx[i] += epsilon
            bwd_xx[i] -= epsilon
            y[i, j] = (grad(fwd_x).ravel()[j] - grad(bwd_x).ravel()[j]) / 2.0 / epsilon
            fwd_xx[i] -= epsilon
            bwd_xx[i] += epsilon
    return y
def finite_diff_hessian_diag(x, grad, epsilon=FINITE_DIFF_EPSILON):
    """ Approximate the diagonal of the Hessian of a function using finite difference in the
    partial gradient.

    :param np.ndarray x: point at which to evaluate derivative
    :param function grad: function that returns the gradient
    :param float epsilon: half-width of the central-difference step
    :return: array of diagonal second partials, same shape as x
    """
    fwd_x = np.copy(x)
    bwd_x = np.copy(x)
    fwd_xx = fwd_x.ravel()
    bwd_xx = bwd_x.ravel()
    y = np.zeros(x.shape)
    yy = y.ravel()
    # `range` instead of the Python-2-only `xrange` (NameError under py3).
    for i in range(x.size):
        fwd_xx[i] += epsilon
        bwd_xx[i] -= epsilon
        yy[i] = (grad(fwd_x).ravel()[i] - grad(bwd_x).ravel()[i]) / 2.0 / epsilon
        fwd_xx[i] -= epsilon
        bwd_xx[i] += epsilon
    return y
def generate_data(num_students=NUM_STUDENTS,
                  num_items=NUM_ITEMS,
                  num_responses=NUM_RESPONSES,
                  prob_correct=PROB_CORRECT):
    """ Simulate student response data (independently of any parameters).

    :param int num_students: Number of unique student ids.
    :param int num_items: number of assessment items
    :param int num_responses: number of responses to generate
    :param float prob_correct: probability of correct (probability of choosing first choice when
        num_choices > 1; probability of other choices are all equal)
    :return: the response data
    :rtype: ResponseData
    """
    correct = np.random.rand(num_responses) < prob_correct
    num_responses_per_student, remainder = divmod(num_responses, num_students)
    unique_student_ids = range(num_students)
    student_idx = [reg_id for reg_id in unique_student_ids for _ in
                   range(num_responses_per_student)]
    # If num_responses can't be perfectly divided into students, add the remaining responses
    # to the last student id:
    student_idx.extend([unique_student_ids[-1]] * remainder)
    student_idx = np.array(student_idx)
    # np.random.random_integers was deprecated in NumPy 1.11 and removed in
    # 1.25; randint's exclusive upper bound makes this call equivalent to
    # the old inclusive high=num_items-1.
    item_idx = np.random.randint(low=0, high=num_items, size=num_responses)
    np.random.shuffle(student_idx)
    return ResponseData(correct, student_idx, item_idx)
class MockNode(Node):
    """
    A test node class that stores the evidence terms passed into it and does nothing with them,
    and whose update method returns a dictionary with param node names
    """
    def __init__(self, *args, **kwargs):
        super(MockNode, self).__init__(*args, **kwargs)
        # Accumulates every evidence term ever passed to update().
        self.obtained_evidence_terms = {}

    def update(self, evidence_terms=None):
        """ An update function that stores all the evidence infos passed to it, and sets its
        log_prob to a random Gaussian value

        :param list evidence_terms: evidence information passed into the node
        :return: the names of all param nodes
        :rtype: dict[Node, str]
        """
        if evidence_terms is not None:
            self.obtained_evidence_terms.update(evidence_terms)
        self.log_prob = np.random.randn()
        # `.values()` instead of the Python-2-only `.iteritems()` (the key
        # was unused anyway); behaves identically and also runs under py3.
        return {v: v.name for v in self.param_nodes.values()}
class MockLearner(BayesNetLearner):
    """
    A learner with the following graph of TestNodes (directed edges pointing down):
          A
          |
          B
         / \
        C   D
         \ / \
          E   F
    """
    def __init__(self):
        cpd = GaussianCPD(dim=1)
        # Chain A -> B, fan-out B -> {C, D}; leaf E takes its mean from C and
        # its precision from D, leaf F takes its mean from D.
        node_a = MockNode(name='A', data=None, cpd=cpd)
        node_b = MockNode(name='B', data=None, cpd=cpd, param_nodes={'mean': node_a})
        node_c = MockNode(name='C', data=None, cpd=cpd, param_nodes={'mean': node_b})
        node_d = MockNode(name='D', data=None, cpd=cpd, param_nodes={'mean': node_b})
        node_e = MockNode(name='E', data=None, cpd=cpd, param_nodes={'mean': node_c,
                                                                     'precision': node_d})
        node_f = MockNode(name='F', data=None, cpd=cpd, param_nodes={'mean': node_d})
        # One iteration with a plain convergence callback: tests only need
        # the graph wiring, not a full optimization run.
        super(MockLearner, self).__init__(nodes=[node_a, node_b, node_c, node_d, node_e, node_f],
                                          max_iterations=1, callback=ConvergenceCallback())
| Knewton/edm2016 | rnn_prof/irt/testing_utils.py | testing_utils.py | py | 6,559 | python | en | code | 58 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy",
"line... |
42896773895 | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import *
from transformers.modeling_roberta import RobertaLMHead
from transformers.modeling_bert import BertOnlyMLMHead
class BertMLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    """
    def __init__(self, config):
        super(BertMLM, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()
        self.tie_weights()

    def get_trainable_parameters(self):
        # this is useful when freezing the encoder parameters
        embedding_params = list(self.bert.embeddings.word_embeddings.parameters())
        return embedding_params + [self.cls.predictions.bias]

    def tie_weights(self):
        # The MLM decoder shares its weight matrix with the input embeddings.
        self.cls.predictions.decoder.weight = self.bert.embeddings.word_embeddings.weight

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        """Return the masked-LM cross-entropy loss over labelled positions."""
        hidden_states = self.bert(input_ids, token_type_ids, attention_mask)[0]
        keep = masked_lm_labels.ne(-1)           # positions carrying a real label
        scores = self.cls(hidden_states[keep])   # score only the kept positions
        targets = torch.masked_select(masked_lm_labels, keep)
        return F.cross_entropy(scores, targets)
class RobertaMLM(BertPreTrainedModel):
    """RoBERTa model with the masked language modeling head.
    """
    def __init__(self, config):
        super(RobertaMLM, self).__init__(config)
        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)
        self.init_weights()
        self.tie_weights()

    def get_trainable_parameters(self):
        # Useful when freezing the encoder: only the word embeddings and
        # the tied output bias remain trainable.
        embedding_params = list(self.roberta.embeddings.word_embeddings.parameters())
        return embedding_params + [self.lm_head.bias]

    def tie_weights(self):
        # The LM head decoder reuses the input embedding matrix.
        self.lm_head.decoder.weight = self.roberta.embeddings.word_embeddings.weight

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
        """Return the masked-LM cross-entropy loss over labelled positions."""
        hidden_states = self.roberta(input_ids, token_type_ids, attention_mask)[0]
        keep = masked_lm_labels.ne(-1)             # positions carrying a real label
        scores = self.lm_head(hidden_states[keep])
        targets = torch.masked_select(masked_lm_labels, keep)
        return F.cross_entropy(scores, targets)
class BertAdaptor(nn.Module):
    """
    A class for adapting English BERT to other languages.

    Wraps a source (English) and a target (foreign) masked-LM model and
    forces the target to share every component of the source except the
    word embeddings (whose decoder stays tied per-model), so only the
    foreign vocabulary is learned.
    """
    def __init__(self, src_model, tgt_model):
        """
        src_model: (BertForMaskedLM) of English
        tgt_model: (BertForMaskedLM) of Foreign
        """
        super(BertAdaptor, self).__init__()
        self.src_model = src_model
        self.tgt_model = tgt_model
        # force sharing params: same transformer stack for both languages
        self.tgt_model.bert.encoder = self.src_model.bert.encoder
        self.tgt_model.bert.pooler = self.src_model.bert.pooler
        # share embedding params (positions / segments / LayerNorm are
        # language-independent; word embeddings are NOT shared)
        self.tgt_model.bert.embeddings.position_embeddings = self.src_model.bert.embeddings.position_embeddings
        self.tgt_model.bert.embeddings.token_type_embeddings = self.src_model.bert.embeddings.token_type_embeddings
        self.tgt_model.bert.embeddings.LayerNorm = self.src_model.bert.embeddings.LayerNorm
        # share output layers (the MLM transform; each model keeps its own
        # decoder tied to its own word embeddings)
        self.tgt_model.cls.predictions.transform = self.src_model.cls.predictions.transform

    def forward(self, lang, input_ids, token_type_ids=None,
                attention_mask=None, masked_lm_labels=None):
        # Route the batch to the model matching its language tag.
        model = self.src_model if lang == 'en' else self.tgt_model
        return model(input_ids, token_type_ids, attention_mask=attention_mask,
                     masked_lm_labels=masked_lm_labels)
class RobertaAdaptor(nn.Module):
    """
    A class for adapting English RoBERTa to other languages.

    Same scheme as BertAdaptor: the target model shares everything with
    the source model except the word embeddings (and the decoder/bias
    tied to them), so only the foreign vocabulary is learned.
    """
    def __init__(self, src_model, tgt_model):
        """
        src_model: (BertForMaskedLM) of English
        tgt_model: (Roberta) of Foreign
        """
        super(RobertaAdaptor, self).__init__()
        self.src_model = src_model
        self.tgt_model = tgt_model
        # force sharing params: same transformer stack for both languages
        self.tgt_model.roberta.encoder = self.src_model.roberta.encoder
        self.tgt_model.roberta.pooler = self.src_model.roberta.pooler
        # share embedding params (positions / segments / LayerNorm are
        # language-independent; word embeddings are NOT shared)
        self.tgt_model.roberta.embeddings.position_embeddings = self.src_model.roberta.embeddings.position_embeddings
        self.tgt_model.roberta.embeddings.token_type_embeddings = self.src_model.roberta.embeddings.token_type_embeddings
        self.tgt_model.roberta.embeddings.LayerNorm = self.src_model.roberta.embeddings.LayerNorm
        # share output layers (dense + layer norm of the LM head; the
        # decoder stays tied to each model's own word embeddings)
        self.tgt_model.lm_head.dense = self.src_model.lm_head.dense
        self.tgt_model.lm_head.layer_norm = self.src_model.lm_head.layer_norm

    def forward(self, lang, input_ids, token_type_ids=None,
                attention_mask=None, masked_lm_labels=None):
        # Route the batch to the model matching its language tag.
        model = self.src_model if lang == 'en' else self.tgt_model
        return model(input_ids, token_type_ids, attention_mask=attention_mask,
                     masked_lm_labels=masked_lm_labels)
| alexa/ramen | code/src/models.py | models.py | py | 5,251 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "transformers.modeling_bert.BertOnlyMLMHead",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.masked_select",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 33,
"usage_type": "call... |
19063572783 | from __future__ import annotations
import dataclasses
import enum
import grpc
from tensorflow_serving.apis.model_pb2 import ModelSpec
from tensorflow_serving.apis.model_service_pb2_grpc import ModelServiceStub
from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub
from tensorflow_serving.apis.model_management_pb2 import ReloadConfigRequest
from tensorflow_serving.apis.get_model_status_pb2 import GetModelStatusRequest
from tensorflow_serving.apis.predict_pb2 import PredictRequest
from tensorflow_serving.apis.get_model_metadata_pb2 import (
GetModelMetadataRequest,
SignatureDefMap,
)
from tensorflow_serving.config.model_server_config_pb2 import (
ModelServerConfig,
ModelConfigList,
ModelConfig,
)
from tensorflow.core.framework.types_pb2 import DataType
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework.tensor_shape_pb2 import TensorShapeProto
class ModelStatus(enum.Enum):
    """Lifecycle states of a servable; values match the `state` field
    TF Serving returns from GetModelStatus (see get_model_status below)."""
    # Default value.
    UNKNOWN = 0
    # The manager is tracking this servable, but has not initiated any action
    # pertaining to it.
    START = 10
    # The manager has decided to load this servable. In particular, checks
    # around resource availability and other aspects have passed, and the
    # manager is about to invoke the loader's Load() method.
    LOADING = 20
    # The manager has successfully loaded this servable and made it available
    # for serving (i.e. GetServableHandle(id) will succeed). To avoid races,
    # this state is not reported until *after* the servable is made
    # available.
    AVAILABLE = 30
    # The manager has decided to make this servable unavailable, and unload # it. To avoid races, this state is reported *before* the servable is
    # made unavailable.
    UNLOADING = 40
    # This servable has reached the end of its journey in the manager. Either
    # it loaded and ultimately unloaded successfully, or it hit an error at
    # some point in its lifecycle.
    END = 50
@dataclasses.dataclass
class TensorMeta:
    """Metadata for one named input/output tensor of a served model signature."""
    name: str        # tensor name as it appears in the signature def
    dtype: DataType  # TF DataType enum value reported by the signature
    shape: list[int] # per-dimension sizes from the signature's tensor_shape
class TFServing:
    """Thin gRPC client for a TensorFlow Serving instance: model (re)loading,
    status/metadata queries, and prediction."""

    def __init__(
        self,
        grpc_channel: grpc.Channel,
        base_path: str,
        timeout: int = 5,
        reloading_timeout: int | None = None,
    ) -> None:
        self.base_path = base_path
        self.timeout = timeout
        # Config reloads may take longer than ordinary RPCs; a falsy value
        # (None/0) falls back to the regular timeout.
        self.reloading_timeout = reloading_timeout or self.timeout
        self.model_service_stub = ModelServiceStub(grpc_channel)
        self.prediction_service_stub = PredictionServiceStub(grpc_channel)

    def reload_config(self, model_names: list[str]) -> None:
        """Replace the server's model config with one entry per name in
        `model_names`, each served from <base_path>/<name>."""
        model_configs: list[ModelConfig] = []
        for model_name in model_names:
            model_configs.append(
                ModelConfig(
                    name=model_name,
                    base_path="/".join([self.base_path, model_name]),
                    model_platform="tensorflow",
                )
            )
        model_config_list = ModelConfigList(config=model_configs)
        request = ReloadConfigRequest(
            config=ModelServerConfig(model_config_list=model_config_list)
        )
        self.model_service_stub.HandleReloadConfigRequest(
            request, timeout=self.reloading_timeout
        )

    def get_model_metadata(
        self,
        model_name: str,
        input_names: list[str] | None = None,
        output_names: list[str] | None = None,
    ) -> tuple[list[TensorMeta], list[TensorMeta]]:
        """Return (inputs, outputs) TensorMeta lists for the model's
        `serving_default` signature.

        When `input_names` / `output_names` is None, all tensors declared
        by the signature are returned (in the signature's key order).
        """
        model_spec = ModelSpec(name=model_name, signature_name="serving_default")
        request = GetModelMetadataRequest(
            model_spec=model_spec, metadata_field=["signature_def"]
        )
        response = self.prediction_service_stub.GetModelMetadata(
            request, timeout=self.timeout
        )
        # The metadata arrives as a packed Any proto; unpack into a
        # SignatureDefMap to reach the signature definitions.
        def_map = SignatureDefMap()
        response.metadata["signature_def"].Unpack(def_map)
        metadata = def_map.signature_def["serving_default"]
        if input_names is None:
            input_names = list(metadata.inputs.keys())
        if output_names is None:
            output_names = list(metadata.outputs.keys())

        def read_tensor_meta(
            names: list[str], tensor_info: dict[str, TensorInfo]
        ) -> list[TensorMeta]:
            # Project each requested tensor's info into a TensorMeta.
            tensor_metas: list[TensorMeta] = []
            for name in names:
                tensor = tensor_info[name]
                dims = [d.size for d in tensor.tensor_shape.dim]
                tensor_meta = TensorMeta(name, tensor.dtype, dims)
                tensor_metas.append(tensor_meta)
            return tensor_metas

        return (
            read_tensor_meta(input_names, metadata.inputs),
            read_tensor_meta(output_names, metadata.outputs),
        )

    def get_model_status(self, model_name: str) -> ModelStatus:
        """Return the ModelStatus of `model_name`, UNKNOWN when absent.

        NOTE(review): only the entry with version == 0 is inspected --
        confirm the serving setup actually reports that version number,
        otherwise this always falls through to UNKNOWN.
        """
        model_spec = ModelSpec(name=model_name)
        request = GetModelStatusRequest(model_spec=model_spec)
        response = self.model_service_stub.GetModelStatus(request, timeout=self.timeout)
        for version_status in response.model_version_status:
            if version_status.version == 0:
                return ModelStatus(version_status.state)
        return ModelStatus(0)

    def predict(
        self, model_name: str, input_tensor: dict[str, TensorProto]
    ) -> dict[str, TensorProto]:
        """Run a Predict RPC; returns the output tensors keyed by name."""
        model_spec = ModelSpec(name=model_name)
        request = PredictRequest(model_spec=model_spec)
        for name, tensor in input_tensor.items():
            request.inputs[name].CopyFrom(tensor)
        return dict(
            self.prediction_service_stub.Predict(request, timeout=self.timeout).outputs
        )
if __name__ == "__main__":
    # Manual smoke test against a locally running TensorFlow Serving instance.
    # BUG FIX: TFServing.__init__ requires base_path; the original call omitted
    # it and raised TypeError before any RPC was made. "/models" is the usual
    # serving container default — adjust for your deployment.
    s = TFServing(grpc.insecure_channel("127.0.0.1:8500"), base_path="/models")
    s.reload_config(["xxx", "xyz"])
    print(s.get_model_metadata("xxx"))
    tensor = TensorProto(
        dtype=DataType.DT_INT64,
        tensor_shape=TensorShapeProto(
            dim=[TensorShapeProto.Dim(size=1), TensorShapeProto.Dim(size=5)]
        ),
        int64_val=[230, 2130, 4324, 222, 0],
    )
    print(s.predict("xxx", {"token_ids": tensor}))
| nanaya-tachibana/sknlp-server | sknlp_serving/tfserving.py | tfserving.py | py | 6,178 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.core.framework.types_pb2.DataType",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 51,
"usage_type": "attribute"
},
{... |
12179042424 | from itertools import dropwhile
from io_utils import read_file, write_file
def clear_missed_rides(rides, max_time, ctime):
    """Drop the leading run of rides whose score exceeds the remaining time.

    Mirrors itertools.dropwhile semantics: scanning stops at the first ride
    that still fits in ``max_time - ctime``; later unservable rides are kept.
    Returns the (possibly new) list of remaining rides.
    """
    remaining = max_time - ctime
    if rides and rides[0].score > remaining:
        keep_from = 0
        while keep_from < len(rides) and rides[keep_from].score > remaining:
            keep_from += 1
        rides = rides[keep_from:]
    return rides
def run_example(input_file, output_file):
    """Run the ride-assignment simulation for one input and write the result.

    Loads the problem via io_utils.read_file, advances the simulation one
    time unit per loop iteration, and finally writes every car's assignments
    via io_utils.write_file.
    """
    print("Reading file: {}\n".format(input_file))
    max_time, bonus, cars, rides, cars_nb = read_file(input_file)
    for ctime in range(max_time):
        cars_idle = 0
        for car in cars:
            # car.drive presumably picks/advances a ride and mutates `rides`
            # — TODO confirm against the Car implementation.
            car.drive(rides)
            if car.current_ride is None:
                cars_idle += 1
        rides = clear_missed_rides(rides, max_time, ctime)
        # Stop early once no rides remain and every car is idle.
        if not rides and cars_idle == cars_nb:
            break
    write_file(cars, output_file)
| bonheml/hashcode_2018 | main.py | main.py | py | 792 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.dropwhile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "io_utils.read_file",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "io_utils.write_file",
"line_number": 25,
"usage_type": "call"
}
] |
29121079371 | from selenium import webdriver
import time
from selenium.common.exceptions import NoSuchElementException
import random
class amdAutomation():
    """Poll an AMD direct-buy product page until it can be added to the cart."""

    def __init__(self):
        # Browser session is created lazily in setWebsiteLocation().
        self.driver = None

    def setWebsiteLocation(self, link):
        """Start a Chrome session and navigate to the given link."""
        self.driver = webdriver.Chrome('/Users/chiraag/chromedriver')
        self.driver.get(link)

    def executeTest(self, link):
        """Open the product page and run the add-to-cart retry loop."""
        self.setWebsiteLocation(link)
        self.addToCart()

    def addToCart(self):
        """Refresh until the 'Add to Cart' button appears, then click it."""
        while True:
            try:
                print("trying this")
                button = self.driver.find_element_by_xpath("//button[contains(text(),'Add to Cart')]")
                button.click()
                break
            except NoSuchElementException:
                # Button not rendered yet (out of stock / page loading):
                # wait a bit and reload the page.
                print("sleeping now for " + str(3) + " seconds")
                time.sleep(3)
                self.driver.refresh()
        print("adding to cart :)")
# Manual smoke test: attempt the add-to-cart flow on one product page.
if __name__ == "__main__":
    taskmaster = amdAutomation()
    taskmaster.executeTest('https://www.amd.com/en/direct-buy/5458372200/us')
#search = browser.find_element_by_name('st')
#search.send_keys("rtx 3080")
#search_btn = browser.find_element_by_xpath("/html/body/div[3]/main/div[2]/div/div[1]/div[3]/div[2]/div/div[2]/div[1]/div/div/div[1]")
#search_btn.click()
'''
''' | crekhari/Graphics-Card-Auto-Checkout-Bot | src/amd.py | amd.py | py | 1,348 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 27,
"usage_type": "name"
... |
13885704764 | from time import perf_counter_ns # Calcul temps d'exécution
import matplotlib.pyplot as plt # Graphes
def InsertSort(l):
    """Sort the list ``l`` in place with insertion sort.

    Parameters
    ----------
    l : list of comparable items (ints or floats)

    Returns
    -------
    None — the input list itself is reordered in ascending order.
    """
    for index, current in enumerate(l):
        slot = index
        # Shift every larger predecessor one step right, then drop the
        # current value into the gap.
        while slot > 0 and current < l[slot - 1]:
            l[slot] = l[slot - 1]
            slot -= 1
        l[slot] = current
if __name__=='__main__': # test
    # Benchmark bookkeeping (times in ms): index [0] = favourable input
    # (already sorted), index [1] = unfavourable input (reverse-sorted).
    listTimeIS = [[],[]]
    listTimesort = [[],[]]
    # Input sizes (number of elements) used for the measurements.
    rlist = [5,500,2000,5000,10000,15000,20000]
"""
# InsertionSort()
for r in rlist: # + favorable
l = list(range(r))
start = perf_counter_ns()
sl = InsertSort(l)
end = perf_counter_ns()
execution_time = round((end - start)*10**(-6),3)
listTimeIS[0].append(execution_time)
print("Time passed :",listTimeIS[0][rlist.index(r)])
for r in rlist: # - favorable
l = list(range(r))
l.reverse()
start = perf_counter_ns()
sl = InsertSort(l)
end = perf_counter_ns()
execution_time = round((end - start)*10**(-6),3)
listTimeIS[1].append(execution_time)
print("Time passed :",listTimeIS[1][rlist.index(r)])
# méthode sort()
for r in rlist: # + favorable
l = list(range(r))
start = perf_counter_ns()
l.sort()
end = perf_counter_ns()
execution_time = round((end - start)*10**(-6),3)
listTimesort[0].append(execution_time)
print("Time passed :",listTimesort[0][rlist.index(r)])
for r in rlist: # - favorable
l = list(range(r))
l.reverse()
start = perf_counter_ns()
l.sort()
end = perf_counter_ns()
execution_time = round((end - start)*10**(-6),3)
listTimesort[1].append(execution_time)
print("Time passed :",listTimesort[1][rlist.index(r)])
print("+ favorable InsertSort() (en ms) :",listTimeIS[0])
print("- favorable InsertSort() (en ms) :",listTimeIS[1])
print("+ favorable sort() (en ms) :",listTimesort[0])
print("- favorable sort() (en ms) :",listTimesort[1])
"""
    # Hard-coded results (ms) of a previous run — the measuring code above is
    # commented out, so these values are plotted directly.
    listTimeIS[0] = [0.003, 0.218, 0.783, 1.861, 2.92, 5.759, 10.202]
    listTimeIS[1] = [0.012, 64.902, 642.989, 2818.485, 10652.621, 24741.013, 48911.853]
    listTimesort[0] = [0.004, 0.008, 0.151, 0.058, 0.146, 0.231, 0.172]
    listTimesort[1] = [0.002, 0.004, 0.058, 0.058, 0.155, 0.459, 0.333]
    plt.title("Comparaison InsertSort() et sorted()")
    # InsertSort() curves (best / worst case)
    plt.plot(rlist,listTimeIS[0],"ro-",label="InsertSort() favorable (triée)")
    plt.plot(rlist,listTimeIS[1],"o-",label="InsertSort() défavorable (reversed)")
    # Built-in sort curves (best / worst case)
    plt.plot(rlist,listTimesort[0],"go-",label="sorted() favorable (triée)")
    plt.plot(rlist,listTimesort[1],"bo-",label="sorted() défavorable (reversed)")
    plt.legend()
    plt.ylabel("Temps d'exécution (en ms)")
    plt.xlabel("Nombre d'éléments (n)")
    plt.show()
{
"api_name": "matplotlib.pyplot.title",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matp... |
6819650595 | from flask import render_template, Blueprint
from app.model import Article, User, Category, Tag
# Blueprint grouping the public-facing routes; templates resolve from ./templates.
blueprint = Blueprint('front', __name__, template_folder='templates')
@blueprint.route("/")
def index():
result = Article.query.getall()
categorys = Category.query.all()
#tags = Tag.query.all()
temp = Article.query.all()
tags = list()
dict = {}
for t in temp:
for tag in t.tag:
count = dict.get(tag.name)
if dict.get(tag.name):
dict.update({
tag.name: count + 1
})
else:
dict.update({
tag.name: 1
})
return render_template("index.html", articles=result, categorys=categorys, tags=dict)
@blueprint.route('/article/<int:pageid>')
def article(pageid=1):
    """Render the detail page for one article, looked up by its page id."""
    post = Article.query.getart(pageid)
    return render_template("article.html", article=post)
@blueprint.route('/contact/')
def contact():
    """Render the static contact page."""
    return render_template("contact.html")
{
"api_name": "flask.Blueprint",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "app.model.Article.query.getall",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.model.Article.query",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name... |
72276045475 | import pygame
class Sprite(pygame.sprite.Sprite):
    """Base class for clickable/hoverable sprites.

    Tracks the mouse interaction state and dispatches to the hook methods
    (refresh, push, highlight, release, click), which subclasses are expected
    to override. Subclasses must also provide ``screen_rect`` — the on-screen
    hit box used for mouse collision (not defined here).
    """

    def __init__(self, groups=[]):
        # NOTE: the mutable default is tolerated here because the list is
        # only passed through to pygame and never mutated.
        super().__init__(groups)

        # The interaction state for the sprite ("released"/"highlighted"/"pushed").
        self.state = "released"
        # Raw mouse-button state ("pressed"/"released").
        self.mouse_state = "released"

        # This makes sure that the rect actually exists
        # self.rect = self.rect

    def refresh(self):
        print("NOT IMPLEMENTED!!! (refresh) (for " + self.__class__.__name__ + ")")

    def refresh_sprite(self, action, mouse_pos):
        """Update the sprite for one mouse event and dispatch the right hook.

        action is "press", "release", or anything else (treated as motion);
        mouse_pos is the cursor position. Returns whatever the dispatched
        hook returns (None by default).
        """
        self.refresh()

        return_value = None
        if action == "press":
            self.mouse_state = "pressed"
        elif action == "release":
            self.mouse_state = "released"

        # screen_rect must be supplied by the subclass.
        if self.screen_rect.collidepoint(mouse_pos):
            if action == "release":
                return_value = self.click()
            elif self.mouse_state == "pressed":
                self.state = "pushed"
                return_value = self.push()
            else:
                self.state = "highlighted"
                return_value = self.highlight()
        else:
            self.state = "released"
            return_value = self.release()

        return return_value

    def push(self):
        # BUG FIX: this diagnostic previously said "(release)".
        print("NOT IMPLEMENTED!!! (push) (for " + self.__class__.__name__ + ")")
        return None

    def highlight(self):
        # BUG FIX: this diagnostic previously said "(release)".
        print("NOT IMPLEMENTED!!! (highlight) (for " + self.__class__.__name__ + ")")
        return None

    def release(self):
        print("NOT IMPLEMENTED!!! (release) (for " + self.__class__.__name__ + ")")
        return None

    def click(self):
        print("NOT IMPLEMENTED!!! (click) (for " + self.__class__.__name__ + ")")
        return None
| LeoTheMighty/ApocalypseLater | ui/Sprite.py | Sprite.py | py | 1,761 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
}
] |
74345211234 | #
# @lc app=leetcode.cn id=122 lang=python3
#
# [122] 买卖股票的最佳时机 II
#
from typing import List
# @lc code=start
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Greedy: the best total profit with unlimited transactions equals
        the sum of every positive day-over-day price increase.

        Empty or single-day price lists yield 0.
        """
        return sum(
            max(later - earlier, 0)
            for earlier, later in zip(prices, prices[1:])
        )
# @lc code=end
| Zigars/Leetcode | 贪心算法/122.买卖股票的最佳时机-ii.py | 122.买卖股票的最佳时机-ii.py | py | 701 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
}
] |
12752768218 | """
These modules are responsible for scheduling model transfers while adhering to bandwidth limitations of both the
sending and receiving party.
"""
import logging
import random
from asyncio import Future, get_event_loop, InvalidStateError
from typing import List, Dict
from ipv8.taskmanager import TaskManager
class BWScheduler(TaskManager):
    """Per-participant bandwidth scheduler.

    Each participant owns one scheduler. Outgoing transfers are queued until
    both the sender's and the receiver's bandwidth limits have headroom; when
    a transfer completes, freed bandwidth is re-allocated to ongoing transfers
    first, then to pending requests. Completion events are simulated with
    delayed tasks on the TaskManager.
    """

    def __init__(self, peer_pk: bytes, my_id: str) -> None:
        """Create an idle scheduler for participant ``my_id`` (bw_limit starts at 0)."""
        super().__init__()
        self.bw_limit: int = 0  # in bytes/s
        self.outgoing_requests: List[Transfer] = []  # Outgoing transfers waiting to be started
        self.incoming_requests: List[Transfer] = []  # Incoming transfers waiting to be started
        self.outgoing_transfers: List[Transfer] = []  # Ongoing outgoing transfers
        self.incoming_transfers: List[Transfer] = []  # Ongoing incoming transfers
        self.peer_pk = peer_pk
        self.my_id = my_id
        self.logger = logging.getLogger(self.__class__.__name__)
        self.is_active: bool = False  # Whether we are sending or receiving something
        self.became_active: float = 0
        self.total_time_transmitting: float = 0

    def get_allocated_outgoing_bw(self) -> int:
        """Sum of bandwidth currently allocated to ongoing outgoing transfers."""
        allocated_bw: int = sum([transfer.allocated_bw for transfer in self.outgoing_transfers])
        assert allocated_bw <= self.bw_limit, "Allocated outgoing bandwidth of %s (%d) cannot exceed limit (%d)" % (self.my_id, allocated_bw, self.bw_limit)
        return allocated_bw

    def get_allocated_incoming_bw(self) -> int:
        """Sum of bandwidth currently allocated to ongoing incoming transfers."""
        allocated_bw: int = sum([transfer.allocated_bw for transfer in self.incoming_transfers])
        assert allocated_bw <= self.bw_limit, "Allocated incoming bandwidth of %s (%d) cannot exceed limit (%d)" % (self.my_id, allocated_bw, self.bw_limit)
        return allocated_bw

    def register_transfer(self, transfer, is_outgoing=False):
        """Record a transfer as ongoing; start the activity clock when going busy."""
        if not self.incoming_transfers and not self.outgoing_transfers:
            self.is_active = True
            self.became_active = get_event_loop().time()

        if is_outgoing:
            self.outgoing_transfers.append(transfer)
        else:
            self.incoming_transfers.append(transfer)

    def unregister_transfer(self, transfer, is_outgoing=False):
        """Remove an ongoing transfer; accumulate busy time when going idle."""
        if is_outgoing:
            if transfer in self.outgoing_transfers:
                self.outgoing_transfers.remove(transfer)
        else:
            if transfer in self.incoming_transfers:
                self.incoming_transfers.remove(transfer)

        if not self.incoming_transfers and not self.outgoing_transfers:
            self.is_active = False
            self.total_time_transmitting += (get_event_loop().time() - self.became_active)

    def add_transfer(self, receiver_scheduler: "BWScheduler", transfer_size: int) -> "Transfer":
        """
        A new transfer request arrived.
        :param transfer_size: Size of the transfer, in bytes
        """
        transfer: Transfer = Transfer(self, receiver_scheduler, transfer_size)
        self.outgoing_requests.append(transfer)
        self.logger.debug("Adding transfer request %d: %s => %s to the queue", transfer.transfer_id, self.my_id,
                          transfer.receiver_scheduler.my_id)
        self.schedule()
        return transfer

    def schedule(self):
        """
        Try to schedule pending outgoing requests and allocate bandwidth to them.
        """
        sender_bw_left: int = self.bw_limit - self.get_allocated_outgoing_bw()
        if sender_bw_left == 0:
            return  # Cannot accept more pending requests

        requests_scheduled: List[Transfer] = []
        for request in self.outgoing_requests:
            receiver_bw_left = request.receiver_scheduler.bw_limit - request.receiver_scheduler.get_allocated_incoming_bw()
            # A transfer can only start with the bandwidth both ends can spare.
            bw_to_allocate = min(sender_bw_left, receiver_bw_left)
            if bw_to_allocate > 0:
                self.schedule_request(request, bw_to_allocate)
                requests_scheduled.append(request)
                sender_bw_left = self.bw_limit - self.get_allocated_outgoing_bw()  # Update this as it has changed

                # Do we have outgoing bandwidth left to allocate more requests?
                if sender_bw_left == 0:
                    break  # Cannot accept more pending requests
            else:
                # Add this transfer as pending request in the queue of the receiver, try again later.
                if request not in request.receiver_scheduler.incoming_requests:
                    self.logger.debug("Sender %s adding transfer %d as pending incoming request in the scheduler of "
                                      "receiver %s", self.my_id, request.transfer_id, request.receiver_scheduler.my_id)
                    request.receiver_scheduler.incoming_requests.append(request)

        # Started requests leave the pending queue (removed after iteration
        # to avoid mutating the list being iterated).
        for request in requests_scheduled:
            self.outgoing_requests.remove(request)

    def schedule_request(self, request, bw_to_allocate: int):
        """
        Schedule a particular request - we know for sure that there is bandwidth available for this transfer.
        """
        self.logger.debug("Starting transfer %d: %s => %s (allocated %d bw to this transfer, s %d/%d, r %d/%d)", request.transfer_id, self.my_id,
                          request.receiver_scheduler.my_id, bw_to_allocate, self.get_allocated_outgoing_bw(), self.bw_limit, request.receiver_scheduler.get_allocated_incoming_bw(), request.receiver_scheduler.bw_limit)
        request.allocated_bw = bw_to_allocate
        # Simulate completion with a delayed task sized by size / bandwidth.
        estimated_finish_time = request.transfer_size / request.allocated_bw
        request.start_time = get_event_loop().time()
        request.last_time_updated = get_event_loop().time()
        task_name = "transfer_%d_finish_%d" % (request.transfer_id, request.reschedules)
        self.register_task(task_name, self.on_outgoing_transfer_complete, request, delay=estimated_finish_time)
        self.register_transfer(request, is_outgoing=True)
        request.receiver_scheduler.register_transfer(request, is_outgoing=False)
        if request in request.receiver_scheduler.incoming_requests:
            request.receiver_scheduler.incoming_requests.remove(request)

    def on_outgoing_transfer_complete(self, transfer):
        """
        An outgoing transfer has completed.
        """
        self.logger.debug("Transfer %d: %s => %s has completed", transfer.transfer_id, self.my_id,
                          transfer.receiver_scheduler.my_id)
        transfer.finish()

        # Inform the other side
        self.unregister_transfer(transfer, is_outgoing=True)
        transfer.receiver_scheduler.on_incoming_transfer_complete(transfer)

        # Try to schedule remaining requests as we might have unallocated bandwidth at this point.
        self.schedule()

    def on_outgoing_transfer_failed(self, failed_transfer):
        """The receiver killed an incoming transfer; drop it and reuse the bandwidth."""
        self.unregister_transfer(failed_transfer, is_outgoing=True)
        self.cancel_pending_task("transfer_%d_finish_%d" % (failed_transfer.transfer_id, failed_transfer.reschedules))
        self.schedule()

    def on_incoming_transfer_complete(self, completed_transfer):
        """
        An incoming transfer has been completed.
        We first try to allocate more bandwidth to our ongoing requests.
        Then we inform other pending incoming requests.
        """
        self.unregister_transfer(completed_transfer, is_outgoing=False)

        # Prioritize allocating bandwidth to ongoing transfers
        for transfer in self.incoming_transfers + self.incoming_requests:
            self.logger.debug("Informing sender %s about available bandwidth for transfer %d",
                              transfer.sender_scheduler.my_id, transfer.transfer_id)
            transfer.sender_scheduler.on_receiver_inform_about_free_bandwidth(transfer)
            incoming_bw_left: int = self.bw_limit - self.get_allocated_incoming_bw()
            if incoming_bw_left == 0:
                break

    def on_receiver_inform_about_free_bandwidth(self, transfer):
        """
        A receiver of a pending transfer has informed us (the sender) about newly available bandwidth for a particular
        transfer. Adjust this transfer and try to allocate more if we can.
        """
        sender_bw_left: int = self.bw_limit - self.get_allocated_outgoing_bw()
        receiver_bw_left: int = transfer.receiver_scheduler.bw_limit - transfer.receiver_scheduler.get_allocated_incoming_bw()

        # This is either an ongoing request or a pending request
        if transfer in self.outgoing_transfers:
            self.logger.debug("Sender %s got available bw notification from receiver %s for ongoing transfer %s",
                              self.my_id, transfer.receiver_scheduler.my_id, transfer.transfer_id)

            # It's an ongoing transfer, increase the allocated bw of this transfer accordingly
            additional_bw_to_allocate = min(sender_bw_left, receiver_bw_left)
            if additional_bw_to_allocate > 0:
                # We can allocate more bw to this transfer, do so and update everything accordingly.
                self.logger.debug("Allocating %d additional bw to transfer %d", additional_bw_to_allocate,
                                  transfer.transfer_id)
                task_name = "transfer_%d_finish_%d" % (transfer.transfer_id, transfer.reschedules)
                self.cancel_pending_task(task_name)

                # First we update how much of the transfer has been completed at this point.
                transfer.update()

                # "Restart" the transfer and reschedule the completion event
                transfer.allocated_bw += additional_bw_to_allocate
                new_estimated_finish_time = (transfer.transfer_size - transfer.transferred) / transfer.allocated_bw
                transfer.reschedules += 1
                new_task_name = "transfer_%d_finish_%d" % (transfer.transfer_id, transfer.reschedules)
                self.register_task(new_task_name, self.on_outgoing_transfer_complete, transfer,
                                   delay=new_estimated_finish_time)
        elif transfer in self.outgoing_requests:
            self.logger.debug("Sender %s got available bw notification from receiver %s for pending request %s",
                              self.my_id, transfer.receiver_scheduler.my_id, transfer.transfer_id)
            bw_to_allocate = min(sender_bw_left, receiver_bw_left)
            if bw_to_allocate > 0:
                self.schedule_request(transfer, bw_to_allocate)
                self.outgoing_requests.remove(transfer)
        else:
            raise RuntimeError("We do not know about request %d!" % transfer.transfer_id)

    def kill_all_transfers(self):
        """Abort every ongoing transfer and purge all pending requests, both directions."""
        transfer_count: int = len(self.incoming_transfers) + len(self.outgoing_transfers)
        if transfer_count > 0:
            self.logger.warning("Interrupting all %d transfers of participant %s in the scheduler",
                                transfer_count, self.my_id)

        self.cancel_all_pending_tasks()
        for transfer in self.outgoing_transfers:
            # Notify the receiver first so it can redistribute the bandwidth.
            transfer.receiver_scheduler.on_incoming_transfer_complete(transfer)
            self.logger.debug("Failing outgoing transfer %d: %s => %s", transfer.transfer_id, self.my_id,
                              transfer.receiver_scheduler.my_id)
            transfer.fail()

        for transfer in self.incoming_transfers:
            transfer.sender_scheduler.on_outgoing_transfer_failed(transfer)
            self.logger.debug("Failing incoming transfer %d: %s => %s", transfer.transfer_id, self.my_id,
                              transfer.receiver_scheduler.my_id)
            transfer.fail()

        # Clean up all the pending requests
        for request in self.outgoing_requests:
            if request in request.receiver_scheduler.incoming_requests:
                request.receiver_scheduler.incoming_requests.remove(request)

        for request in self.incoming_requests:
            if request in request.sender_scheduler.outgoing_requests:
                request.sender_scheduler.outgoing_requests.remove(request)

        self.incoming_transfers = []
        self.outgoing_transfers = []
        self.incoming_requests = []
        self.outgoing_requests = []
class Transfer:
    """
    Represents a bandwidth transfer.
    """

    def __init__(self, sender_scheduler: BWScheduler, receiver_scheduler: BWScheduler, transfer_size: int):
        # Random id used only for logging/task naming.
        self.transfer_id = random.randint(0, 100000000000)
        self.sender_scheduler: BWScheduler = sender_scheduler
        self.receiver_scheduler: BWScheduler = receiver_scheduler
        self.transfer_size: int = transfer_size  # total size in bytes
        self.transferred: int = 0                # bytes transferred so far
        self.allocated_bw: int = 0               # currently allocated bandwidth (bytes/s)
        self.start_time: int = -1                # event-loop time when the transfer started
        self.last_time_updated: int = 0          # event-loop time of the last progress update
        self.complete_future: Future = Future()  # resolved on finish, failed on fail
        self.reschedules: int = 0                # how often the finish task was rescheduled
        self.metadata: Dict = {}                 # free-form user data attached to the transfer

    def finish(self):
        """Mark the transfer as successfully completed and resolve its future."""
        self.update()
        try:
            self.complete_future.set_result(None)
        except InvalidStateError:
            # The future was already resolved (e.g. finish raced with fail);
            # log instead of crashing the scheduler.
            self.sender_scheduler.logger.error("Finish of transfer %s (%s => %s) resulted in an InvalidStateError - "
                                               "ignoring for now", self.transfer_id, self.sender_scheduler.my_id,
                                               self.receiver_scheduler.my_id)

    def fail(self):
        """Mark the transfer as interrupted and fail its future with RuntimeError."""
        self.update()
        try:
            self.complete_future.set_exception(RuntimeError("Transfer interrupted"))
        except InvalidStateError:
            # The future was already resolved; log instead of crashing.
            self.sender_scheduler.logger.error("Failure of transfer %s (%s => %s) resulted in an InvalidStateError - "
                                               "ignoring for now", self.transfer_id, self.sender_scheduler.my_id,
                                               self.receiver_scheduler.my_id)

    def update(self):
        """Account the bytes moved since the last update at the current bandwidth."""
        transferred: float = (get_event_loop().time() - self.last_time_updated) * self.allocated_bw
        self.transferred += transferred
        self.last_time_updated = get_event_loop().time()

    def get_transferred_bytes(self) -> int:
        """Return the number of bytes transferred so far (as last accounted)."""
        return self.transferred
| devos50/decentralized-learning | simulations/bandwidth_scheduler.py | bandwidth_scheduler.py | py | 14,192 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "ipv8.taskmanager.TaskManager",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
... |
29490050611 | import collections
import numpy as np # type: ignore
import random
from typing import List, Tuple, Union
class SkipGramBatcher:
    """Encapsulate functionality for getting the next batch for SkipGram from a single list of ints (e.g. from a
    single text with no sentences). Use SkipGramListBatcher for Node2Vec. This class is an implementation detail
    and should not be used outside of this file.

    Attributes:
        data: A list of integer (or string) tokens representing one text/walk.
        batch_size: The number of samples to draw for a given training batch.
        num_skips: How many times to reuse an input to generate a label.
        skip_window: How many words to consider left and right.
        span: The total size of the sliding window [skip_window central_word skip_window].
        data_index: Index into data where the next window will be read.

    Raises:
        ValueError: If the number of skips cannot be divided by batch_size without a remainder.
        ValueError: If the number of skips is larger than the skip_window.
        TypeError: If data is not a list.
        TypeError: If any item inside data is neither an integer nor a string.
        ValueError: If the number of words or nodes in data is less than skip_window length.
    """

    def __init__(self, data: List[Union[int, str]], batch_size: int, num_skips: int, skip_window: int) -> None:
        self.data = data
        self.batch_size = batch_size
        self.num_skips = num_skips
        self.skip_window = skip_window

        if not batch_size % num_skips == 0:
            raise ValueError('For SkipGram, the number of skips must divide batch_size without remainder')
        if not num_skips <= 2 * skip_window:
            raise ValueError('For SkipGram, the number of skips must not be larger than skip_window')
        if not isinstance(data, list):
            raise TypeError('Data must be a list')
        # BUG FIX: the original check `all(x for x in data if isinstance(x, int))`
        # skipped non-int items entirely and rejected any data containing the
        # valid token 0. Accept ints and strings (per the type annotation) and
        # reject everything else explicitly.
        if not all(isinstance(x, (int, str)) for x in data):
            raise TypeError('Data must be a list of integers')
        if len(data) < skip_window:
            raise ValueError('Data (%d) is shorter than skip_window (%d)' % (len(data), skip_window))

        self.span = 2 * skip_window + 1
        self.data_index = 0

    def generate_batch(self) -> Tuple[np.ndarray, np.ndarray]:
        """Generates a training batch for the skip-gram model.

        Assumptions: All of the data is in one and only one list (for instance, the data might derive from a book).

        Returns:
            A (batch, labels) tuple: batch has shape (batch_size,) holding
            center words; labels has shape (batch_size, 1) holding sampled
            context words from within the sliding window.
        """
        batch_size = self.batch_size
        span = self.span
        num_skips = self.num_skips
        skip_window = self.skip_window

        batch = np.ndarray(shape=(batch_size, ), dtype=np.int32)
        labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
        # The sliding window (words left and right + the current word).
        buffer: collections.deque = collections.deque(maxlen=span)
        # Wrap around when a full window no longer fits at the current index.
        if self.data_index + span > len(self.data):
            self.data_index = 0
        buffer.extend(self.data[self.data_index:self.data_index + self.span])
        self.data_index += self.span

        for i in range(batch_size // self.num_skips):
            # All window positions except the center are context candidates.
            context_words = [w for w in range(self.span) if w != skip_window]
            words_to_use = random.sample(context_words, num_skips)

            for j, context_word in enumerate(words_to_use):
                batch[i * num_skips + j] = buffer[skip_window]
                labels[i * num_skips + j, 0] = buffer[context_word]

            if self.data_index == len(self.data):
                # i.e., we are at the end of data and need to reset the index to the beginning
                buffer.extend(self.data[0:span])
                self.data_index = span
            else:
                buffer.append(self.data[self.data_index])
                self.data_index += 1  # i.e., move the sliding window 1 position to the right

        # Backtrack a little bit to avoid skipping words in the end of a batch.
        self.data_index = (self.data_index + len(self.data) - span) % len(self.data)

        return batch, labels
| LeoPompidou/embiggen | embiggen/w2v/skip_gram_batcher.py | skip_gram_batcher.py | py | 4,263 | python | en | code | null | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number... |
12844050855 | import numpy as np
import cv2
def get_matrix_2D(center, rot, trans, scale):
    """Build a 2x3 affine matrix combining rotation, translation and scaling
    about the given center point.

    Returns an ndarray of shape (2, 3).
    """
    cos_r = np.cos(rot)
    sin_r = np.sin(rot)
    cx, cy = center[0], center[1]
    tx, ty = trans[0], trans[1]
    # Translation column: rotate/scale the (negated) offset, then re-add the center.
    shift_x = scale * (cos_r * (-tx - cx) + sin_r * (cy + ty)) + cx
    shift_y = scale * (cos_r * (-ty - cy) + sin_r * (-tx - cx)) + cy
    return np.array([[cos_r * scale, -sin_r * scale, shift_x],
                     [sin_r * scale, cos_r * scale, shift_y]])
def get_matrix_2D_3D(center, rot, trans, scale):
    """Return both the 2x3 affine matrix and its 4x4 homogeneous embedding.

    The 4x4 matrix applies the same x/y transform and leaves z untouched
    (rows 2 and 3 stay identity).
    """
    mat2D = get_matrix_2D(center, rot, trans, scale)
    # Insert a zero z-column between the rotation/scale part and the translation.
    top_rows = np.hstack([mat2D[:, :2], np.zeros((2, 1)), mat2D[:, 2].reshape([2, 1])])
    mat3D = np.eye(4)
    mat3D[:2] = top_rows
    return mat2D, mat3D
def transform_image_and_points(image, points, center, rot, trans, scale, trans_d=0):
    """Apply the same 2-D similarity transform to an image and/or 3-D points.

    image: optional 2-D ndarray; warped with nearest-neighbour interpolation
        and shifted by trans_d, with values clipped to >= 0 (presumably a
        depth map — TODO confirm intended semantics for other image types).
    points: optional (N, 3) ndarray; x/y go through the homogeneous matrix,
        trans_d is added to z.
    Returns (out_image, out_points); either is None when the matching input is None.
    """
    mat2D, mat3D = get_matrix_2D_3D(center, rot, trans, scale)

    out_image = None
    if image is not None:
        assert(type(image) == np.ndarray and len(image.shape) == 2)
        # NOTE(review): cv2.warpAffine's dsize is (width, height) while
        # image.shape is (rows, cols) — identical for square images; verify
        # for non-square inputs.
        out_image = cv2.warpAffine(image, mat2D.reshape([2,3]), image.shape, flags=cv2.INTER_NEAREST)
        out_image = np.clip(out_image + trans_d, 0, np.inf)

    out_points = None
    if points is not None:
        assert(type(points) == np.ndarray
               and len(points.shape) == 2
               and points.shape[1] == 3)
        out_points = np.zeros([points.shape[0], 3])
        for i, pt in enumerate(points):
            # Homogeneous transform of (x, y, z, 1); mat3D leaves z unchanged.
            out_pt = np.dot(mat3D, [pt[0], pt[1], pt[2], 1.])
            out_pt = out_pt[:3] / out_pt[3]
            out_pt[2] += trans_d
            out_points[i] = out_pt

    return out_image, out_points
| mqne/GraphLSTM | region_ensemble/transformations.py | transformations.py | py | 1,583 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "numpy.cos",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 20,
... |
30079937946 | #! /usr/bin/env python3
import xml.etree.ElementTree as ET
import requests
import sys
from datetime import datetime
import subprocess
def get_m3u8(targetarea='tokyo', station='fm'):
    """Fetch NHK's radio config XML and return the HLS playlist URL for the
    given area and station ('fm', 'r1' or 'r2').

    Exits the process on HTTP/network failure; returns None when the area
    is not present in the config.
    """
    stream_key = station + "hls"
    url = 'https://www.nhk.or.jp/radio/config/config_web.xml'
    try:
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception("unexpected code")
    except Exception as err:
        print(str(err))
        sys.exit(1)

    root = ET.fromstring(response.text)
    for entry in root.iter('data'):
        entry_conf = {
            "area": entry.find('area').text,
            "r1hls": entry.find('r1hls').text,
            "r2hls": entry.find('r2hls').text,
            "fmhls": entry.find('fmhls').text,
        }
        if entry_conf["area"] == targetarea:
            return entry_conf[stream_key]
def main():
    """Record an NHK radio stream to an .m4a file with ffmpeg.

    CLI: rec_radio.py STATION MINUTES [COMMENT]
    STATION is one of NHK-FM / NHKR1 / NHKR2; COMMENT is appended to the
    output file name when given.
    """
    targetarea = 'tokyo'
    workdir = '/media/recorder'
    stationlist = {
        "NHK-FM": "fm",
        "NHKR1": "r1",
        "NHKR2": "r2"
    }
    # Require exactly 2 or 3 user arguments (station, minutes[, comment]).
    if len(sys.argv) >= 5 or len(sys.argv) <= 2:
        print("error")
        sys.exit(1)
    stationname = sys.argv[1]
    minutes = sys.argv[2]
    try:
        comment = sys.argv[3]
    except IndexError:
        # The comment argument is optional (narrowed from a bare
        # `except Exception`: only a missing argv index can raise here).
        comment = ""
    timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
    # BUG FIX: `targetarea` was defined but never passed, silently relying
    # on get_m3u8's default; pass it explicitly.
    m3u8 = get_m3u8(targetarea=targetarea, station=stationlist[stationname])
    seconds = int(minutes) * 60
    if comment != "":
        filename = workdir + "/" + timestamp + "_" + stationname + "_" + comment + ".m4a"
    else:
        filename = workdir + "/" + timestamp + "_" + stationname + ".m4a"
    # Copy the HLS stream without re-encoding for the requested duration.
    subprocess.run(["ffmpeg", "-i", m3u8, "-t", str(seconds), "-c", "copy", filename])

if __name__ == '__main__':
    main()
| mnod/docker-radiru | rec_radio.py | rec_radio.py | py | 1,880 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "xml.etree.Elem... |
19654910100 | import keras
import os, shutil
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Small from-scratch CNN for binary cats-vs-dogs classification:
# four conv/max-pool stages, then a dense classifier on 150x150 RGB input.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid unit -> probability of the positive class.
model.add(layers.Dense(1, activation='sigmoid'))

model.summary()

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# The directory where we store our smaller dataset
base_dir = '/media/boom/HDD/FanBu/资料/PhD/research/cats_and_dogs_small'
if not os.path.exists(base_dir):
    print("Wrong path:", base_dir)

# Directories for our training,
# validation and test splits
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

# Sanity-check the shape of one batch before starting training.
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break

history = model.fit_generator(
    train_generator,
    epochs=30,
    validation_data=validation_generator)
# steps_per_epoch=100, # We don't need to specify this. By default, it equals to data_total_num/bastch_size
# validation_steps=50, # We don't need this either. By default, it equals to the number of all images in the val set.
# So validation_steps doe NOT mean we do validation for every X steps. It actually means the number of images we test in one validation.

model.save('cats_and_dogs_small_1.h5')

# Plot training vs validation accuracy, saved as a PNG.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
# plt.show()
image_name = 'Chapter5_2_1.png'
plt.savefig(image_name)
plt.close()

# Plot training vs validation loss in a separate figure.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
# plt.show()
image_name = 'Chapter5_2_2.png'
plt.savefig(image_name)
plt.close()
| BoomFan/dogs_vs_cats | Chapter5/chapter5_2_2.py | chapter5_2_2.py | py | 3,262 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.layers",... |
40071012285 | """empty message
Revision ID: 075ef7b7f465
Revises: c8df1e64ac3f
Create Date: 2020-01-14 00:46:47.381666
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '075ef7b7f465'
down_revision = 'c8df1e64ac3f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable integer column ``test2`` to ``todo``."""
    with op.batch_alter_table('todo', schema=None) as batch:
        batch.add_column(sa.Column('test2', sa.Integer(), nullable=True))
def downgrade():
    """Revert the migration: drop the ``test2`` column from ``todo``."""
    with op.batch_alter_table('todo', schema=None) as batch:
        batch.drop_column('test2')
| paduck210/flask_practice | resources/migrations/versions/075ef7b7f465_.py | 075ef7b7f465_.py | py | 780 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.... |
11737218073 | """apks table
Revision ID: e10608056996
Revises: dd4e694b3acf
Create Date: 2018-08-20 15:22:37.862657
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e10608056996'
down_revision = 'dd4e694b3acf'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``apk`` table and a unique index on ``package_name``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('apk',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('package_name', sa.String(length=128), nullable=True),
        sa.Column('filename', sa.String(length=128), nullable=True),
        sa.Column('path', sa.String(length=256), nullable=True),
        sa.Column('version_code', sa.String(length=64), nullable=True),
        sa.Column('version_name', sa.String(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Unique index enforces one row per Android package name.
    op.create_index(op.f('ix_apk_package_name'), 'apk', ['package_name'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``package_name`` index and the ``apk`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_apk_package_name'), table_name='apk')
    op.drop_table('apk')
    # ### end Alembic commands ###
| johndoe-dev/Ecodroid | migrations/versions/e10608056996_apks_table.py | e10608056996_apks_table.py | py | 1,158 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
39879428 | import datetime
class Logic:
localTime = datetime.datetime.now()
carLeft = carRight = carFront = carBack = False
def __init__(self, GPSData):
self.GPS = GPSData
def shouldWindowsBeTinted(self):
if 60 < self.GPS.elevationAngle < 120:
print("GPS Elevation angle shows no tinting required")
return False
if 180 < self.GPS.elevationAngle < 360:
print("Sun is below the horizon")
return False
angle = ((360 - (self.GPS.azimuthAngle % 360)) + self.GPS.heading) % 360
if angle < 90 or angle > 270:
self.carFront = True
else:
self.carFront = False
if 0 < angle < 180:
self.carLeft = True
else:
self.carLeft = False
if 90 < angle < 270:
self.carBack = True
else:
self.carBack = False
if angle > 180:
self.carRight = True
else:
self.carRight = False
| swachm/AutoCarWindowVisor | Model/Logic.py | Logic.py | py | 1,007 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
}
] |
def showbook(url, kind):
    """Scrape every page of one book-category listing.

    Reads the category's total page count from the listing page and walks
    each page via showpage(); listings without a pager element fall back
    to a single showpage() call on *url*.
    """
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    try:
        pages = int(soup.select('.cnt_page span')[0].text)  # total pages in this category
        print("共有", pages, "页")
        for page in range(1, pages + 1):
            pageurl = url + '&page=' + str(page).strip()
            print("第", page, "页", pageurl)
            showpage(pageurl, kind)
    # Narrowed from a bare ``except``: only a missing pager element
    # (IndexError) or a non-numeric page count (ValueError) should trigger
    # the single-page fallback; a bare except also swallowed
    # KeyboardInterrupt/SystemExit.
    except (IndexError, ValueError):
        showpage(url, kind)
def showpage(url, kind):
    """Scrape one listing page and append one row per book to the module-level list1."""
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html.parser')
    # Recent new books live in the class="mod type02_m012 clearfix" container.
    res = soup.find_all('div', {'class': 'mod type02_m012 clearfix'})[0]
    items = res.select('.item')  # every book entry on this page
    n = 0  # number of books found on this page
    for item in items:
        msg = item.select('.msg')[0]
        src = item.select('a img')[0]["src"]
        title = msg.select('a')[0].text  # book title
        imgurl = src.split("?i=")[-1].split("&")[0]  # cover image URL
        author = msg.select('a')[1].text  # author
        publish = msg.select('a')[2].text  # publisher
        date = msg.find('span').text.split(":")[-1]  # publication date
        onsale = item.select('.price .set2')[0].text  # discounted price
        content = item.select('.txt_cont')[0].text.replace(" ", "").strip()  # description
        # Append the row to the shared list1 accumulator.
        listdata = [kind, title, imgurl, author, publish, date, onsale, content]
        list1.append(listdata)
        n += 1
    print("n=", n)
def twobyte(kindno):
    """Return *kindno* as a string, left-padded with a single '0' when below 10."""
    return "0" + str(kindno) if kindno < 10 else str(kindno)
# Main program: crawl books.com.tw new-release categories into an Excel file.
import requests
from bs4 import BeautifulSoup
import openpyxl

workbook = openpyxl.Workbook()  # create a workbook
sheet = workbook.worksheets[0]  # first worksheet receives all rows
list1 = []  # one row per book; filled by showpage()
kindno = 1  # running category counter (maps to the URL suffix)
homeurl = 'http://www.books.com.tw/web/books_nbtopm_01/?o=5&v=1'
mode = "?o=5&v=1"  # display mode: vertical layout, sorted by popularity
url = "http://www.books.com.tw/web/books_nbtopm_"
html = requests.get(homeurl).text
soup = BeautifulSoup(html, 'html.parser')
# Chinese-book new-release categories: one link per category.
res = soup.find('div', {'class': 'mod_b type02_l001-1 clearfix'})
hrefs = res.select("a")
for href in hrefs:
    kindurl = url + twobyte(kindno) + mode  # category listing URL
    print("\nkindno=", kindno)
    kind = href.text  # category name
    showbook(kindurl, kind)  # scrape every book in this category
    kindno += 1

# Write the accumulated rows to Excel.
listtitle = ["分类", "书名", "图片网址", "作者", "出版社", "出版日期", "优惠价", "内容"]
sheet.append(listtitle)  # header row
for item1 in list1:  # data rows
    sheet.append(item1)
workbook.save('books_all.xlsx')
{
"api_name": "openpyxl.Workbook",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 60,
"usage_type": "call"
}
] |
10668569761 |
from django.forms import ModelForm
from core.erp.models import *
from django.forms import *
class ListForm(ModelForm):
    """ModelForm that applies Bootstrap styling to every visible field and
    focuses the ``username`` field on load."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for bound_field in self.visible_fields():
            bound_field.field.widget.attrs.update({
                'class': 'form-control',
                'autocomplete': 'off',
            })
        self.fields['username'].widget.attrs['autofocus'] = True
| AxelAlvarenga/Proyecto2022 | Eldeportista/app/core/login/forms.py | forms.py | py | 439 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "name"
}
] |
23568303273 | from os import error
from time import sleep
import serial
import psutil
import serial.tools.list_ports
import GPUtil
import json
from hotkey import activate_gpu, activate_mem
# Enumerate serial ports; the Arduino is located by the handshake probe below.
ports = serial.tools.list_ports.comports()
handShakePort = None
prevMode = 0
mode = 0
memoryActivate = False
maxMem = 0
memKey = ''
gpuKey = ''
gpuActivate = False
# Li/Lii are the moving window bounds used by the LCD text scroller.
Li = 16
Lii = 0
connected = False

# Load the memory threshold and hotkey bindings from config.json.
with open("config.json") as jsonFile:
    jsonObject = json.load(jsonFile)
    jsonFile.close()

maxMem = jsonObject['maxMem']
memKey = jsonObject['memKey']
gpuKey = jsonObject['gpuKey']
print('maxmem', maxMem)

# Probe each port: send a status packet and expect the Arduino to answer "1".
while not connected:
    for port, desc, hwid in sorted(ports):
        print("{}: {} [{}]".format(port, desc, hwid))
        try:
            print('connecting...', port)
            testHandShake = serial.Serial(port, 9600, timeout=1.0)
            print('writing to arduino...')
            testHandShake.write('{"status":"1"}\n'.encode('ascii'))
            # print('trying to handshake..')
            # print(testHandShake.readline())
            response = testHandShake.read_until(b'1\n')
            print(response)
            if response == b"1\r\n":
                print('respondeu.. porta atribuida pelo handshake.', port)
                handShakePort = port
                testHandShake.close()
                connected = True
                break
        # NOTE(review): bare except also hides KeyboardInterrupt; consider
        # narrowing to serial.SerialException.
        except:
            pass
def setModeMemory():
    """Hotkey callback: toggle the memory-scrolling display mode."""
    global mode, prevMode, memoryActivate
    if memoryActivate:
        mode = -1
        memoryActivate = False
    else:
        prevMode = mode
        mode = 1
        memoryActivate = True
def setModeGPU():
    """Hotkey callback: toggle the GPU display mode."""
    global mode, prevMode, gpuActivate
    if gpuActivate:
        mode = -3
        gpuActivate = False
    else:
        prevMode = mode
        mode = 3
        gpuActivate = True
# Register the global hotkeys that toggle the display modes.
activate_mem(setModeMemory, memKey)
activate_gpu(setModeGPU, gpuKey)

# Main telemetry loop: sample system stats once per second and push two
# LCD rows (as a JSON-ish dict string) to the Arduino over serial.
while handShakePort != None:
    maxMemStatus = '1'
    mem = psutil.virtual_memory()
    memPercent = mem.percent
    # '1' when memory usage is above the configured threshold, else '0'.
    if memPercent > maxMem:
        maxMemStatus = '1'
    else:
        maxMemStatus = '0'
    memTotal = mem.total /1024/1024/1024
    memUsed = mem.active /1024/1024/1024
    cpuPercent = psutil.cpu_percent()
    gpu = GPUtil.getGPUs()[0]
    gpu_util = int(gpu.load * 100)
    gpu_temp = int(gpu.temperature)
    cpuTemp = 0
    # Use the CPU package temperature reported by the coretemp sensor.
    cpuTemps = psutil.sensors_temperatures()['coretemp']
    for item in cpuTemps:
        if 'Package' in item.label:
            cpuTemp = item.current
    memInfo = f'MEM: {memPercent}% {round(memUsed,1)}GB de {round(memTotal,1)}GB'
    gpuInfo = f'GPU: {gpu_util}% {gpu_temp} C '
    procInfo = f'CPU: {cpuPercent}% {cpuTemp} C GPU: {gpu_util}% {gpu_temp} C'

    def modeWriter():
        """Build the two LCD rows for the current display mode."""
        global mode

        def scrollText(text):
            # Slide a fixed-width window over *text*, advancing one character
            # per call via the module-level Li/Lii cursor pair.
            global Li, Lii
            result = None
            StrProcess = " " + text + " "
            result = StrProcess[Lii: Li]
            Li = Li + 1
            Lii = Lii + 1
            if Li > len(StrProcess):
                Li = 16
                Lii = 0
            return result

        # Default and mode -1: static memory + CPU info.
        writerResult = {"rowone": f"{memInfo}", "rowtwo": f"{procInfo}"}
        if mode == -1:
            writerResult = {"rowone": f"{memInfo}", "rowtwo": f"{procInfo}"}
        if mode == 1:
            # Scrolling memory row.
            writerResult = {"rowone": f"{scrollText(memInfo)}", "rowtwo": f"{procInfo}"}
        if mode == 3:
            # GPU row.
            writerResult = {"rowone": f"{gpuInfo}", "rowtwo": f"{procInfo}"}
        return writerResult

    #print('startig at port',handShakePort)
    # NOTE(review): the port is reopened on every iteration and never closed
    # explicitly -- consider opening it once before the loop.
    ser = serial.Serial(handShakePort, 9600)
    prepareWriter = modeWriter()
    prepareWriter['maxmem'] = maxMemStatus
    message = f'{prepareWriter}\n'
    ser.write((message).encode('ascii'))
    sleep(1)
| brutalzinn/arduino-python-computer-monitor | main.py | main.py | py | 3,817 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.tools.list_ports.comports",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "serial.tools",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "serial.Ser... |
39479793327 | import json
import matplotlib.pyplot as plt
# Load the per-model training histories produced by the cross-validation run.
print("Loading cross validation history...")
with open("imdb_histories", "r") as f:
    histories = json.load(f)

# The first history is the control model; every later one is plotted against it.
control = histories[0]
ctrl_val = control["val_loss"]
epochs = range(1, len(ctrl_val) + 1)

print("Plotting comparisions...")
# One PNG per candidate: control val-loss as dots ('bo'), candidate as '+'.
for idx, hist in enumerate(histories[1:]):
    hist_val = hist["val_loss"]
    title = "Comparison " + str(idx)
    plt.title(title)
    plt.plot(epochs, ctrl_val, 'bo')
    plt.plot(epochs, hist_val, 'b+')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.savefig("comparison" + "_" + str(idx) + ".png")
    plt.clf()
| byelipk/deep-imdb | imdb_model_compare_eval.py | imdb_model_compare_eval.py | py | 642 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.... |
72005765155 | import requests
import requests as rq
from util import jieba
# 使用scikit-learn進行向量轉換
# 忽略在文章中佔了90%的文字(即去除高頻率字彙)
# 文字至少出現在2篇文章中才進行向量轉換
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
# import pandas as pd
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation
def demo(lst: list, n_components=5, top_w=10):
    """Run LDA topic extraction over a list of documents.

    :param lst: documents; empty items are dropped, the rest coerced to str
    :param n_components: number of LDA topics to fit
    :param top_w: number of top words returned per topic
    :returns: a list of ``n_components`` lists, each holding the ``top_w``
        topic words ordered from lowest- to highest-weighted
    """
    docs = [str(item) for item in lst if len(item) > 0]
    reviews_data = pd.DataFrame(docs, columns=['c'])['c'].astype(str).dropna()
    tm = TfidfTransformer()
    cv = CountVectorizer(max_df=10, min_df=3, stop_words="english")
    reviews_data = tm.fit_transform(cv.fit_transform(reviews_data))
    # NOTE(review): the tf-idf transform is applied a second time here,
    # re-weighting an already tf-idf-weighted matrix; kept as-is to preserve
    # the original scoring -- confirm whether this is intentional.
    dtm = tm.fit_transform(reviews_data)
    # Fit LDA with a fixed seed for reproducible topics.
    LDA = LatentDirichletAllocation(n_components=n_components, random_state=0)
    LDA.fit(dtm)
    top_words = []
    for i, topic in enumerate(LDA.components_):
        # NOTE(review): CountVectorizer.get_feature_names() was removed in
        # scikit-learn 1.2; newer versions need get_feature_names_out().
        top_words.append([cv.get_feature_names()[index] for index in topic.argsort()[-1 * top_w:]])
    return top_words
def catch_tag(text: str) -> str:
    """Extract the '#hashtags' from *text* as a single space-separated string.

    Each hashtag keeps its leading '#'; a terminating whitespace character
    or a directly following '#' ends the previous tag with a space.
    """
    collected = []
    capturing = False
    for ch in text:
        if ch == "#":
            if capturing:
                # Back-to-back tags: separate them with a space.
                collected.append(" ")
            else:
                capturing = True
        if capturing:
            if ch in (" ", "\n", "\t", "\ufeff"):
                capturing = False
                collected.append(" ")
            else:
                collected.append(ch)
    return "".join(collected)
def explore_hashtag(text: str, num=3) -> [str]:
    """Return up to *num* related hashtag names from Instagram's top-search endpoint."""
    search_url = (
        "https://www.instagram.com/web/search/topsearch/"
        f"?context=blended&query={text}&rank_token=0.19167611402747253&include_reel=true"
    )
    names = []
    for entry in rq.get(search_url).json()['hashtags'][:num]:
        names.append(entry["hashtag"]["name"])
    return names
def get_ins_post_text(text: str) -> (list, list):
    """Return (raw edge nodes, extracted hashtag strings) for a tag's recent posts."""
    tag_url = f"https://www.instagram.com/explore/tags/{text}/?__a=1"
    payload = rq.get(tag_url).json()
    edges = payload['graphql']['hashtag']['edge_hashtag_to_media']['edges']
    captions = []
    for edge in edges:
        try:
            raw = str(edge['node']['edge_media_to_caption']['edges'][0]['node']['text'])
            captions.append(catch_tag(raw))
        except IndexError:
            # Posts without a caption have an empty edges list; skip them.
            pass
    return edges, captions
def get_ins_from_google_search(text: str, NextPage=0) -> (list, list):
    """Query a Google Custom Search engine for Instagram posts about *text*.

    :param text: search phrase; spaces are stripped before querying
    :param NextPage: 0-based index of the 10-result page to fetch
    :returns: (post metadata dicts, whitespace-joined jieba-tokenised captions)

    Falls back to a second API key/engine when the first request has no
    ``items`` (typically quota exhaustion); raises KeyError if both fail.
    """
    # Bug fix: str.replace returns a new string; the original discarded the
    # result, so queries were sent with their spaces intact.
    text = text.replace(" ", "")
    # WARNING(review): API keys are hard-coded below; move them to config.
    url = f"https://www.googleapis.com/customsearch/v1?key=AIzaSyA3fN27gbdKTelvniFWyrpMpEH6nka1sIg&q={text}&cx=9ff2e57a2817b1aec&start={1+NextPage*10}"
    temp_text_arr = []
    temp_pic_arr = []
    json_obj = rq.get(url).json()
    try:
        edges = json_obj['items']
    except KeyError as e:
        # First key/engine returned no items -- retry with the fallback pair.
        print(url)
        url = f"https://www.googleapis.com/customsearch/v1?key=AIzaSyBinwEHB0IW80b1G9KmHuEA0zVHbUH_lrg&q={text}&cx=c8c145446517fb954&start={1 + NextPage * 10}"
        json_obj = rq.get(url).json()
        try:
            edges = json_obj['items']
        except KeyError as e:
            print(url)
            print(json_obj)
            raise e
    for e in edges:
        try:
            url = e['link']
            snippet = e['snippet']
            title = e['title']
            # Instagram shortcode is the path segment after '/p/'.
            shortcode = url[url.find('/p/')+3:url.find('/', url.find('/p/')+3)]
            description = e['pagemap']['metatags'][0]['og:description']
            # og:description looks like "<author> - <caption>: ...".
            source_content_post = description[description.find(":")+1:]
            content_post = " ".join([w for w in list(jieba.cut(source_content_post)) if len(w) > 1])
            author = description[description.find("-")+1:description.find(":")]
            image_post = e['pagemap']['metatags'][0]['og:image']
            temp_text_arr.append(str(content_post))
            temp_pic_arr.append({
                "url": f"https://www.instagram.com/p/{shortcode}/",
                "shortcode": shortcode,
                "description": description,
                "media": image_post,
                "content": source_content_post,
                "author": author,
                "title": title
            })
        except IndexError:
            pass
        except KeyError:
            # Result lacks the expected pagemap metadata; skip it.
            pass
    return temp_pic_arr, temp_text_arr
def export_spot(location="烏來"):
    """Collect Instagram posts about *location* (via the Google CSE helper),
    run LDA topic extraction over their captions, and bucket the posts
    by topic.

    Returns a dict mapping 'word1.word2.word3' topic labels to lists of
    post dicts; each post is placed in at most one topic bucket.
    """
    list_text = []
    list_posts = []
    # Pull three result pages (up to 30 hits) for the location.
    for i in range(3):
        posts, text = get_ins_from_google_search(location, NextPage=i)
        list_text += text
        for p in posts:
            #
            try:
                post_text = p['description']
                content = p['content']
                url = p['url']
                thumbnail_src = p['media']
                author = p['author']
                shortcode = p['shortcode']
                title = p['title']
                list_posts.append({
                    "shortcode": shortcode,
                    "post_text": post_text,
                    "thumbnail_src": thumbnail_src,
                    "accessibility_caption": author,
                    "title": f"{content} {author} ",
                    "media": thumbnail_src,
                    "url": f"{url}",
                })
            except IndexError:
                pass
    # Three topics, three words each (see demo()).
    arr = demo(list_text, n_components=3, top_w=3)
    topics_dict = {".".join(topics): [] for topics in arr}
    exist_photo = []  # shortcodes already assigned to some topic bucket
    for p in list_posts:
        for k in topics_dict.keys():
            for tag in str(k).split("."):
                # A post joins the first topic whose word appears in its text.
                if p['post_text'].find(tag) != -1:
                    if p['shortcode'] not in exist_photo:
                        topics_dict[k].append(p)
                        exist_photo.append(p['shortcode'])
    return topics_dict
def ins_get_pic_by_short_code(code='B7WLKhlDn_p'):
    """Return the og:image URL of the Instagram post identified by *code*."""
    # A mobile user-agent makes Instagram serve the page with og: meta tags.
    mobile_headers = {
        'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1'}
    resp = requests.get(f"https://www.instagram.com/p/{code}/", headers=mobile_headers)
    page = BeautifulSoup(resp.text, 'html.parser')
    print(resp.text)
    metas = page.findAll("meta", {"property": "og:image"})
    return metas[0].get("content")
"""
import json
print(json.dumps(export_spot(location="龍山寺")))
print(ins_get_pic_by_short_code())
"""
| Chunshan-Theta/GlobePocket | util/ins_explore.py | ins_explore.py | py | 6,623 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 24,
"u... |
1585776599 | from typing import Optional, List
from validator_collection import validators, checkers
from highcharts_core.metaclasses import HighchartsMeta
from highcharts_core.decorators import class_sensitive, validate_types
from highcharts_core.options.sonification.track_configurations import (InstrumentTrackConfiguration,
SpeechTrackConfiguration,
ContextTrackConfiguration)
from highcharts_core.options.sonification.grouping import SonificationGrouping
class SeriesSonification(HighchartsMeta):
    """Sonification/audio chart options for a series."""

    def __init__(self, **kwargs):
        """Initialize the backing fields, then assign every option through
        its property setter so validation/coercion always runs."""
        self._context_tracks = None
        self._default_instrument_options = None
        self._default_speech_options = None
        self._enabled = None
        self._point_grouping = None
        self._tracks = None

        self.context_tracks = kwargs.get('context_tracks', None)
        self.default_instrument_options = kwargs.get('default_instrument_options', None)
        self.default_speech_options = kwargs.get('default_speech_options', None)
        self.enabled = kwargs.get('enabled', None)
        self.point_grouping = kwargs.get('point_grouping', None)
        self.tracks = kwargs.get('tracks', None)

    @property
    def _dot_path(self) -> Optional[str]:
        """The dot-notation path to the options key for the current class.

        :rtype: :class:`str <python:str>` or :obj:`None <python:None>`
        """
        return 'plotOptions.series.sonification'

    @property
    def context_tracks(self) -> Optional[ContextTrackConfiguration | List[ContextTrackConfiguration]]:
        """Context tracks for the series. Context tracks are not tied to data points.

        :rtype: :class:`ContextTrackConfiguration <highcharts_core.options.sonification.track_configurations.ContextTrackConfiguration>`
          or :class:`list <python:list>` of track configuration types
        """
        return self._context_tracks

    @context_tracks.setter
    def context_tracks(self, value):
        # Accepts either a single configuration or an iterable of them.
        if not value:
            self._context_tracks = None
        elif checkers.is_iterable(value, forbid_literals = (str, bytes, dict)):
            self._context_tracks = [validate_types(x, types = (ContextTrackConfiguration)) for x in value]
        else:
            value = validate_types(value, types = ContextTrackConfiguration)
            self._context_tracks = value

    @property
    def default_instrument_options(self) -> Optional[InstrumentTrackConfiguration]:
        """Default sonification options for all instrument tracks.

        .. warning::

          If specific options are also set on individual tracks or per-series, this configuration will be *overridden*.

        :rtype: :class:`InstrumentTrackConfiguration <highcharts_core.options.sonification.track_configurations.InstrumentTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._default_instrument_options

    @default_instrument_options.setter
    @class_sensitive(InstrumentTrackConfiguration)
    def default_instrument_options(self, value):
        # Validation/coercion is handled by the class_sensitive decorator.
        self._default_instrument_options = value

    @property
    def default_speech_options(self) -> Optional[SpeechTrackConfiguration]:
        """Default sonification options for all speech tracks.

        .. warning::

          If specific options are also set on individual tracks or per-series, this configuration will be *overridden*.

        :rtype: :class:`SpeechTrackConfiguration <highcharts_core.options.sonification.track_configurations.SpeechTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._default_speech_options

    @default_speech_options.setter
    @class_sensitive(SpeechTrackConfiguration)
    def default_speech_options(self, value):
        # Validation/coercion is handled by the class_sensitive decorator.
        self._default_speech_options = value

    @property
    def enabled(self) -> Optional[bool]:
        """If ``True``, sonification will be enabled for the series.

        :rtype: :class:`bool <python:bool>` or :obj:`None <python:None>`
        """
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        # Preserve None (unset) rather than coercing it to False.
        if value is None:
            self._enabled = None
        else:
            self._enabled = bool(value)

    @property
    def point_grouping(self) -> Optional[SonificationGrouping]:
        """Options for grouping data points together when sonifying.

        This allows for the visual presentation to contain more points than what is being played.

        If not enabled, all visible / uncropped points are played.

        :rtype: :class:`SonificationGrouping <highcharts_core.options.sonification.grouping.SonificationGrouping>` or
          :obj:`None <python:None>`
        """
        return self._point_grouping

    @point_grouping.setter
    @class_sensitive(SonificationGrouping)
    def point_grouping(self, value):
        # Validation/coercion is handled by the class_sensitive decorator.
        self._point_grouping = value

    @property
    def tracks(self) -> Optional[ContextTrackConfiguration | List[ContextTrackConfiguration]]:
        """Tracks for the series.

        :rtype: :class:`ContextTrackConfiguration <highcharts_core.options.sonification.track_configurations.ContextTrackConfiguration>`
          or :class:`list <python:list>` of
          :class:`ContextTrackConfiguration <highcharts_core.options.sonification.track_configurations.ContextTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._tracks

    @tracks.setter
    def tracks(self, value):
        # Accepts either a single configuration or an iterable of them.
        if not value:
            self._tracks = None
        elif checkers.is_iterable(value, forbid_literals = (str, bytes, dict)):
            self._tracks = [validate_types(x, types = (ContextTrackConfiguration)) for x in value]
        else:
            self._tracks = validate_types(value, types = (ContextTrackConfiguration))

    @classmethod
    def _get_kwargs_from_dict(cls, as_dict):
        """Map camelCase Highcharts keys to the constructor's kwargs."""
        kwargs = {
            'context_tracks': as_dict.get('contextTracks', None),
            'default_instrument_options': as_dict.get('defaultInstrumentOptions', None),
            'default_speech_options': as_dict.get('defaultSpeechOptions', None),
            'enabled': as_dict.get('enabled', None),
            'point_grouping': as_dict.get('pointGrouping', None),
            'tracks': as_dict.get('tracks', None),
        }

        return kwargs

    def _to_untrimmed_dict(self, in_cls = None) -> dict:
        """Serialize to a camelCase dict; None values are trimmed downstream."""
        untrimmed = {
            'contextTracks': self.context_tracks,
            'defaultInstrumentOptions': self.default_instrument_options,
            'defaultSpeechOptions': self.default_speech_options,
            'enabled': self.enabled,
            'pointGrouping': self.point_grouping,
            'tracks': self.tracks,
        }

        return untrimmed
| highcharts-for-python/highcharts-core | highcharts_core/options/plot_options/sonification.py | sonification.py | py | 7,074 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "highcharts_core.metaclasses.HighchartsMeta",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 40,
"usage_type": "name"
},
{
"api_na... |
39156355623 |
#+---------------------------------------------------------------------
#+ Python Script that creates CMAQ_ADJ v4.5 forcing files
#+ Check the 'CHANGE' comments to define your directories for the run
#+ Author: Camilo Moreno
#+ Email = cama9709@gmail.com
#+---------------------------------------------------------------------
#%%
import numpy as np
import glob
from netCDF4 import Dataset
from datetime import datetime, date, timedelta
from os import listdir, scandir, getcwd
def create_forced_uniarray(coords, dx, dy, lons, lats):
    """Return a 2-D array with 1.0 for cells inside the lat/lon box, 0.0 outside.

    Keyword arguments:
    coords -- list [lat_min, lon_min, lat_max, lon_max] of lower left and
              upper right corners
    dx -- float representing half the width of a grid cell (degrees)
    dy -- float representing half the height of a grid cell (degrees)
    lons -- array containing the cell-centered longitude values of the grid
    lats -- array containing the cell-centered latitude values of the grid
    """
    lat_min = coords[0] - dy
    lon_min = coords[1] - dx
    lat_max = coords[2] + dy
    lon_max = coords[3] + dx
    # Bug fix: the original did ``forced_uniarr = lons`` and zeroed entries
    # in place, clobbering the caller's longitude array (so the next location
    # processed with the same ``lons`` saw corrupted data); it also dropped
    # any in-box cell whose longitude happened to be exactly 0. A boolean
    # mask avoids both problems without touching the inputs.
    inside = ((lons >= lon_min) & (lons <= lon_max)
              & (lats >= lat_min) & (lats <= lat_max))
    return inside.astype(float)
def create_forced_var(ds_latlon, dic_coords):
    """Create array where cells that wanted to be forced are equal to 1
    and the rest are equal to 0 for multiple locations.

    Keyword arguments:
    ds_latlon -- Dataset of the latlon netCDF file of the grid
    dic_coords -- dictionary of latlon coordinates for lower left and
                  upper right corners of each specified key-value location
    """
    lats = ds_latlon.variables['LAT'][0][0][:][:]
    lons = ds_latlon.variables['LON'][0][0][:][:]
    lats = np.ma.getdata(lats)
    lons = np.ma.getdata(lons)
    # Convert the grid's cell size (metres) into half-cell widths in degrees.
    meters_per_degree = 110574.61
    dx = ds_latlon.XCELL/meters_per_degree/2
    dy = ds_latlon.YCELL/meters_per_degree/2
    # Output dimensions: 25 hourly time steps, one layer, grid rows/cols.
    ltime = 25
    llay = 1
    lrow, lcol = lats.shape
    forced_arr = np.zeros([ltime, llay, lrow, lcol])
    for coords in dic_coords.values():
        # NOTE(review): create_forced_uniarray masks the very array it is
        # handed, so ``lons`` may arrive at the second iteration already
        # zeroed -- verify when forcing more than one location.
        forced_uniarr = create_forced_uniarray(list(coords), dx, dy, lons, lats)
        # Tile the 2-D mask across all time steps / layers.
        forced_uniarr = np.resize(forced_uniarr, [ltime, llay, lrow, lcol])
        forced_arr = forced_arr + forced_uniarr
    return forced_arr
def monthly_date(day):
    """Convert a date from YYYYDDD (year + Julian day-of-year) to YYYYMMDD.

    Keyword arguments:
    day -- int of the day in format YYYYDDD
    """
    text = str(day)
    year = int(text[:4])
    # Day-of-year offset from January 1st of the same year.
    the_date = datetime(year, 1, 1) + timedelta(int(text[4:]) - 1)
    return str(year) + format(the_date.month, '02d') + format(the_date.day, '02d')
def create_ncfile(save_dir, ds_latlon, spc_name, forced_arr, ds_conc):
    """Create the final adjoint-forcing netCDF file for one day.

    Keyword arguments:
    save_dir -- string of the location for saving the netCDF files
    ds_latlon -- Dataset of the latlon netCDF file of the grid
    spc_name -- string of the forced species name
    forced_arr -- array containing the forced values by location for the species
    ds_conc -- conc file dataset (supplies dates, grid attributes and var attrs)
    """
    hr = 0  # NOTE(review): unused
    num_vars = 1
    lays = 1
    ltime = 25
    cols = len(ds_latlon.dimensions['COL'])
    rows = len(ds_latlon.dimensions['ROW'])
    datetimes = len(ds_latlon.dimensions['DATE-TIME'])
    day = ds_conc.SDATE

    #* Create new netCDF named after the day in YYYYMMDD form
    day_monthly = monthly_date(day)
    new_cmaq_file = f'{save_dir}/ADJ_FORCE.{day_monthly}'
    ds_new_cmaq = Dataset(new_cmaq_file, open = True, mode = 'w', format= "NETCDF3_64BIT")

    #* Create dimensions (TSTEP unlimited)
    TSTEP = ds_new_cmaq.createDimension("TSTEP", None)
    DATE_TIME = ds_new_cmaq.createDimension("DATE-TIME", datetimes)
    LAY = ds_new_cmaq.createDimension("LAY", lays)
    VAR = ds_new_cmaq.createDimension("VAR", num_vars)
    ROW = ds_new_cmaq.createDimension("ROW", rows)
    COL = ds_new_cmaq.createDimension("COL", cols)
    ds_new_cmaq.sync()

    #* Copy IOAPI global attributes over from the CONC file when present
    attrs=["IOAPI_VERSION", "EXEC_ID", "FTYPE", "CDATE", "CTIME", "WDATE", "WTIME",
           "SDATE", "STIME", "TSTEP", "NTHIK", "NCOLS", "NROWS", "GDTYP", "P_ALP",
           "P_BET", "P_GAM", "XCENT", "YCENT", "XORIG", "YORIG", "XCELL", "YCELL",
           "VGTYP", "VGTOP", "VGLVLS", "GDNAM", "HISTORY"]
    for attr in attrs:
        if hasattr(ds_conc, attr):
            attrVal = getattr(ds_conc, attr)
            setattr(ds_new_cmaq, attr, attrVal)

    # IOAPI requires VAR-LIST entries padded to 16 characters.
    varlist = spc_name + ' '*(16 - len(spc_name))
    cmaq_attrs = {'NLAYS': np.int32(lays),
                  'NVARS': np.int32(num_vars),
                  'UPNAM': "RD_FORCE_FILE",
                  'VAR-LIST': varlist,
                  'FILEDESC': "Adjoint forcing file. Forcing file of specified geolocations"
                  }
    for attr in cmaq_attrs:
        ds_new_cmaq.setncattr(attr, cmaq_attrs[attr])

    #* Create variables (TFLAG plus the single forced species)
    tflag = ds_new_cmaq.createVariable('TFLAG', 'i4', ('TSTEP', 'VAR', 'DATE-TIME'))
    fill_attrs(ds_conc, tflag)
    var_temp = ds_new_cmaq.createVariable(spc_name,"f4",("TSTEP", "LAY", "ROW", "COL"))
    fill_attrs(ds_conc, var_temp)

    #* Fill variables; TFLAG timestamps are copied from the CONC file
    ds_new_cmaq.variables[spc_name][:, :, :] = forced_arr
    dattim = np.squeeze(ds_conc.variables['TFLAG'][:][:])
    dattim = dattim[0:ltime,1,:]
    tflag[:] = np.zeros([ltime,lays,datetimes])
    tflag[:] = np.reshape(dattim,[ltime,lays,datetimes])

    #* Close new netcdf file
    ds_new_cmaq.close()
    print(f"{day_monthly} Forcing file DONE")
def fill_attrs(ds_conc, nc_var):
    """Copy long_name/units/var_desc onto *nc_var* from the CONC file's
    variable of the same name, skipping attributes that are absent.

    Keyword arguments:
    ds_conc -- conc file dataset
    nc_var -- netCDF variable of the new file (its .name selects the source)
    """
    source_var = ds_conc.variables[nc_var.name]
    for attr_name in ("long_name", "units", "var_desc"):
        if hasattr(source_var, attr_name):
            setattr(nc_var, attr_name, getattr(source_var, attr_name))
def get_concfiels(conc_file_dir):
    """Return the sorted paths of all 'CONC.*' files in a directory.

    Keyword arguments:
    conc_file_dir -- directory that holds the daily CONC files
    """
    return sorted(glob.glob(f'{conc_file_dir}/CONC.*'))
# %%
if __name__ == "__main__":
    #CHANGE: save_dir: directory path where the forced files will be saved
    #        latlon_file: path of the latlon netCDF of the run grid
    #        conc_file_dir: path where CONC files are located
    #        spc_name: cbo5_aero5_aq name of the one species to be forced
    #        dic_coord: dictionary containing a list of latlon coords of lower left
    #                   and upper right corners for each key-value location.
    #        Format fo dic_coord must be:
    #        dic_coord = {
    #           'name_of_location_1': [lower_left_lat_1, lower_left_lon_1, upper_left_lat_1, upper_left_lon_1]
    #           'name_of_location_2': [lower_left_lat_2, lower_left_lon_2, upper_left_lat_2, upper_left_lon_2]
    #        }
    save_dir = '/Volumes/Avispa/ADJ_FORCE_files'
    latlon_file = '/Volumes/Avispa/latlon.conc'
    # NOTE(review): leading double slash looks like a typo for a single
    # slash -- harmless on POSIX paths, but confirm
    conc_file_dir = '//Volumes/Avispa/Conc_files'
    spc_name = 'ASO4I'
    dic_coords = {
        'Bogota' : [4.461864, -74.223421 , 4.833805, -74.007853]
    }
    # grid lat/lon reference stays open for the whole run
    ds_latlon = Dataset(latlon_file, mode = 'r', open = True)
    all_files = get_concfiels(conc_file_dir)
    # one forcing file is produced per daily CONC file
    for file in all_files:
        ds_conc = Dataset(file, mode = 'r', open = True)
        # NOTE(review): forced_arr does not depend on ds_conc, so this
        # recomputes the same mask each iteration -- could be hoisted
        forced_arr = create_forced_var(ds_latlon, dic_coords)
        create_ncfile(save_dir, ds_latlon, spc_name, forced_arr, ds_conc)
        ds_conc.close()
    ds_latlon.close()
# %%
| kamitoteles/Forcingfile_generator_CMAQ_adj_v4.5 | cmaqadj_forcefile.py | cmaqadj_forcefile.py | py | 8,146 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ma.getdata",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.ma.getdata",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_... |
35533894023 | import json
import logging
try:
from http import client as httplib
except ImportError:
# Python 2.6/2.7
import httplib
import urllib
from pretenders.common.exceptions import (
ConfigurationError,
ResourceNotFound,
UnexpectedResponseStatus,
)
from pretenders.common.pretender import PretenderModel
from pretenders.common.http import binary_to_ascii, Preset
LOGGER = logging.getLogger("pretenders.client")
class APIHelper(object):
    """Small REST helper bound to one resource *path* on a shared connection."""

    def __init__(self, connection, path):
        self.connection = connection
        self.path = path

    def _get_response(self, method, *args, **kwargs):
        # fire the request and hand back the raw response object
        self.connection.request(method=method, *args, **kwargs)
        return self.connection.getresponse()

    def http(self, method, *args, **kwargs):
        """
        Issue an HTTP request.

        The HTTP connection is reused between requests. We try to detect
        dropped connections, and in those cases try to reconnect to the remote
        server.

        :returns: tuple of (response object, fully-read response body)
        """
        try:
            response = self._get_response(method, *args, **kwargs)
        except (httplib.CannotSendRequest, httplib.BadStatusLine):
            # the server dropped the keep-alive connection; reconnect once
            self.connection.close()
            self.connection.connect()
            response = self._get_response(method, *args, **kwargs)
        return response, response.read()

    def get(self, id):
        """GET a single resource below this helper's path."""
        return self.http("GET", url="{0}/{1}".format(self.path, id))

    def list(self, filters={}):
        """GET the resource collection, optionally filtered via query string.

        *filters* is never mutated, so the shared default dict is safe here.
        """
        # BUGFIX: urllib.urlencode only exists on Python 2; on Python 3 it
        # moved to urllib.parse.urlencode, so the old code raised
        # AttributeError. Import the right symbol for the running interpreter.
        try:
            from urllib.parse import urlencode
        except ImportError:
            # Python 2.6/2.7
            from urllib import urlencode
        query_string = ""
        if filters:
            query_string = "?{0}".format(urlencode(filters))
        url = "{0}{1}".format(self.path, query_string)
        return self.http("GET", url=url)

    def reset(self):
        """DELETE everything at this helper's path."""
        return self.http("DELETE", url=self.path)
class PresetHelper(APIHelper):
    """API helper that can additionally register response presets."""

    def add(
        self,
        match_rule=None,
        response_status=200,
        response_body=b"",
        response_headers={},
        times=1,
        after=0,
    ):
        """
        Add a new preset to the boss server.

        :param match_rule: rule deciding which incoming requests this
            preset answers
        :param response_status: HTTP status the mock will reply with
        :param response_body: reply body as bytes (ascii-encoded for
            transport)
        :param response_headers: headers to attach to the mocked reply
        :param times: how many requests this preset should serve
        :param after: delay before responding
        :raises ConfigurationError: when the boss rejects the preset
        """
        new_preset = Preset(
            headers=response_headers,
            body=binary_to_ascii(response_body),
            status=response_status,
            rule=match_rule,
            times=times,
            after=after,
        )
        response, data = self.http("POST", url=self.path, body=new_preset.as_json())
        # a non-200 reply carries the boss's error text in the body
        if response.status != 200:
            raise ConfigurationError(data.decode())
        return response
class BossClient(object):
    """Client for the pretenders 'boss' server.

    Subclasses set ``boss_mock_type`` (e.g. 'http'); when set, constructing
    the client immediately asks the boss to spin up a mock server and wires
    up helpers for its history and presets.
    """

    # overridden by subclasses; None means "do not request a mock"
    boss_mock_type = None

    def __init__(self, host, port, timeout=None, name=None):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.name = name
        self.full_host = "{0}:{1}".format(self.host, self.port)
        # one keep-alive connection shared by all helpers below
        self.connection = httplib.HTTPConnection(self.full_host)
        self.boss_access = APIHelper(self.connection, "")
        LOGGER.info(
            "Requesting {0} pretender. Port:{1} Timeout:{2} ({3})".format(
                self.boss_mock_type, self.port, self.timeout, self.name
            )
        )
        # only concrete mock types talk to the boss during construction
        if self.boss_mock_type:
            self.pretender_details = self._request_mock_access()
        else:
            self.pretender_details = {}
        self.history = APIHelper(
            self.connection, "/history/{0}".format(self.pretend_access_point_id)
        )
        self.preset = PresetHelper(
            self.connection, "/preset/{0}".format(self.pretend_access_point_id)
        )

    def reset(self):
        """
        Delete all history and presets; returns self for chaining.
        """
        self.history.reset()
        self.preset.reset()
        return self

    @property
    def create_mock_url(self):
        """URL used to request a new mock of this client's type."""
        return "/{0}".format(self.boss_mock_type)

    @property
    def pretend_access_point_id(self):
        """Unique id of the pretender, or '' before one is created."""
        return self.pretender_details.get("id", "")

    @property
    def pretend_access_point(self):
        """host:port string of the mock server."""
        return self.full_host

    def _request_mock_access(self):
        """
        Ask the boss to create a mock server by POSTing to ``create_mock_url``
        :returns:
            A tuple containing:
                position 0: hostname[:port] of the mock server
                position 1: unique id of the pretender (for teardown
                            purposes)
        """
        post_body = {"name": self.name}
        # the boss only needs a timeout when one was requested
        if self.timeout:
            post_body["pretender_timeout"] = self.timeout
        post_body = json.dumps(post_body)
        response, data = self.boss_access.http(
            "POST", url=self.create_mock_url, body=post_body
        )
        pretender_json = data.decode("ascii")
        pretender_details = json.loads(pretender_json)
        return pretender_details

    @property
    def delete_mock_url(self):
        """URL addressing this client's own pretender."""
        return "{0}/{1}".format(self.create_mock_url, self.pretend_access_point_id)

    def delete_mock(self):
        "Delete the mock server that this points to."
        response, data = self.boss_access.http(
            method="DELETE", url=self.delete_mock_url
        )
        if not response.status == 200:
            raise Exception("Delete failed")

    def get_pretender(self):
        "Get pretenders from the server in dict format"
        response, data = self.boss_access.http(
            method="GET",
            url="/{0}/{1}".format(self.boss_mock_type, self.pretend_access_point_id),
        )
        # 200 -> model, 404 -> mock timed out/was shut down, else -> error
        if response.status == 200:
            return PretenderModel.from_json_response(data)
        elif response.status == 404:
            raise ResourceNotFound("The mock server for this client was shutdown.")
        else:
            raise UnexpectedResponseStatus(response.status)
| pretenders/pretenders | pretenders/client/__init__.py | __init__.py | py | 5,630 | python | en | code | 108 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "httplib.CannotSendRequest",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "httplib.BadStatusLine",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_n... |
3841312108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import dependency library
import numpy as np
import pandas as pd
from static import config
from scipy import ndimage
from collections import Counter
import csv
import os
# import user defined library
import utils.general_func as general_f
def get_cell_name_affine_table(path=config.data_path + r'name_dictionary_no_name.csv'):
    """Build the two-way mapping between cell labels and cell names.

    :return: (label -> name dict read from CSV column '0',
              name -> label dict); labels start at 1, there is no label zero,
              so remember to plus 1 when indexing from zero-based arrays.
    """
    mapping = pd.read_csv(path, index_col=0).to_dict()['0']
    inverse = dict(zip(mapping.values(), mapping.keys()))
    return mapping, inverse
def nii_get_cell_surface(img_arr, cell_key):
    """Locate the one-voxel shell just outside the cell labelled *cell_key*.

    :param img_arr: labelled 3D segmentation array
    :param cell_key: label of the cell of interest
    :return: (N x ndim array of shell voxel coordinates, their centroid)
    """
    cell_mask = img_arr == cell_key
    # voxels gained by a one-step dilation form the outer surface shell
    shell_mask = ndimage.binary_dilation(cell_mask) ^ cell_mask
    surface_loc = np.argwhere(shell_mask)
    return surface_loc, surface_loc.mean(axis=0)
# np.set_printoptions(threshold=100000)
def nii_count_volume_surface(this_image):
    """
    :param this_image: the nii image from 3D image, count volume and surface
    :return: volume counter, surface counter (background label 0 removed
        from both)
    """
    # assumes a nibabel-style image exposing .get_data() -- TODO confirm
    img_arr = this_image.get_data()
    img_arr_shape = img_arr.shape
    img_arr_count = np.prod(img_arr_shape)
    # NOTE(review): scipy documents connectivity in the range 1..rank;
    # -1 is outside that range -- presumably treated as 1, confirm
    struc_element = ndimage.generate_binary_structure(3, -1)
    # ---------------- erosion ----------------
    # with the original image data
    img_arr_erosion = ndimage.grey_erosion(img_arr, footprint=struc_element)
    # voxels changed by the erosion mark each label's boundary shell;
    # NOTE(review): the difference stores label-minus-neighbour values, so
    # counting them per value assumes boundary neighbours are background (0)
    surface_data_result = img_arr - img_arr_erosion
    # per-label voxel counts = volumes; drop the background (label 0)
    cnt1 = Counter(np.reshape(img_arr, img_arr_count))
    del cnt1[0]
    # per-value counts of the boundary image = surfaces; drop zero entries
    cnt2 = Counter(np.reshape(surface_data_result, img_arr_count))
    del cnt2[0]
    return cnt1, cnt2
def nii_count_contact_surface(this_image):
    # NOTE(review): appears unfinished -- it only tallies voxel values and
    # prints the counter's type; nothing is computed or returned yet
    img_arr = this_image.get_data()
    img_arr_shape = img_arr.shape
    img_arr_count = np.prod(img_arr_shape)
    cnt = Counter(np.reshape(img_arr, img_arr_count))
    print(type(cnt))
def count_volume_surface_normalization_tocsv(path_tmp):
    """
    normalization coefficient= (volume/10000)**(1/3)
    :param path_tmp: directory holding one segmented nii file per time point
    :return: None; writes '<embryo_name>.csv' (rows indexed by
        'time::cell_name') into config.dir_my_data_volume_surface
    """
    name_list, _ = get_cell_name_affine_table()
    data_embryo_time_slices = pd.DataFrame(columns=['volume', 'surface', 'normalized_c'])
    for temporal_embryo in os.listdir(path_tmp):
        # skip sub-directories; only plain files are time-point images
        if os.path.isfile(os.path.join(path_tmp, temporal_embryo)):
            img = general_f.load_nitf2_img(os.path.join(path_tmp, temporal_embryo))
            volume_counter, surface_counter = nii_count_volume_surface(img)
            # file names are assumed to look like '<embryo>_<time>_...'
            # -- TODO confirm naming convention
            time_point = str.split(temporal_embryo, '_')[1]
            print(path_tmp, time_point)
            for cell_index in volume_counter:
                cell_name = name_list[cell_index]
                data_embryo_time_slices.at[time_point + '::' + cell_name, 'volume'] = volume_counter[cell_index]
                data_embryo_time_slices.at[time_point + '::' + cell_name, 'surface'] = surface_counter[cell_index]
                data_embryo_time_slices.at[time_point + '::' + cell_name, 'normalized_c'] = (volume_counter[
                    cell_index] / 10000) ** (
                    1 / 3)
    embryo_name = os.path.split(path_tmp)[-1]
    data_embryo_time_slices.to_csv(os.path.join(config.dir_my_data_volume_surface, embryo_name + '.csv'))
| chiellini/CellFeatureEnhancementModel | utils/cell_func.py | cell_func.py | py | 3,769 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "static.config.data_path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "static.config",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.ndim... |
22752641641 | from django.urls.conf import path
from . import views
# URL namespace for reverse()/{% url %} lookups, e.g. 'events:create_event'
app_name = 'events'
urlpatterns = [
    # create a new event
    path('create-event/', views.CreateEventView.as_view(), name="create_event"),
    # edit an existing event, addressed by its UUID
    path('edit-event/<uuid:event_id>/', views.EditEventView.as_view(), name='edit_event'),
    # list events
    path('get-events/', views.RetrieveEventView.as_view(), name="get_events"),
    # register a user (by UUID) as participant of an event (by UUID)
    path('join-event/<uuid:event_id>/<uuid:user_id>/', views.JoinEventView.as_view(), name='join_event')
] | rashiddaha/django_meet | events/urls.py | urls.py | py | 449 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.conf.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.conf.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.conf.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django... |
5890621434 | import torch
from torch.autograd import Variable as Var
import torch.nn as nn
def ifcond(cond, x_1, x_2):
    """Element-wise select: x_1 where cond is truthy, x_2 elsewhere."""
    # squash the condition to float {0.0, 1.0}
    mask = cond.byte().float()
    # plain tensors without autograd support get wrapped as a constant
    if not hasattr(mask, "backward"):
        mask = Var(mask, requires_grad=False)
    return mask * x_1 + (1 - mask) * x_2
def index_select_if_none(input, dim, index, ifnone):
    """Select rows of *input* by *index*, substituting *ifnone* rows for
    out-of-range indices.

    NOTE: mutates *index* in place via masked_fill_.
    """
    input_max = input.data.shape[dim]
    # NOTE(review): 'index > 0' treats index 0 as out-of-range (not >= 0)
    # -- confirm that is intentional
    index_mask = ((index > 0) * (index < input_max)).eq(0)
    # out-of-range entries are redirected to the appended ifnone block
    index.masked_fill_(index_mask, input_max)
    # NOTE(review): concatenation is along dim 0 regardless of *dim*, which
    # is only used for the bounds check -- confirm callers rely on this
    input = torch.cat([input, ifnone], dim=0)
    return input[index]
def from_list(ls, cuda):
    """Build a float tensor from a Python list, optionally moved to the GPU."""
    tensor = torch.Tensor(ls)
    return tensor.cuda() if cuda else tensor
def from_long_list(ls, cuda):
    """Build a 64-bit integer tensor from a list, optionally on the GPU."""
    tensor = torch.LongTensor(ls)
    return tensor.cuda() if cuda else tensor
def zeros(*shape, cuda=False):
    """Return a zero-filled float tensor of the given shape."""
    tensor = torch.FloatTensor(*shape).zero_()
    return tensor.cuda() if cuda else tensor
def zeros_var(*shape, cuda=False):
    """Zero-filled float tensor wrapped as a non-gradient Variable."""
    tensor = torch.FloatTensor(*shape).zero_()
    if cuda:
        tensor = tensor.cuda()
    return Var(tensor, requires_grad=False)
def normal_var(*shape, cuda=False, scale=1.0):
    """Non-gradient Variable drawn from N(0, scale) with the given shape."""
    tensor = torch.FloatTensor(*shape).normal_(0.0, scale)
    if cuda:
        tensor = tensor.cuda()
    return Var(tensor, requires_grad=False)
def init_var(*shape, cuda=False, scale=1.0, training=True):
    """Gaussian-initialised Variable at train time, zeros at eval time."""
    if training:
        tensor = torch.FloatTensor(*shape).normal_(0.0, scale)
    else:
        tensor = torch.FloatTensor(*shape).zero_()
    if cuda:
        tensor = tensor.cuda()
    return Var(tensor, requires_grad=False)
def zeros_like(tensor, cuda):
    """Zero tensor with the same shape as *tensor* (Variable or plain)."""
    shape = tensor.data.shape if hasattr(tensor, "backward") else tensor.shape
    out = torch.FloatTensor(*shape).zero_()
    return out.cuda() if cuda else out
def add_padding_and_stack(tensors, cuda, dim=0, max_length=None):
    """Zero-pad every tensor along *dim* to a common length, then stack."""
    if max_length is None:
        max_length = max(t.data.shape[dim] for t in tensors)
    padded = []
    for tensor in tensors:
        shape = list(tensor.data.shape)
        shape[dim] = max_length - shape[dim]
        assert shape[dim] >= 0
        if shape[dim] > 0:
            pad = torch.FloatTensor(*shape).zero_()
            if cuda:
                pad = pad.cuda()
            tensor = torch.cat([tensor, Var(pad)], dim=dim)
        padded.append(tensor)
    return torch.stack(padded)
def add_padding_and_cat(tensors, cuda, dim=1, cat_dim=0, max_length=None):
    """Zero-pad every tensor along *dim* to a common length, then
    concatenate the results along *cat_dim*."""
    if max_length is None:
        max_length = max(t.data.shape[dim] for t in tensors)
    padded = []
    for tensor in tensors:
        shape = list(tensor.data.shape)
        shape[dim] = max_length - shape[dim]
        assert shape[dim] >= 0
        if shape[dim] > 0:
            pad = torch.FloatTensor(*shape).zero_()
            if cuda:
                pad = pad.cuda()
            tensor = torch.cat([tensor, Var(pad)], dim=dim)
        padded.append(tensor)
    return torch.cat(padded, dim=cat_dim)
def parameter_init_zero(*dims):
    """Trainable nn.Parameter of the given shape, initialised to zero."""
    zero_tensor = torch.FloatTensor(*dims).zero_()
    return nn.Parameter(zero_tensor)
def dropout_matrix(*dims, p=0.2, train=True, cuda=False):
    """Dropout mask scaled by the keep probability.

    At train time each entry is 1 with probability (1-p) and 0 otherwise;
    at eval time every entry equals (1-p), which matches the classic
    (non-inverted) dropout scheme.
    """
    assert 0.0 <= p <= 1.0, "Invalid probability: {}".format(p)
    keep = 1 - p
    # eval-mode matrix: constant keep probability everywhere
    mask = Var(torch.FloatTensor(*dims).fill_(keep))
    if train:
        # train-mode matrix: sample each entry to 1 or 0
        mask = mask.bernoulli()
    if cuda:
        mask = mask.cuda()
    return mask
def device_map_location(cuda):
    """map_location callable for torch.load: moves to GPU when *cuda*."""
    if cuda:
        return lambda storage, loc: storage.cuda()
    return lambda storage, loc: storage
def reverse(tensor, dim=0):
    """Return *tensor* with its entries along *dim* in reversed order."""
    length = tensor.size()[dim]
    # descending index [length-1, ..., 0] performs the reversal
    rev_idx = Var(torch.arange(length - 1, -1, -1).long())
    if tensor.is_cuda:
        rev_idx = rev_idx.cuda()
    return tensor.index_select(dim, rev_idx)
| ds4an/CoDas4CG | CodeOfApproaches/tree2tree/model/utils.py | utils.py | py | 3,470 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "torch.autograd.Variable",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
... |
12466619564 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable boolean ``requires_login`` field to ``Link``."""
    # auto-generated by Django; applies on top of 0004_auto_20161026_1723
    dependencies = [
        ('urly_app', '0004_auto_20161026_1723'),
    ]
    operations = [
        migrations.AddField(
            model_name='link',
            name='requires_login',
            # NullBooleanField: True/False/unknown (NULL) tri-state
            field=models.NullBooleanField(),
        ),
    ]
| spe-bfountain/short | urly_app/migrations/0005_link_requires_login.py | 0005_link_requires_login.py | py | 401 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
35367988265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import yaml
import datetime
import argparse
from collections import defaultdict
from dateutil.parser import parse
from dateutil.rrule import rrule, DAILY
from dateutil.relativedelta import relativedelta
from utils import get_season, get_team_from_game
# loading external configuration
CONFIG = yaml.safe_load(open(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'config.yml')))
# TODO: decide whether to put the following stuff into external configuration
# first playoff day per season: games on/after this date are 'PO',
# earlier regular-season games are 'RS'
PLAYOFF_DATES = {
    2016: datetime.date(2017, 2, 28),
    2017: datetime.date(2018, 3, 6),
    2018: datetime.date(2019, 3, 5),
    2019: datetime.date(2020, 3, 9),
    2020: datetime.date(2021, 4, 19),
}
# first digit of a roster key encodes position: goalie/defense/forward
POS_KEYS = {'1': 'G', '2': 'D', '3': 'F'}
# consolidated per-season game list is written below this directory
TGT_DIR = CONFIG['tgt_processing_dir']
TGT_FILE = "del_games.json"
def get_games_for_date(date, existing_games=None):
    '''
    Gets detail, event, and roster information for all games played on the
    specified date. Optionally adds found game information to the specified
    list of existing games.

    :param date: datetime.date the games were played on
    :param existing_games: previously collected game dicts to append to
    :return: list of game data dicts, one per finished game on *date*
    '''
    print("+ Retrieving games played on %s" % date)
    # loading games that may have been registered earlier
    if not existing_games:
        games = list()
    else:
        games = existing_games
    # collecting already registered game ids to avoid duplicates
    registered_game_ids = [g['game_id'] for g in games]
    # retrieving full league schedule for current season
    schedules_src_path = os.path.join(
        CONFIG['tgt_processing_dir'],
        str(get_season(date)), 'full_schedule.json')
    schedules = json.loads(open(schedules_src_path).read())
    game_ids_rounds = list()
    # determining ids and rounds of games played on current date
    for schedule in schedules:
        try:
            start_date = parse(schedule['start_date']).date()
        except ValueError:
            # TODO: think of something more clever than iterating over all
            # fixtures each day
            continue
        # comparing start date of game with current date
        # BUGFIX: previously compared against the global 'game_date' leaked
        # from __main__ instead of the 'date' parameter, so the function
        # only worked when called from this script's main loop
        if start_date == date and schedule['status'] in ['AFTER_MATCH', 'CONTUMACY']:
            game_ids_rounds.append(
                (schedule['game_id'], int(schedule['round'].split('_')[-1])))
    for game_id, round_no in game_ids_rounds:
        if game_id in registered_game_ids:
            print("\t+ Game with id %d already registered" % game_id)
            continue
        # setting up data container
        single_game_data = dict()
        # setting game date and round information
        single_game_data['date'] = date
        single_game_data['weekday'] = date.weekday()
        season = get_season(date)
        single_game_data['season'] = season
        # TODO: put date for 2020/21 regular season start somewhere else
        if season == 2020 and date < datetime.date(2020, 12, 16):
            single_game_data['season_type'] = 'MSC'
            game_type = 4
        elif date < PLAYOFF_DATES[season]:
            single_game_data['season_type'] = 'RS'
            game_type = 1
        elif date >= PLAYOFF_DATES[season]:
            single_game_data['season_type'] = 'PO'
            game_type = 3
        single_game_data['round'] = round_no
        # setting game ids
        # TODO: determine schedule game id
        # single_game_data['schedule_game_id'] = schedule_game_id
        single_game_data['game_id'] = game_id
        # retrieving game details
        if 'game_id' in single_game_data:
            try:
                single_game_details = get_single_game_details(
                    game_id, season, game_type)
            except Exception:
                # a broken detail file should not abort the whole date
                import traceback
                traceback.print_exc()
                print(game_id)
                continue
        # retrieving game rosters
        single_game_rosters = get_game_rosters(game_id, season, game_type)
        # retrieving game events
        single_game_events = get_game_events(game_id, season, game_type)
        single_game_data = {
            **single_game_data, **single_game_details,
            **single_game_rosters, **single_game_events
        }
        # resolve the 'home'/'road' markers to the actual team names
        single_game_data['first_goal'] = get_team_from_game(
            single_game_data, single_game_data['first_goal'])
        single_game_data['gw_goal'] = get_team_from_game(
            single_game_data, single_game_data['gw_goal'])
        print("\t+ %s (%d) vs. %s (%d)" % (
            single_game_data['home_team'], single_game_data['home_score'],
            single_game_data['road_team'], single_game_data['road_score']
        ))
        games.append(single_game_data)
    return games
def get_single_game_details(game_id, season, game_type):
    """
    Gets game details for a single game with the specified id.

    Reads '<base_data_dir>/game_info/<season>/<game_type>/<game_id>.json'
    and flattens it into a dict of arena, teams, coaches, per-period
    scores, overtime/shootout flags, officials and best players.
    """
    game_detail_src_path = os.path.join(
        CONFIG['base_data_dir'], 'game_info',
        str(season), str(game_type), "%d.json" % game_id)
    game_details = json.loads(open(game_detail_src_path).read())
    single_game_data = dict()
    # --- arena and attendance ---
    single_game_data['arena'] = game_details['stadium']
    # quick fix for wrongly registered arena in 2020 MagentaSport Cup
    if season == 2020 and game_type == 4 and single_game_data['arena'] == 'Mercedes-Benz Arena':
        single_game_data['arena'] = 'Sportforum Berlin'
    single_game_data['attendance'] = game_details['numberOfViewers']
    # --- home team and coach (coach may be a bare name string or a dict) ---
    single_game_data['home_id'] = game_details['teamInfo']['home']['id']
    single_game_data['home_team'] = game_details['teamInfo']['home']['name']
    single_game_data['home_abbr'] = game_details[
        'teamInfo']['home']['shortcut']
    if game_details['trainers']:
        if 'homeHeadCoach' in game_details['trainers']:
            if type(game_details['trainers']['homeHeadCoach']) is str:
                single_game_data['home_coach'] = game_details[
                    'trainers']['homeHeadCoach']
            else:
                single_game_data['home_coach_id'] = game_details[
                    'trainers']['homeHeadCoach']['id']
                single_game_data['home_coach'] = game_details[
                    'trainers']['homeHeadCoach']['name']
    # --- road team and coach (same string-or-dict handling) ---
    single_game_data['road_id'] = game_details['teamInfo']['visitor']['id']
    single_game_data['road_team'] = game_details['teamInfo']['visitor']['name']
    single_game_data['road_abbr'] = game_details[
        'teamInfo']['visitor']['shortcut']
    if game_details['trainers']:
        if 'visitorHeadCoach' in game_details['trainers']:
            if type(game_details['trainers']['visitorHeadCoach']) is str:
                single_game_data['road_coach'] = game_details[
                    'trainers']['visitorHeadCoach']
            else:
                single_game_data['road_coach_id'] = game_details[
                    'trainers']['visitorHeadCoach']['id']
                single_game_data['road_coach'] = game_details[
                    'trainers']['visitorHeadCoach']['name']
    # --- final and per-period scores ---
    single_game_data['home_score'] = game_details[
        'results']['score']['final']['score_home']
    single_game_data['road_score'] = game_details[
        'results']['score']['final']['score_guest']
    single_game_data['home_goals_1'] = game_details[
        'results']['score']['first_period']['score_home']
    single_game_data['road_goals_1'] = game_details[
        'results']['score']['first_period']['score_guest']
    single_game_data['home_goals_2'] = game_details[
        'results']['score']['second_period']['score_home']
    single_game_data['road_goals_2'] = game_details[
        'results']['score']['second_period']['score_guest']
    single_game_data['home_goals_3'] = game_details[
        'results']['score']['third_period']['score_home']
    single_game_data['road_goals_3'] = game_details[
        'results']['score']['third_period']['score_guest']
    # --- overtime/shootout: detected when the three regulation periods
    # do not add up to the final score ---
    single_game_data['overtime_game'] = False
    single_game_data['shootout_game'] = False
    if (sum([
        single_game_data['home_goals_1'],
        single_game_data['home_goals_2'],
        single_game_data['home_goals_3']
    ]) != single_game_data['home_score']) or (sum([
        single_game_data['road_goals_1'],
        single_game_data['road_goals_2'],
        single_game_data['road_goals_3']
    ]) != single_game_data['road_score']):
        if game_details['results']['extra_time']:
            single_game_data['overtime_game'] = True
        if game_details['results']['shooting']:
            single_game_data['shootout_game'] = True
    # --- officials (each may be a bare name string or a dict) ---
    if type(game_details['referees']['headReferee1']) is str:
        single_game_data['referee_1'] = game_details[
            'referees']['headReferee1']
    else:
        single_game_data['referee_1_id'] = game_details[
            'referees']['headReferee1']['id']
        single_game_data['referee_1'] = game_details[
            'referees']['headReferee1']['name']
    if type(game_details['referees']['headReferee2']) is str:
        single_game_data['referee_2'] = game_details[
            'referees']['headReferee2']
    else:
        single_game_data['referee_2_id'] = game_details[
            'referees']['headReferee2']['id']
        single_game_data['referee_2'] = game_details[
            'referees']['headReferee2']['name']
    if type(game_details['referees']['lineReferee1']) is str:
        single_game_data['linesman_1'] = game_details[
            'referees']['lineReferee1']
    else:
        single_game_data['linesman_1_id'] = game_details[
            'referees']['lineReferee1']['id']
        single_game_data['linesman_1'] = game_details[
            'referees']['lineReferee1']['name']
    if type(game_details['referees']['lineReferee2']) is str:
        single_game_data['linesman_2'] = game_details[
            'referees']['lineReferee2']
    else:
        single_game_data['linesman_2_id'] = game_details[
            'referees']['lineReferee2']['id']
        single_game_data['linesman_2'] = game_details[
            'referees']['lineReferee2']['name']
    # --- best player of each team, when present ---
    if 'bestPlayers' in game_details:
        single_game_data['home_best_player_id'] = game_details[
            'bestPlayers']['home']['id']
        single_game_data['home_best_player'] = game_details[
            'bestPlayers']['home']['name']
        single_game_data['road_best_player_id'] = game_details[
            'bestPlayers']['visitor']['id']
        single_game_data['road_best_player'] = game_details[
            'bestPlayers']['visitor']['name']
    return single_game_data
def get_game_rosters(game_id, season, game_type):
    """
    Retrieves rosters for all teams in game with the specified game id.

    Roster keys are three-digit codes: position digit (see POS_KEYS),
    line number, and slot/colour digit. Returns a defaultdict mapping
    keys like 'home_d1' / 'road_f3' to lists of player ids.
    """
    roster_data = defaultdict(list)
    game_roster_src_path = os.path.join(
        CONFIG['base_data_dir'], 'game_roster',
        str(season), str(game_type), "%d.json" % game_id)
    # no roster file registered for this game: return the empty container
    if not os.path.isfile(game_roster_src_path):
        return roster_data
    game_rosters = json.loads(open(game_roster_src_path).read())
    collected_tgt_keys = set()
    for home_road_key in ['home', 'visitor']:
        roster = game_rosters[home_road_key]
        for roster_key in sorted(roster.keys()):
            # splitting key into single string digits
            pos, line, clr = list(str(roster_key))
            # converting coded position into actual position
            pos = POS_KEYS[pos]
            # goaltender's starting status is coded in third, not second digit
            if pos == 'G':
                line = clr
            # setting up target key
            tgt_key = ("%s_%s%s" % (
                home_road_key.replace('visitor', 'road'), pos, line)).lower()
            collected_tgt_keys.add(tgt_key)
            # appending a dummy player id if necessary, e.g. for fourth
            # defensive pairs only consisting of a right defenseman
            if pos != 'G' and int(clr) > len(roster_data[tgt_key]) + 1:
                roster_data[tgt_key].append(0)
            roster_data[tgt_key].append(roster[roster_key]['playerId'])
    else:
        # for/else: this always runs once the loop finishes (no break);
        # pad defense pairs to 2 and forward lines to 3 with dummy ids
        for tgt_key in collected_tgt_keys:
            if '_d' in tgt_key:
                while len(roster_data[tgt_key]) < 2:
                    roster_data[tgt_key].append(0)
            if '_f' in tgt_key:
                while len(roster_data[tgt_key]) < 3:
                    roster_data[tgt_key].append(0)
    return roster_data
def get_time_tied_leading_trailing(event, previous_score, last_goal_time):
    """
    Classify the score state that held *before* the current event and
    return it together with how long that state lasted, i.e. the time
    between the last goal and the current event.
    """
    elapsed = event['time'] - last_goal_time
    home, road = previous_score['home'], previous_score['road']
    if home > road:
        state = 'home_leading'
    elif home < road:
        state = 'road_leading'
    else:
        state = 'tied'
    return state, elapsed
def get_game_events(game_id, season, game_type):
    """
    Register game events for current game from separate data source.

    Derives, from the per-period event file: time spent tied/leading,
    first and game-winning goal details, and per-team empty-net and
    extra-attacker goal counts.
    """
    game_events_src_path = os.path.join(
        CONFIG['base_data_dir'], 'game_events',
        str(season), str(game_type), "%d.json" % game_id)
    game_events = json.loads(open(game_events_src_path).read())
    single_game_events = dict()
    # setting up containers for all goals
    all_goals = list()
    goals_per_team = {'home': list(), 'road': list()}
    empty_net_goals_per_team = {'home': 0, 'road': 0}
    extra_attacker_goals_per_team = {'home': 0, 'road': 0}
    # setting up score state container and helper variables
    tied_leading_trailing = defaultdict(int)
    last_goal_time = 0
    current_score = {'home': 0, 'road': 0}
    # collecting all goals scored in the game in order to
    # retrieve the team that scored the first goal of the game
    for period in sorted(game_events):
        for event in game_events[period]:
            if event['type'] == 'goal':
                all_goals.append(event)
                home_road = event['data']['team'].replace('visitor', 'road')
                goals_per_team[home_road].append(event)
                # calculating timespan of previous score state
                score_state, timespan = get_time_tied_leading_trailing(
                    event, current_score, last_goal_time)
                tied_leading_trailing[score_state] += timespan
                # re-setting helper variables for score state time retrieval
                # setting time of previous goal to current time
                last_goal_time = event['time']
                # adjusting score
                current_score['home'], current_score['road'] = [
                    int(x) for x in event['data']['currentScore'].split(":")]
    else:
        # for/else: runs once iteration completes (no break involved);
        # calculating timespan of score state between last goal scored in game
        # and end of game
        # NOTE(review): relies on 'event' leaked from the loop above being
        # the game's final event; raises NameError if the file holds no
        # events at all -- confirm that never happens in practice
        score_state, timespan = get_time_tied_leading_trailing(
            event, current_score, last_goal_time)
        tied_leading_trailing[score_state] += timespan
    # finally storing score state timespans
    time_played = 0
    for sit in ['tied', 'home_leading', 'road_leading']:
        single_game_events[sit] = tied_leading_trailing[sit]
        time_played += tied_leading_trailing[sit]
    else:
        single_game_events['time_played'] = time_played
    # retrieving first goal of the game
    # making sure the goals are sorted by time
    first_goal = sorted(all_goals, key=lambda d: d['time'])[0]
    single_game_events['first_goal'] = first_goal[
        'data']['team'].replace('visitor', 'road')
    single_game_events['first_goal_time'] = first_goal['time']
    single_game_events['first_goal_player_id'] = first_goal[
        'data']['scorer']['playerId']
    single_game_events['first_goal_first_name'] = first_goal[
        'data']['scorer']['name']
    single_game_events['first_goal_last_name'] = first_goal[
        'data']['scorer']['surname']
    # retrieving game-winning goal: the goal that lifted the winner past
    # the loser's final total
    if len(goals_per_team['home']) > len(goals_per_team['road']):
        winning_goal = goals_per_team['home'][len(goals_per_team['road'])]
    else:
        winning_goal = goals_per_team['road'][len(goals_per_team['home'])]
    single_game_events['gw_goal'] = winning_goal[
        'data']['team'].replace('visitor', 'road')
    single_game_events['gw_goal_time'] = winning_goal['time']
    single_game_events['gw_goal_player_id'] = winning_goal[
        'data']['scorer']['playerId']
    single_game_events['gw_goal_first_name'] = winning_goal[
        'data']['scorer']['name']
    single_game_events['gw_goal_last_name'] = winning_goal[
        'data']['scorer']['surname']
    # counting empty net and extra attacker goals per team
    for key in ['home', 'road']:
        for goal in goals_per_team[key]:
            if goal['data']['en']:
                empty_net_goals_per_team[key] += 1
            if goal['data']['ea']:
                # some empty net goals are also falsely registered as extra
                # attacker goals
                if goal['data']['en']:
                    continue
                # game-winning shootout goals are falsely registered as extra
                # attacker goals
                if goal['data']['balance'] == 'GWS':
                    continue
                extra_attacker_goals_per_team[key] += 1
        else:
            # inner for/else: store this team's totals after its goals
            single_game_events[
                "%s_en_goals" % key] = empty_net_goals_per_team[key]
            single_game_events[
                "%s_ea_goals" % key] = extra_attacker_goals_per_team[key]
    return single_game_events
if __name__ == '__main__':
    # retrieving arguments specified on command line
    parser = argparse.ArgumentParser(
        description='Process DEL game information.')
    parser.add_argument(
        '-f', '--from', dest='from_date', required=False,
        metavar='first date to process games for',
        help="The first date information will be processed for")
    parser.add_argument(
        '-t', '--to', dest='to_date', required=False,
        metavar='last date to process games for',
        help="The last date information will be processed for")
    parser.add_argument(
        '-s', '--season', dest='season', required=False, default=2020,
        type=int, choices=[2016, 2017, 2018, 2019, 2020],
        metavar='season to process games for',
        help="The season information will be processed for")
    parser.add_argument(
        '--initial', dest='initial', required=False,
        action='store_true', help='Re-create list of games')
    args = parser.parse_args()
    # setting time interval of interest from command line options
    tgt_season = args.season
    from_date = args.from_date
    to_date = args.to_date
    initial = args.initial
    if from_date is None:
        # using yesterday's date as default from date
        from_date = datetime.date.today() - relativedelta(days=1)
    else:
        from_date = parse(from_date).date()
    if to_date is None:
        # using from date as default to date
        to_date = from_date
    else:
        to_date = parse(to_date).date()
    # determining end date of target season
    season_end_date = datetime.date(tgt_season + 1, 5, 31)
    previous_season_end_date = datetime.date(tgt_season, 5, 31)
    # setting up list of all game dates, clipped to the season's window
    game_dates = list(rrule(DAILY, dtstart=from_date, until=to_date))
    game_dates = [
        game_date.date() for game_date in game_dates if
        game_date.date() <= season_end_date and
        game_date.date() > previous_season_end_date]
    # setting up target path
    tgt_path = os.path.join(TGT_DIR, str(tgt_season), TGT_FILE)
    # --initial discards any previously collected games
    if initial:
        games = list()
    else:
        if not os.path.isfile(tgt_path):
            print("+ Unable to load existing games from %s" % tgt_path)
            games = list()
        else:
            games = json.loads(open(tgt_path).read())
    # retrieving games for each game date, accumulating into one list
    for game_date in game_dates:
        games = get_games_for_date(game_date, games)
    # default=str lets json serialise the datetime.date values
    open(tgt_path, 'w').write(
        json.dumps(games, indent=2, default=str))
| leaffan/del_stats | backend/get_del_games.py | get_del_games.py | py | 20,247 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "yaml.safe_load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_... |
18187068649 | """Control navigation of FOREST data"""
import copy
import datetime as dt
import numpy as np
import pandas as pd
import bokeh.models
import bokeh.layouts
from collections import namedtuple
from forest import data
from forest.observe import Observable
from forest.util import to_datetime as _to_datetime
from forest.export import export
from forest.mark import component
import forest.util
from typing import List, Any
import pandas as pd
__all__ = [
"State",
]
# Message to user when option not available
UNAVAILABLE = "Please specify"
UNAVAILABLE_HHMM = "HH:MM"
# Action keys
SET_VALUE = "SET_VALUE"
NEXT_VALUE = "NEXT_VALUE"
PREVIOUS_VALUE = "PREVIOUS_VALUE"
ON_SELECT = "ON_SELECT"
SET_HOUR = "SET_HOUR"
SET_DATE = "SET_DATE"
def on_select(text):
    """Build an ON_SELECT action carrying the selected menu label."""
    action = dict(kind=ON_SELECT, payload=text)
    return action
def set_hour(text):
    """Build a SET_HOUR action from the clock widget's selection."""
    action = dict(kind=SET_HOUR, payload=text)
    return action
def set_date(text):
    """Build a SET_DATE action from the calendar widget's selection."""
    action = dict(kind=SET_DATE, payload=text)
    return action
def add_key(key, action):
    """Wrap an action's payload with *key*, preserving its kind."""
    wrapped = {"key": key, "value": action["payload"]}
    return dict(kind=action["kind"], payload=wrapped)
@export
def set_value(key, value):
    """Action assigning *value* to *key* in the application state."""
    payload = {"key": key, "value": value}
    return {"kind": SET_VALUE, "payload": payload}
@export
def next_valid_time():
    """Action stepping the valid time forward."""
    item_key, items_key = "valid_time", "valid_times"
    return next_value(item_key, items_key)
@export
def next_initial_time():
    """Action stepping the model run (initial) time forward."""
    item_key, items_key = "initial_time", "initial_times"
    return next_value(item_key, items_key)
@export
def next_value(item_key, items_key):
    """Action stepping the value of *item_key* forward within *items_key*.

    The payload is built explicitly rather than via ``locals()`` so that
    introducing a temporary variable inside this function can never leak
    into the action payload.
    """
    return {
        "kind": NEXT_VALUE,
        "payload": {"item_key": item_key, "items_key": items_key},
    }
@export
def previous_valid_time():
    """Action stepping the valid time backward."""
    item_key, items_key = "valid_time", "valid_times"
    return previous_value(item_key, items_key)
@export
def previous_initial_time():
    """Action stepping the model run (initial) time backward."""
    item_key, items_key = "initial_time", "initial_times"
    return previous_value(item_key, items_key)
@export
def previous_value(item_key, items_key):
    """Action stepping the value of *item_key* backward within *items_key*.

    Mirrors :func:`next_value`; the payload is built explicitly instead of
    with ``locals()`` so local temporaries cannot leak into the action.
    """
    return {
        "kind": PREVIOUS_VALUE,
        "payload": {"item_key": item_key, "items_key": items_key},
    }
# Application state for the navigation controls.  Each singular field
# (e.g. ``valid_time``) holds the current selection while its plural
# counterpart (``valid_times``) holds the menu of available values.
State = namedtuple(
    "State",
    (
        "pattern",
        "patterns",
        "variable",
        "variables",
        "initial_time",
        "initial_times",
        "valid_time",
        "valid_times",
        "pressure",
        "pressures",
        "valid_format",
    ),
)
# Every field defaults to None so partially-populated states are valid.
State.__new__.__defaults__ = (None,) * len(State._fields)
def statehash(self):
    """Hash a State; list-like fields are stringified to stay hashable."""
    hashable_fields = (
        self.pattern,
        str(self.patterns),
        self.variable,
        self.initial_time,
        str(self.initial_times),
        self.valid_time,
        str(self.valid_times),
        self.pressure,
        str(self.pressures),
        self.valid_format,
    )
    return hash(hashable_fields)
def time_equal(a, b):
    """Compare two time-like values; two Nones are equal, one None is not."""
    if a is None and b is None:
        return True
    if a is None or b is None:
        return False
    # Normalise both sides to datetime before comparing.
    return _to_datetime(a) == _to_datetime(b)
# Vectorised converter used to coerce whole arrays of time-like values.
_vto_datetime = np.vectorize(_to_datetime)
def time_array_equal(x, y):
    """Element-wise equality for two collections of time-like values."""
    if x is None and y is None:
        return True
    if x is None or y is None:
        return False
    if len(x) == 0 or len(y) == 0:
        # At least one side is empty: fall back to plain comparison.
        return x == y
    if len(x) != len(y):
        return False
    return np.all(_as_datetime_array(x) == _as_datetime_array(y))
def _as_datetime_array(x):
    """Coerce *x* to an array of datetimes.

    Tries the vectorised converter first and falls back to
    ``pd.to_datetime`` for inputs ``np.vectorize`` cannot handle.
    """
    try:
        return _vto_datetime(x)
    except TypeError:
        # NOTE: Needed for EarthNetworks DatetimeIndex
        return pd.to_datetime(x)
def equal_value(a, b):
    """Numeric equality with float tolerance; two Nones compare equal."""
    if a is None and b is None:
        return True
    if a is None or b is None:
        return False
    return np.allclose(a, b)
def state_ne(self, other):
    """Inverse of state equality (installed as ``State.__ne__``)."""
    equal = self == other
    return not equal
def state_eq(self, other):
    """Field-wise State equality using time- and array-aware comparisons."""
    return (
        (self.pattern == other.pattern)
        and np.all(self.patterns == other.patterns)
        and (self.variable == other.variable)
        and np.all(self.variables == other.variables)
        and time_equal(self.initial_time, other.initial_time)
        and time_array_equal(self.initial_times, other.initial_times)
        and time_equal(self.valid_time, other.valid_time)
        and time_array_equal(self.valid_times, other.valid_times)
        and equal_value(self.pressure, other.pressure)
        # Shapes are compared first: equal_value/np.allclose cannot
        # compare differently-shaped pressure menus.
        and np.shape(self.pressures) == np.shape(other.pressures)
        and equal_value(self.pressures, other.pressures)
    )
# Install the hand-written hash/equality helpers on the namedtuple so
# State instances compare by value with time/array-aware semantics.
State.__hash__ = statehash
State.__eq__ = state_eq
State.__ne__ = state_ne
@export
def reducer(state, action):
    """Pure reducer: return a new state dict with *action* applied.

    The incoming state is deep-copied so callers never observe mutation.
    Only SET_VALUE actions change the state; everything else is a no-op.
    """
    new_state = copy.deepcopy(state)
    if action["kind"] == SET_VALUE:
        payload = action["payload"]
        new_state[payload["key"]] = payload["value"]
    return new_state
@export
class InverseCoordinate(object):
    """Translate actions on inverted coordinates.

    For the named coordinate, NEXT and PREVIOUS actions are swapped so
    stepping "forward" through a descending menu behaves as expected.
    """
    def __init__(self, name):
        # Item key of the coordinate whose direction is reversed.
        self.name = name

    def __call__(self, store, action):
        """Middleware entry point: swap direction for matching actions."""
        if self.is_next_previous(action) and self.has_name(action):
            yield self.invert(action)
        else:
            yield action

    @staticmethod
    def is_next_previous(action):
        """True when the action is a navigation (NEXT/PREVIOUS) action."""
        return action["kind"] in [NEXT_VALUE, PREVIOUS_VALUE]

    def has_name(self, action):
        """True when the action targets this coordinate's item key."""
        return self.name == action["payload"]["item_key"]

    @staticmethod
    def invert(action):
        """Return the opposite-direction navigation action."""
        kind = action["kind"]
        payload = action["payload"]
        item_key = payload["item_key"]
        items_key = payload["items_key"]
        if kind == NEXT_VALUE:
            return previous_value(item_key, items_key)
        else:
            return next_value(item_key, items_key)
@export
def next_previous(store, action):
    """Translate NEXT/PREVIOUS action(s) into SET action"""
    kind = action["kind"]
    if kind in [NEXT_VALUE, PREVIOUS_VALUE]:
        payload = action["payload"]
        item_key = payload["item_key"]
        items_key = payload["items_key"]
        if items_key not in store.state:
            # No further action to be taken
            return
        items = store.state[items_key]
        if item_key in store.state:
            # Step relative to the current selection.
            item = store.state[item_key]
            if kind == NEXT_VALUE:
                value = next_item(items, item)
            else:
                value = previous_item(items, item)
        else:
            # Nothing selected yet: jump to an end of the menu.
            if kind == NEXT_VALUE:
                value = max(items)
            else:
                value = min(items)
        yield set_value(item_key, value)
    else:
        # Unrelated actions pass through untouched.
        yield action
def next_item(items, item):
    """Value after *item* in the sorted, de-duplicated items (wraps around)."""
    ordered = sorted(set(items))
    position = _index(ordered, item)
    return ordered[(position + 1) % len(ordered)]
def previous_item(items, item):
    """Value before *item* in the sorted, de-duplicated items (wraps to end)."""
    ordered = sorted(set(items))
    position = _index(ordered, item)
    return ordered[position - 1]
class NotFound(Exception):
    """Raised when an item cannot be located in a sequence, even approximately."""
    pass
def _index(items: List[Any], item: Any):
try:
return items.index(item)
except ValueError as e:
# Index of first float within tolerance
try:
if any(np.isclose(items, item)):
return np.isclose(items, item).argmax()
else:
raise e
except TypeError:
print(type(item), type(items), type(items[0]))
msg = "{} not in {}".format(item, items)
raise NotFound(msg)
@export
class Navigator(object):
    """Interface for navigation menu system.

    This default implementation returns fixed placeholder values; concrete
    navigators answer each query from real data.
    """
    def variables(self, pattern):
        """List variable names available for *pattern*."""
        return ["air_temperature"]

    def initial_times(self, pattern):
        """List model run times available for *pattern*."""
        return ["2019-01-01 00:00:00"]

    def valid_times(self, pattern, variable, initial_time):
        """List forecast valid times for the given selection."""
        return ["2019-01-01 12:00:00"]

    def pressures(self, pattern, variable, initial_time):
        """List pressure levels for the given selection."""
        return [750.0]
@export
class Controls(object):
    """Middleware that augments SET actions with derived menu data."""

    def __init__(self, navigator):
        # Navigator answers queries for variables/times/pressures.
        self.navigator = navigator

    def __call__(self, store, action):
        """Dispatch SET_VALUE actions to a per-key handler."""
        kind = action["kind"]
        if kind == SET_VALUE:
            key = action["payload"]["key"]
            handlers = {
                "pressure": self._pressure,
                "pattern": self._pattern,
                "variable": self._variable,
                "initial_time": self._initial_time,
            }
            if key in handlers:
                yield from handlers[key](store, action)
            else:
                yield action
        elif kind in [SET_DATE, SET_HOUR]:
            yield from calendar_middleware(store, action)
        else:
            yield action

    def _pressure(self, store, action):
        """Coerce pressure values to float before storing."""
        key = action["payload"]["key"]
        value = action["payload"]["value"]
        try:
            value = float(value)
        except ValueError:
            # Non-numeric value passes through unchanged (logged to stdout).
            print("{} is not a float".format(value))
        yield set_value(key, value)

    def _pattern(self, store, action):
        """On dataset change, refresh the variable and time menus."""
        pattern = action["payload"]["value"]
        variables = self.navigator.variables(pattern=pattern)
        initial_times = self.navigator.initial_times(pattern=pattern)
        # Reversed so the last-listed run comes first — assumes the
        # navigator returns runs in ascending order (TODO confirm).
        initial_times = list(reversed(initial_times))
        yield action
        yield set_value("variables", variables)
        yield set_value("initial_times", initial_times)
        # Set valid_times if pattern, variable and initial_time present
        variable = store.state.get("variable")
        initial_time = store.state.get("initial_time")
        if all(value is not None for value in [variable, initial_time]):
            valid_times = self.navigator.valid_times(
                pattern, variable, initial_time
            )
            yield set_value("valid_times", valid_times)

    def _variable(self, store, action):
        """On variable change, refresh the valid time and pressure menus."""
        for attr in ["pattern", "initial_time"]:
            if attr not in store.state:
                # Not enough context to query the navigator yet.
                yield action
                return
        pattern = store.state["pattern"]
        variable = action["payload"]["value"]
        initial_time = store.state["initial_time"]
        valid_times = self.navigator.valid_times(
            pattern=pattern, variable=variable, initial_time=initial_time
        )
        valid_times = sorted(set(valid_times))
        pressures = self.navigator.pressures(
            pattern=pattern, variable=variable, initial_time=initial_time
        )
        pressures = list(reversed(pressures))
        yield action
        yield set_value("valid_times", valid_times)
        yield set_value("pressures", pressures)
        if ("pressure" not in store.state) and len(pressures) > 0:
            # Default to the largest pressure when none is selected.
            yield set_value("pressure", max(pressures))

    def _initial_time(self, store, action):
        """On model run change, refresh the valid time menu."""
        for attr in ["pattern", "variable"]:
            if attr not in store.state:
                yield action
                return
        initial_time = action["payload"]["value"]
        valid_times = self.navigator.valid_times(
            pattern=store.state["pattern"],
            variable=store.state["variable"],
            initial_time=initial_time,
        )
        valid_times = sorted(set(valid_times))
        yield action
        yield set_value("valid_times", valid_times)
def calendar_middleware(store, action):
    """Prevent feedback from calendar widget"""
    yield action
    kind = action["kind"]
    if kind in [SET_DATE, SET_HOUR]:
        # Add extra action(s) if needed
        key = action["payload"]["key"]
        value = action["payload"]["value"]
        # Get current time from state
        time = store.state.get(key)  # Note: Could be any "time" type
        if isinstance(value, str):
            value = pd.Timestamp(value)
        if time is not None:
            # Compare dates
            if kind == SET_DATE:
                # Keep the time-of-day, replace the calendar date.
                new = forest.util.replace(
                    time, year=value.year, month=value.month, day=value.day
                )
                yield set_value(key, new)
            # Compare hours
            elif kind == SET_HOUR:
                # Keep the date, replace the time-of-day.
                new = forest.util.replace(
                    time,
                    hour=value.hour,
                    minute=value.minute,
                    second=value.second,
                )
                yield set_value(key, new)
@export
class ControlView:
    """Layout of navigation controls

    A high-level view that delegates to low-level views
    which in turn perform navigation.
    """

    def __init__(self):
        # Calendar-based time pickers sit behind a feature flag.
        if data.FEATURE_FLAGS["calendar"]:
            TimeView = CalendarClockView
        else:
            TimeView = SelectView
        self.views = {}
        self.views["dataset"] = DatasetView()
        # Variables have no meaningful next/previous ordering.
        self.views["variable"] = DimensionView(
            "variable", "variables", next_previous=False
        )
        self.views["initial_time"] = DimensionView(
            "initial_time", "initial_times", View=TimeView
        )
        self.views["valid_time"] = DimensionView(
            "valid_time", "valid_times", View=TimeView
        )
        self.views["pressure"] = DimensionView(
            "pressure", "pressures", formatter=self.hpa
        )
        # Stack the individual controls vertically.
        self.layout = bokeh.layouts.column(
            self.views["dataset"].layout,
            self.views["variable"].layout,
            self.views["initial_time"].layout,
            self.views["valid_time"].layout,
            self.views["pressure"].layout,
        )
        super().__init__()

    def connect(self, store):
        """Connect views to the store"""
        self.views["dataset"].connect(store)
        self.views["variable"].connect(store)
        self.views["initial_time"].connect(store)
        self.views["valid_time"].connect(store)
        self.views["pressure"].connect(store)

    @staticmethod
    def hpa(p):
        """Format a pressure value for display in the menu."""
        return format_hpa(p)
def format_hpa(p):
    """Return a display label for pressure *p* (placeholder when None)."""
    if p is None:
        return "Pressure"
    # Sub-1 values keep their full precision; whole levels render as ints.
    shown = str(p) if float(p) < 1 else int(p)
    return "{}hPa".format(shown)
@component
class DatasetView(Observable):
    """View to select datasets

    .. note:: Currently 'pattern' is the primary key for
              dataset selection
    """

    def __init__(self):
        # Label -> pattern translation table, populated during render().
        self._table = {}
        self.item_key = "pattern"
        self.items_key = "patterns"
        self.select = bokeh.models.Select(width=350)
        self.select.on_change("value", self.on_select)
        self.layout = bokeh.layouts.row(self.select)
        super().__init__()

    def on_select(self, attr, old, new):
        """On click handler for select widget"""
        if new == UNAVAILABLE:
            # Placeholder entry: not a real dataset.
            return
        value = self._table.get(new, new)
        self.notify(set_value(self.item_key, value))

    def connect(self, store):
        """Wire up component to the Store"""
        self.add_subscriber(store.dispatch)
        store.add_subscriber(self.render)

    def render(self, state):
        """Render application state"""
        pattern = state.get(self.item_key)
        patterns = state.get(self.items_key, [])
        # patterns is a sequence of (label, pattern) pairs.
        self._table.update(patterns)
        option = self.find_label(patterns, pattern)
        options = [label for label, _ in patterns]
        self.select.options = [UNAVAILABLE] + options
        if option in options:
            self.select.value = option
        else:
            self.select.value = UNAVAILABLE

    @staticmethod
    def find_label(patterns, pattern):
        """Return the label paired with *pattern*, or None if absent."""
        for label, _pattern in patterns:
            if _pattern == pattern:
                return label
@component
class DimensionView(Observable):
    """Widgets used to navigate a dimension"""

    def __init__(
        self, item_key, items_key, next_previous=True, formatter=str, View=None
    ):
        if View is None:
            View = SelectView
        self.views = {}
        self.parser = KeyParser(item_key, items_key)
        self.actions = KeyActions(item_key, items_key)
        self.translator = Translator(formatter)
        self.next_previous = next_previous
        if self.next_previous:
            # Include next/previous buttons
            self.views["select"] = View(width=180)
            self.buttons = {
                "next": bokeh.models.Button(label="Next", width=75),
                "previous": bokeh.models.Button(label="Previous", width=75),
            }
            self.buttons["next"].on_click(self.on_next)
            self.buttons["previous"].on_click(self.on_previous)
            self.layout = bokeh.layouts.row(
                self.buttons["previous"],
                self.views["select"].layout,
                self.buttons["next"],
            )
        else:
            # Without next/previous buttons
            self.views["select"] = View(width=350)
            self.layout = bokeh.layouts.row(self.views["select"].layout)
        # Wire up child views
        self.views["select"].add_subscriber(self.on_select)
        super().__init__()

    def on_next(self):
        """Handler for next button"""
        self.notify(self.actions.next_value())

    def on_previous(self):
        """Handler for previous button"""
        self.notify(self.actions.previous_value())

    def connect(self, store):
        """Connect user interactions to the store"""
        self.add_subscriber(store.dispatch)
        store.add_subscriber(self.render)

    def on_select(self, action):
        """Translate child-view actions into dimension-specific actions."""
        kind = action["kind"]
        if kind in [SET_DATE, SET_HOUR]:
            # Calendar actions only need this dimension's item key attached.
            action = self.actions.add_item_key(action)
        elif kind == ON_SELECT:
            # Menu labels must be decoded back into state values.
            value = self.translator.decode(action["payload"])
            action = self.actions.set_value(value)
        self.notify(action)

    def render(self, state):
        """Apply state to widgets"""
        value = self.parser.item(state)
        values = self.parser.items(state)
        option = self.translator.encode(value)
        options = [self.translator.encode(value) for value in values]
        self.views["select"].render({"option": option, "options": options})
        if self.next_previous:
            # Grey out the stepping buttons when there is nothing to step.
            disabled = len(values) == 0
            self.buttons["next"].disabled = disabled
            self.buttons["previous"].disabled = disabled
@component
class CalendarClockView(Observable):
    """Allow user to select available date and time"""

    def __init__(self, width=None):
        # width is accepted for API parity with SelectView; fixed
        # internal widths are used instead.
        self.widths = {"select": 80, "picker": 90, "row": 190}
        self.picker = bokeh.models.DatePicker(width=self.widths["picker"])
        self.picker.on_change("value", self.on_picker)
        self.select = bokeh.models.Select(width=self.widths["select"])
        self.select.on_change("value", self.on_select)
        self.layout = bokeh.layouts.row(
            self.picker, self.select, width=self.widths["row"]
        )
        super().__init__()

    def on_select(self, attr, old, new):
        """Time-of-day chosen from the drop-down."""
        self.notify(set_hour(new))

    def on_picker(self, attr, old, new):
        """Date chosen from the calendar widget."""
        self.notify(set_date(new))

    def render(self, props):
        """Set selected date"""
        option = props.get("option")
        options = props.get("options", [])
        # Map options to timestamps
        time = pd.Timestamp(option)
        times = pd.to_datetime(options)
        # Set calendar highlights
        self.picker.value = str(time.date())
        self.picker.enabled_dates = [
            (date, date) for date in times.date.astype(str)
        ]
        fmt = "%H:%M:%S"
        # Mask of available times falling on the selected day.
        pts = (
            (times.year == time.year)
            & (times.month == time.month)
            & (times.day == time.day)
        )
        value = time.strftime(fmt)
        # Only assign on change to avoid re-triggering on_select feedback.
        if self.select.value != value:
            self.select.value = value
        values = times[pts].strftime(fmt)
        if len(set(values) - set(self.select.options)) > 0:
            self.select.options = sorted(set(values))
@component
class SelectView(Observable):
    """Select value from menu"""

    def __init__(self, width=None):
        self.select = bokeh.models.Select(width=width)
        self.select.on_change("value", self.on_select)
        self.layout = self.select
        super().__init__()

    def on_select(self, attr, old, new):
        """Handler for select widget"""
        if new == UNAVAILABLE:
            # Placeholder entry is not a selectable value.
            return
        self.notify(on_select(new))

    def render(self, props):
        """Represent state"""
        option = props.get("option")
        options = props.get("options", [])
        self.select.options = [UNAVAILABLE] + options
        if option in options:
            self.select.value = option
        else:
            self.select.value = UNAVAILABLE
        # Deactivate widgets if no options available
        disabled = len(options) == 0
        self.select.disabled = disabled
class Translator:
    """Two-way mapping between UI labels and state values.

    ``encode`` formats a value into a label and remembers the pairing so
    ``decode`` can later recover the original value from the label.
    """

    def __init__(self, formatter):
        self.formatter = formatter
        # Label -> original value, populated as labels are handed out.
        self._lookup = {}

    def encode(self, value):
        """Format *value* as a label and remember it for decoding."""
        label = self.formatter(value)
        self._lookup[label] = value
        return label

    def decode(self, key):
        """Return the value behind *key*; unknown keys pass through unchanged."""
        return self._lookup.get(key, key)
class KeyParser:
    """Query state for item/items"""

    def __init__(self, item_key, items_key):
        # Singular key (current selection) and plural key (menu of values).
        self.item_key = item_key
        self.items_key = items_key

    def item(self, state):
        """Currently selected value, or None when absent."""
        return state.get(self.item_key)

    def items(self, state):
        """Available values, defaulting to an empty list."""
        return state.get(self.items_key, [])
class KeyActions:
    """Actions with item/items key meta-data"""

    def __init__(self, item_key, items_key):
        self.item_key = item_key
        self.items_key = items_key

    def add_item_key(self, action):
        """Tag *action* with this dimension's item key."""
        return add_key(self.item_key, action)

    def set_value(self, value):
        """Action setting this dimension's item to *value*."""
        return set_value(self.item_key, value)

    def next_value(self):
        """Action stepping this dimension forward."""
        return next_value(self.item_key, self.items_key)

    def previous_value(self):
        """Action stepping this dimension backward."""
        return previous_value(self.item_key, self.items_key)
| MetOffice/forest | forest/db/control.py | control.py | py | 21,707 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "forest.export.export",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "forest.export.export",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "forest.export.export",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "forest... |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Use Chrome via a local chromedriver binary.
PATH = "/home/amanda/Documents/python/SpiritBot/SpiritFanficFavoritador/chromedriver"
driver = webdriver.Chrome(PATH)

# File listing the account usernames to use, one per line.
FILE = open("NomesEContas/user1.txt","r")

verificador = 0
for i in range(17):
    # Log in with the next account from the file.
    driver.get("https://www.spiritfanfiction.com/login")
    usuario = FILE.readline()
    # NOTE(review): readline() keeps the trailing newline; sending it via
    # send_keys may submit the field early — confirm and consider .strip().
    user = driver.find_element_by_id("Usuario")
    user.send_keys(usuario)
    user.send_keys(Keys.RETURN)
    senha = driver.find_element_by_id("Senha")
    senha.send_keys("senha1234@")
    senha.send_keys(Keys.RETURN)
    # Open the fanfic page.
    driver.get("https://www.spiritfanfiction.com/historia/largura-das-portas-19677653 ")
    # Click the favourite button.
    fav = driver.find_element_by_id("botaoFavoritos")
    fav.click()
    # View chapter 1 (registers a read).
    driver.get("https://www.spiritfanfiction.com/historia/largura-das-portas-19677653/capitulo1")
    # Follow the author's profile.
    driver.get("https://www.spiritfanfiction.com/perfil/kodathewhel/addwatch")
    segue = driver.find_element_by_id("cphConteudo_cphPerfil_btnEnviar")
    segue.click()
    # Log out before switching to the next account.
    driver.get("https://www.spiritfanfiction.com/logoff")
    verificador+=1
    print(verificador)

driver.quit()
driver.quit() | AmandaApolinario/SpiritFanficFavoritador | coisadelu.py | coisadelu.py | py | 1,474 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 24,
"usage_type": "attribute"
... |
19369503791 | from django.http import HttpResponse
from django.utils.html import strip_tags
from models import Listener
import activity_checker
import corrector
import formatter
# Module-level corrector/formatter instances shared by both view functions.
cor = corrector.Corrector()
fmt = formatter.Formatter()
def check(request, listener_id, line_id, line):
    """Check a submitted line against the reference dialog line.

    Compares the user's *line* with the stored correct line (both stripped
    of HTML), records progress when it matches, and returns the formatted
    correction markup.
    """
    good_one, len_lines, level = Listener.get_good_line(listener_id, line_id)
    is_correct, corrected_dialog = cor.correct_dialog(strip_tags(good_one), strip_tags(line))
    formatted = fmt.line_corrected(is_correct, corrected_dialog, line_id, listener_id)
    if is_correct:
        # Only count progress once the line is fully correct.
        activity_checker.mark_line_correct_and_check_if_video_is_correct(request, listener_id, int(line_id), len_lines, level)
    return HttpResponse(formatted)
def get_next_word(request, listener_id, line_id, line):
    """Reveal the next word of the reference line as a hint.

    Counts the hint against the listener's activity and returns the
    formatted partially-corrected line.
    """
    good_one, len_lines, level = Listener.get_good_line(listener_id, line_id)
    is_correct, corrected_dialog = cor.get_next_word(strip_tags(good_one), strip_tags(line))
    formatted = fmt.line_corrected(is_correct, corrected_dialog, line_id, listener_id)
    activity_checker.count_suggestion(request, listener_id, len_lines)
    return HttpResponse(formatted)
| deccico/capego | listener/views.py | views.py | py | 1,134 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "corrector.Corrector",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "formatter.Formatter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Listener.get_good_line",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": ... |
10068296172 | import pandas as pd
import numpy as np
import hydra
import os
import json
import yaml
import pickle
import time
from flask import Flask, request, jsonify
import mysql.connector
import prediction
#26.6.2021
#1. Change the db into production db
#2. Deploy into AWS EC2
#3. Deploy the model into AWS EKS
app = Flask(__name__)

# MySQL connection used by the prediction endpoint.
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before deploying.
db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="asdf1234",
    database="water_quality"
)
@app.route('/', methods = ["GET", "POST"])
def index():
    """Predict water potability from a JSON measurement payload.

    POST a JSON body with the nine water-quality features: the model is
    run, the measurement plus prediction is stored in MySQL, and
    ``{"Prediction": 0|1}`` is returned.  Non-POST requests and requests
    without a JSON body now receive an explicit response instead of the
    previous implicit ``None`` return, which made Flask raise a 500.
    """
    if request.method != "POST":
        return jsonify({"message": "POST a JSON payload of water-quality features."})
    if not request.json:
        return jsonify({"error": "Request body must be JSON."}), 400

    payload = request.json
    predict_result = prediction.api_response(payload)

    # Persist the measurement with the model's verdict.
    # Parameterised query: values are bound, not interpolated.
    mycursor = db.cursor()
    mycursor.execute(
        """INSERT INTO water_q(
        pH, Hardness, Solids, Chloramines, Sulfate, Conductivity,
        Organic_carbon, Trihalomethanes, Turbidity, Potability)
        VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
        (payload["ph"], payload["Hardness"], payload["Solids"],
         payload["Chloramines"], payload["Sulfate"], payload["Conductivity"],
         payload["Organic_carbon"], payload["Trihalomethanes"],
         payload["Turbidity"], predict_result))
    db.commit()

    return jsonify({"Prediction": int(predict_result)})
if __name__ == "__main__":
    # Development server only; use a production WSGI server when deployed.
    app.run(port = 5001, debug = True)
{
"api_name": "flask.Flask",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_... |
42615469832 | """Constants and enumerations"""
from enum import Enum, Flag
class RinnaiSystemMode(Enum):
    """Define system modes."""
    HEATING = 1
    EVAP = 2
    COOLING = 3
    RC = 4
    NONE = 5
class RinnaiOperatingMode(Enum):
    """Define unit operating modes."""
    NONE = 0
    MANUAL = 1
    AUTO = 2
class RinnaiCapabilities(Flag):
    """Define system capabilities."""
    # Bit flags: values are powers of two so capabilities can be combined.
    NONE = 0
    HEATER = 1
    COOLER = 2
    EVAP = 4
class RinnaiSchedulePeriod(Enum):
    """Define system schedule time periods."""

    def __str__(self):
        # Render as the raw protocol letter rather than the member name.
        return str(self.value)

    NONE = None
    WAKE = "W"
    LEAVE = "L"
    RETURN = "R"
    PRE_SLEEP = "P"
    SLEEP = "S"
class RinnaiUnitId(Enum):
    """Define unit ids."""

    def __str__(self):
        # Render as the raw protocol string rather than the member name.
        return str(self.value)

    HEATER = "HGOM"
    COOLER = "CGOM"
    EVAP = "ECOM"
    ZONE_A = "A"
    ZONE_B = "B"
    ZONE_C = "C"
    ZONE_D = "D"
    COMMON_ZONE = "U"
    # NOTE(review): list-valued entries in an Enum body become members whose
    # value is the (unhashable) list of the zone letters above — confirm
    # this grouping is intentional rather than module-level constants.
    MAIN_ZONES = [ ZONE_A, ZONE_B, ZONE_C, ZONE_D ]
    ALL_ZONES = [ ZONE_A, ZONE_B, ZONE_C, ZONE_D, COMMON_ZONE ]
# Temperature unit display strings.
TEMP_CELSIUS = "°C"
TEMP_FAHRENHEIT = "°F"

# Top-level keys of the system status payload.
SYSTEM = "SYST"
GENERAL_SYSTEM_OPERATION = "GSO"
GENERAL_SYSTEM_STATUS = "GSS"
OVERALL_OPERATION = "OOP"
CONFIGURATION = "CFG"
CAPABILITIES = "AVM"
FAULT_INFO = "FLT"
FAULT_DETECTED = "AV"

# Unit sections and their enable marker.
GAS_HEATING = "HG"
COOLING_ADDON = "CG"
COOLING_EVAPORATIVE = "EC"
MODULE_ENABLED = "Y"
MULTI_SET_POINT = "MTSP"

# Version fields.
FIRMWARE_VERSION = "VR"
WIFI_MODULE_VERSION = "CV"

# Switch / operating state fields and their values.
SWITCH_STATE = "SW"
OPERATING_STATE = "ST"
FAN_STATE = "FS"
PUMP_STATE = "PS"
STATE_ON = "N"
STATE_OFF = "F"
STATE_FAN_ONLY = "Z"

# Activity / busy flags reported by the units.
PREWETTING = "PW"
COOLER_BUSY = "BY"
PUMP_OPERATING = "PO"
FAN_OPERATING = "FO"
FAN_ACTIVE = "FS"
COMPRESSOR_ACTIVE = "CP"
PREHEATING = "PH"
GAS_VALVE_ACTIVE = "GV"
CALLING_FOR_HEAT = "HC"
CALLING_FOR_COOL = "CC"

# Measurements and settings.
MEASURED_TEMPERATURE = "MT"
FAN_SPEED_LEVEL = "FL"
SET_POINT = "SP" # either comfort level or set temp
TEMPERATURE_UNIT = "TU"
UNIT_FAHRENHEIT = "F"

# Scheduling fields.
SCHEDULE_PERIOD = "AT"
ADVANCE_PERIOD = "AZ"
SCHEDULE_OVERRIDE = "AO"
ADVANCED = "A"
USER_ENABLED = "UE"
AUTO_ENABLED = "AE"

# Operating program selection.
OPERATING_PROGRAM = "OP"
MODE_MANUAL = "M"
MODE_AUTO = "A"
| funtastix/pyrinnaitouch | pyrinnaitouch/const.py | const.py | py | 2,104 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "enum.Flag",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 25,
"... |
13043659438 | import logging
from ...node import InputTrigger
from ...stream import DataStream
from ...exceptions import SensorGraphSemanticError
from .scope import Scope
class GatedClockScope(Scope):
    """A scope that will gate all requested clocks with a latch.

    Args:
        sensor_graph (SensorGraph): The sensor graph we are working on.
        scope_stack (list(Scope)): The stack of already allocated scopes.
        input_latch ((DataStream, InputTrigger)): The input stream and condition
            that should be used to gate clocks passed through this scope. The
            stream must already be attached.
    """

    def __init__(self, sensor_graph, scope_stack, input_latch):
        parent = scope_stack[-1]
        alloc = parent.allocator
        # NOTE(review): the sensor_graph argument is overwritten with the
        # parent scope's graph here — confirm that is intentional.
        sensor_graph = parent.sensor_graph
        super(GatedClockScope, self).__init__(u"Gated Clock Scope", sensor_graph, alloc, parent)
        # Copy the latch value into a constant stream owned by this scope.
        stream = alloc.allocate_stream(DataStream.ConstantType)
        sensor_graph.add_node(u'({} always) => {} using copy_latest_a'.format(input_latch[0], stream))
        self.latch_stream = stream
        self.latch_trigger = input_latch[1]
        # One gated clock stream per clock class (see _classify_clock).
        self.clock_cache = {}
        self.logger = logging.getLogger(__name__)
        self.logger.debug("Allocating GatedClockScope on latch stream %s with condition %s", input_latch[0], input_latch[1])

    def _classify_clock(self, interval, basis):
        """Bucket a clock request into a cacheable class name."""
        if basis == 'system':
            # System intervals divisible by 10 share the standard tick;
            # others need the fast tick.
            if interval % 10 == 0:
                return 'standard'
            return 'fast'
        elif basis == 'tick_1':
            return 'tick_1'
        elif basis == 'tick_2':
            return 'tick_2'
        raise SensorGraphSemanticError("Unknown clock basis in GatedClockScope", scope=self.name, basis=basis, interval=interval)

    def clock(self, interval, basis):
        """Return a NodeInput tuple for triggering an event every interval.

        We request each distinct type of clock at most once and combine it with our
        latch stream each time it is requested.

        Args:
            interval (int): The interval (in seconds) at which this input should
                trigger.
        """
        cache_name = self._classify_clock(interval, basis)
        cache_data = self.clock_cache.get(cache_name)
        if cache_data is None:
            # First request for this clock class: build the gated stream.
            parent_stream, trigger = self.parent.clock(interval, basis)
            if trigger.use_count is False:
                raise SensorGraphSemanticError("Unsupported clock trigger in GatedClockScope", trigger=trigger)
            elif interval % trigger.reference != 0:
                raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", trigger=trigger, interval=interval)
            ratio = interval // trigger.reference
            stream = self.allocator.allocate_stream(DataStream.CounterType)
            latch_stream = self.allocator.attach_stream(self.latch_stream)
            # Tick only while the latch condition holds.
            self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream))
            self.clock_cache[cache_name] = (stream, ratio)
        else:
            stream, ratio = cache_data
            if interval % ratio != 0:
                raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", ratio=ratio, interval=interval)
        # Convert the requested interval into a tick count on the shared stream.
        count = interval // ratio
        clock_stream = self.allocator.attach_stream(stream)
        return clock_stream, InputTrigger(u'count', '>=', count)
| iotile/coretools | iotilesensorgraph/iotile/sg/parser/scopes/gated_clock_scope.py | gated_clock_scope.py | py | 3,501 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "scope.Scope",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "stream.DataStream.ConstantType",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "stream.DataStream",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "logg... |
17110154056 | import requests
import records
import json
# Records DB handle; DSN credentials are redacted in this copy ("....").
db = records.Database("mysql:///....?charset=utf8")
# db.query("""
# create table video_group (
# id int primary key auto_increment,
# group_id varchar(50),
# title varchar(100),
# data text,
# comment_count int default 0,
# like_count int default 0,
# watch_count int default 0
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
# """);
def main():
    """Fetch one page of video entries from 365yg and persist them to MySQL."""
    url = "http://m.365yg.com/list/"
    querystring = {"tag":"video","ac":"wap","format":"json_raw","cp":"", 'as': "A1A5183CCAA1445"}
    # Mobile Safari headers; the cookie value is redacted in this copy.
    headers = {
        'dnt': "1",
        'accept-encoding': "gzip, deflate, sdch",
        'accept-language': "zh-CN,zh;q=0.8,en;q=0.6",
        'user-agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
        'accept': "*/*",
        'referer': "http://m.365yg.com/?w2atif=1&channel=video&W2atIF=1",
        'cookie': "....",
        'connection': "keep-alive",
        'cache-control': "no-cache"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = response.json()
    for item in data['data']:
        group_id = item['group_id']
        title = item['title']
        # Counters default to zero when the API omits them.
        comment = item.get('comment_count', 0)
        like = item.get('like_count', 0)
        watch = item.get('video_detail_info', {}).get('video_watch_count', 0)
        # Store the raw item JSON alongside the extracted counters
        # (parameterised query: values are bound, not interpolated).
        db.query("""INSERT INTO `video_group` (`group_id`, `title`, `data`, `comment_count`, `like_count`, `watch_count`) VALUES (:group, :title, :data, :comment, :like, :watch); """,
            group=group_id, title=title, data=json.dumps(item), comment=comment, like=like, watch=watch
        )
if __name__ == "__main__":
main()
| zhangheli/ScrapyLabs | crawl_toutiao.py | crawl_toutiao.py | py | 1,859 | python | en | code | 25 | github-code | 1 | [
{
"api_name": "records.Database",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 44,
"usage_type": "call"
}
] |
30276485484 | from pyroll.cli.program import main
from pyroll.cli.config import RES_DIR
import click.testing
import os
def test_create_project(tmp_path):
    """`pyroll create-project` scaffolds input.py and config.toml in cwd."""
    runner = click.testing.CliRunner()
    # NOTE(review): os.chdir is never restored; consider monkeypatch.chdir
    # so the working directory does not leak into later tests.
    os.chdir(tmp_path)
    result = runner.invoke(main, ["create-project"])
    assert result.exit_code == 0
    print(result.output)
    fi = tmp_path / "input.py"
    fc = tmp_path / "config.toml"
    assert fi.exists()
    assert fc.exists()
    # The scaffolded input file must match the packaged template.
    assert fi.read_text() == (RES_DIR / "input.py").read_text()
| pyroll-project/pyroll-cli | tests/test_create_project.py | test_create_project.py | py | 495 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "click.testing.testing.CliRunner",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "click.testing.testing",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "click.testing",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": ... |
43495745433 | from datetime import datetime
from pytest import fixture
from .model import User
from .schema import UserSchema
from .interface import UserInterface
@fixture
def schema() -> UserSchema:
    """Provide a fresh UserSchema instance for each test."""
    return UserSchema()
def test_UserSchema_create(schema: UserSchema):
    """The schema fixture instantiates successfully."""
    assert schema
def test_UserSchema_works(schema: UserSchema):
    """Loading a full payload through the schema yields a correctly populated User."""
    payload = {
        "id": "1",
        "auth_provider": "google",
        "username": "JohnDoe",
        "first_name": "John",
        "family_name": "Doe",
        "picture_url": "www.google.com",
        "super_admin": True,
        "created_date": str(datetime.now()),
        "last_seen": str(datetime.now()),
    }
    params: UserInterface = schema.load(payload)
    user = User(**params)
    # Every simple string field should round-trip unchanged.
    expected = {
        "id": "1",
        "auth_provider": "google",
        "username": "JohnDoe",
        "first_name": "John",
        "family_name": "Doe",
        "picture_url": "www.google.com",
    }
    for attribute, value in expected.items():
        assert getattr(user, attribute) == value
    assert user.super_admin == True
| lucasg-mm/arborator-grew-nilc | backend/app/user/schema_test.py | schema_test.py | py | 1,027 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "schema.UserSchema",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "schema.UserSchema",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "schema.UserSchema"... |
26641924170 | from openpyxl import load_workbook
# Load an existing workbook from disk (read → inspect → modify workflow demo).
workbook = load_workbook(filename=r'F:\kinscloud.github.io\Python\Excel\testxls.xlsx')
#print(workbook.sheetnames)
# Work on the currently active worksheet.
sheet = workbook.active
# Print the value of every cell, row by row.
for row in sheet.rows:
#for row in sheet.iter_rows(min_row=2,max_row=3,min_col=1,max_col=3):
    for cell in row:
        print(cell.value)
#sheet['A1']='ids'
# Overwrite cell A1 via the cell object (equivalent to the commented direct assignment).
cell=sheet['A1']
cell.value='ids'
workbook.save(filename=r'F:\kinscloud.github.io\Python\Excel\testxls2.xlsx') | kinscloud/kinscloud.github.io | Python/Excel/openpyxlDemo.py | openpyxlDemo.py | py | 458 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 3,
"usage_type": "call"
}
] |
411798820 | import logging
from typing import Optional
from ..effect import EffectFactory
from .effect_checker import EffectChecker, AnnotationEffect, AnnotationRequest
class UTREffectChecker(EffectChecker):
    """UTR effect checker class.

    Classifies variants that fall outside a transcript's coding sequence (CDS)
    as 5'/3' UTR or UTR-intron effects, relative to the strand being queried.
    """
    def __init__(self) -> None:
        # Module-level logger for debug tracing of position arithmetic.
        self.logger = logging.getLogger(__name__)
    def create_utr_effect(
        self, request: AnnotationRequest, strand: str
    ) -> AnnotationEffect:
        """Create an UTR annotation effect.

        If the queried strand matches the transcript's strand the variant is
        upstream of the CDS (5'UTR); otherwise it is downstream (3'UTR). Also
        records the exonic distance between the variant and the CDS boundary.
        """
        if request.transcript_model.strand == strand:
            effect_name = "5'UTR"
        else:
            effect_name = "3'UTR"
        effect = EffectFactory.create_effect_with_prot_length(
            effect_name, request
        )
        self.logger.debug(
            "pos=%d cds end=%d",
            request.variant.ref_position_last - 1,
            request.transcript_model.cds[0],
        )
        # Distance is measured through exonic sequence only, from the variant
        # to the nearest CDS edge (cds[0] on '+', cds[1] on '-').
        if strand == "+":
            effect.dist_from_coding = request.get_exonic_distance(
                request.variant.corrected_ref_position_last,
                request.transcript_model.cds[0],
            )
        else:
            effect.dist_from_coding = request.get_exonic_distance(
                request.transcript_model.cds[1], request.variant.position
            )
        return effect
    def create_effect(
        self, request: AnnotationRequest, strand: str
    ) -> Optional[AnnotationEffect]:
        """Create UTR effect."""
        # Walk the exons: a variant overlapping an exon is plain UTR, one that
        # falls between two exons is a UTR-intron effect.
        coding_regions = request.transcript_model.exons
        last_position = request.variant.corrected_ref_position_last
        prev = None
        for i, j in enumerate(coding_regions):
            if request.variant.position <= j.stop and j.start <= last_position:
                return self.create_utr_effect(request, strand)
            if (
                prev is not None
                and prev <= request.variant.position
                and last_position < j.start
            ):
                if request.transcript_model.strand == strand:
                    effect_name = "5'UTR-intron"
                else:
                    effect_name = "3'UTR-intron"
                return EffectFactory.create_intronic_non_coding_effect(
                    effect_name, request, prev, j.start, i
                )
            prev = j.stop
        return None
    def check_stop_codon(
        self, request: AnnotationRequest
    ) -> Optional[AnnotationEffect]:
        """Check for stop codon.

        Returns a 3'UTR effect when the variant overlaps the stop codon but
        leaves its position unchanged; otherwise None.
        """
        if not request.has_utr3_region():
            return None
        try:
            ref_aa, alt_aa = request.get_amino_acids()
            if "End" not in ref_aa:
                return None
            ref_index = ref_aa.index("End")
            alt_index = alt_aa.index("End")
            # Stop codon retained at the same index: treat as 3'UTR change.
            if ref_index == alt_index:
                effect = EffectFactory.create_effect_with_prot_length(
                    "3'UTR", request
                )
                effect.dist_from_coding = 0
                return effect
        except ValueError:
            # "End" missing from alt_aa — not a preserved stop codon.
            pass
        except IndexError:
            pass
        return None
    def get_effect(
        self, request: AnnotationRequest
    ) -> Optional[AnnotationEffect]:
        # Entry point: dispatch stop-codon overlaps first, then decide which
        # side of the CDS the variant falls on.
        if request.is_stop_codon_affected():
            return self.check_stop_codon(request)
        self.logger.debug(
            "utr check: %d<%d or %d>%d exons:%d-%d",
            request.variant.position,
            request.transcript_model.cds[0],
            request.variant.position,
            request.transcript_model.cds[1],
            request.transcript_model.exons[0].start,
            request.transcript_model.exons[-1].stop,
        )
        if request.variant.position < request.transcript_model.cds[0]:
            return self.create_effect(request, "+")
        if request.variant.position > request.transcript_model.cds[1]:
            return self.create_effect(request, "-")
        return None
| iossifovlab/gpf | dae/dae/effect_annotation/effect_checkers/utr.py | utr.py | py | 3,917 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "effect_checker.EffectChecker",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "effect_checker.AnnotationRequest",
"line_number": 15,
"usage_type": "name"
},
{
"ap... |
73602744993 | from typing import Any
import gym
from gym import spaces
import numpy as np
import pybullet as p
from robot_utils import RegisterScenes
from robot_utils import Robot, rayTest, checkCollision
from robot_utils.log import *
from robot_utils.utils import control_miniBox
import pybullet_data
class MsnDiscrete(gym.Env):
    """Discrete-action PyBullet navigation environment.

    A two-wheel robot drives from ``depart_pos`` towards ``target_pos`` in a
    fenced arena, sensing obstacles with a simulated laser scan. The
    observation is the vector of laser ranges plus distance and angle to the
    target; the action is one of 8 discrete drive commands.
    """
    meta_data = {'render.modes' : ['human']}
    def __init__(self, scene_name : str = "plane_static_obstacle-A", render : bool = False, evaluate : bool = False,
                 base_thickness=0.2, base_radius=0.5, wheel_thickness=0.1, left_wheel_joint_index=3, right_wheel_joint_index=2,
                 target_velocity=10., max_force=10., multiply=2., debug_text_color=(0., 0., 0.), debug_text_size=1.2,
                 miss_color=(0., 1., 0.), hit_color=(1., 0., 0.), ray_debug_line_width=2., max_distance=100., laser_length=18.,
                 laser_num=1024, use_max_coor=False, center_pos=(0., 0., 0.), internal_length=20, internal_width=20,
                 height=4, thickness=2, fence_mass=10000., fence_color=(1., 1., 1., 1.), depart_pos=(0., -9., 0.2), depart_euler=(0., 0., np.pi / 2),
                 target_pos=(0., 9., 0.2), collision_reward=-8, reach_target_reward=1000000, distance_reward_coe=-10,
                 target_radius=2.5, distance_change_reward_coe=1000, time_reward_coe=-100, done_distance=1., done_collision=50,
                 done_step_num=72000) -> None:
        """Store configuration, build the action/observation spaces, connect
        to the PyBullet engine and perform an initial reset.

        ``scene_name`` may be "random" to pick one of the registered scenes
        per episode; ``render`` opens a GUI client instead of DIRECT mode.
        """
        super().__init__()
        self.all_scene = ["plane_static_obstacle-A", "plane_static_obstacle-B", "plane_static_obstacle-C", "random"]
        self.scene_name = scene_name
        self._render = render
        self._evaluate = evaluate
        self.base_thickness=base_thickness
        self.base_radius=base_radius
        self.wheel_thickness=wheel_thickness
        self.left_wheel_joint_index=left_wheel_joint_index
        self.right_wheel_joint_index=right_wheel_joint_index
        self.target_velocity=target_velocity
        self.max_force=max_force
        self.multiply=multiply
        self.debug_text_color=debug_text_color
        self.debug_text_size=debug_text_size
        self.miss_color=miss_color
        self.hit_color=hit_color
        self.ray_debug_line_width=ray_debug_line_width
        self.max_distance=max_distance
        self.laser_length=laser_length
        self.laser_num=laser_num
        self.use_max_coor=use_max_coor
        self.center_pos=center_pos
        self.internal_length=internal_length
        self.internal_width=internal_width
        self.height=height
        self.thickness=thickness
        self.fence_mass=fence_mass
        self.fence_color=fence_color
        self.depart_pos=depart_pos
        self.depart_euler=depart_euler
        self.target_pos=target_pos
        self.collision_reward=collision_reward
        self.reach_target_reward=reach_target_reward
        self.distance_reward_coe=distance_reward_coe
        self.target_radius=target_radius
        self.distance_change_reward_coe=distance_change_reward_coe
        self.time_reward_coe=time_reward_coe
        self.done_distance=done_distance
        self.done_collision=done_collision
        self.done_step_num=done_step_num
        # define action space and observation space
        # Observation layout: laser_num ray distances, then [distance, angle]
        # to the target.
        self.action_space = spaces.Discrete(8)
        self.observation_space = spaces.Box(
            low =np.float32([0.] * self.laser_num + [0., 0.]),
            high=np.float32([self.laser_length + 1] * self.laser_num + [self.max_distance, np.pi])
        )
        # connect to engin
        self._physics_client_id = p.connect(p.GUI if render else p.DIRECT)
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        self._register_scene = RegisterScenes()
        self.seed()
        self.reset()
    def reset(self) -> Any:
        """Rebuild the simulation world and return the initial observation."""
        p.resetSimulation(physicsClientId=self._physics_client_id)
        p.setGravity(0., 0., -9.8, physicsClientId=self._physics_client_id)
        p.setRealTimeSimulation(0)
        self.step_num = 0
        self.collision_num = 0
        self.pre_dis = self.__distance(self.depart_pos, self.target_pos) # previous distance between robot and target
        self.depart_target_distance = self.__distance(self.depart_pos, self.target_pos) # distance between depart pos and target pos
        self.robot = Robot(
            basePos=self.depart_pos,
            baseOri=p.getQuaternionFromEuler(self.depart_euler),
            physicsClientId=self._physics_client_id
        )
        if self.scene_name == "random":
            self.scene_name = np.random.choice(self.all_scene)
        self.scene = self._register_scene.construct(scene_name=self.scene_name)
        state = self.robot.get_observation(targetPos=self.target_pos)
        # add debug items to the target pos
        if self._evaluate:
            self.target_line = p.addUserDebugLine(
                lineFromXYZ=[self.target_pos[0], self.target_pos[1], 0.],
                lineToXYZ=[self.target_pos[0], self.target_pos[1], 5.],
                lineColorRGB=[1., 1., 0.2]
            )
            self.rayDebugLineIds = []
            froms, tos, results = rayTest(self.robot.robot, ray_length=self.laser_length, ray_num=self.laser_num)
            for index, result in enumerate(results):
                color = self.miss_color if result[0] == -1 else self.hit_color
                self.rayDebugLineIds.append(p.addUserDebugLine(froms[index], tos[index], color, self.ray_debug_line_width))
        return np.array(state)
    def render(self, mode='human'):
        # Rendering is handled by the PyBullet GUI client when render=True.
        pass
    def seed(self, seed=None):
        """Seed the environment's RNG, gym style."""
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]
    def close(self):
        # Disconnect from the physics server and mark the client as closed.
        if self._physics_client_id >= 0:
            p.disconnect()
            self._physics_client_id = -1
    def sample(self):
        """Return a random action from the action space."""
        return self.action_space.sample()
    def step(self, action):
        """
        first set, second step
        then calculate the reward
        return state, reward, done, info
        """
        if not hasattr(self, "robot"):
            raise RuntimeError("reset before step!!!")
        control_miniBox(self.robot.robot, instruction=action, target_velocity=self.target_velocity,
                        multiply=self.multiply, left_wheel_joint_index=self.left_wheel_joint_index,
                        right_wheel_joint_index=self.right_wheel_joint_index, max_force=self.max_force,
                        physicsClientId=self._physics_client_id)
        p.stepSimulation(physicsClientId=self._physics_client_id)
        self.step_num += 1
        state = self.robot.get_observation(self.target_pos)
        reward = self.__reward_func(state)
        # Episode ends on reaching the target radius or exceeding the step cap.
        if state[-2] < self.target_radius:
            done = True
        elif self.step_num > self.done_step_num:
            done = True
        else:
            done = False
        info = {"distance" : state[-2], "collision_num" : self.collision_num}
        # under evaluate mode, extra debug items need to be rendered
        if self._evaluate:
            froms, tos, results = rayTest(self.robot.robot, ray_length=self.laser_length, ray_num=self.laser_num)
            for index, result in enumerate(results):
                self.rayDebugLineIds[index] = p.addUserDebugLine(
                    lineFromXYZ=froms[index],
                    lineToXYZ=tos[index] if result[0] == -1 else result[3],
                    lineColorRGB=self.miss_color if result[0] == -1 else self.hit_color,
                    lineWidth=self.ray_debug_line_width,
                    replaceItemUniqueId=self.rayDebugLineIds[index]
                )
        return np.array(state), reward, done, info
    def __reward_func(self, state):
        # Reward = collision penalty + progress towards target + terminal
        # bonus + per-step time penalty.
        if checkCollision(self.robot.robot, debug=False):
            self.collision_num += 1
            Rc = self.collision_reward
        else:
            Rc = 0
        cur_dis = self.__distance(self.robot.curPos(), self.target_pos)
        Rp = self.distance_change_reward_coe * (self.pre_dis - cur_dis)
        # TODO : quite important
        # pre_dis is only refreshed every 1000 steps, so Rp accumulates
        # progress relative to that anchor in between.
        if self.step_num % 1000 == 0:
            # msn_debug("pre_dis is updated, increment:{}".format(self.pre_dis - cur_dis))
            self.pre_dis = cur_dis
        if state[-2] < self.target_radius:
            Rr = self.reach_target_reward
        else:
            # Rg = self.DISTANCE_REWARD_COE * cur_dis / self.depart_target_distance
            Rr = 0.
        Rt = self.time_reward_coe
        # msn_debug("Rc={}, Rp={}, Rr={}, Rt={}".format(Rc, Rp, Rr, Rt))
        # origin : Rc + Rp + Rr + Rt
        return Rc + Rp + Rr + Rt
    def __distance(self, v1, v2, _type="l2"):
        # Euclidean (l2) distance between two position vectors.
        v1 = np.array(v1)
        v2 = np.array(v2)
        if _type == "l2":
            return np.linalg.norm(v1 - v2)
| LSTM-Kirigaya/MsnEnvironment | env/MsnDiscrete.py | MsnDiscrete.py | py | 8,863 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.Env",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Discrete",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line... |
39944748888 | import email
import email.policy
import email.utils
import email.message
import email.mime.multipart
import email.mime.text
import email.parser
import os
import time
import traceback
import boto3
from botocore.exceptions import ClientError
# us-east-1
REGION = os.getenv("REGION")
# your-bucket-name
S3_BUCKET = os.getenv("S3_BUCKET")
# domain.tld
DOMAIN = os.getenv("DOMAIN")
# any token, a SHA1 token works well because of only 32 character length
TOKEN = os.getenv("TOKEN")
# user@domain.tld
RECIPIENT = os.getenv("RECIPIENT")
# replies -- this will become <REPLY_ADDR>_<TOKEN>@<DOMAIN>
REPLY_ADDR = os.getenv("REPLY_ADDR")
# send email to this address for it to be forwarded
# -- this will become <NEW_ADDR>_<TOKEN>@<DOMAIN>
#
# This allows you to email
# "<NEW_ADDR>_<TOKEN>@<DOMAIN>"
# with a subject matching the following format:
# <FROM_ADDR> # <TO_ADDR> # <SUBJECT>
# and have that email forward to the to address, from the from address
# with the specified subject.
#
# i.e. a subject line of "some_alias@aliasdomain.tld # webmaster@example.com # Hi"
# will send an email as:
# From: some_alias@aliasdomain.tld
# To: webmaster@example.com
# Subject: Hi
#
# This means you can initiate email conversations from your aliased domain without
# needing them to email you first.
NEW_ADDR = os.getenv("NEW_ADDR")
# noreply -- this will become <NO_REPLY_ADDR>@<DOMAIN>
NO_REPLY_ADDR = os.getenv("NO_REPLY_ADDR")
# bouncer -- this will become <BOUNCE_ADDR>@<DOMAIN>
BOUNCE_ADDR = os.getenv("BOUNCE_ADDR")
# user1@domain.tld,user2@domain.tld
FROM_ALLOWLIST = os.getenv("FROM_ALLOWLIST")
FROM_ALLOWLIST = (
FROM_ALLOWLIST.replace(" ", "").split(",")
if FROM_ALLOWLIST else None
)
class CreateError(Exception):
    """Raised when an outgoing message cannot be built (bad token, sender not allowed)."""
    pass
class Bounce(Exception):
    """Raised when an inbound message must be bounced back via SES."""
    # message_id: SES id of the original message being bounced
    message_id = None
    # recipient: address the bounce notice is addressed to
    recipient = None
    # reason: SES BounceType string (e.g. "DoesNotExist", "ContentRejected")
    reason = None
    def __init__(self, message_id, recipient, reason):
        self.message_id = message_id
        self.recipient = recipient
        self.reason = reason
        super().__init__()
def put_db_message(message_id, to_addr, from_addr):
    """Record a sent message's id and normalized to/from addresses in DynamoDB.

    Entries expire after 90 days via the `expires` TTL attribute.
    """
    print(f"Write Message-ID: '{message_id}' to DB")
    item = {
        "message_id": message_id,
        "to": email.utils.parseaddr(to_addr)[1],
        "from": email.utils.parseaddr(from_addr)[1],
        "expires": int(time.time()) + 7776000,  # 90 days
    }
    emails_table = boto3.resource("dynamodb").Table("emails")
    emails_table.put_item(Item=item)
def get_db_message(message_id):
    """Fetch a previously recorded message item from DynamoDB by its id.

    Raises KeyError if no item exists for this message id.
    """
    print(f"Read Message-ID: '{message_id}' from DB")
    emails_table = boto3.resource("dynamodb").Table("emails")
    response = emails_table.get_item(Key={"message_id": message_id})
    return response["Item"]
def get_message_from_s3(message_id):
    """Download the raw RFC822 message bytes stored in S3 under the message id."""
    print(f"Read Message-ID: '{message_id}' from S3")
    s3 = boto3.client("s3")
    obj = s3.get_object(Bucket=S3_BUCKET, Key=message_id)
    return obj["Body"].read()
def get_db_blocklist(address):
    """Return the blocklist entry for `address` (or a domain), or None if absent."""
    blocklist_table = boto3.resource("dynamodb").Table("blocklist")
    try:
        response = blocklist_table.get_item(Key={"address": address})
        return response["Item"]
    except KeyError:
        # No "Item" key in the response means the address is not blocked.
        return None
def bounce_blocklist(message_id, to_addr, from_addr):
    """Raise Bounce if either address (or the sender's domain) is blocklisted.

    A blocked recipient bounces as "DoesNotExist"; a blocked sender address or
    sender domain bounces as "ContentRejected".
    """
    if get_db_blocklist(to_addr):
        print(f"'{to_addr}' is in BLOCKLIST: 'to_addr'")
        raise Bounce(
            message_id=message_id,
            recipient=to_addr,
            reason="DoesNotExist"
        )
    if get_db_blocklist(from_addr):
        print(f"'{from_addr}' is in BLOCKLIST: 'from_addr'")
        raise Bounce(
            message_id=message_id,
            recipient=to_addr,
            reason="ContentRejected"
        )
    sender_domain = from_addr.partition("@")[2]
    if get_db_blocklist(sender_domain):
        print(f"""'{from_addr.partition("@")[2]}' is in BLOCKLIST: 'from_domain'""")
        raise Bounce(
            message_id=message_id,
            recipient=to_addr,
            reason="ContentRejected"
        )
def sender_auth(to_addr, from_addr):
    """Authorize an outbound relay request.

    The local part of `to_addr` must carry the shared TOKEN after the first
    underscore, and `from_addr` must be in FROM_ALLOWLIST; otherwise raise
    CreateError.
    """
    local_part = to_addr.partition("@")[0]
    supplied_token = local_part.partition("_")[2]
    if supplied_token != TOKEN:
        raise CreateError("Invalid token")
    if from_addr not in FROM_ALLOWLIST:
        raise CreateError(
            f"'{from_addr}' not in allow list ('{FROM_ALLOWLIST}')"
        )
def send_email(message):
    """Send a prepared SESv2 send_email kwargs dict and return the API response."""
    return boto3.client("sesv2", REGION).send_email(**message)
def send_bounce(message_id, recipient, reason):
    """Send an SES bounce for the original message; log and re-raise SES errors."""
    bounce_kwargs = {
        "OriginalMessageId": message_id,
        "BounceSender": f"{BOUNCE_ADDR}@{DOMAIN}",
        "BouncedRecipientInfoList": [
            {"Recipient": recipient, "BounceType": reason}
        ],
    }
    try:
        resp = boto3.client("ses", REGION).send_bounce(**bounce_kwargs)
    except ClientError as e:
        print(f"""Failed to send email: {e.response["Error"]["Message"]}""")
        raise e
    else:
        print(f"""Bounce sent! Message-ID: '{resp["MessageId"]}'""")
def create_message(message_id):
    """Build the SESv2 send_email payload for a stored inbound message.

    Routing, by the inbound To address:
      * ``<REPLY_ADDR>_<TOKEN>@<DOMAIN>`` with In-Reply-To: continue a relayed
        thread — look up the original to/from pair in DynamoDB and swap them.
      * ``<NEW_ADDR>_<TOKEN>@<DOMAIN>``: initiate a new email — the subject
        carries ``<FROM> # <TO> # <SUBJECT>``.
      * anything else: forward to RECIPIENT, rewriting the sender to a
        no-reply relay address.
    Returns (to_addr, from_addr, ses_send_email_kwargs).
    Raises Bounce for blocklisted parties and CreateError on failed auth.
    """
    obj = email.message_from_string(
        get_message_from_s3(message_id).decode(),
        policy=email.policy.default
    )
    msg = email.mime.multipart.MIMEMultipart()
    body = obj.get_body()
    msg.attach(body)
    to_addr = email.utils.parseaddr(obj.get("To"))[1]
    from_addr = email.utils.parseaddr(obj.get("From"))[1]
    in_reply_to = obj.get("In-Reply-To")
    subject = obj.get("Subject")
    bounce_blocklist(message_id, to_addr, from_addr)
    # Carry over attachments from the original message.
    for payload in obj.get_payload():
        if (
            isinstance(payload, email.message.EmailMessage)
            and payload.is_attachment()
        ):
            msg.attach(payload)
    if to_addr == f"{REPLY_ADDR}_{TOKEN}@{DOMAIN}" and in_reply_to:
        sender_auth(to_addr, from_addr)
        # Strip angle brackets and the @domain part to recover the stored id.
        clean_in_reply_to = (
            in_reply_to.replace("<", "").replace(">", "").partition("@")[0]
        )
        r = get_db_message(clean_in_reply_to)
        sender = r["to"]
        recipient = r["from"]
    elif to_addr == f"{NEW_ADDR}_{TOKEN}@{DOMAIN}":
        sender_auth(to_addr, from_addr)
        # Subject format: <FROM_ADDR> # <TO_ADDR> # <SUBJECT>
        from_addr, to_addr, subject = subject.split("#")
        from_addr, to_addr, subject = from_addr.strip(), to_addr.strip(), subject.strip()
        sender = from_addr
        recipient = to_addr
    else:
        sender = (
            f""""{from_addr}" [Relayed from "{to_addr}"] """
            f"""<{NO_REPLY_ADDR}@{DOMAIN}>"""
        )
        recipient = RECIPIENT
    msg["Reply-To"] = f"{REPLY_ADDR}_{TOKEN}@{DOMAIN}"
    msg["Subject"] = subject
    msg["From"] = sender
    msg["To"] = recipient
    if in_reply_to:
        msg["In-Reply-To"] = in_reply_to
    if obj.get_all("References"):
        msg["References"] = "\r\n ".join(obj.get_all("References"))
    return (
        to_addr, from_addr,
        {
            "FromEmailAddress": sender,
            "Destination": {"ToAddresses": [recipient]},
            "ReplyToAddresses": [msg["Reply-To"]] if "Reply-To" in msg else [],
            "Content": {"Raw": {"Data": msg.as_string().encode()}},
        },
    )
def lambda_handler(event, context):
    """AWS Lambda entry point for an SES inbound-email event.

    Builds and relays the message; on Bounce sends an SES bounce, on
    CreateError logs and drops. Returns True on handled-but-not-sent paths;
    unexpected errors propagate so Lambda records a failure.
    """
    message_id = event["Records"][0]["ses"]["mail"]["messageId"]
    print(f"Received Message-ID: '{message_id}'")
    try:
        to_addr, from_addr, message = create_message(message_id)
    except Bounce as b:
        send_bounce(b.message_id, b.recipient, b.reason)
        return True
    except CreateError as e:
        # Auth/format failure: log and swallow — nothing to forward.
        print(traceback.format_exc())
        return True
    except Exception as e:
        raise e
    try:
        resp = send_email(message)
    except ClientError as e:
        print(f"""Failed to send email: {e.response["Error"]["Message"]}""")
        raise e
    else:
        print(f"""Email sent! Message-ID: '{resp["MessageId"]}'""")
    # Record the mapping for future replies, except for reply-path traffic.
    if to_addr != f"{REPLY_ADDR}_{TOKEN}@{DOMAIN}":
        try:
            put_db_message(resp["MessageId"], to_addr, from_addr)
        except Exception as e:
            # Best-effort bookkeeping: the mail is already sent.
            print(traceback.format_exc())
            pass
| kura/private-relay | lambda.py | lambda.py | py | 7,749 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 26,
... |
15471163955 | import torch
def css(outputs, labels, n_classes, m, m2):
    """Cost-sensitive deferral loss.

    Column ``n_classes`` of ``outputs`` holds the defer option; the loss mixes
    the log-probability of the true label (weight m2) with the
    log-probability of deferring (weight m), averaged over the batch.
    """
    n = outputs.size(0)
    rows = range(n)
    defer_cols = [n_classes] * n
    per_sample = (
        -m2 * torch.log2(outputs[rows, labels])
        - m * torch.log2(outputs[rows, defer_cols])
    )
    return torch.sum(per_sample) / n
def my_CrossEntropyLoss(outputs, labels):
    """Mean negative log2-likelihood of the true labels.

    ``outputs`` are assumed to be probabilities (rows sum to 1) — TODO confirm.
    """
    n = outputs.size()[0]  # batch_size
    # pick the values corresponding to the labels
    picked = outputs[range(n), labels]
    return torch.sum(-torch.log2(picked)) / n
def madras_loss(outputs, rej, labels, expert, eps = 10e-12):
    # MixOfExperts loss of Madras et al. 2018
    # rej[:, 0] weights the (detached) classifier loss, rej[:, 1] the expert
    # loss; the classifier's own (grad-carrying) loss is always added on top.
    n = outputs.size()[0]
    rows = range(n)
    detached = outputs.detach()
    net_nll_no_grad = -torch.log2(detached[rows, labels] + eps)
    net_nll = -torch.log2(outputs[rows, labels] + eps)
    exp_nll = -torch.log2(expert[rows, labels] + eps)
    combined = rej[rows, 0] * net_nll_no_grad + rej[rows, 1] * exp_nll + net_nll
    return torch.sum(combined) / n
| Xiaozhi-sudo/learning-to-defer | CIFAR/losses.py | losses.py | py | 1,128 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.log2",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.log2",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.log2",
"line_number": 13,
... |
1936442404 | import re
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.html import escape
from django.conf import settings
from django_fortunes.models import Fortune
register = template.Library()
@register.filter
@stringfilter
def fortunize(value):
    """
    Transforms a fortune plain text into htmlized (but safe) one.
    """
    rendered = []
    for line_no, raw_line in enumerate(value.splitlines()):
        row_class = "even" if line_no % 2 else "odd"
        # Lines shaped like "<nick> quote" become dt/dd pairs.
        matches = re.findall(r"^<(\w+)>\s?(.*)$", raw_line.strip())
        if matches:
            for nick, quote in matches:
                rendered.append(
                    "<dt class=\"%s\"><%s></dt><dd><q>%s</q></dd>\n"
                    % (row_class, nick, escape(quote))
                )
        else:
            rendered.append("<dt>&nbsp;</dt><dd>%s</dd>\n" % (escape(raw_line)))
    return "<dl>%s</dl>" % "".join(rendered)
@register.inclusion_tag('partials/topcontributors.html')
def top_contributors():
"""
Displays the list of MAX_TOP_CONTRIBUTORS top contributors
"""
max = getattr(settings, 'FORTUNES_MAX_TOP_CONTRIBUTORS', 5)
return {'authors': Fortune.objects.top_authors()[:max]} | n1k0/djortunes | django_fortunes/templatetags/fortune_extras.py | fortune_extras.py | py | 1,168 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "django.template.Library",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.utils.html.... |
18931784296 | from django.urls import path
from .views import *
urlpatterns = [
path('api/', PostList.as_view()),
path('api/<int:pk>/', PostDetail.as_view()),
path("", sorry, name="sorry"),
# path('location/', LocationList.as_view()),
# path('location/<int:pk>/', LocationDetail.as_view()),
] | mehtanishad/Rest_Api_Assessment | rest_api/restApi_App/urls.py | urls.py | py | 304 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
}
] |
22019597734 | from typing import List
class Solution:
def numRescueBoats(self, people: List[int], limit: int) -> int:
people.sort()
heavy_person = len(people) - 1
light_person = 0
boats = 0
while heavy_person >= light_person:
if heavy_person == light_person:
boats += 1
break
if people[heavy_person] + people[light_person] <= limit:
light_person += 1
boats += 1
heavy_person -= 1
return boats
if __name__ == "__main__":
solution = Solution()
print(solution.numRescueBoats([3, 5, 3, 4], 5))
| zahedul/leetcode | boats_to_save_people.py | boats_to_save_people.py | py | 643 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
11424506 | #기초
import numpy as np
import cv2
print(cv2.__version__)
# Load an image and convert it to grayscale.
img = cv2.imread("./image/food.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("food", img)
cv2.imshow("food - gray", gray)
cv2.waitKey(0)
# Fix: cv2.destroyWindow() requires a window name and raised TypeError;
# destroyAllWindows() closes everything, which is what the demo intends.
cv2.destroyAllWindows()
# Resize (shrink) with area interpolation.
resized = cv2.resize(img, None, fx = 0.2, fy = 0.2, interpolation=cv2.INTER_AREA)
cv2.imshow("resized", resized)
cv2.waitKey(0) | GitOfVitol/openCVProj | openCV기초.py | openCV기초.py | py | 397 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.__version__",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"l... |
29544867356 | import pytest
from eth2.beacon.db.chain import BeaconChainDB
from eth2.beacon.state_machines.forks.serenity.blocks import (
SerenityBeaconBlock,
)
from eth2.beacon.tools.builder.initializer import (
create_mock_genesis,
)
from eth2.beacon.tools.builder.proposer import (
create_mock_block,
)
from eth2.beacon.tools.builder.validator import (
create_mock_signed_attestations_at_slot,
)
@pytest.mark.long
@pytest.mark.parametrize(
    (
        'num_validators,'
        'slots_per_epoch,'
        'min_attestation_inclusion_delay,'
        'target_committee_size,'
        'shard_count'
    ),
    [
        (40, 8, 2, 3, 2)
    ]
)
def test_demo(base_db,
              num_validators,
              config,
              keymap,
              fixture_sm_class):
    """End-to-end chain demo: build 3 epochs of blocks with full attestation
    participation and assert the resulting state justifies and finalizes."""
    genesis_slot = config.GENESIS_SLOT
    genesis_epoch = config.GENESIS_EPOCH
    chaindb = BeaconChainDB(base_db)
    genesis_state, genesis_block = create_mock_genesis(
        num_validators=num_validators,
        config=config,
        keymap=keymap,
        genesis_block_class=SerenityBeaconBlock,
    )
    # All validators must be active at genesis.
    for i in range(num_validators):
        assert genesis_state.validator_registry[i].is_active(genesis_slot)
    chaindb.persist_block(genesis_block, SerenityBeaconBlock)
    chaindb.persist_state(genesis_state)
    state = genesis_state
    block = genesis_block
    chain_length = 3 * config.SLOTS_PER_EPOCH
    blocks = (block,)
    attestations_map = {}  # Dict[Slot, Sequence[Attestation]]
    for current_slot in range(genesis_slot + 1, genesis_slot + chain_length):
        # Include attestations once they are old enough to satisfy the
        # minimum inclusion delay.
        if current_slot > genesis_slot + config.MIN_ATTESTATION_INCLUSION_DELAY:
            attestations = attestations_map[current_slot - config.MIN_ATTESTATION_INCLUSION_DELAY]
        else:
            attestations = ()
        block = create_mock_block(
            state=state,
            config=config,
            state_machine=fixture_sm_class(
                chaindb,
                blocks[-1],
            ),
            block_class=SerenityBeaconBlock,
            parent_block=block,
            keymap=keymap,
            slot=current_slot,
            attestations=attestations,
        )
        # Get state machine instance
        sm = fixture_sm_class(
            chaindb,
            blocks[-1],
        )
        state, _ = sm.import_block(block)
        chaindb.persist_state(state)
        chaindb.persist_block(block, SerenityBeaconBlock)
        blocks += (block,)
        # Mock attestations
        # Every validator attests (ratio 1.0) so justification can proceed.
        attestation_slot = current_slot
        attestations = create_mock_signed_attestations_at_slot(
            state=state,
            config=config,
            attestation_slot=attestation_slot,
            beacon_block_root=block.root,
            keymap=keymap,
            voted_attesters_ratio=1.0,
        )
        attestations_map[attestation_slot] = attestations
    assert state.slot == chain_length - 1 + genesis_slot
    assert isinstance(sm.block, SerenityBeaconBlock)
    # Justification assertions
    assert state.justified_epoch == 2 + genesis_epoch
    assert state.finalized_epoch == 1 + genesis_epoch
| hwwhww/trinity | tests/eth2/beacon/state_machines/test_demo.py | test_demo.py | py | 3,132 | python | en | code | null | github-code | 1 | [
{
"api_name": "eth2.beacon.db.chain.BeaconChainDB",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "eth2.beacon.tools.builder.initializer.create_mock_genesis",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "eth2.beacon.state_machines.forks.serenity.blocks.Ser... |
21224899005 | import pika
import psutil
import time
# Conecta ao RabbitMQ
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) # Conecta ao servidor RabbitMQ local
channel = connection.channel() # Cria um canal de comunicação
# Declara o tópico de temperatura
channel.queue_declare(queue='temperature') # Declara a fila "temperature" para receber mensagens de temperatura
def publish_temp_cpu():
# Publica a temperatura da CPU no tópico
temperature = psutil.sensors_temperatures()['coretemp'][0].current # Obtém a temperatura atual da CPU
channel.basic_publish(exchange='', routing_key='temperature', body=str(temperature)) # Publica a temperatura no tópico "temperature"
while True:
publish_temp_cpu() # Chama a função para publicar a temperatura da CPU no tópico
time.sleep(3) # Espera por 3 segundos antes de publicar a próxima temperatura
| mandaver/Atividades_Sistemas_Dist | Atividade_2/produtor.py | produtor.py | py | 915 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "pika.BlockingConnection",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "psutil.sensors_temperatures",
"line_number": 14,
"usage_type": "call"
},
{
"api_n... |
900614867 | import random
import os
import shutil
from PIL import Image, ImageDraw
import torch
from torchvision import datasets, transforms
from detectron2.utils.logger import setup_logger
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
import asyncio
def crop_portraits(portraits_list, film_frame, id):
    """Crop each (x, y, w, h) region out of `film_frame` and save it as a PNG
    under ./static/images/<id>/out/."""
    total = len(portraits_list)
    print(f"Cropping {total} images...")
    for idx, (x1, y1, w, h) in enumerate(portraits_list):
        face = film_frame.crop((x1, y1, x1 + w, y1 + h))
        face.save(f"./static/images/{id}/out/face{idx}.png", format='PNG')
        print(f"Cropped {idx+1}/{total} portraits.")
    return
async def trace_bbox(metadata_bbox, portraits_list, film_frame, id):
    """Draw each detection bbox on a copy of the frame, crop the matching
    portrait region, resize to 224x224 and save it as face<i>_bbox.png."""
    total = len(portraits_list)
    print(f"Tracing bbox for {total} images...")
    for idx, entry in enumerate(metadata_bbox):
        bx, by, bw, bh = entry["bbox"]
        # Draw on a copy so the original frame stays untouched.
        annotated = film_frame.copy()
        ImageDraw.Draw(annotated).rectangle(
            [(bx, by), (bx + bw, by + bh)], outline ="green"
        )
        cx, cy, cw, ch = portraits_list[idx]
        face = annotated.crop((cx, cy, cx + cw, cy + ch)).resize((224,224))
        face.save(f"./static/images/{id}/out/face{idx}_bbox.png", format='PNG')
        print(f"Traced {idx+1}/{total} bboxes.")
    return
def generate_id():
    """Pick a random run id and (re)create its ./static/images/<id>/out dir."""
    new_id = random.randint(0, 1000)
    out_dir = f"./static/images/{new_id}/out"
    # Wipe any leftovers from a previous run that happened to use this id.
    if os.path.exists(out_dir):
        shutil.rmtree(f"./static/images/{new_id}")
    os.makedirs(out_dir)
    return new_id
def create_dataloader(id):
    """Build a DataLoader over ./static/images/<id> yielding 224x224
    ImageNet-normalized tensors, one image per batch."""
    inference_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(
            [0.485, 0.456, 0.406], #ImageNet Mean
            [0.229, 0.224, 0.225] #ImageNet StdDev
        )
    ])
    dataset = datasets.ImageFolder(f"./static/images/{id}", inference_transform)
    # batch size 1 so can get file names correctly
    return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)
def load_classifier():
    """Load the best classifier checkpoint onto GPU if available, else CPU.

    Returns the loaded model moved to the chosen device.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Was an f-string with no placeholders; plain literal is equivalent.
    path_to_model = "./Best_Classifier/output/model_best.pt"
    # `device` is already a torch.device — no need to wrap it again.
    # NOTE(review): torch.load unpickles arbitrary code; only load trusted checkpoints.
    model = torch.load(path_to_model, map_location=device)
    model.to(device)
    return model
def load_detector():
device = "cuda:0" if torch.cuda.is_available() else "cpu"
setup_logger()
cfg = get_cfg()
cfg.merge_from_file("./Best_Detector/output/config.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.MODEL.WEIGHTS = "./Best_Detector/output/model_final.pth"
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # "Face"
cfg.MODEL.DEVICE = device
predictor = DefaultPredictor(cfg)
return predictor.model, cfg | mariana200196/cartoon-face-detector | API/helper_functions.py | helper_functions.py | py | 3,002 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
... |
23490191054 | import sys
from loguru import logger as log
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from selenium.common.exceptions import ElementClickInterceptedException
from my_selenium import MySelenium
from locate104 import LocateOneZeroFour
# remove default level
log.remove()
# level DEBUG|INFO
log.add(sys.stderr, level="INFO", diagnose=True, backtrace=True)
class CrawlOneZeroFour(MySelenium, LocateOneZeroFour):
    """Crawler for the 104.com.tw mobile job-search site.

    Combines the generic Selenium helpers (MySelenium) with the 104-specific
    CSS locators (LocateOneZeroFour).
    """

    def __init__(self, set_browser, target_url, headless):
        # BUGFIX: this was misspelled ``__int__`` so it never ran as the
        # constructor; instantiation only worked by accident through the MRO.
        # NOTE(review): the commented-out CrawlOneZeroFour("no_driver") call at
        # the bottom of the file does not match this signature -- confirm.
        MySelenium.__init__(self, set_browser=set_browser, target_url=target_url, headless=headless)

    def go_search(self):
        """Open the target URL and wait until the search form is present."""
        __ERROR_CALL = "CrawlOneZeroFour.go_search"
        __SEARCH_LOCATE = self.css_locate['search_keyword']
        __SUBMIT = 'submit-buttons'
        # get url
        self.driver_get()
        self.wait_element(__SEARCH_LOCATE[__SUBMIT], f"[{__ERROR_CALL}:{__SUBMIT}]")

    def search_keyword(self, keys, value):
        """Type *keys* into the keyword box (search-type selection is disabled)."""
        # BUGFIX: the error label used to say "go_search" (copy-paste)
        __ERROR_CALL = "CrawlOneZeroFour.search_keyword"
        __SEARCH_LOCATE = self.css_locate['search_keyword']
        __SUBMIT = 'submit-buttons'
        __KEYWORD = 'keyword'
        __SEARCH_TYPE = 'search-type'
        find_keyword = self.find_element(__SEARCH_LOCATE[__KEYWORD], f"[{__ERROR_CALL}:{__KEYWORD}]")
        find_keyword.clear()
        find_keyword.send_keys(keys)
        # find_select_type = self.find_element(__SEARCH_LOCATE[__SEARCH_TYPE], f"[{__ERROR_CALL}:{__SEARCH_TYPE}]")
        # self.select_element(find_select_type, value)

    def job_cate(self, job_list):
        """Open the job-category panel and tick every category named in *job_list*."""
        __ERROR_CALL = "CrawlOneZeroFour.job_cate"
        __SEARCH_LOCATE = self.css_locate['job_cate']
        __UL_LIST = 'result-list'
        __SUBMIT = 'submit'
        js_document_job_cate = "document.getElementById('jobCateLauncher').click();"
        # show job cate
        self.execute_script(js_document_job_cate)
        self.wait_element(__SEARCH_LOCATE[__UL_LIST], f"[{__ERROR_CALL}:{__UL_LIST}]", ec_mode="visi")
        ul_list_element = self.find_element("ul.result-list", f"[{__ERROR_CALL}:{__UL_LIST}]")
        log.debug(ul_list_element)
        ul_items = ul_list_element.find_elements_by_tag_name('li')
        log.debug(ul_items)
        find_count = len(job_list)
        for item in ul_items:
            log.debug(item.text)
            if item.text in job_list:
                item.find_element_by_css_selector('.result-list [type="checkbox"]').click()
                find_count -= 1
            elif find_count == 0:
                # everything found: submit the panel and stop scanning
                self.find_element(__SEARCH_LOCATE[__SUBMIT], f"[{__ERROR_CALL}:{__SUBMIT}]").click()
                break

    def exclusion_condition(self, value):
        """Open the exclusion dropdown and select the option labelled *value*."""
        __ERROR_CALL = "CrawlOneZeroFour.exclusion_condition"
        __SEARCH_LOCATE = self.css_locate['exclusion_condition']
        __TITLE_BUTTON = 'title_button'
        __EXCLUDE_SELECT = 'exclude_select'
        __STYLED_SELECT = 'styled-select'
        self.wait_element(__SEARCH_LOCATE[__TITLE_BUTTON], f"[{__ERROR_CALL}:{__TITLE_BUTTON}]", ec_mode="visi")
        self.find_element(__SEARCH_LOCATE[__TITLE_BUTTON], f"[{__ERROR_CALL}:{__TITLE_BUTTON}]").click()
        self.wait_element(__SEARCH_LOCATE[__STYLED_SELECT], f"[{__ERROR_CALL}:{__STYLED_SELECT}]", ec_mode="visi")
        self.find_element(__SEARCH_LOCATE[__STYLED_SELECT], f"[{__ERROR_CALL}:{__STYLED_SELECT}]").click()
        find_exclude_select = self.find_element(__SEARCH_LOCATE[__EXCLUDE_SELECT], f"[{__ERROR_CALL}:{__EXCLUDE_SELECT}]")
        self.select_element(find_exclude_select, value)

    @property
    def whole_submit(self):
        """Click the main search submit button and return the result page source."""
        __ERROR_CALL = "CrawlOneZeroFour.whole_submit"
        __SEARCH_LOCATE = self.css_locate['whole_submit']
        __WHOLE_SUBMIT = 'whole_submit'
        __xWHOLE_SUBMIT = "//body//input[@id='searchSubmit']"
        __JOB_LIST = 'job-list'
        # self.find_element(__SEARCH_LOCATE[__WHOLE_SUBMIT], f"[{__ERROR_CALL}:{__WHOLE_SUBMIT}]").click()
        btn = self.find_element(__xWHOLE_SUBMIT, f"[{__ERROR_CALL}:{__xWHOLE_SUBMIT}]", selector='xpath')
        # clicking through JS avoids "element not interactable" on the mobile layout
        self.driver.execute_script("arguments[0].click();", btn)
        self.wait_element(__SEARCH_LOCATE[__JOB_LIST], f"[{__ERROR_CALL}:{__JOB_LIST}]", ec_mode="visi")
        return self.page_source()

    @staticmethod
    def write_my_soup(source):
        """Pretty-print *source* HTML into soup.html for offline inspection."""
        soup = BeautifulSoup(source, 'html5lib')
        prettify_soup = soup.prettify()
        with open("soup.html", mode="w", encoding="utf8") as soup_file:
            soup_file.write(prettify_soup)

    @staticmethod
    def read_my_soup():
        """Debug helper: reload soup.html and log its <main> element."""
        soup_data = BeautifulSoup(open("soup.html", encoding="utf8"), "html5lib")
        data1 = soup_data.find('main')
        # log.debug(soup.prettify())
        log.debug(data1)

    @staticmethod
    def pq_test(page):
        """Debug helper: log job titles and company links parsed from *page*."""
        doc = pq(page)
        for title in doc('.items .title').parent().parent().items():
            log.debug(title)
            log.debug('"' + title.attr['data-cno'] + '"')
            log.debug('"' + f"https://www.104.com.tw/company/{title.attr['data-cno']}?jobsource=m104_hotorder" + '"')
        # title = [re.sub(',', ' _ ', '"' + title.text() + '"') for title in doc('.items .title').items()]
        # title = ['"' + title.text() + '"' for title in doc('.items .title').items()]
        # log.debug(title)

    @staticmethod
    def pq_read_driver(page):
        """Extract title/company/location/links from *page* and append them to 104.csv."""
        doc = pq(page)
        # title = [re.sub(',', ' _ ', '"' + title.text() + '"') for title in doc('.items .title').items()]
        title = ['"' + title.text() + '"' for title in doc('.items .title').items()]
        log.debug(title)
        # company = [re.sub(',', ' _ ', '"' + company.text() + '"') for company in doc('li>a>.company').items()]
        company = ['"' + company.text() + '"' for company in doc('li>a>.company').items()]
        log.debug(company)
        # location = [re.sub(',', ' _ ', '"' + location.text() + '"') for location in doc('li>a>p:nth-child(4)').items()]
        location = ['"' + location.text() + '"' for location in doc('li>a>p:nth-child(4)').items()]
        log.debug(location)
        title_link = [f"\"https://m.104.com.tw/{link.attrib['href']}\"" for link in doc('ul.job-list > li > a')]
        log.debug(title_link)
        com_link = ['"' + f"https://www.104.com.tw/company/{title.attr['data-cno']}?jobsource=m104_hotorder" + '"' for title in
                    doc('.items .title').parent().parent().items()]
        log.debug(com_link)
        # one CSV row per job posting, fields already quoted above
        res = [[t, tl, c, cl, l] for (t, tl, c, cl, l) in zip(title, title_link, company, com_link, location)]
        log.debug(res)
        with open("104.csv", mode='a', encoding='utf8') as f:
            for sub_index in range(len(res)):
                log.info(','.join(res[sub_index]))
                f.write(','.join(res[sub_index]) + '\n')

    def next_page(self):
        """Scroll to the bottom, click "next page" and return the new page source.

        Exits the process when the next-page control never becomes visible.
        """
        __ERROR_CALL = "CrawlOneZeroFour.next_page"
        __SEARCH_LOCATE = self.css_locate['whole_submit']
        __NEXT_PAGE = 'next_page'
        __UL_LIST = 'job-list'
        self.page_scroll_bottom()
        check_next_page = self.wait_element(__SEARCH_LOCATE[__NEXT_PAGE], f"[{__ERROR_CALL}:{__NEXT_PAGE}]", ec_mode="vis", seconds=3)
        log.debug(check_next_page)
        if not check_next_page:
            exit("check_next_page fail")
        self.find_element(__SEARCH_LOCATE[__NEXT_PAGE], f"[{__ERROR_CALL}:{__NEXT_PAGE}]").click()
        return self.page_source()

    def search_area(self, area):
        """Open the Taiwan area menu and tick every region named in *area*."""
        __ERROR_CALL = "CrawlOneZeroFour.search_area"
        __SEARCH_LOCATE = self.css_locate['search_area']
        __MENU_LIST = 'area_menu'
        __AREA_TW = 'area_tw'
        __TW_LABEL = 'tw_label'
        __AREA_BUTTON = 'area_button'
        js_search_area = "document.getElementById('searchAreaFake').click();"
        # show job cate
        self.execute_script(js_search_area)
        menu_list = self.wait_element(__SEARCH_LOCATE[__MENU_LIST], f"[{__ERROR_CALL}:{__MENU_LIST}]", ec_mode="visi")
        if not menu_list:
            exit("Wait menu_list fail")
        self.find_element(__SEARCH_LOCATE[__AREA_TW], f"[{__ERROR_CALL}:{__AREA_TW}]").click()
        tw_area_list = self.find_element('#area-menu .scd-cate:nth-child(2) > ul:last-child', f"[{__ERROR_CALL}:'tw_area_list']")
        log.debug(tw_area_list)
        tw_label_items = tw_area_list.find_elements_by_tag_name('label')
        # tw_label_items = tw_area_list.find_elements_by_css_selector('.active > span')
        log.debug(tw_label_items)
        find_count = len(area)
        for item in tw_label_items:
            log.debug(item.text)
            if item.text in area:
                try:
                    item.find_element_by_css_selector('.scd-class [type="checkbox"]').click()
                    find_count -= 1
                except ElementClickInterceptedException:
                    # checkbox hidden below the fold: keep scrolling until visible
                    # NOTE(review): the xpath interpolates the whole *area* list,
                    # not the single item text -- confirm intended.
                    wait_checkbox = 0
                    while not wait_checkbox:
                        log.debug("do scroll bottom")
                        self.page_scroll_bottom()
                        wait_checkbox = self.wait_element(f"//span[text()='{area}']", f"[{__ERROR_CALL}:{__MENU_LIST}]", ec_mode="visi",
                                                          selector='xpath')
                        log.debug(wait_checkbox)
                    item.find_element_by_css_selector('.scd-class [type="checkbox"]').click()
                    find_count -= 1
            elif find_count == 0:
                self.find_element(__SEARCH_LOCATE[__AREA_BUTTON], f"[{__ERROR_CALL}:{__AREA_BUTTON}]").click()
                break
if __name__ == "__main__":
    # Search configuration: keywords (two passes), work areas, job categories
    # and the "exclude dispatch work" filter.
    url = "https://m.104.com.tw/search"
    keyword_keys = "auto in test 測試 SDET 程式"
    keyword_keys2 = "auto 測試 in test 自動 SDET Quality 品質 QA SET"
    # work_area = ['台中市'] # for test
    work_area = ['台北市'] # 新北市
    option_value = "全職"
    job_cate_list = ["測試人員", "軟體設計工程師", "軟韌體測試工程師", "電腦組裝/測試"]
    exclude_value = "排除派遣"
    # Drive the crawl: the call order matters -- each step fills in one part
    # of the search form before the final submit.
    ts_crawlonezerofour = CrawlOneZeroFour("chrome", url, headless=1)
    ts_crawlonezerofour.go_search()
    ts_crawlonezerofour.search_area(work_area)
    ts_crawlonezerofour.exclusion_condition(exclude_value)
    ts_crawlonezerofour.search_keyword(keyword_keys, option_value)
    ts_crawlonezerofour.job_cate(job_cate_list)
    ts_crawlonezerofour.search_keyword(keyword_keys2, option_value)
    current_page = ts_crawlonezerofour.whole_submit
    ts_crawlonezerofour.pq_read_driver(current_page)
    # Walk every result page, appending rows to 104.csv as we go
    more_page = ts_crawlonezerofour.next_page()
    while more_page:
        ts_crawlonezerofour.pq_read_driver(more_page)
        more_page = ts_crawlonezerofour.next_page()
    ts_crawlonezerofour.driver_close()
    # ts_crawlonezerofour = CrawlOneZeroFour("no_driver")
    # ts_crawlonezerofour.read_csv()
| brian-hsu/crawl104 | crawl104.py | crawl104.py | py | 10,812 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "loguru.logger.remove",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "loguru.logger.add",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
... |
15760438393 | #!/usr/bin/python
from flask import Flask, render_template
from constant import constants
import random
import urllib3
import json
__author__ = "Daniel Fernando Santos Bustos"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Daniel Santos"
__email__ = "dfsantosbu@unal.edu.co"
__status__ = "Development"
app=Flask(__name__)
# NOTE(review): session secret is hard-coded in source control; in any real
# deployment it should come from an environment variable or config file.
app.secret_key = '15%&*^&^GJHYTDT24623/*@!@#G@JH$%+9'
def getRandomColors(n=30):
    """Return *n* random hex colour strings such as '#0FA3C2'.

    Each RGB channel is drawn from 0-200 rather than 0-255, keeping the
    colours dark enough to read against a light background.
    """
    def channel():
        # named helper instead of a lambda rebound on every loop iteration (PEP 8 E731)
        return random.randint(0, 200)
    return ['#%02X%02X%02X' % (channel(), channel(), channel()) for _ in range(n)]
@app.route('/')
def index():
    """Landing page: fetch the user list from the backing API and render it,
    pairing each user with a randomly generated colour."""
    endpoint = constants["urlUsers"]
    pool = urllib3.PoolManager()
    reply = pool.request('GET', endpoint)
    users = json.loads(reply.data.decode('utf-8'))
    return render_template('index.html', users=users, colors=getRandomColors())
if __name__ == "__main__":
    # The previous getRandomColors(10) call here discarded its result and had
    # no effect, so it has been removed.
    # 0.0.0.0 exposes the dev server on all interfaces.
    app.run(host='0.0.0.0')
| xdanielsb/blog-flask | app.py | app.py | py | 985 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "constant.constants",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "urllib3.PoolManager",
... |
72361760035 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File :curd.py
@Description :User permission queries (previous header "price.py / price-table
              query" was a stale copy-paste from another module)
@DateTime :2023-01-12 14:07
@Author :Jay Zhang
"""
from sqlalchemy.orm import Session
from Permission.models import Permission
from Tracking.models import Tracking
from User.models import User, Employee
def get_permission(db: Session, user: User):
    """Return the permission list of *user*.

    BUGFIX: this previously returned the User *class* itself (``return User``),
    ignoring both arguments.  The sibling get_all_employee_permission() reads
    ``user.permission``, so that attribute is returned here; the ``db`` session
    parameter is kept for interface compatibility.
    """
    return user.permission
def get_all_employee_permission(db: Session, dd_db: Session):
    """Collect the permission list for every employee with a non-empty employee_id.

    Employees come from *dd_db*; permissions come from the matching User row in
    *db*.  Employees without a User row get an empty permission list.
    """
    results = []
    employees = dd_db.query(Employee).filter(Employee.employee_id != "").all()
    for employee in employees:
        user = db.query(User).filter(User.employee_id == employee.employee_id).first()
        results.append({
            "employee_name": employee.name,
            "employee_id": employee.employee_id,
            "permission": user.permission if user else [],
        })
    return results
| zxj17815/progress_tracking | User/curd.py | curd.py | py | 1,082 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "User.models.User",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "User.models.User",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.o... |
17571257794 | # Import Files
import requests
import json
# Read the Steam Web API key (line 1) and this account's Steam ID (line 2)
# from a local untracked file so credentials stay out of the repository.
with open("details.txt") as f:
    lines = f.readlines()
    api_token = lines[0].rstrip()
    account_id = lines[1].rstrip()
def get_account_info(steam_ids):
    """
    Returns the results of a steam api get request containing a summary of one or more steam users.
    :param steam_ids: A list containing the steam ids of all steam accounts you want info for.
    :return: A list containing information about one or more Steam profiles. None if request fails
    """
    url = ("http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/"
           "?key={0}&steamids={1}").format(api_token, steam_ids)
    reply = requests.get(url)
    # Any non-200 answer (bad key, rate limit, ...) is reported as None
    if reply.status_code != 200:
        return None
    return json.loads(reply.content.decode('utf-8'))
def get_friends(steam_id):
    """
    Returns the results of a steam api get request containing information about a users friend list.
    :param steam_id: A string containing the steam ID of the user to check.
    :return: The response of the aforementioned get request. None if request fails
    """
    url = ("http://api.steampowered.com/ISteamUser/GetFriendList/v0001/"
           "?key={0}&steamid={1}&relationship=friend").format(api_token, steam_id)
    reply = requests.get(url)
    if reply.status_code != 200:
        return None
    return json.loads(reply.content.decode('utf-8'))
def get_steam_name(steam_id):
    """
    Returns a user's steam display name given their steam ID
    :param steam_id: The ID of the steam user as a string
    :return: A string containing the user's steam display name. None if request fails.
    """
    info = get_account_info(steam_id)
    return info["response"]["players"][0]["personaname"] if info else None
def get_friend_ids(steam_id):
    """
    Returns a list of steam IDs corresponding to a user's friend list.
    :param steam_id: The user who's friend list should be checked.
    :return: A list of strings containing the steam ids of the user's friends. None if request fails.
    """
    output = []
    data = get_friends(steam_id)
    if not data:
        return None
    for friend in data["friendslist"]["friends"]:
        # BUGFIX: the summary lookup used to receive the whole friend dict;
        # the API expects the bare steam ID string.
        info = get_account_info(friend["steamid"])
        # communityvisibilitystate == 3 marks a public (visible) profile;
        # friends whose summary request failed are skipped instead of crashing.
        if info and info["response"]["players"][0]["communityvisibilitystate"] == 3:
            output.append(friend["steamid"])
    return output
def get_owned_games(steam_id):
    """
    Returns the results of a steam api get request containing information about a user's owned games.
    :param steam_id: The steam user to check.
    :return: The results of the aforementioned get request. None if request fails.
    """
    url = ("http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/"
           "?key={0}&steamid={1}&include_appinfo=1").format(api_token, steam_id)
    reply = requests.get(url)
    if reply.status_code != 200:
        return None
    return json.loads(reply.content.decode('utf-8'))
def get_game_list(steam_id):
    """
    Returns a list of owned games given a user's steam ID.
    :param steam_id: A string containing the user's steam ID.
    :return: A list containing the names of all of the user's owned games.
    """
    data = get_owned_games(steam_id)
    if not data:
        return None
    return [game["name"] for game in data["response"]["games"]]
if __name__ == "__main__":
    # Interactive mode: list this account's (public) friends, let the user pick
    # some by number, then print the games all selected players own in common.
    print("Friends:")
    friends = {}
    for i, value in enumerate(get_friend_ids(account_id)):
        friends[i] = value
        print(i, get_steam_name(value))
    choices = tuple(input("List player numbers to compare (separated by a comma): ").replace(" ", "").split(","))
    # Start from this account's library and intersect with each chosen friend's
    games = set(get_game_list(account_id))
    for i in choices:
        games = games.intersection(set(get_game_list(friends[int(i)])))
    for game in games:
        print(game)
| Exist3/SteamLibraryCompare | steam_api_scrape.py | steam_api_scrape.py | py | 3,993 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": ... |
13282499257 | from __future__ import division
import numpy as np
import ROOT as r
import math
import os,sys
from scipy.integrate import quad, dblquad
from darkphoton import *
# proton mass
mProton = 0.938272081 # GeV/c - PDG2016
protonEnergy = 400. # GeV/c
protonMomentum = math.sqrt(protonEnergy*protonEnergy - mProton*mProton)
#VDM FORM FACTOR
def rhoFormFactor(m):
    """Proton form factor |F(m)| in the vector-meson-dominance model.

    Sum of rho- and omega-family Breit-Wigner poles; couplings, masses and
    widths taken from https://arxiv.org/abs/0910.5589 via the BdNMC code
    (https://github.com/pgdeniverville/BdNMC/blob/master/src/Proton_Brem_Distribution.cpp).
    *m* is an invariant mass in GeV.
    """
    # (coupling, pole mass [GeV], width [GeV]): rho, rho', rho'', omega, omega', omega''
    poles = (
        (0.6165340033101271, 0.77, 0.15),
        (0.22320420111672623, 1.25, 0.3),
        (-0.33973820442685326, 1.45, 0.5),
        (1.0117544786579074, 0.77, 0.0085),
        (-0.8816565944110686, 1.25, 0.3),
        (0.3699021157531611, 1.45, 0.5),
    )
    total = 0
    for coupling, mass, width in poles:
        total += coupling * mass**2 / (mass**2 - m**2 - mass * width * 1j)
    return abs(total)
# useful functions
def energy(p, m):
    """Relativistic energy E = sqrt(p^2 + m^2) for momentum *p* and mass *m* (GeV)."""
    return math.sqrt(p ** 2 + m ** 2)
def penaltyFactor(m):
    """Penalty factor for high masses: dipole form factor in the proton-A' vertex.

    *m* is the dark-photon mass in GeV.  Returns (m^2/0.71)^-4 above the
    dipole scale m^2 = 0.71 GeV^2, and 1 below it.

    The original body had a second bare triple-quoted string (" m in GeV ")
    which was a no-op statement, not documentation; it is folded in here.
    """
    m2 = m * m
    if m2 > 0.71:
        return math.pow(m2 / 0.71, -4)
    return 1
def zeta(p, theta):
    """ Fraction of the proton momentum carried away by the paraphoton in the beam direction """
    # protonMomentum is the 400 GeV beam momentum defined at module level
    return p / (protonMomentum * math.sqrt(theta*theta + 1.))
def pTransverse(p, theta):
    """ Paraphoton transverse momentum in the lab frame """
    # pT = P_beam * theta * zeta -- presumably the small-angle form; confirm
    # against the FWW reference before changing
    return protonMomentum*theta*zeta(p,theta)
def ptSquare(p, theta):
    """ Square paraphoton transverse momentum in the lab frame """
    # convenience wrapper: pT**2
    return pow(pTransverse(p,theta), 2.)
def H(p, theta, mDarkPhoton):
    """ A kinematic term """
    # H = pT^2 + (1 - z)*mA'^2 + z^2*mp^2; appears in the denominators of wba()
    return ptSquare(p,theta) + (1.-zeta(p,theta))*mDarkPhoton*mDarkPhoton + pow(zeta(p,theta),2.)*mProton*mProton
def wba(p, theta, mDarkPhoton, epsilon):
    """ Cross section weighting function in the Fermi-Weizsaeker-Williams approximation """
    # alphaQED comes from the `darkphoton` star import -- assumed to be the
    # fine-structure constant; epsilon is the kinetic-mixing parameter.
    const = epsilon*epsilon*alphaQED / (2.*math.pi*H(p,theta,mDarkPhoton))
    h2 = pow(H(p,theta,mDarkPhoton),2.)
    oneMinusZSquare = pow(1.-zeta(p,theta),2.)
    mp2 = mProton*mProton
    mA2 = mDarkPhoton*mDarkPhoton
    # p1..p4 are the four terms of the FWW weight; the commented line below
    # preserves an earlier (replaced) version of the p3 term.
    p1 = (1. + oneMinusZSquare) / zeta(p,theta)
    p2 = ( 2. * zeta(p,theta) * (1.-zeta(p,theta)) * ( (2.*mp2 + mA2)/ H(p,theta,mDarkPhoton)
                                                       - pow(zeta(p,theta),2.)*2.*mp2*mp2/h2 ) )
    #p3 = 2.*zeta(p,theta)*(1.-zeta(p,theta))*(zeta(p,theta)+oneMinusZSquare)*mp2*mA2/h2
    p3 = 2.*zeta(p,theta)*(1.-zeta(p,theta))*(1+oneMinusZSquare)*mp2*mA2/h2
    p4 = 2.*zeta(p,theta)*oneMinusZSquare*mA2*mA2/h2
    return const*(p1-p2+p3+p4)
def sigma(s):  # s in GeV^2 ---> sigma in mb
    """Parametrised total cross section as a function of s (GeV^2), in mb."""
    a1, a2, a3, a4, a5, a6, a7 = 35.45, 0.308, 28.94, 33.34, 0.545, 0.458, 42.53
    logTerm = a2 * math.log(s / a3) ** 2.
    powerTermA = a4 * (1. / s) ** a5
    powerTermB = a7 * (1. / s) ** a6
    return a1 + logTerm - powerTermA + powerTermB
def es(p, mDarkPhoton):
    """ s(p,mA) """
    # reduced s after emitting an A' of momentum p: 2*mp*(E_beam - E_A')
    return 2.*mProton*(energy(protonMomentum,mProton)-energy(p,mDarkPhoton))
def sigmaRatio(p, mDarkPhoton):
    """ sigma(s') / sigma(s) """
    # rescales the cross section from the full beam s to the reduced s after A' emission
    return sigma(es(p,mDarkPhoton)) / sigma(2.*mProton*energy(protonMomentum,mProton))
def dNdZdPtSquare(p, mDarkPhoton, theta, epsilon):
    """ Differential A' rate per p.o.t. as a function of Z and Pt^2 """
    # FWW weight times the cross-section rescaling
    return sigmaRatio(p,mDarkPhoton)*wba(p,theta,mDarkPhoton,epsilon)
def dPt2dTheta(p, theta):
    """ Jacobian Pt^2->theta """
    # d(pT^2)/d(theta) = 2 * theta * z^2 * P_beam^2
    z2 = pow(zeta(p,theta),2.)
    return 2.*theta*z2*protonMomentum*protonMomentum
def dZdP(p, theta):
    """ Jacobian z->p """
    # dz/dp for z = p / (P_beam * sqrt(theta^2 + 1))
    return 1./( protonMomentum* math.sqrt(theta*theta+1.) )
def dNdPdTheta(p, theta, mDarkPhoton, epsilon):
    """ Differential A' rate per p.o.t. as a function of P and theta """
    # change of variables (z, pT^2) -> (p, theta) via the two Jacobians above
    diffRate = dNdZdPtSquare(p,mDarkPhoton,theta,epsilon) * dPt2dTheta(p,theta) * dZdP(p,theta)
    return math.fabs(diffRate) # integrating in (-pi, pi)...
def pMin(mDarkPhoton):
    # lower integration bound: at least 14% of the beam momentum, never below the A' mass
    return max(0.14*protonMomentum, mDarkPhoton)
def pMax(mDarkPhoton):
    #return min(0.86*protonMomentum, math.sqrt( (energy(protonMomentum,mProton)**2. - mDarkPhoton**2.) - mDarkPhoton**2.))
    # upper integration bound: kinematic limit from the available beam energy
    return math.sqrt( (energy(protonMomentum,mProton)**2. - mDarkPhoton**2.) - mDarkPhoton**2.)
def prodRate(mDarkPhoton, epsilon, tmin = -0.5 * math.pi, tmax = 0.5 * math.pi):
    """ dNdPdTheta integrated over p and theta """
    # dblquad: theta is the outer variable, p the inner one
    integral = dblquad( dNdPdTheta, # integrand
                        tmin, tmax, # theta boundaries (2nd argument of integrand)
                        lambda x: pMin(mDarkPhoton), lambda x: pMax(mDarkPhoton), # p boundaries (1st argument of integrand)
                        args=(mDarkPhoton, epsilon) ) # extra parameters to pass to integrand
    # keep only the integral value; the quadrature error estimate is discarded
    return integral[0]
# total production rate of A'
#norm = prodRate(1.1,3.e-7) #mDarkPhoton,epsilon)
# number of A' produced
# numDarkPhotons = int(math.floor(norm*protonFlux))
#
# print
# print "Epsilon \t %s"%epsilon
# print "mDarkPhoton \t %s"%mDarkPhoton
#print "A' production rate per p.o.t: \t %.8g"%norm
# print "Number of A' produced in SHiP: \t %.8g"%numDarkPhotons
def normalisedProductionPDF(p, theta, mDarkPhoton, epsilon, norm):
    """ Probability density function for A' production in SHIP """
    # norm is the total production rate (see prodRate), so the PDF integrates to 1
    return (1. / norm) * dNdPdTheta(p, theta, mDarkPhoton, epsilon)
def hProdPDF(mDarkPhoton, epsilon, norm, binsp, binstheta, tmin = -0.5 * math.pi, tmax = 0.5 * math.pi, suffix=""):
    """ Histogram of the PDF for A' production in SHIP """
    # Build the (p, theta) sampling grids; momenta excludes the endpoint so
    # each sample sits inside a bin.
    angles = np.linspace(tmin,tmax,binstheta).tolist()
    anglestep = 2.*(tmax - tmin)/binstheta
    momentumStep = (pMax(mDarkPhoton)-pMin(mDarkPhoton))/(binsp-1)
    momenta = np.linspace(pMin(mDarkPhoton),pMax(mDarkPhoton),binsp,endpoint=False).tolist()
    # 2D PDF histogram plus its two 1D projections (ROOT objects)
    hPDF = r.TH2F("hPDF_eps%s_m%s"%(epsilon,mDarkPhoton) ,"hPDF_eps%s_m%s"%(epsilon,mDarkPhoton),
                  binsp,pMin(mDarkPhoton)-0.5*momentumStep,pMax(mDarkPhoton)-0.5*momentumStep,
                  binstheta,tmin-0.5*anglestep,tmax-0.5*anglestep)
    hPDF.SetTitle("PDF for A' production (m_{A'}=%s GeV, #epsilon =%s)"%(mDarkPhoton,epsilon))
    hPDF.GetXaxis().SetTitle("P_{A'} [GeV]")
    hPDF.GetYaxis().SetTitle("#theta_{A'} [rad]")
    hPDFtheta = r.TH1F("hPDFtheta_eps%s_m%s"%(epsilon,mDarkPhoton),
                       "hPDFtheta_eps%s_m%s"%(epsilon,mDarkPhoton),
                       binstheta,tmin-0.5*anglestep,tmax-0.5*anglestep)
    hPDFp = r.TH1F("hPDFp_eps%s_m%s"%(epsilon,mDarkPhoton),
                   "hPDFp_eps%s_m%s"%(epsilon,mDarkPhoton),
                   binsp,pMin(mDarkPhoton)-0.5*momentumStep,pMax(mDarkPhoton)-0.5*momentumStep)
    hPDFp.GetXaxis().SetTitle("P_{A'} [GeV]")
    hPDFtheta.GetXaxis().SetTitle("#theta_{A'} [rad]")
    # Fill every (p, theta) grid point with the normalised PDF weight
    for theta in angles:
        for p in momenta:
            w = normalisedProductionPDF(p,theta,mDarkPhoton,epsilon,norm)
            hPDF.Fill(p,theta,w)
            hPDFtheta.Fill(theta,w)
            hPDFp.Fill(p,w)
    # Requires the *calling* script to define `outputDir` at module level
    hPdfFilename = sys.modules['__main__'].outputDir+"/ParaPhoton_eps%s_m%s%s.root"%(epsilon,mDarkPhoton,suffix)
    outfile = r.TFile(hPdfFilename,"recreate")
    #weight = hPDF.Integral("width")
    #print "Weight = %3.3f"%weight
    #hPDF.Scale(1./weight)
    hPDF.Write()
    hPDFp.Write()
    hPDFtheta.Write()
    outfile.Close()
    del angles
    del momenta
    return hPDF
| ShipSoft/FairShip | python/proton_bremsstrahlung.py | proton_bremsstrahlung.py | py | 7,527 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 48,
"... |
26855426781 | # This program is in the public domain
# Author: Paul Kienzle
"""
SNS data loaders
The following instruments are defined::
Liquids, Magnetic
These are :class:`resolution.Pulsed` classes tuned with
default instrument parameters and loaders for reduced SNS data.
See :mod:`resolution` for details.
"""
import re
import math
import numpy as np
from bumps.data import parse_file
from .rebin import rebin
from .instrument import Pulsed
from . import resolution
from .probe import make_probe
## Estimated intensity vs. wavelength for liquids reflectometer
LIQUIDS_FEATHER = np.array([
(2.02555, 20.6369),
(2.29927, 23.6943),
(2.57299, 23.6943),
(2.87409, 21.1146),
(3.22993, 15.5732),
(3.58577, 12.8981),
(4.07847, 9.4586),
(4.5438, 6.59236),
(5.11861, 4.68153),
(5.7208, 3.05732),
(6.37774, 1.91083),
(7.19891, 1.24204),
(8.04745, 0.955414),
(9.06022, 0.573248),
(10.1825, 0.477707),
(11.4142, 0.382166),
(12.8102, 0.191083),
(14.3431, 0.286624),
]).T
def load(filename, instrument=None, **kw):
    """
    Return a probe for SNS data.

    *filename* is a reduced SNS data file; *instrument* defaults to a generic
    Pulsed instrument.  Keyword arguments override fields read from the file
    header.  Raises IOError when the column signature is not recognised.
    """
    if instrument is None:
        instrument=Pulsed()
    header, data = parse_sns_file(filename)
    header.update(**kw) # calling parameters override what's in the file.
    #print "\n".join(k+":"+str(v) for k, v in header.items())
    # Guess what kind of data we have from the column names
    if has_columns(header, ('Q', 'dQ', 'R', 'dR', 'L')):
        probe = QRL_to_data(instrument, header, data)
    elif has_columns(header, ('time_of_flight', 'data', 'Sigma')):
        probe = TOF_to_data(instrument, header, data)
    else:
        raise IOError("Unknown columns: "+", ".join(header['columns']))
    probe.title = header['title']
    probe.date = header['date']
    probe.instrument = header['instrument']
    return probe
def has_columns(header, v):
    """Return True when the header's column names match *v* exactly
    (same length, same names, same order)."""
    return tuple(header['columns']) == tuple(v)
def QRL_to_data(instrument, header, data):
    """
    Convert data to T, L, R

    Columns are (Q, dQ, R, dR, L).  When the header carries an angle and slit
    information the instrument builds the probe; otherwise T/dT are inferred
    from the first Q point.
    """
    Q, dQ, R, dR, L = data
    dL = resolution.binwidths(L)
    if 'angle' in header and 'slits_at_Tlo' in header:
        # NOTE(review): header.pop('T') is the *default-argument* expression,
        # so it runs unconditionally and removes 'T' from the header even when
        # 'angle' is present -- confirm this side effect is intended.
        T = header.pop('angle', header.pop('T', None))
        probe = instrument.probe(L=L, dL=dL, T=T, data=(R, dR),
                                 **header)
    else:
        T = resolution.QL2T(Q[0], L[0])
        dT = resolution.dQdL2dT(Q[0], dQ[0], L[0], dL[0])
        probe = make_probe(T=T, dT=dT, L=L, dL=dL, data=(R, dR),
                           **header)
    return probe
def TOF_to_data(instrument, header, data):
    """
    Convert TOF data to neutron probe.
    Wavelength is set from the average of the times at the edges of the
    bins, not the average of the wavelengths. Wavelength resolution is
    set assuming the wavelength at the edges of the bins defines the
    full width at half maximum.
    The correct answer is to look at the wavelength distribution within
    the bin including effects of pulse width and intensity as a function
    wavelength and use that distribution, or a gaussian approximation
    thereof, when computing the resolution effects.
    """
    TOF, R, dR = data
    # TOF holds bin *edges*; convert edges and bin centres to wavelength
    Ledge = resolution.TOF2L(instrument.d_moderator, TOF)
    L = resolution.TOF2L(instrument.d_moderator, (TOF[:-1]+TOF[1:])/2)
    dL = (Ledge[1:]-Ledge[:-1])/2.35 # FWHM is 2.35 sigma
    # drop the trailing point so R/dR align with the bin-centre wavelengths
    R = R[:-1]
    dR = dR[:-1]
    # keep only finite points whose bin lies fully inside the usable TOF window
    min_time, max_time = header.get('TOF_range', instrument.TOF_range)
    keep = (np.isfinite(R)
            & np.isfinite(dR)
            & (TOF[:-1] >= min_time)
            & (TOF[1:] <= max_time))
    L, dL, R, dR = [v[keep] for v in (L, dL, R, dR)]
    T = np.array([header.get('angle', header.get('T', None))], 'd')
    T, dT, L, dL = instrument.resolution(L=L, dL=dL, T=T, **header)
    probe = make_probe(T=T, dT=dT, L=L, dL=dL, data=(R, dR), **header)
    return probe
def parse_sns_file(filename):
    """
    Parse SNS reduced data, returning *header* and *data*.
    *header* dictionary of fields such as 'date', 'title', 'instrument'
    *data* 2D array of data
    """
    raw_header, data = parse_file(filename)
    header = {}
    # guess instrument from the original file name recorded in the '#F' field
    original_file = raw_header.get('F', 'unknown')
    if 'REF_L' in original_file:
        instrument = 'Liquids'
    elif 'REF_M' in original_file:
        instrument = 'Magnetic'
    else:
        instrument = 'unknown'
    header['instrument'] = instrument
    header['filename'] = original_file
    header['radiation'] = 'neutron'
    # Plug in default instrument values for slits
    if 'instrument' in header and header['instrument'] in INSTRUMENTS:
        instrument = INSTRUMENTS[header['instrument']]
        header['d_s1'] = instrument.d_s1
        header['d_s2'] = instrument.d_s2
    # Date-time field for the file
    header['date'] = raw_header.get('D', '')
    # Column names and units, e.g. "Q(1/A) dQ(1/A) ..." in the '#L' field
    columnpat = re.compile(r'(?P<name>\w+)\((?P<units>[^)]*)\)')
    columns, units = zip(*columnpat.findall(raw_header.get('L', '')))
    header['columns'] = columns
    header['units'] = units
    # extra information like title, angle, etc. from "Name: value" comments
    commentpat = re.compile(r'(?P<name>.*)\s*:\s*(?P<value>.*)\s*\n')
    comments = dict(commentpat.findall(raw_header.get('C', '')))
    header['title'] = comments.get('Title', '')
    header['description'] = comments.get('Notes', '')
    # parse values of the form "Long Name: (value, 'units')" in comments
    valuepat = re.compile(r"[(]\s*(?P<value>.*)\s*, \s*'(?P<units>.*)'\s*[)]")
    def parse_value(valstr):
        # helper: "(0.0, 'degree')" -> (0.0, 'degree')
        d = valuepat.match(valstr).groupdict()
        return float(d['value']), d['units']
    if 'Detector Angle' in comments:
        header['angle'], _ = parse_value(comments['Detector Angle'])
    return header, data
def write_file(filename, probe, original=None, date=None,
               title=None, notes=None, run=None, charge=None):
    """
    Save probe as SNS reduced file.

    *original* defaults to *filename* and *date* to the current time; the
    remaining optional fields are written as '#C' comments when given.
    """
    ## Example header
    #F /SNSlocal/REF_L/2007_1_4B_SCI/2895/NeXus/REF_L_2895.nxs
    #E 1174593434.7
    #D 2007-03-22 15:57:14
    #C Run Number: 2895
    #C Title: MK NR4 dry 032007_No2Rep0
    #C Notes: MK NR 4 DU 53 dry from air
    #C Detector Angle: (0.0, 'degree')
    #C Proton Charge: 45.3205833435
    #S 1 Spectrum ID ('bank1', (85, 151))
    #N 3
    #L time_of_flight(microsecond) data() Sigma()
    from datetime import datetime as dt
    parts = []
    if original is None: original = filename
    if date is None:
        date = dt.strftime ( dt.now(), '%Y-%m-%d %H:%M:%S')
    parts.append('#F '+original)
    parts.append('#D '+date)
    if run is not None:
        parts.append('#C Run Number: %s'%run)
    if title is not None:
        parts.append('#C Title: %s'%title)
    if notes is not None:
        parts.append('#C Notes: %s'%notes)
    parts.append("#C Detector Angle: (%g, 'degree')"%probe.T[0])
    if charge is not None:
        parts.append('#C Proton Charge: %s'%charge)
    parts.append('')
    parts.append('#N 5')
    parts.append('#L Q(1/A) dQ(1/A) R() dR() L(A)')
    parts.append('')
    header = "\n".join(parts)
    # delegate the numeric columns to the probe itself
    probe.write_data(filename, columns=['Q', 'dQ', 'R', 'dR', 'L'],
                     header=header)
class SNSData(object):
    """Mixin giving instrument classes a load() that parses reduced SNS
    files using the instance itself as the instrument definition."""
    def load(self, filename, **kw):
        return load(filename, instrument=self, **kw)
# TODO: print "Insert correct slit distances for Liquids and Magnetic"
class Liquids(SNSData, Pulsed):
    """
    Loader for reduced data from the SNS Liquids instrument.
    """
    instrument = "Liquids"
    radiation = "neutron"
    # estimated intensity-vs-wavelength curve (see LIQUIDS_FEATHER above)
    feather = LIQUIDS_FEATHER
    # usable wavelength band; alternative sub-bands kept below for reference
    wavelength = 2., 15.
    #wavelength = 0.5, 5
    #wavelength = 5.5, 10
    #wavelength = 10.5, 15
    dLoL = 0.02  # relative wavelength resolution dL/L
    # slit distances -- units per the instrument definition; TODO confirm
    d_s1 = 230.0 + 1856.0
    d_s2 = 230.0
    d_moderator = 14.850 # moderator to detector distance
    TOF_range = (6000, 60000)  # usable time-of-flight window (see TOF_to_data)
class Magnetic(SNSData, Pulsed):
    """
    Loader for reduced data from the SNS Magnetic instrument.
    """
    instrument = "Magnetic"
    radiation = "neutron"
    wavelength = 1.8, 14  # usable wavelength band
    dLoL = 0.02  # relative wavelength resolution dL/L
    # slit distances converted from inches (factor 2.54); TODO confirm units
    d_s1 = 75*2.54
    d_s2 = 14*2.54
# Instrument names assigned by reflpak
INSTRUMENTS = {
'Liquids': Liquids,
'Magnetic': Magnetic,
}
# ===== utils ==============
def intensity_from_spline(Lrange, dLoL, feather):
    """Rebin the measured *feather* onto logarithmic wavelength steps of
    relative width *dLoL* spanning *Lrange*; returns (bin centres, intensities)."""
    L0, L1 = Lrange
    # number of log-spaced steps needed to cover [L0, L1] with ratio (1+dLoL)
    n = math.ceil(math.log(L1/L0)/math.log(1+dLoL))
    L = L0*(1+dLoL)**np.arange(0, n)
    return (L[:-1]+L[1:])/2, rebin(feather[0], feather[1], L)
def boltzmann_feather(L, counts=100000, range=None):
    """Expected count rate at each wavelength in *L* for a total of *counts*.

    The TOF feather is modelled as a Boltzmann distribution smoothed with a
    Gaussian kernel -- a cosmetic approximation, not a calibrated SNS curve.
    The *range* argument is accepted for interface compatibility but unused.
    """
    import scipy.stats
    kernel = np.exp(-np.linspace(-4, 4, 10) ** 2 / 10)
    support = np.arange(12, 85)
    base = scipy.stats.boltzmann.pmf(support, 0.05, counts, loc=16)
    smoothed = np.convolve(base, kernel, mode='same')
    #if range is None: range = L[0], L[-1]
    #if range[0] > range[1]: range = range[::-1]
    #range = range[0]*(1-1e-15), range[1]*(1+1e-15)
    #z = np.linspace(range[0], range[1], len(BGz))
    # map the smoothed shape onto the liquids wavelength band 2..16.5
    grid = np.linspace(2, 16.5, len(smoothed))
    weights = np.interp(L, grid, smoothed, left=0, right=0)
    # scale so the returned intensities sum to *counts*
    return weights / sum(weights) * counts
| reflectometry/refl1d | refl1d/snsdata.py | snsdata.py | py | 9,380 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "instrument.Pulsed",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "probe.title",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "probe.date",
"line... |
6164148530 | import numpy as np
from string import punctuation
from random import shuffle
from gensim.test.utils import get_tmpfile
import gensim
import pandas as pd
from gensim.models.word2vec import Word2Vec
from gensim.models import KeyedVectors
import time
from nltk.tokenize import TweetTokenizer
def load1_6million(path, tokenizer=gensim.utils.simple_preprocess, limit=np.inf):
    """Load a Sentiment140-style CSV and return (token lists, labels).

    Each line is expected as ``...,<sentiment>,...,<tweet>`` -- the sentiment
    code in the second comma-separated field, the tweet text in the last.
    Sentiment '0' maps to 0 (negative) and '4' to 1 (positive);
    NOTE(review): codes '1'-'3' are appended unchanged as *strings*, which
    looks unintended but is preserved here.  At most *limit* input lines are
    consumed.
    """
    data = []
    target = []
    i = 0
    with open(path, encoding='latin-1') as f:
        for line in f:
            i += 1
            line = line.replace('"', '')
            sentiment = line.split(',')[1]
            if sentiment not in ('0', '1', '2', '3', '4'):
                continue
            # BUGFIX: the original used ``in ('0')`` -- ('0') is just the
            # string '0', not a 1-tuple, so that was a substring test.
            if sentiment == '0':
                sentiment = 0
            elif sentiment == '4':
                sentiment = 1
            target.append(sentiment)
            tweet = line.split(',')[-1]
            data.append(tokenizer(tweet))
            if i == limit:  # stop once *limit* lines have been read
                return data, target
    return data, target
def saveWordvec(wordvec):
    """Persist a trained Word2Vec model to the working directory.

    Writes both the lightweight KeyedVectors (./twitter_vectors.kv) and the
    full trainable model (./twitter_vectors.model).
    """
    kv_path = './twitter_vectors.kv'
    model_path = "./twitter_vectors.model"
    # The KeyedVectors alone are enough for lookups / similarity queries.
    wordvec.wv.save(kv_path)
    # The whole model is needed if training is to be resumed later.
    wordvec.save(model_path)
def readWordvec(file, kv=True):
    """Load word embeddings from disk.

    :param file: path of the saved vectors or model.
    :param kv: when truthy, load read-only KeyedVectors (memory-mapped);
        otherwise load the full, re-trainable Word2Vec model.
    :return: a KeyedVectors or Word2Vec instance.
    """
    # Truthiness instead of `kv == True`: behaves sensibly for any truthy flag.
    if kv:
        return KeyedVectors.load(file, mmap='r')
    return Word2Vec.load(file)
# --- Top-level script: train skip-gram Word2Vec embeddings on tweets ---
# Read each line of the text file and return a list of tokens for each tweet and the associated sentiment
data,sentiment = load1_6million ("./kaggle_sentiment140/random_training.csv", tokenizer=gensim.utils.simple_preprocess)
# Creating the custom embedings for our specific domain
begin = time.perf_counter()
# SkipGram = True
# size=200 embedding dims, window=5 context words, min_count=5 drops rare
# tokens, workers=10 training threads, sg=1 selects skip-gram.
model = gensim.models.Word2Vec(data, size=200, window=5, min_count=5, workers=10, sg=1)
# NOTE(review): Word2Vec(data, ...) already trains during construction; this
# call runs 10 further epochs over the same corpus — confirm that is intended.
model.train(data, total_examples=len(data), epochs=10)
end = time.perf_counter()
# Report wall-clock training time in minutes.
print("\nTime elapsed: " + str((end-begin)/60) + " min")
saveWordvec(model)
#model = readWordvec("./twitter_vectors.kv_V1", kv = True)
#print(model.most_similar('good'))
### Augumenting the word vectors
# Read each line of the text file and return a list of tokens for each tweet and the associated sentiment
#nltk.tokenize.TweetTokenizer # a tweet tokenizer from nltk
#data,sentiment = loadTwitterKaggle("./kaggle_Twitter_sentiment_analysis/train.csv", tokenizer=gensim.utils.simple_preprocess)
# # Retrain word2vec using new dataset
# model = readWordvec("./twitter_vectors.model_V1", kv = False)
# # Creating the custom embedings for our specific domain
# begin = time.perf_counter()
# # adding more vocabulary into the previous one
# model.build_vocab(data, update=True)
# model.train(data, total_examples=len(data), epochs=model.epochs)
# end = time.perf_counter()
# print("\nTime elapsed: " + str((end-begin)/60) + " min")
# saveWordvec(model)
| masdeval/NLP | FinalProject/Word2Vec_Twitter.py | Word2Vec_Twitter.py | py | 3,101 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gensim.utils",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gensim.models.KeyedVectors.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gens... |
74533048992 | import numpy as np
from skimage import measure
from sklearn.metrics import auc
def run(score_imgs, labeled_imgs, fpr_thresh=0.3, max_steps=2000, class_name=None):
    """Compute the AUPRO score (area under the per-region-overlap curve).

    Sweeps a binarization threshold from the maximum to the minimum anomaly
    score in ``max_steps`` steps and, at each step, measures:
      - PRO: fraction of each connected ground-truth region covered by the
        predicted mask (per-region level),
      - IoU: per-image intersection-over-union (images with an empty ground
        truth are skipped),
      - FPR: false positives over the negative (normal) pixels.
    The mean PRO values are integrated over FPR up to ``fpr_thresh``, with
    the selected FPR range rescaled to [0, 1].

    :param score_imgs: per-pixel anomaly score maps, array-like.
    :param labeled_imgs: ground-truth masks; values are binarized at 0.45.
    :param fpr_thresh: FPR integration limit (0.3 is the common setting).
    :param max_steps: number of threshold steps in the sweep.
    :param class_name: unused; kept for interface compatibility.
    :return: the AUPRO score.
    """
    labeled_imgs = np.array(labeled_imgs)
    labeled_imgs[labeled_imgs <= 0.45] = 0
    labeled_imgs[labeled_imgs > 0.45] = 1
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported dtype.
    labeled_imgs = labeled_imgs.astype(bool)
    score_imgs = np.array(score_imgs)
    max_th = score_imgs.max()
    min_th = score_imgs.min()
    delta = (max_th - min_th) / max_steps
    ious_mean = []
    ious_std = []
    pros_mean = []
    pros_std = []
    threds = []
    fprs = []
    binary_score_maps = np.zeros_like(score_imgs, dtype=bool)
    for step in range(max_steps):
        thred = max_th - step * delta
        # segmentation at the current threshold
        binary_score_maps[score_imgs <= thred] = 0
        binary_score_maps[score_imgs > thred] = 1
        pro = []  # per region overlap
        iou = []  # per image iou
        # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region
        # iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map
        for i in range(len(binary_score_maps)):  # for i th image
            # pro (per region level)
            label_map = measure.label(labeled_imgs[i], connectivity=2)
            props = measure.regionprops(label_map)
            for prop in props:
                x_min, y_min, x_max, y_max = prop.bbox
                cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max]
                # cropped_mask = masks[i][x_min:x_max, y_min:y_max]
                cropped_mask = prop.filled_image  # corrected!
                intersection = np.logical_and(cropped_pred_label, cropped_mask).astype(np.float32).sum()
                pro.append(intersection / prop.area)
            # iou (per image level)
            intersection = np.logical_and(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum()
            union = np.logical_or(binary_score_maps[i], labeled_imgs[i]).astype(np.float32).sum()
            if labeled_imgs[i].any() > 0:  # when the gt have no anomaly pixels, skip it
                iou.append(intersection / union)
        # against steps and average metrics on the testing data
        ious_mean.append(np.array(iou).mean())
        ious_std.append(np.array(iou).std())
        pros_mean.append(np.array(pro).mean())
        pros_std.append(np.array(pro).std())
        # fpr for pro-auc: fraction of normal pixels flagged anomalous
        masks_neg = ~labeled_imgs
        fpr = np.logical_and(masks_neg, binary_score_maps).sum() / masks_neg.sum()
        fprs.append(fpr)
        threds.append(thred)
    # as array
    threds = np.array(threds)
    pros_mean = np.array(pros_mean)
    pros_std = np.array(pros_std)
    fprs = np.array(fprs)
    # default 30% fpr vs pro, pro_auc
    idx = fprs <= fpr_thresh  # indices of fprs below the expected fpr limit
    fprs_selected = fprs[idx]
    fprs_selected = rescale(fprs_selected)  # rescale fpr [0, fpr_thresh] -> [0, 1]
    pros_mean_selected = pros_mean[idx]
    pro_auc_score = auc(fprs_selected, pros_mean_selected)
    return pro_auc_score
def rescale(x):
    """Min-max normalize *x* to the [0, 1] range.

    NOTE(review): divides by (x.max() - x.min()); a constant input array
    would divide by zero — confirm callers never pass one.
    """
    return (x - x.min()) / (x.max() - x.min()) | wogur110/PNI_Anomaly_Detection | refinement/get_aupro.py | get_aupro.py | py | 3,461 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.bool",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_nu... |
43336500122 | import sys
import os
from PyQt5 import QtWidgets, uic, QtCore
import pyqtgraph as pg
import numpy as np
from ..communication import SCPI_mannager, upload
import json
class graph_view(pg.GraphicsLayoutWidget):
    """Histogram view of pulse-height samples with eight draggable
    photon-number threshold lines (exposed as ``line_photon1`` ..
    ``line_photon8``).

    Dragging a line pushes the new thresholds back to the parent window.
    """

    def __init__(self, top_window):
        """:param top_window: owning Top_window; receives threshold updates."""
        super().__init__(show=True)
        self.top_window: Top_window = top_window  # parent
        self.plt1 = self.addPlot()
        self.vals = []  # raw samples currently shown in the histogram
        y, x = np.histogram([])
        self.hist: pg.PlotDataItem = self.plt1.plot(x, y, stepMode="center", fillLevel=0, fillOutline=True, brush=(0, 0, 255, 150))
        self.update_hist()

        def on_change():
            # Push the dragged thresholds to the parent and refresh its labels.
            self.on_line_dragged()
            self.top_window.display_value()

        # Create the eight threshold lines in a loop instead of eight
        # copy-pasted blocks; attributes line_photon1..8 are kept so existing
        # callers keep working.
        for i in range(1, 9):
            line = pg.InfiniteLine(movable=True, angle=90, label=str(i) + ':{value:0.0f}',
                                   labelOpts={'position': i / 10, 'color': (200, 200, 100), 'fill': (200, 200, 200, 50), 'movable': False})
            setattr(self, f'line_photon{i}', line)
            self.plt1.addItem(line)
            line.sigPositionChangeFinished.connect(on_change)

    def photon_threshold_set(self, value_dict: dict):
        """Move every threshold line to the value in ``value_dict``
        (keys 'photon1' .. 'photon8')."""
        assert len(value_dict) == 8
        for i in range(1, 9):
            getattr(self, f'line_photon{i}').setValue(value_dict[f'photon{i}'])

    def update_hist(self):
        """Recompute integer-centered bins from the current samples and redraw."""
        if self.vals:
            lo, hi = min(self.vals), max(self.vals)
            # One unit-wide bin per integer value, centered on the integers.
            bins = np.linspace(lo - 0.5, hi + 0.5, hi - lo + 2)
        else:
            bins = 1
        y, x = np.histogram(self.vals, bins=bins)
        self.hist.setData(x, y)

    def append_data(self, array: list):
        """Append new samples and refresh the histogram."""
        self.vals = self.vals + array
        self.update_hist()

    def on_line_dragged(self):
        """Report the rounded positions of all threshold lines to the parent."""
        value_dict = {f'photon{i}': round(getattr(self, f'line_photon{i}').value())
                      for i in range(1, 9)}
        self.top_window.photon_threshold_set(value_dict)

    def reset_graph(self):
        """Discard all samples and clear the histogram."""
        self.vals = []
        self.update_hist()
class Top_window(QtWidgets.QMainWindow):
    """Main window of the Redpitaya photon-number-resolver controller.

    Loads its layout from ``top.ui``, embeds a :class:`graph_view` histogram,
    and talks to the device through a :class:`SCPI_mannager` instance that is
    created on connect and dropped on disconnect.
    """

    def __init__(self, parent=None):
        """Load the UI, wire up all widget signals, and start disconnected.

        :param parent: optional Qt parent widget.
        """
        super().__init__(parent)
        dir_name = os.path.dirname(__file__)
        uic.loadUi(os.path.join(dir_name, 'top.ui'), self)
        self.setWindowTitle('Redpitaya photon number resolver controller')
        self.setFixedSize(self.size())
        self.attach_graph()
        # All configuration inputs stay disabled until a device is connected.
        self.toggle_config_input(False)
        self.scpi_mannager: SCPI_mannager = None
        self.pushButton_upload.clicked.connect(self.push_upload)
        self.pushButton_connect.clicked.connect(self.push_connect)
        self.pushButton_disconnect.clicked.connect(self.push_disconnect)
        self.lineEdit_port.setText('5025')
        # Keep the trigger-level slider and spin box mutually in sync.
        def trigger_level_slider_change(value):
            self.spinBox_trigger_level.setValue(value)
            self.display_value()
        def trigger_level_spin_change(value):
            self.horizontalSlider_trigger_level.setValue(value)
            self.display_value()
        self.horizontalSlider_trigger_level.valueChanged.connect(trigger_level_slider_change)
        self.spinBox_trigger_level.valueChanged.connect(trigger_level_spin_change)
        # Any configuration change refreshes the derived-value labels.
        self.spinBox_trigger_delay.valueChanged.connect(self.display_value)
        self.spinBox_trigger_clearance.valueChanged.connect(self.display_value)
        self.spinBox_photon1.valueChanged.connect(self.display_value)
        self.spinBox_photon2.valueChanged.connect(self.display_value)
        self.spinBox_photon3.valueChanged.connect(self.display_value)
        self.spinBox_photon4.valueChanged.connect(self.display_value)
        self.spinBox_photon5.valueChanged.connect(self.display_value)
        self.spinBox_photon6.valueChanged.connect(self.display_value)
        self.spinBox_photon7.valueChanged.connect(self.display_value)
        self.spinBox_photon8.valueChanged.connect(self.display_value)
        self.pushButton_read.clicked.connect(self.push_read)
        self.pushButton_write.clicked.connect(self.push_write)
        self.pushButton_graph_reset.clicked.connect(self.push_graph_reset)
        self.pushButton_graph_load.clicked.connect(self.push_graph_load)

    def ui_components(self):
        """Type hints only — the widgets are actually created by loadUi."""
        # just type hint
        self.graph_container : QtWidgets.QGridLayout = None
        self.pushButton_upload : QtWidgets.QGridLayout = None
        self.pushButton_connect : QtWidgets.QPushButton = None
        self.pushButton_disconnect : QtWidgets.QPushButton = None
        self.pushButton_read : QtWidgets.QPushButton = None
        self.pushButton_write : QtWidgets.QPushButton = None
        self.pushButton_graph_reset : QtWidgets.QPushButton = None
        self.pushButton_graph_load : QtWidgets.QPushButton = None
        self.lineEdit_ip : QtWidgets.QLineEdit = None
        self.lineEdit_port : QtWidgets.QLineEdit = None
        self.spinBox_trigger_level : QtWidgets.QSpinBox = None
        self.spinBox_trigger_delay : QtWidgets.QSpinBox = None
        self.spinBox_trigger_clearance : QtWidgets.QSpinBox = None
        self.spinBox_photon1 : QtWidgets.QSpinBox = None
        self.spinBox_photon2 : QtWidgets.QSpinBox = None
        self.spinBox_photon3 : QtWidgets.QSpinBox = None
        self.spinBox_photon4 : QtWidgets.QSpinBox = None
        self.spinBox_photon5 : QtWidgets.QSpinBox = None
        self.spinBox_photon6 : QtWidgets.QSpinBox = None
        self.spinBox_photon7 : QtWidgets.QSpinBox = None
        self.spinBox_photon8 : QtWidgets.QSpinBox = None
        self.horizontalSlider_trigger_level : QtWidgets.QSlider = None
        self.label_trigger_level_display : QtWidgets.QLabel = None
        self.label_trigger_delay_display : QtWidgets.QLabel = None
        self.label_trigger_clearance_display : QtWidgets.QLabel = None
        self.label_photon1_display : QtWidgets.QLabel = None
        self.label_photon2_display : QtWidgets.QLabel = None
        self.label_photon3_display : QtWidgets.QLabel = None
        self.label_photon4_display : QtWidgets.QLabel = None
        self.label_photon5_display : QtWidgets.QLabel = None
        self.label_photon6_display : QtWidgets.QLabel = None
        self.label_photon7_display : QtWidgets.QLabel = None
        self.label_photon8_display : QtWidgets.QLabel = None
        self.label_graph_samples : QtWidgets.QLabel = None
        self.graph_container : QtWidgets.QGridLayout = None
        self.groupBox_controll : QtWidgets.QGroupBox = None
        self.groupBox_trigger_level : QtWidgets.QGroupBox = None
        self.groupBox_timing_controll : QtWidgets.QGroupBox = None
        self.groupBox_misc_config : QtWidgets.QGroupBox = None
        self.groupBox_threshold : QtWidgets.QGroupBox = None
        self.groupBox_graph_control : QtWidgets.QGroupBox = None
        self.checkBox_trig_pos_edge : QtWidgets.QCheckBox = None
        self.checkBox_trig_is_a : QtWidgets.QCheckBox = None
        self.checkBox_pnr_sig_inverse : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon0 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon1 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon2 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon3 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon4 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon5 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon6 : QtWidgets.QCheckBox = None
        self.checkBox_dac_logic_mask_photon7 : QtWidgets.QCheckBox = None

    def attach_graph(self):
        """Embed the histogram widget into the UI's graph container."""
        self.graph = graph_view(self) #pg.GraphicsLayoutWidget(show=True)
        self.graph_container.addWidget(self.graph)

    def toggle_config_input(self, bool):
        """Enable/disable all device-configuration group boxes at once."""
        self.groupBox_controll.setEnabled(bool)
        self.groupBox_trigger_level.setEnabled(bool)
        self.groupBox_timing_controll.setEnabled(bool)
        self.groupBox_misc_config.setEnabled(bool)
        self.groupBox_threshold.setEnabled(bool)
        self.groupBox_graph_control.setEnabled(bool)

    def push_upload(self):
        """Upload assets to the device over SSH per upload_cfg.json.

        NOTE(review): uses hard-coded root/root credentials and leaves the
        config file handle to the garbage collector — confirm acceptable.
        """
        host = self.lineEdit_ip.text().strip()
        ssh_port = 22
        user = 'root'
        passwd = 'root'
        upload_cfg = json.load(open("upload_cfg.json"))
        assets = upload_cfg["assets"]
        exec_command = upload_cfg["exec_command"]
        upload(host, ssh_port, user, passwd, assets, exec_command)

    def push_connect(self):
        """Open the SCPI connection; enable the UI only if IDN succeeds."""
        try:
            self.scpi_mannager = SCPI_mannager(host=self.lineEdit_ip.text().strip(), port=int(self.lineEdit_port.text()))
            result = self.scpi_mannager.idn()
            if result == None: raise Exception('fail for IDN')
            self.pushButton_connect.setEnabled(False)
            self.pushButton_disconnect.setEnabled(True)
            self.toggle_config_input(True)
        except Exception as e:
            # Connection failures are only reported on stdout.
            print(e)
            pass

    def push_disconnect(self):
        """Drop the SCPI connection and disable the configuration inputs."""
        self.pushButton_disconnect.setEnabled(False)
        self.pushButton_connect.setEnabled(True)
        self.toggle_config_input(False)
        self.scpi_mannager: SCPI_mannager = None

    def display_value(self):
        """Refresh the derived-unit labels and mirror thresholds to the graph.

        Raw counts are converted with 8 ns per timing tick and 8.192 counts
        per millivolt.
        """
        gen_time = lambda x: f'= {8*x} ns'
        gen_volt = lambda x: f'= {x/8.192:.5g} mv'
        self.label_trigger_level_display.setText(gen_volt(self.spinBox_trigger_level.value()))
        self.label_trigger_delay_display.setText(gen_time(self.spinBox_trigger_delay.value()))
        self.label_trigger_clearance_display.setText(gen_time(self.spinBox_trigger_clearance.value()))
        self.label_photon1_display.setText(gen_volt(self.spinBox_photon1.value()))
        self.label_photon2_display.setText(gen_volt(self.spinBox_photon2.value()))
        self.label_photon3_display.setText(gen_volt(self.spinBox_photon3.value()))
        self.label_photon4_display.setText(gen_volt(self.spinBox_photon4.value()))
        self.label_photon5_display.setText(gen_volt(self.spinBox_photon5.value()))
        self.label_photon6_display.setText(gen_volt(self.spinBox_photon6.value()))
        self.label_photon7_display.setText(gen_volt(self.spinBox_photon7.value()))
        self.label_photon8_display.setText(gen_volt(self.spinBox_photon8.value()))
        value_dict = {
            'photon1':self.spinBox_photon1.value(),
            'photon2':self.spinBox_photon2.value(),
            'photon3':self.spinBox_photon3.value(),
            'photon4':self.spinBox_photon4.value(),
            'photon5':self.spinBox_photon5.value(),
            'photon6':self.spinBox_photon6.value(),
            'photon7':self.spinBox_photon7.value(),
            'photon8':self.spinBox_photon8.value()
        }
        self.graph.photon_threshold_set(value_dict)

    def push_read(self):
        """Read every device register over SCPI and populate the widgets."""
        self.spinBox_trigger_level.setValue(self.scpi_mannager.read_trigger_level())
        self.spinBox_trigger_delay.setValue(self.scpi_mannager.read_trigger_delay())
        self.spinBox_trigger_clearance.setValue(self.scpi_mannager.read_trigger_clearance())
        self.spinBox_photon1.setValue(self.scpi_mannager.read_photon_threshold(1))
        self.spinBox_photon2.setValue(self.scpi_mannager.read_photon_threshold(2))
        self.spinBox_photon3.setValue(self.scpi_mannager.read_photon_threshold(3))
        self.spinBox_photon4.setValue(self.scpi_mannager.read_photon_threshold(4))
        self.spinBox_photon5.setValue(self.scpi_mannager.read_photon_threshold(5))
        self.spinBox_photon6.setValue(self.scpi_mannager.read_photon_threshold(6))
        self.spinBox_photon7.setValue(self.scpi_mannager.read_photon_threshold(7))
        self.spinBox_photon8.setValue(self.scpi_mannager.read_photon_threshold(8))
        self.checkBox_pnr_sig_inverse.setChecked(self.scpi_mannager.read_pnr_sig_inverse())
        self.checkBox_trig_pos_edge.setChecked(self.scpi_mannager.read_trig_positive_edge())
        self.checkBox_trig_is_a.setChecked(self.scpi_mannager.read_trig_is_a())
        # The DAC logic mask is a bit field; one checkbox per bit.
        dac_logic_mask = self.scpi_mannager.read_dac_logic_mask()
        self.checkBox_dac_logic_mask_photon0.setChecked(bool(dac_logic_mask & (1<<0)))
        self.checkBox_dac_logic_mask_photon1.setChecked(bool(dac_logic_mask & (1<<1)))
        self.checkBox_dac_logic_mask_photon2.setChecked(bool(dac_logic_mask & (1<<2)))
        self.checkBox_dac_logic_mask_photon3.setChecked(bool(dac_logic_mask & (1<<3)))
        self.checkBox_dac_logic_mask_photon4.setChecked(bool(dac_logic_mask & (1<<4)))
        self.checkBox_dac_logic_mask_photon5.setChecked(bool(dac_logic_mask & (1<<5)))
        self.checkBox_dac_logic_mask_photon6.setChecked(bool(dac_logic_mask & (1<<6)))
        self.checkBox_dac_logic_mask_photon7.setChecked(bool(dac_logic_mask & (1<<7)))

    def push_write(self):
        """Write every widget value back to the device over SCPI."""
        self.scpi_mannager.set_trigger_level(self.spinBox_trigger_level.value())
        self.scpi_mannager.set_trigger_delay(self.spinBox_trigger_delay.value())
        self.scpi_mannager.set_trigger_clearance(self.spinBox_trigger_clearance.value())
        self.scpi_mannager.set_photon_threshold(1, self.spinBox_photon1.value())
        self.scpi_mannager.set_photon_threshold(2, self.spinBox_photon2.value())
        self.scpi_mannager.set_photon_threshold(3, self.spinBox_photon3.value())
        self.scpi_mannager.set_photon_threshold(4, self.spinBox_photon4.value())
        self.scpi_mannager.set_photon_threshold(5, self.spinBox_photon5.value())
        self.scpi_mannager.set_photon_threshold(6, self.spinBox_photon6.value())
        self.scpi_mannager.set_photon_threshold(7, self.spinBox_photon7.value())
        self.scpi_mannager.set_photon_threshold(8, self.spinBox_photon8.value())
        self.scpi_mannager.set_pnr_sig_inverse(self.checkBox_pnr_sig_inverse.isChecked())
        self.scpi_mannager.set_trig_positive_edge(self.checkBox_trig_pos_edge.isChecked())
        self.scpi_mannager.set_trig_is_a(self.checkBox_trig_is_a.isChecked())
        # Re-assemble the DAC logic mask bit field from the checkboxes.
        dac_logic_mask = 0
        dac_logic_mask += self.checkBox_dac_logic_mask_photon0.isChecked() << 0
        dac_logic_mask += self.checkBox_dac_logic_mask_photon1.isChecked() << 1
        dac_logic_mask += self.checkBox_dac_logic_mask_photon2.isChecked() << 2
        dac_logic_mask += self.checkBox_dac_logic_mask_photon3.isChecked() << 3
        dac_logic_mask += self.checkBox_dac_logic_mask_photon4.isChecked() << 4
        dac_logic_mask += self.checkBox_dac_logic_mask_photon5.isChecked() << 5
        dac_logic_mask += self.checkBox_dac_logic_mask_photon6.isChecked() << 6
        dac_logic_mask += self.checkBox_dac_logic_mask_photon7.isChecked() << 7
        self.scpi_mannager.set_dac_logic_mask(dac_logic_mask)

    def push_graph_reset(self):
        """Clear the histogram and the device-side ADC FIFO."""
        self.graph.reset_graph()
        self.scpi_mannager.reset_adc_fifo()
        self.label_graph_samples.setText(f'Samples:{len(self.graph.vals)}')

    def push_graph_load(self):
        """Fetch new samples from the device FIFO into the histogram."""
        array = self.scpi_mannager.read_pnr_adc_fifo()
        self.graph.append_data(array)
        self.label_graph_samples.setText(f'Samples:{len(self.graph.vals)}')

    def photon_threshold_set(self, value_dict: dict):
        """Update the eight photon-threshold spin boxes from ``value_dict``
        (keys 'photon1' .. 'photon8'); called when a graph line is dragged."""
        assert len(value_dict) == 8
        self.spinBox_photon1.setValue(value_dict['photon1'])
        self.spinBox_photon2.setValue(value_dict['photon2'])
        self.spinBox_photon3.setValue(value_dict['photon3'])
        self.spinBox_photon4.setValue(value_dict['photon4'])
        self.spinBox_photon5.setValue(value_dict['photon5'])
        self.spinBox_photon6.setValue(value_dict['photon6'])
        self.spinBox_photon7.setValue(value_dict['photon7'])
        self.spinBox_photon8.setValue(value_dict['photon8'])
| ruofan-he/redpitaya_PNR | frontend/top/top.py | top.py | py | 19,736 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pyqtgraph.GraphicsLayoutWidget",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.histogram",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyqtgraph.PlotDataItem",
"line_number": 19,
"usage_type": "attribute"
},
{
"a... |
21877280591 | import os
import sys
sys.path.append("../..")
import time
import math
from z3 import *
import argparse
from multiprocessing import Process
from multiprocessing.spawn import freeze_support
from common.utils import show_output, read_instance, write_output, COLORS
def write_report(instance_name, sol, report_file):
    """Append one result row to the tab-separated report, creating the file
    (with a header line) on first use.

    :param instance_name: name of the solved instance.
    :param sol: solution dict whose 'msg' ends in '... <seconds> seconds',
        or a plain string (e.g. 'unsat') meaning no timing is available.
    :param report_file: path of the report to append to.
    """
    if isinstance(sol, str):
        timing = '---'
    else:
        # 'Optimal solution found in X.XX seconds' -> take the X.XX token.
        timing = sol['msg'].split(' ')[-2]
    row = instance_name + ' \t ' + timing + '\n'
    is_new = not os.path.isfile(report_file)
    # Append mode creates the file when missing, so one open covers both cases.
    with open(report_file, 'a') as file:
        if is_new:
            file.write('Instance name \t Optimal solution\n')
        file.write(row)
def vlsi_optimize(inst_name, width, num_circuits, w, h, rotation=False, show_sol=True, out_path='../out-rot/', out_img_path='../out-imgs-rot/', report_file='report-rot.csv'):
    """Find the minimum feasible plate height by iterative SMT solving, then
    write, report and plot the result.

    The search starts at the area lower bound (total circuit area / width)
    and grows the candidate height one unit at a time until the SMT model is
    satisfiable or the trivial upper bound (all heights stacked) is passed.
    """
    # Lower bound: total circuit area divided by the fixed plate width.
    total_area = sum(w[i] * h[i] for i in range(num_circuits))
    lower = math.floor(total_area / width)
    # Upper bound: every circuit stacked vertically.
    upper = sum(h)
    start = time.time()
    sol = 'unsat'
    candidate = lower
    while sol == 'unsat' and candidate <= upper:
        sol = vlsi_smt(width, candidate, num_circuits, w, h, start, rotation)
        candidate += 1
    out_name = inst_name.replace('ins', 'out')
    write_output(out_path + out_name, sol)
    write_report(out_name, sol, report_file)
    show_output(sol, show_sol, out_img_path + out_name.replace('txt', 'png'))
def vlsi_smt(width, height, num_circuits, w, h, start_time, rotation=False):
    """Build and check a Z3 model for placing ``num_circuits`` rectangles on
    a ``width`` x ``height`` plate without overlaps.

    :param width: fixed plate width.
    :param height: candidate plate height to test.
    :param num_circuits: number of circuits to place.
    :param w: circuit widths.
    :param h: circuit heights.
    :param start_time: timestamp used to report the elapsed solving time.
    :param rotation: when True each circuit may swap its width and height.
    :return: a solution dict (positions, sizes, timing message) when sat,
        otherwise the solver status as a string (e.g. 'unsat').
    """
    # One integer position/size variable per circuit.
    circuits_x = [Int(f"x_{i}") for i in range(num_circuits)]
    circuits_y = [Int(f"y_{i}") for i in range(num_circuits)]
    circuits_w = [Int(f"w_{i}") for i in range(num_circuits)]
    circuits_h = [Int(f"h_{i}") for i in range(num_circuits)]
    opt = Solver()
    # Bind each circuit's size variables to its real dimensions (possibly swapped).
    if rotation:
        opt.add([Or(And(circuits_w[i] == w[i], circuits_h[i] == h[i]), And(circuits_w[i] == h[i], circuits_h[i] == w[i])) for i in range(num_circuits)])
    else:
        opt.add([And(circuits_w[i] == w[i], circuits_h[i] == h[i]) for i in range(num_circuits)])
    # Pairwise non-overlap: i == j short-circuits the self-pair to True.
    non_overlap = [Or(i==j,
                      circuits_x[i] + circuits_w[i] <= circuits_x[j],
                      circuits_x[i] >= circuits_x[j] + circuits_w[j],
                      circuits_y[i] + circuits_h[i] <= circuits_y[j],
                      circuits_y[i] >= circuits_y[j] + circuits_h[j]
                      ) for i in range(num_circuits) for j in range(num_circuits)]
    # Every circuit stays inside the plate.
    in_limits = [And(circuits_x[i] + circuits_w[i] <= width,
                     circuits_y[i] + circuits_h[i] <= height,
                     circuits_x[i] >= 0,
                     circuits_y[i] >= 0) for i in range(num_circuits)]
    opt.add(non_overlap + in_limits)
    # Implied constraint on the sum of the circuits' height at position x_thr
    for x_thr in range(width):
        opt.add(Sum([If(And(circuits_x[j] <= x_thr, (circuits_x[j] + circuits_w[j]) > x_thr), circuits_h[j], 0) for j in range(num_circuits)]) <= height)
    # Implied constraint on the sum of the circuits' width at position y_thr
    for y_thr in range(height):
        opt.add(Sum([If(And(circuits_y[j] <= y_thr, (circuits_y[j] + circuits_h[j]) > y_thr), circuits_w[j], 0) for j in range(num_circuits)]) <= width)
    # Symmetry breaking constraints: rule out horizontally/vertically mirrored placements.
    lex_x = lex_lesseq(circuits_x, flip(circuits_x, circuits_w, width))
    lex_y = lex_lesseq(circuits_y, flip(circuits_y, circuits_h, height))
    #same_y = [If(
    #    And(i != j, Or(circuits_h[i] == circuits_h[j], circuits_w[i] == circuits_w[j]), circuits_y[i] == circuits_y[j]),
    #    Or(circuits_x[i] < circuits_x[j], circuits_x[j] < circuits_x[i]),
    #    True) for i in range(num_circuits) for j in range(num_circuits)]
    #same_x = [If(
    #    And(i != j, Or(circuits_w[i] == circuits_w[j], circuits_h[i] == circuits_h[j]), circuits_x[i] == circuits_x[j]),
    #    Or(circuits_y[i] < circuits_y[j], circuits_y[j] < circuits_y[i]),
    #    True) for i in range(num_circuits) for j in range(num_circuits)]
    opt.add([lex_x, lex_y])
    if str(opt.check()) == 'sat':
        model = opt.model()
        sol = {
            'width': width,
            'height': height,
            'num_circuits': num_circuits,
            'circuits_w': [int(model.evaluate(circuits_w[i]).as_string()) for i in range(num_circuits)],
            'circuits_h': [int(model.evaluate(circuits_h[i]).as_string()) for i in range(num_circuits)],
            'circuits_x': [int(model.evaluate(circuits_x[i]).as_string()) for i in range(num_circuits)],
            'circuits_y': [int(model.evaluate(circuits_y[i]).as_string()) for i in range(num_circuits)],
            'msg': 'Optimal solution found in {0:.2f} seconds'.format(time.time() - start_time)
        }
    else:
        # NOTE(review): opt.check() is invoked a second time here — confirm
        # the solver result is cached / the repeat is intended.
        sol = str(opt.check())
    return sol
def flip(arr, lenghts, max):
    """Mirror segment start coordinates along an axis of length ``max``.

    Maps each start position to where the same segment would start after
    reflecting the axis; used to build symmetry-breaking constraints.
    """
    return [max - arr[i] - size for i, size in enumerate(lenghts)]
def lex_lesseq(list_xy, flipped_xy):
    """Build a Z3 constraint asserting ``list_xy`` is lexicographically <=
    ``flipped_xy`` (symmetry breaking against mirrored placements).

    Recursion: the head satisfies <=, or the heads are equal and the tails
    satisfy the same ordering.  Returns the Python literal True for empty
    sequences (the recursion base case).
    """
    if len(list_xy) == 0:
        return True
    return Or(list_xy[0] <= flipped_xy[0], And(lex_lesseq(list_xy[1:], flipped_xy[1:]), list_xy[0] == flipped_xy[0]))
if __name__ == '__main__':
    # Parse CLI options controlling output locations, rotation and reporting.
    parser = argparse.ArgumentParser()
    parser.add_argument('--show-sols', default=False, help='Whether to show each instance solution (optimal if found, otherwise sub-optimal) at the end of the execution or simply store them.')
    parser.add_argument('--output-folder', default='../out/', help='Path were the solutions .txt files should be stored.')
    parser.add_argument('--output-imgs-folder', default='../out-imgs/', help='Path were to store the graphical representation of each solution.')
    parser.add_argument('--rotation', default=False, help='Whether the SMT model to be executed should allow rotation or not.')
    parser.add_argument('--report-file', default='report.csv', help='File path were to save a summary of the solved instances and the relative timing information')
    args = parser.parse_args()
    # Create the output directories on first run.
    if not os.path.isdir(args.output_folder):
        os.mkdir(args.output_folder)
    if not args.show_sols and not os.path.isdir(args.output_imgs_folder):
        os.mkdir(args.output_imgs_folder)
    freeze_support()
    instances_path = '../../common/instances/'
    # Solve each instance in a child process so it can be killed on timeout
    # (310 s per instance).
    for instance_file in os.listdir(instances_path):
        print(COLORS['green'], f'Starting to solve {instance_file}...', COLORS['endc'])
        width, num_circuits, circuits_w, circuits_h = read_instance(instances_path + instance_file)
        run = Process(target=vlsi_optimize, args=(instance_file, width, num_circuits, circuits_w, circuits_h,
            args.rotation, args.show_sols, args.output_folder, args.output_imgs_folder, args.report_file), name='optimize_instance')
        run.start()
        run.join(timeout=310)
        if run.is_alive():
            print(COLORS['red'], f'No solution found for {instance_file}.', COLORS['endc'])
            run.terminate() | mwritescode/VLSI | SMT/src/iterative_solve_smt.py | iterative_solve_smt.py | py | 6,936 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
15486771413 | import re
import requests
from bs4 import BeautifulSoup
def baidu_search(word: str) -> str:
    """Look up a term on Baidu Baike and return a short summary.

    Fetches the encyclopedia page for ``word``, extracts the
    ``.lemma-summary`` blocks, strips citation markers such as ``[1-2]``,
    and truncates the joined text to 100 characters plus an ellipsis.

    :param word: the term to look up.
    :return: the (possibly truncated) summary text; empty when none found.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.62"
    }
    url = f'https://baike.baidu.com/item/{word}'
    response = requests.get(url=url, headers=headers, timeout=10)
    page = BeautifulSoup(response.text, 'lxml')
    summaries = page.select('.lemma-summary')
    # Drop reference markers like [1] / [2-3] left over from the wiki markup.
    citation = re.compile(r'\[[0-9 \-]+]')
    summary = ''.join(citation.sub('', node.text).strip() for node in summaries)
    if len(summary) > 100:
        summary = summary[:100] + "……"
    return summary
| shangruobing/infoweaver-backend | NFQA/QAS/utils/baidu_search.py | baidu_search.py | py | 829 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 23,
"usage_type": "call"
}
] |
32166503776 | #!/usr/bin/env python3
import asyncio
import sys
from getpass import getpass
from pathlib import Path
from typing import Dict
import httpx
import hug
# Bot accounts whose commits should never need an acknowledgement.
IGNORED_AUTHOR_LOGINS = {"deepsource-autofix[bot]"}
REPO = "pycqa/isort"
# GitHub REST endpoint listing the repo's contributors (paginated).
GITHUB_API_CONTRIBUTORS = f"https://api.github.com/repos/{REPO}/contributors"
# Base URL for a user's commit history on the repo (login is appended).
GITHUB_USER_CONTRIBUTIONS = f"https://github.com/{REPO}/commits?author="
GITHUB_USER_TYPE = "User"
USER_DELIMITER = "-" * 80
# GitHub API page size (also used to detect the last page).
PER_PAGE = 100
_ACK_FILE = Path(__file__).parent.parent / "docs" / "contributing" / "4.-acknowledgements.md"
# Read once at import time; lower-cased for case-insensitive @login matching.
ACKNOWLEDGEMENTS = _ACK_FILE.read_text().lower()
def _user_info(user: Dict[str, str], verbose=False) -> str:
login = "@" + user["login"]
name = user.get("name")
display_name = f"{name} ({login})" if name else login
user_info = f"- {display_name}"
if verbose:
contributions = f" {GITHUB_USER_CONTRIBUTIONS}{user['login']}"
user_info += "\n" + contributions
return user_info
@hug.cli()
async def main():
    """Find repo contributors missing from the acknowledgements document.

    Prompts for GitHub credentials, pages through the contributors API,
    filters out bots and already-acknowledged logins, then prints the
    remaining users twice: once verbosely (with commit links) and once as
    plain Markdown bullets.  Exits with status 1 when any are found so CI
    can fail the check.
    """
    auth = (input("Github Username: "), getpass())
    async with httpx.AsyncClient() as client:
        page = 0
        results = []
        contributors = []
        # Keep paging while the previous page was full (or on the first pass).
        while not page or len(results) == PER_PAGE:
            page += 1
            response = await client.get(
                f"{GITHUB_API_CONTRIBUTORS}?per_page={PER_PAGE}&page={page}", auth=auth
            )
            results = response.json()
            contributors.extend(
                (
                    contributor
                    for contributor in results
                    if contributor["type"] == GITHUB_USER_TYPE
                    and contributor["login"] not in IGNORED_AUTHOR_LOGINS
                    and f"@{contributor['login'].lower()}" not in ACKNOWLEDGEMENTS
                )
            )
        # Fetch the full user profiles (for display names) concurrently.
        unacknowledged_users = await asyncio.gather(
            *(client.get(contributor["url"], auth=auth) for contributor in contributors)
        )
        unacknowledged_users = [request.json() for request in unacknowledged_users]
        if not unacknowledged_users:
            sys.exit()
        print("Found unacknowledged authors:")
        print()
        for user in unacknowledged_users:
            print(_user_info(user, verbose=True))
            print(USER_DELIMITER)
            print()
        print("Printing again for easy inclusion in Markdown file:")
        print()
        for user in unacknowledged_users:
            print(_user_info(user))
        sys.exit(1)
if __name__ == "__main__":
    # hug exposes the decorated coroutine as a command-line entry point.
    main.interface.cli()
| PyCQA/isort | scripts/check_acknowledgments.py | check_acknowledgments.py | py | 2,528 | python | en | code | 6,145 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "getpass.getpass",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "httpx.AsyncClient",
"lin... |
36079132985 | import argparse
import os
from collector.github_repo_collector import GithubRepositoryCollector
from const.constants import GITHUB_ACCESS_TOKEN
def parse_args() -> argparse.Namespace:
"""
Parse arguments.
:return: a namespace with the arguments
"""
parser = argparse.ArgumentParser(description='Get repositories from GitHub.')
parser.add_argument('--total', type=int, default=100, help='the total number of repositories to save')
return parser.parse_args()
if __name__ == '__main__':
if GITHUB_ACCESS_TOKEN not in os.environ:
raise Exception('GITHUB_ACCESS_TOKEN not found in environment variables.')
args = parse_args()
g = GithubRepositoryCollector()
g.collect(args.total)
| JulianBenitez99/ECI-MS-Thesis | GoCSVRepo/main.py | main.py | py | 732 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "const.constants.GITHUB_ACCESS_TOKEN",
"line_number": 19,
"usage_type": "name"
},
{
... |
29972846358 | #import the spaCy library
import spacy
# Load the spaCy model for NER(this is the sm version so faster but less accurate)
nlp = spacy.load("en_core_web_sm")
# Define a function to anonymize personal information
def anonymize_text(text):
# Use spaCy NER to process the input text
doc = nlp(text)
# Create an empty list to store the anonymized text
anonymized_text = []
# Iterate over each token in the processed text
for token in doc:
# Check if the token is a ORG entity
if token.ent_type_ == "ORG":
# If so, add "[ANONYMIZED]" to the anonymized text
anonymized_text.append("[ANONYMIZED ORG]")
# Check if the token is a TIME entity
elif token.ent_type_ == "TIME":
# If so, add "[ANONYMIZED]" to the anonymized text
anonymized_text.append("[ANONYMIZED TIME]")
# Check if the token is a Person entity(not working find out why)
elif token.ent_type_ == "PERSON":
# If so, add "[ANONYMIZED]" to the anonymized text
anonymized_text.append("[ANONYMIZED NAME]")
else:
# Add the token's text to the anonymized text if it isn't
anonymized_text.append(token.text)
# Join the list of tokens into a single string
anonymized_text = " ".join(anonymized_text)
# Print the anonymized text
print(anonymized_text)
###############################################################################
# Variable for participation and reuse of code
cont = input("Would you like to Anonymize text? (Y/N) ")
# If N, then stop running code.
while(cont != "N"):
# If Y, then run the code below
if (cont == "Y"):
# Get input text from user
text = input("Enter text: ")
# Anonymize the text
anonymize_text(text)
else:
# Reasks if there is more text to be added
print("Please enter 'Y' or 'N'")
cont = input("More Text? (Y/N) ") | Sai9555/anonymize_data | src/TxtSpaCyV1.0.py | TxtSpaCyV1.0.py | py | 1,954 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spacy.load",
"line_number": 5,
"usage_type": "call"
}
] |
27528671913 | #!/usr/bin/python3
import argparse
import cv2
import numpy as np
def onTrackbar(threshold):
print("Selected threshold " + str(threshold) + " for limit")
def main():
# parse the argument
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--image', type=str, required=True, help='Full path to image file.')
args = vars(parser.parse_args())
# read the image original
image = cv2.imread(args['image'], cv2.IMREAD_COLOR) # Load an image
cv2.imshow("original", image)
# convert to gray scale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("image_gray", image_gray)
# convert to hsv
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.imshow("image_hsv", image_hsv)
# dictionary with ranges
ranges_pcss = {"b": {"min": 100, "max": 256},
"g": {"min": 100, "max": 256},
"r": {"min": 100, "max": 256},
}
# create the trackbars
cv2.namedWindow('image_process')
cv2.createTrackbar("min B", "image_process", 0, 255, onTrackbar)
cv2.createTrackbar("max B", "image_process", 0, 255, onTrackbar)
cv2.createTrackbar("min G", "image_process", 0, 255, onTrackbar)
cv2.createTrackbar("max G", "image_process", 0, 255, onTrackbar)
cv2.createTrackbar("min R", "image_process", 0, 255, onTrackbar)
cv2.createTrackbar("max R", "image_process", 0, 255, onTrackbar)
# create the trackbars
cv2.namedWindow('mask')
cv2.createTrackbar("min", "mask", 0, 255, onTrackbar)
cv2.createTrackbar("max", "mask", 0, 255, onTrackbar)
# cycle for editing the images_recog
while True:
# close windows on ESC
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
min_b_pcss = cv2.getTrackbarPos("min B", "image_process")
max_b_pcss = cv2.getTrackbarPos("max B", "image_process")
min_g_pcss = cv2.getTrackbarPos("min G", "image_process")
max_g_pcss = cv2.getTrackbarPos("max G", "image_process")
min_r_pcss = cv2.getTrackbarPos("min R", "image_process")
max_r_pcss = cv2.getTrackbarPos("max R", "image_process")
ranges_pcss["b"]["min"] = min_b_pcss
ranges_pcss["b"]["max"] = max_b_pcss
ranges_pcss["g"]["min"] = min_g_pcss
ranges_pcss["g"]["max"] = max_g_pcss
ranges_pcss["r"]["min"] = min_r_pcss
ranges_pcss["r"]["max"] = max_r_pcss
mins_pcss = np.array([ranges_pcss['b']['min'], ranges_pcss['g']['min'], ranges_pcss['r']['min']])
maxs_pcss = np.array([ranges_pcss['b']['max'], ranges_pcss['g']['max'], ranges_pcss['r']['max']])
image_process = cv2.inRange(image, mins_pcss, maxs_pcss)
cv2.imshow("image_process", image_process)
# gray image --------------------------------------------------------
min_gray = cv2.getTrackbarPos("min", "mask")
max_gray = cv2.getTrackbarPos("max", "mask")
mask = cv2.inRange(image_gray, min_gray, max_gray)
cv2.imshow("mask", mask)
if __name__ == '__main__':
main()
| JorgeFernandes-Git/PSR_AULAS_2021 | openCV/Ex3_MOUSE_TRACKBAR/main4.py | main4.py | py | 3,084 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",... |
37106040488 | from astropy.io import fits
import pandas as pd
import numpy as np
import time
import traceback
import os
flux1350=[]
flux3000=[]
df=pd.read_csv('/media/richard/Backup Plus/candidate_dr16_0.8_final.csv',low_memory=False)
groupid=df['GroupID_1']
specname=df['specname_new']
z=df['Z']
area_1350=df['LINEAREA_1350']
area_3000=df['LINEAREA_3000']
f=open('/media/richard/Backup Plus/error_log.txt',"a")
for i in range(len(specname)):
try:
if os.path.exists('/media/richard/Backup Plus/sdss_16_pair/'+str(specname[i]))==True:
fit=fits.open('/media/richard/Backup Plus/sdss_16_pair/'+str(specname[i]))
data_fit=fit[1].data
for j in range(len(data_fit.field('loglam'))):
lam=(10**(data_fit.field('loglam')[j]))/(z[i]+1)
if lam>1350-25 and lam<1350+25:
flux1350.append(data_fit.field('flux')[j])
elif lam>3000-25 and lam<3000+25:
flux3000.append(data_fit.field('flux')[j])
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_1350']=np.mean(flux1350)
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_3000']=np.mean(flux3000)
print(specname[i])
print(df['LINEAREA_1350'][i])
print(df['LINEAREA_3000'][i])
flux1350.clear()
flux3000.clear()
else:
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_1350']='nan'
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_3000']='nan'
continue
except TypeError:
print(specname[i])
traceback.print_exc()
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_1350']='nan'
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_3000']='nan'
f.write('cannot caculate the area:%s'%specname[i])
pass
except Exception:
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_1350']='nan'
df.loc[df.specname_new==df.specname_new[i],'LINEAREA_3000']='nan'
print('failed')
pass
df.to_csv('/home/richard/data/change-look-AGN/dr16_0.8_final.csv')
| RichardPeng0624/SDSSspectrum-painting | linearea.py | linearea.py | py | 2,118 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "astropy.io.fits.open",
... |
72601518114 | import requests
import json
import time
from string import ascii_letters,digits
import random
from base64 import b64encode
from hashlib import sha256
from os import urandom
def request_verify_code(phone_number):
random.seed(urandom(8))
h=sha256()
code_verifier=''.join([ random.choice(ascii_letters+digits) for _ in range(43)])
h.update(code_verifier.encode())
code_challenge=b64encode(h.digest()).decode().replace("=","").replace("+","-").replace("/","_")
header={
"Content-Type":"application/json",
"User-Agent": "okhttp/3.12.1",
}
data={
"code_challenge": code_challenge,
"code_challenge_method": "S256",
"username": "+886"+phone_number[1:]
}
requests.post("https://auth.ridegoshareapi.com/v1/sms_code",data=json.dumps(data),headers=header)
return code_verifier
def submit_verify_code(phone_number,code_verifier,sms_code):
header={
"User-Agent": "okhttp/3.12.1",
"Authorization":"Basic NTFlZmVkNjktYjQ0MC00N2Q3LThhNTMtMmEzY2ViZTY1YzkyOg=="
}
data={
"grant_type":"sms",
"username":"+886"+phone_number[1:],
"uuid":"54ed48f9-9f9e-4162-8542-2ed8f35ebb50",
"sms_code":sms_code,
"code_verifier":code_verifier
}
res=requests.post("https://auth.ridegoshareapi.com/oauth/token",data=data,headers=header)
data=res.json()
print(data)
access_token=data["access_token"]
refresh_token=data['refresh_token']
return access_token,refresh_token
def get_scooter_id(plate):
res=requests.get("https://rental.ridegoshareapi.com/v2/cities/514f2d1d-9faf-490b-b9b2-fe8ce4dce584/scooters")
for scooter in res.json()['upsert_lst']:
if scooter['plate']==plate:
return scooter["id"]
return None
def reserve(scooter_id,access_token):
data={
"corporate_type": 0,
"scooter_id": scooter_id,
"source": 0
}
header={
"User-Agent": "okhttp/3.12.1",
"Authorization":f"Bearer {access_token}",
"Content-Type":"application/json"
}
res=requests.post("https://rental.ridegoshareapi.com/v2.1/rentals",data=json.dumps(data),headers=header)
data=res.json()
return data["id"]
def cancel_reserve(reserve_id,access_token):
data={
"action": 0,
"payment_method": 0
}
header={
"User-Agent": "okhttp/3.12.1",
"Authorization":f"Bearer {access_token}",
"Content-Type":"application/json"
}
res=requests.patch(f"https://rental.ridegoshareapi.com/v2.1/rentals/{reserve_id}",data=json.dumps(data),headers=header)
#assert res.json()['id']==reserve_id
#assert res.json()['state']==4
def refresh_token(refresh_token):
header={
"User-Agent": "okhttp/3.12.1",
"Authorization":"Basic NTFlZmVkNjktYjQ0MC00N2Q3LThhNTMtMmEzY2ViZTY1YzkyOg=="
}
data={
"grant_type":"refresh_token",
"uuid":"54ed48f9-9f9e-4162-8542-2ed8f35ebb50",
"refresh_token":refresh_token
}
res=requests.post("https://auth.ridegoshareapi.com/oauth/token",data=data,headers=header)
data=res.json()
access_token=data["access_token"]
refresh_token=data['refresh_token']
return access_token,refresh_token | chenliTW/goshare-reserve | src/utils.py | utils.py | py | 3,273 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hashlib.sha256",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_numbe... |
18995343621 | # Factor Analysis
import matplotlib.pyplot as plt
def Factor_EM(Input_data,K_sub=3):
trans=Input_data
(N,D)=trans.shape
#Initialize
u=np.mean(trans,axis=0)
Input_data=FTI
sigma_full=np.cov(FTI.transpose())
[U_matrix,D_matrix,V_matrix]=np.linalg.svd(sigma_full)
eta_start=np.multiply(np.sqrt(D_matrix[0:K_sub]),U_matrix[:,0:K_sub])#
sigma_start=np.diag(np.diag(sigma_full)-np.diag(np.dot(eta_start,eta_start.transpose())))
sigma_current=sigma_start
eta_current=eta_start
Exp_hh=np.zeros((N,K_sub,K_sub))
sum_Exp_hh=np.zeros((K_sub,K_sub))
for t in range(60):
sigma_current_inv=np.linalg.pinv(sigma_current)
a=np.dot(np.dot(eta_current.transpose(),sigma_current_inv),eta_current)
b=np.linalg.pinv(a+np.identity(K_sub))
d=trans-u
Exp_h=np.dot(np.dot(np.dot(b,eta_current.transpose()),sigma_current_inv),d.transpose()).transpose()#N*K
for i in range(N):
Exp_hh[i,:,:]= np.outer(Exp_h[i,:],Exp_h[i,:])+b
sum_Exp_hh=sum_Exp_hh+Exp_hh[i,:,:]
e=np.dot(Exp_h.transpose(),d)
eta_next=np.dot(e.transpose(),np.linalg.pinv(sum_Exp_hh))
sigma_next=np.diag(np.diag(np.dot(d.transpose(),d))-np.diag(np.dot(eta_next,e)))/N#D*D
#checking convergence
delta_eta=np.linalg.norm(eta_next-eta_current)/np.linalg.norm(eta_current)
delta_sigma=np.linalg.norm(sigma_next-sigma_current)/np.linalg.norm(sigma_current)
eta_current=eta_next
sigma_current=sigma_next
eta=eta_current
sigma=sigma_current
return([u,eta,sigma])
def Factor_logP(Input_data_orig,F=True,K_sub=3):
if(F==True):#for F
Input_data=Input_data_orig
[u,eta,sigma]=Factor_EM(FTI,K_sub=K_sub)
else:#for NonF
Input_data=Input_data_orig
[u,eta,sigma]=Factor_EM(NonFTI,K_sub=K_sub)
temp_1=np.dot(eta,eta.transpose())+sigma
temp_2=Input_data-u
log_p=-(1/2)*np.sum(np.log(np.linalg.svd(temp_1)[1]))-(1/2)*np.sum(np.multiply(np.dot(temp_2,np.linalg.pinv(temp_1)),temp_2),axis=1)
return(log_p)
def Label_Factor(Input_data_orig,K_sub=3,threshold=0.5):
delta=Factor_logP(Input_data_orig,F=True,K_sub=K_sub)-Factor_logP(Input_data_orig,F=False,K_sub=K_sub)
rt=np.log(threshold/(1-threshold))
if(isinstance(threshold,np.ndarray)==False):
estimated_label=np.zeros(Input_data_orig.shape[0])
estimated_label[[i for i in range(Input_data_orig.shape[0]) if delta[i]>rt]]=1
return(estimated_label)
#FR
def FR_Factor(Input_data_orig,true_label,K_sub=3,threshold=0.5):
N=Input_data_orig.shape[0]
delta=Factor_logP(Input_data_orig,F=True,K_sub=K_sub)-Factor_logP(Input_data_orig,F=False,K_sub=K_sub)#log_p_F-log_p_nonF
rt=np.log(threshold/(1-threshold))
if(isinstance(threshold,np.ndarray)==False):#threshold is a scalar
#F_or_nonF
estimated_label=np.zeros(N)
estimated_label[[i for i in range(N) if delta[i]>rt]]=1
#False Rate
FR=np.zeros(3)
FR[0]=np.mean(estimated_label[[i for i in range(N) if true_label[i]==0]])
FR[1]=1-np.mean(estimated_label[[i for i in range(N) if true_label[i]==1]])
FR[2]=np.mean(np.abs(estimated_label-true_label))
return(FR)
def Factor_ROC(Input_data_orig,true_label,rt_seq,K_sub=3):
N=Input_data_orig.shape[0]
delta=Factor_logP(Input_data_orig,F=True,K_sub=K_sub)-Factor_logP(Input_data_orig,F=False,K_sub=K_sub)#log_p_F-log_p_nonF
if(isinstance(rt_seq,np.ndarray)):#threshold is a seq
FR=np.zeros((2,len(rt_seq)))
for i in range(len(rt_seq)):
#F_or_nonF
rt=rt_seq[i]
estimated_label=np.zeros(N)
estimated_label[[i for i in range(N) if delta[i]>rt]]=1
#False Rate
FR[0,i]=np.mean(estimated_label[[i for i in range(N) if true_label[i]==0]])
FR[1,i]=1-np.mean(estimated_label[[i for i in range(N) if true_label[i]==1]])
plt.plot(FR[0,:],1-FR[1,:],"r--")
plt.show()
#Evaluating the learned model on the testing images
Test_true_label=np.zeros(200)
Test_true_label[0:100]=1
TTL=np.zeros(2000)
TTL[0:1000]=1
print(FR_Factor(TI,true_label=TTL,threshold=0.5,K_sub=3))
print(FR_Factor(Test_images,true_label=Test_true_label,threshold=0.5,K_sub=3))
Factor_ROC(TI,true_label=TTL,rt_seq=np.arange(-1500,1500,100),K_sub=3)
[F_u,F_eta,F_sigma]=Factor_EM(FTI,3)
[NonF_u,NonF_eta,NonF_sigma]=Factor_EM(NonFTI,3)
#mean
plt.subplot(2, 2, 1)
plt.imshow(F_u.reshape((10,10,3)).astype(int))
plt.title("mean-Face")
plt.subplot(2, 2, 2)
plt.imshow(NonF_u.reshape((10,10,3)).astype(int))
plt.title("mean-NonFace")
#covariance
plt.subplot(2, 2, 3)
cov_diag=np.diag(np.dot(F_eta,F_eta.transpose())+F_sigma)
[min_v,max_v]=[np.min(cov_diag),np.max(cov_diag)]
norm_cov_diag=(cov_diag-min_v)/(max_v-min_v)*255
plt.imshow(norm_cov_diag.reshape((10,10,3)).astype(int))
plt.title("cov-Face")
plt.subplot(2, 2, 4)
cov_diag=np.diag(np.dot(NonF_eta,NonF_eta.transpose())+NonF_sigma)
[min_v,max_v]=[np.min(cov_diag),np.max(cov_diag)]
norm_cov_diag=(cov_diag-min_v)/(max_v-min_v)*255
plt.imshow(norm_cov_diag.reshape((10,10,3)).astype(int))
plt.title("cov-NonFace")
# FLASE POSITIVE RATE, FALSE NEGATIVE RATE AND MISCLASSIFICATION RATE FOR ALL THE MODELS
print(FR_Gaussian(Test_images,true_label=Test_true_label,threshold=0.5))
print(FR_Mix_Gaussian(Test_images,Test_true_label,K=6,threshold=0.5))
print(FR_T(Test_images,true_label=Test_true_label,threshold=0.5,v_start=5))
print(FR_Factor(Test_images,true_label=Test_true_label,threshold=0.5,K_sub=3))
| saikrishnawds/Generative-Face-Image-Classification | model4_Factor_analysis.py | model4_Factor_analysis.py | py | 5,914 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "ma... |
71165257954 | import cv2
import librosa
import numpy as np
import random
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler, scale
def scale_feature(feature, featureSize):
widthTarget, heightTarget = featureSize
height, width = feature.shape
# scale according to factor
newSize = (int(width / 4),41)
#print ('newSize ={}, old size = {}'.format(newSize, feature.shape ))
feature = cv2.resize(feature, newSize)
# Normalization
scaler = StandardScaler()
feature = scaler.fit_transform(feature)
feature = np.pad(feature, ((0, 0), (0, widthTarget - feature.shape[1])), 'constant')
#transpose
feature = np.transpose(feature)
return feature
def sample_preprocess(sample_path):
file = str(sample_path)
y,sr=librosa.load(file)
ZCR = librosa.feature.zero_crossing_rate(y, frame_length=2048, hop_length=512, center=True)
mel_spec = librosa.feature.melspectrogram(y=y, sr=sr, S=None, n_fft=2048,
hop_length=512, win_length=None, window='hann',
center=True, pad_mode='reflect', power=2.0)
chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr, S=None, norm=None, n_fft=2048,
hop_length=512, win_length=None, window='hann',
center=True, pad_mode='reflect', tuning=None, n_chroma=12)
MFCC = librosa.feature.mfcc(y=y, sr=sr, S=None, n_mfcc=20, dct_type=2, norm='ortho', lifter=0)
spectral_centroid = librosa.feature.spectral_centroid(y=y, sr=sr, S=None, n_fft=2048,
hop_length=512, freq=None, win_length=None, window='hann',
center=True, pad_mode='reflect')
spectral_bandwidth = librosa.feature.spectral_bandwidth(y=y, sr=sr, S=None, n_fft=2048,
hop_length=512, win_length=None, window='hann',
center=True, pad_mode='reflect', freq=None, centroid=None, norm=True, p=2)
spectral_rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, S=None, n_fft=2048,
hop_length=512, win_length=None, window='hann',
center=True, pad_mode='reflect', freq=None, roll_percent=0.85)
feature = np.concatenate((mel_spec,chroma_stft,MFCC,ZCR,spectral_centroid,spectral_bandwidth,spectral_rolloff),
axis=0)
feature = librosa.power_to_db(feature, ref=np.max)
#length = aug_feature.shape[1]
max_length = 1292
scaled_feature = scale_feature(feature,featureSize = (int(max_length/4), 41)) # 323 = 1292/4, 41 = 164/4 (from original data)
scaled_feature = scaled_feature.reshape(-1, int(max_length/4), 41, 1)
return scaled_feature
| ksraj/CoughVid | helper/preprocessor.py | preprocessor.py | py | 3,017 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.resize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.trans... |
71276798435 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import base64
import requests
from contextlib import closing
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
def authenticate_gmail_api(credentials):
"""Authenticate email API from credentials declared from an env variable.
Args:
credentials: A string containing the filepath to the API credentials.
token:
Returns:
API service object.
"""
SCOPE = ['https://www.googleapis.com/auth/gmail.readonly']
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
# TODO: Resolve contextlib closures once token and credentials are determined on GCP
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPE)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials, SCOPE)
creds = flow.run_local_server(port=50683)
# Save the credentials for the next run
with closing(open('token.json', 'w')) as token:
token.write(creds.to_json())
service = build('gmail', 'v1', credentials=creds)
return service
def get_email_content(service, query = None):
"""Use the GMail API to query email bodies and decode into string format.
Args:
service: GMail API service object.
query: A string containing GMail search syntax.
Returns:
decoded_list: A list of email bodies.
"""
# Call the Gmail API
emails = service.users().messages().list(userId='me',q=query, maxResults=101,includeSpamTrash=False).execute()
emails = emails.get('messages')
if emails is not None:
id_list = [id['id'] for id in emails]
body_list = []
# Check for container MIME message parts.
body = service.users().messages().get(userId='me',id=id_list[0]).execute().get('payload').get('body').get('data')
if body == None:
for email in id_list:
body = service.users().messages().get(userId='me',id=email).execute().get('payload').get('parts')
text = body[0].get('body').get('data')
html = body[1].get('body').get('data')
body = tuple((text,html))
body_list.append(html)
else:
for email in id_list:
body = service.users().messages().get(userId='me',id=email).execute().get('payload').get('body').get('data')
body_list.append(body)
bytes_list = [bytes(str(x),encoding='utf-8') for x in body_list]
decoded_list = [base64.urlsafe_b64decode(x) for x in bytes_list]
str_list = [str(x) for x in decoded_list]
return str_list
service.close()
def get_rescuetime_daily(KEY):
"""Use the RescueTime API to get daily totals for the past two weeks of time spent on personal digital devices.
Args:
KEY: RescueTime API Key.
Returns:
rescuetime_tuple: List of tuples containing daily total, productive, distracting, and neutral hours.
"""
# Append API key to API URL.
url = f'https://www.rescuetime.com/anapi/daily_summary_feed?key={KEY}'
with closing(requests.Session()) as session:
r = session.get(url)
iter_result = r.json()
days = [day.get('date') for day in iter_result]
prod_hours = [day.get('all_productive_hours') for day in iter_result]
dist_hours = [day.get('all_distracting_hours') for day in iter_result]
neut_hours = [day.get('neutral_hours') for day in iter_result]
rescuetime_tuple = [(day,p,d,n) for (day,p,d,n) in zip(days,prod_hours,dist_hours,neut_hours)]
session.close()
return rescuetime_tuple
| ColtAllen/codex_vitae_app | codex_vitae/etl/api_requests.py | api_requests.py | py | 4,294 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 36,
"usage_type": "call"
}... |
73085692514 | import numpy as np
import pygame
class GridEnvironment:
def __init__(self):
self.grid = np.zeros((7,7))
self.starting_position = [0,0]
self.goal_position = [5,4]
self.reset()
self.walls = [[3, 2], [2, 3], [1, 4]]
#actions are up, down, left, right mapped to 0, 1, 2, 3
def reset(self):
self.agent = self.starting_position
self.special_blocks = {
-2: [4, 1],
2: [2, 2],
1: [2, 4],
3: [4, 6]
}
return self.agent
def valid_move(self, state, action):
# Check for walls or outside grid
new_position = list(state)
if action == 0:
new_position[0] -= 1
elif action == 1:
new_position[0] += 1
elif action == 2:
new_position[1] -= 1
elif action == 3:
new_position[1] += 1
if new_position in self.walls or new_position[0] < 0 or new_position[0] > 6 or new_position[1] < 0 or new_position[1] > 6:
return False
return True
def check_value(self, state):
for reward_value, coordinates in self.special_blocks.items():
if coordinates == state:
return reward_value
return 0
def remove_value(self, state):
for reward_value, coordinates in list(self.special_blocks.items()):
if coordinates == state:
self.special_blocks.pop(reward_value)
break
def next_state(self, action):
new_position = list(self.agent)
if action == 0:
new_position[0] -= 1
elif action == 1:
new_position[0] += 1
elif action == 2:
new_position[1] -= 1
elif action == 3:
new_position[1] += 1
if self.valid_move(self.agent, action):
return new_position
else:
return self.agent
def step(self, action):
next_position = self.next_state(action)
reward = -1 # default reward
special_reward = self.check_value(next_position)
if special_reward:
reward += special_reward
self.remove_value(next_position)
self.agent = next_position # updating the agent's position
if self.agent == self.goal_position:
reward += 10
return next_position, reward, True
return next_position, reward, False
def render(self, q_table):
pygame.init()
self.reset()
# Configurations
BLOCK_SIZE = 60 # Increased block size
WINDOW_SIZE = [self.grid.shape[0] * BLOCK_SIZE, self.grid.shape[1] * BLOCK_SIZE]
screen = pygame.display.set_mode(WINDOW_SIZE)
pygame.display.set_caption('Q-table Visualization')
colors = {
'white': (255, 255, 255),
'red': (255, 0, 0),
'green': (0, 255, 0),
'blue': (0, 0, 255),
'yellow': (255, 255, 0),
'gray': (200, 200, 200)
}
actions = ["up", "down", "left", "right"]
font = pygame.font.SysFont(None, 25) # Reduced font size
clock = pygame.time.Clock()
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
screen.fill(colors['white'])
for i in range(self.grid.shape[0]):
for j in range(self.grid.shape[1]):
state = (i, j)
max_action = np.argmax(q_table[state])
# Choose color based on max action value
max_val = q_table[state][max_action]
color = colors['green'] if max_val > 0 else colors['red'] if max_val < 0 else colors['blue']
pygame.draw.rect(screen, color, [j * BLOCK_SIZE, i * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE])
# Display action text
action_text = font.render(actions[max_action], True, colors['white'])
screen.blit(action_text, (j * BLOCK_SIZE + 10, i * BLOCK_SIZE + 10))
# Check and draw special blocks and their values
for reward_value, coordinates in self.special_blocks.items():
if coordinates == [i, j]:
pygame.draw.circle(screen, colors['yellow'],
(j * BLOCK_SIZE + BLOCK_SIZE // 2, i * BLOCK_SIZE + BLOCK_SIZE // 2),
BLOCK_SIZE // 3)
reward_text = font.render(str(reward_value), True, colors['gray'])
screen.blit(reward_text,
(j * BLOCK_SIZE + BLOCK_SIZE // 2 - 10, i * BLOCK_SIZE + BLOCK_SIZE // 2 - 10))
# Draw walls
if [i, j] in self.walls:
pygame.draw.rect(screen, colors['gray'],
[j * BLOCK_SIZE, i * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE])
pygame.display.flip()
clock.tick(60)
pygame.quit()
| Jakub202/IDATT2502-ML | excercise8/gridworld/GridEnvironment.py | GridEnvironment.py | py | 5,224 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"... |
33565357577 | import networkx as nx
import numpy as np
from scipy import random
import pandas as pd
import copy
import random
from collections import OrderedDict, Counter
from multiprocessing import Pool
import itertools
import matplotlib.pyplot as plt
#%matplotlib inline
def generate_my_simplicial_complex_d2(N,p1,p2):
"""Our model"""
#I first generate a standard ER graph with edges connected with probability p1
G = nx.fast_gnp_random_graph(N, p1, seed=None)
if not nx.is_connected(G):
giant = list(nx.connected_components(G))[0]
G = nx.subgraph(G, giant)
print('not connected, but GC has order %i ans size %i'%(len(giant), G.size()))
triangles_list = []
G_copy = G.copy()
#Now I run over all the possible combinations of three elements:
for tri in combinations(list(G.nodes()),3):
#And I create the triangle with probability p2
if random.random() <= p2:
#I close the triangle.
triangles_list.append(tri)
#Now I also need to add the new links to the graph created by the triangle
G_copy.add_edge(tri[0], tri[1])
G_copy.add_edge(tri[1], tri[2])
G_copy.add_edge(tri[0], tri[2])
G = G_copy
#Creating a dictionary of neighbors
node_neighbors_dict = {}
for n in list(G.nodes()):
node_neighbors_dict[n] = G[n].keys()
#print len(triangles_list), 'triangles created. Size now is', G.size()
#avg_n_triangles = 3.*len(triangles_list)/G.order()
#return node_neighbors_dict, node_triangles_dict, avg_n_triangles
#return node_neighbors_dict, triangles_list, avg_n_triangles
return node_neighbors_dict, triangles_list
def get_p1_and_p2_correction(k1,k2,N):
p2 = (2.*k2)/((N-1.)*(N-2.))
p1 = (k1 - 2.*k2)/((N-1.)- 2.*k2)
if (p1>=0) and (p2>=0):
return p1, p2
else:
raise ValueError('Negative probability!')
def one_realization(ii):
print('ITERATION %i'%ii)
#Generating a simplicial complex with the given parameters
node_neighbors_dict, triangles_list = generate_my_simplicial_complex_d2(N,p1,p2)
return node_neighbors_dict, triangles_list
def parse_results(results):
degree_list = []
tri_degree_list = []
for res in results:
node_neighbors_dict, triangles_list = res
#Adding the degree sequence of this realization to the global list
degree_list += [len(neighs) for neighs in node_neighbors_dict.itervalues()]
#creating a triangles degree sequence and adding it to the global list
vertex_in_a_triangle = []
for tri in triangles_list:
vertex_in_a_triangle += tri
tri_degree_list += Counter(vertex_in_a_triangle).values()
degree_counter = Counter(degree_list)
triangle_degree_counter = Counter(tri_degree_list)
return degree_counter, triangle_degree_counter
#Simplicial Complex
N = 2000
k1 = 20
k2 = 6
p1, p2 = get_p1_and_p2_correction(k1,k2,N)
n_simulations = 80
n_processes = 8
#################################################
iteration_numbers = range(n_simulations)
pool = Pool(n_processes)
results = pool.map(one_realization, iteration_numbers)
degree_counter, triangle_degree_counter = parse_results(results)
fig = plt.figure(figsize=(4.5,3));
plt.rcParams['xtick.major.width'] = 1.2
plt.rcParams['ytick.major.width'] = 1.2
plt.rcParams['axes.linewidth'] = 1.2
ax = plt.subplot(111)
norm_k1 = 1.*np.array(degree_counter.values())/sum(degree_counter.values())
norm_k2 = 1.*np.array(triangle_degree_counter.values())/sum(triangle_degree_counter.values())
#################### plot pdf
ax.plot(degree_counter.keys(), norm_k1, 's-', label='$k_1$',
clip_on=True, mfc='white');
ax.plot(triangle_degree_counter.keys(), norm_k2, 'o-', label='$k_2$',
clip_on=True, mfc='white');
#################### real mean
sum_of_numbers = sum(number*count for number, count in degree_counter.iteritems())
count = sum(count for n, count in degree_counter.iteritems())
mean_k1 = float(sum_of_numbers) / count
sum_of_numbers = sum(number*count for number, count in triangle_degree_counter.iteritems())
count = sum(count for n, count in triangle_degree_counter.iteritems())
mean_k2 = float(sum_of_numbers) / count
ax.vlines(mean_k1,0,0.2, linewidth=1.5, linestyles='--',
color=u'#1f77b4', label=r'$\langle k_1 \rangle$');
ax.vlines(mean_k2,0,0.2, linewidth=1.5, linestyles='--',
color=u'#ff7f0e', label=r'$\langle k_2 \rangle$');
#################### expected mean
ax.vlines(k2,0,0.2, linewidth=1.5, linestyles='-', color='lightgray', zorder=1, label=r'$\approx$');
ax.vlines(k1,0,0.2, linewidth=1.5, linestyles='-', color='lightgray', zorder=1);
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('Generalized degree, $k_\omega$', size=18)
ax.set_ylabel('$P(k_\omega)$', size=18)
ax.set_ylim(0,0.2);
ax.set_xlim(0,40);
plt.legend(fontsize=14, loc='upper right', handlelength=1, frameon=False, borderpad=0)
plt.tight_layout() | kittan13/school_lab | simplagion-master/Generalized degree distribution of the Random Simplicial Complex model.py | Generalized degree distribution of the Random Simplicial Complex model.py | py | 5,339 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "networkx.fast_gnp_random_graph",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "networkx.is_connected",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "networkx.connected_components",
"line_number": 23,
"usage_type": "call"
},
{
... |
20918803999 | # -*- coding: utf-8 -*-
import os
import sys
import time
import pickle
import requests
import pandas as pd
from tqdm import tqdm
from urllib import response
from bs4 import BeautifulSoup
class Scraper:
def __init__(self):
self.base_url = "https://scholar.google.com/"
dir_persistent = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "data", "interim", "H5Index")
if not os.path.exists(dir_persistent):
os.makedirs(dir_persistent)
self.path_persistent = os.path.join(
dir_persistent, "h5_index_rankings.pkl")
def scrape(self):
if not self._load_rankings():
print("H5-Index rankings not scraped yet. Scraping now...\n")
categories = self._scrape_categories()
subcategories = self._scrape_subcategories(categories)
self.rankings = self._scrape_rankings(subcategories)
print("Finished.\n")
self._save_rankings()
else:
print("H5-Index rankings already persistent.\n")
# Show some statistics
print("Statistics")
print("\tNumber categories: {}.".format(len(
self.rankings.category.unique())))
print("\tNumber subcategories: {}.".format(len(
self.rankings.subcategory.unique())))
print("\tNumber rankings: {}.\n".format(len(
self.rankings.h5_index)))
df = self.rankings.groupby("category")["subcategory"].agg(
set).reset_index()
df["len"] = df.subcategory.apply(lambda x: len(x))
min_len = df["len"].min()
max_len = df["len"].max()
print("\tMean number of subcategories: {}.".format(df["len"].mean()))
print("\tMin number of subcategories is {} for category {}.".format(
min_len, list(df[df["len"] == min_len].category)[0]))
print("\tMax number of subcategories is {} for category {}.\n".format(
max_len, list(df[df["len"] == max_len].category)[0]))
def _scrape_categories(self):
print("Scraping categories...")
url = self.base_url + "/citations?view_op=top_venues&hl=en"
try:
data = requests.get(url)
except Exception as e:
print(str(e))
if data.status_code != 200:
raise ConnectionError(
"Failed to open url: {} (status code: {}).".format(
url, response.status_code))
soup = BeautifulSoup(data.text, 'lxml')
categories = list()
for item in soup.find_all(
'a', attrs={"class": "gs_md_li",
"role": "menuitem", "tabindex": "-1"}):
category = item.get_text()
link = item.get("href")
if len(category.split()) > 1:
categories.append((category, self.base_url + link))
categories_df = pd.DataFrame(categories, columns=["category", "url"])
print("Scraped {} categories.\n".format(len(categories_df)))
return categories_df
def _scrape_subcategories(self, categories):
print("Scraping subcategories...")
subcategories = list()
count_categories = len(categories)
with tqdm(desc="Scraping subcategories: ",
total=count_categories) as pbar:
for idx in range(count_categories):
url = categories.url.iloc[idx]
try:
data = requests.get(url)
except Exception as e:
print(str(e))
if data.status_code != 200:
raise ConnectionError(
"Failed to open url: {} (status code: {}).".format(
url, response.status_code))
soup = BeautifulSoup(data.text, "lxml")
for item in soup.find_all(
'a', attrs={"class": "gs_md_li", "role": "menuitem",
"tabindex": "-1"}):
subcategory = item.get_text()
link = item.get("href")
if len(subcategory.split()) > 1:
subcategories.append((
categories.category.iloc[idx],
subcategory,
self.base_url + link))
time.sleep(5)
pbar.update(1)
subcategories_df = pd.DataFrame(
subcategories, columns=["category", "subcategory", "url"])
print("Scraped {} subcategories for {} categories.\n".format(
len(subcategories_df), count_categories))
return subcategories_df
def _scrape_rankings(self, subcategories):
print("Scraping rankings...")
category = list()
subcategory = list()
publication = list()
h5_index = list()
h5_median = list()
count_subcategories = len(subcategories)
with tqdm(desc="Scraping rankings: ",
total=count_subcategories) as pbar:
for idx in range(count_subcategories):
url = subcategories.url.iloc[idx]
try:
data = requests.get(url)
except Exception as e:
print(str(e))
if data.status_code != 200:
raise ConnectionError(
"Failed to open url: {} (status code: {}).".format(
url, response.status_code))
soup = BeautifulSoup(data.text, "lxml")
for content in soup.body.find_all(
'table', attrs={'class' : 'gsc_mp_table'}):
for table in content.find_all('tr'):
i = 0
for infos in table.find_all("td"):
if i == 1:
publication.append(infos.get_text())
if i == 2:
h5_index.append(infos.get_text())
if i == 3:
h5_median.append(infos.get_text())
i += 1
category.extend([subcategories.category.iloc[idx]] * 20)
subcategory.extend(
[subcategories.subcategory.iloc[idx]] * 20)
time.sleep(5)
pbar.update(1)
rankings = pd.DataFrame({
"category": category, "subcategory": subcategory,
"publication": publication, "h5_index": h5_index,
"h5_median": h5_median})
print("Finished scraping {} rankings for {} subcategories and {} categories.\n".format(
len(rankings.h5_index), len(rankings.category.unique()),
len(rankings.subcategory.unique())))
return rankings
def _save_rankings(self):
print("Saving rankings to disk...")
with open(self.path_persistent, "wb") as f:
pickle.dump(self.rankings, f)
print("Saved.\n")
def _load_rankings(self):
if os.path.isfile(self.path_persistent):
print("Loading H5-index rankings...")
with open(self.path_persistent, "rb") as f:
self.rankings = pickle.load(f)
print("Loaded.\n")
return True
return False
def main():
print("Starting...\n")
from H5IndexScraper import Scraper
scraper = Scraper()
scraper.scrape()
print("Finished.")
if __name__ == "__main__":
main()
| andreeaiana/graph_confrec | src/data/H5IndexScraper.py | H5IndexScraper.py | py | 7,691 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
28776224145 | import sys
from collections import deque
input = sys.stdin.readline
def bfs(i, j):
visited[i][j] = 1
union = [(i, j)]
_sum = graph[i][j]
q.append((i, j))
while q:
x, y = q.popleft()
for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
nx = x + dx
ny = y + dy
if 0 <= nx < n and 0 <= ny < n and l <= abs(graph[x][y] - graph[nx][ny]) <= r and not visited[nx][ny]:
visited[nx][ny] = 1
q.append((nx, ny))
union.append((nx, ny))
_sum += graph[nx][ny]
if len(union) > 1:
avg = _sum // len(union)
for x, y in union:
graph[x][y] = avg
_next.append((x, y))
return True
return False
n, l, r = map(int, input().split())
graph = [list(map(int, input().split())) for _ in range(n)]
visited = []
_next = deque()
q = deque()
cnt = 0
for i in range(n):
for j in range(n):
_next.append((i, j))
while True:
is_moved = False
visited = [[0] * n for _ in range(n)]
for _ in range(len(_next)):
i, j = _next.popleft()
if not visited[i][j]:
if bfs(i, j): is_moved = True
if not is_moved:
print(cnt)
break
cnt += 1
| jiyoon127/algorithm_study | Implementation/인구_이동.py | 인구_이동.py | py | 1,264 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 34,
"usage_type": "call"
}
] |
73793723235 | # Create a Scraper that extracts information about job descriptions
# 1. Open up website
# 2. Parse the HTML and gather content objects from the indeed page
# - list of Job Titles
# - list of Job Descriptions
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import json
import time
with open("aggregate_job_info.json", "r") as f:
aggregate_job_info = json.load(f)
try:
driver_path = r"chromedriver.exe" # Replace with your Chrome driver path
service = Service(driver_path)
driver = webdriver.Chrome(service=service)
driver.get(r"https://www.indeed.com/jobs?q=computer+science+internship&start=90&pp=gQAPAAAAAAAAAAAAAAACFjq7MgAqAQAIDFM_StAtKUnkYv0UY6MkuMvhXH8rBs1KaUeq58G4VwL8sAAcULK8AAA&vjk=6cb6793d38e2cf21")
# aggregate_job_info = []
running = True
while running:
time.sleep(1)
link_elements = driver.find_elements(By.CLASS_NAME, "jcs-JobTitle.css-jspxzf.eu4oa1w0")
for link_element in link_elements:
job_info = {}
req_list = []
link_element.click()
time.sleep(0.5)
job_title_element = driver.find_element(By.CLASS_NAME, "jobsearch-JobInfoHeader-title-container")
# job_title_element = driver.find_element(By.CLASS_NAME, "icl-u-xs-mb--xs.icl-u-xs-mt--none.jobsearch-JobInfoHeader-title.is-embedded")
job_info["title"] = job_title_element.text
job_description_element = driver.find_element(By.ID, "jobDescriptionText")
unordered_lists = job_description_element.find_elements(By.TAG_NAME, "ul")
for unordered_list in unordered_lists:
list_items = unordered_list.find_elements(By.TAG_NAME, "li")
for list_item in list_items:
print(list_item.text)
req_list.append(list_item.text)
job_info["reqs"] = req_list
job_info["link"] = driver.current_url
aggregate_job_info.append(job_info)
next_buttons = driver.find_elements(By.CLASS_NAME, "e8ju0x50")
if(len(next_buttons) == 6):
running = False
else:
next_buttons[len(next_buttons)-2].click()
except Exception as e:
print(f"An exception occurred: {e}")
if 'driver' in locals():
driver.quit()
with open("aggregate_job_info.json", "w") as f:
json.dump(aggregate_job_info, f)
driver.quit()
| nitink23/Sachacks2023nava | job_scraper/indeed_scraper.py | indeed_scraper.py | py | 2,542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 20,
"usage_type": "call"
},
{
"api... |
28849219985 | import pickle
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
from datetime import datetime
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import math
import re
def prediction_cycling(weight, duration, sports):
cycling_data = {'weight':[130,130,130,130,130,130,155,155,155,155,155,155,180,180,180,180,180,180,205,205,205,205,205,205],
'intensity/level':['<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph','<10 mph','>20 mph','10-11.9 mph','12-13.9 mph','14-15.9 mph','16-19 mph'],
'calories':[236, 944, 354, 472, 590, 708, 281, 1126, 422, 563, 704, 844, 327, 1308, 490, 654, 817, 981, 372, 1489, 558, 745, 931, 1117]}
cycling_df = pd.DataFrame(cycling_data)
cycling_df['intensity'] = [0 if x == '<10 mph' else 1 if x == '10-11.9 mph' else 2 if x == '12-13.9 mph' else 3 if x == '14-15.9 mph' else 4 if x == '16-19 mph' else 5 for x in cycling_df['intensity/level']]
cycling_X = cycling_df[["weight","intensity"]]
cycling_y = cycling_df[["calories"]]
cycling_X_train,cycling_X_test, cycling_y_train,cycling_y_test = train_test_split(cycling_X,cycling_y,test_size=0.2,random_state=42)
model1 = LinearRegression()
model1.fit(cycling_X_train,cycling_y_train)
cycling_y_pred = model1.predict([[weight, sports]])/60*duration
return cycling_y_pred
def prediction_running(weight, duration, sports):
running_data = {'weight':[130,130,130,130,130,130,130,130,130,130,130,155,155,155,155,155,155,155,155,155,155,155,180,180,180,180,180,180,180,180,180,180,180,205,205,205,205,205,205,205,205,205,205,205],
'intensity/level': ['5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph','5 mph', '5.2 mph', '6 mph', '6.7 mph', '7 mph', '7.5 mph', '8 mph', '8.6 mph', '9 mph', '10 mph', '10.9 mph'],
'calories': [472, 531, 590, 649, 679, 738, 797, 826, 885, 944, 1062, 563, 633, 704, 774, 809, 880,950, 985, 1056, 1126, 1267, 654, 735, 817, 899,940, 1022, 1103, 1144, 1226, 1308, 1471, 745, 838, 931, 1024, 1070, 1163, 1256, 1303, 1396, 1489, 1675]}
running_df = pd.DataFrame(running_data)
running_df['intensity'] = [0 if x == '5 mph' else 1 if x == '5.2 mph' else 2 if x == '6 mph' else 3 if x == '6.7 mph' else 4 if x == '7 mph' else 5 if x == '7.5 mph' else 6 if x == '8 mph' else 7 if x == '8.6 mph' else 8 if x == '9 mph' else 9 if x == '10 mph' else 10 for x in running_df['intensity/level']]
running_X = running_df[["weight","intensity"]]
running_y = running_df[["calories"]]
running_X_train,running_X_test, running_y_train,running_y_test = train_test_split(running_X,running_y,test_size=0.2,random_state=42)
model2 = LinearRegression()
model2.fit(running_X_train,running_y_train)
running_y_pred = model2.predict([[weight, sports]])/60*duration
return running_y_pred
def prediction_walking(weight, duration, sports):
walking_data = {'weight':[130,130,130,130,130,130,130,155,155,155,155,155,155,155,180,180,180,180,180,180,180,205,205,205,205,205,205,205],
'intensity/level':['2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph','2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph', '2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph', '2.0 mph', '2.5 mph', '3.0 mph', '3.5 mph', '4.0 mph', '4.5 mph', '5.0 mph'],
'calories': [148,177,195,224,295,372,472,176,211,232,267,352,443,563,204,245,270,311,409,515,654,233,279,307,354,465,586,745]}
walking_df = pd.DataFrame(walking_data)
walking_df['intensity'] = [0 if x == '2.0 mph' else 1 if x == '2.5 mph' else 2 if x == '3.0 mph' else 3 if x == '3.5 mph' else 4 if x == '4.0 mph' else 5 if x == '4.5 mph' else 6 for x in walking_df['intensity/level']]
walking_X = walking_df[["weight","intensity"]]
walking_y = walking_df[["calories"]]
walking_X_train,walking_X_test, walking_y_train,walking_y_test = train_test_split(walking_X,walking_y,test_size=0.2,random_state=42)
model3 = LinearRegression()
model3.fit(walking_X_train,walking_y_train)
walking_y_pred = model3.predict([[weight, sports]])/60*duration
return walking_y_pred
def prediction_swimming(weight, duration, sports):
global swimming_df
swimming_data = {'weight':[130,130,130,130,130,130,130,130,130,130,155,155,155,155,155,155,155,155,155,155,180,180,180,180,180,180,180,180,180,180,205,205,205,205,205,205,205,205,205,205],
'intensity/level':['freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate','freestyle fast','free style slow','backstroke','breaststroke','butterfly','leisurely','sidestroke','synchronized','trending water fast','trending water moderate'],
'calories':[590,413,413,590,649,354,472,472,590,236,704,493,493,704,774,422,563,563,704,281,817,572,572,817,899,490,654,654,817,327,931,651,651,931,1024,558,745,745,931,372]}
swimming_df = pd.DataFrame(swimming_data)
swimming_df['intensity'] = [0 if x == 'trending water moderate' else 1 if x == 'leisurely' else 2 if x == 'free style slow' else 3 if x == 'backstroke' else 4 if x == 'sidestroke' else 5 if x == 'synchronized' else 6 if x == 'freestyle fast' else 7 if x == 'breaststroke' else 8 if x == 'trending water fast' else 9 for x in swimming_df['intensity/level']]
swimming_X = swimming_df[["weight","intensity"]]
swimming_y = swimming_df[["calories"]]
swimming_X_train,swimming_X_test, swimming_y_train,swimming_y_test = train_test_split(swimming_X,swimming_y,test_size=0.2,random_state=42)
model4 = LinearRegression()
model4.fit(swimming_X_train,swimming_y_train)
swimming_y_pred = model4.predict([[weight, sports]])/60*duration
return swimming_y_pred
# st.header('Calories burned calculation')
# st.subheader('Sports Category')
# def app2():
global weight, sports, duration
st.header('Calories burned calculation')
st.subheader('Sports Category')
df = pd.read_csv('/Users/Calvin/Documents/GitHub/yolov5_streamlit/csv files/exercise_dataset_category2.csv')
df.rename(columns={'Activity, Exercise or Sport (1 hour)':'Sports'}, inplace=True)
#Top Sports DataFrame Only
trying = df.loc[df['Category'].str.contains('Cycling|Running|Walking')] #have certain standard
trying2 = df.loc[df['Category'].str.contains('Swimming')] #pose only
trying2 = trying2.sort_values(by='Calories per kg')
#trying is new DataFrame
#category_list = ['None']
category_list = trying['Category'].apply(lambda x: x.lower()).value_counts().sort_index(ascending=True).index.tolist()
category_list.append('swimming')
sports_list = trying['Sports'].apply(lambda x: x.lower()).value_counts().sort_index(ascending=True).index.tolist()
sports_list_swimming = trying2['Sports'].tolist()
options_category = list(range(len(category_list)))
# options_category = options_category.append(3)
#Choice1
category = st.selectbox('Select your exercise category', options_category, format_func=lambda x: category_list[x])
#list in each category
options_cycling = list(range(len(sports_list[0:6]))) #c0
display_cycling = sports_list[0:6]
options_running = list(range(len(sports_list[7:18]))) #c1
display_running = sports_list[7:18]
options_walking = list(range(len(sports_list[22:30]))) #c2
display_walking = sports_list[22:30]
options_swimming = list(range(len(sports_list_swimming[0:11]))) #c3
display_swimming = sports_list_swimming[0:11]
#Choice2 with condition
if category == options_category[0]:
st.subheader('Intensity Selection')
sports = st.selectbox('Select your exercise', options_cycling, format_func=lambda x: display_cycling[x])
elif category == options_category[1]:
st.subheader('Intensity Selection')
sports = st.selectbox('Select your exercise', options_running, format_func=lambda x: display_running[x])
elif category == options_category[2]:
st.subheader('Intensity Selection')
sports = st.selectbox('Select your exercise', options_walking, format_func=lambda x: display_walking[x])
elif category == options_category[3]:
st.subheader('Intensity Selection')
sports = st.selectbox('Select your exercise', options_swimming, format_func=lambda x: display_swimming[x])
#current weight
weight = st.number_input('Weight (kg)', step = 1)*2.2
st.write(weight, ' lbs')
#each exercise duration
duration = st.number_input('Sports Duration in each attempt (minutes)', step = 1)
#daily calories burned:
if st.button('Confirm'):
if category == 0:
calories = pd.to_numeric(prediction_cycling(weight, duration, sports)[-1,0])
st.write('In this attempt, you have reduced: ',pd.to_numeric(prediction_cycling(weight, duration, sports)[-1,0]), 'calories in exercise')
#st.write(calories)
if category == 1:
calories = pd.to_numeric(prediction_running(weight, duration, sports)[-1,0])
st.write('In this attempt, you have reduced: ',prediction_running(weight, duration, sports)[-1,0], 'calories in exercise')
if category == 2:
calories = pd.to_numeric(prediction_walking(weight, duration, sports)[-1,0])
st.write('In this attempt, you have reduced: ',prediction_walking(weight, duration, sports)[-1,0], 'calories in exercise')
if category == 3:
calories = pd.to_numeric(prediction_swimming(weight, duration, sports)[-1,0])
st.write('In this attempt, you have reduced: ',prediction_swimming(weight, duration, sports)[-1,0], 'calories in exercise')
#1 global variable from function in the same file
#2 prediction model
if __name__ == '__app__':
app2()
| sing829/yolov5_streamlit_updated | separate_py/show_ml2.py | show_ml2.py | py | 10,796 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 30,
"usage_type": "call"... |
3982305761 | #!/usr/bin/env python
#
import google.appengine.api.users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.ext.webapp import blobstore_handlers
import logging
import jinja2
import webapp2
import json
import os
import re
ipadRegex = re.compile(r"iPad|iPhone")
jinj = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
def login_required(func):
def do_func(self, *args, **kwargs):
logging.info(self.request.host)
current_user = google.appengine.api.users.get_current_user()
if not current_user:
self.redirect('/signup')
self.user = UserDetails.get_current()
if not self.user:
self.redirect('/setup')
return func(self, *args, **kwargs)
return do_func
class UserDetails(db.Model):
user = db.UserProperty()
@staticmethod
def get_for_user(user):
return UserDetails.all().filter('user =', user).get()
@staticmethod
def get_current():
return UserDetails.all().filter('user =', google.appengine.api.users.get_current_user()).get()
class Picture(db.Model):
owner = db.ReferenceProperty(UserDetails)
title = db.StringProperty()
caption = db.StringProperty()
full_size_image = db.LinkProperty()
class Schedule(db.Model):
owner = db.ReferenceProperty(UserDetails)
last_modified = db.DateTimeProperty(auto_now=True)
@staticmethod
def get_for_user(user):
schedule = Schedule.all().filter('owner =', user).get()
if not schedule:
schedule = Schedule(owner=user)
schedule.put()
logging.info(",".join([str(pic) for pic in schedule.pictures]))
return schedule
class PicturePlacement(db.Model):
picture = db.ReferenceProperty(Picture)
schedule = db.ReferenceProperty(Schedule, collection_name='pictures')
position = db.ListProperty(int)
@staticmethod
def get_or_create(key, schedule):
logging.info("Looking for placement %s" % (key))
placement = PicturePlacement.get(key)
return placement
class PictureAdminHandler(blobstore_handlers.BlobstoreUploadHandler):
@login_required
def get(self):
upload_url = blobstore.create_upload_url('/picture')
template = jinj.get_template('pictureadmin.html')
self.response.out.write(template.render(
{
'uploadurl': upload_url
}
))
@login_required
def post(self):
uploaded_files = self.get_uploads()
blob_info = uploaded_files[0]
pic = Picture(title="title", caption="caption")
pic.owner = self.user
pic.full_size_image = images.get_serving_url(blob_key=blob_info)
pic.put()
self.redirect('/')
class ScheduleApiHandler(webapp2.RequestHandler):
@login_required
def get(self, schedulekey):
schedule = Schedule.get(schedulekey)
doc = {}
doc['id'] = str(schedule.key())
doc['lastmodified'] = schedule.last_modified.isoformat()
doc['pictures'] = [
{'id': str(p.key()),
'picture': str(p.picture.key()),
'url': p.picture.full_size_image,
'left': p.position[0],
'top': p.position[1]}
for p in schedule.pictures]
self.response.out.write(json.dumps(doc))
@login_required
def post(self, schedulekey):
logging.info(self.request.params)
left, top = self.request.get('left'), self.request.get('top')
key = self.request.get('key')
schedule = Schedule.get(schedulekey)
if self.request.get('type') == "placement":
placement = PicturePlacement.get_or_create(key, schedule)
else:
placement = PicturePlacement(schedule=schedule, picture=Picture.get(key))
placement.position = [int(i) for i in [left, top]]
db.put([placement, schedule])
self.response.out.write(json.dumps(str(placement.key())))
class ApiPlacementHandler(webapp2.RequestHandler):
@login_required
def get(self, schedulekey, placementkey):
placement = PicturePlacement.get(placementkey)
doc = {}
doc['id'] = str(placement.key())
doc['picture'] = placement.picture.key()
self.response.out.write(json.dumps(doc))
@login_required
def delete(self, schedulekey, placementkey):
placement = PicturePlacement.get(placementkey)
db.delete(placement)
@login_required
def post(self, schedule):
logging.info(self.request.params)
left, top = self.request.get('left'), self.request.get('top')
key = self.request.get('key')
schedule = Schedule.get(schedule)
if self.request.get('type') == "placement":
placement = PicturePlacement.get_or_create(key, schedule)
else:
placement = PicturePlacement(schedule=schedule, picture=Picture.get(key))
placement.position = [int(i) for i in [left, top]]
db.put([placement, schedule])
self.response.out.write(json.dumps(str(placement.key())))
class UserSetupHandler(webapp2.RequestHandler):
def get(self):
current_user = google.appengine.api.users.get_current_user()
if not current_user:
self.redirect('/signup')
user = UserDetails(user=current_user)
user.put()
logging.info("Setting up default library")
pictures = [
Picture(owner=user, title="Kitten jumping", caption="Kitten jumping", full_size_image='http://' + self.request.host + '/static/img/kitten1.jpg'),
Picture(owner=user, title="Kitten running", caption="Kitten running", full_size_image='http://' + self.request.host + '/static/img/kitten2.jpg'),
Picture(owner=user, title="Ginger kitten", caption="Ginger kitten", full_size_image='http://' + self.request.host + '/static/img/kitten3.jpg')
]
db.put(pictures)
self.redirect('/')
class MainHandler(webapp2.RequestHandler):
@login_required
def get(self):
if ipadRegex.search(self.request.environ["HTTP_USER_AGENT"]):
self.redirect("/display")
schedule = Schedule.get_for_user(self.user)
template = jinj.get_template('index.html')
self.response.out.write(template.render(
{
'user': self.user,
'schedule': schedule,
'library': Picture.all().filter('owner =', self.user),
'logout_url': google.appengine.api.users.create_logout_url("/")
}))
class DisplayHandler(webapp2.RequestHandler):
@login_required
def get(self):
schedule = Schedule.get_for_user(self.user)
template = jinj.get_template('schedule.html')
self.response.out.write(template.render(
{
'user': self.user,
'schedule': schedule,
}))
class LogHandler(webapp2.RequestHandler):
def post(self):
self.get()
def get(self):
logging.info(self.request.get('msg'))
class SignupHandler(webapp2.RequestHandler):
def get(self):
self.redirect(google.appengine.api.users.create_login_url())
from webapp2 import Route
app = webapp2.WSGIApplication([
('/', MainHandler),
('/display', DisplayHandler),
('/log', LogHandler),
('/signup', SignupHandler),
Route('/schedule/<schedulekey>', ScheduleApiHandler),
Route('/schedule/<schedulekey>/<placementkey>', ApiPlacementHandler),
('/picture', PictureAdminHandler),
('/setup', UserSetupHandler)
], debug=True)
| bruntonspall/visual-schedule | main.py | main.py | py | 7,684 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
21217490923 | import json
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.options
import os.path
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class Message: #消息
sender=""
text=""
roomID=""
time=""
def __init__(sender,text,roomID,time):
self.sender=sender
self.text=text
self.roomID=roomID
self.time=time
###############################################################################
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("username")
class LoginHandler(BaseHandler):
def get(self):
self.render('login.html')
def post(self):
self.set_secure_cookie("username", self.get_argument("username"))
self.redirect("/")
class WelcomeHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
roomCnt = self.application.rooms.getRooms()
self.render('index.html', user=self.current_user,roomCnt=roomCnt)
# self.render('index.html', user="self.current_user",roomCnt=roomCnt)
class LogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("username")
self.redirect("/")
##############################################################################
class Rooms(object):
cnt = 0
callbacks = []
def register(self,callback):
self.callbacks.append(callback)
def unregister(self,callback):
self.callbacks.remove(callback)
def addRoom(self):
self.cnt+=1
self.notifyCallbacks()
def notifyCallbacks(self):
for callback in self.callbacks:
callback(self.getRooms())
def getRooms(self):
return self.cnt
class RoomsHandler(tornado.web.RequestHandler):
def post(self):
action = self.get_argument('action')
if action == 'add':
self.application.rooms.addRoom()
else:
self.set_status(400)
class StatusHandler(tornado.websocket.WebSocketHandler):
def open(self):
self.application.rooms.register(self.callback)
def on_close(self):
self.application.rooms.unregister(self.callback)
def on_message(self,message):
pass
def callback(self,count):
self.write_message('{"cnt":"%d"}' % count)
#############################################################################
class chatRoom(object):
callbacks = {}
def register(self,callback):
roomID = str(callback.get_argument("roomID"))
if roomID in self.callbacks:
self.callbacks[roomID].append(callback)
else:
self.callbacks[roomID]=[callback]
def unregister(self,callback):
roomID = str(callback.get_argument("roomID"))
self.callbacks[roomID].remove(callback)
def callbackMessage(self,callback,message):
roomID = str(callback.get_argument("roomID"))
sender = str(callback.get_argument("sender"))
message = {
"mes":message,
"sender":sender
}
self.board(roomID,message)
def board(self,roomID,message):
for callback in self.callbacks[roomID]:
callback.write_message(json.dumps(message))
class JoinHandler(BaseHandler):
def post(self):
roomid=self.get_argument("roomid",0)
self.render('chatroom.html',user=self.current_user,roomID=roomid)
class WriteHandler(tornado.websocket.WebSocketHandler):
def open(self):
self.application.chatRoom.register(self)
def on_close(self):
self.application.chatRoom.unregister(self)
def on_message(self,message):
self.application.chatRoom.callbackMessage(self,message)
class Application(tornado.web.Application):
def __init__(self):
self.rooms = Rooms()
self.chatRoom = chatRoom()
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"static_path": 'static',
"cookie_secret": "bZJc2sWbQLKos6GkHn/VB9oXwQt8S0R0kRvJ5/xJ89E=",
# "xsrf_cookies": True,
"login_url": "/login"
}
handlers = [
(r'/', WelcomeHandler),
(r'/login', LoginHandler),
(r'/logout', LogoutHandler),
(r'/rooms',RoomsHandler),
(r'/status',StatusHandler),
(r'/join',JoinHandler),
(r'/write/',WriteHandler),
]
tornado.web.Application.__init__(self,handlers,**settings)
if __name__ == "__main__":
tornado.options.parse_command_line()
application = Application()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| Drenight/2019ComprehensiveProject | exp2/cookies.py | cookies.py | py | 4,273 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "tornado.options.define",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tornado.httpserver.web",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tornado.httpserver",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": ... |
22589006256 | import pygame, sys
from pygame.locals import *
import random

# BUG FIX: ``pygame.init`` was referenced but never *called* (missing
# parentheses), so pygame modules were never initialised.
pygame.init()
windowSurface = pygame.display.set_mode((800, 600), 0, 32)
pygame.display.set_caption('game')

# colour palette
brown = (190, 128, 0)
black = (0, 0, 0)
green = (0, 128, 0)
blue = (0, 0, 255)
white = (255, 255, 255)
red = (255, 0, 0)

# each tree is [trunk_left, trunk_right, canopy_left, canopy_tip, canopy_right]
xtree1 = [200, 250, 100, 225, 350]
ytree = [200, 50, 350]  # [canopy base / trunk top, canopy tip, trunk bottom]


def random_tree_xs():
    """Pick a random base x in the scrolling world and derive the five
    polygon x-coordinates of one tree (same offsets as the original code)."""
    base = random.randint(0, 3200)
    return [base, base + 50, base - 100, base + 25, base + 150]


# trees 2-5 are randomly placed; the duplicated x1..x5 blocks were folded
# into random_tree_xs() (one randint per tree, same order, same offsets)
xtree2 = random_tree_xs()
xtree3 = random_tree_xs()
xtree4 = random_tree_xs()
xtree5 = random_tree_xs()

orbx = 400
# draw the first tree once before the main loop starts
pygame.draw.polygon(windowSurface, brown, ((xtree1[0], 200), (xtree1[1], 200), (xtree1[1], 350), (xtree1[0], 350)))
pygame.draw.polygon(windowSurface, green, ((xtree1[2], 200), (xtree1[3], 50), (xtree1[4], 200)))
pygame.display.update()

motion = False
shooting = False
orbSize = 0
def updateScreen(xtree1, ytree, orbSize, orbx, xtree2, xtree3, xtree4, xtree5, badGuyx, badGuyr):
    """Redraw one frame: sky, ground, the five trees, the orb, the enemy
    and the crosshair — in that order, same as before."""
    windowSurface.fill(black)
    # ground
    pygame.draw.polygon(windowSurface, green, ((0, 300), (0, 600), (800, 600), (800, 300)))
    # trees: trunk first, then canopy, tree 1 through tree 5
    for tree in (xtree1, xtree2, xtree3, xtree4, xtree5):
        pygame.draw.polygon(windowSurface, brown,
                            ((tree[0], ytree[0]), (tree[1], ytree[0]), (tree[1], ytree[2]), (tree[0], ytree[2])))
        pygame.draw.polygon(windowSurface, green,
                            ((tree[2], ytree[0]), (tree[3], ytree[1]), (tree[4], ytree[0])))
    pygame.draw.circle(windowSurface, blue, (orbx, 300), orbSize, 0)   # the fired orb
    pygame.draw.circle(windowSurface, red, (badGuyx, 300), badGuyr, 0)  # enemy
    pygame.draw.circle(windowSurface, white, (400, 300), 5, 0)          # crosshair
    pygame.display.update()
# per-frame game state
score = 0        # kills since last speed-up
badShot = False  # set when the orb's radius "reaches" the enemy inside the crosshair
move = 0         # frame counter driving enemy growth
badGuyx = random.randint(0, 3200)
badGuyr = 1      # enemy radius (grows over time)
badSpeed = 1     # growth per tick

# NOTE(review): indentation was reconstructed during review — confirm nesting
# against the original file before relying on this block.
while True:
    if badShot == True:
        # enemy was hit last frame: respawn elsewhere and score a kill
        badGuyx = random.randint(0, 3200)
        badGuyr = 0
        score += 1
        if score > 15:
            # difficulty ramps every 15 kills
            badSpeed += 1
            score = 0
    if orbSize == 0:
        shooting = False
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            if event.key == K_RIGHT:
                motion = True
                # inner loop: keep scrolling the world left until KEYUP
                while motion == True:
                    if orbSize == 0:
                        shooting = False
                    badGuyx -= 1
                    if badGuyx == 0:
                        badGuyx += 3450  # wrap around the 3450-wide world
                    xtree1[0] -= 1
                    xtree1[1] -= 1
                    xtree1[2] -= 1
                    xtree1[3] -= 1
                    xtree1[4] -= 1
                    if xtree1[4] == 0:
                        xtree1[0] += 3450
                        xtree1[1] += 3450
                        xtree1[2] += 3450
                        xtree1[3] += 3450
                        xtree1[4] += 3450
                    xtree2[0] -= 1
                    xtree2[1] -= 1
                    xtree2[2] -= 1
                    xtree2[3] -= 1
                    xtree2[4] -= 1
                    if xtree2[4] == 0:
                        xtree2[0] += 3450
                        xtree2[1] += 3450
                        xtree2[2] += 3450
                        xtree2[3] += 3450
                        xtree2[4] += 3450
                    xtree3[0] -= 1
                    xtree3[1] -= 1
                    xtree3[2] -= 1
                    xtree3[3] -= 1
                    xtree3[4] -= 1
                    if xtree3[4] == 0:
                        xtree3[0] += 3450
                        xtree3[1] += 3450
                        xtree3[2] += 3450
                        xtree3[3] += 3450
                        xtree3[4] += 3450
                    xtree4[0] -= 1
                    xtree4[1] -= 1
                    xtree4[2] -= 1
                    xtree4[3] -= 1
                    xtree4[4] -= 1
                    if xtree4[4] == 0:
                        xtree4[0] += 3450
                        xtree4[1] += 3450
                        xtree4[2] += 3450
                        xtree4[3] += 3450
                        xtree4[4] += 3450
                    xtree5[0] -= 1
                    xtree5[1] -= 1
                    xtree5[2] -= 1
                    xtree5[3] -= 1
                    xtree5[4] -= 1
                    if xtree5[4] == 0:
                        xtree5[0] += 3450
                        xtree5[1] += 3450
                        xtree5[2] += 3450
                        xtree5[3] += 3450
                        xtree5[4] += 3450
                    if shooting == True:
                        # the orb shrinks as it "travels" and drifts with the scroll
                        orbSize -= 1
                        orbx -= 1
                    move += 1
                    if move == 40:
                        badGuyr += badSpeed
                        move = 0
                    updateScreen(xtree1, ytree, orbSize, orbx, xtree2, xtree3, xtree4, xtree5, badGuyx, badGuyr)
                    for event in pygame.event.get():
                        if event.type == KEYUP:
                            if event.key == K_RIGHT:
                                motion = False
            if event.key == K_LEFT:
                motion = True
                # mirror of the K_RIGHT loop, scrolling the world right
                while motion == True:
                    if orbSize == 0:
                        shooting = False
                    badGuyx += 1
                    if badGuyx == 3200:
                        badGuyx -= 3450
                    xtree1[0] += 1
                    xtree1[1] += 1
                    xtree1[2] += 1
                    xtree1[3] += 1
                    xtree1[4] += 1
                    # NOTE(review): this wrap tests xtree[2]==3200 while the
                    # right-scroll loop tests xtree[4]==0 — asymmetric; confirm intended
                    if xtree1[2] == 3200:
                        xtree1[0] -= 3450
                        xtree1[1] -= 3450
                        xtree1[2] -= 3450
                        xtree1[3] -= 3450
                        xtree1[4] -= 3450
                    xtree2[0] += 1
                    xtree2[1] += 1
                    xtree2[2] += 1
                    xtree2[3] += 1
                    xtree2[4] += 1
                    if xtree2[2] == 3200:
                        xtree2[0] -= 3450
                        xtree2[1] -= 3450
                        xtree2[2] -= 3450
                        xtree2[3] -= 3450
                        xtree2[4] -= 3450
                    xtree3[0] += 1
                    xtree3[1] += 1
                    xtree3[2] += 1
                    xtree3[3] += 1
                    xtree3[4] += 1
                    if xtree3[2] == 3200:
                        xtree3[0] -= 3450
                        xtree3[1] -= 3450
                        xtree3[2] -= 3450
                        xtree3[3] -= 3450
                        xtree3[4] -= 3450
                    xtree4[0] += 1
                    xtree4[1] += 1
                    xtree4[2] += 1
                    xtree4[3] += 1
                    xtree4[4] += 1
                    if xtree4[2] == 3200:
                        xtree4[0] -= 3450
                        xtree4[1] -= 3450
                        xtree4[2] -= 3450
                        xtree4[3] -= 3450
                        xtree4[4] -= 3450
                    xtree5[0] += 1
                    xtree5[1] += 1
                    xtree5[2] += 1
                    xtree5[3] += 1
                    xtree5[4] += 1
                    if xtree5[2] == 3200:
                        xtree5[0] -= 3450
                        xtree5[1] -= 3450
                        xtree5[2] -= 3450
                        xtree5[3] -= 3450
                        xtree5[4] -= 3450
                    if shooting == True:
                        orbSize -= 1
                        orbx += 1
                    move += 1
                    if move == 40:
                        badGuyr += badSpeed
                        move = 0
                    updateScreen(xtree1, ytree, orbSize, orbx, xtree2, xtree3, xtree4, xtree5, badGuyx, badGuyr)
                    for event in pygame.event.get():
                        if event.type == KEYUP:
                            if event.key == K_LEFT:
                                motion = False
            if event.key == K_SPACE:
                # fire: the orb starts large at the crosshair and shrinks
                shooting = True
                orbSize = 100
                orbx = 400
    if shooting == True:
        orbSize -= 1
    badShot = False
    move += 1
    if move == 50:
        badGuyr += badSpeed
        move = 0
    if orbSize == badGuyr:
        # hit when the shrinking orb matches the enemy radius while the
        # enemy sits near the crosshair
        if badGuyx > 350 and badGuyx < 450:
            badShot = True
    if move > 100:
        move = 0
    updateScreen(xtree1, ytree, orbSize, orbx, xtree2, xtree3, xtree4, xtree5, badGuyx, badGuyr)
| micah-kitzler/first-pygame-game | game.py | game.py | py | 9,217 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.disp... |
28768775449 | from rest_framework import generics, viewsets
from .models import approvals
from .serializer import approvalsSerializers
from sklearn.externals import joblib
import pandas as pd
from .form import ApprovalsForm
from django.shortcuts import render
from keras import backend as K
from django.contrib import messages
# Create your views here.
class approvalsList(generics.ListCreateAPIView):
    """List every stored approval record, or create a new one via POST."""
    serializer_class = approvalsSerializers
    queryset = approvals.objects.all()
class approvalsDetils(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single approval record by pk."""
    serializer_class = approvalsSerializers
    queryset = approvals.objects.all()
class ApprovalsView(viewsets.ModelViewSet):
    """Full CRUD viewset over approvals for the DRF router."""
    serializer_class = approvalsSerializers
    queryset = approvals.objects.all()
def ohevalue(df):
    """Re-create the one-hot encoded feature frame the model was trained on.

    One-hot encodes the categorical columns of ``df`` and reindexes the
    result to the exact column set saved at training time (``allcol.pkl``),
    filling any category absent from this request with zeros.
    """
    # NOTE(review): machine-specific absolute path — consider making configurable
    ohe_col = joblib.load("/folder/APIs/django/djangoAPI/api/allcol.pkl")
    cat_columns = ['Gender', 'Married', 'Education', 'Self_Employed', 'Property_Area']
    df_processed = pd.get_dummies(df, columns=cat_columns)
    newdict = {}
    for i in ohe_col:
        if i in df_processed.columns:
            newdict[i] = df_processed[i].values
        else:
            # dummy column missing for this input: fill with zeros
            newdict[i] = 0
    newdf = pd.DataFrame(newdict)
    return newdf
# @api_view(["POST"])
def approvereject(unit):
    """Score one encoded loan application with the persisted model.

    Returns ``(status, scaled_features)`` where status is 'Approved' or
    'Rejected'; on a ValueError (e.g. column mismatch) returns the error
    message string instead.
    """
    try:
        mdl = joblib.load("/folder/APIs/django/djangoAPI/api/loan_model.pkl")
        # FIX: path was missing its leading slash, unlike the model path above,
        # so the scaler could never be found from the app's working directory
        scalers = joblib.load("/folder/APIs/django/djangoAPI/api/scalers.pkl")
        X = scalers.transform(unit)
        y_pred = mdl.predict(X)
        y_pred = (y_pred > 0.58)  # decision threshold — TODO confirm chosen value
        newdf = pd.DataFrame(y_pred, columns=['Status'])
        newdf = newdf.replace({True: 'Approved', False: 'Rejected'})
        K.clear_session()  # free Keras graph state between requests
        return newdf.values[0][0], X[0]
    except ValueError as e:
        # NOTE(review): returns a bare string here vs. a tuple above —
        # callers indexing [0]/[1] get characters; consider raising instead
        return (e.args[0])
def cxcontact(request):
    """Render the loan-application form; on valid POST, run the model and
    flash the predicted status back to the user."""
    # NOTE(review): bound to request.POST even on GET (empty QueryDict) —
    # the else-branch below rebuilds an unbound form only for non-POST requests
    form = ApprovalsForm(request.POST)
    if request.method == 'POST':
        if form.is_valid():
            Firstname = form.cleaned_data['Firstname']
            Lastname = form.cleaned_data['Lastname']
            Dependants = form.cleaned_data['Dependants']
            Applicantincome = form.cleaned_data['Applicantincome']
            Coapplicatincome = form.cleaned_data['Coapplicatincome']
            Loanamt = form.cleaned_data['Loanamt']
            Loanterm = form.cleaned_data['Loanterm']
            Credithistory = form.cleaned_data['Credithistory']
            Gender = form.cleaned_data['Gender']
            Married = form.cleaned_data['Married']
            # Graduatededucation = form.cleaned_data['graduatededucation']
            # Selfemployed = form.cleaned_data['selfemployed']
            Property_Area = form.cleaned_data['Property_Area']
            myDict = (request.POST).dict()
            df = pd.DataFrame(myDict, index=[0])
            # NOTE(review): approvereject(ohevalue(df)) is evaluated three
            # times, reloading the model each time — cache the result once
            print(approvereject(ohevalue(df)))
            answer = approvereject(ohevalue(df))[0]
            Xscalers = approvereject(ohevalue(df))[1]
            print(Xscalers)
            messages.success(request, 'Application Status: {}'.format(answer))
    else:
        form = ApprovalsForm()
    return render(request, 'myform/myform.html', {'form': form})
| xolanisiqhelo/djangoAPI | api/views.py | views.py | py | 3,125 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "models.approvals.objects.all",
"line_number": 15,
"usage_type": "c... |
7937095341 | from django.urls import path
from app.views import SignUpView, ProductListView, ProductCreateView, ProductApprove, ProductUpdateView, ApproveView, \
RedirectView
urlpatterns = [
    path('', RedirectView.as_view()),  # site root: hand off to the redirect view
    path('signup', SignUpView.as_view(), name='signup'),
    path('products', ProductListView.as_view(), name='products'),
    path('create_product', ProductCreateView.as_view(), name='create_product'),
    path('approve_products', ApproveView.as_view(), name='approve_products'),
    path('update_product/<int:pk>', ProductUpdateView.as_view(), name='update_product'),
    # JSON/API endpoint for approving a single product by primary key
    path('api/approve/<int:pk>', ProductApprove.as_view(), name='approve'),
]
| ToshipSo/PinkBlue | app/urls.py | urls.py | py | 669 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.views.RedirectView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.views.RedirectView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "d... |
1967729697 | from copyreg import constructor
import csv
import sys
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
# Validate CLI arguments: <blockchain explorer> [number of pages]
# FIX: sys.argv always contains the script name, so ``len(sys.argv) < 1``
# could never fire — the missing-argument check must be ``< 2``.
if len(sys.argv) < 2:
    raise Exception('Argument Missing: run with "blockchain explorer" "number of pages (opt)"')
elif len(sys.argv) > 3:
    raise Exception('Too Many Arguments: run with "blockchain explorer" "number of pages (opt)"')
elif sys.argv[1].isnumeric():
    raise Exception('Argument Error: first argument must be the blockchain explorer')

if len(sys.argv) > 2:
    NUMBER_OF_PAGES = int(sys.argv[2])
else:
    NUMBER_OF_PAGES = -1  # -1 means "detect the page count from the site"

# FIX: str.strip() removes a *character set* (h/t/p/s/:/ /), not a prefix,
# and would mangle domains beginning or ending with those characters.
# Remove the URL scheme explicitly instead.
SCAN = sys.argv[1]
for prefix in ('https://', 'http://'):
    if SCAN.startswith(prefix):
        SCAN = SCAN[len(prefix):]
        break
SCAN = SCAN.strip('/')
# Output CSV of (address, label) pairs scraped from the explorer's
# "well-known accounts" pages.
f = open(f'./{SCAN}_wellKnownAddresses.csv', 'w')
writer = csv.writer(f)
header = ['Address', 'Name']  # NOTE(review): defined but never written to the CSV

# headless Chrome with a desktop user agent so the site serves full pages
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920,1080")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--allow-running-insecure-content')
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
chrome_options.add_argument(f'user-agent={user_agent}')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)

driver.get(f'https://{SCAN}/accounts/1')
if (NUMBER_OF_PAGES == -1):
    # read the "Page x of N" widget to learn the page count
    NUMBER_OF_PAGES = int(driver.find_element(By.XPATH, '//*[@id="ContentPlaceHolder1_pageRecords"]/ul/li[3]/span/strong[2]').text)

for page in range(NUMBER_OF_PAGES):
    print(f'Parsing page {page}...')
    table = driver.find_element(By.XPATH, '//*[@id="ContentPlaceHolder1_divTable"]/table/tbody')
    rows = table.find_elements(By.TAG_NAME, "tr")
    for row in rows:
        elements = row.find_elements(By.TAG_NAME, 'td')
        address = elements[1].text
        name = elements[2].text
        # skip unnamed rows and attacker/mixer labels
        if (len(name) > 0 and
                'Tornado' not in name and
                'Exploiter' not in name and
                'Hacker' not in name):
            writer.writerow([address, name])
    # NOTE(review): ``page`` starts at 0 while the site's pages start at 1,
    # so each iteration parses the *previous* fetch — looks off-by-one; confirm.
    driver.get(f'https://{SCAN}/accounts/{page}')
f.close()
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number"... |
28994089757 | # a great deal of code modified and copied from:
# https://github.com/OpenBounds/Processing
import sys
import os
import logging
import subprocess
import tempfile
import glob
import click
from pyproj import Proj, transform
import fiona
from fiona.crs import from_epsg, to_string
from fiona.transform import transform_geom
import rasterio
import boto3
def info(*strings):
    """Emit an informational message: to the console when interactive,
    to the logging framework otherwise."""
    text = ' '.join(strings)
    if sys.stdout.isatty():
        click.echo(text)
    else:
        logging.info(text)
def error(*strings):
    """Emit an error message: red on an interactive console, otherwise via
    the logging framework."""
    text = ' '.join(strings)
    if sys.stdout.isatty():
        click.secho(text, fg='red')
    else:
        logging.error(text)
def success(*strings):
    """Emit a success message: green on an interactive console, otherwise
    via the logging framework (at info level)."""
    text = ' '.join(strings)
    if sys.stdout.isatty():
        click.secho(text, fg='green')
    else:
        logging.info(text)
def sizeof_fmt(num):
    """Human readable file size.

    Modified from http://stackoverflow.com/a/1094933/1377021

    :param num: float
    :returns: string
    """
    value = num
    for prefix in ['', 'k', 'm', 'g', 't', 'p', 'e', 'z']:
        if abs(value) < 1024.0:
            return f"{value:.0f}{prefix}b"
        value /= 1024.0
    # anything beyond zettabytes collapses into 'y'
    return f"{value:.0f}yb"
def make_sure_path_exists(path):
    """
    Make directories in path if they do not exist; never raises.

    Modified from http://stackoverflow.com/a/5032238/1377021
    """
    try:
        # exist_ok avoids the check-then-create race; OSError keeps the
        # original "silently ignore failures" contract without the bare
        # ``except:`` that also swallowed KeyboardInterrupt/SystemExit
        os.makedirs(path, exist_ok=True)
    except OSError:
        pass
def scan_for_layers(path, filters):
    """Return files under ``path`` matching the glob pattern(s), sorted by basename.

    ``filters`` may be a single glob string or a list/tuple of them.
    https://stackoverflow.com/questions/4568580/python-glob-multiple-filetypes
    """
    # isinstance instead of ``type(x) == str`` — also accepts str subclasses
    if isinstance(filters, str):
        filters = [filters]
    if isinstance(filters, (list, tuple)):
        files = []
        for pattern in filters:
            files.extend(glob.glob(os.path.join(path, pattern)))
        return sorted(files, key=os.path.basename)
    # unknown type: report and fall through returning None (original contract)
    error('scan_for_layers requires a glob string or a list/tuple of strings')
def transform_bbox(bbox, in_crs='EPSG:4326', out_crs='EPSG:3005'):
    """Reproject the two corner points of a (minx, miny, maxx, maxy) bbox
    from ``in_crs`` to ``out_crs`` and return them as one flat tuple."""
    source = Proj(in_crs)
    target = Proj(out_crs)
    lower = transform(source, target, bbox[0], bbox[1])
    upper = transform(source, target, bbox[2], bbox[3])
    return lower + upper
def describe(in_file, layer=0):
    """Basically fio and rio info

    https://github.com/Toblerity/Fiona/blob/master/fiona/fio/info.py
    https://github.com/mapbox/rasterio/blob/master/rasterio/rio/info.py

    Returns a metadata dict with a 'type' key of 'VECTOR' or 'RASTER'.
    """
    # try vector first
    try:
        with fiona.drivers():
            with fiona.open(in_file, layer=layer) as src:
                inf = src.meta
                inf.update(bounds=src.bounds, name=src.name)
                try:
                    inf.update(count=len(src))
                except TypeError:
                    # some drivers cannot count features
                    inf.update(count=None)
                    info("Setting 'count' to None/null - layer does "
                         "not support counting")
                proj4 = fiona.crs.to_string(src.crs)
                if proj4.startswith('+init=epsg'):
                    proj4 = proj4.split('=')[1].upper()
                inf['crs'] = proj4
                inf['type'] = 'VECTOR'
    # if fiona fails, try rasterio
    # FIX: was a bare ``except:`` which also caught KeyboardInterrupt/SystemExit
    except Exception:
        with rasterio.open(in_file) as src:
            inf = dict(src.profile)
            inf['shape'] = (inf['height'], inf['width'])
            inf['bounds'] = src.bounds
            proj4 = src.crs.to_string()
            if proj4.startswith('+init=epsg'):
                proj4 = proj4.split('=')[1].upper()
            inf['crs'] = proj4
            inf['type'] = 'RASTER'
    return inf
def get_bbox(in_file, layer=0):
    """ Get wgs84 bbox of in_file
    """
    meta = describe(in_file, layer)
    bbox = meta['bounds']
    if meta['crs'] != from_epsg(4326):
        # FIX: was ``from_epsg(4236)`` — a digit-swap typo; 4326 is WGS84
        bbox = transform_bbox(bbox, meta['crs'], from_epsg(4326))
    return bbox
def get_crs(in_file, layer=0):
    """Return the CRS of the input dataset as a Proj.4 mapping."""
    meta = describe(in_file, layer)
    return meta['crs']
def bbox_copy(in_file, out_file, bbox, in_layer=0, out_layer=None, dst_crs=None):
    """Dump all features within the provided WGS84 bbox to a new file

    Writes an ESRI Shapefile, optionally reprojecting features to dst_crs.
    NOTE(review): ``out_layer`` is accepted but never used — confirm intent.
    """
    with fiona.drivers():
        with fiona.open(in_file, layer=in_layer) as source:
            output_schema = source.schema.copy()
            # transform the provided bbox to the crs of source data
            bbox_proj = transform_bbox(bbox, from_epsg(4326),
                                       out_crs=source.meta['crs'])
            # use source crs if no reprojection specified
            if dst_crs:
                out_crs = dst_crs
            else:
                out_crs = source.crs
            with fiona.open(out_file, 'w',
                            crs=out_crs, driver="ESRI Shapefile",
                            schema=output_schema) as sink:
                for f in source.filter(bbox=bbox_proj):
                    # transform only if dst_crs specified
                    if dst_crs:
                        g = transform_geom(
                            source.crs, dst_crs, f['geometry'],
                            antimeridian_cutting=True)
                        f['geometry'] = g
                    sink.write(f)
def bbox_copyraster(in_file, out_file, bbox, dst_crs=None):
    """Rather than re-invent rio clip, rio warp just call them directly

    Clips ``in_file`` to the WGS84 ``bbox`` (reprojected into the source CRS)
    and, when ``dst_crs`` is given, warps the clipped raster into that CRS.
    """
    with rasterio.open(in_file) as source:
        # express the bbox in the source raster's CRS for ``rio clip``
        bbox = transform_bbox(bbox, from_epsg(4326), out_crs=source.meta['crs'])
        bbox = [str(b) for b in bbox]
    if not dst_crs:
        clip_file = out_file
    else:
        # NOTE(review): fixed temp filename — concurrent runs would collide
        clip_file = os.path.join(tempfile.gettempdir(), "rio_temp.tif")
    # NOTE(review): shell=True with a joined string — fine for trusted paths,
    # but a command-injection risk if paths ever come from untrusted input
    command = ['rio', 'clip', in_file, clip_file,
               '--bounds', '"'+" ".join(bbox)+'"']
    subprocess.call(" ".join(command), shell=True)
    if dst_crs:
        # convert crs to string and wrap in quotes
        if type(dst_crs) == dict:
            dst_crs = to_string(dst_crs)
        dst_crs = '"'+dst_crs+'"'
        command = ['rio', 'warp', clip_file, out_file,
                   '--dst-crs', dst_crs,
                   '--force-overwrite']
        subprocess.call(" ".join(command), shell=True)
        os.unlink(clip_file)
def upload_s3(bucket_name, path):
    """Upload a file to S3, creating the bucket first if needed.

    deprecated - use awscli
    """
    s3 = boto3.resource('s3')
    buckets = [b.name for b in s3.buckets.all()]
    if bucket_name not in buckets:
        s3.create_bucket(Bucket=bucket_name)
    info('Uploading', path)
    filesize = os.path.getsize(path)
    key = os.path.split(path)[1]
    # FIX: the file handle from ``open(path, 'rb')`` was never closed —
    # a context manager guarantees it is released even if put() fails
    with open(path, 'rb') as body:
        s3.Object(bucket_name, key).put(Body=body)
    success('Done. Uploaded', sizeof_fmt(filesize))
def download_s3(bucket_name, key, out_path=None):
    """Download a single S3 object into ``out_path`` (cwd by default).

    deprecated - use awscli
    """
    destination = out_path if out_path else os.getcwd()
    bucket = boto3.resource('s3').Bucket(bucket_name)
    info('Downloading', key)
    bucket.download_file(key, os.path.join(destination, key))
    success('Done. Downloaded %s' % key)
| cat-cfs/gcbm_preprocessing | preprocess_tools/gcbm_aws/util.py | util.py | py | 7,193 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdout.isatty",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "click.echo",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line... |
25295807822 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
import numpy as np
class LinkedIn:
    """Scrapes job postings from LinkedIn's job-search pages into ``self.data``.

    NOTE(review): indentation reconstructed during review — verify nesting
    against the original file.
    """

    def __init__(self):
        # NOTE(review): credentials hard-coded in source — move to env/config
        self.mail = "sohalkhan90570@gmail.com"
        self.password = "gamer@123"
        # NOTE(review): this Service object is created but never passed to the
        # driver below (webdriver.Chrome uses ChromeDriverManager instead)
        self.chrome_web = Service('chromedriver.exe')
        self.driver = webdriver.Chrome(ChromeDriverManager().install())
        # one list per output column; rows are appended in lock-step
        self.data = {'Designation': [],
                     'Name': [],
                     'Location': [],
                     'Level_and_involvement': [],
                     'job_description': [],
                     'Total_applicants': [],
                     'Industry_and_Employee_count': [],
                     'LinkedIn_Followers': []}

    def login(self):
        """Log into LinkedIn with the stored credentials."""
        self.driver.maximize_window()
        self.driver.get('https://www.linkedin.com/')
        mail_box = self.driver.find_element(By.XPATH, '/html/body/main/section[1]/div/div/form/div[2]/div[1]/input')
        mail_box.send_keys(self.mail)
        password_box = self.driver.find_element(By.XPATH, '/html/body/main/section[1]/div/div/form/div[2]/div[2]/input')
        password_box.send_keys(self.password)
        login_button = self.driver.find_element(By.XPATH, '/html/body/main/section[1]/div/div/form/button')
        time.sleep(5)  # give the page time to settle before submitting
        login_button.click()

    def data_collection(self, link):
        """Open one search-results page and scrape up to 20 job cards.

        Each field is scraped in its own try/except so a single missing
        element records NaN instead of aborting the row.
        """
        self.driver.get(link)
        bar = self.driver.find_element(By.XPATH, '/html')  # used to scroll the page
        for i in range(1, 21):
            time.sleep(2.5)
            try:
                # click the i-th job card in the left-hand results list
                jobs = self.driver.find_element(By.XPATH, f'/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[1]/div/ul/li[{i}]')
                time.sleep(1)
                jobs.click()
            except:
                # fewer than 20 results on this page: report and stop
                print(link[link.find('start'):])
                return None
            time.sleep(1)
            try:
                job_title = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/a/h2')
                self.data['Designation'].append(job_title.text)
            except NoSuchElementException:
                self.data['Designation'].append(np.nan)
            time.sleep(1)
            try:
                company_name = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/div[1]/span[1]/span[1]/a')
                self.data['Name'].append(company_name.text)
            except NoSuchElementException:
                self.data['Name'].append(np.nan)
            time.sleep(1)
            try:
                com_location = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/div[1]/span[1]/span[2]')
                self.data['Location'].append(com_location.text)
            except NoSuchElementException:
                self.data['Location'].append(np.nan)
            time.sleep(1)
            # scroll the detail pane so lazily-loaded fields render
            for x in range(4):
                bar.send_keys(Keys.ARROW_DOWN)
            try:
                job_level_and_type = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/div[2]/ul/li[1]/span')
                self.data['Level_and_involvement'].append(job_level_and_type.text)
            except NoSuchElementException:
                self.data['Level_and_involvement'].append(np.nan)
            time.sleep(1)
            try:
                num_of_applicants = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/div[1]/span[2]/span[2]/span')
                self.data['Total_applicants'].append(num_of_applicants.text)
            except NoSuchElementException:
                self.data['Total_applicants'].append(np.nan)
            time.sleep(1)
            try:
                com_industry_and_employee_num = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[1]/div/div[1]/div[1]/div[2]/ul/li[2]/span')
                self.data['Industry_and_Employee_count'].append(com_industry_and_employee_num.text)
            except NoSuchElementException:
                self.data['Industry_and_Employee_count'].append(np.nan)
            time.sleep(1)
            try:
                job_description = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/div[4]/article')
                self.data['job_description'].append(job_description.text)
            except NoSuchElementException:
                self.data['job_description'].append(np.nan)
            time.sleep(1)
            # scroll the detail pane to its bottom to reveal the company card
            sec_bar = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div')
            self.driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight', sec_bar)
            time.sleep(1.5)
            try:
                followers = self.driver.find_element(By.XPATH, '/html/body/div[5]/div[3]/div[4]/div/div/main/div/section[2]/div/div[2]/div[1]/div/section/section/div[1]/div[1]/div/div[2]/div[2]')
                self.data['LinkedIn_Followers'].append(followers.text)
            except NoSuchElementException:
                self.data['LinkedIn_Followers'].append(np.nan)
            time.sleep(1)

    def create_and_store(self):
        """Persist everything collected so far to the scraped-data CSV."""
        df = pd.DataFrame(self.data)
        df.to_csv(r"..\PYTHON\EXCEL\SCRAPED_DATA\scrapped data.csv", index=False)
obj = LinkedIn()
obj.login()
# build the paginated search offsets: LinkedIn pages start at 1 and step by 25,
# so replace the generated 0 with 1 -> [1, 25, 50, ..., 400]
flag = [i for i in range(0, 401, 25)]
flag.remove(0)
flag.insert(0, 1)
for i in flag:
    # fixed query: selected company ids (f_C), all experience levels (f_E), India
    obj.data_collection(f'https://www.linkedin.com/jobs/search/?currentJobId=3365364752&f_C=165158%2C1353%2C58396'
                        f'%2C51692521%2C1283%2C6567943%2C1073%2C18145101%2C12770%2C9215331%2C4300%2C1318%2C3178'
                        f'%2C86813252%2C6339%2C210064%2C14439560&f_E=1%2C2%2C3%2C4%2C5%2C6&geoId=102713980&location'
                        f'=India&refresh=true&start={i}')
obj.create_and_store()
| manishhemnani06/LINKEDIN_JOB_ANALYSIS_ | SCRAPPING_CODE/SCRAPPING_MAIN_FILE.py | SCRAPPING_MAIN_FILE.py | py | 8,400 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{... |
13365106466 | import numpy as np
import tensorflow as tf
from PIL import Image
def get_data(img_file, labels_file, image_size):
    """
    Given file paths for images and labels, return normalized inputs and
    one-hot labels for the first five (of 101) classes.

    Labels are mapped to integer ids in sorted-unique order; only ids 0-4
    are kept. Inputs are scaled to [0, 1], reshaped to NCHW and transposed
    to NHWC float32 tensors.

    :param img_file: file path for inputs (.npy, pickled)
    :param labels_file: file path for labels (.npy, pickled)
    :param image_size: height/width of each (square) input image
    """
    print("Loading data")
    inputs = np.load(img_file, allow_pickle=True)
    labels = np.load(labels_file, allow_pickle=True)
    # decode the first image as a sanity check that the data is valid RGB
    img = Image.fromarray(inputs[0], 'RGB')
    # img.show()

    # map each distinct label string to an integer id (sorted-unique order)
    d_str = np.unique(labels)
    label_dict = dict(zip(d_str.flatten(), range(len(d_str))))
    labels = np.vectorize(label_dict.get)(labels)

    # keep only the five target classes
    keep = np.isin(labels, [0, 1, 2, 3, 4])
    processed_labels = labels[keep]
    # (the original rebuilt each class id with np.where and summed the four
    # results — an identity transform for ids 0-4, so it was removed)
    one_hot = tf.one_hot(processed_labels, depth=5)

    processed_inputs = inputs[keep]
    processed_inputs = np.array(processed_inputs / 255)  # scale to [0, 1]
    processed_inputs = tf.reshape(processed_inputs, (-1, 3, image_size, image_size))
    # NCHW -> NHWC, the layout expected downstream
    processed_inputs = tf.transpose(processed_inputs, perm=[0, 2, 3, 1])
    processed_inputs = tf.dtypes.cast(processed_inputs, tf.float32)
    return processed_inputs, one_hot
{
"api_name": "numpy.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_numbe... |
75090764834 | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# NOTE(review): ``patterns()`` with string view names is the pre-1.8 Django
# style and was removed in Django 1.10 — this file targets an old Django.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'webapp.views.index', name='index'),
    # JSON endpoints for a province / a town within a province
    url(r'^province/(\w+)/$', 'webapp.views.province_json', name='province_json'),
    url(r'^province/(\w+)/(\w+)/$', 'webapp.views.town_json', name='town_json'),
    url(r'^province/(\w+)/(\d+)/(\d+)/$', 'webapp.views.show_province_year_month', name='show_province_year_month'),
    url(r'^provinces/?$', 'webapp.views.list_provinces', name='list_provinces'),
    url(r'^provinces/(\w+)/?$', 'webapp.views.list_towns', name='list_towns'),
    # eviction ("desahucios") data by province/year[/month]
    url(r'^desahucios/(\w+)/(\d+)/(\d+)/$', 'webapp.views.show_desahucios', name = 'show_desahucios'),
    url(r'^desahucios/(\w+)/(\d+)/$', 'webapp.views.show_desahucios_anyo', name = 'show_desahucios_anyo'),
    url(r'^unemployment_graph/$', 'webapp.views.show_unemployment_graph', name = 'show_unemployment_graph'),
    url(r'^eviction_graph/$', 'webapp.views.show_eviction_graph', name = 'show_eviction_graph'),
    # url(r'^website/', include('website.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| RHoK-Bilbao/desahucios | website/website/urls.py | urls.py | py | 1,492 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dja... |
28159584514 | import json
import requests
from datetime import date, datetime
from mysql.connector import (connection)
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from pprint import pprint
import os.path
import pandas as pd
import time
''' Module to make queries easier via python elasticsearch api.
All requestable fields are for now:
"city", "country", "job", "goal","tag"
'''
def get_query_dict(conf, **kwargs):
    ''' Build the full-text part of an ES query.

    Only the "job" field is full-text matched; terms listed in the
    configuration's ``fuzziness_terms_to_exclude`` are matched without
    fuzziness. With no "job" requested, everything matches.
    '''
    no_fuzz = conf['fuzziness_terms_to_exclude']

    def job_match(term):
        # base options shared by fuzzy and exact matches
        options = {"query": term, "minimum_should_match": "2<75%"}
        if term not in no_fuzz:
            options.update({"prefix_length": 3, "fuzziness": "AUTO"})
        return options

    if "job" not in kwargs:
        # no full-text field requested: match everything
        query = {"match_all": {}}
    elif isinstance(kwargs["job"], list):
        # several jobs requested: any one of them may match
        alternatives = [{"match": {"job": job_match(j)}} for j in kwargs["job"]]
        query = {"bool": {"minimum_should_match": 1, "should": alternatives}}
    else:
        query = {"match": {"job": job_match(kwargs["job"])}}
    return {"query": query}
def get_filter_dict(**kwargs):
    ''' Build the filter-context part of an ES query (exact term matches).

    A list value means "any one of these values for this field".
    '''
    clauses = []
    for field, wanted in kwargs.items():
        if isinstance(wanted, list):
            alternatives = [{"term": {field: item}} for item in wanted]
            clauses.append({"bool": {"minimum_should_match": 1, "should": alternatives}})
        else:
            clauses.append({"term": {field: wanted}})

    # a single clause is inlined, several are wrapped in a must-list,
    # and no clauses at all leaves the bool empty
    if not clauses:
        inner = {}
    elif len(clauses) == 1:
        inner = {"must": clauses[0]}
    else:
        inner = {"must": clauses}
    return {"filter": {"bool": inner}}
def construct_body_query(conf, highlight, **kwargs):
    ''' Assemble the complete ES request body: full-text match on "job"
    plus exact-term filters on every other requested field.

    Returns False (after printing a notice) when no fields are requested.
    '''
    if not kwargs:
        print("no inputs")
        return False
    full_text = get_query_dict(conf, **kwargs)
    # "job" is handled by the full-text part; everything else becomes a filter
    remaining = dict(kwargs)
    remaining.pop("job", None)
    body = {"query": {"bool": {"must": full_text["query"]}}}
    if highlight:
        body["highlight"] = {"fields": {"job": {}}}
    filters = get_filter_dict(**remaining)
    if filters["filter"]["bool"]:
        body["query"]["bool"]["filter"] = filters["filter"]
    return body
def elastic_search(es, index_name, doc_type_name, conf, highlight=False, debug=0, **kwargs):
    ''' specific function to query elasticsearch with custom parameters

    Builds the request body from ``kwargs`` and returns the total hit
    count (first 10 hits are requested; ``debug`` > 0 dumps query+response).
    '''
    # construct json body query and request results
    body = construct_body_query(conf, highlight, **kwargs)
    res = es.search(index = index_name, doc_type = doc_type_name, size=10, body=body)
    if debug > 0:
        print("Query:")
        pprint(body)
        print("\nNumber of results: {}".format(str(res['hits']['total'])))
        pprint(res)
    return res['hits']['total']
def automatic_search(es, index_name, doc_type_name, file_scopes='', debug=0):
    """Run one Elasticsearch query per row of a TSV scope file.

    Each row provides keyword arguments for ``elastic_search``; list-valued
    fields are JSON-encoded in the file.  A copy of the TSV with two extra
    columns, ``nb_results`` and ``errors``, is written next to the input
    (suffix ``_completed``).

    Returns True on success, False when ``file_scopes`` does not exist.
    """
    if not os.path.isfile(file_scopes):
        # bug fix: the original message had an empty, unfilled {} placeholder
        print("Error file: {} does not exist.".format(file_scopes))
        return False
    start_time = time.time()
    df = pd.read_csv(file_scopes, sep='\t')
    errors = []
    nb_results = []
    print("Start processing file...\n")
    # progress step: report roughly every 10% (at least once per row for tiny files);
    # bug fix: the original used float division, so `id_row % step` compared against
    # a non-integer step and the cadence was erratic
    progress_step = max(df.shape[0] // 10, 1)
    for id_row, row_dict in df.fillna('').T.to_dict().items():
        if id_row % progress_step == 0:
            print("Processed {}%".format(str(id_row / df.shape[0] * 100)))
        try:
            row_dict_drop_na = {}
            for k, v in row_dict.items():
                # skip empty fields
                if v != '':
                    # JSON-decode list-valued fields (heuristic: contains '[')
                    if '[' in v:
                        row_dict_drop_na[k] = json.loads(v)
                    else:
                        row_dict_drop_na[k] = v
            # NOTE(review): elastic_search requires a 'conf' argument that is not
            # passed here — each row must supply it or the call fails and the row
            # is counted as an error; confirm the TSV template
            nb_query_results = elastic_search(es, index_name=index_name, doc_type_name=doc_type_name, **row_dict_drop_na)
            nb_results.append(nb_query_results)
            errors.append(0)
        except Exception:
            # any failure for this row is recorded instead of aborting the whole run
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            nb_results.append(-1)
            errors.append(1)
    # integrate results into the TSV
    df["nb_results"] = nb_results
    df["errors"] = errors
    print("Processed {}%".format(str(100)))
    print("Done")
    print("--- %s seconds ---\n" % (time.time() - start_time))
    file_scopes_tokens = file_scopes.rsplit(".", 1)
    df.to_csv(file_scopes_tokens[0] + "_completed." + file_scopes_tokens[1], sep='\t')
    return True
# parameters: target index and mapping type
index_name = "node"
doc_type_name = "users"
# create elasticsearch client (hard-coded host — presumably an internal dev cluster; verify)
es = Elasticsearch([{'host': "192.168.91.193", 'port': "9200"}])
# load configuration (query-field weights etc. used by get_query_dict)
with open('conf.json') as data_file:
    conf = json.load(data_file)
# construct json body query and request results on a SPECIFIC scope:
# full-text search on job="student" filtered by city="Paris"
res = elastic_search(es, debug=1, index_name=index_name, doc_type_name=doc_type_name, conf=conf, job="student", city="Paris")
| belaz/elasticpoc | python/search-by-querypost.py | search-by-querypost.py | py | 6,362 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pprint.pprint",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.path",
... |
19741466326 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a cointegration script file.
"""
import numpy as np
import pandas as pd
import tushare as ts
import matplotlib.pyplot as plt
import statsmodels.api as sm
def find_cointegration_pairs(dataframe):
    """Test every pair of columns for cointegration (Engle-Granger test).

    Returns (pvalue_matrix, pairs): an upper-triangular matrix of p-values
    (initialised to 1.0) and a list of (col_i, col_j, pvalue) tuples for
    pairs whose p-value is below 0.05.
    """
    num_series = dataframe.shape[1]
    columns = dataframe.keys()
    pvalue_matrix = np.ones((num_series, num_series))
    pairs = []
    for i in range(num_series):
        for j in range(i + 1, num_series):
            # Engle-Granger cointegration test; element 1 of the result is the p-value
            pvalue = sm.tsa.stattools.coint(dataframe[columns[i]], dataframe[columns[j]])[1]
            pvalue_matrix[i, j] = pvalue
            if pvalue < 0.05:
                pairs.append((columns[i], columns[j], pvalue))
    return pvalue_matrix, pairs
# NOTE(review): this DataFrame is never used below — candidate for removal
stock = pd.DataFrame()
# ticker codes for Bank of Beijing (601169) and China Merchants Bank (600036) — presumably; verify
stock_list = ['601169', '600036']
# fetch daily close prices for the first ticker and index by date
df = ts.get_k_data(stock_list[0])[['date', 'close']].set_index('date')
df.columns = [stock_list[0]]
# fetch the remaining tickers and inner-join them on the date index
for z in stock_list[1:]:
    ds = ts.get_k_data(z)[['date', 'close']].set_index('date')
    ds.columns = [z]
    df = df.merge(ds, right_index=True, left_index=True)
# obtain p-value matrix and the list of cointegrated pairs
pvalues, stock_pairs = find_cointegration_pairs(df)
# plot both price series on one chart
stock_601169 = df['601169']
stock_600036 = df['600036']
stock_601169.plot()
stock_600036.plot()
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend(['601169', '600036'])
| simple321vip/violin-trade | strategy/cointegration_2.py | cointegration_2.py | py | 1,464 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.ones",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.tsa.stattools.coint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.tsa",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name"... |
34977561674 | import numpy as np
import cv2
img = cv2.imread('/Users/huojiaxi/Desktop/googlelogo_color_272x92dp.png')
vert = [70, 110, 40] # RGB of the vegetation colour; tune it once the first figure has been generated
# per-channel tolerances around the reference colour
diff_rouge = 60
diff_vert = 40
diff_bleu = 30
# OpenCV loads images as BGR, so the bounds are built in blue, green, red order
boundaries = [([vert[2]-diff_bleu, vert[1]-diff_vert, vert[0]-diff_rouge],
               [vert[2]+diff_bleu, vert[1]+diff_vert, vert[0]+diff_rouge])]
# thresholding: keep only pixels whose colour falls inside the bounds
for (lower, upper) in boundaries:
    lower = np.array(lower, dtype=np.uint8)
    upper = np.array(upper, dtype=np.uint8)
    mask = cv2.inRange(img, lower, upper)
    output = cv2.bitwise_and(img, img, mask=mask)
# ratio of "green" pixels over all pixels (img.size counts all three channels)
ratio_vert = cv2.countNonZero(mask)/(img.size/3)
print('Vert pixel percentage:', np.round(ratio_vert*100, 2))
#cv2.imshow("images", np.hstack([img, output]))
cv2.waitKey(0)
| HUOJIAXI/PROJETDRONE1920 | TraitementDImage/image.py | image.py | py | 853 | python | fr | code | 2 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number... |
9362838462 | #coding:utf-8
"""
Description: separate speech from mixed signal of music and speech
Date: 2018.6.3
Reference: const.py, DoExperiment.py and util.py
by wuyiming
in UNet-VocalSeparation-Chainer
<https://github.com/Xiao-Ming/UNet-VocalSeparation-Chainer>
"""
import argparse
import os
import numpy as np
from librosa.util import find_files
from librosa.core import load, stft, istft, resample, to_mono
from librosa.output import write_wav
from scipy.io.wavfile import read
import train
import chainer
from chainer import config
# check version
# python 3.6.4 (64bit) win32
# windows 10 (64bit)
# Chainer 3.2.0
# librosa (0.6.0)
# scipy (1.0.0)
# numpy (1.14.0)
def separate(PATH_INPUT, PATH_OUTPUT, MODEL, SR=16000, FFT_SIZE = 1024, H = 512):
    """Separate the vocal/speech track from mixed wav files with a U-Net mask.

    PATH_INPUT may be a single wav file or a directory of wav files;
    PATH_OUTPUT may be an output file name or an output directory.
    SR is the working sample rate; FFT_SIZE and H are the STFT window
    and hop sizes.  MODEL is a chainer .npz snapshot of train.UNet.
    """
    if os.path.isdir( PATH_INPUT):
        # input is a directory: build the list of wav files inside it
        filelist_mixdown = find_files(PATH_INPUT, ext="wav", case_sensitive=True)
    else:
        # input is a single file
        filelist_mixdown=[PATH_INPUT]
    print ('number of mixdown file', len(filelist_mixdown))
    # create the output directory if PATH_OUTPUT has no extension and does not exist yet
    _, path_output_ext = os.path.splitext(PATH_OUTPUT)
    print ('path_output_ext',path_output_ext)
    if len(path_output_ext)==0 and not os.path.exists(PATH_OUTPUT):
        os.mkdir(PATH_OUTPUT)
    # load the trained U-Net model and switch chainer to inference mode (no backprop)
    unet = train.UNet()
    chainer.serializers.load_npz( MODEL,unet)
    config.train = False
    config.enable_backprop = False
    # read each mixdown and attempt to separate the vocal (speech) component
    for fmixdown in filelist_mixdown:
        # if audioread raises an error, fall back to scipy's wav reader
        try:
            y_mixdown, _ = load(fmixdown, sr=SR, mono=True)
        except:
            sr_mixdown, y_mixdown = read(fmixdown)
            if not sr_mixdown == SR:
                y_mixdown = resample(y_mixdown, sr_mixdown, SR)
        # compute the short-time spectrum of the input and normalize its magnitude
        spec = stft(y_mixdown, n_fft=FFT_SIZE, hop_length=H, win_length=FFT_SIZE)
        mag = np.abs(spec)
        mag /= np.max(mag)
        phase = np.exp(1.j*np.angle(spec))
        print ('mag.shape', mag.shape)
        start = 0
        end = 128 * (mag.shape[1] // 128)  # must not exceed the input frame count; choose to match the network definition
        # estimate the time-frequency mask that isolates speech (vocal)
        mask = unet(mag[:, start:end][np.newaxis, np.newaxis, 1:, :]).data[0, 0, :, :]
        # re-insert the DC bin (dropped by the 1: slice above) as a zero row
        mask = np.vstack((np.zeros(mask.shape[1], dtype="float32"), mask))
        # apply the mask to the input spectrum and resynthesize with the inverse FFT
        mag2=mag[:, start:end]*mask
        phase2=phase[:, start:end]
        y = istft(mag2*phase2, hop_length=H, win_length=FFT_SIZE)
        # save the separated speech (vocal) to the output file
        if len(path_output_ext)==0:
            # write into the output directory, reusing the input base name
            foutname, _ = os.path.splitext( os.path.basename(fmixdown) )
            fname= os.path.join(PATH_OUTPUT, (foutname + '.wav'))
        else:
            # write to the explicitly given output file name
            fname= PATH_OUTPUT
        print ('saving... ', fname)
        write_wav(fname, y, SR, norm=True)
if __name__ == '__main__':
    # CLI entry point: parse the input/output paths and model snapshot, then run separation
    parser = argparse.ArgumentParser(description='Speech(Vocal) Separation by U-Net')
    parser.add_argument('--input', '-i', default='mixdown',
                        help='Prefix Directory Name Or the file name (ex: xxx.wav) to input as mixed signal')
    parser.add_argument('--out', '-o', default='separate',
                        help='Prefix Directory Name Or the file name (ex: xxx.wav) to output as separated signal')
    parser.add_argument('--model', '-m', default='result/model_420',
                        help='Specify model (ex: result/model_iterationNumber Or unet.model)')
    args = parser.parse_args()
    separate( args.input, args.out, args.model)
| shun60s/Blind-Speech-Separation | separate.py | separate.py | py | 4,185 | python | ja | code | 3 | github-code | 1 | [
{
"api_name": "os.path.isdir",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "librosa.util.find_files",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",... |
9708255908 | from app import app
from flask import request, render_template, jsonify, session
import re
from app.modules.text_translator_ch2en import translator_ch2en
from app.modules.text_translator_en2ch import translator_en2ch
from app.modules.summa_TextRank import TextRank_Summarizer_en
from app.modules.snownlp_TextRank import TextRank_Summarizer_ch
from app.modules.speech_synthesis import tts, tts_foreach, tts_Function
from app.modules.vocabulary_hunter import vocabulary_hunter
from app.modules.word_cloud import englishCloud
from app.modules.openaiSummarizer import openaiSummarizer_en, openaiSummarizer_ch
from app.modules.language_detection import language_detector
# home page
@app.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")
# vocabulary page
@app.route("/vocabulary")
def vocabulary():
    """Render the vocabulary page."""
    return render_template("vocabulary.html")
# help page
@app.route("/help")
def help():
    """Render the help page.  NOTE(review): shadows the builtin help() in this module."""
    return render_template("help.html")
# about page
@app.route("/about")
def about():
    """Render the about page."""
    return render_template("about.html")
# translation endpoint (EN<->ZH) rendering an HTML result page
@app.route("/translate", methods=['POST','GET'])
def translate_output():
    """Translate the posted text between English and Traditional Chinese."""
    text = request.form["text"]
    languageType = language_detector(text)
    if languageType == "en":
        # English source text
        English_Text = text
        # Chinese translation (strip translator artifacts, keep <br> line breaks)
        Chinese_Text = translator_en2ch(English_Text).replace('\\n', '<br>').replace('[\'"', '').replace('"\']', '').replace('\\r', '').replace('\\', '')
        return render_template('translate_output_en2ch.html',English_Text=English_Text,Chinese_Text=Chinese_Text)
    elif languageType == "zh-TW":  # bug fix: reuse the detected language instead of detecting twice
        # Chinese source text
        Chinese_Text = text
        # English translation (strip translator artifacts, normalize sentence spacing)
        English_Text = translator_ch2en(Chinese_Text).replace('\\n', '<br>').replace('\\r', '').replace('\\\\','').replace('\\','').replace('[','').replace(']','').strip('\'').strip('\"').strip().replace('.','. ').replace('. ','. ')
        # generate the English word cloud for the translation
        createCloud = englishCloud(English_Text)
        return render_template('translate_output_ch2en.html',Chinese_Text=Chinese_Text,English_Text=English_Text,createCloud=createCloud)
# sentence-by-sentence translation endpoint (ZH -> EN)
@app.route("/translate_foreach_ch2en", methods=['POST'])
def translate_foreach_ch2en():
    """Translate Chinese input sentence by sentence and synthesize speech per sentence.

    Returns JSON with a list of "<chinese><br><english><br>" strings.
    """
    # read the Chinese input text and the requested voice from the JSON body
    data = request.get_json()
    text = data.get('text')
    voiceID = data.get('voiceID')
    # split the Chinese text into sentences after 。!? punctuation
    sentences = re.split(r'(?<=[。!?])\s*', text)
    sentences_bothLangaugeList = []
    # enumerate gives each sentence a stable index even when sentences repeat
    # (bug fix: sentences.index(sentence) returned the first duplicate's index,
    # so duplicate sentences overwrote the same speech file)
    for index, sentence in enumerate(sentences):
        # translate each Chinese sentence into English and clean translator artifacts
        english_sentence = translator_ch2en(sentence).replace('\\n', '').replace('\\r', '').replace('\\\\','').replace('\\','').replace('[','').replace(']','').strip('\'').strip('\"').strip().replace('.','. ').replace('. ','. ')
        # synthesize speech for the sentence; the index is used as the file name
        tts_foreach(english_sentence, index, voiceID)
        # skip fully empty entries (bug fix: the original mixed bitwise '&' into a
        # chained comparison, so effectively only the Chinese side was tested)
        if len(sentence) == 0 and len(english_sentence) == 0:
            continue
        sentences_bothLangauge = sentence + "<br>" + english_sentence + "<br>"
        sentences_bothLangaugeList.append(sentences_bothLangauge)
    return jsonify({"sentences_bothLangaugeList":sentences_bothLangaugeList})
# sentence-by-sentence translation endpoint (EN -> ZH)
@app.route("/translate_foreach_en2ch", methods=['POST'])
def translate_foreach_en2ch():
    """Translate English input sentence by sentence and synthesize speech per sentence.

    Returns JSON with a list of "<english><br><chinese><br>" strings.
    """
    # read the English input text and the requested voice from the JSON body
    data = request.get_json()
    text = data.get('text')
    voiceID = data.get('voiceID')
    # split the English text after . ? ! punctuation
    splitedSentences = re.split(r'(?<=[.?!])', text)
    # re.split leaves stray whitespace fragments, so strip every piece
    sentences = [sentence.strip() for sentence in splitedSentences]
    sentences_bothLangaugeList = []
    # enumerate gives each sentence a stable index even when sentences repeat
    # (bug fix: sentences.index(sentence) returned the first duplicate's index,
    # so duplicate sentences overwrote the same speech file)
    for index, sentence in enumerate(sentences):
        # translate each English sentence into Chinese and clean translator artifacts
        chinese_sentence = translator_en2ch(sentence).replace('[\'"', '').replace('"\']', '').replace('\\r', '').replace('\\n', '').replace('\\', '')
        # synthesize speech for the sentence; the index is used as the file name
        tts_foreach(sentence, index, voiceID)
        # skip fully empty entries (bug fix: the original mixed bitwise '&' into a
        # chained comparison, so effectively only the English side was tested)
        if len(sentence) == 0 and len(chinese_sentence) == 0:
            continue
        sentences_bothLangauge = sentence + "<br>" + chinese_sentence + "<br>"
        sentences_bothLangaugeList.append(sentences_bothLangauge)
    return jsonify({"sentences_bothLangaugeList":sentences_bothLangaugeList})
# summarization endpoint (extractive TextRank, EN or ZH)
@app.route("/summarize", methods=['POST','GET'])
def summarize_output():
    """Summarize the posted text with TextRank and render the result page."""
    text = request.form["text"]
    languageType = language_detector(text)
    if languageType == "en":
        # English source text
        English_Text = text
        # extractive English summary plus the source with sentence highlights
        highlighted_text, extractive_summary_en = TextRank_Summarizer_en(English_Text)
        createCloud = englishCloud(English_Text)
        return render_template('summarize_output_en.html',English_Text=English_Text,highlighted_text=highlighted_text,extractive_summary_en=extractive_summary_en,createCloud=createCloud,outputAction="summarize_en")
    elif languageType == "zh-TW":  # bug fix: reuse the detected language instead of detecting twice
        # Chinese source text
        Chinese_Text = text
        # extractive Chinese summary plus the source with sentence highlights
        highlighted_text, extractive_summary_ch = TextRank_Summarizer_ch(Chinese_Text)
        return render_template('summarize_output_ch.html',Chinese_Text=Chinese_Text,highlighted_text=highlighted_text,extractive_summary_ch=extractive_summary_ch,outputAction="summarize_ch")
# translate-then-summarize endpoint (EN<->ZH)
@app.route("/TranslateSummarize", methods=['POST','GET'])
def TranslateSummarize_output():
    """Translate the posted text, then summarize the translation."""
    text = request.form["text"]
    languageType = language_detector(text)
    if languageType == "en":
        # English source text
        English_Text = text
        # Chinese translation (strip translator artifacts, keep <br> line breaks)
        Chinese_Text = translator_en2ch(English_Text).replace('\\n', '<br>').replace('[\'"', '').replace('"\']', '').replace('\\r', '').replace('\\', '')
        # extractive Chinese summary of the translation
        highlighted_text, extractive_summary_ch = TextRank_Summarizer_ch(Chinese_Text)
        return render_template("TranslateSummarize_output_en2ch.html",English_Text=English_Text,Chinese_Text=Chinese_Text,extractive_summary_ch=extractive_summary_ch,highlighted_text=highlighted_text,outputAction="TranslateSummarize_en2ch")
    elif languageType == "zh-TW":  # bug fix: reuse the detected language instead of detecting twice
        # Chinese source text
        Chinese_Text = text
        # English translation (strip translator artifacts, normalize sentence spacing)
        English_Text = translator_ch2en(Chinese_Text).replace('\\n', '<br>').replace('\\r', '').replace('\\\\','').replace('\\','').replace('[','').replace(']','').strip('\'').strip('\"').strip().replace('.','. ').replace('. ','. ')
        # extractive English summary plus the highlighted source
        highlighted_text, extractive_summary_en = TextRank_Summarizer_en(English_Text)
        # English word cloud for the translation
        createCloud = englishCloud(English_Text)
        return render_template("TranslateSummarize_output_ch2en.html",Chinese_Text=Chinese_Text,English_Text=English_Text, highlighted_text=highlighted_text, extractive_summary_en=extractive_summary_en, createCloud=createCloud,outputAction="TranslateSummarize_ch2en")
# vocabulary key-point endpoint (English summaries)
@app.route("/vocabularyHunter", methods=['POST'])
def vocabularyHunter():
    """Annotate the English summary with vocabulary key points and return JSON."""
    # read the English summary from the JSON body
    data = request.get_json()
    text = data.get('text')
    keypointText = vocabulary_hunter(text)
    # leftover debug print removed (wrote the whole payload to stdout on every request)
    return jsonify({"keypointText":keypointText})
# speech synthesis output page
@app.route("/tts_output", methods=['POST','GET'])
def tts_output():
    """Synthesize speech for the posted text with the chosen voice, then render the player page."""
    Text= request.form["text"]
    voiceID= request.form["voiceID"]
    tts(Text, voiceID)
    return render_template("tts_output.html",Text=Text)
# guided-reading speech synthesis endpoint
@app.route("/ttsFunction", methods=['POST'])
def ttsFunction():
    """Synthesize speech for one text fragment; echoes the fragment and its id as JSON."""
    # read the text fragment, its element id and the requested voice from the JSON body
    data = request.get_json()
    text = data.get('text')
    textId = data.get('textId')
    voiceID = data.get('voiceID')
    tts_Function(text, textId, voiceID)
    return jsonify({"text":text, "textId":textId})
# abstractive English summarization endpoint
@app.route("/abstractive_summarization_en", methods=['POST'])
def openaiEnglishSummarization():
    """Return an abstractive English summary produced by the OpenAI-backed summarizer."""
    # read the article text from the JSON body
    data = request.get_json()
    text = data.get('text')
    abstractive_summary_en = openaiSummarizer_en(text)
    return jsonify({"abstractive_summary_en":abstractive_summary_en})
# abstractive Chinese summarization endpoint
@app.route("/abstractive_summarization_ch", methods=['POST'])
def openaiChineseSummarization():
    """Return an abstractive Chinese summary produced by the OpenAI-backed summarizer."""
    # read the article text from the JSON body
    data = request.get_json()
    text = data.get('text')
    abstractive_summary_ch = openaiSummarizer_ch(text)
    return jsonify({"abstractive_summary_ch":abstractive_summary_ch})
| ChingHung21/Bilingual-Lesson-Assistant | app/views.py | views.py | py | 9,410 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.render_template",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.app.route",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
... |
687494140 | import streamlit as st
import pandas as pd
import numpy as np
#import seaborn as sns
#import matplotlib.pyplot as plt
import plotly.express as px
from PIL import Image
# Page layout
st.set_page_config(page_title='Churn Analysis FinTech', page_icon=':bar_chart:', layout='wide')
df = pd.read_csv('fintech_dashboard.csv')
DATA = {
'df':df
}
PAGES = {
'Overall Churn Rate': 'overall_churn_rate',
'Age Groups': 'age_groups',
'Housing Status': 'housing_status',
'Payment Types': 'payment_types',
'Deposits & Withdrawals': 'deposits_withdrawals',
'Mobile App Usages': 'app_downloaded',
'Credit Card Application': 'credit_card_application',
'Customer Satisfaction': 'customer_satisfaction',
'Rewards Earned': 'rewards_earned'
}
# Set the default page
DEFAULT_PAGE = 'Overall Churn Rate'
# st.dataframe(df)
st.markdown('Explore the overall churn rate and its variation across customer segments.')
st.title('Churn Analysis FinTech')
# Sidebar with clickable links to page
page = st.sidebar.selectbox('Navigate to:', ['Overall Churn Rate', 'Age Groups', 'Housing Status', 'Payment Types',
'Deposits & Withdrawals', 'Mobile App Usages', 'Credit Card Application',
'Customer Satisfaction', 'Rewards Earned', 'Dashboard'])
image = Image.open('churn-rate.png')
st.sidebar.image(image)
# if page != 'Dashboard':
# # Display dataset
# st.subheader('Dataset')
# st.dataframe(df)
# Calculate the overall churn rate
overall_churn_rate = df['churn'].mean()
# Display overall churn rate
# st.subheader('Overall Churn Rate')
# st.write(f'The overall churn rate in the FinTech company is {overall_churn_rate:.2%}')
# Define layout for each page
if page == 'Dashboard':
    st.subheader('Dashboard')
    # Dashboard Visuals: all charts from the individual pages on one screen
    overall_churn_rate = df['churn'].mean()
    st.write(f'The overall churn rate is: {overall_churn_rate:.2%}')
    # churn rate per age group
    age_group_churn_rate = df.groupby('age')['churn'].mean().reset_index()
    fig_age = px.bar(age_group_churn_rate, x='age', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate Across Age Groups')
    st.plotly_chart(fig_age)
    # churn rate per housing status
    housing_churn_rate = df.groupby('housing')['churn'].mean().reset_index()
    fig_housing = px.bar(housing_churn_rate, x='housing', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate Based on Housing Status')
    st.plotly_chart(fig_housing)
    # churn rate per payment type
    payment_churn_rate = df.groupby('payment_type')['churn'].mean().reset_index()
    fig_payment = px.bar(payment_churn_rate, x='payment_type', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate for Different Payment Types')
    st.plotly_chart(fig_payment)
    # average deposits/withdrawals for churned vs. non-churned customers
    avg_deposits = df.groupby('churn')['deposits'].mean()
    avg_withdrawals = df.groupby('churn')['withdrawal'].mean()
    data = pd.DataFrame({'Churn': avg_deposits.index, 'Avg Deposits': avg_deposits.values, 'Avg Withdrawals': avg_withdrawals.values})
    fig_deposits_withdrawals = px.bar(data, x='Churn', y=['Avg Deposits', 'Avg Withdrawals'],
                                      labels={'value': 'Amount', 'variable': 'Transaction Type'},
                                      title='Average Deposits and Withdrawals for Churned vs. Non-Churned Customers')
    st.plotly_chart(fig_deposits_withdrawals)
    # app-download share within each churn status
    app_downloads_churn = df.groupby('churn')['app_downloaded'].value_counts(normalize=True).unstack().reset_index()
    app_downloads_churn = pd.melt(app_downloads_churn, id_vars='churn', value_vars=[0, 1],
                                  value_name='Percentage', var_name='App Downloaded')
    fig_app_downloads = px.bar(app_downloads_churn, x='churn', y='Percentage', color='App Downloaded',
                               labels={'churn': 'Churn Status', 'Percentage': 'Percentage'},
                               title='App Downloads and Churn Rate')
    st.plotly_chart(fig_app_downloads)
    # credit-card application share within each churn status
    credit_card_app_status = df.groupby('churn')['cc_application_begin'].value_counts(normalize=True).unstack().reset_index()
    credit_card_app_status = pd.melt(credit_card_app_status, id_vars='churn', value_vars=[0, 1],
                                     value_name='Percentage', var_name='Application Status')
    fig_credit_card_app = px.bar(credit_card_app_status, x='churn', y='Percentage', color='Application Status',
                                 labels={'churn': 'Churn Status', 'Percentage': 'Percentage'},
                                 title='Credit Card Application Status and Churn Rate')
    st.plotly_chart(fig_credit_card_app)
    # distribution of like/dislike scores by churn status
    fig_customer_satisfaction = px.box(df, x='churn', y=['cc_liked', 'cc_disliked'],
                                       labels={'churn': 'Churn Status', 'value': 'Satisfaction Score'},
                                       title='Customer Satisfaction: Likes and Dislikes')
    st.plotly_chart(fig_customer_satisfaction)
    # rewards distribution split by churn status
    fig_rewards_earned = px.histogram(df, x='rewards_earned', color='churn', barmode='overlay',
                                      labels={'churn': 'Churn Status', 'rewards_earned': 'Rewards Earned'},
                                      title='Rewards Earned Distribution')
    st.plotly_chart(fig_rewards_earned)
# NOTE(review): this starts a new if-chain, separate from the 'Dashboard' branch above
if page == 'Overall Churn Rate':
    st.subheader('Overall Churn Rate')
    # Calculate overall churn rate
    overall_churn_rate = df['churn'].mean()
    st.write(f'The overall churn rate in the FinTech company is: {overall_churn_rate:.2%}')
    # Create a pie chart for the overall churn rate
    churn_count = df['churn'].value_counts()
    fig = px.pie(
        churn_count,
        names=churn_count.index,
        values=churn_count.values,
        labels=['Not Churned', 'Churned'],
        title='Churn Distribution'
    )
    st.plotly_chart(fig)
    st.dataframe(df)
elif page == 'Age Groups':
    # Bar plot showing churn rate across age groups
    age_group_churn_rate = df.groupby('age')['churn'].mean().reset_index()
    fig_age = px.bar(age_group_churn_rate, x='age', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate Across Age Groups')
    # Display visualization and dataset below it
    st.plotly_chart(fig_age)
# Page 3: Housing Status
elif page == 'Housing Status':
    # Bar plot showing churn rate based on housing status
    housing_churn_rate = df.groupby('housing')['churn'].mean().reset_index()
    fig_housing = px.bar(housing_churn_rate, x='housing', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate Based on Housing Status')
    st.plotly_chart(fig_housing)
# Page 4: Payment Types
elif page == 'Payment Types':
    # Bar plot showing churn rate for different payment types
    payment_churn_rate = df.groupby('payment_type')['churn'].mean().reset_index()
    fig_payment = px.bar(payment_churn_rate, x='payment_type', y='churn', labels={'churn': 'Churn Rate'}, title='Churn Rate for Different Payment Types')
    st.plotly_chart(fig_payment)
# Page 5: Deposits & Withdrawals
elif page == 'Deposits & Withdrawals':
    st.subheader('Deposits and Withdrawals Analysis')
    # Visualization related to deposits and withdrawals (customize according to your specific question)
    # Example: Bar plot showing average deposits and withdrawals for churned vs. non-churned customers
    avg_deposits = df.groupby('churn')['deposits'].mean()
    avg_withdrawals = df.groupby('churn')['withdrawal'].mean()
    data = pd.DataFrame({'Churn': avg_deposits.index, 'Avg Deposits': avg_deposits.values, 'Avg Withdrawals': avg_withdrawals.values})
    fig_deposits_withdrawals = px.bar(data, x='Churn', y=['Avg Deposits', 'Avg Withdrawals'],
                                      labels={'value': 'Amount', 'variable': 'Transaction Type'},
                                      title='Average Deposits and Withdrawals for Churned vs. Non-Churned Customers')
    st.plotly_chart(fig_deposits_withdrawals)
# Page 6: Mobile Apps Usage
elif page == 'Mobile App Usages':
    st.subheader('Mobile App Usages Analysis')
    # share of app downloads within each churn status, shown as grouped bars
    app_downloads_churn = df.groupby('churn')['app_downloaded'].value_counts(normalize=True).unstack().reset_index()
    app_downloads_churn = pd.melt(app_downloads_churn, id_vars='churn', value_vars=[0, 1],
                                  value_name='Percentage', var_name='App Downloaded')
    fig_grouped_bar = px.bar(app_downloads_churn, x='churn', y='Percentage', color='App Downloaded',
                             labels={'churn': 'Churn Status', 'Percentage': 'Percentage'},
                             title='App Downloads and Churn Rate', barmode='group')
    st.plotly_chart(fig_grouped_bar)
# Page 7: Credit Card Application
elif page == 'Credit Card Application':
    st.subheader('Credit Card Application Analysis')
    # share of started credit-card applications within each churn status
    credit_card_app_status = df.groupby('churn')['cc_application_begin'].value_counts(normalize=True).unstack().reset_index()
    credit_card_app_status = pd.melt(credit_card_app_status, id_vars='churn', value_vars=[0, 1],
                                     value_name='Percentage', var_name='Application Status')
    fig_credit_card_app = px.bar(credit_card_app_status, x='churn', y='Percentage', color='Application Status',
                                 labels={'churn': 'Churn Status', 'Percentage': 'Percentage'},
                                 title='Credit Card Application Status and Churn Rate')
    st.plotly_chart(fig_credit_card_app)
# Page 8: Customer Satisfaction
elif page == 'Customer Satisfaction':
    st.subheader("Customer's Overall Experience")
    # Stacked bar plot showing counts of likes and dislikes for each churn status
    customer_satisfaction_counts = df.groupby(['churn', 'cc_liked', 'cc_disliked']).size().reset_index(name='Count')
    # NOTE(review): color_discrete_map is defined but never passed to the figure
    color_discrete_map = {'0': 'lightgrey', '1': 'red'}
    fig_customer_satisfaction = px.bar(customer_satisfaction_counts, x='churn', y='Count', color='cc_liked',
                                       labels={'churn': 'Churn Status', 'Count': 'Count'},
                                       title="Customer Satisfaction: Likes and Dislikes",
                                       barmode='stack')
    st.plotly_chart(fig_customer_satisfaction)
# Page 9: Rewards Earned
elif page == 'Rewards Earned':
    st.subheader('Reward Points earned by Customers who left the bank.')
    # rewards distribution split by churn status
    fig_rewards_earned = px.histogram(df, x='rewards_earned', color='churn', barmode='overlay',
                                      labels={'churn': 'Churn Status', 'rewards_earned': 'Rewards Earned'},
                                      title='Rewards Earned Distribution')
    st.plotly_chart(fig_rewards_earned)
| Asifmehdiyev/dashboard_app | dashboard.py | dashboard.py | py | 11,067 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "streamli... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.