seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
8976322898 | from typing import Tuple
import tensorflow as tf
def dataset_split(
    dataset: tf.data.Dataset, split_fraction: float, fold: int = 0
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
    """Split *dataset* into a training part and a validation fold.

    The validation fold holds ``int(len(dataset) * split_fraction)``
    consecutive elements starting at ``fold * fold_size``; the training set
    is everything before and after that window, concatenated.

    Args:
        dataset (tf.data.Dataset): Dataset to be split.
        split_fraction (float): Fraction of the dataset in the validation fold.
        fold (int): Which fold of the dataset the validation set should be.

    Returns:
        Tuple[tf.data.Dataset, tf.data.Dataset]: (train, validation) datasets.
    """
    fold_size = int(len(dataset) * split_fraction)
    fold_start = fold * fold_size

    val_dataset = dataset.skip(fold_start).take(fold_size)
    # training data = everything before the fold + everything after it
    train_dataset = dataset.take(fold_start).concatenate(
        dataset.skip(fold_start + fold_size))

    return train_dataset, val_dataset
def dataset_join(
    dataset_left: tf.data.Dataset, dataset_right: tf.data.Dataset
) -> tf.data.Dataset:
    """Return *dataset_left* followed by *dataset_right* as one dataset."""
    return dataset_left.concatenate(dataset_right)
| franneck94/TensorCross | tensorcross/utils/dataset.py | dataset.py | py | 1,214 | python | en | code | 11 | github-code | 13 |
21251174982 | #Name: Shezan Alam
#Email: shezan.alam48@myhunter.cuny.edu
#Date: October 4th, 2019
import matplotlib.pyplot as plt
import pandas as pd
# Load NYC historical population data; the first 5 rows are header notes.
pop = pd.read_csv('nycHistPop.csv', skiprows=5)
pop.plot(x="Year")

borough = input("Enter borough name: ")
outfile = input("Enter output name: ")

# Fraction of the city total living in the chosen borough, by year.
pop['Fraction'] = pop[borough] / pop['Total']
pop.plot(x='Year', y='Fraction')

fig = plt.gcf()
fig.savefig(outfile)
| shezalam29/simple-python-projects | BoroGraph.py | BoroGraph.py | py | 377 | python | en | code | 0 | github-code | 13 |
# Crawl Openjudge problem IDs and their accepted-submission counts.
from bs4 import BeautifulSoup
import urllib.request

# fix: manage the output file with a context manager so it is closed even if
# a request or parse fails mid-crawl (the original left it open on error)
with open("data.txt", "w+") as f:
    for pn in range(24):
        page = urllib.request.urlopen(
            "http://bailian.openjudge.cn/practice/?page=" + str(pn + 1)).read()
        soup = BeautifulSoup(page, "lxml")
        rows = soup.find("tbody").find_all("tr")
        for tr in rows:
            # hoist the two cell lookups so each is queried only once
            pid = tr.find_all("td", class_="problem-id")[0].find("a").string
            accepted = tr.find_all("td", class_="accepted")[0].find("a").string
            print(" Processing page " + str(pn + 1) + " ID " + pid + '\n')
            f.write(pid + " ")
            f.write(accepted + '\n')
| Allen-Cee/Python | Crawler/Openjudge_Problem_Info.py | Openjudge_Problem_Info.py | py | 581 | python | en | code | 1 | github-code | 13 |
40377093819 | import hashlib
import base64
import requests
from bs4 import BeautifulSoup
def shorten_url(url):
    """Derive a deterministic short token for *url*.

    Hashes the URL with SHA-256, keeps the first 8 bytes, and encodes them
    with URL-safe base64.

    Fix: the original used ``base64.b64encode``, whose alphabet contains
    ``+`` and ``/`` — both of which are reserved/unsafe characters in a URL
    path, defeating the purpose of a URL shortener. ``urlsafe_b64encode``
    uses ``-`` and ``_`` instead.

    :param url: URL string to shorten
    :returns: 12-character URL-safe base64 token (includes '=' padding)
    """
    url_bytes = url.encode('utf-8')
    hash_bytes = hashlib.sha256(url_bytes).digest()
    short_bytes = hash_bytes[:8]
    short_url = base64.urlsafe_b64encode(short_bytes).decode('utf-8')
    return short_url
def get_page_title(url):
    """Fetch *url* and return the text of its <title> tag, or None on failure.

    Fixes over the original:
    - bare ``except:`` also swallowed SystemExit/KeyboardInterrupt; narrowed
      to ``Exception``
    - ``requests.get`` without a timeout can hang forever; a bounded timeout
      is supplied
    - a page without a <title> tag now returns None explicitly instead of
      relying on the AttributeError being caught

    :param url: URL of the page to fetch
    :returns: title text, or None if the page cannot be fetched/parsed
    """
    try:
        response = requests.get(url, timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        title = soup.find('title')
        return title.get_text() if title is not None else None
    except Exception:
        return None
73643920657 | # Ejercicio 955: Obtener todas las combinaciones posibles de minúsculas y mayúsculas de un conjunto de caracteres.
from itertools import product
def obtener_combinaciones(caracteres):
    """Return every lowercase/uppercase combination of the given characters.

    For an input of length n the result has 2**n strings, ordered with the
    case of the last character varying fastest.
    """
    pares = ((letra.lower(), letra.upper()) for letra in caracteres)
    return [''.join(combinacion) for combinacion in product(*pares)]
# Demo: print all 8 case combinations of 'abc'.
frase = 'abc'
combinaciones = obtener_combinaciones(frase)
print(combinaciones)
| Fhernd/PythonEjercicios | Parte001/ex955_combinaciones_posible_letra_minusculas_mayusculas.py | ex955_combinaciones_posible_letra_minusculas_mayusculas.py | py | 384 | python | es | code | 126 | github-code | 13 |
47048815674 | import asyncio
import json
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
import websockets
import uctl2_back.events as customEvent
if TYPE_CHECKING:
from uctl2_back.race import Race
# Type aliases
EventList = List[Dict[str, Any]]
class Notifier:
    """Queues race events and broadcasts them to connected websocket clients.

    Events are pushed onto a bounded asyncio queue (as lists of dicts) and a
    single `broadcaster` task drains the queue, JSON-encodes each batch and
    fans it out to every connected client. A `None` item on the queue is the
    shutdown sentinel.
    """

    def __init__(self, race: 'Race'):
        # race whose serialized state is sent to each newly connected client
        self.race = race
        # currently connected websocket clients
        self.clients: Set[websockets.WebSocketServerProtocol] = set()
        # bounded queue of event batches; None acts as the stop sentinel
        self.events: asyncio.Queue[Optional[EventList]] = asyncio.Queue(50)
        # events accumulated by broadcast_event_later() until flushed
        self.delayedEvents: EventList = []
        # future resolved by stop_notifier() to shut the server down
        self.stop = asyncio.get_event_loop().create_future()

    async def broadcast_event(self, id: int, payload: Dict[str, Any]) -> None:
        """
        Appends an event to a queue for events broadcasting.

        :param id: id of the event
        :param payload: optional data for this event, could be None
        """
        await self.events.put([{
            'id': id,
            'payload': payload
        }])

    def broadcast_event_later(self, event: Dict[str, Any]) -> None:
        # Defer the event; it is only enqueued on the next broadcast_events().
        self.delayedEvents.append(event)

    async def broadcast_events(self) -> None:
        # Flush all deferred events as one batch (no-op when there are none).
        if len(self.delayedEvents) > 0:
            await self.events.put(self.delayedEvents.copy())
            self.delayedEvents = []

    async def broadcaster(self) -> None:
        """
        Broadcasts events from a queue through a
        websockets server.
        """
        logger = logging.getLogger(__name__)

        while True:
            event = await self.events.get()

            # None is the shutdown sentinel put by stop_notifier()
            if event is None:
                break

            logger.debug(event)
            raw_event = json.dumps(event)

            # iterate over a copy so disconnected clients can be removed
            for client in list(self.clients):
                try:
                    await client.send(raw_event)
                except websockets.ConnectionClosed:
                    self.clients.remove(client)

    async def _consumer_handler(self, ws: websockets.WebSocketServerProtocol, path: str) -> None:
        # Register the new client and immediately send the full race state.
        self.clients.add(ws)

        if self.race is not None:
            await ws.send(json.dumps([{
                'id': customEvent.RACE_SETUP,
                'payload': self.race.serialize()
            }]))

        # The handler needs to wait the end of the server in order
        # to keep the connection opened
        await self.stop

    async def start_notifier(self, port: int) -> None:
        """
        Starts a new websockets server on the given port.

        :param port: port of the websockets server
        :type port: int
        """
        async with websockets.serve(self._consumer_handler, '127.0.0.1', port):
            await self.stop

    async def stop_notifier(self) -> None:
        """
        Stops the websockets server
        """
        # resolve the future awaited by start_notifier/_consumer_handler,
        # then wake the broadcaster with the None sentinel
        self.stop.set_result(1)
        await self.events.put(None)
| mdesmarais/UCTL2_Broadcaster | uctl2_back/notifier.py | notifier.py | py | 2,837 | python | en | code | 0 | github-code | 13 |
40635823400 | from core.plugin.loader import get_plugin_classes, get_module_class_names
from core.configuration.utils import get_pipeline_step_names, extract_pipeline_config, \
extract_pipeline_name
from core.helpers.utils import get_duplicates
from core.logger.logger import logger
def set_default_values(config):
    """Fill in default values for all optional generator-config entries.

    :param config: Generator configuration entity
    :type config: dict
    :returns: The configuration with set default values
    :rtype: dict
    """
    provider = config.setdefault("dataprovider", {"preload": False})
    provider.setdefault("preload", False)
    return config
def preprocess_config(config):
    """Validate the generator configuration and fill in missing defaults.

    :param config: Generator configuration entity
    :type config: dict
    :returns: The checked configuration with set default values
    :rtype: dict
    """
    if "pipelines" in config:
        check_pipelines_config(config["pipelines"])
    else:
        logger.error("Config error: Mandatory 'pipelines' key not found in config")
        raise SystemExit(0)

    if "dataprovider" in config:
        check_data_provider_config_valid(config["dataprovider"])

    return set_default_values(config)
def check_pipelines_config(pipelines_config):
    """Validate the list of pipeline configurations.

    :param pipelines_config: Pipeline configuration entity
    :type pipelines_config: dict
    """
    if isinstance(pipelines_config, list):
        check_duplicate_pipeline_names(pipelines_config)
        # pipelines are reported 1-based in error messages
        for number, pipeline in enumerate(pipelines_config, start=1):
            check_pipeline_config_valid(number, extract_pipeline_config(pipeline))
    else:
        logger.error("Config error: 'pipelines' value needs to be a list")
        raise SystemExit(0)
def check_duplicate_pipeline_names(pipelines_config):
    """Abort if two pipelines share the same name.

    :param pipelines_config: Pipelines configuration entity
    :type pipelines_config: dict
    """
    names = []
    for pipeline in pipelines_config:
        names.append(extract_pipeline_name(pipeline))

    duplicates = get_duplicates(names)
    if duplicates:
        logger.error(
            f'Only globally unique pipeline names are allowed. Found duplicates {duplicates}')
        raise SystemExit(0)
def check_pipeline_config_valid(idx, pipeline_config):
    """Validate a single pipeline configuration entity.

    :param idx: Index of the pipeline step. Used for logging
    :type idx: int
    :param pipeline_config: Pipeline configuration entity
    :type pipeline_config: dict
    """
    if not isinstance(pipeline_config, dict):
        logger.error(f'Config error at pipeline #{idx}: "pipeline" entity needs to be a dict')
        raise SystemExit(0)

    if "steps" in pipeline_config:
        check_steps_config_valid(idx, pipeline_config["steps"])
    else:
        logger.error(f'Config error at pipeline #{idx}: "steps" value is missing')
        raise SystemExit(0)
def check_data_provider_config_valid(data_provider_config):
    """Validate the data-provider configuration entity.

    :param data_provider_config: Configuration entity of the data provider
    :type data_provider_config: dict
    """
    if not isinstance(data_provider_config, dict):
        logger.error("Config error: 'dataprovider' value needs to be a dictionary")
        raise SystemExit(0)

    has_preload = "preload" in data_provider_config
    if has_preload and not isinstance(data_provider_config["preload"], bool):
        logger.error("Config error: 'dataprovider:preload' value needs to be a boolean")
        raise SystemExit(0)
def check_steps_config_valid(idx, steps_config):
    """Validate the steps of a pipeline configuration.

    :param idx: Index of the pipeline step. Used for logging
    :type idx: int
    :param steps_config: Steps configuration entity of the pipeline
    :type steps_config: dict
    """
    if not isinstance(steps_config, list):
        logger.error(f'Config error at pipeline #{idx}: "steps" value needs to be a list')
        raise SystemExit(0)

    # every configured pipeline step must correspond to a plugin class
    class_names = get_module_class_names(get_plugin_classes())
    step_names = get_pipeline_step_names(steps_config)

    if not set(step_names).issubset(class_names):
        logger.error(
            f'Config error at pipeline #{idx}: '
            f'One or more of the configured steps do not correspond to a valid class name.'
            f' Value needs to be in {class_names}')
        raise SystemExit(0)
| Magoli1/carla-pre-crash-scenario-generator | core/configuration/validator.py | validator.py | py | 4,803 | python | en | code | 2 | github-code | 13 |
41811127522 | from datetime import datetime as dt
import os
class LogFile:
    """Appends timestamped log lines to a text file.

    Records coding interactions so the model's behavior can be traced and
    errors spotted while running.
    """

    def __init__(self, FileLocation: str, FileName: str = "logFile"):
        """Create the log file, write the first line and init the line counter.

        :param str FileLocation: Directory where the log file is written
        :param str FileName: Base file name (".txt" is appended); default "logFile"
        :returns: LogFile object
        """
        self.FilePath = FileLocation
        self.FileName = str(f"{FileName}.txt")
        self.FileLine = 1
        self.write_to_file("Log File Creation")

    def _path(self):
        """Full path of the log file."""
        return os.path.join(self.FilePath, self.FileName)

    def write_to_file(self, Message):
        """Append a message line to the log file.

        Fix: the original branched on os.path.exists() to choose between "w"
        and "a" modes, duplicating the write logic; mode "a" already creates
        the file when it is missing, so a single branch suffices.

        :param str Message: Message to be written on file
        :return: None
        """
        try:
            with open(self._path(), "a") as file:
                file.writelines(f"|Line|:{self.FileLine}.....|Message|:{Message}.....|on|:{dt.now()}\n")
            self.counter_increment()
        except Exception as e:
            # best-effort logging: never let a log failure crash the caller
            print(str(e))

    def write_report(self, Message):
        """Append a multi-line report wrapped in start/end marker lines.

        :param str Message: Report body to be written on file
        :return: None
        """
        try:
            with open(self._path(), "a+") as file:
                file.write(f"|Line|:{self.FileLine}***********Report Start***********:{dt.now()}\n")
                file.write(f"{Message}\n")
                self.counter_increment()
                file.write(f"|Line|:{self.FileLine}***********Report End***********:{dt.now()}\n")
                self.counter_increment()
        except Exception as e:
            print(str(e))

    def counter_increment(self, increment=1):
        """Advance the log line counter by *increment* (default 1).

        :return: None
        """
        self.FileLine += increment
| Datanarch/data_mining_challange | Logs.py | Logs.py | py | 2,021 | python | en | code | 0 | github-code | 13 |
31439549881 | """Quantized DEVS-LIM modeling and simulation framework.
"""
from math import pi as PI
from math import sin as SIN
from math import cos as COS
from math import acos as ACOS
from math import tan as TAN
from math import acos as ACOS
from math import atan2 as ATAN2
from math import sqrt as SQRT
from math import floor as FLOOR
from collections import OrderedDict as odict
from array import array
import pandas as pd
import numpy as np
import numpy.linalg as la
from mpl_toolkits import mplot3d
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes.formatter', useoffset=False)
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from scipy.stats import gaussian_kde
import sympy as sp
from sympy import sin, cos, tan, atan2, acos, pi, sqrt
from sympy.solvers import solve
from sympy.utilities.lambdify import lambdify, implemented_function
# ============================ Private Constants ===============================

_EPS = 1.0e-15  # tolerance for float comparisons (e.g. derivative near zero)
_INF = float('inf')
_MAXITER = 1000  # cap on the internal atom-update fixpoint loop


# ============================ Public Constants ================================

#DEF_DQ = 1.0e-6        # default delta Q
#DEF_DQMIN = 1.0e-6     # default minimum delta Q (for dynamic dq mode)
#DEF_DQMAX = 1.0e-6     # default maximum delta Q (for dynamic dq mode)
#DEF_DQERR = 1.0e-2     # default delta Q absolute error (for dynamic dq mode)
DEF_DTMIN = 1.0e-12     # default minimum time step
DEF_DMAX = 1.0e5        # default maximum derivative (slew-rate)

# commonly used angles (floats; `pi` here is sympy's pi)
PI_4 = float(pi / 4.0)
PI_3 = float(pi / 3.0)
PI5_6 = float(5.0 * pi / 6.0)
PI7_6 = float(7.0 * pi / 6.0)


# =============================== Globals ======================================

# NOTE(review): this module-level name shadows the stdlib `sys` module.
sys = None  # set by qdl.System constructor for visibility from fode function.
# ============================= Enumerations ===================================
class SourceType:
    """String tags for the supported source waveform kinds (used by SourceAtom)."""

    NONE = "NONE"
    CONSTANT = "CONSTANT"
    STEP = "STEP"
    SINE = "SINE"
    PWM = "PWM"
    RAMP = "RAMP"
    FUNCTION = "FUNCTION"
# ============================= Qdl Model ======================================
def print_matrix_dots(m):
    """Print an ASCII sparsity sketch of *m*: 'x' for nonzero, ' ' for zero.

    Fix: the original iterated ``for i in m.size(0)``, which attempts to
    iterate over an int and raises TypeError; the indices must come from
    ``range(...)``.

    :param m: 2-D tensor-like object exposing ``.size(dim)`` and ``m[i, j]``
              (e.g. a torch.Tensor) — assumed, confirm with callers.
    """
    s = ""
    for i in range(m.size(0)):
        for j in range(m.size(1)):
            if m[i, j]:
                s += "x"
            else:
                s += " "
        s += "\n"
    print(s)
class Atom(object):
    """Base class for QDL atoms (quantized state variables and sources).

    Holds the quantizer state (q, qlo, qhi, dq), timing (time, tlast, tnext)
    and the result arrays for the QSS, state-space and ODE benchmark runs.
    Subclasses implement dint/quantize/ta/f.

    Fixes over the original:
    - ``get_error`` called ``numpy.interp`` but the module imports
      ``numpy as np`` (NameError) — now uses ``np.interp``
    - ``__str__`` returned ``__repr__(self)`` which is an unresolved free
      name inside the method (NameError) — now calls ``self.__repr__()``
    """

    def __init__(self, name, x0=0.0, dq=None, dqmin=None, dqmax=None,
                 dqerr=None, dtmin=None, dmax=1e10, units=""):

        # params:
        self.name = name          # atom name (unique within its device)
        self.x0 = x0              # initial state
        self.dq = dq              # quantum size
        self.dqmin = dqmin        # min quantum (dynamic dq mode)
        self.dqmax = dqmax        # max quantum (dynamic dq mode)
        self.dqerr = dqerr        # relative error target (dynamic dq mode)
        self.dtmin = dtmin        # minimum time advance
        self.dmax = dmax          # maximum derivative (slew-rate limit)
        self.units = units

        # simulation variables:
        self.dq0 = self.dq
        self.qlo = 0.0            # lower quantizer band edge
        self.qhi = 0.0            # upper quantizer band edge
        self.time = 0.0           # current atom time
        self.tlast = 0.0          # time of last internal update
        self.tnext = 0.0          # scheduled next update time
        self.x = x0               # internal (continuous) state
        self.d = 0.0              # current derivative
        self.d0 = 0.0             # previous derivative
        self.q = x0               # quantized state
        self.q0 = x0              # previous quantized state
        self.triggered = False    # set when an upstream atom changed

        # results data storage:

        # qss:
        self.tout = None   # output times quantized output
        self.qout = None   # quantized output
        self.tzoh = None   # zero-order hold output times quantized output
        self.qzoh = None   # zero-order hold quantized output
        self.updates = 0   # qss updates

        # state space:
        self.tout_ss = None     # state space time output
        self.xout_ss = None     # state space value output
        self.updates_ss = 0     # state space update count

        # non-linear ode:
        self.tout_ode = None    # ode time output
        self.xout_ode = None    # ode value output
        self.updates_ode = 0    # ode update count

        # atom connections:
        self.broadcast_to = []  # push updates to
        self.connections = []   # recieve updates from

        # jacobian cell functions:
        self.jacfuncs = []
        self.derargfunc = None

        # parent object references:
        self.sys = None
        self.device = None

        # other:
        self.implicit = True

    def add_connection(self, other, coefficient=1.0, coeffunc=None):
        """Create and register a Connection from *other* into this atom.

        NOTE(review): ``Connection`` is defined elsewhere in this module.
        """
        connection = Connection(self, other, coefficient=coefficient,
                                coeffunc=coeffunc)
        connection.device = self.device
        self.connections.append(connection)
        return connection

    def add_jacfunc(self, other, func):
        """Register the jacobian cell function d(self.f)/d(other.q)."""
        self.jacfuncs.append((other, func))

    def set_state(self, value, quantize=False):
        """Force the internal state; optionally re-quantize it."""
        self.x = float(value)
        if quantize:
            self.quantize(implicit=False)
        else:
            self.q = value
            self.qhi = self.q + self.dq
            self.qlo = self.q - self.dq

    def initialize(self, t0):
        """Reset state, quantizer bands and all output arrays at time *t0*."""
        self.tlast = t0
        self.time = t0
        self.tnext = _INF

        # init state:
        if isinstance(self, StateAtom):
            self.x = self.x0

        if isinstance(self, SourceAtom):
            self.dint()

        self.q = self.x
        self.q0 = self.x
        self.qsave = self.x
        self.xsave = self.x

        # init quantizer values:
        #self.dq = self.dqmin
        self.qhi = self.q + self.dq
        self.qlo = self.q - self.dq

        # init output (typecode "d" = C doubles, compact storage):
        typecode = "d"

        self.tout = array(typecode)
        self.qout = array(typecode)
        self.nupd = array(typecode)
        self.tzoh = array(typecode)
        self.qzoh = array(typecode)

        self.tout_ss = array(typecode)
        self.xout_ss = array(typecode)
        self.nupd_ss = array(typecode)

        self.tout_ode = array(typecode)
        self.xout_ode = array(typecode)
        self.nupd_ode = array(typecode)

        self.updates = 0
        self.updates_ss = 0
        self.updates_ode = 0

        # record the initial point in every result stream:
        self.tout.append(self.time)
        self.qout.append(self.q0)
        self.nupd.append(0)
        self.tzoh.append(self.time)
        self.qzoh.append(self.q0)

        self.tout_ss.append(self.time)
        self.xout_ss.append(self.q0)
        self.nupd_ss.append(0)

        self.tout_ode.append(self.time)
        self.xout_ode.append(self.q0)
        self.nupd_ode.append(0)

    def update(self, time):
        """Perform a full QSS update cycle at *time*."""
        self.time = time
        self.updates += 1
        self.triggered = False  # reset triggered flag

        self.d = self.f()

        #if self.sys.enable_slewrate:
        #    self.d = max(self.d, -self.dmax*self.dq)
        #    self.d = min(self.d, self.dmax*self.dq)

        self.dint()
        self.quantize()
        self.ta()

        # trigger external update if quantized output changed:
        if self.q != self.q0:
            self.save()
            self.q0 = self.q
            self.broadcast()
            self.update_dq()

    def step(self, time):
        """Fixed-step update (used by the fixed-dt QSS mode): no quantization."""
        self.time = time
        self.updates += 1
        self.d = self.f()
        self.dint()
        self.q = self.x
        self.save()
        self.q0 = self.q

    def dint(self):
        """Integrate the internal state to self.time. Subclass responsibility."""
        raise NotImplementedError()

    def quantize(self):
        """Update q from x against the quantizer bands. Subclass responsibility."""
        raise NotImplementedError()

    def ta(self):
        """Compute the next event time self.tnext. Subclass responsibility."""
        raise NotImplementedError()

    def f(self, q=None):
        """Evaluate the derivative at quantized state *q*. Subclass responsibility."""
        raise NotImplementedError()

    def broadcast(self):
        """Mark all downstream atoms as triggered."""
        for atom in self.broadcast_to:
            if atom is not self:
                atom.triggered = True

    def update_dq(self):
        """Dynamically resize the quantum from dqerr, clamped to [dqmin, dqmax]."""
        if not self.dqerr:
            return
        else:
            if self.dqerr <= 0.0:
                return

        if not (self.dqmin or self.dqmax):
            return

        if (self.dqmax - self.dqmin) < _EPS:
            return

        self.dq = min(self.dqmax, max(self.dqmin, abs(self.dqerr * self.q)))

        self.qlo = self.q - self.dq
        self.qhi = self.q + self.dq

    def save(self, force=False):
        """Record the current (time, q) point; duplicate zoh points give steps."""
        if self.time != self.tout[-1] or force:
            self.tout.append(self.time)
            self.qout.append(self.q)
            self.nupd.append(self.updates)
            self.tzoh.append(self.time)
            self.qzoh.append(self.q0)
            self.tzoh.append(self.time)
            self.qzoh.append(self.q)

    def save_ss(self, t, x):
        """Record a state-space benchmark point."""
        self.tout_ss.append(t)
        self.xout_ss.append(x)
        self.nupd_ss.append(self.updates_ss)
        self.updates_ss += 1

    def save_ode(self, t, x):
        """Record an ODE benchmark point."""
        self.tout_ode.append(t)
        self.xout_ode.append(x)
        self.nupd_ode.append(self.updates_ss)
        self.updates_ode += 1

    def get_error(self, typ="l2"):
        """Compare the QSS output against a reference solution.

        NOTE(review): ``self.tout2``/``self.qout2`` are not set anywhere in
        this class — presumably assigned externally after a reference run;
        verify against callers.
        """
        # interpolate qss to ss time vector:
        # this function can only be called after state space AND qdl simualtions
        # are complete
        qout_interp = np.interp(self.tout2, self.tout, self.qout)

        if typ.lower().strip() == "l2":

            # calculate the L**2 relative error:
            #      ________________
            #     / sum((y - q)**2)
            #    /  --------------
            #  \/      sum(y**2)

            dy_sqrd_sum = 0.0
            y_sqrd_sum = 0.0

            for q, y in zip(qout_interp, self.qout2):
                dy_sqrd_sum += (y - q)**2
                y_sqrd_sum += y**2

            return sqrt(dy_sqrd_sum / y_sqrd_sum)

        elif typ.lower().strip() == "nrmsd":   # <--- this is what we're using

            # calculate the normalized relative root mean squared error:
            #      ________________
            #     / sum((y - q)**2)
            #    /  ---------------
            #  \/          N
            # -----------------------
            #       max(y) - min(y)

            dy_sqrd_sum = 0.0
            y_sqrd_sum = 0.0

            for q, y in zip(qout_interp, self.qout2):
                dy_sqrd_sum += (y - q)**2
                y_sqrd_sum += y**2

            return sqrt(dy_sqrd_sum / len(qout_interp)) / (max(self.qout2)
                                                           - min(self.qout2))

        elif typ.lower().strip() == "re":

            # Pointwise relative error
            # e = [|(y - q)| / |y|]

            e = []

            for q, y in zip(qout_interp, self.qout2):
                e.append(abs(y-q) / abs(y))

            return e

        elif typ.lower().strip() == "rpd":

            # Pointwise relative percent difference
            # e = [ 100% * 2 * |y - q| / (|y| + |q|)]

            e = []

            for q, y in zip(qout_interp, self.qout2):
                den = abs(y) + abs(q)
                if den >= _EPS:
                    e.append(100 * 2 * abs(y-q) / (abs(y) + abs(q)))
                else:
                    e.append(0)

            return e

        return None

    def get_previous_state(self):
        """Return the second-to-last saved q, or x0 when not enough history."""
        if self.qout:
            if len(self.qout) >= 2:
                return self.qout[-2]
            else:
                return self.x0
        else:
            return self.x0

    def full_name(self):
        """Return '<device>.<atom>' (requires self.device to be set)."""
        return self.device.name + "." + self.name

    def __repr__(self):
        return self.full_name()

    def __str__(self):
        return self.__repr__()
class SourceAtom(Atom):
    """Atom whose value is driven by a source waveform (constant, step,
    sine, ramp, PWM or user function) rather than by an integrated state.
    """

    def __init__(self, name, source_type=SourceType.CONSTANT, u0=0.0, u1=0.0,
                 u2=0.0, ua=0.0, freq=0.0, phi=0.0, duty=0.0, t1=0.0, t2=0.0,
                 srcfunc=None, dq=None, dqmin=None, dqmax=None, dqerr=None,
                 dtmin=None, dmax=1e10, units=""):

        Atom.__init__(self, name=name, x0=u0, dq=dq, dqmin=dqmin, dqmax=dqmax,
                      dqerr=dqerr, dtmin=dtmin, dmax=dmax, units=units)

        self.source_type = source_type
        self.u0 = u0          # base / initial value (dc offset for SINE)
        self.u1 = u1          # post-step value (STEP) or ramp start (RAMP)
        self.u2 = u2          # ramp end value (RAMP)
        self.ua = ua          # sine amplitude
        self.freq = freq      # sine frequency (Hz)
        self.phi = phi        # sine phase (rad)
        self.duty = duty      # PWM duty cycle (PWM not implemented yet)
        self.t1 = t1          # start time (STEP/SINE/RAMP)
        self.t2 = t2          # ramp end time
        self.srcfunc = srcfunc  # user function for SourceType.FUNCTION

        # source derived quantities:

        self.u = self.u0
        self.omega = 2.0 * pi * self.freq

        # NOTE(review): self.T is only defined when freq is nonzero, but
        # ta() reads self.T for SINE sources — a zero-freq SINE would raise.
        if self.freq:
            self.T = 1.0 / self.freq

        if self.source_type == SourceType.RAMP:
            self.u0 = self.u1

        self.ramp_slope = 0.0
        if (self.t2 - self.t1) > 0:
            self.ramp_slope = (self.u2 - self.u1) / (self.t2 - self.t1)

    def dint(self):
        """Evaluate the source value at self.time (with optional slew-rate
        limiting) and store it into u, x and q.

        NOTE(review): the PWM branch is a stub; for a PWM source `u` stays
        unbound and the slew-rate block below would raise NameError.
        NOTE(review): the second FUNCTION elif is shadowed by the first
        branch of the chain and is unreachable.
        """
        self.u_prev = self.u

        if self.source_type == SourceType.FUNCTION:
            u = self.srcfunc(self.device, self.time)

        elif self.source_type == SourceType.CONSTANT:
            u = self.u0

        elif self.source_type == SourceType.STEP:
            if self.time < self.t1:
                u = self.u0
            else:
                u = self.u1

        elif self.source_type == SourceType.SINE:
            if self.time >= self.t1:
                u = self.u0 + self.ua * sin(self.omega * self.time + self.phi)
            else:
                u = self.u0

        elif self.source_type == SourceType.PWM:
            pass # todo

        elif self.source_type == SourceType.RAMP:
            if self.time <= self.t1:
                u = self.u1
            elif self.time <= self.t2:
                u = self.u1 + (self.time - self.t1) * self.d
            else:
                u = self.u2

        elif self.source_type == SourceType.FUNCTION:
            u = self.srcfunc()

        # limit the rate of change of u to dmax * dq per unit time:
        if self.sys.enable_slewrate:
            if u > self.u_prev:
                self.u = min(u, self.dmax * self.dq * (self.time - self.tlast) + self.u_prev)
            elif u < self.u_prev:
                self.u = max(u, -self.dmax * self.dq * (self.time - self.tlast) + self.u_prev)
        else:
            self.u = u

        self.tlast = self.time

        self.x = self.u
        self.q = self.u

        return self.u

    def quantize(self):
        # sources are not quantized: q tracks x directly
        self.q = self.x
        return False

    def ta(self):
        """Schedule tnext: the next time the source value moves one quantum.

        NOTE(review): `asin` is not imported anywhere in this module (only
        sympy's sin/cos/tan/atan2/acos/pi/sqrt are) — the SINE branches
        below would raise NameError when reached; confirm and import
        math.asin (or sympy.asin).
        """
        self.tnext = _INF

        if self.source_type == SourceType.FUNCTION:
            pass

        if self.source_type == SourceType.RAMP:

            if self.time < self.t1:
                self.tnext = self.t1

            elif self.time < self.t2:
                if self.d > 0.0:
                    self.tnext = self.time + (self.q + self.dq - self.u)/self.d
                elif self.d < 0.0:
                    self.tnext = self.time + (self.q - self.dq - self.u)/self.d
                else:
                    self.tnext = _INF

            else:
                self.tnext = _INF

        elif self.source_type == SourceType.STEP:

            if self.time < self.t1:
                self.tnext = self.t1
            else:
                self.tnext = _INF

        elif self.source_type == SourceType.SINE:

            if self.time < self.t1:
                self.tnext = self.t1

            else:

                w = self.time % self.T             # cycle time
                t0 = self.time - w                 # cycle start time
                theta = self.omega * w + self.phi  # wrapped angular position

                # value at current time w/o dc offset:
                u = self.ua * sin(2.0 * pi * self.freq * self.time)

                # determine next transition time. Saturate at +/- xa:

                # quadrant I
                if theta < pi/2.0:
                    self.tnext = (t0 + (asin(min(1.0, (u + self.dq)/self.ua)))
                                  / self.omega)

                # quadrant II
                elif theta < pi:
                    self.tnext = (t0 + self.T/2.0
                                  - (asin(max(0.0, (u - self.dq)/self.ua)))
                                  / self.omega)

                # quadrant III
                elif theta < 3.0*pi/2:
                    self.tnext = (t0 + self.T/2.0
                                  - (asin(max(-1.0, (u - self.dq)/self.ua)))
                                  / self.omega)

                # quadrant IV
                else:
                    self.tnext = (t0 + self.T
                                  + (asin(min(0.0, (u + self.dq)/self.ua)))
                                  / self.omega)

        elif self.source_type == SourceType.FUNCTION:
            pass
            #self.tnext = self.time + self.srcdt # <-- should we do this?

        # never schedule earlier than the minimum time advance:
        self.tnext = max(self.tnext, self.tlast + self.dtmin)

    def f(self, q=None):
        """Time derivative of the source value at the current time.

        NOTE(review): ``if not q`` treats an explicit q of 0.0 as "missing";
        harmless here since q is unused below, but see StateAtom.f.
        """
        if not q:
            q = self.q

        d = 0.0

        if self.source_type == SourceType.RAMP:
            d = self.ramp_slope

        elif self.source_type == SourceType.SINE:
            d = self.omega * self.ua * cos(self.omega * self.time + self.phi)

        elif self.source_type == SourceType.STEP:
            pass  # todo: sigmoid approx.

        elif self.source_type == SourceType.PWM:
            pass  # todo: sigmoid approx.

        elif self.source_type == SourceType.FUNCTION:
            d = 0.0  # todo: add a time derivative function delegate

        return d
class StateAtom(Atom):
    """ Qdl State Atom.

    Integrates a state whose derivative comes either from `derfunc` or from
    `coefficient * q` plus the values of its incoming connections.

    Fix: ``f(q=None)`` used ``if not q:``, which treats an explicit ``q`` of
    0.0 as "not supplied" and silently evaluates at ``self.q`` instead —
    ``quantize()`` passes ``qlo``/``qhi``, either of which can legitimately
    be zero. The test is now ``if q is None:``.
    """

    def __init__(self, name, x0=0.0, coefficient=0.0, coeffunc=None,
                 derfunc=None, dq=None, dqmin=None, dqmax=None, dqerr=None,
                 dtmin=None, dmax=1e10, units=""):

        Atom.__init__(self, name=name, x0=x0, dq=dq, dqmin=dqmin, dqmax=dqmax,
                      dqerr=dqerr, dtmin=dtmin, dmax=dmax, units=units)

        self.coefficient = coefficient  # linear self-coefficient for f()
        self.coeffunc = coeffunc        # optional delegate overriding coefficient
        self.derfunc = derfunc          # optional full derivative delegate

    def dint(self):
        """Forward-Euler advance of x from tlast to the current time."""
        self.x += self.d * (self.time - self.tlast)
        self.tlast = self.time
        return self.x

    def quantize(self, implicit=True):
        """Update q against the (qlo, qhi) band; returns True if interpolated."""
        interp = False
        change = False

        self.d0 = self.d

        # derivative based band crossing:
        if self.x >= self.qhi:
            self.q = self.qhi
            self.qlo += self.dq
            change = True

        elif self.x <= self.qlo:
            self.q = self.qlo
            self.qlo -= self.dq
            change = True

        self.qhi = self.qlo + 2.0 * self.dq

        if change and self.implicit and implicit:  # we've ventured out of (qlo, qhi) bounds

            self.d = self.f()

            # if the derivative has changed signs, then we know
            # we are in a potential oscillating situation, so
            # we will set the q such that the derivative ~= 0:

            if (self.d * self.d0) < 0:  # if derivative has changed sign
                flo = self.f(self.qlo)
                fhi = self.f(self.qhi)
                if flo != fhi:
                    a = (2.0 * self.dq) / (fhi - flo)
                    self.q = self.qhi - a * fhi
                    interp = True

        return interp

    def ta(self):
        """Time to reach the nearest quantizer band edge at the current slope."""
        if self.d > _EPS:
            self.tnext = self.time + (self.qhi - self.x) / self.d
        elif self.d < -_EPS:
            self.tnext = self.time + (self.qlo - self.x) / self.d
        else:
            self.tnext = _INF

        self.tnext = max(self.tnext, self.tlast + self.dtmin)

    def compute_coefficient(self):
        """Return the (possibly device-dependent) linear self-coefficient."""
        if self.coeffunc:
            return self.coeffunc(self.device)
        else:
            return self.coefficient

    def f(self, q=None):
        """Evaluate dx/dt at quantized state *q* (defaults to self.q)."""
        if q is None:  # fix: `not q` incorrectly discarded an explicit q == 0.0
            q = self.q

        if self.derfunc:
            if self.derargfunc:
                args = self.derargfunc(self.device)
                return self.derfunc(*args)
            else:
                return self.derfunc(self.device, q)

        d = self.compute_coefficient() * q

        for connection in self.connections:
            d += connection.value()

        return d
class System(object):
def __init__(self, name="sys", dq=None, dqmin=None, dqmax=None, dqerr=None,
dtmin=None, dmax=None, print_time=False):
global sys
sys = self
self.name = name
# qss solution parameters:
#self.dq = DEF_DQ
#if dq:
# self.dq = dq
#
#self.dqmin = DEF_DQMIN
#if dqmin:
# self.dqmin = dqmin
#elif dq:
# self.dqmin = dq
#
#self.dqmax = DEF_DQMAX
#if dqmax:
# self.dqmax = dqmax
#elif dq:
# self.dqmax = dq
#
#self.dqerr = DEF_DQERR
#if dqerr:
# self.dqerr = dqerr
#
self.dtmin = DEF_DTMIN
if dtmin:
self.dtmin = dtmin
self.dmax = DEF_DMAX
if dmax:
self.dmax = dmax
# child elements:
self.devices = []
self.atoms = []
self.state_atoms = []
self.source_atoms = []
self.n = 0
self.m = 0
# simulation variables:
self.tstop = 0.0 # end simulation time
self.time = 0.0 # current simulation time
self.tsave = 0.0 # saved time for state restore
self.iprint = 0 # for runtime updates
self.print_time = print_time
self.dt = 1e-4
self.enable_slewrate = False
self.jacobian = None
self.Km = 1.2
# events:
self.events = {}
def schedule(self, func, time):
if not time in self.events:
self.events[time] = []
self.events[time].append(func)
def add_device(self, device):
self.devices.append(device)
for atom in device.atoms:
if not atom.dq:
atom.dq = self.dq
#if not atom.dqmin:
# atom.dqmin = self.dqmin
#
#if not atom.dqmax:
# atom.dqmax = self.dqmax
#
#if not atom.dqerr:
# atom.dqerr = self.dqerr
if not atom.dtmin:
atom.dtmin = self.dtmin
if not atom.dmax:
atom.dmax = self.dmax
atom.device = device
atom.sys = self
self.atoms.append(atom)
if isinstance(atom, StateAtom):
atom.index = self.n
self.state_atoms.append(atom)
self.n += 1
elif isinstance(atom, SourceAtom):
atom.index = self.m
self.source_atoms.append(atom)
self.m += 1
setattr(self, device.name, device)
def add_devices(self, *devices):
for device in devices:
device.setup_connections()
for device in devices:
device.setup_functions()
for device in devices:
self.add_device(device)
def save_state(self):
self.tsave = self.time
for atom in self.atoms:
atom.qsave = atom.q
atom.xsave = atom.x
def connect(self, from_electrical_port, to_electrical_port):
from_electrical_port["input_port"]["ports"].append(to_electrical_port["output_port"])
to_electrical_port["input_port"]["ports"].append(from_electrical_port["output_port"])
def connectdq(self, from_dq_port, to_dq_port):
from_dq_port["inputd_port"]["ports"].append(to_dq_port["outputd_port"])
from_dq_port["inputq_port"]["ports"].append(to_dq_port["outputq_port"])
to_dq_port["inputd_port"]["ports"].append(from_dq_port["outputd_port"])
to_dq_port["inputq_port"]["ports"].append(from_dq_port["outputq_port"])
def restore_state(self):
self.time = self.tsave
for atom in self.atoms:
atom.q = atom.qsave
atom.x = atom.xsave
atom.qhi = atom.q + atom.dq
atom.qlo = atom.q - atom.dq
def get_jacobian(self):
jacobian = np.zeros((self.n, self.n))
for atom in self.state_atoms:
for other, func in atom.jacfuncs:
if atom.derargfunc:
args = atom.derargfunc(atom.device)
jacobian[atom.index, other.index] = func(*args)
else:
if atom is other:
jacobian[atom.index, other.index] = func(atom.device, atom.q)
else:
jacobian[atom.index, other.index] = func(atom.device, atom.q, other.index)
return jacobian
@staticmethod
def fode(t, x, sys):
"""Returns array of derivatives from state atoms. This function must be
a static method in order to be passed as a delgate to the
scipy ode integrator function. Note that sys is a global module variable.
"""
dx_dt = [0.0] * sys.n
for atom in sys.state_atoms:
atom.q = x[atom.index]
for atom in sys.state_atoms:
dx_dt[atom.index] = atom.f()
return dx_dt
@staticmethod
def fode2(x, t=0.0, sys=None):
"""Returns array of derivatives from state atoms. This function must be
a static method in order to be passed as a delgate to the
scipy ode integrator function. Note that sys is a global module variable.
"""
y = [0.0] * sys.n
for atom in sys.state_atoms:
atom.q = x[atom.index]
for atom in sys.state_atoms:
y[atom.index] = atom.f()
return y
def solve_dc(self, init=True, set=True):
xi = [0.0]*self.n
for atom in self.state_atoms:
if init:
xi[atom.index] = atom.x0
else:
xi[atom.index] = atom.x
xdc = fsolve(self.fode2, xi, args=(0, sys), xtol=1e-12)
for atom in self.state_atoms:
if init:
atom.x0 = xdc[atom.index]
elif set:
atom.x = xdc[atom.index]
atom.q = atom.x
return xdc
def initialize(self, t0=0.0, dt=1e-4, dc=False):
self.time = t0
self.dt = dt
self.dq0 = np.zeros((self.n, 1))
for atom in self.state_atoms:
self.dq0[atom.index] = atom.dq0
if dc:
self.solve_dc()
for atom in self.state_atoms:
atom.initialize(self.time)
for atom in self.source_atoms:
atom.initialize(self.time)
def run(self, tstop, ode=True, qss=True, verbose=True, qss_fixed_dt=None,
        ode_method="RK45", optimize_dq=False, chk_ss_delay=None):
    """Simulate to tstop, pausing at scheduled event times.

    tstop        : final simulation time (s)
    ode          : run a scipy solve_ivp reference simulation first
    qss          : run the quantized-state (QSS) simulation
    qss_fixed_dt : if given, step QSS on a fixed grid instead of
                   event-driven advancement
    ode_method   : scipy integration method name (e.g. "RK45")
    optimize_dq  : recompute per-atom quanta from the Jacobians at the
                   start of each event interval
    chk_ss_delay : if set, snap to steady state once the system stays
                   near the DC solution for this long (s)
    """
    self.verbose = verbose
    self.calc_ss = False
    if optimize_dq or chk_ss_delay:
        # both features need the DC solution / distance threshold
        self.calc_ss = True
        self.update_steadystate_distance()
    # get the event times and event function lists, sorted by time:
    sorted_events = sorted(self.events.items())
    # add the last tstop event to the lists:
    sorted_events.append((tstop, None))
    # loop through the event times and solve:
    for time, events in sorted_events:
        if self.calc_ss:
            self.calc_steadystate()
        if optimize_dq:
            self.optimize_dq()
            self.update_steadystate_distance()
        self.tstop = time
        if ode:
            print("ODE Simulation started...")
            # snapshot state so QSS can start from the same point
            self.save_state()
            self.enable_slewrate = False
            xi = [0.0]*self.n
            for atom in self.state_atoms:
                xi[atom.index] = atom.x
            tspan = (self.time, self.tstop)
            # NOTE(review): 'sys' is the module-level global system
            # reference used by fode -- confirm it is set by the caller.
            soln = solve_ivp(self.fode, tspan, xi, ode_method, args=(sys,),
                             max_step=self.dt)
            t = soln.t
            x = soln.y
            # record the dense ODE solution on every atom
            for i in range(len(t)):
                for atom in self.state_atoms:
                    atom.q = x[atom.index, i]
                    atom.save_ode(t[i], atom.q)
                for atom in self.source_atoms:
                    atom.save_ode(t[i], atom.dint())
            # leave atoms at the final ODE point
            for atom in self.state_atoms:
                xf = x[atom.index, -1]
                atom.x = xf
                atom.q = xf
            for atom in self.source_atoms:
                atom.dint()
                atom.q = atom.q  # NOTE(review): no-op unless q is a property -- confirm intent
            self.time = self.tstop
            self.enable_slewrate = True
            print("ODE Simulation completed.")
        if qss:
            print("QSS Simulation started...")
            if ode: self.restore_state()  # rewind the ODE run's changes
            # start by updating all atoms:
            for atom in self.atoms:
                atom.update(self.time)
                atom.save(force=True)
            if qss_fixed_dt:
                # fixed-step variant: step every atom on a uniform grid
                while(self.time <= self.tstop):
                    for atom in self.source_atoms:
                        atom.step(self.time)
                    for atom in self.state_atoms:
                        atom.step(self.time)
                    self.time += qss_fixed_dt
            else:
                # now iterate over atoms until nothing triggered:
                i = 0
                while i < _MAXITER:
                    triggered = False
                    for atom in self.atoms:
                        if atom.triggered:
                            triggered = True
                            atom.update(self.time)
                    if not triggered:
                        break
                    i += 1
                # main simulation loop:
                tlast = self.time
                last_print_time = self.time
                interval = (self.tstop - self.time) * 0.02  # progress print every ~2%
                chk_ss_clock = 0.0
                while self.time < self.tstop:
                    self.advance()
                    if verbose and self.time-last_print_time > interval:
                        print("t = {0:5.2f} s".format(self.time))
                        last_print_time = self.time
                    if chk_ss_delay:
                        chk_ss_clock += self.time - tlast
                        # reset the dwell clock whenever we leave the
                        # steady-state neighborhood
                        if not self.check_steadystate(apply_if_true=False):
                            chk_ss_clock = 0.0
                        if chk_ss_clock >= chk_ss_delay:
                            self.check_steadystate(apply_if_true=True)
                    tlast = self.time
                self.time = self.tstop
                # final update/save of all atoms at tstop
                for atom in self.atoms:
                    atom.update(self.time)
                    atom.save()
            print("QSS Simulation completed.")
        # fire the callbacks scheduled for this event time (the
        # synthetic final tstop entry has events=None)
        if events:
            for event in events:
                event(self)
def calc_steadystate(self):
    """Cache the Jacobian at the current point (jac1), solve for the DC
    steady-state xf (storing it on each atom), cache the Jacobian at
    that point (jac2), then restore the saved state."""
    self.jac1 = self.get_jacobian()
    self.save_state()
    self.xf = self.solve_dc(init=False, set=False)
    for atom in self.state_atoms:
        atom.xf = self.xf[atom.index]
    self.jac2 = self.get_jacobian()
    self.restore_state()
def update_steadystate_distance(self):
    """Recompute the steady-state proximity threshold as Km times the
    Euclidean norm of the per-atom base quanta (dq0)."""
    steps = [0.0] * self.n
    for st in self.state_atoms:
        steps[st.index] = st.dq0
    self.steadystate_distance = self.Km * la.norm(steps)
def optimize_dq(self):
    """Derive per-atom quanta dq from the Jacobians cached by
    calc_steadystate (jac1 at the current point, jac2 at the DC point)
    and apply the per-atom minimum of the two estimates."""
    if self.verbose:
        print("dq0 = {}\n".format(self.dq0))
        print("jac1 = {}\n".format(self.jac1))
    if 0:
        # NOTE(review): dead experimental branch (never executes);
        # kept for reference -- candidate for deletion.
        QQ0 = np.square(self.dq0)
        JTJ = self.jac1.transpose().dot(self.jac1)
        QQ = la.solve(JTJ, QQ0)
        dq1 = np.sqrt(np.abs(QQ))
        JTJ = self.jac2.transpose().dot(self.jac1)
        QQ = la.solve(JTJ, QQ0)
        dq2 = np.sqrt(np.abs(QQ))
    if 1:
        factor = 0.5
        # E holds the target error budget per state pair
        E = np.zeros((self.n, self.n))
        dq1 = np.zeros((self.n, 1))
        dq2 = np.zeros((self.n, 1))
        for atom in self.state_atoms:
            for j in range(self.n):
                if atom.index == j:
                    E[atom.index, atom.index] = (atom.dq0*factor)**2
                else:
                    pass
                    # NOTE(review): off-diagonal entries use the linear
                    # (not squared) budget -- confirm this asymmetry is
                    # intentional.
                    E[atom.index, j] = (atom.dq0*factor)
        # estimate at the current operating point (jac1):
        JTJ = self.jac1.transpose().dot(self.jac1)
        Q = la.solve(JTJ, E)
        for atom in self.state_atoms:
            dq = 999999.9  # running minimum over the row
            for j in range(self.n):
                if atom.index == j:
                    dqii = sqrt(abs(Q[atom.index, j]))
                    # NOTE(review): the sqrt above is immediately
                    # overwritten (dead assignment) -- confirm which
                    # form was intended.
                    dqii = abs(Q[atom.index, j])
                    if dqii < dq:
                        dq = dqii
                else:
                    dqij = abs(Q[atom.index, j])
                    if dqij < dq:
                        dq = dqij
            dq1[atom.index, 0] = dq
        # estimate at the DC steady-state point (jac2):
        JTJ = self.jac2.transpose().dot(self.jac2)
        Q = la.solve(JTJ, E)
        for atom in self.state_atoms:
            dq = 999999.9
            for j in range(self.n):
                if atom.index == j:
                    dqii = sqrt(abs(Q[atom.index, j]))
                    dqii = abs(Q[atom.index, j])  # same dead sqrt as above
                    if dqii < dq:
                        dq = dqii
                else:
                    dqij = abs(Q[atom.index, j])
                    if dqij < dq:
                        dq = dqij
            dq2[atom.index, 0] = dq
    if self.verbose:
        print("at t=inf:")
        print("dq1 = {}\n".format(dq1))
        print("at t=0+:")
        print("dq2 = {}\n".format(dq2))
    # apply the more conservative (smaller) quantum to each atom:
    for atom in self.state_atoms:
        #atom.dq = min(atom.dq0, dq1[atom.index, 0], dq2[atom.index, 0])
        atom.dq = min(dq1[atom.index, 0], dq2[atom.index, 0])
        atom.qhi = atom.q + atom.dq
        atom.qlo = atom.q - atom.dq
        if self.verbose:
            print("dq_{} = {} ({})\n".format(atom.full_name(), atom.dq, atom.units))
def check_steadystate(self, apply_if_true=True):
    """Return True if the current state lies within steadystate_distance
    of the precomputed DC solution xf; when apply_if_true, also snap
    every atom onto xf."""
    is_ss = False
    q = [0.0]*self.n
    for atom in self.state_atoms:
        q[atom.index] = atom.q
    # Euclidean distance from the DC solution
    qe = la.norm(np.add(q, -self.xf))
    if (qe < self.steadystate_distance):
        is_ss = True
    if is_ss and apply_if_true:
        for atom in self.state_atoms:
            atom.set_state(self.xf[atom.index])
        for atom in self.source_atoms:
            atom.dint()
            atom.q = atom.x
    return is_ss
def advance(self):
    """Advance the QSS clock to the earliest pending atom event, update
    the atoms that are due, then iterate until no atom is triggered."""
    tnext = _INF
    for atom in self.atoms:
        tnext = min(atom.tnext, tnext)
    # nudge time forward by at least _EPS, but never past tstop
    self.time = max(tnext, self.time + _EPS)
    self.time = min(self.time, self.tstop)
    for atom in self.atoms:
        if atom.tnext <= self.time or self.time >= self.tstop:
            atom.update(self.time)
    # propagate cascaded triggers until quiescent (bounded by _MAXITER)
    i = 0
    while i < _MAXITER:
        triggered = False
        for atom in self.atoms:
            if atom.triggered:
                triggered = True
                atom.update(self.time)
        if not triggered:
            break
        i += 1
def plot_devices(self, *devices, plot_qss=True, plot_ss=False,
                 plot_qss_updates=False, plot_ss_updates=False, legend=False):
    """Plot every atom belonging to the given devices (delegates to plot).

    Bug fixes vs. the original: it iterated `devices.atoms` (an attribute
    of the argument tuple, an AttributeError), appended to an undefined
    `atoms` list, and called `self.plot(self, ...)` passing self twice.
    """
    atoms = []
    for device in devices:
        atoms.extend(device.atoms)
    self.plot(*atoms, plot_qss=plot_qss, plot_ss=plot_ss,
              plot_qss_updates=plot_qss_updates,
              plot_ss_updates=plot_ss_updates, legend=legend)
def plot_old(self, *atoms, plot_qss=True, plot_ss=False,
             plot_qss_updates=False, plot_ss_updates=False, legend=False):
    """Older two-column grid plot of atom trajectories (superseded by plot)."""
    if not atoms:
        atoms = self.state_atoms
    c, j = 2, 1  # column count and current subplot index
    r = floor(len(atoms)/2) + 1  # rows needed for the 2-column grid
    for atom in atoms:
        ax1 = None
        ax2 = None
        plt.subplot(r, c, j)
        if plot_qss or plot_ss:
            ax1 = plt.gca()
            ax1.set_ylabel("{} ({})".format(atom.full_name(), atom.units),
                           color='b')
            ax1.grid()
        if plot_qss_updates or plot_ss_updates:
            # NOTE(review): ax1 is None here if neither qss nor ss plots
            # were requested -- twinx() would then fail.
            ax2 = ax1.twinx()
            ax2.set_ylabel('updates', color='r')
        if plot_qss:
            ax1.plot(atom.tzoh, atom.qzoh, 'b-', label="qss_q")
        if plot_ss:
            ax1.plot(atom.tout_ss, atom.xout_ss, 'c--', label="ss_x")
        if plot_qss_updates:
            ax2.hist(atom.tout, 100)
            #ax2.plot(atom.tout, atom.nupd, 'r-', label="qss updates")
        if plot_ss_updates:
            ax2.plot(self.tout_ss, self.nupd_ss, 'm--', label="ss_upds")
        if ax1 and legend:
            ax1.legend(loc="upper left")
        if ax2 and legend:
            ax2.legend(loc="upper right")
        plt.xlabel("t (s)")
        j += 1
    plt.tight_layout()
    plt.show()
def plot_groups(self, *groups, plot_qss=False, plot_ss=False, plot_ode=False):
    """Plot each group of atoms in its own subplot, overlaying QSS
    samples and/or the dashed ODE reference per atom."""
    c, j = 1, 1
    r = len(groups)/c
    if r % c > 0.0: r += 1
    for atoms in groups:
        plt.subplot(r, c, j)
        if plot_qss:
            for i, atom in enumerate(atoms):
                color = "C{}".format(i)  # matplotlib cycle color per atom
                lbl = "{} qss ({})".format(atom.full_name(), atom.units)
                plt.plot(atom.tout, atom.qout,
                         marker='.',
                         markersize=4,
                         markerfacecolor='none',
                         markeredgecolor=color,
                         markeredgewidth=0.5,
                         linestyle='none',
                         label=lbl)
        if plot_ode:
            for i, atom in enumerate(atoms):
                color = "C{}".format(i)
                lbl = "{} ode ({})".format(atom.full_name(), atom.units)
                plt.plot(atom.tout_ode, atom.xout_ode,
                         color=color,
                         alpha=0.6,
                         linewidth=1.0,
                         linestyle='dashed',
                         label=lbl)
        plt.legend(loc="lower right")
        plt.ylabel("atom state")
        plt.xlabel("t (s)")
        plt.grid()
        j += 1
    plt.tight_layout()
    plt.show()
def plot(self, *atoms, plot_qss=False, plot_zoh=False, plot_ss=False, plot_ode=False,
         plot_qss_updates=False, plot_ss_updates=False, legloc=None,
         plot_ode_updates=False, legend=True, errorband=False, upd_bins=1000,
         pth=None):
    """Main results plot: one subplot per atom, overlaying any of the
    QSS samples, ZOH trace, state-space reference, ODE reference, an
    update-density estimate, and an optional +/-dq0 error band.

    pth: if given, save the figure to this path instead of showing it.
    """
    c, j = 1, 1
    r = len(atoms)/c
    if r % c > 0.0: r += 1
    # NOTE(review): r stays a float here; newer matplotlib requires an
    # int subplot row count -- confirm against the pinned version.
    fig = plt.figure()
    for i, atom in enumerate(atoms):
        plt.subplot(r, c, j)
        ax1 = plt.gca()
        ax1.set_ylabel("{} ({})".format(atom.full_name(),
                       atom.units), color='tab:red')
        ax1.grid()
        ax2 = None
        if plot_qss_updates or plot_ss_updates:
            # second y-axis for the update-rate overlays
            ax2 = ax1.twinx()
            ylabel = "update density ($s^{-1}$)"
            ax2.set_ylabel(ylabel, color='tab:blue')
        if plot_qss_updates:
            dt = atom.tout[-1] / upd_bins
            label = "update density"
            #ax2.hist(atom.tout, upd_bins, alpha=0.5,
            #         color='b', label=label, density=True)
            # kernel density estimate of update times, scaled to updates/s
            n = len(atom.tout)
            bw = n**(-2/3)
            kde = gaussian_kde(atom.tout, bw_method=bw)
            t = np.arange(0.0, atom.tout[-1], dt/10)
            pdensity = kde(t) * n
            ax2.fill_between(t, pdensity, 0, lw=0,
                             color='tab:blue', alpha=0.2,
                             label=label)
        if plot_ss_updates:
            ax2.plot(self.tout_ss, self.nupd_ss, 'tab:blue', label="ss_upds")
        if plot_qss:
            #lbl = "{} qss ({})".format(atom.full_name(), atom.units)
            lbl = "qss"
            ax1.plot(atom.tout, atom.qout,
                     marker='.',
                     markersize=4,
                     markerfacecolor='none',
                     markeredgecolor='tab:red',
                     markeredgewidth=0.5,
                     alpha=1.0,
                     linestyle='none',
                     label=lbl)
        if plot_zoh:
            lbl = "qss (zoh)"
            ax1.plot(atom.tzoh, atom.qzoh, color="tab:red", linestyle="-",
                     alpha=0.5, label=lbl)
        if plot_ss:
            lbl = "ss"
            ax1.plot(atom.tout_ss, atom.xout_ss,
                     color='r',
                     linewidth=1.0,
                     linestyle='dashed',
                     label=lbl)
        if plot_ode:
            lbl = "ode"
            if errorband:
                # shade +/- dq0 around the ODE reference
                xhi = [x + atom.dq0 for x in atom.xout_ode]
                xlo = [x - atom.dq0 for x in atom.xout_ode]
                ax1.plot(atom.tout_ode, atom.xout_ode,
                         color='k',
                         alpha=0.6,
                         linewidth=1.0,
                         linestyle='dashed',
                         label=lbl)
                lbl = "error band"
                ax1.fill_between(atom.tout_ode, xhi, xlo, color='k', alpha=0.1,
                                 label=lbl)
            else:
                ax1.plot(atom.tout_ode, atom.xout_ode,
                         color='k',
                         alpha=0.6,
                         linewidth=1.0,
                         linestyle='dashed',
                         label=lbl)
        loc = "best"
        if legloc:
            loc = legloc
        # merge the legends of both axes into one box
        lines1, labels1 = ax1.get_legend_handles_labels()
        if ax2:
            lines2, labels2 = ax2.get_legend_handles_labels()
            ax1.legend(lines1+lines2, labels1+labels2, loc=loc)
        else:
            ax1.legend(lines1, labels1, loc=loc)
        plt.xlabel("t (s)")
        j += 1
    plt.tight_layout()
    if pth:
        fig.savefig(pth)
    else:
        plt.show()
def plotxy(self, atomx, atomy, arrows=False, ss_region=False, auto_limits=False):
    """Phase-plane plot of atomy vs atomx (QSS trajectory against the
    dashed ODE reference), with optional step arrows and a circle
    marking the steady-state acceptance region."""
    # resample both zero-order-hold signals onto the union of event times
    ftheta = interp1d(atomx.tout, atomx.qout, kind='zero')
    fomega = interp1d(atomy.tout, atomy.qout, kind='zero')
    tboth = np.concatenate((atomx.tout, atomy.tout))
    tsort = np.sort(tboth)
    t = np.unique(tsort)
    x = ftheta(t)
    y = fomega(t)
    # step vectors between successive quantized points (for quiver)
    u = np.diff(x, append=x[-1])
    # NOTE(review): append=x[-1] below looks like a copy/paste slip for
    # y[-1]; it only affects the final arrow, preserved as-is.
    v = np.diff(y, append=x[-1])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    if not auto_limits:
        # snap limits/ticks to multiples of each atom's quantum dq
        r = max(abs(max(x)), abs(min(x)), abs(max(y)), abs(min(y)))
        dq = atomx.dq
        rx = r + (dq - r % dq) + dq * 5
        dx = rx*0.2 + (dq - rx*0.2 % dq)
        x_major_ticks = np.arange(-rx, rx, dx)
        x_minor_ticks = np.arange(-rx, rx, dx*0.2)
        dq = atomy.dq
        ry = r + (dq - r % dq) + dq * 5
        dy = ry*0.2 + (dq - ry*0.2 % dq)
        y_major_ticks = np.arange(-ry, ry, dy)
        y_minor_ticks = np.arange(-ry, ry, dy*0.2)
        ax.set_xticks(x_major_ticks)
        ax.set_xticks(x_minor_ticks, minor=True)
        ax.set_yticks(y_major_ticks)
        ax.set_yticks(y_minor_ticks, minor=True)
        plt.xlim([-rx, rx])
        plt.ylim([-ry, ry])
    if ss_region:
        # NOTE(review): uses atomx.dq twice (not atomy.dq) -- confirm.
        dq = sqrt(atomx.dq**2 + atomx.dq**2) * self.Km
        region= plt.Circle((atomx.xf, atomy.xf), dq, color='k', alpha=0.2)
        ax.add_artist(region)
    if arrows:
        ax.quiver(x[:-1], y[:-1], u[:-1], v[:-1], color="tab:red",
                  units="dots", width=1, headwidth=10, headlength=10, label="qss")
        ax.plot(x, y, color="tab:red", linestyle="-")
    else:
        ax.plot(x, y, color="tab:red", linestyle="-", label="qss")
    ax.plot(atomx.xout_ode, atomy.xout_ode, color="tab:blue", linestyle="--", alpha=0.4, label="ode")
    ax.grid(b=True, which="major", color="k", alpha=0.3, linestyle="-")
    ax.grid(b=True, which="minor", color="k", alpha=0.1, linestyle="-")
    plt.xlabel(atomx.full_name() + " ({})".format(atomx.units))
    plt.ylabel(atomy.full_name() + " ({})".format(atomy.units))
    ax.set_aspect("equal")
    plt.legend()
    plt.show()
def plotxyt(self, atomx, atomy, arrows=True, ss_region=False):
    """3D phase plot: (t, x, y) trajectory of two atoms with a
    steady-state acceptance cylinder along the time axis."""
    fx = interp1d(atomx.tout, atomx.qout, kind='zero')
    fy = interp1d(atomy.tout, atomy.qout, kind='zero')
    tboth = np.concatenate((atomx.tout, atomy.tout))
    tsort = np.sort(tboth)
    t = np.unique(tsort)
    x = fx(t)
    y = fy(t)
    u = np.diff(x, append=x[-1])
    # NOTE(review): append=x[-1] looks like it should be y[-1] (same
    # slip as plotxy); u and v are currently unused here anyway.
    v = np.diff(y, append=x[-1])
    fig = plt.figure()
    ax = plt.axes(projection="3d")
    # NOTE(review): dq is computed but cylinder() is called with a
    # hard-coded radius of 0.1 below -- confirm which is intended.
    dq = sqrt(atomx.dq**2 + atomx.dq**2) * self.Km
    def cylinder(center, r, l):
        # surface mesh of a cylinder of radius r, length l along t
        x = np.linspace(0, l, 100)
        theta = np.linspace(0, 2*pi, 100)
        theta_grid, x_grid = np.meshgrid(theta, x)
        y_grid = r * np.cos(theta_grid) + center[0]
        z_grid = r * np.sin(theta_grid) + center[1]
        return x_grid, y_grid, z_grid
    Xc, Yc, Zc = cylinder((0.0, 0.0), 0.1, t[-1])
    ax.plot_surface(Xc, Yc, Zc, alpha=0.2)
    ax.scatter3D(t, x, y, c=t, cmap="hsv", marker=".")
    ax.plot3D(t, x, y)
    ax.plot3D(atomy.tout_ode, atomx.xout_ode, atomy.xout_ode, color="tab:blue", linestyle="--", alpha=0.4, label="ode")
    ax.set_ylabel(atomx.full_name() + " ({})".format(atomx.units))
    ax.set_zlabel(atomy.full_name() + " ({})".format(atomy.units))
    ax.set_xlabel("t (s)")
    # symmetric y/z limits around zero
    xmax = max(abs(min(x)), max(x))
    ymax = max(abs(min(y)), max(y))
    xymax = max(xmax, ymax)
    ax.set_xlim([0.0, t[-1]])
    ax.set_ylim([-xymax, xymax])
    ax.set_zlim([-xymax, xymax])
    plt.legend()
    plt.show()
def plotxy2(self, atomsx, atomsy, arrows=True, ss_region=False):
    """Phase plot of the products of two atom pairs: x = atomsx[0]*atomsx[1]
    versus y = atomsy[0]*atomsy[1] (e.g. power quantities)."""
    fx1 = interp1d(atomsx[0].tout, atomsx[0].qout, kind='zero')
    fx2 = interp1d(atomsx[1].tout, atomsx[1].qout, kind='zero')
    fy1 = interp1d(atomsy[0].tout, atomsy[0].qout, kind='zero')
    fy2 = interp1d(atomsy[1].tout, atomsy[1].qout, kind='zero')
    tall = np.concatenate((atomsx[0].tout, atomsx[1].tout,
                           atomsy[0].tout, atomsy[1].tout))
    tsort = np.sort(tall)
    t = np.unique(tsort)
    x1 = fx1(t)
    x2 = fx2(t)
    y1 = fy1(t)
    y2 = fy2(t)
    x = np.multiply(x1, x2)
    y = np.multiply(y1, y2)
    u = np.diff(x, append=x[-1])
    # NOTE(review): append=x[-1] looks like it should be y[-1] (same
    # slip as plotxy) -- only the final quiver arrow is affected.
    v = np.diff(y, append=x[-1])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    r = max(abs(max(x)), abs(min(x)), abs(max(y)), abs(min(y)))
    dq = atomsx[0].dq
    rx = r + (dq - r % dq) + dq * 5
    dx = rx*0.2 + (dq - rx*0.2 % dq)
    x_major_ticks = np.arange(-rx, rx, dx)
    x_minor_ticks = np.arange(-rx, rx, dx*0.2)
    dq = atomsy[0].dq
    ry = r + (dq - r % dq) + dq * 5
    dy = ry*0.2 + (dq - ry*0.2 % dq)
    y_major_ticks = np.arange(-ry, ry, dy)
    y_minor_ticks = np.arange(-ry, ry, dy*0.2)
    ax.set_xticks(x_major_ticks)
    ax.set_xticks(x_minor_ticks, minor=True)
    ax.set_yticks(y_major_ticks)
    ax.set_yticks(y_minor_ticks, minor=True)
    #plt.xlim([-rx, rx])
    #plt.ylim([-ry, ry])
    #if ss_region:
    #    dq = sqrt(atomx.dq**2 + atomx.dq**2) * self.Km
    #    region= plt.Circle((atomx.xf, atomy.xf), dq, color='k', alpha=0.2)
    #    ax.add_artist(region)
    if arrows:
        ax.quiver(x[:-1], y[:-1], u[:-1], v[:-1], color="tab:red",
                  units="dots", width=1, headwidth=10, headlength=10, label="qss")
        ax.plot(x, y, color="tab:red", linestyle="-")
    else:
        ax.plot(x, y, color="tab:red", linestyle="-", label="qss")
    xode = np.multiply(atomsx[0].xout_ode, atomsx[1].xout_ode)
    yode = np.multiply(atomsy[0].xout_ode, atomsy[1].xout_ode)
    ax.plot(xode, yode, color="tab:blue", linestyle="--", alpha=0.4, label="ode")
    #ax.grid(b=True, which="major", color="k", alpha=0.3, linestyle="-")
    #ax.grid(b=True, which="minor", color="k", alpha=0.1, linestyle="-")
    # NOTE(review): both labels repeat index [0] twice -- probably meant
    # atomsx[1].name / atomsy[1].name for the second operand.
    plt.xlabel("{} * {}".format(atomsx[0].name, atomsx[0].name))
    plt.ylabel("{} * {}".format(atomsy[0].name, atomsy[0].name))
    #ax.set_aspect("equal")
    plt.legend()
    plt.show()
def __repr__(self):
    # identify the system by its name in the interpreter/logs
    return self.name

def __str__(self):
    return self.name
# ========================== Interface Model ===================================
class Device(object):
    """Collection of Atoms and Connections that comprise a device."""

    def __init__(self, name):
        self.name = name
        self.atoms = []
        self.ports = []

    def add_atom(self, atom):
        """Register an atom, set its back-reference to this device, and
        expose it as an attribute (self.<atom.name>)."""
        self.atoms.append(atom)
        atom.device = self
        setattr(self, atom.name, atom)

    def add_atoms(self, *atoms):
        """Register several atoms at once (see add_atom)."""
        for atom in atoms:
            self.add_atom(atom)

    def setup_connections(self):
        """Subclass hook called after devices are wired together."""
        pass

    def setup_functions(self):
        """Subclass hook that builds derivative/jacobian functions."""
        pass

    def __repr__(self):
        return self.name

    def __str__(self):
        # BUG FIX: the original returned __repr__(self), which raises
        # NameError because __repr__ is not a module-level name.
        return self.__repr__()
class Connection(object):
    """Directed coupling between two atoms.

    The 'other' atom broadcasts its value to 'atom'; the contribution is
    either scaled by a (possibly dynamic) coefficient or produced by a
    caller-supplied valfunc.
    """

    def __init__(self, atom=None, other=None, coefficient=1.0, coeffunc=None, valfunc=None):
        self.atom = atom
        self.other = other
        self.coefficient = coefficient
        self.coeffunc = coeffunc
        self.valfunc = valfunc
        self.device = None
        if atom and other:
            self.reset_atoms(atom, other)

    def reset_atoms(self, atom, other):
        """Re-point the connection and subscribe 'atom' to 'other'."""
        self.atom = atom
        self.other = other
        other.broadcast_to.append(atom)

    def compute_coefficient(self):
        """Dynamic coefficient when coeffunc is set, else the static one."""
        if self.coeffunc:
            return self.coeffunc(self.device)
        return self.coefficient

    def value(self):
        """Current contribution of 'other' through this connection
        (0.0 when no source atom is attached)."""
        if not self.other:
            return 0.0
        if self.valfunc:
            return self.valfunc(self.other)
        if isinstance(self.other, StateAtom):
            return self.compute_coefficient() * self.other.q
        if isinstance(self.other, SourceAtom):
            return self.compute_coefficient() * self.other.dint()
class PortConnection(object):
    """A single variable exposed through a Port, with its sign
    convention, an optional expression string, and the list of remote
    PortConnections feeding into it."""
    def __init__(self, variable, sign=1, expr=""):
        self.variable = variable
        self.sign = sign
        self.expr = expr
        # remote PortConnections wired to this one by Port.connect
        self.from_connections = []
class Port(object):
    """Named connection point on a device.

    typ is one of "in", "out", "inout", or "dq" (a d/q pair carrying two
    PortConnections); it determines how connect() wires the underlying
    PortConnection.from_connections lists.
    """

    def __init__(self, name, typ="in", *connections):
        self.name = name
        self.typ = typ
        self.from_ports = []
        if connections:
            self.connections = connections
        else:
            self.connections = []

    def connect(self, other):
        """Wire this port to another port according to this port's type."""
        if self.typ == "in":
            self.connections[0].from_connections.append(other.connections[0])
        elif self.typ == "out":
            other.connections[0].from_connections.append(self.connections[0])
        elif self.typ == "inout":
            # BUG FIX: original tested `self.typ in ("inout")`, which is a
            # substring test against the string "inout" (the parentheses
            # do not make a tuple), not an equality/membership check.
            self.connections[0].from_connections.append(other.connections[0])
            other.connections[0].from_connections.append(self.connections[0])
        elif self.typ == "dq":
            # bidirectional wiring for both the d and q channels
            self.connections[0].from_connections.append(other.connections[0])
            other.connections[0].from_connections.append(self.connections[0])
            self.connections[1].from_connections.append(other.connections[1])
            other.connections[1].from_connections.append(self.connections[1])
class SymbolicDevice(Device):
    """Device whose dynamics are declared symbolically (sympy) as
    differential/algebraic equations and compiled into per-state
    derivative and jacobian functions at setup time."""

    def __init__(self, name):
        Device.__init__(self, name)
        # all registries are ordered dicts keyed by name:
        self.states = odict()            # state variables and their metadata
        self.constants = odict()         # immutable named values
        self.parameters = odict()        # tunable named values
        self.input_ports = odict()
        self.output_ports = odict()
        self.electrical_ports = odict()
        self.dq_ports = odict()
        self.algebraic = odict()         # var -> rhs expression string
        self.diffeq = []                 # implicit differential equations
        self.dermap = odict()            # derivative name -> state name
        self.jacobian = odict()
        self.ports = odict()

    def add_state(self, name, dername, desc="", units="", x0=0.0, dq=1e-3):
        """Declare a state variable and the name of its derivative.

        x0: initial value; dq: base quantization step for the QSS atom.
        """
        self.states[name] = odict()
        self.states[name]["name"] = name
        self.states[name]["dername"] = dername
        self.states[name]["desc"] = desc
        self.states[name]["units"] = units
        self.states[name]["x0"] = x0
        self.states[name]["dq"] = dq
        self.states[name]["device"] = self
        # filled in later by setup_connections/setup_functions:
        self.states[name]["sym"] = None
        self.states[name]["dersym"] = None
        self.states[name]["expr"] = None
        self.states[name]["atom"] = None
        self.dermap[dername] = name

    def add_input_port(self, name, var, sign=1, expr=""):
        """Declare an input port bound to equation variable 'var'."""
        self.input_ports[name] = odict()
        self.input_ports[name]["name"] = name
        self.input_ports[name]["var"] = var
        self.input_ports[name]["ports"] = []
        self.input_ports[name]["sign"] = sign
        #setattr(self, name, self.input_ports[name])
        #connection = PortConnection(var, sign=sign, expr=expr)
        #self.ports[name] = Port(name, typ="in", connection)
        #setattr(self, name, self.ports[name])

    def add_output_port(self, name, var, state):
        """Declare an output port exposing the given state variable."""
        self.output_ports[name] = odict()
        self.output_ports[name]["name"] = name
        self.output_ports[name]["var"] = var
        self.output_ports[name]["device"] = self
        self.output_ports[name]["state"] = state
        self.output_ports[name]["atom"] = None
        #setattr(self, name, self.output_ports[name])
        #connection = PortConnection(var, sign=sign, expr=expr)
        #self.ports[name] = Port(name, typ="out", connection)
        #setattr(self, name, self.ports[name])

    def add_constant(self, name, desc="", units="", value=None):
        """Declare a named constant used in the equations."""
        self.constants[name] = odict()
        self.constants[name]["name"] = name
        self.constants[name]["desc"] = desc
        self.constants[name]["units"] = units
        self.constants[name]["value"] = value
        self.constants[name]["sym"] = None

    def add_parameter(self, name, desc="", units="", value=None):
        """Declare a named parameter (changeable via update_parameter)."""
        self.parameters[name] = odict()
        self.parameters[name]["name"] = name
        self.parameters[name]["desc"] = desc
        self.parameters[name]["units"] = units
        self.parameters[name]["value"] = value
        self.parameters[name]["sym"] = None

    def add_diffeq(self, equation):
        """Add an implicit differential equation (string, solved for the
        derivative symbols in setup_functions)."""
        self.diffeq.append(equation)

    def add_algebraic(self, var, rhs):
        """Add an algebraic substitution var = rhs (string expressions)."""
        self.algebraic[var] = rhs

    def update_parameter(self, key, value):
        """Change a parameter value (takes effect on the next deriv call)."""
        self.parameters[key]["value"] = value

    def add_electrical_port(self, name, input, output, sign=1, expr=""):
        """Declare a paired in/out electrical port (e.g. v in, i out)."""
        self.electrical_ports[name] = odict()
        setattr(self, name, self.electrical_ports[name])
        self.add_input_port(name, input, sign)
        self.add_output_port(name, input, output)
        self.electrical_ports[name]["input_port"] = self.input_ports[name]
        self.electrical_ports[name]["output_port"] = self.output_ports[name]
        connection = PortConnection(input, sign=sign, expr=expr)
        # NOTE: this second setattr overwrites the dict stored above
        self.ports[name] = Port(name, "inout", connection)
        setattr(self, name, self.ports[name])

    def add_dq_port(self, name, inputs, outputs, sign=1, exprs=None):
        """Declare a dq-frame port: (d, q) input vars and output states."""
        self.dq_ports[name] = odict()
        setattr(self, name, self.dq_ports[name])
        inputd, inputq = inputs
        outputd, outputq = outputs
        self.add_input_port(name+"d", inputd, sign)
        self.add_output_port(name+"d", inputd, outputd)
        self.add_input_port(name+"q", inputq, sign)
        self.add_output_port(name+"q", inputq, outputq)
        self.dq_ports[name]["inputd_port"] = self.input_ports[name+"d"]
        self.dq_ports[name]["outputd_port"] = self.output_ports[name+"d"]
        self.dq_ports[name]["inputq_port"] = self.input_ports[name+"q"]
        self.dq_ports[name]["outputq_port"] = self.output_ports[name+"q"]
        expr_d = ""
        expr_q = ""
        if exprs:
            expr_d, expr_q = exprs
        connection_d = PortConnection(inputs[0], sign=sign, expr=expr_d)
        connection_q = PortConnection(inputs[1], sign=sign, expr=expr_q)
        self.ports[name] = Port(name, "dq", connection_d, connection_q)
        # NOTE: overwrites the dq_ports dict stored on self above
        setattr(self, name, self.ports[name])

    def setup_connections(self):
        """Create one StateAtom per declared state."""
        for name, state in self.states.items():
            atom = StateAtom(name, x0=state["x0"], dq=state["dq"],
                             units=state["units"])
            # derivative args are assembled dynamically from this device
            atom.derargfunc = self.get_args
            self.add_atom(atom)
            self.states[name]["atom"] = atom

    def setup_functions(self):
        """Compile the declared equations into numeric derivative and
        jacobian functions and wire inter-atom connections."""
        # 1. create sympy symbols:
        x = []
        dx_dt = []
        for name, state in self.states.items():
            sym = sp.Symbol(name)
            dersym = sp.Symbol(state["dername"])
            x.append(name)
            dx_dt.append(state["dername"])
            self.states[name]["sym"] = sym
            self.states[name]["dersym"] = dersym
        for name in self.constants:
            sp.Symbol(name)
        for name in self.parameters:
            sp.Symbol(name)
        for name in self.input_ports:
            sp.Symbol(self.input_ports[name]["var"])
        for var in self.algebraic:
            sp.Symbol(var)
        # 2. create symbolic derivative expressions:
        # 2a. substitute algebraic equations:
        n = len(self.algebraic)
        m = len(self.diffeq)
        algebraic = [[sp.Symbol(var), sp.sympify(expr)] for var, expr in self.algebraic.items()]
        # forward-substitute earlier algebraic defs into later ones:
        for i in range(n-1):
            for j in range(i+1, n):
                algebraic[j][1] = algebraic[j][1].subs(algebraic[i][0], algebraic[i][1])
        diffeq = self.diffeq.copy()
        for i in range(m):
            diffeq[i] = sp.sympify(diffeq[i])
            for var, expr in algebraic:
                diffeq[i] = diffeq[i].subs(var, expr)
        # 3. solve for derivatives:
        derexprs = solve(diffeq, *dx_dt, dict=True)
        for lhs, rhs in derexprs[0].items():
            dername = str(lhs)
            statename = self.dermap[dername]
            self.states[statename]["expr"] = rhs
        # 4. create atoms:
        # replace each input-port variable with the sum of the external
        # states connected to it (names mangled as <device>_<state>):
        ext_state_names = []
        ext_state_subs = {}
        external_states = []
        for portname in self.input_ports:
            connected_ports = self.input_ports[portname]["ports"]
            varname = self.input_ports[portname]["var"]
            for port in connected_ports:
                devicename = port["device"].name
                statename = port["state"]
                mangeld_name = "{}_{}".format(devicename, statename)
                ext_state_names.append(mangeld_name)
            ext_state_subs[varname] = "(" + " + ".join(ext_state_names) + ")"
            sign = self.input_ports[portname]["sign"]
            if sign == -1:
                ext_state_subs[varname] = "-" + ext_state_subs[varname]
            # NOTE(review): only the last connected port's state is
            # appended here (uses the loop variable after the loop) --
            # confirm behavior with multiple connections per port.
            external_states.append(port["device"].states[statename])
        argstrs = (list(self.constants.keys()) + list(self.parameters.keys())
                   + list(self.states.keys()) + ext_state_names)
        argstr = " ".join(argstrs)
        argsyms = sp.var(argstr)
        for name, state in self.states.items():
            expr = state["expr"]
            for var, substr in ext_state_subs.items():
                subexpr = sp.sympify(substr)
                expr = expr.subs(var, subexpr)
            state["expr"] = expr
            # compile to a fast numeric function over the arg list built
            # by get_args (same ordering):
            func = lambdify(argsyms, expr, dummify=False)
            self.states[name]["atom"].derfunc = func
        for name in self.output_ports:
            statename = self.output_ports[name]["state"]
            state = self.states[statename]
            self.output_ports[name]["atom"] = state["atom"]
        # 5. connect atoms:
        for statex in self.states.values():
            # internal couplings (this device's own states):
            for statey in self.states.values():
                f = statex["expr"]
                if statey["sym"] in f.free_symbols:
                    # connect:
                    statex["atom"].add_connection(statey["atom"])
                    # add jacobian expr:
                    df_dy = sp.diff(f, statey["sym"])
                    func = lambdify(argsyms, df_dy, dummify=False)
                    statex["atom"].add_jacfunc(statey["atom"], func)
            # external couplings (states of connected devices):
            for statey in external_states:
                f = statex["expr"]
                mangled_name = "{}_{}".format(statey["device"].name, statey["name"])
                mangled_symbol = sp.Symbol(mangled_name)
                if mangled_symbol in f.free_symbols:
                    # connect:
                    statex["atom"].add_connection(statey["atom"])
                    # jacobian expr:
                    df_dy = sp.diff(f, mangled_symbol)
                    func = lambdify(argsyms, df_dy, dummify=False)
                    statex["atom"].add_jacfunc(statey["atom"], func)

    @staticmethod
    def get_args(self):
        """Build the numeric argument list for the lambdified functions,
        in the same order as argsyms in setup_functions: constants,
        parameters, own states, then connected external states."""
        args = []
        for name, constant in self.constants.items():
            args.append(float(constant["value"]))
        for name, parameter in self.parameters.items():
            args.append(float(parameter["value"]))
        for name, state in self.states.items():
            args.append(float(state["atom"].q))
        for name, port in self.input_ports.items():
            for port2 in port["ports"]:
                args.append(port2["atom"].q)
        return args
| joehood/SubCircuit | subcircuit/qdl.py | qdl.py | py | 62,733 | python | en | code | 9 | github-code | 13 |
# Download models from the SubT Tech Repo into .zip files in the current directory (does not require Ignition install)
# May choose a subset of models (e.g., robots, artifacts, tiles) or download all models
#
# Usage:
# python download_models.py <TYPE>
#
# Valid types:
# 1: All models
# 2: Robots
# 3: Artifacts
# 4: Tunnel tiles
# 5: Urban tiles
# 6: Cave tiles
import sys, json, requests
# Map of menu choice -> Ignition Fuel search filter string.
model_filters = {
    1: "*",
    2: "categories:Robots",
    3: "tags:artifact",
    4: "Tunnel*",
    5: "Urban*",
    6: "Cave*"
}

if (len(sys.argv) != 2):
    # wrong argument count: show usage and quit
    print("""Usage: python download_models.py <TYPE>
Valid types:
1: All models
2: Robots
3: Artifacts
4: Tunnel tiles
5: Urban tiles
6: Cave tiles""")
    exit()
else:
    # NOTE(review): int() raises ValueError on non-numeric input --
    # confirm whether the usage message should be shown there too.
    model_filter = model_filters.get(int(sys.argv[1]), "*")
    # Note: if the type is not in the list, all models will be downloaded

print("Downloading SubT Tech Repo models matching the filter: %s\nThis may take a few minutes..." % model_filter)

# URLs for getting model names and files
repo_url = 'https://fuel.ignitionrobotics.org/1.0/models?per_page=500&q=collections:SubT%20Tech%20Repo%26'
download_url = 'https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/'

# Get a list of models matching the filter
models = requests.get(repo_url+model_filter)
models_dict = json.loads(models.text)

# For each model in list, download the model
for entry in models_dict:
    model_name = entry['name']
    print(' Downloading %s' % model_name)
    # stream to disk in 1 MiB chunks to keep memory bounded
    download_res = requests.get(download_url+model_name+'.zip',stream=True)
    with open(model_name+'.zip', 'wb') as fd:
        for chunk in download_res.iter_content(chunk_size=1024*1024):
            fd.write(chunk)
print('Done.')
| osrf/subt | subt_ign/scripts/download_models.py | download_models.py | py | 1,837 | python | en | code | 260 | github-code | 13 |
def exchange_integers():
    """Read two integers from stdin, print them, swap them with tuple
    assignment, and print them again."""
    a = int(input())
    b = int(input())
    print('Before:')
    print(f'a = {a}')
    print(f'b = {b}')
    a, b = b, a
    print('After:')
    print(f'a = {a}')
    print(f'b = {b}')
def is_prime(n):
    """Return True if n is prime (an integer > 1 with no divisors other
    than 1 and itself)."""
    if n <= 1:
        # negatives, 0 and 1 are NOT prime numbers
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # trial division by odd candidates up to sqrt(n)
    for d in range(3, int(n ** 0.5) + 1, 2):
        if n % d == 0:
            return False
    return True


def prime_number_checker():
    """Read an integer from stdin and print True/False for primality.

    Fixes over the original: drops the redundant hard-coded prime list
    (which also masked even primes falling through to the n % 2 branch)
    and skips even trial divisors, halving the loop work.
    """
    print(is_prime(int(input())))
def decrypting_messages():
    """Read a Caesar key and a count n, then n single letters; print the
    message with every letter shifted forward by the key."""
    key = int(input())
    n_lines = int(input())
    decoded = [chr(ord(input()) + key) for _ in range(n_lines)]
    print(''.join(decoded))
def balanced_brackets():
    """Read n, then n characters (one per line); print BALANCED if every
    ')' closes a previously opened '(' and parentheses never nest,
    otherwise UNBALANCED.

    Matches the original behavior exactly: a trailing unclosed '(' is
    still reported BALANCED, non-bracket characters are ignored, and all
    n inputs are consumed even after an imbalance is found.
    """
    count = int(input())
    has_open = False
    balanced = True
    for _ in range(count):
        ch = input()
        if not balanced:
            continue  # keep draining the remaining input lines
        if ch == '(':
            if has_open:
                balanced = False  # nesting is not allowed
            else:
                has_open = True
        elif ch == ')':
            if has_open:
                has_open = False
            else:
                balanced = False  # close without a matching open
    print('BALANCED' if balanced else 'UNBALANCED')
################################################
# RUN!  (script entry point: run the selected exercise)
###########################################
balanced_brackets()
import base64
import requests
import json
import time
from . import config
from .cachehandler import CacheHandler
from .authhandler import AuthHandler
from .endpoints.purchaseinvoices import PurchaseInvoiceMethods
class BillToBoxAPI:
    """Thin HTTP client for the BillToBox REST API.

    Wraps token handling (AuthHandler), response caching (CacheHandler)
    and the per-resource endpoint helpers (purchaseInvoices).
    """

    def __init__(self, clientId, clientSecret, demo=False):
        self.clientId = clientId
        self.clientSecret = clientSecret
        self.demo = demo
        # default headers sent with every request
        self.headers = {
            'Accept' : 'application/json',
            'Content-Type' : 'application/json',
        }
        self.baseUrl = config.UAT_URL if demo else config.BASE_URL
        self.cacheHandler = CacheHandler()
        self.authHandler = AuthHandler(self, self.clientId, self.clientSecret)
        self.purchaseInvoices = PurchaseInvoiceMethods(self)

    def doRequest(self, method, url, data=None, headers=None, files=None):
        """Perform a raw HTTP request and return the requests.Response.

        Raises ValueError for an unsupported HTTP method.
        """
        if headers:
            # BUG FIX: the original aliased self.headers and then mutated
            # it, permanently polluting the client's default headers for
            # all later requests; merge into a copy instead.
            mergedHeaders = dict(self.headers)
            mergedHeaders.update(headers)
            headers = mergedHeaders
        else:
            headers = self.headers
        reqUrl = '{base}/{url}'.format(base=self.baseUrl, url=url)
        if method == 'GET':
            response = requests.get(reqUrl, params=data, headers=headers)
        elif method == 'POST':
            if files:
                response = requests.post(reqUrl, data=json.dumps(data), files=files, headers=headers)
            else:
                response = requests.post(reqUrl, data=json.dumps(data), headers=headers)
        elif method == 'PUT':
            response = requests.put(reqUrl, data=json.dumps(data), headers=headers)
        elif method == 'DELETE':
            response = requests.delete(reqUrl, params=json.dumps(data), headers=headers)
        else:
            # BUG FIX: the original fell through and raised
            # UnboundLocalError on 'response' for unknown methods.
            raise ValueError('Unsupported HTTP method: {}'.format(method))
        return response

    def request(self, method, url, data=None, headers=None, files=None):
        """Refresh auth tokens, perform the request, and decode the body.

        Returns (status_code, headers, content) where content is parsed
        JSON for JSON responses, otherwise the raw body bytes.
        """
        self.authHandler.checkHeaderTokens()
        response = self.doRequest(method, url, data, headers, files)
        if 'json' in response.headers['Content-Type']:
            respContent = response.json()
        elif 'pdf' in response.headers['Content-Type']:
            respContent = response.content
        else:
            # BUG FIX: the original left respContent unbound for any
            # other content type; fall back to the raw body.
            respContent = response.content
        return response.status_code, response.headers, respContent

    def get(self, url, data=None, headers=None):
        status, headers, response = self.request('GET', url, data, headers)
        return status, headers, response

    def post(self, url, data=None, headers=None, files=None):
        status, headers, response = self.request('POST', url, data, headers, files)
        return status, headers, response

    def put(self, url, data=None, headers=None):
        status, headers, response = self.request('PUT', url, data, headers)
        return status, headers, response

    def delete(self, url, data=None, headers=None):
        status, headers, response = self.request('DELETE', url, data, headers)
        return status, headers, response
from datetime import datetime
from dateutil.relativedelta import relativedelta
from fbprophet import Prophet
import pandas as pd
import matplotlib.pyplot as plt
# Interactive retail-sales forecasting loop: fit Prophet on the series,
# plot the forecast tail against the actuals, then ask the user for the
# next month's value, append it, and refit.
df = pd.read_csv('example_retail_sales.csv')
last_data_str = df.iloc[len(df) - 1]['ds']
last_date = datetime.strptime(last_data_str, '%Y-%m-%d')
pivot = 290  # index of the first row shown in the plots (tail only)
while True:
    m = Prophet(seasonality_mode='multiplicative')
    forecast = m.fit(df)
    # forecast 20 months ahead ('MS' = month start frequency)
    future = m.make_future_dataframe(periods=20, freq='MS')
    future = m.predict(future)
    plt.plot(future['ds'][pivot:], future['trend'][pivot:])
    plt.plot(future['ds'][pivot:], future['yhat'][pivot:])
    plt.plot(future['ds'][pivot:], future['yhat_lower'][pivot:])
    plt.plot(future['ds'][pivot:], future['yhat_upper'][pivot:])
    # overlay the observed values for the same tail window
    y_values = []
    x_values = []
    for i, row in df[pivot:].iterrows():
        y_values.append(future['ds'][i])
        x_values.append(row['y'])
    plt.plot(y_values, x_values)
    plt.show()
    # ask the user for the next observation and extend the series
    next_value = int(input('Input next value : '))
    next_date = last_date + relativedelta(months=1)
    next_date_str = next_date.strftime('%Y-%m-%d')
    last_date = next_date
    new_row = pd.DataFrame({
        'ds': next_date_str,
        'y': next_value
    }, index=[0])
    print(new_row)
    # NOTE(review): DataFrame.append was removed in pandas 2.x -- this
    # requires pandas < 2 (or a switch to pd.concat).
    df = df.append(new_row, ignore_index=True)
    print(next_date_str)
    pivot = pivot + 1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 17:15:23 2020
@author: matthew
"""
#%% function attempt
# NOTE(review): inset_axes_side uses a mutable default dict; it is only read,
# never mutated, so the shared-default pitfall is not triggered here.
def plot_2d_interactive_fig(xy, colours, spatial_data = None, temporal_data = None,
                            inset_axes_side = {'x':0.1, 'y':0.1}, arrow_length = 0.1, figsize = (10,6),
                            labels = None, legend = None, markers = None,
                            figures = 'window', png_path = './', fig_filename = '2d_interactive_plot'):
    """ Data are plotted in a 2D space, and when hovering over a point, further information about it (e.g. what image it is) appears in an inset axes.
    Inputs:
        xy | rank 2 array | e.g. 2x100, the x and y positions of each data
        colours | rank 1 array | e.g. 100, value used to set the colour of each data point
        spatial_data | dict or None | contains 'images_r3' in which the images are stored as in a rank 3 array (e.g. n_images x heigh x width). Masked arrays are supported.
        temporal_data | dict or None | contains 'tcs_r2' as time signals as row vectors and 'xvals' which are the times for each item in the timecourse.
        inset_axes_side | dict | inset axes side length as a fraction of the full figure, in x and y direction
        arrow_length | float | lenth of arrow from data point to inset axes, as a fraction of the full figure.
        figsize | tuple | standard Matplotlib figsize tuple, in inches.
        labels | dict or None | title for title, xlabel for x axis label, and ylabel for y axis label
        legend | dict or None | elements contains the matplotilb symbols. E.g. for a blue circle: Line2D([0], [0], marker='o', color='w', markerfacecolor='#1f77b4')
                                labels contains the strings for each of these.
        markers | dict or None | dictionary containing labels (a numpy array where each number relates to a different marker style e.g. (1,0,1,0,0,0,1 etc)))
                                 and markers (a list of the different Matplotlib marker styles e.g. ['o', 'x'])
        figures | string, "window" / "png" / "png+window" | controls if figures are produced (either as a window, saved as a png, or both)
        png_path | string | if a png is to be saved, a path to a folder can be supplied, or left as default to write to current directory.
        fig_filename | string | name of file, if you wish to set one. Doesn't include the extension (as it's always a png).
    Returns:
        Interactive figure
    History:
        2020/09/09 | MEG | Modified from a sript in the ICASAR package.
        2020/09/10 | MEG | Add labels, and change so that images are stored as rank3 arrays.
        2020/09/10 | MEG | Add legend option.
        2020/09/11 | MEG | Add option to have different markers.
        2020/09/15 | MEG | Add option to set size of inset axes.
        2021_04_16 | MEG | Add figures option (png, png and window, or just window), option to save to a directory, and option to set filename.
    """

    def remove_axes2_and_arrow(fig):
        """ Given a figure that has a second axes and an annotation arrow due to a
        point having been hovered on, remove this axes and annotation arrow.
        Inputs:
            fig | matplotlib figure
        Returns:
        History:
            2020/09/08 | MEG | Written
        """
        # (axes1 and matplotlib are closed over from the enclosing function.)
        # 1: try and remove any axes except the primary one
        try:
            fig.axes[1].remove()
        except:
            pass
        # 2: try and remove any annotation arrows
        for art in axes1.get_children():
            if isinstance(art, matplotlib.patches.FancyArrow):
                try:
                    art.remove()
                except:
                    continue
            else:
                continue
        fig.canvas.draw_idle()                                                  # update the figure

    def axes_data_to_fig_percent(axes_lims, fig_lims, point):
        """ Given a data point, find where on the figure it plots (ie convert from axes coordinates to figure coordinates)
        Inputs:
            axes_xlims | tuple | usually just the return of something like: axes1.get_ylim()
            fig_lims | tuple | the limits of the axes in the figure. usuall (0.1, 0.9) for an axes made with something like this: axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
            point | float | point in data coordinates
        Returns:
            fig_position | float | where the data point is in the figure. (0,0) would be the lower left corner.
        History:
            2020/09/08 | MEG | Written
        """
        # Simple linear map: y = m*x + c between the two coordinate systems.
        gradient = (fig_lims[1] - fig_lims[0])/(axes_lims[1] - axes_lims[0])
        y_intercept = fig_lims[0] - (gradient * axes_lims[0])
        fig_position = (gradient * point) + y_intercept
        return fig_position

    def calculate_insetaxes_offset(lims, points, offset_length):
        """
        The offsets between the inset axes and the point are different depending on which quadrant of the graph the point is in.
        Inputs:
            lims | list | length is equal to the number of dimensions. Filled with tuples of the axes limits.
            point | list | length is equal to the number of diemsions. Filled with points.
            offset_length | float | length of the arrow.
        Returns:
            offsets | list | length is equal to the number of dimensions. Length of offset for inset axes in each dimension.
        History:
            2020/09/08 | MEG | Written
        """
        import numpy as np
        offsets = []
        for dim_n in range(len(lims)):                                          # loop through each dimension.
            dim_centre = np.mean(lims[dim_n])
            # Point left/below centre: arrow goes further left/down, and vice versa.
            if points[dim_n] < dim_centre:
                offsets.append(-offset_length)
            else:
                offsets.append(offset_length)
        return offsets

    def hover(event):
        # Mouse-motion callback: draws (or clears) the arrow and inset axes.
        if event.inaxes == axes1:                                               # determine if the mouse is in the axes
            cont, ind = sc.contains(event)                                      # cont: bool, hovering on a point; ind: dict about the point(s) under the cursor (can hold several)
            if cont:                                                            # if on point
                remove_axes2_and_arrow(fig)                                     # clear stale inset/arrow (cursor can jump point-to-point without leaving a point)
                point_n = ind['ind'][0]                                         # index of the hovered data point, in a simpler form
                # 1: Add the annotation arrow (from inset axes to data point)
                arrow_lengths = calculate_insetaxes_offset([axes1.get_xlim(), axes1.get_ylim()],
                                                           [xy[0,point_n], xy[1,point_n]], arrow_length)    # arrow direction depends on the quadrant (always points away from the plot)
                axes1.arrow(xy[0,point_n] + arrow_lengths[0], xy[1,point_n] + arrow_lengths[1],             # head is fixed at the data point, so the arrow is drawn backwards
                            -arrow_lengths[0], -arrow_lengths[1], clip_on = False, zorder = 999)            # clip_on keeps it visible even off the axes edge
                # 2: Add the inset axes
                fig_x = axes_data_to_fig_percent(axes1.get_xlim(), (0.1, 0.9), xy[0,point_n] + arrow_lengths[0])    # axes -> figure coordinates for the inset axes position
                fig_y = axes_data_to_fig_percent(axes1.get_ylim(), (0.1, 0.9), xy[1,point_n] + arrow_lengths[1])    # ditto for y dimension
                if arrow_lengths[0] > 0 and arrow_lengths[1] > 0:               # top right quadrant
                    inset_axes = fig.add_axes([fig_x, fig_y,                                                # anchored to lower left corner
                                               inset_axes_side['x'], inset_axes_side['y']], anchor = 'SW')
                elif arrow_lengths[0] < 0 and arrow_lengths[1] > 0:             # top left quadrant
                    inset_axes = fig.add_axes([fig_x - inset_axes_side['x'], fig_y,                         # nudged in x, anchored to lower right corner
                                               inset_axes_side['x'], inset_axes_side['y']], anchor = 'SE')
                elif arrow_lengths[0] > 0 and arrow_lengths[1] < 0:             # lower right quadrant
                    inset_axes = fig.add_axes([fig_x, fig_y - inset_axes_side['y'],                         # nudged in y direction
                                               inset_axes_side['x'], inset_axes_side['y']], anchor = 'NW')
                else:                                                           # lower left quadrant
                    inset_axes = fig.add_axes([fig_x - inset_axes_side['x'], fig_y - inset_axes_side['y'],  # nudged in both x and y
                                               inset_axes_side['x'], inset_axes_side['y']], anchor = 'NE')
                # 3: Plot on the inset axes (exactly one of the two data kinds is set; checked below)
                if temporal_data is not None:
                    inset_axes.plot(temporal_data['xvals'], temporal_data['tcs_r2'][point_n,])              # draw the inset axes time course graph
                if spatial_data is not None:
                    inset_axes.imshow(spatial_data['images_r3'][point_n,])                                  # or draw the inset axes image
                inset_axes.set_xticks([])                                       # remove ticks (and so labels) from x
                inset_axes.set_yticks([])                                       # and from y
                fig.canvas.draw_idle()                                          # update the figure.
            else:                                                               # else not on a point
                remove_axes2_and_arrow(fig)
        else:                                                                   # else not in the axes
            remove_axes2_and_arrow(fig)

    import matplotlib.pyplot as plt
    import matplotlib
    import numpy as np

    # 1: Check some inputs: exactly one of spatial/temporal data must be given.
    if temporal_data is None and spatial_data is None:
        raise Exception("One of either spatial or temporal data must be supplied. Exiting. ")
    if temporal_data is not None and spatial_data is not None:
        raise Exception("Only either spatial or temporal data can be supplied, but not both. Exiting. ")

    # 2: Draw the figure
    fig = plt.figure(figsize = figsize)                                         # create the figure, size set in function args.
    axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])                                  # main axes
    if markers is None:                                                         # no marker dict: draw everything with the default marker
        sc = axes1.scatter(xy[0,],xy[1,],c=colours, s=100)
    else:                                                                       # marker dict supplied: one scatter call per marker style
        n_markers = len(markers['styles'])                                      # number of unique markers
        for n_marker in range(n_markers):                                       # loop through each marker style
            point_args = np.ravel(np.argwhere(markers['labels'] == n_marker))   # which points have this marker style
            sc = axes1.scatter(xy[0,point_args],xy[1,point_args],c=colours[point_args], s=100, marker = markers['styles'][n_marker])
        sc = axes1.scatter(xy[0,],xy[1,],c=colours, s=100, alpha = 0.0)         # invisible overlay drawn last: hover indexing then covers every point

    # 3: Try and add various labels from the labels dict (each is optional)
    try:
        # NOTE(review): fig.canvas.set_window_title is deprecated in newer
        # Matplotlib (moved to fig.canvas.manager); the bare except hides that.
        fig.canvas.set_window_title(labels['title'])
        fig.suptitle(labels['title'])
    except:
        pass
    try:
        axes1.set_xlabel(labels['xlabel'])
    except:
        pass
    try:
        axes1.set_ylabel(labels['ylabel'])
    except:
        pass

    # 4: Possibly add a legend, using the legend dict.
    if legend is not None:
        axes1.legend(handles = legend['elements'], labels = legend['labels'],
                     bbox_to_anchor=(1., 0.5), loc = 'center right', bbox_transform=plt.gcf().transFigure)  # legend right of the current axis; bbox in figure coordinates.

    fig.canvas.mpl_connect("motion_notify_event", hover)                        # connect the figure and the function.

    # 5: Output: show in a window, save a png, or both.
    if figures == 'window':
        pass
    elif figures == "png":
        fig.savefig(f"{png_path}/{fig_filename}.png")
        plt.close()
    elif figures == 'png+window':
        fig.savefig(f"{png_path}/{fig_filename}.png")
    else:
        pass
#%% temporal data example

import numpy as np
import numpy.ma as ma
from matplotlib.lines import Line2D                      # for the manual legend

np.random.seed(0)                                        # to make reproducible.
xy = np.random.rand(2,15)                                # 15 random points in the unit square
time_courses = np.cumsum(np.random.randn(15,40), axis = 0)   # NOTE(review): cumsum over axis 0 accumulates across points, not along time; possibly axis=1 was intended
xvals = np.arange(0,40)                                  # common time axis for the courses
c = np.random.randint(1,5,size=15)                       # colour value per point

temporal_data = {'tcs_r2' : time_courses,
                 'xvals' : xvals}
labels = {'title' : '01 Temporal Example (with legend)',
          'xlabel' : 'x',
          'ylabel' : 'y'}
legend = {'elements' : [Line2D([0], [0], marker='o', color='w', markerfacecolor='#1f77b4'),   # note that the legend has to be created manually.
                        Line2D([0], [0], marker='o', color='w', markerfacecolor='#ff7f0e')],
          'labels' : ['One', 'Two']}

plot_2d_interactive_fig(xy, c, temporal_data = temporal_data, inset_axes_side = {'x':0.3, 'y':0.1}, arrow_length = 0.05, figsize = (10,6),
                        labels = labels, legend = legend)                       # note we can make the inset axes rectuangular with the inset_axes_side dict

#%% spatial data example

spatial_maps_r3 = np.random.rand(15,100,100)             # r3 to signify that it's rank3 (n_images x Y x X)
spatial_data = {'images_r3' : spatial_maps_r3}
labels['title'] = '02 Spatial Example'
plot_2d_interactive_fig(xy, c, spatial_data = spatial_data, inset_axes_side = {'x':0.1, 'y':0.1}, arrow_length = 0.05, figsize = (10,6), labels = labels)   # or we can make the inset axes square

#%% Equally, the spatial data can be masked arrays

mask = np.where(np.random.randint(0,2, (100,100)) == 1, np.ones((100,100)), np.zeros((100,100))).astype(bool)   # create a random boolean mask
spatial_maps_r3_ma = ma.array(spatial_maps_r3, mask = np.repeat(mask[np.newaxis,], 15, axis = 0))               # apply it to the images, making it a masked array
spatial_data = {'images_r3' : spatial_maps_r3_ma}
labels['title'] = '03 Spatial Example (with masked arrays)'
plot_2d_interactive_fig(xy, c, spatial_data = spatial_data, inset_axes_side = {'x':0.1, 'y':0.1}, arrow_length = 0.05, figsize = (10,6), labels = labels)

#%% Also, a dictionary of marker styles can be supplied.

markers = {'labels' : np.random.randint(0,2, (15)),      # label number will set the marker style. ie lable 0 is the first style in styles
           'styles' : ['o', 'x'] }                       # matplotlib marker styles.
labels['title'] = '04 Spatial Example (with different marker styles)'
plot_2d_interactive_fig(xy, c, spatial_data = spatial_data, inset_axes_side = {'x':0.1, 'y':0.1}, arrow_length = 0.05,
                        figsize = (10,6), labels = labels, markers = markers)
#%% Old version as a script.
#Version where the axes are drawn each time, using a more object orientated approach.
# def remove_axes2_and_arrow(fig):
# """ Given a figure that has a second axes and an annotation arrow due to a
# point having been hovered on, remove this axes and annotation arrow.
# Inputs:
# fig | matplotlib figure
# Returns:
# History:
# 2020/09/08 | MEG | Written
# """
# # 1: try and remove any axes except the primary one
# try:
# fig.axes[1].remove()
# except:
# pass
# # 2: try and remove any annotation arrows
# for art in axes1.get_children():
# if isinstance(art, matplotlib.patches.FancyArrow):
# try:
# art.remove()
# except:
# continue
# else:
# continue
# fig.canvas.draw_idle() # update the figure
# def axes_data_to_fig_percent(axes_lims, fig_lims, point):
# """ Given a data point, find where on the figure it plots (ie convert from axes coordinates to figure coordinates)
# Inputs:
# axes_xlims | tuple | usually just the return of something like: axes1.get_ylim()
# fig_lims | tuple | the limits of the axes in the figure. usuall (0.1, 0.9) for an axes made with something like this: axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
# point |float | point in data coordinates
# Returns:
# fig_position | float | where the data point is in the figure. (0,0) would be the lower left corner.
# History:
# 2020/09/08 | MEG | Written
# """
# gradient = (fig_lims[1] - fig_lims[0])/(axes_lims[1] - axes_lims[0])
# y_intercept = fig_lims[0] - (gradient * axes_lims[0])
# fig_position = (gradient * point) + y_intercept
# return fig_position
# def calculate_insetaxes_offset(lims, points, offset_length):
# """
# The offsets between the inset axes and the point are different depending on which quadrant of the graph the point is in.
# Inputs:
# lims | list | length is equal to the number of dimensions. Filled with tuples of the axes limits.
# point | list | length is equal to the number of diemsions. Filled with points.
# offset_length | float | length of the arrow.
# Returns:
# offsets | list | length is equal to the number of dimensions. Length of offset for inset axes in each dimension.
# History:
# 2020/09/08 | MEG | Written
# """
# offsets = []
# for dim_n in range(len(lims)): # loop through each dimension.
# dim_centre = np.mean(lims[dim_n])
# if points[dim_n] < dim_centre:
# offsets.append(-offset_length)
# else:
# offsets.append(offset_length)
# return offsets
# import matplotlib.pyplot as plt
# import matplotlib
# import numpy as np; np.random.seed(1)
# inset_axes_side = 0.1
# arrow_length = 0.1
# x = np.random.rand(15)
# y = np.random.rand(15)
# time_courses = np.cumsum(np.random.randn(15,40), axis = 0)
# xvals = np.arange(0,40)
# c = np.random.randint(1,5,size=15)
# norm = plt.Normalize(1,4)
# cmap = plt.cm.RdYlGn
# fig = plt.figure()
# axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes
# sc = axes1.scatter(x,y,c=c, s=100, cmap=cmap, norm=norm)
# def hover(event):
# if event.inaxes == axes1: # determine if the mouse is in the axes
# print('in axes', end = '')
# cont, ind = sc.contains(event) # cont is a boolean of if hoving on point, ind is a dictionary about the point being hovered over. Note that two or more points can be in this.
# if cont:
# print('on point')
# remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point (incase cursor moves from one point to next without going off a point)
# point_n = ind['ind'][0]
# arrow_lengths = calculate_insetaxes_offset([axes1.get_xlim(), axes1.get_ylim()],
# [x[point_n], y[point_n]], arrow_length)
# axes1.arrow(x[point_n] + arrow_lengths[0], y[point_n] + arrow_lengths[1], # add the arrow. Notation is all a bit backward as head is fixed at end, so it has to be drawn backwards.
# -arrow_lengths[0], -arrow_lengths[1], clip_on = False, zorder = 999) # clip_on makes sure it's visible, even if it goes off the edge of the axes.
# fig_x = axes_data_to_fig_percent(axes1.get_xlim(), (0.1, 0.9), x[point_n] + arrow_lengths[0]) # convert position on axes to position in figure, ready to add the inset axes
# fig_y = axes_data_to_fig_percent(axes1.get_ylim(), (0.1, 0.9), y[point_n] + arrow_lengths[1]) # ditto for y dimension
# print(arrow_lengths)
# if arrow_lengths[0] > 0 and arrow_lengths[1] > 0: # top right quadrant
# inset_axes = fig.add_axes([fig_x, fig_y, inset_axes_side, inset_axes_side]) # create the inset axes, simple case
# elif arrow_lengths[0] < 0 and arrow_lengths[1] > 0: # top left quadrant
# inset_axes = fig.add_axes([fig_x - inset_axes_side, fig_y, inset_axes_side, inset_axes_side]) # create the inset axes, simple case
# elif arrow_lengths[0] > 0 and arrow_lengths[1] < 0: # lower right quadrant
# inset_axes = fig.add_axes([fig_x, fig_y - inset_axes_side, inset_axes_side, inset_axes_side]) # create the inset axes, simple case
# else:
# inset_axes = fig.add_axes([fig_x - inset_axes_side, fig_y - inset_axes_side, inset_axes_side, inset_axes_side]) # create the inset axes, simple case
# inset_axes.plot(xvals, time_courses[point_n,]) # draw the inset axes figure
# inset_axes.set_xticks([]) # and remove ticks (and so labels too) from x
# inset_axes.set_yticks([]) # and from y
# fig.canvas.draw_idle() # update the figure.
# else:
# print(' off point')
# remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point
# else:
# print('out of axes')
# remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point (incase cursor moves from one point to next without going off a point)
# fig.canvas.mpl_connect("motion_notify_event", hover)
| matthew-gaddes/interactive_2d_plot | interactive_2d_plot.py | interactive_2d_plot.py | py | 25,890 | python | en | code | 2 | github-code | 13 |
24940265093 | import streamlit as st
def get_params():
    """Render four side-by-side Streamlit selectboxes and return the chosen
    (region, queue, tier, division) values."""
    specs = [
        ('Division', ['II']),
        ('Tier', ['SILVER']),
        ('Queue', ['RANKED_SOLO_5x5']),
        ('Region', ['na1']),
    ]
    choices = []
    for column, (label, options) in zip(st.columns(4), specs):
        with column:
            choices.append(st.selectbox(label, options))
    division, tier, queue, region = choices
    return region, queue, tier, division
| nicolasesnis/league-win-loss-prediction | src/utils.py | utils.py | py | 396 | python | en | code | 0 | github-code | 13 |
11867505311 | ilk = int(input("İlk sayıyı giriniz:"))
iki = int(input("İkinci sayıyı giriniz:"))  # read the second integer (prompt is in Turkish)
def ekok(x, y):
    """Return the least common multiple (Turkish: EKOK) of two positive integers.

    Replaces the original O(x*y) descending linear search (which also shadowed
    the function name with a local variable) with the closed form
    lcm(x, y) = x*y // gcd(x, y), which runs in O(log(min(x, y))).
    """
    from math import gcd  # local import keeps this function self-contained
    return x * y // gcd(x, y)
print(ekok(ilk,iki))  # print the least common multiple (EKOK) of the two inputs
| zaFer234/Temel-Python-Projeleri | ekok hesaplama.py | ekok hesaplama.py | py | 268 | python | tr | code | 0 | github-code | 13 |
7591701895 | from __future__ import print_function
##########################################################
## OncoMerge: app.py ##
## ______ ______ __ __ ##
## /\ __ \ /\ ___\ /\ \/\ \ ##
## \ \ __ \ \ \___ \ \ \ \_\ \ ##
## \ \_\ \_\ \/\_____\ \ \_____\ ##
## \/_/\/_/ \/_____/ \/_____/ ##
## @Developed by: Plaisier Lab ##
## (https://plaisierlab.engineering.asu.edu/) ##
## Arizona State University ##
## 242 ISTB1, 550 E Orange St ##
## Tempe, AZ 85281 ##
## @github: https://github.com/plaisier-lab/mpm_web ##
## @Author: Chris Plaisier ##
## @License: GNU GPLv3 ##
## ##
## If this program is used in your analysis please ##
## mention who built it. Thanks. :-) ##
##########################################################
import logging
import json
import csv
from flask import Flask, Response, render_template, request
from constants import HALLMARKS, SELECTABLE_PHENOTYPES, SELECTABLE_PHENOTYPES_BLACKLIST
from database import dbconn
from bicluster import bicluster_page
from causal_analysis import causal_analysis_page
from search import search_page, get_index_locals
from jacks import jacks_page
# Flask application object; feature areas (bicluster, causal analysis,
# search, JACKS) are mounted as blueprints.
app = Flask(__name__)
# app.config.from_envvar('MESO_SETTINGS')
app.register_blueprint(bicluster_page)
app.register_blueprint(causal_analysis_page)
app.register_blueprint(search_page)
app.register_blueprint(jacks_page)
@app.errorhandler(Exception)
def unhandled_exception(e):
    """Catch-all error handler: log the traceback, show a generic error page."""
    app.logger.exception(e)
    return render_template('unknown_error.html')
@app.route('/')
def index():
    """Landing page: hallmark list plus the selectable phenotypes for search."""
    hallmarks, selectable_phenotypes = get_index_locals()
    return render_template('index.html', hallmarks=hallmarks, selectable_phenotypes=selectable_phenotypes)
@app.route('/network')
def network():
    """Static page: network visualisation."""
    return render_template('network.html')

@app.route('/about')
def about():
    """Static page: about the project."""
    return render_template('about.html')

@app.route('/download')
def download():
    """Static page: data downloads."""
    return render_template('download.html')

@app.route('/citation')
def citation():
    """Static page: how to cite."""
    return render_template('citation.html')
@app.route('/genecompletions')
def genecompletions():
    """Autocomplete endpoint: JSON list of gene symbols followed by miRNA
    names that start with the ``?term=`` prefix."""
    prefix = str(request.args.get('term')) + '%'
    db = dbconn()
    try:
        cursor = db.cursor()
        matches = []
        # Genes first, then miRNAs, preserving the original response order.
        for query in ("""SELECT symbol FROM gene WHERE symbol LIKE %s""",
                      """SELECT name FROM mirna WHERE name LIKE %s"""):
            cursor.execute(query, [prefix])
            matches.extend(row[0] for row in cursor.fetchall())
        payload = json.dumps(matches)
    finally:
        db.close()
    return Response(response=payload, status=200, mimetype='application/json')
@app.route('/combinatorial_network')
def combinatorial_network():
    """Build the graph description for the combinatorial network page from
    the configured node/edge CSV files.

    Bug fix: ``csvreader.next()`` is Python 2 only -- csv reader objects in
    Python 3 implement the iterator protocol via ``__next__``, so the header
    rows are now skipped with the ``next()`` builtin (works on 2.6+ too).
    """
    with open(app.config['NODES_FILE'], 'r') as infile:
        csvreader = csv.reader(infile, delimiter=',')
        next(csvreader)  # skip header row
        nodes = {node_id: {'id': node_id, 'tf_ko': tf_ko, 'in_gbm': in_gbm}
                 for node_id, tf_ko, in_gbm in csvreader}

    with open(app.config['EDGES_FILE'], 'r') as infile:
        csvreader = csv.reader(infile, delimiter=',')
        next(csvreader)  # skip header row
        edges = []
        for edge, sig_coocc in csvreader:
            # Edge column is "source type target" separated by whitespace.
            source, edge_type, target = edge.split()
            edges.append({'source': source, 'target': target, 'type': edge_type,
                          'sig_coocc': sig_coocc})

    graph_data = []
    for node_id, node_data in nodes.items():
        # CSS-style classes drive node rendering in the template.
        classes = []
        if node_id.startswith('hsa-miR'):
            classes.append('mirna')
        else:
            classes.append('gene')
        if node_data['tf_ko'] == 'Yes':
            classes.append('crispr')
        if node_data['in_gbm'] == 'Yes':
            classes.append('in_gbm')
        if 'in_gbm' in classes and 'crispr' in classes:
            classes.append('crispr_gbm')
        graph_data.append({ 'data': { 'id': node_id }, 'classes': ' '.join(classes) })

    for i, edge in enumerate(edges):
        entry = { 'data': { 'id': 'e%d' % i, 'source': edge['source'], 'target': edge['target'] } }
        if edge['sig_coocc'] == 'Yes':
            entry['classes'] = 'sigcoocc'  # significant co-occurrence styling
        graph_data.append(entry)
    return render_template('combinatorial_network.html', **locals())
if __name__ == '__main__':
    # Dev-server entry point: verbose logging plus Flask debug mode.
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    app.debug = True
    app.secret_key = 'supercalifragilistic'  # NOTE(review): hard-coded secret; acceptable only for local development
    app.logger.addHandler(handler)
    app.run(host='0.0.0.0', debug=True)
| plaisier-lab/mpm_web | app/__main__.py | __main__.py | py | 4,586 | python | en | code | 0 | github-code | 13 |
71184949139 | from cgitb import text
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
## webdriver instance
# NOTE(review): this module-level driver is never used -- main() builds its
# own -- so importing this file launches an extra browser window.
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
## function that inputs search term using string formatting
def get_url(search_term):
    """Build an Amazon search-results URL template for *search_term*.

    The returned string keeps one unfilled ``{}`` placeholder so callers can
    do ``url.format(page_number)`` for each results page.

    Bug fix: the page placeholder was ``'&page{}'`` (missing ``=``), which
    produced query strings like ``&page2`` that Amazon ignores -- every
    request silently returned page 1. It is now ``'&page={}'``.
    """
    ## Generate a URL
    template = "https://www.amazon.com/s?k={}&crid=2GM7AAQABFGBO&sprefix={}%2Caps%2C98&ref=nb_sb_noss_1"
    search_term = search_term.replace(' ', '+')  # spaces are '+' in the k= query
    ## add term query to url
    url = template.format(search_term, search_term)
    ## add page query placeholder
    url += '&page={}'
    return url
def extract_record(item):
    """Pull (description, price, rating, review_count, url) out of one
    search-result card; returns None when no price is found."""
    # description and url
    atag = item.h2.a.span
    description = atag.text.strip()
    ataglink = item.h2.a
    url = 'https://www.amazon.com' + ataglink.get('href')
    try:
        # price
        ## EDIT THESE LINES IF NOT ABLE TO RETRIEVE PRICE
        price_parent = item.find('span', 'a-price')
        price = price_parent.find('span', 'a-offscreen').text
    except AttributeError:
        # no price on the card -> skip this record entirely
        return
    try:
        # rank and rating
        ## EDIT THESE LINES IF NOT ABLE TO FIND REVIEW OR REVIEW COUNTS
        # NOTE(review): the second argument here is a *set*, not an attrs
        # dict -- BeautifulSoup treats a non-dict second argument as a CSS
        # class filter, which may not match as intended; verify selectors.
        rating = item.find('i', {'span', 'a-icon-alt'}).text
        review_count = item.find('a', {'span', 'a-size-base s-underline-text'}).text
    except AttributeError:
        # rating / review count are optional -- fall back to empty strings
        rating = ''
        review_count = ''
    result = (description, price, rating, review_count, url)
    return result
def main(search_term):
    """Scrape up to 19 Amazon result pages for *search_term* and write the
    extracted records to results.csv in the current directory."""
    # start the webdriver
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
    records = []
    url = get_url(search_term)
    for page in range(1, 20):
        driver.get(url.format(page))
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        ## CHANGE THIS PATH IF YOU CANNOT FIND PRODUCT NAMES
        results = soup.find_all("div", {"class": "a-section a-spacing-base"})
        for item in results:
            record = extract_record(item)
            if record:  # extract_record returns None for price-less cards
                records.append(record)
    driver.close()  # close the browser once all pages have been scraped
    headers = ['Description', 'Price', 'Rating', 'ReviewCount', 'Url']
    with open('results.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(headers)
        writer.writerows(records)

## EDIT THIS TO CHANGE WHAT YOU WANT TO SEARCH FOR
main('Dresser for bedroom')
| Young-Parrott/Amazon-Web-Scraper | amazon_web_scraper.py | amazon_web_scraper.py | py | 2,523 | python | en | code | 0 | github-code | 13 |
32039427955 | import json
from src.data.wl_data import WlDataRawLoader
from src.data.wl_data import WlDataPreprocessor
from src.data.psoriasis_data import PsoriasisLabeler
# Stage 1: load the raw worklist files, persist the combined CSV, and report.
raw_data_loader = WlDataRawLoader('data/raw/')
raw_data_loader.load_files()
raw_data_loader.data.to_csv('data/interim/raw_data.csv', index=False)
raw_data_loader.generate_report('reports/raw_data_report.json')
# Stage 2: preprocess the raw CSV; write the cleaned data and two reports.
data_preprocessor = WlDataPreprocessor('data/interim/raw_data.csv')
data_preprocessor.preprocess()
data_preprocessor.data.to_csv('data/interim/preprocessed_data.csv', index=False)
data_preprocessor.generate_report('reports/preprocessing_report.json','reports/preprocessed_data_report.json')
# Stage 3: label psoriasis cases and compute their distribution summary.
psoriasis_labeler = PsoriasisLabeler('data/interim/preprocessed_data.csv')
psoriasis_labeler.label_psoriasis()
psoriasis_labeler.labeled_data.to_csv('data/interim/labeled_data.csv', index=False)
psoriasis_labeler.compute_distribution()
psoriasis_labeler.psoriasis_summary.to_csv('data/processed/psoriasis_summary.csv', index=False) | fvillena/psoriasis-incidence | analyze.py | analyze.py | py | 993 | python | en | code | 0 | github-code | 13 |
14239049667 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thumt.utils as utils
import thumt.nn.op as ops
from thumt.modules.module import Module
from thumt.modules.affine import Affine as Linear
class Affine(Module):
    """Mixture-of-experts affine layer: ``n`` independent Linear experts.

    Each input row is dispatched to the experts whose gate value is positive,
    transformed, scaled by its gate, and summed back into place.
    """

    def __init__(self, in_features, out_features, bias=True, n=4,
                 name="affine"):
        super(Affine, self).__init__(name=name)
        self.in_features = in_features
        self.out_features = out_features
        # Bug fix: ``bias`` was never stored, so extra_repr() raised
        # AttributeError on ``self.bias``. Record the flag here.
        self.bias = bias
        self.n = n
        with utils.scope(name):
            self.experts = nn.ModuleList([
                Linear(in_features, out_features, bias=bias,
                       name="affine_%d" % i)
                for i in range(n)])

    def forward(self, input, gates):
        """Apply the gated mixture of experts.

        input: [..., in_features]; gates: [..., n], zero for unused experts.
        Returns a tensor of shape [..., out_features].
        """
        input_shape = input.shape
        input = torch.reshape(input, [-1, input_shape[-1]])
        gates = torch.reshape(gates, [-1, gates.shape[-1]])
        batch_size = int(input.shape[0])
        # Number of rows routed to each expert.
        part_sizes = list(torch.sum((gates > 0).long(), [0]))
        # (expert, row) pairs for every non-zero gate, grouped by expert.
        index = torch.nonzero(gates.t())
        cell_index, batch_index = torch.unbind(index, 1)
        # Gather each routed row (rows with several active gates are duplicated).
        input = torch.nn.functional.embedding(batch_index, input)
        inputs = torch.split(input, part_sizes, 0)
        results = []

        for i in range(self.n):
            if inputs[i].shape[0] != 0:
                results.append(self.experts[i](inputs[i]))
            else:
                # Empty placeholder keeps torch.cat seeing exactly n tensors.
                results.append(torch.empty([0, self.out_features],
                                           device=input.device,
                                           dtype=input.dtype))

        # combine: weight every expert output by its gate and scatter-add the
        # rows back to their original batch positions.
        gate_index = batch_index * self.n + cell_index
        nonzero_gates = torch.gather(torch.reshape(gates, [-1]), 0, gate_index)
        stitched = torch.cat(results, dim=0)
        stitched = stitched * nonzero_gates[:, None]
        output = ops.unsorted_segment_sum_2d(stitched, batch_index, batch_size)
        output_shape = list(input_shape[:-1]) + [output.shape[-1]]
        output = torch.reshape(output, output_shape)

        return output

    def extra_repr(self):
        # ``self.bias`` is the boolean flag saved in __init__ (it used to be
        # missing, and ``self.bias is not None`` was always True anyway).
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias
        )
| THUNLP-MT/Transformer-DMB | thumt/modules/moe/affine.py | affine.py | py | 2,321 | python | en | code | 1 | github-code | 13 |
38706337400 | from django.core.management.base import BaseCommand
import sendgrid
from dashboard.models import pa_Get_Parts_Count, get_service_parts_detail
from adam.models import SGFields
from django.template.loader import render_to_string
import datetime, time
import os
from mysite.settings import ADAM_PATH, ADAM_EXPORT_PATH
class Command(BaseCommand):
    """Management command that emails aged-parts / battery-core reports.

    Usage: ``manage.py <command> <email_type>`` where email_type is one of
    parts_30to45, parts_monthly, parts_45to50 or battery.
    """

    def add_arguments(self, parser):
        # Single positional argument selecting which report to send.
        parser.add_argument('email_type')

    def handle(self, *args, **options):
        def send_email(htmlbody, subject, email_list):
            """Send *htmlbody* with *subject* to *email_list* via SendGrid;
            returns the (status, msg) pair from the SendGrid client."""
            # using SendGrid's Python Library - https://github.com/sendgrid/sendgrid-python
            x = SGFields.objects.get(id=1)  # credentials live in a single DB row (id=1)
            u = x.sgusername
            p = x.sgpassword
            sg = sendgrid.SendGridClient(u, p)
            message = sendgrid.Mail()
            """
            message.add_filter('templates', 'enable', '1')
            message.add_filter('templates', 'template_id', 'TEMPLATE-ALPHA-NUMERIC-ID')
            message.add_substitution('key', 'value')
            message.add_to("jesse@dovimotors.com")
            message.set_from("admin@dovimotors.com")
            message.set_subject("Sending with SendGrid is Fun")
            message.set_html("and easy to do anywhere, even with Python")
            message.add_to_name("Jesse Dovi")
            message.set_from_name("Dovi Motors Inc.")
            """
            message.add_to(email_list)
            message.set_from_name("Dovi Motors Inc.")
            message.set_from("admin@dovimotors.com")
            message.set_subject(subject)
            message.set_html(htmlbody)
            status, msg = sg.send(message)
            return (status,msg)

        email_type = options['email_type']
        if email_type == "parts_30to45":
            # NOTE(review): the window starts at -29 days although the subject
            # says "30 to 45" -- confirm which is intended.
            part_list = pa_Get_Parts_Count('detail',-29,-45,'DATEPURC')
            html = part_list.to_html()
            subject = "Parts 30 to 45 days old"
            email_addresses = ["jesse@dovimotors.com","gordy@dovimotors.com","robin@dovimotors.com"]
            send_email(html,subject,email_addresses)
        elif email_type == "parts_monthly":
            # Reminder email; reports when the stock-parts extract was refreshed.
            file_path = ''.join([ADAM_EXPORT_PATH,'Extract.csv'])
            last_modified = time.ctime(os.path.getmtime(file_path))
            # NOTE(review): modified_message is unused -- the template shows
            # 'last_modified' from the context instead.
            modified_message = 'The stock parts file was last updated %s' % last_modified
            context = {
                'headline':'Monthly Aged Parts Reports',
                'body':'This is a reminder to run the monthly aged parts inventory reports.',
                'last_modified': last_modified
            }
            html = render_to_string('mysite/email_notification.html',context)
            subject = "Time to run the monthly aged parts reports"
            email_addresses = ["jesse@dovimotors.com","gordy@dovimotors.com","robin@dovimotors.com","luke@dovimotors.com"]
            send_email(html,subject,email_addresses)
        elif email_type == "parts_45to50":
            # Parts purchased 45-50 days ago.
            part_list = pa_Get_Parts_Count('detail',-45,-50,'DATEPURC')
            html = part_list.to_html()
            subject = "Get rid of these pronto! Parts 45 to 50 days old"
            email_addresses = ["jesse@dovimotors.com","gordy@dovimotors.com","robin@dovimotors.com"]
            send_email(html,subject,email_addresses)
        elif email_type == "battery":
            # NOTE(review): the date window is hard-coded; the commented lines
            # below were the original rolling 3-to-2-weeks-ago window.
            #startdate = datetime.datetime.strftime(datetime.date.today() + datetime.timedelta(-21),'%Y-%m-%d')
            #enddate = datetime.datetime.strftime(datetime.date.today() + datetime.timedelta(-14), '%Y-%m-%d')
            startdate = '2016-08-15'
            enddate = '2016-08-31'
            ro_list = get_service_parts_detail(startdate,enddate)
            ro_list.sort('DATE_OUT')
            html = ro_list.to_html()
            subject = "Battery Cores from %s to %s" % (startdate,enddate)
            email_addresses = ["jesse@dovimotors.com","gordy@dovimotors.com","robin@dovimotors.com"]
            send_email(html,subject,email_addresses)
| dovimotors/mysite | adam/management/commands/send_parts_email.py | send_parts_email.py | py | 3,994 | python | en | code | 0 | github-code | 13 |
17186611887 | bl_info = {
"name": "Heavypoly Operators",
"description": "Operators that make for smooth blending",
"author": "Vaughan Ling",
"version": (0, 1, 0),
"blender": (2, 80, 0),
"location": "",
"warning": "",
"wiki_url": "",
"category": "Operators"
}
import bpy
import bmesh
from bpy.types import Menu
from bpy.types import Operator
from bpy.props import BoolProperty
class HP_OT_loopcut(bpy.types.Operator):
    """Tablet-friendly loop cut: re-invokes loopcut_slide while the pen/mouse
    is held down, and switches to edge select mode on release."""
    bl_idname = "mesh.hp_loopcut" # unique identifier for buttons and menu items to reference.
    bl_label = "Loopcut with tablet modals" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def modal(self, context, event):
        # While the button/pen is still pressed, hand the move off to the
        # interactive loop-cut-and-slide operator and stay modal.
        if event.type == 'MOUSEMOVE' and event.value == 'PRESS':
            print('Mousemove...')
            bpy.ops.mesh.loopcut_slide('INVOKE_DEFAULT')
            return {'RUNNING_MODAL'}
        if event.type in {'RIGHTMOUSE', 'ESC'}: # Cancel
            return {'CANCELLED'}
        elif event.type == 'MOUSEMOVE' and event.value == 'RELEASE':
            # On release, set (vert, edge, face) select flags to edge-only.
            bpy.context.scene.tool_settings.mesh_select_mode = (False, True, False)
            print('Release...')
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        # Register this operator as a modal handler so modal() receives events.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
class HP_OT_smart_snap_cursor(bpy.types.Operator):
    """Snap the 3D cursor to the current selection, falling back to the
    world center when nothing is selected (or on any error)."""
    bl_idname = "view3d.smart_snap_cursor" # unique identifier for buttons and menu items to reference.
    bl_label = "" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        try:
            if context.active_object.mode == 'EDIT':
                # In edit mode, snap to the component selection if any verts
                # are selected; otherwise recenter the cursor.
                if context.object.data.total_vert_sel > 0:
                    bpy.ops.view3d.snap_cursor_to_selected()
                else:
                    bpy.ops.view3d.snap_cursor_to_center()
            elif len(bpy.context.selected_objects) > 0:
                bpy.ops.view3d.snap_cursor_to_selected()
            else:
                bpy.ops.view3d.snap_cursor_to_center()
        except:
            # NOTE(review): bare except — e.g. when there is no active object;
            # recentering is used as a catch-all fallback.
            bpy.ops.view3d.snap_cursor_to_center()
        #bpy.context.scene.tool_settings.transform_pivot_point = 'CURSOR'
        return {'FINISHED'}
class HP_OT_smart_snap_origin(bpy.types.Operator):
    """Context-sensitive origin helper: in edit mode move the object origin to
    the selection (routed through the 3D cursor); in object mode snap the
    origin to the geometry, or apply transforms when nothing is selected."""
    bl_idname = "view3d.smart_snap_origin" # unique identifier for buttons and menu items to reference.
    bl_label = "" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        try:
            if context.active_object.mode == 'EDIT':
                if context.object.data.total_vert_sel > 0:
                    # cursor -> selection, then origin -> cursor; origin_set
                    # only works from object mode, hence the round-trip.
                    bpy.ops.view3d.snap_cursor_to_selected()
                    bpy.ops.object.mode_set(mode='OBJECT')
                    bpy.ops.object.origin_set(type = 'ORIGIN_CURSOR')
                    bpy.ops.object.mode_set(mode='EDIT')
                else:
                    # No component selection: apply loc/rot/scale instead.
                    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
                    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
                    bpy.ops.object.mode_set(mode='EDIT', toggle=False)
            elif len(bpy.context.selected_objects) > 0:
                bpy.ops.object.origin_set(type = 'ORIGIN_GEOMETRY')
            else:
                bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
            bpy.context.scene.tool_settings.transform_pivot_point = 'MEDIAN_POINT'
        except:
            # NOTE(review): bare except silently swallows every failure above.
            return {'FINISHED'}
        return {'FINISHED'}
class PushAndSlide(bpy.types.Operator):
    """Slide verts/edges along the mesh, or push faces along their normals,
    depending on the current mesh select mode."""
    bl_idname = "mesh.push_and_slide" # unique identifier for buttons and menu items to reference.
    bl_label = "Push And Slide" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        # The select-mode flags are ordered (vert, edge, face).
        select_mode = tuple(bpy.context.scene.tool_settings.mesh_select_mode)
        if select_mode == (True, False, False):
            # Vertex-only mode: slide vertices along their edges.
            bpy.ops.transform.vert_slide('INVOKE_DEFAULT', mirror=False, correct_uv=True)
        elif select_mode == (False, False, True):
            # Face-only mode: push/pull along the face normals.
            bpy.ops.transform.shrink_fatten('INVOKE_DEFAULT', use_even_offset=True, mirror=False)
        else:
            # Any other combination: default to edge slide.
            bpy.ops.transform.edge_slide('INVOKE_DEFAULT', mirror=False, correct_uv=True)
        return {'FINISHED'}
class HP_OT_extrude(Operator):
    """Context Sensitive Extrude"""
    bl_label = "Context Sensitive Extrude"
    bl_idname = "mesh.hp_extrude"
    bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only available while editing a mesh/curve object.
        obj = context.active_object
        return (obj is not None and obj.mode == 'EDIT')
    def modal(self, context, event):
        # Runs once after the face extrude: fix normals on the affected
        # island, then restore the saved selection from the vertex group.
        if event.type == 'MOUSEMOVE':
            bpy.ops.mesh.select_linked()
            bpy.ops.mesh.normals_make_consistent(inside=False)
            bpy.ops.mesh.select_all(action='DESELECT')
            bpy.ops.object.vertex_group_select()
            bpy.ops.object.vertex_group_remove(all=False, all_unlocked=False)
            return {'FINISHED'}
        if event.type in {'RIGHTMOUSE', 'ESC'}: # Cancel
            print('rightmouse')
            return {'CANCELLED'}
        return {'RUNNING_MODAL'}
    def invoke(self, context, event):
        # Curves have a single extrude path.
        if bpy.context.object.type == 'CURVE':
            bpy.ops.curve.extrude()
            bpy.ops.transform.translate('INVOKE_DEFAULT', constraint_orientation='GLOBAL')
            print('EXTRUDING CURVES')
            return {'FINISHED'}
        mesh = context.object.data
        selface = mesh.total_face_sel
        seledge = mesh.total_edge_sel
        selvert = mesh.total_vert_sel
        # Nothing Selected
        if selvert == 0:
            bpy.ops.mesh.select_mode(type = 'VERT')
            bpy.ops.mesh.dupli_extrude_cursor('INVOKE_DEFAULT')
            print('PLACING VERT')
            return {'FINISHED'}
        if selvert > 0 and seledge == 0:
            print('EXTRUDING VERTS')
            bpy.ops.mesh.extrude_region_move('INVOKE_DEFAULT')
            return {'FINISHED'}
        if seledge > 0 and selface == 0:
            print('EXTRUDING EDGES')
            bpy.ops.mesh.extrude_region_move('INVOKE_DEFAULT')
            return {'FINISHED'}
        # Save Selection
        bpy.ops.object.face_map_add()
        bpy.ops.object.face_map_assign()
        bpy.ops.mesh.select_linked(delimit={'SEAM'})
        print('SAVING SELECTION')
        # linkedface == selface means the whole linked island is selected.
        linkedface = mesh.total_face_sel
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.face_map_select()
        bpy.ops.object.face_map_remove()
        bpy.ops.mesh.extrude_region_move('EXEC_DEFAULT')
        print('EXTRUDING FACES')
        if linkedface != selface:
            # Partial selection: just push along normals, no normal fix-up.
            bpy.ops.transform.shrink_fatten('INVOKE_DEFAULT')
            return {'FINISHED'}
        context.window_manager.modal_handler_add(self)
        if selface > 0:
            # Whole island extruded: remember it in a vertex group so modal()
            # can recalculate normals and restore the selection afterwards.
            bpy.ops.object.vertex_group_add()
            bpy.ops.object.vertex_group_assign()
            bpy.ops.transform.shrink_fatten('INVOKE_DEFAULT')
            print('FIXING NORMALS')
            return {'RUNNING_MODAL'}
        return {'RUNNING_MODAL'}
class SmartBevel(bpy.types.Operator):
    """Bevel based on select mode: chamfer verts, bevel the border loop of a
    face selection, or bevel the selected edges."""
    bl_idname = "mesh.smart_bevel" # unique identifier for buttons and menu items to reference.
    bl_label = "Smart Bevel" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        select_mode = tuple(bpy.context.scene.tool_settings.mesh_select_mode)
        if select_mode == (True, False, False):
            # Vertex mode: chamfer the selected vertices.
            bpy.ops.mesh.bevel('INVOKE_DEFAULT',vertex_only=True)
        elif select_mode == (False, False, True):
            # Face mode: bevel the border loop of the selected region.
            bpy.ops.mesh.select_mode(type = 'EDGE')
            print('edge mode...')
            bpy.ops.mesh.region_to_loop('INVOKE_DEFAULT')
            print('selecting border...')
            bpy.ops.mesh.bevel('INVOKE_DEFAULT',vertex_only=False)
        else:
            # BUG FIX: edge select mode previously matched neither branch, so
            # invoke() fell through and returned None, which Blender reports
            # as an operator error. Bevel the selected edges instead.
            bpy.ops.mesh.bevel('INVOKE_DEFAULT',vertex_only=False)
        return {'FINISHED'}
class SeparateAndSelect(bpy.types.Operator):
    """Separate the selected mesh elements into their own object, then enter
    edit mode on the separated object with everything selected."""
    bl_idname = "mesh.separate_and_select" # unique identifier for buttons and menu items to reference.
    bl_label = "Separate and Select" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def execute(self, context):
        base = bpy.context.active_object
        bpy.ops.mesh.separate(type='SELECTED')
        bpy.ops.object.editmode_toggle()
        # Drop the original object from the selection so only the newly
        # separated object(s) remain selected.
        base.select_set(state=False)
        selected = bpy.context.selected_objects
        for sel in selected:
            bpy.context.view_layer.objects.active = sel
            bpy.ops.object.editmode_toggle()
            bpy.ops.mesh.select_all(action='SELECT')
        return {'FINISHED'}
class SmartShadeSmooth(bpy.types.Operator):
    """Apply smooth shading plus a 25-degree auto-smooth, working from either
    edit mode (with a round-trip to object mode) or object mode."""
    bl_idname = "view3d.smart_shade_smooth_toggle" # unique identifier for buttons and menu items to reference.
    bl_label = "Smart Shade Smooth" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        # shade_smooth only works from object mode; remember whether we need
        # to hop out of edit mode and back.
        was_editing = context.active_object.mode == 'EDIT'
        if was_editing:
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
        bpy.ops.object.shade_smooth()
        bpy.context.object.data.use_auto_smooth = True
        bpy.context.object.data.auto_smooth_angle = 0.436332  # 25 degrees in radians
        if was_editing:
            bpy.ops.object.mode_set(mode='EDIT', toggle=False)
        return {'FINISHED'}
class toggle_render_material(bpy.types.Operator):
    """Toggle the 3D viewport between Material and Rendered shading."""
    bl_idname = "view3d.toggle_render_material" # unique identifier for buttons and menu items to reference.
    bl_label = "" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        # NOTE(review): space_data.viewport_shade is the Blender 2.7x API; in
        # 2.80+ (which bl_info targets) viewport shading moved to
        # space_data.shading.type, so this likely fails there — confirm
        # against the target Blender build.
        if bpy.context.space_data.viewport_shade != 'MATERIAL':
            bpy.context.space_data.viewport_shade = 'MATERIAL'
        elif bpy.context.space_data.viewport_shade == 'MATERIAL':
            bpy.context.space_data.viewport_shade = 'RENDERED'
        return {'FINISHED'}
class Smart_Delete(bpy.types.Operator):
    """Context-sensitive delete: removes objects in object mode (cleaning up
    boolean-cutter modifiers), or the selected components in edit mode, with
    the delete type chosen per object type (curve/grease pencil/meta/mesh)."""
    bl_idname = "view3d.smart_delete" # unique identifier for buttons and menu items to reference.
    bl_label = "" # display name in the interface.
    bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
    def invoke(self, context, event):
        obj = context.object
        objType = getattr(obj, 'type', '')
        act = bpy.context.active_object
        try:
            if not act:
                # No active object: promote a selected one so act.name works.
                for o in bpy.context.selected_objects:
                    bpy.context.view_layer.objects.active = o
            act = bpy.context.active_object
            actname = act.name
            if context.active_object.mode == 'OBJECT':
                if 'Bool_Cutter' in act.name and context.active_object.mode == 'OBJECT':
                    # Deleting a boolean cutter: afterwards strip any modifier
                    # named after the cutter from every object in the layer.
                    bpy.ops.object.delete(use_global=False)
#                    bpy.ops.object.select_all(action='SELECT')
#                    for o in bpy.context.selected_objects:
                    for buttsniffers in bpy.context.view_layer.objects:
                        bpy.context.view_layer.objects.active = buttsniffers
                        bpy.ops.object.modifier_remove(modifier=actname)
                else:
                    bpy.ops.object.delete(use_global=False)
                    #bpy.ops.object.select_all(action='DESELECT')
            elif objType == 'CURVE':
                if context.active_object.mode != 'OBJECT':
                    bpy.ops.curve.delete(type='VERT')
            elif objType == 'GPENCIL':
                if context.active_object.mode != 'OBJECT':
                    bpy.ops.gpencil.delete(type='POINTS')
            elif objType == 'META':
                if context.active_object.mode != 'OBJECT':
                    bpy.ops.mball.delete_metaelems()
            elif objType == 'MESH':
                if context.active_object.mode != 'OBJECT':
                    # In face select mode delete faces; otherwise verts.
                    if tuple(bpy.context.scene.tool_settings.mesh_select_mode) == (False, False, True):
                        bpy.ops.mesh.delete(type='FACE')
                    else:
                        bpy.ops.mesh.delete(type='VERT')
        except:
            # NOTE(review): bare except hides every failure in the branches above.
            pass
        return {'FINISHED'}
class Subdivision_Toggle(bpy.types.Operator):
    """Toggle (or create) a 'Subsurf_Base' subdivision modifier on every
    selected object."""
    bl_idname = "view3d.subdivision_toggle"
    bl_label = ""
    bl_options = {'REGISTER', 'UNDO'}
    def invoke(self, context, event):
        for o in bpy.context.selected_objects:
            bpy.context.view_layer.objects.active = o
            # BUG FIX: the old code tested for *any* SUBSURF modifier but then
            # indexed modifiers["Subsurf_Base"], raising KeyError when the
            # object's subsurf modifier had a different name. Key off the
            # exact name instead.
            mod = o.modifiers.get("Subsurf_Base")
            if mod is not None:
                # Toggle viewport and render visibility together.
                visible = not mod.show_viewport
                mod.show_render = visible
                mod.show_viewport = visible
            else:
                mod = o.modifiers.new("Subsurf_Base", "SUBSURF")
                mod.render_levels = 3
                mod.levels = 3
                mod.show_in_editmode = True
                mod.show_on_cage = True
                mod.subdivision_type = 'CATMULL_CLARK'
        return {'FINISHED'}
class SaveWithoutPrompt(bpy.types.Operator):
    """Save the current .blend immediately, with no confirmation dialog."""
    bl_idname = "wm.save_without_prompt"
    bl_label = "Save without prompt"
    def execute(self, context):
        bpy.ops.wm.save_mainfile()
        return {'FINISHED'}
class RevertWithoutPrompt(bpy.types.Operator):
    """Reload the last saved version of the file without asking first."""
    bl_idname = "wm.revert_without_prompt"
    bl_label = "Revert without prompt"
    def execute(self, context):
        bpy.ops.wm.revert_mainfile()
        return {'FINISHED'}
class DeleteWithoutPrompt(bpy.types.Operator):
    """Delete the selected objects without the confirmation popup."""
    bl_idname = "wm.delete_without_prompt"
    bl_label = "Delete without prompt"
    def execute(self, context):
        bpy.ops.object.delete()
        return {'FINISHED'}
# Every operator class must appear here to be registered with Blender.
classes = (
    SaveWithoutPrompt,
    RevertWithoutPrompt,
    DeleteWithoutPrompt,
    Subdivision_Toggle,
    Smart_Delete,
    SmartShadeSmooth,
    SeparateAndSelect,
    PushAndSlide,
    SmartBevel,
    toggle_render_material,  # BUG FIX: was defined but never registered
    HP_OT_smart_snap_cursor,
    HP_OT_smart_snap_origin,
    HP_OT_extrude,
    HP_OT_loopcut,
)
# Standard factory: produces register()/unregister() that loop over `classes`.
register, unregister = bpy.utils.register_classes_factory(classes)
if __name__ == "__main__":
register() | dngrzn/hpolyscripts | HEAVYPOLY_OPERATORS.py | HEAVYPOLY_OPERATORS.py | py | 15,353 | python | en | code | 0 | github-code | 13 |
14241623997 | import os
from django.conf import settings
from django.urls import reverse
from django.db import models
from django.db.models.signals import pre_delete
from django.db.models.signals import post_save
from course_files.models import generate_courseitem_filepath
from course_files.models import GenericCourseFile
from course_files.models import GenericFlag
from course_files.models import GenericInstructorPermission
from shortcuts import disable_for_loaddata
from private_storage.fields import PrivateFileField
class ExamManager(models.Manager):
    def get_approved(self):
        """Return a queryset of exams that are publicly viewable.

        An exam counts as approved when all of the following hold:
        1. It has been verified by an officer.
        2. None of its instructors are blacklisted.
        3. It carries at most ExamFlag.LIMIT unresolved flags.
        """
        return self.filter(
            verified=True,
            blacklisted=False,
            flags__lte=ExamFlag.LIMIT,
        )
class Exam(GenericCourseFile):
    """An uploaded exam (or exam solution) file for a course instance.

    The underlying file lives under PRIVATE_STORAGE_ROOT/exam_files/, bucketed
    by the first two characters of the exam's unique id.
    """
    # Exam Number constants
    UNKNOWN = 'un'
    MT1 = 'mt1'
    MT2 = 'mt2'
    MT3 = 'mt3'
    MT4 = 'mt4'
    FINAL = 'final'
    EXAM_NUMBER_CHOICES = (
        (UNKNOWN, 'Unknown'),
        (MT1, 'Midterm 1'),
        (MT2, 'Midterm 2'),
        (MT3, 'Midterm 3'),
        (MT4, 'Midterm 4'),
        (FINAL, 'Final'),
    )
    # Exam Type constants
    EXAM = 'exam'
    SOLN = 'soln'
    EXAM_TYPE_CHOICES = (
        (EXAM, 'Exam'),
        (SOLN, 'Solution'),
    )
    # Constants
    EXAM_FILES_LOCATION = 'exam_files'
    exam_number = models.CharField(max_length=5, choices=EXAM_NUMBER_CHOICES)
    exam_type = models.CharField(max_length=4, choices=EXAM_TYPE_CHOICES)
    exam_file = PrivateFileField(upload_to=generate_courseitem_filepath)
    objects = ExamManager()
    class Meta(object):
        permissions = (
            ('view_all_exams',
            'Can view blacklisted and flagged exams'),
        )
    def get_folder(self):
        """Return the path of the folder where the exam file is."""
        # Files are bucketed by the first two chars of the unique id.
        return os.path.join(
            settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,
            str(self.unique_id)[0:2])
    def get_relative_pathname(self):
        """Return the relative path of the exam file from inside the media
        root.
        """
        return os.path.join(Exam.EXAM_FILES_LOCATION,
                            str(self.unique_id)[0:2],
                            str(self.unique_id) + self.file_ext)
    def get_absolute_pathname(self):
        """Return the absolute path of the exam file."""
        return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())
    def get_absolute_url(self):
        # Canonical URL: the exam's edit page.
        return reverse('exams:edit', args=(self.pk,))
    def get_download_file_name(self):
        """Return the file name of the exam file when it is downloaded."""
        # NOTE(review): self.instructors / unique_id / file_ext are not defined
        # on this class — presumably provided by GenericCourseFile; confirm there.
        # Use 'unknown' if the course instance does not have a term
        if self.course_instance.term:
            term = self.course_instance.term.get_url_name()
        else:
            term = 'unknown'
        return '{course}-{term}-{number}-{instructors}-{type}{ext}'.format(
            course=self.course_instance.course.get_url_name(),
            term=term,
            number=self.exam_number,
            instructors='_'.join([i.last_name for i in self.instructors]),
            type=self.exam_type,
            ext=self.file_ext)
    def __str__(self):
        """Return a human-readable representation of the exam file."""
        # Use 'Unknown' if the course instance does not have a term
        if self.course_instance.term:
            term = self.course_instance.term.verbose_name()
        else:
            term = 'Unknown'
        exam_unicode = '{term} {number} {type} for {course}'.format(
            term=term,
            number=self.get_exam_number_display(),
            type=self.get_exam_type_display(),
            course=self.course_instance.course)
        if self.instructors:
            instructors = ', '.join([i.last_name for i in self.instructors])
            return '{}, taught by {}'.format(exam_unicode, instructors)
        else:
            return '{} (Instructors Unknown)'.format(exam_unicode)
class ExamFlag(GenericFlag):
    """Flag an issue with a particular exam on the website."""
    # NOTE(review): ExamManager.get_approved compares against ExamFlag.LIMIT,
    # which is presumably inherited from GenericFlag — confirm there.
    exam = models.ForeignKey(Exam, help_text='The exam that has an issue.', on_delete=models.CASCADE)
    def __str__(self):
        return '{} Flag'.format(self.exam)
def delete_file(sender, instance, **kwargs):
    """Signal handler: remove the exam file from storage after its Exam row
    has been deleted, when such a file exists."""
    if not instance.exam_file:
        # No file attached — nothing to clean up.
        return
    try:
        instance.exam_file.delete()
    except OSError:
        # The file may already be gone from disk; deleting the exam model
        # should still proceed in that case.
        pass
class InstructorPermission(GenericInstructorPermission):
    """Separate set of instructor permissions for exams only."""
    # Nothing here, because there are no additional fields we want to define
    # beyond what GenericInstructorPermission already provides.
    pass
@disable_for_loaddata
def update_exam_flags(sender, instance, **kwargs):
    """Signal handler: recount an exam's unresolved flags whenever one of its
    ExamFlag rows is saved."""
    flagged_exam = Exam.objects.get(pk=instance.exam.pk)
    unresolved = ExamFlag.objects.filter(exam=flagged_exam, resolved=False)
    flagged_exam.flags = unresolved.count()
    flagged_exam.save()
@disable_for_loaddata
def update_exam_blacklist(sender, instance, **kwargs):
    """Update whether an exam is blacklisted every time an instructor
    permission is updated.
    """
    # All exams taught by the instructor whose permission changed.
    exams = Exam.objects.filter(
        course_instance__instructors=instance.instructor)
    if instance.permission_allowed is False:
        # Permission revoked: blacklist every affected exam in one update.
        exams.exclude(blacklisted=True).update(blacklisted=True)
    else:
        # Permission granted: un-blacklist only exams whose *other*
        # instructors also allow publication.
        for exam in exams:
            if exam.has_permission():
                exam.blacklisted = False
                exam.save()
# Wire up the signal handlers: file cleanup on exam delete, flag-count
# maintenance on flag save, blacklist maintenance on permission save.
pre_delete.connect(delete_file, sender=Exam)
post_save.connect(update_exam_flags, sender=ExamFlag)
post_save.connect(update_exam_blacklist, sender=InstructorPermission)
| TBP-IT/tbpweb | exams/models.py | models.py | py | 6,100 | python | en | code | 2 | github-code | 13 |
5505537147 | from aiogram import executor, Bot, Dispatcher, types
from keyborads import *
# NOTE(security): the bot token is hardcoded in source (and therefore leaked
# with the repository); prefer reading it from an environment variable and
# rotating this token.
bot = Bot(token='6191956586:AAEycG1ebRMhEq3iMBpzlAg0CXTcOIeaPXc')
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def show_keyboards(message: types.Message):
    """Handle /start: greet the user by full name and show the language keyboard."""
    name = message.from_user.full_name
    await message.answer(text=f"Assalomu alaykum {name},\nIltimos ona tilingizni tanlang😊", reply_markup=language)
@dp.message_handler(text='uz🇺🇿')
async def uz_hendler(message: types.Message):
    """Handle the Uzbek language choice: send the shop photo, then the main menu."""
    name = message.from_user.full_name
    # Static promo photo of the book shop (hardcoded Google image URL).
    photo = 'https://lh3.googleusercontent.com/p/AF1QipNowR_J62Gzk8QB_CjWz9ijW3OM_JC0357eKnDW=w768-h768-n-o-v1'
    await message.answer_photo(photo=photo)
    await message.answer(
        text=f"Assalomu alaykum {name},\nSiz 'Azon' kitob do'konlari rasmiy botining asosiy menyusidasiz.\n"
             f"Iltimos quyidagi tugmalardan o'zingizga keraklisini tanlang😊", reply_markup=keyboards1)
if __name__ == '__main__':
    # Start long polling; skip_updates discards messages received while offline.
    executor.start_polling(dp, skip_updates=True)
| Nodirabegim16/book-shopping | app.py | app.py | py | 1,017 | python | en | code | 0 | github-code | 13 |
35128798989 | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    def levelOrder(self, root: 'Node') -> 'List[List[int]]':
        """Return the values of an N-ary tree grouped level by level (BFS).

        BUG FIX: the return annotation previously referenced ``List`` without
        importing it from ``typing``; since annotations are evaluated when the
        class body executes, that raises NameError anywhere outside LeetCode's
        harness. Quoting the annotation defers evaluation and keeps the hint.
        """
        if not root: return []
        res = []
        cur_stack = [root]  # all nodes on the level currently being emitted
        while cur_stack:
            tmp, nxt_stack = [], []
            for node in cur_stack:
                tmp.append(node.val)
                # Children of this level form the next level.
                for child in node.children:
                    nxt_stack.append(child)
            cur_stack = nxt_stack
            res.append(tmp)
        return res
"""In Binary tree, this approach was very slow. Therefore, learn a more optimal
solution.
https://leetcode.com/problems/n-ary-tree-level-order-traversal/discuss/162439/Python-iterative-solution-beat-96"""
from collections import deque
class Solution:
    """Level-order traversal of an N-ary tree using a FIFO queue."""
    def levelOrder(self, root):
        levels = []
        pending = deque([root] if root else [])
        while pending:
            width = len(pending)  # number of nodes on the current level
            values = []
            for _ in range(width):
                node = pending.popleft()
                values.append(node.val)
                pending.extend(node.children)
            levels.append(values)
        return levels
"""O(n) time and O(n) space to store elements in queue (output res space not counted).
https://leetcode.com/problems/n-ary-tree-level-order-traversal/discuss/1386593/C%2B%2BPython-BFS-and-DFS-Solutions-Clean-and-Concise"""
| aakanksha-j/LeetCode | 429. N-ary Tree Level Order Traversal/bfs_deque_1.py | bfs_deque_1.py | py | 1,551 | python | en | code | 0 | github-code | 13 |
17654225350 | from django.urls import path
from . import views
# Routes for the projects app. The detail/update/delete routes key on the
# project's UUID primary key.
urlpatterns = [
    path('projects/', views.pro, name='projects'),
    path('projectitem/<uuid:pk>/', views.projectitem, name='projectitem'),
    path('create-project/', views.create_project, name='create-project'),
    path('update-project/<uuid:pk>/', views.update_project, name='update-project'),
    path('delete-project/<uuid:pk>/', views.delete_project, name='delete-project'),
]
| aashiqahmed97/devSearch | projects/urls.py | urls.py | py | 436 | python | en | code | 0 | github-code | 13 |
class MyCircularDeque:
    """Fixed-capacity double-ended queue over a flat Python list.

    ``front`` and ``last`` index the first and last occupied slots of
    ``queue``; both are -1 only before the first insertion. ``size`` tracks
    occupancy and ``Max`` is the highest valid index (capacity - 1). Indices
    wrap around the ends of the list to make the buffer circular.

    (BUG FIX: the class header line had been corrupted by a fused dataset id
    and would not parse; restored. Logic is unchanged apart from simplified
    boolean returns in isEmpty/isFull.)
    """

    def __init__(self, k: int):
        self.queue = [-1] * k
        self.Max = k - 1
        self.front = -1
        self.last = -1
        self.size = 0

    def insertFront(self, value: int) -> bool:
        """Push value at the front; return False when the deque is full."""
        if self.size > self.Max:
            return False
        if self.front == -1:
            # First element ever: front and last share slot 0.
            self.front = 0
            self.last = 0
        elif self.front == 0:
            self.front = self.Max  # wrap to the end of the buffer
        else:
            self.front -= 1
        self.queue[self.front] = value
        self.size += 1
        return True

    def insertLast(self, value: int) -> bool:
        """Append value at the back; return False when the deque is full."""
        if self.size > self.Max:
            return False
        self.last += 1
        if self.last > self.Max:
            self.last = 0  # wrap to the start of the buffer
        self.queue[self.last] = value
        self.size += 1
        if self.front == -1:
            self.front += 1  # first element: front joins last at slot 0
        return True

    def deleteFront(self) -> bool:
        """Drop the front element; return False when the deque is empty."""
        if self.size > 0:
            self.front += 1
            if self.front > self.Max:
                self.front = 0
            self.size -= 1
            return True
        else:
            return False

    def deleteLast(self) -> bool:
        """Drop the back element; return False when the deque is empty."""
        if self.size > 0:
            self.last -= 1
            if self.last < 0:
                self.last = self.Max
            self.size -= 1
            return True
        else:
            return False

    def getFront(self) -> int:
        """Return the front element, or -1 when the deque is empty."""
        if self.size == 0:
            return -1
        return self.queue[self.front]

    def getRear(self) -> int:
        """Return the back element, or -1 when the deque is empty."""
        if self.size == 0:
            return -1
        return self.queue[self.last]

    def isEmpty(self) -> bool:
        return self.size == 0

    def isFull(self) -> bool:
        return self.size > self.Max
# Your MyCircularDeque object will be instantiated and called as such:
# obj = MyCircularDeque(k)
# param_1 = obj.insertFront(value)
# param_2 = obj.insertLast(value)
# param_3 = obj.deleteFront()
# param_4 = obj.deleteLast()
# param_5 = obj.getFront()
# param_6 = obj.getRear()
# param_7 = obj.isEmpty()
# param_8 = obj.isFull() | asnakeassefa/Competitive-programming | circularDeque.py | circularDeque.py | py | 2,430 | python | en | code | 0 | github-code | 13 |
74325227536 | import mysql.connector
db = mysql.connector.connect(
host="localhost",
user="root",
passwd ="root",
database ="voertuigen"
)
class Voertuig:
    """A vehicle row from the `auto` table (id, make, model, build year,
    fuel type, rented flag)."""
    def __init__(self, id, merk, model, bouwjaar, brandstof, verhuurd):
        self.id = id
        self.merk = merk
        self.model = model
        self.bouwjaar = bouwjaar
        self.brandstof = brandstof
        # BUG FIX: the `verhuurd` (rented) argument was accepted but never
        # stored on the instance.
        self.verhuurd = verhuurd
    def __str__(self):
        # Dutch display string: "make and model: ... / build year: ..."
        return "merk en model: {} {}\nbouwjaar: {} ".format(self.merk,self.model,self.bouwjaar)
mycursor = db.cursor()
lijst_autos = []
def toon_voertuigen():
    """Load every row of the `auto` table into the module-level lijst_autos cache."""
    mycursor.execute("SELECT * FROM auto")
    for x in mycursor:
        # Row layout: (id, merk, model, bouwjaar, brandstof, verhuurd).
        lijst_autos.append(Voertuig(x[0],x[1],x[2],x[3],x[4],x[5]))
def toon_lijst_autos():
    """Print every cached Voertuig, one str() rendering per entry."""
    for voertuig in lijst_autos:
        print(voertuig)
def toon_voertuigen_niet_verhuurd():
    """Print all cars that are not currently rented (Verhuurd = 'Nee')."""
    mycursor.execute("SELECT * FROM auto WHERE Verhuurd = 'Nee'")
    for x in mycursor:
        print(*x)  # unpack the row tuple into space-separated columns
def voeg_auto_toe():
    """Prompt (in Dutch) for the fields of a new car and insert the row.

    Uses a parameterized INSERT, so the typed values are passed safely.
    """
    merk = input("geef het merk in")
    model = input("geef het model in")
    bouwjaar = input("geef het bouwjaar in")
    brandstof = input("geef brandstof in")
    verhuurd = input("wagen verhuurd ja of nee")
    mycursor.execute("INSERT INTO auto(merk,model,bouwjaar,brandstof,verhuurd) VALUES (%s,%s,%s,%s,%s)"
                     ,(merk,model,bouwjaar,brandstof,verhuurd))
    db.commit()
def huur_auto():
    """List the available cars, then mark a user-chosen one as rented."""
    print("deze wagens zijn nog beschikbaar")
    toon_voertuigen_niet_verhuurd()
    id = input("geef het id van de wagen die je wenst te huren")
    # BUG FIX: the old string concatenation produced "...'Ja'WHERE..." (missing
    # space -> SQL syntax error) and spliced raw user input into the statement
    # (SQL injection). A parameterized query fixes both, matching the style
    # already used in voeg_auto_toe().
    mycursor.execute("UPDATE auto SET Verhuurd = 'Ja' WHERE idAuto = %s", (id,))
    db.commit()
def verwijder_auto():
    """List the cached cars, then delete the row for a user-chosen id."""
    toon_lijst_autos()
    id = input("Geef het id van de wagen dat je wenst te verwijderen")
    # Parameterized DELETE: the old version concatenated the typed-in id
    # straight into the SQL string (SQL injection).
    mycursor.execute("DELETE FROM auto WHERE idAuto = %s", (id,))
    db.commit()
# Demo flow: fill the cache, let the user delete one car, then load again.
# NOTE(review): toon_voertuigen() appends rather than replaces, so calling it
# twice leaves duplicate entries in lijst_autos.
toon_voertuigen()
verwijder_auto()
toon_voertuigen()
| bjornlecis/MySQLTest | Voertuigen.py | Voertuigen.py | py | 1,818 | python | nl | code | 0 | github-code | 13 |
16726094644 | # NAIVE BAYES CLASSIFIER
# --- Training data preparation ---
# Declaring the initial text-category list
sports = ["A great game", "Very clean match", "A clean but forgettable game"]
nonSports = ["The election was over", "It was a close election"]
# Initializing the list to store each words of each elements of sports and nonSports
sportsWords = []
nonSportsWords = []
uniqueWords = []
# Initializing the unique word count to 0
uniqueWordCount = 0
# Tokenize the sports corpus (lower-cased), tracking the shared vocabulary.
for sentence in sports:
    # Splitting the sentence into words
    words = sentence.split() # Returns a list
    for word in words:
        word = word.lower()
        if word not in uniqueWords:
            uniqueWords.append(word)
        # Appending the word to the sportsWords list
        sportsWords.append(word)
# Tokenize the non-sports corpus the same way.
for sentence in nonSports:
    # Splitting the sentence into words
    words = sentence.split() # Returns a list
    for word in words:
        word = word.lower()
        if word not in uniqueWords:
            uniqueWords.append(word)
        # Appending the word to the nonSportsWords list
        nonSportsWords.append(word)
# Calculating total number of words in each category
sportsWordCount = len(sportsWords)
nonSportsWordCount = len(nonSportsWords)
uniqueWordCount = len(uniqueWords)
# Laplace-smoothed per-word likelihood for a category.
def probabilityOfWord(word, category):
    """Return P(word | category) with add-one (Laplace) smoothing.

    BUG FIX: proper Laplace smoothing is (count + 1) / (N_category + V)
    where V is the vocabulary size. The original divided by N_category
    only, and left the computed uniqueWordCount unused.
    """
    word = word.lower()
    if category == "sports":
        return (sportsWords.count(word) + 1) / (sportsWordCount + uniqueWordCount)
    elif category == "nonSports":
        return (nonSportsWords.count(word) + 1) / (nonSportsWordCount + uniqueWordCount)
# Classify *text* by comparing the naive-Bayes scores of both categories.
def probabilityCategorizer(text):
    """Print whether `text` looks more like the sports or non-sports corpus."""
    probabilitySports = 1
    probabilityNonSports = 1
    # Multiply the per-word likelihoods for each category (naive independence).
    for word in text.split():
        probabilitySports *= probabilityOfWord(word, "sports")
        probabilityNonSports *= probabilityOfWord(word, "nonSports")
    # Comparing both values
    if probabilitySports > probabilityNonSports:
        print("The text belongs to sports category.")
    elif probabilityNonSports > probabilitySports:
        print("The text belongs to non-sports category.")
    else:
        # BUG FIX: a tie previously produced no output at all.
        print("The text is equally likely to belong to either category.")
testText = "A very close game"
probabilityCategorizer(testText) | swarup-prog/Naive-Bayes-Classifier | NaiveBayesClassifier.py | NaiveBayesClassifier.py | py | 2,498 | python | en | code | 0 | github-code | 13 |
14191724592 | import cv2
import numpy as np
# Load the source image. NOTE(review): the Windows path happens to work with
# unescaped backslashes (\d and \I are not escape sequences), but a raw
# string would be safer.
img = cv2.imread("D:\dahab\dahab1\IMG_20200131_142814.jpg")
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1)
imgCanny = cv2.Canny(img, 100, 100)
# BUG FIX: cv2.dilate requires a structuring-element array; the old call
# passed kernel=.5 (a float), which raises a TypeError.
kernel = np.ones((5, 5), np.uint8)
imgDil = cv2.dilate(imgGray, kernel, iterations=1)
cv2.imshow("GRAY", imgGray)
cv2.imshow("Blur", imgBlur)
cv2.imshow("cany", imgCanny)
# BUG FIX: the dilated image previously reused the "cany" window and hid the
# Canny result; give it its own window.
cv2.imshow("dilate", imgDil)
cv2.waitKey(0)
| MOHAMMED-NASSER22/PycharmProjects | pythonProject/ch2.py | ch2.py | py | 403 | python | en | code | 0 | github-code | 13 |
17807504130 | #Función para sumar dos números binarios
def suma(A, B):
    """Add two equal-length binary strings bit by bit, LSB first.

    A carry out of the most-significant bit is discarded, giving
    modular (fixed-width) addition.
    """
    carry = 0
    result = ''
    for i in range(len(A) - 1, -1, -1):
        bit_sum = int(A[i]) + int(B[i]) + carry
        result = str(bit_sum % 2) + result  # prepend: building MSB-last
        carry = bit_sum // 2
    return result
# Two's complement of a binary string: invert every bit, then add one.
def complemento(C):
    """Return the two's complement of binary string C (same width as C)."""
    # One's complement: flip each bit.
    M = ''
    for i in range (0, len(C)):
        M += str((int(C[i]) + 1) % 2)
    # GENERALIZED: the increment used to be hard-coded as '0001', which is
    # only correct for 4-bit inputs; build a "one" of matching width instead
    # (identical result for the 4-bit values this script uses).
    M = suma(M, '0' * (len(C) - 1) + '1')
    return M
def division(Q, M, A):
    """Divide Q by M with the non-restoring binary division algorithm.

    Q: dividend (ends holding the quotient), M: divisor, A: accumulator
    (all zeros on entry; ends holding the remainder in non-restored form).
    Prints a Spanish-labelled trace of every iteration.
    """
    count = len(M)
    comp_M = complemento(M)
    # flag == 'paso' means the previous step left A non-negative, so the
    # next step subtracts M (adds its complement); otherwise it adds M back.
    flag = 'paso'
    print ('Valores: A:', A,' Q:', Q, ' M:', M)
    # length of the binary number
    while (count):
        print ("\npaso:", len(M)-count + 1,
                          end = ' | ')
        # Shift A:Q left by one bit (Q's MSB moves into A's LSB).
        A = A[1:] + Q[0]
        if (flag == 'paso'):
            A = suma(A, comp_M)
        else:
            A = suma(A, M)
        print('A:', A, ' Q:', Q[1:]+'_', end ='')
        # A's sign bit decides the new quotient bit and the next operation.
        if (A[0] == '1'):
            Q = Q[1:] + '0'
            flag = 'no paso'
            print ('| A:', A, ' Q:', Q,)
        else:
            Q = Q[1:] + '1'
            flag = 'paso'
            print ('| A:', A, ' Q:', Q)
        count -= 1
    print ('\n(Q):', Q,' (A):', A)
if __name__ == "__main__":
    num = 56 # Number to convert.
    tamano = 4 # Number of digits.
    # NOTE(review): `binario` (num rendered as a tamano-bit string) is
    # computed but never used below.
    binario = ''.join([str(min(2**i & num, 1)) for i in range(tamano-1,-1,-1)])
    dividendo = '0111'
    A = '0' * len(dividendo)  # accumulator starts at zero, same width as the dividend
    divisor = '0101'
    division(dividendo,divisor,A)
| BryanSuca/lab03 | lab03.py | lab03.py | py | 1,716 | python | es | code | 0 | github-code | 13 |
8642787218 |
from train_network import load_data, do_it, DEFAULT_TRAIN_IMAGE_SIZE
dataset_path = "images/guitar"
model = "guitar"
EPOCHS = (15, 25, 35)
LEARN_RATES = (0.001, 0.0001)
BATCH_SIZES = (32, 48, 64)
TRAIN_IMAGE_SIZES = (DEFAULT_TRAIN_IMAGE_SIZE,)
def train():
    """Grid-search driver: call do_it() once per hyper-parameter combination.

    The dataset is loaded once per training-image size and shared by every
    (epochs, learn-rate, batch-size) combination at that size.
    """
    for image_size in TRAIN_IMAGE_SIZES:
        data_label = load_data(dataset_path, image_size)
        combos = (
            (epochs, learn_rate, batch_size)
            for epochs in EPOCHS
            for learn_rate in LEARN_RATES
            for batch_size in BATCH_SIZES
        )
        for epochs, learn_rate, batch_size in combos:
            do_it({
                "dataset": dataset_path,
                "model": model,
                "epochs": epochs,
                "learn_rate": learn_rate,
                "batch_size": batch_size,
                "train_image_size": image_size,
                "plot": None,
                "data": data_label,
            })


if __name__ == "__main__":
    train()
| windsting/yoni | batch_train.py | batch_train.py | py | 916 | python | en | code | 1 | github-code | 13 |
26992622215 | import os
# I don't totally understand this line.
# I understand that we're configuring the settings for the project
# and that we need to do this before we manipulate the models
os.environ.setdefault('DJANGO_SETTINGS_MODULE','first_project.settings')
import django
django.setup()
import random
from first_app.models import AccessRecord, Webpage, Topic
from faker import Faker
fakegen = Faker()
topics = ['Search', 'Social', 'Marketplace', 'News', 'Games']
def add_topic():
    """Pick a random topic name and return the (possibly freshly created) Topic row."""
    topic, _created = Topic.objects.get_or_create(top_name=random.choice(topics))
    topic.save()  # persist even when the row already existed
    return topic
def populate(N=5):
    """Seed the database with N fake webpages, each tied to a random topic
    and given one fake access record."""
    for _ in range(N):
        topic = add_topic()
        # Generate the fake fields. Keep this call order (url, date, company)
        # so a seeded Faker reproduces the same data as before.
        url = fakegen.url()
        date = fakegen.date()
        company = fakegen.company()
        page = Webpage.objects.get_or_create(topic=topic, url=url, name=company)[0]
        AccessRecord.objects.get_or_create(name=page, date=date)
if __name__ == '__main__':
print('populating script!')
populate(20)
print('populating complete') | staubind/django-part-two | first_project/populate_first_app.py | populate_first_app.py | py | 1,240 | python | en | code | 0 | github-code | 13 |
35884796508 |
path1 = "/Users/traviskochel/Desktop/temp/Kablammo-8-21-d.pdf"
path2 = "/Users/traviskochel/Desktop/temp/Kablammo-8-24-b.pdf"
exportPath = "/Users/traviskochel/Desktop/temp/Kablammo-8-21-8-24.pdf"
# rgba
color1 = (1,0,0,1)
color2 = (0,0,1,1)
# PDF exports in raster, so raise this if it's too pixellated. Lower for quicker export times.
pdfScale = 2
pageCount = numberOfPages(path1)
def buildLayer(path, color, pageNumber):
    """Rasterize one PDF page into a tinted DrawBot ImageObject.

    Draws the page at pdfScale inside the ImageObject's own canvas, then
    tints the raster with falseColor using the given RGBA tuple.
    (size/scale/image/imageSize are DrawBot canvas globals.)
    """
    img = ImageObject()
    with img:
        w, h = imageSize(path)
        size(w*pdfScale, h*pdfScale)
        scale(pdfScale)
        image(path, (0, 0), pageNumber=pageNumber)
    img.falseColor(color0=color)
    return img
def main():
    """For each page, tint both source PDFs, multiply-blend them, and save.

    Page sizes are taken from path1; NOTE(review): this assumes path2's pages
    have matching dimensions — confirm for mismatched inputs.
    """
    for pageNumber in range(1, pageCount+1):
        #for pageNumber in range(1, 3):
        w, h = imageSize(path1, pageNumber=pageNumber)
        np = newPage(w*pdfScale, h*pdfScale)  # NOTE(review): return value unused
        layer1 = buildLayer(path1, color1, pageNumber)
        layer2 = buildLayer(path2, color2, pageNumber)
        # Composite: layer2 behind, layer1 multiplied on top.
        layer1.multiplyBlendMode(backgroundImage=layer2)
        image(layer1, (0, 0))
    saveImage(exportPath)
main() | scribbletone/overlay-pdf | OverlayPDF.py | OverlayPDF.py | py | 1,132 | python | en | code | 16 | github-code | 13 |
33447651122 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 12:32:53 2017
@author: wroscoe
"""
import os
import time
import json
import random
import glob
class Tub(object):
    """
    A datastore to store sensor data in a key, value format.
    Accepts str, int, float, image_array, image, and array data types.
    For example:
    #Create a tub to store speed values.
    >>> path = '~/mydonkey/test_tub'
    >>> inputs = ['user/speed', 'cam/image']
    >>> types = ['float', 'image']
    >>> t=Tub(path=path, inputs=inputs, types=types)
    """
    # NOTE(review): user_meta=[] is a mutable default argument; it is only
    # iterated (never mutated) below, so it is harmless here, but a tuple
    # default would be safer — consider changing.
    def __init__(self, path, inputs=None, types=None, user_meta=[]):
        # Directory that holds meta.json, exclude.json and record_*.json files.
        self.path = os.path.expanduser(path)
        #print('path_in_tub:', self.path)
        self.meta_path = os.path.join(self.path, 'meta.json')
        self.exclude_path = os.path.join(self.path, "exclude.json")
        self.df = None
        exists = os.path.exists(self.path)
        if exists:
            #load log and meta
            #print("Tub exists: {}".format(self.path))
            # Missing meta.json is tolerated: fall back to empty inputs/types.
            try:
                with open(self.meta_path, 'r') as f:
                    self.meta = json.load(f)
            except FileNotFoundError:
                self.meta = {'inputs': [], 'types': []}
            # exclude.json lists record indices to skip in gather_records().
            try:
                with open(self.exclude_path,'r') as f:
                    excl = json.load(f) # stored as a list
                    self.exclude = set(excl)
            except FileNotFoundError:
                self.exclude = set()
            # Resume numbering after the highest existing record; max() on an
            # empty index raises ValueError, meaning the tub has no records yet.
            try:
                self.current_ix = self.get_last_ix() + 1
            except ValueError:
                self.current_ix = 0
            # Preserve the original creation time if recorded, otherwise stamp now.
            if 'start' in self.meta:
                self.start_time = self.meta['start']
            else:
                self.start_time = time.time()
                self.meta['start'] = self.start_time
        elif not exists and inputs:
            print('Tub does NOT exist. Creating new tub...')
            self.start_time = time.time()
            #create log and save meta
            os.makedirs(self.path)
            self.meta = {'inputs': inputs, 'types': types, 'start': self.start_time}
            # Extra "key:value" strings from the user are merged into the meta dict.
            for kv in user_meta:
                kvs = kv.split(":")
                if len(kvs) == 2:
                    self.meta[kvs[0]] = kvs[1]
                # else exception? print message?
            with open(self.meta_path, 'w') as f:
                json.dump(self.meta, f)
            self.current_ix = 0
            self.exclude = set()
            print('New tub created at: {}'.format(self.path))
        else:
            msg = "The tub path you provided doesn't exist and you didnt pass any meta info (inputs & types)" + \
                "to create a new tub. Please check your tub path or provide meta info to create a new tub."
            raise AttributeError(msg)
    def get_last_ix(self):
        """Return the highest record index on disk; raises ValueError when empty."""
        index = self.get_index()
        return max(index)
    def get_index(self, shuffled=True):
        """Return the numeric indices of all record_* files.

        NOTE(review): the default shuffles the index (training convenience,
        presumably); pass shuffled=False for a sorted listing.
        """
        files = next(os.walk(self.path))[2]
        record_files = [f for f in files if f[:6]=='record']
        # Parse "record_<num>.json" -> <num>; unparseable names collapse to 0.
        # NOTE(review): bare except here also hides unexpected errors — consider
        # narrowing to (IndexError, ValueError).
        def get_file_ix(file_name):
            try:
                name = file_name.split('.')[0]
                num = int(name.split('_')[1])
            except:
                num = 0
            return num
        nums = [get_file_ix(f) for f in record_files]
        if shuffled:
            random.shuffle(nums)
        else:
            nums = sorted(nums)
        return nums
    def gather_records(self):
        """Return the full paths of record_*.json files, sorted by index,
        with any index listed in self.exclude filtered out."""
        ri = lambda fnm : int( os.path.basename(fnm).split('_')[1].split('.')[0] )
        record_paths = glob.glob(os.path.join(self.path, 'record_*.json'))
        if len(self.exclude) > 0:
            record_paths = [f for f in record_paths if ri(f) not in self.exclude]
        record_paths.sort(key=ri)
        return record_paths
| DiyAI-robocar/AI_Summer_School_trainig | parts/datastore.py | datastore.py | py | 3,867 | python | en | code | 0 | github-code | 13 |
3285660187 | import socketio
# Synchronous socket.io client; handlers below are registered on it.
sio = socketio.Client()


@sio.on('all')
def on_message(data):
    """Print any payload broadcast on the 'all' event."""
    print(f'\n{data}\n')


@sio.event
def connect():
    # Was "async def": socketio.Client() is the *synchronous* client and never
    # awaits coroutine handlers, so the async version was silently skipped
    # (only AsyncClient supports async handlers).
    print("I'm connected!")


@sio.event
def connect_error(err):
    """Report the connection failure and tear the client down."""
    print(err)
    print("\nThe connection failed!\n")
    sio.disconnect()


@sio.event
def disconnect():
    print("I'm disconnected!")
7864699228 | from http import HTTPStatus
from typing import Any
from common.constants import MESSAGE_NOT_FOUND, MESSAGE_OAUTH_MISSING_REFRESH_TOKEN
from common.enums.form_provider import FormProvider
from fastapi import HTTPException
from googleform.app.repositories.oauth_credential import OauthCredentialRepository
from googleform.app.schemas.oauth_credential import Oauth2CredentialDocument
from googleform.app.services.oauth_google import OauthGoogleService
class OauthCredentialService:
    """
    Class for interacting with OAuth2 credential documents.
    This class provides a convenient way to access OAuth2 credential
    documents and perform various operations on them.
    """
    def __init__(
        self,
        oauth_credential_repo: OauthCredentialRepository,
        oauth_google_service: OauthGoogleService,
    ):
        """Initialize the OAuth2 credential service.
        Args:
            oauth_credential_repo (OauthCredentialRepository): An instance
                of the OAuth2 credential repository.
            oauth_google_service (OauthGoogleService): An instance of the
                OAuth2 Google service.
        """
        self.oauth_credential_repo: OauthCredentialRepository = oauth_credential_repo
        self.oauth_google_service: OauthGoogleService = oauth_google_service
    async def _get_oauth_credential(
        self, email: str, provider: FormProvider = FormProvider.GOOGLE
    ) -> Oauth2CredentialDocument:
        """
        Get an OAuth2 credential document by email and provider.
        Args:
            email (str): The email of the user to get the credential for.
            provider (FormProvider, optional): The provider of the form.
                Defaults to FormProvider.GOOGLE.
        Returns:
            Oauth2CredentialDocument: Whatever the repository returns
                (may be falsy when no document exists; the not-found check
                is performed by verify_oauth_token).
        """
        return await self.oauth_credential_repo.get(email, provider)
    async def add_oauth_credential(
        self, email: str, credentials: Any, provider: FormProvider = FormProvider.GOOGLE
    ):
        """
        Add an OAuth2 credential document by email and provider.
        Args:
            email (str): The email of the user to store the credential for.
            credentials (Any): Credential object returned from OAuth authorization.
            provider (FormProvider, optional): The provider of the form.
                Defaults to FormProvider.GOOGLE.
        Returns:
            The result of the repository ``add`` call (presumably the stored
            Oauth2CredentialDocument — confirm against the repository).
        """
        return await self.oauth_credential_repo.add(email, credentials, provider)
    async def verify_oauth_token(
        self, email: str, provider: FormProvider = FormProvider.GOOGLE
    ) -> Oauth2CredentialDocument:
        """
        Verify an OAuth2 token.
        This method retrieves an OAuth2 credential document by
        email and provider, and verifies that it has a valid refresh
        token. If the token is valid, it will be refreshed if necessary.
        Args:
            email (str): The email of the user to verify the token for.
            provider (FormProvider, optional): The provider of the form.
                Defaults to FormProvider.GOOGLE.
        Returns:
            Oauth2CredentialDocument: An OAuth2 credential document with
                a valid access token.
        Raises:
            HTTPException: 404 if the document is not found, 400 if it
                does not carry a refresh token.
        """
        oauth_credential = await self._get_oauth_credential(email, provider)
        if not oauth_credential:
            raise HTTPException(
                status_code=HTTPStatus.NOT_FOUND, detail=MESSAGE_NOT_FOUND
            )
        # A refresh token is required to (re)fetch a valid access token.
        if (
            not oauth_credential.credentials
            or not oauth_credential.credentials.refresh_token
        ):
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST,
                detail=MESSAGE_OAUTH_MISSING_REFRESH_TOKEN,
            )
        return await self.oauth_google_service.fetch_oauth_token(oauth_credential)
| bettercollected/bettercollected-integrations-google-forms | googleform/app/services/oauth_credential.py | oauth_credential.py | py | 4,226 | python | en | code | 1 | github-code | 13 |
74007812496 | num_testes = int(input())
teste = 1
for _ in range(num_testes):
nl, nc, i_soco, j_soco = [ int(x) for x in input().split() ]
i_soco -= 1
j_soco -= 1
matriz = [ [int(x) for x in input().split()] for _ in range(nl) ]
for i in range(nl):
for j in range(nc):
matriz[i][j] += max(10 - max(abs(i - i_soco), abs(j - j_soco)), 1)
print(f"Parede {teste}:")
print(matriz)
teste += 1
| broeringlucas/SIN-UFSC | INE5603 - POO1/Coleções Bidimensionais (matrizes)/soco_hulk.py | soco_hulk.py | py | 435 | python | en | code | 0 | github-code | 13 |
29263856362 | import unittest
from scrappy.scrapper import youtube_video_data_scrapper
class TestScrapper(unittest.TestCase):
    """Smoke test: scrape a known video and check its title."""

    def test_scrap(self):
        url = 'https://www.youtube.com/watch?v=TFMnICdHiyM'
        driver = r"C:\Users\ME\projects\for_github\chromedriver_win32\chromedriver.exe"
        # assertEqual, not assertAlmostEqual: the almost-equal variant is for
        # numeric rounding and raises TypeError (instead of a clean assertion
        # failure) when two unequal strings are compared.
        self.assertEqual(
            youtube_video_data_scrapper(url=url, driver=driver)['title'], "Xiaomi Mi 11 Lite vs Samsung A52: SIMILAR BUT ONLY ONE WINNER! Let's Find Out!")


if __name__ == '__main__':
    unittest.main()
| MerlinEmris/youtube_srapping_with_python | mescrappy/test.py | test.py | py | 525 | python | en | code | 13 | github-code | 13 |
9373022775 | import numpy as np
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from sklearn.feature_selection import SelectorMixin
from sklearn.utils.validation import check_is_fitted, check_X_y
from .stratified_continious_split import ContinuousStratifiedKFold
class CrossValidatedFeatureSelector(MetaEstimatorMixin, SelectorMixin, BaseEstimator):
    """Select features whose cross-validated importance is non-zero.

    The wrapped estimator is fit on each training fold of a
    ContinuousStratifiedKFold split; features whose mean
    ``feature_importances_`` across folds is (numerically) zero are
    masked out by SelectorMixin's transform.
    """

    def __init__(self, estimator, n_splits=5) -> None:
        self.estimator = estimator
        self.n_splits = n_splits
        self.cv = ContinuousStratifiedKFold(n_splits=self.n_splits)

    def _get_support_mask(self):
        # Required by SelectorMixin; True marks columns that survive selection.
        check_is_fitted(self)
        return self.mask_

    def fit(self, X, y):
        """Fit the wrapped estimator per fold and build the support mask."""
        X = self._validate_data(X)  # type: ignore
        X, y = check_X_y(X, y)
        feat_importances = []
        for train_idxs, _ in self.cv.split(X, y):
            X_train, y_train = X[train_idxs], y[train_idxs]
            self.estimator.fit(X_train, y_train)
            feat_importances.append(self.estimator.feature_importances_)
        self.mean_feature_importance_ = np.vstack(feat_importances).mean(axis=0)
        self.mask_ = ~np.isclose(self.mean_feature_importance_, 0.0)
        # scikit-learn estimator contract: fit must return self (the original
        # returned None, breaking `selector.fit(X, y).transform(X)` chaining).
        return self

    def fit_transform(self, X, y):
        self.fit(X, y)
        return self.transform(X)
class CorrelationThreshold(SelectorMixin, BaseEstimator):
    """Drop features that correlate too strongly with an earlier feature.

    A column is removed when its absolute Pearson correlation with any
    preceding column exceeds ``threshold``.
    """
    def __init__(self, threshold=0.95) -> None:
        # Absolute-correlation cutoff above which a column is dropped.
        self.threshold = threshold
    def fit(self, X, y=None):
        # NOTE(review): X must be a pandas DataFrame here — .corr() is called
        # before the array validation below. TODO confirm intended input type.
        corr_matrix = X.corr().abs()
        # Keep only the strictly-upper triangle so each pair is tested once,
        # attributing the drop to the later of the two columns.
        upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
        # True for preserved columns, False for dropped columns
        self.mask_ = np.array(
            [not any(upper[column] > self.threshold) for column in upper.columns]
        )
        # NOTE(review): validation runs *after* the mask is computed and the
        # converted array is discarded on return; presumably only the
        # n_features_in_ bookkeeping side effect is wanted — verify.
        X = self._validate_data(  # type: ignore
            X,
            accept_sparse=("csr", "csc"),
            dtype=float,
            force_all_finite="allow-nan",
        )
        return self
    def _get_support_mask(self):
        # Required by SelectorMixin; True marks columns that survive.
        check_is_fitted(self)
        return self.mask_
| rahuldeve/chem_commons | feature_selection.py | feature_selection.py | py | 2,028 | python | en | code | 0 | github-code | 13 |
197255148 | import os
import shutil
def readfile(filename):
    """Read a matrix file: first line is "n m", then n rows of floats.

    Returns:
        (n, m, rows): row/column counts and a list of n lists of floats.
    """
    rows = []
    # "with" guarantees the handle is closed even if a parse error is raised
    # mid-read; the original explicit close() leaked it in that case.
    with open(filename, mode='r') as f:
        n, m = (int(tok) for tok in f.readline().split())
        for _ in range(n):
            rows.append(list(map(float, f.readline().split())))
    return n, m, rows
def avg(a, n, m, j):
    """Return the mean of column j of the n-by-m matrix a.

    Prints a message and returns None when j is not a valid column index.
    """
    # Valid columns are 0..m-1; the original guard (j > m) let j == m
    # through and crashed with an IndexError instead of reporting it.
    if j >= m:
        print('Khong co cot {}'.format(j))
        return
    total = 0  # renamed from "sum" to avoid shadowing the builtin
    for i in range(n):
        total += a[i][j]
    return total / n
def checkzero(a, n, m):
    """Replace every zero in the n-by-m matrix a (in place) by its column mean.

    The means are computed before any replacement, so they still include
    the zeros.  Returns (number_of_zeros_replaced, a).
    """
    col_means = [avg(a, n, m, col) for col in range(m)]
    replaced = 0
    for row in range(n):
        for col in range(m):
            if a[row][col] == 0:
                replaced += 1
                a[row][col] = col_means[col]
    return replaced, a
def nhapfile(a, filename='E:/PythonCode/image2.txt'):
    """Write matrix a to filename: a "rows cols" header, then one row per line.

    The destination was previously hard-coded; it is now a parameter whose
    default is the original path, so existing callers are unaffected.
    """
    rows, cols = len(a), len(a[0])
    with open(filename, mode='w') as f:
        f.write(str(rows) + ' ')
        f.write(str(cols) + '\n')
        for i in range(rows):
            for j in range(cols):
                f.write(str(a[i][j]) + ' ')
            f.write('\n')
def nhap100(k, filename1, filename2):
    """Split matrix k: the first 100 rows go to filename1, the rest to filename2.

    Each file starts with a "rows cols" header line.  The second file's row
    loop previously started at index 101, silently dropping row 100 and
    writing one row fewer than its own header promised; it now starts at 100.
    """
    cols = len(k[0])
    with open(filename1, mode='w') as f1:
        f1.write('100' + ' ')
        f1.write(str(cols) + '\n')
        for i in range(100):
            for j in range(cols):
                f1.write(str(k[i][j]) + ' ')
            f1.write('\n')
    with open(filename2, mode='w') as f2:
        f2.write(str(len(k) - 100) + ' ')
        f2.write(str(cols) + '\n')
        # Row 100 belongs to the second file: range starts at 100, not 101.
        for i in range(100, len(k)):
            for j in range(cols):
                f2.write(str(k[i][j]) + ' ')
            f2.write('\n')
def copydir(dirname, filename):
    """Create directory *dirname* and relocate *filename* into it.

    Implemented as copy-then-delete, so the source file disappears only
    after the copy has succeeded.
    """
    os.mkdir(dirname)
    shutil.copy(filename, dirname)
    os.remove(filename)
# Load the matrix, show it, patch its zeros with column means, show the
# result, then write it out, split it into two files, and archive the output.
n, m, k = readfile('E:/PythonCode/Image.data')
print(n)
print(m)
for i in range (n):
    print(k[i])
d, b = checkzero(k, n, m)
print('So luong so 0 trong tep: ',d)
print('Sau khi sua:')
for i in range (n):
    print(b[i])
nhapfile(b)
# NOTE(review): the split uses the *original* list object k, but checkzero
# mutated it in place, so k and b are the same patched matrix here.
nhap100(k, 'E:/PythonCode/test1.txt', 'E:/PythonCode/test2.txt')
# Relative paths: both resolve against the current working directory.
copydir('dir1', 'image2.txt')
37353251363 | import time
import xlrd
from selenium import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
# All posting parameters (credentials, job fields, chromedriver path) live in
# the first sheet of info.xlsx; cell (26,1) holds the chromedriver location.
workbook = xlrd.open_workbook('info.xlsx')
worksheet = workbook.sheet_by_index(0)
chromedriver = str(worksheet.cell(26,1).value)
# One shared Chrome session is reused by every posting function below.
driver=webdriver.Chrome(executable_path=chromedriver)
def main():
    """Run each job-board posting flow in sequence on the shared driver.

    indeed() is currently disabled (commented out).
    """
    shineJobs()
    naukari()
    hirist()
    instahyre()
    # indeed()
    olx()
def shineJobs():
    """Log in to shine.com and create a free job alert from spreadsheet column 2."""
    # enter emailid and pass for shine
    ShineId = worksheet.cell(1,2).value
    ShinePwd = worksheet.cell(2,2).value
    # REFER TO READMEE FILE FOR INSTRUCTIONS ON HOW TO ENTER VALUES FOR SALARY AND EXPERIENCE
    jobName = worksheet.cell(5,2).value
    salary = str(int(worksheet.cell(6,2).value))
    experience = str(int(worksheet.cell(8,2).value))
    # NOTE(review): location reads column 1, unlike the rest of this
    # function which reads column 2 — confirm against info.xlsx layout.
    location = worksheet.cell(10,1).value
    department = worksheet.cell(11,2).value
    industry = worksheet.cell(12,2).value
    name = worksheet.cell(13,2).value
    # URLs and XPath locators for the login and alert-creation forms.
    site_id = "https://www.shine.com/myshine/login/"
    username = """//*[@id="id_email"]"""
    password = """//*[@id="id_password"]"""
    postJobSite_id = "https://www.shine.com/myshine/free-job-alerts/"
    jobNamePath = """//*[@id="id_keywords"]"""
    salaryPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[2]/span/select"""
    experiencePath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[3]/span/select"""
    locationPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[4]/span/div/button"""
    inputLocationPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[4]/span/div/div/div/input"""
    departmentPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[5]/div/div/button"""
    inputDepartmentPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[5]/div/div/div/div/input"""
    industryPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[6]/div/div/button"""
    inputIndustryPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[6]/div/div/div/div/input"""
    nameofAlertPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[1]/li[9]/input"""
    createBtnPath = """/html/body/div[2]/div/div[2]/div[2]/form/ul[2]/li/div/a"""
    # Sign in.
    driver.get(site_id)
    ele = driver.find_element_by_xpath(username).send_keys(ShineId)
    driver.find_element_by_xpath(password).send_keys(ShinePwd, Keys.RETURN)
    driver.implicitly_wait(1)
    # Fill the free-job-alert form and submit it.
    driver.get(postJobSite_id)
    driver.find_element_by_xpath(jobNamePath).send_keys(jobName)
    Select(driver.find_element_by_xpath(salaryPath)).select_by_value(salary)
    Select(driver.find_element_by_xpath(experiencePath)).select_by_value(experience)
    driver.find_element_by_xpath(locationPath).click()
    driver.find_element_by_xpath(inputLocationPath).send_keys(location, Keys.TAB, Keys.SPACE)
    driver.find_element_by_xpath(departmentPath).click()
    driver.find_element_by_xpath(inputDepartmentPath).send_keys(department, Keys.TAB, Keys.SPACE)
    driver.find_element_by_xpath(industryPath).click()
    driver.find_element_by_xpath(inputIndustryPath).send_keys(industry, Keys.TAB, Keys.SPACE)
    driver.find_element_by_xpath(nameofAlertPath).send_keys(name)
    driver.find_element_by_xpath(createBtnPath).click()
    print("Posted on shine jobs")
    driver.implicitly_wait(4)
def naukari():
    """Log in to naukri.com and create a free job alert from spreadsheet column 3.

    Fixes vs. the original: the retry loop's bare ``except:`` (which also
    swallowed KeyboardInterrupt/SystemExit) now catches Exception, the
    unused ``ele`` binding is gone, and the success message names the
    correct site (it previously said "times jobs").
    """
    NaukariId = worksheet.cell(1,3).value
    NuakariPwd = str(int(worksheet.cell(2,3).value))
    JobName = worksheet.cell(5,3).value
    location = worksheet.cell(10,3).value
    workExpNo = int(worksheet.cell(8,3).value)
    salary = int(worksheet.cell(6,3).value) # for .5 years enter 0 otherwise exact amount
    industry = worksheet.cell(12,3).value
    jobCategory = worksheet.cell(11,3).value #Department
    role = worksheet.cell(14,3).value
    name = worksheet.cell(13,3).value
    # all site id and locations
    site_id = "https://www.naukri.com/nlogin/login"
    createAlert_id = "https://www.naukri.com/free-job-alerts"
    password = """//*[@id="passwordField"]"""
    username = """//*[@id="usernameField"]"""
    signInBtn = """//*[@id="loginForm"]/div[3]/div[3]/div/button[1]"""
    JobPath = """//*[@id="Sug_kwdsugg"]"""
    locationPath = """//*[@id="Sug_locsugg"]"""
    workExp = """//*[@id="cjaExp"]"""
    salaryPath = """//*[@id="cjaMinSal"]"""
    industryPath = """//*[@id="cjaInd"]"""
    jobCategoryPath = """//*[@id="cjaJob"]"""
    rolePath = """//*[@id="cjaRole"]"""
    namePath = """//*[@id="nyja"]"""
    jobALertBtn = """//*[@id="cjaSubmit"]"""
    # Dropdowns are driven with DOWN keypresses; -1 accounts for the
    # initial placeholder entry at the top of each list.
    workStart = -1
    salaryStart = -1
    # Sign in.
    driver.get(site_id)
    driver.implicitly_wait(3)
    time.sleep(3)
    driver.find_element_by_xpath(username).send_keys(NaukariId)
    driver.find_element_by_xpath(password).send_keys(NuakariPwd, Keys.RETURN)
    driver.implicitly_wait(4)
    time.sleep(4)
    driver.get(createAlert_id)
    driver.implicitly_wait(3)
    time.sleep(3)
    # The alert form can be slow to render; retry up to 20 times,
    # refreshing the page after each failure.
    for i in range(20):
        try:
            driver.find_element_by_xpath(JobPath).send_keys(JobName)
            break
        except Exception:
            driver.refresh()
            time.sleep(1)
            driver.implicitly_wait(1)
    driver.find_element_by_xpath(locationPath).send_keys(location, Keys.TAB)
    # Step the experience dropdown down to the requested entry.
    for x in range(workStart, workExpNo):
        driver.find_element_by_xpath(workExp).send_keys(Keys.DOWN)
    else:
        driver.find_element_by_xpath(workExp).send_keys(Keys.TAB)
    # Above 50 the salary dropdown moves in 5-lakh buckets: map the exact
    # amount onto its bucket index before stepping the list.
    if(salary > 50):
        salary = salary - 50
        salary = salary / 5
        salary = int(salary)
        salary = salary+50
    for i in range(salaryStart, salary):
        driver.find_element_by_xpath(salaryPath).send_keys(Keys.DOWN)
    else:
        driver.find_element_by_xpath(salaryPath).send_keys(Keys.TAB)
    driver.find_element_by_xpath(industryPath).send_keys(industry, Keys.RETURN)
    driver.find_element_by_xpath(jobCategoryPath).send_keys(jobCategory, Keys.RETURN)
    driver.find_element_by_xpath(rolePath).send_keys(role, Keys.DOWN, Keys.RETURN)
    driver.find_element_by_xpath(namePath).send_keys(name, Keys.TAB, Keys.RETURN)
    print("Posted on naukri jobs")
    driver.implicitly_wait(4)
    time.sleep(4)
def hirist():
    """Log in to recruit.hirist.com and post a job from spreadsheet column 4."""
    HiristId = worksheet.cell(1,4).value
    HiristPwd = worksheet.cell(2,4).value
    jobTitle = worksheet.cell(5,4).value
    location = worksheet.cell(10,4).value
    yearsOfExperienceMin = str(int(worksheet.cell(8,4).value))
    yearsOfExperienceMax = str(int(worksheet.cell(9,4).value))
    jobDescription = worksheet.cell(24,4).value
    category = worksheet.cell(11,4).value #Department
    functionalArea = worksheet.cell(12,4).value #industry
    # URLs and XPath locators for the login and post-job forms.
    site_id = "https://recruit.hirist.com/login"
    postJobSite_id = "https://recruit.hirist.com/post-job?ref=nav"
    username = """//*[@id="email"]"""
    password = """//*[@id="password"]"""
    signInBtn = """//*[@id="login"]"""
    jobTitlePath = """//*[@id="title"]"""
    locationPath = """//*[@id="location"]"""
    yearsOfExpMinPath = """//*[@id="min_experience"]"""
    yearsOfExpMaxPath = """//*[@id="max_experience"]"""
    jobDescPath = """/html/body/div[6]/div/div[1]/div[1]/div[4]/div[4]/div[2]/div[1]"""
    categoryPath = """//*[@id="category"]"""
    functionalAreaPath = """//*[@id="functional_area"]"""
    postJobPath = """//*[@id="submitForm"]"""
    # Sign in.
    driver.get(site_id)
    time.sleep(2)
    driver.find_element_by_xpath(username).send_keys(HiristId)
    driver.find_element_by_xpath(password).send_keys(HiristPwd, Keys.RETURN)
    time.sleep(1)
    # Fill the post-job form and submit it.
    driver.get(postJobSite_id)
    time.sleep(2)
    driver.find_element_by_xpath(jobTitlePath).send_keys(jobTitle)
    driver.find_element_by_xpath(locationPath).send_keys(location, Keys.DOWN, Keys.RETURN)
    Select(driver.find_element_by_xpath(yearsOfExpMinPath)).select_by_visible_text(yearsOfExperienceMin)
    Select(driver.find_element_by_xpath(yearsOfExpMaxPath)).select_by_visible_text(yearsOfExperienceMax)
    time.sleep(1)
    # The description field is a rich-text widget: focus it, then type.
    driver.find_element_by_xpath(jobDescPath).click()
    driver.find_element_by_xpath(jobDescPath).send_keys(jobDescription)
    time.sleep(1)
    Select(driver.find_element_by_xpath(categoryPath)).select_by_visible_text(category)
    Select(driver.find_element_by_xpath(functionalAreaPath)).select_by_visible_text(functionalArea)
    driver.find_element_by_xpath(postJobPath).click()
    print("Posted on hirist jobs")
    time.sleep(4)
def instahyre():
    """Log in to instahyre.com and post a free job from spreadsheet column 5.

    Fix vs. the original: the four bare ``except:`` clauses (which also
    swallowed KeyboardInterrupt/SystemExit) now catch Exception.
    """
    InstaHyreId = worksheet.cell(1,5).value
    InstaHyrepwd = worksheet.cell(2,5).value
    jobTitle = worksheet.cell(5,5).value
    jobFunction = worksheet.cell(11,5).value
    workExpMin = str(int(worksheet.cell(8,5).value)) + " years"
    workExpMax = str(int(worksheet.cell(9,5).value)) + " years"
    salaryMin = str(worksheet.cell(6,5).value)
    salaryMax = str(worksheet.cell(7,5).value)
    jobLocation = worksheet.cell(10,5).value
    jobDesc = worksheet.cell(24,5).value
    # Index of the skill checkbox to tick, spliced into skillsPath below.
    skills = str(int(worksheet.cell(15,5).value))
    site_id = "https://www.instahyre.com/login/"
    jobId = "https://www.instahyre.com/employer/jobs/0/0/"
    usernamePath = """//*[@id="email"]"""
    passwordPath = """//*[@id="password"]"""
    addNewJobPath = """//*[@id="jobs"]/div/div[1]/button"""
    notNowPath = """//*[@id="select-free-job-type"]"""
    jobTitlePath = """//*[@id="job-form"]/form/fieldset/div[2]/div[1]/div/input"""
    jobFunctionPath = """//*[@id="job-form"]/form/fieldset/div[2]/div[2]/div/select"""
    workExpMinPath = """//*[@id="job-form"]/form/fieldset/div[3]/div[1]/div/div/div[1]/div/select"""
    workExpMaxPath = """//*[@id="job-form"]/form/fieldset/div[3]/div[1]/div/div/div[2]/div/select"""
    salaryMinPath = """//*[@id="job-form"]/form/fieldset/div[3]/div[2]/div/div/div[1]/div/input"""
    salaryMaxPath = """//*[@id="job-form"]/form/fieldset/div[3]/div[2]/div/div/div[2]/div/input"""
    jobLocationPath = """//*[@id="preferred-location-selectized"]"""
    iframePath = """//*[@id="id-job-1-quill"]/iframe"""
    jobDescPath = """//*[@id="quill-1"]"""
    skillsPath = """//*[@id="job-form"]/form/fieldset/div[7]/div/div/div/div/li[""" + skills + "]/label/input"
    submitPath = """//*[@id="employer-jobs-save-btn"]"""
    freePostPath = """//*[@id="select-free-job-type"]"""
    driver.get(site_id)
    driver.implicitly_wait(1)
    time.sleep(1)
    # Retry the login form until it renders (up to 20 attempts).
    for i in range(20):
        try:
            driver.find_element_by_xpath(usernamePath).send_keys(InstaHyreId)
            driver.find_element_by_xpath(passwordPath).send_keys(InstaHyrepwd, Keys.RETURN)
            driver.implicitly_wait(10)
            time.sleep(10)
            break
        except Exception:
            driver.implicitly_wait(1)
            time.sleep(1)
    print("1")
    # Dismiss the job-type modal if it appears; absence is not an error.
    try:
        driver.find_element_by_xpath(notNowPath).click()
        driver.implicitly_wait(3)
        time.sleep(3)
    except Exception:
        driver.implicitly_wait(1)
        time.sleep(1)
    print("2")
    driver.get(jobId)
    driver.implicitly_wait(5)
    time.sleep(3) # seconds
    time.sleep(4)
    # Retry the "add new job" button until it is clickable.
    for i in range(20):
        try:
            driver.find_element_by_xpath(addNewJobPath).click()
            break
        except Exception:
            driver.implicitly_wait(2)
            time.sleep(2)
    driver.implicitly_wait(3)
    time.sleep(3) # seconds
    driver.find_element_by_xpath(jobTitlePath).send_keys(jobTitle)
    Select(driver.find_element_by_xpath(jobFunctionPath)).select_by_visible_text(jobFunction)
    Select(driver.find_element_by_xpath(workExpMinPath)).select_by_visible_text(workExpMin)
    driver.implicitly_wait(1)
    time.sleep(3)
    Select(driver.find_element_by_xpath(workExpMaxPath)).select_by_visible_text(workExpMax)
    driver.find_element_by_xpath(salaryMinPath).send_keys(salaryMin)
    driver.find_element_by_xpath(salaryMaxPath).send_keys(salaryMax)
    driver.find_element_by_xpath(jobLocationPath).send_keys(Keys.BACKSPACE, jobLocation, Keys.ENTER)
    # The description editor lives inside an iframe: switch in, type, switch out.
    iframe = driver.find_element_by_xpath(iframePath)
    driver.switch_to.frame(iframe)
    driver.find_element_by_xpath(jobDescPath).click()
    driver.find_element_by_xpath(jobDescPath).send_keys(jobDesc)
    driver.switch_to.default_content()
    driver.find_element_by_xpath(skillsPath).click()
    driver.find_element_by_xpath(submitPath).click()
    driver.implicitly_wait(2)
    time.sleep(3)
    # The free-post confirmation is optional; ignore it when absent.
    try:
        driver.find_element_by_xpath(freePostPath).click()
    except Exception:
        pass
    print("Posted on InstaHyre jobs")
    driver.implicitly_wait(4)
    time.sleep(3)
def indeed():
    """Log in to indeed.com and post a job from spreadsheet column 6.

    Currently disabled in main().  Fix vs. the original: the two bare
    ``except:`` clauses now catch Exception, and the unused ``ele``
    binding is gone.
    """
    IndeedId = worksheet.cell(1,6).value
    Indeedpwd = worksheet.cell(2,6).value
    companyName = worksheet.cell(5,6).value
    jobTitle = worksheet.cell(11,6).value
    location = worksheet.cell(10,6).value
    name = worksheet.cell(13,6).value
    phnno = str(int(worksheet.cell(20,6).value))
    # Number of DOWN keypresses for the employment-type dropdown, and the
    # index spliced into contractTypePath below.
    typeofEmp = 2
    contractType = "5"
    salaryfrom = str(int(worksheet.cell(6,6).value))
    salaryto = str(int(worksheet.cell(7,6).value))
    jobDesc = worksheet.cell(24,6).value
    # ALL SITE PATHS
    site_id = "https://secure.indeed.com/account/login"
    username = """//*[@id="login-email-input"]"""
    password = """//*[@id="login-password-input"]"""
    signInBtn = """//*[@id="login-submit-button"]"""
    validationMsgPath = """//*[@id="login-recaptcha-message-error"]"""
    postJobSite_id = "https://employers.indeed.com/p#post-job"
    companyNamePath = """//*[@id="JobCompanyName"]"""
    jobTitlePath = """//*[@id="JobTitle"]"""
    locationPath = """//*[@id="cityOrPostalCode"]"""
    namePath = """//*[@id="AdvertiserName"]"""
    phnnoPath = """//*[@id="AdvertiserPhoneNumber"]"""
    typeofEmpPath = """//*[@id="JobEmploymentType"]"""
    contractTypePath = """//*[@id="label-checkbox-option-""" + contractType + '"]'
    salaryfromPath = """//*[@id="JobSalary1"]"""
    salarytoPath = """//*[@id="JobSalary2"]"""
    jobDescPath = """//*[@id="AppendedJobDescription-editor-content"]"""
    employerAssistAgreePath = """//*[@id="plugin-smbcommunication-EmployerAssistLegalModal-modal_box_content"]/div/div/div/div/div[2]/div[2]/div[1]/button[1]"""
    removeBannerPath = """//*[@id="plugin-pie-AddCollaboratorsTheseusModal-modal_close_button"]"""
    escapeBodyPath = "/html/body"
    applicantQualificationsPath = """//*[@id="QualificationsVisibility"]/div[1]/div[2]/div/label/div[2]"""
    skillsAssessmentsPath = """//*[@id="SkillsAssessmentVisibility"]/div[1]/div[2]/div/label/div[2]"""
    ContinuePath = """//*[@id="sheet-next-button"]"""
    # Sign in.
    driver.get(site_id)
    driver.find_element_by_xpath(username).send_keys(IndeedId)
    driver.find_element_by_xpath(password).send_keys(Indeedpwd)
    driver.find_element_by_xpath(signInBtn).click()
    driver.implicitly_wait(2)
    # Walk through the multi-page post-job wizard.
    driver.get(postJobSite_id)
    driver.implicitly_wait(2)
    driver.find_element_by_xpath(companyNamePath).clear()
    driver.find_element_by_xpath(companyNamePath).send_keys(companyName)
    driver.find_element_by_xpath(jobTitlePath).send_keys(jobTitle, Keys.DOWN, Keys.ENTER)
    driver.find_element_by_xpath(locationPath).send_keys(location, Keys.DOWN, Keys.ENTER)
    driver.find_element_by_xpath(ContinuePath).click()
    driver.implicitly_wait(4)
    # The contact page is skipped by Indeed for returning posters.
    try:
        driver.find_element_by_xpath(namePath).send_keys(name)
        driver.find_element_by_xpath(phnnoPath).send_keys(phnno, Keys.TAB)
        driver.find_element_by_xpath(ContinuePath).click()
    except Exception:
        pass
    driver.implicitly_wait(4)
    for i in range(typeofEmp):
        driver.find_element_by_xpath(typeofEmpPath).send_keys(Keys.DOWN)
    driver.implicitly_wait(2)
    driver.find_element_by_xpath(contractTypePath).click()
    driver.find_element_by_xpath(salaryfromPath).send_keys(salaryfrom)
    driver.find_element_by_xpath(salarytoPath).send_keys(salaryto)
    driver.implicitly_wait(2)
    driver.find_element_by_xpath(jobDescPath).send_keys(jobDesc)
    driver.find_element_by_xpath(ContinuePath).click()
    driver.implicitly_wait(2)
    driver.find_element_by_xpath(employerAssistAgreePath).click()
    driver.implicitly_wait(2)
    # The qualification toggles may be absent; fall back to dismissing any
    # overlay with ESC so the wizard can continue.
    try:
        driver.find_element_by_xpath(applicantQualificationsPath).click()
        driver.find_element_by_xpath(skillsAssessmentsPath).click()
    except Exception:
        webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
        driver.implicitly_wait(1)
    driver.find_element_by_xpath(ContinuePath).click()
    driver.implicitly_wait(3)
    driver.find_element_by_xpath(ContinuePath).click()
    print("Posted on indeed jobs")
    driver.implicitly_wait(4)
def olx():
    """Log in to olx.in and post a job ad from spreadsheet column 1."""
    OlxId = worksheet.cell(1,1).value
    OlxPwd = worksheet.cell(2,1).value
    # Indices spliced into the XPath locators below to pick list entries.
    typeOfJob = str(int(worksheet.cell(5,1).value))
    salaryPeriod = str(int(worksheet.cell(16,1).value))
    posType = str(int(worksheet.cell(17,1).value))
    salaryfrom = str(int(worksheet.cell(6,1).value))
    salaryto = str(int(worksheet.cell(7,1).value))
    adTitle = worksheet.cell(13,1).value
    description = worksheet.cell(24,1).value
    State = worksheet.cell(10,1).value
    City = worksheet.cell(18,1).value
    Neighbourhood = worksheet.cell(19,1).value
    phno = str(int(worksheet.cell(20,1).value))
    site_id = "https://www.olx.in/"
    postJobSite_id = "https://www.olx.in/post"
    loginPath = """//*[@id="container"]/header/div/div/div[4]/button"""
    emailPath = """/html/body/div[3]/div/div/div/button[3]"""
    usernamePath = """/html/body/div[3]/div/div/form/div/div[2]/div/div[1]/div/div/input"""
    passwordPath = """//*[@id="password"]"""
    jobPath = """//*[@id="container"]/main/div/div/div/div/div/div/ul/li[4]"""
    typeOfJobPath = """//*[@id="container"]/main/div/div/div/div/div/div/ul[2]/li[""" + typeOfJob + "]"
    salaryPeriodPath = """//*[@id="container"]/main/div/div/div/div/div[3]/div/div[2]/div/div[1]/button[""" + salaryPeriod + "]"
    posTypePath = """//*[@id="container"]/main/div/div/div/div/div[3]/div/div[3]/div/div[1]/button[""" + posType + "]"
    salaryFromPath = """//*[@id="salary_from"]"""
    salaryToPath = """//*[@id="salary_to"]"""
    adTitlePath = """//*[@id="title"]"""
    descPath = """//*[@id="description"]"""
    StatePath = """//*[@id="State"]"""
    cityPath = """//*[@id="City"]"""
    NeighbourhoodPath = """//*[@id="Locality"]"""
    phnoPath = """//*[@id="publicPhone"]"""
    # Sign in via the email login flow.
    driver.get(site_id)
    time.sleep(2)
    driver.find_element_by_xpath(loginPath).click()
    time.sleep(3)
    driver.find_element_by_xpath(emailPath).click()
    time.sleep(3)
    driver.find_element_by_xpath(usernamePath).send_keys(OlxId, Keys.RETURN)
    time.sleep(4)
    driver.find_element_by_xpath(passwordPath).send_keys(OlxPwd, Keys.RETURN)
    time.sleep(3)
    # Open the posting form (refresh works around a stale first render).
    driver.get(postJobSite_id)
    driver.refresh()
    time.sleep(3)
    driver.find_element_by_xpath(jobPath).click()
    time.sleep(1)
    driver.find_element_by_xpath(typeOfJobPath).click()
    time.sleep(4)
    driver.find_element_by_xpath(salaryPeriodPath).click()
    driver.find_element_by_xpath(posTypePath).click()
    driver.find_element_by_xpath(salaryFromPath).send_keys(salaryfrom)
    driver.find_element_by_xpath(salaryToPath).send_keys(salaryto)
    driver.find_element_by_xpath(adTitlePath).send_keys(adTitle)
    driver.find_element_by_xpath(descPath).send_keys(description)
    # Location dropdowns cascade; the sleeps let each repopulate.
    Select(driver.find_element_by_xpath(StatePath)).select_by_visible_text(State)
    time.sleep(4)
    Select(driver.find_element_by_xpath(cityPath)).select_by_visible_text(City)
    time.sleep(4)
    Select(driver.find_element_by_xpath(NeighbourhoodPath)).select_by_visible_text(Neighbourhood)
    driver.find_element_by_xpath(phnoPath).send_keys(phno)
    print("Posted on olx jobs")
# Run all posting flows when the module is executed.
main()
73606123856 | import pandas as pd
from slicer import slice_ticks
import os
# Input tick CSVs live under ticks/bitmex/<symbol>/, output bars under bars/.
TICKS_PATH = os.path.abspath(os.path.dirname(__file__) + '../../../ticks/bitmex')
BARS_PATH = os.path.abspath(os.path.dirname(__file__) + '../../../bars')
# Per-symbol tick granularity used by slice_ticks when building bars.
SYMBOLS = [{
    'symbol': 'XBTUSD',
    'price_step': 0.5,
    'size_step': 1
}, {
    'symbol': 'ETHUSD',
    'price_step': 0.05,
    'size_step': 1
}]
# Bar periods produced for every symbol.
FREQ = ['10s', '1min', '5min', '10min']
def read_file(sym, csv_name, freq):
    """Load one tick CSV for *sym* and slice it into bars.

    Args:
        sym: dict with 'symbol', 'price_step' and 'size_step' keys.
        csv_name: file name inside TICKS_PATH/<symbol>/.
        freq: a single pandas frequency string, or a list of them.

    Returns:
        A DataFrame (single freq), a {freq: DataFrame} dict (list of
        freqs), or None for an unsupported freq type.
    """
    symbol = sym['symbol']
    filename = f'{TICKS_PATH}/{symbol}/{csv_name}'
    # Show which file is being processed (the f-string previously
    # interpolated nothing and printed a literal placeholder).
    print(f"reading '{filename}'... ", end='')
    df = pd.read_csv(filename)
    df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S.%f')
    if isinstance(freq, str):
        res = slice_ticks(df, freq, price_step=sym['price_step'], size_step=sym['size_step'])
        print(res.shape)
        return res
    elif isinstance(freq, list):
        res = {}
        for f in freq:
            res[f] = slice_ticks(df, f, price_step=sym['price_step'], size_step=sym['size_step'])
        print([(f, *res[f].shape) for f in freq])
        return res
    else:
        print('Unsupported freq type')
        return None
# For every symbol: read all of its tick files at every FREQ, then merge the
# per-file bars of each frequency and write one zipped CSV per (symbol, freq).
for sym in SYMBOLS:
    symbol = sym['symbol']
    files = os.listdir(f'{TICKS_PATH}/{symbol}')
    files.sort()
    dfs = [read_file(sym, f, FREQ) for f in files]
    for f in FREQ:
        dfs_f = [d1[f] for d1 in dfs]
        df_all = pd.concat(dfs_f, ignore_index=True, axis=0) if len(dfs_f) > 0 else pd.DataFrame()
        df_all.sort_values(by='time', ignore_index=True, inplace=True)
        filename = f'{BARS_PATH}/bitmex-{symbol}-{f}.csv.zip'
        # Show the output target (the f-string previously interpolated
        # nothing and printed a literal placeholder).
        print(f"writing output to '{filename}'")
        df_all.to_csv(filename, date_format='%Y-%m-%d %H:%M:%S', index=False, compression='zip')
print('done!')
20190801199 | import tensorflow as tf
from augment_io.spec_aug_tf import TFFreqMasking, TFTimeMasking
# Registry mapping config keys to spec-augment classes; Augmentation.parse
# silently skips keys that are not listed here.
TFAUGMENTATIONS = {
    "freq_masking": TFFreqMasking,
    "time_masking": TFTimeMasking,
}
class TFAugmentationExecutor:
    """Applies a sequence of augmentations to an input, in order."""

    def __init__(self, augmentations):
        # Each element must expose an augment(inputs) method.
        self.augmentations = augmentations

    def augment(self, inputs):
        """Pipe *inputs* through every augmentation and return the result."""
        result = inputs
        for augmentation in self.augmentations:
            result = augmentation.augment(result)
        return result
class Augmentation:
    """Builds 'before' / 'after' augmentation pipelines from a config dict."""

    def __init__(self, config=None, use_tf=False):
        # Work on a shallow copy: the original popped keys straight out of
        # the caller's dict, mutating it as a side effect.
        # NOTE: use_tf is accepted for interface compatibility but unused.
        config = dict(config) if config else {}
        tf.logging.info("** config **")
        tf.logging.info(config)
        self.before = self.parse(config.pop("before", {}))
        self.after = self.parse(config.pop("after", {}))

    @staticmethod
    def parse(config):
        """Map {name: kwargs} entries to augmentation objects.

        Names missing from TFAUGMENTATIONS are skipped; a null value means
        "use the augmentation's default arguments".
        """
        augmentations = []
        for key, value in config.items():
            au = TFAUGMENTATIONS.get(key, None)
            if au is None:
                continue
            aug = au(**value) if value is not None else au()
            augmentations.append(aug)
        return TFAugmentationExecutor(augmentations)
| yyht/deepspeech | augment_io/augment_tf.py | augment_tf.py | py | 1,020 | python | en | code | 2 | github-code | 13 |
25686745470 | import netCDF4
import numpy as np
# Extract lat/lon/chlorophyll grids from a MODIS L3 netCDF file into CSVs.
nc_file = 'data/AQUA_MODIS.20230101.L3m.DAY.CHL.chlor_a.4km.NRT.nc'
nc = netCDF4.Dataset(nc_file, mode='r')
# [:] copies variable data into memory as numpy arrays.
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
chloro = nc.variables['chlor_a'][:]
# Close the dataset once the data is in memory (the original leaked the open
# handle and evaluated nc.variables.keys() without using the result).
nc.close()
np.savetxt('lat.csv', lat, delimiter=',')
np.savetxt('lon.csv', lon, delimiter=',')
np.savetxt('chloro.csv', chloro, delimiter=',')
| WarXenozord/SpaceApps2023 | netCFDtoCSV.py | netCFDtoCSV.py | py | 409 | python | en | code | 0 | github-code | 13 |
def longest_common_prefix(strings):
    """Return the longest common prefix of *strings*.

    Returns the literal string "Empty String" when the list is empty
    (kept for compatibility with existing callers).
    """
    if not strings:
        return "Empty String"
    # zip(*strings) stops at the shortest string, so the index can never
    # run past the end of any element.
    common_len = 0
    for column in zip(*strings):
        if any(ch != column[0] for ch in column):
            break
        common_len += 1
    return strings[0][:common_len]
if __name__ == "__main__":
    # Exercise all three sample cases (the original built test2/test3 but
    # never used them -- only test1 was printed).
    test1 = ["flow", "flow", "flow"]
    test2 = ["dog", "cat", "bird"]
    test3 = ["class", "classes", "c"]
    for case in (test1, test2, test3):
        print(longest_common_prefix(case))
| TanujSharma369/258286_DailyCommits | third.py | third.py | py | 568 | python | en | code | 0 | github-code | 13 |
5850658570 | import os
import sys
import logging
import argparse
import time
import rethinkdb
from cachetools import LRUCache
from gossip.common import NullIdentifier
from sawtooth.client import SawtoothClient
from config import ParseConfigurationFiles
from config import SetupLoggers
# Module-level sync state.
logger = logging.getLogger()
# Every full_sync_interval blocks a full state dump replaces delta updates.
full_sync_interval = 50
_full_sync_counter = 0
# LRU cache of recently fetched blocks, keyed by block id.
block_cache = LRUCache(maxsize=100)
def GetCurrentBlockList(client, count):
    """
    Retrieve the identifiers of the most recent blocks, head first.
    :param SawtoothClient client: client for accessing the ledger
    :param int count: maximum number of block ids to fetch
    :return: list of block identifiers, or None if the request failed
    """
    try:
        return client.get_block_list(count)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        logger.exception('failed to retrieve the current block list')
        return None
def GetBlock(client, blockid):
    """
    Get a block from the ledger, caching results for future use.
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for the current block
    :return: block data, or None if retrieval failed
    """
    global block_cache
    if blockid in block_cache:
        return block_cache[blockid]
    try:
        block = client.get_block(block_id=blockid)
        # Only successful fetches are cached.
        block_cache[blockid] = block
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        logger.exception('failed to retrieve block %s', blockid)
        return None
    return block
def GetPreviousBlockID(client, blockid):
    """
    Return the PreviousBlockID field of the identified block
    (NullIdentifier when the block has none).
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for the current block
    """
    return GetBlock(client, blockid).get('PreviousBlockID', NullIdentifier)
def GetBlockNum(client, blockid):
    """
    Return the BlockNum field of the identified block as an int
    (-1 when the field is missing).
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for the current block
    """
    return int(GetBlock(client, blockid).get('BlockNum', -1))
def GetBlockStateDelta(client, blockid):
    """
    Get the state delta for the identified block.
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for the current block
    :return: delta data, or None if retrieval failed
    """
    try:
        return client.get_store_delta_for_block(blockid)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        logger.exception('failed to retrieve state delta for block %s',
                         blockid)
        return None
def GetBlockStateFull(client, blockid):
    """
    Get the full ledger state as of the identified block.
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for the current block
    :return: state data, or None if retrieval failed
    """
    try:
        return client.get_store_objects_through_block(blockid)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        logger.exception('failed to retrieve the state of block %s', blockid)
        return None
def GetTransaction(client, txnid):
    """
    Get data for the specified transaction.
    :param SawtoothClient client: client for accessing the ledger
    :param str txnid: identifier for a transaction
    :return: transaction data, or None if retrieval failed
    """
    try:
        return client.get_transaction(transaction_id=txnid)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        logger.exception('failed to retrieve transaction %s', txnid)
        return None
def CleanupOldState(blocklist):
    """
    Remove the per-block state tables that are no longer necessary.
    :param list blocklist: list of block identifiers whose tables to keep
    """
    # Materialize the names as a set: O(1) membership tests, and safe under
    # Python 3 where map() returns a one-shot iterator that would be
    # exhausted after the first 'in' test.
    statenames = set('blk' + b for b in blocklist)
    tablelist = rethinkdb.table_list().run()
    for table in tablelist:
        if table.startswith('blk') and table not in statenames:
            try:
                logger.info('drop old state table %s', table)
                rethinkdb.table_drop(table).run()
            except Exception:
                # Narrowed from a bare except; log and keep going.
                logger.exception('failed to drop state table %s', table)
def SaveToBlockList(blockinfo):
    """
    Save block information to the block list table.
    :param dict blockinfo: block data (must contain an 'id' key)
    """
    logger.debug('insert block %s into block list table', blockinfo['id'])
    try:
        blktable = rethinkdb.table('block_list')
        blktable.insert(blockinfo).run()
    except Exception:
        # Narrowed from a bare except; failures are logged and swallowed so
        # one bad block does not stop the sync loop.
        logger.exception('failed to insert block %s into block list',
                         blockinfo['id'])
def SaveBlockState(client, blockid):
    """
    Synchronize the current ledger state into the database. This creates a
    new table identified by the block identifier.
    :param SawtoothClient client: client for accessing the ledger
    :param str blockid: identifier for a block
    """
    # Get/create the table for the block state
    logger.debug('create state table for block %s', blockid)
    currentblockname = 'blk' + blockid
    rethinkdb.table_create(currentblockname).run()
    # Check to see if there is already a collection in the
    # database for the previous block
    previousblockid = GetPreviousBlockID(client, blockid)
    previousblockname = 'blk' + previousblockid
    assert (previousblockid == NullIdentifier or
            previousblockname in rethinkdb.table_list().run())
    # we use the full_sync_interval to ensure that we never
    # get too far away from the ledger, this shouldn't be
    # necessary and should be dropped later
    global _full_sync_counter, full_sync_interval
    _full_sync_counter -= 1
    if _full_sync_counter > 0:
        # update from the delta to the previous state
        logger.info('copy block %s from existing block %s',
                    blockid, previousblockid)
        # copy the block in the database
        rethinkdb.table(currentblockname).insert(
            rethinkdb.table(previousblockname)).run()
        # retrieve the deltas
        blockdelta = GetBlockStateDelta(client, blockid)
        if blockdelta:
            blockstate = blockdelta['Store']
            blockdeletes = blockdelta['DeletedKeys']
        else:
            # delta fetch failed: leave the copied previous state untouched
            blockstate = {}
            blockdeletes = []
    else:
        # perform a full state update
        logger.info('copy block %s from ledger', blockid)
        # retreive the complete state
        blockstate = GetBlockStateFull(client, blockid)
        blockdeletes = []
        _full_sync_counter = full_sync_interval
    # the only time we have the information to add the name is when
    # we have the full dump, so names may be out of data between
    # full syncs
    # And add all the objects from the current state into the new collection
    # NOTE(review): .iteritems() is Python 2 only -- this module targets Py2.
    for (objid, objinfo) in blockstate.iteritems():
        objinfo['id'] = objid
        rethinkdb.table(currentblockname).get(objid).replace(objinfo).run()
    # and delete the ones we dont need
    for objid in blockdeletes:
        rethinkdb.table(currentblockname).get(objid).delete().run()
def SaveTransactions(client, blockinfo):
    """
    Save the transactions committed in a block into the transaction table.
    Args:
        client -- SawtoothClient for accessing the ledger
        blockinfo -- dictionary, block data with a 'TransactionIDs' list
    """
    logger.debug('save transactions for block %s in transaction table',
                 blockinfo['id'])
    # save the transactions in the block; transactions that could not be
    # fetched (GetTransaction returned None) are skipped
    txnlist = []
    for txnid in blockinfo['TransactionIDs']:
        txn = GetTransaction(client, txnid)
        if txn:
            txn['id'] = txnid
            txnlist.append(txn)
    if txnlist:
        try:
            txntable = rethinkdb.table('txn_list')
            txntable.insert(txnlist).run()
        except Exception:
            # Narrowed from a bare except; log and continue.
            logger.exception(
                'failed to insert txns for block %s into transaction table',
                blockinfo['id'])
def UpdateTransactionState(client, ledgerblocks):
    """
    Update the state of transactions from the transaction collection in the
    exchange database.
    Args:
        client -- SawtoothClient for accessing the ledger
        ledgerblocks -- list of block identifiers in the current ledger
    """
    # add the failed block to the list so we dont keep trying to
    # fix a transaction that is marked as unfixable
    blklist = set(ledgerblocks[:])
    blklist.add('failed')
    # ok... now we are looking for any transactions that are not in
    # one of the blocks in the committed list, transactions that are
    # in one of these blocks already have the correct state registered
    # one concern about this approach is that transactions that fail
    # are likely to stick around for a long time because we don't know
    # if they might magically show up in another block, for now I'm
    # just going to assume that a transaction that fails, fails
    # permanently
    logger.debug('update transaction state from blocks')
    # this is the query that we should use, but it isn't working probably
    # because of missing InBlock fields but there are no logs to be sure
    txnquery = rethinkdb.table('transactions').filter(
        lambda doc: ~(rethinkdb.expr(blklist).contains(doc['InBlock'])))
    txniter = txnquery.run()
    for txndoc in txniter:
        txnid = txndoc.get('id')
        assert txnid
        if txndoc.get('InBlock') in blklist:
            logger.debug('already processed transaction %s', txnid)
            continue
        try:
            logger.info('update status of transaction %s', txnid)
            txn = client.get_transaction(txnid)
            txndoc['Status'] = txn['Status']
            if txn.get('InBlock'):
                txndoc['InBlock'] = txn['InBlock']
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt propagates.
            # if we cannot retrieve the transaction then assume that it has
            # failed to commit, this might be an invalid assumption if the
            # validator itself has failed though presumably that would have
            # been discovered much earlier
            logger.info('failed to retrieve transaction %s, marking it failed',
                        txnid)
            # NOTE(review): 3 appears to be the 'failed' status code --
            # confirm against the client's status enum.
            txndoc['Status'] = 3
            txndoc['InBlock'] = 'failed'
        rethinkdb.table('transactions').get(txnid).replace(txndoc).run()
def AddBlock(client, blockid):
    """
    Record a newly committed block: its metadata, its transactions, and a
    snapshot table of ledger state as of that block.
    Args:
        client -- SawtoothClient for accessing the ledger
        blockid -- string, sawtooth identifier
    """
    logger.info('add block %s', blockid)
    blockinfo = GetBlock(client, blockid)
    blockinfo['id'] = blockid
    SaveToBlockList(blockinfo)
    SaveTransactions(client, blockinfo)
    SaveBlockState(client, blockid)
def DropBlock(client, blockinfo):
    """
    Drop a block and all associated data, this can happen when a block
    is removed from the committed chain by a fork.
    Args:
        client -- SawtoothClient for accessing the ledger
        blockinfo -- block data
    """
    logger.info('drop block %s', blockinfo['id'])
    try:
        rethinkdb.table('block_list').get(blockinfo['id']).delete().run()
    except Exception:
        logger.warn('failed to remove block %s from block list table',
                    blockinfo['id'])
    try:
        blockstatetable = 'blk' + blockinfo['id']
        if blockstatetable in rethinkdb.table_list().run():
            # Fixed: the original called .tableDrop() on a table object,
            # which is not part of the rethinkdb driver API and always
            # raised (silently swallowed by the bare except), leaking state
            # tables.  table_drop() matches CleanupOldState above.
            rethinkdb.table_drop(blockstatetable).run()
    except Exception:
        logger.warn('failed to drop state table for block %s',
                    blockinfo['id'])
    for txnid in blockinfo['TransactionIDs']:
        try:
            rethinkdb.table('txn_list').get(txnid).delete().run()
        except Exception:
            logger.warn('failed to drop transaction %s for block %s',
                        txnid, blockinfo['id'])
def SaveChainHead(client, blockid):
    """
    Record the identity and number of the chain-head block in the
    'currentblock' document of the chain_info table.
    Args:
        client -- SawtoothClient for accessing the ledger
        blockid -- string, sawtooth identifier
    """
    doc = {
        'id': 'currentblock',
        'blockid': blockid,
        'blocknum': GetBlockNum(client, blockid),
    }
    rethinkdb.table('chain_info').get('currentblock').replace(doc).run()
def ProcessBlockList(client, ledgerblocks):
    """
    Reconcile the database with the ledger's current chain: drop blocks
    that fell off the chain (fork), add new ones oldest-first, record the
    chain head, and clean up stale state tables.
    Note: *ledgerblocks* is consumed in place -- ids already present in the
    database are removed from it.
    Args:
        client -- SawtoothClient for accessing the ledger
        ledgerblocks -- list of block identifiers in the current ledger,
                        head of the chain first
    """
    logger.info('process new blocks')
    headblockid = ledgerblocks[0]
    statelist = ledgerblocks[:10]
    deleteblocks = []
    bcursor = rethinkdb.table('block_list').run()
    for blockinfo in bcursor:
        try:
            ledgerblocks.remove(blockinfo['id'])
        except ValueError:
            deleteblocks.append(blockinfo)
    # Renamed loop variable: these are block *documents*, which is what
    # DropBlock expects (the old name 'blockid' was misleading).
    for blockinfo in deleteblocks:
        DropBlock(client, blockinfo)
    # work through the list of new blocks from oldest to newest
    for blockid in reversed(ledgerblocks):
        AddBlock(client, blockid)
    SaveChainHead(client, headblockid)
    CleanupOldState(statelist)
def InitializeDatabase(dbhost, dbport, dbname):
    """
    Ensure the sync database and its base tables exist.
    :param str dbhost: rethinkdb server host
    :param int dbport: rethinkdb server port
    :param str dbname: name of the database to create/use
    """
    rconn = rethinkdb.connect(dbhost, dbport)
    # repl() registers this connection as the implicit default used by the
    # .run() calls below.
    rconn.repl()
    if dbname not in rethinkdb.db_list().run():
        logger.info('create the sync database %s', dbname)
        rethinkdb.db_create(dbname).run()
    rconn.use(dbname)
    tables = rethinkdb.table_list().run()
    for tabname in ['block_list', 'chain_info', 'txn_list']:
        if tabname not in tables:
            rethinkdb.table_create(tabname).run()
    rconn.close()
def LocalMain(config):
    """
    Main processing loop for the synchronization process: poll the ledger
    every `Refresh` seconds and mirror new blocks into rethinkdb.
    """
    # To make this more robust we should probably pass in several
    # URLs and handle failures more cleanly by swapping to alternates
    client = SawtoothClient(config['LedgerURL'],
                            store_name='BondTransaction',
                            name='LedgerSyncClient')
    global full_sync_interval
    full_sync_interval = config.get('FullSyncInterval', 50)
    # NOTE(review): blockcount is read but never used below.
    blockcount = config.get('BlockCount', 10)
    refresh = config['Refresh']
    # pull database and collection names from the configuration and set up the
    # connections that we need
    dbhost = config.get('DatabaseHost', 'localhost')
    dbport = int(config.get('DatabasePort', 28015))
    dbname = config['DatabaseName']
    InitializeDatabase(dbhost, dbport, dbname)
    lastblockid = None
    while True:
        try:
            logger.debug('begin synchronization')
            # fresh implicit (repl) connection for each pass
            rconn = rethinkdb.connect(dbhost, dbport, dbname)
            rconn.repl()
            currentblocklist = GetCurrentBlockList(client, full_sync_interval)
            currentblockid = currentblocklist[0]
            UpdateTransactionState(client, currentblocklist)
            # only reprocess when the chain head moved
            if currentblockid and currentblockid != lastblockid:
                ProcessBlockList(client, currentblocklist)
                logger.info('synchronization completed successfully, '
                            'current block is %s', currentblockid)
                lastblockid = currentblockid
        except:
            # deliberately broad: the loop must survive any failure
            logger.exception('synchronization failed')
        finally:
            logger.debug('close the database connection')
            rconn.close()
        logger.debug('sleep for %s seconds', float(refresh))
        time.sleep(float(refresh))
# Environment-derived locations; EXPLORER* variables take precedence over
# the legacy CURRENCY* names.
CurrencyHost = os.environ.get("HOSTNAME", "localhost")
# NOTE(review): if neither EXPLORERHOME nor CURRENCYHOME is set this is None
# and the os.path.join calls below raise -- confirm deployment always sets one.
CurrencyHome = os.environ.get("EXPLORERHOME") or os.environ.get("CURRENCYHOME")
CurrencyEtc = (os.environ.get("EXPLORERETC") or
               os.environ.get("CURRENCYETC") or
               os.path.join(CurrencyHome, "etc"))
CurrencyLogs = (os.environ.get("EXPLORERLOGS") or
                os.environ.get("CURRENCYLOGS") or
                os.path.join(CurrencyHome, "logs"))
# Script name without extension; used to locate its config file.
ScriptBase = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Substitution map handed to the configuration-file parser.
config_map = {
    'base': ScriptBase,
    'etc': CurrencyEtc,
    'home': CurrencyHome,
    'host': CurrencyHost,
    'logs': CurrencyLogs
}
def ParseCommandLine(config, args):
    """
    Merge command-line options into *config* (updated in place); defaults
    come from any values already present in *config*.
    Args:
        config -- dict, configuration to update
        args -- list of command-line argument strings
    """
    parser = argparse.ArgumentParser()
    # Fixed: the original line ended with a trailing comma, which made
    # help_text a one-element tuple instead of a string.
    help_text = 'Name of the log file, __screen__ for standard output'
    parser.add_argument('--logfile',
                        help=help_text,
                        default=config.get('LogFile', '__screen__'))
    parser.add_argument('--loglevel',
                        help='Logging level',
                        default=config.get('LogLevel', 'INFO'))
    parser.add_argument('--url',
                        help='Default url for connection to the ledger',
                        default=config.get('LedgerURL',
                                           'http://localhost:8800'))
    parser.add_argument('--dbhost',
                        help='Host where the rethink db resides',
                        default=config.get('DatabaseHost', 'localhost'))
    parser.add_argument('--dbport',
                        help='Port where the rethink db listens',
                        default=config.get('DatabasePort', 28015))
    help_text = 'Name of the rethink database where data will be stored'
    parser.add_argument('--dbname',
                        help=help_text,
                        default=config.get('DatabaseName', 'ledger'))
    parser.add_argument('--refresh',
                        help='Number of seconds between ledger probes',
                        default=config.get('Refresh', 10))
    parser.add_argument('--set',
                        help='Specify arbitrary configuration options',
                        nargs=2,
                        action='append')
    options = parser.parse_args(args)
    config["LogLevel"] = options.loglevel.upper()
    config["LogFile"] = options.logfile
    config['DatabaseHost'] = options.dbhost
    config['DatabasePort'] = options.dbport
    config['DatabaseName'] = options.dbname
    config['Refresh'] = options.refresh
    config["LedgerURL"] = options.url
    # --set pairs are applied last and may override anything above
    if options.set:
        for (k, v) in options.set:
            config[k] = v
def Main():
    """Parse config files and command line, set up logging, run the sync loop."""
    # parse out the configuration file first
    conf_file = ScriptBase + '.js'
    conf_path = [".", "./etc", CurrencyEtc]
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        help='configuration file',
                        default=[conf_file],
                        nargs='+')
    parser.add_argument('--config-dir',
                        # Fixed help text: this option lists directories to
                        # search, not a configuration file.
                        help='configuration directory',
                        default=conf_path,
                        nargs='+')
    # parse_known_args: remaining options are handled by ParseCommandLine
    (options, remainder) = parser.parse_known_args()
    config = ParseConfigurationFiles(options.config,
                                     options.config_dir,
                                     config_map)
    ParseCommandLine(config, remainder)
    SetupLoggers(config)
    LocalMain(config)
| gabykyei/GC_BlockChain_T_Rec | extensions/bond/ui/ledger_sync/main/sync_ledger_cli.py | sync_ledger_cli.py | py | 18,675 | python | en | code | 1 | github-code | 13 |
2816147685 | import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def pytest_addoption(parser):
    """Register the --language command-line option (required by the browser fixture)."""
    parser.addoption('--language', action='store', default=None,
                     help="Choose language")
@pytest.fixture(scope="function")
def browser(request):
    """Yield a Chrome WebDriver whose Accept-Language is set from --language.

    Raises pytest.UsageError when --language was not supplied.
    """
    browser_lang = request.config.getoption("language")
    # Guard clause instead of the original if/else pyramid.
    if not browser_lang:
        # Fixed typo in the original message ("Pleace").
        raise pytest.UsageError("Please enter --language")
    print(f"\nstart browser for {browser_lang} language..")
    options = Options()
    # Ask Chrome to send the chosen Accept-Language header.
    options.add_experimental_option(
        'prefs', {'intl.accept_languages': browser_lang}
    )
    browser = webdriver.Chrome(options=options)
    yield browser
    print("\nquit browser..")
    browser.quit()
| AlexKlo/web_lang_test | conftest.py | conftest.py | py | 807 | python | en | code | 0 | github-code | 13 |
25552156807 | class Student:
    def __init__(self,name,rollno):
        """Create a student and issue them their own Laptop instance."""
        self.name = name
        self.rollno = rollno
        # Instantiate the nested Laptop class; each Student gets its own.
        self.lap = self.Laptop()
    def show(self):
        """Print the student's name and roll number."""
        print(self.name,self.rollno)
    class Laptop:
        """Inner class: the default laptop configuration issued to students."""
        def __init__(self):
            self.brand = 'Dell'
            self.cpu = 'i5'
            self.ram = 16
s1 = Student('rachel', 1)
s2 = Student('rhea', 2)
# Fixed: the original called s16.show(), but no variable s16 exists
# (NameError at runtime); s1 was clearly intended.
s1.show()
lap1 = s1.Laptop()
lap2 = s2.Laptop()
print(id(lap1))
print(id(lap2)) | draksha22/python | InnerClass.py | InnerClass.py | py | 465 | python | en | code | 0 | github-code | 13 |
29423233589 | from __main__ import app, db
from flask import send_file, abort, redirect
from models import ExternalLink
@app.route('/visit_link/<int:link_id>', methods=['GET'])
def visit_link(link_id):
    """Count a visit to the external link and redirect to it; 404 if unknown."""
    link = ExternalLink.query.filter_by(id=link_id).first()
    if link is not None:
        db.session.add(link)
        # NOTE(review): this read-modify-write increment is not atomic under
        # concurrent requests -- confirm whether lost updates matter here.
        link.num_visits += 1
        db.session.commit()
        return redirect(link.url)
    else:
        abort(404)
| javilm/msx-center | routes/visit_link.py | visit_link.py | py | 385 | python | en | code | 0 | github-code | 13 |
36092327315 | from scraper import *
from fileIO import *
from write_html import *
from messenger import *
import os
# Forum section to scrape.
url = "https://www.epicnpc.com/forums/last-cloudia-accounts.1797/"
# Output CSV path -- presumably consumed by the fileIO helpers, TODO confirm.
file_url = "./output.csv"
# Number of listings to process per run.
results = 10
def main():
    """Scrape the first `results` listings, save their URLs/HTML, and notify."""
    document = collect(url)
    listings = get_listings(document)
    for listing in listings[:results]:
        listing_details(listing)
        listing_media(listing)
    write_urls(listings[:results])
    write_html(listings[:results])
    send_message("Thanks for using CarnoldPyBot v1.00")
# Guard the entry point so importing this module does not trigger a scrape.
if __name__ == "__main__":
    main()
| carnoldcoding/EpicScraper | main.py | main.py | py | 531 | python | en | code | 0 | github-code | 13 |
18095875011 | import cv2
import glob
import os
import sys
import json
import imsearch
# Google Cloud Storage backend settings (not used below; kept for switching).
gcp_config = {
    'GOOGLE_APPLICATION_CREDENTIALS': '../.config/cloud-ml-f1954f23eaa8.json',
    'BUCKET_NAME': 'imsearch-testing',
    'STORAGE_MODE': 'gcp'
}
# Load AWS credentials from a local config file and build the S3 settings.
with open('../.config/aws-config.json', 'r') as fp:
    aws_config_file = json.load(fp)
aws_config = {
    'AWS_ACCESS_KEY_ID': aws_config_file['AWS_ACCESS_KEY_ID'],
    'AWS_SECRET_ACCESS_KEY': aws_config_file['AWS_SECRET_ACCESS_KEY'],
    'BUCKET_NAME': aws_config_file['BUCKET_NAME'],
    'STORAGE_MODE': 's3'
}
def show_results(similar, qImage):
    """Display the query image, then each similar result (any key advances)."""
    qImage = imsearch.utils.check_load_image(qImage)
    # OpenCV display expects BGR channel order.
    qImage = cv2.cvtColor(qImage, cv2.COLOR_RGB2BGR)
    cv2.imshow('qImage', qImage)
    for _i, _s in similar:
        rImage = cv2.cvtColor(imsearch.utils.check_load_image(
            _i['image']), cv2.COLOR_RGB2BGR)
        print([x['name'] for x in _i['primary']])
        print(_s)
        cv2.imshow('rImage', rImage)
        cv2.waitKey(0)
if __name__ == "__main__":
    all_images = glob.glob(os.path.join(
        os.path.dirname(__file__), '..', 'images/*.jpg'))
    # Build a fresh S3-backed index from the sample images.
    index = imsearch.init(name='test', **aws_config)
    index.cleanIndex()
    index.addImageBatch(all_images)
    index.createIndex()
    # query with image URL
    img_url = 'https://www.wallpaperup.com/uploads/wallpapers/2014/04/14/332423/d5c09641cb3af3a18087937d55125ae3-700.jpg'
    similar, _ = index.knnQuery(image_path=img_url, k=10, policy='global')
    show_results(similar, img_url)
| rikenmehta03/imsearch | examples/storage.py | storage.py | py | 1,515 | python | en | code | 76 | github-code | 13 |
27547114693 | from django.shortcuts import render
from django.http import JsonResponse
import pickle
import jieba
import re
import json
from keras.models import load_model
from keras.preprocessing import sequence
from django.views.decorators.csrf import csrf_exempt
# One-time, import-time setup: jieba dictionary, TF session/graph, tokenizer
# and the trained sentiment model.
jieba.set_dictionary('app_sentiment/jieba_big_chinese_dict/dict.txt.big')
# We don't use GPU
# NOTE(review): '0' selects GPU 0; to actually disable the GPU this should
# be '' -- confirm the intent.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
#keras==2.2.5 tensorflow==1.14
# (Problem 1) Sometimes exception arises when we use tensorflow in Django
# We should run K.clear_session() to avoid the problem
# K.clear_session() should be used before all the deep learning models are loaded.
# Usually we apply clear_session() only once, and then the problem will not happen again.
# Then we comment out and disable the K.clear_session()
from keras import backend as K
K.clear_session()
from tensorflow.python.keras.backend import set_session
import tensorflow as tf
graph = tf.get_default_graph()
sess = tf.Session()
## Read tokenizer
tokenizer = pickle.load(open('app_sentiment/sentiment_model/model_deep/sentiment_tokenizer.pickle', 'rb'))
## Read CNN model
# (Problem 2) Sometimes exception arises when we use tensorflow in Django
# We should apply set_session() before load_model() to avoid the problem
set_session(sess)
model = load_model('app_sentiment/sentiment_model/model_deep/sentiment_best_model.hdf5')
# home
def home(request):
    """Render the sentiment app landing page."""
    return render(request, "app_sentiment/home.html")
# api get sentiment score
@csrf_exempt
def api_get_sentiment_simple(request):
    """Score form-encoded 'input_text' and return sentiment probabilities as JSON."""
    new_text = request.POST['input_text'] # or get('input_text')
    sentiment_prob = get_sentiment_proba(new_text)
    return JsonResponse(sentiment_prob)
# api get sentiment score
@csrf_exempt
def api_get_sentiment(request):
    """Sentiment API accepting form-encoded, JSON, or plain-text request bodies.

    NOTE(review): an unsupported content type leaves new_text unbound and
    raises UnboundLocalError below -- confirm whether that path can occur.
    """
    # See the content_type and body
    print(request.content_type)
    print(request.body) # byte format
    if request.content_type == "application/x-www-form-urlencoded":
        print("Content type: application/x-www-form-urlencoded")
        new_text = request.POST['input_text'] # or get('input_text')
    elif request.content_type in ["application/json","text/plain"]:
        print("Content type: text/plain or application/json")
        # json.loads parses the raw request body as JSON
        request_json = json.loads(request.body)
        new_text = request_json['input_text']
    sentiment_prob = get_sentiment_proba(new_text)
    return JsonResponse(sentiment_prob)
# get sentiment probability
# Matches tokens consisting of CJK unified ideographs.
chinese_word_regex = re.compile(r'[\u4e00-\u9fa5]+')
def get_sentiment_proba( new_text ):
    """Tokenize Chinese text and return {'Negative': p, 'Positive': p} from the CNN."""
    tokens = jieba.lcut(new_text, cut_all=False)
    # keep only tokens that contain Chinese characters
    tokens = [x for x in tokens if chinese_word_regex.match(x)]
    tokens = [tokens]
    # print(tokens)
    # Index the document
    new_text_seq = tokenizer.texts_to_sequences(tokens)
    # Pad the document
    max_document_length = 350
    new_text_pad = sequence.pad_sequences(new_text_seq, maxlen= max_document_length)
    # result = model.predict(new_text_pad)  # written this way it raises:
    # error message: "... is not an element of this graph."
    # set_session(sess) is indispensable; without it the following error occurs:
    # tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable dense_2/kernel from Container: localhost. This could mean that the variable was uninitialized. Not found: Container localhost does not exist. (Could not find resource: localhost/dense_2/kernel)
    # tensorflow graph
    # (Problem 3) Exception will arise when we use predict() in Django
    # We should apply graph.as_default() and set_session() to avoid the problem
    with graph.as_default():
        set_session(sess)
        result = model.predict(new_text_pad)
    response = {'Negative': round(float(result[0, 0]), 2), 'Positive': round(float(result[0, 1]), 2)}
    # result is a numpy array; convert to float so JsonResponse can serialize it
    return response
# Emitted once at import time so server logs show the model finished loading.
print("app_sentiment was loaded!")
| guan-jie-chen/Term_Project-Django | app_sentiment/views.py | views.py | py | 3,994 | python | en | code | 1 | github-code | 13 |
12821479031 | ## Write a program that lets the user play Rock-Paper-Scissors against the computer. There should be five rounds, and after those five rounds, your program should print out who won and lost or that there is a tie
# Rock-Paper-Scissors vs the computer: five rounds, then announce the result.
from random import randint
import math
# c counts user round wins, d counts computer round wins.
c = 0
d = 0
for i in range(5):
    # Computer move: 1 -> rock, 2 -> scissors, 3 -> paper.
    x = randint(1, 3)
    if x == 1:
        x = "r"
    elif x == 2:
        x = "s"
    else:
        x = "p"
    play = input("'Rock(R)', 'Scissor(S)', 'Paper(P)': ")
    # Fixed: str.lower() returns a new string and the original discarded it.
    # Normalize to upper case so both 'r' and 'R' match the checks below.
    play = play.upper()
    if x == "r" and play == "R":
        print("Draw")
    elif x == "r" and play == "S":
        print("Computer won")
        d += 1
    elif x == "r" and play == "P":
        print("You won")
        c = c + 1
    elif x == "s" and play == "R":
        print("You won!")
        c = c + 1
    elif x == "s" and play == "S":
        print("Draw!")
    elif x == "s" and play == "P":
        print("Computer won!")
        d += 1
    elif x == "p" and play == "R":
        print("Computer won!")
        d += 1
    elif x == "p" and play == "S":
        print("You Won!")
        c = c + 1
    elif x == "p" and play == "P":
        print("Draw!")
if c > d:
    print("Congratulation! You are the winner")
elif d > c:
    print("Sorry, you lost to computer")
else:
    print("It's a tie")
| Oposibu/PythonTutorial | pythonExercise/RockPapperScissorsGame1.py | RockPapperScissorsGame1.py | py | 1,264 | python | en | code | 0 | github-code | 13 |
37910366398 |
#
# jobOptions file for Combined Tower Reconstruction
# (needed by jet and combined sliding window)
#
from AthenaCommon.AlgSequence import AlgSequence
# Athena job options: schedule the combined-tower builder and configure its
# LAr / FCal / Tile tower builder tools.
topSequence = AlgSequence()
from CaloRec.CaloRecConf import CaloTowerAlgorithm
# -- switch on some monitoring
if not 'doCaloCombinedTowerMonitoring' in dir():
    doCaloCombinedTowerMonitoring = False
# -- do monitoring
if doCaloCombinedTowerMonitoring:
    from CaloRec.CaloRecConf import CaloTowerMonitor
    topSequence += CaloTowerAlgorithm("CmbTowerBldr")
    topSequence += CaloTowerMonitor("TowerSpy")
else:
    topSequence += CaloTowerAlgorithm("CmbTowerBldr")
# --------------------------------------------------------------
# Algorithms Private Options
# --------------------------------------------------------------
# tower Maker:
# CmbTowerBldr.OutputLevel = 2
# Tower grid: 100 x 64 towers over -5.0 < eta < 5.0.
CmbTowerBldr = topSequence.CmbTowerBldr
CmbTowerBldr.TowerContainerName="CombinedTower"
CmbTowerBldr.NumberOfPhiTowers=64
CmbTowerBldr.NumberOfEtaTowers=100
CmbTowerBldr.EtaMin=-5.0
CmbTowerBldr.EtaMax=5.0
# input to LArTowerBuilder: cells in LArEM and LARHEC
from LArRecUtils.LArRecUtilsConf import LArTowerBuilderTool
LArCmbTwrBldr = LArTowerBuilderTool( name = 'LArCmbTwrBldr',
                                     CellContainerName = "AllCalo",
                                     IncludedCalos = [ "LAREM",
                                     "LARHEC" ])
#CmbTowerBldr.TowerBuilderTools+=[ LArCmbTwrBldr.getFullName() ]
#CmbTowerBldr += LArCmbTwrBldr #for private tools
# input to FCALTowerBuilder : cells in FCAL
from LArRecUtils.LArRecUtilsConf import LArFCalTowerBuilderTool
FCalCmbTwrBldr = LArFCalTowerBuilderTool( name = 'FCalCmbTwrBldr',
                                          CellContainerName = "AllCalo",
                                          MinimumEt = 0.*MeV)
#CmbTowerBldr.TowerBuilderTools+=[ FCalCmbTwrBldr.getFullName() ]
#CmbTowerBldr += FCalCmbTwrBldr #for private tools
from TileRecUtils.TileRecUtilsConf import TileTowerBuilderTool
TileCmbTwrBldr = TileTowerBuilderTool( name = 'TileCmbTwrBldr',
                                       CellContainerName = "AllCalo",
                                       DumpTowers = False,
                                       DumpWeightMap = False);
# NOTE(review): only the Tile tool is registered below; the LAr/FCal
# registrations above are commented out -- confirm that is intentional.
#CmbTowerBldr.TowerBuilderTools+=[ TileCmbTwrBldr.getFullName() ]
CmbTowerBldr += TileCmbTwrBldr
CmbTowerBldr.TowerBuilderTools+=[ TileCmbTwrBldr ]
#for private tools
#for public tools use instead
#ToolSvc += TileCmbTwrBldr;
| rushioda/PIXELVALID_athena | athena/Calorimeter/CaloRec/share/CaloCombinedTower_jobOptions.py | CaloCombinedTower_jobOptions.py | py | 2,570 | python | en | code | 1 | github-code | 13 |
71528326739 | #!/usr/bin/env python3
"""
Usage: <./day5-lunch4.py> <tab_file1> <tab_file2> <tab_file3> <tab_file4> <tab_file5>
Plotting residuals with log scale
"""
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import os
# Load per-replicate tab files: column 5 holds the values of interest and
# the sample name is derived from the file name.
name1 = sys.argv[1].split(os.sep)[-1].split('.')[0] #split on periods because the names had "." in them
m_01 = pd.read_csv(sys.argv[1], sep="\t").iloc[:, 4]
name2 = sys.argv[2].split(os.sep)[-1].split('.')[0]
m_02 = pd.read_csv(sys.argv[2], sep="\t").iloc[:, 4]
name3 = sys.argv[3].split(os.sep)[-1].split('.')[0]
m_03 = pd.read_csv(sys.argv[3], sep="\t").iloc[:, 4]
name4 = sys.argv[4].split(os.sep)[-1].split('.')[0]
m_04 = pd.read_csv(sys.argv[4], sep="\t").iloc[:, 4]
name5 = sys.argv[5].split(os.sep)[-1].split('.')[0]
m_05 = pd.read_csv(sys.argv[5], sep="\t").iloc[:, 4]
# Response sample: name from the parent directory, values from the last column.
name6 = sys.argv[6].split(os.sep)[-2]
fpkms = pd.read_csv(sys.argv[6], sep="\t").iloc[:, -1]
means0 = {name1: m_01, name2: m_02, name3: m_03, name4: m_04, name5: m_05, name6: fpkms}
means0_df = pd.DataFrame(means0)
means0_df = means0_df.dropna()
y = means0_df.loc[:, name6]
x = means0_df.loc[:, [name1, name2, name3, name4, name5]]
# Fixed: the response column was hard-coded as "SRR072893"; use name6 so the
# script works for any sixth input (identical behavior for the original data,
# whose sixth argument was the SRR072893 directory).
mod = smf.ols(formula="{} ~ {} + {} + {} + {} + {}".format(name6, name1, name2, name3, name4, name5), data=means0_df)
res = mod.fit()
res2 = res.resid
x_axis = res2
y_axis = y
# Histogram of residuals on a log-count axis.
fig, ax = plt.subplots()
ax.hist(res2, bins=5000)
plt.yscale("log")
ax.set_title("Histogram-Residual data")
ax.set_xlabel("residuals")  # fixed typo ("resuduals")
ax.set_ylabel("number")
ax.set_xlim(left=-100, right=100)
fig.savefig("residuals_log")
plt.close()
| JSYamamoto/qbb2018-answers | day5-lunch/day5-lunch6.py | day5-lunch6.py | py | 1,655 | python | en | code | 0 | github-code | 13 |
17055267344 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LeaseEnrollDTO(object):
    """Value object describing a lease enrollment record."""

    # Attribute names that participate in dict (de)serialization.
    _FIELD_NAMES = ('brand_pid', 'create_time', 'name', 'plan_id', 'status')

    def __init__(self):
        self._brand_pid = None
        self._create_time = None
        self._name = None
        self._plan_id = None
        self._status = None

    @property
    def brand_pid(self):
        return self._brand_pid

    @brand_pid.setter
    def brand_pid(self, value):
        self._brand_pid = value

    @property
    def create_time(self):
        return self._create_time

    @create_time.setter
    def create_time(self, value):
        self._create_time = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def plan_id(self):
        return self._plan_id

    @plan_id.setter
    def plan_id(self, value):
        self._plan_id = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    def to_alipay_dict(self):
        """Serialize truthy attributes to a plain dict, recursing into
        values that themselves provide to_alipay_dict()."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LeaseEnrollDTO from a response dict; None for empty input."""
        if not d:
            return None
        o = LeaseEnrollDTO()
        for field in LeaseEnrollDTO._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LeaseEnrollDTO.py | LeaseEnrollDTO.py | py | 2,686 | python | en | code | 241 | github-code | 13 |
73570747536 | cases = int(input());
# Per test case: the minimum number of bracket moves equals the deepest
# unmatched-')' prefix depth, tracked below as -max_misplaced.
for i in range(0, cases):
    # We don't care about the size, but have to read it anyway
    case_size = input();
    raw_case = input();
    # Map '(' -> +1 and ')' -> -1 so a running sum is the bracket depth.
    case = []
    for char in raw_case:
        if char == '(':
            case.append(1)
        else:
            case.append(-1)
    # Track the most negative running depth; its magnitude is the answer.
    misplaced = 0;
    max_misplaced = 0
    for num in case:
        misplaced += num
        if misplaced < max_misplaced:
            max_misplaced = misplaced
    print(-max_misplaced)
| JDSeiler/programming-problems | codeforces/round-653/c-move-brackets.py | c-move-brackets.py | py | 485 | python | en | code | 0 | github-code | 13 |
24825525935 | import sys
sys.path.append('..')
import os
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import gc
import functools
from scipy import ndimage
import cv2
import pickle as pkl
import lightgbm as lgb
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import log_loss, accuracy_score, roc_auc_score, confusion_matrix
from scripts.utils.utils import getListOfFiles
from scripts.utils.metrics import dice, correct_dice
from scripts.utils.mask_functions import mask2rle
from scripts.models.unet_resnet34 import UnetResnet34
import segmentation_models_pytorch as smp
from scripts.dataset.he_dataset import HeSegmentationDataset
from scripts.dataset.raw_dataset import RawSegmentationDataset
from scripts.dataset.raw_pos_neg_dataset import RawPosNegSegmentationDataset
from scripts.dataset.he_pos_neg_dataset import HePosNegSegmentationDataset
from scripts.predicting.tta_predictor import TTAPredictor
from albumentations import (
Resize,
Normalize,
HorizontalFlip,
Crop,
Compose,
Rotate,
PadIfNeeded,
CenterCrop
)
tqdm.monitor_interval = 0
torch.backends.cudnn.benchmark = True
def resize_preds(preds, size=1024):
    """Resize a stack of 2-D prediction maps to ``size`` x ``size``.

    Generalization: the output side length was hard-coded to 1024; it is
    now a parameter defaulting to 1024, so existing callers are unchanged.

    Args:
        preds: sequence/array of 2-D float prediction maps.
        size: output side length in pixels.

    Returns:
        float32 array of shape (len(preds), size, size).
    """
    preds_resized = np.zeros((len(preds), size, size), dtype=np.float32)
    for i in tqdm(range(len(preds))):
        # cv2.resize takes (width, height); output is square so order is moot.
        preds_resized[i] = cv2.resize(preds[i], (size, size))
    return preds_resized
def filter_small_masks(mask, threshold_size=250):
    """Keep only connected components strictly larger than ``threshold_size``.

    Args:
        mask: binary 2-D mask array.
        threshold_size: minimum pixel count (exclusive) for a component
            to survive.

    Returns:
        int8 mask of the same shape with surviving components set to 1.
    """
    labeled, component_count = ndimage.label(mask)
    kept = np.zeros_like(mask, dtype=np.int8)
    for label_id in range(1, component_count + 1):
        component = labeled == label_id
        # Strictly-greater comparison, matching the original behaviour.
        if np.count_nonzero(component) > threshold_size:
            kept[component] = 1
    return kept
def filter_all_masks(mask):
    """Keep only the largest connected component of a binary mask.

    Bug fix: with an all-zero mask the old code took the argmax over a
    size list whose only entry was the background label (0) and so
    returned an all-ones mask; an empty mask now stays empty.

    Args:
        mask: binary 2-D mask array.

    Returns:
        uint8 mask with the biggest component set to 1.
    """
    labeled, n_objs = ndimage.label(mask)
    if n_objs == 0:
        # No foreground at all -- previously this wrongly returned all ones.
        return np.zeros_like(mask, dtype=np.uint8)
    # Per-label pixel sums; index 0 is the background (sum 0 for a binary
    # mask), so argmax lands on a real component label.
    sizes = ndimage.sum(mask, labeled, range(n_objs + 1))
    largest_label = np.argmax(sizes)
    return (labeled == largest_label).astype(np.uint8)
def predict_seg(args, model_checkpoint, base_model, dataset_base, base_transforms):
    """Run a segmentation model over a dataset with 10-way TTA.

    Loads ``model_checkpoint`` into ``base_model`` (wrapped in
    DataParallel on CUDA) and registers ten test-time augmentations on a
    ``TTAPredictor``: identity, horizontal flip, +/-25% rescale, +/-15
    degree rotation, and flip+scale / flip+rotation combinations. For
    each ``put``, the first argument is the list of input transforms and
    the second is the inverse applied to map predictions back.

    Returns:
        (preds, targets): numpy array of predictions and a numpy array of
        targets; ``targets`` is left as an empty list when the dataset
        yields no masks.
    """
    checkpoint = torch.load(model_checkpoint)
    model = torch.nn.DataParallel(base_model).cuda()
    model.load_state_dict(checkpoint['model'])
    model.eval()
    # Predictor runs one pass per registered transform pair and combines them.
    tta_predictor = TTAPredictor(
        model=model,
        ds_base=dataset_base,
        batch_size=args.batch_size,
        workers=args.workers,
        base_transforms=base_transforms,
        put_deafult=False  # (sic) spelling matches TTAPredictor's keyword
    )
    current_size = int(args.image_size)
    # 1) identity: plain resize, no inverse needed.
    tta_predictor.put(
        [Resize(current_size, current_size)],
        None
    )
    # 2) horizontal flip, undone by flipping the prediction back.
    tta_predictor.put(
        [Resize(current_size, current_size), HorizontalFlip(always_apply=True)],
        Compose([HorizontalFlip(always_apply=True)])
    )
    # 3) upscale 1.25x, predictions resized back to current_size.
    tta_predictor.put(
        [Resize(int(current_size * 1.25), int(current_size * 1.25))],
        Resize(current_size, current_size)
    )
    # 4) downscale 0.75x, predictions resized back.
    tta_predictor.put(
        [Resize(int(current_size * 0.75), int(current_size * 0.75))],
        Resize(current_size, current_size)
    )
    # 5) flip + upscale, inverted by flip + resize.
    tta_predictor.put(
        [HorizontalFlip(always_apply=True), Resize(int(current_size * 1.25), int(current_size * 1.25))],
        Compose([HorizontalFlip(always_apply=True), Resize(current_size, current_size)])
    )
    # 6) flip + downscale, inverted by flip + resize.
    tta_predictor.put(
        [HorizontalFlip(always_apply=True), Resize(int(current_size * 0.75), int(current_size * 0.75))],
        Compose([HorizontalFlip(always_apply=True), Resize(current_size, current_size)])
    )
    # 7) rotate -15deg, inverted by rotating +15deg.
    tta_predictor.put(
        [Resize(current_size, current_size), Rotate(limit=(-15,-15), always_apply=True)],
        Rotate(limit=(15,15), always_apply=True)
    )
    # 8) rotate +15deg, inverted by rotating -15deg.
    tta_predictor.put(
        [Resize(current_size, current_size), Rotate(limit=(15,15), always_apply=True)],
        Rotate(limit=(-15,-15), always_apply=True)
    )
    # 9) rotate -15deg + flip, inverted by flip + rotate +15deg.
    tta_predictor.put(
        [Resize(current_size, current_size), Rotate(limit=(-15,-15), always_apply=True), HorizontalFlip(always_apply=True)],
        Compose([HorizontalFlip(always_apply=True), Rotate(limit=(15,15), always_apply=True)])
    )
    # 10) rotate +15deg + flip, inverted by flip + rotate -15deg.
    tta_predictor.put(
        [Resize(current_size, current_size), Rotate(limit=(15,15), always_apply=True), HorizontalFlip(always_apply=True)],
        Compose([HorizontalFlip(always_apply=True), Rotate(limit=(-15,-15), always_apply=True)])
    )
    # Drain the predictor; targets may be None for unlabeled (test) data.
    res_preds = []
    res_targets = []
    for pred, targets in tqdm(tta_predictor):
        res_preds.extend(pred)
        if targets is not None:
            res_targets.extend(targets)
    res_preds = np.array(res_preds)
    if len(res_targets) > 0:
        res_targets = np.array(res_targets)
    # Free the predictor (and its data loader workers) before returning.
    del tta_predictor
    gc.collect()
    return res_preds, res_targets
def main():
parser = argparse.ArgumentParser(description='Predict best of segmentation and classifiers and make some assumes on that result with lgb')
parser.add_argument('-i', '--input', default='../input/dicom-images-train', help='input eval data directory')
parser.add_argument('-it', '--input_test', default='../input/dicom-images-test', help='input test data directory')
parser.add_argument('-id', '--input_df', default='../input/train-rle.csv', help='input train df file')
parser.add_argument('-ss', '--sample_submission', default='../input/sample_submission.csv', help='sample submission file')
parser.add_argument('-s', '--seed', default=42, help='seed')
parser.add_argument('-tfr', '--test_fold_ratio', default=0.2, help='test fold ratio')
# parser.add_argument('-mp', '--model_path', default='../models/unet_resnet34_1024_v1/', help='path to models directory')
# parser.add_argument('-mp', '--model_path', default='../models/unet_resnet34_960_v1/', help='path to models directory')
# parser.add_argument('-mp', '--model_path', default='../models/smp_ur34_1024_v1/', help='path to models directory')
parser.add_argument('-bs', '--batch_size', default=8, help='size of batches', type=int)
parser.add_argument('-w', '--workers', default=6, help='data loader wokers count', type=int)
parser.add_argument('-is', '--image_size', default=1024, help='image size', type=int)
args = parser.parse_args()
df = pd.read_csv(args.input_df)
ids_with_mask = set(df[df[' EncodedPixels'].str.strip() != '-1']['ImageId'].values)
ids_without_mask = set(df[df[' EncodedPixels'].str.strip() == '-1']['ImageId'].values)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
norm = Normalize(
mean=[0.5, 0.5, 0.5],
std=[0.225, 0.225, 0.225],
)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
all_files = getListOfFiles(args.input)
all_pos_files = [x for x in all_files if os.path.split(x)[1][:-4] in ids_with_mask]
all_pos_files = np.array(all_pos_files)
all_neg_files = [x for x in all_files if os.path.split(x)[1][:-4] in ids_without_mask]
all_neg_files = np.array(all_neg_files)
_, valid_pos_files = train_test_split(all_pos_files,
test_size=args.test_fold_ratio,
random_state=args.seed)
_, valid_neg_files = train_test_split(all_neg_files,
test_size=args.test_fold_ratio,
random_state=args.seed)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
ur34_1024_v1_checkpoint_name = '../models/unet_resnet34_1024_v1/best_dice_model.t7'
ur34_1024_v1_base_model = UnetResnet34(in_channels=3, num_classes=1, num_filters=32, pretrained=False, is_deconv=True)
ur34_1024_v1_val_dataset_base = functools.partial(
HePosNegSegmentationDataset,
dcm_pos_files=valid_pos_files,
dcm_neg_files=valid_neg_files,
masks_file=args.input_df,
neg_ratio=None,
)
ur34_1024_v1_base_transformations = []
ur34_960_v1_checkpoint_name = '../models/unet_resnet34_960_v1/best_dice_model.t7'
ur34_960_v1_base_model = ur34_1024_v1_base_model
ur34_960_v1_val_dataset_base = ur34_1024_v1_val_dataset_base
ur34_960_v1_base_transformations = ur34_1024_v1_base_transformations
smp_ur34_1024_v1_checkpoint_name = '../models/smp_ur34_1024_v1/best_dice_model.t7'
smp_ur34_1024_v1_base_model = smp.Unet("resnet34", encoder_weights="imagenet", activation=None)
smp_ur34_1024_v1_val_dataset_base = functools.partial(
RawPosNegSegmentationDataset,
dcm_pos_files=valid_pos_files,
dcm_neg_files=valid_neg_files,
masks_file=args.input_df,
neg_ratio=None,
)
smp_ur34_1024_v1_base_transformations = [norm]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
ur34_1024_v1_val_res, val_targets = predict_seg(
args,
ur34_1024_v1_checkpoint_name,
ur34_1024_v1_base_model,
ur34_1024_v1_val_dataset_base,
ur34_1024_v1_base_transformations
)
pkl.dump(ur34_1024_v1_val_res,
open('../p_input/ur34_1024_v1_val_res.pkl', 'wb'),
protocol=4)
pkl.dump(val_targets, open('../p_input/rs42_tfp0.2_pos_neg_val_targets.pkl', 'wb'))
ur34_960_v1_val_res, _ = predict_seg(
args,
ur34_960_v1_checkpoint_name,
ur34_960_v1_base_model,
ur34_960_v1_val_dataset_base,
ur34_960_v1_base_transformations
)
pkl.dump(ur34_960_v1_val_res,
open('../p_input/ur34_960_v1_val_res.pkl', 'wb'),
protocol=4)
smp_ur34_1024_v1_val_res, _ = predict_seg(
args,
smp_ur34_1024_v1_checkpoint_name,
smp_ur34_1024_v1_base_model,
smp_ur34_1024_v1_val_dataset_base,
smp_ur34_1024_v1_base_transformations
)
pkl.dump(smp_ur34_1024_v1_val_res,
open('../p_input/smp_ur34_1024_v1_val_res.pkl', 'wb'),
protocol=4)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
ur34_1024_v1_val_res = pkl.load(open('../p_input/ur34_1024_v1_val_res.pkl', 'rb'))
ur34_960_v1_val_res = pkl.load(open('../p_input/ur34_960_v1_val_res.pkl', 'rb'))
smp_ur34_1024_v1_val_res = pkl.load(open('../p_input/smp_ur34_1024_v1_val_res.pkl', 'rb'))
val_targets = pkl.load(open('../p_input/rs42_tfp0.2_pos_neg_val_targets.pkl', 'rb'))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
eval_files = np.concatenate((valid_pos_files, valid_neg_files))
valid_df = pd.DataFrame(data={'images': eval_files})
valid_df['bin_target'] = 0
valid_df.loc[valid_df.index < len(valid_pos_files), 'bin_target'] = 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
clf_preds_files = [
'../p_input/binary/valid_efficientnet0_1024_d75_he_v1_raw_10xTTA.csv',
'../p_input/binary/valid_efn5_512_d90_he_v1_raw_10xTTA.csv',
'../p_input/binary/valid_efn0_1024_d75_add_v1_10xTTA.csv',
'../p_input/binary/valid_efn0_256_d80_he_cropped_v1.csv',
'../p_input/binary/valid_efn0_512_aug_dynamic_v1.csv'
]
images_set = set(df['ImageId'].values)
eval_all_files = [x for x in all_files if os.path.split(x)[1][:-4] in images_set]
eval_all_files = np.array(eval_all_files)
for idx, file in enumerate(clf_preds_files):
tmp_df = pd.read_csv(file)
eval_all_files_dict = dict(zip(eval_all_files, tmp_df['raw_target'].values))
eval_files_res = np.array([eval_all_files_dict[x] for x in eval_files])
valid_df['cl_{}'.format(idx)] = eval_files_res
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
tmp_preds = np.stack((ur34_1024_v1_val_res, ur34_960_v1_val_res)).mean(axis=0)
mean_val_res = np.stack((smp_ur34_1024_v1_val_res, tmp_preds)).mean(axis=0)
del tmp_preds
gc.collect()
seg_preds = [
ur34_1024_v1_val_res,
ur34_960_v1_val_res,
smp_ur34_1024_v1_val_res,
mean_val_res
]
for th in tqdm(np.arange(0., 1., 0.05)):
for idx, seg_pred in enumerate(seg_preds):
pred_m = (seg_pred > th)
valid_df['seg_{}_th_{}_count'.format(idx, th)] = pred_m.sum(axis=1).sum(axis=1)
del pred_m
gc.collect()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
train_cols = ['cl_{}'.format(i) for i in range(len(clf_preds_files))]
target_col = 'bin_target'
train_cols.extend(['seg_{}_th_{}_count'.format(idx, th) for idx in range(len(seg_preds)) for th in np.arange(0., 1., 0.05)])
param = {
'bagging_freq': 1,
'bagging_fraction': 0.38,
# 'boost_from_average':'false',
'boost': 'gbdt',
'feature_fraction': 0.33,
# 'learning_rate': 0.1,
'learning_rate': 0.01,
# 'max_depth': 2,
'max_depth': 6,
'num_leaves': 63,
# 'metric':'auc',
# 'metric':'binary_error',
# 'min_data_in_leaf': 80,
# 'min_sum_hessian_in_leaf': 10.0,
'num_threads': 8,
# 'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1
}
fold_count = 10
folds = StratifiedKFold(n_splits=fold_count, shuffle=True, random_state=args.seed)
oof = np.zeros(len(valid_df))
cl_clfs = []
for fold_, (trn_idx, val_idx) in enumerate(folds.split(valid_df[train_cols].values, valid_df[target_col].values)):
print("Fold : {}".format(fold_ + 1))
trn_data = lgb.Dataset(valid_df.iloc[trn_idx][train_cols], label=valid_df.iloc[trn_idx][target_col])
val_data = lgb.Dataset(valid_df.iloc[val_idx][train_cols], label=valid_df.iloc[val_idx][target_col])
clf = lgb.train(param, trn_data, 100000, valid_sets = [trn_data, val_data], verbose_eval=100, early_stopping_rounds = 100)
oof[val_idx] = clf.predict(valid_df.iloc[val_idx][train_cols], num_iteration=clf.best_iteration)
cl_clfs.append(clf)
max_acc = np.max([accuracy_score(valid_df[target_col].values, (oof > th)) for th in np.arange(0., 1., 0.01)])
max_acc_th = np.argmax([accuracy_score(valid_df[target_col].values, (oof > th)) for th in np.arange(0., 1., 0.01)])
max_acc_th = max_acc_th / 100.
print('CV log loss score: {}'.format(log_loss(valid_df[target_col].values, oof)))
print('CV accuracy score: {} th {}'.format(max_acc, max_acc_th))
print('CV roc auc score: {}'.format(roc_auc_score(valid_df[target_col].values, oof)))
cl_oof = oof.copy()
for idx, clf in enumerate(cl_clfs):
clf.save_model('../p_input/classifier_lgb_{}'.format(idx))
# orig CLx5
# CV log loss score: 0.20778928355121096
# CV accuracy score: 0.9133895131086143
# CV roc auc score: 0.9589424926597145
# th x20 over segx3+1mean
# CV log loss score: 0.1886846117676869
# CV accuracy score: 0.9293071161048689 th 0.51
# CV roc auc score: 0.9666573858459045
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dices = []
thrs = np.arange(0., 1., 0.01)
for idx, th in tqdm(enumerate(thrs), total=len(thrs)):
preds_m = (mean_val_res > th).astype(np.int8)
preds_m[cl_oof <= max_acc_th] = 0
dices.append(correct_dice(val_targets, preds_m))
print('curr {} dice {}'.format(th, dices[-1]))
dices = np.array(dices)
eval_dice_score = dices.max()
eval_best_thrs = thrs[np.argmax(dices)]
print('Eval best dice score {} best treshold {}'.format(eval_dice_score, eval_best_thrs))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Eval best dice score 0.8655028939247131 best treshold 0.46
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# dices = []
# tresholds = []
# thrs = np.arange(0., 1., 0.01)
# for i in tqdm(range(len(valid_pos_files))):
# cur_dices = []
# for th in thrs:
# preds_m = (mean_val_res[i:i+1] > th).astype(np.int8)
# cur_dices.append(correct_dice(val_targets[i:i+1], preds_m))
# best_idx = np.argmax(cur_dices)
# dices.append(cur_dices[best_idx])
# tresholds.append(thrs[best_idx])
# dices = np.array(dices)
#
# print('Eval best possible dice score {}'.format(np.mean(dices)))
# # Eval best possible dice score 0.6842488646507263
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# from sklearn.metrics import mean_squared_error
#
# pos_sub_df = valid_df[:len(valid_pos_files)].copy()
# pos_sub_df['th'] = tresholds
# train_cols = ['cl_{}'.format(i) for i in range(len(clf_preds_files))]
# train_cols.extend(['seg_{}_th_{}_count'.format(idx, th) for idx in range(len(seg_preds)) for th in np.arange(0., 1., 0.05)])
# target_col = 'th'
#
# param = {
# 'bagging_freq': 1,
# 'bagging_fraction': 0.38,
# # 'boost_from_average':'false',
# 'boost': 'gbdt',
# 'feature_fraction': 0.33,
# # 'learning_rate': 0.1,
# 'learning_rate': 0.01,
# # 'max_depth': 2,
# 'max_depth': 6,
# 'num_leaves': 63,
# 'metric':'mse',
# # 'metric':'binary_error',
# # 'min_data_in_leaf': 80,
# # 'min_sum_hessian_in_leaf': 10.0,
# 'num_threads': 8,
# # 'tree_learner': 'serial',
# # 'objective': 'binary',
# 'objective': 'regression',
# 'verbosity': -1
# }
#
# fold_count = 5
# folds = KFold(n_splits=fold_count, shuffle=True, random_state=args.seed)
# oof = np.zeros(len(pos_sub_df))
# clfs = []
#
# for fold_, (trn_idx, val_idx) in enumerate(folds.split(np.arange(len(pos_sub_df)))):
# print("Fold : {}".format(fold_ + 1))
# trn_data = lgb.Dataset(pos_sub_df.iloc[trn_idx][train_cols], label=pos_sub_df.iloc[trn_idx][target_col])
# val_data = lgb.Dataset(pos_sub_df.iloc[val_idx][train_cols], label=pos_sub_df.iloc[val_idx][target_col])
# clf = lgb.train(param, trn_data, 100000, valid_sets = [trn_data, val_data], verbose_eval=100, early_stopping_rounds = 100)
# oof[val_idx] = clf.predict(pos_sub_df.iloc[val_idx][train_cols], num_iteration=clf.best_iteration)
# clfs.append(clf)
#
# # max_acc = np.max([accuracy_score(pos_sub_df[target_col].values, (oof > th)) for th in np.arange(0., 1., 0.01)])
# # max_acc_th = np.argmax([accuracy_score(valid_df[target_col].values, (oof > th)) for th in np.arange(0., 1., 0.01)])
# # max_acc_th = max_acc_th / 100.
# print('CV mse score: {}'.format(mean_squared_error(pos_sub_df[target_col].values, oof)))
# # CV mse score: 0.08751432656237994
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# neg_preds = np.zeros((len(valid_neg_files)))
# for clf in clfs:
# neg_preds += clf.predict(valid_df[len(valid_pos_files):][train_cols], num_iteration=clf.best_iteration)
# neg_preds = neg_preds / len(clfs)
# th_preds = np.concatenate((oof, neg_preds))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# preds = []
# for idx, pr in enumerate(mean_val_res):
# preds.append((pr > th_preds[idx] if cl_oof[idx] > max_acc_th else np.zeros_like(pr, dtype=bool)).astype(np.uint8))
# preds = np.array(preds)
# final_dice = correct_dice(val_targets, preds)
# print("final dice {}".format(final_dice))
# # final dice 0.8659218549728394
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# clf_preds_1 = pd.read_csv('../p_input/binary/valid_efficientnet0_1024_d75_he_v1_raw_10xTTA.csv')
# clf_preds_2 = pd.read_csv('../p_input/binary/valid_efn5_512_d90_he_v1_raw_10xTTA.csv')
# clf_preds_3 = pd.read_csv('../p_input/binary/valid_efn0_1024_d75_add_v1_10xTTA.csv')
# clf_preds_4 = pd.read_csv('../p_input/binary/valid_efn0_256_d80_he_cropped_v1.csv')
# clf_preds_5 = pd.read_csv('../p_input/binary/valid_efn0_512_aug_dynamic_v1.csv')
#
# clf_tmp_1 = np.stack((
# clf_preds_1['raw_target'].values,
# clf_preds_2['raw_target'].values,
# clf_preds_3['raw_target'].values
# )).mean(axis=0)
#
# clf_tmp_2 = np.stack((
# clf_preds_4['raw_target'].values,
# clf_preds_5['raw_target'].values,
# )).mean(axis=0)
#
# clf_mean = np.stack((
# clf_tmp_1,
# clf_tmp_2,
# )).mean(axis=0)
#
# eval_all_files_dict = dict(zip(eval_all_files, clf_mean))
# eval_files_res = np.array([eval_all_files_dict[x] for x in eval_files])
#
# cur_eval_preds = mean_val_res.copy()
# cur_eval_preds[eval_files_res <= 0.356] = 0
# preds_m = (cur_eval_preds > 0.65).astype(np.int8)
# preds_m = np.array([filter_small_masks(x, 1500) for x in tqdm(preds_m)])
# need_zeroing_for = (np.sum(preds_m, axis=1).sum(axis=1) == 0)
#
# dices = []
# thrs = np.arange(0., 1., 0.01)
# for i in tqdm(thrs):
# preds_m = (cur_eval_preds > i).astype(np.int8)
# preds_m[need_zeroing_for] = 0
# dices.append(correct_dice(val_targets, preds_m))
# print('curr {} dice {}'.format(i, dices[-1]))
# dices = np.array(dices)
# eval_dice_score = dices.max()
# eval_best_thrs = thrs[np.argmax(dices)]
#
# print('Eval best dice score {} best treshold {}'.format(eval_dice_score, eval_best_thrs))
# mean_val_res cl0.365 base0.8 f250
# Eval best dice score 0.8630527257919312 best treshold 0.47
# mean_val_res cl0.356 base0.65 f1500
# Eval best dice score 0.8626716136932373 best treshold 0.44
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
del seg_preds
del ur34_1024_v1_val_res
del ur34_960_v1_val_res
del smp_ur34_1024_v1_val_res
del val_targets
del mean_val_res
gc.collect()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
test_all_files = getListOfFiles(args.input_test)
ss = pd.read_csv(args.sample_submission)
ss_images_set = set(ss['ImageId'].values)
test_all_files = [x for x in test_all_files if (os.path.split(x)[1][:-4]) in ss_images_set]
test_all_files = np.array(test_all_files)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
ur34_1024_v1_test_dataset_base = functools.partial(
HeSegmentationDataset,
dcm_files=test_all_files,
)
ur34_1024_v1_test_res, _ = predict_seg(
args,
ur34_1024_v1_checkpoint_name,
ur34_1024_v1_base_model,
ur34_1024_v1_test_dataset_base,
ur34_1024_v1_base_transformations
)
pkl.dump(ur34_1024_v1_test_res,
open('../p_input/ur34_1024_v1_test_res.pkl', 'wb'),
protocol=4)
ur34_960_v1_test_dataset_base = ur34_1024_v1_test_dataset_base
ur34_960_v1_test_res, _ = predict_seg(
args,
ur34_960_v1_checkpoint_name,
ur34_960_v1_base_model,
ur34_960_v1_test_dataset_base,
ur34_960_v1_base_transformations
)
pkl.dump(ur34_960_v1_test_res,
open('../p_input/ur34_960_v1_test_res.pkl', 'wb'),
protocol=4)
smp_ur34_1024_v1_test_dataset_base = functools.partial(
RawSegmentationDataset,
dcm_files=test_all_files,
)
smp_ur34_1024_v1_test_res, _ = predict_seg(
args,
smp_ur34_1024_v1_checkpoint_name,
smp_ur34_1024_v1_base_model,
smp_ur34_1024_v1_test_dataset_base,
smp_ur34_1024_v1_base_transformations
)
pkl.dump(smp_ur34_1024_v1_test_res,
open('../p_input/smp_ur34_1024_v1_test_res.pkl', 'wb'),
protocol=4)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
ur34_1024_v1_test_res = pkl.load(open('../p_input/ur34_1024_v1_test_res.pkl', 'rb'))
ur34_960_v1_test_res = pkl.load(open('../p_input/ur34_960_v1_test_res.pkl', 'rb'))
smp_ur34_1024_v1_test_res = pkl.load(open('../p_input/smp_ur34_1024_v1_test_res.pkl', 'rb'))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
test_df = pd.DataFrame(data={'images': test_all_files})
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
clf_preds_files = [
'../p_input/binary/efficientnet0_1024_d75_he_v1_raw_10xTTA.csv',
'../p_input/binary/efn5_512_d90_he_v1_raw_10xTTA.csv',
'../p_input/binary/efn0_1024_d75_add_v1_10xTTA.csv',
'../p_input/binary/efn0_256_d80_he_cropped_v1.csv',
'../p_input/binary/efn0_512_aug_dynamic_v1.csv'
]
for idx, file in enumerate(clf_preds_files):
tmp_df = pd.read_csv(file)
test_df['cl_{}'.format(idx)] = tmp_df['raw_target'].values
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
tmp_preds = np.stack((ur34_1024_v1_test_res, ur34_960_v1_test_res)).mean(axis=0)
mean_test_res = np.stack((smp_ur34_1024_v1_test_res, tmp_preds)).mean(axis=0)
del tmp_preds
gc.collect()
seg_preds = [
ur34_1024_v1_test_res,
ur34_960_v1_test_res,
smp_ur34_1024_v1_test_res,
mean_test_res
]
for th in tqdm(np.arange(0., 1., 0.05)):
for idx, seg_pred in enumerate(seg_preds):
pred_m = (seg_pred > th)
test_df['seg_{}_th_{}_count'.format(idx, th)] = pred_m.sum(axis=1).sum(axis=1)
del pred_m
gc.collect()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
train_cols = ['cl_{}'.format(i) for i in range(len(clf_preds_files))]
train_cols.extend(['seg_{}_th_{}_count'.format(idx, th) for idx in range(len(seg_preds)) for th in np.arange(0., 1., 0.05)])
fold_count = 10
clfs = [lgb.Booster(model_file='../p_input/classifier_lgb_{}'.format(idx)) for idx in range(fold_count)]
test_cl_preds = np.zeros((len(test_df)))
for clf in clfs:
test_cl_preds += clf.predict(test_df[train_cols], num_iteration=clf.best_iteration)
test_cl_preds = test_cl_preds / len(clfs)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
args.output = '../submissions/lgb_cl_0.51_ur34_1024_v1_ur34_960_v1_smp_ur34_1024_v1_0.46.csv.gz'
res_preds = (mean_test_res > 0.46).astype(np.uint8)
res_preds[test_cl_preds <= 0.51] = 0
res_rle = [mask2rle(x.T * 255, x.shape[0], x.shape[1]) if x.sum() > 0 else '-1' for x in tqdm(res_preds)]
res_df = pd.DataFrame(data={
'ImageId': [os.path.split(x)[1][:-4] for x in test_all_files],
'EncodedPixels': res_rle
})
res_df.to_csv(args.output, index=False, compression='gzip')
os.system('kaggle competitions submit siim-acr-pneumothorax-segmentation -f {} -m "{}"'.format(args.output, "{}_fold_seed_{}".format(args.test_fold_ratio, args.seed)))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Script entry point: runs the full TTA-predict / LightGBM-blend / submit pipeline.
if __name__ == '__main__':
    main()
| AGrankov/siim_final | scripts/predicting/after_predict.py | after_predict.py | py | 27,839 | python | en | code | 0 | github-code | 13 |
19550675343 | # Import packages
from dash import Dash, html, dash_table, dcc, callback, Input, Output
import pandas as pd
import ssl
import plotly.express as px

# NOTE(review): this disables TLS certificate verification process-wide so
# pd.read_csv can fetch over HTTPS -- convenient but insecure; confirm it
# is acceptable outside a local demo.
ssl._create_default_https_context = ssl._create_unverified_context

# Incorporate data
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminder2007.csv')

# Initialize the app and incorporate css
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = Dash(__name__, external_stylesheets=external_stylesheets)

# App layout: title, metric selector, and a table/graph split (six columns each).
app.layout = html.Div([
    html.H1('My First Webapp Dashboard', style={'textAlign': 'center', 'color': 'gray', 'fontSize': 36}),
    html.Hr(),
    dcc.RadioItems(options=['pop', 'lifeExp', 'gdpPercap'], value='lifeExp', id='radio-buttons', inline=True),
    html.Div(className='row', children=[
        html.Div(className='six columns', children=[
            dash_table.DataTable(data=df.to_dict('records'), page_size=11, style_table={'overflowX': 'auto'})
        ]),
        html.Div(className='six columns', children=[
            dcc.Graph(figure={}, id='histo-chart')
        ])
    ])
])

# Add controls to build the interaction
@callback(
    Output(component_id='histo-chart', component_property='figure'),
    Input(component_id='radio-buttons', component_property='value')
)
def update_graph(col_chosen):
    """Rebuild the histogram with the selected metric averaged per continent."""
    fig = px.histogram(df, x='continent', y=col_chosen, histfunc='avg', color='continent')
    return fig

# Run the app
if __name__ == '__main__':
    app.run(debug=True)
| axiom19/Dash-plotly | main.py | main.py | py | 1,513 | python | en | code | 0 | github-code | 13 |
18400603504 | import requests
import re
from bs4 import BeautifulSoup

# Scrape the FFToday quarterback stats page and print per-player stat links.
URL = 'https://fftoday.com/stats/players?Pos=QB'
page = requests.get(URL)

soup = BeautifulSoup(page.content, 'html.parser')
# NOTE(review): find() with no arguments returns the first tag in the
# document; presumably the intent was a more specific selector -- verify.
results = soup.find()
player_data = results.find_all(class_='bodycontent')
# Extract "/stats/players..." relative paths from the rendered markup.
playerlinks = re.finditer("/stats/players[/a-zA-Z0-9_]+",str(playe
| MatthewLee311/django3 | mysite/twitter/fantasy.py | fantasy.py | py | 415 | python | en | code | 0 | github-code | 13 |
18114661779 | from shutil import which
from os.path import exists
from subprocess import run
import click
def in_path(program):
"""Check if R is available in PATH."""
return which(program) is not None
def run_r_command(cmd, program="R"):
"""Run R command"""
if not exists(program) and not in_path(program):
raise Exception(f"{program} does not exists, nor is it available "
"in the PATH environment variable.")
args = [program, "--quiet", "-e", cmd]
click.echo(f"$ {' '.join(args)}")
process = run(args)
return process
def install_r_package(package, repos):
"""Check if R package is installed and if not, install it.
Returns whether the package was initially installed
"""
package_fmt = to_character(package)
repos_fmt = to_character(repos)
check_proc = run_r_command(f"library({package_fmt})")
is_installed = check_proc.returncode == 0
if not is_installed:
install_proc = run_r_command(f"install.packages({package_fmt}, repos = {repos_fmt})")
if install_proc.returncode != 0:
raise Exception("{package} didn't install correctly.")
return is_installed
def to_logical(value):
"""Format boolean value to logical for R."""
return "TRUE" if value else "FALSE"
def to_character(value):
"""Format string value to character for R."""
return "'" + value + "'"
def to_vector(iterable, type="character"):
"""Convert iterable to vector for R."""
formatters = {
"character": to_character,
"logical": to_logical}
formatter = formatters[type]
vector = ", ".join(formatter(v) for v in iterable)
vector = "c(" + vector + ")"
return vector
| datasnakes/rut | rut/utils.py | utils.py | py | 1,706 | python | en | code | 4 | github-code | 13 |
10067042689 | import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, MATCH
import plotly.express as px
import pandas as pd
## DATA FROM https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
data_urls = {
"cases": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
"death": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
"recovery": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv",
}
def _read_and_melt(url):
df = pd.read_csv(url).drop("Province/State", axis=1)
df = df.melt(id_vars=["Country/Region", "Lat", "Long"])
df["variable"] = pd.to_datetime(df["variable"])
return df
def _update_data(df):
data = pd.DataFrame()
for country in df["Country/Region"].unique():
df_country = df[df["Country/Region"] == country].copy().reset_index(drop=True)
first_value = df_country["value"][0]
counts_list = list(df_country["value"].diff().values)
counts_list[0] = first_value
df_country["counts"] = counts_list
data = pd.concat([data, df_country])
return data
def read_john_data(url):
df = _read_and_melt(url)
df = _update_data(df)
return df
def _mapdata_to_weekly(df):
df = df.set_index("variable")
df = df.resample("W").last()
df = df.drop("counts", axis=1)
df = df.reset_index()
df["variable"] = df["variable"].astype("str")
return df
def mapdata(df):
    """Resample every country's rows to weekly snapshots for the map view."""
    result = pd.DataFrame()
    for name in df["Country/Region"].unique():
        weekly = _mapdata_to_weekly(df[df["Country/Region"] == name])
        result = pd.concat([result, weekly])
    return result
# Load local copies of the three time series (see data_urls above for the
# upstream sources) and precompute the weekly map variants.
cases = read_john_data("data/cases.csv")
death = read_john_data("data/death.csv")
recovery = read_john_data("data/recovery.csv")
cases_map = mapdata(cases)
death_map = mapdata(death)
recovery_map = mapdata(recovery)
# Tag each weekly frame so they can be distinguished after concatenation.
cases_map["data_type"] = "cases"
death_map["data_type"] = "death"
recovery_map["data_type"] = "recovery"
all_data = pd.concat([cases_map, death_map, recovery_map])
# NOTE(review): placeholder Mapbox token — must be replaced with a real
# token for the scatter_mapbox figures to render tiles.
mapbox = "your-token"
px.set_mapbox_access_token(mapbox)
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Exposed for WSGI deployment (e.g. gunicorn app:server).
server = app.server
# Page layout: a header card plus one card holding the metric/country
# controls, the line chart, the animated map and the map-selection chart.
app.layout = html.Div(
    [
        # Header card with data attribution.
        html.Div(
            [
                html.H1("COVID-19 Time series Data"),
                html.P(
                    "Data from Johns Hopkins University: ", style={"fontSize": "2.5rem"}
                ),
                html.A(
                    "https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series",
                    href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series",
                ),
            ],
            style={
                "marginBottom": "2%",
                "backgroundColor": "#5feb4d",
                "padding": "3%",
                "borderRadius": "20px",
            },
        ),
        html.Div(
            [
                html.H3("国ごとのデータ"),
                html.Div(
                    [
                        # Metric selector (cases / deaths / recoveries).
                        dcc.Dropdown(
                            id="drop1",
                            options=[
                                {"label": i, "value": i}
                                for i in ["感染者数", "死亡者数", "回復者数"]
                            ],
                            value="感染者数",
                        ),
                        # Multi-select country picker.
                        dcc.Dropdown(
                            id="drop2",
                            options=[
                                {"label": i, "value": i}
                                for i in cases["Country/Region"].unique()
                            ],
                            value=["Japan"],
                            multi=True,
                        ),
                        # Daily ("新規") vs cumulative ("累計") toggle.
                        dcc.RadioItems(
                            id="graph_radio",
                            options=[{"label": s, "value": s} for s in ["新規", "累計"]],
                            value="新規",
                        ),
                    ]
                ),
                dcc.Graph(id="first_graph"),
                # Map and its companion chart share one row (65% / 35%).
                dcc.Graph(
                    id="map_graph", style={"width": "65%", "display": "inline-block"}
                ),
                dcc.Graph(
                    id="callback_graph",
                    style={"width": "35%", "display": "inline-block"},
                ),
                html.H1(id="test"),
            ],
            style={
                "marginBottom": "2%",
                "backgroundColor": "#5feb4d",
                "padding": "3%",
                "borderRadius": "20px",
            },
        ),
        # html.Div(
        #     [
        #         html.Div(
        #             [
        #                 html.H3("国ごとのデータ(パターン・マッチング・コールバック)"),
        #                 html.Button(id="junku_button", children="PUSHME", n_clicks=0),
        #                 html.Div(id="add_layout", children=[]),
        #             ]
        #         )
        #     ],
        #     style={
        #         "backgroundColor": "#5feb4d",
        #         "padding": "3%",
        #         "borderRadius": "20px",
        #     },
        # ),
    ],
    style={"padding": "5%", "backgroundColor": "#17be06"},
)
@app.callback(
    Output("first_graph", "figure"),
    Output("map_graph", "figure"),
    Input("drop1", "value"),
    Input("drop2", "value"),
    Input("graph_radio", "value"),
)
def update_graph(type_select, cnt_select, graph_select):
    """Update the per-country line chart and the animated world map.

    Args:
        type_select: metric label — "感染者数" (cases, the default branch),
            "死亡者数" (deaths) or "回復者数" (recoveries).
        cnt_select: list of country names chosen in the multi-dropdown.
        graph_select: "新規" to plot daily counts, anything else to plot
            the cumulative value.

    Returns:
        (line figure, scatter-mapbox figure) tuple.
    """
    # The original repeated identical px.line / px.scatter_mapbox calls in
    # six branches; select the data sources once and build each figure once.
    if type_select == "死亡者数":
        line_source, map_source = death, death_map
    elif type_select == "回復者数":
        line_source, map_source = recovery, recovery_map
    else:
        line_source, map_source = cases, cases_map
    selected = line_source[line_source["Country/Region"].isin(cnt_select)]
    y_column = "counts" if graph_select == "新規" else "value"
    line_figure = px.line(selected, x="variable", y=y_column, color="Country/Region")
    map_figure = px.scatter_mapbox(
        map_source,
        lat="Lat",
        lon="Long",
        size="value",
        animation_frame="variable",
        color="value",
        hover_name="Country/Region",
        zoom=1,
        size_max=60,
        color_continuous_scale=px.colors.cyclical.IceFire,
        height=800,
        title=f"マップ表示(累計値: {type_select})",
        template={"layout": {"clickmode": "event+select"}},
    )
    return line_figure, map_figure
@app.callback(
    Output("callback_graph", "figure"),
    Input("map_graph", "selectedData"),
    Input("drop1", "value"),
)
def update_graph(selectedData, selected_value):
    """Plot cumulative curves for the countries selected on the map.

    NOTE(review): this def shadows the first `update_graph` at module
    level; Dash registers each callback at decoration time so both still
    run, but a distinct name would be clearer — confirm before renaming.

    Args:
        selectedData: Plotly selection payload from the map (SHIFT+click
            for multi-select); defaults to Japan when nothing is selected.
        selected_value: metric label from the "drop1" dropdown.
    """
    if selectedData is None:
        selectedData = {"points": [{"hovertext": "Japan"}]}
    country_list = [point["hovertext"] for point in selectedData["points"]]
    # The original duplicated the px.line call per metric; pick the source
    # DataFrame once instead ("感染者数" / anything else falls back to cases).
    if selected_value == "死亡者数":
        source = death
    elif selected_value == "回復者数":
        source = recovery
    else:
        source = cases
    filtered = source[source["Country/Region"].isin(country_list)]
    return px.line(
        filtered,
        x="variable",
        y="value",
        color="Country/Region",
        title=f"選択国の{selected_value}(累計値: SHIFT+クリック)",
        height=800,
    )
# @app.callback(
# Output("add_layout", "children"),
# Input("junku_button", "n_clicks"),
# State("add_layout", "children"),
# )
# def update_layout(n_clicks, layout_children):
# append_layout = html.Div(
# [
# dcc.Dropdown(
# id={"type": "count_select_drop", "index": n_clicks},
# options=[
# {"value": i, "label": i} for i in cases["Country/Region"].unique()
# ],
# value=cases["Country/Region"].unique()[n_clicks],
# ),
# dcc.RadioItems(
# id={"type": "count_select_radio", "index": n_clicks},
# options=[{"value": i, "label": i} for i in ["Linear", "Log"]],
# value="Linear",
# ),
# dcc.Graph(id={"type": "count_select_graph", "index": n_clicks}),
# ],
# style={"width": "46%", "padding": "2%", "display": "inline-block"},
# )
# layout_children.append(append_layout)
# return layout_children
# @app.callback(
# Output({"type": "count_select_graph", "index": MATCH}, "figure"),
# Input({"type": "count_select_drop", "index": MATCH}, "value"),
# Input({"type": "count_select_radio", "index": MATCH}, "value"),
# )
# def update_country_graph(selected_country, selected_radio_value):
# if selected_country is None:
# dash.exceptions.PreventUpdate
# selected_country_data = all_data[all_data["Country/Region"] == selected_country]
# if selected_radio_value == "Log":
# return px.line(
# selected_country_data,
# x="variable",
# y="value",
# color="data_type",
# log_y=True,
# )
# return px.line(selected_country_data, x="variable", y="value", color="data_type",)
if __name__ == "__main__":
app.run_server(debug=True)
| plotly-dash-book/events | 20210112/dash/application/app.py | app.py | py | 14,405 | python | en | code | 7 | github-code | 13 |
31942935150 | from typing import List
"""
方法一:迭代法实现子集枚举
思路
考虑数组 [1,2,2],选择前两个数,或者第一、三个数,都会得到相同的子集。
也就是说,对于当前选择的数 x,若前面有与其相同的数 y,且没有选择 y,
此时包含 x 的子集,必然会出现在包含 y 的所有子集中。
我们可以通过判断这种情况,来避免生成重复的子集。代码实现时,可以先将
数组排序;迭代时,若发现没有选择上一个数,且当前数字与上一个数相同,
则可以跳过当前生成的子集。
方法二:递归法实现子集枚举
思路
与方法一类似,在递归时,若发现没有选择上一个数,且当前数字与上一个数相同,
则可以跳过当前生成的子集。
"""
# @lc code=start
# 方法二
class Solution:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return every distinct subset of nums (which may hold duplicates).

        Sorts first so equal values are adjacent, then does a depth-first
        include/exclude walk; taking a value equal to its predecessor is
        only allowed when the predecessor itself was taken, which prevents
        duplicate subsets from being emitted.
        """
        nums.sort()
        total = len(nums)
        collected: List[List[int]] = []
        chosen: List[int] = []

        def explore(pos: int, took_prev: bool) -> None:
            if pos == total:
                collected.append(list(chosen))
                return
            # Branch 1: leave nums[pos] out.
            explore(pos + 1, False)
            # Branch 2: take nums[pos] — skipped when it duplicates a
            # predecessor that was left out (would repeat a subset).
            if pos > 0 and nums[pos] == nums[pos - 1] and not took_prev:
                return
            chosen.append(nums[pos])
            explore(pos + 1, True)
            chosen.pop()

        explore(0, False)
        return collected
# @lc code=end
# 方法一
# class Solution:
# def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
# ans, n = [], len(nums)
# nums.sort()
# for mask in range(1 << n):
# flag, stack = True, []
# for i in range(n):
# if mask & (1 << i):
# if (i > 0 and ((mask >> (i - 1)) & 1) == 0
# and nums[i] == nums[i - 1]):
# flag = False
# break
# stack.append(nums[i])
# if flag:
# ans.append(stack)
# return ans
# Ad-hoc smoke test: exercise a duplicate-bearing input and a trivial one.
if __name__ == '__main__':
    solu = Solution()
    print(solu.subsetsWithDup([1, 2, 2]))
    print(solu.subsetsWithDup([0]))
| wylu/leetcodecn | src/python/p1to99/90.子集-ii.py | 90.子集-ii.py | py | 2,156 | python | zh | code | 3 | github-code | 13 |
39989857639 | import numpy as np
import pickle
from Bio import pairwise2
import pdb
import sys
import nltk.translate.meteor_score as meteor_score
import nltk as nltk
def main():
    """Tag each TSV sample with the index of its best-aligned latent structure.

    Reads ``files/unprocessed_strct_<i>.tsv`` (index from argv[1]), scores
    the sample's POS structure against every latent structure in the
    pickled dictionary via global sequence alignment, and writes each line
    (minus its last field) with the winning structure index appended to
    ``unprocessed_files/unprocessed_file_<i>.tsv``.
    """
    # start from tmux window 21
    # 30-36: alphalady
    # 36-42 alphaman
    # 10-29 amethyst + 8,9
    # next:21
    input_idx = int(sys.argv[1])
    input_file = 'files/unprocessed_strct_' + str(input_idx) + '.tsv'
    output_file = 'unprocessed_files/unprocessed_file_' + str(input_idx) + '.tsv'
    dict_file = '/share/home/alexchao2007/code/weibo_data_final/1000/pos_unprocessed_dict_meteor1000.pkl'
    structure_dict = pickle.load(open(dict_file, 'rb'))
    # Candidate latent structures, in dictionary insertion order.
    struct_list = list(structure_dict['structure2idx'])
    with open(output_file, 'w') as struct_idx_file:
        with open(input_file, 'r') as readlines:
            # BUG FIX: the original reused `idx` for both the line counter
            # and the inner candidate loop (whose index was unused anyway),
            # shadowing the progress counter; distinct names avoid that.
            for line_no, line in enumerate(readlines):
                print('processing idx: ', input_idx, 'unprocessed: ', line_no, '/30000')
                _, _, _, structure, _, _ = line.split('\t')
                max_struct_score = -100
                max_struct_idx = 0
                for latent_struct in struct_list:
                    # Alternative scorer kept for reference:
                    # meteor_score.single_meteor_score(latent_struct, structure)
                    score = pairwise2.align.globalxx(latent_struct, structure)[0][2]
                    if score > max_struct_score:
                        max_struct_score = score
                        max_struct_idx = structure_dict['structure2idx'][latent_struct]
                # Drop the trailing field (the raw structure) and append the
                # chosen latent-structure index instead.
                line = '\t'.join(line[:-1].split('\t')[:-1])
                struct_idx_file.write(line)
                struct_idx_file.write('\t')
                struct_idx_file.write(str(max_struct_idx))
                struct_idx_file.write('\n')


if __name__ == '__main__':
    main()
71350179859 | import hydra
import sys
import albumentations as albu
import tensorflow as tf
import tensorflow.keras as K
from fastprogress.fastprogress import master_bar, progress_bar
from TF_CenterNet.models import get_centernet
from TF_CenterNet.datasets import DatasetBuilder
from TF_CenterNet.losses import get_kpt_loss
from TF_CenterNet.losses import get_wh_loss
from TF_CenterNet.losses import get_offset_loss
from TF_CenterNet.summary_keys import SummaryKeys
class Trainer:
    """Training-loop driver for the CenterNet model (TF1 graph/session API).

    `initialize_training_loop` builds the dataset pipeline, model, losses,
    optimizer and summaries; `run` opens a session and trains for
    `cfg.n_epochs` epochs.
    """

    def __init__(self, cfg):
        self.cfg = cfg

    def training(self, sess, writer, epoch):
        """Run one training epoch, writing summaries every iteration."""
        sess.run(self.train_init_op)
        ops = {
            "optim": self.optim_op,
            "loss": self.t_loss,
        }
        for i in progress_bar(range(self.n_train_iter), parent=self.mb):
            ops["summary"] = self._get_summary_ops(i)
            feed_dict = {self.ph_iter: i, self.ph_epoch: epoch}
            run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
            try:
                ret = sess.run(ops, feed_dict, run_options)
                writer.add_summary(ret["summary"], self.global_step)
                self.global_step += 1
                self.mb.child.comment = "training step, loss: {loss:.4f}".format(
                    loss=ret["loss"]
                )
            except tf.errors.OutOfRangeError:
                # Dataset iterator exhausted before n_train_iter steps.
                break

    def validation(self, sess, writer):
        """Run one validation pass over the validation split.

        NOTE(review): `self.valid_summary_op` is never defined in
        `initialize_training_loop`, and the only call site in `run` is
        commented out — this method looks unfinished; confirm before
        re-enabling it.
        """
        sess.run(self.valid_init_op)
        for i in progress_bar(range(self.n_valid_iter), parent=self.mb):
            try:
                # Loss is evaluated only to drive the graph; its value is
                # reported through the summary, not used directly.
                summary, _ = sess.run((self.valid_summary_op, self.t_loss))
                writer.add_summary(summary, self.global_step)
                self.mb.child.comment = "validation step"
            except tf.errors.OutOfRangeError:
                break

    def _get_summary_ops(self, iteration):
        """Merge the summary ops that should fire at this iteration."""
        keys = [SummaryKeys.PER_ITER]
        if iteration == 0:
            keys.append(SummaryKeys.PER_EPOCH)
        if iteration % 100 == 0:
            keys.append(SummaryKeys.PER_100ITER)
        ops = [self.summary_ops[k] for k in keys]
        ops = tf.summary.merge([op for op in ops if op is not None])
        return ops

    def get_optimizer(self):
        """Build the optimizer selected by cfg.optimizer.name ("sgd"/"adam")."""
        cfg = self.cfg.optimizer
        if cfg.name == "sgd":
            return tf.train.MomentumOptimizer(
                cfg.lr, cfg.momentum, use_nesterov=cfg.nesterov
            )
        elif cfg.name == "adam":
            return tf.train.AdamOptimizer(cfg.lr, cfg.beta1, cfg.beta2)
        else:
            raise ValueError()

    def initialize_training_loop(self):
        """Construct the TF graph (data, model, loss, optimizer, summaries)
        and cache the resulting tensors/ops on self."""
        cfg = self.cfg
        graph = tf.Graph()
        with graph.as_default():
            with tf.name_scope("dataset"):
                t_inputs, init_ops, n_iter = DatasetBuilder(cfg).get_dataset()
                train_init_op, valid_init_op = init_ops
                n_train_iter, n_valid_iter = n_iter
            with tf.name_scope("centernet"):
                model = get_centernet(cfg, t_inputs["image"])
                fmaps = model.output
            with tf.name_scope("loss"):
                loss_kpt = get_kpt_loss(fmaps, t_inputs["kpt"])
                loss_wh = get_wh_loss(
                    fmaps, t_inputs["wh"], t_inputs["ct"], t_inputs["categories"]
                )
                loss_offset = get_offset_loss(
                    fmaps, t_inputs["offset"], t_inputs["ct"], t_inputs["categories"]
                )
                # BUG FIX: the original used commas
                # (`loss = 1.0 * loss_kpt, 0.1 * loss_wh, 1.0 * loss_offset`),
                # which built a 3-tuple that reduce_mean then averaged; the
                # intended objective is the weighted *sum* of the terms.
                loss = 1.0 * loss_kpt + 0.1 * loss_wh + 1.0 * loss_offset
                loss = tf.reduce_mean(loss)
            with tf.name_scope("optimizer"):
                # Group the minimize op with UPDATE_OPS (e.g. batch-norm
                # moving averages) so both run every step.
                optim_op = tf.group(
                    (
                        self.get_optimizer().minimize(loss),
                        tf.get_collection(tf.GraphKeys.UPDATE_OPS),
                    )
                )
            summary_ops, ph_epoch, ph_iter = self._initialize_summary_ops()
            tf.summary.scalar("loss", loss, [SummaryKeys.PER_ITER])
            tf.summary.scalar("loss_kpt", loss_kpt, [SummaryKeys.PER_ITER])
            tf.summary.scalar("loss_wh", loss_wh, [SummaryKeys.PER_ITER])
            tf.summary.scalar("loss_offset", loss_offset, [SummaryKeys.PER_ITER])
            tf.summary.image(
                "sample_image", t_inputs["image"][:1], 1, [SummaryKeys.PER_EPOCH]
            )
            _, h, w, _ = t_inputs["kpt"].shape
            # Regroup the 6 keypoint channels as 2 images of 3 channels each
            # — assumes the kpt map has exactly 6 channels; TODO confirm.
            sample_kpt = t_inputs["kpt"][0]
            sample_kpt = tf.reshape(sample_kpt, (h, w, 2, 3))
            sample_kpt = tf.transpose(sample_kpt, [2, 0, 1, 3])
            tf.summary.image(
                "sample_keypoint", sample_kpt, 6, [SummaryKeys.PER_EPOCH],
            )
            sample_pred_kpt = tf.sigmoid(fmaps[0, :, :, 4:])
            sample_pred_kpt = tf.reshape(sample_pred_kpt, (h, w, 2, 3))
            sample_pred_kpt = tf.transpose(sample_pred_kpt, [2, 0, 1, 3])
            tf.summary.image(
                "sample_pred_keypoint", sample_pred_kpt, 6, [SummaryKeys.PER_EPOCH]
            )
        self.graph = graph
        self.t_inputs = t_inputs
        self.t_output = fmaps
        self.t_loss = loss
        self.train_init_op = train_init_op
        self.valid_init_op = valid_init_op
        self.n_train_iter = n_train_iter
        self.n_valid_iter = n_valid_iter
        self.optim_op = optim_op
        self.model = model
        self.ph_epoch = ph_epoch
        self.ph_iter = ph_iter
        self.summary_ops = summary_ops
        self.global_step = 0
        self.mb = master_bar(range(self.cfg.n_epochs))

    def _initialize_summary_ops(self):
        """Create epoch/iteration placeholders and the merged summary ops,
        keyed by firing frequency."""
        ph_epoch = tf.placeholder(tf.int32, shape=None, name="ph_epoch")
        ph_iter = tf.placeholder(tf.int32, shape=None, name="ph_iteration")
        tf.summary.scalar("epoch", ph_epoch, [SummaryKeys.PER_ITER])
        tf.summary.scalar("iteration", ph_iter, [SummaryKeys.PER_ITER])
        var = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope="centernet/detector/conv2d_24/kernel:0"
        )[0]
        tf.summary.histogram("conv2d_24_kernel", var, [SummaryKeys.PER_100ITER])
        summary_ops = {
            SummaryKeys.PER_ITER: tf.summary.merge_all(SummaryKeys.PER_ITER),
            SummaryKeys.PER_100ITER: tf.summary.merge_all(SummaryKeys.PER_100ITER),
            SummaryKeys.PER_EPOCH: tf.summary.merge_all(SummaryKeys.PER_EPOCH),
        }
        return summary_ops, ph_epoch, ph_iter

    def run(self):
        """Open a session and train for cfg.n_epochs epochs."""
        cfg = self.cfg
        self.initialize_training_loop()
        gpu_options = tf.GPUOptions(allow_growth=True)
        config = tf.ConfigProto(gpu_options=gpu_options)
        with tf.Session(graph=self.graph, config=config) as sess:
            writer = tf.summary.FileWriter(cfg.logdir, sess.graph)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for epoch in self.mb:
                self.training(sess, writer, epoch)
                # self.validation(sess, writer, epoch)
@hydra.main(config_path="configs/config.yaml")
def main(cfg):
print("CONFIG")
print(cfg.pretty())
cfg.data_dir = hydra.utils.to_absolute_path(cfg.data_dir)
trainer = Trainer(cfg)
trainer.run()
if __name__ == "__main__":
main()
| cafeal/SIGNATE_AIEdge2 | src/TF_CenterNet/train.py | train.py | py | 7,332 | python | en | code | 3 | github-code | 13 |
# Define a function that returns the sum of latin alphabet symbols of the argument string
# ASCII a = 097 .. z = 122
DEF_SYM_OFFSET = 96
DEF_SYM_LO = 97
DEF_SYM_HI = 122


def sumLatinSymbols(string):
    """Sum the alphabet positions (a=1 .. z=26) of the Latin letters in
    *string*; case-insensitive, non-Latin characters are ignored."""
    return sum(
        code - DEF_SYM_OFFSET
        for code in map(ord, str(string).lower())
        if DEF_SYM_LO <= code <= DEF_SYM_HI
    )


print("COLIN = %d" % sumLatinSymbols("COLIN"))
print("Ace = %d" % sumLatinSymbols("Ace"))
print("cloud = %d" % sumLatinSymbols("cloud"))
2142692736 | #!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Utility layer on the main Blender Python programming interface. This isn't
the utilities for the Blender Game Engine.
This module can only be used from within Blender."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for JavaScript Object Notation (JSON) strings.
# https://docs.python.org/3.5/library/json.html
import json
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# Blender library imports, in alphabetic order.
#
# These modules can only be imported if running from within Blender.
try:
#
# Main Blender Python interface.
import bpy
#
# Vectors.
from mathutils import Vector
except ImportError as error:
print(__doc__)
print(error)
def load_driver(driverClass, arguments):
    """Load the owner data subclass and initialise everything.
    This subroutine has all the hard-coded names.
    This is the end of the chain from:
    - The blenderdriver.py script, which launches Blender and specifies the \
    .blend file to open and tells Blender to run the launch_blender_driver.py \
    script.
    - The launch_blender_driver.py script, which works out the name of the \
    driver application module, extends the Python module path, and then calls \
    this function.
    The driver application class is instantiated in two different stages: Data and \
    Game. The Data stage is here. The Game class is when the Blender Game Engine \
    starts.
    Args:
        driverClass: the application class; its `data_constructor` and \
    `data_initialise` methods are invoked here, in the data stage.
        arguments: parsed command-line namespace; this code reads its \
    `gateway`, `controllersPackage`, `controllersModule` and `start` \
    attributes.
    """
    log(DEBUG, 'begin {} "{}".', arguments, __package__)
    #
    # Add and configure the driver gateway object, on which everything else will
    # depend. It is a Blender Empty.
    driverGateway = set_up_object(arguments.gateway)
    #
    # Create the settings collection. The settings include:
    # - The module path and name of the application class.
    # - The arguments from the command line.
    # - The Blender version string, so that it can be retrieved in the BGE
    #   stage.
    settings = {'module': driverClass.__module__,
                'class': driverClass.__name__,
                'arguments': None if arguments is None else vars(arguments),
                'blender': bpy.app.version_string[:] }
    #
    # Instantiate the application class.
    driver = driverClass(settings)
    #
    # Call the application's constructor for the Blender data stage. Pass it a
    # reference to the scene and the driver gateway. This is a data scene, not a
    # game scene. For now, assume there is only a single scene.
    driver.data_constructor(bpy.data.scenes[0], driverGateway)
    #
    # Call the override-able initialisation.
    driver.data_initialise()
    #
    # Attach the controllers for BGE to the gateway object.
    # Default is that the controllers module is in the same package as this
    # file. This can be overriden on the launch script command line, in case a
    # diagnostic controllers package is to be run instead.
    controllersPackage = __package__
    if arguments.controllersPackage is not None:
        controllersPackage = arguments.controllersPackage
    controllers = get_controllers(
        driver, controllersPackage, arguments.controllersModule,
        ('initialise', 'tick', 'keyboard'))
    log(DEBUG, 'controllers {}.', vars(controllers))
    configure_gateway(driverGateway, controllers, driver.tickInterval)
    #
    # Put a collection of configuration settings into one or more game
    # properties. The collection gets read from there by the
    # blender_driver.controllers initialise() subroutine when the Blender game
    # engine is started.
    set_game_property(driverGateway, 'settingsJSON', json.dumps(settings) )
    #
    # Start the Blender Game Engine, if that was specified.
    # Could also have an option to export as a blender game here.
    if arguments.start:
        log(DEBUG, 'starting BGE.')
        bpy.ops.view3d.game_start()
    log(DEBUG, 'end.')
def set_up_object(name, params=None):
    """Set up an object in the data layer. Returns a reference to the object.

    Finds an existing object named `name`, or creates one from `params`,
    then applies physics, position, scale and ghost settings and places it
    on the appropriate layer.

    Args:
        name (str): name of the object to find or create.
        params (dict): optional creation parameters — 'subtype' (mesh name
            or 'Empty'), 'text', 'physicsType', 'location', 'scale' and
            'ghost'. A None default replaces the original mutable
            `params={}` default (it was only read here, but the mutable
            default is a known Python pitfall).
    """
    if params is None:
        params = {}
    object_ = None
    objectIndex = bpy.data.objects.find(name)
    if objectIndex >= 0:
        object_ = bpy.data.objects[objectIndex]
    new_ = (object_ is None)
    #
    # Create a new object with the specified Blender mesh, if necessary.
    subtype = params.get('subtype')
    text = params.get('text')
    if new_:
        if text is not None:
            curve = bpy.data.curves.new(name, 'FONT')
            curve.align_x = 'CENTER'
            curve.align_y = 'CENTER'
            curve.body = text
            object_ = bpy.data.objects.new(name, curve)
        elif subtype is None or subtype == 'Empty':
            object_ = bpy.data.objects.new(name, None)
        else:
            object_ = bpy.data.objects.new(name, bpy.data.meshes[subtype])
    #
    # Set its physics type and related attributes.
    physicsType = params.get('physicsType')
    if physicsType is None and new_:
        if subtype is None:
            physicsType = 'NO_COLLISION'
        else:
            physicsType = 'RIGID_BODY'
    if physicsType is not None:
        object_.game.physics_type = physicsType
        if physicsType != 'NO_COLLISION':
            object_.game.use_collision_bounds = True
    #
    # Position the object, if necessary.
    location = params.get('location')
    if location is not None:
        object_.location = Vector(location)
    #
    # Scale the object, if necessary.
    scale = params.get('scale')
    if scale is not None:
        object_.scale = Vector(scale)
    #
    # Add the object to the current scene.
    if new_:
        bpy.context.scene.objects.link(object_)
    #
    # Set its Blender ghost, if specified.
    ghost = params.get('ghost')
    if ghost is not None:
        object_.game.use_ghost = ghost
    #
    # Add the object to the required layers.
    #
    # The gateway object, which has subtype None and text None, goes on every
    # layer. Template objects go on layer one only. This means that:
    #
    # -   Template objects aren't visible by default.
    # -   Template objects can be addObject'd later, by bge.
    # -   The module that contains the controllers of the gateway object always
    #     gets imported, whatever layer happens to be active when BGE gets
    #     started.
    layer = 1
    if subtype is None and text is None:
        layer = 0
    #
    # It seems that Blender doesn't allow an object to be on no layers at any
    # time. This makes the following line necessary, in addition to the for
    # loop.
    object_.layers[layer] = True
    for index in range(len(object_.layers)):
        if index != layer:
            object_.layers[index] = (layer == 0)
    #
    # Refresh the current scene.
    bpy.context.scene.update()
    #
    # Return a reference to the object.
    return object_
def get_controllers(driver, packageName, moduleName, controllers):
    """Build a namespace mapping each requested controller name to its
    dotted "package.module.controller" path.

    The application is then given the chance to delete entries, for
    diagnostic purposes, before the namespace is returned.
    """
    #
    # Declare an empty class to use as a namespace.
    # https://docs.python.org/3.5/tutorial/classes.html#odds-and-ends
    class Controllers:
        pass

    namespace = Controllers()
    for name in controllers:
        dotted = ".".join((packageName, moduleName, name))
        setattr(namespace, name, dotted)
    #
    # Give the application an opportunity to remove any, for diagnostic
    # purposes.
    driver.diagnostic_remove_controllers(namespace)
    return namespace
def configure_gateway(driverGateway, controllers, tickInterval):
    """Set various configurations that make the driver gateway work or are
    convenient.

    Attaches Python controllers and sensors for initialise/tick/keyboard
    to the gateway object, and switches the render engine to the game
    engine. `tickInterval` sets how often the tick sensor fires.
    """
    #
    bpy.context.scene.render.engine = 'BLENDER_GAME'
    bpy.ops.wm.addon_enable(module="game_engine_save_as_runtime")
    #
    # Controller and sensor for initialisation.
    if controllers.initialise is not None:
        sensor = add_sensor(driverGateway, controllers.initialise)
    #
    # Controller and sensor for every tick.
    if controllers.tick is not None:
        sensor = add_sensor(driverGateway, controllers.tick)
        sensor.use_pulse_true_level = True
        #
        # Set the tick frequency using whatever API the current version of
        # Blender has.
        if hasattr(sensor, 'frequency'):
            sensor.frequency = tickInterval
        else:
            sensor.tick_skip = tickInterval
    #
    # Controller and sensor for the keyboard. This allows, for example, a back
    # door to be added to terminate the engine.
    if controllers.keyboard is not None:
        sensor = add_sensor(driverGateway, controllers.keyboard, 'KEYBOARD')
        sensor.use_all_keys = True
def add_sensor(driver, subroutine, sensorType='ALWAYS'):
    """Add a Python module controller plus a sensor that triggers it.

    Both are named after the dotted `subroutine` path. Returns the new
    sensor so the caller can tune its attributes.
    """
    driver = select_only(driver)
    bpy.ops.logic.controller_add(type='PYTHON')
    #
    # Only way to access the controller just added is to get the last one now.
    controller = driver.game.controllers[-1]
    controller.mode = 'MODULE'
    controller.module = subroutine
    controller.name = subroutine
    bpy.ops.logic.sensor_add(type=sensorType)
    #
    # Only way to access the sensor just added is to get the last one now.
    sensor = driver.game.sensors[-1]
    sensor.name = subroutine
    sensor.use_tap = True
    sensor.link(controller)
    return sensor
def select_only(target):
    """Set the Blender user interface selection to a specified single object, or
    to nothing. If a single object is selected then it is also made active. Some
    parts of the programming interface also require that an object is
    selected.

    `target` may be an object or an object name; returns the resolved
    object (or None when `target` is None).
    """
    bpy.ops.object.select_all(action='DESELECT')
    if target is not None:
        if isinstance(target, str):
            target = bpy.data.objects[target]
        target.select = True
        bpy.context.scene.objects.active = target
    return target
def set_up_objects(objectsDict):
    """Set up every object described in `objectsDict` (name -> params).

    Returns the list of created/updated objects, in dictionary order;
    an empty list when `objectsDict` is None.
    """
    if objectsDict is None:
        return []
    return [set_up_object(name, params) for name, params in objectsDict.items()]
def set_game_property(object_, key, value):
    """Set a game property in the data context, i.e. before the game engine has
    started.

    A game property value has a maximum length. When `value` fits, it is
    stored under `key` directly; otherwise it is split across numbered
    properties "key0", "key1", ... which get_game_property reassembles.
    Returns the object.
    """
    object_ = select_only(object_)
    #
    # Attempt to add the value to a single property. This might not work.
    bpy.ops.object.game_property_new(type='STRING', name=key)
    #
    # Get a reference to the new game property.
    gameProperty = object_.game.properties[-1]
    #
    # Set the value, then check that it worked.
    gameProperty.value = value
    if gameProperty.value == value:
        return object_
    #
    # If this code is reached, then it didn't work.
    #
    # Confirm that it didn't work because the value is too long.
    if not value.startswith(gameProperty.value):
        # The set didn't work, and it isn't because the value is too long.
        # Fail now.
        raise AssertionError(''.join((
            'Game property value set failed. Expected "', value,
            '". Actual "', gameProperty.value, '"' )))
    #
    # The set didn't work because the value is too long. Split the value
    # across an "array" of game properties. Actually, a number of game
    # properties with a single root name and numeric suffixes.
    #
    # Find out the maximum length of a game property.
    max = len(gameProperty.value)
    #
    # Delete the property that failed to take the whole value.
    bpy.ops.object.game_property_remove(-1)
    #
    # Break the value into chunks and set each into a game property with a
    # key that has a suffix for its chunk number.
    chunks = int(len(value) / max) + 1
    index = 0
    for chunk in range(chunks):
        chunkValue = value[ index + max*chunk : index + max*(chunk+1) ]
        bpy.ops.object.game_property_new(type='STRING', name=key + str(chunk))
        chunkProperty = object_.game.properties[-1]
        chunkProperty.value = chunkValue
    return object_
def get_game_property(object_, key):
    """Get a property value from a game object in the game context, i.e. when
    the game engine is running.

    A value that exceeded the game-property size limit is stored by
    set_game_property across numbered chunks ("key0", "key1", ...); those
    chunks are reassembled here.

    Raises:
        AttributeError: if neither `key` nor `key0` exists on the object.
    """
    properties = object_.getPropertyNames()
    if key in properties:
        # Property name on its own found. It contains the whole value.
        return object_[key]
    if ''.join((key, '0')) in properties:
        # Property name found with 0 appended. The value is split across a
        # number of properties. Collect the chunks into a list and join
        # once — the original grew a string per chunk, which is quadratic.
        chunks = []
        index = 0
        while True:
            chunkName = ''.join((key, str(index)))
            if chunkName not in properties:
                break
            chunks.append(object_[chunkName])
            index += 1
        return ''.join(chunks)
    raise AttributeError(''.join(('No game property for "', key, '"')))
def delete_except(keepers):
    """Delete every object in the scene except `keepers`.

    `keepers` is a list of objects and/or object names, or None. Returns
    the first keeper (selected and active), or None if there were none.
    Also pre-registers some primitive meshes so objects based on them can
    be created later, even after their template objects are deleted.
    """
    # Following lines are a bit naughty. They add some meshes using the ops API.
    # This is only done in order to add the items to the project's meshes. The
    # next thing that happens is everything gets deleted, including the newly
    # added objects. The meshes are not deleted when the objects are deleted
    # though.
    # If we don't do this, then objects based on these meshes cannot be created
    # later.
    bpy.ops.mesh.primitive_uv_sphere_add()
    bpy.ops.mesh.primitive_circle_add()
    bpy.ops.mesh.primitive_torus_add()
    bpy.ops.mesh.primitive_cone_add()
    #
    # Delete everything except the keepers.
    #
    # Select all layers.
    for layer_index in range(len(bpy.data.scenes[0].layers)):
        bpy.data.scenes[0].layers[layer_index] = True
    #
    # Select all objects, on all layers.
    bpy.ops.object.select_all(action='SELECT')
    #
    # Unselect the keepers.
    if keepers is not None:
        for keeper in keepers:
            if isinstance(keeper, str):
                if keeper in bpy.data.objects:
                    bpy.data.objects[keeper].select = False
                else:
                    raise AttributeError(''.join((
                        'bpyutils delete_except "', keeper, '" not found.')))
            else:
                keeper.select = False
    #
    # And delete.
    bpy.ops.object.delete()
    #
    # Select only the first layer.
    for layer_index in range(len(bpy.data.scenes[0].layers)):
        if layer_index <= 0:
            bpy.data.scenes[0].layers[layer_index] = True
        else:
            bpy.data.scenes[0].layers[layer_index] = False
    #
    # Return None if there were no keepers.
    if keepers is None or len(keepers) < 1:
        return None
    #
    # Otherwise, select and return the first keeper.
    return select_only(keepers[0])
def set_active_layer(layer):
    """Assert that *layer* is the active layer of the first scene.

    The Blender Python API seems to offer no way to actually set the active
    layer, so the best this can do is terminate loudly when the current
    value is wrong and ask the user to change it manually.
    """
    current = bpy.data.scenes[0].active_layer
    if current != layer:
        message = (
            "Active layer wrong. You have to set it manually, sorry."
            " Required:" + str(layer) + ". Actual:" + str(current) + ".")
        raise RuntimeError(message)
| sjjhsjjh/blender-driver | blender_driver/bpyutils.py | bpyutils.py | py | 16,092 | python | en | code | 2 | github-code | 13 |
5186325415 | import time
import Adafruit_DHT as dht
DHT_SENSOR = dht.DHT11
DHT_PIN = 4
# Poll the DHT11 sensor forever, printing one reading every 15 seconds.
while True:
    # read() yields (humidity, temperature); either can be None when the
    # sensor handshake fails.  NOTE(review): Adafruit_DHT also offers
    # read_retry(), which retries internally — confirm whether wanted here.
    humidity, temperature = dht.read(DHT_SENSOR, DHT_PIN)
    if humidity is not None and temperature is not None:
        print(
            "Temp={0:0.1f}C Humidity={1:0.1f}%".format(temperature, humidity))
        print("raw data: Temp; " + str(temperature) +"C Humidity; " + str(humidity))
    else:
        print("Sensor failure. Check wiring.")
    # Wait before the next poll.
    time.sleep(15)
| PimMiii/Data-Science-IoT-KP02 | testscripts/DHT_test.py | DHT_test.py | py | 460 | python | en | code | 0 | github-code | 13 |
4628532395 | import uuid
from django.db import models
from course.models import Lesson
from deadline.models import Deadline, DeadlineSubmit
from .validators import FileExtensionValidator, FileContentTypeValidator
# Create your models here.
class File(models.Model):
    """An uploaded file attached to a lesson, a deadline, or a deadline submission."""

    def get_upload_path(instance, filename):
        # Incoming names look like "<course_id>_<original name>"; store them
        # as "course_<id>/<uuid>_<rest>" so repeated uploads never collide.
        parts = filename.split("_")
        course_id = parts[0]
        file_uuid = uuid.uuid4().hex
        return f"course_{course_id}/{file_uuid}_{'_'.join(parts[1:])}"

    # The file hangs off one of these parents; __str__ assumes at most one
    # of them is set.
    lesson = models.ForeignKey(
        Lesson,
        on_delete=models.CASCADE,
        related_name="file_lesson",
        null=True,
        blank=True,
    )
    deadline = models.ForeignKey(
        Deadline,
        on_delete=models.CASCADE,
        related_name="file_deadline_lesson",
        null=True,
        blank=True,
    )
    deadlineSubmit = models.ForeignKey(
        DeadlineSubmit,
        on_delete=models.CASCADE,
        related_name="file_deadlineSubmit_lesson",
        null=True,
        blank=True,
    )
    name = models.CharField(max_length=50)
    file_upload = models.FileField(
        validators=[FileExtensionValidator(["txt", "pdf", "doc", "docx", "xls", "xlsx", "csv", "zip", "rar", "png", "jpg", "svg", "gif"]),
                    FileContentTypeValidator()],
        upload_to=get_upload_path
    )
    in_folder = models.CharField(max_length=200, blank=True)

    def delete(self, using=None, keep_parents=False):
        # Remove the stored file before the database row goes away.
        self.file_upload.delete()
        # BUG FIX: forward the arguments; they were silently dropped before.
        super().delete(using=using, keep_parents=keep_parents)

    def __str__(self):
        # Idiom fix: compare to None with `is`, not `==`.
        if self.lesson is None:
            if self.deadline is None:
                return "%s - %s" % (self.name, self.deadlineSubmit)
            return "%s - %s" % (self.name, self.deadline)
        return "%s - %s" % (self.name, self.lesson)
| pinanek/WebAppSecProject | backend/resource/models.py | models.py | py | 1,763 | python | en | code | 2 | github-code | 13 |
7731025252 | import pandas as pd
import matplotlib.pyplot as plt
stocks = pd.read_csv('/Users/apple/desktop/dataVisualisation/dataset/stocks.csv', index_col = 'Date')
aapl = stocks['AAPL']
# The CSV index arrives as strings; convert to datetime64 so resampling works.
aapl.index = pd.to_datetime(aapl.index)
# Standard deviation of AAPL prices over several resampling horizons.
std_30 = aapl.resample('30D').std()
std_75 = aapl.resample('75D').std()
std_125 = aapl.resample('125D').std()
std_250 = aapl.resample('250D').std()
# One curve per horizon, keeping the original colours and labels.
for series, colour, tag in ((std_30, 'red', '30d'),
                            (std_75, 'cyan', '75d'),
                            (std_125, 'green', '125d'),
                            (std_250, 'magenta', '250d')):
    plt.plot(series, color=colour, label=tag)
plt.legend(loc='upper left')
plt.title('Moving standard deviations')
# BUG FIX: save before show — plt.show() tears the figure down in
# interactive backends, so saving afterwards wrote an empty image.
plt.savefig('/Users/apple/desktop/dataVisualisation/4_timeSeries/5_movingStd.png')
plt.show()
| RobertNguyen125/Datacamp---DataVisulisationPython | dataVisualisation/4_timeSeries/5_plottingStd.py | 5_plottingStd.py | py | 950 | python | en | code | 0 | github-code | 13 |
28639959286 | import asyncio
import subprocess
import sys
import io
from typing import Iterable, TextIO, Any
def tee(
    cmd: Iterable[str],
    check: bool = True,
    **kwargs: Any,
) -> subprocess.CompletedProcess[str]:
    """Run *cmd*, mirroring its stdout/stderr to this process live while also
    capturing both streams, like subprocess.run(capture_output=True) with echo.

    Args:
        cmd: the command and its arguments (run under stdbuf for line buffering).
        check: raise CalledProcessError on a non-zero exit status.
        **kwargs: forwarded to asyncio.create_subprocess_exec.
    Returns:
        CompletedProcess with decoded stdout/stderr.
    Raises:
        subprocess.CalledProcessError: when check is True and the command fails.
    """
    out = io.StringIO()
    err = io.StringIO()

    async def read(
        stream: asyncio.StreamReader,
        sinks: Iterable[TextIO],
    ) -> None:
        # Forward the stream line by line to every sink until EOF.
        while True:
            line = await stream.readline()
            if not line:
                break
            decoded = line.decode()
            for sink in sinks:
                sink.write(decoded)
                sink.flush()

    async def loading(proc: asyncio.subprocess.Process) -> None:
        # Progress dots on stderr while the child runs, only on a TTY.
        if not sys.stderr.isatty():
            return
        ticks = 0
        while True:
            try:
                await asyncio.wait_for(asyncio.shield(proc.wait()), 1)
            except asyncio.TimeoutError:
                print("." * ticks, end="\r", file=sys.stderr)
                ticks += 1
                if ticks == 20:
                    print(" " * ticks, end="\r", file=sys.stderr)
                    ticks = 0
            else:
                break

    async def run() -> subprocess.CompletedProcess[str]:
        # stdbuf forces line buffering in the child so output appears promptly.
        p = await asyncio.create_subprocess_exec(
            "stdbuf",
            "-oL",
            "-eL",
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        assert p.stdout
        assert p.stderr
        await asyncio.wait(
            (
                asyncio.create_task(read(p.stdout, (out, sys.stdout))),
                asyncio.create_task(read(p.stderr, (err, sys.stderr))),
                asyncio.create_task(loading(p)),
            )
        )
        assert p.returncode is not None
        # BUG FIX: treat any non-zero status as failure, including negative
        # codes (child killed by a signal), matching subprocess.run(check=True).
        if check and p.returncode != 0:
            raise subprocess.CalledProcessError(
                p.returncode,
                tuple(cmd),
                out.getvalue(),
                err.getvalue(),
            )
        return subprocess.CompletedProcess(
            tuple(cmd),
            p.returncode,
            out.getvalue(),
            err.getvalue(),
        )

    return asyncio.run(run())
| DaanDeMeyer/fpbench | benchmarks/tee.py | tee.py | py | 2,259 | python | en | code | 1 | github-code | 13 |
32309205465 | from sgcn_mf import SGCN_MF
from MF import MF
from parser import parameter_parser
from utils import tab_printer, read_dataset_split_bytime, score_printer, save_logs , build_graph
from tqdm import trange
import torch
def main():
    """
    Parse command line parameters, split the dataset by time, build signed
    graphs, and train either the SGCN_MF model or a plain matrix
    factorization baseline.
    """
    args = parameter_parser()
    tab_printer(args)
    # Split the dataset by time; both splits share one string encoder.
    trainset, testset = read_dataset_split_bytime(args)
    traingraph = build_graph(args, trainset)
    testgraph = build_graph(args, testset)
    if args.model == 'sgcn_mf':
        trainer = SGCN_MF(args, traingraph, testgraph)
        trainer.setup_dataset()
        trainer.create_and_train_model()
        if args.test_size > 0:
            trainer.save_model()
            score_printer(trainer.logs)
            save_logs(args, trainer.logs)
    elif args.model == 'mf':
        model = MF(args, args.encoder['nu'], args.encoder['ni'])
        epochs = trange(args.epochs, desc="Loss")
        optimizer = torch.optim.Adam(model.parameters(), lr=args.mf_learnrate, weight_decay=args.weight_decay)
        for epoch in epochs:
            # BUG FIX: reset gradients each step; without zero_grad() they
            # accumulate across iterations and corrupt the updates.
            optimizer.zero_grad()
            loss = model(torch.LongTensor(traingraph['interaction']))
            loss.backward()
            optimizer.step()
            epochs.set_description("SGCN (Loss=%g)" % round(loss.item(), 4))
        if args.test_size > 0:
            print(model.score(traingraph, testgraph))
if __name__ == "__main__":
main()
| 2742195759/SGCN_MF | src/main.py | main.py | py | 1,630 | python | en | code | 0 | github-code | 13 |
39087206431 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class Property(models.Model):
    # Extend the product template with real-estate specific fields.
    _inherit = 'product.template'
    # Classification of the property.
    property_type_id = fields.Many2one(
        'property.type',
        string='Property Type'
    )
    # Location partner, shown on the website shop product page.
    partner_id = fields.Many2one(
        'res.partner',
        string='Property Location',
        help='Add Location of property which will be show on website shop on product page.'
    )
    # Attachments/documents displayed on the website shop product page.
    website_product_attachment = fields.Many2many(
        'ir.attachment',
        copy=True,
        help="Select attachment/documents which will be show on website shop on product page.",
        string="Website Attachments"
    )
    @api.multi
    def google_map_img(self, zoom=8, width=298, height=298):
        # Static map image for the location partner, or None without one.
        # NOTE(review): `partner and ... or None` also maps a falsy result of
        # google_map_img to None — confirm intended before rewriting as ternary.
        partner = self.sudo().partner_id
        return partner and partner.google_map_img(zoom, width, height) or None
    @api.multi
    def google_map_link(self, zoom=8):
        # Google Maps link for the location partner, or None without one.
        partner = self.sudo().partner_id
        return partner and partner.google_map_link(zoom) or None
| Admin-Ever/qatarfacility | property_rental_tenant_management_enterprise-12.0.1.0/property_rental_tenant_management_enterprise/models/property_template.py | property_template.py | py | 1,021 | python | en | code | 1 | github-code | 13 |
20069274965 | #!/usr/bin/python3
from collections import defaultdict
from nessus_session import NessusScanSession, nessus_scan_script_arg_parse
def get_synscan(sess):
    """Convert a Nessus SYN-scan result into a list of URLs for EyeWitness.

    Args:
        sess: a session whose .get(path).json() returns plugin output.
    Returns:
        list of ftp/http/https/rdp URL strings.
    """
    synscan = sess.get('/plugins/11219').json()
    # Organize the SYN scan output by port: port -> set of hostnames.
    ports = defaultdict(set)
    for output in synscan['outputs']:
        for port, arr in output['ports'].items():
            port = int(port.split()[0])
            ports[port].update({h['hostname'] for h in arr})
    # The TLS/SSL version plugin tells us which ports are encrypted.
    sslvers = sess.get('/plugins/56984').json()
    sslports = defaultdict(set)
    for output in sslvers['outputs']:
        for port, arr in output['ports'].items():
            port = int(port.split()[0])
            sslports[port].update({h['hostname'] for h in arr})
    out = list()
    for port, hosts in ports.items():
        # BUG FIX: the well-known-port branches previously iterated `arr`
        # (a stale loop variable holding only the last plugin output) and
        # indexed h['hostname'] on what are plain strings here; iterate the
        # aggregated hostname set `hosts` instead.
        if port == 21:
            out += ['ftp://{}'.format(h) for h in hosts]
        elif port == 80:
            out += ['http://{}'.format(h) for h in hosts]
        elif port == 443:
            out += ['https://{}'.format(h) for h in hosts]
        elif port == 3389:
            out += ['rdp://{}'.format(h) for h in hosts]
        elif port < 1000:
            # Drop any other port under 1000 as probably not an http(s) port.
            continue
        else:
            enc = sslports[port]
            unenc = hosts.difference(enc)
            out += ['http://{}:{}'.format(h, port) for h in unenc]
            out += ['https://{}:{}'.format(h, port) for h in enc]
    return out
if __name__=='__main__':
    # CLI entry: build a scan session from the standard nessus arguments and
    # emit one URL per line, ready to feed to EyeWitness.
    parser = nessus_scan_script_arg_parse('convert a syn scan to a file compatable with Eyewitness')
    clargs = parser.parse_args()
    sess = NessusScanSession(clargs.scan_no, clargs.NessusHost, clargs.AccessKey, clargs.SecretKey, history_id=clargs.history_id)
    scan = get_synscan(sess)
    print('\n'.join(scan))
| ElliotKaplan/nessus_scripts | nessus_scan_syn2ew.py | nessus_scan_syn2ew.py | py | 2,002 | python | en | code | 0 | github-code | 13 |
38625077985 | import random
import winsound
# Guessing game (Portuguese prompts): the player guesses which of 1..3 the
# program picked; entering 0 inside the loop quits.
n = int(input('Advinhe o número que estou pensando!!??\nde 0 à 3\n'))
numeros = [1,2,3]
lista = random.choice(numeros)
print(lista)
if n == lista:
    print('Você acertou \o/')
while lista != n:
    print('O Número Sorteado foi {}'.format(lista))
    print('Você errou!, Tente novamentem')
    # NOTE(review): this ANSI escape is missing its terminating "m"
    # ("\033[0;31;44m"), so the colour code leaks into the output.
    print('\033[0;31;44Caso queira sair do jogo, Digite 0')
    lista = random.choice(numeros)
    n = int(input('Advinhe o número que estou pensando!!??\nde 1 à 3\n'))
    if n == 0 :
        print('Good Bye :)')
        break
    if lista == n:
        print('\033[0;35;43mMiseravi, Acertou \o/\033[m')
        # winsound is Windows-only; plays a short victory beep.
        winsound.Beep(1000,500)
        winsound.PlaySound('', winsound.SND_FILENAME)
s = int(input('Digite start para jogar novamente')) | CleberSilva93/Study-DesenvolvimentoemPython | Advinhação2.0.py | Advinhação2.0.py | py | 769 | python | pt | code | 0 | github-code | 13 |
24781982994 | from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from bookshop_base.models import Book, Author, Rating, Stock, Publisher
from bookshop_base.serializers import (StockSerializer,
BookSerializer,
RatingSerializer)
BOOK_URL = '/api/book/'
class BookApiTest(TestCase):
    """Integration tests for the book API endpoints."""

    def setUp(self):
        # Authenticated API client plus the reference rows books point at.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'amin@gmail.com',
            'amin')
        self.client.force_authenticate(self.user)
        self.publisher1 = Publisher.objects.create(name='hekmataneh')
        self.publisher2 = Publisher.objects.create(name='haselmeire')
        self.author1 = Author.objects.create(
            first_name='Hesam', last_name='Mahboub')
        self.author2 = Author.objects.create(
            first_name='Ali', last_name='Ghaedi')

    def test_retrieve_book_list(self):
        """Retrieving a single book returns its serialized representation."""
        # Use the created row's pk instead of assuming it is 1; primary-key
        # sequences are not reset per test on every database backend.
        book = Book.objects.create(author=self.author1, name='Potato',
                                   price=10000, publisher=self.publisher1)
        res = self.client.get(f'{BOOK_URL}{book.id}/')
        serializer = BookSerializer(book)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_create_book_successful(self):
        """POSTing a valid payload creates the book."""
        payload = {'name': 'Cabbage', 'price': 88888,
                   'author': self.author1.id, 'publisher': self.publisher1.id}
        self.client.post(BOOK_URL, payload)
        exists = Book.objects.filter(name=payload['name']).exists()
        self.assertTrue(exists)
RATING_URL = '/api/rating/'
class RatingApiTest(TestCase):
    """Integration tests for the rating API endpoints."""
    def setUp(self):
        # Authenticated API client plus the minimal fixtures a Rating needs.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'Ali@gmail.com',
            'Ali')
        self.client.force_authenticate(self.user)
        self.author = Author.objects.create(
            first_name='Hesam', last_name='Mahboub')
        self.publisher = Publisher.objects.create(name='hekmataneh')
        self.book = Book.objects.create(author=self.author, name='Potato',
                                        price=1234, publisher=self.publisher)
    def test_retrieve_rating_list(self):
        # Retrieving a rating by id returns its serialized representation.
        # NOTE(review): the URL hard-codes pk 1 — works while the test DB
        # sequence starts at 1, but fragile across backends; confirm.
        Rating.objects.create(user=self.user, book=self.book, rating=2)
        res = self.client.get(RATING_URL + '1/')
        rating = Rating.objects.get(id=1)
        serializer = RatingSerializer(rating)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, serializer.data)
STOCK_URL = '/api/stock/'
class StockApiTest(TestCase):
    """Integration tests for the stock API endpoints."""

    def setUp(self):
        # Authenticated API client plus the minimal fixtures a Stock needs.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'Ali@gmail.com',
            'Ali')
        self.client.force_authenticate(self.user)
        self.author = Author.objects.create(
            first_name='Hesam', last_name='Mahboub')
        self.publisher = Publisher.objects.create(name='hekmataneh')
        self.book = Book.objects.create(author=self.author, name='Potato',
                                        price=1234, publisher=self.publisher)

    def test_retrieve_stock_item(self):
        """Retrieving a stock item returns its serialized representation."""
        # Fixes: use the created row's pk instead of assuming 1, and name the
        # local for what it holds (a Stock, not a Rating).
        stock = Stock.objects.create(quantity=120, book=self.book, in_stock=True)
        res = self.client.get(f'{STOCK_URL}{stock.id}/')
        serializer = StockSerializer(stock)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
| amin7mazaheri/haselmeier_test | bookshop_base/tests/test_views.py | test_views.py | py | 3,582 | python | en | code | 0 | github-code | 13 |
20854050373 | #%%
from os import environ
from typing import Set, Dict, List, Tuple, Union
#%%%%%%%%%%%%%%%%%#
# GET INPUT #
###################
# Overlap threshold: two scanners match when they share at least this many
# beacons (12 for the real input; use 4 with the small test input).
NB_FOR_COMPATIBLE = 12
#NB_FOR_COMPATIBLE = 4   # pair with "input_test.txt" below
# Each scanner becomes a set of (x, y, z) beacon tuples.
scanners = []
#with open("input_test.txt", "rt") as file_input:
with open("input.txt", "rt") as file_input:
    # A "--- scanner N ---" header starts a new block; every other non-empty
    # line is a comma-separated beacon coordinate.  The with-statement fixes
    # a leak: the handle was previously left open if parsing raised.
    for line in file_input:
        if line.strip() == "":
            continue
        elif line.startswith("---"):
            scanners.append(set())
            continue
        else:
            scanners[-1].add(tuple(int(x) for x in line.strip().split(',')))
del file_input, line
#%%%%%%%%%%%%%%%%#
# FONTIONS #
##################
# rot_2 = lambda scan : {( x, -y, -z) for x, y, z in scan} # legacy
# [T, rot_Z(T), rot_Z(rot_Z(T)), rot_Z(rot_Z(rot_Z(T))),\
# B, rot_Z(B), rot_Z(rot_Z(B)), rot_Z(rot_Z(rot_Z(B))),\
# F, rot_X(F), rot_X(rot_X(F)), rot_X(rot_X(rot_X(F))),\
# K, rot_X(K), rot_X(rot_X(K)), rot_X(rot_X(rot_X(K))),\
# L, rot_Y(L), rot_Y(rot_Y(L)), rot_Y(rot_Y(rot_Y(L))),\
# R, rot_Y(R), rot_Y(rot_Y(R)), rot_Y(rot_Y(rot_Y(R)))]
# pb with 8-rev, 17-rev, 21-rev
# 90-degree rotations of a whole set of (x, y, z) points about each axis,
# plus the corresponding 180-degree ("2") rotations.  Converted from lambda
# assignments to defs (PEP 8 E731); behaviour is unchanged.
def rot_X(scan):
    return {(x, -z, y) for x, y, z in scan}
def rot_Y(scan):
    return {(z, y, -x) for x, y, z in scan}
def rot_Z(scan):
    return {(-y, x, z) for x, y, z in scan}
def rot_2X(scan):
    return {(x, -y, -z) for x, y, z in scan}
def rot_2Y(scan):
    return {(-x, y, -z) for x, y, z in scan}
def rot_2Z(scan):
    return {(-x, -y, z) for x, y, z in scan}
def get_rotations(scan):
    """Return the 24 axis-aligned orientations of *scan*.

    Six choices for where the z axis points (Top/Bottom/Front/bacK/Right/
    Left), times four spins around that axis.
    """
    T = scan # z pointing Top
    B = rot_2Y(T) # z pointing Bottom
    F = rot_Y(scan) # z pointing Front
    K = rot_2Z(F) # z pointing bacK
    R = rot_X(scan) # z pointing Right
    L = rot_2X(R) # z pointing Left
    return [T, rot_Z(T), rot_2Z(T), rot_Z(rot_2Z(T)),\
            B, rot_Z(B), rot_2Z(B), rot_Z(rot_2Z(B)),\
            F, rot_X(F), rot_2X(F), rot_X(rot_2X(F)),\
            K, rot_X(K), rot_2X(K), rot_X(rot_2X(K)),\
            L, rot_Y(L), rot_2Y(L), rot_Y(rot_2Y(L)),\
            R, rot_Y(R), rot_2Y(R), rot_Y(rot_2Y(R))]
def are_compatible(delta1 : Set[Tuple[int]], delta2 : Set[Tuple[int]]) -> bool:
    """True when the two delta sets share at least NB_FOR_COMPATIBLE points."""
    shared = delta1 & delta2
    return len(shared) >= NB_FOR_COMPATIBLE
def get_deltas(ref : Tuple[int], scan : Set[Tuple[int]]) -> Set[Tuple[int]]:
    """Translate every point of *scan* so that *ref* becomes the origin."""
    ref_x, ref_y, ref_z = ref
    return {(px - ref_x, py - ref_y, pz - ref_z) for px, py, pz in scan}
def get_tranformated(scan1 : Set[Tuple[int]], scan2 : Set[Tuple[int]]):
    """Try to align *scan2* with *scan1*.

    Brute-force: for every anchor point of scan1 and every rotation/anchor
    point of scan2, compare the anchor-relative delta sets.  On a match,
    return (scan2's points expressed in scan1's frame, scanner 2's position);
    otherwise return (empty set, (0, 0, 0)).
    """
    for s1 in scan1:
        delta1 = get_deltas(s1, scan1)
        for n_rot, rscan2 in enumerate(get_rotations(scan2)):
            for s2 in rscan2:
                delta2 = get_deltas(s2, rscan2)
                if are_compatible(delta1, delta2):
                    print(f"worked whith rotation n°{n_rot}")
                    x0, y0, z0 = s1
                    # Translate the matched deltas back into scan1's frame.
                    return {(x0+dx, y0+dy, z0+dz) for dx, dy, dz in delta2},\
                           (s1[0]-s2[0], s1[1]-s2[1], s1[2]-s2[2])
    return set(), (0,0,0)
##################
# MAIN #
##################
# Fold every scanner into scanner 0's reference frame, retrying whole passes
# until one adds nothing new (later scanners may only match once earlier
# ones have been merged in).
main_scan = scanners[0].copy()
scanners_coord = [(0,0,0)]
done = set([0])
changes = True
while changes:
    changes = False
    for index, scan in enumerate(scanners):
        if not index in done:
            res, scan_coord = get_tranformated(main_scan, scan)
            if len(res) > 0:
                print(f"- scan {index} matched")
                changes = True
                done.add(index)
                main_scan = main_scan.union(res)
                scanners_coord.append(scan_coord)
            else :
                print(f"scan {index} not matching")
print(set(range(len(scanners))).symmetric_difference(done) ,"n'ont pas marché")
# Part 1: number of distinct beacons.
print("Résultat pb1 :", len(main_scan))
# %%
# Part 2: largest Manhattan distance between any two scanner positions.
manhattan_dist = lambda s1, s2 : abs(s1[0]-s2[0]) + abs(s1[1]-s2[1]) + abs(s1[2]-s2[2])
max_distance = -1
for i, s1 in enumerate(scanners_coord):
    for j, s2 in enumerate(scanners_coord[i+1:]):
        max_distance = max(manhattan_dist(s1, s2), max_distance)
print("Résultat pb2 :", max_distance)
# %%
| AdrienGuimbal/AdventOfCode2021 | Day19/scanners.py | scanners.py | py | 3,747 | python | en | code | 0 | github-code | 13 |
7246338364 | # Read text from a file, and count the occurence of words in that text
# Example:
# count_words("The cake is done. It is a big cake!")
# --> {"cake":2, "big":1, "is":2, "the":1, "a":1, "it":1}
def read_file_content(filename):
    """Return the lines of *filename*, newlines kept (as from readlines()).

    Fixes: the old trailing `return "Hello World"` was unreachable — the
    with-block always returns (or the open() error propagates) — so it and
    the needless intermediate variable are gone.
    """
    with open(filename) as f:
        return f.readlines()
def count_words(filename="./story.txt"):
    """Count the occurrences of each word in a text file.

    Lines are lowercased and split on single spaces; punctuation stays
    attached to its word (unchanged from the original behaviour).

    Args:
        filename: path of the text file (defaults to the previously
            hard-coded "./story.txt", so existing callers are unaffected).
    Returns:
        dict mapping word -> occurrence count ({} for an empty file).
    """
    # Fixes two defects: `for key in list(d.keys()): return d` returned from
    # the first loop iteration by accident, and the unreachable placeholder
    # `return {"as": 10, "would": 20}` masked empty files with fake counts.
    text = read_file_content(filename)
    counts = dict()
    for line in text:
        for word in line.strip().lower().split(" "):
            counts[word] = counts.get(word, 0) + 1
    return counts
print (count_words()) | oputaolivia/Reading-Text-File | Reading-Text-Files/main.py | main.py | py | 840 | python | en | code | 0 | github-code | 13 |
20199180544 | from re import fullmatch
def fullrange(start, end):
    """Inclusive range from start to end, counting in whichever direction is
    needed (fullrange(3, 1) -> 3, 2, 1)."""
    # Renamed `dir` -> `step`: `dir` shadowed the builtin of the same name.
    step = 1 if start <= end else -1
    return range(start, end + step, step)
def apply_lines_rules(field, rules, diag=False):
    """Stamp each vent line from *rules* onto *field* (+1 per covered cell).

    Rules are (x1, y1, x2, y2) tuples; horizontal and vertical lines are
    always drawn, 45-degree diagonals only when *diag* is true.  Anything
    else is reported and skipped.
    """
    for rule in rules:
        x1, y1, x2, y2 = rule
        if x1 == x2:
            for y in fullrange(y1, y2):
                field[x1][y] += 1
        elif y1 == y2:
            for x in fullrange(x1, x2):
                field[x][y1] += 1
        elif diag:
            xs = list(fullrange(x1, x2))
            ys = list(fullrange(y1, y2))
            if len(xs) != len(ys):
                # Not a 45-degree diagonal.
                print(f"Skipping {rule}")
            else:
                for px, py in zip(xs, ys):
                    field[px][py] += 1
        else:
            print(f"Skipping {rule}")
def main(args):
    """Read vent lines from 5.txt, stamp them onto a grid (diagonals
    included) and print how many cells are covered by more than one line
    (Advent of Code 2021 day 5, part 2)."""
    rules = []
    with open("5.txt", "r") as input_file:
        for line in input_file:
            if len(line.strip()) == 0:
                continue
            # Each line has the form "x1,y1 -> x2,y2".
            match = fullmatch("([0-9]+),([0-9]+) -> ([0-9]+),([0-9]+)",
                              line.strip())
            if match is None:
                raise Exception(f"Invalid line: {line}")
            rules.append(
                (int(match.group(1)), int(match.group(2)),
                 int(match.group(3)), int(match.group(4))))
    # Square field large enough for the biggest coordinate seen.
    bounds = max([max(x) for x in rules]) + 1
    field = []
    for i in range(bounds):
        field += [[0] * bounds]
    apply_lines_rules(field, rules, True)
    # Count cells crossed by at least two lines.
    duplicates = 0
    for row in field:
        for cell in row:
            if cell > 1:
                duplicates += 1
    print(f"Result: {duplicates}")
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| p-f/adventofcode2021 | 5.py | 5.py | py | 1,787 | python | en | code | 0 | github-code | 13 |
18719871535 | import sys, os
import torch
from torch.utils.data import DataLoader
from config import parse_args, get_vrd_cfg
from utils.register_dataset import register_vrd_dataset
from utils.trainer import CustomTrainer
from utils.dataset import VRDDataset
from modeling.reltransr import RelTransR
def finetune_detectron2():
    """Fine-tune the detectron2 Faster R-CNN module on the VRD dataset."""
    cfg = get_vrd_cfg()
    # Register Dataset (only vrd for now).
    register_vrd_dataset('vrd')
    # Finetune FasterRCNN module; checkpoints land in cfg.OUTPUT_DIR.
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = CustomTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
def main():
    """Entry point: build the VRD train dataloader and the RelTransR model.

    NOTE(review): training is unfinished — criterion and optimizer are still
    placeholders and no loop runs after model construction.
    """
    args = parse_args()
    print('Called with args:')
    print(args)
    # The model requires a GPU; bail out early otherwise.
    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")
    train_dataset = VRDDataset(set_type='train')
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
    cfg = get_vrd_cfg(args)
    model = RelTransR(cfg)
    #criterion = CUSTOM CRITERION GOES HERE MULTIPLE CRITERIONS WHICH ALTERNATIVELY COMPUTE
    #optimizer = OPTIMIZER CHOICE GOES HERE
if (__name__ == '__main__'):
    main()
| herobaby71/vltranse | src/train_net.py | train_net.py | py | 1,199 | python | en | code | 0 | github-code | 13 |
6713817806 | import cv2
import numpy as np
filepath1 = r"images\LM-world2.PNG"
img = cv2.imread(filepath1)
# Shrink the image to half size before feature extraction.
scale_percent = 50 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Create the SIFT feature extractor.
sift = cv2.SIFT_create()
#sift = cv2.xfeatures2d.SIFT_create()
# Detect keypoints and compute their descriptors.
keypoints, descriptor = sift.detectAndCompute(gray, None)
print('keypoint:',len(keypoints), 'descriptor:', descriptor.shape)
print(descriptor)
# Draw the keypoints (the rich flag shows size and orientation too).
img_draw = cv2.drawKeypoints(img, keypoints, None, \
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show the result window.
cv2.imshow('SIFT', img_draw)
cv2.waitKey()
cv2.destroyAllWindows() | forfsoft/PythonComponents | ImageMatch/featureImg.py | featureImg.py | py | 868 | python | en | code | 0 | github-code | 13 |
5359900543 | import torch
class BaseSynthesizer:
    """Persistence and explainability helpers shared by synthesizer models.

    Assumes subclasses provide `_device`, `_discriminator` and
    `set_device()` — TODO confirm against the concrete synthesizers.
    """
    def save(self, path):
        # Move to CPU before pickling so the file loads on GPU-less hosts,
        # then restore the previous device.
        device_backup = self._device
        self.set_device(torch.device("cpu"))
        torch.save(self, path)
        self.set_device(device_backup)
    def xai_discriminator(self, data_samples):
        # Raw discriminator score for the given samples (for explainability).
        discriminator_predict_score = self._discriminator(data_samples)
        return discriminator_predict_score
    @classmethod
    def load(cls, path):
        # NOTE(review): torch.load unpickles arbitrary objects — load only
        # trusted files.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model = torch.load(path)
        model.set_device(device)
        return model
| sunchang0124/dp_cgans | src/dp_cgans/synthesizers/base.py | base.py | py | 594 | python | en | code | 22 | github-code | 13 |
12384033729 | '''
def tax(*args):
income = 1300
rate = 10
calc_tax = income * rate / 100
print('Tax is ', calc_tax)
tax(1700, 10)
'''
menu = {
1: {"name": 'espresso',
"price": 1.99},
2: {"name": 'coffee',
"price": 2.50},
3: {"name": 'cake',
"price": 2.79},
4: {"name": 'soup',
"price": 4.50},
5: {"name": 'sandwich',
"price": 4.99}
}
def calculate_subtotal(**kwargs):
    """ Calculates the subtotal of an order

    Sums the price passed for every item keyword, e.g.
    calculate_subtotal(cake=2.79, tea=1.99) -> 4.78.

    Args:
        **kwargs: item name -> price mapping
    Returns:
        float: sum of the item prices, rounded to 2 decimal places
    """
    print('Calculating bill subtotal...')
    # Fixes: the old loop iterated over kwargs' *keys* and tried to unpack
    # each key string into two names (a ValueError for most item names),
    # never added anything to the total, and left dead code after `return`.
    subtotal = sum(kwargs.values())
    return round(subtotal, 2)
print(calculate_subtotal(cake=123, tea=432))
calculate_subtotal()
| Chukwukaoranile/learning_notes | tax.py | tax.py | py | 1,037 | python | en | code | 0 | github-code | 13 |
27185473698 | """
-------------------------------------------------------
[program description]
-------------------------------------------------------
Author: Daniel James
ID: 210265440
Email: jame5440@mylaurier.ca
__updated__ = "2022-02-06"
-------------------------------------------------------
"""
from List_array import List
from utilities import array_to_list, list_to_array
# Round-trip demo: copy `source` into the linked list, print the list, then
# copy the list back into the array and print that.
llist = List()
source = [1, 2, 3, 4, 5]
array_to_list(llist, source)
print("LList: ")
for value in llist:
    print(value)
list_to_array(llist, source)
print()
print("List: ")
for value in source:
print(value) | danij12/Data-Structures | jame5440_l04/src/t06.py | t06.py | py | 595 | python | en | code | 1 | github-code | 13 |
3665257030 | from message import Message
from message import Respond
import socket
import logfile
class Sock():
    """Small TCP client that sends one Message and reads a 2-byte reply."""
    def __init__(self, server_ip="127.0.0.1", server_port=7796):
        self.__ip = server_ip
        self.__port = server_port
        self.__address = (self.__ip, self.__port)
        self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # The server answers with a fixed 2-byte status; byte [1] == '1' is OK.
        self.__respond_size = 2
        self.__logger = logfile.somewhere_logger()
    def __handle_respond(self):
        # Read the status reply; True when the second byte is '1'.
        # NOTE(review): the `return` inside `finally` swallows any exception
        # raised above (not just socket errors) — confirm this is intended.
        success = False
        try:
            self.__respond = self.__sock.recv(self.__respond_size)
            print(self.__respond.decode()[1])
            self.__respond = self.__respond.decode()
            if self.__respond[1] == '1':
                success = True
        except socket.error as ex:
            success = False
            self.__logger.somewhere_error("recv message error:", ex)
        finally:
            return success
    def load_message(self, mess: Message):
        # Stash the message to be sent by send_message().
        self.__message: Message = mess
    def send_message(self, wait=True):
        # Connect, send the loaded message, optionally await the reply.
        # NOTE(review): with wait=False the socket is only closed on error,
        # and after an error with wait=True it is closed twice — verify.
        success = True
        try:
            self.__sock.connect(self.__address)
            self.__sock.sendall(self.__message.get_message().encode())
            if wait:
                success = self.__handle_respond()
        except Exception as ex:
            success = False
            self.__sock.close()
            self.__logger.somewhere_error("send message error:", ex)
        finally:
            if wait:
                self.__sock.close()
            return success
| ddkddown/py_client | source/sock.py | sock.py | py | 1,532 | python | en | code | 0 | github-code | 13 |
33298054745 | import os
import json
import openpyxl as op
from datetime import datetime, timedelta, date
from pathlib import Path
src_path = Path(__file__).parent
main_path = src_path.parents[1]
data_path = src_path.parent / 'data'
correo_path = src_path.parent / 'email bot'
excel_path = src_path.parent / 'excel'
def verificacion_carpetas():
    """Ensure the essential data/excel folders exist, creating the missing ones.

    Returns True when all essential folders already existed, None otherwise
    (preserving the original control flow for the creation branch).
    """
    if os.path.exists(data_path) and os.path.exists(correo_path) and os.path.exists(excel_path):
        print("Carpetas esenciales existen")
        # BUG FIX: the bare `True` expression had no effect; return it.
        return True
    else:
        print("Creacion de carpetas esenciales")
        if not os.path.exists(data_path):
            os.mkdir(f'{data_path}')
        if not os.path.exists(excel_path):
            os.mkdir(f'{excel_path}')
        # NOTE(review): correo_path is checked above but never created here —
        # presumably it always exists (it contains this script); confirm.
        print("Carpetas creadas")
def verificacion_archivo():
    # Check that the two required input files exist in data_path; raise so
    # the caller stops early when they are missing.
    if os.path.exists(data_path / 'correo.json') and os.path.exists(data_path / 'Resale certificate Camlem 2021.pdf'):
        print("Archivos 'correo.json' y 'Resale certificate Camlem 2021.pdf' se han encontrado")
        return True
    else:
        raise Exception(f"Archivos 'correo.json' o 'Resale certificate Camlem 2021.pdf' no encontrados en la carpeta {data_path}")
def archivo_fijo():
    # Path of the fixed PDF attachment shipped with the bot.
    ruta = data_path / 'Resale certificate Camlem 2021.pdf'
    return ruta
def read_json():
    # Load the e-mail configuration from data_path/correo.json.
    with open(data_path / 'correo.json', encoding='utf-8') as archivo_json:
        return json.load(archivo_json)
def download_path():
    # First directory under main_path whose name contains "download";
    # implicitly returns None when none is found.
    for root, _, _ in os.walk(main_path):
        if('download' in root.lower()):
            return root
def obtencion_archivos(nombre_carpeta):
    # Map each download subfolder whose name contains nombre_carpeta to the
    # list of files inside it.
    listado_carpeta = os.listdir(download_path())
    # Collect every folder that matches the requested name fragment.
    carpetas = {}
    for carpeta in listado_carpeta:
        if nombre_carpeta in carpeta:
            carpetas[f'{download_path()}\\{carpeta}'] = os.listdir(f'{download_path()}\\{carpeta}')
    return carpetas
def listar_fechas(fecha_inicio, fecha_fin):
    """Return every date from *fecha_inicio* to *fecha_fin* inclusive as
    "YYYY-MM-DD" strings, or False when the interval is reversed.
    """
    fecha_inicio = datetime.strptime(fecha_inicio, "%Y-%m-%d")
    fecha_fin = datetime.strptime(fecha_fin, "%Y-%m-%d")
    if fecha_inicio <= fecha_fin:
        # BUG FIX: the old equal-dates branch returned a raw datetime object
        # instead of a formatted string; a single inclusive range covers both
        # cases and always yields strings.
        return [(fecha_inicio + timedelta(days=d)).strftime("%Y-%m-%d")
                for d in range((fecha_fin - fecha_inicio).days + 1)]
    else:
        print("La fecha de inicio es una fecha mayor a la fecha final. Ingrese un intervalo de fechas correcta.")
        return False
def eliminar_log_error():
    # Delete the previous run's log file, if any.
    if os.path.exists(f'{src_path.parent}\\log.txt'):
        os.remove(f'{src_path.parent}\\log.txt')
def log_error(carpeta, archivo):
    # Append a timestamped error entry to log.txt, creating it on first use.
    if os.path.exists(f'{src_path.parent}\\log.txt') == False:
        with open(f'{src_path.parent}\\log.txt', 'w') as error:
            error.write(f'*{datetime.today()} ---- Ha ocurrido un error con el archivo {archivo} en la carpeta {carpeta}' + "\n")
    else:
        with open(f'{src_path.parent}\\log.txt', 'a') as error:
            error.write(f'*{datetime.today()} ---- Ha ocurrido un error con el archivo {archivo} en la carpeta {carpeta}' + "\n")
def crear_archivo_excel(nombre_archivo):
    # Open today's report workbook, or create it with an "Informe" sheet and
    # its header row when it does not exist yet.
    if os.path.exists(f'{excel_path}\\{date.today()}-{nombre_archivo}.xlsx'):
        archivo = op.load_workbook(f'{excel_path}\\{date.today()}-{nombre_archivo}.xlsx')
        return archivo
    else:
        archivo = op.Workbook()
        archivo.worksheets[0].title = "Informe"
        # NOTE(review): get_sheet_by_name is deprecated in openpyxl; the
        # indexed access archivo["Informe"] is the modern spelling.
        hoja = archivo.get_sheet_by_name("Informe")
        # Given preset column width.
        hoja.column_dimensions['A'].width = 25
        # Place the header.
        hoja.cell(row = 1, column = 1).value = "Nº ORDEN"
        return archivo
def escribir_excel(archivo_a_modificar, orden):
    # Find the sheet to modify (the active one in each created file).
    hoja_a_modificar = archivo_a_modificar.active
    # Select the last used row.
    ultima_fila = hoja_a_modificar.max_row
    # Write the order number in column one of the next row.
    hoja_a_modificar.cell(row = ultima_fila+1, column = 1).value = orden
def guardar_archivo_excel(archivo, nombre_archivo):
# Guardo
archivo.save(f'{excel_path}\\{date.today()}-{nombre_archivo}.xlsx')
# Finalizo la sesion del archivo
archivo.close() | DylanVicharra/Bot-Correo | email bot/archivos.py | archivos.py | py | 4,409 | python | es | code | 0 | github-code | 13 |
28492426233 | import json
import httpx
from typing import Union
class vanity_client:
    """Read-only helpers around Discord's public invite API.

    The lookups are stateless; they are exposed as static methods (fixing
    the original definitions, which had no `self` and therefore broke when
    called on an instance).  Class-level calls behave exactly as before.
    """

    def __init__(self) -> None:
        pass

    @staticmethod
    def vanity_taken(vanity: str) -> bool:
        """True when the vanity invite code already exists."""
        result = httpx.get(f"https://discord.com/api/v9/invites/{vanity}")
        if result.status_code == 200:
            return True
        else:
            return False

    @staticmethod
    def guild_id(vanity: str) -> Union[None, str]:
        """Guild id behind the vanity, or None when it does not resolve."""
        result = httpx.get(f"https://discord.com/api/v9/invites/{vanity}")
        if result.status_code == 200:
            return result.json()["guild"]["id"]
        else:
            return None

    @staticmethod
    def guild_boost_count(vanity: str) -> Union[None, int]:
        """Boost count of the guild, or None when the vanity does not resolve.
        (Annotation fixed: the old `-> int` hid the None case.)"""
        result = httpx.get(f"https://discord.com/api/v9/invites/{vanity}")
        if result.status_code == 200:
            return result.json()["guild"]["premium_subscription_count"]
        else:
            return None

    @staticmethod
    def guild_name(vanity: str) -> Union[None, str]:
        """Guild name, or None when the vanity does not resolve."""
        result = httpx.get(f"https://discord.com/api/v9/invites/{vanity}")
        if result.status_code == 200:
            return result.json()["guild"]["name"]
        else:
            return None

    @staticmethod
    def guild_description(vanity: str) -> Union[None, str]:
        """Guild description, or None when the vanity does not resolve."""
        result = httpx.get(f"https://discord.com/api/v9/invites/{vanity}")
        if result.status_code == 200:
            return result.json()["guild"]["description"]
        else:
            return None
def change_vanity_code(guild_id: int, vanity: str, token: str) -> bool:
try:
payload = {'code': vanity}
headers = {
"Authorization": f"{token}",
"Content-Type": "application/json",
}
response = httpx.patch(f"https://discord.com/api/v9/guilds/{guild_id}/vanity-url", data=json.dumps(payload), headers=headers)
if response.status_code == 200:
return True
else:
return False
except:
return False | NotKatsu/Discord-Vanity-Sniper | helpers/vanity.py | vanity.py | py | 2,044 | python | en | code | 0 | github-code | 13 |
72106315859 |
class Node:
    ## Node of a linked List has priority as well
    def __init__(self,value,priority):
        # data: stored value; link: next node; prt: priority (lower = better).
        self.data = value
        self.link = None
        self.prt = priority
class PriorityQ:
    """Linked-list priority queue; the lowest `prt` value sits at the front.

    Equal priorities keep FIFO order.  Only a front reference is needed.
    """
    def __init__(self):
        self.front = None
        self.size = 0
    def isEmpty(self):
        # BUG FIX: `self` was missing, so q.isEmpty() raised TypeError.
        return self.size == 0
    def enqueue(self, item, prt):
        # Wrap the value, then insert it behind all nodes of <= priority so
        # that equal priorities dequeue first-in-first-out.
        temp = Node(item, prt)
        if self.size == 0 or temp.prt < self.front.prt:
            # Empty queue or new best priority: insert at the front.
            temp.link = self.front
            self.front = temp
        else:
            # Walk to the last node whose priority is <= the new one.
            p = self.front
            while p.link is not None and p.link.prt <= temp.prt:
                p = p.link
            temp.link = p.link
            p.link = temp
        self.size += 1
    def dequeue(self):
        # Remove and report the front (best-priority) element.
        if self.size == 0:
            print("Nothing to delete")
        else:
            data = self.front
            self.front = self.front.link
            # BUG FIX: keep size in sync (it was never decremented).
            self.size -= 1
            # BUG FIX: print the stored value, not the Node object's repr.
            print('{} was deleted'.format(data.data))
def main():
    """Demo driver: underflow message, 20 ordered inserts, traversal."""
    q = PriorityQ()
    # Dequeue on an empty queue exercises the "Nothing to delete" branch.
    q.dequeue()
    for i in range(20):
        q.enqueue(i,i+10)
    # Priorities 10..29 now exist; priority 23 places 100000 after the
    # existing priority-23 entry (FIFO among equal priorities).
    q.enqueue(100000,23)
    # Walk the linked list front to back, printing (value, priority).
    p = q.front
    while p.link != None :
        print(p.data,p.prt)
        p = p.link
    print(p.data,p.prt)
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
17039883234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceInstitutionScopeModifyModel(object):
    """Request model for alipay.ebpp.invoice.institution.scope.modify.

    Each request field is exposed through a property backed by a private
    attribute.  The to/from-dict conversion previously repeated the same
    per-field logic ten times; it is now driven by the field tables below,
    with behavior identical to the generated SDK code.
    """

    # All request fields, in the order the generated code emitted them
    # into the request dict.
    _FIELDS = (
        'account_id',
        'adapter_type',
        'add_owner_id_list',
        'add_owner_open_id_list',
        'agreement_no',
        'enterprise_id',
        'institution_id',
        'owner_type',
        'remove_owner_id_list',
        'remove_owner_open_id_list',
    )
    # Subset of _FIELDS whose values are lists of elements.
    _LIST_FIELDS = frozenset((
        'add_owner_id_list',
        'add_owner_open_id_list',
        'remove_owner_id_list',
        'remove_owner_open_id_list',
    ))

    def __init__(self):
        self._account_id = None
        self._adapter_type = None
        self._add_owner_id_list = None
        self._add_owner_open_id_list = None
        self._agreement_no = None
        self._enterprise_id = None
        self._institution_id = None
        self._owner_type = None
        self._remove_owner_id_list = None
        self._remove_owner_open_id_list = None

    @property
    def account_id(self):
        return self._account_id

    @account_id.setter
    def account_id(self, value):
        self._account_id = value

    @property
    def adapter_type(self):
        return self._adapter_type

    @adapter_type.setter
    def adapter_type(self, value):
        self._adapter_type = value

    @property
    def add_owner_id_list(self):
        return self._add_owner_id_list

    @add_owner_id_list.setter
    def add_owner_id_list(self, value):
        # Generated-SDK behavior: non-list assignments are ignored, and a
        # shallow copy of the provided list is stored.
        if isinstance(value, list):
            self._add_owner_id_list = list(value)

    @property
    def add_owner_open_id_list(self):
        return self._add_owner_open_id_list

    @add_owner_open_id_list.setter
    def add_owner_open_id_list(self, value):
        if isinstance(value, list):
            self._add_owner_open_id_list = list(value)

    @property
    def agreement_no(self):
        return self._agreement_no

    @agreement_no.setter
    def agreement_no(self, value):
        self._agreement_no = value

    @property
    def enterprise_id(self):
        return self._enterprise_id

    @enterprise_id.setter
    def enterprise_id(self, value):
        self._enterprise_id = value

    @property
    def institution_id(self):
        return self._institution_id

    @institution_id.setter
    def institution_id(self, value):
        self._institution_id = value

    @property
    def owner_type(self):
        return self._owner_type

    @owner_type.setter
    def owner_type(self, value):
        self._owner_type = value

    @property
    def remove_owner_id_list(self):
        return self._remove_owner_id_list

    @remove_owner_id_list.setter
    def remove_owner_id_list(self, value):
        if isinstance(value, list):
            self._remove_owner_id_list = list(value)

    @property
    def remove_owner_open_id_list(self):
        return self._remove_owner_open_id_list

    @remove_owner_open_id_list.setter
    def remove_owner_open_id_list(self, value):
        if isinstance(value, list):
            self._remove_owner_open_id_list = list(value)

    def to_alipay_dict(self):
        """Serialize set (truthy) fields into a plain dict for the gateway."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, empty list/str) are omitted entirely,
                # matching the generated SDK behavior.
                continue
            if name in self._LIST_FIELDS and isinstance(value, list):
                # Convert nested model elements in place, as the generated
                # code did.
                for i in range(0, len(value)):
                    element = value[i]
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty/missing input."""
        if not d:
            return None
        o = AlipayEbppInvoiceInstitutionScopeModifyModel()
        for name in AlipayEbppInvoiceInstitutionScopeModifyModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayEbppInvoiceInstitutionScopeModifyModel.py | AlipayEbppInvoiceInstitutionScopeModifyModel.py | py | 8,069 | python | en | code | 241 | github-code | 13 |
25917843479 | #! /usr/bin/env python3
from PIL import ImageColor
from datetime import datetime
from vicariouspanel import NodeyezPanel
import sys
import vicarioustext
class UTCClockPanel(NodeyezPanel):
    """Panel rendering the current UTC weekday, date and time."""

    def __init__(self):
        """Instantiates a new UTC Clock panel"""
        # Configuration keys this panel understands: the first group maps
        # legacy key names onto current attribute names; the second group
        # is the panel-specific keys themselves.
        self.configAttributes = {
            "colorBackground": "backgroundColor",
            "colorTextDate": "dateTextColor",
            "colorTextDayOfWeek": "dayOfWeekTextColor",
            "colorTextTime": "timeTextColor",
            "sleepInterval": "interval",
            "dateTextColor": "dateTextColor",
            "dayOfWeekTextColor": "dayOfWeekTextColor",
            "timeTextColor": "timeTextColor",
        }
        # Defaults for every panel-specific attribute.
        self._defaultattr("dateTextColor", "#f1c232")
        self._defaultattr("dayOfWeekTextColor", "#e69138")
        self._defaultattr("footerEnabled", False)
        self._defaultattr("headerEnabled", False)
        self._defaultattr("interval", 30)
        self._defaultattr("timeTextColor", "#6aa84f")
        self._defaultattr("watermarkEnabled", False)
        # Let the base panel finish initialization.
        super().__init__(name="utcclock")

    def fetchData(self):
        """Fetches all the data needed for this panel"""
        self.now = datetime.utcnow()

    def run(self):
        super().startImage()
        # Three rows -- weekday, date, time -- each centered within its own
        # third of the panel at the largest font size that fits.
        rows = (
            (self.now.strftime("%A"), self.dayOfWeekTextColor, 1),
            (self.now.strftime("%d %b %Y"), self.dateTextColor, 3),
            (self.now.strftime("%H:%M:%S"), self.timeTextColor, 5),
        )
        for label, colorname, sixths in rows:
            fontsize, _, _ = vicarioustext.getmaxfontsize(self.draw, label, self.width, self.height//3, True)
            vicarioustext.drawcenteredtext(self.draw, label, fontsize, self.width//2, self.height*sixths//6, ImageColor.getrgb(colorname))
        super().finishImage()
# --------------------------------------------------------------------------------------
# Entry point if running this script directly
# --------------------------------------------------------------------------------------
if __name__ == '__main__':
    p = UTCClockPanel()
    # If arguments were passed in, treat as a single run
    if len(sys.argv) > 1:
        if sys.argv[1] in ['-h','--help']:
            print(f"Generates a simple output of the date and time in UTC and weekday")
            print(f"Usage:")
            print(f"1) Call without arguments to run continuously using the configuration or defaults")
            print(f"2) Pass an argument other than -h or --help to run once and exit")
        else:
            # Any other argument: fetch once, render once, then exit.
            p.fetchData()
            p.run()
        exit(0)
    # Continuous run
    p.runContinuous()
14278530986 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 26 17:51:11 2022
@author: Rubyxu
"""
import datetime
import numpy as np
from matplotlib import pyplot as plt, dates
import seaborn as sns
import pandas as pd
import pickle
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import RandomizedSearchCV
import time
# ---- Data loading -------------------------------------------------------
data = pd.read_csv('/rigel/home/yx2693/albedo_df_updated.csv')
data = data.drop('AL2', axis=1)
data = data.sample(frac = 0.1, random_state=42) # for testing

# Target is the albedo column; everything else is a predictor.
y = np.array(data['albedo'])
X = data.drop(['albedo'], axis = 1) #try dropping some variable 1125
X_list = list(X.columns)
#X = np.array(X)

# ---- Log transformation -------------------------------------------------
# Clamp each flux/precipitation predictor to a tiny positive floor first
# so that log() is defined everywhere.
X.loc[X.ME<1e-10,'ME'] = 1e-10
X.loc[X.RF<1e-10,'RF'] = 1e-10
X.loc[X.SF<1e-10,'SF'] = 1e-10
X.loc[X.CD<1e-10,'CD'] = 1e-10
X.loc[X.CM<1e-10,'CM'] = 1e-10
X.ME = np.log(X.ME)
X.RF = np.log(X.RF)
X.SF = np.log(X.SF)
X.CD = np.log(X.CD)
X.CM = np.log(X.CM)

'''
X_train = X[:int(X.shape[0]*0.8)]
y_train = y[:int(y.shape[0]*0.8)]
X_test = X[int(X.shape[0]*0.8):]
y_test = y[int(y.shape[0]*0.8):]
'''
# ---- Train/test split and scaling --------------------------------------
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)

# Fit the scalers on the training split only, then apply to both splits.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
#X_train = np.hstack([np.ones((X_train.shape[0], 1)), X_train])
#X_test = np.hstack([np.ones((X_test.shape[0], 1)), X_test])

# Targets are min-max scaled into [0, 1] using training statistics.
minmax = MinMaxScaler()
y_train = pd.Series(minmax.fit_transform(pd.DataFrame(y_train)).reshape(1,-1)[0])
y_test = pd.Series(minmax.transform(pd.DataFrame(y_test)).reshape(1,-1)[0])

'''
#truncated svd
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=14, algorithm='randomized',
                   random_state=42)
svd.fit(X_train)
X_train = svd.transform(X_train)
X_test = svd.transform(X_test)
'''
'''
#pca
from sklearn.decomposition import PCA
pca = PCA(n_components=15)#X_train.shape[1])
pca.fit(X_train)
print(pca.explained_variance_)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
'''

# ---- Hyperparameter tuning ----------------------------------------------
n_estimators = [200,500]##[10, 50, 200]#[200, 300, 500, 1000]
#max_features = ['auto', 'sqrt']
max_depth = [20, 100]##[5,20,100]#[int(x) for x in np.linspace(5, 100, num = 5)]
#max_depth.append(None)
#min_samples_split = [2,10]##[2, 10]#[2, 5, 10]
#min_samples_leaf = [1, 2, 4]
#bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators,
               #'max_features': max_features,
               'max_depth': max_depth}
               #'min_samples_split': min_samples_split}
               #'min_samples_leaf': min_samples_leaf,
               #'bootstrap': bootstrap}
print('Hyperparameter options: ', random_grid)

# Randomized search over the (small) grid, 3-fold CV, all cores.
start = time.time()
rf = RandomForestRegressor()
rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid,
                               n_iter = 10, cv = 3, verbose=2, random_state=42, n_jobs = -1)
rf_random.fit(X_train, y_train)
end = time.time()
print('Time for random search: ', end-start)

# ---- Refit the best model and evaluate ----------------------------------
bp = rf_random.best_params_
print('Best params: ', bp)
rf_best = RandomForestRegressor(n_estimators = bp['n_estimators'],
                                #min_samples_split = bp['min_samples_split'],
                                #min_samples_leaf = bp['min_samples_leaf'],
                                #max_features = bp['max_features'],
                                max_depth = bp['max_depth'],
                                #bootstrap = bp['bootstrap'],
                                random_state = 42)
'''
s = time.time()
rf_best = RandomForestRegressor(n_estimators = 500,
                                min_samples_split = 10,
                                #min_samples_leaf = 2,
                                #max_features = 'sqrt',
                                max_depth = 100,
                                #bootstrap = False,
                                random_state = 42)
'''
rf_best.fit(X_train, y_train)
#e = time.time()
#print('time for one fit:', e-s)

y_pred_train = rf_best.predict(X_train)
r2_train = r2_score(y_train, y_pred_train)
print("training R^2 : % f" %(r2_train))
y_pred = rf_best.predict(X_test)
r2 = r2_score(y_test, y_pred)
print("test R^2 : % f" %(r2))

# ---- Persist the trained model ------------------------------------------
filename = '/rigel/home/yx2693/albedo_df_updated/rf_wpre_model_01.sav'
# Bug fix: ``pickle.dump(rf_best, open(filename, 'wb'))`` leaked the file
# handle; a context manager guarantees the file is flushed and closed.
with open(filename, 'wb') as model_file:
    pickle.dump(rf_best, model_file)
#<UNI>@habanero.rcs.columbia.edu
| rootsnquery/tedesco-project | random_forest_wpre_dimred.py | random_forest_wpre_dimred.py | py | 4,699 | python | en | code | 4 | github-code | 13 |
2434068153 | #!/usr/bin/env python3
# Standard library.
import datetime
import typing
import unittest
# Internal packages.
import phile.notify
class TestEntry(unittest.TestCase):
    """Unit tests for :class:`phile.notify.Entry` construction and defaults."""

    def test_construct_signatures(self) -> None:
        # Name-only and fully-specified keyword constructions both work.
        phile.notify.Entry(name="n")
        phile.notify.Entry(
            name="n",
            text="t",
            modified_at=datetime.datetime.now(),
        )

    def test_available_attributes(self) -> None:
        # Constructor arguments are exposed unchanged as attributes.
        timestamp = datetime.datetime.now()
        entry = phile.notify.Entry(
            name="n",
            text="t",
            modified_at=timestamp,
        )
        self.assertEqual(entry.name, "n")
        self.assertEqual(entry.text, "t")
        self.assertEqual(entry.modified_at, timestamp)

    def test_default_attributes(self) -> None:
        # Omitted arguments fall back to empty text and no timestamp.
        entry = phile.notify.Entry(name="n")
        self.assertEqual(entry.text, "")
        self.assertIsNone(entry.modified_at)
class TestRegistry(unittest.IsolatedAsyncioTestCase):
    """Tests for :class:`phile.notify.Registry` content and key ordering."""
    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        # Declared here (unassigned) so type checkers know the attribute;
        # the actual instance is created in asyncSetUp.
        self.notify_registry: phile.notify.Registry
        super().__init__(*args, **kwargs)
    async def asyncSetUp(self) -> None:
        await super().asyncSetUp()
        self.notify_registry = phile.notify.Registry()
    def test_invariants(self) -> None:
        # Keys must stay sorted and mirror the entries' names one-to-one.
        current_keys = self.notify_registry.current_keys
        current_values = self.notify_registry.current_values
        self.assertEqual(current_keys, sorted(current_keys))
        self.assertEqual(
            current_keys, [entry.name for entry in current_values]
        )
    def test_set__with_new_entry_inserts(self) -> None:
        # Adding an unseen entry stores it as the only value.
        notify_entry = phile.notify.Entry(
            name="abc", text="c", modified_at=datetime.datetime.now()
        )
        self.notify_registry.add_entry(notify_entry)
        self.assertEqual(
            self.notify_registry.current_values, [notify_entry]
        )
        # Re-check the ordering invariants after the mutation.
        self.test_invariants()
| BoniLindsley/phile | tests/test_phile/test_notify/test_init.py | test_init.py | py | 1,946 | python | en | code | 0 | github-code | 13 |
1073347884 | #
# @lc app=leetcode id=70 lang=python3
#
# [70] Climbing Stairs
#
import itertools
# @lc code=start
class Solution:
def climbStairs(self, n: int) -> int:
"""
result = 1 # all are ones
arr = [1 for i in range(n)]
while (1 in arr) and len(arr)>1:
arr = arr[2:]
arr.append(2)
if sum(arr) == n:
print(arr)
c = list(itertools.permutations(arr, len(arr)))
print(c)
result += len(set(c))
return result
"""
onestep = 1
twostep = 1
result = 1
for i in range(n-1):
result = onestep + twostep
twostep = onestep
onestep = result
return result
# @lc code=end
| uday1201/Leetcode2023 | 70.climbing-stairs.py | 70.climbing-stairs.py | py | 777 | python | en | code | 0 | github-code | 13 |
70526749779 | import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Union
import numpy as np
import tensorflow as tf
from gymnasium import spaces
from typing import NamedTuple
try:
# Check memory used by replay buffer when possible
import psutil
except ImportError:
psutil = None
class ReplayBufferSamples(NamedTuple):
    """One minibatch of transitions sampled from a replay buffer.

    Fields are TensorFlow tensors produced by ``BaseBuffer.to_torch``.
    """
    observations: tf.Tensor
    actions: tf.Tensor
    next_observations: tf.Tensor
    dones: tf.Tensor
    rewards: tf.Tensor
class BaseBuffer(ABC):
    """
    Base class that represents a buffer (rollout or replay).
    :param buffer_size: Max number of elements in the buffer
    :param observation_space: Observation space
    :param action_space: Action space
    :param n_envs: Number of parallel environments
    """
    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        n_envs: int = 1,
    ):
        super().__init__()
        self.buffer_size = buffer_size
        self.observation_space = observation_space
        self.action_space = action_space
        self.obs_shape = observation_space.shape
        # Only discrete action spaces are supported here; each transition
        # then stores a single integer action per environment.
        if isinstance(action_space, spaces.Discrete):
            self.action_dim = 1
        else:
            raise NotImplementedError(f"{action_space} action space is not supported")
        # ``pos`` is the next write index; ``full`` flips once we wrap around.
        self.pos = 0
        self.full = False
        self.n_envs = n_envs
    @staticmethod
    def swap_and_flatten(arr: np.ndarray) -> np.ndarray:
        """
        Swap and then flatten axes 0 (buffer_size) and 1 (n_envs)
        to convert shape from [n_steps, n_envs, ...] (when ... is the shape of the features)
        to [n_steps * n_envs, ...] (which maintains the order)
        :param arr:
        :return:
        """
        shape = arr.shape
        if len(shape) < 3:
            # Promote 2-D input to 3-D so the reshape below is uniform.
            shape = shape + (1,)
        return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
    def size(self) -> int:
        """
        :return: The current size of the buffer
        """
        if self.full:
            return self.buffer_size
        return self.pos
    def add(self, *args, **kwargs) -> None:
        """
        Add elements to the buffer.  Subclasses must override.
        """
        raise NotImplementedError()
    def extend(self, *args, **kwargs) -> None:
        """
        Add a new batch of transitions to the buffer
        """
        # Do a for loop along the batch axis
        for data in zip(*args):
            self.add(*data)
    def reset(self) -> None:
        """
        Reset the buffer to empty (old data is overwritten lazily).
        """
        self.pos = 0
        self.full = False
    def sample(self, batch_size: int):
        """
        :param batch_size: Number of elements to sample
        :return: samples drawn uniformly from the currently filled region
        """
        # Only sample from indices that have been written at least once.
        upper_bound = self.buffer_size if self.full else self.pos
        batch_inds = np.random.randint(0, upper_bound, size=batch_size)
        return self._get_samples(batch_inds)
    @abstractmethod
    def _get_samples(
        self, batch_inds: np.ndarray) -> Union[ReplayBufferSamples]:
        """
        :param batch_inds: indices into the storage arrays
        :return: the corresponding batch of samples
        """
        raise NotImplementedError()
    def to_torch(self, array: np.ndarray) -> tf.Tensor:
        """
        Convert a numpy array to a TensorFlow tensor.
        Note: kept under its legacy PyTorch-era name because subclasses
        call ``self.to_torch``; it actually returns a ``tf.Tensor``.
        :param array:
        :return:
        """
        return tf.convert_to_tensor(array)
    @staticmethod
    def _normalize_obs(
        obs: Union[np.ndarray, Dict[str, np.ndarray]],
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        # Identity hook; a normalizing buffer variant would override this.
        return obs
    @staticmethod
    def _normalize_reward(reward: np.ndarray) -> np.ndarray:
        # Identity hook; a normalizing buffer variant would override this.
        return reward
class ReplayBuffer(BaseBuffer):
    """
    Replay buffer used in off-policy algorithms like SAC/TD3.
    :param buffer_size: Max number of element in the buffer
    :param observation_space: Observation space
    :param action_space: Action space
    :param n_envs: Number of parallel environments
    :param optimize_memory_usage: Enable a memory efficient variant
        of the replay buffer which reduces by almost a factor two the memory used,
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
        and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
        Cannot be used in combination with handle_timeout_termination.
    :param handle_timeout_termination: Handle timeout termination (due to timelimit)
        separately and treat the task as infinite horizon task.
        https://github.com/DLR-RM/stable-baselines3/issues/284
    """
    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        n_envs: int = 1,
        optimize_memory_usage: bool = False,
        handle_timeout_termination: bool = True,
    ):
        super().__init__(buffer_size, observation_space, action_space, n_envs=n_envs)
        # Adjust buffer size
        self.buffer_size = max(buffer_size // n_envs, 1)
        # Check that the replay buffer can fit into the memory
        if psutil is not None:
            mem_available = psutil.virtual_memory().available
        # there is a bug if both optimize_memory_usage and handle_timeout_termination are true
        # see https://github.com/DLR-RM/stable-baselines3/issues/934
        if optimize_memory_usage and handle_timeout_termination:
            raise ValueError(
                "ReplayBuffer does not support optimize_memory_usage = True "
                "and handle_timeout_termination = True simultaneously."
            )
        self.optimize_memory_usage = optimize_memory_usage
        self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
        if optimize_memory_usage:
            # `observations` contains also the next observation
            self.next_observations = None
        else:
            self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape,
                                              dtype=observation_space.dtype)
        self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)
        self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
        self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
        # Handle timeouts termination properly if needed
        # see https://github.com/DLR-RM/stable-baselines3/issues/284
        self.handle_timeout_termination = handle_timeout_termination
        self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
        if psutil is not None:
            total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
            if self.next_observations is not None:
                total_memory_usage += self.next_observations.nbytes
            if total_memory_usage > mem_available:
                # Convert to GB
                total_memory_usage /= 1e9
                mem_available /= 1e9
                warnings.warn(
                    "This system does not have apparently enough memory to store the complete "
                    f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
                )
    def add(
        self,
        obs: np.ndarray,
        next_obs: np.ndarray,
        action: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        # Reshape needed when using multiple envs with discrete observations
        # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)
        if isinstance(self.observation_space, spaces.Discrete):
            obs = obs.reshape((self.n_envs,) + self.obs_shape)
            next_obs = next_obs.reshape((self.n_envs,) + self.obs_shape)
        # Same, for actions
        action = action.reshape((self.n_envs, self.action_dim))
        # Copy to avoid modification by reference
        self.observations[self.pos] = np.array(obs).copy()
        if self.optimize_memory_usage:
            # Memory-efficient variant stores next_obs in the next slot.
            self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()
        else:
            self.next_observations[self.pos] = np.array(next_obs).copy()
        self.actions[self.pos] = np.array(action).copy()
        self.rewards[self.pos] = np.array(reward).copy()
        self.dones[self.pos] = np.array(done).copy()
        if self.handle_timeout_termination:
            self.timeouts[self.pos] = np.array([info.get("TimeLimit.truncated", False) for info in infos])
        # Advance the circular write cursor.
        self.pos += 1
        if self.pos == self.buffer_size:
            self.full = True
            self.pos = 0
    def sample(self, batch_size: int) -> ReplayBufferSamples:
        """
        Sample elements from the replay buffer.
        Custom sampling when using memory efficient variant,
        as we should not sample the element with index `self.pos`
        See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
        :param batch_size: Number of element to sample
        :return:
        """
        if not self.optimize_memory_usage:
            return super().sample(batch_size=batch_size)
        # Do not sample the element with index `self.pos` as the transitions is invalid
        # (we use only one array to store `obs` and `next_obs`)
        if self.full:
            batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size
        else:
            batch_inds = np.random.randint(0, self.pos, size=batch_size)
        return self._get_samples(batch_inds)
    def _get_samples(self, batch_inds: np.ndarray) -> ReplayBufferSamples:
        # Sample randomly the env idx
        env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))
        if self.optimize_memory_usage:
            # Next observation lives in the following slot of the same array.
            next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :])
        else:
            next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :])
        data = (
            self._normalize_obs(self.observations[batch_inds, env_indices, :]),
            self.actions[batch_inds, env_indices, :],
            next_obs,
            # Only use dones that are not due to timeouts
            # deactivated by default (timeouts is initialized as an array of False)
            (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),
            self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1)),
        )
        return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
| Deewens/FYP-DRL-Comparison | experiments/prototyping/tensorflow/common/replay_buffer.py | replay_buffer.py | py | 11,057 | python | en | code | 0 | github-code | 13 |
19202589245 | from os import path
import sys
# When imported as part of a package, make sure both this directory and
# the project root are on sys.path so the absolute imports below resolve.
if __package__:
    parent_dir = path.dirname(__file__)
    root_dir = path.dirname(parent_dir)
    if parent_dir not in sys.path:
        sys.path.append(parent_dir)
    if root_dir not in sys.path:
        sys.path.append(root_dir)
import customtkinter
from fns import init, redrawServerList, appendServer, redrawServerList, getConfig, steamRootAdd
def run():
    """Build and run the DZL server-manager GUI (blocks in mainloop)."""
    init()
    customtkinter.set_appearance_mode("system")
    # Root window: a 3-row grid -- server-list header, server list,
    # and the add-server config strip.
    root = customtkinter.CTk()
    root.geometry("1000x1000")
    root.minsize(500, 500)
    root.title("DZL")
    root.grid_rowconfigure((1), weight=1)
    root.grid_columnconfigure((0), weight=1)
    # Row 0: header frame for the server list.
    frameServerList = customtkinter.CTkFrame(master=root)
    frameServerList.grid(row=0, column=0, padx=5, pady=(5, 0), sticky='NSEW')
    label2 = customtkinter.CTkLabel(master=frameServerList, text="Server List")
    label2.pack(padx=5, pady=5)
    # Row 1: the (stretchable) server list itself.
    frameMainServerList = customtkinter.CTkFrame(master=root)
    frameMainServerList.grid(row=1, column=0, padx=5, pady=(5, 0), sticky='NSEW')
    redrawServerList(frameMainServerList)
    # "Add" button callback: persist the new server, then refresh the list.
    # NOTE(review): name keeps the original "Evenet" typo; it is a local
    # identifier only.
    def addBtnEvenet():
        appendServer(frameServerAdd)
        redrawServerList(frameMainServerList)
    # Row 2: config strip with labelled entry fields laid out on one row.
    frameServerAdd = customtkinter.CTkFrame(master=root, border_width=2)
    frameServerAdd.grid_rowconfigure((0), weight=1)
    frameServerAdd.grid_columnconfigure((8), weight=1)
    frameServerAdd.grid(row=2, column=0, padx=5, pady=(5, 0), sticky='NSEW')
    customtkinter.CTkLabel(master=frameServerAdd, text="Config").grid(row=0, column=0, columnspan=14, padx=2, pady=20, sticky='NSEW')
    customtkinter.CTkLabel(master=frameServerAdd, text="Name").grid(row=1, column=0, padx=2)
    asname = customtkinter.CTkEntry(master=frameServerAdd, fg_color="gray")
    asname.grid(row=1, column=1, padx=2)
    customtkinter.CTkLabel(master=frameServerAdd, text="In game name").grid(row=1, column=2, padx=2)
    ign = customtkinter.CTkEntry(master=frameServerAdd, fg_color="gray")
    ign.grid(row=1, column=3, padx=2)
    customtkinter.CTkLabel(master=frameServerAdd, text="host").grid(row=1, column=4, padx=2)
    ashost = customtkinter.CTkEntry(master=frameServerAdd, fg_color="gray")
    ashost.grid(row=1, column=5, padx=2)
    customtkinter.CTkLabel(master=frameServerAdd, text="game port").grid(row=1, column=6, padx=2)
    asgp = customtkinter.CTkEntry(master=frameServerAdd, fg_color="gray", height=10)
    asgp.grid(row=1, column=7, padx=2, pady=3)
    customtkinter.CTkLabel(master=frameServerAdd, text="query port").grid(row=1, column=8, padx=2)
    asqp = customtkinter.CTkEntry(master=frameServerAdd, fg_color="gray", height=10)
    asqp.grid(row=1, column=9, padx=2)
    customtkinter.CTkButton(master=frameServerAdd, text="Add", command=addBtnEvenet).grid(row=1, column=10, padx=20)
    # Spacer label between the Add button and the Steam-root controls.
    customtkinter.CTkLabel(master=frameServerAdd, text="").grid(row=1, column=11, padx=20)
    # Shows the currently configured Steam home directory.
    steamRoot = customtkinter.CTkLabel(master=frameServerAdd, text=getConfig()["steamHome"])
    steamRoot.grid(row=1, column=12, padx=2)
    customtkinter.CTkButton(master=frameServerAdd, text="Set Steam Root", command=lambda: steamRootAdd(el=steamRoot)).grid(row=1, column=13, padx=20)
    # Enter the Tk event loop; blocks until the window is closed.
    root.mainloop()
# Launch the GUI only when executed directly, not on import.
if __name__ == "__main__":
    run()
5459699350 | #!/usr/bin/python3
""" Creation of class Square defined by its size """
class Square:
    """ Class Square

    A square defined by its side length.

    Attribute:
        __size : the size of the square (int >= 0)
    Method:
        area() : returns the square area
    """

    def __init__(self, size=0):
        """ Constructor method

        Fixes: the parameter was named ``__size``, which Python
        name-mangles inside a class body and therefore could never be
        passed by keyword; validation also differed from the setter
        (``isinstance`` accepted bools, ``type(...) != int`` rejects
        them). Routing through the property keeps validation in one place.
        """
        self.size = size

    def area(self):
        """ Method that returns the current square area """
        return (self.__size) ** 2

    @property
    def size(self):
        """ Getter method """
        return self.__size

    @size.setter
    def size(self, value):
        """ Setter method: validates type first, then range. """
        if type(value) != int:
            raise TypeError("size must be an integer")
        if value < 0:
            raise ValueError("size must be >= 0")
        else:
            self.__size = value
| frcaru/holbertonschool-higher_level_programming | python-classes/4-square.py | 4-square.py | py | 1,001 | python | en | code | 0 | github-code | 13 |
4528031492 | import re
from string import punctuation
import config as cfg
# Character-name vocabulary (movie-dialog corpus) used by
# postprocess_response to swap a spoken name for the user's name.
# NOTE(review): the list contains duplicates ("cole", "jeffrey", ...);
# harmless for membership-style replacement but could be deduplicated.
NAMES = ["cole", "laurie", "loretta", "cornelius", "brian", "walter", "carl", "sam", "tom", "jeffrey", "fred", "cole", "kevin", "jake", "billy", "kathy", "james", "annie", "otis", "wolfi", "michael", "marry", "johnson", "jerry", "stanzi", "paula", "jeff", "smith", "mary", "rachel", "milo", "claire", "davis", "tommy", "paul", "johnson", "casey", "harrington", "rick", "david", "jeffrey", "jack", "sid", "rose", "mikey", "marty", "dave", "jones", "enzo", "mike", "betty", "bill", "amon", "cosgrove", "bobby", "romeo", "rudy", "elaine", "jeffrey", "jim", "tom", "mickey", "ronnie", "cindy", "paulie", "jimmy", "alex", "ted", "stella", "joe", "ed", "benjamin", "ike", "richard", "gale", "johnny", "walter", "george", "frank", "dignan", "johnny", "norman", "bob", "john", "louis", "bruce", "paulie", "charlie", "charles", "christ", "i\\97", "helen", "dolores", "peter", "fred", "nick", "andy", "eddie"]

# Ordered substring-replacement table applied repeatedly by cleanup().
# Order matters: longer/more specific patterns (e.g. "won't") must come
# before the generic "n't" rule that would otherwise mangle them.
REPLACEMENT_DICT = {
    "won't": "will not",
    "wouldn't": "would not",
    "let's": "let us",
    "where's": "where is",
    "who's": "who is",
    "what's": "what is",
    "here's": "here is",
    "'m": " am",
    "'re": " are",
    "'ve": " have",
    "'ll": " will",
    "'d": " had",
    "don't it": "doesn't it",
    "'bout": "about",
    "'til": "until",
    "c'mon": "come on",
    "stayin'": "staying",
    "rollin'": "rolling",
    "doin'": "doing",
    "can't": "cannot",
    "ain't": "are not",
    "n't": " not",
    #"'s": ' is', # avoid: breaks possessive
    "he's": "he is",
    "she's": "she is",
    "that's": "that is",
    "it's": "it is",
    "o.k.": "ok",
    ",...": ",",
    "...!": " !",
    "..!": " !",
    ".!": " !",
    "...?": " ?",
    "..?": " ?",
    ".?": " ?",
    "EOS": "",
    "BOS": "",
    "eos": "",
    "bos": "",
    ". . .": "...",
    ". .": " ",
    # NOTE(review): duplicate ". ." key below -- the second literal
    # silently overwrites the first; possibly a lost whitespace variant.
    ". .": " ",
    "<u>": "",
    "</u>": "",
    "<b>": "",
    "</b>": "",
    "<i>": "",
    "</i>": "",
    }

# Substrings replaced by a single space (separators and markup noise).
repl_by_space_dict = ["-", "_", " *", " /", "* ", "/ ", "\"", "--"]
"""
Cleans up the given text to normalize syntax.
Returned text is lower case, striped, and most word contractions are expanded.
"""
def cleanup(text):
if len(text) == 0:
return ""
text = text.lower()
text = text.replace("’", "'").replace("", "'")
text = re.sub(r"\r", "", text)
text = re.sub(r"\n", "", text)
# Replace double dots with triple dots
text = re.sub(r"(?<=([a-z]| ))(\.){2}(?=([a-z]| |$))", "... ", text)
for _ in range(3):
for v in REPLACEMENT_DICT:
text = text.replace(v, REPLACEMENT_DICT[v])
for v in repl_by_space_dict:
text = text.replace(v, " ")
# Change multi spaces to single spaces and strip line
text = re.sub(" +", " ", text).strip()
if len(text) > 1 and text[-1] in [","]:
text = text[:-1].strip()
while len(text) > 1 and text[0] in punctuation:
text = text[1:].strip()
return text
def preprocess_query(text, name):
    """Process a query before it is given as input to the bot.

    Args:
        text (str): Raw user input.
        name (str): The user's name. Currently unused; kept for symmetry
            with postprocess_response().
    Returns:
        str: Cleaned query ending in terminal punctuation, or "" if the
        input reduces to nothing.
    """
    text = cleanup(text)
    if len(text) == 0:
        return ""
    # Strip direct addressing of the bot by name ("..., <bot>" / "<bot> , ...").
    text = text.replace(", " + cfg.BOT_NAME, "")
    text = text.replace(cfg.BOT_NAME + " ,", "")
    # ROBUSTNESS FIX: the replacements above could in principle empty the
    # string, in which case the original crashed on text[-1].
    if len(text) == 0:
        return ""
    # Make sure the sentence ends with terminal punctuation.
    if text[-1] != "!" and text[-1] != "?" and text[-1] != ".":
        text += "."
    # Handle the case where the text is just punctuation
    if all(character in punctuation for character in text):
        text = ""
    return text
def postprocess_response(text, name):
    """Process a response from the bot before it is printed to the user.

    Args:
        text (str): Raw bot output.
        name (str): The user's name; substituted for corpus character names.
    Returns:
        str: Cleaned response ("" if the input reduces to nothing,
        "what?" if it is punctuation only).
    """
    text = cleanup(text)
    if len(text) == 0:
        return ""
    # Swap corpus character names addressed in the reply for the user's
    # name, and first-person introductions for the bot's own name.
    for person_name in NAMES:
        text = text.replace(", " + person_name, ", " + name)
        text = text.replace(" " + person_name + " ,", " " + name + " ,")
        text = text.replace("i am " + person_name, "i am " + cfg.BOT_NAME)
        text = text.replace("my name is " + person_name, "my name is " + cfg.BOT_NAME)
    # Make sure the sentence ends with a period, an exclamation mark or a question mark
    if text[-1] != "!" and text[-1] != "?" and text[-1] != ".":
        text += "."
    # Handle the case where the text is just punctuation
    if all(character in punctuation for character in text):
        text = "what?"
    # Remove space before punctuation
    text = re.sub(r"( +)(?=[?.!:,])", "", text)
    return text
| KelianB/Keras-Chatbot | textprocessor.py | textprocessor.py | py | 4,679 | python | en | code | 0 | github-code | 13 |
23164838189 |
# -*- coding: utf-8 -*-
from PyQt4.QtGui import QDialog, QFileDialog, QMessageBox, QHeaderView, QTableWidgetItem
from ui_send_mail import Ui_SendMailDialog
import os, email, smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
GMAIL = {'server':'smtp.gmail.com', 'port':587}
class SendMailDialog(QDialog, Ui_SendMailDialog):
    """Dialog for composing and sending an email with attachments via Gmail.

    NOTE(review): Python 2 code (uses the `unicode` builtin) built on PyQt4;
    do not port the string handling without revisiting every unicode() call.
    """
    def __init__(self, email_id, passwd, parent=None):
        """Set up the UI, wire signals, and remember the sender credentials.

        email_id/passwd are held in memory for the SMTP login in sendMail().
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        # First column (file name) stretches; attachment table starts hidden
        # until the first file is added.
        self.tableWidget.horizontalHeader().setResizeMode(0, QHeaderView.Stretch)
        self.tableWidget.setHidden(True)
        self.tableWidget.cellClicked.connect(self.onAttachmentClick)
        self.attachFileButton.clicked.connect(self.addAttachment)
        self.sendButton.clicked.connect(self.sendMail)
        self.cancelButton.clicked.connect(self.reject)
        self.email_id = unicode(email_id)
        self.passwd = passwd
        # Absolute paths of attached files; rows in tableWidget mirror this list.
        self.attachments = []
        # When truthy, holds the address being replied to (see replyTo()).
        self.reply_to = False
    def replyTo(self, reply_to, msg_id, subject):
        """Pre-fill the dialog as a reply to the message msg_id.

        Sets In-Reply-To/References headers later and prefixes the subject
        with "Re: " unless it already has one.
        """
        self.reply_to = reply_to
        self.reference_msg = unicode(msg_id)
        self.recipientEdit.setText(reply_to)
        if subject[:4].lower() not in ['re: ', 're :'] : subject = 'Re: ' + subject
        self.subjectEdit.setText(subject)
    def attachFile(self, filename):
        """Add filename to the attachment list and show it in the table.

        Duplicate paths are ignored.
        """
        # check if filename exists and it is not directory
        # check if filesize exceeds 25MB
        filename = unicode(filename)
        if filename not in self.attachments:
            self.attachments.append(filename)
            row = self.tableWidget.rowCount()
            self.tableWidget.insertRow(row)
            self.tableWidget.setItem(row, 0, QTableWidgetItem(os.path.basename(filename)))
            self.tableWidget.setItem(row, 1, QTableWidgetItem('Remove'))
            self.tableWidget.setHidden(False)
    def onAttachmentClick(self, row, col):
        ''' remove attachment if remove is clicked '''
        # Column 1 is the 'Remove' cell; row index matches self.attachments.
        if col == 1:
            self.tableWidget.removeRow(row)
            self.attachments.pop(row)
    def addAttachment(self):
        """Let the user pick one or more files to attach via a file dialog."""
        # Open a file dialog a add files
        filefilters = 'All Files (*);;Image Files (*.jpg *jpeg *.png);;Document Files (*.pdf *.docx *.odt)'
        filenames = QFileDialog.getOpenFileNames(self, 'Attach Files', '', filefilters)
        for each in filenames:
            self.attachFile(each)
    def sendMail(self):
        """Build a MIME message from the form and send it through Gmail.

        Shows a warning box and aborts on login failure; accepts the dialog
        on success. NOTE(review): if sendmail() raises, the SMTP connection
        is never quit() — consider a try/finally.
        """
        # Check if sender is empty or invalid
        # Check if msg text and attachments both are empty
        sendServer = smtplib.SMTP(GMAIL['server'], GMAIL['port'])
        try:
            # EHLO / STARTTLS / EHLO is the required sequence before login
            # on port 587.
            sendServer.ehlo()
            sendServer.starttls()
            sendServer.ehlo()
            sendServer.login(self.email_id, self.passwd)
            print("Login Successful")
        except smtplib.SMTPException:
            QMessageBox.warning(self, "Login Failed !", "Failed to Login to Mail Server")
            return
        msg = MIMEMultipart()
        # Email Info
        msg['Subject'] = unicode(self.subjectEdit.text())
        msg['To'] = unicode(self.recipientEdit.text())
        msg['From'] = self.email_id
        if self.reply_to:
            # Thread the reply under the original message.
            msg['In-Reply-To'] = self.reference_msg
            msg['References'] = self.reference_msg
        # Attach text and images
        textPart = MIMEText(unicode(self.mailText.toHtml()), 'html')
        msg.attach(textPart)
        # Attach files
        for filename in self.attachments:
            attachment = MIMEBase('application','octet-stream')
            with open(filename, 'rb') as fd:
                data = fd.read()
            attachment.set_payload(data)
            email.encoders.encode_base64(attachment)
            attachment.add_header('Content-Disposition','attachment; filename="%s"' % os.path.basename(filename))
            msg.attach(attachment)
        sendServer.sendmail(self.email_id, [unicode(self.recipientEdit.text())], msg.as_string())
        sendServer.quit()
        # Show success/failure msg here
        self.accept()
| ksharindam/daaq-mail | daaq/send_mail.py | send_mail.py | py | 4,071 | python | en | code | 0 | github-code | 13 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
# Load the virial results: columns are (omega, <T>, <V>), written as three
# stacked parameter sets of equal length.
data = np.loadtxt("../results/virial.csv", delimiter=",", skiprows=1)
# The omega grid is the same for every set, so read it from the first third.
block_len = len(data) // 3
omegas = data[:block_len, 0]
print(omegas)
# Slice out kinetic (<T>) and potential (<V>) energies per parameter set.
kinetic = []
potential = []
n_rows = len(omegas)
for fold in range(3):
    lo, hi = fold * n_rows, (fold + 1) * n_rows
    print(lo, hi)
    kinetic.append(data[lo:hi, 1].astype(float))
    potential.append(data[lo:hi, 2].astype(float))
print(kinetic)
labels = ["No repulsion", r"$\Psi_{T1}$", r"$\Psi_{T2}$"]
fig = plt.figure(figsize=(20, 20))
colors = ["blue", "orange", "green", "red"]  # kept from original; unused
matplotlib.rc('font', family='normal', weight='normal', size=40)
# Plot <T>/<V> against omega, dropping the last sample of each curve.
for fold, label in enumerate(labels):
    ratio = kinetic[fold] / potential[fold]
    plt.plot(omegas[:-1], ratio[:-1], label=label)
plt.legend()
plt.xlabel(r"$\omega$")
plt.ylabel(r"$<T>/<V>$")
plt.tight_layout()
plt.xlim(-0.05, 1.05)
plt.ylim(0, 1.05)
plt.savefig("../plots/Virial.pdf")
plt.show()
| adrian2208/FYS3150_collab | Project5/python/virial_plot.py | virial_plot.py | py | 984 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.