id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1950838 | #!/usr/bin/env python2.7
"""
This script collects temperature data from wunderground.com
It is using wu.cfg which is JSON dictionary with following required fields:
{
"key":"your key",
"location":"location query"
}
The query specifies location for which you want weather information. Examples:
CA/San_Francisco US state/city
60290 US zipcode
Australia/Sydney country/city
37.8,-122.4 latitude,longitude
KJFK airport code
pws:KCASANFR70 PWS id
autoip AutoIP address location
autoip.json?geo_ip=172.16.58.3 specific IP address location
The script writes CSV file wu.csv with the following fields:
1. current time (unix timestamp)
2. observation time (as reported by API)
3. temperature in C (as reported by API)
Additionaly, config file might contain following fields
"cosm": {
"key":"your key"
"feed":123
"datastream":123
}
If they present, it will additionally submit data to COSM.com to specified
feed and datastream.
"""
import json
import sys
import logging
import time,datetime
import string
from urllib import quote_plus
import getopt
import cosm
from rest_json_helper import json_GET
API_ENDPOINT="http://api.wunderground.com/api/%s/conditions/q/%s.json"
CFG_FILE="wu.cfg"
WU_LOGFILE="wu.log"
DATA_FILE = 'wu.csv'
API_TIMEOUT=5
def usage():
    # Print command-line usage help to stdout (Python 2 print statement).
    # Interpolates the program name plus the default config and CSV paths.
    print """
%s [-f <cfg file>] [-c] [-d] [-o <csv file>] [-t seconds]
-c -- log to console instead of log file
-d -- debug, dry-run mode. No data written or sent.
-f <cfg file> -- config file name. Default is '%s'
-o <csv file> -- CSV file name. Default is '%s'
""" % (sys.argv[0], CFG_FILE, DATA_FILE)
def read_config(cfg_fname):
    """Parse and return the JSON configuration stored in *cfg_fname*.

    Logs the file name being read; any open/parse error propagates to the
    caller (main() handles it and exits).
    """
    log.info("Reading config file %s" % cfg_fname)
    with open(cfg_fname, "r") as f:
        return json.load(f)
def main():
    """Entry point: parse CLI flags, read config, then poll wunderground
    and append (local_time, observation_time, temp_c) rows to a CSV file,
    optionally forwarding each sample to COSM."""
    global log
    global debug_mode
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'dcf:o:t:', [])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    # Defaults; overridden by CLI flags below.
    console = False
    debug_mode = False
    cfg_fname = CFG_FILE
    data_fname = DATA_FILE
    sleep_time = 0 # non zero means loop mode
    for o, a in opts:
        if o in ['-d']:
            debug_mode = True
        elif o in ['-c']:
            console = True
        elif o in ['-f']:
            cfg_fname = a
        elif o in ['-o']:
            data_fname = a
        elif o in ['-t']:
            sleep_time = int(a)
        else:
            usage()
            sys.exit(1)
    # Logging: console when -c, otherwise append to WU_LOGFILE.
    log_format = '%(asctime)s %(process)d %(filename)s:%(lineno)d %(levelname)s %(message)s'
    if debug_mode:
        log_level=logging.DEBUG
    else:
        log_level=logging.INFO
    if console:
        logging.basicConfig(level=log_level, format=log_format)
    else:
        logging.basicConfig(level=log_level, format=log_format,
                            filename=WU_LOGFILE, filemode='a')
    log = logging.getLogger('default')
    try:
        cfg = read_config(cfg_fname)
    except Exception, ex:
        log.error("Error reading config file %s" % ex)
        sys.exit(1)
    # Required config fields; KeyError here is intentionally fatal.
    key = cfg["key"]
    query = cfg["location"]
    log.info("Using query %s" % query)
    # Optional COSM forwarding settings.
    if cfg.has_key("cosm"):
        cosm_feed = cfg["cosm"]["feed"]
        cosm_key = cfg["cosm"]["key"]
        cosm_datastream = cfg["cosm"]["datastream"]
        log.debug("Will log to COSM %s/%s" % (cosm_feed, cosm_datastream))
    if not debug_mode:
        data_file = file(data_fname, 'a')
    try:
        while True:
            try:
                parsed_json = json_GET(API_ENDPOINT % (quote_plus(key), quote_plus(query)),
                                       API_TIMEOUT)
                local_time= time.time()
                observation_time = int(parsed_json['current_observation']['observation_epoch'])
                temp_c = parsed_json['current_observation']['temp_c']
                log.info("Current temperature is: %s" % temp_c)
            except Exception, ex:
                log.error("Error fetching data from API: %s" %ex)
                # NOTE(review): with sleep_time == 0 (single-shot mode) a
                # persistent API failure makes this loop retry without any
                # delay and never exit — confirm this busy-retry is intended.
                continue
            csv_report = '{0},{1},{2}\n'.format(local_time,observation_time,temp_c)
            if debug_mode:
                print csv_report
            else:
                # Write to file
                try:
                    data_file.write(csv_report)
                    data_file.flush()
                except IOError, ex:
                    # Error writing CSV is fatal
                    log.error("Error writing CSV file: %s" % ex)
                    sys.exit(1)
                # Send to COSM
                if cfg.has_key("cosm"):
                    try:
                        ts = datetime.datetime.utcfromtimestamp(int(local_time)).isoformat('T')+"Z"
                        cosm_report =string.join([ts,str(temp_c)],",") + "\r\n"
                        cosm.submit_datapoints(cosm_feed,cosm_datastream,cosm_key,cosm_report)
                    except Exception, ex:
                        # Error sending to COSM is non-fatal, but logged anyway
                        log.error("Error sending to COSM: %s" % ex )
            if sleep_time>0:
                time.sleep(sleep_time)
            else:
                break
    finally:
        if not debug_mode:
            data_file.close();
if __name__ == '__main__':
main()
| StarcoderdataPython |
11359783 | import requests
from dictor import dictor
def run(ping):
    """Probe the host described by *ping* and return a notification string,
    or False when the check passed.

    *ping* is a DynamoDB-style item: values are wrapped in {"S": ...}.
    A request exception, a non-200 status, or a failed optional metric
    check all produce a "host status: ..." notification message.
    """
    notification_text = False
    status = False
    try:
        timeout = int(ping["timeout"]["S"])
        r = requests.get(ping["host"]["S"], timeout=timeout)
        status = r.status_code
        # Optional metric check, performed only on a healthy 200 response.
        if "check_metric" in ping.keys() and status == 200:
            response_json = r.json()
            actual_value = dictor(response_json, ping["check_metric"]["S"])
            print(actual_value)
            op = ping["check_operator"]["S"]
            expected = ping["check_value"]["S"]
            comparators = {
                "=": lambda a, b: a == b,
                ">": lambda a, b: a > b,
                "<": lambda a, b: a < b,
                "!=": lambda a, b: a != b,
            }
            if op in comparators:
                # A *matching* comparison marks the check as failed,
                # exactly as the original if/elif chain did.
                if comparators[op](actual_value, expected):
                    status = f"500 in {op} test"
            else:
                status = 500
    except requests.exceptions.RequestException as e:
        status = e
    if status != 200:
        notification_text = ping["host"]["S"] + " status: " + str(status)
    return notification_text
| StarcoderdataPython |
131843 | <gh_stars>1-10
#!/usr/bin/env python
import sys
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import tf.transformations
from std_msgs.msg import Bool
from roboarm.srv import PoseRequest, PoseRequestResponse
def goto_pose_server(request):
    """ROS service callback: plan and execute a motion to request.pose.

    Publishes the planned trajectory for RViz visualization, executes it
    (blocking), and returns whether execution succeeded.
    Relies on module-level globals: group, roboarm, display_trajectory,
    display_trajectory_pub.
    """
    group.set_pose_target(request.pose)
    plan = group.plan()
    # Publish the plan so it can be displayed before/while executing.
    display_trajectory.trajectory_start = roboarm.get_current_state()
    display_trajectory.trajectory.append(plan)
    display_trajectory_pub.publish(display_trajectory)
    success = group.go(wait=True)
    response = Bool()
    response.data = success
    return PoseRequestResponse(response)
# Node setup: remap joint states, initialize MoveIt and the ROS node,
# then serve 'goto_pose_server' requests until shutdown.
sys.argv.append('/joint_states:=/roboarm/joint_states')
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('goto_pose_server', anonymous=True)
roboarm = moveit_commander.RobotCommander()
scene = moveit_commander.PlanningSceneInterface()
group = moveit_commander.MoveGroupCommander('roboarm')
# Give MoveIt time to come up before querying state (5 s, empirical).
rospy.sleep(5)
print 'The planning frame is', group.get_planning_frame()
print 'The end effector link is', group.get_end_effector_link()
print 'The group name is', roboarm.get_group_names()
print 'The current state is', roboarm.get_current_state()
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory_pub = rospy.Publisher('/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory)
rospy.Service('goto_pose_server', PoseRequest, goto_pose_server)
rospy.spin()
#Test values
#pose_target.orientation.x = -0.501634507333
#pose_target.orientation.y = 1.72876577259e-05
#pose_target.orientation.z = -4.46482869593e-05
#pose_target.orientation.w = 0.865079660355
#pose_target.position.x = 1.06955479166
#pose_target.position.y = 0.775824538778
#pose_target.position.z = 1.42802207074
5125432 | <reponame>fostroll/modelsrv0
#!python
#-*- coding: utf-8 -*-
import argparse
from fastapi import BackgroundTasks, Body, Depends, FastAPI, HTTPException, \
Response, Security, status
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
import gc
import traceback
from typing import Dict, Tuple
import uvicorn
from auth import make_routes as auth_make_routes
from const import CONFIG_FN, ADMIN_PATH_PREFIX, PREDICT_PATH_PREFIX, \
STATIC_PATH, ADMIN_TAG, MODEL_TAG, LOADING_LOG
from model import model_load, model_predict
import response_examples
import schemata
from threading import Lock
new_config_lock, reloading_lock = Lock(), Lock()
app = FastAPI()
def load_config():
    """(Re)parse the service configuration from CONFIG_FN into schemata.config."""
    schemata.config = schemata.Config.parse(CONFIG_FN)
def reload_model():
    """(Re)load the ML model under reloading_lock, logging the outcome.

    Progress/errors are written to LOADING_LOG so that the
    /reload/status endpoint can report them; a load failure is logged
    but not re-raised (this runs as a background task).
    """
    with reloading_lock,\
            open(LOADING_LOG, 'wt', encoding='utf-8') as f:
        try:
            model_load(schemata.config.model.name,
                       device=schemata.config.model.device)
        except Exception as e:
            # Report the traceback both to stdout and to the log file.
            print(traceback.format_exc())
            print(traceback.format_exc(), file=f)
        else:
            print('The model is loaded.', file=f)
def load_router():
    """Build a fresh FastAPI app from the current config and graft its
    routes/state onto the module-level `app` object.

    Rebuilding a throwaway app and copying its __dict__ lets the running
    uvicorn server pick up new routes without restarting.
    """
    app_ = FastAPI(title=schemata.config.model.swagger_title,
                   version=schemata.config.model.swagger_version,
                   description=schemata.config.model.swagger_description,
                   responses={**response_examples.HTTP_400_BAD_REQUEST,
                              **response_examples.HTTP_401_UNAUTHORIZED})
    if STATIC_PATH:
        app_.mount(STATIC_PATH,
                   StaticFiles(directory='static'), name='static')
    check_user = auth_make_routes(app_)

    def check_admin(current_user: schemata.UserData = Security(check_user)):
        # Dependency: authenticated user must also have admin rights.
        if not current_user.admin:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail='Admin rights required')
        return current_user

    @app_.get(f'{ADMIN_PATH_PREFIX}/reload',
              name='Reload the model',
              #status_code=status.HTTP_204_NO_CONTENT)
              responses={**response_examples.HTTP_202_ACCEPTED,
                         **response_examples.HTTP_500_INTERNAL_SERVER_ERROR,
                         **response_examples.HTTP_503_SERVICE_UNAVAILABLE},
              dependencies=[Depends(check_admin)],
              tags=[ADMIN_TAG])
    async def admin_reload(background_tasks: BackgroundTasks):
        # Re-read config + routes synchronously; reload the model itself
        # in the background (it may take long), answering 202 meanwhile.
        with new_config_lock:
            if reloading_lock.locked():
                raise HTTPException(status_code=\
                                    status.HTTP_503_SERVICE_UNAVAILABLE,
                                    detail='Process is locked. Another '
                                           'reloading is still in progress')
            with reloading_lock:
                try:
                    load_config()
                except RuntimeError as e:
                #except Exception as e:
                    raise HTTPException(
                        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                        detail=str(e).split('\n')
                    )
                load_router()
            background_tasks.add_task(reload_model)
            return JSONResponse('The request is processing',
                                status_code=status.HTTP_202_ACCEPTED)

    @app_.get(f'{ADMIN_PATH_PREFIX}/reload/status',
              name='the model reloading status',
              responses={**response_examples.HTTP_503_SERVICE_UNAVAILABLE},
              dependencies=[Depends(check_admin)],
              tags=[ADMIN_TAG])
    async def admin_reload_check(t: int):
        # 503 while reload_model() still holds the lock; otherwise return
        # the reload log file. (`t` is a cache-busting query parameter.)
        if reloading_lock.locked():
            raise HTTPException(status_code=\
                                status.HTTP_503_SERVICE_UNAVAILABLE,
                                detail='Reloading is still in progress')
        return FileResponse(LOADING_LOG)

    @app_.post('/predict',
               dependencies=[Security(check_user)],
               tags=[MODEL_TAG])
    async def predict(text: str = Body(...),
                      with_intents: bool = True, probs: bool = True,
                      threshold: float = .5, only_true: bool = False):
        return model_predict(text, with_intents=with_intents, probs=probs,
                             threshold=threshold, only_true=only_true)

    # Graft the rebuilt app's routes/state onto the served `app` object.
    for attr, val in app_.__dict__.items():
        setattr(app, attr, val)


# Module init: load config and routes, then the model itself.
load_config()
load_router()
reload_model()
if __name__ == '__main__':
    #https://www.uvicorn.org/settings/
    #uvicorn.run(app, host='127.0.0.1', port=8000)
    def str2bool(v):
        # argparse `type=` helper: accept common textual booleans.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    argparser = argparse.ArgumentParser(add_help=True)
    argparser.add_argument('--host', dest='host', type=str,
                           default='127.0.0.1', help='The server address')
    argparser.add_argument('--port', dest='port', type=int,
                           default=8000, help='The server port')
    argparser.add_argument('--reload', dest='reload', type=str2bool,
                           default=True, help='Whethere we need a reload')
    argparser.add_argument('--workers', dest='workers', type=int,
                           default=1, help='The number of workers')
    args = argparser.parse_args()
    # Pass the app as an import string ('main:app') so uvicorn's reload
    # and multi-worker modes can re-import it in child processes.
    uvicorn.run('main:app', host=args.host, port=args.port,
                reload=args.reload, workers=args.workers)
| StarcoderdataPython |
268113 | <reponame>mackelab/theano_shim
from setuptools import setup
# Packaging metadata for the theano_shim library (setuptools).
setup(
    name='theano_shim',
    version='0.3.0',
    description="A simple interface to easily switch between using Theano and pure Numpy",
    author="<NAME>",
    author_email="<EMAIL>",
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only'
    ],
    packages=["theano_shim"],
    install_requires=['numpy', 'scipy', 'theano-pymc']
)
| StarcoderdataPython |
135218 | # -*- coding: utf-8 -*-
import os.path
from bs4 import BeautifulSoup
try:
from io import open
except ImportError:
pass
def find_sub(parent, text):
    """Locate a ToC entry exactly one level below *parent* whose link text
    is *text*.

    *parent* may be either the parent entry's <a> tag or the <ul> tag that
    holds its descendants (for an <a>, the sibling <ul> is used). Returns
    the matching <a> tag, or None when no direct child entry matches.
    """
    assert parent.name in ['a', 'ul']
    if parent.name == 'a':
        descendants = parent.nextSibling
    else:
        descendants = parent
    for entry in descendants.findChildren(recursive=False):
        link = entry.find('a', href=True, text=text, recursive=False)
        if link:
            return link
    return None
def get_html_soup(app, path):
    """Read the built HTML file *path* under the Sphinx app's outdir and
    return it parsed as a BeautifulSoup tree.

    Inter-tag whitespace is removed first so sibling navigation works.
    """
    with open(os.path.join(app.outdir, path), encoding='utf-8') as f:
        html = f.read()
    # get rid of whitespace; otherwise tests break cf.
    # https://www.crummy.com/software/BeautifulSoup/bs4/doc/#next-sibling-and-previous-sibling
    html = ''.join(line.strip() for line in html.split('\n'))
    return BeautifulSoup(html, 'html.parser')
| StarcoderdataPython |
4892045 | #!/usr/bin/env python3
"""
Script to analyze the graph for Lightning features.
https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md
"""
import asyncio
import os
import time
from electrum_spero.logging import get_logger, configure_logging
from electrum_spero.simple_config import SimpleConfig
from electrum_spero import constants
from electrum_spero.daemon import Daemon
from electrum_spero.wallet import create_new_wallet
from electrum_spero.util import create_and_start_event_loop, log_exceptions, bh2u, bfh
from electrum_spero.lnutil import LnFeatures
logger = get_logger(__name__)

# Configuration parameters
IS_TESTNET = False
TIMEOUT = 5  # for Lightning peer connections
WORKERS = 30  # number of workers that concurrently fetch results for feature comparison
NODES_PER_WORKER = 50
VERBOSITY = ''  # for debugging set '*', otherwise ''
FLAG = LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT  # chose the 'opt' flag
PRESYNC = False  # should we sync the graph or take it from an already synced database?

# Electrum network/daemon setup shared by the analysis coroutines below.
config = SimpleConfig({"testnet": IS_TESTNET, "verbosity": VERBOSITY})
configure_logging(config)
loop, stopping_fut, loop_thread = create_and_start_event_loop()
# avoid race condition when starting network, in debug starting the asyncio loop
# takes some time
time.sleep(2)
if IS_TESTNET:
    constants.set_testnet()
daemon = Daemon(config, listen_jsonrpc=False)
network = daemon.network
assert network.asyncio_loop.is_running()
# create empty wallet
wallet_dir = os.path.dirname(config.get_wallet_path())
wallet_path = os.path.join(wallet_dir, "ln_features_wallet_main")
if not os.path.exists(wallet_path):
    create_new_wallet(path=wallet_path, config=config)
# open wallet
wallet = daemon.load_wallet(wallet_path, password=<PASSWORD>, manual_upgrades=False)
wallet.start_network(network)
async def worker(work_queue: asyncio.Queue, results_queue: asyncio.Queue, flag):
    """Connects to a Lightning peer and checks whether the announced feature
    from the gossip is equal to the feature in the init message.
    Returns None if no connection could be made, True or False otherwise."""
    count = 0
    # NOTE(review): `count` is never incremented, so the NODES_PER_WORKER
    # cap below can never trigger — confirm whether that is intended.
    while not work_queue.empty():
        if count > NODES_PER_WORKER:
            return
        work = await work_queue.get()
        # only check non-spero addresses
        addr = None
        for a in work['addrs']:
            if not "spero" in a[0]:
                addr = a
        if not addr:
            await results_queue.put(None)
            continue
        # handle ipv4/ipv6
        # (assumes addr has .host/.port attributes while addr[0] is the
        #  textual host — TODO confirm against channel_db address type)
        if ':' in addr[0]:
            connect_str = f"{bh2u(work['pk'])}@[{addr.host}]:{addr.port}"
        else:
            connect_str = f"{bh2u(work['pk'])}@{addr.host}:{addr.port}"
        print(f"worker connecting to {connect_str}")
        try:
            peer = await wallet.lnworker.add_peer(connect_str)
            res = await asyncio.wait_for(peer.initialized, TIMEOUT)
            if res:
                # Compare only the bits selected by `flag`.
                if peer.features & flag == work['features'] & flag:
                    await results_queue.put(True)
                else:
                    await results_queue.put(False)
            else:
                await results_queue.put(None)
        except Exception as e:
            # Connection/handshake failure: record "unknown".
            await results_queue.put(None)
@log_exceptions
async def node_flag_stats(opt_flag: LnFeatures, presync: bool):
    """Determines statistics for feature advertisements by nodes on the Lighting
    network by evaluation of the public graph.
    opt_flag: The optional-flag for a feature.
    presync: Sync the graph. Can take a long time and depends on the quality
    of the peers. Better to use presynced graph from regular wallet use for
    now.
    """
    try:
        await wallet.lnworker.channel_db.data_loaded.wait()
        # optionally presync graph (not relyable)
        if presync:
            network.start_gossip()
            # wait for the graph to be synchronized
            while True:
                await asyncio.sleep(5)
                # logger.info(wallet.network.lngossip.get_sync_progress_estimate())
                cur, tot, pct = wallet.network.lngossip.get_sync_progress_estimate()
                print(f"graph sync progress {cur}/{tot} ({pct}%) channels")
                if pct >= 100:
                    break
        # Snapshot the node map under the db lock.
        with wallet.lnworker.channel_db.lock:
            nodes = wallet.lnworker.channel_db._nodes.copy()
        # check how many nodes advertize opt/req flag in the gossip
        n_opt = 0
        n_req = 0
        print(f"analyzing {len(nodes.keys())} nodes")
        # 1. statistics on graph
        # The 'required' bit sits one position below the 'optional' bit.
        req_flag = LnFeatures(opt_flag >> 1)
        for n, nv in nodes.items():
            features = LnFeatures(nv.features)
            if features & opt_flag:
                n_opt += 1
            if features & req_flag:
                n_req += 1
        # analyze numbers
        print(
            f"opt: {n_opt} ({100 * n_opt/len(nodes)}%) "
            f"req: {n_req} ({100 * n_req/len(nodes)}%)")
        # 2. compare announced and actual feature set
        # put nodes into a work queue
        work_queue = asyncio.Queue()
        results_queue = asyncio.Queue()
        # fill up work
        for n, nv in nodes.items():
            addrs = wallet.lnworker.channel_db._addresses[n]
            await work_queue.put({'pk': n, 'addrs': addrs, 'features': nv.features})
        tasks = [asyncio.create_task(worker(work_queue, results_queue, opt_flag)) for i in range(WORKERS)]
        try:
            await asyncio.gather(*tasks)
        except Exception as e:
            print(e)
        # analyze results
        n_true = 0
        n_false = 0
        n_tot = 0
        while not results_queue.empty():
            i = results_queue.get_nowait()
            n_tot += 1
            if i is True:
                n_true += 1
            elif i is False:
                n_false += 1
        print(f"feature comparison - equal: {n_true} unequal: {n_false} total:{n_tot}")
    finally:
        # Signal the main thread's event loop to stop.
        stopping_fut.set_result(1)


# Schedule the analysis on the background event loop.
asyncio.run_coroutine_threadsafe(
    node_flag_stats(FLAG, presync=PRESYNC), loop)
| StarcoderdataPython |
3395783 | '''
From Wikipedia, the free encyclopaedia:
A happy number is defined by the following process:
Starting with any positive integer, replace the number by the sum
of the squares of its digits, and repeat the process until the number
equals 1 (where it will stay), or it loops endlessly in a cycle which
does not include 1. Those numbers for which this process ends in
1 are happy numbers, while those that do not end in 1 are unhappy numbers.
Write a Python program to check whether a number is "happy" or not.
Sample Input:
(7)
(932)
(6)
Sample Output:
True
True
False
'''
def is_Happy_num(n):
    """Return True if *n* is a happy number, False otherwise.

    A happy number reaches 1 when repeatedly replaced by the sum of the
    squares of its digits; an unhappy number falls into a cycle instead.
    Cycle detection: remember every value seen and stop when one repeats.

    Args:
        n: a positive integer.

    Raises:
        ValueError: if n is not positive (happy numbers are defined only
            for positive integers; previously a negative n crashed with a
            confusing ``int('-')`` ValueError from the digit split).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    seen = set()
    while n != 1:
        if n in seen:
            # Value repeated: we are in a cycle that never reaches 1.
            return False
        seen.add(n)
        n = sum(int(digit) ** 2 for digit in str(n))
    return True
# Quick manual checks (expected: True, True, False, True, False).
print(is_Happy_num(7))
print(is_Happy_num(932))
print(is_Happy_num(6))
print(is_Happy_num(103))
print(is_Happy_num(47))
| StarcoderdataPython |
1625560 | import gym
from gym import error, spaces, utils
from gym.utils import seeding
import cpufreq
import pyRAPL
import time
import numpy as np
from math import ceil
class FinalEnv02(gym.Env):
    """Gym environment that learns to keep CPU power near a target cap by
    raising/lowering the CPU frequency (via cpufreq) and observing power
    draw (via pyRAPL).

    Observation: index of the power interval the measured draw falls in.
    Action: hold / lower / raise the current frequency one step.
    Reward: +2 in the goal interval, +/-1 for moving toward/away from it.
    """

    ### DEFAULT PERSONAL VALUES
    DEF_POWER = 65.0
    DEF_SOCKET = 0
    DEF_CORES = [0,1,2,3,4,5,6,7]
    DEF_MAXSTEPS = 20
    DEF_SEED = None
    DEF_MINPOWER = 15.0
    DEF_MAXPOWER = 115.0
    DEF_POWERSTEP = 3.0
    DEF_DECISION = 0.25 # 4 decisions / sec

    def __init__(self, **config):
        ### CPUEnv constant values.
        # POWER power cap to reach
        self.POWER = config.get('power', self.DEF_POWER)
        # SOCKET socket to get pyRAPL measures
        # CORES CPU cores assigned to SOCKET
        self.SOCKET = config.get('socket', self.DEF_SOCKET)
        self.CORES = config.get('cores', self.DEF_CORES)
        # MAXSTEPS maximum iterations for environment
        # SEED seed for RNG reporducibility
        self.MAXSTEPS = config.get('maxsteps', self.DEF_MAXSTEPS)
        self.SEED = config.get('seed', self.DEF_SEED)
        # MINPOWER minimum in power bandwidth
        # MAXPOWER maximum in power bandwidth
        self.MINPOWER = config.get('minpower', self.DEF_MINPOWER)
        self.MAXPOWER = config.get('maxpower', self.DEF_MAXPOWER)
        assert(self.MINPOWER < self.MAXPOWER)
        # DECISION_TIME time spent between actions (frequency change and power measure)
        # MEASURE_TIME time spent measuring energy data
        # SLEEP_TIME* waiting time after frequency change
        self.DECISION_TIME = config.get('decision_time', self.DEF_DECISION)
        self.MEASURE_TIME = config.get('measure_time', self.DECISION_TIME)
        self.SLEEP_TIME = self.DECISION_TIME - self.MEASURE_TIME
        # POWERSTEP size of intervals of observation space
        # POWERPOINTS extrema of power intervals
        # INTERVALS list power intervals
        self.POWERSTEP = config.get('powstep', self.DEF_POWERSTEP)
        self.POWERPOINTS = self.get_powerpoints(self.POWERSTEP)
        self.INTERVALS = self.get_intervals(self.POWERPOINTS)
        ### Default metadata.
        self.metadata = { 'render.modes': ['human'] }
        ### Frequency control.
        # _cpu cpufreq class control
        # _frequencies list of available frequencies (<= order)
        # _freqpos position of current frequency
        self._cpu = cpufreq.cpuFreq()
        self._frequencies = sorted( self._cpu.available_frequencies )[:-1]
        self._freqpos = -1
        # Set used cores to 'userspace' scheme for frequency modification.
        self._cpu.set_governors('userspace', self.CORES)
        ### Power measure.
        pyRAPL.setup(
            devices = [pyRAPL.Device.PKG],
            socket_ids = [self.SOCKET]
        )
        ### Action space.
        # 0: hold frequency
        # 1: lower frequency
        # 2: raise frequency
        self.action_space = gym.spaces.Discrete(3)
        self.HOLD_FREQ = 0
        self.LOWER_FREQ = 1
        self.RAISE_FREQ = 2
        ### Action rewards:
        # See 'get_reward()'
        # REWARD_CLOSER given when action approaches goal
        # REWARD_FARTHER given when action gets farther from goal
        # REWARD_GOAL given when action gets to goal state
        self.REWARD_CLOSER = +1
        self.REWARD_FARTHER = -1
        self.REWARD_GOAL = +2
        ### Observation space:
        # Interval partition of power range of CPU.
        # Shape of intervals: (power_i, power_i+1]
        self.observation_space = gym.spaces.Discrete( len(self.INTERVALS) + 1 )
        # _power: current power consumption
        # _state: interval of current power consumption
        # _goal: interval of self.LIMIT
        self._power = 0.0
        self._state = 0
        self._goal = self.get_state(self.POWER)
        ### CPUEnv: random number generator.
        # RNG random number generator
        self.RNG = None
        self.seed( self.SEED )
        ### CPUEnv: general environment variables.
        # _reward: accumulated environment reward
        # _done: boolean value to indicate if goal or max steps were reached
        # _info: dict for auxiliary debug values
        # _count: counts the number of steps taken during environment action
        self._reward = None
        self._acc_reward = None
        self._done = None
        self._info = None
        self._count = None
        self.reset()

    def reset(self, reset_freqpos = None):
        """Reset counters, pick an initial frequency (random unless
        *reset_freqpos* is given), apply it and return the initial state."""
        ### General environment variables.
        self._reward = 0
        self._acc_reward = 0
        self._done = False
        self._info = {}
        self._count = 0
        ### Choose preset or random initial frequency.
        if reset_freqpos is None:
            self._freqpos = self.RNG.choice( np.arange( len(self._frequencies) ) )
        else:
            self._freqpos = reset_freqpos
        freq = self._frequencies[ self._freqpos ]
        ### Set frequency, wait sleep time and measure.
        self._power = self.set_wait_measure(freq, 'Reset')
        ### Set state from measured power.
        self._state = self.get_state( self._power )
        self.update_info()
        return self._state

    def step(self, action):
        """Apply *action*, measure the resulting power and return
        (state, reward, done, info). `done` only becomes True once
        MAXSTEPS steps have been taken (there is no goal termination)."""
        ### Check if max steps reached.
        if self._count == self.MAXSTEPS:
            self._done = True
            return self._state, self._reward, self._done, self._info
        assert self.action_space.contains(action)
        ### DECIDE ACTION:
        # Raising/lowering saturates at the ends of the frequency list.
        if action == self.HOLD_FREQ:
            pass
        elif action == self.RAISE_FREQ:
            if self._freqpos == len(self._frequencies) - 1:
                pass
            else:
                self._freqpos += 1
        elif action == self.LOWER_FREQ:
            if self._freqpos == 0:
                pass
            else:
                self._freqpos -= 1
        ### DO ACTION, WAIT AND MEASURE:
        freq = self._frequencies[ self._freqpos ]
        label = f"Iter {self._count + 1}"
        next_power = self.set_wait_measure(freq, label)
        next_state = self.get_state( next_power )
        ### REWARD:
        self._reward = self.get_reward(next_state, self._state)
        self._acc_reward += self._reward
        ### GOAL: no goal.
        ### INFO AND STATE UPDATE:
        self._power = next_power
        self._state = next_state
        self._count += 1
        self.update_info()
        ### RETURN:
        # NOTE(review): gym convention is a tuple, not a list — confirm
        # callers do not rely on the list type.
        return [self._state, self._reward, self._done, self._info]

    def render(self, mode='human'):
        ### Print current environtment state info.
        print(self._info)

    def seed(self, seed=None):
        ### Make random number generator from seed.
        self.RNG, seed = seeding.np_random(seed)
        return [seed]

    def close(self):
        ### Reset CPU to default system values.
        self._cpu.reset()

    ### AUXILIARY ENV METHODS
    def get_powerpoints(self, pstep):
        """Return the interval boundaries from MINPOWER to >= MAXPOWER in
        steps of *pstep*."""
        powers = []
        ppoint = self.MINPOWER
        powers.append(ppoint)
        while ppoint < self.MAXPOWER:
            ppoint += pstep
            powers.append(ppoint)
        return powers

    def get_intervals(self, powerpoints):
        """Build [lo, hi] interval pairs from the boundary list; the first
        and last intervals are open-ended (None)."""
        intervals = []
        # First interval.
        ppoint = powerpoints[0]
        intervals.append( [None, ppoint] )
        for i in range(1, len(powerpoints)):
            intervals.append( [ppoint, powerpoints[i]] )
            ppoint = powerpoints[i]
        # Last interval.
        intervals.append( [ppoint, None] )
        return intervals

    def get_state(self, power):
        """Map a power reading (watts) to its 1-based interval index."""
        pos = np.searchsorted(self.POWERPOINTS, power, side='right')
        return pos + 1

    def get_reward(self, state, prev_state):
        """Reward for moving from *prev_state* to *state* relative to the
        goal interval: +2 on goal, +1 toward it, -1 away from it."""
        ### Positive while on goal.
        if state == self._goal:
            return self.REWARD_GOAL
        if state < self._goal:
            if state - prev_state > 0:
                return self.REWARD_CLOSER
            else:
                return self.REWARD_FARTHER
        if state > self._goal:
            if state - prev_state < 0:
                return self.REWARD_CLOSER
            else:
                return self.REWARD_FARTHER

    def update_info(self):
        # Refresh the debug info dict exposed through step()/render().
        self._info['step'] = self._count
        self._info['state'] = self._state
        self._info['interval'] = self.INTERVALS[self._state - 1]
        self._info['reward'] = self._reward
        self._info['acc_reward'] = self._acc_reward
        self._info['freqpos'] = self._freqpos
        self._info['frequency'] = self._frequencies[ self._freqpos ]
        self._info['power'] = self._power

    ### AUXILIARY FREQUENCY/MEASURE METHODS
    def set_frequency(self, freq):
        """Pin all managed cores to *freq*, ordering the min/max writes so
        the constraint min <= max is never violated."""
        ### Check if current frequency is above or below
        current_freq = self._cpu.get_min_freq()[ self.CORES[0] ]
        if current_freq < freq:
            # Above
            self._cpu.set_max_frequencies(freq, self.CORES)
            self._cpu.set_min_frequencies(freq, self.CORES)
        else:
            # Below
            self._cpu.set_min_frequencies(freq, self.CORES)
            self._cpu.set_max_frequencies(freq, self.CORES)
        self._cpu.set_frequencies(freq, self.CORES)

    def measure_power(self, label):
        """Measure average package power (watts) over MEASURE_TIME seconds,
        retrying until pyRAPL yields a usable sample."""
        meter = pyRAPL.Measurement(label=label)
        while meter._results is None or meter._results.pkg is None:
            meter.begin()
            time.sleep(self.MEASURE_TIME)
            meter.end()
        m_energy = meter._results.pkg[self.SOCKET] # micro-J
        m_time = meter._results.duration # micro-s
        power = m_energy / m_time # watts
        return power

    def set_wait_measure(self, freq, label):
        """Set *freq*, wait SLEEP_TIME for it to settle, then measure power."""
        self.set_frequency(freq)
        time.sleep(self.SLEEP_TIME)
        power = self.measure_power(label)
        return power
| StarcoderdataPython |
11289275 | <reponame>Mhh123/tornado<gh_stars>0
'''
This module is the same as the sessions module, except that:
1. NotificationMixin sets a "notifications" property instead a "session" one,
and that the NotificationManager ("notifications") gets an object only once, and
deletes it from the database after retrieving;
2. The objects are stored in db 1 (for default) instead of 0 to avoid conflicts
with sessions. (You can change this setting with the "db_notifications" setting
in the "storage" setting.)
'''
from pycket.session import create_mixin, SessionManager
class NotificationManager(SessionManager):
    """SessionManager variant whose stored objects are one-shot: a value
    is deleted from the backing database as soon as it is read."""

    STORAGE_CATEGORY = 'db_notifications'

    def get(self, name, default=None):
        """Fetch *name* exactly like SessionManager.get(), then remove it
        from the database so the same notification cannot be read twice."""
        stored = super(NotificationManager, self).get(name, default)
        if stored is None:
            return stored
        self.delete(name)
        return stored
class NotificationMixin(object):
    # Mixin for request handlers: exposes one-shot notifications the same
    # way SessionMixin exposes sessions.
    @property
    def notifications(self):
        '''
        Returns a NotificationManager instance
        '''
        return create_mixin(self, '__notification_manager', NotificationManager)
11317274 | <filename>backgen.py
#!/usr/bin/python3
import argparse
import numpy as np
import pyopencl as cl
import cv2 as cv
import sys
class BackgroundSubtractor:
    """Estimate a static background image from a video by running an
    OpenCL kernel that blends each frame into a running background model
    (weighted average with a change threshold), optionally deflickering
    frames via histogram matching first."""

    def __init__(self, weight=0.5, threshold=1.0, join_weight=30, platform=0, kernel_source="kernel.cl"):
        # weight:      blend weight for new frames (scaled by fps in run()).
        # threshold:   per-pixel change threshold used by the kernel.
        # join_weight: weight for merging per-frame histograms (deflicker).
        # platform:    index of the OpenCL platform to use.
        self.input=input
        self.weight=weight
        self.join_weight=join_weight
        self.threshold=threshold
        self.src=open(kernel_source, "r").read()
        platforms=cl.get_platforms()
        if len(platforms)<=platform:
            raise IndexError(f"Could not find platform {platform}!")
        self.device=platforms[platform].get_devices()

    def run(self, input, silent=True, deflicker=False, sidebyside=False,
            output_img=None, output_vid=None, nframes=10):
        """Process video *input* frame by frame on the OpenCL device,
        optionally previewing, and write the final background to
        *output_img* and/or every intermediate background to *output_vid*."""
        mf=cl.mem_flags
        ctx=cl.Context(self.device)
        cmd_queue=cl.CommandQueue(ctx)
        prg=cl.Program(ctx, self.src).build()
        capture=cv.VideoCapture(input)
        if capture.isOpened():
            fimg=capture.read()[1]
        # NOTE(review): if the capture failed to open, fimg is undefined
        # below — confirm whether callers guarantee a readable input.
        fimg=fimg.astype(np.float32)
        fps=capture.get(cv.CAP_PROP_FPS)
        vidout=None
        if output_vid is not None:
            vidout=cv.VideoWriter(output_vid,
                                  cv.VideoWriter_fourcc(*"mp4v"),
                                  fps, fimg.shape[:2][::-1])
        # Constant kernel arguments uploaded once.
        weight=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.float32(self.weight*fps))
        threshold=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.float32(self.threshold))
        jweight=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.int32(self.join_weight))
        width=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.int32(fimg.shape[1]))
        height=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.int32(fimg.shape[0]))
        # Histogram buffers are only needed when deflickering.
        histogram,img_histogram, lut=(None, None, None)
        if deflicker:
            histogram=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.zeros(256*3).astype(np.int32))
            img_histogram=cl.Buffer(ctx, mf.WRITE_ONLY|mf.COPY_HOST_PTR, hostbuf=np.zeros(256*3).astype(np.int32))
            lut=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR|mf.HOST_NO_ACCESS, hostbuf=np.zeros(256*3).astype(np.int32))
        # The first frame seeds both the working image and the background.
        img=cl.Buffer(ctx, mf.COPY_HOST_PTR, hostbuf=fimg)
        background=cl.Buffer(ctx, mf.COPY_HOST_PTR, hostbuf=fimg)
        _,nimg=capture.read()
        new_img=cl.Buffer(ctx, mf.READ_ONLY|mf.COPY_HOST_PTR, hostbuf=nimg.astype(np.float32))
        if deflicker:
            # Reference histogram from the first frame.
            prg.cal_histogram(cmd_queue, fimg.shape[:2], None, histogram, img, width, height)
            prg.fin_histogram(cmd_queue, (3, ), None, histogram)
        res=np.empty_like(fimg).astype(np.float32)
        try:
            while True:
                if deflicker:
                    # Match each frame's histogram to the running reference.
                    cl.enqueue_fill_buffer(cmd_queue, img_histogram, np.int32(0), 0, 3*4*256)
                    prg.cal_histogram(cmd_queue, fimg.shape[:2], None, img_histogram, new_img, width, height)
                    prg.fin_histogram(cmd_queue, (3, ), None, img_histogram)
                    prg.cal_lut(cmd_queue, (3, ), None, histogram, img_histogram, lut)
                    prg.deflicker(cmd_queue, fimg.shape[:2], None, lut, new_img, width, height)
                    prg.join_histogram(cmd_queue, (3, ), None, histogram, img_histogram, jweight)
                # Blend the new frame into the background model.
                prg.backsub(cmd_queue, fimg.shape[:2], None, img, background, new_img, width, height, weight, threshold)
                if (not silent) or (vidout is not None):
                    cl.enqueue_copy(cmd_queue, res, background)
                if vidout is not None:
                    vidout.write(res.astype(np.uint8))
                if not silent:
                    cv.imshow('background', res.astype(np.uint8))
                    if sidebyside: cv.imshow('real', nimg)
                    if cv.waitKey(1) == 27: break  # ESC aborts preview
                _,nimg=capture.read()
                if nimg is None: break  # end of stream
                cl.enqueue_copy(cmd_queue, new_img, nimg.astype(np.float32))
        except IndexError:
            pass
        # Final background readback and outputs.
        cl.enqueue_copy(cmd_queue, res, background)
        if output_img is not None:
            cv.imwrite(output_img, res.astype(np.uint8))
        if vidout is not None:
            vidout.write(res.astype(np.uint8))
        if not silent:
            cv.imshow('background', res.astype(np.uint8))
            cv.waitKey(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This program generates a static background image from videos using '+
"an opencl kernel which calculates pixel values based on averages.")
parser.add_argument('--input', type=str, help='Path to a video or a sequence of image.', default=None)
parser.add_argument('--output', type=str, help='Save the last frame to this path', default=None)
parser.add_argument('--vidoutput', type=str, help='Write video to path', default=None)
parser.add_argument('--weight', type=float, help='the weight which a new image gets merged into (0;inf]', default=0.5)
parser.add_argument('--threshold', type=int, help='threshold which an pixel is seen as changed [1;255]', default=1)
parser.add_argument('--silent', nargs='?', const=True, help='silent mode, do not show images', default=False)
parser.add_argument('--sidebyside', nargs='?', const=True, help='show the generated background and the newest frame side by side', default=False)
parser.add_argument('--deflicker', nargs='?', const=False, help='deflickers frames before processing them with backsub', default=False)
args = parser.parse_args()
subtractor=BackgroundSubtractor(weight=args.weight, threshold=args.threshold)
if args.input is None:
parser.print_help()
sys.exit(1)
subtractor.run(args.input, silent=args.silent, deflicker=args.deflicker, sidebyside=args.sidebyside, output_img=args.output, output_vid=args.vidoutput)
| StarcoderdataPython |
9653817 | <reponame>mtag-dev/esg<filename>tests/test_default_headers.py
import httpx
import pytest
from esg import Config
from tests.utils import run_server
async def app(scope, receive, send):
    """Minimal ASGI app: answer every HTTP request with an empty 200 response."""
    assert scope["type"] == "http"
    start_message = {"type": "http.response.start", "status": 200, "headers": []}
    body_message = {"type": "http.response.body", "body": b"", "more_body": False}
    await send(start_message)
    await send(body_message)
@pytest.mark.asyncio
async def test_default_default_headers():
    """The server emits its default 'server' and 'date' headers."""
    server_config = Config(app=app, loop="asyncio", limit_max_requests=1)
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert reply.headers["server"] == "esg"
            assert reply.headers["date"]
@pytest.mark.asyncio
async def test_override_server_header():
    """A user-supplied Server header replaces the default one."""
    server_config = Config(
        app=app,
        loop="asyncio",
        limit_max_requests=1,
        headers=[("Server", "over-ridden")],
    )
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert reply.headers["server"] == "over-ridden"
            assert reply.headers["date"]
@pytest.mark.asyncio
async def test_disable_default_server_header():
    """server_header=False suppresses the Server header entirely."""
    server_config = Config(
        app=app,
        loop="asyncio",
        limit_max_requests=1,
        server_header=False,
    )
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert "server" not in reply.headers
@pytest.mark.asyncio
async def test_override_server_header_multiple_times():
    """Repeated Server headers are joined into one comma-separated value."""
    server_config = Config(
        app=app,
        loop="asyncio",
        limit_max_requests=1,
        headers=[("Server", "over-ridden"), ("Server", "another-value")],
    )
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert reply.headers["server"] == "over-ridden, another-value"
            assert reply.headers["date"]
@pytest.mark.asyncio
async def test_add_additional_header():
    """Custom headers are added without replacing the defaults."""
    server_config = Config(
        app=app,
        loop="asyncio",
        limit_max_requests=1,
        headers=[("X-Additional", "new-value")],
    )
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert reply.headers["x-additional"] == "new-value"
            assert reply.headers["server"] == "esg"
            assert reply.headers["date"]
@pytest.mark.asyncio
async def test_disable_default_date_header():
    """date_header=False suppresses the Date header entirely."""
    server_config = Config(
        app=app,
        loop="asyncio",
        limit_max_requests=1,
        date_header=False,
    )
    async with run_server(server_config):
        async with httpx.AsyncClient() as http:
            reply = await http.get("http://127.0.0.1:8000")
            assert "date" not in reply.headers
| StarcoderdataPython |
3435193 | <reponame>ashidagithub/HJDS201909LA
# -*- coding: UTF-8 -*-
# ------------------------(max to 80 columns)-----------------------------------
# author by : (学员ID)
# created: 2019.7.10
# Description:
# 防御类道具
# ------------------------(max to 80 columns)-----------------------------------
import sys
sys.path.append('..')
# 引用自定义的类及功能包
from pkg_KOG.class_equipment import Equipment
class EQDefense(Equipment):
    """Defensive equipment item with a unique life-restore bonus."""

    # Unique bonus: life points restored per tick.
    restore_life_force = 0.0

    def show_me_unique(self):
        """Print the unique-bonus section for this item."""
        lines = (
            '-----独有加成-----',
            ' +回血:%0.2f' % (self.restore_life_force),
            '-----------------',
        )
        for line in lines:
            print(line)
        return
if __name__ == '__main__':
    # Quick smoke test: print the common and unique stat blocks.
    item = EQDefense()
    item.show_me()
    item.show_me_unique()
| StarcoderdataPython |
9639826 |
'''
coder: <NAME>, <NAME>
date: April 2017
summary: Trains a LSTM on the activity or tcid sequences that are saved in db called Seqdb.
here, we want to know whether we can predict the crash based on the sequence data.
if we can, then we are able to extract the pattern from the sequence data.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.callbacks import ModelCheckpoint
import keras
import tensorflow as tf
from keras.models import load_model
# from keras import metrics
import keras.backend as K
from keras.utils import np_utils
from database.seqdb import Seqdb
# from database.psudo_seqdb import PsudoSeqdb
import numpy as np
import pdb,os
from libs import utilitylib,evallib,modellib,datalib,classifiers
from keras.utils import plot_model
from keras.callbacks import TensorBoard,EarlyStopping
import shutil,time
from keras import optimizers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score,cross_validate
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn.metrics import recall_score
from _init_global_vars_train import *
from bayes_opt import BayesianOptimization
def getModel():
    """Build and compile the network described by the global `configs` dict."""
    net = modellib.buildModel(configs)
    optimizer = optimizers.Adam(lr=configs['learningRate'], decay=.0001)
    net.compile(loss='binary_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    return net
def evaluateLSTM(embeddingSize,
                 lstmSize,
                 lr,
                 networkType):
    """Cross-validate one hyper-parameter setting and return the mean test F1.

    The Bayesian optimizer passes continuous values; they are coerced into
    the global `configs` dict consumed by getModel().
    """
    configs['embeddingSize'] = int(embeddingSize)
    configs['lstmSize'] = int(lstmSize)
    configs['learningRate'] = lr
    # networkType is a continuous knob: 1 selects the bidirectional variant.
    # (The "bidrectional" spelling is kept for config compatibility.)
    configs['networkType'] = "bidrectional" if int(networkType) == 1 else "lstm"
    estimator = KerasClassifier(build_fn=getModel,
                                epochs=configs['epochs'],
                                batch_size=configs['batchSize'],
                                verbose=0)
    folds = KFold(n_splits=5, random_state=seed)
    wanted_scores = ['precision', 'recall', 'accuracy', 'roc_auc', 'f1']
    results = cross_validate(estimator, xTrain, yTrain, cv=folds, scoring=wanted_scores, return_train_score=False)
    return np.mean(results['test_f1'])
if __name__ == '__main__':
    # Entry point: load the crash-sequence dataset, then run Bayesian
    # optimization over the LSTM hyper-parameters (see evaluateLSTM).
    # Initialize
    seed = 7
    np.random.seed(seed)
    # `configs` is shared with getModel()/evaluateLSTM(); declared global so
    # assignments below update the module-level dict from _init_global_vars_train.
    global configs
    with tf.device('cpu:0'):
        # Clear any stale logs/checkpoints from a previous run.
        shutil.rmtree(ROOT_DIR, ignore_errors=True)
        # Flag to show if the data is synthetic or real
        seqdb = Seqdb(SEQUENCE_FILE,crashIndex = CRASH_INDEX ); print('Loading data...')
        (xTrain, yTrain), (xTest,yTest) = seqdb.loadData(maxSeqSize = configs['maxSeqSize'],
                                                         testSplit = configs['testSplit'],
                                                         dedup = configs['dedup'],
                                                         filters = ACTIONS_TO_BE_FILTERED,
                                                         testFlag = configs['testFlag'],
                                                         p2nRatio = configs['p2nRatio'])
        seqdb.summary()
        # Record dataset-derived sizes so getModel() can size its layers.
        configs['maxlen'] = seqdb.longestSeqSize
        configs['actionCount'] = seqdb.actionCount
        configs['trainSize'] = yTrain.size
        xTrain,xTest = datalib.seqPadding(xTrain,xTest,configs['maxlen'])
        # Search space; networkType < 1 selects plain LSTM, >= 1 bidirectional
        # (see evaluateLSTM).
        lstmBO = BayesianOptimization(evaluateLSTM, {'embeddingSize' : (1,10),
                                                     'lstmSize' : (4,20),
                                                     'lr' : (.001,.03),
                                                     'networkType' :(.5,1.5)})
        # Seed the optimizer with three hand-picked starting points.
        lstmBO.explore({ 'embeddingSize' : [3 ,8 ,2],
                         'lstmSize' : [6,14,6],
                         'lr' : [.01,.02,.001],
                         'networkType' : [.5,.5,1]})
        lstmBO.maximize(n_iter=20,acq= 'ei')
        print(lstmBO.res['max'])
| StarcoderdataPython |
1965288 | <reponame>renmengye/tfplus
from data_provider import DataProvider
class SampleDataProvider(DataProvider):
    """Wraps another DataProvider and filters each batch through `reject`."""

    def __init__(self, data_provider):
        """Store the provider whose batches will be filtered."""
        super(SampleDataProvider, self).__init__()
        self._data_provider = data_provider

    @property
    def data_provider(self):
        """The wrapped DataProvider."""
        return self._data_provider

    def reject(self, idx):
        """
        Whether to reject a sample.

        Subclasses must override.  Despite the name, implementations are
        expected to return the *accepted* subset of `idx` (see
        get_batch_idx), not a boolean.
        """
        # NotImplementedError is the conventional type for abstract methods;
        # it is a subclass of Exception, so existing handlers still catch it.
        raise NotImplementedError('Not implemented')

    def get_size(self):
        """Size of the underlying provider (rejection does not shrink it)."""
        return self.data_provider.get_size()

    def get_batch_idx(self, idx, **kwargs):
        """Fetch the batch for the accepted subset of `idx`.

        Guarantees at least one sample: if everything was rejected, fall
        back to the first requested index.
        """
        new_idx = self.reject(idx)
        if len(new_idx) > 0:
            return self.data_provider.get_batch_idx(new_idx, **kwargs)
        else:
            # BUG FIX: previously indexed new_idx[0] here, which always
            # raised IndexError because this branch only runs when new_idx
            # is empty.  Use the first *original* index instead.
            return self.data_provider.get_batch_idx([idx[0]], **kwargs)
| StarcoderdataPython |
3291294 | <filename>sockets/experiment/util/__init__.py
# Copyright (c) 2021, Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
from .message_passing_protocol import MPPSocket
| StarcoderdataPython |
5183099 | import os
from oauth2client import client, tools
from oauth2client.file import Storage
from .settings import CLIENT_SECRET_FILE, SCOPES, APPLICATION_NAME, flags
def get_credentials(credentials_path, flags=flags):
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain new credentials.

    Args:
        credentials_path: path of the credential file; '~' is expanded.
        flags: oauth2client argparse flags (or a falsy value for the
            legacy Python 2.6 code path).

    Returns:
        Credentials, the obtained credential.
    """
    credential_path = os.path.expanduser(credentials_path)
    credential_folder = os.path.dirname(credential_path)
    # BUG FIX: guard against an empty dirname (a bare filename) — the old
    # code called os.makedirs('') in that case, which raises OSError.
    if credential_folder and not os.path.isdir(credential_folder):
        os.makedirs(credential_folder)
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
| StarcoderdataPython |
11399481 | '''
ml load anaconda
conda activate ke_seg
cd /home/nfs/em4/shuchangyong/submit
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.90_test-74.58.pth --gpu 0 --type val --figsavepath 75.9val
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.90_test-74.58.pth --gpu 2 --type test --figsavepath 74.58test
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.15_test-74.18.pth --gpu 1 --type val --figsavepath 75.15val
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.15_test-74.18.pth --gpu 3 --type test --figsavepath 74.18test
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.02_test-00.00.pth --gpu 1 --type val --figsavepath 75.02val
python val.py --data-dir /home/nfs/em2/wei_dataset/raw_datasets/CITYSCAPES/data/cityscapes --restore-from new_rn18-cityscape_singleAndWhole_val-75.02_test-00.00.pth --gpu 2 --type test --figsavepath 00.00test
'''
import os
import torch
from options import ValOptions
from torch.utils import data
from dataset.datasets import CSTrainValSet, CSTestSet
from networks.pspnet import Res_pspnet, BasicBlock, Bottleneck
from utils.evaluate import evaluate_main
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
    # Entry point: evaluate a trained ResNet-18 PSPNet on Cityscapes.
    args = ValOptions().initialize()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # 'val' uses the labeled validation list; 'test' uses the unlabeled
    # test list (no ground truth, different dataset class).
    if args.type == 'val':
        loader = data.DataLoader(CSTrainValSet(args.data_dir, args.val_data_list, crop_size=(1024, 2048), scale=False, mirror=False), batch_size=1, shuffle=False, pin_memory=True)
    elif args.type == 'test':
        loader = data.DataLoader(CSTestSet(args.data_dir, args.test_data_list, crop_size=(1024, 2048)), batch_size=1, shuffle=False, pin_memory=True)
    # ResNet-18 ([2,2,2,2] BasicBlocks) backbone; named "student" —
    # presumably the distilled student network — TODO confirm.
    student = Res_pspnet(BasicBlock, [2, 2, 2, 2], num_classes = args.num_classes)
    student.load_state_dict(torch.load(args.restore_from))
    print("=> load " + str(args.restore_from))
    mean_IU, IU_array = evaluate_main(args.figsavepath, student, loader, args.num_classes, args.type)
    print('mean_IU: {:.6f} IU_array: \n{}'.format(mean_IU, IU_array))
| StarcoderdataPython |
377730 | import logging
import sys, os
import eons, esam
import pandas as pd
#Class name is what is used at cli, so we defy convention here in favor of ease-of-use.
#Outputs all data in self.data and ASSUMES self.data is a SampleSet containing only flat data
class out_excel(esam.DataFunctor):
    """Write all data in self.data to an Excel file.

    Class name is what is used at the CLI, so we defy naming convention here
    in favor of ease-of-use.  Assumes self.data is a SampleSet containing
    only flat data.
    """

    def __init__(self, name=eons.INVALID_NAME):
        super().__init__(name)
        # "file" (the output path) must be supplied by the caller.
        self.requiredKWArgs.append("file")

    def UserFunction(self, **kwargs):
        """Flatten each datum's attributes into a row and export to Excel."""
        df = pd.DataFrame([d.__dict__ for d in self.data.data])
        # BUG FIX: the original final line had stray text fused onto it
        # (a dataset-concatenation artifact), making the statement invalid.
        df.to_excel(kwargs.get("file"))
12822048 | <reponame>awwong1/apollo<gh_stars>0
from applications.charge_list.models import ChargeList, ActivityCharge, TimeCharge, UnitCharge, \
ActivityChargeActivityCount
from rest_framework import relations
from rest_framework.serializers import HyperlinkedModelSerializer
class ChargeListSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer for ChargeList, linking its price list and station."""
    url = relations.HyperlinkedIdentityField(view_name="charge-list-detail")
    price_list = relations.HyperlinkedRelatedField(view_name="price-list-detail", read_only=True)
    station = relations.HyperlinkedRelatedField(view_name="station-detail", read_only=True)

    class Meta:
        model = ChargeList
class ActivityChargeSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer for ActivityCharge with read-only related links."""
    url = relations.HyperlinkedIdentityField(view_name="activity-charge-detail")
    price_list_item = relations.HyperlinkedRelatedField(view_name="activity-price-list-item-detail", read_only=True)
    charge_list = relations.HyperlinkedRelatedField(view_name="charge-list-detail", read_only=True)
    billing_business = relations.HyperlinkedRelatedField(view_name="business-detail", read_only=True)

    class Meta:
        model = ActivityCharge
class TimeChargeSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer for TimeCharge with read-only related links."""
    url = relations.HyperlinkedIdentityField(view_name="time-charge-detail")
    price_list_item = relations.HyperlinkedRelatedField(view_name="time-price-list-item-detail", read_only=True)
    charge_list = relations.HyperlinkedRelatedField(view_name="charge-list-detail", read_only=True)
    billing_business = relations.HyperlinkedRelatedField(view_name="business-detail", read_only=True)

    class Meta:
        model = TimeCharge
class ActivityChargeActivityCountSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer for ActivityChargeActivityCount rows."""
    url = relations.HyperlinkedIdentityField(view_name="activity-charge-activity-count-detail")
    activity_charge = relations.HyperlinkedRelatedField(view_name="activity-charge-detail", read_only=True)

    class Meta:
        model = ActivityChargeActivityCount
class UnitChargeSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer for UnitCharge with read-only related links."""
    url = relations.HyperlinkedIdentityField(view_name="unit-charge-detail")
    price_list_item = relations.HyperlinkedRelatedField(view_name="unit-price-list-item-detail", read_only=True)
    charge_list = relations.HyperlinkedRelatedField(view_name="charge-list-detail", read_only=True)
    billing_business = relations.HyperlinkedRelatedField(view_name="business-detail", read_only=True)

    class Meta:
        model = UnitCharge
| StarcoderdataPython |
11359803 | #!/usr/bin/env python3
import sys, re, os, csv, getpass, argparse, mysql.connector, time, json
from Bio import Entrez
import pandas as pd
def create_and_parse_argument_options(argument_list):
    """Build the command-line parser and parse *argument_list* into a namespace."""
    arg_parser = argparse.ArgumentParser(description='Update structural folds tables from ECOD and SCOPe, given a PDB ID')
    arg_parser.add_argument('input_taxids', help='PDB identifier to query', type=str)
    arg_parser.add_argument('user_name', help='Username for connecting to DESIRE', type=str)
    arg_parser.add_argument('-host','--db_host', help='Defines database host (default: 172.16.31.10)', type=str, default='172.16.31.10')
    arg_parser.add_argument('-schema','--db_schema', help='Defines schema to use (default: SEREB)', type=str, default='SEREB')
    arg_parser.add_argument('-dl','--download_most_recent_phylogeny', help='Update latest phylogeny.', default=False, action="store_true")
    arg_parser.add_argument('-pf','--phylogeny_file', help='Use this file to store and read phylogeny.', type=str, default='./data/downloaded_phylogeny.json')
    arg_parser.add_argument('-commit','--commit_changes', help='Commit the changes to the DB', action="store_true")
    return arg_parser.parse_args(argument_list)
def initiate_connection(uname, host, database):
    """Prompt for a password and return a MySQL connection to *database*."""
    password = getpass.getpass("Password: ")
    return mysql.connector.connect(user=uname, password=password, host=host, database=database)
def load_species_taxids(file_path):
    """Read one NCBI taxid per line from *file_path* and return them as a list.

    Blank trailing newlines do not produce empty entries beyond what
    splitlines() yields.
    """
    # Context manager ensures the handle is closed even on read errors
    # (the original left the file open).
    with open(file_path) as f:
        return f.read().splitlines()
def download_phylogeny(input_taxids, newfileLoc, phylogeny_levels):
    """Fetch the lineage of each taxid from NCBI Taxonomy and cache it as JSON.

    Returns {strain_taxid: {ancestor_taxid: [rank, scientific_name]}}.

    NOTE(review): this reverses *phylogeny_levels* IN PLACE; main() later
    slices the same list, so callers appear to depend on this side effect —
    confirm before changing.
    """
    Entrez.email = "<EMAIL>"  # Always tell NCBI who you are
    phylogeny_structure = dict()
    phylogeny_levels.reverse()
    for taxid in input_taxids:
        # Rate-limit requests to NCBI (~1 per second).
        time.sleep(1)
        oneTaxon = dict()
        handle = Entrez.efetch(db="Taxonomy", id=taxid, retmode="xml")
        records = Entrez.read(handle)
        # The queried strain itself, keyed by its own taxid.
        oneTaxon[records[0]['TaxId']] = ['strain',records[0]['ScientificName']]
        lineages = records[0]["LineageEx"]
        # Keep only ancestors whose rank is one of the requested levels.
        for i in lineages:
            for j in sorted(phylogeny_levels):
                if j == i['Rank']:
                    oneTaxon[i['TaxId']] = [i['Rank'],i['ScientificName']]
        phylogeny_structure[records[0]['TaxId']] = oneTaxon
        print(oneTaxon)
    # NOTE(review): this file handle is never explicitly closed.
    json.dump(phylogeny_structure, open(newfileLoc, 'w' ))
    return phylogeny_structure
def makeLevelTxid(txidDict):
    """Invert {taxid: [level, name, ...]} into {level: taxid}."""
    return {level_info[0]: taxid for taxid, level_info in txidDict.items()}
def getParentRecurse(phylogenyLevels, lvTotxid):
    """Return the taxid of the deepest level of *phylogenyLevels* present in
    *lvTotxid*, walking upward recursively; return 0 if none is known."""
    if not phylogenyLevels:
        return 0
    deepest = phylogenyLevels[-1]
    if deepest in lvTotxid:
        return lvTotxid[deepest]
    remaining = phylogenyLevels[:phylogenyLevels.index(deepest)]
    return getParentRecurse(remaining, lvTotxid)
def constructParentsDict(txidDict, lvToTxid, phylogenyLevels):
    """Map each taxid to the taxid of its nearest known ancestor level."""
    parents = {}
    for taxid, level_info in txidDict.items():
        # Levels strictly above this taxid's own level.
        upper_levels = phylogenyLevels[:phylogenyLevels.index(level_info[0])]
        parents[taxid] = getParentRecurse(upper_levels, lvToTxid)
    return parents
def main(commandline_arguments):
    """Load (or download) per-strain phylogenies and insert them into the
    Species / TaxGroups / Species_TaxGroup tables of the DESIRE database.

    Changes are only committed when --commit_changes is passed.
    """
    comm_args = create_and_parse_argument_options(commandline_arguments)
    input_taxids = load_species_taxids(comm_args.input_taxids)
    phylogeny_levels = ['superkingdom', 'phylum', 'class', 'order', 'family' , 'genus', 'species', 'strain']
    if comm_args.download_most_recent_phylogeny:
        phylogeny_structure = download_phylogeny(input_taxids, comm_args.phylogeny_file, phylogeny_levels)
    else:
        phylogeny_structure = json.load( open( comm_args.phylogeny_file ) )
    # Append each taxon's parent taxid as a third list element.
    for txid in input_taxids:
        lvTotxid = makeLevelTxid(phylogeny_structure[txid])
        parentDict = constructParentsDict(phylogeny_structure[txid], lvTotxid, phylogeny_levels)
        for k in phylogeny_structure[txid].keys():
            phylogeny_structure[txid][k].append(parentDict[k])
    cnx = initiate_connection(comm_args.user_name, comm_args.db_host, comm_args.db_schema)
    cursor = cnx.cursor()
    # '''---Species Table---'''################################################################################
    for strain_Id in input_taxids:
        strain = phylogeny_structure[strain_Id][strain_Id][1]
        query = ("INSERT INTO `Species`(`strain_id`,`strain`) VALUES('"+strain_Id+"','"+strain+"')")
        print(query)
        cursor.execute(query)
    #Fix the recursive relationship
    '''---TaxGroups Table---'''##############################################################################
    phylogeny_levels.insert(0,'strain')
    # FK checks are disabled because TaxGroups rows reference parents that
    # may not have been inserted yet.
    cursor.execute("SET FOREIGN_KEY_CHECKS=0")
    for strain_Id in input_taxids:
        list_with_levels=['no_strain','no_species','no_genus','no_family','no_order','no_class','no_phylum','no_superkingdom']
        for ids in phylogeny_structure[strain_Id].keys():
            list_with_levels[phylogeny_levels.index(phylogeny_structure[strain_Id][ids][0])] = ids
        for ids in phylogeny_structure[strain_Id].keys():
            group_lvl = phylogeny_structure[strain_Id][ids][0] #should be group level
            group_name = phylogeny_structure[strain_Id][ids][1] #should be the groupName
            parent = phylogeny_structure[strain_Id][ids][2] #should be the parent
            query = ("INSERT INTO `TaxGroups`(`taxgroup_id`,`groupLevel`,`groupName`,`parent`) VALUES('"+ids+"','"+group_lvl+"','"+group_name+"','"+str(parent)+"')")
            print(query, type(parent))
            # Duplicate-key inserts are expected across strains; skip them.
            try:
                cursor.execute(query)
            except:
                print("Skipped")
    # '''---Species_TaxGroup link table---'''
    for strainID in input_taxids:
        for ids in phylogeny_structure[strainID].keys():
            if ids != strainID:
                taxgroupID = ids
                query = ("INSERT INTO `Species_TaxGroup`(`strain_id`,`taxgroup_id`) VALUES('"+strainID+"','"+taxgroupID+"')")
                print(query)
                try:
                    cursor.execute(query)
                except:
                    print("Skipped")
    cursor.execute("SET FOREIGN_KEY_CHECKS=1")
    if comm_args.commit_changes:
        cnx.commit()
    cursor.close()
    cnx.close()
if __name__ == '__main__':
    # Exit with main()'s return value so shell callers can detect failure.
    # BUG FIX: the original line had stray dataset-separator text fused onto
    # it, making the statement invalid.
    sys.exit(main(sys.argv[1:]))
3511473 | <reponame>jordan9001/dobby2
if __name__ == '__main__':
    # This module is a library of Windows-kernel emulation helpers;
    # refuse to run stand-alone.
    print("Please import this file from a dobby script")
    exit(-1)
import struct
from .dobby import *
from .dobby_const import *
# windows kernel helper functions
def createIrq(ctx, irqtype, inbuf):
    """Build an emulated IRP of *irqtype* carrying *inbuf* (not implemented yet)."""
    raise NotImplementedError("TODO")
def createDrvObj(ctx, start, size, entry, path, name="DriverObj"):
    """Build a fake DRIVER_OBJECT in emulated memory and return its address.

    Also allocates and wires up the DRIVER_EXTENSION and the
    LDR_DATA_TABLE_ENTRY pointed to by DriverSection.  start/size/entry
    describe the loaded driver image, path is the on-disk image path, and
    name becomes "\\Driver\\<name>".  Field offsets follow the x64 layouts
    used by this emulator; unknown fields are symbolized so any read of
    them is flagged.
    """
    dobjsz = 0x150
    d = ctx.alloc(dobjsz)
    dex = ctx.alloc(0x50)
    dte = ctx.alloc(0x120)
    # initialize driver object
    # type = 0x4
    ctx.setu16(d + 0x00, 0x4)
    # size = 0x150
    ctx.setu16(d + 0x02, dobjsz)
    # DeviceObject = 0
    ctx.setu64(d + 0x08, 0x0)
    # flags = ??
    #TODO
    ctx.trySymbolizeMemory(d+0x10, 8, name+".Flags")
    # DriverStart = start
    ctx.setu64(d + 0x18, start)
    # DriverSize = size
    ctx.setu32(d + 0x20, size)
    # DriverSection = LDR_DATA_TABLE_ENTRY
    # not sure what most of these fields are, so we will see what is used
    # set up DriverSection
    ctx.trySymbolizeMemory(dte+0x0, 0x10, name + ".DriverSection.InLoadOrderLinks")
    ctx.trySymbolizeMemory(dte+0x10, 0x10, name + ".DriverSection.InMemoryOrderLinks")
    ctx.trySymbolizeMemory(dte+0x20, 0x10, name + ".DriverSection.InInitializationOrderLinks")
    # DllBase / EntryPoint / SizeOfImage
    ctx.setu64(dte+0x30, start)
    ctx.setu64(dte+0x38, entry)
    ctx.setu64(dte+0x40, size)
    # FullDllName / BaseDllName
    initUnicodeStr(ctx, dte+0x48, path)
    initUnicodeStr(ctx, dte+0x58, path.split('\\')[-1])
    ctx.trySymbolizeMemory(dte+0x68, 0x8, name + ".DriverSection.Flags")
    ctx.trySymbolizeMemory(dte+0x70, 0x10, name + ".DriverSection.HashLinks")
    ctx.setu64(dte+0x80, 0) # TimeDateStamp
    ctx.trySymbolizeMemory(dte+0x88, 0x8, name + ".DriverSection.EntryPointActivationContext")
    ctx.setu64(dte+0x90, 0) # Lock
    ctx.trySymbolizeMemory(dte+0x98, 0x8, name + ".DriverSection.DdagNode")
    ctx.trySymbolizeMemory(dte+0xa0, 0x10, name + ".DriverSection.NodeModuleLink")
    ctx.trySymbolizeMemory(dte+0xb0, 0x8, name + ".DriverSection.LoadContext")
    ctx.trySymbolizeMemory(dte+0xb8, 0x8, name + ".DriverSection.ParentDllBase")
    ctx.trySymbolizeMemory(dte+0xc0, 0x8, name + ".DriverSection.SwitchBackContext")
    ctx.trySymbolizeMemory(dte+0xc8, 0x20, name + ".DriverSection.IndexNodeStuff")
    ctx.trySymbolizeMemory(dte+0xf8, 0x8, name + ".DriverSection.OriginalBase")
    ctx.trySymbolizeMemory(dte+0x100, 0x8, name + ".DriverSection.LoadTime")
    ctx.setu32(dte+0x108, 0) # BaseNameHashValue
    ctx.setu32(dte+0x10c, 0) # LoadReasonStaticDependency
    ctx.trySymbolizeMemory(dte+0x110, 4, name + ".DriverSection.ImplicitPathOptions")
    ctx.setu32(dte+0x118, 0) # DependentLoadFlags
    ctx.setu32(dte+0x11c, 0) # SigningLevel
    #ctx.trySymbolizeMemory(d+0x28, 8, name+".DriverSection")
    ctx.setu64(d+0x28, dte)
    # DriverExtension = dex
    ctx.setu64(d + 0x30, dex)
    # DriverName
    initUnicodeStr(ctx, d+0x38, "\\Driver\\" + name)
    # HardwareDatabase = ptr str
    hd = createUnicodeStr(ctx, "\\REGISTRY\\MACHINE\\HARDWARE\\DESCRIPTION\\SYSTEM")
    ctx.setu64(d + 0x48, hd)
    # FastIoDispatch = 0
    ctx.setu64(d + 0x50, 0x0)
    # DriverInit = DriverEntry
    ctx.setu64(d + 0x58, entry)
    # DriverStartIO = 0
    ctx.setu64(d + 0x60, 0x0)
    # DriverUnload = 0
    ctx.setu64(d + 0x68, 0x0)
    # MajorFunctions = 0 (28 IRP_MJ_* slots)
    ctx.setMemVal(d + 0x70, b"\x00" * 8 * 28)
    # initialize driver extension
    # ext.DriverObject = d
    ctx.setu64(dex + 0x00, d)
    # ext.AddDevice = 0
    ctx.setu64(dex + 0x08, 0)
    # ext.Count = 0
    ctx.setu64(dex + 0x10, 0)
    # ext.ServiceKeyName
    initUnicodeStr(ctx, dex+0x18, name)
    # ext.ClientDriverExtension = 0
    ctx.setu64(dex + 0x28, 0)
    # ext.FsFilterCallbacks = 0
    ctx.setu64(dex + 0x30, 0)
    # ext.KseCallbacks = 0
    ctx.setu64(dex + 0x38, 0)
    # ext.DvCallbacks = 0
    ctx.setu64(dex + 0x40, 0)
    # ext.VerifierContext = 0
    ctx.setu64(dex + 0x48, 0)
    return d
def createUnicodeStr(ctx, s):
    """Allocate a UNICODE_STRING (0x10 bytes), fill it with *s*, return its address."""
    ustr_addr = ctx.alloc(0x10)
    initUnicodeStr(ctx, ustr_addr, s)
    return ustr_addr
def initUnicodeStr(ctx, addr, s):
    """Fill the UNICODE_STRING at *addr* with a UTF-16-LE copy of *s*."""
    encoded = s.encode("UTF-16-LE")
    backing = ctx.alloc(len(encoded))
    ctx.setMemVal(backing, encoded)
    # Length and MaximumLength (both in bytes), then Buffer pointer at +0x8.
    ctx.setu16(addr + 0, len(encoded))
    ctx.setu16(addr + 2, len(encoded))
    ctx.setu64(addr + 0x8, backing)
def readUnicodeStr(ctx, addr):
    """Read the UNICODE_STRING at *addr* and return it as a Python str.

    Returns "" (with a warning) if the buffer pointer is symbolized.
    """
    byte_len = ctx.getu16(addr)
    buf_ptr = ctx.getu64(addr+0x8)
    if ctx.issym and ctx.isSymbolizedMemory(addr+8, 8):
        print("Tried to read from a symbolized buffer in a unicode string")
        return ""
    raw = ctx.getMemVal(buf_ptr, byte_len)
    return str(raw, "UTF_16_LE")
def setIRQL(ctx, newlevel):
    """Set the emulated IRQL (cr8) to *newlevel* and return the previous level.

    TODO: the real KeRaiseIrqlToDpcLevel also saves the old IRQL at an
    offset from gs; that part is not emulated yet.
    """
    previous = ctx.getRegVal(DB_X86_R_CR8)
    ctx.setRegVal(DB_X86_R_CR8, newlevel)
    return previous
def KeBugCheckEx_hook(hook, ctx, addr, sz, op, provider):
    """Emulated KeBugCheckEx: report the bug-check code and halt emulation."""
    bugcheck_code = ctx.getRegVal(DB_X86_R_RCX)
    print(f"Bug Check! Code: {bugcheck_code:x}. See other 4 params for more info")
    return HookRet.FORCE_STOP_INS
def ExAllocatePoolWithTag_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ExAllocatePoolWithTag: satisfy the request from ctx.alloc.

    TODO: a real pool allocator (and pool-specific memory permissions) may
    be needed if the driver allocates heavily.
    """
    pool_type = ctx.getRegVal(DB_X86_R_RCX)
    num_bytes = ctx.getRegVal(DB_X86_R_RDX)
    pool_tag = struct.pack("<I", ctx.getRegVal(DB_X86_R_R8))
    allocation = ctx.alloc(num_bytes)
    # Record every allocation so frees and leak checks can find it later.
    ctx.active.globstate["poolAllocations"].append((pool_type, num_bytes, pool_tag, allocation))
    print("ExAllocatePoolWithTag", hex(num_bytes), pool_tag, '=', hex(allocation))
    ctx.doRet(allocation)
    return HookRet.OP_DONE_INS
def ExFreePoolWithTag_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ExFreePoolWithTag: currently just logs the free.

    TODO: actually release the allocation from the pool bookkeeping.
    """
    allocation = ctx.getRegVal(DB_X86_R_RCX)
    print("ExFreePoolWithTag", hex(allocation))
    ctx.doRet(allocation)
    return HookRet.OP_DONE_INS
def RtlDuplicateUnicodeString_hook(hook, ctx, addr, sz, op, provider):
    """Emulated RtlDuplicateUnicodeString.

    RCX = add_nul flags, RDX = source UNICODE_STRING*, R8 = destination
    UNICODE_STRING*.  Copies the source buffer into freshly allocated
    emulated memory, optionally appending a UTF-16 NUL, then fills the
    destination structure.  Stops emulation if anything is out of bounds
    or symbolized.
    """
    add_nul = ctx.getRegVal(DB_X86_R_RCX)
    src = ctx.getRegVal(DB_X86_R_RDX)
    dst = ctx.getRegVal(DB_X86_R_R8)
    # check bounds of both UNICODE_STRING structures (0x10 bytes each)
    if not ctx.inBounds(src, 0x10, MEM_READ):
        print("RtlDuplicateUnicodeString: src oob")
        return HookRet.STOP_INS
    if not ctx.inBounds(dst, 0x10, MEM_WRITE):
        print("RtlDuplicateUnicodeString: dst oob")
        return HookRet.STOP_INS
    numbytes = ctx.getu16(src)
    srcbuf = ctx.getu64(src+8)
    srcval = b""
    if numbytes != 0:
        # check the source buffer itself
        if not ctx.inBounds(srcbuf, numbytes, MEM_READ):
            print("RtlDuplicateUnicodeString: src.buf oob")
            return HookRet.STOP_INS
        # refuse to duplicate a buffer containing symbolized bytes
        for i in range(numbytes):
            if ctx.issym and ctx.isSymbolizedMemory(srcbuf+i, 1):
                print("RtlDuplicateUnicodeString: symbolized in src.buf")
                return HookRet.STOP_INS
        srcval = ctx.getMemVal(srcbuf, numbytes)
    # add_nul semantics: >1 always appends a NUL; ==1 appends only for
    # non-empty strings (matches RTL_DUPLICATE_UNICODE_STRING_* flags).
    if add_nul > 1 or (add_nul == 1 and numbytes != 0):
        srcval += b"\x00\x00"
    if len(srcval) == 0:
        # null buffer, 0 len
        ctx.setu16(dst + 0x0, 0)
        ctx.setu16(dst + 0x2, 0)
        ctx.setu64(dst + 0x8, 0)
    else:
        dstbuf = ctx.alloc(len(srcval))
        ctx.setMemVal(dstbuf, srcval)
        # Length/MaximumLength exclude the appended NUL (numbytes).
        ctx.setu16(dst + 0x0, numbytes)
        ctx.setu16(dst + 0x2, numbytes)
        ctx.setu64(dst + 0x8, dstbuf)
    s = str(srcval, "UTF_16_LE")
    ctx.doRet(0)
    print(f"RtlDuplicateUnicodeString : \"{s}\"")
    return HookRet.OP_DONE_INS
def IoCreateFileEx_hook(hook, ctx, addr, sz, op, provider):
    """Emulated IoCreateFileEx: hand out a fake handle and record it.

    Reads the OBJECT_ATTRIBUTES name and the CreateDisposition stack
    argument, fills the caller's handle and IO_STATUS_BLOCK, and stores
    (handle, name, disposition, driver context, provider) in globstate.
    """
    # Handles are simple increasing integers.
    h = ctx.active.globstate["nexthandle"]
    ctx.active.globstate["nexthandle"] += 1
    phandle = ctx.getRegVal(DB_X86_R_RCX)
    oa = ctx.getRegVal(DB_X86_R_R8)
    iosb = ctx.getRegVal(DB_X86_R_R9)
    sp = ctx.getRegVal(DB_X86_R_RSP)
    # Stack args follow the 0x28 shadow space: disposition is stack arg 3,
    # the driver-context pointer is stack arg 10.
    disp = ctx.getu32(sp + 0x28 + (3 * 8))
    driverctx = ctx.getu64(sp + 0x28 + (10 * 8))
    if ctx.issym and ctx.isSymbolizedMemory(oa+0x10, 8):
        print("Unicode string in object attributes is symbolized")
        return HookRet.FORCE_STOP_INS
    # OBJECT_ATTRIBUTES.ObjectName (UNICODE_STRING*) lives at +0x10.
    namep = ctx.getu64(oa+0x10)
    name = readUnicodeStr(ctx, namep)
    ctx.setu64(phandle, h)
    # set up iosb (Information field mirrors what the disposition would do)
    info = 0
    disp_str = ""
    if disp == 0:
        disp_str = "FILE_SUPERSEDE"
        info = 0 # FILE_SUPERSEDED
    elif disp == 1:
        disp_str = "FILE_OPEN"
        info = 1 # FILE_OPENED
    elif disp == 2:
        disp_str = "FILE_CREATE"
        info = 2 # FILE_CREATED
    elif disp == 3:
        disp_str = "FILE_OPEN_IF"
        info = 2 # FILE_CREATED
    elif disp == 4:
        # NOTE(review): disposition 4 is FILE_OVERWRITE in the WDK; the
        # label below says FILE_OVERWRITE_IF — confirm which was intended.
        disp_str = "FILE_OVERWRITE_IF"
        info = 3 # FILE_OVERWRITTEN
    elif disp == 5:
        disp_str = "FILE_OVERWRITE_IF"
        info = 2 # FILE_CREATED
    ctx.setu64(iosb, 0)
    ctx.setu64(iosb+8, info)
    objinfo = (h, name, disp, driverctx, provider)
    ctx.active.globstate["handles"][h] = objinfo
    ctx.doRet(0)
    print(f"IoCreateFileEx: \"{name}\" {disp_str} = {h}")
    return HookRet.STOP_INS
def IoCreateDevice_hook(hook, ctx, addr, sz, op, provider):
    """Emulated IoCreateDevice: log the request and stop (not implemented)."""
    driver_object = ctx.getRegVal(DB_X86_R_RCX)
    extension_size = ctx.getRegVal(DB_X86_R_RDX)
    name_ptr = ctx.getRegVal(DB_X86_R_R8)
    device_type = ctx.getRegVal(DB_X86_R_R9)
    stack = ctx.getRegVal(DB_X86_R_RSP)
    # Stack args after the 0x28 shadow space: characteristics, exclusive
    # flag, and the output DEVICE_OBJECT** pointer.
    characteristics = ctx.getu32(stack + 0x28 + (0 * 8))
    exclusive = ctx.getu64(stack + 0x28 + (1 * 8))
    device_out = ctx.getu64(stack + 0x28 + (2 * 8))
    device_name = readUnicodeStr(ctx, name_ptr)
    print(f"Driver is trying to create device {device_name}")
    return HookRet.FORCE_STOP_INS
def ZwClose_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ZwClose: drop the handle from the emulated handle table."""
    handle = ctx.getRegVal(DB_X86_R_RCX)
    handle_name = ctx.active.globstate["handles"][handle][1]
    del ctx.active.globstate["handles"][handle]
    print(f"Closed File {handle} ({handle_name})")
    ctx.doRet(0)
    return HookRet.OP_DONE_INS
def ZwWriteFile_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ZwWriteFile: pretend the write succeeded and dump the buffer.

    Register args: RCX = handle, RDX = event, R8 = APC routine,
    R9 = APC context.  Stack args (after the 0x28 shadow space):
    IO_STATUS_BLOCK*, buffer, length, ByteOffset*.
    """
    h = ctx.getRegVal(DB_X86_R_RCX)
    evt = ctx.getRegVal(DB_X86_R_RDX)
    apcrou = ctx.getRegVal(DB_X86_R_R8)
    apcctx = ctx.getRegVal(DB_X86_R_R9)
    sp = ctx.getRegVal(DB_X86_R_RSP)
    iosb = ctx.getu64(sp + 0x28 + (0 * 8))
    buf = ctx.getu64(sp + 0x28 + (1 * 8))
    blen = ctx.getu32(sp + 0x28 + (2 * 8))
    poff = ctx.getu64(sp + 0x28 + (3 * 8))
    # APC completion routines are not emulated; bail out loudly.
    if apcrou != 0:
        print("ZwWriteFile with apcroutine!")
        return HookRet.FORCE_STOP_INS
    name = ctx.active.globstate["handles"][h][1]
    off = 0
    if poff != 0:
        off = ctx.getu64(poff)
    # Report success: Status = 0, Information = bytes "written".
    ctx.setu64(iosb, 0)
    ctx.setu64(iosb+8, blen)
    ctx.doRet(0)
    print(f"ZwWriteFile: {h}({name})) {hex(blen)} bytes{(' at offset ' + hex(off)) if poff != 0 else ''}")
    ctx.printMem(buf, blen)
    return HookRet.OP_DONE_INS
def ZwReadFile_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ZwReadFile: log the request and halt (reads not implemented).

    Register arg: RCX = handle.  Stack args (after the 0x28 shadow space):
    IO_STATUS_BLOCK*, buffer, length, ByteOffset*.
    """
    h = ctx.getRegVal(DB_X86_R_RCX)
    sp = ctx.getRegVal(DB_X86_R_RSP)
    iosb = ctx.getu64(sp + 0x28 + (0 * 8))
    buf = ctx.getu64(sp + 0x28 + (1 * 8))
    blen = ctx.getu32(sp + 0x28 + (2 * 8))
    poff = ctx.getu64(sp + 0x28 + (3 * 8))
    # BUG FIX: `name` was used in the log line below without ever being
    # defined, so this hook always raised NameError.  Look it up from the
    # emulated handle table like ZwClose/ZwWriteFile do.
    name = ctx.active.globstate["handles"][h][1]
    print(f"ZwReadFile: {h}({name}) {hex(blen)} into {hex(buf)}")
    if poff:
        offval = ctx.getu64(poff)
        print(f"Read is at offset {hex(offval)}")
    ctx.doRet(0)
    return HookRet.FORCE_STOP_INS
def ZwFlushBuffersFile_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ZwFlushBuffersFile: report success without doing anything."""
    handle = ctx.getRegVal(DB_X86_R_RCX)
    status_block = ctx.getRegVal(DB_X86_R_RDX)
    # Zero the IO_STATUS_BLOCK (Status and Information).
    ctx.setu64(status_block, 0)
    ctx.setu64(status_block + 8, 0)
    print(f"ZwFlushBuffersFile {handle}")
    ctx.doRet(0)
    return HookRet.DONE_INS
def KeAreAllApcsDisabled_hook(hook, ctx, addr, sz, op, provider):
    """Emulated KeAreAllApcsDisabled.

    The real routine also checks currentthread.SpecialAcpDisable; this
    approximation only checks IRQL (cr8 == 0) and the interrupt flag.
    TODO: check SpecialAcpDisable as well.
    """
    irql = ctx.getRegVal(DB_X86_R_CR8)
    interrupts_on = (ctx.getRegVal(DB_X86_R_EFLAGS) >> 9) & 1
    result = 0 if irql == 0 and interrupts_on == 1 else 1
    print(f"KeAreAllApcsDisabled : {result}")
    ctx.doRet(result)
    return HookRet.DONE_INS
def KeIpiGenericCall_hook(hook, ctx, addr, sz, op, provider):
    """Emulated KeIpiGenericCall: run the broadcast function on this "CPU".

    Raises IRQL to IPI_LEVEL (0xe), redirects execution into the target
    function with its argument in RCX, and installs a one-shot hook at the
    return address that restores IRQL and logs the result.
    """
    fcn = ctx.getRegVal(DB_X86_R_RCX)
    arg = ctx.getRegVal(DB_X86_R_RDX)
    # set IRQL to IPI_LEVEL
    old_level = setIRQL(ctx, 0xe)
    # do IpiGeneric Call
    ctx.setRegVal(DB_X86_R_RCX, arg)
    ctx.setRegVal(DB_X86_R_RIP, fcn)
    # set hook for when we finish
    def finish_KeIpiGenericCall_hook(hook, ctx, addr, sz, op, provider):
        # remove self (this hook only fires once)
        ctx.delHook(hook)
        # restore the IRQL captured in the enclosing call
        setIRQL(ctx, old_level)
        rval = ctx.getRegVal(DB_X86_R_RAX)
        print(f"KeIpiGenericCall returned {hex(rval)}")
        return HookRet.OP_CONT_INS
    # The return address is still on top of the stack at call entry.
    curstack = ctx.getRegVal(DB_X86_R_RSP)
    retaddr = ctx.getu64(curstack)
    ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish_KeIpiGenericCall_hook, label="")
    print(f"KeIpiGenericCall {hex(fcn)} ({hex(arg)})")
    return HookRet.OP_DONE_INS
def ZwQuerySystemInformation_hook(hook, ctx, addr, sz, op, provider):
    """Emulated ZwQuerySystemInformation: every information class is a stub.

    Each supported class would need a faithfully formatted output buffer;
    none are implemented yet, so all paths raise NotImplementedError.
    """
    infoclass = ctx.getRegVal(DB_X86_R_RCX)
    buf = ctx.getRegVal(DB_X86_R_RDX)
    buflen = ctx.getRegVal(DB_X86_R_R8)
    retlenptr = ctx.getRegVal(DB_X86_R_R9)
    if infoclass == 0x0b: #SystemModuleInformation
        # buffer should contain RTL_PROCESS_MODULES structure
        raise NotImplementedError(f"Unimplemented infoclass SystemModuleInformation in ZwQuerySystemInformation")
    elif infoclass == 0x4d: #SystemModuleInformationEx
        # buffer should contain RTL_PROCESS_MODULE_INFORMATION_EX
        # has to include the module we are emulating
        # just copy over a good buffer from the computer?
        # if they actually use the info we are in trouble
        # actually load in a bunch of modules? :(
        # might have to support paging in/out if that needs to happen
        # for now just try a good value
        # see side_utils for doing this from python to get example output
        # TODO provide a good output, but symbolize any real addresses
        raise NotImplementedError(f"Unimplemented infoclass SystemModuleInformationEx in ZwQuerySystemInformation")
    else:
        raise NotImplementedError(f"Unimplemented infoclass in ZwQuerySystemInformation : {hex(infoclass)}")
def ExSystemTimeToLocalTime_hook(hook, ctx, addr, sz, op, provider):
    """Thunk ExSystemTimeToLocalTime to the host-provided implementation."""
    thunk_target = ctx.active.globstate["_thunk_symaddr0"]
    ctx.setRegVal(DB_X86_R_RIP, thunk_target)
    print("ExSystemTimeToLocalTime")
    return HookRet.DONE_INS
def RtlTimeToTimeFields_hook(hook, ctx, addr, sz, op, provider):
    """Thunk RtlTimeToTimeFields to the host-provided implementation."""
    thunk_target = ctx.active.globstate["_thunk_symaddr1"]
    ctx.setRegVal(DB_X86_R_RIP, thunk_target)
    print("RtlTimeToTimeFields")
    return HookRet.DONE_INS
def _stricmp_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for _stricmp: log both C strings, then run the real code."""
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr2"])
    lhs_ptr = ctx.getRegVal(DB_X86_R_RCX)
    rhs_ptr = ctx.getRegVal(DB_X86_R_RDX)
    lhs = ctx.getCStr(lhs_ptr)
    rhs = ctx.getCStr(rhs_ptr)
    print(f"_stricmp \"{lhs}\" vs \"{rhs}\"")
    return HookRet.OP_DONE_INS
def wcscat_s_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for wcscat_s: log dest, source and count, then run the
    real implementation."""
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr3"])
    dst_ptr = ctx.getRegVal(DB_X86_R_RCX)
    src_ptr = ctx.getRegVal(DB_X86_R_R8)
    count = ctx.getRegVal(DB_X86_R_RDX)
    dst_str = ctx.getCWStr(dst_ptr)
    src_str = ctx.getCWStr(src_ptr)
    print(f"wcscat_s ({count}) \"{dst_str}\" += \"{src_str}\"")
    return HookRet.OP_DONE_INS
def wcscpy_s_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for wcscpy_s: log the copy, then run the real code."""
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr4"])
    dst_ptr = ctx.getRegVal(DB_X86_R_RCX)
    src_ptr = ctx.getRegVal(DB_X86_R_R8)
    count = ctx.getRegVal(DB_X86_R_RDX)
    copied = ctx.getCWStr(src_ptr)
    print(f"wcscpy_s {hex(dst_ptr)[2:]}({count}) <= \"{copied}\"")
    return HookRet.OP_DONE_INS
def RtlInitUnicodeString_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for RtlInitUnicodeString: log the source wide string."""
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr5"])
    src_ptr = ctx.getRegVal(DB_X86_R_RDX)
    text = ctx.getCWStr(src_ptr)
    print(f"RtlInitUnicodeString \"{text}\"")
    return HookRet.OP_DONE_INS
def swprintf_s_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for swprintf_s.

    Runs the real implementation, and plants a one-shot hook at the
    caller's return address so the formatted result can be logged once
    the call completes.
    """
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr6"])
    out_ptr = ctx.getRegVal(DB_X86_R_RCX)
    fmt_ptr = ctx.getRegVal(DB_X86_R_R8)
    fmt_str = ctx.getCWStr(fmt_ptr)

    def finish_swprintf_s_hook(hook, ctx, addr, sz, op, provider):
        # One-shot: unhook ourselves before logging the result.
        ctx.delHook(hook)
        result = ctx.getCWStr(out_ptr)
        print(f"Finished swprintf_s: \"{result}\" from \"{fmt_str}\"")
        return HookRet.OP_CONT_INS

    # Hook the instruction at the caller's return address (top of stack).
    ret_addr = ctx.getu64(ctx.getRegVal(DB_X86_R_RSP))
    ctx.addHook(ret_addr, ret_addr + 1, MEM_EXECUTE, handler=finish_swprintf_s_hook, label="")
    return HookRet.OP_DONE_INS
def vswprintf_s_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for vswprintf_s.

    Runs the real implementation, and plants a one-shot hook at the
    caller's return address so the formatted result can be logged once
    the call completes.
    """
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr7"])
    out_ptr = ctx.getRegVal(DB_X86_R_RCX)
    fmt_ptr = ctx.getRegVal(DB_X86_R_R8)
    fmt_str = ctx.getCWStr(fmt_ptr)

    def finish_vswprintf_s_hook(hook, ctx, addr, sz, op, provider):
        # One-shot: unhook ourselves before logging the result.
        ctx.delHook(hook)
        result = ctx.getCWStr(out_ptr)
        print(f"Finished vswprintf_s: \"{result}\" from \"{fmt_str}\"")
        return HookRet.OP_CONT_INS

    # Hook the instruction at the caller's return address (top of stack).
    ret_addr = ctx.getu64(ctx.getRegVal(DB_X86_R_RSP))
    ctx.addHook(ret_addr, ret_addr + 1, MEM_EXECUTE, handler=finish_vswprintf_s_hook, label="")
    return HookRet.OP_DONE_INS
def _vsnwprintf_hook(hook, ctx, addr, sz, op, provider):
    """Thunk hook for _vsnwprintf.

    Redirects execution to the real implementation and installs a
    one-shot hook at the caller's return address so the formatted output
    can be logged after the call finishes.
    """
    ctx.setRegVal(DB_X86_R_RIP, ctx.active.globstate["_thunk_symaddr8"])
    buf = ctx.getRegVal(DB_X86_R_RCX)    # destination buffer
    fmt = ctx.getRegVal(DB_X86_R_R8)     # format string pointer
    fmts = ctx.getCWStr(fmt)
    # set hook for after return
    sp = ctx.getRegVal(DB_X86_R_RSP)
    retaddr = ctx.getu64(sp)
    def finish__vsnwprintf_hook(hook, ctx, addr, sz, op, provider):
        # remove self (one-shot hook)
        ctx.delHook(hook)
        s = ctx.getCWStr(buf)
        # BUGFIX: the log line previously said "_vsnwprintf_s", which is a
        # different API; label the message with the function actually hooked.
        print(f"Finished _vsnwprintf: \"{s}\" from \"{fmts}\"")
        return HookRet.OP_CONT_INS
    ctx.addHook(retaddr, retaddr+1, MEM_EXECUTE, handler=finish__vsnwprintf_hook, label="")
    return HookRet.OP_DONE_INS
def createThunkHooks(ctx):
    """Register thunk hooks for the ntoskrnl exports we pass through.

    The handler functions live at module scope (required so the hooks can
    be pickled); this records each real implementation's address in
    globstate under "_thunk_symaddrN" and installs the matching handler.
    """
    # Order matters: index i must match the "_thunk_symaddr{i}" key each
    # hook function above reads back out of globstate.
    thunked = [
        ("ExSystemTimeToLocalTime", ExSystemTimeToLocalTime_hook),
        ("RtlTimeToTimeFields", RtlTimeToTimeFields_hook),
        ("_stricmp", _stricmp_hook),
        ("wcscat_s", wcscat_s_hook),
        ("wcscpy_s", wcscpy_s_hook),
        ("RtlInitUnicodeString", RtlInitUnicodeString_hook),
        ("swprintf_s", swprintf_s_hook),
        ("vswprintf_s", vswprintf_s_hook),
        ("_vsnwprintf", _vsnwprintf_hook),
    ]
    for index, (name, handler) in enumerate(thunked):
        ctx.active.globstate[f"_thunk_symaddr{index}"] = ctx.getImageSymbol(name, "ntoskrnl.exe")
        ctx.setApiHandler(name, handler, "ignore")
def setNtosThunkHook(ctx, name, dostop):
    """Install an auto-generated thunk hook for the named ntoskrnl export."""
    handler = ctx.createThunkHook(name, "ntoskrnl.exe", dostop)
    ctx.setApiHandler(name, handler, "ignore")
def registerWinHooks(ctx):
    """Attach every custom Windows-kernel API handler to the context."""
    handlers = (
        ("RtlDuplicateUnicodeString", RtlDuplicateUnicodeString_hook),
        ("KeBugCheckEx", KeBugCheckEx_hook),
        ("ExAllocatePoolWithTag", ExAllocatePoolWithTag_hook),
        ("ExFreePoolWithTag", ExFreePoolWithTag_hook),
        ("IoCreateFileEx", IoCreateFileEx_hook),
        ("ZwClose", ZwClose_hook),
        ("ZwWriteFile", ZwWriteFile_hook),
        ("ZwReadFile", ZwReadFile_hook),
        ("ZwFlushBuffersFile", ZwFlushBuffersFile_hook),
        ("KeAreAllApcsDisabled", KeAreAllApcsDisabled_hook),
        ("KeIpiGenericCall", KeIpiGenericCall_hook),
        ("IoCreateDevice", IoCreateDevice_hook),
    )
    for name, handler in handlers:
        ctx.setApiHandler(name, handler, "ignore")
    # Pass-through thunks for the APIs we let the real code service.
    createThunkHooks(ctx)
def loadNtos(ctx, base=0xfffff8026be00000):
    """Map ntoskrnl.exe into the emulated address space at *base*.

    NOTE: loading the image does not initialize any kernel subsystems --
    make sure you initialize whatever components you intend to use.
    """
    print("Loading nt...")
    ctx.loadPE("ntoskrnl.exe", base)
    print("Loaded!")
def kuser_time_hook(hk, ctx, addr, sz, op, provider):
    """Read-hook for the time fields of KUSER_SHARED_DATA.

    Refreshes InterruptTime, SystemTime and TickCount from the emulator's
    clock just before the guest reads them, then logs which field was hit.
    """
    # InterruptTime is 100ns scale time since start
    it = ctx.getTicks()
    # SystemTime is 100ns scale, as timestamp
    st = ctx.getTime()
    # TickCount is 1ms scale, as ticks update as if interrupts have maximum period?
    # TODO adjust this?
    tc = int(it // 10000)  # 1 tick = 1 ms = 10000 * 100ns
    shared_data_addr = 0xfffff78000000000
    # write the values back
    # Packing "<QI" with (value, value >> 32) lays out low dword, high
    # dword, then the high dword again -- presumably matching the kernel's
    # 12-byte {LowPart, High1Time, High2Time} time layout (KSYSTEM_TIME);
    # TODO confirm against the KUSER_SHARED_DATA definition.
    bts = struct.pack("<QI", tc, tc>>32)
    ctx.setMemVal(shared_data_addr + 0x320, bts)   # TickCount at +0x320
    bts = struct.pack("<QIQI", it, it>>32, st, st>>32)
    ctx.setMemVal(shared_data_addr + 0x8, bts)     # InterruptTime at +0x8, SystemTime at +0x14
    # Log which field the guest's read landed in.
    if shared_data_addr + 0x8 <= addr < shared_data_addr + 0x14:
        print("Read from InterruptTime")
    if shared_data_addr + 0x14 <= addr < shared_data_addr + 0x20:
        print("Read from SystemTime")
    if shared_data_addr + 0x320 <= addr < shared_data_addr + 0x330:
        print("Read from TickCount")
    return HookRet.CONT_INS
def initSys(ctx):
    """Set up a minimal Windows kernel environment in the context.

    Tracks pool allocations and handles in globstate, loads ntoskrnl,
    registers the API hooks, and populates a plausible KUSER_SHARED_DATA
    page (with the time fields backed by kuser_time_hook).
    """
    # setup global state we track
    ctx.active.globstate["poolAllocations"] = [] # see ExAllocatePoolWithTag
    ctx.active.globstate["handles"] = {} # number : (object,)
    ctx.active.globstate["nexthandle"] = 1
    loadNtos(ctx)
    registerWinHooks(ctx)
    # setup KUSER_SHARED_DATA at 0xFFFFF78000000000
    shared_data_addr = 0xfffff78000000000
    shared_data_sz = 0x720
    ctx.addAnn(shared_data_addr, shared_data_addr + shared_data_sz, "GLOBAL", "_KUSER_SHARED_DATA")
    ctx.updateBounds(shared_data_addr, shared_data_addr + shared_data_sz, MEM_READ, False)
    #TODO verify tick count/time works how you think
    # time is # of 100-nanosecond intervals
    # these numbers aren't actually any good because we hook out a looot of functionality?
    # but eh, if things don't work then use a volatile symbol hook here
    ctx.addHook(shared_data_addr + 0x8, shared_data_addr+0x20, MEM_READ, kuser_time_hook, "Interrupt and System Time hook")
    ctx.addHook(shared_data_addr + 0x320, shared_data_addr+0x32c, MEM_READ, kuser_time_hook, "Tick Time hook")
    # Snapshot of a real KUSER_SHARED_DATA page; offsets are annotated per
    # field.  The time fields are placeholders -- the hooks above rewrite
    # them on every guest read.
    ctx.setMemVal(
        shared_data_addr + 0x0,
        b'\x00\x00\x00\x00' + # +0x0 .TickCountLowDeprecated
        b'\x00\x00\xa0\x0f' + # +0x4 .TickCountMultiplier
        # HOOK THIS and use instruction count to add to it
        b'O\xcaW[\xd8\x05\x00\x00\xd8\x05\x00\x00' + # +0x8 .InterruptTime
        # HOOK THIS and use instruction count to add to it
        b'\x19E~M\xe7\x8c\xd6\x01\xe7\x8c\xd6\x01' + # +0x14 .SystemTime
        b'\x00\xa0\x11\x87!\x00\x00\x00!\x00\x00\x00' + # +0x20 .TimeZoneBias
        b'd\x86' + # +0x2c .ImageNumberLow
        b'd\x86' + # +0x2e .ImageNumberHigh
        b'C\x00:\x00\\\x00W\x00I\x00N\x00D\x00O\x00' + # +0x30 .NtSystemRoot
        b'W\x00S\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00' + # +0x238 .MaxStackTraceDepth
        b'\x00\x00\x00\x00' + # +0x23c .CryptoExponent
        b'\x02\x00\x00\x00' + # +0x240 .TimeZoneId
        b'\x00\x00 \x00' + # +0x244 .LargePageMinimum
        b'\x00\x00\x00\x00' + # +0x248 .AitSamplingValue
        b'\x00\x00\x00\x00' + # +0x24c .AppCompatFlag
        b'I\x00\x00\x00\x00\x00\x00\x00' + # +0x250 .RNGSeedVersion
        b'\x00\x00\x00\x00' + # +0x258 .GlobalValidationRunlevel
        b'\x1c\x00\x00\x00' + # +0x25c .TimeZoneBiasStamp
        b'aJ\x00\x00' + # +0x260 .NtBuildNumber
        b'\x01\x00\x00\x00' + # +0x264 .NtProductType
        b'\x01' + # +0x268 .ProductTypeIsValid
        b'\x00' + # +0x269 .Reserved0
        b'\t\x00' + # +0x26a .NativeProcessorArchitecture
        b'\n\x00\x00\x00' + # +0x26c .NtMajorVersion
        b'\x00\x00\x00\x00' + # +0x270 .NtMinorVersion
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x274, 0x4), "kuser_shared_data.ProcessorFeature[0:4]")
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x278, 0x8), "kuser_shared_data.ProcessorFeature[4:c]")
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x280, 0x20), "kuser_shared_data.ProcessorFeature[c:2c]")
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2a0, 0x10), "kuser_shared_data.ProcessorFeature[2c:3c]")
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2b0, 0x8), "kuser_shared_data.ProcessorFeature[3c:44]")
        #ctx.symbolizeMemory(MemoryAccess(shared_data_addr + 0x2b8, 0x4), "kuser_shared_data.reserved3")
        b'\x00\x00\x01\x01\x00\x00\x01\x00\x01\x01\x01\x00\x01\x01\x01\x00' + # +0x274 .ProcessorFeatures
        b'\x00\x01\x00\x00\x00\x01\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00' +
        b'\x01\x01\x00\x00\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\xff\xff\xfe\x7f' + # +0x2b4 .Reserved1
        b'\x00\x00\x00\x80' + # +0x2b8 .Reserved3
        b'\x00\x00\x00\x00' + # +0x2bc .TimeSlip
        b'\x00\x00\x00\x00' + # +0x2c0 .AlternativeArchitecture
        b' \x00\x00\x00' + # +0x2c4 .BootId
        b'\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x2c8 .SystemExpirationDate
        b'\x10\x03\x00\x00' + # +0x2d0 .SuiteMask
        # Yeah, go ahead and keep this one a 0
        b'\x00' + # +0x2d4 .KdDebuggerEnabled # Yeah, go ahead and keep this one a 0
        b'\n' + # +0x2d5 .Reserved
        b'<\x00' + # +0x2d6 .CyclesPerYield
        b'\x01\x00\x00\x00' + # +0x2d8 .ActiveConsoleId
        b'\x04\x00\x00\x00' + # +0x2dc .DismountCount
        b'\x01\x00\x00\x00' # +0x2e0 .ComPlusPackage
    )
    #TODO hook this properly
    ctx.trySymbolizeMemory(shared_data_addr + 0x2e4, 0x4, "kuser_shared_data.LastSystemRITEventTickCount")
    #b'\xc9\x85N&' + # +0x2e4 .LastSystemRITEventTickCount
    ctx.setMemVal(
        shared_data_addr + 0x2e8,
        b'\x94\xbb?\x00' + # +0x2e8 .NumberOfPhysicalPages
        b'\x00' + # +0x2ec .SafeBootMode
        b'\x01' + # +0x2ed .VirtualizationFlags #TODO worth symbolizing?
        b'\x00\x00' + # +0x2ee .Reserved12
        #TODO should any of these be changed?
        #    ULONG DbgErrorPortPresent       : 1;
        #    ULONG DbgElevationEnabled       : 1; // second bit
        #    ULONG DbgVirtEnabled            : 1; // third bit
        #    ULONG DbgInstallerDetectEnabled : 1; // fourth bit
        #    ULONG DbgSystemDllRelocated     : 1;
        #    ULONG DbgDynProcessorEnabled    : 1;
        #    ULONG DbgSEHValidationEnabled   : 1;
        #    ULONG SpareBits                 : 25;
        b'\x0e\x01\x00\x00' + # +0x2f0 .SpareBits
        b'\x00\x00\x00\x00' + # +0x2f4 .DataFlagsPad
        b'\xc3\x00\x00\x00\x00\x00\x00\x00' + # +0x2f8 .TestRetInstruction
        b'\x80\x96\x98\x00\x00\x00\x00\x00' + # +0x300 .QpcFrequency
        b'\x00\x00\x00\x00' + # +0x308 .SystemCall
        b'\x00\x00\x00\x00' + # +0x30c .UserCetAvailableEnvironments
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x310 .SystemCallPad
        # HOOK THIS and use instruction count to add to it
        b'\x17\x9es\x02\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x320 .ReservedTickCountOverlay
        b'\x00\x00\x00\x00' + # +0x32c .TickCountPad
        b'\xd3PB\x1b' + # +0x330 .Cookie
        b'\x00\x00\x00\x00' + # +0x334 .CookiePad
        b'\xbc\x1d\x00\x00\x00\x00\x00\x00' + # +0x338 .ConsoleSessionForegroundProcessId
        #TODO hook this?
        b'\xa2{H\x1a\x00\x00\x00\x00' + # +0x340 .TimeUpdateLock
        b'-\x83\x87[\xd8\x05\x00\x00' + # +0x348 .BaselineSystemTimeQpc
        b'-\x83\x87[\xd8\x05\x00\x00' + # +0x350 .BaselineInterruptTimeQpc
        b'\x00\x00\x00\x00\x00\x00\x00\x80' + # +0x358 .QpcSystemTimeIncrement
        b'\x00\x00\x00\x00\x00\x00\x00\x80' + # +0x360 .QpcInterruptTimeIncrement
        b'\x01' + # +0x368 .QpcSystemTimeIncrementShift
        b'\x01' + # +0x369 .QpcInterruptTimeIncrementShift
        b'\x18\x00' + # +0x36a .UnparkedProcessorCount
        b'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x36c .EnclaveFeatureMask
        b'\x03\x00\x00\x00' + # +0x37c .TelemetryCoverageRound
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x380 .UserModeGlobalLogger
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00' + # +0x3a0 .ImageFileExecutionOptions
        b'\x01\x00\x00\x00' + # +0x3a4 .LangGenerationCount
        b'\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x3a8 .Reserved4
        b'\x17\xfc\x9eU\xd8\x03\x00\x00' + # +0x3b0 .InterruptTimeBias
        b'\xcd"\x15G\xd8\x03\x00\x00' + # +0x3b8 .QpcBias
        b'\x18\x00\x00\x00' + # +0x3c0 .ActiveProcessorCount
        b'\x01' + # +0x3c4 .ActiveGroupCount
        b'\x00' + # +0x3c5 .Reserved9
        b'\x83' + # +0x3c6 .QpcBypassEnabled
        b'\x00' + # +0x3c7 .QpcShift
        b'\x9a,\x17\xcdq\x8c\xd6\x01' + # +0x3c8 .TimeZoneBiasEffectiveStart
        b'\x000\x9d;\x14\xb0\xd6\x01' + # +0x3d0 .TimeZoneBiasEffectiveEnd
        b'\x07\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00' + # +0x3d8 .XState
        b'@\x03\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xa0\x00\x00\x00' +
        b'\xa0\x00\x00\x00\x00\x01\x00\x00@\x02\x00\x00\x00\x01\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00@\x03\x00\x00\xa0\x00\x00\x00' +
        b'\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x00\x00\x00\x00\x00\x00\x00\x00' +
        b'\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + # +0x710 .FeatureConfigurationChangeStamp
        b'\x00\x00\x00\x00' # +0x71c .Spare
    )
    # setup KPCR and KPRCB
    #TODO
| StarcoderdataPython |
8171349 | """
Comet Chart Example
----------------------------
Inspired by `Zan Armstrong's comet chart <https://www.zanarmstrong.com/infovisresearch>`_
this plot uses ``mark_trail`` to visualize change of grouped data over time.
A more elaborate example and explanation of creating comet charts in Altair
is shown in `this blogpost <https://medium.com/de-dataverbinders/comet-charts-in-python-visualizing-statistical-mix-effects-and-simpsons-paradox-with-altair-6cd51fb58b7c>`_.
"""
# category: other charts
import altair as alt
import vega_datasets
# Build the comet chart: one trail ("comet") per variety/site pair, running
# from the 1931 yield to the 1932 yield.
(
    alt.Chart(vega_datasets.data.barley.url)
    # Pivot wide so each row carries both years' yields for a variety/site.
    .transform_pivot("year", value="yield", groupby=["variety", "site"])
    # Fold back to long form so the year can be used as an encoding channel.
    .transform_fold(["1931", "1932"], as_=["year", "yield"])
    # Year-over-year change, used to color each comet.
    .transform_calculate(calculate="datum['1932'] - datum['1931']", as_="delta")
    .mark_trail()
    .encode(
        x=alt.X('year:O', title=None),
        y=alt.Y('variety:N', title='Variety'),
        # Trail width encodes the yield in each year (the comet tapers).
        size=alt.Size('yield:Q', scale=alt.Scale(range=[0, 12]), legend=alt.Legend(values=[20, 60], title='Barley Yield (bushels/acre)')),
        # Diverging color scale centered on zero change.
        color=alt.Color('delta:Q', scale=alt.Scale(domainMid=0), legend=alt.Legend(title='Yield Delta (%)')),
        tooltip=alt.Tooltip(['year:O', 'yield:Q']),
        column=alt.Column('site:N', title='Site')
    )
    .configure_view(stroke=None)
    .configure_legend(orient='bottom', direction='horizontal')
    .properties(title='Barley Yield comparison between 1932 and 1931')
)
| StarcoderdataPython |
9602417 | # tests/test_provider_heroku_heroku.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:37 UTC)
def test_provider_import():
    """Smoke test: the generated heroku provider module is importable."""
    import terrascript.provider.heroku.heroku
def test_resource_import():
    """Smoke test: every generated heroku resource class is importable.

    A missing class raises ImportError, failing the test.
    """
    from terrascript.resource.heroku.heroku import heroku_account_feature
    from terrascript.resource.heroku.heroku import heroku_addon
    from terrascript.resource.heroku.heroku import heroku_addon_attachment
    from terrascript.resource.heroku.heroku import heroku_app
    from terrascript.resource.heroku.heroku import heroku_app_config_association
    from terrascript.resource.heroku.heroku import heroku_app_feature
    from terrascript.resource.heroku.heroku import heroku_app_release
    from terrascript.resource.heroku.heroku import heroku_app_webhook
    from terrascript.resource.heroku.heroku import heroku_build
    from terrascript.resource.heroku.heroku import heroku_cert
    from terrascript.resource.heroku.heroku import heroku_collaborator
    from terrascript.resource.heroku.heroku import heroku_config
    from terrascript.resource.heroku.heroku import heroku_domain
    from terrascript.resource.heroku.heroku import heroku_drain
    from terrascript.resource.heroku.heroku import heroku_formation
    from terrascript.resource.heroku.heroku import heroku_pipeline
    from terrascript.resource.heroku.heroku import heroku_pipeline_config_var
    from terrascript.resource.heroku.heroku import heroku_pipeline_coupling
    from terrascript.resource.heroku.heroku import heroku_review_app_config
    from terrascript.resource.heroku.heroku import heroku_slug
    from terrascript.resource.heroku.heroku import heroku_space
    from terrascript.resource.heroku.heroku import heroku_space_app_access
    from terrascript.resource.heroku.heroku import heroku_space_inbound_ruleset
    from terrascript.resource.heroku.heroku import (
        heroku_space_peering_connection_accepter,
    )
    from terrascript.resource.heroku.heroku import heroku_space_vpn_connection
    from terrascript.resource.heroku.heroku import heroku_ssl
    from terrascript.resource.heroku.heroku import heroku_team_collaborator
    from terrascript.resource.heroku.heroku import heroku_team_member
def test_datasource_import():
    """Smoke test: every generated heroku data-source class is importable."""
    from terrascript.data.heroku.heroku import heroku_addon
    from terrascript.data.heroku.heroku import heroku_app
    from terrascript.data.heroku.heroku import heroku_pipeline
    from terrascript.data.heroku.heroku import heroku_space
    from terrascript.data.heroku.heroku import heroku_space_peering_info
    from terrascript.data.heroku.heroku import heroku_team
    from terrascript.data.heroku.heroku import heroku_team_members
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.heroku.heroku
#
# t = terrascript.provider.heroku.heroku.heroku()
# s = str(t)
#
# assert 'https://github.com/heroku/terraform-provider-heroku' in s
# assert '4.6.0' in s
| StarcoderdataPython |
11313041 | <filename>flaskr/__init__.py<gh_stars>0
import os
from flask import Flask
def create_app(test_config=None):
    """Flask application factory.

    Creates and configures the flaskr application.  Pass a mapping as
    *test_config* to override the normal configuration (tests use this to
    point the app at a temporary database, for example).
    """
    # Create and configure application
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',  # override with a real secret outside development
        # BUGFIX: Config.from_mapping() keeps only upper-case keys, so the
        # previous lower-case "database" key was silently dropped; the rest
        # of the app reads app.config['DATABASE'].
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )

    if test_config is None:
        # Load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Load the test config if passed in
        app.config.from_mapping(test_config)

    # Ensure the instance folder exists (best effort; Flask does not create
    # it automatically, and failure to create it is deliberately ignored).
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # A simple page that says hello
    @app.route('/hello')
    def hello():
        return 'Hello world!'

    # Database commands / teardown.
    from . import db
    db.init_app(app)

    # Authentication blueprint.
    from . import auth
    app.register_blueprint(auth.bp)

    # Blog blueprint; '/' is also exposed under the 'index' endpoint name.
    from . import blog
    app.register_blueprint(blog.bp)
    app.add_url_rule('/', endpoint='index')

    return app
| StarcoderdataPython |
11377891 | import django_filters
from .common_filters import ValueInFilter, NumberInFilter, GeoFilterSet, GeomFilter
from ..models import FN122
class FN122InProjectFilter(GeoFilterSet):
    """A filterset that allows us to select subsets of catch count objects
    by attributes of the catch counts (fn122 data only)"""

    # Spatial filters: restrict to samples whose geometry falls inside a
    # region of interest or a buffered point.
    roi = GeomFilter(field_name="sample__geom__within", method="filter_roi")

    buffered_point = GeomFilter(
        field_name="sample__geom__within", method="filter_point"
    )

    # Filter by management unit slug (inclusion / exclusion).
    management_unit__in = ValueInFilter(field_name="sample__management_units__slug")
    management_unit__not__in = ValueInFilter(
        field_name="sample__management_units__slug", exclude=True
    )

    # Effort Attributes
    # we could add gear depth here if it was populated more regularly.
    eff = ValueInFilter(field_name="eff")
    eff_not = ValueInFilter(field_name="eff", exclude=True)

    # Effort distance comparisons.
    effdst = django_filters.NumberFilter(field_name="effdst", lookup_expr="exact")
    effdst__gte = django_filters.NumberFilter(field_name="effdst", lookup_expr="gte")
    effdst__lte = django_filters.NumberFilter(field_name="effdst", lookup_expr="lte")
    effdst__gt = django_filters.NumberFilter(field_name="effdst", lookup_expr="gt")
    effdst__lt = django_filters.NumberFilter(field_name="effdst", lookup_expr="lt")

    # Gear depth comparisons.
    grdep = django_filters.NumberFilter(field_name="grdep", lookup_expr="exact")
    grdep__gte = django_filters.NumberFilter(field_name="grdep", lookup_expr="gte")
    grdep__lte = django_filters.NumberFilter(field_name="grdep", lookup_expr="lte")
    grdep__gt = django_filters.NumberFilter(field_name="grdep", lookup_expr="gt")
    grdep__lt = django_filters.NumberFilter(field_name="grdep", lookup_expr="lt")

    # Gear temperature at set (grtem0) and lift (grtem1).
    grtem0 = django_filters.NumberFilter(field_name="grtem0", lookup_expr="exact")
    grtem0__gte = django_filters.NumberFilter(field_name="grtem0", lookup_expr="gte")
    grtem0__lte = django_filters.NumberFilter(field_name="grtem0", lookup_expr="lte")
    grtem0__gt = django_filters.NumberFilter(field_name="grtem0", lookup_expr="gt")
    grtem0__lt = django_filters.NumberFilter(field_name="grtem0", lookup_expr="lt")

    grtem1 = django_filters.NumberFilter(field_name="grtem1", lookup_expr="exact")
    grtem1__gte = django_filters.NumberFilter(field_name="grtem1", lookup_expr="gte")
    grtem1__lte = django_filters.NumberFilter(field_name="grtem1", lookup_expr="lte")
    grtem1__gt = django_filters.NumberFilter(field_name="grtem1", lookup_expr="gt")
    grtem1__lt = django_filters.NumberFilter(field_name="grtem1", lookup_expr="lt")

    class Meta:
        model = FN122
        fields = ["eff"]
class FN122Filter(FN122InProjectFilter):
    """A filter that is inherited from FN122InProjectFilter and allows
    additional filters based on attributes of the parent tables
    (project, net set attributes).
    """

    # FN011 attributes
    # Project year comparisons.
    year = django_filters.CharFilter(
        field_name="sample__project__year", lookup_expr="exact"
    )
    year__gte = django_filters.NumberFilter(
        field_name="sample__project__year", lookup_expr="gte"
    )
    year__lte = django_filters.NumberFilter(
        field_name="sample__project__year", lookup_expr="lte"
    )
    year__gt = django_filters.NumberFilter(
        field_name="sample__project__year", lookup_expr="gt"
    )
    year__lt = django_filters.NumberFilter(
        field_name="sample__project__year", lookup_expr="lt"
    )

    # Project start (prj_date0) and end (prj_date1) date windows.
    prj_date0 = django_filters.DateFilter(
        field_name="sample__project__prj_date0", help_text="format: yyyy-mm-dd"
    )
    prj_date0__gte = django_filters.DateFilter(
        field_name="sample__project__prj_date0",
        lookup_expr="gte",
        help_text="format: yyyy-mm-dd",
    )
    prj_date0__lte = django_filters.DateFilter(
        field_name="sample__project__prj_date0",
        lookup_expr="lte",
        help_text="format: yyyy-mm-dd",
    )

    prj_date1 = django_filters.DateFilter(
        field_name="sample__project__prj_date1", help_text="format: yyyy-mm-dd"
    )
    prj_date1__gte = django_filters.DateFilter(
        field_name="sample__project__prj_date1",
        lookup_expr="gte",
        help_text="format: yyyy-mm-dd",
    )
    prj_date1__lte = django_filters.DateFilter(
        field_name="sample__project__prj_date1",
        lookup_expr="lte",
        help_text="format: yyyy-mm-dd",
    )

    # Project code filters: exact set membership, substring, and suffix.
    prj_cd = ValueInFilter(field_name="sample__project__prj_cd")
    prj_cd__not = ValueInFilter(field_name="sample__project__prj_cd", exclude=True)

    prj_cd__like = django_filters.CharFilter(
        field_name="sample__project__prj_cd", lookup_expr="icontains"
    )

    prj_cd__not_like = django_filters.CharFilter(
        field_name="sample__project__prj_cd", lookup_expr="icontains", exclude=True
    )

    prj_cd__endswith = django_filters.CharFilter(
        field_name="sample__project__prj_cd", lookup_expr="endswith"
    )
    prj_cd__not_endswith = django_filters.CharFilter(
        field_name="sample__project__prj_cd", lookup_expr="endswith", exclude=True
    )

    # Project name substring filters.
    prj_nm__like = django_filters.CharFilter(
        field_name="sample__project__prj_nm", lookup_expr="icontains"
    )

    prj_nm__not_like = django_filters.CharFilter(
        field_name="sample__project__prj_nm", lookup_expr="icontains", exclude=True
    )

    # Project lead by username (case-insensitive exact match).
    prj_ldr = django_filters.CharFilter(
        field_name="sample__project__prj_ldr__username", lookup_expr="iexact"
    )

    # Protocol and lake abbreviation filters.
    protocol = ValueInFilter(field_name="sample__project__protocol__abbrev")
    protocol__not = ValueInFilter(
        field_name="sample__project__protocol__abbrev", exclude=True
    )

    lake = ValueInFilter(field_name="sample__project__lake__abbrev")

    lake__not = ValueInFilter(field_name="sample__project__lake__abbrev", exclude=True)

    # FN121 Attributes
    # Sample number, site depth, gear type and gear code.
    sam = ValueInFilter(field_name="sample__sam")
    sam__not = ValueInFilter(field_name="sample__sam", exclude=True)

    sidep__gte = django_filters.NumberFilter(
        field_name="sample__sidep", lookup_expr="gte"
    )
    sidep__lte = django_filters.NumberFilter(
        field_name="sample__sidep", lookup_expr="lte"
    )

    grtp = ValueInFilter(field_name="sample__grtp")
    grtp__not = ValueInFilter(field_name="sample__grtp", exclude=True)

    gr = ValueInFilter(field_name="sample__gr")
    gr__not = ValueInFilter(field_name="sample__gr", exclude=True)

    # grid is a little trick - requires us to filter lake too - user beware!
    grid = NumberInFilter(field_name="sample__grid__grid")
    grid__not = NumberInFilter(field_name="sample__grid__grid", exclude=True)

    # Effort duration comparisons.
    effdur__gte = django_filters.NumberFilter(
        field_name="sample__effdur", lookup_expr="gte"
    )
    effdur__lte = django_filters.NumberFilter(
        field_name="sample__effdur", lookup_expr="lte"
    )

    # Set (effdt0/efftm0) and lift (effdt1/efftm1) date and time windows.
    set_date = django_filters.DateFilter(
        field_name="sample__effdt0", help_text="format: yyyy-mm-dd"
    )
    set_date__gte = django_filters.DateFilter(
        field_name="sample__effdt0", lookup_expr="gte", help_text="format: yyyy-mm-dd"
    )
    set_date__lte = django_filters.DateFilter(
        field_name="sample__effdt0", lookup_expr="lte", help_text="format: yyyy-mm-dd"
    )

    lift_date = django_filters.DateFilter(
        field_name="sample__effdt1", help_text="format: yyyy-mm-dd"
    )
    lift_date__gte = django_filters.DateFilter(
        field_name="sample__effdt1", lookup_expr="gte", help_text="format: yyyy-mm-dd"
    )
    lift_date__lte = django_filters.DateFilter(
        field_name="sample__effdt1", lookup_expr="lte", help_text="format: yyyy-mm-dd"
    )

    set_time = django_filters.TimeFilter(
        field_name="sample__efftm0", help_text="format: HH:MM"
    )
    set_time__gte = django_filters.TimeFilter(
        field_name="sample__efftm0", lookup_expr="gte", help_text="format: HH:MM"
    )
    set_time__lte = django_filters.TimeFilter(
        field_name="sample__efftm0", lookup_expr="lte", help_text="format: HH:MM"
    )

    lift_time = django_filters.TimeFilter(
        field_name="sample__efftm1", help_text="format: HH:MM"
    )
    lift_time__gte = django_filters.TimeFilter(
        field_name="sample__efftm1", lookup_expr="gte", help_text="format: HH:MM"
    )
    lift_time__lte = django_filters.TimeFilter(
        field_name="sample__efftm1", lookup_expr="lte", help_text="format: HH:MM"
    )

    class Meta:
        model = FN122
        fields = ["eff"]
| StarcoderdataPython |
9730613 | <reponame>underflow101/code-example<filename>ps/greedy/changes.py
# changes.py
# book p.90
import sys


def count_coins(amount, coins):
    """Return how many coins a greedy strategy uses to make *amount*.

    Greedy is optimal for these denominations because each coin value
    divides the next larger one (500 / 100 / 50 / 10).

    :param amount: non-negative amount of change to make
    :param coins: denominations, largest first
    :return: total number of coins used
    """
    count = 0
    for coin in coins:
        count += amount // coin  # take as many of this coin as fit
        amount %= coin           # leave the remainder for smaller coins
    return count


if __name__ == "__main__":
    # Original exercise input: 1260 with the standard denominations.
    # (The old module-level ``input = sys.stdin.readline`` alias shadowed the
    # builtin and was never used, so it has been removed.)
    print(count_coins(1260, [500, 100, 50, 10]))
4906136 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from bson import ObjectId
from django.views.decorators.csrf import csrf_exempt
import pymongo
from PenBlog.func import *
__author__ = 'quanix'
def show_all(request):
    """Admin page: list all categories sorted by their 'Order' field."""
    db = connect_mongodb_database(request)
    info = db.infos.find_one()  # NOTE(review): fetched but never used -- confirm before removing
    categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
    for category in categories:
        # ObjectId is awkward in templates; expose it as a plain string.
        category['Id'] = str(category['_id'])
    return render_admin_and_back(request, 'show-categories.html', {
        'page':u'分类',
        'categories':categories,
        'selection':'categories',
    })
@csrf_exempt
def new(request):
    """Admin view (Python 2 code): GET renders an empty category form,
    POST inserts the category and re-numbers the ordering."""
    db = connect_mongodb_database(request)
    # Plain visit: show the edit form.
    if request.method == 'GET':
        return render_admin_and_back(request, 'edit-category.html', {
            'page':u'新分类',
        })
    elif request.method == 'POST':
        d = request.POST
        # Missing/empty order defaults to 0 (front of the list).
        order = int(d['category-order']) if d['category-order'] else 0
        update = {
            'Title':d['category-title'],
            'Description':d['category-description'],
            'Order': order,
        }
        # Insert the new category.
        db.categories.insert(update)
        # Re-number so 'Order' values form a dense 0..n-1 sequence.
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
        return redirect(request, '新建分类成功', 'admin/show-categories/')
def update_category_of_articles(coll, old_cat, new_cat):
    """Rename a category inside every article of *coll*.

    For each document whose 'Categories' list contains the old title,
    replace that entry with the new title and persist the change.
    No-op when the title did not actually change.
    """
    previous_title = old_cat['Title']
    current_title = new_cat['Title']
    if previous_title == current_title:
        return
    for article in coll.find({'Categories': previous_title}):
        tags = article['Categories']
        tags[tags.index(previous_title)] = current_title
        coll.update({'Id': article['Id']}, {'$set': {'Categories': tags}})
@csrf_exempt
def edit(request, objectId):
    """Admin view (Python 2 code): GET renders the category for editing,
    POST saves it, reorders all categories and renames the category in
    every (hidden) article."""
    db = connect_mongodb_database(request)
    id = ObjectId(objectId)
    # Plain visit: show the populated edit form.
    if request.method == 'GET':
        category = db.categories.find_one({'_id':id})
        return render_admin_and_back(request, 'edit-category.html', {
            'page':u'编辑分类',
            'category': category,
        })
    elif request.method == 'POST':
        d = request.POST
        # Missing/empty order defaults to 0 (front of the list).
        order = int(d['category-order']) if d['category-order'] else 0
        update = {
            'Title':d['category-title'],
            'Description':d['category-description'],
            'Order':order,
        }
        # Fetch all categories, ordered.
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        # Locate the category being edited.
        # NOTE(review): ``filter(...)[0]`` only works on Python 2 (py3 filter
        # returns an iterator) and raises IndexError when the id is unknown.
        category = filter(lambda i: i['_id'] == id, categories)[0]
        db.categories.update(category, {'$set': update})
        # Move it to its requested position, then re-number densely.
        categories.remove(category)
        categories.insert(order, category)
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
        # Propagate the rename to all articles (visible and hidden).
        update_category_of_articles(db.articles, category, update)
        update_category_of_articles(db.hidden_articles, category, update)
        return redirect(request, '编辑分类成功', 'admin/show-categories/')
@csrf_exempt
def delete(request, objectId):
    """Admin view (Python 2 code): delete the category with the given
    ObjectId, then re-number the remaining categories."""
    db = connect_mongodb_database(request)
    id = ObjectId(objectId)
    if request.method == 'GET':
        db.categories.remove({'_id': id})
        # Re-number remaining categories into a dense 0..n-1 sequence.
        categories = list(db.categories.find(sort=[('Order', pymongo.ASCENDING)]))
        for i in xrange(0, len(categories)):
            if categories[i]['Order'] != i:
                db.categories.update(categories[i], {"$set":{'Order': i}})
    # NOTE(review): source indentation was mangled -- this redirect may
    # originally have sat inside the GET branch; here every method redirects.
    return redirect(request, '删除分类成功', 'admin/show-categories/')
9645357 | <gh_stars>0
import random
import time
from unittest import mock
import funcy
import pytest
from taskhawk.exceptions import ValidationError, TaskNotFound
from taskhawk.models import Message, Priority, Metadata
from .tasks import send_email
class TestMetadata:
    """Tests for taskhawk.models.Metadata (``message_data`` is a pytest fixture)."""

    def test_new(self, message_data):
        # Constructor copies the relevant fields out of the raw message dict.
        metadata = Metadata(message_data)
        assert metadata.timestamp == message_data['metadata']['timestamp']
        assert metadata.headers == message_data['headers']
        assert metadata.id == message_data['id']
        assert metadata.version == message_data['metadata']['version']
        assert metadata.priority == Priority[message_data['metadata']['priority']]
        assert metadata.provider_metadata is None

    def test_as_dict(self, message_data):
        # as_dict() round-trips only priority/timestamp/version.
        metadata = Metadata(message_data)
        assert metadata.as_dict() == {
            'priority': metadata.priority.name,
            'timestamp': metadata.timestamp,
            'version': metadata.version,
        }

    def test_equal(self, message_data):
        assert Metadata(message_data) == Metadata(message_data)

    def test_equal_fail(self, message_data):
        # Differing message id must break equality.
        metadata = Metadata(message_data)
        message_data['id'] = "foobar"
        metadata2 = Metadata(message_data)
        assert metadata != metadata2

    @mock.patch('taskhawk.models.get_consumer_backend', autospec=True)
    def test_extend_visibility_timeout(self, mock_get_consumer_backend, message_data):
        # The call is delegated to the consumer backend for this priority.
        visibility_timeout_s = random.randint(0, 1000)
        metadata = Metadata(message_data)
        metadata.provider_metadata = object()
        metadata.extend_visibility_timeout(visibility_timeout_s)
        mock_get_consumer_backend.assert_called_once_with(priority=metadata.priority)
        mock_get_consumer_backend.return_value.extend_visibility_timeout.assert_called_once_with(
            visibility_timeout_s, metadata.provider_metadata
        )
class TestMessageMethods:
    """Tests for taskhawk.models.Message (``message`` / ``message_data`` are pytest fixtures)."""

    publisher = 'myapi'

    @mock.patch('taskhawk.models.time.time', autospec=True)
    def test_create_metadata(self, mock_time):
        # Timestamp is milliseconds since epoch, truncated to int.
        mock_time.return_value = time.time()
        assert Message._create_metadata(Priority.high) == {
            'priority': Priority.high.name,
            'timestamp': int(mock_time.return_value * 1000),
            'version': Message.CURRENT_VERSION,
        }

    def test_new(self, message_data):
        message = Message.new(
            message_data['task'],
            Priority[message_data['metadata']['priority']],
            message_data['args'],
            message_data['kwargs'],
            message_data['id'],
            message_data['headers'],
        )
        assert message.id == message_data['id']
        assert message.headers == message_data['headers']
        assert message.task_name == message_data['task']
        assert message.args == message_data['args']
        assert message.kwargs == message_data['kwargs']
        assert message.priority == Priority[message_data['metadata']['priority']]

    def test_as_dict(self, message):
        assert message.as_dict() == {
            'id': message.id,
            'metadata': message.metadata.as_dict(),
            'headers': message.headers,
            'task': message.task_name,
            'args': message.args,
            'kwargs': message.kwargs,
        }

    def test_validate_looks_up_task(self, message):
        # validate() resolves task_name to the registered Task object.
        message.validate()
        assert message.task == send_email.task

    def test_validate_str_timestamp(self, message_data):
        # ISO-8601 string timestamps are accepted.
        message_data['metadata']['timestamp'] = '2015-11-11T21:29:54Z'
        Message(message_data).validate()

    def test_validate_bad_timestamp(self, message_data):
        # Unparseable timestamps fail at construction time.
        message_data['metadata']['timestamp'] = 'foobar'
        with pytest.raises(ValidationError):
            Message(message_data)

    @pytest.mark.parametrize(
        'missing_data',
        ['id', 'metadata', 'metadata__version', 'metadata__timestamp', 'headers', 'task', 'args', 'kwargs'],
    )
    @pytest.mark.parametrize('is_none', [True, False])
    def test_validate_missing_data(self, missing_data, is_none, message_data):
        # Each required field must fail validation whether it is None or absent;
        # 'metadata__x' addresses a key nested under message_data['metadata'].
        if missing_data.startswith('metadata__'):
            if is_none:
                message_data['metadata'][missing_data.split('__')[1]] = None
            else:
                del message_data['metadata'][missing_data.split('__')[1]]
        else:
            if is_none:
                message_data[missing_data] = None
            else:
                del message_data[missing_data]
        with pytest.raises(ValidationError):
            Message(message_data).validate()

    def test_validate_invalid_version(self, message_data):
        message_data['metadata']['version'] = '2.0'
        with pytest.raises(ValidationError):
            Message(message_data).validate()

    @mock.patch('taskhawk.task_manager.Task.find_by_name', side_effect=TaskNotFound)
    def test_validate_missing_task(self, _, message_data):
        # An unregistered task name surfaces as a ValidationError.
        with pytest.raises(ValidationError):
            Message(message_data).validate()

    @mock.patch('tests.tasks._send_email', autospec=True)
    def test_call_task(self, mock_send_email, message):
        # call_task forwards args, kwargs, headers and a Metadata instance.
        message.validate()
        message.call_task()
        mock_send_email.assert_called_once()
        assert mock_send_email.call_args[0] == tuple(message.args)
        assert funcy.project(mock_send_email.call_args[1], message.kwargs.keys()) == message.kwargs
        assert mock_send_email.call_args[1]['headers'] == message.headers
        assert mock_send_email.call_args[1]['metadata'] == Metadata(message.as_dict())

    def test_equal(self, message):
        assert message == message

    def test_equal_fail(self, message):
        # A Message never equals a plain dict, even with identical content.
        assert message != message.as_dict()

    def test_items(self, message):
        assert dict(message.items()) == message.as_dict()
| StarcoderdataPython |
11384204 | <filename>kgame/actor.py
from kivy.uix.widget import Widget
from kivy.graphics.context_instructions import Color
class Actor(Widget):
    """Base widget for scene-managed game objects.

    The owning ``scene`` converts the design-space size/position into real
    widget dimensions (``scene.update_dimension``) and receives events
    (``scene.trigger_event``).  Design-space properties may be supplied as
    keyword arguments: ``design_size``, ``design_pos``, ``design_touch``.
    """

    def __init__(self, scene, **args):
        super(Actor, self).__init__(**args)
        self.scene = scene
        self.child_map = {}    # id -> child widget, filled by add_widget()
        self.touched = False   # True while a touch that started on us is alive
        self.init_properties(**args)
        self.update_size_pos()

    def init_properties(self, **args):
        """Install design-space properties from kwargs, with defaults."""
        self.init_property('design_size', (0, 0), **args)
        self.init_property('design_pos', (0, 0), **args)
        self.init_property('design_touch', False, **args)

    def init_property(self, name, default, **args):
        """Set attribute *name* from kwargs when given, else to *default*."""
        if name in args:
            setattr(self, name, args[name])
        else:
            setattr(self, name, default)

    def update_size_pos(self):
        """Ask the scene to recompute our actual size/position."""
        if self.scene is not None:
            self.scene.update_dimension(self)

    def resize(self, size):
        """Change the design-space size and refresh real dimensions."""
        self.design_size = size
        self.update_size_pos()

    def move(self, pos):
        """Change the design-space position and refresh real dimensions."""
        self.design_pos = pos
        self.update_size_pos()

    def on_touch_down(self, touch):
        result = super(Actor, self).on_touch_down(touch)
        # BUG FIX: the original read ``self.handle_touch``, which is never set
        # anywhere in this class -- init_properties() defines ``design_touch``
        # instead, so every touch raised AttributeError.  (If a subclass
        # elsewhere sets ``handle_touch``, confirm before relying on this.)
        if self.design_touch and not result and self.collide_point(touch.x, touch.y):
            self.touched = True
            return True
        return False

    def on_touch_move(self, touch):
        super(Actor, self).on_touch_move(touch)

    def on_touch_up(self, touch):
        result = super(Actor, self).on_touch_up(touch)
        # A click = touch that started and ended on this actor.
        if not result and self.touched and self.collide_point(touch.x, touch.y):
            self.on_click()
        self.touched = False

    def on_click(self):
        """Forward the click to the scene's event system."""
        self.scene.trigger_event(('on_click', self.id))

    def add_widget(self, widget):
        """Add a child and index it by id when one is set."""
        super(Actor, self).add_widget(widget)
        if widget.id is not None:
            self.child_map[widget.id] = widget
| StarcoderdataPython |
97444 | import torch
import superp
import math
############################################
# set default data type to double
############################################
torch.set_default_dtype(torch.float64)
torch.set_default_tensor_type(torch.DoubleTensor)
# torch.set_default_dtype(torch.float32)
# torch.set_default_tensor_type(torch.FloatTensor)

############################################
# set the super-rectangle range
############################################
# set the initial in super-rectangle
# (all bounds below are angles converted from degrees to radians)
INIT = [[-20 / 180 * math.pi, 20 / 180 * math.pi], \
        [- 20 / 180 * math.pi, 20 / 180 * math.pi]
       ]
INIT_SHAPE = 1 # 2 for circle, 1 for rectangle

SUB_INIT = []
SUB_INIT_SHAPE = []

# the the unsafe in super-rectangle
UNSAFE = [[-90 / 180 * math.pi, 90 / 180 * math.pi], \
          [-90 / 180 * math.pi, 90 / 180 * math.pi]
         ]
UNSAFE_SHAPE = 1 # 2 for circle, 1 for rectangle

# Four rectangles whose union forms the unsafe region: the two side bands
# (|x1| in [30deg, 90deg]) and the top/bottom bands of the central column.
SUB_UNSAFE = [ [[-90 / 180 * math.pi, -30 / 180 * math.pi], [-90 / 180 * math.pi, 90 / 180 * math.pi]], \
               [[30 / 180 * math.pi, 90 / 180 * math.pi], [-90 / 180 * math.pi, 90 / 180 * math.pi]], \
               [[-30 / 180 * math.pi, 30 / 180 * math.pi], [-90 / 180 * math.pi, -30 / 180 * math.pi]], \
               [[-30 / 180 * math.pi, 30 / 180 * math.pi], [30 / 180 * math.pi, 90 / 180 * math.pi]]
             ]
SUB_UNSAFE_SHAPE = [1, 1, 1, 1]

# the the domain in super-rectangle
DOMAIN = [[-90 / 180 * math.pi, 90 / 180 * math.pi], \
          [-90 / 180 * math.pi, 90 / 180 * math.pi]
         ]
DOMAIN_SHAPE = 1 # 1 for rectangle

# Asymptotic equilibrium point (origin) used by cons_asymp below.
ASYMP = [0.0, 0.0]
############################################
# set the range constraints
############################################
# accept a two-dimensional tensor and return a
# tensor of bool with the same number of columns
def cons_init(x):
    """Initial-set membership mask: every sample qualifies.

    Returns a boolean tensor of shape (N,) that is True for all rows
    (self-comparison, so rows with NaN in the first column come out False,
    exactly like the original expression).
    """
    first_column = x[:, 0]
    return first_column.eq(first_column)  # equivalent to True
def cons_unsafe(x):
    """Boolean mask of samples inside the unsafe region.

    The unsafe set is the union of the four rectangles listed in SUB_UNSAFE
    (two side bands plus the top/bottom bands of the central column), each
    inflated by superp.TOL_DATA_GEN on every side.  Bounds are given in
    degrees here and converted with the same ``deg / 180 * math.pi``
    expression as the original so the float values match exactly.
    """
    tol = superp.TOL_DATA_GEN

    def in_box(lo0, hi0, lo1, hi1):
        # Inclusive, tolerance-inflated rectangle test (bounds in degrees).
        return ((x[:, 0] >= lo0 / 180 * math.pi - tol) & (x[:, 0] <= hi0 / 180 * math.pi + tol)
                & (x[:, 1] >= lo1 / 180 * math.pi - tol) & (x[:, 1] <= hi1 / 180 * math.pi + tol))

    unsafe1 = in_box(-90, -30, -90, 90)   # left band
    unsafe2 = in_box(30, 90, -90, 90)     # right band
    unsafe3 = in_box(-30, 30, -90, -30)   # bottom band of central column
    unsafe4 = in_box(-30, 30, 30, 90)     # top band of central column
    return unsafe1 | unsafe2 | unsafe3 | unsafe4
def cons_domain(x):
    """Domain membership mask: the whole super-rectangle, i.e. always True.

    Implemented as a self-comparison so rows with NaN in the first column
    come out False, matching the original expression exactly.
    """
    first_column = x[:, 0]
    return first_column.eq(first_column)  # equivalent to True
def cons_asymp(x):
    """Mask of samples lying outside the ball of radius superp.RADIUS_ASYMP
    centred on the equilibrium point ASYMP."""
    center = torch.tensor(ASYMP)
    distances = torch.norm(x - center, dim=1)
    return distances >= superp.RADIUS_ASYMP
############################################
# set the vector field
############################################
# this function accepts a tensor input and returns the vector field of the same size
def vector_field(x, ctrl_nn):
    """Evaluate the controlled pendulum-like vector field on a batch of states.

    :param x: tensor of shape (N, 2); x[:, 0] is x1, x[:, 1] is x2
    :param ctrl_nn: callable controller network; ctrl_nn(x)[:, 0] is the input u
    :return: tensor of shape (N, superp.DIM_S) with one column per component
    :raises ValueError: if superp.DIM_S requests a component beyond the two defined
    """
    def f(i, x):
        # Component i of the dynamics (1-based index, matching the math notation).
        if i == 1:
            return x[:, 1]  # x[:, 1] stands for x2
        elif i == 2:
            # Taylor-expanded gravity term plus the learned control input.
            return 9.8 * (x[:, 0] - torch.pow(x[:, 0], 3) / 6.0) + (ctrl_nn(x))[:, 0]
        else:
            # Robustness fix: raise instead of print()+exit() so callers can
            # catch and report the misconfiguration (e.g. a bad superp.DIM_S).
            raise ValueError("vector_field: unsupported component index %d" % i)

    vf = torch.stack([f(i + 1, x) for i in range(superp.DIM_S)], dim=1)
    return vf
import os  # isort:skip


def gettext(s):
    """No-op translation marker (PEP 8 E731: prefer a def over a named lambda)."""
    return s


DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for mysite project.

Generated by 'django-admin startproject' using Django 1.11.12.

For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded SECRET_KEY should come from the environment in
# production; left in place because existing deployments may depend on it.
SECRET_KEY = '(!4n0_-8h_p9z3hjlxw3(w-@zn&r^72gzy*y9&k3tuth7)^&=+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
8197262 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from logs import LOGGER
from paddlespeech.cli import VectorExecutor
vector_executor = VectorExecutor()
def get_audio_embedding(path):
    """
    Use vpr_inference to generate embedding of audio
    """
    try:
        raw_embedding = vector_executor(audio_file=path)
        unit_embedding = raw_embedding / np.linalg.norm(raw_embedding)
        return unit_embedding.tolist()
    except Exception as err:
        LOGGER.error(f"Error with embedding:{err}")
        return None
| StarcoderdataPython |
4802920 | import os
from transform import app
if __name__ == '__main__':
    # Robustness fix: int(os.getenv("PORT")) raised TypeError when PORT was
    # unset; fall back to Flask's conventional default port 5000.
    port = int(os.getenv("PORT", "5000"))
    app.run(debug=True, host='0.0.0.0', port=port)
| StarcoderdataPython |
3575924 | <filename>robonomicsinterface/classes/account.py
import typing as tp
from dataclasses import dataclass
from logging import getLogger
from substrateinterface import Keypair
from ..constants import REMOTE_WS, TYPE_REGISTRY
from ..exceptions import NoPrivateKeyException
from ..types import TypeRegistryTyping
from ..utils import create_keypair
logger = getLogger(__name__)
@dataclass
class Account:
    """
    Dataclass to hold account info and node connection parameters
    """
    # NOTE(review): the explicit __init__ below suppresses the generated
    # dataclass __init__; @dataclass still contributes __repr__/__eq__.

    def __init__(
        self,
        seed: tp.Optional[str] = None,
        remote_ws: tp.Optional[str] = None,
        type_registry: tp.Optional[TypeRegistryTyping] = None,
    ) -> None:
        """
        Save node connection parameters and create a keypair to sign transactions and define address if seed was passed
        as a parameter.

        :param seed: Account seed (mnemonic or raw) as a key to sign transactions.
        :param remote_ws: Node url. Default node address is "wss://kusama.rpc.robonomics.network". Another address may
            be specified (e.g. "ws://127.0.0.1:9944" for local node).
        :param type_registry: Types used in the chain. Defaults are the most frequently used in Robonomics.
        """
        # Fall back to module defaults when the caller passes nothing/None.
        self.remote_ws: tp.Optional[str] = remote_ws or REMOTE_WS
        self.type_registry: tp.Optional[TypeRegistryTyping] = type_registry or TYPE_REGISTRY
        if seed:
            self.keypair: tp.Optional[Keypair] = create_keypair(seed)
            self._address: str = self.keypair.ss58_address
        else:
            # Without a seed the account is read-only: no keypair, no address.
            self.keypair: tp.Optional[Keypair] = None

    def get_address(self) -> str:
        """
        Determine account address if seed was passed when creating an instance

        :return: Account ss58 address
        :raises NoPrivateKeyException: when the account was created without a seed
        """
        if not self.keypair:
            raise NoPrivateKeyException("No private key was provided, unable to determine account address")
        return str(self.keypair.ss58_address)
| StarcoderdataPython |
5084812 | # This program knows about the frequencies of various FM radio stations in
# London.
#
# Usage:
#
# $ python radio_freq.py [station_name]
#
# For instance:
#
# $ python radio_freq.py "Radio 4"
# You can listen to Radio 4 on 92.5 FM
#
# or:
#
# $ python radio_freq.py "BBC Radio 5"
# I don't know the frequency of BBC Radio 5
# Frequency -> station name for the London FM dial.
fm_frequencies = {
    '89.1 MHz': 'BBC Radio 2',
    '91.3 MHz': 'BBC Radio 3',
    '93.5 MHz': 'BBC Radio 4',
    '94.9 MHz': 'BBC London',
    '95.8 MHz': 'Capital FM',
    '97.3 MHz': 'LBC',
    '98.8 MHz': 'BBC Radio 1',
    '100.0 MHz': 'Kiss FM',
    '100.9 MHz': 'Classic FM',
    '105.4 MHz': 'Magic',
    '105.8 MHz': 'Virgin',
    '106.2 MHz': 'Heart 106.2',
}

print('I know about {} FM radio stations'.format(len(fm_frequencies)))


def find_frequency(station_name):
    """Return the frequency string (e.g. '93.5 MHz') for *station_name*.

    Returns None when the station is unknown.  Lookup is by exact station
    name, i.e. a reverse search through the frequency->station mapping.
    """
    for frequency, station in fm_frequencies.items():
        if station == station_name:
            return frequency
    return None


if __name__ == '__main__':
    # Implements the TODOs from the file header: look up a station given on
    # the command line; with no arguments, print the full station table.
    import sys

    if len(sys.argv) < 2:
        for frequency, station in sorted(fm_frequencies.items()):
            print('{:<10} {}'.format(frequency, station))
    else:
        station_name = sys.argv[1]
        frequency = find_frequency(station_name)
        if frequency is None:
            print("I don't know the frequency of {}".format(station_name))
            print('Stations I know about: {}'.format(
                ', '.join(sorted(fm_frequencies.values()))))
        else:
            print('You can listen to {} on {}'.format(station_name, frequency))
6492134 | <reponame>Mimino666/python-xextract<gh_stars>10-100
from lxml import etree
import six
from .extractor_list import XPathExtractorList
class XPathExtractor(object):
    """Thin wrapper around an lxml element exposing XPath selection.

    Subclasses choose the parser/serializer pair via ``_parser`` and
    ``_tostring_method`` and provide ``_empty_doc`` as the fallback for
    blank input.
    """

    _parser = etree.HTMLParser
    _tostring_method = 'html'

    def __init__(self, body=None, namespaces=None, _root=None):
        """Parse raw *body* text, or wrap an already-parsed node via *_root*."""
        self.namespaces = namespaces
        if _root is None:
            self._root = self._get_root(body)
        else:
            self._root = _root

    def _get_root(self, body, encoding=None):
        # Blank input falls back to the subclass's minimal document.
        body = body.strip() or self._empty_doc
        if isinstance(body, six.text_type):
            body = body.encode('utf-8')
            encoding = 'utf-8'
        parser = self._parser(recover=True, encoding=encoding)
        return etree.fromstring(body, parser=parser)

    def select(self, xpath):
        """Evaluate *xpath* (string or precompiled etree.XPath) and wrap each hit
        in a new extractor of the same class."""
        if not hasattr(self._root, 'xpath'):
            # Scalar roots (strings, booleans, numbers) cannot be selected from.
            return XPathExtractorList([])
        if isinstance(xpath, etree.XPath):
            result = xpath(self._root)
        else:
            result = self._root.xpath(xpath, namespaces=self.namespaces)
        if not isinstance(result, list):
            result = [result]
        return XPathExtractorList(self.__class__(_root=x, namespaces=self.namespaces) for x in result)

    def extract(self):
        """Serialize the wrapped node to text; booleans become u'1'/u'0'."""
        try:
            return etree.tostring(self._root, method=self._tostring_method,
                                  encoding=six.text_type, with_tail=False)
        except (AttributeError, TypeError):
            if self._root is True:
                return u'1'
            elif self._root is False:
                return u'0'
            else:
                return six.text_type(self._root)

    def register_namespace(self, prefix, uri):
        """Register an XPath namespace *prefix* for subsequent select() calls."""
        if self.namespaces is None:
            self.namespaces = {}
        self.namespaces[prefix] = uri

    def __nonzero__(self):
        return bool(self.extract())

    # BUG FIX: this module supports both Python majors (it imports ``six``),
    # but Python 3 only consults ``__bool__`` -- without this alias every
    # extractor object was unconditionally truthy on Python 3.
    __bool__ = __nonzero__

    def __str__(self):
        data = repr(self.extract()[:40])
        return '<%s data=%s>' % (type(self).__name__, data)

    __repr__ = __str__
class XmlXPathExtractor(XPathExtractor):
    # XML flavour: strict XML parser/serializer, minimal empty XML document.
    _parser = etree.XMLParser
    _tostring_method = 'xml'
    _empty_doc = '<?xml version="1.0" encoding="UTF-8"?>'
class HtmlXPathExtractor(XPathExtractor):
    # HTML flavour: forgiving HTML parser/serializer, minimal empty document.
    _parser = etree.HTMLParser
    _tostring_method = 'html'
    _empty_doc = '<html/>'
| StarcoderdataPython |
6565539 | #! /usr/bin/env python
import datetime
from peewee import *
from flask.ext.security import Security, PeeweeUserDatastore, UserMixin, RoleMixin, login_required
from app import app
class UnknownField(object):
    # Placeholder for unmapped database column types (not referenced in this chunk).
    pass
class BaseModel(Model):
    """Common base binding every model to the Flask-configured SQLite database."""
    class Meta:
        database = SqliteDatabase(app.config['DATABASE']['name'])
class Photo(BaseModel):
    """A stored photo; ``sha1`` uniquely identifies the file contents."""
    datetaken = DateTimeField(null=True)   # capture time, when known
    filetype = TextField(null=False)
    privacy = IntegerField(null=True)
    sha1 = TextField(null=False,unique=True)
    ts = DateTimeField(default=datetime.datetime.now)  # row creation time
class Comment(BaseModel):
    """A comment attached to a photo."""
    comment = TextField(null=False)
    photo = IntegerField(null=False)  # NOTE(review): plain int, not a ForeignKeyField
    ts = DateTimeField(default=datetime.datetime.now)
class Gallery(BaseModel):
    """A named collection of photosets (linked via PhotosetGallery)."""
    description = TextField(null=True)
    title = TextField(null=False,unique=True)
    ts = DateTimeField(default=datetime.datetime.now)
class Photoset(BaseModel):
    """A named collection of photos (linked via PhotoPhotoset)."""
    description = TextField(null=True)
    title = TextField(null=False,unique=True)
    ts = DateTimeField(default=datetime.datetime.now)
class Tag(BaseModel):
    """A unique tag name, attached to photos via PhotoTag."""
    name = TextField(null=False,unique=True)
    ts = DateTimeField(default=datetime.datetime.now)
class PhotoPhotoset(BaseModel):
    """Many-to-many join: photo <-> photoset."""
    photo = ForeignKeyField(Photo,null=False)
    photoset = ForeignKeyField(Photoset,null=False)
    ts = DateTimeField(default=datetime.datetime.now)
class PhotosetGallery(BaseModel):
    """Many-to-many join: photoset <-> gallery."""
    gallery = ForeignKeyField(Gallery,null=False)
    photoset = ForeignKeyField(Photoset,null=False)
    ts = DateTimeField(default=datetime.datetime.now)
class PhotoTag(BaseModel):
    """Many-to-many join: photo <-> tag."""
    photo = ForeignKeyField(Photo,null=False)
    tag = ForeignKeyField(Tag,null=False)
    ts = DateTimeField(default=datetime.datetime.now)
class ImportMeta(BaseModel):
    """Provenance metadata recorded when a photo is imported."""
    sha1 = TextField(null=False,unique=True)
    photo = IntegerField(null=False)  # NOTE(review): plain int, not a ForeignKeyField
    importpath = TextField(null=True)
    importsource = TextField(null=True)
    filedate = DateTimeField(null=True)
    s3 = IntegerField(null=True)  # presumably an "uploaded to S3" flag -- confirm
    ts = DateTimeField(default=datetime.datetime.now)
class Role(BaseModel, RoleMixin):
    """Flask-Security role."""
    name = CharField(unique=True)
    description = TextField(null=True)
class User(BaseModel, UserMixin):
    """Flask-Security user account."""
    email = TextField(unique=True)
    password = TextField(null=False)  # NOTE(review): confirm this stores a hash, not plaintext
    active = BooleanField(default=True)
    confirmed_at = DateTimeField(null=True)
    ts = DateTimeField(default=datetime.datetime.now)
class UserRoles(BaseModel):
    """Many-to-many link user <-> role, proxying the role's fields."""
    user = ForeignKeyField(User, related_name='role')
    role = ForeignKeyField(Role, related_name='user')
    # Convenience read-through properties expected by Flask-Security.
    name = property(lambda self: self.role.name)
    description = property(lambda self: self.role.description)
| StarcoderdataPython |
3538388 | #!/usr/bin/env python3
"""
Read BAM files to extract all splice junctions
Takes manifest of BAM false
Outputs bed files with
"""
import os
import pysam
from multiprocessing import Pool
import numpy as np
class BamToJuncBed:
    """Extract splice junctions from the BAM files listed in a manifest.

    For each BAM/SAM entry in the manifest a ``*.junc.bed`` file is written
    into ``<output_prefix>_junction_beds/`` and a new manifest pointing at
    the bed files is emitted.  The whole pipeline runs from the constructor.

    Fixes vs. the previous version:
      * strand-inference: ``motif`` was computed once *before* the loop with
        stale left/right coordinates; it is now computed per junction.
      * pass-through manifest lines kept their trailing newline and were
        double-spaced on output; they are now stripped.
      * the optional ``--genome`` / ``--annotation`` arguments no longer
        crash the run when omitted.
    """

    def __init__(self, args):
        self.args = args
        self.manifest_filename = args.manifest
        self.output_prefix = args.output_prefix
        self.n_threads = args.number_threads
        self.bed_directory = os.path.join(os.getcwd(), f"{self.output_prefix}_junction_beds")
        try:
            os.mkdir(self.bed_directory)
        except FileExistsError:
            pass  # reuse an existing output directory
        bams, new_manifest = self.parseManifest()
        # FIX: --annotation is documented as optional; skip GTF parsing when absent.
        self.annotated = self.getAnnotated() if self.args.annotation else {}
        print(f"Finding junctions from {len(bams)} BAM files...")
        self.bamsToBeds(bams)
        self.writeNewManifest(new_manifest)

    def parseManifest(self):
        """Split manifest rows into BAM jobs and pass-through lines.

        :return: (bams, new_manifest) where each bam job is
                 [samplename, bam_path, metadata, condition, bed_path]
        """
        bams = []
        new_manifest = []
        with open(self.manifest_filename, "r") as manifestFile:
            for line in manifestFile:
                samplename, filename, metadata, condition = line.rstrip().split("\t")
                if filename.endswith(".bam") or filename.endswith(".sam"):
                    bedfilename = os.path.join(self.bed_directory, f"{os.path.basename(filename)[:-4]}.junc.bed")
                    bams.append([samplename, filename, metadata, condition, bedfilename])
                    new_manifest.append(f"{samplename}\t{bedfilename}\t{metadata}\t{condition}")
                else:
                    # FIX: strip the newline -- writeNewManifest appends its own
                    # "\n", so the raw line produced doubled blank lines.
                    new_manifest.append(line.rstrip("\n"))
        return bams, new_manifest

    def getAnnotated(self):
        """Parse the GTF and map annotated junctions to gene names.

        :return: dict keyed by (chromosome, donor_exon_end,
                 acceptor_exon_start - 1, strand) -> gene name
        """
        genes = {}
        transcripts = {}
        with open(self.args.annotation) as gtf:
            for line in gtf:
                if line.startswith("#"):
                    continue
                row = line.rstrip().split('\t')
                if row[2] == "transcript":
                    info = [x.split('"') for x in row[8].split(';')]
                    tid = [x[1] for x in info if 'transcript_id' in x[0]][0]
                    gid = [x[1] for x in info if 'gene_name' in x[0]][0]
                    genes[tid] = gid
                    transcripts[(tid, row[0], row[6])] = []
                if row[2] == "exon":
                    info = [x.split('"') for x in row[8].split(';')]
                    tid = [x[1] for x in info if 'transcript_id' in x[0]][0]
                    transcripts[(tid, row[0], row[6])].append((int(row[3]), int(row[4])))
        annotated = {}
        for transcript, exons in transcripts.items():
            tid, chromosome, strand = transcript
            for i in range(len(exons) - 1):
                # A junction spans the gap between consecutive exons.
                annotated[(chromosome, exons[i][1], exons[i + 1][0] - 1, strand)] = genes[tid]
        return annotated

    def getJunctionsFromBam(self, sample):
        """Scan one BAM file for junctions and write them to a bed file.

        :param sample: [samplename, bam_path, metadata, condition, bed_path]
        :return: (bam_path, bed_path, number_of_junctions_written)
        """
        min_length = self.args.min_length
        max_length = self.args.max_length
        min_reads = self.args.min_reads  # NOTE(review): read but never applied below -- enforce or drop
        fasta = self.args.genome
        samplename, filename, metadata, condition, bedfilename = sample
        # Silence htslib warnings while opening; retry without SQ-header check.
        save = pysam.set_verbosity(0)
        try:
            genome = pysam.AlignmentFile(filename)
        except ValueError:
            print("Using: pysam.AlignmentFile(filename,check_sq=False) with", filename)
            genome = pysam.AlignmentFile(filename, check_sq=False)
        pysam.set_verbosity(save)

        counts = {}           # junction -> supporting read count
        leftDiversity = {}    # junction -> {read start pos: count}
        rightDiversity = {}   # junction -> {read end pos: count}
        overhangs = {}        # junction -> max min(left, right) anchor length
        for read in genome.fetch(until_eof=True):
            # NOTE(review): the read.is_read2 condition is disabled ("if True"),
            # so strand always follows the read's own orientation.
            if True:  # read.is_read2:
                if read.is_reverse:
                    strand = "-"
                else:
                    strand = "+"
            else:
                if read.is_reverse:
                    strand = "+"
                else:
                    strand = "-"
            blocks = read.get_blocks()
            try:
                read_start = blocks[0][0]
            except IndexError:
                continue  # unaligned read: no blocks
            read_end = blocks[-1][1]
            # Each gap between aligned blocks is a candidate junction.
            for i in range(len(blocks) - 1):
                junction = (read.reference_name, blocks[i][1], blocks[i + 1][0], strand)
                length = junction[2] - junction[1]
                if length >= min_length and length <= max_length:
                    leftOH = blocks[i][1] - blocks[i][0]
                    rightOH = blocks[i + 1][1] - blocks[i + 1][0]
                    overhang = min(leftOH, rightOH)
                    try:
                        counts[junction] += 1
                        overhangs[junction] = max(overhang, overhangs[junction])
                        try:
                            leftDiversity[junction][read_start] += 1
                            rightDiversity[junction][read_end] += 1
                        except KeyError:
                            leftDiversity[junction][read_start] = 1
                            rightDiversity[junction][read_end] = 1
                    except KeyError:
                        counts[junction] = 1
                        overhangs[junction] = overhang
                        leftDiversity[junction] = {read_start: 1}
                        rightDiversity[junction] = {read_end: 1}

        filteredJunctions = []
        leftEntropy = {}
        rightEntropy = {}
        leftMotif = {}
        rightMotif = {}
        # FIX: guard on the optional fasta path -- the old code tested the
        # AlignmentFile handle (always truthy) and then crashed on
        # pysam.FastaFile(None) when --genome was omitted.
        genome = pysam.FastaFile(fasta) if fasta else None
        for junction in sorted(counts):
            chromosome, left, right, strand = junction
            if genome:
                # Cache the 2-bp donor/acceptor motifs; "NN" for unknown refs.
                if left not in leftMotif:
                    try:
                        leftMotif[left] = genome.fetch(chromosome, left, left + 2)
                    except KeyError:
                        leftMotif[left] = "NN"
                if right not in rightMotif:
                    try:
                        rightMotif[right] = genome.fetch(chromosome, right - 2, right)
                    except KeyError:
                        rightMotif[right] = "NN"
            # Shannon entropy of read start/end positions around the junction.
            leftEntropy[junction] = 0
            total = sum(leftDiversity[junction].values())
            for species, count in leftDiversity[junction].items():
                prop = count / total
                leftEntropy[junction] -= prop * np.log(prop)
            rightEntropy[junction] = 0
            total = sum(rightDiversity[junction].values())
            for species, count in rightDiversity[junction].items():
                prop = count / total
                rightEntropy[junction] -= prop * np.log(prop)
            filteredJunctions.append(junction)

        if self.args.strands in ("inferCombine", "inferOnly"):
            # Resolve junctions reported on both strands using annotation and
            # canonical splice-site motifs.
            opposite = {"+": "-", "-": "+"}
            plus_motifs = {"GT_AG", "GC_AG", "AT_AC"}
            minus_motifs = {"CT_AC", "CT_GC", "GT_AT"}
            firstFiltered = filteredJunctions
            filteredJunctions = []
            for junction in firstFiltered:
                chromosome, left, right, strand = junction
                # FIX: motif must be computed per junction -- previously it was
                # evaluated once before this loop with stale left/right values.
                motif = f"{leftMotif.get(left, 'NN')}_{rightMotif.get(right, 'NN')}"
                complement = (chromosome, left, right, opposite[strand])
                if complement not in counts:
                    filteredJunctions.append(junction)
                elif (junction in self.annotated or
                      (strand == "+" and motif in plus_motifs) or
                      (strand == "-" and motif in minus_motifs)):
                    # This strand is supported: keep it (and absorb the
                    # complement's reads under inferCombine).
                    filteredJunctions.append(junction)
                    if self.args.strands == "inferCombine":
                        counts[junction] += counts[complement]
                elif (complement in self.annotated or
                      (strand == "-" and motif in plus_motifs) or
                      (strand == "+" and motif in minus_motifs)):
                    pass  # the complement is the supported one; drop this strand
                else:
                    filteredJunctions.append(junction)  # ambiguous: keep both

        with open(bedfilename, "w") as bedOut:
            for junction in filteredJunctions:
                chromosome, left, right, strand = junction
                name = f"e:{leftEntropy[junction]:0.02f}:{rightEntropy[junction]:0.02f};o:{overhangs[junction]};m:{leftMotif.get(left, 'NN')}_{rightMotif.get(right, 'NN')};a:{self.annotated.get(junction, '?')}"
                bedOut.write(f"{chromosome}\t{left}\t{right}\t{name}\t{counts[junction]}\t{strand}\n")
        return filename, bedfilename, len(filteredJunctions)

    def bamsToBeds(self, bams):
        """Process all BAM jobs in parallel and report per-file results."""
        with Pool(self.n_threads) as pool:
            for info in pool.imap(self.getJunctionsFromBam, bams):
                filename, bedfilename, num_junctions = info
                print("bam:", filename)
                print("number of junctions found:", num_junctions)
                print("saved to bed:", bedfilename)

    def writeNewManifest(self, new_manifest):
        """Write the rewritten manifest (bed paths replacing bam paths)."""
        new_manifest_path = f"{self.output_prefix}_manifest.txt"
        with open(new_manifest_path, "w") as manifest_file:
            for line in new_manifest:
                manifest_file.write(line + "\n")
        print("new manifest written to:", new_manifest_path)
def add_parser(parser):
    """Attach the BAM-to-junction-bed command line options to *parser*."""
    parser.add_argument("--manifest","-m",
                        help="Tab-separated file of samples and bam file paths.")
    parser.add_argument("--output_prefix","-o",
                        help="Prefix for junction file directory and manifest.")
    parser.add_argument("--genome","-g",
                        help="Optional. Genome fasta file to report splice site motifs")
    parser.add_argument("--annotation","-a",
                        help="Optional. Gene annotation gtf file to report annotation status")
    parser.add_argument("--max_length",type=int,default=100000,
                        help="Maximum distance between ends of junction [Default 100000]")
    parser.add_argument("--min_length",type=int,default=50,
                        help="Minimum distance between ends of junction [Default 50]")
    parser.add_argument("--min_reads",type=int,default=5,
                        help="Minimum number of reads required to report junction [Default 5]")
    #parser.add_argument("--min_overhang",type=int,default=5,
    #    help="Minimum read overhang before or after junction [Default 5]")
    parser.add_argument("--no_multimap",action="store_true",
                        help="STILL IN PROGRESS")
    parser.add_argument("--filter",choices=["gtag_only","all"],default="gtag_only",
                        help="STILL IN PROGRESS" )
    parser.add_argument("--number_threads","-n",type=int,default=1,
                        help="Number of bam files to search concurrently [Default 1]")
    #parser.add_argument("--min_entropy",default=1,type=float,
    #    help="Minimum Shannon entropy required to report junction")
    parser.add_argument("--strands","-s",default="keepBoth",
                        choices=["keepBoth","inferOnly","inferCombine"],
                        help="How to handle junctions with same coordinates on opposite strands. [Default 'keepBoth'. Options 'inferOnly','inferCombine']")
def run_with(args):
    """Run the BAM-to-junction-bed pipeline with parsed CLI arguments.

    BamToJuncBed does all its work in the constructor, so no return value.
    """
    BamToJuncBed(args)
if __name__ == "__main__":
    # CLI entry point: build the argument parser, then run the pipeline.
    import argparse
    parser = argparse.ArgumentParser()
    add_parser(parser)
    args = parser.parse_args()
    run_with(args)
| StarcoderdataPython |
6496664 | <gh_stars>0
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.instruccion import *
from Compi2RepoAux.team21.Analisis_Ascendente.storageManager.jsonMode import *
import Compi2RepoAux.team21.Analisis_Ascendente.Tabla_simbolos.TablaSimbolos as TS
import Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Select as Select
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Time import Time
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.expresion import *
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Expresiones.Trigonometrica import Trigonometrica
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Expresiones.IdAsId import IdAsId,Id,IdId
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Expresiones.Math import Math_
from prettytable import PrettyTable
from Compi2RepoAux.team21.Analisis_Ascendente.storageManager.jsonMode import *
from Compi2RepoAux.team21.Analisis_Ascendente.Instrucciones.Expresiones.Where import Where
class Selectp4(Instruccion):
    """SELECT executor: resolves the FROM clause, reads the referenced
    tables, expands '*' / plain / alias.column column references (with a
    cross-join expansion when several tables are involved), applies the
    WHERE clause and renders the result into Consola via PrettyTable.
    """
    def ejecutar(Select,ts,Consola, Exceptions):
        # NOTE(review): the first parameter is the SELECT AST node, not
        # self -- apparently invoked as Selectp4.ejecutar(node, ...);
        # confirm against the call sites.
        # NOTE(review): these insert() calls seed demo rows on *every*
        # execution -- looks like leftover debugging data; confirm.
        insert('prueba1', 'tabla1', [1, 'Fredy', 'Ramirez'])
        insert('prueba1', 'tabla1', [2, 'Mauro', 'Martinez'])
        insert('prueba1', 'tabla1', [3, 'Javier', 'Lima'])
        insert('prueba1', 'tabla1', [4, 'Yisus', 'Yisusx2'])
        insert('prueba1', 'tabla1', [5, 'Jacks', 'Wade'])
        insert('prueba1', 'tabla2', [1, 'Mario', 'Guatemala', 'Contabilidad'])
        insert('prueba1', 'tabla2', [2, 'Eli', 'Dubai', 'Progra'])
        insert('prueba1', 'tabla2', [3, 'Sis', 'Brasil', 'Master Chief'])
        insert('prueba1', 'tabla2', [4, 'Fredy', 'Noruega', 'God of War'])
        insert('prueba1', 'tabla2', [5, 'Luigi', 'Italia', '<NAME>'])
        insert('prueba1', 'tabla3', [1, 'Sic', 'USA', '4av. km 19.3'])
        insert('prueba1', 'tabla3', [2, 'Pepe', 'Irak', 'en amatitlan ese'])
        insert('prueba1', 'tabla3', [3, 'Etesech', 'China', 'perdido'])
        insert('prueba1', 'tabla3', [4, 'ufuefue', 'Japon', 'Selva'])
        insert('prueba1', 'tabla3', [5, 'osas', 'Venezuela', 'Jungla'])
        tablasRef = {}          # alias -> table name, filled by FROM()
        cont=0
        x = PrettyTable()       # rendering buffer for the final result
        x.clear()
        en=[]                   # column headers accumulated by encabezados()
        DataSelect=[]           # list of [header, values, table] triples
        columnasT = len(Select.columnas)
        Error = False;
        resultado = FROM(Select, tablasRef, Exceptions) # read the FROM-clause tables
        for i in tablasRef.items():
            print(i)
        for columna in Select.columnas:
            print('what -- ' + str(len(Select.columnas)))
            Permutar = False
            if (ts.validar_sim("usedatabase1234") and cont<=columnasT):
                cont=cont+1
                # "usedatabase1234" holds the currently selected database symbol.
                simboloBD = ts.buscar_sim(ts.buscar_sim("usedatabase1234").valor)
                print(simboloBD.id)
                entornoBD = simboloBD.Entorno
                listado_tablas = entornoBD.simbolos
                listadoCampo = {}
                if resultado[0]: # the FROM clause resolved to valid table ids
                    for k in tablasRef.keys():
                        if not existeTabla(listado_tablas, tablasRef.get(k)):
                            Exceptions.append('no existe tabla' + tablasRef.get(k))
                            Error = True
                            break;
                    if not Error:
                        if isinstance(columna, str): # '*': take every column of every table
                            cont22 = 0
                            NombreAnterior=''
                            for k in tablasRef.keys():
                                if cont22 >0:
                                    # second and later tables: cross-join with what
                                    # was already collected before appending.
                                    Permutar= True
                                    DataAux=[]
                                    DataAux= DataSelect.copy()
                                    DataSelect=[]
                                    DataJson = extractTable(simboloBD.id, tablasRef.get(k))
                                    DataSelect = ActualizarTabla(x,DataJson,Contt[0],Contt[2],DataAux,None)
                                    print("sad-->"+str((DataSelect)))
                                en = []
                                Contt = []
                                Contt=encabezados(tablasRef.get(k),tablasRef,listado_tablas,en)
                                #listadoCampo[tablasRef.get(k)] = Cont
                                DataJson= extractTable(simboloBD.id,tablasRef.get(k))
                                NombreAnterior= tablasRef.get(k)
                                DataSelect = agregarData(x,DataJson,Contt[0],Contt[2], DataSelect,None,Permutar,tablasRef.get(k))
                                cont22=cont22 + 1
                                print('agregar---'+k)
                        elif isinstance(columna, Id): # bare column name
                            nombreCampo = columna.id
                            print('campo'+nombreCampo)
                            # how many FROM tables contain this column name
                            Frecuencia = CamposRepetidos(tablasRef,Exceptions,listado_tablas,nombreCampo)
                            print('Frecuencia <<<->>>'+str(Frecuencia))
                            if (Frecuencia == 1):
                                referencia = BuscarCampoTablas(tablasRef, Exceptions, listado_tablas, nombreCampo)
                                DataSelectAux=[]
                                if(referencia[0]): # the column exists in one of the FROM tables
                                    DataJson= extractTable(simboloBD.id,referencia[1])
                                    columna=[]
                                    for column in DataJson:
                                        columna.append(column[referencia[2]])
                                    #DataSelectAux.append([nombreCampo,columna,referencia[1]])
                                    DataSelectAux.append(nombreCampo)
                                    DataSelectAux.append(columna)
                                    DataSelectAux.append((referencia[1]))
                                    DataSelect = PermutarData(DataSelect,DataSelectAux);
                                    #x.add_column(nombreCampo,columna)
                            elif Frecuencia == 0 :
                                Exceptions.append('No existe campo en tablas de referencia')
                                print('No existe campo en tablas de referencia')
                                Error = True
                                break;
                            else:
                                # column name appears in more than one table -> ambiguous
                                Exceptions.append('Existe ambigüedad en campos de tablas de referencia campo'+ nombreCampo)
                                print('Existe ambigüedad en campos de tablas de referencia campo'+ nombreCampo)
                                Error = True
                                break;
                        elif isinstance(columna, IdId): # dotted reference: alias.column
                            nombreCampo = columna.id2
                            print( 'campito bebé-->'+str(nombreCampo))
                            if isinstance(columna.id1,Id):
                                nombreTabla = columna.id1.id # table alias
                                if isinstance(columna.id2,Id) :
                                    nombreCampo = columna.id2.id
                                Tabla = existeAliasTabla(tablasRef,nombreTabla) # table registered under that alias
                                if Tabla[0]: # the alias maps to a table in this select
                                    if nombreCampo == '*': # alias.*: every column of that table
                                        en = []
                                        ContT = []
                                        ContT = encabezados(str(Tabla[1]), tablasRef, listado_tablas, en)
                                        DataJson = extractTable(simboloBD.id, str(Tabla[1]))
                                        if len(DataSelect) > 0:
                                            # cross-join with the columns collected so far
                                            Permutar = True
                                            DataAux = []
                                            DataAux = DataSelect.copy()
                                            DataSelect = []
                                            DataJson = extractTable(simboloBD.id, str(Tabla[1]))
                                            DataSelect = ActualizarTabla(x, DataJson, ContT[0], ContT[2], DataAux,
                                                                         None)
                                            print("sad-->" + str((DataSelect)))
                                        DataSelect = agregarData(x, DataJson, ContT[0], ContT[2], DataSelect, nombreTabla,
                                                                 Permutar, str(Tabla[1]))
                                    else:
                                        referencia = existeCampo(nombreCampo,(listado_tablas.get(Tabla[1]).Entorno).simbolos)
                                        DataSelectAux = []
                                        if (referencia[0]): # the column exists in that table
                                            DataJson = extractTable(simboloBD.id, Tabla[1])
                                            columna = []
                                            for column in DataJson:
                                                columna.append(column[referencia[1]])
                                            #DataSelect.append([str(nombreTabla)+'.'+str(nombreCampo), columna])
                                            DataSelectAux.append(str(nombreTabla)+'.'+str(nombreCampo))
                                            DataSelectAux.append(columna)
                                            DataSelectAux.append((Tabla[1]))
                                            DataSelect = PermutarData(DataSelect, DataSelectAux);
                                            #x.add_column(str(nombreTabla)+'.'+str(nombreCampo), columna)
                                        else:
                                            Exceptions.append('no existe campo en tabla' + Tabla[1])
                                            print('no existe campo en tabla' + Tabla[1])
                                            Error = True
                                            break;
                                else:
                                    Exceptions.append('no existe tabla con ese alias' + nombreTabla)
                                    print('no existe tabla con ese alias' + nombreTabla)
                                    Error = True
                                    break;
                            else:
                                Exceptions.append('tipo invalido en campo')
                                Error = True
                                break;
                else:
                    Error = True
                    break
            else:
                print("no se seleciona una bd")
        if (not Error):
            #x.clear()
            # Filter rows with the WHERE clause, then render column-by-column.
            DataSelect = EvaluarWhere(x,Select,DataSelect,Exceptions,Consola)
            print('lo que llego')
            print(DataSelect)
            for i in range(len(DataSelect[0])):
                columnas=[]
                for fila in DataSelect[1]:
                    columnas.append(fila[i])
                x.add_column(str(DataSelect[0][i]),columnas)
            Consola.append('\n' + x.get_string() + '\n')
            #cont=cont+1
def existeTabla(listadoTablas, tablaBuscar):
    """Return True when some table symbol in the mapping has id == tablaBuscar."""
    return any(simbolo.id == tablaBuscar for simbolo in listadoTablas.values())
def existeAliasTabla(tablasRef, Alias):
    """Look up Alias among the FROM-clause aliases.

    Returns [True, table_name] when the alias is registered,
    [False, False] otherwise.
    """
    if Alias in tablasRef:
        return [True, tablasRef[Alias]]
    return [False, False]
def encabezados(tablaR,tablasRef,listado_tablas,en):
    """Collect the column headers of table tablaR.

    Appends each column name to *en* in place; when the plain name is
    already present in *en* (a clash with a previously read table) the new
    entry is prefixed with the table's alias as "alias.name".  Returns
    [column_count, plain_names, en]; returns [0, [], en] when tablaR is
    not found among the symbol-table entries.
    """
    contadorCampos = 0
    campos = []
    for elemento in listado_tablas: # table symbols in the symbol table
        if listado_tablas.get(elemento).id == tablaR:
            entornoTabla = listado_tablas.get(elemento).Entorno
            lista_campos = entornoTabla.simbolos
            for campito in lista_campos:
                nombreCampo = str(lista_campos.get(campito).id)
                enc = ''
                if nombreCampo in en:
                    # duplicated header: find the alias mapped to tablaR and
                    # qualify the new header with it.
                    for k in tablasRef.keys():
                        if tablasRef.get(k) == tablaR:
                            enc = k
                            break
                    en.append(enc + '.' + nombreCampo)
                    campos.append(nombreCampo)
                else:
                    en.append(nombreCampo)
                    campos.append(nombreCampo)
                contadorCampos = contadorCampos + 1
            # only the first matching table is processed
            return [contadorCampos,campos,en]
    return [0,campos,en]
def ActualizarTabla(x, DataJson, rango, en, DataSelect3, alias):
    """Cross-join helper: repeat every cell of each collected column
    len(DataJson) times (stringified), so existing columns line up with
    the rows of a newly read table.

    x, rango, en and alias are unused here; they are kept for signature
    compatibility with the call sites.
    """
    filas = len(DataJson)
    print('filas nueva tabla' + str(filas))
    resultado = []
    for columna in DataSelect3:
        expandida = [str(valor) for valor in columna[1] for _ in range(filas)]
        resultado.append([str(columna[0]), expandida, columna[2]])
    return resultado
def agregarData(x,DataJson,rango,en,DataSelect3,alias,Permutar,tabla):
    """Append the first *rango* columns of DataJson to DataSelect3.

    Each appended entry is [header, values, tabla]; the header is
    "alias.name" when *alias* is given, otherwise just en[i].  When
    Permutar is True the new values are tiled to match the length of the
    columns already collected (cross-join expansion); this branch assumes
    DataSelect3 is non-empty -- the callers in Selectp4 guarantee that.
    Returns DataSelect3 (mutated in place).
    """
    if Permutar:
        for i in range(rango):
            columna = []
            print(len(DataSelect3[0][1]))
            # repeat the whole new column as many times as needed to match
            # the already-expanded column length.
            for j in range(int(len(DataSelect3[0][1])/len(DataJson))):
                for row in DataJson:
                    columna.append(row[i])
            if alias != None:
                DataSelect3.append([alias+'.'+en[i], columna,tabla])
            else:
                DataSelect3.append([en[i], columna, tabla])
        return DataSelect3
    else:
        for i in range(rango):
            columna = []
            for column in DataJson:
                columna.append(column[i])
            if alias != None:
                DataSelect3.append([alias + '.' + en[i], columna, tabla])
            else:
                DataSelect3.append([en[i], columna, tabla])
            #x.add_column(en[i], columna)
        return DataSelect3
        #x.add_column(en[i], columna)
def FROM(Select,tablasRef,Exceptions):
    """Resolve the FROM clause of a SELECT into tablasRef (alias -> table).

    Fills tablasRef in place: a bare Id maps the table name to itself, an
    IdAsId maps the alias (id2) to the table name (id1).  Returns
    [True, tablasRef, Exceptions] on success and [False, tablasRef] after
    recording an error (duplicate alias / invalid node) in Exceptions.
    """
    if Select.subquery != None: # resolve subquery first; its rows would feed this SELECT
        # NOTE(review): subquery support looks unimplemented -- this only
        # logs the node type and falls through to the success return.
        print('what -- ' + type(Select.subquery).__name__)
    else: # plain table references come in Select.inner
        for tablas in Select.inner: # collect the ids of the requested tables
            if isinstance(tablas, Id):
                tablasRef[str(tablas.id)] = tablas.id
            elif isinstance(tablas, IdAsId):
                if isinstance(tablas.id1, Id) and isinstance(tablas.id2, Id):
                    if str(tablas.id2.id) in tablasRef.keys():
                        Exceptions.append('alias repetidos')
                        print('alias repetidos-->'+ tablas.id2.id)
                        return [False,tablasRef]
                    tablasRef[str(tablas.id2.id)] = str(tablas.id1.id)
                else:
                    # BUG FIX: append() was previously called with no argument,
                    # which raises TypeError instead of recording the error.
                    Exceptions.append('tipo invalido en tabla del FROM')
                    return [False,tablasRef]
    return [True,tablasRef,Exceptions]
def BuscarCampoTablas(tablasRef, Exceptions, listado_tablas, campobuscar):
    """Find the first FROM-clause table containing the column campobuscar.

    Scans the tables in tablasRef insertion order and returns
    [True, table_name, column_index] on the first match, or
    [False, False, False] when no table has the column.
    Exceptions is unused; kept for signature compatibility.
    """
    for nombre_tabla in tablasRef.values():
        simbolos = listado_tablas.get(nombre_tabla).Entorno.simbolos
        for posicion, clave in enumerate(simbolos):
            if simbolos.get(clave).id == campobuscar:
                return [True, nombre_tabla, posicion]
    return [False, False, False]
def CamposRepetidos(tablasRef, Exceptions, listado_tablas, campobuscar):
    """Count occurrences of column name campobuscar across all FROM tables.

    Used to detect missing (0) and ambiguous (>1) column references.
    Exceptions is unused; kept for signature compatibility.
    """
    contador = 0
    for nombre_tabla in tablasRef.values():
        simbolos = listado_tablas.get(nombre_tabla).Entorno.simbolos
        contador += sum(1 for clave in simbolos if simbolos.get(clave).id == campobuscar)
    return contador
def existeCampo(nombreCampo, lista):
    """Locate nombreCampo among a table's column symbols.

    Returns [True, index] for the first matching column, otherwise
    [False, len(lista)].
    """
    for posicion, clave in enumerate(lista):
        if lista.get(clave).id == nombreCampo:
            return [True, posicion]
    return [False, len(lista)]
def EvaluarWhere(x,Select,DataSelect,Exceptions,Consola):
    """Apply the WHERE clause to the collected columns.

    Delegates to Where.Resolver, which returns [ok, row_indices_or_error].
    On success returns [column_headers, rows] where each row is the list of
    selected cell values, in the order of the surviving row indices.

    NOTE(review): when Where.Resolver reports failure this function only
    prints filas[1] and implicitly returns None; the caller in Selectp4
    then indexes the result (DataSelect[0]) which would raise TypeError --
    confirm whether that path is reachable.
    """
    filas = Where.Resolver(Select.complementS,Exceptions,Consola,DataSelect)
    l=[]
    '''for c in DataSelect:
        l.append(str(c[0]))
    x.field_names= l'''
    DataSelectAux = []
    row = []
    if filas[0]:
        registros=[]
        columnas=[]
        for i in filas[1]:
            # rebuild each surviving row from the column-major DataSelect
            registros=[]
            columnas=[]
            for column in DataSelect:
                registros.append(column[1][i])
                columnas.append(column[0])
            row.append(registros)
            #x.add_row(registros)
        print('rows')
        return [columnas,row]
    else:
        print(filas[1])
def PermutarData(DataSelect3, DataSelectAux):
    """Merge a freshly read column (DataSelectAux = [header, values, table])
    into the columns already collected, tiling values so every column keeps
    the same length (cross-join semantics).

    Returns DataSelect3 (mutated and/or rebuilt).
    """
    if len(DataSelect3)>0: # columns were already collected
        # look for an existing column that came from the same table
        veces =-1
        columnaAux = []
        contcolumna=0
        for columna in DataSelect3:
            if columna[2] == DataSelectAux[2]:
                veces = int(len(columna[1])/len(DataSelectAux[1]))
                break;
            contcolumna = contcolumna + 1
        print('veces' + str(veces))
        if veces>0:
            # A column from the same table exists: tile the new values to
            # the matching length, preserving its repetition pattern
            # (consecutive duplicates vs. whole-column repetition).
            if DataSelect3[contcolumna][1][0] == DataSelect3[contcolumna][1][1]:
                for row in DataSelectAux[1]:
                    for i in range(veces):
                        columnaAux.append(row)
                DataSelect3.append([DataSelectAux[0],columnaAux,DataSelectAux[2]])
            else:
                for i in range(veces):
                    for row in DataSelectAux[1]:
                        columnaAux.append(row)
                DataSelect3.append([DataSelectAux[0], columnaAux, DataSelectAux[2]])
            return DataSelect3
        else:
            # New table: cross-expand the data collected so far, then tile
            # the new column against the expanded length.
            DataSelect3 = ActualizarTabla(None,DataSelectAux[1],None,None,DataSelect3,None) # cross-expand existing data
            columna = []
            print(len(DataSelect3[0][1]))
            for j in range(int(len(DataSelect3[0][1]) / len(DataSelectAux[1]))):
                for row in DataSelectAux[1]:
                    columna.append(row)
            DataSelect3.append([DataSelectAux[0], columna, DataSelectAux[2]])
            return DataSelect3
    else:
        # first column collected: append as-is
        print('lo que llega')
        print(DataSelectAux)
        DataSelect3.append([DataSelectAux[0],DataSelectAux[1],DataSelectAux[2]])
        return DataSelect3
| StarcoderdataPython |
# Skip if long ints are not supported.
import skip_if
skip_if.no_bigint()
# int.to_bytes round-trips values just past the 64-bit boundary (9 bytes needed).
print((2**64).to_bytes(9, "little"))
print((-2**64).to_bytes(9, "little", signed=True))
# int.from_bytes, little-endian: b"\x00\x01..." is 256, b"\x01..." is 1.
print(int.from_bytes(b"\x00\x01\0\0\0\0\0\0", "little"))
print(int.from_bytes(b"\x01\0\0\0\0\0\0\0", "little"))
# NOTE(review): identical to the first from_bytes case above -- possibly a
# copy-paste meant to use a different byte pattern; confirm against the
# expected-output file for this test.
print(int.from_bytes(b"\x00\x01\0\0\0\0\0\0", "little"))
| StarcoderdataPython |
3523570 | <reponame>HackSoftware/HackGraderTests<filename>grader_e2e/tests/test_grader_errors.py
import json
from datetime import datetime
from unittest import TestCase
from settings.base import BASE_DIR, THRESHOLD
from ..helper_tests import prepare_and_get, prepare_and_post, poll
from ..helpers import read_binary_file, elapsed_time
class HackTesterErrorTests(TestCase):
    """End-to-end tests for the grader's error paths.

    Each test posts a solution/tests payload to the grader, polls until the
    run finishes (or THRESHOLD seconds elapse) and asserts the reported
    ``test_status``.  The post/poll protocol is identical for every case,
    so it lives in the private helper ``_run_and_get_status``.
    """

    def setUp(self):
        # Remember when the test started so polling can give up via THRESHOLD.
        self.start = datetime.now()

    def _run_and_get_status(self, data):
        """Post *data* to the grader, poll to completion, return test_status.

        Asserts the protocol along the way: 202 on submission, 204 while the
        run is pending, 200 once the result is ready.
        """
        response = prepare_and_post(data)
        self.assertEqual(202, response.status_code)

        response, check_url, path, req_and_resource = prepare_and_get(response)
        while response.status_code != 200:
            self.assertEqual(204, response.status_code)
            response = poll(check_url, path, req_and_resource)
            if elapsed_time(self.start) > THRESHOLD:
                break

        self.assertEqual(200, response.status_code)
        return json.loads(response.text)['output']['test_status']

    def test_posting_python_solution_with_flake8_error_and_lint_true_is_invalid(self):
        data = {
            "test_type": "unittest",
            "language": "python",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/solution_flake8_error.py'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/tests.py'),
            "extra_options": {
                "lint": True
            }
        }
        self.assertEqual('lint_error', self._run_and_get_status(data))

    def test_posting_ruby_solution_with_rubocop_error_with_lint_true_is_invalid(self):
        data = {
            "test_type": "unittest",
            "language": "ruby",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/solution_rubocop_error.rb'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/tests.rb'),
            "extra_options": {
                "lint": True
            }
        }
        self.assertEqual('lint_error', self._run_and_get_status(data))

    def test_fork_bomb(self):
        data = {
            "test_type": "unittest",
            "language": "python",
            "solution": read_binary_file(BASE_DIR + 'fixtures/fork_bomb.py'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/tests.py'),
            "extra_options": {
                "lint": True
            }
        }
        self.assertEqual('test_run_error', self._run_and_get_status(data))

    def test_memory_limit(self):
        data = {
            "test_type": "unittest",
            "language": "python",
            "solution": read_binary_file(BASE_DIR + 'fixtures/memory_limit.py'),
            "test": read_binary_file(BASE_DIR + 'fixtures/memory_limit_tests.py'),
            "extra_options": {
                "lint": True
            }
        }
        self.assertEqual('test_run_error', self._run_and_get_status(data))

    def test_js_infinite_loop_solution_with_valid_tests(self):
        data = {
            "test_type": "unittest",
            "language": "javascript/nodejs",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/infiniteLoop_solution.js'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/infiniteLoop_tests.js'),
        }
        self.assertEqual('time_limit_reached', self._run_and_get_status(data))

    def test_python_infinite_loop_solution_with_valid_tests(self):
        data = {
            "test_type": "unittest",
            "language": "python",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/while_true_solution.py'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/while_true_tests.py'),
        }
        self.assertEqual('time_limit_reached', self._run_and_get_status(data))

    def test_ruby_infinite_loop_solution_with_valid_tests(self):
        data = {
            "test_type": "unittest",
            "language": "ruby",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/exec_loop_solution.rb'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/exec_loop_tests.rb'),
        }
        self.assertEqual('time_limit_reached', self._run_and_get_status(data))

    def test_time_limit_when_it_is_too_small(self):
        data = {
            "test_type": "unittest",
            "language": "python",
            "solution": read_binary_file(BASE_DIR + 'fixtures/binary/solution.py'),
            "test": read_binary_file(BASE_DIR + 'fixtures/binary/tests.py'),
            "extra_options": {
                "lint": True,
                "time_limit": 1,
            }
        }
        self.assertEqual('time_limit_reached', self._run_and_get_status(data))
| StarcoderdataPython |
# Abbreviate the input word i18n-style: first letter, number of letters in
# between, last letter (e.g. "internationalization" -> "i18n").
s=input()
print(s[0]+str(len(s)-2)+s[-1])
9736434 | # Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" A test helper class that provides is the root
of all API helper classes
"""
# pylint: disable=too-few-public-methods
import pytest
from tests.rbac.api.user.user_helper import UserTestHelper
from tests.rbac.api.role.role_helper import RoleTestHelper
from tests.rbac.api.proposal.proposal_helper import ProposalTestHelper
from tests.rbac.api.base.base_helper import BaseApiHelper
class ApiTestHelper(BaseApiHelper):
    """Root of the API test helper hierarchy.

    Aggregates the user, role and proposal helpers so a test can reach any
    of them through one shared instance.
    """
    def __init__(self):
        super().__init__()
        # One helper per API area.
        self.user = UserTestHelper()
        self.role = RoleTestHelper()
        self.proposal = ProposalTestHelper()
# pylint: disable=invalid-name
# Shared module-level singleton; importing tests use this single instance.
helper = ApiTestHelper()
__all__ = ["helper"]
| StarcoderdataPython |
1991952 | <filename>apps/courses/adminx.py
import xadmin
from .models import Course, Lesson, Video, CourseResources, CourseClassify2, CourseClassify, CourseWiki
# Chapters are added directly on the course admin page.
# Videos cannot be nested inside chapters from there,
# but multiple inlines are allowed, so course resources can also be attached while adding a course.
class LessonInline(object):
    # Inline editor so lessons (chapters) can be added on the course admin page.
    model = Lesson
    extra = 0
class CourseResourcesInline(object):
    # Inline editor so downloadable resources can be attached while editing a course.
    model = CourseResources
    extra = 0
class CourseClassify2InLine(object):
    # Inline editor for second-level categories under a top-level category.
    model = CourseClassify2
    extra = 0
class CourseClassifyAdmin(object):
    # Admin options for top-level course categories; sub-categories are edited inline.
    inlines = [CourseClassify2InLine]
    list_display = ['name', 'add_time']
    model_icon = 'fa fa-book'
class CourseClassify2Admin(object):
    # Admin options for second-level course categories.
    list_display = ['name', 'parent_classify', 'add_time']
    model_icon = 'fa fa-book'
class LessonInline(object):
    # NOTE(review): duplicate of the identical LessonInline defined earlier in
    # this module; this definition shadows the first. One is likely redundant.
    model = Lesson
    extra = 0
class CourseResourceInline(object):
    # Inline editor for course resources.
    # BUG FIX: ``model`` previously pointed at ``Lesson`` (copy/paste from
    # LessonInline) despite this class's name; it now points at
    # CourseResources, matching CourseResourcesInline above.
    model = CourseResources
    extra = 0
class CourseAdmin(object):
    # xadmin options for the Course model.
    list_display = ['name', 'desc', 'degree', 'learn_times', 'teacher']
    search_fields = ['name', 'desc', 'detail', 'degree']
    list_filter = ['name', 'desc', 'degree', 'learn_times']
    # ordering = ['-click_nums'] # default ordering (disabled)
    relfield_style = 'fk-ajax' # when another model points here via FK, load choices with an ajax search dropdown
    readonly_fields = ['fav_nums', 'stu_nums'] # read-only, cannot be edited
    exclude = ['learn_times'] # hidden on the detail page; a trigger updates it when an admin adds a video
    # add chapters and resources directly on the course page
    inlines = [LessonInline, CourseResourcesInline]
    # fields editable straight from the change list
    list_editable = ['degree', 'desc']
    model_icon = 'fa fa-cloud'
    # ueditor configuration; fetched in ueditor.py
    # style_fields = {"detail": "ueditor"}
    # if the current user is a teacher, restrict the list to their own courses
    def queryset(self):
        qs = super(CourseAdmin, self).queryset()
        if self.request.user.is_superuser:
            return qs
        else:
            return qs.filter(teacher=self.request.user.teacher)
class LessonAdmin(object):
    # xadmin options for the Lesson (chapter) model.
    list_display = ['name', 'course', 'add_time']
    search_fields = ['course__name', 'name']
    list_filter = ['course', 'name', 'add_time'] # filter by the related course's name field
    model_icon = 'fa fa-folder'
    # if the current user is a teacher, restrict the list to their own lessons
    def queryset(self):
        qs = super(LessonAdmin, self).queryset()
        if self.request.user.is_superuser:
            return qs
        else:
            return qs.filter(course__teacher=self.request.user.teacher)
class VideoAdmin(object):
    # xadmin options for the Video model.
    list_display = ['name', 'get_course', 'lesson', 'add_time']
    search_fields = ['lesson__name', 'lesson__course__name', 'name']
    list_filter = ['lesson', 'lesson__course', 'name', 'add_time']
    model_icon = 'fa fa-video-camera'
    # if the current user is a teacher, restrict the list to their own videos
    def queryset(self):
        qs = super(VideoAdmin, self).queryset()
        if self.request.user.is_superuser:
            return qs
        else:
            return qs.filter(lesson__course__teacher=self.request.user.teacher)
class WikiAdmin(object):
    # xadmin options for the CourseWiki model.
    list_display = ['course', 'wiki', 'add_time']
    model_icon = 'fa fa-leaf'
    # if the current user is a teacher, restrict the list to their own course wikis
    def queryset(self):
        qs = super(WikiAdmin, self).queryset()
        if self.request.user.is_superuser:
            return qs
        else:
            return qs.filter(course__teacher=self.request.user.teacher)
class CourseResourceAdmin(object):
    # xadmin options for the CourseResources model.
    list_display = ['name', 'course', 'add_time']
    search_fields = ['course__name', 'name', 'download']
    list_filter = ['course', 'name', 'download', 'add_time']
    model_icon = 'fa fa-file'
    # if the current user is a teacher, restrict the list to their own resources
    def queryset(self):
        qs = super(CourseResourceAdmin, self).queryset()
        if self.request.user.is_superuser:
            return qs
        else:
            return qs.filter(course__teacher=self.request.user.teacher)
# Register every courses-app model with its xadmin options class.
xadmin.site.register(Course, CourseAdmin)
xadmin.site.register(Lesson, LessonAdmin)
xadmin.site.register(Video, VideoAdmin)
xadmin.site.register(CourseResources, CourseResourceAdmin)
xadmin.site.register(CourseClassify, CourseClassifyAdmin)
xadmin.site.register(CourseClassify2, CourseClassify2Admin)
xadmin.site.register(CourseWiki, WikiAdmin)
9636646 | # MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional
from openspeech.modules.wrapper import Linear
class RelativeMultiHeadAttention(nn.Module):
    r"""
    Multi-head attention with relative positional encoding.
    This concept was proposed in the "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"

    Args:
        dim (int): The dimension of model
        num_heads (int): The number of attention heads.
        dropout_p (float): probability of dropout

    Inputs: query, key, value, pos_embedding, mask
        - **query** (batch, time, dim): Tensor containing query vector
        - **key** (batch, time, dim): Tensor containing key vector
        - **value** (batch, time, dim): Tensor containing value vector
        - **pos_embedding** (batch, time, dim): Positional embedding tensor
        - **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked

    Returns:
        - **outputs**: Tensor produces by relative multi head attention module.
    """
    def __init__(
            self,
            dim: int = 512,
            num_heads: int = 16,
            dropout_p: float = 0.1,
    ) -> None:
        super(RelativeMultiHeadAttention, self).__init__()
        assert dim % num_heads == 0, "d_model % num_heads should be zero."
        self.dim = dim
        self.d_head = int(dim / num_heads)  # per-head dimension
        self.num_heads = num_heads
        self.sqrt_dim = math.sqrt(dim)      # score scaling factor

        # Separate projections for queries, keys, values and positions.
        self.query_proj = Linear(dim, dim)
        self.key_proj = Linear(dim, dim)
        self.value_proj = Linear(dim, dim)
        self.pos_proj = Linear(dim, dim, bias=False)

        self.dropout = nn.Dropout(p=dropout_p)
        # Learned per-head biases added to the query: u for content scores,
        # v for positional scores.
        self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
        self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
        torch.nn.init.xavier_uniform_(self.u_bias)
        torch.nn.init.xavier_uniform_(self.v_bias)

        self.out_proj = Linear(dim, dim)

    def forward(
            self,
            query: Tensor,
            key: Tensor,
            value: Tensor,
            pos_embedding: Tensor,
            mask: Optional[Tensor] = None,
    ) -> Tensor:
        batch_size = value.size(0)

        # Split the model dimension into (num_heads, d_head) per token.
        query = self.query_proj(query).view(batch_size, -1, self.num_heads, self.d_head)
        key = self.key_proj(key).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
        value = self.value_proj(value).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
        pos_embedding = self.pos_proj(pos_embedding).view(batch_size, -1, self.num_heads, self.d_head)

        # Content score: (query + u) . key ; positional score: (query + v) . pos,
        # realigned by the relative-shift trick below.
        content_score = torch.matmul((query + self.u_bias).transpose(1, 2), key.transpose(2, 3))
        pos_score = torch.matmul((query + self.v_bias).transpose(1, 2), pos_embedding.permute(0, 2, 3, 1))
        pos_score = self._relative_shift(pos_score)

        score = (content_score + pos_score) / self.sqrt_dim

        if mask is not None:
            mask = mask.unsqueeze(1)  # broadcast the mask across heads
            score.masked_fill_(mask, -1e4)  # large negative -> ~0 after softmax

        attn = F.softmax(score, -1)
        attn = self.dropout(attn)

        # Weighted sum of values, then merge the heads back to `dim`.
        context = torch.matmul(attn, value).transpose(1, 2)
        context = context.contiguous().view(batch_size, -1, self.dim)

        return self.out_proj(context)

    def _relative_shift(self, pos_score: Tensor) -> Tensor:
        # Realign positional scores: prepend a zero column, reshape so the
        # padding staggers each row by one position, then slice the first
        # row off and restore the original shape.
        batch_size, num_heads, seq_length1, seq_length2 = pos_score.size()
        zeros = pos_score.new_zeros(batch_size, num_heads, seq_length1, 1)
        padded_pos_score = torch.cat([zeros, pos_score], dim=-1)

        padded_pos_score = padded_pos_score.view(batch_size, num_heads, seq_length2 + 1, seq_length1)
        pos_score = padded_pos_score[:, :, 1:].view_as(pos_score)

        return pos_score
| StarcoderdataPython |
334290 | <reponame>jcwon0/BlurHPE
from .nms import oks_nms, soft_oks_nms
from .post_transforms import (affine_transform, flip_back, fliplr_joints,
fliplr_regression, get_affine_transform,
get_warp_matrix, rotate_point, transform_preds,
warp_affine_joints)
# Public API of the post-processing utilities re-exported by this package.
__all__ = [
    'oks_nms', 'soft_oks_nms', 'affine_transform', 'rotate_point', 'flip_back',
    'fliplr_joints', 'fliplr_regression', 'transform_preds',
    'get_affine_transform', 'get_warp_matrix', 'warp_affine_joints'
]
| StarcoderdataPython |
1605772 | """""
The problem follows the In-place Reversal of a LinkedList pattern. We can use a similar approach as discussed in Reverse a LinkedList. Here are the steps we need to follow:
1. Skip the first p-1 nodes, to reach the node at position p.
2. Remember the node at position p-1 to be used later to connect with the reversed sub-list.
3. Next, reverse the nodes from p to q using the same approach discussed in Reverse a LinkedList.
4. Connect the p-1 and q+1 nodes to the reversed sub-list.
"""""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseBetween(self, head, m, n):
        """
        Reverse the nodes of a singly linked list from position m to n
        (1-indexed, inclusive) and return the possibly-new head.

        :type head: ListNode
        :type m: int
        :type n: int
        :rtype: ListNode
        """
        if m == n:
            return head

        # Skip the first m-1 nodes so currentNode points at position m and
        # previousNode at position m-1 (None when m == 1).
        # BUG FIX: the loop previously ran while currentListLength < m,
        # skipping m nodes and reversing positions m+1..n+1 instead of m..n.
        currentListLength = 0
        previousNode, currentNode = None, head
        while currentNode and currentListLength < m - 1:
            currentListLength += 1
            previousNode = currentNode
            currentNode = currentNode.next

        if currentNode is None:
            # m is beyond the end of the list; nothing to reverse
            # (previously this fell through and crashed on None below).
            return head

        lastNodeOfFirstPart = previousNode  # node at position m-1, reconnected below
        lastNodeOfReversedSublist = currentNode  # becomes the tail of the reversed span

        # Reverse n-m+1 nodes starting at position m.
        currentListLength = 0
        while currentNode and currentListLength < n - m + 1:
            nextNode = currentNode.next
            currentNode.next = previousNode
            previousNode = currentNode
            currentNode = nextNode
            currentListLength += 1

        if lastNodeOfFirstPart:  # connect with the first part
            lastNodeOfFirstPart.next = previousNode  # previousNode heads the reversed span
        else:
            head = previousNode  # m == 1: the reversed span starts the list

        lastNodeOfReversedSublist.next = currentNode  # connect the remainder (n+1 ...)
        return head
| StarcoderdataPython |
11317175 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Processor, NotConfiguredException
from googleapiclient import discovery
class ProjectsProcessor(Processor):
    """Processor that fetches GCP projects via the Cloud Resource Manager
    v1 API, either by explicit id ('get' config) or by listing with an
    optional filter, and returns them under the 'projects' key.
    """

    def process(self, config_key=None):
        """Read config under *config_key* (default 'projects') and fetch.

        Config options: 'get' (list of project ids, Jinja-expanded),
        'filter' (list filter, Jinja-expanded), 'indexing'
        ('projectId' default | 'parent' | 'list'), 'jinjaFilter'
        (per-project template; a blank render drops the project).
        Raises NotConfiguredException when the key is absent.
        """
        if config_key is None:
            config_key = 'projects'
        if config_key not in self.config:
            raise NotConfiguredException('No settings configured!')

        projects_config = self.config[config_key]
        service = discovery.build('cloudresourcemanager',
                                  'v1',
                                  http=self._get_branded_http())
        projects = []
        if 'get' in projects_config:
            # Explicit project ids: fetch each one directly.
            for project in projects_config['get']:
                project_id = self._jinja_expand_string(project)
                request = service.projects().get(projectId=project_id)
                response = request.execute()
                projects.append(response)
        else:
            # List mode: paginate through all visible projects.
            page_token = None
            project_filter = None
            if 'filter' in projects_config:
                project_filter = self._jinja_expand_string(
                    projects_config['filter'])

            indexing = 'projectId'
            if 'indexing' in projects_config:
                if projects_config['indexing'] == 'parent':
                    indexing = 'parent'
                if projects_config['indexing'] == 'list':
                    indexing = 'list'

            # Result shape depends on indexing: flat list, or dict keyed by
            # projectId / "parentType/parentId".
            if indexing == 'list':
                projects = []
            else:
                projects = {}
            while True:
                request = service.projects().list(filter=project_filter,
                                                  pageToken=page_token)
                response = request.execute()
                for project in response.get('projects', []):
                    if 'jinjaFilter' in projects_config:
                        # A template rendering to whitespace drops the project.
                        jf_template = self.jinja_environment.from_string(
                            projects_config['jinjaFilter'])
                        jf_template.name = 'project_filter'
                        jf_str = jf_template.render(project)
                        if jf_str.strip() == '':
                            continue

                    if indexing == 'list':
                        projects.append(project)
                    elif indexing == 'projectId':
                        projects[project['projectId']] = project
                    elif indexing == 'parent':
                        parent = '%s/%s' % (project['parent']['type'],
                                            project['parent']['id'])
                        if parent not in projects:
                            projects[parent] = {}
                        projects[parent][project['projectId']] = project

                if 'nextPageToken' in response:
                    page_token = response['nextPageToken']
                    continue
                break

        return {
            'projects': projects,
        }
| StarcoderdataPython |
287774 | from aiounittest import AsyncTestCase
from robot.collector.shortcut import *
class ContextCollectorTest(AsyncTestCase):
    """Checks that the context() collector yields a copy of the pipeline context."""
    async def test_context(self):
        payload = {'key': 'value'}
        _, collected = await context()(payload, None)
        # Same content...
        self.assertEqual(collected, payload)
        # ...but a distinct object: the context must be copied, not aliased.
        self.assertIsNot(collected, payload)
| StarcoderdataPython |
1737597 | <filename>results/2plus1D/compare_lithium_ion_2plus1D.py
import pybamm
import numpy as np
import matplotlib.pyplot as plt
import sys
# set logging level and increase recursion limit
pybamm.set_logging_level("INFO")
sys.setrecursionlimit(10000)
# load models: three 1D lithium-ion models and their 2+1D (potential-pair
# current collector) counterparts, compared on the same discharge.
models = [
    pybamm.lithium_ion.SPM(name="1D SPM"),
    pybamm.lithium_ion.SPMe(name="1D SPMe"),
    pybamm.lithium_ion.DFN(name="1D DFN"),
    pybamm.lithium_ion.SPM(
        {"current collector": "potential pair", "dimensionality": 2}, name="2+1D SPM"
    ),
    pybamm.lithium_ion.SPMe(
        {"current collector": "potential pair", "dimensionality": 2}, name="2+1D SPMe"
    ),
    pybamm.lithium_ion.DFN(
        {"current collector": "potential pair", "dimensionality": 2}, name="2+1D DFN"
    ),
]
# load parameter values (shared by all models)
param = models[0].default_parameter_values
C_rate = 1
param.update({"C-rate": C_rate})
# make current collectors not so conductive, just for illustrative purposes
param.update(
    {
        "Negative current collector conductivity [S.m-1]": 5.96e6,
        "Positive current collector conductivity [S.m-1]": 3.55e6,
    }
)
# process models
for model in models:
    param.process_model(model)
# process geometry and discretise models (coarse 5-point meshes per dimension)
meshes = [None] * len(models)
for i, model in enumerate(models):
    geometry = model.default_geometry
    param.process_geometry(geometry)
    var = pybamm.standard_spatial_vars
    var_pts = {
        var.x_n: 5,
        var.x_s: 5,
        var.x_p: 5,
        var.r_n: 5,
        var.r_p: 5,
        var.y: 5,
        var.z: 5,
    }
    meshes[i] = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
    disc = pybamm.Discretisation(meshes[i], model.default_spatial_methods)
    disc.process_model(model)
# solve models and process time and voltage for plotting on different meshes
solutions = [None] * len(models)
times = [None] * len(models)
voltages = [None] * len(models)
t_eval = np.linspace(0, 1, 1000)
for i, model in enumerate(models):
    model.convert_to_format = "casadi"  # use casadi for jacobian
    solution = model.default_solver.solve(model, t_eval)
    solutions[i] = solution
    # NOTE(review): the positional ProcessedVariable(var, t, y) signature
    # matches older PyBaMM releases — confirm against the pinned version.
    times[i] = pybamm.ProcessedVariable(
        model.variables["Time [h]"], solution.t, solution.y
    )
    voltages[i] = pybamm.ProcessedVariable(
        model.variables["Terminal voltage [V]"], solution.t, solution.y, mesh=meshes[i]
    )
# plot terminal voltage
# NOTE(review): `solution` here is the last model's solution, so the plotting
# window is set by the final model solved — presumably intended.
t = np.linspace(0, solution.t[-1], 100)
for i, model in enumerate(models):
    plt.plot(times[i](t), voltages[i](t), label=model.name)
plt.xlabel("Time [h]")
plt.ylabel("Terminal voltage [V]")
plt.legend()
# add C-rate, delta, and alpha to title
delta = param.evaluate(pybamm.standard_parameters_lithium_ion.delta)
alpha = param.evaluate(pybamm.standard_parameters_lithium_ion.alpha)
plt.title(
    r"C-rate = {:3d}, $\alpha$ = {:.6f} , $\delta$ = {:.6f}".format(
        C_rate, alpha, delta
    )
)
# save and show
file_name = "discharge_curve_2plus1D_comparison.eps"
plt.savefig(file_name, format="eps", dpi=1000)
plt.show()
| StarcoderdataPython |
6660786 | <reponame>gymk/ANLP
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 15:55:40 2019
@author: <NAME>
"""
import nltk
from nltk.corpus import stopwords
import pandas as pd
stop_words = stopwords.words('english')
def getHeapLawValues(corpus_name):
    """
    Compute Heaps'-law statistics for one Gutenberg corpus.

    :param corpus_name: Gutenberg corpus file id
    :return: [vocabulary size M, token count T, ratio M/T]
    """
    tokens = nltk.Text(nltk.corpus.gutenberg.words(corpus_name))
    # Keep alphabetic tokens only, lower-cased.
    tokens = [t.lower() for t in tokens if t.isalpha()]
    # Drop English stop words.
    tokens = [t for t in tokens if t not in stop_words]
    vocab_size = len(set(tokens))
    token_count = len(tokens)
    print(corpus_name, ' M: ', vocab_size, ' T: ', token_count, 'Ratio: ', vocab_size / token_count)
    return [vocab_size, token_count, vocab_size / token_count]
column_headers = ['M', 'T', 'Ratio']
df = pd.DataFrame(columns=column_headers)
# One row per Gutenberg corpus: vocabulary size, token count, and their ratio.
for corpus in nltk.corpus.gutenberg.fileids():
    df.loc[corpus] = getHeapLawValues(corpus)
print(df)
#df.plot()
df.Ratio.plot() | StarcoderdataPython |
8053256 | <filename>util.py
import json
def loadConfig():
    """Read and return the JSON configuration stored in ./config.json."""
    with open('./config.json', 'r') as config_file:
        return json.load(config_file)
def filtFileName(rawName, kind='JSON'):
    """
    Strip the extension from a file name, e.g. 'objname.json' -> 'objname'.

    :param rawName: file name including its extension
    :param kind: extension name without the dot (default: JSON)
    :return: the name with the trailing '.<kind>' characters removed
    """
    trim = len(kind) + 1  # extension plus the dot
    return rawName[:-trim]
# Module-wide debug switch.
DEBUG = False
# Data-category labels used throughout the project.
SENSOR = 'Sensor'
INTERACTION = 'Interaction'
TAG = 'Tag'
OBJECT = 'Object'
RAW = 'Raw'
RECG = 'Recg' | StarcoderdataPython |
3536540 | <gh_stars>100-1000
"""
Tests for mimic identity :mod:`mimic.rest.identity_api`
"""
from __future__ import absolute_import, division, unicode_literals
import json
import uuid
from six import text_type
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from mimic.test.dummy import (
make_example_internal_api,
make_example_external_api
)
from mimic.test.helpers import json_request, request, get_template_id
from mimic.test.mixins import IdentityAuthMixin, InvalidJsonMixin, ServiceIdHeaderMixin
class TestIdentityOSKSCatalogTenantAdminEndpointTemplatesList(
        SynchronousTestCase, IdentityAuthMixin, ServiceIdHeaderMixin):
    """
    Tests for ``/identity/v2.0/<tenant-id>/OS-KSCATALOG/endpointTemplates``,
    provided by :obj:`mimic.rest.idenity_api.IdentityApi`.

    All tests issue GET requests against the tenant's OS-KSCATALOG endpoints
    resource set up in :meth:`setUp`.
    """
    def setUp(self):
        self.tenant_id = 'some_tenant'
        self.core = MimicCore(Clock(), [])
        self.root = MimicRoot(self.core).app.resource()
        self.uri = (
            "/identity/v2.0/tenants/" + self.tenant_id +
            "/OS-KSCATALOG/endpoints"
        )
        self.eeapi_name = u"externalServiceName"
        self.eeapi = make_example_external_api(
            self,
            name=self.eeapi_name,
            set_enabled=True
        )
        self.headers = {
            b'X-Auth-Token': [b'<KEY>']
        }
        self.verb = b"GET"
    def test_list_only_internal_apis_available(self):
        """
        GET will not list Internal APIs.
        """
        self.core.add_api(make_example_internal_api(self))
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 200)
        self.assertEqual(len(json_body['endpoints']), 0)
        self.assertEqual(len(json_body['endpoints_links']), 0)
    def test_list_single_template(self):
        """
        GET will list an external API if it has a endpoint template.
        """
        self.core.add_api(self.eeapi)
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 200)
        self.assertEqual(len(json_body['endpoints']), 1)
        self.assertEqual(len(json_body['endpoints_links']), 0)
    def test_list_template_all_disabled(self):
        """
        GET will not list endpoint templates that are disabled.
        """
        self.core.add_api(self.eeapi)
        id_key = get_template_id(self, self.eeapi)
        self.eeapi.endpoint_templates[id_key].enabled_key = False
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 200)
        self.assertEqual(len(json_body['endpoints']), 0)
        self.assertEqual(len(json_body['endpoints_links']), 0)
    def test_list_single_template_external_and_internal_apis(self):
        """
        GET will only list external API endpoint templates.
        """
        self.core.add_api(self.eeapi)
        self.core.add_api(make_example_internal_api(self))
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 200)
        self.assertEqual(len(json_body['endpoints']), 1)
        self.assertEqual(len(json_body['endpoints_links']), 0)
    def test_multiple_external_apis(self):
        """
        GET will list multiple external APIs.
        """
        api_list = [
            make_example_external_api(
                self,
                name=self.eeapi_name + text_type(uuid.uuid4()),
                service_type='service-' + text_type(uuid.uuid4()),
                set_enabled=True
            )
            for ignored in range(10)
        ]
        # eeapi should be the first entry in the list
        api_list.insert(0, self.eeapi)
        for api in api_list:
            self.core.add_api(api)
        self.assertEqual(len(self.core._uuid_to_api_external),
                         len(api_list))
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 200)
        self.assertEqual(len(json_body['endpoints']),
                         len(api_list))
        self.assertEqual(len(json_body['endpoints_links']), 0)
class TestIdentityOSKSCatalogTenantAdminEndpointTemplatesCreate(
        SynchronousTestCase, IdentityAuthMixin, InvalidJsonMixin):
    """
    Tests for ``/identity/v2.0/<tenant-id>/OS-KSCATALOG/endpointTemplates``,
    provided by :obj:`mimic.rest.idenity_api.IdentityApi`
    """
    # All tests POST to the tenant's OS-KSCATALOG endpoints resource.
    def setUp(self):
        self.tenant_id = 'some_tenant'
        self.core = MimicCore(Clock(), [])
        self.root = MimicRoot(self.core).app.resource()
        self.uri = (
            "/identity/v2.0/tenants/" + self.tenant_id +
            "/OS-KSCATALOG/endpoints"
        )
        self.eeapi_name = u"externalServiceName"
        # External API starts disabled; test_enable_template turns it on.
        self.eeapi = make_example_external_api(
            self,
            name=self.eeapi_name,
            set_enabled=False
        )
        self.headers = {
            b'X-Auth-Token': [b'ABCDEF987654321']
        }
        self.verb = b"POST"
    def test_json_body_missing_required_field_oskscatalog(self):
        """
        POST with the OS-KSCATALOG:endointTemplate body entirely missing
        results in 400.
        """
        data = {
            'id': text_type(uuid.uuid4()),
        }
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         body=data,
                         headers=self.headers))
        self.assertEqual(response.code, 400)
        self.assertEqual(json_body['badRequest']['code'], 400)
        self.assertTrue(
            json_body['badRequest']['message'].startswith(
                "Invalid Content. OS-KSCATALOG:endpointTemplate:id is "
                "required."
            )
        )
    def test_json_body_missing_required_field_template_id(self):
        """
        POST with the OS-KSCATALOG:endointTemplate body missing it's content
        results in 400.
        """
        data = {
            "OS-KSCATALOG:endpointTemplate": {
            }
        }
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         body=data,
                         headers=self.headers))
        self.assertEqual(response.code, 400)
        self.assertEqual(json_body['badRequest']['code'], 400)
        self.assertTrue(
            json_body['badRequest']['message'].startswith(
                "Invalid Content. OS-KSCATALOG:endpointTemplate:id is "
                "required."
            )
        )
    def test_invalid_template_id(self):
        """
        POST with invalid endpointTemplate ID results in 404.
        """
        self.core.add_api(self.eeapi)
        data = {
            "OS-KSCATALOG:endpointTemplate": {
                "id": "some-id"
            }
        }
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         body=data,
                         headers=self.headers))
        self.assertEqual(response.code, 404)
        self.assertEqual(json_body['itemNotFound']['code'], 404)
        self.assertTrue(
            json_body['itemNotFound']['message'].startswith(
                "Unable to locate an External API with the given Template ID."
            )
        )
    def test_enable_template(self):
        """
        POST can update an existing endpoint template resulting in a 201.
        """
        self.core.add_api(self.eeapi)
        id_key = get_template_id(self, self.eeapi)
        data = {
            "OS-KSCATALOG:endpointTemplate": {
                "id": id_key
            }
        }
        # Raw request (not json_request): only the status code matters here.
        req = request(self, self.root, self.verb,
                      self.uri,
                      body=json.dumps(data).encode("utf-8"),
                      headers=self.headers)
        response = self.successResultOf(req)
        self.assertEqual(response.code, 201)
class TestIdentityOSKSCatalogTenantAdminEndpointTemplatesDelete(SynchronousTestCase, IdentityAuthMixin):
    """
    Tests for ``/identity/v2.0/<tenant-id>/OS-KSCATALOG/endpointTemplates``,
    provided by :obj:`mimic.rest.idenity_api.IdentityApi`
    """
    # All tests DELETE the tenant's specific endpoint-template resource.
    def setUp(self):
        self.tenant_id = 'some_tenant'
        self.core = MimicCore(Clock(), [])
        self.root = MimicRoot(self.core).app.resource()
        self.eeapi_name = u"externalServiceName"
        self.eeapi = make_example_external_api(
            self,
            name=self.eeapi_name
        )
        self.template_id = get_template_id(self, self.eeapi)
        self.assertIsNotNone(self.template_id)
        self.uri = (
            "/identity/v2.0/tenants/" + self.tenant_id +
            "/OS-KSCATALOG/endpoints/" + self.template_id
        )
        self.headers = {
            b'X-Auth-Token': [b'<KEY>']
        }
        self.verb = b"DELETE"
    def test_invalid_template_id(self):
        """
        DELETE with an invalid endpoint template id results in 404.
        """
        # Remove the template so the URI's template id no longer resolves.
        self.eeapi.remove_template(self.template_id)
        self.core.add_api(self.eeapi)
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 404)
        self.assertEqual(json_body['itemNotFound']['code'], 404)
        self.assertTrue(
            json_body['itemNotFound']['message'].startswith(
                "Unable to locate an External API with the given Template ID."
            )
        )
    def test_template_id_not_enabled_for_tenant(self):
        """
        DELETE for endpoint template not enabled for a tenant or globally
        results in 404.
        """
        self.core.add_api(self.eeapi)
        (response, json_body) = self.successResultOf(
            json_request(self, self.root, self.verb,
                         self.uri,
                         headers=self.headers))
        self.assertEqual(response.code, 404)
        self.assertEqual(json_body['itemNotFound']['code'], 404)
        self.assertEqual(
            json_body['itemNotFound']['message'],
            "Template not enabled for tenant"
        )
    def test_disable_template(self):
        """
        DELETE for endpoint template enabled for tenant results in 204.
        """
        self.core.add_api(self.eeapi)
        self.eeapi.enable_endpoint_for_tenant(
            self.tenant_id,
            self.template_id
        )
        # Second API without any templates: must not interfere with deletion.
        eeapi2 = make_example_external_api(
            self,
            name="alternate " + self.eeapi_name
        )
        ept_id2 = get_template_id(self, eeapi2)
        eeapi2.remove_template(ept_id2)
        self.core.add_api(eeapi2)
        req = request(self, self.root, self.verb,
                      self.uri,
                      headers=self.headers)
        response = self.successResultOf(req)
        self.assertEqual(response.code, 204)
| StarcoderdataPython |
175362 | <reponame>Agamiru/exceptions_and_logging
import logging
import os
# Extra LogRecord attributes MSG_FORMAT expects (supplied via logging's `extra=`).
EXTRA_KWARGS = ["err_type"]
DEFAULT_LOGGER_NAME = "default_logger"
MSG_FORMAT = (
    '%(asctime)s - APPLICATION ERROR - %(levelname)s\n'
    'Error Type: %(err_type)s\n'
    'Error Message: %(message)s\n'
)
module_dir = os.path.dirname(__file__)
DEFAULT_LOG_CONFIG_FILE = os.path.join(module_dir, "logging_config.yaml")


class DefaultFormatter(logging.Formatter):
    """Formatter that appends two blank lines after each record for readability."""

    def __init__(self, fmt=MSG_FORMAT, datefmt='%m/%d/%Y %I:%M:%S %p'):
        super().__init__(fmt=fmt, datefmt=datefmt)

    def format(self, record):
        formatted = super().format(record)
        return formatted + "\n\n"
# dictConfig-style logging configuration: console + file handlers, both using
# DefaultFormatter; the application logger captures ERROR and above.
default_dict_config = {
    "version": 1,
    "formatters": {
        "default": {
            # "()" is dictConfig's factory key: instantiate DefaultFormatter.
            "()": DefaultFormatter,
            "format": MSG_FORMAT,
        }
    },
    "handlers": {
        "console": {
            "class": 'logging.StreamHandler',
            "formatter": "default"
        },
        "file": {
            "class": "logging.FileHandler",
            "formatter": "default",
            "filename": "application.log",
            "encoding": "utf-8"
        }
    },
    "loggers": {
        DEFAULT_LOGGER_NAME: {
            "level": "ERROR",
            "handlers": ["console", "file"]
        },
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ["console"]
    },
}
| StarcoderdataPython |
1690035 | <gh_stars>1-10
# Read the 13 digits of the ISBN one at a time.
isbn = []
for count in range(13):
    isbn.append(int(input("Please enter next digit of ISBN: ")))
# Weighted sum of the first 12 digits: weight 1 for even positions,
# weight 3 for odd positions (standard ISBN-13 checksum).
calculated_digit = 0
count = 0
while count < 12:
    calculated_digit = calculated_digit + isbn[count]
    count = count + 1
    calculated_digit = calculated_digit + isbn[count] * 3
    count = count + 1
# Reduce modulo 10 (repeated subtraction instead of the % operator).
while calculated_digit >= 10:
    calculated_digit = calculated_digit - 10
# The check digit is (10 - remainder), with 10 wrapping to 0.
calculated_digit = 10 - calculated_digit
if calculated_digit == 10:
    calculated_digit = 0
# Compare against the 13th digit supplied by the user.
if calculated_digit == isbn[12]:
    print("Valid ISBN")
else:
print("Invalid ISBN") | StarcoderdataPython |
252364 | from output.models.nist_data.atomic.duration.schema_instance.nistschema_sv_iv_atomic_duration_max_inclusive_1_xsd.nistschema_sv_iv_atomic_duration_max_inclusive_1 import NistschemaSvIvAtomicDurationMaxInclusive1
__all__ = [
"NistschemaSvIvAtomicDurationMaxInclusive1",
]
| StarcoderdataPython |
1216 | <reponame>bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis
# Load json bulk files into elasticsearch
import json
import os
import time
import traceback
import elasticsearch
from etl.common.store import list_entity_files
from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template
class ElasticSearchException(Exception):
    """Raised when an Elasticsearch API response reports errors."""
    pass
# Init Elasticsearch and test connection
# Init Elasticsearch and test connection
def init_es_client(url, logger):
    """Build an Elasticsearch client for *url* and verify the node is reachable."""
    es_client = elasticsearch.Elasticsearch([url])
    try:
        info = es_client.info()
        logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url))
    except elasticsearch.exceptions.ConnectionError as e:
        logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url))
        raise e
    return es_client
def check_error(response):
    """Raise ElasticSearchException when an ES response carries an 'errors' flag."""
    errors = response.get('errors')
    if errors:
        raise ElasticSearchException(response)
def create_index(es_client, index_name, logger):
    """Create an Elasticsearch index, raising on API errors."""
    logger.debug('Creating index "{}"...'.format(index_name))
    response = es_client.indices.create(index_name)
    check_error(response)
def delete_index(es_client, index_name, logger):
    """Delete an Elasticsearch index, raising on API errors."""
    logger.debug('Deleting index "{}"...'.format(index_name))
    response = es_client.indices.delete(index_name)
    check_error(response)
def create_template(es_client, es_config, document_type, base_index_name, logger):
    """Install the index template for *document_type*, if a mapping is configured."""
    mapping = es_config['document-mappings'].get(document_type+"_mapping")
    if not mapping:
        # No mapping configured for this document type: nothing to install.
        return
    template_name = 'template_elixir_' + base_index_name
    template_pattern = base_index_name + '-d*'
    logger.debug('Creating template "{}" on pattern "{}"...'.format(template_name, template_pattern))
    body = {'template': template_pattern, 'mappings': mapping}
    if 'index-settings' in es_config:
        body['settings'] = es_config['index-settings']
    check_error(es_client.indices.put_template(name=template_name, body=body))
def bulk_index(es_client, index_name, file_path, logger):
    """Send a pre-built bulk file to Elasticsearch for indexing into *index_name*."""
    file_name = os.path.basename(file_path)
    logger.debug('Bulk indexing file "{}" in index "{}"...'.format(file_name, index_name))
    with open(file_path, 'r') as bulk_file:
        payload = bulk_file.read()
    check_error(es_client.bulk(index=index_name, body=payload, timeout='2000ms'))
def create_alias(es_client, alias_name, base_index_name, logger):
    """Point *alias_name* at *base_index_name*, raising on API errors."""
    logger.debug('Creating alias "{}" for index "{}"'.format(alias_name, base_index_name))
    response = es_client.indices.put_alias(alias_name, base_index_name)
    check_error(response)
def get_indices(es_client, base_index_name):
    """Return all timestamped index names for *base_index_name*, newest first."""
    rows = es_client.cat.indices(base_index_name + '-d*', params={'h': 'index'})
    return sorted((row['index'] for row in rows), reverse=True)
def load_source(source, config, source_bulk_dir, log_dir):
    """
    Index one source's JSON bulk files into Elasticsearch: create templates
    and timestamped indices, bulk-load the files, then swing aliases and
    prune old indices. Failures are logged, not raised.
    """
    source_name = source['schema:identifier']
    action = 'load-elasticsearch-' + source_name
    log_file = get_file_path([log_dir, action], ext='.log', recreate=True)
    logger = create_logger(source_name, log_file, config['options']['verbose'])
    load_config = config['load-elasticsearch']
    es_client = init_es_client(load_config['url'], logger)
    logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url']))
    try:
        if not os.path.exists(source_bulk_dir):
            raise FileNotFoundError(
                'No such file or directory: \'{}\'.\n'
                'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation'
                ' before trying to launch the transformation process.'
                .format(source_bulk_dir))
        bulk_files = list(list_entity_files(source_bulk_dir))
        all_document_types = set(map(first, bulk_files))
        # Restrict to configured document types (when configured) that
        # actually have bulk files on disk.
        document_types = load_config.get('document-types') or all_document_types
        document_types = document_types.intersection(all_document_types)
        index_by_document = dict()
        logger.info("Preparing index with template mapping...")
        # One shared timestamp so every document type gets the same suffix.
        timestamp = int(time.time())
        for document_type in document_types:
            base_index_name = replace_template(
                load_config['index-template'],
                {'source': source['schema:identifier'], 'documentType': document_type}
            ).lower()
            create_template(es_client, load_config, document_type, base_index_name, logger)
            index_name = base_index_name + '-d' + str(timestamp)
            create_index(es_client, index_name, logger)
            index_by_document[document_type] = base_index_name, index_name
        logger.info("Bulk indexing...")
        for document_type, file_path in bulk_files:
            if document_type in index_by_document:
                base_index_name, index_name = index_by_document[document_type]
                bulk_index(es_client, index_name, file_path, logger)
        logger.info("Creating index aliases and deleting old indices...")
        for document_type, (base_index_name, index_name) in index_by_document.items():
            create_alias(es_client, index_name, base_index_name, logger)
            new_index, *old_indices = get_indices(es_client, base_index_name)
            # NOTE(review): old_indices[1:] keeps the most recent previous
            # index alive — presumably for rollback; confirm intent.
            for old_index in old_indices[1:]:
                delete_index(es_client, old_index, logger)
        logger.info("SUCCEEDED Loading {}.".format(source_name))
    except Exception as e:
        logger.debug(traceback.format_exc())
        logger.debug(getattr(e, 'long_message', ''))
        logger.info("FAILED Loading {} Elasticsearch documents.\n"
                    "=> Check the logs ({}) for more details."
                    .format(source_name, log_file))
def main(config):
    """Load every configured source's JSON bulk files into Elasticsearch."""
    log_dir = config['log-dir']
    bulk_dir = os.path.join(config['data-dir'], 'json-bulk')
    if not os.path.exists(bulk_dir):
        raise Exception('No json bulk folder found in ' + bulk_dir)
    for source_name, source in config['sources'].items():
        source_bulk_dir = get_folder_path([bulk_dir, source_name])
        load_source(source, config, source_bulk_dir, log_dir)
| StarcoderdataPython |
11295213 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import string
class CharsetError(Exception):
    """Raised when a charset specification contains unknown shorthand characters."""
    pass


# Shorthand character -> character class it expands to.
CHARSETS = {
    "a": string.ascii_lowercase,
    "A": string.ascii_uppercase,
    "1": string.digits,
    "!": string.punctuation,
    "*": string.printable,
}

# Complete charsets addressable by name.
PREDEFINED_CHARSETS = {
    "base32": CHARSETS["A"] + "234567=",
    "base64": CHARSETS["a"] + CHARSETS["A"] + CHARSETS["1"] + "/+=",
    "printable": CHARSETS["*"],
}


def get_charset(charset):
    """
    Expand a charset specification into the concrete characters it denotes.

    :param charset: a predefined charset name (e.g. "base32") or a string of
        shorthand characters from CHARSETS (e.g. "aA1"); falsy means "printable"
    :return: the expanded character string
    :raises CharsetError: if a shorthand character is not in CHARSETS
    """
    charset = charset or "printable"
    if charset in PREDEFINED_CHARSETS.keys():
        return PREDEFINED_CHARSETS[charset]
    try:
        # Deduplicate while preserving first-occurrence order; the previous
        # `set(charset)` iteration made the output order vary across runs
        # (string hash randomization).
        seen = set()
        expanded = ""
        for c in charset:
            if c in seen:
                continue
            seen.add(c)
            expanded += CHARSETS[c]
        return expanded
    except KeyError:
        raise CharsetError("Bad character set")
| StarcoderdataPython |
6612709 | import os
import logging.config
import json
# Project root: one directory above this file's directory.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# set up logging
with open(os.path.join(base_dir, 'config', 'logging.json'), 'r') as f:
    logging_config = json.load(f)
logging.config.dictConfig(logging_config)
logging.info("Running from "+base_dir)
| StarcoderdataPython |
5179149 | """ Isomorphic widgets for working with JSON-LD algorithms
"""
from ._version import __version__, module_name
from .widget_jsonld import Compact, Expand, Flatten, Frame, Normalize
__all__ = ["__version__", "Expand", "Compact", "Flatten", "Frame", "Normalize"]
def _jupyter_labextension_paths():
    """Tell JupyterLab where this package's bundled labextension lives."""
    paths = [{"src": "labextension", "dest": module_name}]
    return paths
| StarcoderdataPython |
11388921 | <reponame>mohamed-a/hypermodern-python<filename>src/hypermodern_python/console.py
# src/hypermodern_python/console.py
import click
from . import __version__
# CLI entry point registered by click; prints a greeting.
@click.command()
@click.version_option(version=__version__)
def main():
    """The hypermodern Python project."""
click.echo("Hello, world!") | StarcoderdataPython |
256667 | """
.. module:: news
:platform: Unix
:synopsis: This module contains the interface of the events' retrieval
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import feedparser
from clnews.utils import remove_html
from clnews.exceptions import ChannelDataNotFound, ChannelServerError, \
ChannelRetrieveEventsError
class Event(object):
    """Container for a single news event."""
    def __init__(self, title, url, date, summary=None):
        """Initialize the event.

        Args:
            title (str): The title of the event.
            url (str): The URL of the event.
            date (str): The date of the event.

        Kwargs:
            summary (str): Optional summary; HTML tags are stripped from it.
        """
        self.title = title
        self.url = url
        self.date = date
        if summary:
            self.summary = remove_html(summary)
        else:
            self.summary = ""

    def __repr__(self):
        return "%s, %s" % (self.title, self.url)
class Channel(object):
    """A news channel backed by an RSS/Atom feed."""
    def __init__(self, name, url):
        """Initialize the channel.

        Args:
            name (str): The name of the channel.
            url (str): The URL of the channel's feed.
        """
        self.name = name
        self.url = url
        self.events = []

    def _get_data(self):
        """Fetch the feed and return its entries, validating the HTTP status."""
        response = feedparser.parse(self.url)
        status = response.status
        if status == 404:
            raise ChannelDataNotFound
        if status != 200:
            raise ChannelServerError
        return response.entries

    def get_events(self):
        """Retrieve the channel's current events.

        Raises:
            ChannelRetrieveEventsError: the feed entries could not be parsed
                into Event objects.
        """
        entries = self._get_data()
        try:
            self.events = [Event(e.title, e.link, e.published, e.summary)
                           for e in entries]
        except TypeError:
            # the entries object was not a list as it should be
            raise ChannelRetrieveEventsError
        return self.events
| StarcoderdataPython |
4882980 | <filename>api/kubeops_api/migrations/0040_auto_20191104_1009.py
# Generated by Django 2.1.11 on 2019-11-04 10:09
from django.db import migrations
class Migration(migrations.Migration):
    # Data migration: seeds the DNS table with default resolvers.
    dependencies = [
        ('kubeops_api', '0039_dns'),
    ]
    # NOTE: plain functions in the class body (no `self`); they are referenced
    # by name below while the class body executes, which is valid Python.
    def forwards_func(apps, schema_editor):
        # Use the historical model so the migration works at any schema state.
        DNS = apps.get_model("kubeops_api", "DNS")
        db_alias = schema_editor.connection.alias
        DNS.objects.using(db_alias).create(dns1="172.16.58.3", dns2="8.8.8.8")
    def reverse_func(apps, schema_editor):
        DNS = apps.get_model("kubeops_api", "DNS")
        db_alias = schema_editor.connection.alias
        # Reverse: remove every DNS row (not only the seeded one).
        for dns in DNS.objects.using(db_alias).all():
            dns.delete()
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| StarcoderdataPython |
6442710 | # -*- coding: utf-8 -*-
import importlib
import inspect
import json
import random
import string
import fnmatch
import os
from django.http import HttpResponse
from django.test import TestCase
def find_files_recursively(root_path, filename='*', prefix='', suffix='.py', exclude_name=()):
    """
    Recursively search *root_path* for files matching prefix + filename + suffix.

    :param root_path: directory to walk
    :param filename: glob pattern for the file-name stem (default: any)
    :param prefix: required file-name prefix
    :param suffix: required file-name suffix (default: '.py')
    :param exclude_name: file names to skip
    :return: dict mapping file name -> full path
             (NOTE(review): a name present in several directories keeps only
             the last path encountered — confirm this is intended)
    """
    pattern = prefix + filename + suffix
    found = {}
    for dirpath, _dirnames, names in os.walk(root_path):
        for name in fnmatch.filter(names, pattern):
            if name not in exclude_name:
                found[name] = os.path.join(dirpath, name)
    return found
def add_app_testcase(base_dir, app_name):
    """
    Collect every TestCase subclass defined in an app's ``testing`` package.

    :param base_dir: project root directory containing the app
    :param app_name: name of the Django app to scan
    :return: dict mapping class name -> TestCase subclass, or None when the
             app directory or its ``testing`` folder does not exist
    """
    app_dir = os.path.join(base_dir, app_name)
    if not os.path.exists(app_dir):
        print(u'不存在app{0}的路径'.format(app_name))
        return
    testing_dir = os.path.join(app_dir, 'testing')
    if not os.path.exists(testing_dir):
        print(u'{0}不存在testing文件夹'.format(app_name))
        return
    matches = find_files_recursively(testing_dir, exclude_name=('__init__.py', 'mock.py'))
    test_cases = {}
    for file_name in matches:
        # Only modules named test*.py are treated as test modules.
        if file_name[:4] != 'test':
            continue
        file_module = importlib.import_module('{0}.testing.{1}'.format(app_name, file_name[:-3]))
        for name, obj in inspect.getmembers(file_module):
            if not inspect.isclass(obj):
                continue
            if issubclass(obj, TestCase):
                test_cases[name] = obj
    return test_cases
def author(author_name='George1994'):
    """
    Decorator factory that records the author's name on the decorated function.

    :param author_name: name to store as ``author_name`` on the target
    :return: a decorator that tags its target and returns it unchanged
    """
    def _tag(func):
        func.author_name = author_name
        return func
    return _tag
def str_time_format(time, str_format="%Y-%m-%d"):
    """
    Render a datetime-like object as a string in the given format.

    :param time: object exposing ``strftime`` (datetime/date/time)
    :param str_format: strftime format string (default: ISO date)
    :return: the formatted string
    """
    return time.strftime(str_format)
def json_http_response(content):
    """
    Serialize *content* to JSON and wrap it in an HTTP 200 response.

    :param content: JSON-serializable payload (typically a dict)
    :return: HttpResponse with a JSON body
    """
    # NOTE(review): `mimetype` is the pre-Django-1.7 spelling of
    # `content_type` — confirm the project's Django version before changing.
    body = json.dumps(content)
    return HttpResponse(content=body, mimetype="application/json; charset=UTF-8",
                        status=200)
def random_str(num=12):
    """
    Generate a random alphanumeric string.

    :param num: number of characters to generate
    :return: random string of length *num*
    """
    # random.choice with replacement instead of random.sample: sample raised
    # ValueError for num > 62 and could never repeat a character.
    pool = string.ascii_letters + string.digits
    return "".join(random.choice(pool) for _ in range(num))
def random_num(start=-100000, end=100000):
    """
    Return a random integer in the inclusive range [start, end].

    :param start: lower bound (inclusive)
    :param end: upper bound (inclusive)
    :return: a random integer
    """
    return random.randint(start, end)
def get_user_by_request(request):
    # Convenience accessor: the user attached to a Django request object.
    return request.user
| StarcoderdataPython |
11205088 | """
Implements validation and conversion for the OCI Manifest JSON.
See: https://github.com/opencontainers/image-spec/blob/master/manifest.md
Example:
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}
"""
import json
import logging
import hashlib
from collections import namedtuple
from jsonschema import validate as validate_schema, ValidationError
from digest import digest_tools
from image.shared import ManifestException
from image.shared.interfaces import ManifestInterface
from image.shared.types import ManifestImageLayer
from image.docker.schema2 import EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_SIZE
from image.oci import (
OCI_IMAGE_MANIFEST_CONTENT_TYPE,
OCI_IMAGE_CONFIG_CONTENT_TYPE,
OCI_IMAGE_LAYER_CONTENT_TYPES,
OCI_IMAGE_NON_DISTRIBUTABLE_LAYER_CONTENT_TYPES,
OCI_IMAGE_TAR_GZIP_LAYER_CONTENT_TYPE,
OCI_IMAGE_TAR_GZIP_NON_DISTRIBUTABLE_LAYER_CONTENT_TYPE,
ADDITIONAL_LAYER_CONTENT_TYPES,
ALLOWED_ARTIFACT_TYPES,
)
from image.oci.config import OCIConfig
from image.oci.descriptor import get_descriptor_schema
from image.docker.schema1 import DockerSchema1ManifestBuilder
from util.bytes import Bytes
# Keys: field names used in the OCI manifest JSON document.
OCI_MANIFEST_VERSION_KEY = "schemaVersion"
OCI_MANIFEST_MEDIATYPE_KEY = "mediaType"
OCI_MANIFEST_CONFIG_KEY = "config"
OCI_MANIFEST_SIZE_KEY = "size"
OCI_MANIFEST_DIGEST_KEY = "digest"
OCI_MANIFEST_LAYERS_KEY = "layers"
OCI_MANIFEST_URLS_KEY = "urls"
OCI_MANIFEST_ANNOTATIONS_KEY = "annotations"
# Named tuples.
# Parsed `config` descriptor of a manifest.
OCIManifestConfig = namedtuple("OCIManifestConfig", ["size", "digest"])
# One filesystem layer descriptor as listed in the manifest.
OCIManifestLayer = namedtuple(
    "OCIManifestLayer", ["index", "digest", "is_remote", "urls", "compressed_size"]
)
# A layer joined with its image-config history entry and synthesized v1 ids.
OCIManifestImageLayer = namedtuple(
    "OCIManifestImageLayer",
    ["history", "blob_layer", "v1_id", "v1_parent_id", "compressed_size", "blob_digest"],
)
logger = logging.getLogger(__name__)
class MalformedOCIManifest(ManifestException):
    """
    Raised when a manifest fails an assertion that should be true according to the OCI Manifest
    spec (invalid JSON, schema mismatch, or a remote layer without `urls`).
    """
    pass
class OCIManifest(ManifestInterface):
    """
    Parsed, schema-validated representation of an OCI image manifest (the JSON
    document shown in the module docstring), exposing its config descriptor,
    filesystem layers and annotations, and supporting conversion to the legacy
    Docker schema 1 format.
    """
    # JSON schema applied to the raw manifest body in __init__.
    METASCHEMA = {
        "type": "object",
        "properties": {
            OCI_MANIFEST_VERSION_KEY: {
                "type": "number",
                "description": "The version of the schema. Must always be `2`.",
                "minimum": 2,
                "maximum": 2,
            },
            OCI_MANIFEST_MEDIATYPE_KEY: {
                "type": "string",
                "description": "The media type of the schema.",
                "enum": [OCI_IMAGE_MANIFEST_CONTENT_TYPE],
            },
            OCI_MANIFEST_CONFIG_KEY: get_descriptor_schema(ALLOWED_ARTIFACT_TYPES),
            OCI_MANIFEST_LAYERS_KEY: {
                "type": "array",
                "description": "The array MUST have the base layer at index 0. Subsequent layers MUST then follow in stack order (i.e. from layers[0] to layers[len(layers)-1])",
                "items": get_descriptor_schema(
                    OCI_IMAGE_LAYER_CONTENT_TYPES + ADDITIONAL_LAYER_CONTENT_TYPES
                ),
            },
        },
        "required": [OCI_MANIFEST_VERSION_KEY, OCI_MANIFEST_CONFIG_KEY, OCI_MANIFEST_LAYERS_KEY,],
    }
    def __init__(self, manifest_bytes, validate=False):
        """
        Parses and schema-validates the raw manifest bytes.

        Raises MalformedOCIManifest if the payload is not valid JSON, does not
        match METASCHEMA, or declares a remote layer without `urls`.

        NOTE(review): the `validate` flag is never read — schema validation
        always runs; confirm whether it was meant to gate the checks below.
        """
        assert isinstance(manifest_bytes, Bytes)
        self._payload = manifest_bytes
        self._filesystem_layers = None  # lazily built by `filesystem_layers`
        self._cached_built_config = None  # lazily built by `_get_built_config`
        try:
            self._parsed = json.loads(self._payload.as_unicode())
        except ValueError as ve:
            raise MalformedOCIManifest("malformed manifest data: %s" % ve)
        try:
            validate_schema(self._parsed, OCIManifest.METASCHEMA)
        except ValidationError as ve:
            raise MalformedOCIManifest("manifest data does not match schema: %s" % ve)
        # Remote (non-distributable) layers must carry the URLs to fetch them from.
        for layer in self.filesystem_layers:
            if layer.is_remote and not layer.urls:
                raise MalformedOCIManifest("missing `urls` for remote layer")
    def validate(self, content_retriever):
        """
        Performs validation of required assertions about the manifest.
        Raises a ManifestException on failure.
        """
        # Nothing to validate.
    @property
    def is_manifest_list(self):
        """An OCI *image* manifest is never a manifest list."""
        return False
    @property
    def schema_version(self):
        """Schema version is always 2 (enforced by METASCHEMA)."""
        return 2
    @property
    def manifest_dict(self):
        """The parsed manifest as a plain dict."""
        return self._parsed
    @property
    def media_type(self):
        """Content type of this manifest."""
        return OCI_IMAGE_MANIFEST_CONTENT_TYPE
    @property
    def digest(self):
        """Content-addressable sha256 digest of the raw manifest payload."""
        return digest_tools.sha256_digest(self._payload.as_encoded_str())
    @property
    def config(self):
        """Descriptor (size, digest) of the manifest's config blob."""
        config = self._parsed[OCI_MANIFEST_CONFIG_KEY]
        return OCIManifestConfig(
            size=config[OCI_MANIFEST_SIZE_KEY], digest=config[OCI_MANIFEST_DIGEST_KEY],
        )
    @property
    def filesystem_layers(self):
        """
        Returns the file system layers of this manifest, from base to leaf.
        """
        if self._filesystem_layers is None:
            self._filesystem_layers = list(self._generate_filesystem_layers())
        return self._filesystem_layers
    @property
    def leaf_filesystem_layer(self):
        """
        Returns the leaf file system layer for this manifest.
        """
        return self.filesystem_layers[-1]
    @property
    def layers_compressed_size(self):
        """Total compressed size (bytes) of all layer blobs."""
        return sum(layer.compressed_size for layer in self.filesystem_layers)
    @property
    def has_remote_layer(self):
        """True if any layer is non-distributable (must be fetched via `urls`)."""
        for layer in self.filesystem_layers:
            if layer.is_remote:
                return True
        return False
    @property
    def is_image_manifest(self):
        """True when the config blob is an OCI image config (vs. another artifact type)."""
        return self.manifest_dict["config"]["mediaType"] == OCI_IMAGE_CONFIG_CONTENT_TYPE
    @property
    def blob_digests(self):
        """Digests of every referenced blob: all layers plus the config."""
        return [str(layer.digest) for layer in self.filesystem_layers] + [str(self.config.digest)]
    @property
    def local_blob_digests(self):
        """Digests of locally-stored blobs: non-remote layers plus the config."""
        return [str(layer.digest) for layer in self.filesystem_layers if not layer.is_remote] + [
            str(self.config.digest)
        ]
    @property
    def annotations(self):
        """ Returns the annotations on the manifest itself. """
        return self._parsed.get(OCI_MANIFEST_ANNOTATIONS_KEY) or {}
    def get_blob_digests_for_translation(self):
        """Blob digests used when translating this manifest; same as `blob_digests`."""
        return self.blob_digests
    def get_manifest_labels(self, content_retriever):
        """
        Returns the labels from the config blob merged with the manifest's own
        annotations (annotations take precedence). Non-image manifests only
        contribute annotations.
        """
        if not self.is_image_manifest:
            return dict(self.annotations)
        built_config = self._get_built_config(content_retriever)
        labels = {}
        labels.update(built_config.labels or {})
        labels.update(self.annotations)
        return labels
    def get_layers(self, content_retriever):
        """
        Returns the layers of this manifest, from base to leaf or None if this kind of manifest does
        not support layers.
        """
        if not self.is_image_manifest:
            return
        for image_layer in self._manifest_image_layers(content_retriever):
            is_remote = image_layer.blob_layer.is_remote if image_layer.blob_layer else False
            urls = image_layer.blob_layer.urls if image_layer.blob_layer else None
            yield ManifestImageLayer(
                layer_id=image_layer.v1_id,
                compressed_size=image_layer.compressed_size,
                is_remote=is_remote,
                urls=urls,
                command=image_layer.history.command,
                blob_digest=image_layer.blob_digest,
                created_datetime=image_layer.history.created_datetime,
                author=image_layer.history.author,
                comment=image_layer.history.comment,
                internal_layer=image_layer,
            )
    @property
    def bytes(self):
        """The raw manifest payload as given at construction time."""
        return self._payload
    def child_manifests(self, content_retriever):
        """Image manifests have no child manifests."""
        return None
    def _manifest_image_layers(self, content_retriever):
        """
        Yields OCIManifestImageLayer tuples pairing config-history entries with
        their backing blob layers, synthesizing deterministic V1 image IDs by
        hashing the accumulated history/digest stream.
        """
        assert self.is_image_manifest
        # Retrieve the configuration for the manifest.
        config = self._get_built_config(content_retriever)
        history = list(config.history)
        digest_history = hashlib.sha256()
        v1_layer_parent_id = None
        v1_layer_id = None
        # The history entry in OCI config is optional. If none was given, then generate the
        # "images" based on the layer data, with empty config (with exception of the final layer).
        if not history:
            for index, filesystem_layer in enumerate(self.filesystem_layers):
                digest_history.update(str(filesystem_layer.digest).encode("ascii"))
                digest_history.update(b"||")
                v1_layer_parent_id = v1_layer_id
                v1_layer_id = digest_history.hexdigest()
                yield OCIManifestImageLayer(
                    history=config.synthesized_history
                    if index == len(self.filesystem_layers) - 1
                    else None,
                    blob_layer=filesystem_layer,
                    blob_digest=str(filesystem_layer.digest),
                    v1_id=v1_layer_id,
                    v1_parent_id=v1_layer_parent_id,
                    compressed_size=filesystem_layer.compressed_size,
                )
            return
        # Make sure we aren't missing any history entries if it was specified.
        if len(history) < len(self.filesystem_layers):
            raise MalformedOCIManifest(
                "Found less history (%s) than layer blobs (%s)"
                % (len(history), len(self.filesystem_layers))
            )
        blob_index = 0
        for history_index, history_entry in enumerate(history):
            # Every non-empty history entry must correspond to a layer blob.
            if not history_entry.is_empty and blob_index >= len(self.filesystem_layers):
                raise MalformedOCIManifest("Missing history entry #%s" % blob_index)
            v1_layer_parent_id = v1_layer_id
            # Empty history entries map to the well-known empty layer blob.
            blob_layer = None if history_entry.is_empty else self.filesystem_layers[blob_index]
            blob_digest = EMPTY_LAYER_BLOB_DIGEST if blob_layer is None else str(blob_layer.digest)
            compressed_size = EMPTY_LAYER_SIZE if blob_layer is None else blob_layer.compressed_size
            # Create a new synthesized V1 ID for the history layer by hashing its content and
            # the blob associated with it.
            digest_history.update(json.dumps(history_entry.raw_entry).encode("utf-8"))
            digest_history.update(b"|")
            digest_history.update(b"%d" % history_index)
            digest_history.update(b"|")
            digest_history.update(blob_digest.encode("ascii"))
            digest_history.update(b"||")
            v1_layer_id = digest_history.hexdigest()
            yield OCIManifestImageLayer(
                history=history_entry,
                blob_layer=blob_layer,
                blob_digest=blob_digest,
                v1_id=v1_layer_id,
                v1_parent_id=v1_layer_parent_id,
                compressed_size=compressed_size,
            )
            if not history_entry.is_empty:
                blob_index += 1
    @property
    def is_empty_manifest(self):
        """True if the manifest declares no layers at all."""
        return len(self._parsed[OCI_MANIFEST_LAYERS_KEY]) == 0
    @property
    def has_legacy_image(self):
        """True when this manifest can be represented as a legacy Docker V1 image."""
        return self.is_image_manifest and not self.has_remote_layer and not self.is_empty_manifest
    def generate_legacy_layers(self, images_map, content_retriever):
        """Generates legacy (Docker V1) layer metadata for this manifest."""
        assert not self.has_remote_layer
        assert self.is_image_manifest
        # NOTE: We use the DockerSchema1ManifestBuilder here because it already contains
        # the logic for generating the DockerV1Metadata. All of this will go away once we get
        # rid of legacy images in the database, so this is a temporary solution.
        v1_builder = DockerSchema1ManifestBuilder("", "", "")
        self._populate_schema1_builder(v1_builder, content_retriever)
        return v1_builder.build().generate_legacy_layers(images_map, content_retriever)
    def get_leaf_layer_v1_image_id(self, content_retriever):
        """Returns the synthesized V1 image ID of the leaf layer, or None."""
        # NOTE: If there exists a layer with remote content, then we consider this manifest
        # to not support legacy images.
        if self.has_remote_layer or not self.is_image_manifest:
            return None
        return self.get_legacy_image_ids(content_retriever)[-1].v1_id
    def get_legacy_image_ids(self, content_retriever):
        """Returns the synthesized V1 image IDs for all layers, base to leaf, or None."""
        if self.has_remote_layer or not self.is_image_manifest:
            return None
        return [l.v1_id for l in self._manifest_image_layers(content_retriever)]
    def convert_manifest(
        self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever
    ):
        """
        Converts this manifest into one of the allowed media types, going
        through schema 1 if necessary. Returns None if conversion is impossible.
        """
        if self.media_type in allowed_mediatypes:
            return self
        if not self.is_image_manifest:
            return None
        # If this manifest is not on the allowed list, try to convert the schema 1 version (if any)
        schema1 = self.get_schema1_manifest(namespace_name, repo_name, tag_name, content_retriever)
        if schema1 is None:
            return None
        return schema1.convert_manifest(
            allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever
        )
    def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
        """Builds the Docker schema 1 equivalent of this manifest, or None."""
        if self.has_remote_layer or not self.is_image_manifest:
            return None
        v1_builder = DockerSchema1ManifestBuilder(namespace_name, repo_name, tag_name)
        self._populate_schema1_builder(v1_builder, content_retriever)
        return v1_builder.build()
    def unsigned(self):
        """OCI manifests carry no signature; the manifest itself is returned."""
        return self
    def get_requires_empty_layer_blob(self, content_retriever):
        """True if the config declares empty history entries needing the empty-layer blob."""
        if not self.is_image_manifest:
            return False
        schema2_config = self._get_built_config(content_retriever)
        if schema2_config is None:
            return None
        return schema2_config.has_empty_layer
    def _populate_schema1_builder(self, v1_builder, content_retriever):
        """
        Populates a DockerSchema1ManifestBuilder with the layers and config from this schema.
        """
        assert not self.has_remote_layer
        assert self.is_image_manifest
        schema2_config = self._get_built_config(content_retriever)
        layers = list(self._manifest_image_layers(content_retriever))
        for index, layer in enumerate(reversed(layers)):  # Schema 1 layers are in reverse order
            v1_compatibility = schema2_config.build_v1_compatibility(
                layer.history, layer.v1_id, layer.v1_parent_id, index == 0, layer.compressed_size
            )
            v1_builder.add_layer(str(layer.blob_digest), json.dumps(v1_compatibility))
        return v1_builder
    def _get_built_config(self, content_retriever):
        """
        Loads, size-checks, parses and caches the config blob as an OCIConfig.
        Raises MalformedOCIManifest on missing or size-mismatched config.
        """
        assert self.is_image_manifest
        if self._cached_built_config:
            return self._cached_built_config
        config_bytes = content_retriever.get_blob_bytes_with_digest(self.config.digest)
        if config_bytes is None:
            raise MalformedOCIManifest("Could not load config blob for manifest")
        if len(config_bytes) != self.config.size:
            msg = "Size of config does not match that retrieved: %s vs %s" % (
                len(config_bytes),
                self.config.size,
            )
            raise MalformedOCIManifest(msg)
        self._cached_built_config = OCIConfig(Bytes.for_string_or_unicode(config_bytes))
        return self._cached_built_config
    def _generate_filesystem_layers(self):
        """
        Yields an OCIManifestLayer for each entry in the manifest's `layers`
        array, parsing digests and flagging non-distributable (remote) layers.
        """
        for index, layer in enumerate(self._parsed[OCI_MANIFEST_LAYERS_KEY]):
            content_type = layer[OCI_MANIFEST_MEDIATYPE_KEY]
            is_remote = content_type in OCI_IMAGE_NON_DISTRIBUTABLE_LAYER_CONTENT_TYPES
            try:
                digest = digest_tools.Digest.parse_digest(layer[OCI_MANIFEST_DIGEST_KEY])
            except digest_tools.InvalidDigestException:
                raise MalformedOCIManifest(
                    "could not parse manifest digest: %s" % layer[OCI_MANIFEST_DIGEST_KEY]
                )
            yield OCIManifestLayer(
                index=index,
                compressed_size=layer[OCI_MANIFEST_SIZE_KEY],
                digest=digest,
                is_remote=is_remote,
                urls=layer.get(OCI_MANIFEST_URLS_KEY),
            )
class OCIManifestBuilder(object):
    """
    A convenient abstraction around creating new OCIManifest.
    """
    def __init__(self):
        self.config = None  # OCIManifestConfig; must be set before build()
        self.filesystem_layers = []  # OCIManifestLayer list, base to leaf
    def clone(self):
        """Returns a shallow copy of this builder (layers list is copied)."""
        cloned = OCIManifestBuilder()
        cloned.config = self.config
        cloned.filesystem_layers = list(self.filesystem_layers)
        return cloned
    def set_config(self, schema2_config):
        """
        Sets the configuration for the manifest being built.
        """
        self.set_config_digest(schema2_config.digest, schema2_config.size)
    def set_config_digest(self, config_digest, config_size):
        """
        Sets the digest and size of the configuration layer.
        """
        self.config = OCIManifestConfig(size=config_size, digest=config_digest)
    def add_layer(self, digest, size, urls=None):
        """
        Adds a filesystem layer to the manifest.
        """
        self.filesystem_layers.append(
            OCIManifestLayer(
                index=len(self.filesystem_layers),
                digest=digest,
                compressed_size=size,
                urls=urls,
                # A layer with URLs is treated as remote (non-distributable).
                is_remote=bool(urls),
            )
        )
    def build(self, ensure_ascii=True):
        """
        Builds and returns the OCIManifest.
        """
        assert self.filesystem_layers
        assert self.config
        def _build_layer(layer):
            # Serialize one layer descriptor; remote layers get the
            # non-distributable media type and carry their fetch URLs.
            if layer.urls:
                return {
                    OCI_MANIFEST_MEDIATYPE_KEY: OCI_IMAGE_TAR_GZIP_NON_DISTRIBUTABLE_LAYER_CONTENT_TYPE,
                    OCI_MANIFEST_SIZE_KEY: layer.compressed_size,
                    OCI_MANIFEST_DIGEST_KEY: str(layer.digest),
                    OCI_MANIFEST_URLS_KEY: layer.urls,
                }
            return {
                OCI_MANIFEST_MEDIATYPE_KEY: OCI_IMAGE_TAR_GZIP_LAYER_CONTENT_TYPE,
                OCI_MANIFEST_SIZE_KEY: layer.compressed_size,
                OCI_MANIFEST_DIGEST_KEY: str(layer.digest),
            }
        manifest_dict = {
            OCI_MANIFEST_VERSION_KEY: 2,
            OCI_MANIFEST_MEDIATYPE_KEY: OCI_IMAGE_MANIFEST_CONTENT_TYPE,
            # Config
            OCI_MANIFEST_CONFIG_KEY: {
                OCI_MANIFEST_MEDIATYPE_KEY: OCI_IMAGE_CONFIG_CONTENT_TYPE,
                OCI_MANIFEST_SIZE_KEY: self.config.size,
                OCI_MANIFEST_DIGEST_KEY: str(self.config.digest),
            },
            # Layers
            OCI_MANIFEST_LAYERS_KEY: [_build_layer(layer) for layer in self.filesystem_layers],
        }
        json_str = json.dumps(manifest_dict, ensure_ascii=ensure_ascii, indent=3)
        return OCIManifest(Bytes.for_string_or_unicode(json_str))
| StarcoderdataPython |
8107406 | import matplotlib.pyplot as plt
import ipywidgets as widgets
import pynwb
import hdmf
import ndx_grayscalevolume
from collections import OrderedDict
from nwbwidgets import behavior, misc, base, ecephys, image, ophys
from matplotlib.pyplot import Figure
from pynwb.base import ProcessingModule
def fig2widget(fig: Figure, **kwargs):
    """Wrap a matplotlib Figure in an ipywidgets Output widget for display."""
    output = widgets.Output()
    with output:
        plt.show(fig)
    return output
def dict2accordion(d, neurodata_vis_spec):
    """Render a mapping of neurodata objects as a lazily-populated Accordion.

    Each entry initially holds an HTML placeholder; the real widget is built
    (via nwb2widget) the first time its section is expanded.
    """
    children = [widgets.HTML('Rendering...') for _ in d]
    accordion = widgets.Accordion(children=children, selected_index=None)
    for i, label in enumerate(d):
        # BUG FIX: the original code re-set the title unconditionally after
        # this branch, clobbering the "label: description" form.
        if hasattr(d[label], 'description') and d[label].description:
            accordion.set_title(i, label + ': ' + d[label].description)
        else:
            accordion.set_title(i, label)
    def on_selected_index(change):
        # Replace the placeholder with the rendered widget on first expansion.
        if change.new is not None and isinstance(change.owner.children[change.new], widgets.HTML):
            children[change.new] = nwb2widget(list(d.values())[change.new], neurodata_vis_spec=neurodata_vis_spec)
            change.owner.children = children
    accordion.observe(on_selected_index, names='selected_index')
    return accordion
def processing_module(node: ProcessingModule, neurodata_vis_spec: OrderedDict):
    """Render a ProcessingModule by dispatching on its contained data interfaces."""
    return nwb2widget(node.data_interfaces, neurodata_vis_spec=neurodata_vis_spec)
def show_text_fields(node, exclude=('comments', 'interval'), **kwargs):
    """Display a container's scalar fields as read-only Text widgets.

    Fields named in *exclude* and fields with non-scalar values are skipped.
    """
    info = []
    for key in node.fields:
        # Check the field *value*'s type: keys are always strings, so the
        # original `isinstance(key, ...)` test was vacuously true and let
        # complex objects through as well.
        value = getattr(node, key)
        if key not in exclude and isinstance(value, (str, float, int)):
            info.append(widgets.Text(value=repr(value), description=key, disabled=True))
    return widgets.VBox(info)
# Default type -> renderer dispatch table used by nwb2widget. Order matters:
# the first isinstance match wins, so more specific types come first. A value
# may be a callable (rendered directly) or an OrderedDict of labelled views
# (rendered as tabs).
default_neurodata_vis_spec = OrderedDict({
    pynwb.ophys.TwoPhotonSeries: ophys.show_two_photon_series,
    ndx_grayscalevolume.GrayscaleVolume: ophys.show_grayscale_volume,
    pynwb.ophys.PlaneSegmentation: ophys.show_plane_segmentation,
    pynwb.ophys.DfOverF: ophys.show_df_over_f,
    pynwb.ophys.RoiResponseSeries: ophys.show_roi_response_series,
    pynwb.misc.AnnotationSeries: OrderedDict({
        'text': show_text_fields,
        'times': misc.show_annotations}),
    pynwb.core.LabelledDict: dict2accordion,
    pynwb.ProcessingModule: processing_module,
    hdmf.common.DynamicTable: base.show_dynamic_table,
    pynwb.ecephys.LFP: ecephys.show_lfp,
    pynwb.behavior.Position: behavior.show_position,
    pynwb.behavior.SpatialSeries: OrderedDict({
        'over time': behavior.show_spatial_series_over_time,
        'trace': behavior.show_spatial_series}),
    pynwb.image.GrayscaleImage: image.show_grayscale_image,
    pynwb.image.ImageSeries: image.show_image_series,
    pynwb.image.IndexSeries: image.show_index_series,
    pynwb.TimeSeries: base.show_timeseries,
    pynwb.core.NWBDataInterface: base.show_neurodata_base,
})
def vis2widget(vis):
    """Coerce a visualization result (Widget or matplotlib Figure) into a Widget."""
    if isinstance(vis, widgets.Widget):
        return vis
    if isinstance(vis, plt.Figure):
        return fig2widget(vis)
    raise ValueError('unsupported vis type')
def nwb2widget(node, neurodata_vis_spec=default_neurodata_vis_spec):
    """
    Render an NWB object as a widget by dispatching on its type.

    The first matching entry in `neurodata_vis_spec` wins. A dict spec becomes
    a lazily-rendered Tab (one tab per labelled view); a callable spec is
    invoked directly. Unmatched nodes fall back to a printed repr.
    """
    for ndtype, spec in neurodata_vis_spec.items():
        if isinstance(node, ndtype):
            if isinstance(spec, (dict, OrderedDict)):
                tabs_spec = list(spec.items())
                # Only the first tab is rendered eagerly; the rest get placeholders.
                children = [tabs_spec[0][1](node)] + [widgets.HTML('Rendering...')
                                                      for _ in range(len(tabs_spec) - 1)]
                tab = widgets.Tab(children=children)
                [tab.set_title(i, label) for i, (label, _) in enumerate(tabs_spec)]
                def on_selected_index(change):
                    # Materialize a tab's widget the first time it is selected.
                    if isinstance(change.owner.children[change.new], widgets.HTML):
                        children[change.new] = vis2widget(tabs_spec[change.new][1](node))
                        change.owner.children = children
                tab.observe(on_selected_index, names='selected_index')
                return tab
            elif callable(spec):
                return vis2widget(spec(node, neurodata_vis_spec=neurodata_vis_spec))
    # No renderer matched: show the node's repr in an Output widget.
    out1 = widgets.Output()
    with out1:
        print(node)
    return out1
| StarcoderdataPython |
4838926 | import os
import librosa
import tempfile
import numpy as np
import shutil
import random
# Shell command templates, filled in via str.format:
# download a clipped segment of a YouTube video with youtube-dl + ffmpeg
DL_VIDEO = ("ffmpeg -i $(youtube-dl -f \"mp4\" --get-url {url}) -ss {start_time} -to {end_time} {file} </dev/null > /dev/null 2>&1 ;")
# extract a mono audio track at the given sample rate
EXTRACT_AUDIO = ("ffmpeg -i {video_file} -f {audio_ext} -ar {sample_rate} -ac 1 -vn {audio_file} </dev/null > /dev/null 2>&1;")
# trim an audio file with sox to [start_time, start_time + length]
CUT_AUDIO = ("sox {file} {trim_file} trim {start_time} {length};")
# dump video frames as JPEGs at a fixed fps
EXTRACT_FRAMES = ("ffmpeg -i {video_file} -vf fps={fps} {file} </dev/null > /dev/null 2>&1;")
# "<name>.<extension>" filename template
FILE = ("{}.{}")
def url_video(youtube_id):
    """Return the full YouTube watch URL for *youtube_id*."""
    base = "https://www.youtube.com/watch?v="
    return base + youtube_id
def extract_data(video_file, audio_path, frames_path,
                 filename, audio_ext, sample_rate, fps):
    """Extract the audio track and/or video frames from a downloaded clip.

    Args:
        video_file: Path of the source video; it is deleted afterwards.
        audio_path: Directory for the extracted audio, or None to skip audio.
        frames_path: Directory for extracted JPEG frames, or None to skip frames.
        filename: Base name (without extension) for the output files.
        audio_ext: Audio container extension (e.g. "wav").
        sample_rate: Audio sample rate in Hz.
        fps: Frame-extraction rate for ffmpeg's fps filter.
    """
    cmd = ""
    if audio_path is not None:  # PEP 8: compare to None with `is not`
        audio_file = os.path.join(audio_path, FILE.format(filename, audio_ext))
        cmd += EXTRACT_AUDIO.format(video_file=video_file,
                                    audio_ext=audio_ext,
                                    sample_rate=sample_rate,
                                    audio_file=audio_file)
    if frames_path is not None:
        frame_files = os.path.join(frames_path, FILE.format(filename + ":%02d", "jpg"))
        # Dropped the unused `sample_rate` kwarg: EXTRACT_FRAMES has no such
        # placeholder, so str.format silently ignored it.
        cmd += EXTRACT_FRAMES.format(video_file=video_file,
                                     fps=fps,
                                     file=frame_files)
    # NOTE(review): commands are assembled by string formatting and run via
    # os.system, so paths containing shell metacharacters are unsafe; consider
    # subprocess.run([...], shell=False) with argument lists.
    os.system(cmd)
    os.remove(video_file)
def cut_audio(youtube_id, audio_path, filename,
              start_time, end_time, ext="wav"):
    """Trim the extracted audio file in place to [start_time, end_time) using sox."""
    src_path = os.path.join(audio_path, FILE.format(filename, ext))
    tmp_path = os.path.join(tempfile.gettempdir(), FILE.format("trim_" + filename, ext))
    trim_cmd = CUT_AUDIO.format(file=src_path,
                                trim_file=tmp_path,
                                start_time=start_time,
                                length=end_time - start_time)
    os.system(trim_cmd)
    # Replace the original file with the trimmed copy.
    os.remove(src_path)
    shutil.move(tmp_path, src_path)
def download_data(youtube_id, filename, start_time,
                  end_time, audio_path, frames_path, fps=25,
                  video_ext="mp4", audio_ext="wav", sample_rate=16000):
    """
    Download the [start_time, end_time] clip of a YouTube video to a temp file
    and extract its audio/frames into the given directories.

    Returns True when the clip was downloaded (and extraction was attempted),
    False when the download produced no file.
    """
    video_file = os.path.join(tempfile.gettempdir(), FILE.format(filename, video_ext))
    cmd = DL_VIDEO.format(url=url_video(youtube_id),
                          start_time=start_time,
                          end_time=end_time,
                          file=video_file)
    os.system(cmd)
    # The download may fail silently (removed video, network error); only
    # extract when the temporary clip actually exists.
    if os.path.exists(video_file):
        extract_data(video_file, audio_path, frames_path, filename, audio_ext, sample_rate, fps)
        #cut_audio(youtube_id, audio_path, filename, 0, 3)
        return True
    return False
| StarcoderdataPython |
3520606 | <reponame>Zeyu-Li/django_ecommerse_app
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.shortcuts import reverse
# country
from django_countries.fields import CountryField
# (db value, human-readable label) pairs for Item.category.
CATEGORY_CHOICES = (
    ('Tech', 'Technology'),
    ('Access', 'Accessories'),
    ('Trans', 'Transportation'),
    ('Misc', 'Other'),
)
# (db value, human-readable label) pairs for Item.label.
LABEL_CHOICES = (
    ('N', 'New'),
    ('L', 'Limited'),
    ('S', 'Sold Out'),
)
class Item(models.Model):
    ''' a product that can be purchased in the shop '''
    title = models.CharField(max_length=100) # name of product
    category = models.CharField(choices=CATEGORY_CHOICES, max_length=50) # category
    label = models.CharField(choices=LABEL_CHOICES, max_length=1, blank=True, null=True) # label (optional)
    description = models.TextField(max_length=2500) # description of product
    image = models.ImageField(upload_to='images', blank=True, null=True) # product image
    price = models.DecimalField(max_digits=6, decimal_places=2, default=10) # price of item
    discounted_price = models.DecimalField(max_digits=6, decimal_places=2, blank=True, null=True) # discounted price (optional)
    slug = models.SlugField() # custom slug for the item's detail page URL
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        ''' URL of this item's product detail page '''
        return reverse("product", kwargs={
            "slug": self.slug
        })
    def get_add_to_cart_url(self):
        ''' URL that adds this item to the cart '''
        return reverse("add_to_cart", kwargs={
            "slug": self.slug
        })
    def get_remove_from_cart_url(self):
        ''' URL that removes this item from the cart '''
        return reverse("remove_from_cart", kwargs={
            "slug": self.slug
        })
class OrderItem(models.Model):
    ''' one item (with quantity) inside a user's cart/order '''
    user = models.ForeignKey(User, on_delete=models.CASCADE) # owning user
    # NOTE(review): `item` is nullable, but __str__ and get_total_item_price
    # dereference self.item unconditionally — confirm whether null items can occur.
    item = models.ForeignKey(Item, on_delete=models.CASCADE, null=True) # purchased item
    quantity = models.IntegerField(default=1) # how many units of the item
    ordered = models.BooleanField(default=False) # True once the order is placed
    def __str__(self):
        return f"{self.quantity} of {self.item.title}"
    def get_total_item_price(self):
        ''' quantity * unit price, using the discounted price when set '''
        # if has a discount price
        if self.item.discounted_price:
            return self.quantity * self.item.discounted_price
        return self.quantity * self.item.price
class Order(models.Model):
    ''' a user's order: a collection of OrderItems plus billing/payment info '''
    # NOTE(review): `user` is nullable but __str__ dereferences
    # self.user.username — it will raise AttributeError for orphaned orders.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True) # owning user
    items = models.ManyToManyField(OrderItem) # items belonging to this order
    start_date = models.DateTimeField(auto_now_add=True) # when the order was created
    ordered_date = models.DateTimeField() # when the order was placed
    ordered = models.BooleanField(default=False) # True once the order is placed
    billing_address = models.ForeignKey('BillingAddress', on_delete=models.SET_NULL, blank=True, null=True) # billing address
    payment = models.ForeignKey('Payment', on_delete=models.SET_NULL, blank=True, null=True) # payment record
    def __str__(self):
        return self.user.username
    def get_final_price(self):
        ''' total price of all items in the order '''
        total = 0
        for order_item in self.items.all():
            total += order_item.get_total_item_price()
        return total
class BillingAddress(models.Model):
    ''' a user's billing address for checkout '''
    # NOTE(review): `user` is nullable but __str__ dereferences
    # self.user.username — it will raise AttributeError when user is None.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True) # owning user
    street_address = models.CharField(max_length=400)
    apartment_address = models.CharField(max_length=400)
    country = CountryField(multiple=False)
    zip_address = models.CharField(max_length=40)
    def __str__(self):
        return self.user.username
class Payment(models.Model):
    ''' a Stripe charge made by a customer '''
    stripe_charge_id = models.CharField(max_length=50) # Stripe's charge identifier
    # `user` is intentionally nullable so payment records survive account deletion.
    user = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True)
    amount = models.DecimalField(max_digits=6, decimal_places=2) # amount charged
    timestamp = models.DateField(auto_now_add=True) # date of the charge
    def __str__(self):
        # BUG FIX: on_delete=SET_NULL makes `user is None` a routine state, and
        # the original unconditional self.user.username raised AttributeError
        # (e.g. in the admin list view) for payments of deleted accounts.
        return self.user.username if self.user else 'deleted user'
278794 | <reponame>focusonecc/common
# -*- coding: utf-8 -*-
# @Author: theo-l
# @Date: 2017-09-11 10:39:29
# @Last Modified by: theo-l
# @Last Modified time: 2017-09-15 17:41:09
import six
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Paginator
from django.db.models.query import QuerySet, Q
from django.db import models
from django.http import Http404, JsonResponse
from django.views.generic import ListView
from django.views.generic.edit import ModelFormMixin
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from django.urls import reverse
class XView(ListView, ModelFormMixin):
    """
    Combined list/detail CRUD view: dispatch() routes each request to a
    `<http method>_<list|detail>` handler based on whether the detail pk
    URL kwarg is present.
    """
    ###########################################################
    # attribute access method
    ###########################################################
    # FormMixin
    initial = {}
    form_class = None  # form class used on the model detail page
    pk_url_kwargs = 'pk'  # name of the URL kwarg carrying the object's pk on detail pages
    resource_name = None  # resource segment of the URLconf; defaults to the model name
    # attributes of MultipleObjectMixin
    allow_empty = True
    queryset = None
    model = None
    ordering = None  # fields that may be used to sort the model list page
    page_kwargs = 'page'
    paginate_by = None
    paginate_orphans = 0
    paginator_class = Paginator
    context_object_list_name = None  # context name for the object list on the list page
    request_order_key_name = 'order_by'
    fields = None  # specify which fields should be display on the detail page's factory form
    list_template_name = None  # template filename for the model list page
    detail_template_name = None  # template filename for the model detail page
    list_template_suffix = '_list'  # suffix appended to list page template names
    detail_template_suffix = '_detail'  # suffix appended to detail page template names
    context_object_name = None  # context name for the instance on the detail page
    context_form_object_name = 'form'
    search_key_name = 'qk'  # request parameter carrying the search keyword
    search_key_type = 'qt'  # request parameter for the keyword search type **Deprecated**
    search_fields = []  # model field names that keyword search may match against
    detail_add_url_suffix = 'detail'  # URL suffix for the model's create page
    app_label = None  # Django app namespace of this view, used when reversing URLs
    # Suffixes used to build the default list/detail URLconf names.
    list_url_name_suffix = '_manager'  # url(regex, view, name='resource_name'+'_manager')
    detail_url_name_suffix = '_detail'  # url(regex, view, name='resource_name'+'_detail')
    detail_form_action_url_name = 'form_action'  # context variable: URL used as the detail form's action
    new_detail_url_name = 'new_detail_url'  # context variable: URL of the model create page
    list_url_name = 'list_url'  # context variable: URL of the model list page
    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        """
        Route the request to a `<http method>_<list|detail>` handler based on
        the HTTP method and whether the URL carries the detail pk kwarg.
        """
        method_name = request.method.lower()
        # A request is "detail" when the pk URL kwarg is present, else "list".
        self.request_type = 'list' if kwargs.get(self.pk_url_kwargs, None) is None else 'detail'
        if method_name in self.http_method_names:
            handler = getattr(self, '{}_{}'.format(method_name, self.request_type), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        return handler(request, *args, **kwargs)
    @property
    def urls(self):
        """Default list + detail URL patterns for this resource, preceded by prepend_urls()."""
        urls = [
            url(r'{}/$'.format(self.get_resource_name()), self.__class__.as_view(),
                name=self.get_list_url_name()),
            url(r'{}/(?P<{}>[\w\d-]+)/$'.format(self.get_resource_name(), self.pk_url_kwargs), self.__class__.as_view(),
                name=self.get_detail_url_name()),
        ]
        return self.prepend_urls() + urls
    def prepend_urls(self):
        """
        Hook for subclasses: custom URL patterns matched before the default
        list/detail configuration.
        """
        return []
    def get_resource_name(self):
        """
        Build the resource segment of the URL: the explicit resource_name if
        set, otherwise the model name from `model` or `queryset`.
        """
        if self.resource_name:
            return self.resource_name
        elif self.model:
            return self.model._meta.model_name
        elif self.queryset:
            return self.queryset.model._meta.model_name
        else:
            raise ImproperlyConfigured(
                "You need specify at least one of the attributes (resource_name, model, queryset)"
            )
def get_detail_url_name(self):
"""
构造model资源详情URLconf的名称
"""
return '{}{}'.format(self.get_resource_name(), self.detail_url_name_suffix)
def get_list_url_name(self):
"""
构造model资源列表URLConf的名称
"""
return '{}{}'.format(self.get_resource_name(), self.list_url_name_suffix)
    def get_success_url(self):
        """
        Overridden hook: after a successful detail-form submission, redirect
        to the model's list page (namespaced when app_label is set).
        """
        return reverse(
            '{}:{}'.format(self.app_label,
                           self.get_list_url_name()) if self.app_label else self.get_list_url_name())
    def get_list_url(self):
        """
        Reverse the URL of the model list page (namespaced when app_label is set).
        """
        return reverse(
            '{}:{}'.format(self.app_label,
                           self.get_list_url_name()) if self.app_label else self.get_list_url_name())
    def get_list(self, request, *args, **kwargs):
        """
        GET handler for the model list page: fetch the queryset, enforce
        allow_empty, and render the list template.
        """
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        if not allow_empty:
            # When pagination is enabled and object_list is a queryset,
            # it's better to do a cheap query than to load the unpaginated
            # queryset in memory.
            if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):
                is_empty = not self.object_list.exists()
            else:
                is_empty = len(self.object_list) == 0
            if is_empty:
                raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.") % {
                    'class_name': self.__class__.__name__,
                })
        return self.render_to_response(self.get_context_data(**kwargs))
    def get_detail(self, request, *args, **kwargs):
        """
        GET handler for the model detail page. Depending on get_object():
        1. pk resolves to an instance -> render the edit form for it;
        2. otherwise (self.object is None) -> render an empty creation form.
        """
        self.object = self.get_object()
        return self.render_to_response(self.get_context_data(**kwargs))
    def get_object(self, queryset=None):
        """
        Resolve the instance addressed by the pk URL kwarg, or None when it
        does not exist (which the detail handlers treat as "create new").
        """
        queryset = queryset or self.get_queryset()
        try:
            pk = self.kwargs.get(self.pk_url_kwargs)
            return queryset.filter(pk=pk).get()
        except Exception:
            # Broad by design: any lookup failure means "no such object".
            return None
    def post_list(self, request, *args, **kwargs):
        """
        POST handler on the list URL: create a new model instance from the
        submitted form. NOTE(review): the print() calls look like leftover
        debugging output.
        """
        print('POST List: {}'.format(request.path_info))
        form = self.get_form()
        if form.is_valid():
            print('valid post')
            return self.form_valid(form)
        else:
            print('invalid post')
            return self.form_invalid(form)
    def post_detail(self, request, *args, **kwargs):
        """
        POST handler on the detail URL: update the addressed model instance
        from the submitted form. NOTE(review): the print() calls look like
        leftover debugging output.
        """
        print('POST Detail: {}'.format(request.path_info))
        self.object = self.get_object()
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            print(form.errors)
            return self.form_invalid(form)
    def get_detail_context_data(self, **kwargs):
        """
        Build template context for the model detail page: the bound form, the
        form action URL, the instance (when editing) and the template names.
        """
        context = {}
        if self.context_form_object_name not in kwargs:
            kwargs[self.context_form_object_name] = self.get_form()
        if self.detail_form_action_url_name not in kwargs:
            kwargs[self.detail_form_action_url_name] = self.get_detail_form_action_url()
        context.update(kwargs)
        # Editing an existing instance: expose it under 'object' and, when
        # configured, under the custom context object name as well.
        if hasattr(self, 'object') and self.object is not None:
            context['object'] = self.object
            context_object_name = self.get_detail_context_object_name(self.object)
            if context_object_name:
                context[context_object_name] = self.object
        # Insert the detail template names to context
        context.update({'template': self.get_detail_template_names()})
        self._show_context_data(context)
        return context
def get_detail_form_action_url(self):
"""
构造model详情页面的表单action的提交的URL
"""
if hasattr(self, 'object') and self.object:
return reverse('{}:{}'.format(self.app_label,
self.get_detail_url_name()) if self.app_label else self.get_detail_url_name(),
kwargs={self.pk_url_kwarg: self.object.pk})
return reverse(
'{}:{}'.format(self.app_label,
self.get_list_url_name() if self.app_label else self.get_list_url_name()))
    def get_list_context_data(self, **kwargs):
        """
        Build template context for the model list page: pagination state, the
        object list, the template names and the "create new" URL.
        """
        context = {}
        context.update(kwargs)
        context.update({'template': self.get_list_template_names()})
        queryset = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(queryset)
        # NOTE(review): get_list_content_object_name / reorder_context_queryset
        # are not defined in this class — presumably provided by a subclass or
        # mixin; confirm.
        context_object_name = self.get_list_content_object_name(queryset)
        if page_size:
            paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
            context.update({
                'paginator' : paginator,
                'page_obj' : page,
                'is_paginated': is_paginated,
                'object_list' : queryset
            })
            if context_object_name is not None:
                context[context_object_name] = self.reorder_context_queryset(queryset)
        else:
            context.update({
                'paginator' : None,
                'page_obj' : None,
                'is_paginated': None,
                'object_list' : queryset
            })
            if context_object_name is not None:
                context[context_object_name] = self.reorder_context_queryset(queryset)
        # URL for creating a new instance: "<list url><detail_add_url_suffix>/".
        context[self.new_detail_url_name] = ''.join([reverse(
            '{}:{}'.format(self.app_label,
                           self.get_list_url_name()) if self.app_label else self.get_list_url_name()),
            self.detail_add_url_suffix, '/'])
        self._show_context_data(context)
        return context
    def _show_context_data(self, context):
        """Debug helper: print every context key/value for the current request type."""
        print('get {} context:'.format(self.request_type))
        for key, value in context.items():
            # Encode strings for printing; looks like a Python 2 era workaround.
            value = value.encode('utf-8') if isinstance(value, six.string_types) else value
            print('    {} => {}'.format(key, value))
    def get_context_data(self, **kwargs):
        """
        Top-level template context builder: seeds common variables, then
        delegates to get_list_context_data / get_detail_context_data based on
        the request type resolved in dispatch().
        """
        context = {}
        context['view'] = self
        # Insert the url kwargs to context data
        context.update(**kwargs)
        # Insert the request GET query data into context
        context.update(**{key: value.strip() for key, value in self.request.GET.dict().items()})
        context[self.list_url_name] = self.get_list_url()
        # Insert the detail/list context data to context
        extra_context_method = getattr(self, 'get_{}_context_data'.format(self.request_type), None)
        if extra_context_method is not None:
            return extra_context_method(**context)
        return context
def remove_url_keys(self, kwargs):
"""
将以下的参数从请求的查询参数中去除:
1. paginator params
2. keyword search name
3. keyword search type
4. request order by key name
"""
for key in [self.page_kwarg, self.search_key_name, self.search_key_type, self.request_order_key_name]:
if key in kwargs:
del kwargs[key]
    def get_queryset(self):
        """Overridden to support richer list-page operations:

        1. keyword search across self.search_fields (icontains OR query)
        2. client-requested dynamic ordering
        3. filtering on model fields passed as query parameters
        """
        queryset = super(XView, self).get_queryset()
        queries = self.request.GET.copy()
        request_order = self.get_order_by_query(queries)
        # keyword search
        keyword = queries.get(self.search_key_name, '').strip()
        if keyword:
            q = Q()
            for field in self.search_fields:
                q = Q(**{'{}__icontains'.format(field): keyword}) | q
            queryset = queryset.filter(q)
        self.remove_url_keys(queries)
        filters = self.build_filters(**queries)
        # model fields' filter
        if filters:
            queryset = queryset.filter(**filters)
        # request ordering sort
        if request_order:
            queryset = queryset.order_by(*request_order)
        return queryset
    def render_to_response(self, context, **response_kwargs):
        """Render the response with the template carried in *context*.

        Overridden so that the template chosen by the context-building
        methods (context['template']) is used rather than the default.
        """
        response_kwargs.setdefault('content_type', self.content_type)
        # NOTE(review): printing response kwargs looks like a debugging
        # leftover -- consider removing or guarding behind a debug flag.
        self._show_context_data(response_kwargs)
        return self.response_class(request=self.request, template=context['template'],
                                       context=context, using=self.template_engine, **response_kwargs)
    def json_response(self, data):
        """Return *data* wrapped in a JsonResponse for client requests."""
        return JsonResponse(data)
def get_detail_template_names(self):
"""
返回详情页面模板文件名称
"""
if self.detail_template_name is not None:
return [self.detail_template_name]
names = []
if hasattr(self, 'object') and self.object is not None and isinstance(self.object, models.Model):
meta = self.object._meta
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
meta = self.model._meta
elif hasattr(self, 'queryset') and self.queryset is not None and isinstance(self.queryset, QuerySet):
meta = self.queryset.model._meta
if meta is not None:
names.append('{}/{}{}.html'.format(meta.app_label, meta.model_name, self.detail_template_suffix))
if not names:
raise ImproperlyConfigured(
"XView requires either a definition of "
"'detail_template_name' or an model attribute")
return names
    def get_list_template_names(self):
        """Return candidate template names for the list page.

        Uses an explicit ``list_template_name`` when configured; otherwise
        derives '<app_label>/<model_name><list_template_suffix>.html' from
        the object list, the view's model, or its queryset.

        Raises:
            ImproperlyConfigured: if no template name can be derived.
        """
        if self.list_template_name is not None:
            return [self.list_template_name]
        names = []
        if hasattr(self, 'object_list') and self.object_list is not None and isinstance(self.object_list, QuerySet):
            meta = self.object_list.model._meta
        elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
            meta = self.model._meta
        elif hasattr(self, 'queryset') and self.queryset is not None and isinstance(self.queryset, QuerySet):
            meta = self.queryset.model._meta
        else:
            meta = None
        if meta is not None:
            names.append('{}/{}{}.html'.format(meta.app_label, meta.model_name, self.list_template_suffix))
        if not names:
            raise ImproperlyConfigured(
                "XView requires either a definition of "
                "'list_template_name' or an model attribute")
        return names
def get_detail_context_object_name(self, obj):
"""
返回详细上下文中的model实例对象名称
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
return obj._meta.model_name
else:
return None
def get_list_content_object_name(self, object_list):
"""
返回列表上下文中的model实例对象名称
"""
if self.context_object_list_name:
return self.context_object_list_name
if hasattr(object_list, 'model'):
return '{}_list'.format(object_list.model._meta.model_name)
else:
return None
def build_filters(self, **filters):
"""
在此可以用来定制更多的过滤动作条件
"""
return {k: v for k, v in filters.items() if v}
def get_order_by_query(self, queries):
"""
从用户的请求参数中获取排序参数,用来对列表数据进行排序,
TODO 可以通过 ordering 参数指定默认的列表数据排序
"""
if self.request_order_key_name in queries:
return [f for f in queries.getlist(self.request_order_key_name) if f]
return []
def reorder_context_queryset(self, queryset):
"""
一个用来给列表请求的最终的数据列表进行排序的接口
"""
return queryset
| StarcoderdataPython |
3526587 | <reponame>bopopescu/PyHouse_1
"""
-*- test-case-name: PyHouse.src.Modules.web.test.test_web_schedules -*-
@name: PyHouse/src/Modules/web/web_schedules.py
@author: <NAME>
@contact: <EMAIL>
@copyright: (c) 2013-2016 by <NAME>
@license: MIT License
@note: Created on Jun 3, 2013
@summary: Web interface to schedules for the selected house.
"""
# Import system type stuff
import os
from nevow import athena
from nevow import loaders
# Import PyMh files and modules.
from Modules.Core.data_objects import ScheduleBaseData
from Modules.Web.web_utils import JsonUnicode, GetJSONHouseInfo
from Modules.Computer import logging_pyh as Logger
# from Modules.Scheduling import schedule
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])  # directory containing this module
templatepath = os.path.join(webpath, 'template')  # HTML templates live alongside the code
g_debug = 0  # module-level debug switch (0 = off)
# NOTE(review): the trailing space in the logger name looks accidental --
# confirm before changing, as log filters may rely on the exact string.
LOG = Logger.getLogger('PyHouse.webSchedule ')
class SchedulesElement(athena.LiveElement):
    """A 'live' (athena) widget exposing the house schedules to the browser."""
    docFactory = loaders.xmlfile(os.path.join(templatepath, 'schedulesElement.html'))
    jsClass = u'schedules.SchedulesWidget'

    def __init__(self, p_workspace_obj, _p_params):
        """Keep references to the workspace and the main PyHouse data object."""
        self.m_workspace_obj = p_workspace_obj
        self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj

    @athena.expose
    def getHouseData(self):
        """Return the house information serialized for the browser."""
        l_house = GetJSONHouseInfo(self.m_pyhouse_obj)
        return l_house

    @athena.expose
    def saveScheduleData(self, p_json):
        """Create, update or delete one schedule entry sent by the browser.

        *p_json* holds the schedule fields plus a ``Delete`` flag and the
        schedule's ``Key`` (index into House.Schedules).
        """
        l_json = JsonUnicode().decode_json(p_json)
        l_delete = l_json['Delete']
        l_schedule_ix = int(l_json['Key'])
        if l_delete:
            # KeyError added: deleting a missing dict entry raises KeyError,
            # not AttributeError (which only covers a missing House/Schedules).
            try:
                del self.m_pyhouse_obj.House.Schedules[l_schedule_ix]
            except (AttributeError, KeyError) as e:
                # Bug fix: .format() was previously called on the *return
                # value* of LOG.warning() (None), raising AttributeError and
                # masking the real failure.
                LOG.warning('Failed to delete schedule: {0:}, ix:{1:}'.format(e, l_schedule_ix))
            return
        try:
            l_obj = self.m_pyhouse_obj.House.Schedules[l_schedule_ix]
        except KeyError:
            l_obj = ScheduleBaseData()  # a brand-new schedule was added in the browser
        # Copy the edited fields back onto the schedule object.
        l_obj.Name = l_json['Name']
        l_obj.Active = l_json['Active']
        l_obj.Key = l_schedule_ix
        #
        l_obj.ScheduleType = l_json['ScheduleType']
        l_obj.Time = l_json['Time']
        l_obj.DOW = l_json['DOW']
        l_obj.ScheduleMode = l_json['ScheduleMode']
        #
        l_obj.Level = int(l_json['Level'])
        l_obj.LightName = l_json['LightName']
        l_obj.Rate = l_json['Rate']
        l_obj.RoomName = l_json['RoomName']
        #
        l_obj._DeleteFlag = l_json['Delete']
        self.m_pyhouse_obj.House.Schedules[l_schedule_ix] = l_obj
        # Restart the scheduler so the change takes effect immediately.
        self.m_pyhouse_obj.APIs.House.ScheduleAPI.RestartSchedule()
# ## END DBK
| StarcoderdataPython |
6412280 | <gh_stars>0
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
@python_2_unicode_compatible
class Embed(CMSPlugin):
    """django-CMS plugin model for an embedded, aspect-ratio-constrained frame."""
    # URL (or embed source) to display.
    source = models.CharField(
        _('Source'),
        max_length=255,
        blank=False,
        null=True,
    )
    allow_fullscreen = models.BooleanField(
        _('Allow fullscreen'),
        default=False,
    )
    # Aspect-ratio keys; presumably mapped to Bootstrap-style
    # embed-responsive classes in the template -- confirm.
    ratio = models.CharField(
        _('Ratio'),
        max_length=5,
        blank=False,
        null=False,
        choices=(
            ('21by9', _('21:9')),
            ('16by9', _('16:9')),
            ('4by3', _('4:3')),
            ('1by1', _('1:1')),
        ),
        default='16by9',
    )
    def __str__(self):
        # Display the source URL, or the literal string 'None' when unset.
        if self.source:
            return '%s' % self.source
        else:
            return 'None'
| StarcoderdataPython |
1884874 | import h5py
import numpy as np
from tensorflow.keras.utils import Sequence
import os
import sys
sys.path.append(
os.path.realpath(os.path.join(os.path.abspath(__file__), os.path.pardir, os.path.pardir, os.path.pardir)))
from backend.ocr_service.tokenization import Tokenizer
import backend.ocr_service.image_processing as image_processing
class HDF5Dataset:
    """Load train/valid/test OCR splits from one HDF5 file into generators.

    The file is expected to contain 'train', 'valid' and 'test' groups, each
    with a 'dt' dataset (images) and a 'gt' dataset (ground-truth labels).
    """
    def __init__(self, source_path, charset, max_text_length, batch_size):
        self.source_path = source_path
        self.tokenizer = Tokenizer(charset, max_text_length)
        self.training_batch_size = batch_size
        # NOTE: all three splits are read fully into memory here.
        with h5py.File(self.source_path, "r") as source:
            # Training data gets the augmenting generator.
            self.training_data_generator = TrainingDataGenerator(samples=np.array(source["train"]['dt']),
                                                                 labels=np.array(source["train"]['gt']),
                                                                 batch_size=batch_size,
                                                                 tokenizer=self.tokenizer)
            self.training_set_size = self.training_data_generator.size
            self.valid_data_generator = DataGenerator(samples=np.array(source["valid"]['dt']),
                                                      batch_size=batch_size,
                                                      labels=np.array(source["valid"]['gt']),
                                                      tokenizer=self.tokenizer)
            self.valid_set_size = self.valid_data_generator.size
            self.test_data_generator = DataGenerator(samples=np.array(source["test"]['dt']),
                                                     batch_size=batch_size,
                                                     labels=np.array(source["test"]['gt']),
                                                     tokenizer=self.tokenizer)
            self.test_set_size = self.test_data_generator.size
class DataGenerator(Sequence):
    """Keras Sequence yielding (normalized images, padded token labels) batches."""
    def __init__(self,
                 samples: np.array,
                 labels: np.array,
                 batch_size: int,
                 tokenizer: Tokenizer):
        self.samples = samples
        self.labels = labels
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.current_epoch = 0
        self.size = len(self.labels)
        # Last batch may be smaller, hence the ceiling division.
        self.steps_number = int(np.ceil(self.size / self.batch_size))
    def __len__(self):
        """
        Denote the number of batches per epoch
        """
        return self.steps_number
    def __getitem__(self, index):
        """
        Generate the next batch of validation data.
        """
        x_valid = self.samples[index * self.batch_size:(index + 1) * self.batch_size]
        x_valid = image_processing.normalize(x_valid)
        y_valid = self.labels[index * self.batch_size:(index + 1) * self.batch_size]
        # Encode texts to token ids and right-pad to the tokenizer's max length.
        y_valid = [self.tokenizer.encode(y) for y in y_valid]
        y_valid = [np.pad(y, (0, self.tokenizer.maxlen - len(y))) for y in y_valid]
        y_valid = np.asarray(y_valid, dtype=np.int16)
        return x_valid, y_valid
    def on_epoch_end(self):
        """
        Update indexes after each epoch
        """
        self.current_epoch += 1
class TrainingDataGenerator(DataGenerator):
    """DataGenerator variant that augments images and shuffles every epoch."""
    def __init__(self,
                 samples: np.array,
                 labels: np.array,
                 batch_size: int,
                 tokenizer: Tokenizer):
        # the DataGenerator initializer will save and initialize
        # samples, labels, tokenizer, batch_size, current_epoch, size and step_number
        super().__init__(samples=samples,
                         labels=labels,
                         batch_size=batch_size,
                         tokenizer=tokenizer)
        # this will be useful to shuffle the samples and labels at the end of each epoch
        self.arange = np.arange(len(self.labels))
        np.random.seed(42)  # fixed seed for reproducible shuffling
    # override the DataGenerator __getitem__ in order to also perform augmentation
    def __getitem__(self, index):
        """
        Generate the next batch of training data.
        Augment the X
        """
        x_train = self.samples[index * self.batch_size:(index + 1) * self.batch_size]
        y_train = self.labels[index * self.batch_size:(index + 1) * self.batch_size]
        # Geometric/morphological augmentation followed by albumentations.
        x_train = image_processing.manual_augmentation(x_train,
                                                       rotation_range=0.5,
                                                       scale_range=0.02,
                                                       height_shift_range=0.02,
                                                       width_shift_range=0.01,
                                                       erode_range=3,
                                                       dilate_range=3)
        x_train = image_processing.albumentations_augmentation(x_train)
        x_train = image_processing.normalize(x_train)
        y_train = [self.tokenizer.encode(y) for y in y_train]
        y_train = [np.pad(y, (0, self.tokenizer.maxlen - len(y))) for y in y_train]
        y_train = np.asarray(y_train, dtype=np.int16)
        return x_train, y_train
    # override the DataGenerator on_epoch_end method to shuffle the samples and labels
    def on_epoch_end(self):
        """
        Update indexes after each epoch
        """
        self.current_epoch += 1
        np.random.shuffle(self.arange)
        self.samples = self.samples[self.arange]
        self.labels = self.labels[self.arange]
| StarcoderdataPython |
70262 | """
Tunnel generator
Generates actually two tunnels. One of glass
(so we can see through), on of air (so we can walk inside).
Positioning of player can still be improved.
"""
import mcpi.minecraft as minecraft
import mcpi.block as block
def generate_tunnel():
    """Carve a 100-block tunnel in front of the player.

    Builds a 4x4 glass shell with a 2x2 air core, so the tunnel is both
    see-through and walkable.
    """
    mc = minecraft.Minecraft.create()
    pos = mc.player.getPos()
    # Outer shell of glass.
    mc.setBlocks(pos.x, pos.y, pos.z,
                 pos.x + 3, pos.y + 3, pos.z + 100,
                 block.GLASS)
    # Hollow out the inside with air so the player can walk through.
    mc.setBlocks(pos.x + 1, pos.y + 1, pos.z,
                 pos.x + 2, pos.y + 2, pos.z + 100,
                 block.AIR)


if __name__ == '__main__':
    generate_tunnel()
| StarcoderdataPython |
3513145 | <filename>admin01/serializer.py
from rest_framework import serializers
from admin01.models import *
from reception.serializers import *
# Serializer for user membership levels.
class UserLevelSerializer(serializers.ModelSerializer):
    # Percentage-style discount; default 10 means "no discount" (10 = 100%).
    discount = serializers.DecimalField(max_digits=7, decimal_places=2, default=10)
    class Meta:
        model = UserLevel
        fields = "__all__"
# Read serializer for user-level upgrade conditions.
class ConditionSerializersModel(serializers.ModelSerializer):
    level = serializers.CharField(source='level.level')  # human-readable level name
    level_id = serializers.IntegerField()
    class Meta:
        model = UserLevelCondition
        fields = '__all__'
# Write (create/update) serializer for user-level upgrade conditions.
class UserLevelConditionSerializers(serializers.Serializer):
    level_id = serializers.IntegerField()
    time = serializers.IntegerField()
    amount = serializers.DecimalField(max_digits=7, decimal_places=2)
    def create(self, data):
        """Create and return a new UserLevelCondition from validated data."""
        m = UserLevelCondition.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it."""
        instance.level_id = validated_data['level_id']
        instance.time = validated_data['time']
        instance.amount = validated_data['amount']
        instance.save()
        return instance
# Read serializer for course tags.
class TagSerializersModel(serializers.ModelSerializer):
    class Meta:
        model = Tag
        fields = '__all__'
# Read serializer for site (in-app) messages.
class SiteMessageSerializersModel(serializers.ModelSerializer):
    class Meta:
        model = SiteMessage
        fields = '__all__'
# Write (create/update) serializer for site messages.
class SiteMessageSerializers(serializers.Serializer):
    title = serializers.CharField(max_length=50)
    content = serializers.CharField()
    def create(self, data):
        """Create and return a new SiteMessage from validated data."""
        m = SiteMessage.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it."""
        instance.title = validated_data['title']
        instance.content = validated_data['content']
        instance.save()
        return instance
# Read serializer for learning paths, with course count and ordered stage list.
class PathSerializersModel(serializers.ModelSerializer):
    num = serializers.SerializerMethodField()
    stageList = serializers.SerializerMethodField()
    def get_num(self, row):
        """Number of courses attached to this path (0 on lookup failure)."""
        # NOTE(review): bare except hides real query errors -- consider
        # narrowing to the specific expected exception.
        try:
            n = Course.objects.filter(path_id=row.id).count()
        except:
            n = 0
        return n
    def get_stageList(self, row):
        """Serialized stages of this path ordered by 'sort' ([] on failure)."""
        try:
            sList = PathStage.objects.filter(path_id=row.id).order_by('sort')
            sList = PathStageSerializersModel(sList, many=True)
            return sList.data
        except:
            return []
    class Meta:
        model = Path
        fields = '__all__'
# Lightweight path serializer: course count only, no nested stage list.
class LightPathSerializersModel(serializers.ModelSerializer):
    num = serializers.SerializerMethodField()
    def get_num(self, row):
        """Number of courses attached to this path (0 on lookup failure)."""
        try:
            n = Course.objects.filter(path_id=row.id).count()
        except:
            n = 0
        return n
    class Meta:
        model = Path
        fields = '__all__'
# Write (create/update) serializer for learning paths.
class PathSerializers(serializers.Serializer):
    name = serializers.CharField(max_length=50)
    pic = serializers.CharField(max_length=255)
    info = serializers.CharField(max_length=255)
    study_num = serializers.IntegerField(default=0)  # learner count, defaults to 0
    def create(self, data):
        """Create and return a new Path from validated data."""
        m = Path.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it."""
        instance.name = validated_data['name']
        instance.pic = validated_data['pic']
        instance.info = validated_data['info']
        instance.study_num = validated_data['study_num']
        instance.save()
        return instance
# Read serializer for path stages, with the stage's course list nested in.
class PathStageSerializersModel(serializers.ModelSerializer):
    path_name = serializers.CharField(source='path.name')
    # NOTE(review): declared CharField although it holds an integer id --
    # confirm whether clients rely on the string form.
    path_id = serializers.CharField()
    courseList = serializers.SerializerMethodField()
    def get_courseList(self, row):
        """Serialized courses belonging to this stage ([] on failure)."""
        try:
            cList = Course.objects.filter(pathstage_id=row.id).all()
            cList = CourseSerializersModel(cList, many=True)
            return cList.data
        except:
            return []
    class Meta:
        model = PathStage
        fields = '__all__'
# Write (create/update) serializer for path stages.
class PathStageSerializers(serializers.Serializer):
    name = serializers.CharField(max_length=50)
    path_id = serializers.IntegerField()
    sort = serializers.IntegerField()
    def create(self, data):
        """Create and return a new PathStage from validated data."""
        m = PathStage.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it.

        Bug fix: ``name`` was previously never written on update, so renaming
        a stage silently did nothing even though the field is declared above.
        """
        instance.name = validated_data['name']
        instance.path_id = validated_data['path_id']
        instance.sort = validated_data['sort']
        instance.save()
        return instance
# Read serializer for courses, flattening related names and nesting prices.
class CourseSerializersModel(serializers.ModelSerializer):
    teacher = serializers.CharField(source='teacher.name')
    pathstage = serializers.CharField(source='pathstage.name')
    # Bug fix: 'path' previously used source='pathstage.name' (copy/paste),
    # so the path column displayed the stage name instead of the path name.
    path = serializers.CharField(source='path.name')
    tag = serializers.CharField(source='tag.name')
    teacher_id = serializers.IntegerField()
    path_id = serializers.IntegerField()
    tag_id = serializers.IntegerField()
    prices = serializers.SerializerMethodField()
    def get_prices(self, row):
        """This course's price tiers ordered by discount ({} on failure)."""
        try:
            n = Price.objects.filter(course_id=row.id).all().order_by('discount')
            n = PriceSerializersModel(n, many=True).data
        except Exception:
            n = {}
        return n
    class Meta:
        model = Course
        fields = '__all__'
# Write (create/update) serializer for courses.
class CourseSerializers(serializers.Serializer):
    title = serializers.CharField(max_length=50)
    pic = serializers.CharField(max_length=255)
    info = serializers.CharField(max_length=255)
    teacher_id = serializers.IntegerField()
    path_id = serializers.IntegerField()
    pathstage_id = serializers.IntegerField()
    online = serializers.IntegerField()
    member = serializers.IntegerField()
    attention = serializers.IntegerField()
    learn = serializers.IntegerField()
    comment_num = serializers.IntegerField()
    tag_id = serializers.IntegerField()
    section_num = serializers.IntegerField()
    recommand = serializers.CharField(max_length=50)
    detail = serializers.CharField(max_length=50)
    def create(self, data):
        """Create and return a new Course from validated data."""
        m = Course.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy every validated field onto *instance*, save and return it."""
        instance.title = validated_data['title']
        instance.pic = validated_data['pic']
        instance.info = validated_data['info']
        instance.teacher_id = validated_data['teacher_id']
        instance.pathstage_id = validated_data['pathstage_id']
        instance.path_id = validated_data['path_id']
        instance.online = validated_data['online']
        instance.member = validated_data['member']
        instance.attention = validated_data['attention']
        instance.learn = validated_data['learn']
        instance.comment_num = validated_data['comment_num']
        instance.tag_id = validated_data['tag_id']
        instance.section_num = validated_data['section_num']
        instance.recommand = validated_data['recommand']
        instance.detail = validated_data['detail']
        instance.save()
        return instance
# Read serializer for teachers, with the number of courses they give.
class TeacherSerializersModel(serializers.ModelSerializer):
    num = serializers.SerializerMethodField()
    def get_num(self, row):
        """Number of courses taught by this teacher (0 on lookup failure)."""
        try:
            n = Course.objects.filter(teacher_id=row.id).count()
        except:
            n = 0
        return n
    class Meta:
        model = Teacher
        fields = '__all__'
# Write (create/update) serializer for teachers.
class TeacherSerializers(serializers.Serializer):
    name = serializers.CharField(max_length=50)
    desc = serializers.CharField(max_length=255)
    pic = serializers.CharField(max_length=255)
    def create(self, data):
        """Create and return a new Teacher from validated data."""
        m = Teacher.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it."""
        instance.name = validated_data['name']
        instance.desc = validated_data['desc']
        instance.pic = validated_data['pic']
        instance.save()
        return instance
# Read serializer for course sections.
class SectionSerializersModel(serializers.ModelSerializer):
    class Meta:
        model = Section
        fields = '__all__'
# Write (create/update) serializer for course sections.
class SectionSerializers(serializers.Serializer):
    course_id = serializers.IntegerField()
    section = serializers.CharField()
    video = serializers.CharField()
    sort = serializers.IntegerField()
    def create(self, data):
        """Create and return a new Section from validated data."""
        m = Section.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy validated fields onto *instance*, save and return it."""
        instance.course_id = validated_data['course_id']
        instance.section = validated_data['section']
        instance.video = validated_data['video']
        instance.sort = validated_data['sort']
        instance.save()
        return instance
# Read serializer for price tiers; resolves the tier type to a level name.
class PriceSerializersModel(serializers.ModelSerializer):
    level = serializers.SerializerMethodField()
    def get_level(self, row):
        """Level name for this tier, '' when type is 0 (non-member price).

        NOTE(review): UserLevel.objects.get can raise DoesNotExist for a
        stale type id -- confirm whether that should be handled here.
        """
        if row.type > 0:
            c = UserLevel.objects.get(id=row.type)
            name = c.level
        else:
            name = ''
        return name
    class Meta:
        model = Price
        fields = '__all__'
# Write serializer for price tiers.
class PriceSerializers(serializers.Serializer):
    type = serializers.IntegerField()
    course_id = serializers.IntegerField()
    discount = serializers.FloatField()
    discoun_price = serializers.DecimalField(max_digits=7, decimal_places=2)
    def create(self, data):
        """Create and return a new Price from validated data."""
        m = Price.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Update only the discount fields; type/course are presumably
        immutable once created -- confirm."""
        instance.discount = validated_data['discount']
        instance.discoun_price = validated_data['discoun_price']
        instance.save()
        return instance
# Read serializer for coupons; resolves the optional course id to a title.
class CouponModelSerializer(serializers.ModelSerializer):
    course_name = serializers.SerializerMethodField()
    def get_course_name(self, row):
        """Title of the coupon's target course, or '' when unset/missing."""
        if row.course > 0:
            name = ''
            try:
                c = Course.objects.get(id=row.course)
                name = c.title
            except Exception:
                # Bug fix: this branch previously read ``name: ''`` -- an
                # annotation-only statement that assigned nothing (harmless
                # only because name was pre-initialized above).
                name = ''
        else:
            name = ''
        return name
    class Meta:
        model = Coupon
        fields = '__all__'
# Write (create/update) serializer for coupons.
class CouponSerializer(serializers.Serializer):
    name = serializers.CharField()  # coupon name
    count = serializers.IntegerField()  # number of coupons issued
    type = serializers.IntegerField()  # 1 = first-registration gift, 2 = site-wide, 3 = specific course, 4 = specific member level
    course = serializers.IntegerField(default=0)  # target course id when type == 3
    start_time = serializers.DateTimeField()  # validity start
    end_time = serializers.DateTimeField()  # validity end
    status = serializers.IntegerField()  # 1 = usable, 2 = expired
    condition = serializers.DecimalField(max_digits=7, decimal_places=2)  # minimum spend to use the coupon
    money = serializers.DecimalField(max_digits=7, decimal_places=2)  # coupon value
    def create(self, data):
        """Create and return a new Coupon from validated data."""
        m = Coupon.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        """Copy every validated field onto *instance*, save and return it."""
        instance.name = validated_data['name']
        instance.count = validated_data['count']
        instance.type = validated_data['type']
        instance.course = validated_data['course']
        instance.start_time = validated_data['start_time']
        instance.end_time = validated_data['end_time']
        instance.status = validated_data['status']
        instance.condition = validated_data['condition']
        instance.money = validated_data['money']
        instance.save()
        return instance
# Read serializer for promotional activities.
class ActiveSerializersModel(serializers.ModelSerializer):
    class Meta:
        model = Act
        fields = '__all__'
# Read serializer for activity time slots.
class TimeSerializersModel(serializers.ModelSerializer):
    class Meta:
        model = Time
        fields = '__all__'
# Read serializer for flash-sale entries, flattening course/activity/slot data.
class SkSerializersModel(serializers.ModelSerializer):
    course_name = serializers.CharField(source='course.title')
    act_name = serializers.CharField(source='act.title')
    start = serializers.CharField(source='time.start')
    end = serializers.CharField(source='time.end')
    pic = serializers.CharField(source='course.pic')
    info = serializers.CharField(source='course.info')
    section_num = serializers.CharField(source='course.section_num')
    date = serializers.CharField(source='act.date')
    class Meta:
        model = Sk
        fields = '__all__'
# Read serializer for per-user site messages, flattening the message body.
class UserSiteMessageSerializersModel(serializers.ModelSerializer):
    title = serializers.CharField(source='message.title')
    content = serializers.CharField(source='message.content')
    class Meta:
        model = UserSiteMessage
        fields = '__all__'
# Read serializer for a user's course progress, flattening related names.
class UserCourseSerializersModel(serializers.ModelSerializer):
    course_name = serializers.CharField(source='course.title')
    pic = serializers.CharField(source='course.pic')
    section_name = serializers.CharField(source='section.section')
    class Meta:
        model = UserCourse
        fields = '__all__'
# Read serializer for study reports, with author/course names and a length count.
class ReportSerializersModel(serializers.ModelSerializer):
    username = serializers.CharField(source='user.username')
    pic = serializers.CharField(source='user.img')
    section_name = serializers.CharField(source='section.section')
    course_name = serializers.CharField(source='course.title')
    count = serializers.SerializerMethodField()
    def get_count(self, row):
        """Character count of the report body (0 when content is missing)."""
        try:
            n = len(row.report_content)
        except:
            n = 0
        return n
    class Meta:
        model = Report
        fields = '__all__'
# Write (create/update) serializer for section study reports.
class ReportSerializers(serializers.Serializer):
    section_id = serializers.IntegerField()
    user_id = serializers.IntegerField()
    report_content = serializers.CharField()
    report_title = serializers.CharField()
    report_browse = serializers.IntegerField(default=0, allow_null=True)
    linknum = serializers.IntegerField(default=0, allow_null=True)
    course_id = serializers.IntegerField()
    def create(self, data):
        """Create and return a new Report from validated data."""
        m = Report.objects.create(**data)
        return m
    def update(self, instance, data):
        """Copy validated fields onto *instance*, save and return it.

        Bug fixes: the lookup keys now match the declared field names
        (``section_id``/``user_id``/``course_id``; the previous ``section``/
        ``user``/``course`` keys raised KeyError), and the garbled attribute
        name ``seclinknumtion`` is corrected to ``linknum``.
        """
        instance.section_id = data['section_id']
        instance.user_id = data['user_id']
        instance.report_content = data['report_content']
        instance.report_title = data['report_title']
        instance.report_browse = data['report_browse']
        instance.linknum = data['linknum']
        instance.course_id = data['course_id']
        instance.save()
        return instance
| StarcoderdataPython |
11263421 | import numpy as np
from numpy import copy
from functools import reduce
from math import pi
from nevergrad.optimization import optimizerlib,registry
from nevergrad.optimization.utils import Archive,Value
import threading
import nevergrad
from hqca.opts.core import *
class nevergrad_opt(OptimizerInstance):
    """OptimizerInstance backed by a nevergrad ask/tell optimizer.

    Keeps a pool of the ``Nv`` best entries, each a list
    ``[energy, nevergrad point, parameter vector, distance-from-best]``.
    Convergence is judged either by iteration count or by the maximum
    distance of pool members from the current best point ('MaxDist').
    """
    def __init__(self, **kwargs):
        """Initialize base optimizer state and nevergrad-specific keywords."""
        OptimizerInstance.__init__(self, **kwargs)
        OptimizerInstance._nevergrad_keywords(self, **kwargs)

    def check(self, initial=False):
        """Update the convergence criterion ``self.crit`` and the vector pool."""
        if self._conv_crit in ['iterations']:
            # crit stays 1 (not converged) until the call budget is exhausted.
            if self.energy_calls >= self.max_iter:
                self.crit = 0
            else:
                self.crit = 1
        elif self._conv_crit == 'ImpAv':
            pass  # TODO: improvement-average criterion not implemented
        elif self._conv_crit in ['default', 'MaxDist']:
            if initial:
                # Sort the freshly seeded pool by energy (best first).
                self.vectors.sort(key=lambda x: x[0], reverse=False)
                self._update_MaxDist()
            else:
                # Euclidean distance of the new point from the current best.
                dist = 0
                for i in range(len(self.vectors[0][2])):
                    dist += (self.vectors[0][2][i] - self.y[i]) ** 2
                dist = dist ** (1 / 2)
                comp2 = self.E < self.vectors[0][0]
                if not comp2:
                    # Not a new best: insert at its distance-ordered slot
                    # (if any) and evict the worst entry.
                    for i in reversed(range(1, self.Nv)):
                        comp1 = dist <= self.vectors[i][3]
                        comp2 = dist > self.vectors[i - 1][3]
                        if comp1 and comp2:
                            self.vectors.insert(
                                i,
                                [self.E, self.x, self.y.copy(), dist])
                            del self.vectors[self.Nv]
                            break
                elif comp2:
                    # New best: put it at the front and recompute distances.
                    self.vectors.insert(
                        0,
                        [self.E, self.x, self.y.copy(), 0])
                    del self.vectors[self.Nv]
                    self._update_MaxDist()
                self.best_f = self.vectors[0][0]
                self.best_x = self.vectors[0][1]
                self.best_y = self.vectors[0][2]
                self.crit = self.max_d

    def _update_MaxDist(self):
        """Recompute each pool member's distance from the best vector; track the max."""
        self.max_d = 0
        for n, v in enumerate(self.vectors):
            if n == 0:
                self.vectors[0][3] = 0
            else:
                dist = 0
                for i in range(len(self.vectors[0][2])):
                    dist += (self.vectors[0][2][i] - v[2][i]) ** 2
                dist = dist ** (1 / 2)
                v[3] = dist
                if dist >= self.max_d:
                    self.max_d = dist
                    self.max_n = n

    def initialize(self, start):
        """Create the nevergrad optimizer and seed the pool with Nv points."""
        self.Np = len(start)
        self.temp_dat = []
        if self.shift is None:  # idiomatic None test (was type(...) == type(None))
            self.shift = start
        self.opt = registry[self.opt_name](
            len(start),
            budget=self.max_iter
        )
        for i in range(0, self.Nv):
            x = self.opt.ask()
            y = np.asarray(x.args)[0] * self.unity + self.shift
            E = self.f(y)
            self.temp_dat.append([x.args, E])
            self.energy_calls += 1
            self.vectors.append([E, x, y, 0])
            self.opt.tell(x, E)
        self.x = x
        self.y = y
        self.E = E
        self.check(initial=True)

    def next_step(self):
        """Ask for one point, evaluate it, tell the optimizer, and update state."""
        self.x = self.opt.ask()
        self.y = np.asarray(self.x.args)[0] * self.unity + self.shift
        self.E = self.f(self.y)
        self.opt.tell(self.x, self.E)
        self.temp_dat.append([self.x.args, self.E])
        self.check()
        self.energy_calls += 1

    def save_opt(self):
        '''
        Drop the unpicklable nevergrad objects so the instance can be saved
        with pickle; reload_opt() reconstructs them from temp_dat.
        '''
        del self.opt, self.x

    def reload_opt(self):
        '''
        Rebuild the nevergrad optimizer and replay the stored (ask, energy)
        history from temp_dat. Returns True on a full replay, False if the
        replay failed part-way.
        '''
        self.opt = registry[self.opt_name](
            self.Np,
            budget=self.max_iter
        )
        okay = True
        try:
            for item in self.temp_dat:
                x = self.opt.ask()
                self.opt.tell(x, item[1])
        except KeyError:
            # Replay can fail when the stored history no longer matches the
            # optimizer's ask sequence. Bug fix: the original handler also
            # executed `it += 1` with `it` never defined, so any KeyError
            # here turned into a NameError.
            okay = False
        return okay
| StarcoderdataPython |
8073729 | <reponame>bradywatkinson/sublime-flowtype
from FlowType.tests.base import TestCase
class TestAddPragma(TestCase):
    """Test suite for `flowtype_add_pragma` command."""
    def test_no_pragma_if_not_js(self):
        """Do not add a pragma if it's not a JavaScript file."""
        # Switch the buffer to Python syntax so the command should no-op.
        self.view.set_syntax_file("Packages/Python/Python.tmLanguage")
        string = """Lorem ipsum dolor sit amet.
Ut sit amet gravida nibh."""
        self.setText(string)
        self.view.run_command("flowtype_add_pragma")
        first_row = self.getRow(1)
        # First row must be unchanged -- no pragma inserted.
        self.assertEqual(first_row, "Lorem ipsum dolor sit amet.")
    def test_add_pragma(self):
        """Add a pragma if it's a JavaScript file."""
        string = """Lorem ipsum dolor sit amet.
Ut sit amet gravida nibh."""
        self.setText(string)
        self.view.run_command("flowtype_add_pragma")
        first_row = self.getRow(1)
        # The flow pragma comment is prepended as the new first row.
        self.assertEqual(first_row, "// @flow")
| StarcoderdataPython |
9776782 | <filename>Misc/splat.py
# The * ("splat") parameter collects any number of positional arguments.
def sumAll(*args):
    """Return the sum of every positional argument (0 when called with none)."""
    return sum(args)

print(sumAll(2, 3, 4, 5))
1858597 | <filename>baseconverter/__init__.py
from baseconverter import *
from exceptions import *
import exceptions
import baseconverter
| StarcoderdataPython |
1910298 | <filename>tensorflow_fold/blocks/test_lib.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common methods for testing TensorFlow Fold."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
# import google3
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow_fold.util import proto_tools
# pylint: disable=g-import-not-at-top,unused-import
if six.PY3:
import unittest.mock as mock
else:
import mock
# pylint: enable=g-import-not-at-top,unused-import
# Make sure SerializedMessageToTree can see our proto files.
proto_tools.map_proto_source_tree_path(
'', os.getcwd()) # Tests run in the bazel root directory.
proto_tools.import_proto_file('tensorflow_fold/util/test.proto')
proto_tools.import_proto_file('tensorflow_fold/util/test3.proto')
class TestCase(tf.test.TestCase):
  """tf.test.TestCase extended with Fold-specific assertion helpers."""

  def assertRaisesWithLiteralMatch(self, exception, literal, callable_obj,
                                   *args, **kwargs):
    """Assert that calling *callable_obj* raises *exception* whose str()
    equals *literal* exactly (not a regex match)."""
    with self.assertRaises(exception) as ctx:
      callable_obj(*args, **kwargs)
    self.assertEqual(str(ctx.exception), literal)

  # Open-sourced here:
  # <github.com/google/google-apputils/blob/master/google/apputils/basetest.py>
  def assertSameStructure(self, a, b, aname='a', bname='b', msg=None):
    """Asserts that two values contain the same structural content.

    The two arguments should be data trees consisting of trees of dicts and
    lists. They will be deeply compared by walking into the contents of dicts
    and lists; other items will be compared using the == operator.

    If the two structures differ in content, the failure message will indicate
    the location within the structures where the first difference is found.
    This may be helpful when comparing large structures.

    Args:
      a: The first structure to compare.
      b: The second structure to compare.
      aname: Variable name to use for the first structure in assertion messages.
      bname: Variable name to use for the second structure.
      msg: Additional text to include in the failure message.
    """

    # Accumulate all the problems found so we can report all of them at once
    # rather than just stopping at the first
    problems = []

    _walk_structure_for_problems(a, b, aname, bname, problems)

    # Avoid spamming the user toooo much
    max_problems_to_show = self.maxDiff // 80
    if len(problems) > max_problems_to_show:
      problems = problems[0:max_problems_to_show-1] + ['...']

    if problems:
      failure_message = '; '.join(problems)
      if msg:
        failure_message += (': ' + msg)
      self.fail(failure_message)
# Open-sourced here:
# <github.com/google/google-apputils/blob/master/google/apputils/basetest.py>
def _walk_structure_for_problems(a, b, aname, bname, problem_list):
"""The recursive comparison behind assertSameStructure."""
if type(a) != type(b): # pylint: disable=unidiomatic-typecheck
problem_list.append('%s is a %r but %s is a %r' %
(aname, type(a), bname, type(b)))
# If they have different types there's no point continuing
return
if isinstance(a, collections.Mapping):
for k in a:
if k in b:
_walk_structure_for_problems(
a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k),
problem_list)
else:
problem_list.append('%s has [%r] but %s does not' % (aname, k, bname))
for k in b:
if k not in a:
problem_list.append('%s lacks [%r] but %s has it' % (aname, k, bname))
# Strings are Sequences but we'll just do those with regular !=
elif (isinstance(a, collections.Sequence) and
not isinstance(a, six.string_types)):
minlen = min(len(a), len(b))
for i in xrange(minlen):
_walk_structure_for_problems(
a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i),
problem_list)
for i in xrange(minlen, len(a)):
problem_list.append('%s has [%i] but %s does not' % (aname, i, bname))
for i in xrange(minlen, len(b)):
problem_list.append('%s lacks [%i] but %s has it' % (aname, i, bname))
else:
if a != b:
problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b))
def main():
  """Entry point: delegate to the TensorFlow test runner."""
  tf.test.main()
| StarcoderdataPython |
8143818 | <gh_stars>0
from .client import ClientWrapper
from .user import UserWrapper
| StarcoderdataPython |
11244698 | #!/usr/bin/env python
# Copyright (C) 2020, <NAME>.
#
# This file is part of pyrex.
#
# pyrex is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyrex is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyrex. If not, see <http://www.gnu.org/licenses/>.
#
# Import useful things
from distutils.core import setup
from setuptools import find_packages
# Load README for long description
with open('README.md', 'r') as f:
    readme = f.read()
setup(name='pyrex',
      version='0.2.0',
      # NOTE: fixed typo "low-exentric" -> "low-eccentric" in the description.
      description='Python package for transforming circular gravitational waveforms to low-eccentric waveforms from numerical simulations.',
      long_description=readme,
      author='<NAME>',
      author_email='<EMAIL>',
      packages=find_packages(),
      include_package_data=True,
      # package_dir={'pyrex': 'codes'},
      url='https://github.com/Yoshinta/pyrex',
      download_url='https://github.com/Yoshinta/pyrex/archive/master.zip',
      keywords=['numerical relativity', 'gravitational waves', 'waveform', 'eccentric', 'compact binary'],
      install_requires=[
          'jsonschema>=3.0.2',
          'pickleshare>=0.7.5',
          'scipy>=1.3.1',
          'lalsuite>=6.62',
          'h5py>=2.10',
          'sxs==2019.9.9.23.27.50'],
      # BUG FIX: `classifiers` must be a list -- recent setuptools rejects a
      # tuple with "'classifiers' should be a 'list', got type 'tuple'".
      classifiers=[
          'Programming Language :: Python :: 3.6',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Topic :: Software Development',
          'Topic :: Scientific/Engineering'
      ],
      license='MIT')
| StarcoderdataPython |
3541865 | from gameCore import GameCore
from menu import menu
# The playing field: a 10-line by 40-character grid of '-' characters.
world ='''----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------
----------------------------------------'''
#rows = 40, colums = 10
# NOTE(review): `world` is 40 characters wide and 10 lines tall, so `rows`
# here actually counts characters per line -- confirm against GameCore's
# expected argument (only `rows` is passed below; `columns` is unused here).
rows = 40
columns = 10
if __name__ == "__main__" :
    # Run the menu first; only start the game when it reports success (1).
    new_menu = menu()
    result = new_menu._run()
    if result == 1:
        # Build the game from the menu's selections (ship and frame rate).
        new_game = GameCore(world,rows,new_menu.selected_ship,new_menu.fps)
        new_game._run()
| StarcoderdataPython |
3527693 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <NAME> (<EMAIL>)
#
from __future__ import absolute_import, division, unicode_literals
from collections import Iterable, Mapping, Set
from mo_dots import is_data, is_sequence, tuplewrap, unwrap, wrap
from mo_dots.objects import datawrap
from mo_future import PY2, iteritems
from mo_logs import Log
from mo_logs.exceptions import suppress_exception
DEBUG = False
class UniqueIndex(Set, Mapping):
    """
    DEFINE A SET OF ATTRIBUTES THAT UNIQUELY IDENTIFIES EACH OBJECT IN A list.
    THIS ALLOWS set-LIKE COMPARISONS (UNION, INTERSECTION, DIFFERENCE, ETC) WHILE
    STILL MAINTAINING list-LIKE FEATURES
    KEYS CAN BE DOT-DELIMITED PATHS TO DEEP INNER OBJECTS
    """
    def __init__(self, keys, data=None, fail_on_dup=True):
        # Maps extracted key -> unwrapped stored object.
        self._data = {}
        # Normalized tuple of key names (dot-delimited paths allowed).
        self._keys = tuplewrap(keys)
        self.count = 0
        # When True, add() raises on duplicate keys; otherwise duplicates
        # are ignored (warned about when DEBUG is set).
        self.fail_on_dup = fail_on_dup
        if data:
            for d in data:
                self.add(d)
    def __getitem__(self, key):
        # A fully-specified key does a direct dict lookup; a partial
        # composite key returns all objects matching the supplied fields.
        try:
            _key = value2key(self._keys, key)
            if len(self._keys) == 1 or len(_key) == len(self._keys):
                d = self._data.get(_key)
                return wrap(d)
            else:
                # Linear scan over all stored objects for a partial match.
                output = wrap([
                    d
                    for d in self._data.values()
                    if all(wrap(d)[k] == v for k, v in _key.items())
                ])
                return output
        except Exception as e:
            Log.error("something went wrong", e)
    def __setitem__(self, key, value):
        # Direct item assignment is intentionally unsupported; use add().
        Log.error("Use add() to ad to an index")
        # try:
        #     key = value2key(self._keys, key)
        #     d = self._data.get(key)
        #     if d != None:
        #         Log.error("key already filled")
        #     self._data[key] = unwrap(value)
        #     self.count += 1
        #
        # except Exception as e:
        #     Log.error("something went wrong", e)
    def keys(self):
        # The extracted key values, in dict order.
        return self._data.keys()
    def pop(self):
        # NOTE(review): relies on the Python 2 iterator protocol
        # (.next()); under Python 3 this would need next(...) -- confirm
        # which runtime this module targets.
        output = iteritems(self._data).next()[1]
        self.remove(output)
        return wrap(output)
    def add(self, val):
        # Index `val` under its extracted key; duplicate handling depends
        # on fail_on_dup.
        val = datawrap(val)
        key = value2key(self._keys, val)
        if key == None:
            Log.error("Expecting key to be not None")
        try:
            d = self._data.get(key)
        except Exception as e:
            # NOTE(review): if the lookup raised, `d` is unbound below --
            # this branch only recomputes `key`; confirm the intent.
            key = value2key(self._keys, val)
        if d is None:
            self._data[key] = unwrap(val)
            self.count += 1
        elif d is not val:
            if self.fail_on_dup:
                Log.error("{{new|json}} with key {{key|json}} already filled with {{old|json}}", key=key, new=val, old=self[val])
            elif DEBUG:
                Log.warning("key {{key|json}} already filled\nExisting\n{{existing|json|indent}}\nValue\n{{value|json|indent}}",
                    key=key,
                    existing=d,
                    value=val
                )
    def extend(self, values):
        # Bulk add; duplicate handling is identical to add().
        for v in values:
            self.add(v)
    def remove(self, val):
        # Remove the object whose key matches `val`; missing keys are a no-op.
        key = value2key(self._keys, datawrap(val))
        if key == None:
            Log.error("Expecting key to not be None")
        d = self._data.get(key)
        if d is None:
            # ALREADY GONE
            return
        else:
            del self._data[key]
            self.count -= 1
    def __contains__(self, key):
        # Present when lookup yields something non-null (full or partial key).
        return self[key] != None
    if PY2:
        def __iter__(self):
            return (wrap(v) for v in self._data.itervalues())
    else:
        def __iter__(self):
            return (wrap(v) for v in self._data.values())
    def __sub__(self, other):
        # Set difference: objects in self whose keys are not in `other`.
        output = UniqueIndex(self._keys, fail_on_dup=self.fail_on_dup)
        for v in self:
            if v not in other:
                output.add(v)
        return output
    def __and__(self, other):
        # Set intersection by key membership.
        output = UniqueIndex(self._keys)
        for v in self:
            if v in other:
                output.add(v)
        return output
    def __or__(self, other):
        # Set union; self wins on duplicate keys (other's dups suppressed).
        output = UniqueIndex(self._keys)
        for v in self:
            output.add(v)
        for v in other:
            with suppress_exception:
                output.add(v)
        return output
    def __ior__(self, other):
        # In-place union; duplicate-key errors from add() are suppressed.
        for v in other:
            with suppress_exception:
                self.add(v)
        return self
    def __xor__(self, other):
        # Symmetric difference; `other` may be any iterable of objects.
        if not isinstance(other, Iterable):
            Log.error("Expecting other to be iterable")
        other = UniqueIndex(keys=self._keys, data=other, fail_on_dup=False)
        return (self-other) | (other-self)
    def __len__(self):
        # NOTE(review): when count is 0 this recounts by iterating, which
        # double-counts if items exist -- relies on count staying accurate.
        if self.count == 0:
            for d in self:
                self.count += 1
        return self.count
    def subtract(self, other):
        # Alias for the - operator.
        return self.__sub__(other)
    def intersect(self, other):
        # Alias for the & operator.
        return self.__and__(other)
def value2key(keys, val):
    """Extract the (possibly composite) index key for `val`.

    For a single key name, returns the bare value; for multiple key
    names, returns a wrapped dict of the key fields.
    """
    single = len(keys) == 1
    if is_data(val):
        # Mapping-like input: pull the named field(s).
        if single:
            return val[keys[0]]
        return datawrap({k: val[k] for k in keys})
    if is_sequence(val):
        # Sequence input: values are positional, in key order.
        if single:
            return val[0]
        return datawrap(dict(zip(keys, val)))
    # Scalar input only makes sense for a single-key index.
    if single:
        return val
    Log.error("do not know what to do here")
| StarcoderdataPython |
3503918 | <filename>old/cube.py
#!/bin/python3
import pygame
from pygame.locals import *
from OpenGL.GL import *
#from OpenGl.GLU import *
import random
from OpenGL.GLU import *
from constants import Constants
# to sramble white on top facing green facing
#colors = {
# 'blue': '#DDA0DD',
# 'orange': '#EE82EE',
# 'yellow': '#8B008B',
# 'green': '#FF00FF',
# 'red': '#DA70D6',
# 'white': '#BA55D3',
#}
# RGB color per face, indexed in the same order as `surfaces`.
colors = (
    (1, 1, 0), # Back Yellow
    (1, 0, 0), # Left Red
    (1, 1, 1), # Face White
    (1, 0.5, 0), # Right Orange
    (0, 1, 0), # Up Green
    (0, 0, 1), # Down Blue
)
# The eight cube corners in model space.
vertices = (
    (1, -1, -1),
    (1, 1, -1),
    (-1, 1, -1),
    (-1, -1, -1),
    (1, -1, 1),
    (1, 1, 1),
    (-1, -1, 1),
    (-1, 1, 1),
)
# Each face as four indices into `vertices` (one quad per face).
surfaces = (
    (0,1,2,3),
    (3,2,7,6),
    (6,7,5,4),
    (4,5,1,0),
    (1,5,7,2),
    (4,0,3,6),
)
def draw_Cube(vertices):
    """Render the cube as six flat-colored quads.

    Uses the module-level `surfaces` and `colors` tables; each face is
    colored by the entry at the same index in `colors`.
    """
    glBegin(GL_QUADS)
    for face_index, face in enumerate(surfaces):
        # One color per face, in table order.
        glColor3fv(colors[face_index])
        for corner in face:
            glVertex3fv(vertices[corner])
    glEnd()
def main():
    """Open a window and run the interactive spinning-cube loop.

    Numpad 4/6 and 8/2 set rotation about two axes, numpad 7/9 roll,
    and w/s dolly along z. Closing the window quits the program.
    """
    # Setup
    pygame.init()
    display = (800, 600)
    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
    glEnable(GL_DEPTH_TEST)
    # Pre-initialize movement state so it is defined before the first event.
    x_move = 0
    y_move = 0
    z_move = 0
    roll = 0
    gluPerspective(45, (display[0]/display[1]), 0.1, 50)
    # Move back
    glTranslatef(0, 0, -5)
    #glRotatef(90, 1, 0, 0)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                # Key press starts continuous motion on the matching axis.
                if event.key == pygame.K_KP4:
                    x_move = 0.3
                if event.key == pygame.K_KP6:
                    x_move = -0.3
                if event.key == pygame.K_KP8:
                    y_move = -0.3
                if event.key == pygame.K_KP2:
                    y_move = 0.3
                if event.key == pygame.K_w:
                    z_move = 0.3
                if event.key == pygame.K_s:
                    z_move = -0.3
                if event.key == pygame.K_KP7:
                    roll = 0.3
                if event.key == pygame.K_KP9:
                    roll = -0.3
            elif event.type == pygame.KEYUP:
                # Key release stops motion on that axis.
                if event.key == pygame.K_KP4 or event.key == pygame.K_KP6:
                    x_move = 0
                if event.key == pygame.K_KP8 or event.key == pygame.K_KP2:
                    y_move = 0
                if event.key == pygame.K_w or event.key == pygame.K_s:
                    z_move = 0
                if event.key ==pygame.K_KP7 or event.key == pygame.K_KP9:
                    roll = 0
        # Rotate 1 degree per frame about the axis built from the key state.
        glRotatef(1, y_move, x_move, roll)
        x = glGetDoublev(GL_MODELVIEW_MATRIX)
        # NOTE(review): camera_x/y/z are read from the modelview matrix but
        # never used -- confirm whether they were meant for clamping movement.
        camera_x = x[3][0]
        camera_y = x[3][1]
        camera_z = x[3][2]
        glTranslatef(0, 0, z_move)
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        draw_Cube(vertices)
        pygame.display.flip()
if __name__ == '__main__':
    main()
    # Clean up pygame after the main loop exits.
    pygame.quit()
    quit()
| StarcoderdataPython |
6511846 | <filename>World/Object/Unit/Spell/Constants/SpellCastResult.py
from enum import Enum
class SpellCastResult(Enum):
    """Numeric result codes for a spell cast attempt.

    Each SPELL_FAILED_* member names the reason a cast was rejected;
    SPELL_NOT_FOUND (0xFE) and SPELL_CAST_OK (0xFF) are the two
    non-failure sentinels. Values are fixed protocol codes -- do not
    renumber.
    """
    SPELL_FAILED_AFFECTING_COMBAT = 0x00
    SPELL_FAILED_ALREADY_AT_FULL_HEALTH = 0x01
    SPELL_FAILED_ALREADY_AT_FULL_MANA = 0x02
    SPELL_FAILED_ALREADY_AT_FULL_POWER = 0x03
    SPELL_FAILED_ALREADY_BEING_TAMED = 0x04
    SPELL_FAILED_ALREADY_HAVE_CHARM = 0x05
    SPELL_FAILED_ALREADY_HAVE_SUMMON = 0x06
    SPELL_FAILED_ALREADY_OPEN = 0x07
    SPELL_FAILED_AURA_BOUNCED = 0x08
    SPELL_FAILED_AUTOTRACK_INTERRUPTED = 0x09
    SPELL_FAILED_BAD_IMPLICIT_TARGETS = 0x0A
    SPELL_FAILED_BAD_TARGETS = 0x0B
    SPELL_FAILED_CANT_BE_CHARMED = 0x0C
    SPELL_FAILED_CANT_BE_DISENCHANTED = 0x0D
    SPELL_FAILED_CANT_BE_DISENCHANTED_SKILL = 0x0E
    SPELL_FAILED_CANT_BE_PROSPECTED = 0x0F
    SPELL_FAILED_CANT_CAST_ON_TAPPED = 0x10
    SPELL_FAILED_CANT_DUEL_WHILE_INVISIBLE = 0x11
    SPELL_FAILED_CANT_DUEL_WHILE_STEALTHED = 0x12
    SPELL_FAILED_CANT_STEALTH = 0x13
    SPELL_FAILED_CASTER_AURASTATE = 0x14
    SPELL_FAILED_CASTER_DEAD = 0x15
    SPELL_FAILED_CHARMED = 0x16
    SPELL_FAILED_CHEST_IN_USE = 0x17
    SPELL_FAILED_CONFUSED = 0x18
    SPELL_FAILED_DONT_REPORT = 0x19
    SPELL_FAILED_EQUIPPED_ITEM = 0x1A
    SPELL_FAILED_EQUIPPED_ITEM_CLASS = 0x1B
    SPELL_FAILED_EQUIPPED_ITEM_CLASS_MAINHAND = 0x1C
    SPELL_FAILED_EQUIPPED_ITEM_CLASS_OFFHAND = 0x1D
    SPELL_FAILED_ERROR = 0x1E
    SPELL_FAILED_FIZZLE = 0x1F
    SPELL_FAILED_FLEEING = 0x20
    SPELL_FAILED_FOOD_LOWLEVEL = 0x21
    SPELL_FAILED_HIGHLEVEL = 0x22
    SPELL_FAILED_HUNGER_SATIATED = 0x23
    SPELL_FAILED_IMMUNE = 0x24
    SPELL_FAILED_INTERRUPTED = 0x25
    SPELL_FAILED_INTERRUPTED_COMBAT = 0x26
    SPELL_FAILED_ITEM_ALREADY_ENCHANTED = 0x27
    SPELL_FAILED_ITEM_GONE = 0x28
    SPELL_FAILED_ITEM_NOT_FOUND = 0x29
    SPELL_FAILED_ITEM_NOT_READY = 0x2A
    SPELL_FAILED_LEVEL_REQUIREMENT = 0x2B
    SPELL_FAILED_LINE_OF_SIGHT = 0x2C
    SPELL_FAILED_LOWLEVEL = 0x2D
    SPELL_FAILED_LOW_CASTLEVEL = 0x2E
    SPELL_FAILED_MAINHAND_EMPTY = 0x2F
    SPELL_FAILED_MOVING = 0x30
    SPELL_FAILED_NEED_AMMO = 0x31
    SPELL_FAILED_NEED_AMMO_POUCH = 0x32
    SPELL_FAILED_NEED_EXOTIC_AMMO = 0x33
    SPELL_FAILED_NOPATH = 0x34
    SPELL_FAILED_NOT_BEHIND = 0x35
    SPELL_FAILED_NOT_FISHABLE = 0x36
    SPELL_FAILED_NOT_FLYING = 0x37
    SPELL_FAILED_NOT_HERE = 0x38
    SPELL_FAILED_NOT_INFRONT = 0x39
    SPELL_FAILED_NOT_IN_CONTROL = 0x3A
    SPELL_FAILED_NOT_KNOWN = 0x3B
    SPELL_FAILED_NOT_MOUNTED = 0x3C
    SPELL_FAILED_NOT_ON_TAXI = 0x3D
    SPELL_FAILED_NOT_ON_TRANSPORT = 0x3E
    SPELL_FAILED_NOT_READY = 0x3F
    SPELL_FAILED_NOT_SHAPESHIFT = 0x40
    SPELL_FAILED_NOT_STANDING = 0x41
    SPELL_FAILED_NOT_TRADEABLE = 0x42
    SPELL_FAILED_NOT_TRADING = 0x43
    SPELL_FAILED_NOT_UNSHEATHED = 0x44
    SPELL_FAILED_NOT_WHILE_GHOST = 0x45
    SPELL_FAILED_NO_AMMO = 0x46
    SPELL_FAILED_NO_CHARGES_REMAIN = 0x47
    SPELL_FAILED_NO_CHAMPION = 0x48
    SPELL_FAILED_NO_COMBO_POINTS = 0x49
    SPELL_FAILED_NO_DUELING = 0x4A
    SPELL_FAILED_NO_ENDURANCE = 0x4B
    SPELL_FAILED_NO_FISH = 0x4C
    SPELL_FAILED_NO_ITEMS_WHILE_SHAPESHIFTED = 0x4D
    SPELL_FAILED_NO_MOUNTS_ALLOWED = 0x4E
    SPELL_FAILED_NO_PET = 0x4F
    SPELL_FAILED_NO_POWER = 0x50
    SPELL_FAILED_NOTHING_TO_DISPEL = 0x51
    SPELL_FAILED_NOTHING_TO_STEAL = 0x52
    SPELL_FAILED_ONLY_ABOVEWATER = 0x53
    SPELL_FAILED_ONLY_DAYTIME = 0x54
    SPELL_FAILED_ONLY_INDOORS = 0x55
    SPELL_FAILED_ONLY_MOUNTED = 0x56
    SPELL_FAILED_ONLY_NIGHTTIME = 0x57
    SPELL_FAILED_ONLY_OUTDOORS = 0x58
    SPELL_FAILED_ONLY_SHAPESHIFT = 0x59
    SPELL_FAILED_ONLY_STEALTHED = 0x5A
    SPELL_FAILED_ONLY_UNDERWATER = 0x5B
    SPELL_FAILED_OUT_OF_RANGE = 0x5C
    SPELL_FAILED_PACIFIED = 0x5D
    SPELL_FAILED_POSSESSED = 0x5E
    SPELL_FAILED_REAGENTS = 0x5F
    SPELL_FAILED_REQUIRES_AREA = 0x60
    SPELL_FAILED_REQUIRES_SPELL_FOCUS = 0x61
    SPELL_FAILED_ROOTED = 0x62
    SPELL_FAILED_SILENCED = 0x63
    SPELL_FAILED_SPELL_IN_PROGRESS = 0x64
    SPELL_FAILED_SPELL_LEARNED = 0x65
    SPELL_FAILED_SPELL_UNAVAILABLE = 0x66
    SPELL_FAILED_STUNNED = 0x67
    SPELL_FAILED_TARGETS_DEAD = 0x68
    SPELL_FAILED_TARGET_AFFECTING_COMBAT = 0x69
    SPELL_FAILED_TARGET_AURASTATE = 0x6A
    SPELL_FAILED_TARGET_DUELING = 0x6B
    SPELL_FAILED_TARGET_ENEMY = 0x6C
    SPELL_FAILED_TARGET_ENRAGED = 0x6D
    SPELL_FAILED_TARGET_FRIENDLY = 0x6E
    SPELL_FAILED_TARGET_IN_COMBAT = 0x6F
    SPELL_FAILED_TARGET_IS_PLAYER = 0x70
    SPELL_FAILED_TARGET_IS_PLAYER_CONTROLLED = 0x71
    SPELL_FAILED_TARGET_NOT_DEAD = 0x72
    SPELL_FAILED_TARGET_NOT_IN_PARTY = 0x73
    SPELL_FAILED_TARGET_NOT_LOOTED = 0x74
    SPELL_FAILED_TARGET_NOT_PLAYER = 0x75
    SPELL_FAILED_TARGET_NO_POCKETS = 0x76
    SPELL_FAILED_TARGET_NO_WEAPONS = 0x77
    SPELL_FAILED_TARGET_UNSKINNABLE = 0x78
    SPELL_FAILED_THIRST_SATIATED = 0x79
    SPELL_FAILED_TOO_CLOSE = 0x7A
    SPELL_FAILED_TOO_MANY_OF_ITEM = 0x7B
    SPELL_FAILED_TOTEM_CATEGORY = 0x7C
    SPELL_FAILED_TOTEMS = 0x7D
    SPELL_FAILED_TRAINING_POINTS = 0x7E
    SPELL_FAILED_TRY_AGAIN = 0x7F
    SPELL_FAILED_UNIT_NOT_BEHIND = 0x80
    SPELL_FAILED_UNIT_NOT_INFRONT = 0x81
    SPELL_FAILED_WRONG_PET_FOOD = 0x82
    SPELL_FAILED_NOT_WHILE_FATIGUED = 0x83
    SPELL_FAILED_TARGET_NOT_IN_INSTANCE = 0x84
    SPELL_FAILED_NOT_WHILE_TRADING = 0x85
    SPELL_FAILED_TARGET_NOT_IN_RAID = 0x86
    SPELL_FAILED_DISENCHANT_WHILE_LOOTING = 0x87
    SPELL_FAILED_PROSPECT_WHILE_LOOTING = 0x88
    SPELL_FAILED_PROSPECT_NEED_MORE = 0x89
    SPELL_FAILED_TARGET_FREEFORALL = 0x8A
    SPELL_FAILED_NO_EDIBLE_CORPSES = 0x8B
    SPELL_FAILED_ONLY_BATTLEGROUNDS = 0x8C
    SPELL_FAILED_TARGET_NOT_GHOST = 0x8D
    SPELL_FAILED_TOO_MANY_SKILLS = 0x8E
    SPELL_FAILED_TRANSFORM_UNUSABLE = 0x8F
    SPELL_FAILED_WRONG_WEATHER = 0x90
    SPELL_FAILED_DAMAGE_IMMUNE = 0x91
    SPELL_FAILED_PREVENTED_BY_MECHANIC = 0x92
    SPELL_FAILED_PLAY_TIME = 0x93
    SPELL_FAILED_REPUTATION = 0x94
    SPELL_FAILED_MIN_SKILL = 0x95
    SPELL_FAILED_NOT_IN_ARENA = 0x96
    SPELL_FAILED_NOT_ON_SHAPESHIFT = 0x97
    SPELL_FAILED_NOT_ON_STEALTHED = 0x98
    SPELL_FAILED_NOT_ON_DAMAGE_IMMUNE = 0x99
    SPELL_FAILED_NOT_ON_MOUNTED = 0x9A
    SPELL_FAILED_TOO_SHALLOW = 0x9B
    SPELL_FAILED_TARGET_NOT_IN_SANCTUARY = 0x9C
    SPELL_FAILED_TARGET_IS_TRIVIAL = 0x9D
    SPELL_FAILED_BM_OR_INVISGOD = 0x9E
    SPELL_FAILED_EXPERT_RIDING_REQUIREMENT = 0x9F
    SPELL_FAILED_ARTISAN_RIDING_REQUIREMENT = 0xA0
    SPELL_FAILED_NOT_IDLE = 0xA1
    SPELL_FAILED_NOT_INACTIVE = 0xA2
    SPELL_FAILED_PARTIAL_PLAYTIME = 0xA3
    SPELL_FAILED_NO_PLAYTIME = 0xA4
    SPELL_FAILED_NOT_IN_BATTLEGROUND = 0xA5
    SPELL_FAILED_ONLY_IN_ARENA = 0xA6
    SPELL_FAILED_TARGET_LOCKED_TO_RAID_INSTANCE = 0xA7
    SPELL_FAILED_UNKNOWN = 0xA8
    SPELL_NOT_FOUND = 0xFE
    SPELL_CAST_OK = 0xFF
| StarcoderdataPython |
6427628 | import argparse
import os
import pickle
import scipy.ndimage
import cv2
import numpy as np
import tensorflow as tf
import Networks as Nets
from Params import CTCInferenceParams
from distutils.util import strtobool
import DataHandeling
import sys
from utils import log_print, get_model, bbox_crop, bbox_fill
__author__ = '<EMAIL>'
try:
import tensorflow.python.keras as k
except AttributeError:
import tensorflow.keras as k
if not tf.__version__.split('.')[0] == '2':
raise ImportError(f'Required tensorflow version 2.x. current version is: {tf.__version__}')
def inference():
    """Run cell-segmentation inference over an image sequence.

    Loads the trained model described in params.model_path, streams frames
    from params.sequence_path, converts each per-pixel softmax into a
    labeled cell mask (filling holes, attaching nearby edge pixels, and
    filtering by size and field of view), and writes one mask image per
    frame, plus optional intermediate softmax/label images.
    """
    # Load Model
    with open(os.path.join(params.model_path, 'model_params.pickle'), 'rb') as fobj:
        model_dict = pickle.load(fobj)
    model_cls = get_model(model_dict['name'])
    device = '/gpu:0' if params.gpu_id >= 0 else '/cpu:0'
    with tf.device(device):
        model = model_cls(*model_dict['params'], data_format=params.data_format, pad_image=True)
        model.load_weights(os.path.join(params.model_path, 'model.ckpt'))
        log_print("Restored from {}".format(os.path.join(params.model_path, 'model.ckpt')))
    base_out_temp_vis_fname = base_out_temp_label_fname = base_out_fname = None
    if not params.dry_run:
        # NOTE(review): the templates are built only when
        # save_intermediate_path is set, but their use below is guarded by
        # params.save_intermediate -- confirm the two flags are set together.
        if params.save_intermediate_path:
            base_out_temp_vis_fname = os.path.join(params.save_intermediate_vis_path, 'softmax{time:03d}.tif')
            base_out_temp_label_fname = os.path.join(params.save_intermediate_label_path, 'mask{time:03d}.tif')
        base_out_fname = os.path.join(params.output_path, 'mask{time:03d}.tif')
    dataset = params.data_reader(params.sequence_path, params.filename_format,
                                 pre_sequence_frames=params.pre_sequence_frames).dataset
    try:
        for T, image in enumerate(dataset):
            t = T - params.pre_sequence_frames
            image_shape = image.shape
            # Reshape to a rank-5 batch (batch, time, [C,]H,W[,C]) as the
            # model expects, depending on data_format.
            if len(image_shape) == 2:
                if params.data_format == 'NCHW':
                    image = tf.reshape(image, [1, 1, 1, image_shape[0], image_shape[1]])
                else:
                    image = tf.reshape(image, [1, 1, image_shape[0], image_shape[1], 1])
            elif len(image_shape) == 3:
                image = tf.reshape(image, [1, 1, image_shape[0], image_shape[1], image_shape[2]])
            else:
                raise ValueError()
            _, image_softmax = model(image, training=False)
            image_softmax_np = np.squeeze(image_softmax.numpy(), (0, 1))
            if t < 0:
                # Mirrored warm-up frames only prime the model state.
                continue
            if not params.dry_run:
                # Class 1 is cell interior, class 2 is cell edge.
                seg_edge = np.greater_equal(image_softmax_np[2], 0.2)
                seg_cell = np.logical_and(np.equal(np.argmax(image_softmax_np, 0), 1).astype(np.float32),
                                          np.logical_not(seg_edge))
                seg_edge = seg_edge.astype(np.float32)
                seg_cell = scipy.ndimage.morphology.binary_fill_holes(seg_cell).astype(np.float32)
                seg_edge = np.maximum((seg_edge - seg_cell), 0)
                cc_out = cv2.connectedComponentsWithStats(seg_cell.astype(np.uint8), 8, cv2.CV_32S)
                num_cells = cc_out[0]
                labels = cc_out[1]
                stats = cc_out[2]
                # Attach edge pixels within edge_dist to their nearest cell.
                dist, ind = scipy.ndimage.morphology.distance_transform_edt(1 - seg_cell, return_indices=True)
                labels = labels[ind[0, :], ind[1, :]] * seg_edge * (dist < params.edge_dist) + labels
                for n in range(1, num_cells):
                    bw = labels == n
                    if not np.any(bw):
                        continue
                    # Fill interior holes of each labeled cell within its bbox.
                    bw_crop, loc = bbox_crop(bw)
                    fill_crop = scipy.ndimage.morphology.binary_fill_holes(bw_crop).astype(np.float32)
                    fill_diff = fill_crop - bw_crop
                    bw_fill = bbox_fill(bw, fill_diff, loc)
                    labels = labels + bw_fill * n
                # filter by fov
                if params.FOV:
                    fov_im = np.ones_like(labels)
                    fov_im[:params.FOV, :] = 0
                    fov_im[-params.FOV:, :] = 0
                    # BUG FIX: previously `fov_im[:, params.FOV] = 0` zeroed
                    # only the single column at index FOV instead of the whole
                    # left border band, so left-edge objects were never removed.
                    fov_im[:, :params.FOV] = 0
                    fov_im[:, -params.FOV:] = 0
                    fov_labels = labels * fov_im
                    unique_fov_labels = np.unique(fov_labels.flatten())
                    # Labels that disappear when the border is masked lie
                    # entirely within the border band -- drop them.
                    remove_ind = np.setdiff1d(np.arange(num_cells), unique_fov_labels)
                else:
                    remove_ind = []
                if params.save_intermediate:
                    if params.data_format == 'NCHW':
                        image_softmax_np = np.transpose(image_softmax_np, (1, 2, 0))
                    out_fname = base_out_temp_vis_fname.format(time=t)
                    sigoutnp_vis = np.flip(np.round(image_softmax_np * (2 ** 16 - 1)).astype(np.uint16), 2)
                    cv2.imwrite(filename=out_fname, img=sigoutnp_vis.astype(np.uint16))
                    log_print("Saved File: {}".format(out_fname))
                labels_out = np.zeros_like(labels, dtype=np.uint16)
                # isbi_out_dict = {}
                p = 0
                # Renumber surviving cells compactly; erase rejected ones.
                for n in range(1, num_cells):
                    area = stats[n, cv2.CC_STAT_AREA]
                    if params.min_cell_size <= area <= params.max_cell_size and not (n in remove_ind):
                        p += 1
                        # isbi_out_dict[p] = [p, 0, 0, 0]
                        labels_out[labels == n] = p
                    else:
                        labels[labels == n] = 0
                out_fname = base_out_fname.format(time=t)
                cv2.imwrite(filename=out_fname, img=labels_out.astype(np.uint16))
                log_print("Saved File: {}".format(out_fname))
                if params.save_intermediate:
                    out_fname = base_out_temp_label_fname.format(time=t)
                    cv2.imwrite(filename=out_fname, img=labels_out.astype(np.uint16))
                    log_print("Saved File: {}".format(out_fname))
    except (KeyboardInterrupt, ValueError) as err:
        print('Error: {}'.format(str(err)))
    finally:
        print('Done!')
if __name__ == '__main__':
    class AddNets(argparse.Action):
        """argparse action: map network class names to classes in Networks."""
        import Networks as Nets
        def __init__(self, option_strings, dest, **kwargs):
            super(AddNets, self).__init__(option_strings, dest, **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            nets = [getattr(Nets, v) for v in values]
            setattr(namespace, self.dest, nets)
    class AddReader(argparse.Action):
        """argparse action: map a reader name to a DataHandeling class."""
        def __init__(self, option_strings, dest, **kwargs):
            super(AddReader, self).__init__(option_strings, dest, **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            reader = getattr(DataHandeling, values)
            setattr(namespace, self.dest, reader)
    class AddDatasets(argparse.Action):
        """argparse action: parse (path, boolean-flag) pairs into a list."""
        def __init__(self, option_strings, dest, *args, **kwargs):
            super(AddDatasets, self).__init__(option_strings, dest, *args, **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            if len(values) % 2:
                raise ValueError("dataset values should be of length 2*N where N is the number of datasets")
            datastets = []
            for i in range(0, len(values), 2):
                datastets.append((values[i], strtobool(values[i + 1])))
            setattr(namespace, self.dest, datastets)
    arg_parser = argparse.ArgumentParser(description='Run Inference LSTMUnet Segmentation')
    arg_parser.add_argument('--gpu_id', dest='gpu_id', type=str,
                            help="Visible GPUs: example, '0,2,3', use -1 for CPU")
    arg_parser.add_argument('--model_path', dest='model_path', type=str,
                            help="Path to trained model generated by train2D.py, folder should contain model.ckpt.*")
    arg_parser.add_argument('--sequence_path', dest='sequence_path', type=str,
                            help="Path to sequence images. Folder should contain image files")
    arg_parser.add_argument('--filename_format', dest='filename_format', type=str,
                            help="Format of file using wildcard (*) to indicate timestep. Default: 't*.tif'")
    arg_parser.add_argument('--data_format', dest='data_format', type=str, choices=['NCHW', 'NWHC'],
                            help="Data format NCHW or NHWC")
    arg_parser.add_argument('--min_cell_size', dest='min_cell_size', type=int,
                            help="Minimum cell size")
    arg_parser.add_argument('--max_cell_size', dest='max_cell_size', type=int,
                            help="Maximum cell size")
    arg_parser.add_argument('--num_iterations', dest='num_iterations', type=int,
                            help="Maximum number of training iterations")
    arg_parser.add_argument('--edge_dist', dest='edge_dist', type=int,
                            help="Maximum edge width to add to cell object")
    arg_parser.add_argument('--pre_sequence_frames', dest='pre_sequence_frames', type=int,
                            help="Number of frames to run before sequence, uses mirror of first N frames.")
    arg_parser.add_argument('--save_intermediate', dest='save_intermediate', action='store_const', const=True,
                            help="Save intermediate files")
    arg_parser.add_argument('--save_intermediate_path', dest='save_intermediate_path', type=str,
                            help="Path to save intermediate files, used only with --save_intermediate")
    arg_parser.add_argument('--dry_run', dest='dry_run', action='store_const', const=True,
                            help="Do not write any outputs: for debugging only")
    sys_args = sys.argv
    input_args = arg_parser.parse_args()
    # Only forward explicitly-provided options; defaults come from the params class.
    args_dict = {key: val for key, val in vars(input_args).items() if not (val is None)}
    params = CTCInferenceParams(args_dict)
    tf_eps = tf.constant(1E-8, name='epsilon')
    try:
        inference()
    finally:
        log_print('Done')
| StarcoderdataPython |
6543948 | from numpy import matrix
from numpy import shape
def scal(alpha, x):
    """
    Compute alpha * x, overwriting x.

    x can be row or column vectors.

    Args:
        alpha: scalar (int, float, complex) or a 1 x 1 numpy.matrix.
        x: 2D numpy.matrix with a single row or single column; scaled in
           place. Returns None.
    """
    # BUG FIX: comparisons below used `is` on small ints, which tests
    # identity rather than equality (an implementation detail of CPython);
    # use == for value comparisons.
    assert type(x) is matrix and len(x.shape) == 2, \
        "laff.scal: vector x must be a 2D numpy.matrix"
    if type(alpha) is matrix:
        m_alpha, n_alpha = alpha.shape
        assert isinstance(alpha, (int, float, complex)) or (m_alpha == 1 and n_alpha == 1), \
            "laff.scal: alpha must be a scalar or a 1 x 1 matrix"
    m_x, n_x = x.shape
    # BUG FIX: error message previously said "laff.copy" (copy/paste slip).
    assert m_x == 1 or n_x == 1, "laff.scal: x is not a vector"
    if m_x == 1:  # x is a row
        for i in range(n_x):
            x[0, i] = alpha * x[0, i]
    elif n_x == 1:  # x is a column
        for i in range(m_x):
            x[i, 0] = alpha * x[i, 0]
| StarcoderdataPython |
6660274 | <gh_stars>0
import time
import os
import numpy
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
# Command-line flags consumed by main() through absl's global FLAGS object.
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf', 'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
flags.DEFINE_string('output', './2062_output.png', 'path to output image')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
flags.DEFINE_string('image', './data/images/test/2062.png', 'path to input image')
def main(_argv):
    """Run YOLOv3 detection over every image in data/images/validation.

    For each image the raw detections are logged, a set of higher-level
    "concept" tags (traffic, animal, crowd, ...) is derived from the
    detected class names, and an annotated copy is written to data/output/.
    """
    # flags.DEFINE_string('image'+p, './data/images/'+p, 'path to input image')
    # flags.DEFINE_string('output', './output- '+p, 'path to output image')
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)
    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)
    yolo.load_weights(FLAGS.weights).expect_partial()
    logging.info('weights loaded')
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')
    # if FLAGS.tfrecord:
    #     dataset = load_tfrecord_dataset(
    #         FLAGS.tfrecord, FLAGS.classes, FLAGS.size)
    #     dataset = dataset.shuffle(512)
    #     img_raw, _label = next(iter(dataset.take(1)))
    # else:
    path = 'data/images/validation'
    for p in os.listdir(path):
        img_full_path = os.path.join(path, p)
        print(p)
        img_raw = tf.image.decode_image(open(img_full_path, 'rb').read(), channels=3)
        img = tf.expand_dims(img_raw, 0)
        img = transform_images(img, FLAGS.size)
        t1 = time.time()
        boxes, scores, classes, nums = yolo(img)
        t2 = time.time()
        logging.info('time: {}'.format(t2 - t1))
        ########################################################################
        logging.info('detections:')
        detected_objects = []
        for i in range(nums[0]):
            logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                               np.array(scores[0][i]),
                                               np.array(boxes[0][i])))
            logging.info('\t{}, {}, {}'.format(p, class_names[int(classes[0][i])], np.array(scores[0][i])))
            detected_objects.append(class_names[int(classes[0][i])])
        c = numpy.array(detected_objects)
        unique, counts = numpy.unique(c, return_counts=True)
        # BUG FIX: the truck count previously looked up "motorbike"
        # (np.where(unique == "motorbike")), which also crashed when truck was
        # detected without a motorbike.
        if "car" in unique and counts[int(np.where(unique == "car")[0])] > 5 or "motorbike" in unique and counts[
            int(np.where(unique == "motorbike")[0])] > 2 or "bus" in unique and counts[
            int(np.where(unique == "bus")[0])] > 2 or "truck" in unique and counts[
            int(np.where(unique == "truck")[0])] > 3:
            logging.info('\t{}, {}, {}'.format(p, "traffic", 1.0))
        if "bird" in unique:
            logging.info('\t{}, {}, {}'.format(p, "flying", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "nature", 1.0))
        # BUG FIX: chains like `if "cat" or "dog" ... in unique:` were always
        # true because a non-empty string literal is truthy; each name must be
        # tested for membership individually.
        if any(a in unique for a in ("cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe")):
            logging.info('\t{}, {}, {}'.format(p, "animal", 1.0))
        if any(a in unique for a in ("backpack", "handbag", "tie")):
            logging.info('\t{}, {}, {}'.format(p, "outfit", 1.0))
        # BUG FIX: `and "person"` tested a literal (always true) instead of
        # membership, and the identical block was duplicated.
        if "suitcase" in unique and "person" in unique:
            logging.info('\t{}, {}, {}'.format(p, "tourist", 1.0))
        if "aeroplane" in unique:
            logging.info('\t{}, {}, {}'.format(p, "flying", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "travel", 1.0))
        if any(a in unique for a in ("skateboard", "surfboard", "snowboard", "skis")):
            logging.info('\t{}, {}, {}'.format(p, "snow", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "cool", 1.0))
        if any(a in unique for a in ("laptop", "cell phone", "remote")):
            logging.info('\t{}, {}, {}'.format(p, "technology", 1.0))
        if any(a in unique for a in ("keyboard", "cell phone")):
            logging.info('\t{}, {}, {}'.format(p, "communication", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "connection", 1.0))
        if any(a in unique for a in ("toilet", "vase")):
            logging.info('\t{}, {}, {}'.format(p, "indoors", 1.0))
        if any(a in unique for a in ("chair", "sofa", "bed", "diningtable", "tvmonitor", "microwave", "oven",
                                     "toaster", "hair drier", "refrigerator", "toothbrush")):
            logging.info('\t{}, {}, {}'.format(p, "furniture", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "home", 1.0))
        if "cell phone" in unique:
            logging.info('\t{}, {}, {}'.format(p, "telephone", 1.0))
        if "boat" in unique:
            logging.info('\t{}, {}, {}'.format(p, "river", 1.0))
        if "bus" in unique or "traffic light" in unique or "car" in unique or "parking meter" in unique:
            logging.info('\t{}, {}, {}'.format(p, "street", 1.0))
        if "bus" in unique or "traffic light" in unique or "car" in unique or "truck" in unique or "train" in unique or "fire hydrant" in unique or "stop sign" in unique:
            logging.info('\t{}, {}, {}'.format(p, "road", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "city", 1.0))
            logging.info('\t{}, {}, {}'.format(p, "outdoors", 1.0))
        if "stop sign" in unique:
            logging.info('\t{}, {}, {}'.format(p, "sign", 1.0))
        if "clock" in unique:
            logging.info('\t{}, {}, {}'.format(p, "time", 1.0))
        if "bus" in unique:
            logging.info('\t{}, {}, {}'.format(p, "travel", 1.0))
        if "bottle" in unique:
            logging.info('\t{}, {}, {}'.format(p, "water", 1.0))
        for obj_cnt in zip(unique, counts):
            if obj_cnt[0] == "person":
                # BUG FIX: the original if/elif chain made every branch after
                # the first unreachable (a detected person always has count
                # > 0); each threshold is now checked independently.
                if obj_cnt[1] > 0:
                    logging.info('\t{}, {}, {}'.format(p, "human", 1.0))
                if obj_cnt[1] > 2:
                    logging.info('\t{}, {}, {}'.format(p, "people", 1.0))
                if 3 < obj_cnt[1] < 6:
                    logging.info('\t{}, {}, {}'.format(p, "meeting", 1.0))
                    logging.info('\t{}, {}, {}'.format(p, "group", 1.0))
                if obj_cnt[1] >= 6:
                    logging.info('\t{}, {}, {}'.format(p, "crowd", 1.0))
            if obj_cnt[0] == "car" and obj_cnt[1] > 4:
                logging.info('\t{}, {}, {}'.format(p, "rally", 1.0))
            if obj_cnt[0] == "aeroplane" and obj_cnt[1] > 3:
                logging.info('\t{}, {}, {}'.format(p, "aircraft", 1.0))
                logging.info('\t{}, {}, {}'.format(p, "airplane", 1.0))
        img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        out_img = "data/output/out_" + p
        cv2.imwrite(out_img, img)
        logging.info('output saved to: {}'.format(out_img))
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # app.run always raises SystemExit; swallow it so cleanup below runs.
        pass
    # Keep any OpenCV windows open until a key is pressed, then clean up.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| StarcoderdataPython |
5024130 | <filename>augmentations_tuner/fastautoaugment/__init__.py
from .search import augsearch | StarcoderdataPython |
1905095 | <filename>test_mtgp.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 29 12:47:44 2021
@author: apple
"""
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
# Training inputs: 100 evenly spaced points on [0, 1].
train_x = torch.linspace(0, 1, 100)
# Two correlated tasks -- a noisy sine and a noisy cosine of the same
# inputs -- stacked into a (100, 2) target tensor.
train_y = torch.stack([
    torch.sin(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
    torch.cos(train_x * (2 * math.pi)) + torch.randn(train_x.size()) * 0.2,
], -1)
class MultitaskGPModel(gpytorch.models.ExactGP):
    """Exact GP over two correlated outputs.

    A shared RBF kernel models covariance over the inputs while a rank-1
    inter-task covariance couples the two tasks.
    """
    def __init__(self, train_x, train_y, likelihood):
        super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
        # Independent constant mean per task.
        self.mean_module = gpytorch.means.MultitaskMean(
            gpytorch.means.ConstantMean(), num_tasks=2
        )
        # RBF data kernel combined with a rank-1 task covariance.
        self.covar_module = gpytorch.kernels.MultitaskKernel(
            gpytorch.kernels.RBFKernel(), num_tasks=2, rank=1
        )
    def forward(self, x):
        # Returns a joint multivariate normal over both tasks at x.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
model = MultitaskGPModel(train_x, train_y, likelihood)
# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 2 if smoke_test else 50
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
    # Standard gradient step on the negative marginal log likelihood.
    optimizer.zero_grad()
    output = model(train_x)
    loss = -mll(output, train_y)
    loss.backward()
    print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
    optimizer.step()
# Set into eval mode
model.eval()
likelihood.eval()
# Initialize plots
f, (y1_ax, y2_ax) = plt.subplots(1, 2, figsize=(8, 3))
# Make predictions
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    test_x = torch.linspace(0, 1, 51)
    predictions = likelihood(model(test_x))
    mean = predictions.mean
    lower, upper = predictions.confidence_region()
# This contains predictions for both tasks, flattened out
# The first half of the predictions is for the first task
# The second half is for the second task
# Plot training data as black stars
y1_ax.plot(train_x.detach().numpy(), train_y[:, 0].detach().numpy(), 'k*')
# Predictive mean as blue line
y1_ax.plot(test_x.numpy(), mean[:, 0].numpy(), 'b')
# Shade in confidence
y1_ax.fill_between(test_x.numpy(), lower[:, 0].numpy(), upper[:, 0].numpy(), alpha=0.5)
y1_ax.set_ylim([-3, 3])
y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y1_ax.set_title('Observed Values (Likelihood)')
# Plot training data as black stars
y2_ax.plot(train_x.detach().numpy(), train_y[:, 1].detach().numpy(), 'k*')
# Predictive mean as blue line
y2_ax.plot(test_x.numpy(), mean[:, 1].numpy(), 'b')
# Shade in confidence
y2_ax.fill_between(test_x.numpy(), lower[:, 1].numpy(), upper[:, 1].numpy(), alpha=0.5)
y2_ax.set_ylim([-3, 3])
y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y2_ax.set_title('Observed Values (Likelihood)')
| StarcoderdataPython |
1923941 | import argparse
from models import run_full_precision_mlp, run_binary_concept_mlp, run_binary_xgemm_mlp
def parse_arguments():
    """
    Parse command line arguments and return them.

    Exactly one of the three model-selection flags must be given.

    :return: parsed argparse.Namespace
    :raises RuntimeError: if zero or more than one model flag is selected
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--full_precision', help='trains full precision model', action="store_true"
    )
    parser.add_argument(
        '--binary_concept', help='trains binary proof-of-concept model', action="store_true"
    )
    parser.add_argument(
        '--binary_xgemm', help='trains binary with xnor model', action="store_true"
    )
    parser.add_argument(
        '--batch_size', help='number of input per batch (used in training, validating and testing)', type=int, default=256
    )
    parser.add_argument(
        '--learning_rate', help='starting traning learning rate', type=float, default=1e-2
    )
    parser.add_argument(
        '--steps', help='number of steps', type=int, default=1000
    )
    parser.add_argument(
        '--display_step', help='step used for logging', type=int, default=100
    )
    parser.add_argument(
        '--hidden_size', help='number of neurons in hidden layers', type=int, default=2000
    )
    args = parser.parse_args()

    # Enforce that exactly one model flag was selected; booleans sum as 0/1,
    # which replaces the previous any()/len(list-comprehension) counting.
    selected = sum((args.full_precision, args.binary_concept, args.binary_xgemm))
    if selected == 0:
        raise RuntimeError('Select some model to train')
    if selected > 1:
        raise RuntimeError('Too many models selected, select one')
    return args
def log_args(args):
    """Print the parsed run configuration as '[LOG] ...' lines."""
    # Resolve the human-readable model name from the (mutually exclusive) flags.
    if args.full_precision:
        model_name = "FULL PRECISION"
    elif args.binary_concept:
        model_name = "BINARY (proof-of-concept)"
    else:
        model_name = "BINARY XGEMM"
    print(
        "[LOG] network model: {}\n"
        "[LOG] batch size: {}\n"
        "[LOG] learning rate: {}\n"
        "[LOG] number of neurons in hidden layers: {}\n"
        "[LOG] training steps {} (report every {} steps)".format(
            model_name,
            args.batch_size,
            args.learning_rate,
            args.hidden_size,
            args.steps,
            args.display_step,
        )
    )
if __name__ == '__main__':
    # parse and log arguments (parse_arguments guarantees exactly one model flag)
    args = parse_arguments()
    log_args(args)
    # Shared keyword arguments for all three runner variants.
    kwargs = dict(
        learning_rate=args.learning_rate,
        num_steps=args.steps,
        batch_size=args.batch_size,
        display_step=args.display_step,
        hidden_size=args.hidden_size
    )
    # Dispatch to the selected model; only one branch can fire.
    if args.full_precision:
        run_full_precision_mlp(**kwargs)
    if args.binary_concept:
        run_binary_concept_mlp(**kwargs)
    if args.binary_xgemm:
        run_binary_xgemm_mlp(**kwargs)
| StarcoderdataPython |
1708378 | import numpy as np
from mcfly.models.base_hyperparameter_generator import generate_base_hyperparameter_set, \
get_regularization
def test_regularization_is_float():
    """ Regularization should be a float. """
    reg = get_regularization(0, 5)
    # np.float was a deprecated alias for the builtin float and was removed in
    # NumPy 1.24; checking against float keeps this test working on modern
    # NumPy (np.float64 is a subclass of float, so this also accepts it).
    assert isinstance(reg, float), "Expected different type."
def test_regularization_0size_interval():
    """ Regularization from zero size interval [2,2] should be 10^-2. """
    # With low == high the sampled exponent is forced to 2, so the rate is exact.
    assert get_regularization(2, 2) == 0.01
def test_base_hyper_parameters_reg():
    """ Base hyper parameter set should contain regularization. """
    params = generate_base_hyperparameter_set(
        low_lr=1, high_lr=4, low_reg=1, high_reg=3)
    assert 'regularization_rate' in params
| StarcoderdataPython |
3506099 | <gh_stars>1-10
# MIT License
#
# Copyright(c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sklearn.preprocessing as skpp
import pandas as pd
def create_labels_from_y_values(y_values):
    """
    Return y labels and labels object

    Parameters
    ----------
    y_values: numpy array

    Returns
    -------
    labelled y_values, Label encoder
    """
    # Fit an encoder on the raw values and keep it so callers can
    # inverse-transform predictions later.
    encoder = skpp.LabelEncoder()
    encoded = encoder.fit_transform(y_values)
    return encoded, encoder
def transform_values_into_categorical(df, categorical_vars):
    """
    Return df with categorized variables

    Parameters
    ----------
    df: Pandas dataframe
    categorical_vars: array with var names

    Returns
    -------
    Df with dummy columns for categorical variables
    """
    # Replace each categorical column by its one-hot dummies, appended after
    # the remaining columns (prefix keeps dummy names traceable to the source).
    for column in categorical_vars:
        dummies = pd.get_dummies(df[column], prefix=column)
        df = pd.concat((df.drop(columns=[column]), dummies), axis=1)
    return df
def normalize_numeric_variables(df, numeric_vars):
    """
    Return df with normalized variables

    Each listed column is replaced by its z-score:
    (value - column mean) / column standard deviation (pandas sample std,
    ddof=1). A zero-variance column is only centred, to avoid division by 0.

    Parameters
    ----------
    df: Pandas dataframe
    numeric_vars: array with var names

    Returns
    -------
    Df with normalized columns for numerical variables
    """
    # This was an unimplemented stub returning None even though the docstring
    # promised a dataframe; implement the promised behaviour. Work on a copy
    # so the caller's frame is not mutated (matches the other transforms here).
    df = df.copy()
    for var in numeric_vars:
        centered = df[var] - df[var].mean()
        std = df[var].std()
        df[var] = centered / std if std else centered
    return df
def create_new_column_with_difference_between_2(df, column1, column2):
    """
    Return df with new delta column: column1 - column2

    Parameters
    ----------
    df: Pandas dataframe
    column1: Column string
    column2: Column string

    Returns
    -------
    Df with added delta column
    """
    # Note: the column is added in place on the passed-in frame.
    delta_name = 'delta_' + column1 + "_" + column2
    df[delta_name] = df[column1] - df[column2]
    return df
| StarcoderdataPython |
1884163 | <filename>emd/__init__.py
"""
Earth Mover's Distance in Python
"""
from emd import emd
| StarcoderdataPython |
8007498 | import bpy
import bpy_extras
from mathutils import Matrix, Vector
import numpy as np
import errno
from pathlib import Path
def MakeDir(path):
    """Create *path* and any missing parents; a pre-existing directory is fine.

    :param path: pathlib.Path to create
    :raises OSError: if creation fails for any other reason, including *path*
        existing as a non-directory (FileExistsError)
    """
    # mkdir(exist_ok=True) reproduces the old try/except-errno.EEXIST dance:
    # it suppresses the error only when the existing path is a directory.
    path.mkdir(parents=True, exist_ok=True)
# This code is from
# http://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
def GetIntrinsics(cam):
    """Build the 3x3 intrinsic calibration matrix K of a Blender camera.

    (Despite the original comment, this returns K alone, not the full 3x4
    projection; see GetCameraProjectionMatrix for K*RT.)
    """
    camd = cam.data
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camd.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else: # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
    # Parameters of intrinsic calibration matrix K:
    # focal lengths in pixels, principal point at the image centre
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0 # only use rectangular pixels
    K = Matrix(
        ((alpha_u, skew, u_0),
        ( 0 , alpha_v, v_0),
        ( 0 , 0, 1 )))
    return K
def GetExtrinsics(cam):
    """Get camera rotation and translation matrices of a Blender camera.

    Returns the 3x4 [R|t] transform from world coordinates into computer
    vision camera coordinates.

    NOTE(review): `Matrix * Vector` / `Matrix * Matrix` below is the
    pre-Blender-2.80 mathutils operator; 2.80+ uses `@` — confirm the target
    Blender version before reusing.
    """
    # There are 3 coordinate systems involved:
    #    1. The World coordinates: "world"
    #       - right-handed
    #    2. The Blender camera coordinates: "bcam"
    #       - x is horizontal
    #       - y is up
    #       - right-handed: negative z look-at direction
    #    3. The desired computer vision camera coordinates: "cv"
    #       - x is horizontal
    #       - y is down (to align to the actual pixel coordinates
    #         used in digital images)
    #       - right-handed: positive z look-at direction
    # bcam stands for blender camera
    R_bcam2cv = Matrix(
        ((1, 0, 0),
        (0, -1, 0),
        (0, 0, -1)))
    # Transpose since the rotation is object rotation,
    # and we want coordinate rotation
    # R_world2bcam = cam.rotation_euler.to_matrix().transposed()
    # T_world2bcam = -1*R_world2bcam * location
    #
    # Use matrix_world instead to account for all constraints
    location, rotation = cam.matrix_world.decompose()[0:2]
    R_world2bcam = rotation.to_matrix().transposed()
    # Convert camera location to translation vector used in coordinate changes
    # T_world2bcam = -1*R_world2bcam*cam.location
    # Use location from matrix_world to account for constraints:
    T_world2bcam = -1*R_world2bcam * location
    # Build the coordinate transform matrix from world to computer vision camera
    R_world2cv = R_bcam2cv*R_world2bcam
    T_world2cv = R_bcam2cv*T_world2bcam
    # put into 3x4 matrix
    RT = Matrix((
        R_world2cv[0][:] + (T_world2cv[0],),
        R_world2cv[1][:] + (T_world2cv[1],),
        R_world2cv[2][:] + (T_world2cv[2],)
    ))
    return RT
def GetCameraProjectionMatrix(cam):
    """Return (P, K, RT): the full 3x4 projection P = K*RT plus its factors."""
    intrinsics = GetIntrinsics(cam)
    extrinsics = GetExtrinsics(cam)
    return intrinsics * extrinsics, intrinsics, extrinsics
def GetRenderingResolution():
    """Return the effective render size (width, height) in pixels,
    after applying the scene's resolution_percentage."""
    render = bpy.context.scene.render
    factor = render.resolution_percentage / 100
    return render.resolution_x * factor, render.resolution_y * factor
def _NumpyToCvMatrix(matrix, name, indentation=0):
""" Format a matrix as an YML string.
:param matrix: Numpy matrix. dtype=float64 is assumed for now
:param name: YML name
:param indentation: number of spaces to indent
:return: YML string
"""
indent = " " * indentation
s = (indent + "{0}: !!opencv-matrix\n" +
indent + " rows: {1}\n" +
indent + " cols: {2}\n" +
indent + " dt: d\n" +
indent + " data: {3}\n").format(name, matrix.shape[0], matrix.shape[1], matrix.flatten().tolist())
return s
def WriteCamera(cam, cam_filename):
    """ Write camera intrinsics and extrinsics to a .yml file
    :param cam Blender camera object
    :param cam_filename Output file
    """
    K = GetIntrinsics(cam)
    P_gt = GetExtrinsics(cam)
    sz = GetRenderingResolution()
    # 'with' guarantees the handle is closed even if a write raises
    # (the previous open()/close() pair leaked it on error).
    with open(cam_filename, "w") as f:
        f.write("%YAML:1.0\n---\n")
        f.write("image_size: {0}\n".format([int(sz[0]), int(sz[1])]))
        f.write(_NumpyToCvMatrix(np.array(K), "K"))
        # Zero distortion coefficients: the synthetic camera is undistorted.
        f.write(_NumpyToCvMatrix(np.zeros((1, 5)), "d"))
        f.write(_NumpyToCvMatrix(np.array(P_gt), "P_gt"))
def WriteInfoFile(cam, pattern, fname):
    """ Write chessboard corners, as observed by a camera to an .info.yml file
    :param cam Blender camera object doing the observations
    :param pattern ChessboardPattern object
    :param fname Output file
    """
    # De-homogenise each projected corner (divide x and y by w) and flatten
    # into [x0, y0, x1, y1, ...] as expected by the YML consumer.
    flat_corners = []
    for c in pattern.GetInnerCorners2D(cam.name):
        flat_corners.append(c[0] / c[2])
        flat_corners.append(c[1] / c[2])
    # 'with' guarantees the handle is closed even if a write raises
    # (the previous open()/close() pair leaked it on error).
    with open(fname, "w") as f:
        f.write("%YAML:1.0\n---\n")
        f.write("pattern_size: [{0}, {1}]\n".format(pattern.inner_cols, pattern.inner_rows))
        f.write("side_length: {0}\n".format(pattern.side_length))
        f.write("chessboard_corners: {0}\n".format(flat_corners))
| StarcoderdataPython |
3348711 | """Fancy style for fonts with Powerline symbols."""
from prompt_toolkit.token import Token
# Token -> style-string mapping for the tab bar: grey (#bbb) for inactive
# tabs, cyan (#0bb) for the active one; the .Text tokens invert bg/fg.
style = {
    Token.Tabs.Tab: '#bbb',
    Token.Tabs.Tab.Text: 'bg:#bbb #222',
    Token.Tabs.Tab.Active: '#0bb',
    Token.Tabs.Tab.Active.Text: 'bg:#0bb #222',
}
def _tab_template(tab_style):
"""Helper for tab templates given a style (tab active)."""
return [(tab_style, '\ue0ba'), (tab_style.Text, '[{index}] {tab.title}'), (tab_style, '\ue0b8')]
# Renderable fragments: Powerline-capped tab entries plus the prompt line.
templates = {
    'tab': _tab_template(Token.Tabs.Tab),
    'tab.active': _tab_template(Token.Tabs.Tab.Active),
    'prompt': [(Token.Prompt.Text, 'tosh'), (Token.Prompt, '\ue0b4 ')]
}
| StarcoderdataPython |
9604934 | <filename>src/python/data_combine.py
from pathlib import Path
import pandas as pd
import numpy as np
from scipy.sparse import load_npz, csc_matrix, hstack, save_npz
import os
class Combiner:
    """Merge click, semantic and syntax features for (query, doc) samples
    into one sparse feature matrix, aligned to the sample_id order of
    data/config/samples.tsv."""

    def __init__(self):
        # All inputs and outputs live under ./data.
        self.prefix = Path("data")
        dtypes = {"sample_id": "int", "query_id": "int", "doc_id": "int", "label": "int"}
        # Master relation table: one row per sample with its query/doc/label.
        self.relation_df = pd.read_csv(self.prefix / "config/samples.tsv", sep="\t", dtype=dtypes)
        # Feature frames start as (query_id, doc_id)-indexed shells that the
        # combine_* methods progressively merge feature columns into.
        self.semantic_features = self.relation_df.copy(deep=True).drop(columns=["label", "sample_id"]).set_index(["query_id", "doc_id"])
        self.syntax_features = self.relation_df.copy(deep=True).drop(columns=["label", "sample_id"]).set_index(["query_id", "doc_id"])
        # Filled in by combine().
        self.feature_matrix = None
        self.feature_names = []

    def combine(self):
        """Assemble the full feature matrix.

        Merges semantic + syntax features per (query_id, doc_id), reattaches
        the label, reorders rows by sample_id and prepends the sparse click
        statistics. Returns self so calls can be chained with save().
        """
        self.combine_semantic_features()
        self.combine_syntax_features()
        relation_df = self.relation_df.copy(deep=True).drop(columns=["label"]).set_index(["query_id", "doc_id"])
        dense_features = relation_df.merge(self.semantic_features, left_index=True, right_index=True)
        dense_features = dense_features.merge(self.syntax_features, left_index=True, right_index=True)
        # Re-key by sample_id so the row order matches the click matrix rows.
        dense_features = dense_features.set_index("sample_id").sort_index()
        relation_df = self.relation_df.copy(deep=True).drop(columns=["doc_id"]).set_index(["sample_id"])
        dense_features = dense_features.merge(relation_df, left_index=True, right_index=True)
        idx = dense_features.index.values
        input_path = self.prefix / "click/click_statistic.npz"
        print("Loading click statistic from '{}'...".format(input_path), end="")
        click_statistic = load_npz(input_path)
        click_statistic_header = np.load(self.prefix / "click/click_statistic_header.npy")
        # Sanity check: column 0 of the click matrix holds the sample_id and
        # must line up with the sorted dense-feature index.
        assert np.all(click_statistic[idx].getcol(0).data.astype(np.int) == dense_features.sort_index().index.values)
        print("OK\n Merging with semantic and syntax data...", end="")
        # Sparse click columns first, then the dense feature columns.
        self.feature_matrix = hstack((click_statistic[idx], csc_matrix(dense_features)))
        self.feature_names = np.hstack((click_statistic_header, dense_features.columns.values))
        print("OK")
        return self

    def save(self):
        """Write the combined matrix (.npz) and column names (.npy) to data/."""
        matrix_output_path = str(self.prefix / "feature_matrix.npz")
        save_npz(matrix_output_path, self.feature_matrix)
        print("Feature matrix saved to {}".format(matrix_output_path))
        names_output_path = str((self.prefix / "feature_names.npy"))
        np.save(names_output_path, self.feature_names)
        print("Feature names saved to {}".format(names_output_path))

    def combine_semantic_features(self):
        """Merge every TSV under data/semantic into self.semantic_features,
        prefixing columns with the file-derived embedding name."""
        prefix = self.prefix / "semantic"
        for name in os.listdir(prefix):
            input_path = prefix / name
            print("Loading semantic features from '{}'...".format(input_path), end="")
            dtypes = {"query_id": "int32", "doc_id": "int32"}
            features_df = pd.read_csv(prefix / name,
                                      sep="\t", dtype=dtypes).set_index(["query_id", "doc_id"])
            # Filenames look like '<model>_query_doc_embedding_similarity...';
            # use the leading model part as the column prefix.
            column_prefix = name[:name.find("_query_doc_embedding_similarity")] + "_"
            features_df.rename(columns={x: column_prefix + x for x in features_df.columns}, inplace=True)
            print("OK\n Merging with other...", end="")
            self.semantic_features = self.semantic_features.merge(features_df, left_index=True, right_index=True)
            print("OK")

    def combine_syntax_features(self):
        """Merge each data/syntax/<name>/scores.tsv into self.syntax_features,
        prefixing columns with the directory name."""
        prefix = self.prefix / "syntax"
        for name in os.listdir(prefix):
            input_path = prefix / name / "scores.tsv"
            dtypes = {"query_id": "int32", "doc_id": "int32"}
            print("Loading syntax features from '{}'...".format(input_path), end="")
            features_df = pd.read_csv(input_path,
                                      sep="\t", dtype=dtypes).set_index(["query_id", "doc_id"])
            features_df.rename(columns={x: name + "_" + x for x in features_df.columns}, inplace=True)
            print("OK\n Merging with other...", end="")
            self.syntax_features = self.syntax_features.merge(features_df, left_index=True, right_index=True)
            print("OK")
def main():
    """Build the combined feature matrix and persist it to disk."""
    Combiner().combine().save()
# Script entry point: run the full combine-and-save pipeline.
if __name__ == "__main__":
    main()
4828185 | import re
from pathlib import Path
import abc
from typing import Set
from rivals_workshop_assistant import paths
from .sprite_generation import generate_sprite_for_file_name
class Asset(abc.ABC):
    """A resource referenced from script text by a string (e.g. a sprite name).

    Subclasses implement discovery (get_from_text) and on-disk creation
    (supply).
    """

    def __init__(self, asset_string: str):
        # The raw string used to reference this asset in script text.
        self.asset_string = asset_string

    @classmethod
    def get_from_text(cls, text) -> Set["Asset"]:
        """Return every asset of this type referenced in *text*."""
        raise NotImplementedError

    def supply(self, root_dir: Path) -> None:
        """Create the asset under *root_dir* if it does not already exist."""
        raise NotImplementedError

    def __eq__(self, other):
        # Fix the eq/hash contract: __hash__ below already incorporates the
        # class name, so __eq__ must also distinguish classes, and comparing
        # against a non-Asset must not raise AttributeError.
        if not isinstance(other, Asset):
            return NotImplemented
        return type(self) is type(other) and self.asset_string == other.asset_string

    def __hash__(self):
        return hash(self.__class__.__name__ + self.asset_string)
class Sprite(Asset):
    """An image asset referenced via sprite_get("...") in script text."""

    # Matches sprite_get("name") / sprite_get('name'), capturing the name.
    _pattern = r"""sprite_get\(\s*["']([^)"']+?)["')]\s*\)"""

    @classmethod
    def get_from_text(cls, text) -> Set["Sprite"]:
        matches = re.findall(pattern=cls._pattern, string=text)
        return {Sprite(match) for match in set(matches)}

    def supply(self, root_dir: Path):
        file_name = self.asset_string
        if not file_name.endswith(".png"):
            file_name += ".png"
        target = root_dir / paths.SPRITES_FOLDER / file_name
        if target.exists():
            return
        # Only write when a sprite could actually be generated for this name.
        sprite = generate_sprite_for_file_name(file_name)
        if sprite:
            target.parent.mkdir(parents=True, exist_ok=True)
            sprite.save(target.as_posix())
# Registry of all concrete Asset subclasses to scan script text for.
ASSET_TYPES = [Sprite]
| StarcoderdataPython |
11386260 | from PyQt5 import QtGui
from PyQt5.QtCore import QPoint
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QWidget, QStyleOption, QStyle
class PMoveWidget(QWidget):
    """A widget that drags its *parent* window when the mouse is dragged on it
    (typical for frameless windows with a custom title bar)."""
    # Class-level default; overwritten per instance in __init__.
    _parent_ = None
    def __init__(self, parent: QWidget, **kwargs):
        super().__init__(**kwargs)
        self._parent_ = parent
        # Dragging is enabled by default; toggled via setMovable().
        self._movable_ = True
    def mousePressEvent(self, event):
        # Record the press position; mouseMoveEvent computes deltas from it.
        # NOTE(review): oldPos is stored as an ad-hoc attribute on the parent
        # widget — confirm nothing else on the parent uses that name.
        if self._movable_:
            self._parent_.oldPos = event.globalPos()
    def mouseMoveEvent(self, event):
        # Move the parent by the mouse displacement since the last event.
        if self._movable_:
            delta = QPoint(event.globalPos() - self._parent_.oldPos)
            self._parent_.move(self._parent_.x() + delta.x(), self._parent_.y() + delta.y())
            self._parent_.oldPos = event.globalPos()
    def setMovable(self, movable: bool):
        # Enable/disable drag-to-move behaviour.
        self._movable_ = movable
    def isMovable(self) -> bool:
        return self._movable_
    def paintEvent(self, a0: QtGui.QPaintEvent) -> None:
        # Required so Qt style sheets apply to this QWidget subclass.
        opt = QStyleOption()
        opt.initFrom(self)
        painter = QPainter(self)
        self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.