id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
134514 | # -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from string import Formatter as StringFormatter
from six.moves.urllib.parse import urlencode
class BaseManager(object):
    """Base class for Endpoint Manager objects."""

    # URL template with ``{field}`` placeholders, e.g. '{endpoint}/report/{id}'.
    url = ''

    def __init__(self, api_client):
        # Client used by subclasses to actually issue the HTTP requests.
        self.api_client = api_client
        self._formatter = StringFormatter()

    def _get_format_kwargs(self, **kwargs):
        """Extract from *kwargs* the values needed to format ``self.url``.

        Fields absent from *kwargs* (or falsy) default to ''.  The
        'endpoint' field is supplied separately by :meth:`get_url` and is
        therefore removed from the result.
        """
        # Formatter.parse yields (literal_text, field_name, spec, conversion);
        # field_name is None for trailing literal text.  A None key must be
        # skipped, otherwise ``self.url.format(**output)`` raises TypeError
        # ("keywords must be strings") whenever the template does not end
        # with a replacement field.
        fields = self._formatter.parse(self.url)
        output = {field[1]: '' for field in fields if field[1] is not None}
        for key in output:
            if kwargs.get(key):
                output[key] = kwargs[key]
        output.pop('endpoint', None)
        return output

    def get_url(self,
                endpoint,
                kwargs,
                authorized_args=None):
        """Returns the required url for a request against CloudKitty's API.

        :param endpoint: The endpoint on which the request should be done
        :type endpoint: str
        :param kwargs: kwargs that will be used to build the query (part after
                       '?' in the url) and to format the url.
        :type kwargs: dict
        :param authorized_args: The arguments that are authorized in url
                                parameters
        :type authorized_args: list
        """
        # A [] default argument would be shared between all calls; use None.
        authorized_args = authorized_args or []
        query_kwargs = {
            key: kwargs[key] for key in authorized_args
            if kwargs.get(key, None)
        }
        kwargs = self._get_format_kwargs(**kwargs)
        url = self.url.format(endpoint=endpoint, **kwargs)
        query = urlencode(query_kwargs)
        if query:
            url += '?' + query
        return url
| StarcoderdataPython |
103028 | <filename>koosli/decorators.py
# -*- coding: utf-8 -*-
from functools import update_wrapper
from flask import g, request, redirect, url_for, current_app, abort
def admin_required(user):
    '''Restrict a view to authenticated admin users.

    Anonymous users are redirected to the login page; authenticated
    non-admins get a 403.
    '''
    def decorator(view):
        def guarded(*args, **kwargs):
            if not user.is_authenticated():
                return redirect(url_for('user.login'))
            if not user.is_admin():
                abort(403)
            return view(*args, **kwargs)
        # Preserve the wrapped view's metadata (name, docstring, ...).
        return update_wrapper(guarded, view)
    return decorator
| StarcoderdataPython |
4836737 | <reponame>RubenvanHeusden/HFO-Robotkeeper<filename>example/test_keepers/lowlevelactionset.py
from actionset import ActionSet
import hfo
class LowLevelActionSet(ActionSet):
    """Low-level HFO action set: eight fixed-power dashes plus a no-op."""

    def __init__(self):
        ActionSet.__init__(self, action_set="low_level")
        # Dash at power 80 towards each of the eight compass directions.
        dash_angles = [0, 45, 90, 135, 180, -45, -90, -135]
        actions = [(hfo.DASH, 80, angle) for angle in dash_angles]
        actions.append((hfo.NOOP,))
        self._action_list = actions
| StarcoderdataPython |
1781629 | # Python - 3.6.0
def folding(a, b):
    """Count folding steps until one of the two sides reaches zero.

    Each step: take the smaller side off ``a`` and redistribute so the
    larger remainder comes first.  Note the subtraction is always taken
    from ``a`` (the first argument), so the function is not symmetric.
    """
    folds = 0
    while a > 0 and b > 0:
        smaller = min(a, b)
        leftover = a - smaller
        a, b = max(leftover, smaller), min(leftover, smaller)
        folds += 1
    return folds
| StarcoderdataPython |
3345073 | """
The const argument of add_argument() is used to hold constant values that are not read from the command line but are required for the
various ArgumentParser actions. The two common uses of it are:
1>
When add_argument() is called with action='store_const' or action='append_const'. These actions add the const value to one of the attributes
of the object returned by parse_args()
2>
When add_argument() is called with option strings (like -f or --foo) and nargs='?'.
This creates an optional argument that can be followed by zero or one command-line arguments.
When parsing the command line, if the option string is encountered with no command-line argument following it,
the value of const will be assumed instead.
"""
| StarcoderdataPython |
4842131 | #! /usr/bin/env python
from time import sleep, time
import rospy
from std_msgs.msg import String, Int32
from o2as_debug_monitor.msg import PressureSensoState
if __name__ == "__main__":
    # Initialize the ROS node
    rospy.init_node("test_debug_monitor")

    # Publishers feeding the o2as debug monitor state topics.
    set_id_pub = rospy.Publisher("/o2as_state/kitting_set_id", Int32, queue_size=1)
    task_pub = rospy.Publisher("/o2as_state/task", String, queue_size=1)
    subtask_pub = rospy.Publisher("/o2as_state/subtask", String, queue_size=1)
    operation_pub = rospy.Publisher("/o2as_state/operation", String, queue_size=1)
    pressure_pub = rospy.Publisher("/o2as_fastening_tools/screw_suctioned",
                                   PressureSensoState, queue_size=18)  # see issue 133

    def publish_text(publisher, text):
        # Helper: wrap *text* in a String message and publish it.
        msg = String()
        msg.data = text
        publisher.publish(msg)

    # Wait until debug monitor launch
    sleep(2)

    # Loop over set lists
    for round_no in range(1, 4):
        # Start round
        set_msg = Int32()
        set_msg.data = round_no
        set_id_pub.publish(set_msg)
        publish_text(task_pub, "Task round {} started".format(round_no))
        # Loop over subtasks
        for sub_no in range(10):
            publish_text(subtask_pub, "Subtask {} started".format(sub_no))
            # Loop over operations
            for op_no in range(10):
                publish_text(operation_pub, "Operation {} started".format(op_no))
                # Do some operation
                sleep(1)
                publish_text(operation_pub, "Operation {} finished".format(op_no))
            publish_text(subtask_pub, "Subtask {} finished".format(sub_no))
| StarcoderdataPython |
3217442 | # This text game helps you determine which phone is best for you by asking a few questions. The program uses your answers to calculate the phone that best suits you.
import random
def main():
Name= raw_input("Your name:")
phone= raw_input ("Your current phone:")
sp= smartphone()
cp= currentPhone()
p=output(Name,phone,cp,sp)
print p
def calculation(afford, use, superRandom, password):
    """Combine the four questionnaire scores into one total score.

    The original returned the raw argument tuple, which smartphone() then
    compared against integers -- meaningless under Python 2's mixed-type
    ordering (a tuple always compares greater than an int, so every run
    hit the final branch).  The thresholds in smartphone() only make sense
    against the sum of the individual scores.
    """
    return afford + use + superRandom + password
def smartphone():
    """Ask the scoring questions and map the total score to a phone model."""
    afford = money()
    use = rely()
    superRandom = random1()
    password = security()  # restored: the call was anonymised to <PASSWORD>() in the dump
    # The question helpers can fall through and return None on invalid
    # input; substitute a random score so the sum stays numeric.
    scores = [value if value is not None else random.randint(1, 4)
              for value in (afford, use, superRandom, password)]
    phoneModel = calculation(*scores) if not isinstance(
        calculation(*scores), tuple) else sum(calculation(*scores))
    if phoneModel <= 3:
        return "Nokia 3310"
    if phoneModel <= 5:
        return " Iphone6s"
    if phoneModel <= 8:
        return " Samsung s7"
    if phoneModel <= 10:
        return " Samsung a7"
    # The original had a gap at scores in (11, 12] that returned None;
    # close it so every score maps to a model.
    if phoneModel <= 12:
        return " Samsung Note5"
    return "Samsung Note4"
def rely():
    """Ask what the phone is mainly used for; return a score 1-4.

    Invalid input falls back to a random score (the original computed
    random.randint(1, 4) in the else branch but never returned it,
    so the function silently returned None).
    """
    answer = raw_input("""
    What do typically rely on your smartphone?
    a. playing games
    b. calling people
    c. for work
    d. use for SNS(social network site)
    (type a,b,c,d):
    """)
    scores = {"a": 1, "b": 2, "c": 3, "d": 4}
    if answer in scores:
        return scores[answer]
    return random.randint(1, 4)
def money():
    """Ask about budget; return a score (1-3, or 1-4 on invalid input).

    The original condition ``money == "a" and "b" and "c" and "d"`` only
    matched answer "a" (chained ``and`` of truthy literals), and the else
    branch computed a random number without returning it, yielding None.
    """
    answer = raw_input("""
    How much you can afford to spend on a smartphone?
    a. 0.0 baht
    b. no worried my parent can afford for me
    c. money is not my problem
    d. I rather got it for free
    (type a,b,c,d):
    """)
    if answer in ("a", "b", "c", "d"):
        return random.randint(1, 3)
    return random.randint(1, 4)
def security():
    """Ask about preferred phone security; return a score.

    Intended scoring (recovered from the broken boolean expressions):
    fingerprint or pattern ("a"/"d") scores 3, the other valid answers
    score 0, anything else gets a random fractional score.  The original
    ``if not security == "a" or security == "d"`` parsed as
    ``(not security == "a") or ...`` and so returned 0 for everything but
    "a", and ``security == "a" and "d"`` was truthy only for "a".
    """
    answer = raw_input("""
    What do you preferred for phone security
    a. fingerprint
    b. passwords
    c. none, simple slider
    d. a pattern
    (type a,b,c,d):
    """)
    if answer in ("a", "d"):
        return 3
    if answer in ("b", "c"):
        return 0
    return random.random()
def random1():
    """Ask how to weight randomness in the result; return a small score.

    The original ``if random1 == "a" or "b" or "c" or "d"`` was always
    truthy (truthy string literals in the ``or`` chain), making the else
    branch unreachable; restore the intended membership test.  The local
    also shadowed the function's own name -- renamed for clarity.
    """
    answer = raw_input("""
    How do you want to calculate your result?
    a. use random to select your phone
    b. I don't care
    c. don't use random
    d. Whatever
    (type a,b,c,d):
    """)
    if answer in ("a", "b", "c", "d"):
        return random.random() / 2
    return random.randint(1, 4)
def currentPhone():
    """Ask whether the current phone suits the user; True only for 'y'."""
    reply = raw_input("""
    Do you think your phone suit you?
    (type y or n):
    """)
    return reply == "y"
def output(Name, phone, cp, sp):
    """Build the final report text shown to the user."""
    template = """
    Hello {},
    You are currently using {}.
    This program will help you to decide to buy a phone that best suits you.
    I think my current phone suit me: {}
    From the test that you took
    The result is {}.
    """
    return template.format(Name, phone, cp, sp)
main()
| StarcoderdataPython |
194769 | <gh_stars>1-10
# Testbed to perform experiments in order to determine best values for
# the node numbers in LRU cache. Tables version.
from time import time
from tables import *
import tables

# Report which PyTables version the benchmark runs against.
print "PyTables version-->", tables.__version__

# Benchmark parameters: output file, number of tables (leaves), rows per table.
filename = "/tmp/junk-tables-100.h5"
NLEAVES = 2000
NROWS = 1000
class Particle(IsDescription):
    """Row schema used for every benchmark table."""
    name = StringCol(16, pos=1)      # 16-character String
    lati = Int32Col(pos=2)           # integer
    longi = Int32Col(pos=3)          # integer
    pressure = Float32Col(pos=4)     # float (single-precision)
    temperature = Float64Col(pos=5)  # double (double-precision)
def create_junk():
# Open a file in "w"rite mode
fileh = openFile(filename, mode = "w")
# Create a new group
group = fileh.createGroup(fileh.root, "newgroup")
for i in xrange(NLEAVES):
# Create a new table in newgroup group
table = fileh.createTable(group, 'table'+str(i), Particle,
"A table", Filters(1))
particle = table.row
print "Creating table-->", table._v_name
# Fill the table with particles
for i in xrange(NROWS):
# This injects the row values.
particle.append()
table.flush()
# Finally, close the file
fileh.close()
def modify_junk_LRU():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(5):
print "iter -->", j
for tt in fileh.walkNodes(group):
if isinstance(tt,Table):
pass
# for row in tt:
# pass
fileh.close()
def modify_junk_LRU2():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(20):
t1 = time()
for i in range(100):
# print "table-->", tt._v_name
tt = getattr(group,"table"+str(i))
# for row in tt:
# pass
print "iter and time -->", j+1, round(time()-t1,3)
fileh.close()
def modify_junk_LRU3():
fileh = openFile(filename,'a')
group = fileh.root.newgroup
for j in range(3):
t1 = time()
for tt in fileh.walkNodes(group, "Table"):
title = tt.attrs.TITLE
for row in tt:
pass
print "iter and time -->", j+1, round(time()-t1,3)
fileh.close()
# Toggle between plain runs and a profiled run of the benchmark.
if 1:
    #create_junk()
    #modify_junk_LRU() # uses the iterator version (walkNodes)
    #modify_junk_LRU2() # uses a regular loop (getattr)
    modify_junk_LRU3() # uses a regular loop (getattr)
else:
    # Profiling path: flip the condition above to profile modify_junk_LRU2().
    import profile, pstats
    profile.run('modify_junk_LRU2()', 'modify.prof')
    stats = pstats.Stats('modify.prof')
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats()
| StarcoderdataPython |
1736256 | <filename>lbry/wallet/network.py
import logging
import asyncio
import json
from time import perf_counter
from operator import itemgetter
from typing import Dict, Optional, Tuple
import aiohttp
from lbry import __version__
from lbry.error import IncompatibleWalletServerError
from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
from lbry.wallet.stream import StreamController
log = logging.getLogger(__name__)
class ClientSession(BaseClientSession):
    """JSON-RPC session to a single SPV wallet server.

    Tracks a running average of ``server.version`` round-trip times so the
    SessionPool can rank servers by speed, and keeps itself connected via
    :meth:`ensure_session`.
    """

    def __init__(self, *args, network, server, timeout=30, on_connect_callback=None, **kwargs):
        # network: owning Network instance; server: (host, port) tuple.
        self.network = network
        self.server = server
        super().__init__(*args, **kwargs)
        self._on_disconnect_controller = StreamController()
        self.on_disconnected = self._on_disconnect_controller.stream
        # Effectively unlimited frame size / error budget for this client.
        self.framer.max_size = self.max_errors = 1 << 32
        self.timeout = timeout
        self.max_seconds_idle = timeout * 2
        # Moving average of version-request round trips; None until measured.
        self.response_time: Optional[float] = None
        self.connection_latency: Optional[float] = None
        self._response_samples = 0
        # Requests currently in flight (used for load-aware server ranking).
        self.pending_amount = 0
        self._on_connect_cb = on_connect_callback or (lambda: None)
        self.trigger_urgent_reconnect = asyncio.Event()

    @property
    def available(self):
        # Usable once connected and at least one timing sample exists.
        return not self.is_closing() and self.response_time is not None

    @property
    def server_address_and_port(self) -> Optional[Tuple[str, int]]:
        if not self.transport:
            return None
        return self.transport.get_extra_info('peername')

    async def send_timed_server_version_request(self, args=(), timeout=None):
        """Send ``server.version`` and fold the round trip into the average."""
        timeout = timeout or self.timeout
        log.debug("send version request to %s:%i", *self.server)
        start = perf_counter()
        result = await asyncio.wait_for(
            super().send_request('server.version', args), timeout=timeout
        )
        current_response_time = perf_counter() - start
        # Incremental mean over all samples taken so far.
        response_sum = (self.response_time or 0) * self._response_samples + current_response_time
        self.response_time = response_sum / (self._response_samples + 1)
        self._response_samples += 1
        return result

    async def send_request(self, method, args=()):
        """Send an RPC request; only times out if the link has gone quiet."""
        self.pending_amount += 1
        log.debug("send %s%s to %s:%i", method, tuple(args), *self.server)
        try:
            if method == 'server.version':
                return await self.send_timed_server_version_request(args, self.timeout)
            request = asyncio.ensure_future(super().send_request(method, args))
            while not request.done():
                done, pending = await asyncio.wait([request], timeout=self.timeout)
                if pending:
                    log.debug("Time since last packet: %s", perf_counter() - self.last_packet_received)
                    # Only treat it as a timeout if *nothing* has arrived from
                    # the server within the window, not just this reply.
                    if (perf_counter() - self.last_packet_received) < self.timeout:
                        continue
                    log.info("timeout sending %s to %s:%i", method, *self.server)
                    raise asyncio.TimeoutError
                if done:
                    try:
                        return request.result()
                    except ConnectionResetError:
                        log.error(
                            "wallet server (%s) reset connection upon our %s request, json of %i args is %i bytes",
                            self.server[0], method, len(args), len(json.dumps(args))
                        )
                        raise
        except (RPCError, ProtocolError) as e:
            log.warning("Wallet server (%s:%i) returned an error. Code: %s Message: %s",
                        *self.server, *e.args)
            raise e
        except ConnectionError:
            log.warning("connection to %s:%i lost", *self.server)
            self.synchronous_close()
            raise
        except asyncio.CancelledError:
            log.info("cancelled sending %s to %s:%i", method, *self.server)
            # self.synchronous_close()
            raise
        finally:
            self.pending_amount -= 1

    async def ensure_session(self):
        # Handles reconnecting and maintaining a session alive
        # TODO: change to 'ping' on newer protocol (above 1.2)
        retry_delay = default_delay = 1.0
        while True:
            try:
                if self.is_closing():
                    await self.create_connection(self.timeout)
                    await self.ensure_server_version()
                    self._on_connect_cb()
                if (perf_counter() - self.last_send) > self.max_seconds_idle or self.response_time is None:
                    # Idle too long (or never timed): refresh the timing sample.
                    await self.ensure_server_version()
                retry_delay = default_delay
            except RPCError as e:
                await self.close()
                log.debug("Server error, ignoring for 1h: %s:%d -- %s", *self.server, e.message)
                retry_delay = 60 * 60
            except IncompatibleWalletServerError:
                await self.close()
                retry_delay = 60 * 60
                log.debug("Wallet server has an incompatible version, retrying in 1h: %s:%d", *self.server)
            except (asyncio.TimeoutError, OSError):
                await self.close()
                # Exponential backoff, capped at one minute.
                retry_delay = min(60, retry_delay * 2)
                log.debug("Wallet server timeout (retry in %s seconds): %s:%d", retry_delay, *self.server)
            try:
                # Sleep until the next health check, unless an urgent
                # reconnect is requested (e.g. connectivity restored).
                await asyncio.wait_for(self.trigger_urgent_reconnect.wait(), timeout=retry_delay)
            except asyncio.TimeoutError:
                pass
            finally:
                self.trigger_urgent_reconnect.clear()

    async def ensure_server_version(self, required=None, timeout=3):
        """Verify protocol compatibility; raise IncompatibleWalletServerError."""
        required = required or self.network.PROTOCOL_VERSION
        response = await asyncio.wait_for(
            self.send_request('server.version', [__version__, required]), timeout=timeout
        )
        if tuple(int(piece) for piece in response[0].split(".")) < self.network.MINIMUM_REQUIRED:
            raise IncompatibleWalletServerError(*self.server)
        return response

    async def create_connection(self, timeout=6):
        connector = Connector(lambda: self, *self.server)
        start = perf_counter()
        await asyncio.wait_for(connector.create_connection(), timeout=timeout)
        # TCP connect time, used together with response_time for ranking.
        self.connection_latency = perf_counter() - start

    async def handle_request(self, request):
        # Server-push notification: route it to the matching event stream.
        controller = self.network.subscription_controllers[request.method]
        controller.add(request.args)

    def connection_lost(self, exc):
        log.debug("Connection lost: %s:%d", *self.server)
        super().connection_lost(exc)
        # Reset timing stats so this session is not picked until re-measured.
        self.response_time = None
        self.connection_latency = None
        self._response_samples = 0
        self._on_disconnect_controller.add(True)
class Network:
    """Manages the wallet's connections to SPV servers.

    Keeps a SessionPool of candidate servers, always pointing ``client`` at
    the fastest available one, and exposes the blockchain RPC surface used
    by the rest of the wallet.
    """

    PROTOCOL_VERSION = __version__
    # Oldest wallet-server version this client will talk to.
    MINIMUM_REQUIRED = (0, 65, 0)

    def __init__(self, ledger):
        self.ledger = ledger
        self.session_pool = SessionPool(network=self, timeout=self.config.get('connect_timeout', 6))
        # Primary session; None while disconnected or switching servers.
        self.client: Optional[ClientSession] = None
        self.server_features = None
        self._switch_task: Optional[asyncio.Task] = None
        self.running = False
        self.remote_height: int = 0
        # Caps the number of concurrent retriable_call() invocations.
        self._concurrency = asyncio.Semaphore(16)
        self._on_connected_controller = StreamController()
        self.on_connected = self._on_connected_controller.stream
        self._on_header_controller = StreamController(merge_repeated_events=True)
        self.on_header = self._on_header_controller.stream
        self._on_status_controller = StreamController(merge_repeated_events=True)
        self.on_status = self._on_status_controller.stream
        # Maps server-push subscription methods to their event streams.
        self.subscription_controllers = {
            'blockchain.headers.subscribe': self._on_header_controller,
            'blockchain.address.subscribe': self._on_status_controller,
        }
        self.aiohttp_session: Optional[aiohttp.ClientSession] = None

    @property
    def config(self):
        return self.ledger.config

    async def switch_forever(self):
        """Background task: keep ``self.client`` pointed at the fastest server."""
        while self.running:
            if self.is_connected:
                # Block until the current primary drops, then pick a new one.
                await self.client.on_disconnected.first
                self.server_features = None
                self.client = None
                continue
            self.client = await self.session_pool.wait_for_fastest_session()
            log.info("Switching to SPV wallet server: %s:%d", *self.client.server)
            try:
                self.server_features = await self.get_server_features()
                self._update_remote_height((await self.subscribe_headers(),))
                self._on_connected_controller.add(True)
                log.info("Subscribed to headers: %s:%d", *self.client.server)
            except (asyncio.TimeoutError, ConnectionError):
                log.info("Switching to %s:%d timed out, closing and retrying.", *self.client.server)
                self.client.synchronous_close()
                self.server_features = None
                self.client = None

    async def start(self):
        self.running = True
        self.aiohttp_session = aiohttp.ClientSession()
        self._switch_task = asyncio.ensure_future(self.switch_forever())
        # this may become unnecessary when there are no more bugs found,
        # but for now it helps understanding log reports
        self._switch_task.add_done_callback(lambda _: log.info("Wallet client switching task stopped."))
        self.session_pool.start(self.config['default_servers'])
        self.on_header.listen(self._update_remote_height)

    async def stop(self):
        if self.running:
            self.running = False
            await self.aiohttp_session.close()
            self._switch_task.cancel()
            self.session_pool.stop()

    @property
    def is_connected(self):
        return self.client and not self.client.is_closing()

    def rpc(self, list_or_method, args, restricted=True):
        """Send an RPC; ``restricted=True`` pins it to the primary client."""
        session = self.client if restricted else self.session_pool.fastest_session
        if session and not session.is_closing():
            return session.send_request(list_or_method, args)
        else:
            self.session_pool.trigger_nodelay_connect()
            raise ConnectionError("Attempting to send rpc request when connection is not available.")

    async def retriable_call(self, function, *args, **kwargs):
        """Keep retrying *function* until it succeeds or the network stops."""
        async with self._concurrency:
            while self.running:
                if not self.is_connected:
                    log.warning("Wallet server unavailable, waiting for it to come back and retry.")
                    await self.on_connected.first
                await self.session_pool.wait_for_fastest_session()
                try:
                    return await function(*args, **kwargs)
                except asyncio.TimeoutError:
                    log.warning("Wallet server call timed out, retrying.")
                except ConnectionError:
                    pass
        raise asyncio.CancelledError()  # if we got here, we are shutting down

    def _update_remote_height(self, header_args):
        self.remote_height = header_args[0]["height"]

    def get_transaction(self, tx_hash, known_height=None):
        # use any server if its old, otherwise restrict to who gave us the history
        restricted = known_height in (None, -1, 0) or 0 > known_height > self.remote_height - 10
        return self.rpc('blockchain.transaction.get', [tx_hash], restricted)

    def get_transaction_batch(self, txids):
        # use any server if its old, otherwise restrict to who gave us the history
        return self.rpc('blockchain.transaction.get_batch', txids, True)

    def get_transaction_and_merkle(self, tx_hash, known_height=None):
        # use any server if its old, otherwise restrict to who gave us the history
        restricted = known_height in (None, -1, 0) or 0 > known_height > self.remote_height - 10
        return self.rpc('blockchain.transaction.info', [tx_hash], restricted)

    def get_transaction_height(self, tx_hash, known_height=None):
        restricted = not known_height or 0 > known_height > self.remote_height - 10
        return self.rpc('blockchain.transaction.get_height', [tx_hash], restricted)

    def get_merkle(self, tx_hash, height):
        restricted = 0 > height > self.remote_height - 10
        return self.rpc('blockchain.transaction.get_merkle', [tx_hash, height], restricted)

    def get_headers(self, height, count=10000, b64=False):
        restricted = height >= self.remote_height - 100
        return self.rpc('blockchain.block.headers', [height, count, 0, b64], restricted)

    # --- Subscribes, history and broadcasts are always aimed towards the master client directly

    def get_history(self, address):
        return self.rpc('blockchain.address.get_history', [address], True)

    def broadcast(self, raw_transaction):
        return self.rpc('blockchain.transaction.broadcast', [raw_transaction], True)

    def subscribe_headers(self):
        return self.rpc('blockchain.headers.subscribe', [True], True)

    async def subscribe_address(self, address, *addresses):
        addresses = list((address, ) + addresses)
        server_addr_and_port = self.client.server_address_and_port  # on disconnect client will be None
        try:
            return await self.rpc('blockchain.address.subscribe', addresses, True)
        except asyncio.TimeoutError:
            log.warning(
                "timed out subscribing to addresses from %s:%i",
                *server_addr_and_port
            )
            # abort and cancel, we can't lose a subscription, it will happen again on reconnect
            if self.client:
                self.client.abort()
            raise asyncio.CancelledError()

    def unsubscribe_address(self, address):
        return self.rpc('blockchain.address.unsubscribe', [address], True)

    def get_server_features(self):
        return self.rpc('server.features', (), restricted=True)

    def get_claims_by_ids(self, claim_ids):
        return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)

    def resolve(self, urls):
        return self.rpc('blockchain.claimtrie.resolve', urls)

    def claim_search(self, **kwargs):
        return self.rpc('blockchain.claimtrie.search', kwargs)

    async def new_resolve(self, server, urls):
        # HTTP (hub) variant of resolve; asks for protobuf-encoded results.
        message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
        async with self.aiohttp_session.post(server, json=message) as r:
            result = await r.json()
            return result['result']

    async def new_claim_search(self, server, **kwargs):
        # HTTP (hub) variant of claim_search; asks for protobuf-encoded results.
        kwargs['protobuf'] = True
        message = {"method": "claim_search", "params": kwargs}
        async with self.aiohttp_session.post(server, json=message) as r:
            result = await r.json()
            return result['result']
class SessionPool:
    """Owns one ClientSession (plus its keep-alive task) per known server."""

    def __init__(self, network: Network, timeout: float):
        self.network = network
        # session -> asyncio.Task running that session's ensure_session() loop.
        self.sessions: Dict[ClientSession, Optional[asyncio.Task]] = dict()
        self.timeout = timeout
        # Set whenever a session (re)connects; wakes wait_for_fastest_session().
        self.new_connection_event = asyncio.Event()

    @property
    def online(self):
        return any(not session.is_closing() for session in self.sessions)

    @property
    def available_sessions(self):
        return (session for session in self.sessions if session.available)

    @property
    def fastest_session(self):
        """Session with lowest latency estimate, weighted by pending load."""
        if not self.online:
            return None
        return min(
            [((session.response_time + session.connection_latency) * (session.pending_amount + 1), session)
             for session in self.available_sessions] or [(0, None)],
            key=itemgetter(0)
        )[1]

    def _get_session_connect_callback(self, session: ClientSession):
        loop = asyncio.get_event_loop()

        def callback():
            # Two configured hostnames may resolve to the same server; keep
            # only one live session and re-check the duplicate in an hour.
            duplicate_connections = [
                s for s in self.sessions
                if s is not session and s.server_address_and_port == session.server_address_and_port
            ]
            already_connected = None if not duplicate_connections else duplicate_connections[0]
            if already_connected:
                self.sessions.pop(session).cancel()
                session.synchronous_close()
                log.debug("wallet server %s resolves to the same server as %s, rechecking in an hour",
                          session.server[0], already_connected.server[0])
                loop.call_later(3600, self._connect_session, session.server)
                return
            self.new_connection_event.set()
            log.info("connected to %s:%i", *session.server)

        return callback

    def _connect_session(self, server: Tuple[str, int]):
        """Create (or revive) the session and keep-alive task for *server*."""
        session = None
        for s in self.sessions:
            if s.server == server:
                session = s
                break
        if not session:
            session = ClientSession(
                network=self.network, server=server
            )
            session._on_connect_cb = self._get_session_connect_callback(session)
        task = self.sessions.get(session, None)
        if not task or task.done():
            task = asyncio.create_task(session.ensure_session())
            # If the keep-alive task ever finishes, try to reconnect everything.
            task.add_done_callback(lambda _: self.ensure_connections())
            self.sessions[session] = task

    def start(self, default_servers):
        for server in default_servers:
            self._connect_session(server)

    def stop(self):
        for session, task in self.sessions.items():
            task.cancel()
            session.synchronous_close()
        self.sessions.clear()

    def ensure_connections(self):
        for session in self.sessions:
            self._connect_session(session.server)

    def trigger_nodelay_connect(self):
        # used when other parts of the system sees we might have internet back
        # bypasses the retry interval
        for session in self.sessions:
            session.trigger_urgent_reconnect.set()

    async def wait_for_fastest_session(self):
        """Block until at least one session is available, then return the fastest."""
        while not self.fastest_session:
            self.trigger_nodelay_connect()
            self.new_connection_event.clear()
            await self.new_connection_event.wait()
        return self.fastest_session
| StarcoderdataPython |
36065 | import sys
sys.path.append("/home/ly/workspace/mmsa")  # make the project packages importable

# Fix every RNG before importing the model code so runs are reproducible.
seed = 1938
import numpy as np
import torch
from torch import nn
from torch import optim
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

from models.bigru_rcnn_gate import *
from utils.train import *
from typing import *
from utils.load_raw_yelp import *
from utils.dataset import *
from utils.train import *
from utils.train import *
def main():
    """Load the GloVe Yelp splits, build the model and launch visdom training."""
    train_set, valid_set, test_set = load_glove_data(config)

    batch_size = 2
    workers = 2
    train_loader, valid_loader, test_loader = get_loader(batch_size, workers, get_collate_fn(config),
                                                         train_set, valid_set, test_set)

    model = Model(config)
    loss = nn.CrossEntropyLoss()

    viz = get_Visdom()
    lr = 1e-3
    epoches = 20  # NOTE(review): never passed anywhere -- presumably a leftover; confirm
    optimizer = get_regal_optimizer(model, optim.AdamW, lr)
    # NOTE: trains on the validation loader, as in the original experiment script.
    k_batch_train_visdom(model, optimizer, loss, valid_loader, viz, 30, 10, use_cuda=False)
if __name__ == "__main__":
    # torch.cuda.set_device(1)  # uncomment to pin the experiment to GPU 1
    main()
3292078 |
from NENV import *
import binhex
class NodeBase(Node):
    """Common base for all generated binhex wrapper nodes."""
    pass
class _Ignore_Deprecation_Warning_Node(NodeBase):
    """
    """

    title = '_ignore_deprecation_warning'
    type_ = 'binhex'
    init_inputs = []
    init_outputs = [NodeOutputBP(type_='data')]
    color = '#32DA22'

    def update_event(self, inp=-1):
        # No inputs: just expose the call's result on output 0.
        result = binhex._ignore_deprecation_warning()
        self.set_output_val(0, result)
class Binhex_Node(NodeBase):
    """
    binhex(infilename, outfilename): create binhex-encoded copy of a file"""

    title = 'binhex'
    type_ = 'binhex'
    init_inputs = [NodeInputBP(label='inp'), NodeInputBP(label='out')]
    init_outputs = [NodeOutputBP(type_='data')]
    color = '#32DA22'

    def update_event(self, inp=-1):
        # Forward both inputs to binhex.binhex and expose the result on output 0.
        result = binhex.binhex(self.input(0), self.input(1))
        self.set_output_val(0, result)
class Getfileinfo_Node(NodeBase):
    """
    """

    title = 'getfileinfo'
    type_ = 'binhex'
    init_inputs = [NodeInputBP(label='name')]
    init_outputs = [NodeOutputBP(type_='data')]
    color = '#32DA22'

    def update_event(self, inp=-1):
        # Single input: the file name whose info is exposed on output 0.
        result = binhex.getfileinfo(self.input(0))
        self.set_output_val(0, result)
class Hexbin_Node(NodeBase):
    """
    hexbin(infilename, outfilename) - Decode binhexed file"""

    title = 'hexbin'
    type_ = 'binhex'
    init_inputs = [NodeInputBP(label='inp'), NodeInputBP(label='out')]
    init_outputs = [NodeOutputBP(type_='data')]
    color = '#32DA22'

    def update_event(self, inp=-1):
        # Forward both inputs to binhex.hexbin and expose the result on output 0.
        result = binhex.hexbin(self.input(0), self.input(1))
        self.set_output_val(0, result)
# Register all generated wrapper nodes with the NENV environment.
export_nodes(
    _Ignore_Deprecation_Warning_Node,
    Binhex_Node,
    Getfileinfo_Node,
    Hexbin_Node,
)
| StarcoderdataPython |
118347 | from django.contrib import admin
from .models import Time
# Register models so they show up on the Django admin page.
admin.site.register(Time)
3362038 | <reponame>LinWeizheDragon/AutoFidgetDetection
import os
import json
import cv2
import math
import numpy as np
import pandas as pd
from utility.base_config import *
from scipy.signal import savgol_filter
from utility.colors import *
from utility.decompose_string import decompose_string, decompose_string_hand
from component.basic_processor import BasicProcessor
class HandCrossAnalyser(BasicProcessor):
    def __init__(self, name, path_data):
        # Delegate to BasicProcessor; this analyser passes None for the
        # processor's third argument (no extra resource needed here).
        BasicProcessor.__init__(self, name, path_data, None)
    def compute_stationary_rectangles(self, min_length=100, cutoff=0):
        '''
        This function compute stationary rectangles out of all the rectangles detected in time series.
        :param min_length: minimum number of consecutive frames a segment must span to be kept
        :param cutoff: number of frames trimmed from both ends of each kept segment
        :return:
            continuous_segments: list of segments
            valid_intersect_data: stored rectangle coordinates for each valid segments
        '''
        cap = cv2.VideoCapture(self.video_path)
        data = np.load(self.processed_file)
        # frame index -> [min_x, min_y, max_x, max_y] of the joined-hands box
        intersect_data = {}
        # try:
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        print(length)
        t = 0
        while (t < data.shape[0]):
            print('progress', t / data.shape[0], end='\r')
            # Display all the data points (debug visualisation, disabled)
            '''
            for i in range(25):
                frame = self.paint_point(frame, [data[t, i * 2], data[t, i * 2 + 1]])
            for i in range(25, 95):
                frame = self.paint_point(frame, [data[t, i * 2], data[t, i * 2 + 1]], color=COLOR_BLUE)
            for i in range(95, 116):
                frame = self.paint_point(frame, [data[t, i * 2], data[t, i * 2 + 1]], color=COLOR_GREEN)
            for i in range(116, 137):
                frame = self.paint_point(frame, [data[t, i * 2], data[t, i * 2 + 1]], color=COLOR_YELLOW)
            #'''
            # Keypoints are stored as flat (x, y) pairs; these slices select
            # the landmark groups used by the rest of this module.
            left_hand_data = data[t, 194:232].reshape(-1, 2)
            right_hand_data = data[t, 236:274].reshape(-1, 2)
            face_data = data[t, 50:190].reshape(-1, 2)
            # frame = self.paint_rectangle_to_points(frame, left_hand_data, color=COLOR_GREEN)
            # frame = self.paint_rectangle_to_points(frame, right_hand_data, color=COLOR_YELLOW)
            # Check hands overlapping
            intersect = self.check_overlap(left_hand_data, right_hand_data, tolerance=5)
            if intersect is not None:
                # Record the bounding box over both hands for this frame.
                points = np.vstack((left_hand_data, right_hand_data))
                cordinates = list(np.min(points, axis=0).astype(int)) + list(np.max(points, axis=0).astype(int))
                intersect_data[t] = cordinates
            t += 1
        # except Exception as e:
        #     print(e)
        cap.release()
        # compute continuous segment: merge consecutive frame indices into
        # [start, end) ranges (relies on dict preserving insertion order).
        continuous_segments = []
        for i in intersect_data.keys():
            if len(continuous_segments) == 0:
                continuous_segments.append([i, i + 1])
            else:
                if continuous_segments[-1][1] == i:
                    continuous_segments[-1][1] += 1
                else:
                    continuous_segments.append([i, i + 1])
        # validate stationarity (disabled: all overlapping frames are kept)
        # valid_intersect_data = {}
        # for session in continuous_segments:
        #     starting_time = session[0]
        #     ending_time = session[1]
        #     for i in range(starting_time + 1, ending_time):
        #         previous_rect = np.array(intersect_data[i - 1])
        #         current_rect = np.array(intersect_data[i])
        #         change = np.sum(np.power(current_rect - previous_rect, 2))
        #         if change <= 6:
        #             # rectangle stationary
        #             valid_intersect_data[i] = current_rect
        valid_intersect_data = intersect_data
        # recompute continuous segments
        continuous_segments = []
        for i in valid_intersect_data.keys():
            if len(continuous_segments) == 0:
                continuous_segments.append([i, i + 1])
            else:
                if continuous_segments[-1][1] == i:
                    continuous_segments[-1][1] += 1
                else:
                    continuous_segments.append([i, i + 1])
        # min length of stationary rectangle: drop short segments entirely
        # and trim ``cutoff`` frames from both ends of the surviving ones,
        # deleting the corresponding per-frame rectangles as well.
        new_segments = []
        for segment in continuous_segments:
            if segment[1] - segment[0] >= (min_length + cutoff * 2):
                new_segments.append([segment[0] + cutoff, segment[1] - cutoff])
                if cutoff != 0:
                    for x in range(segment[0], segment[0] + cutoff):
                        del valid_intersect_data[x]
                    for x in range(segment[1] - cutoff, segment[1]):
                        del valid_intersect_data[x]
            else:
                for x in range(segment[0], segment[1]):
                    del valid_intersect_data[x]
        continuous_segments = new_segments
        # print(continuous_segments)
        # print(intersect_data.keys())
        # print(valid_intersect_data.keys())
        print('\n')
        return continuous_segments, valid_intersect_data
    def compute_static_hands_without_crossing(self, hand='left', min_length=100, cutoff=0):
        """Find frame segments where one hand is held still while the hands do not cross.

        :param hand: which hand to analyse, 'left' or anything else for the right hand
        :param min_length: minimum segment length (in frames) after trimming
        :param cutoff: number of frames trimmed from both ends of each kept segment
        :return: list of [start, end) frame segments where the hand is static
        """
        # read hand cross data
        hand_cross_segments, hand_cross_intersect_data = self.compute_stationary_rectangles(min_length=20)
        data = np.load(self.processed_smooth_file)
        cap = cv2.VideoCapture(self.video_path)
        # frame index -> bounding box [xmin, ymin, xmax, ymax] of the static hand
        static_data = {}
        # try:
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        print(length)
        t = 1
        while (t < data.shape[0]):
            print('progress', t / data.shape[0], end='\r')
            # columns 194:232 hold the left-hand keypoints and 236:274 the
            # right-hand ones; reshape(-1, 2) turns them into (x, y) pairs
            if hand == 'left':
                hand_data = data[t, 194:232].reshape(-1, 2)
                previous_hand_data = data[t - 1, 194:232].reshape(-1, 2)
            else:
                hand_data = data[t, 236:274].reshape(-1, 2)
                previous_hand_data = data[t - 1, 236:274].reshape(-1, 2)
            ret, frame = cap.read()
            # without hand cross segments
            if t not in hand_cross_intersect_data.keys():
                # mean squared displacement of the keypoints between frames
                difference = hand_data - previous_hand_data
                difference = np.mean(np.power(difference, 2))
                # print(difference)
                if difference < 0.7:
                    frame = self.paint_rectangle_to_points(frame, hand_data, color=COLOR_YELLOW)
                    points = hand_data
                    cordinates = list(np.min(points, axis=0).astype(int)) + list(np.max(points, axis=0).astype(int))
                    static_data[t] = cordinates
            # cv2.imshow('frame', frame)
            # NOTE(review): waitKey still throttles the loop (~40 ms per frame)
            # and allows aborting with ESC even though imshow is commented out
            k = cv2.waitKey(40) & 0xff
            if k == 27:
                break
            t += 1
            # input()
        cv2.destroyAllWindows()
        cap.release()
        # smoothing the data: binary static/non-static signal smoothed with a
        # Savitzky-Golay filter (window 11, order 3), then re-thresholded at 0.5
        smooth_array = np.zeros((data.shape[0], 1))
        for t in static_data.keys():
            smooth_array[t, :] = 1
        print(smooth_array.shape)
        y = savgol_filter(np.ravel(smooth_array), 11, 3)
        smooth_array = np.array(y).reshape((-1, 1))
        smooth_array[smooth_array >= 0.5] = 1
        smooth_array[smooth_array < 0.5] = 0
        # compute continuous segment
        continuous_segments = []
        for i in range(data.shape[0]):
            if smooth_array[i, 0] == 1:
                if len(continuous_segments) == 0:
                    continuous_segments.append([i, i + 1])
                else:
                    if continuous_segments[-1][1] == i:
                        continuous_segments[-1][1] += 1
                    else:
                        continuous_segments.append([i, i + 1])
        # keep only segments long enough after trimming `cutoff` frames per side
        new_segments = []
        for segment in continuous_segments:
            if segment[1] - segment[0] >= (min_length + cutoff * 2):
                new_segments.append([segment[0] + cutoff, segment[1] - cutoff])
        continuous_segments = new_segments
        return continuous_segments
def compute_static_and_rhythmic_with_hand_cross(self):
hand_cross_segments, hand_cross_intersect_data = self.compute_stationary_rectangles(min_length=20)
label_data = json.load(open('optical_flow_results_for_export_label.json', 'r'))
try:
label_data = label_data[str(self.participant_id)][str(self.session_id)]
except Exception as e:
print('no hands playing data...')
label_data = {}
window_size = 100
window_step = 50
FFT_thres = 30
STD_thres = 8
data = np.load(self.processed_file)
# generate label array
label_array = np.zeros((data.shape[0], 1))
label_centroid = {}
for segment in label_data.keys():
starting = int(segment.split(',')[0])
ending = int(segment.split(',')[1])
centroid = int(math.floor((starting + ending) / 2))
p = (centroid, label_data[segment][0], label_data[segment][1])
label_centroid[centroid] = label_data[segment]
print('preprocessing label data')
for t in range(data.shape[0]):
related_centroids = [(i, label_centroid[i])
for i in range(int(t - 0.5 * window_size), int(t + 0.5 * window_size))
if i in label_centroid.keys()]
if len(related_centroids) == 0:
continue
if len(related_centroids) == 1:
closest_centroid = related_centroids[0]
else:
id_1 = related_centroids[0][0]
id_2 = related_centroids[1][0]
if abs(id_1 - t) < abs(id_2 - t):
closest_centroid = related_centroids[0]
else:
closest_centroid = related_centroids[1]
avg_fft = closest_centroid[1][0]
avg_std = closest_centroid[1][1]
if avg_fft >= FFT_thres and avg_std >= STD_thres:
label_array[t, :] = 3 # dynamic + rhythmic
elif avg_fft >= FFT_thres and avg_std < STD_thres:
label_array[t, :] = 2 # rhythmic
elif avg_fft < FFT_thres and avg_std >= STD_thres:
label_array[t, :] = 1 # dynamic
elif avg_fft < FFT_thres and avg_std < STD_thres:
label_array[t, :] = 0 # static
for x in range(4):
# compute continuous segment
continuous_segments = []
for i in range(data.shape[0]):
if i in hand_cross_intersect_data.keys():
if label_array[i, 0] == x:
if len(continuous_segments) == 0:
continuous_segments.append([i, i + 1])
else:
if continuous_segments[-1][1] == i:
continuous_segments[-1][1] += 1
else:
continuous_segments.append([i, i + 1])
if x == 0:
static_segments = continuous_segments
if x == 1:
dynamic_segments = continuous_segments
if x == 2:
rhythmic_segments = continuous_segments
if x == 3:
dynamic_rythmic_segments = continuous_segments
return static_segments, dynamic_segments, rhythmic_segments, dynamic_rythmic_segments
def analyse_hand_cross_optical_flow(self):
data = {}
for root, dirs, files in os.walk(os.path.join(DATA_FOLDER, 'hand_cross_analysis_optical_flow')):
for file in files:
if '.npy' in file:
data[file] = np.load(os.path.join(root, file))
if data[file].shape[0] == 0:
print(file, data[file].shape)
label_data = {}
from keras.models import load_model
model = load_model(
os.path.join(DATA_FOLDER, 'pre-trained', 'hierarchical_DNN.h5')
)
for file in data.keys():
participant_id, session_id, starting, ending = decompose_string(file)
sub_data = data[file]
if sub_data.shape[0] != 100:
continue
FFT, STD, MEAN = self.analyse_sequence_new(self.get_first_derivative(sub_data))
FFT = np.mean(FFT, axis=1)
STD = STD # np.mean(STD)
MEAN = MEAN # np.mean(MEAN, axis=0)
single_x = [
FFT.reshape((1, -1)), STD.reshape((1, -1)), MEAN.reshape((1, -1))
]
label = int(np.argmax(model.predict(single_x), axis=1)[0])
print(label)
label_data.setdefault(participant_id, {}).setdefault(session_id, {})['{},{}'.format(starting, ending)] = label
json.dump(label_data, open(
os.path.join(DATA_FOLDER, 'hand_cross_analysis_optical_flow', 'optical_flow_result.json'),
'w'))
print('saving completed.')
def analyse_hand_action_optical_flow(self):
data = {}
for root, dirs, files in os.walk(os.path.join(DATA_FOLDER, 'hand_action_analysis_optical_flow')):
for file in files:
if '.npy' in file:
data[file] = np.load(os.path.join(root, file))
if data[file].shape[0] == 0:
print(file, data[file].shape)
label_data = {}
from keras.models import load_model
model = load_model(
os.path.join(DATA_FOLDER, 'pre-trained', 'hierarchical_DNN_hand.h5')
)
for file in data.keys():
participant_id, session_id, starting, ending, hand = decompose_string_hand(file)
sub_data = data[file]
if sub_data.shape[0] != 100:
continue
FFT, STD, MEAN = self.analyse_sequence_new(self.get_first_derivative(sub_data))
FFT = np.mean(FFT, axis=1)
STD = STD # np.mean(STD)
MEAN = MEAN # np.mean(MEAN, axis=0)
single_x = [
FFT.reshape((1, -1)), STD.reshape((1, -1)), MEAN.reshape((1, -1))
]
label = int(np.argmax(model.predict(single_x), axis=1)[0])
print(label)
label_data.setdefault(participant_id, {}).setdefault(session_id, {}).setdefault(hand, {})['{},{}'.format(starting, ending)] = label
json.dump(label_data, open(
os.path.join(DATA_FOLDER, 'hand_action_analysis_optical_flow', 'optical_flow_result.json'),
'w'))
print('saving completed.')
| StarcoderdataPython |
3391651 | from flask import Blueprint, render_template, flash
from flask_application_tutorial.auth import get_db
# Blueprint grouping all article views, mounted under the /articles prefix.
bp = Blueprint("articles", __name__, url_prefix="/articles")
@bp.route("/articles")
def articles():
    """List every article, or flash a warning when the table is empty."""
    con = get_db()
    rows = con.execute("SELECT * FROM articles").fetchall()
    if rows:
        return render_template("articles/articles.html", articles=rows)
    flash("No articles found", "danger")
    return render_template("articles/articles.html")
@bp.route("/article/<string:id>/")
def article(id):
    """Render a single article looked up by its id (parameterized query)."""
    con = get_db()
    row = con.execute("SELECT * FROM articles WHERE id = :id", [id]).fetchone()
    return render_template("articles/article.html", article=row)
| StarcoderdataPython |
3396424 | <reponame>StichtingIAPC/swipe
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; adds the FK fields deferred from 0001_initial
    # to break the circular dependency between the two apps. Do not edit the
    # operations by hand.

    initial = True

    dependencies = [
        ('sales', '0001_initial'),
        ('customer_invoicing', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='receiptcustinvoice',
            name='receipt',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='sales.Transaction'),
        ),
        migrations.AddField(
            model_name='custominvoiceline',
            name='custom_invoice',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='customer_invoicing.CustomCustInvoice'),
        ),
    ]
| StarcoderdataPython |
136499 | <filename>tests/v1/test_user_register.py
import pytest
from flask import url_for
from api.blueprints.v1.resources.user_register import User
class TestUserResource:
    """Endpoint tests for the v1 user registration resource."""

    def test_user_register_with_missing_user_data(self, client):
        """Registering with no payload at all is rejected."""
        res = client.post(url_for('v1.userregister'))
        assert res.status_code == 400

    def test_user_register_with_missing_incorrect_username(self, client):
        """A missing username yields a field-specific validation message."""
        res = client.post(url_for('v1.userregister', password="<PASSWORD>"))
        assert res.json.get('message').get('username') == 'This field cannot be blank.'
        assert res.status_code == 400

    def test_user_register_with_missing_incorrect_password(self, client):
        """A missing password yields a field-specific validation message."""
        res = client.post(url_for('v1.userregister', username='pablo'))
        assert res.json.get('message').get('password') == 'This field cannot be blank.'
        assert res.status_code == 400

    def test_user_register_success(self, client):
        """Valid credentials create the user and return 201."""
        res = client.post(url_for('v1.userregister', username="pablonew", password="<PASSWORD>"))
        assert res.json.get('message') == 'User created successfully.'
        assert res.status_code == 201

    def test_user_exisiting_user(self, client):
        """Registering the same username twice fails with 400."""
        client.post(url_for('v1.userregister', username="pablotest", password="<PASSWORD>"))
        res = client.post(url_for('v1.userregister', username="pablotest", password="<PASSWORD>"))
        # leftover debug print removed; the assertions below cover the response
        assert res.json.get('message') == 'A user with that username already exists.'
        assert res.status_code == 400
120594 | <reponame>ScorpionResponse/freelancefinder<gh_stars>1-10
"""Wrapper for hackernews."""
import datetime
import logging
import bleach
import hackernews
from django.utils import timezone
from jobs.models import Post
logger = logging.getLogger(__name__)
class HackerHarvest(object):
    """Wrapper client for hackernews harvester."""

    def __init__(self, source):
        """Initialize the harvester.

        :param source: source record providing per-source config entries
            (``source.config``) and attached to every created Post
        """
        self.source = source
        self.client = hackernews.HackerNews()

    def job_stories(self):
        """Gather job postings and turn them into posts.

        Yields one Post per HN job story (up to 100), skipping ids the API
        no longer resolves.
        """
        for story_id in self.client.job_stories(limit=100):
            try:
                story = self.client.get_item(story_id)
            except hackernews.InvalidItemID as iiid:
                logger.warning('Tried to get non-existent job story with ID: %s; ex: %s', story_id, iiid)
                continue
            post = self.parse_job_to_post(story, subarea='jobs')
            post.title = 'Full Time - {}'.format(post.title)
            # re-truncate after prefixing so the title still fits 255 chars
            post.title = post.title[:255]
            yield post

    def hiring_jobs(self):
        """Gather posts from the Who is Hiring? thread.

        Iterates the top-level comments of the thread whose id is stored in
        the source config under ``post_id-who_is_hiring``.
        """
        post_id = self.source.config.filter(config_key='post_id-who_is_hiring').first().config_value
        hn_item = self.client.get_item(post_id)
        # r'\s*(?P<company>[^|]+?)\s*\|\s*(?P<title>[^|]+?)\s*\|\s*(?P<locations>[^|]+?)\s*(?:\|\s*(?P<attrs>.+))?$'
        for comment_id in hn_item.kids:
            try:
                comment = self.client.get_item(comment_id)
            except hackernews.InvalidItemID as iiid:
                logger.warning('Tried to get non-existent comment with ID: %s; ex: %s', comment_id, iiid)
                continue
            if comment.text is None:
                # presumably a deleted/dead comment — nothing to parse
                logger.debug("Skipping blank comment: %s", comment)
                continue
            post = self.parse_job_to_post(comment, subarea='who_is_hiring')
            post.title = 'Hiring - {}'.format(post.title)
            post.title = post.title[:255]
            yield post

    def who_wants_jobs(self):
        """Gather posts from the Who wants to be hired? thread."""
        post_id = self.source.config.filter(config_key='post_id-who_wants_to_be_hired').first().config_value
        hn_item = self.client.get_item(post_id)
        for comment_id in hn_item.kids:
            try:
                comment = self.client.get_item(comment_id)
            except hackernews.InvalidItemID as iiid:
                logger.warning('Tried to get non-existent comment with ID: %s; ex: %s', comment_id, iiid)
                continue
            if comment.text is None:
                logger.debug("Skipping blank comment: %s", comment)
                continue
            # candidate posts get the HN username prepended to the title
            post = self.parse_job_to_post(comment, subarea='who_wants_to_be_hired', insert_author=True)
            post.title = 'For Hire - {}'.format(post.title)
            post.title = post.title[:255]
            yield post

    def freelancer_jobs(self):
        """Gather posts from the Freelancers thread.

        Both "SEEKING WORK" and "SEEKING FREELANCER" comments appear here;
        the title prefix distinguishes them and both are flagged freelance.
        """
        post_id = self.source.config.filter(config_key='post_id-freelancer').first().config_value
        hn_item = self.client.get_item(post_id)
        for comment_id in hn_item.kids:
            try:
                comment = self.client.get_item(comment_id)
            except hackernews.InvalidItemID as iiid:
                logger.warning('Tried to get non-existent comment with ID: %s; ex: %s', comment_id, iiid)
                continue
            if comment.text is None:
                logger.debug("Skipping blank comment: %s", comment)
                continue
            post = self.parse_job_to_post(comment, subarea='freelancer', insert_author=True)
            if 'SEEKING WORK' in post.title.upper():
                post.title = 'For Hire - {}'.format(post.title)
            elif 'SEEKING FREELANCER' in post.title.upper():
                post.title = 'Freelance - {}'.format(post.title)
            # TODO(Paul): Just set the is_freelance flag?
            post.is_freelance = True
            post.title = post.title[:255]
            yield post

    def check_who_is_hiring(self):
        """Check for new who is hiring posts every month.

        Scans the ``whoishiring`` account's recent submissions and records the
        ids of this month's three monthly threads in the source config. Only
        updates when all three threads were found; the
        ``processed_date-last_month`` marker makes the check run once a month.
        """
        month_year = datetime.date.today().strftime("%B %Y")
        if self.source.config.filter(config_key='processed_date-last_month', config_value=month_year).exists():
            return
        who_is_hiring_user = self.client.get_user('whoishiring')
        # [who_is_hiring_id, freelancer_id, who_wants_to_be_hired_id]
        new_posts = [False, False, False]
        for post_id in who_is_hiring_user.submitted[:7]:
            hn_item = self.client.get_item(post_id)
            if month_year in hn_item.title:
                if 'Who is hiring?' in hn_item.title:
                    new_posts[0] = post_id
                if 'Freelancer? Seeking freelancer?' in hn_item.title:
                    new_posts[1] = post_id
                if 'Who wants to be hired?' in hn_item.title:
                    new_posts[2] = post_id
        if all(new_posts):
            self.source.config.update_or_create(config_key='processed_date-last_month', defaults={'config_value': month_year})
            self.source.config.update_or_create(config_key='post_id-who_is_hiring', defaults={'config_value': new_posts[0]})
            self.source.config.update_or_create(config_key='post_id-freelancer', defaults={'config_value': new_posts[1]})
            self.source.config.update_or_create(config_key='post_id-who_wants_to_be_hired', defaults={'config_value': new_posts[2]})

    def parse_job_to_post(self, job_info, subarea, insert_author=False):
        """Convert a comment or story to a Post.

        :param job_info: HN item (story or comment)
        :param subarea: Post.subarea value identifying the harvest channel
        :param insert_author: prepend the HN username to the title
        :return: unsaved Post instance
        """
        title = job_info.title
        if title is None:
            # comments have no title; use the text up to the first HTML tag
            title = job_info.text.split('<')[0]
        if insert_author:
            title = job_info.by + ' - ' + title
        # strip any markup so the title is plain text
        title_cleaned = bleach.clean(title[:255], strip=True)
        desc = ''
        if job_info.text:
            desc = job_info.text
        url = job_info.url
        if not url:
            # fall back to the HN permalink for items without an external URL
            url = "https://news.ycombinator.com/item?id={}".format(job_info.item_id)
        created = timezone.make_aware(job_info.submission_time, is_dst=False)
        post = Post(url=url, source=self.source, title=title_cleaned, description=desc, unique=job_info.item_id, created=created, subarea=subarea)
        return post
| StarcoderdataPython |
192232 | <filename>setup.py
#!/usr/bin/env python3
import os
from setuptools import setup
from pathlib import Path
# NOTE(review): thisDir is computed but never used below — presumably left over
# from a template; confirm before removing (same applies to the `os` import).
thisDir = Path(__file__).parent
# Version is derived from the SCM tag via setuptools_scm (use_scm_version).
setup(use_scm_version = True)
| StarcoderdataPython |
1623800 | <gh_stars>0
'''
Gameprogrammierung mit Python und Pygame Zero
Version 1.00, 22.09.2021
Der Hobbyelektroniker
https://community.hobbyelektroniker.ch
https://www.youtube.com/c/HobbyelektronikerCh
Die Rechte der unten angegebenen Quellen sind zu beachten!
Der restliche Code kann mit Quellenangabe frei verwendet werden.
- Als Entwicklungsumgebung wird Thonny verwendet: https://thonny.org
- Die Ideen zu diesem Tutorial stammt von https://aposteriori.trinket.io/game-development-with-pygame-zero
- Grafiken von https://www.aposteriori.com.sg/wp-content/uploads/2020/02/image_pack.zip
- Sounds von https://opengameart.org
- Das Modul pgzhelper:https://www.aposteriori.com.sg/wp-content/uploads/2021/01/pgzhelper.zip
- Tool zur Bestimmung der Farbwerte:https://www.rapidtables.com/web/color/RGB_Color.html
- Dokumentation zu Pygame Zero: https://pygame-zero.readthedocs.io/en/stable/
'''
# Ein einfaches Spiel mit 3 Elementen
# Das Spielfeld: Hintergrund mit Punkteanzeige
# Der Spieler: animierte Figur, die springen kann
# Die Hindernisse: tauchen unregelmässig auf und müssen übersprungen werden
# Ziel: Möglichst viele Hindernisse nicht berühren
'''
Video 2
Der Spieler
'''
import pgzrun
from spieler import Spieler
WIDTH = 800  # pgzero window width in pixels
HEIGHT = 600  # pgzero window height in pixels
TITLE = "Jump and Run"  # pgzero window title
HIMMEL = 400  # y coordinate where the sky ends and the ground begins
class Spiel:
    """Game state: score, player and obstacles, plus the draw/update logic."""

    def __init__(self):
        self.aktiv = False  # game is not running
        self.finished = False  # no finished game yet
        self.punkte = 0  # score starts at 0
        self.spieler = Spieler(100, HIMMEL)  # create the player
        self.hindernisse = []  # empty list for the obstacles

    def start(self):
        """Begin a new game round."""
        self.aktiv = True  # the game is running
        self.finished = False  # the game is not over yet
        self.punkte = 0  # reset the score
        self.hindernisse.clear()  # remove all obstacles
        self.spieler.start()  # the player has to be started here as well

    def stopp(self):
        """End the current game round."""
        self.aktiv = False  # the game is no longer running
        self.finished = True  # the game is over
        self.spieler.stopp()  # stopping the game also stops the player

    def zeichne_spielfeld(self):
        """Draw sky, ground, score and the status texts."""
        farbe_himmel = (163, 232, 254)  # blue
        screen.draw.filled_rect(Rect(0, 0, WIDTH, HIMMEL), farbe_himmel)
        farbe_boden = (88, 242, 152)  # green
        # Bugfix: ground height is HEIGHT - HIMMEL (was WIDTH - HIMMEL, which
        # overdrew 200 px past the bottom edge; clipped, so visually identical).
        screen.draw.filled_rect(Rect(0, HIMMEL, WIDTH, HEIGHT - HIMMEL), farbe_boden)
        farbe_schrift = (255, 255, 255)  # white
        screen.draw.text(str(self.punkte), (WIDTH - 80, 10), fontsize=60, color=farbe_schrift)
        if self.finished:
            screen.draw.text("GAME OVER", (20, 20), fontsize=80, color=(255, 0, 0))
            screen.draw.text("Neues Spiel mit S", (20, 80), fontsize=60, color=(255, 255, 0))
        elif not self.aktiv:
            screen.draw.text("Spiel starten mit S", (20, 20), fontsize=60, color=(255, 255, 0))

    def zeichne_spieler(self):
        """Draw the player figure."""
        self.spieler.draw()

    def zeichne_hindernisse(self):
        """Draw the obstacles (not implemented yet)."""
        pass
        # example: self.figur.draw()

    def draw(self):
        """Render the whole scene (called from the pgzero draw hook)."""
        self.zeichne_spielfeld()
        self.zeichne_spieler()
        self.zeichne_hindernisse()

    def update(self):
        """Handle input and advance the game (called from the pgzero update hook)."""
        # only poll the start key while no game is running
        if not self.aktiv and keyboard.s:
            # start a new game
            self.start()
        # nothing else to do while the game is inactive
        if not self.aktiv:
            return
        if keyboard.up:
            # the UP key makes the player jump
            self.spieler.sprung()
        # every figure's update method has to be called
        self.spieler.update()
spiel = Spiel()  # single global game object used by the pgzero hooks below
def update():
    # pgzero calls this every frame to advance the game state
    spiel.update()
def draw():
    # pgzero calls this every frame to render the scene
    spiel.draw()
pgzrun.go()  # start the Pygame Zero event loop
| StarcoderdataPython |
45707 | <reponame>ggsdc/corn
from .RoutesGenerator import RoutesGenerator
from .MIPModel import MIPModel
from collections import defaultdict, OrderedDict
from pytups import SuperDict, TupList
from datetime import datetime
import pulp as pl
import pickle
import json
import itertools
class PeriodicMIP(MIPModel):
    def __init__(self, instance, solution=None):
        """Initialize the periodic MIP solver.

        :param instance: problem instance (handed to the MIPModel base class)
        :param solution: optional starting solution
        """
        super().__init__(instance, solution)
        self.log = ""
        self.solver = "Periodic MIP Model"
        self.routes_generator = RoutesGenerator(self.instance)
        self.range_hours = list(range(self.horizon))
        # pool of all generated routes, and the subset not yet used in a model
        self.routes = dict()
        self.unused_routes = dict()
        self.value_greedy = None  # objective value of the greedy solution
        self.interval_routes = 6  # spacing (hours) between start-time copies of a route
        self.nb_routes = 1000  # routes handed to select_routes per iteration
        self.start_time = datetime.now()
        self.start_time_string = datetime.now().strftime("%d.%m-%Hh%M")
        self.print_log = False  # verbose console output
        self.save_results = False  # dump intermediate solutions to res/
        self.artificial_quantities = dict()  # ideal deliveries from artificial vars
        self.limit_artificial_round = 0  # last round that still uses artificial vars
        self.last_solution_round = -1  # round of the most recent accepted solution
        # rolling-horizon window [Hmin, Hmax] and its parameters
        self.Hmin = 0
        self.Hmax = self.horizon
        self.resolution_interval = 180  # hours added to the window per outer loop
        self.resolve_margin = 48  # hours before Hmin whose decisions stay frozen
        self.solution_greedy = None
        self.locations_in = dict()
        self.unique_locations_in = dict()
        self.hour_of_visit = dict()
        self.k_visit_hour = dict()
        self.nb_visits = dict()
        self.final_routes = None
        self.coef_part_inventory_conservation = 0.8
        self.coef_inventory_conservation = 1
        self.time_limit = 100000  # overall limit (overridden by config "timeLimit")
        # Variables
        self.route_var = SuperDict()
        self.artificial_quantities_var = SuperDict()
        self.artificial_binary_var = SuperDict()
        self.inventory_var = SuperDict()
        self.quantity_var = SuperDict()
        self.trailer_quantity_var = SuperDict()
    def solve(self, config=None):
        """Solve the instance with a rolling-horizon column-generation loop.

        The horizon is processed in windows of ``resolution_interval`` hours.
        For each window, routes are generated and then repeatedly fed to a MIP
        (``solve_one_iteration``) until the pool of unused routes is empty or
        the global time limit runs out.

        :param config: optional dict with keys such as "timeLimit",
            "inventoryConservation", "partialInventoryConservation", "solver"
            and "nb_routes_per_run"
        :return: 1 (the computed solution is stored on the object)
        """
        self.start_time = datetime.now()
        self.start_time_string = datetime.now().strftime("%d.%m-%Hh%M")
        if config is None:
            config = dict()
        config = dict(config)
        self.time_limit = config.get("timeLimit", 100000)
        self.coef_inventory_conservation = config.get(
            "inventoryConservation", self.coef_inventory_conservation
        )
        self.coef_part_inventory_conservation = config.get(
            "partialInventoryConservation", self.coef_inventory_conservation
        )
        solver_name = self.get_solver(config)
        self.print_in_console("Started at: ", self.start_time_string)
        self.nb_routes = config.get("nb_routes_per_run", self.nb_routes)
        used_routes = dict()
        self.Hmin = 0
        self.Hmax = min(self.resolution_interval, self.horizon)
        while self.Hmin < self.horizon and self._get_remaining_time() > 0:
            self.print_in_console(
                f"=================== Hmin = {self.Hmin} ========================"
            )
            self.print_in_console(
                f"=================== Hmax = {self.Hmax} ========================"
            )
            current_round = 0
            new_routes = self.generate_initial_routes()
            self.routes = dict(list(self.routes.items()) + list(new_routes.items()))
            previous_value = None
            # deep-copy via pickle so the unused pool is independent
            self.unused_routes = pickle.loads(pickle.dumps(new_routes, -1))
            self.print_in_console(
                "=================== ROUND 0 ========================"
            )
            self.print_in_console(
                "Initial empty solving at: ", datetime.now().strftime("%H:%M:%S")
            )
            # round 0 gets a looser gap and a longer time budget
            config_first = dict(
                solver=solver_name,
                gapRel=0.1,
                timeLimit=min(200.0, self._get_remaining_time()),
                msg=self.print_log,
            )

            def config_iteration(self):
                # Re-evaluated every round: the closure reads current_round at
                # call time, so warm starting is off only on round 1.
                return dict(
                    solver=solver_name,
                    gapRel=0.05,
                    timeLimit=min(100.0, self._get_remaining_time()),
                    msg=self.print_log,
                    warmStart=(current_round != 1),
                )

            solver = pl.getSolver(**config_first)
            used_routes, previous_value = self.solve_one_iteration(
                solver, used_routes, previous_value, current_round
            )
            current_round += 1
            # keep adding batches of routes until the pool or the time runs out
            while len(self.unused_routes) != 0 and self._get_remaining_time() > 0:
                self.print_in_console(
                    f"=================== ROUND {current_round} ========================"
                )
                solver = pl.getSolver(**config_iteration(self))
                used_routes, previous_value = self.solve_one_iteration(
                    solver, used_routes, previous_value, current_round
                )
                current_round += 1
            # advance the rolling-horizon window
            self.Hmax = min(self.Hmax + self.resolution_interval, self.horizon)
            self.Hmin += self.resolution_interval
        self.set_final_id_shifts()
        self.post_process()
        self.final_routes = used_routes
        self.print_in_console(used_routes)
        if self.save_results:
            with open(
                f"res/solution-schema-{self.start_time_string}-final.json", "w"
            ) as fd:
                json.dump(self.solution.to_dict(), fd)
        return 1
    def solve_one_iteration(self, solver, used_routes, previous_value, current_round):
        """Run one MIP round: add a batch of routes, solve and keep the best.

        :param solver: configured PuLP solver
        :param used_routes: routes currently in the model (dict id -> route)
        :param previous_value: objective of the best accepted solution so far
        :param current_round: 0-based round counter within the current window
        :return: (used_routes, previous_value) — the (possibly reverted)
            route set and objective value to carry to the next round
        """
        if 0 < current_round <= self.limit_artificial_round + 1:
            # build routes from the previous round's artificial deliveries,
            # then reset the record for the coming solve
            self.generate_new_routes()
            self.artificial_quantities = dict()
        # keep a deep copy so we can revert when the round does not improve
        old_used_routes = pickle.loads(pickle.dumps(used_routes, -1))
        previous_routes_infos = [
            (shift["id_shift"], shift["trailer"], shift["driver"])
            for shift in self.solution.get_all_shifts()
        ]
        if current_round > 0:
            selected_routes = self.select_routes(self.nb_routes)
            used_routes = SuperDict({**used_routes, **selected_routes})
        self.initialize_parameters(used_routes)
        # artificial variables are only enabled in the early rounds
        model = self.new_model(
            used_routes,
            previous_routes_infos,
            current_round <= self.limit_artificial_round,
        )
        status = model.solve(solver=solver)
        if status == 1:
            self.to_solution(model, used_routes, current_round)
            if current_round > self.limit_artificial_round:
                self.check_and_save(current_round)
        if status != 1 or current_round == 0:
            # infeasible/failed solve (or warm-up round): revert and restart scoring
            used_routes = old_used_routes
            return used_routes, None
        # reject the round unless it improves, it is the first scored value, or
        # it is the first round after the artificial phase
        if not (
            previous_value is None
            or pl.value(model.objective) < previous_value
            or (
                (current_round > self.limit_artificial_round)
                and (self.last_solution_round <= self.limit_artificial_round)
            )
        ):
            return old_used_routes, previous_value
        # keep only the routes actually selected by the solver (value > 0.5)
        keep = (
            self.route_var.vfilter(lambda v: pl.value(v) > 0.5)
            .keys_tl()
            .take(0)
            .to_dict(None)
            .vapply(lambda v: True)
        )
        used_routes = {
            r: route for r, route in used_routes.items() if keep.get(r, False)
        }
        if current_round > self.limit_artificial_round:
            previous_value = pl.value(model.objective)
            self.last_solution_round = current_round
        return used_routes, previous_value
    def generate_initial_routes(self):
        """
        Generates shifts from initial methods
        The generated shifts already have a departure time and respect the duration constraint
        :return: A dictionary whose keys are the indices of the shifts and whose values are instances of RouteLabel
        """
        # 'shortestRoutes', 'FromForecast', 'RST'
        (
            routes,
            nb_greedy,
            self.value_greedy,
            self.solution_greedy,
        ) = self.routes_generator.generate_initial_routes(
            methods=["FromClusters"],
            unique=True,
            nb_random=0,
            nb_select_at_random_greedy=0,
        )
        self.print_in_console("Value greedy: ", self.value_greedy)
        # widen the start-time spacing until the total copy count stays <= 400
        partial_interval_routes = self.interval_routes
        while nb_greedy * (self.horizon // partial_interval_routes) > 400:
            partial_interval_routes = 2 * partial_interval_routes
        # the first nb_greedy routes come from the greedy solution
        routes_greedy = routes[0:nb_greedy]
        other_routes = routes[nb_greedy:]
        # Duplicate every route at several start times inside the current
        # window. Greedy routes are listed first on purpose: the route order
        # determines which ones get selected first in later rounds.
        routes = [
            route.copy().get_label_with_start(start)
            for offset in range(0, partial_interval_routes, self.interval_routes)
            for route in routes_greedy
            for start in range(
                max(self.Hmin - 72, 0) + offset, self.Hmax, partial_interval_routes
            )
            if start + PeriodicMIP.duration_route(route) < self.Hmax
        ] + [
            route.copy().get_label_with_start(start)
            for offset in range(0, partial_interval_routes, self.interval_routes)
            for route in other_routes
            for start in range(
                max(self.Hmin - 72, 0) + offset, self.Hmax, partial_interval_routes
            )
            if start + PeriodicMIP.duration_route(route) < self.Hmax
        ]
        self.print_in_console(routes)
        self.nb_routes = max(
            self.nb_routes, nb_greedy * (self.horizon // partial_interval_routes)
        )
        self.print_in_console(f"Nb routes from initial generation: {len(routes)}")
        self.print_in_console(f"Nb routes per run: {self.nb_routes}")
        # continue the ids after any already-generated routes
        if len(self.routes) != 0:
            start_id = max(list(self.routes.keys())) + 1
        else:
            start_id = 0
        self.print_in_console(dict(enumerate(routes, start_id)))
        return dict(enumerate(routes, start_id))
    def generate_new_routes(self):
        """
        Generates new shifts based on the values of the artificial variables , i.e. based on the ideal deliveries
        :return: A dictionary whose keys are the indices of the shifts and whose values are instances of RouteLabel
        """
        self.print_in_console(
            "Generating new routes at: ", datetime.now().strftime("%H:%M:%S")
        )
        new_routes = self.routes_generator.generate_new_routes(
            self.artificial_quantities, make_clusters=True
        )
        # keep only routes that finish inside the current horizon window
        new_routes = [
            route
            for route in new_routes
            if route.start + PeriodicMIP.duration_route(route) <= self.Hmax
        ]
        # ids continue after the existing route pool
        enumerated_new_routes = list(enumerate(new_routes, max(self.routes.keys()) + 1))
        self.print_in_console(f"Generated {len(enumerated_new_routes)} new routes")
        # prepend so the new routes are picked first by select_routes
        self.routes = OrderedDict(enumerated_new_routes + list(self.routes.items()))
        self.unused_routes = OrderedDict(
            enumerated_new_routes + list(self.unused_routes.items())
        )
def new_model(self, used_routes, previous_routes_infos, artificial_variables):
self.print_in_console(
"Generating new model at: ", datetime.now().strftime("%H:%M:%S")
)
model = pl.LpProblem("Roadef", pl.LpMinimize)
self.create_variables(used_routes, previous_routes_infos, artificial_variables)
model = self.create_constraints(model, used_routes, artificial_variables)
return model
    def create_variables(
        self, used_routes, previous_routes_infos, artificial_variables
    ):
        """Create all PuLP variables and store them on ``self.*_var``.

        Variables whose time index falls before ``Hmin - resolve_margin`` are
        warm-started and frozen so earlier windows cannot be changed.

        :param used_routes: routes included in the model (dict id -> route)
        :param previous_routes_infos: (id_shift, trailer, driver) tuples from
            the previous solution, used for warm starting
        :param artificial_variables: whether to create the artificial
            delivery variables
        """
        # Indices
        ind_td_routes = self.get_td_routes(used_routes)
        ind_customers_hours = self.get_customers_hours()
        # Initial quantities from previous solutions
        initial_quantities = dict()
        for r, route in self.solution.get_id_and_shifts():
            # k counts the visit number per location within the shift
            k = defaultdict(int)
            for step in route["route"]:
                if step["location"] == 0:
                    continue
                k[step["location"]] += 1
                initial_quantities[
                    (
                        step["location"],
                        r,
                        self.solution.get_shift_property(r, "trailer"),
                        k[step["location"]],
                    )
                ] = step["quantity"]
        # Variables : route — binary: shift r operated by (trailer, driver)
        self.route_var = pl.LpVariable.dicts("route", ind_td_routes, 0, 1, pl.LpBinary)
        previous_routes_infos_s = set(previous_routes_infos)
        for k, v in self.route_var.items():
            v.setInitialValue(k in previous_routes_infos_s)
            # freeze decisions that belong to an already-resolved window
            if self.routes[k[0]].start < self.Hmin - self.resolve_margin:
                v.fixValue()
        self.route_var = SuperDict(self.route_var)
        self.print_in_console("Var 'route'")
        # Variables : Artificial Quantity (non-positive "ideal" deliveries)
        if artificial_variables:
            self.artificial_quantities_var = pl.LpVariable.dicts(
                "ArtificialQuantity", ind_customers_hours, None, 0
            )
            self.artificial_binary_var = pl.LpVariable.dicts(
                "ArtificialBinary", ind_customers_hours, 0, 1, pl.LpBinary
            )
            for (i, h) in ind_customers_hours:
                # no artificial deliveries allowed in the frozen past
                if h < self.Hmin - self.resolve_margin:
                    self.artificial_binary_var[i, h].setInitialValue(0)
                    self.artificial_binary_var[i, h].fixValue()
            self.artificial_quantities_var = SuperDict(self.artificial_quantities_var)
            self.artificial_binary_var = SuperDict(self.artificial_binary_var)
        else:
            self.artificial_quantities_var = SuperDict()
            self.artificial_binary_var = SuperDict()
        self.print_in_console("Var 'ArtificialQuantity'")
        # Variables : Inventory — customer tank level, bounded by tank capacity
        _capacity = lambda i: self.instance.get_customer_property(i, "Capacity")
        self.inventory_var = {
            (i, h): pl.LpVariable(f"Inventory{i, h}", 0, _capacity(i))
            for (i, h) in self.get_var_inventory_domain()
        }
        self.inventory_var = SuperDict(self.inventory_var)
        self.print_in_console("Var 'inventory'")
        # Variables : quantity — deliveries (>= 0) and pickups (<= 0) per visit
        for (r, i, tr, k) in self.get_var_quantity_s_domain(used_routes):
            self.quantity_var[i, r, tr, k] = pl.LpVariable(f"quantity{i, r, tr, k}", 0)
            if initial_quantities.get((i, r, tr, k), None) is None:
                continue
            self.quantity_var[i, r, tr, k].setInitialValue(
                initial_quantities[(i, r, tr, k)]
            )
            """if self.routes[r].start < self.Hmin - self.resolve_margin:
                self.variables["quantity"][(i, r, tr, k)].fixValue() """
        for (r, i, tr, k) in self.get_var_quantity_p_domain(used_routes):
            self.quantity_var[i, r, tr, k] = pl.LpVariable(
                f"quantity{i, r, tr, k}", upBound=0
            )
            if initial_quantities.get((i, r, tr, k), None) is None:
                continue
            self.quantity_var[i, r, tr, k].setInitialValue(
                initial_quantities[(i, r, tr, k)]
            )
            """if self.routes[r].start < self.Hmin - self.resolve_margin:
                self.variables["quantity"][(i, r, tr, k)].fixValue() """
        self.print_in_console("Var 'quantity'")
        # Variables : TrailerQuantity — load per trailer/hour, up to its capacity
        _capacity = lambda tr: self.instance.get_trailer_property(tr, "Capacity")
        self.trailer_quantity_var = {
            (tr, h): pl.LpVariable(
                f"TrailerQuantity{tr, h}",
                lowBound=0,
                upBound=_capacity(tr),
            )
            for (tr, h) in self.get_var_trailer_quantity_domain()
        }
        self.trailer_quantity_var = SuperDict(self.trailer_quantity_var)
        self.print_in_console("Var 'TrailerQuantity'")
def create_constraints(self, model, used_routes, artificial_variables):
    """Add the objective and every constraint family to the PuLP model.

    :param model: the pl.LpProblem being built (mutated in place)
    :param used_routes: dict {route index: route object} of candidate shifts
    :param artificial_variables: if truthy, penalized artificial delivery
        variables are included in the objective and constraint (A2) is added
    :return: the same model, with objective and constraints attached
    """
    # Indices
    ind_td_routes = TupList(self.get_td_routes(used_routes))
    ind_td_routes_r = ind_td_routes.to_dict(result_col=[1, 2])
    ind_customers_hours = self.get_customers_hours()
    # Pre-compute, per customer, the (route, trailer, visit) tuples shared by (7) and (12).
    _sum_c7_c12_domain = {
        i: self.get_sum_c7_c12_domain(used_routes, i)
        for i in self.instance.get_id_customers()
    }
    # Constraints (1), (2) - Minimize the total cost
    costs = ind_td_routes.to_dict(None).vapply(lambda v: self.cost(*v))
    objective = pl.lpSum(self.route_var * costs)
    if artificial_variables:
        # Each artificial delivery is penalized with a flat cost of 2000.
        objective += pl.lpSum(2000 * self.artificial_binary_var.values_tl())
    model += objective, "Objective"
    self.print_in_console("Added (Objective) - w/o art. vars.")
    # Constraints : (3) - A shift is only realized by one driver with one driver
    for r, tr_dr in ind_td_routes_r.items():
        model += (
            pl.lpSum(self.route_var[r, tr, dr] for tr, dr in tr_dr) <= 1,
            f"C3_r{r}",
        )
    self.print_in_console("Added (3)")
    # Constraints : (7) - Conservation of the inventory
    for (i, h) in ind_customers_hours:
        artificial_var = 0
        if artificial_variables:
            artificial_var = self.artificial_quantities_var[i, h]
        # inventory(h) = inventory(h-1) - deliveries(h) - artificial(h) - forecast(h)
        # (delivery quantities are non-positive by construction, hence the minus signs).
        model += (
            -self.inventory_var[i, h]
            + self.inventory_var[i, h - 1]
            - pl.lpSum(
                self.quantity_var[i, r, tr, k]
                for (r, tr, k) in _sum_c7_c12_domain[i]
                if self.k_visit_hour[(i, h, r, k)]
            )
            - artificial_var
            - self.instance.get_customer_property(i, "Forecast")[h]
            == 0
        ), f"C7_i{i}_h{h}"
    # Hour -1 pins each customer's inventory to its initial tank quantity.
    for i in self.all_customers:
        initial_tank = self.instance.get_customer_property(i, "InitialTankQuantity")
        model += self.inventory_var[i, -1] == initial_tank, f"C7_i{i}_h-1"
    self.print_in_console("Added (7)")
    # Constraints: (A2) - The quantity delivered in an artificial delivery respects quantities constraints
    if artificial_variables:
        for (i, h) in ind_customers_hours:
            model += (
                self.artificial_quantities_var[i, h]
                + self.artificial_binary_var[i, h]
                * min(
                    self.instance.get_customer_property(i, "Capacity"),
                    self.routes_generator.max_trailer_capacities,
                )
                >= 0
            ), f"CA2_i{i}_h{h}"
        self.print_in_console(f"Added (A2)")
    # Constraints : (9) - Conservation of the trailers' inventories
    _sum_c9_domain = self.get_sum_c9_domain(used_routes)
    for (tr, h) in self.get_c4_c9_domain():
        model += (
            pl.lpSum(
                self.quantity_var[i, r, tr, k] * self.k_visit_hour[(i, h, r, k)]
                for (r, i, k) in _sum_c9_domain
            )
            + self.trailer_quantity_var[tr, h - 1]
            == self.trailer_quantity_var[tr, h]
        ), f"C9_tr{tr}_h{h}"
    # Hour -1 pins each trailer to its initial load.
    for tr in self.instance.get_id_trailers():
        initial_quantity = self.instance.get_trailer_property(tr, "InitialQuantity")
        model += (
            self.trailer_quantity_var[tr, -1] == initial_quantity,
            f"C9_tr{tr}_h-1",
        )
    self.print_in_console("Added (9)")
    # Constraints: (15) - Conservation of the total inventory between the beginning of the time horizon and the end
    # Two coefficients exist: one for a full-horizon solve, one for a partial solve.
    if self.Hmax >= self.horizon:
        model += (
            pl.lpSum(
                self.coef_inventory_conservation * self.inventory_var[i, -1]
                - self.inventory_var[i, self.Hmax - 1]
                for i in self.instance.get_id_customers()
            )
            <= 0
        ), f"C15"
        self.print_in_console("Added (15)")
    else:
        model += (
            pl.lpSum(
                self.coef_part_inventory_conservation * self.inventory_var[i, -1]
                - self.inventory_var[i, self.Hmax - 1]
                for i in self.instance.get_id_customers()
            )
            <= 0
        ), f"C15"
        self.print_in_console("Added (15)")
    # Constraints: (10) - Quantities delivered don't exceed trailer capacity
    # NOTE(review): _capacity here is the CUSTOMER capacity despite the comment
    # mentioning trailer capacity — confirm which bound is intended.
    _drivers = self.instance.get_id_drivers()
    for (r, i, tr, k) in self.get_c10_domain(used_routes):
        _capacity = self.instance.get_customer_property(i, "Capacity")
        model += (
            pl.lpSum(self.route_var[r, tr, dr] * _capacity for dr in _drivers)
            >= -self.quantity_var[i, r, tr, k],
            f"C10_i{i}_r{r}_tr{tr}_k{k}",
        )
    self.print_in_console("Added (10)")
    # Constraints: (11), (12)
    for (r, route, i, tr, k) in self.get_c11_c12_domain(used_routes):
        # Constraint: (11) - Quantities delivered don't exceed the quantity in the trailer
        # Helper lambdas build index tuples for the earlier stops (j, kp) on the same route.
        visited = lambda j: route.visited[j][0]
        q_tup = lambda j, kp: (visited(j), r, tr, kp)
        hour_tup = lambda j, kp: (
            visited(j),
            self.hour_of_visit[i, r, k],
            r,
            kp,
        )
        visit_tup = lambda j, kp: (r, visited(j), kp, i, k)
        model += (
            pl.lpSum(
                (
                    self.quantity_var[q_tup(j, kp)]
                    * self.k_visit_hour[hour_tup(j, kp)]
                    * self.visit_before_on_route(*visit_tup(j, kp))
                )
                for (j, kp) in self.get_sum_c11_c14_domain(i, r, k)
            )
            + self.quantity_var[i, r, tr, k]
            + self.trailer_quantity_var[(tr, self.hour_of_visit[i, r, k] - 1)]
            >= 0
        ), f"C11_i{i}_r{r}_tr{tr}_k{k}"
        # Constraint: (12) - Quantities delivered don't exceed available space in customer tank
        # NOTE(review): the trailing comma below makes the generator yield
        # ONE-ELEMENT TUPLES, relying on pl.lpSum flattening them; (11) above
        # has no such comma — confirm this asymmetry is intentional.
        # NOTE(review): the filter uses `and` (excludes pairs only when BOTH
        # route and trailer differ) — verify `or` was not intended.
        model += (
            pl.lpSum(
                (
                    self.quantity_var[i, rp, trp, kp]
                    * self.k_visit_hour[i, self.hour_of_visit[i, r, k], rp, kp]
                    * self.visit_before(i, r, k, rp, kp),
                )
                for (rp, trp, kp) in _sum_c7_c12_domain[i]
                if r != rp and tr != trp
            )
            - self.inventory_var[i, self.hour_of_visit[i, r, k] - 1]
            + self.quantity_var[i, r, tr, k]
            + self.instance.get_customer_property(i, "Capacity")
            >= 0
        ), f"C12_i{i}_r{r}_tr{tr}_k{k}"
    self.print_in_console("Added (11), (12)")
    # Constraints: (13) - Quantities loaded at a source don't exceed trailer capacity
    _drivers = self.instance.get_id_drivers()
    for (r, i, tr, k) in self.get_c13_domain(used_routes):
        _capacity = self.instance.get_trailer_property(tr, "Capacity")
        model += (
            self.quantity_var[i, r, tr, k]
            <= pl.lpSum(self.route_var[r, tr, dr] for dr in _drivers) * _capacity
        ), f"C13_i{i}_r{r}_tr{tr}_k{k}"
    self.print_in_console("Added (13)")
    # Constraints: (14) - Quantities loaded at a source don't exceed free space in the trailer
    for (r, route, i, tr, k) in self.get_c14_domain(used_routes):
        visited = lambda j: route.visited[j][0]
        q_tup = lambda j, kp: (visited(j), r, tr, kp)
        hour_tup = lambda j, kp: (
            visited(j),
            self.hour_of_visit[(i, r, k)],
            r,
            kp,
        )
        visit_tup = lambda j, kp: (r, visited(j), kp, i, k)
        # NOTE(review): same one-element-tuple pattern as (12) — relies on
        # pl.lpSum flattening; confirm intended.
        model += (
            pl.lpSum(
                (
                    self.quantity_var[q_tup(j, kp)]
                    * self.k_visit_hour[hour_tup(j, kp)]
                    * self.visit_before_on_route(*visit_tup(j, kp)),
                )
                for (j, kp) in self.get_sum_c11_c14_domain(i, r, k)
            )
            + self.quantity_var[i, r, tr, k]
            + self.trailer_quantity_var[(tr, self.hour_of_visit[(i, r, k)] - 1)]
            <= self.instance.get_trailer_property(tr, "Capacity")
        ), f"C14_i{i}_r{r}_tr{tr}_k{k}"
    self.print_in_console("Added (14)")
    # Constraints (4), (5) - Two shifts with same trailer can't happen at the same time
    # and two shifts realized by the same driver must leave time for the driver to rest between both
    _sum_c4_domain = self.get_sum_c4_domain(used_routes)
    _sum_c5_domain = self.get_sum_c5_domain(used_routes)
    for (tr, h) in self.get_c4_c9_domain():
        model += (
            pl.lpSum(
                self.route_var[r, tr, dr]
                for r, route, dr in _sum_c4_domain
                if self.runs_at_hour(r, h)
            )
            <= 1,
            f"C4_tr{tr}_h{h}",
        )
    self.print_in_console("Added (4)")
    for (dr, h) in self.get_c5_domain():
        model += (
            pl.lpSum(
                self.route_var[r, tr, dr]
                for r, route, tr in _sum_c5_domain
                if self.blocks_driver_at_hour(r, dr, h)
            )
            <= 1,
            f"C5_dr{dr}_h{h}",
        )
    self.print_in_console("Added (5)")
    return model
def select_routes(self, nb=500):
    """
    Pops the first `nb` shifts from self.unused_routes and returns them.

    :param nb: maximum number of shifts to select (capped by availability)
    :return: dict mapping shift indices to their RouteLabel instances
    """
    self.print_in_console(
        "Selecting routes at: ", datetime.now().strftime("%H:%M:%S")
    )
    take = min(nb, len(self.unused_routes))
    chosen = list(self.unused_routes)[:take]
    # pop() both collects the route and removes it from the unused pool.
    selected_routes = {r: self.unused_routes.pop(r) for r in chosen}
    self.print_in_console(selected_routes)
    return selected_routes
def get_td_routes(self, used_routes):
    """Return every possible triple (route index, trailer index, driver index)."""
    trailers = self.instance.get_id_trailers()
    drivers = self.instance.get_id_drivers()
    return [(r, tr, dr) for r in used_routes for tr in trailers for dr in drivers]
def get_customers_hours(self):
    """Return every (customer, hour) pair over the truncated horizon [0, Hmax)."""
    return [(i, h) for i in self.all_customers for h in range(self.Hmax)]
def get_var_inventory_domain(self):
return [
(i, h) for i in self.all_customers for h in list(range(self.Hmax)) + [-1]
]
def get_var_trailer_quantity_domain(self):
return [
(tr, h)
for tr in self.instance.get_id_trailers()
for h in list(range(self.Hmax)) + [-1]
]
def get_var_quantity_s_domain(self, used_routes):
    """(route, source, trailer, visit) tuples for the source-loading quantity variables."""
    trailers = self.instance.get_id_trailers()
    domain = []
    for r in used_routes:
        for i in self.all_sources:
            for tr in trailers:
                domain.extend((r, i, tr, k) for k in range(1, self.nb_visits[i, r] + 1))
    return domain
def get_var_quantity_p_domain(self, used_routes):
    """(route, customer, trailer, visit) tuples for the delivery quantity variables."""
    trailers = self.instance.get_id_trailers()
    domain = []
    for r in used_routes:
        for i in self.all_customers:
            for tr in trailers:
                for k in range(1, self.nb_visits[i, r] + 1):
                    domain.append((r, i, tr, k))
    return domain
def get_sum_c4_domain(self, used_routes):
    """(route index, route object, driver) triples used by constraint (4)."""
    drivers = self.instance.get_id_drivers()
    return [(r, route, dr) for r, route in used_routes.items() for dr in drivers]
def get_sum_c5_domain(self, used_routes):
return [
(r, route, tr)
for r, route in used_routes.items()
for tr in self.instance.get_id_trailers()
]
def get_sum_c9_domain(self, used_routes):
    """(route, location, visit) triples used by constraint (9)."""
    triples = []
    for r in used_routes:
        for i in self.unique_locations_in[r]:
            triples.extend((r, i, k) for k in range(1, self.nb_visits[i, r] + 1))
    return triples
def get_sum_c7_c12_domain(self, used_routes, i):
return [
(r, tr, k)
for r in used_routes
for tr in self.instance.get_id_trailers()
for k in range(1, self.nb_visits[i, r] + 1)
]
def get_sum_c11_c14_domain(self, i, r, k):
    """(step, visit) pairs for route positions strictly before visit `k` of `i` on route `r`."""
    pairs = []
    for j in range(1, self.k_position(i, r, k)):
        loc = self.routes[r].visited[j][0]
        pairs.extend((j, kp) for kp in range(1, self.nb_visits[loc, r] + 1))
    return pairs
def get_c4_c9_domain(self):
return [
(tr, h) for tr in self.instance.get_id_trailers() for h in range(self.Hmax)
]
def get_c5_domain(self):
return [
(dr, h) for dr in self.instance.get_id_drivers() for h in range(self.Hmax)
]
def get_c10_domain(self, used_routes):
    """(route, customer, trailer, visit) tuples for constraint (10)."""
    trailers = self.instance.get_id_trailers()
    out = []
    for r, i, tr in itertools.product(used_routes, self.all_customers, trailers):
        for k in range(1, self.nb_visits[i, r] + 1):
            out.append((r, i, tr, k))
    return out
def get_c11_c12_domain(self, used_routes):
    """(route index, route, customer, trailer, visit) tuples for constraints (11) and (12)."""
    out = []
    trailers = self.instance.get_id_trailers()
    for r, route in used_routes.items():
        for i in self.unique_customers_in(route):
            for tr in trailers:
                out.extend(
                    (r, route, i, tr, k)
                    for k in range(1, self.nb_visits[i, r] + 1)
                )
    return out
def get_c13_domain(self, used_routes):
    """(route, source, trailer, visit) tuples for constraint (13)."""
    sources = self.instance.get_id_sources()
    trailers = self.instance.get_id_trailers()
    out = []
    for r, i, tr in itertools.product(used_routes, sources, trailers):
        out.extend((r, i, tr, k) for k in range(1, self.nb_visits[i, r] + 1))
    return out
def get_c14_domain(self, used_routes):
    """(route index, route, source, trailer, visit) tuples for constraint (14)."""
    out = []
    trailers = self.instance.get_id_trailers()
    for r, route in used_routes.items():
        for i in self.unique_sources_in(route):
            for tr in trailers:
                for k in range(1, self.nb_visits[i, r] + 1):
                    out.append((r, route, i, tr, k))
    return out
| StarcoderdataPython |
1615414 | <filename>super_resolution/EDSR-PyTorch/src/example.py
import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
from benchmark import benchmarking
import os
# --- Benchmark configuration: override the parsed CLI args for a fixed
# --- x2 EDSR-baseline evaluation run on the 'valid' split.
data_dir = os.environ['TESTDATADIR']
# NOTE(review): os.environ[...] raises KeyError when the variable is unset and
# never returns None, so this assert can never fire — confirm intent.
assert data_dir is not None, "No data directory"
print('TESTDATADIR = {}'.format(data_dir))
args.dir_data = data_dir
print('dir_data = {}'.format(args.dir_data))
args.data_test = ['valid']
print('data_test = {}'.format(args.data_test))
args.scale = [2]
print('scale = {}'.format(args.scale))
# Pre-trained weights expected next to the script.
args.pre_train = './edsr_baseline_download.pt'
print('pre_train = {}'.format(args.pre_train))
args.test_only = True
print('test_only = {}'.format(args.test_only))
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
# NOTE: these assignments shadow the imported `model`, `data` is still the
# module, and `loss` shadows the imported `loss` module.
model = model.Model(args, checkpoint)
loader = data.Data(args)
# test_only is True above, so `loss` is always None here.
loss = loss.Loss(args, checkpoint) if not args.test_only else None
@benchmarking(team=3, task=1, model=model, preprocess_fn=None)
def inference(model, data_loader, **kwargs):
    """Run the EDSR test pass on the device chosen by the benchmarking harness.

    The harness passes the target device via kwargs['device']; the global
    `args` object is mutated accordingly before building a Trainer.
    Returns the metric reported by Trainer.test() (0 when it is 'nan').
    """
    dev = kwargs['device']
    if dev == 'cuda':
        args.cpu = False
        args.ngraph = ''
        # from trainer import Trainer
        model = model.to(torch.device('cpu' if args.cpu else 'cuda'))
        t = Trainer(args, data_loader, model, loss, checkpoint)
        metric = t.test()
    if dev == 'cpu':
        args.cpu = True
        # args.ngraph = ''
        # Use the pre-compiled nGraph model on CPU.
        args.ngraph = './edsr.model'
        # from trainer import Trainer
        model = model.to(torch.device('cpu' if args.cpu else 'cuda'))
        t = Trainer(args, data_loader, model, loss, checkpoint)
        metric = t.test()
    # NOTE(review): if dev is neither 'cuda' nor 'cpu', `metric` is unbound
    # and the line below raises NameError — confirm the harness only ever
    # passes these two values.
    # NOTE(review): this compares against the STRING 'nan'; a float NaN from
    # t.test() would not be caught — confirm what Trainer.test() returns.
    if metric == 'nan':
        metric = 0
    return metric
def main():
    """Lower the process priority, then run the benchmark inference pass."""
    # Maximum niceness: yield CPU to other processes during the benchmark.
    os.nice(20)
    inference(model, loader)
# raise ValueError("crashed because I'm a bad exception")
if __name__ == "__main__":
try:
main()
except Exception as e:
logger.exception("main crashed. Error: %s", e)
| StarcoderdataPython |
3299292 | '''
Author: Mitchell
This code is used to find the mahalanobis distance between three variables and highlight points of interest
- Edited by <NAME> for app purposes
'''
import pandas as pd
import numpy as np
import scipy as sp
import scipy.signal as sg
from scipy.stats import chi2
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import math
from helpers import cmdArgs
import os
import sys
def mahalanobis(x=None, data=None, cov=None):
"""Compute the Mahalanobis Distance between each row of x and the data
x : vector or matrix of data with, say, p columns.
data : ndarray of the distribution from which Mahalanobis distance of each observation of x is to be computed.
cov : covariance matrix (p x p) of the distribution. If None, will be computed from data.
"""
# print("Started Mahalanobis Function")
x_minus_mu = x - np.mean(data)
if not cov:
cov = np.cov(data.values.T)
inv_covmat = sp.linalg.inv(cov)
left_term = np.dot(x_minus_mu, inv_covmat)
mahal = np.dot(left_term, x_minus_mu.T)
# print("Ended mahalanobis Function")
return mahal.diagonal()
def exportFig(fig, new_file_path):
fig.write_html(new_file_path)
def findPOI(calc_file_path, new_file_path, is_export, var1, var2, var3,
p_limit = 0.003, windowSize = 60, groupSize = 5, depthLimit = 0):
"""
This function utilizes mahalanobis distance to find points of interest.
A group of points of 'groupSize' number of windows is slid across the data at 1 window intervals.
Each window consists of 'windowSize' times the sampling frequency number of points.
The mean mahalanobis distance of overlaping windows was then recorded.
Variables:
calc_file_path : .csv file of data (must include var1, var2, var3, and a sampling frequency 'fs')
target_dir : target directory to save to if is_export
is_export : True or False for exporting or simpy viewing
var1, var2, var3 : strings of the names of the variables to calcualate mahalanobis distance between
p_limit : p value of mahalanobis distance must be less than this value to trigger POI
windowSize : length in seconds of a window of data, default 60 seconds to calculate mahalanobis distance
groupSize : number of instersecting windows to calucualte mean from, default 5 windows
depthLimit : minimum depth necessary in order for POI to trigger, default 0
"""
# Pull in Data
data = pd.read_csv(calc_file_path)
# Pull Specific Variables :
fs = data['fs'].tolist()[0]
p = np.array(data["Depth"])
# True time in datetime format
"""commented out due to errors with marker plot and datetime"""
# t = data["Time"]
# Calculate time
numData = len(p)
t = [x/fs for x in range(numData)]
t_hr = [x/3600 for x in t]
# Pull variables of interest into one dataframe
df_x = data[[var1, var2, var3]]
# Compute index length of window
window = int(fs*windowSize) # Converted to int because some of fs are floats
numWindows = math.floor(len(p)/window) # Number of windows in dataset
# Create groups to run through
group = window*groupSize # GroupSize defined by function inputs
numGroups = math.floor(len(p)/group)
# Alternate Windowing method:
# Create Dataframe for calculating mean mahalanobis between intersections
mean_df = pd.DataFrame(np.NaN, index = range(group + window*(groupSize-1)), columns = range(groupSize))
# Create an array to compile averaged mahalanobis distances per window
l_mahala = [0]*len(p)
# Loop through windows of the data
for i in range(numWindows):
# Print Status of function in console
# print(i+1,"of", numWindows)
# First windows within the groupSize
if i < groupSize:
# Fill the mean dataframe
mean_df[i][window*i : window*i + group] = mahalanobis(x = df_x[(window*i): (window*i+group)], data = data[[var1, var2, var3]][(window*i):(window*i + group)])
l_mahala[window*i:window*(i+1)] = mean_df.mean(axis = 1)[window*i:window*(i+1)]
# Middle Windows with full overlap for mean calculation
elif i >= groupSize and i <= (numWindows - groupSize):
# Shift mean dataframe to track the next window
mean_df.shift(periods = -window, axis = 0)
mean_df.shift(periods = -1, axis = 1)
# Fill mean dataframe and calculate overlapping means
mean_df[(groupSize-1)][window*(groupSize-1):] = mahalanobis(x = df_x[(window*i): (window*i+group)], data = data[[var1, var2, var3]][(window*i):(window*i + group)])
l_mahala[window*i:window*(i+1)] = mean_df.mean(axis = 1)[window*(groupSize-1):window*groupSize]
# End of the data, must account for not full window at the end
else:
# Shift mean dataframe to track the next window
mean_df.shift(periods = -window, axis = 0)
mean_df.shift(periods = -1, axis = 1)
# Fill mean dataframe and calculate overlapping means
mean_df[(groupSize-1)][window*(groupSize-1): (window*(groupSize-1) + len(data[var1][(window*i):]))] = mahalanobis(x = df_x[(window*i):], data = data[[var1, var2, var3]][(window*i):])
# Fill averaged mahalanobis, if tree for final window which most likely is not full
if i == numWindows - 1:
l_mahala[window*i:] = mean_df.mean(axis = 1)[window*(groupSize-1):(window*(groupSize-1) + len(data[var1][(window*i):]))]
else:
l_mahala[window*i:window*(i+1)] = mean_df.mean(axis = 1)[window*(groupSize-1):window*groupSize]
# Insert mean mahalanobis distance list into dataframe
df_x['mahala'] = l_mahala
# Compute P-Values of mahalanobis distance
df_x['p_value'] = 1 - chi2.cdf(df_x['mahala'], 2)
# Pull indexes of points with p values smaller than the set limit
POIpts = df_x.loc[df_x.p_value < p_limit]
# Pull and x and y for depth of POI
POIx = [t_hr[i] for i in POIpts.index if p[i] > depthLimit]
POIy = [p[i] for i in POIpts.index if p[i] > depthLimit]
# Make Figure
fig = go.Figure(
make_subplots(
# Deifne dimensions of subplot
rows = 2, cols=1,
# Define what plot goes where and the type of plot
specs = [[{}],
[{}]],
shared_xaxes = True
)
)
# Create traces for the data and add to figure
# fig.add_trace(go.Scattergl(x = sT_hr, y = sP, mode = "lines", name = "Depth"), row = 1, col = 1)
fig.add_trace(go.Scattergl(x = t_hr, y = p, mode = "lines", name = "Depth"), row = 1, col = 1)
fig.add_trace(go.Scattergl(x = POIx, y = POIy, mode = "markers", name = "POI", marker = dict(color = 'green', symbol = 'square', size = 10)), row = 1, col = 1)
fig.add_trace(go.Scattergl(x = t_hr, y = df_x[var1], mode = "lines", name = var1), row = 2, col = 1)
fig.add_trace(go.Scattergl(x = t_hr, y = df_x[var2], mode = "lines", name = var2), row = 2, col = 1)
fig.add_trace(go.Scattergl(x = t_hr, y = df_x[var3], mode = "lines", name = var3), row = 2, col = 1)
# Update x-axis
fig.update_xaxes(title = "Time (hr)", rangeslider = dict(visible = True), row = 2, col = 1)
# Update y-axis
fig.update_yaxes(title = "Depth (m)", autorange = "reversed", row = 1, col = 1)
# Code to add windowing button -
"""commented out until x axis is in datetime format"""
# fig.update_layout(
# title = filename.split('/')[-1],
# width = 1200,
# height = 800,
# xaxis=dict(
# rangeselector=dict(
# buttons=list([
# dict(count=1,
# label="1 hr",
# step="hour",
# stepmode="backward"),
# dict(count=30,
# label="30 min",
# step="minute",
# stepmode="backward"),
# dict(count=10,
# label="10 min",
# step="minute",
# stepmode="backward"),
# dict(count=1,
# label="1 min",
# step="minute",
# stepmode="backward"),
# dict(count=30,
# label="30 sec",
# step="second",
# stepmode="backward"),
# dict(step="all")
# ])
# ),
# type="date"
# )
# )
# Show figure and save as an HTML
if is_export:
exportFig(fig, new_file_path)
else:
fig.show()
def main(cmdLineArgs: dict):
try:
calc_file_path = cmdLineArgs['calcFilePath']
new_file_path = cmdLineArgs['newFilePath']
is_export = cmdLineArgs['isExport']
var1 = cmdLineArgs['variableOne']
var2 = cmdLineArgs['variableTwo']
var3 = cmdLineArgs['variableThree']
p_limit = cmdLineArgs['pLimit']
p_limit = float(p_limit)
window_size = cmdLineArgs['windowSize']
window_size = float(window_size)
group_size = cmdLineArgs['groupSize']
group_size = int(group_size)
depth_limit = cmdLineArgs['depthLimit']
depth_limit = float(depth_limit)
is_export = True if is_export == "True" else False
findPOI(calc_file_path, new_file_path, is_export, var1, var2, var3,
p_limit, window_size, group_size, depth_limit)
return "SUCCESS"
except Exception as e:
raise Exception(e)
if __name__ == "__main__":
print(main())
sys.stdout.flush() | StarcoderdataPython |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup # 网页解析,获取数据
import re # 正则表达式,进行文字匹配`
import urllib.request, urllib.error # 制定URL,获取网页数据
import xlwt # 进行excel操作
import time
import random
import re
import time
import requests
import threading
from lxml import html
etree=html.etree
from bs4 import BeautifulSoup
from queue import Queue
from threading import Thread
import pandas as pd
# Regex extracting the short-comment text from a Douban comment block.
findcomment = re.compile(r'<span class="short">(.*)</span>')
# Regex extracting the comment timestamp (from the title attribute).
findtime=re.compile(r'<span class="comment-time" title="(.*)"')
# Regex extracting the star-rating span: group 1 is the CSS class
# (e.g. "allstar40 rating"), group 2 the human-readable title.
findstar_list=re.compile(r'<span class="(.*)" title="(.*)"></span>')
# Regex for the "back to the movie page" link (unused in the active code path).
findTitle = re.compile(r'<p class="pl2">> <a href="(.*)">去 (.*) 的页面</a></p>')
# NOTE: `io` shadows the stdlib io module; the spreadsheet is read eagerly
# at import time (side effect).
io = 'D:\\Top250.xls'
df = pd.read_excel(io)
def askURL(url):
    """Fetch `url` with a randomly chosen desktop User-Agent header.

    Tells the Douban server what kind of machine/browser we are (i.e. what
    level of content it may send back).

    :param url: the page to request
    :return: the response body decoded as UTF-8, or "" on failure (the HTTP
        status code / failure reason is printed in that case)
    """
    pc_agent = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
        "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        # BUG FIX: the two entries below were missing separating commas, so
        # Python concatenated them into one malformed User-Agent string.
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0",
    ]
    agent = random.choice(pc_agent)
    head = {'User-Agent': agent}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
def run(q):  # q holds Douban movie ids
    """Worker thread body: drain movie ids from queue `q`, scrape up to 20
    pages of short comments per movie from Douban, and save one .xls file per
    movie named after its rank in the Top250 spreadsheet (global `df`).
    """
    # NOTE(review): empty()+get() is racy across threads; q.get(timeout=...)
    # with queue.Empty handling would be safer — confirm single-producer setup.
    while q.empty() is not True:
        datalist2 = []
        qq = q.get()
        for i in range(0, 20):
            time.sleep(1)  # throttle requests to avoid being rate-limited
            url = "https://movie.douban.com/subject/" + str(qq) + "/comments?start=" + str(
                i * 20) + "&limit=20&status=P&sort=new_score"
            print(url)
            html = askURL(url)
            soup = BeautifulSoup(html, "html.parser")
            for item in soup.find_all('div', class_="comment"):
                data = []  # one comment record: [text, time, star score]
                comment = re.findall(findcomment, str(item))
                comment_time = re.findall(findtime, str(item))
                comment_star = re.findall(findstar_list, str(item))
                if len(comment_star) == 0:
                    num1 = 0.0
                else:
                    # CSS class looks like "allstar40 ..."; chars 7:9 are the
                    # raw score (10-50), normalized to a 2-10 scale here.
                    star = comment_star[0][0]
                    num = int(star[7:9])
                    num1 = num / 5
                # NOTE(review): comment/comment_time are LISTS from findall and
                # are written to cells as-is below — confirm xlwt accepts that.
                data.append(comment)
                data.append(comment_time)
                data.append(num1)
                datalist2.append(data)
        book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # workbook object
        sheet = book.add_sheet('豆瓣电影Top1comment', cell_overwrite_ok=True)  # worksheet
        col = ("评论", "时间", "评分")
        # Header row, then one row per scraped comment (enumerate replaces the
        # original hand-maintained row counter).
        for col_idx, header in enumerate(col):
            sheet.write(0, col_idx, header)
        for row_idx, data in enumerate(datalist2, start=1):
            sheet.write(row_idx, 0, data[0])
            sheet.write(row_idx, 1, data[1])
            sheet.write(row_idx, 2, data[2])
        # Look up the movie's rank (row index) in the Top250 spreadsheet to
        # name the output file.
        a = df[df['id'].isin([int(qq)])].index.values[0]
        savepath2 = "豆瓣电影Top" + str(a + 1) + "comment.xls"
        print(savepath2)
        book.save(savepath2)
        q.task_done()
def main():
    """Load movie ids from the Top250 spreadsheet (global `df`, column 8),
    enqueue them, and scrape comments with 10 daemon worker threads."""
    work_queue = Queue()
    # Column 8 of each spreadsheet row holds the Douban movie id.
    movie_ids = [row[8] for row in df.values.tolist()]
    for movie_id in movie_ids:
        work_queue.put(str(movie_id))
    for _ in range(10):
        thread = Thread(target=run, args=(work_queue,))
        thread.daemon = True  # exit together with the main thread
        thread.start()
    work_queue.join()  # returns once every queued id has been task_done()'d
if __name__ == "__main__": # 当程序执行时
# 调用函数
main()
| StarcoderdataPython |
83639 | """
Use cases related to writing data to an output repository.
"""
import logging
import warnings
from pathlib import Path
from typing import List
from openpyxl import load_workbook
from engine.repository.datamap import InMemorySingleDatamapRepository
from engine.use_cases.parsing import ParseDatamapUseCase
from engine.use_cases.typing import (MASTER_DATA_FOR_FILE,
ColData)
# Silence noisy openpyxl warnings about workbook features it cannot preserve.
# NOTE(review): the patterns end with "*." — ".*" was probably intended;
# they still compile and loosely match, but confirm.
warnings.filterwarnings("ignore", ".*Conditional Formatting*.")
warnings.filterwarnings("ignore", ".*Sparkline Group*.")
# Module-wide logging: INFO level with a timestamped single-line format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s: %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)
logger = logging.getLogger(__name__)
class WriteMasterToTemplates:
    """
    Re-creation of the old bcompiler -a functionality.
    Writes data from a given master to a blank template and saves each file according
    to each relevant column in the master.
    """

    def __init__(self, output_repo, datamap: Path, master: Path, blank_template: Path):
        """
        :param output_repo: repository object whose write() persists the data
        :param datamap: path to the datamap CSV (key -> sheet/cellref mapping)
        :param master: path to the master workbook; its active sheet is loaded here
        :param blank_template: path to the blank template workbook
        """
        self.output_repo = output_repo
        self._datamap = datamap
        self._master_path = master
        self._master_sheet = load_workbook(master).active
        self._blank_template = blank_template
        # Declared here, populated by _check_datamap_matches_cola().
        self._col_a_vals: List[str]

    def _check_datamap_matches_cola(self) -> bool:
        """Parse the datamap and verify its keys line up, in order, with the
        values of column A of the master sheet (header row excluded).

        Side effects: sets self._dml_line_tup and self._col_a_vals.
        """
        parsed_dm_data = self._parse_dm_uc.execute(obj=True)
        self._dml_line_tup = [(x.key, x.sheet, x.cellref) for x in parsed_dm_data]
        self._col_a_vals = []
        for cell in next(self._master_sheet.columns):
            try:
                self._col_a_vals.append(cell.value.strip())
            except AttributeError:
                # Empty cells have value None (no .strip()); keep a placeholder.
                self._col_a_vals.append("EMPTY")
        # Drop the header cell.
        self._col_a_vals = self._col_a_vals[1:]
        # NOTE(review): zip truncates to the shorter sequence, so extra
        # trailing keys on either side pass this check silently — confirm.
        _pass = zip([x[0] for x in self._dml_line_tup], self._col_a_vals)
        return all([x[0] == x[1] for x in _pass])

    def _get_keys_in_datamap_not_in_master(self) -> List[str]:
        """Return the datamap keys that do not appear in master column A."""
        dm_keys_s = set([x[0] for x in self._dml_line_tup])
        master_keys_s = set(self._col_a_vals)
        return list(dm_keys_s - master_keys_s)

    def execute(self) -> None:
        """
        Writes a master file to multiple templates using blank_template,
        based on the blank_template and the datamap.

        :raises RuntimeError: when datamap keys are missing from the master
        """
        master_data: MASTER_DATA_FOR_FILE = []
        self.parse_dm_repo = InMemorySingleDatamapRepository(str(self._datamap))
        self._parse_dm_uc = ParseDatamapUseCase(self.parse_dm_repo)
        if not self._check_datamap_matches_cola():
            _missing_keys = self._get_keys_in_datamap_not_in_master()
            # You shall not pass if this is a problem
            if _missing_keys:
                for m in _missing_keys:
                    logger.critical(
                        f"Key {m} in the datamap but not in the master. Not continuing."
                    )
                raise RuntimeError(
                    "Not continuing. Ensure all keys from datamap are in the master."
                )
        # Column A (minus header) holds the datamap keys, row-aligned with data columns.
        cola = [x.value for x in list(self._master_sheet.columns)[0]][1:]
        for col in list(self._master_sheet.columns)[1:]:
            tups = []
            try:
                # Row 0 of each data column is the target file name (extension stripped).
                file_name = col[0].value.split(".")[0]
            except AttributeError:
                logger.warning(f"Found values in cells beyond end of expected end column. "
                               "For most reliable results, use a clean master file.")
                # An empty header means we are past the last real column; stop.
                break
            logger.info(f"Extracting data for {file_name} from {self._master_path}")
            for i, key in enumerate(cola, start=1):
                if key is not None:
                    key = key.strip()
                else:
                    # TODO - create a log register so this does not have to be repeated for every
                    # column of data in the master ().
                    logger.warning(f"Found values in cells beyond end of expected end row. "
                                   "For most reliable results, use a clean master file.")
                    break
                try:
                    sheet = [dml[1] for dml in self._dml_line_tup if dml[0] == key][0]
                except IndexError:
                    # Key has no datamap line; skip this cell.
                    continue
                cd = ColData(
                    key=key,
                    sheet=sheet,
                    cellref=[dml[2] for dml in self._dml_line_tup if dml[0] == key][0],
                    value=col[i].value,
                    file_name=file_name,
                )
                tups.append(cd)
            master_data.append(tups)
        self.output_repo.write(master_data, from_json=False)
| StarcoderdataPython |
3390412 | """
Unit tests for SyncWorker.py
"""
import unittest
func = __import__("SyncWorker")
def mock_event(text_value=""):
return {
"channel_name": ["test_channel"],
"command": ["/slack-unittest"],
"user_name": ["test_user_namee"],
"user_id": ["test_user_id"],
"text": [text_value],
"response_url": ["test_url"],
}
class TestFunction(unittest.TestCase):
    """Exercise the SyncWorker Lambda entry point with a mocked Slack event."""

    def test_lambda_handler(self):
        response = func.lambda_handler(mock_event(text_value="sync"), None)
        expected = {
            "body": "Processed <@test_user_id> `/slack-unittest sync` by SyncWorker.",
            "statusCode": 200,
        }
        self.assertEqual(expected, response)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
4827593 | <gh_stars>1-10
# import json
import datetime
from httpretty import HTTPretty
from social.p3 import urlencode
from social.exceptions import AuthMissingParameter
from tests.open_id import OpenIdTest
JANRAIN_NONCE = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
class LiveJournalOpenIdTest(OpenIdTest):
backend_path = 'social.backends.livejournal.LiveJournalOpenId'
expected_username = 'foobar'
discovery_body = ''.join([
'<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">',
'<XRD>',
'<Service priority="0">',
'<Type>http://specs.openid.net/auth/2.0/signon</Type>',
'<URI>http://www.livejournal.com/openid/server.bml</URI>',
'<LocalID>http://foobar.livejournal.com/</LocalID>',
'</Service>',
'</XRD>',
'</xrds:XRDS>'
])
server_response = urlencode({
'janrain_nonce': JANRAIN_NONCE,
'openid.mode': 'id_res',
'openid.claimed_id': 'http://foobar.livejournal.com/',
'openid.identity': 'http://foobar.livejournal.com/',
'openid.op_endpoint': 'http://www.livejournal.com/openid/server.bml',
'openid.return_to': 'http://myapp.com/complete/livejournal/?'
'janrain_nonce=' + JANRAIN_NONCE,
'openid.response_nonce': JANRAIN_NONCE + 'wGp2rj',
'openid.assoc_handle': '1364932966:ZTiur8sem3r2jzZougMZ:4d1cc3b44e',
'openid.ns': 'http://specs.openid.net/auth/2.0',
'openid.signed': 'mode,claimed_id,identity,op_endpoint,return_to,'
'response_nonce,assoc_handle',
'openid.sig': 'Z8MOozVPTOBhHG5ZS1NeGofxs1Q=',
})
server_bml_body = '\n'.join([
'assoc_handle:1364935340:ZhruPQ7DJ9eGgUkeUA9A:27f8c32464',
'assoc_type:HMAC-SHA1',
'dh_server_public:WzsRyLomvAV3vwvGUrfzXDgfqnTF+m1l3JWb55fyHO7visPT4tmQ'
'iTjqFFnSVAtAOvQzoViMiZQisxNwnqSK4lYexoez1z6pP5ry3pqxJAEYj60vFGvRztict'
'Eo0brjhmO1SNfjK1ppjOymdykqLpZeaL5fsuLtMCwTnR/JQZVA=',
'enc_mac_key:<KEY>
'expires_in:1207060',
'ns:http://specs.openid.net/auth/2.0',
'session_type:DH-SHA1',
''
])
def openid_url(self):
return super(LiveJournalOpenIdTest, self).openid_url() + '/data/yadis'
def post_start(self):
self.strategy.remove_from_request_data('openid_lj_user')
def _setup_handlers(self):
HTTPretty.register_uri(
HTTPretty.POST,
'http://www.livejournal.com/openid/server.bml',
headers={'Accept-Encoding': 'identity',
'Content-Type': 'application/x-www-form-urlencoded'},
status=200,
body=self.server_bml_body
)
HTTPretty.register_uri(
HTTPretty.GET,
'http://foobar.livejournal.com/',
headers={
'Accept-Encoding': 'identity',
'Accept': 'text/html; q=0.3,'
'application/xhtml+xml; q=0.5,'
'application/xrds+xml'
},
status=200,
body=self.discovery_body
)
def test_login(self):
    """A full login succeeds when the LiveJournal username is supplied."""
    self.strategy.set_request_data({'openid_lj_user': 'foobar'})
    self._setup_handlers()
    self.do_login()
def test_partial_pipeline(self):
    """The partial (interrupted/resumed) pipeline also completes."""
    self.strategy.set_request_data({'openid_lj_user': 'foobar'})
    self._setup_handlers()
    self.do_partial_pipeline()
def test_failed_login(self):
    """Without the username in the request data the backend must raise."""
    self._setup_handlers()
    self.do_login.when.called_with().should.throw(
        AuthMissingParameter
    )
| StarcoderdataPython |
4809433 | """Hadith Reader
This script work as the main script for the hadith reader.
It runs the hadith reader application.
It creates an instance of QApplication class. It then creates an instance
of the QMainWindow class and passes the instance as argument to the setupUi
method of the Ui_MainWindow class. The Ui_MainWindow class is generated by
the ./bin/pyuic5 script. The setupUi method customizes the QMainWindow
instance adding widgets and layouts
The script then further customizes the QMainWindow instance
by loading Hadith data
Finally the script runs the applications by calling the exec_ method
"""
import sys
from source.hreader import Ui_MainWindow
from source.hmanager import Ui_Manager
from PyQt5 import QtWidgets
if __name__ == "__main__":
    # Build the Qt application and the main window.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()

    # Attach the generated UI to the window, then let the UI manager load
    # the hadith data into it before showing anything.
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    ui_manager = Ui_Manager()
    ui_manager.initialize_ui(ui)  # fixed: dropped stray trailing semicolon

    MainWindow.show()
    # exec_() blocks until the window closes; propagate its exit code.
    sys.exit(app.exec_())
| StarcoderdataPython |
3365967 | <gh_stars>1-10
import math
import traceback
from datetime import datetime
from time import sleep
from typing import *
from github import Github, RateLimitExceededException
from github.GithubException import GithubException
from github.NamedUser import NamedUser
from github.Repository import Repository
from . import _config
from .LoggingUtils import LoggingUtils
from .BashUtils import BashUtils
class GitHubUtils:
    """Static helpers around PyGithub for searching repositories and users,
    with rate-limit waiting and automatic retrying of failed API calls.
    """

    logger = LoggingUtils.get_logger("GitHubUtils", LoggingUtils.DEBUG)

    # GitHub's search API never returns more than 1000 items per query.
    GITHUB_SEARCH_ITEMS_MAX = 1000

    try:
        DEFAULT_ACCESS_TOKEN = _config.get_config("github_access_token")
        DEFAULT_GITHUB_OBJECT = Github(DEFAULT_ACCESS_TOKEN, per_page=100)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed while the class body executes.
        DEFAULT_ACCESS_TOKEN = None
        DEFAULT_GITHUB_OBJECT = None
        logger.info("Fail to get github_access_token from config file. Using GitHubUtils APIs will require compulsory input access_token")

    @classmethod
    def get_github(cls, access_token: str = None) -> Github:
        """Return a Github client for *access_token*, or the default client."""
        if access_token is None:
            return cls.DEFAULT_GITHUB_OBJECT
        return Github(access_token)

    class wait_rate_limit:
        """
        Wait for rate limit of the github accessor. For use with "with".
        Use the default github accessor if no argument is given.
        """

        # Patched after GitHubUtils' class body finishes (see below), since
        # the outer class attributes are not yet bound here.
        DEFAULT_GITHUB_OBJECT = None
        logger = None

        def __init__(self, github: Github = None):
            # None means "use the (patched-in) default accessor"; the
            # original defaulted to the then-None DEFAULT_GITHUB_OBJECT,
            # which is semantically identical.
            self.github = github

        def __enter__(self):
            if self.github is None:
                self.github = self.DEFAULT_GITHUB_OBJECT
            # Check rate limit; sleep until the reset time if exhausted.
            rate_limit_remain, rate_limit = self.github.rate_limiting
            if rate_limit_remain <= 1:
                self.logger.debug("Rate limit {} / {}".format(rate_limit_remain, rate_limit))
                rate_limit_reset_time = datetime.fromtimestamp(self.github.rate_limiting_resettime)
                rate_limit_wait_seconds = math.ceil((rate_limit_reset_time - datetime.now()).total_seconds()) + 1
                if rate_limit_wait_seconds > 0:
                    self.logger.warning("Rate limit will recover at: {}, will wait for {} seconds.".format(rate_limit_reset_time, rate_limit_wait_seconds))
                    sleep(rate_limit_wait_seconds)
                    self.logger.warning("Rate limit recovered")
            return self.github

        def __exit__(self, type, value, tb):
            return

    # Late-bind the inner class' references to the outer class attributes.
    wait_rate_limit.DEFAULT_GITHUB_OBJECT = DEFAULT_GITHUB_OBJECT
    wait_rate_limit.logger = logger

    T = TypeVar("T")

    @classmethod
    def ensure_github_api_call(cls, call: Callable[[Github], T], github: Github = DEFAULT_GITHUB_OBJECT, max_retry_times: float = float("inf")) -> T:
        """Invoke *call* with a rate-limited client, retrying with linear
        backoff (capped at 600s) on GithubException/RateLimitExceededException.

        Validation errors (HTTP 422) are never retried.  The annotation of
        *max_retry_times* is now ``float`` to match its ``float("inf")``
        default (was wrongly ``int``).
        """
        retry_times = 0
        while True:
            try:
                with cls.wait_rate_limit(github) as g:
                    return call(g)
            except (GithubException, RateLimitExceededException):
                if e.status == 422 if False else False:
                    pass
            except BaseException:
                raise
| StarcoderdataPython |
1702435 | <filename>com/dfu/sqoopetl/model/DBTableInfo.py<gh_stars>1-10
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
'''
Created on 2018年7月17日
@author: zuiweng.df
'''
"""
数据库信息
"""
class ConnDBInfo(object):
    """Connection parameters for one MySQL database.

    Equality compares all fields; the hash uses only the database name,
    which stays consistent with __eq__ (equal objects share dbName).
    """

    def __init__(self, ip, port, dbName, userName, passwd):
        self.ip = ip
        self.port = port
        self.dbName = dbName
        self.userName = userName
        # BUGFIX: the original line was `self.passwd=<PASSWORD>;` — a
        # placeholder left by credential scrubbing that is a syntax error.
        # Store the constructor argument instead.
        self.passwd = passwd

    def toConnString(self):
        """Return the JDBC connection string for this database."""
        return "jdbc:mysql://%s:%s/%s" % (self.ip, self.port, self.dbName)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __hash__(self):
        return hash(self.dbName)

    def __str__(self):
        # Note: the password is deliberately not included in the output.
        return " #### ip-> %s #### port-> %s #### db-> %s #### userName -> %s" % (self.ip, self.port, self.dbName, self.userName)

    def __repr__(self, *args, **kwargs):
        return " #### ip-> %s #### port-> %s #### db-> %s #### userName -> %s" % (self.ip, self.port, self.dbName, self.userName)
"""
提取的表格信息
"""
class ETLTable(object):
def __init__(self,dbName,realTableName,tableTemplate):
self.tableTemplateId=tableTemplate.sid
self.dbName=dbName
self.realTableName=realTableName
self.isMutTable=tableTemplate.isMutTable
self.mergeCol=tableTemplate.mergeCol
self.incrementCol=tableTemplate.incrementCol
self.enable=tableTemplate.enable
self.createTable=tableTemplate.createTable
self.etlAllData=tableTemplate.etlAllData
self.torder=tableTemplate.torder
self.targetTableName=tableTemplate.tableName
self.mapperCount=tableTemplate.mapperCount
self.pkeyName=tableTemplate.pkeyName
self.incrementType=tableTemplate.incrementType
self.etlIncreamData=tableTemplate.etlIncreamData
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __hash__(self):
return hash(self.dbName)+hash(self.tableName)
def __str__(self):
return " tableInfo:dbName>%s tableName->%s " % ( self.dbName,self.tableName)
def __repr__(self):
return " tableInfo:dbName>%s tableName->%s " % ( self.dbName,self.tableName)
"""
自定义异常
"""
class EtlException(Exception):
    """Custom exception raised for errors during the ETL process."""
    pass
1610781 | from .resource import Resource
from collections import Iterator
import copy
try:
# python 2
from urllib import quote
except ImportError:
# python 3
from urllib.parse import quote
class Pages(Iterator):
    """Iterator over the pages of a paginated API collection, following the
    ``next``/``prev`` URLs from each response's Link headers.

    NOTE(review): this inherits ``Iterator`` imported from ``collections``
    at module level; that alias was removed in Python 3.10 — the import
    should move to ``collections.abc`` (confirm supported Python versions).
    """

    def __init__(self, opts, url, path, params):
        if isinstance(path, list):
            pages_url = '/'.join([url] + [quote(elem) for elem in path])
        else:
            pages_url = '/'.join([url, quote(path)])
        self.resource = Resource(pages_url, **opts)
        self.params = params
        # Root resource (host without the /v0 suffix), used to follow the
        # absolute pagination URLs returned by the server.
        self._root_resource = Resource(url[:url.find('/v0')], **opts)
        self.response = None

    def _handle_page(self, querydict=None, val='next', **headers):
        """
        Executes the request getting the next (or previous) page,
        incrementing (or decrementing) the current page.
        """
        params = copy.copy(self.params)
        # BUGFIX: `querydict={}` was a mutable default argument; ``None``
        # now means "no extra query parameters".
        params.update(querydict or {})
        # update uri based on next page
        if self.response:
            self.response.raise_for_status()
            _next = self.response.links.get(val, {}).get('url')
            if _next:
                response = self._root_resource._make_request(
                    'GET', _next, params, **headers)
                self._handle_res(None, response)
                return response
            else:
                raise StopIteration
        else:
            response = self.resource._make_request(
                'GET', '', params, **headers)
            self._handle_res(None, response)
            return response

    def _handle_res(self, session, response):
        """
        Stores the response, which we use for determining
        next and prev pages.
        """
        self.response = response

    def reset(self):
        """
        Clear the page's current place.

            page_1 = page.next().result()
            page_2 = page.next().result()
            page.reset()
            page_x = page.next().result()
            assert page_x.url == page_1.url
        """
        self.response = None

    def next(self, querydict=None, **headers):
        """
        Gets the next page of results.
        Raises `StopIteration` when there are no more results.
        """
        return self._handle_page(querydict, **headers)

    def __next__(self):
        return self.next()

    def prev(self, querydict=None, **headers):
        """
        Gets the previous page of results.
        Raises `StopIteration` when there are no more results.
        Note: Only collection searches provide a `prev` value.
        For all others, `prev` will always return `StopIteration`.
        """
        return self._handle_page(querydict, 'prev', **headers)

    def all(self):
        """Exhaust the iterator and return the concatenated `results`."""
        results = []
        for response in self:
            response.raise_for_status()
            results.extend(response['results'])
        return results
| StarcoderdataPython |
3204281 | from dash import dcc, html
import plotly.express as px
class Boxplot(html.Div):
    # Dash component: a div holding a title and a Plotly box plot of
    # user-selected dataframe columns, colored by the RiskPerformance label.
    def __init__(self, name, df):
        """
        :param name: name of the plot (also used to derive the HTML ids)
        :param df: dataframe holding the columns to plot
        """
        self.html_id = name.lower().replace(" ", "-")
        self.df = df
        self.name = name
        self.fig = None
        self.title_id = self.html_id + "-t"

        # Equivalent to `html.Div([...])`
        super().__init__(
            className="graph_card",
            children=[
                html.H6(id=self.title_id,
                        children=self.name
                        ),
                dcc.Graph(id=self.html_id),
            ],
        )

    def update(self, cols):
        # Prepend the label column so the boxes can be grouped/colored by it.
        colss = ["RiskPerformance"] + cols
        self.fig = px.box(self.df[colss], color="RiskPerformance")
        self.fig.update_layout(
            height=800,
            yaxis_zeroline=False,
            xaxis_zeroline=False,
            dragmode='select'
        )
        # Lock pan/zoom on both axes so only box selection is possible.
        self.fig.update_xaxes(fixedrange=True)
        self.fig.update_yaxes(fixedrange=True)
        return self.fig
| StarcoderdataPython |
1754787 | <gh_stars>0
# read the data from data source
# save it in the data/raw for further process
import os
from get_data import read_params, get_data
import argparse
def load_and_save(config_path):
    """Read the raw dataset defined by the config, normalize the column
    headers (spaces -> underscores) and write it to the configured CSV path.
    """
    config = read_params(config_path)
    dataset = get_data(config_path)
    sanitized_headers = [column.replace(" ", "_") for column in dataset.columns]
    output_path = config["load_data"]["raw_dataset_csv"]
    dataset.to_csv(output_path, sep=",", index=False, header=sanitized_headers)
if __name__ == "__main__":
    # Parse the --config path (defaults to params.yaml) and run the step.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="params.yaml")
    cli_args = parser.parse_args()
    load_and_save(config_path=cli_args.config)
56081 | # -*- coding: utf-8 -*-
"""Basic calculation classes including add, sub, mul, and div.
- Author: <NAME>
- Contact: <EMAIL>
"""
from abc import ABC
class Calculator(ABC):
    """An abstract class of basic computations.

    NOTE(review): ``operate`` is not decorated with ``@abstractmethod``, so
    subclasses are not forced to override it and ``Calculator`` itself can
    still be instantiated; it only fails at call time via
    ``NotImplementedError``. Confirm whether enforcement is wanted.
    """
    def operate(self: "Calculator", left: int, right: int) -> int:
        """Operate the defined calculation with two given operands."""
        raise NotImplementedError
class Adder(Calculator):
    """Calculator that adds its two operands."""

    def operate(self: "Adder", left: int, right: int) -> int:
        """Return the sum ``left + right``."""
        total = left + right
        return total
class Subtractor(Calculator):
    """Calculator that subtracts the right operand from the left."""

    def operate(self: "Subtractor", left: int, right: int) -> int:
        """Return the difference ``left - right``."""
        difference = left - right
        return difference
class Multiplier(Calculator):
    """Calculator that multiplies its two operands."""

    def operate(self: "Multiplier", left: int, right: int) -> int:
        """Return the product ``left * right``."""
        product = left * right
        return product
class Divider(Calculator):
    """Calculator that floor-divides the left operand by the right."""

    def operate(self: "Divider", left: int, right: int) -> int:
        """Return ``left // right`` (floor division, as in the original)."""
        quotient = left // right
        return quotient
| StarcoderdataPython |
3203596 | '''
Calculando total
'''
# Scratch notes: experiments computing a sale's total with the Django ORM
# across Django versions. The "# failed" / "# OK" markers record whether
# each attempt worked. (Comments translated from Portuguese.)
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField

''' ------------ '''

q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * F('quantity')), output_field=FloatField())
# failed

''' ------------ '''

# Django 1.8.3
# http://stackoverflow.com/a/35076326/802542
from core.models import SaleDetail
from django.db.models import Sum, F, FloatField

q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
q.aggregate(Sum(F('price_sale') * ('quantity'), output_field=FloatField()))
# failed

''' ------------ '''

qs = SaleDetail.objects.filter(sale=1).values_list('price_sale', 'quantity')
list(map(lambda q: q[0] * q[1], qs))
# works in the template, but does not work in the Admin.

''' ------------ '''

# Django 1.7
# http://pt.stackoverflow.com/a/66694/761
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum

SaleDetail.objects.extra(
    select={'subtotal': 'round(price_sale * quantity, 2)',
            }).values('price_sale', 'quantity', 'subtotal').filter(sale=2)

SaleDetail.objects.extra(
    select={'total': 'round(sum(price_sale*quantity),2)', }).values('total').filter(sale=2)
# OK

''' ------------ '''

# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import Sum, F, FloatField

q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(
    subtotal=(F('price_sale') * F('quantity')),
    output_field=FloatField())
# Failed

''' ------------ '''

# Django 1.8
from vendas_project.vendas.models import SaleDetail
from django.db.models import F, FloatField, ExpressionWrapper

q = SaleDetail.objects.filter(sale=1).values('price_sale', 'quantity')
qs = q.annotate(subtotal=ExpressionWrapper(
    F('price_sale') * F('quantity')), output_field=FloatField())
qs[0].subtotal
t = qs.aggregate(total=Sum('subtotal'))
t.total

'''
Copiando uma venda
'''
from vendas_project.vendas.models import Sale, SaleDetail

s = Sale.objects.filter(pk=300)  # filter the Sale by pk
d = SaleDetail.objects.filter(sale=s)  # filter the items of that Sale
s = Sale.objects.get(pk=s)  # get() fetches the Sale by the filtered pk
s.pk = None
s.save()  # saves a copy of the Sale
for i in d:
    n = SaleDetail.objects.create(
        sale=s, product=i.product, quantity=i.quantity, price_sale=i.price_sale)
| StarcoderdataPython |
3332449 | <reponame>Clemson-DPA/dpa-pipe-backend
from rest_framework import routers
from locations import rest_api as loc_api
from products import rest_api as product_api
from ptasks import rest_api as ptask_api
from users import rest_api as user_api
# Single DRF router exposing the whole REST API; each register() call adds
# the standard list/detail routes under the given URL prefix.
api_router = routers.DefaultRouter()

# ---- locations
api_router.register(
    r'locations',
    loc_api.LocationViewSet,
)

# ---- products
api_router.register(
    r'product-categories',
    product_api.ProductCategoryViewSet,
)
api_router.register(
    r'products',
    product_api.ProductViewSet,
)
api_router.register(
    r'product-versions',
    product_api.ProductVersionViewSet,
)
api_router.register(
    r'product-representations',
    product_api.ProductRepresentationViewSet,
)
api_router.register(
    r'product-representation-statuses',
    product_api.ProductRepresentationStatusViewSet,
)
api_router.register(
    r'product-groupings',
    product_api.ProductGroupingViewSet,
)
api_router.register(
    r'product-subscriptions',
    product_api.ProductSubscriptionViewSet,
)

# ---- ptasks
api_router.register(
    r'ptasks',
    ptask_api.PTaskViewSet,
)
api_router.register(
    r'ptask-assignments',
    ptask_api.PTaskAssignmentViewSet,
)
api_router.register(
    r'ptask-versions',
    ptask_api.PTaskVersionViewSet,
)

# ---- users
api_router.register(
    r'users',
    user_api.UserViewSet,
)
| StarcoderdataPython |
1791529 | <reponame>megmogmog1965/PythonMachineLearning<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Apr 15, 2017
@author: <NAME>
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from chapter03._3_2_1 import _plot_decision_regions
def plot_xor():
    """Train an RBF-kernel SVM on a random 2-D XOR dataset and show the
    decision regions (displays a figure; returns nothing)."""
    np.random.seed(0)
    # 200 random 2-D points.
    X_xor = np.random.randn(200, 2)
    # XOR label: +1 when the two coordinate signs differ, else -1.
    y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
    y_xor = np.where(y_xor, 1, -1)
    # RBF-kernel SVM.
    svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
    svm.fit(X_xor, y_xor)
    # plot decision regions.
    _plot_decision_regions(X_xor, y_xor, classifier=svm)
    plt.legend(loc='upper left')
    plt.show()
def plot_iris(gamma):
    """Train an RBF-kernel SVM on two iris features (petal length/width)
    and show the decision regions over the combined train+test data.

    :param gamma: RBF kernel coefficient; larger values fit more tightly.
    """
    # load samples.
    iris = datasets.load_iris()
    X = iris.data[:, [2, 3]]
    y = iris.target

    # split data for testing and training.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

    # standardize using statistics from the training split only.
    sc = StandardScaler()
    sc.fit(X_train)
    X_train_std = sc.transform(X_train)
    X_test_std = sc.transform(X_test)

    # combined (test samples are indices 105-149 for highlighting).
    X_combined_std = np.vstack((X_train_std, X_test_std))
    y_combined = np.hstack((y_train, y_test))

    # SVM.
    svm = SVC(kernel='rbf', random_state=0, gamma=gamma, C=1.0)
    svm.fit(X_train_std, y_train)

    # plot.
    _plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150))
    plt.xlabel('petal length [standardized]')
    # BUGFIX: the original called plt.xlabel twice, overwriting the x-axis
    # label and leaving the y axis unlabeled; the second label belongs on y.
    plt.ylabel('petal width [standardized]')
    plt.legend(loc='upper left')
    plt.show()
if __name__ == '__main__':
    # Demo: the XOR toy problem, then iris with a small and a very large
    # gamma (the latter visibly overfits the training points).
    plot_xor()
    plot_iris(gamma=0.2)
    plot_iris(gamma=100.0)
| StarcoderdataPython |
67694 | <gh_stars>1-10
'''
Note: http://bugs.python.org/issue11077 seems to indicate that tk is
supposed to be thread-safe, but everyone else on the net insists that
it isn't. Be safe, don't call into the GUI from another thread.
'''
try:
import tkinter as tk
except ImportError:
print("pyfrc robot simulation requires python tkinter support to be installed")
raise
import queue
from ..version import __version__
from ..wpilib._wpilib import _core, _fake_time
from .ui_widgets import PanelIndicator, Tooltip, ValueWidget
class SimUI(object):
def __init__(self, manager):
'''
initializes all default values and creates
a board, waits for run() to be called
to start the board
manager - sim manager class instance
'''
self.manager = manager
self.root = tk.Tk()
self.root.wm_title("PyFRC Robot Simulator v%s" % __version__)
# setup mode switch
frame = tk.Frame(self.root)
frame.pack(side=tk.TOP, anchor=tk.W)
self._setup_widgets(frame)
self.root.resizable(width=0, height=0)
self.mode_start_tm = 0
self.text_id = None
# Set up idle_add
self.queue = queue.Queue()
# connect to the controller
self.manager.on_mode_change(lambda mode: self.idle_add(self.on_robot_mode_change, mode))
self.on_robot_mode_change(self.manager.get_mode())
self.timer_fired()
def _setup_widgets(self, frame):
top = tk.Frame(frame)
top.pack(side=tk.TOP, fill=tk.X)
bottom = tk.Frame(frame)
bottom.pack(fill=tk.X)
# status bar
self.status = tk.Label(frame, bd=1, relief=tk.SUNKEN, anchor=tk.E)
self.status.pack(fill=tk.X)
# analog
slot = tk.LabelFrame(top, text='Analog')
self.analog = []
for i in range(1, 9):
label = tk.Label(slot, text=str(i))
label.grid(column=0, row=i)
vw = ValueWidget(slot, width=120, clickable=True, minval=-10.0, maxval=10.0)
vw.grid(column=1, row=i)
# driver station default voltage
if i == 8:
vw.set_value(7.6)
self.analog.append(vw)
slot.pack(side=tk.LEFT, fill=tk.Y, padx=5)
# digital
slot = tk.LabelFrame(top, text='Digital')
label = tk.Label(slot, text='PWM')
label.grid(column=0, columnspan=2, row=0)
self.pwm = []
for i in range(1, 11):
label = tk.Label(slot, text=str(i))
label.grid(column=0, row=i)
vw = ValueWidget(slot)
vw.grid(column=1, row=i)
self.pwm.append(vw)
label = tk.Label(slot, text='Digital I/O')
label.grid(column=2, columnspan=4, row=0)
self.dio = []
for i in range(1, 8):
label = tk.Label(slot, text=str(i))
label.grid(column=2, row=i)
pi = PanelIndicator(slot, clickable=True)
pi.grid(column=3, row=i)
self.dio.append(pi)
for i in range(8, 15):
label = tk.Label(slot, text=str(i))
label.grid(column=4, row=i-7)
pi = PanelIndicator(slot, clickable=True)
pi.grid(column=5, row=i-7)
self.dio.append(pi)
label = tk.Label(slot, text='Relay')
label.grid(column=6, columnspan=2, row=0, padx=5)
self.relays = []
for i in range(1, 9):
label = tk.Label(slot, text=str(i))
label.grid(column=6, row=i, sticky=tk.E)
pi = PanelIndicator(slot)
pi.grid(column=7, row=i)
self.relays.append(pi)
slot.pack(side=tk.LEFT, fill=tk.Y, padx=5)
# CAN
self.can_slot = tk.LabelFrame(top, text='CAN')
self.can_slot.pack(side=tk.LEFT, fill=tk.Y, padx=5)
self.can = {}
# solenoid
slot = tk.LabelFrame(top, text='Solenoid')
self.solenoids = []
for i in range(1, 9):
label = tk.Label(slot, text=str(i))
label.grid(column=0, row=i)
pi = PanelIndicator(slot)
pi.grid(column=1, row=i)
self.solenoids.append(pi)
slot.pack(side=tk.LEFT, fill=tk.Y, padx=5)
# joysticks
slot = tk.LabelFrame(bottom, text='Joysticks')
self.joysticks = []
for i in range(1, 5):
axes = []
buttons = []
col = i*3
label = tk.Label(slot, text='Stick %s' % i)
label.grid(column=col, columnspan=3, row=0)
for j, t in enumerate(['X', 'Y', 'Z', 'T']):
label = tk.Label(slot, text=t)
label.grid(column=col, row=j+1)
vw = ValueWidget(slot, clickable=True, default=0.0)
vw.grid(column=col+1, row=j+1, columnspan=2)
axes.append(vw)
for j in range(1, 11):
var = tk.IntVar()
ck = tk.Checkbutton(slot, text=str(j), variable=var)
ck.grid(column=col+1+(1-j%2), row=5 + int((j - 1) / 2))
buttons.append((ck, var))
if j == 1:
Tooltip.create(ck, 'Trigger')
elif j == 2:
Tooltip.create(ck, 'Top')
self.joysticks.append((axes, buttons))
slot.pack(side=tk.LEFT, fill=tk.Y, padx=5)
# simulation control
sim = tk.LabelFrame(bottom, text='Robot')
self.state_buttons = []
self.mode = tk.IntVar()
def _set_mode():
self.manager.set_mode(self.mode.get())
button = tk.Radiobutton(sim, text='Disabled', variable=self.mode, \
value=self.manager.MODE_DISABLED, command=_set_mode)
button.pack(fill=tk.X)
self.state_buttons.append(button)
button = tk.Radiobutton(sim, text='Autonomous', variable=self.mode, \
value=self.manager.MODE_AUTONOMOUS, command=_set_mode)
button.pack(fill=tk.X)
self.state_buttons.append(button)
button = tk.Radiobutton(sim, text='Teleoperated', variable=self.mode, \
value=self.manager.MODE_OPERATOR_CONTROL, command=_set_mode)
button.pack(fill=tk.X)
self.state_buttons.append(button)
self.robot_dead = tk.Label(sim, text='Robot died!', fg='red')
sim.pack(side=tk.LEFT, fill=tk.Y)
# timing control
control = tk.LabelFrame(bottom, text='Time')
#self.
def _set_realtime():
if realtime_mode.get() == 0:
step_button.pack_forget()
step_entry.pack_forget()
self.on_pause(False)
else:
step_button.pack(fill=tk.X)
step_entry.pack()
self.on_pause(True)
realtime_mode = tk.IntVar()
button = tk.Radiobutton(control, text='Run', variable=realtime_mode,
value=0, command=_set_realtime)
button.pack(fill=tk.X)
button = tk.Radiobutton(control, text='Pause', variable=realtime_mode,
value=1, command=_set_realtime)
button.pack(fill=tk.X)
step_button = tk.Button(control, text='Step', command=self.on_step_time)
self.step_entry = tk.StringVar()
self.step_entry.set("0.025")
step_entry = tk.Entry(control, width=6, textvariable=self.step_entry)
Tooltip.create(step_button, 'Click this to increment time by the step value')
Tooltip.create(step_entry, 'Time to step (in seconds)')
realtime_mode.set(0)
control.pack(side=tk.LEFT, fill=tk.Y)
def _add_CAN(self, canId, device):
row = len(self.can)
lbl = tk.Label(self.can_slot, text=str(canId))
lbl.grid(column=0, row=row)
motor = ValueWidget(self.can_slot, default=0.0)
motor.grid(column=1, row=row)
flvar = tk.IntVar()
fl = tk.Checkbutton(self.can_slot, text='F', variable=flvar)
fl.grid(column=2, row=row)
rlvar = tk.IntVar()
rl = tk.Checkbutton(self.can_slot, text='R', variable=rlvar)
rl.grid(column=3, row=row)
Tooltip.create(motor, device.__class__.__name__)
Tooltip.create(fl, 'Forward limit switch')
Tooltip.create(rl, 'Reverse limit switch')
self.can[canId] = (motor, flvar, rlvar)
def idle_add(self, callable, *args):
'''Call this with a function as the argument, and that function
will be called on the GUI thread via an event
This function returns immediately
'''
self.queue.put((callable, args))
def __process_idle_events(self):
'''This should never be called directly, it is called via an
event, and should always be on the GUI thread'''
while True:
try:
callable, args = self.queue.get(block=False)
except queue.Empty:
break
callable(*args)
def run(self):
# and launch the thread
self.root.mainloop() # This call BLOCKS
def timer_fired(self):
'''Polling loop for events from other threads'''
self.__process_idle_events()
# grab the simulation lock, gather all of the
# wpilib objects, and display them on the screen
self.update_widgets()
# call next timer_fired (or we'll never call timer_fired again!)
delay = 100 # milliseconds
self.root.after(delay, self.timer_fired) # pause, then call timer_fired again
def update_widgets(self):
with _core._WPILibObject._sim_lock:
# TODO: support multiple slots?
# analog module
# -> TODO: voltage and value should be the same?
for i, ch in enumerate(_core.AnalogModule._channels):
analog = self.analog[i]
if ch is None:
analog.set_disabled()
else:
analog.set_disabled(False)
self._set_tooltip(analog, ch)
ch.voltage = analog.get_value()
# digital module
for i, ch in enumerate(_core.DigitalModule._io):
dio = self.dio[i]
if ch is None:
dio.set_disabled()
else:
self._set_tooltip(dio, ch)
# determine which one changed, and set the appropriate one
ret = dio.sync_value(ch.value)
if ret is not None:
ch.value = ret
for i, ch in enumerate(_core.DigitalModule._pwm):
pwm = self.pwm[i]
if ch is None:
pwm.set_disabled()
else:
self._set_tooltip(pwm, ch)
pwm.set_value(ch.value)
for i, ch in enumerate(_core.DigitalModule._relays):
relay = self.relays[i]
if ch is None:
relay.set_disabled()
else:
self._set_tooltip(relay, ch)
if not ch.on:
relay.set_off()
elif ch.forward:
relay.set_on()
else:
relay.set_back()
# solenoid
for i, ch in enumerate(_core.Solenoid._channels):
sol = self.solenoids[i]
if ch is None:
sol.set_disabled()
else:
self._set_tooltip(sol, ch)
sol.set_value(ch.value)
# CAN
# detect new devices
if len(self.can) != len(_core.CAN._devices):
existing = list(self.can.keys())
for k, v in sorted(_core.CAN._devices.items()):
if k in existing:
continue
self._add_CAN(k, v)
for k, (motor, fl, rl) in self.can.items():
can = _core.CAN._devices[k]
motor.set_value(can.value)
can.forward_ok = True if fl.get() else False
can.reverse_ok = True if rl.get() else False
# joystick/driver station
sticks = _core.DriverStation.GetInstance().sticks
stick_buttons = _core.DriverStation.GetInstance().stick_buttons
for i, (axes, buttons) in enumerate(self.joysticks):
for j, ax in enumerate(axes):
sticks[i][j] = ax.get_value()
for j, (ck, var) in enumerate(buttons):
stick_buttons[i][j] = True if var.get() else False
tm = _fake_time.FAKETIME.Get()
mode_tm = tm - self.mode_start_tm
self.status.config(text="Time: %.03f mode, %.03f total" % (mode_tm, tm))
def _set_tooltip(self, widget, obj):
if not hasattr(widget, 'has_tooltip'):
# only show the parent object, otherwise the tip is confusing
while hasattr(obj, '_parent'):
obj = obj._parent
Tooltip.create(widget, obj.__class__.__name__.strip('_'))
def on_robot_mode_change(self, mode):
self.mode.set(mode)
self.mode_start_tm = _fake_time.FAKETIME.Get()
# this is not strictly true... a robot can actually receive joystick
# commands from the driver station in disabled mode. However, most
# people aren't going to use that functionality...
controls_disabled = False if mode == self.manager.MODE_OPERATOR_CONTROL else True
state = tk.DISABLED if controls_disabled else tk.NORMAL
for axes, buttons in self.joysticks:
for axis in axes:
axis.set_disabled(disabled=controls_disabled)
for ck, var in buttons:
ck.config(state=state)
if not self.manager.is_alive():
for button in self.state_buttons:
button.config(state=tk.DISABLED)
self.robot_dead.pack()
#
# Time related callbacks
#
def on_pause(self, pause):
print("Pause")
if pause:
_fake_time.FAKETIME.Pause()
else:
_fake_time.FAKETIME.Resume()
def on_step_time(self):
val = self.step_entry.get()
try:
tm = float(self.step_entry.get())
except ValueError:
tk.messagebox.showerror("Invalid step time", "'%s' is not a valid number" % val)
return
if tm > 0:
_fake_time.FAKETIME.Resume(tm)
| StarcoderdataPython |
1634910 | import os
from flask import render_template, redirect, url_for, flash
import flask_login
import scrypt
from simple_recipes import app, login_manager
from simple_recipes.db.users import *
from simple_recipes.forms import UserForm
class User(flask_login.UserMixin):
    """Minimal flask-login user object; ``id`` holds the user name."""
    pass
@login_manager.user_loader
def user_loader(user_name):
    """flask-login session callback: map a stored user name to a User.

    Returns None for unknown users, which invalidates the session.
    """
    if not get_user(user_name=user_name):
        return None
    loaded = User()
    loaded.id = user_name
    return loaded
@login_manager.request_loader
def request_loader(request):
    """flask-login request callback: authenticate from posted form credentials.

    Returns the authenticated User, or None when credentials are missing or
    invalid (flask-login treats None as "not logged in").
    """
    if not request.form:
        return None
    user_name = request.form.get('user_name')
    entered_password = request.form.get('user_pw')
    if user_name and entered_password and get_user(user_name=user_name):
        user = User()
        user.id = user_name
        if is_user_password_valid(entered_password, user_name=user_name):
            user.is_authenticated = True
            # bug fix: the loader must return the user object; the original
            # fell through and always returned None, so request-based auth
            # could never succeed
            return user
        flash("Incorrect Password")
    return None
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate submitted credentials."""
    form = UserForm()
    form.user_name.required = True
    form.current_pw.required = True
    if form.validate_on_submit():
        name = form.user_name.data
        password = form.current_pw.data
        try:
            if is_user_password_valid(password, user_name=name):
                user = User()
                user.id = name
                flask_login.login_user(user)
                return redirect(url_for('account'))
            flash("Incorrect username or password")
        except PermissionError as exc:
            flash(exc)
    return render_template('users/login.html', form=form)
@app.route('/logout/')
def logout():
    """Log the current user out and redirect to the login page."""
    flask_login.logout_user()
    flash("Logged out successfully")
    return redirect(url_for('login'))
@app.route('/account/', methods=['GET', 'POST'])
@flask_login.login_required
def account():
    """Account landing page; clears a pending RESET flag on first visit."""
    name = flask_login.current_user.id
    if get_user(user_name=name)['user_status'] == RESET:
        # the account was administratively reset — nudge the user and
        # clear the flag so the message is shown only once
        flash( "Your account has been reset. "
                "You might want to go ahead and change your password.")
        change_user_status(user_name=name)
    return render_template('users/account.html')
@app.route('/account/change_password/', methods=['GET', 'POST'])
@flask_login.login_required
def change_password():
    """Let the logged-in user change their password.

    Requires the current password plus a matching pair of new passwords.
    On success the new password is salted, scrypt-hashed, stored, and the
    user is redirected back to the account page.
    """
    form = UserForm()
    user_name = flask_login.current_user.id
    form.current_pw.required = True
    form.new_pw.required = True
    form.new_pw_confirmation.required = True
    if form.validate_on_submit():
        current_pw = form.current_pw.data
        if form.new_pw.data != form.new_pw_confirmation.data:
            # bug fix: the original only flashed and then changed the
            # password anyway; a mismatch must abort the change
            flash("New password must match in both fields!")
        # bug fix: arguments were swapped — every other call site uses
        # is_user_password_valid(<password>, user_name=<name>)
        elif is_user_password_valid(current_pw, user_name=user_name):
            salt = os.urandom(64)
            hashed = scrypt.hash(form.new_pw.data, salt)
            update_user_password(hashed, salt, user_name=user_name)
            flash("Password successfully changed")
            return redirect(url_for('account'))
    return render_template('users/change_password.html', form=form)
3353269 |
import pytest
import {{ cookiecutter.package_name }} as pkg
def test_simple():
    """Smoke test for the freshly generated package skeleton.

    ``remove_this()`` is the cookiecutter placeholder function; the template
    ships it returning 42 so a new project starts with one passing test.
    """
    assert pkg.remove_this() == 42
| StarcoderdataPython |
148997 | #!/usr/bin/env python
# Does a kind of grep that prints lines between two given regular expressions
# (where between is inclusive)
import re
import sys
import optparse
#-- main stuff
def parsePositionalArgs(argv, parser):
    """Extract (start_pattern, end_pattern, input_stream) from *argv*.

    argv[0] is the start pattern and argv[1] the end pattern; argv[2], when
    present, names a file to open, otherwise stdin is used.  With fewer than
    two arguments, parser.error() is invoked (which normally exits).
    """
    if len(argv) <= 1:
        parser.error("must supply more args")
    else:
        sp, ep = argv[0], argv[1]
        infile = open(argv[2], "r") if len(argv) >= 3 else sys.stdin
        return (sp, ep, infile)
def doGrep(sp, ep, infile, options):
    """Print every line of *infile* between a start-pattern and an
    end-pattern match, inclusive.

    sp/ep are compiled regexes matched at the start of each line.  When
    options.endN is set, a region closes only after that many end-pattern
    matches; otherwise the first end match closes it.

    Bug fix: converted the Python-2-only ``print line,`` statements to
    ``print(line, end="")`` so the function runs under Python 3 (lines keep
    their own trailing newline, so output is unchanged).
    """
    inMatch = False
    endMatches = 0
    endMTarget = options.endN if options.endN else 0
    for line in infile:
        if inMatch:
            print(line, end="")
            if ep.match(line):
                endMatches += 1
                if endMatches >= endMTarget:
                    inMatch = False
        else:
            inMatch = sp.match(line)
            if inMatch:
                print(line, end="")
                # a fresh region starts counting end matches from zero
                endMatches = 0
    return
def main():
    """Entry point: parse CLI options and run the between-patterns grep."""
    parser = optparse.OptionParser(usage='usage: %prog [opts] startPat endPat file')
    parser.add_option("-n", "--endN", action="store", type="int", dest="endN",
                      help="stop only after n matches of end pattern")
    options, args = parser.parse_args()
    sp, ep, infile = parsePositionalArgs(args, parser)
    doGrep(re.compile(sp), re.compile(ep), infile, options)
    return 0
#-- go!
main()
| StarcoderdataPython |
12232 | """TilePyramid creation."""
import pytest
from shapely.geometry import Point
from shapely.ops import unary_union
from types import GeneratorType
from tilematrix import TilePyramid, snap_bounds
def test_init():
    """TilePyramid construction for the predefined grids."""
    for tptype in ("geodetic", "mercator"):
        assert TilePyramid(tptype)
    with pytest.raises(ValueError):
        TilePyramid("invalid")
    with pytest.raises(ValueError):
        TilePyramid()
    # note: tptype leaks from the loop, so only "mercator" is hashed here
    # (mirrors the original behaviour)
    assert hash(TilePyramid(tptype))
def test_metatiling():
    """Valid metatiling values are accepted; invalid ones raise ValueError."""
    for metatiling in [1, 2, 4, 8, 16]:
        assert TilePyramid("geodetic", metatiling=metatiling)
    # idiom fix: use pytest.raises (already used elsewhere in this file)
    # instead of the try/raise/except bookkeeping
    with pytest.raises(ValueError):
        TilePyramid("geodetic", metatiling=5)
def test_tile_size():
    """The tile_size constructor argument is stored verbatim."""
    for size in (128, 256, 512, 1024):
        assert TilePyramid("geodetic", tile_size=size).tile_size == size
def test_intersect():
    """Intersecting tiles across same, smaller and bigger metatilings."""
    # same metatiling: a tile intersects exactly itself
    tp = TilePyramid("geodetic")
    intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
    assert {tile.id for tile in tp.intersecting(intersect_tile)} == {(5, 1, 1)}
    # smaller metatiling: one metatile maps to several plain tiles
    intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1)
    assert {tile.id for tile in tp.intersecting(intersect_tile)} == {
        (5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)
    }
    # bigger metatiling: several plain tiles fall into one metatile
    tp = TilePyramid("geodetic", metatiling=2)
    intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
    assert {tile.id for tile in tp.intersecting(intersect_tile)} == {(5, 0, 0)}
    intersect_tile = TilePyramid("geodetic").tile(4, 12, 31)
    assert {tile.id for tile in tp.intersecting(intersect_tile)} == {(4, 6, 15)}
    # different CRSes must raise
    # idiom fix: pytest.raises instead of the try/raise/except pattern
    tp = TilePyramid("geodetic")
    intersect_tile = TilePyramid("mercator").tile(5, 1, 1)
    with pytest.raises(ValueError):
        list(tp.intersecting(intersect_tile))
def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg):
    """Pyramid equality: metatiling and tile size make pyramids differ."""
    for grid in ("geodetic", "mercator", grid_definition_epsg, grid_definition_proj):
        assert TilePyramid(grid) == TilePyramid(grid)
        assert TilePyramid(grid) != TilePyramid(grid, metatiling=2)
        assert TilePyramid(grid) != TilePyramid(grid, tile_size=512)
    # altered bounds make otherwise-identical grids differ
    abounds = dict(**grid_definition_proj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds) == TilePyramid(abounds)
    assert TilePyramid(grid_definition_proj) != TilePyramid(abounds)
    # comparison against an unrelated type
    assert TilePyramid("geodetic") != "string"
def test_grid_compare(grid_definition_proj, grid_definition_epsg):
    """Grid equality: metatiling and tile size do NOT affect the grid."""
    for grid in ("geodetic", "mercator", grid_definition_epsg, grid_definition_proj):
        assert TilePyramid(grid).grid == TilePyramid(grid).grid
        assert TilePyramid(grid).grid == TilePyramid(grid, metatiling=2).grid
        assert TilePyramid(grid).grid == TilePyramid(grid, tile_size=512).grid
    # altered bounds do change the grid
    abounds = dict(**grid_definition_proj)
    abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
    assert TilePyramid(abounds).grid == TilePyramid(abounds).grid
    assert TilePyramid(grid_definition_proj).grid != TilePyramid(abounds).grid
def test_tile_from_xy():
    """tile_from_xy: point-in-tile lookup and on-edge disambiguation."""
    tp = TilePyramid("geodetic")
    zoom = 5
    # point inside tile: every on_edge_use policy resolves to the same tile
    p_in = (0.5, 0.5, zoom)
    control_in = [
        ((5, 15, 32), "rb"),
        ((5, 15, 32), "lb"),
        ((5, 15, 32), "rt"),
        ((5, 15, 32), "lt"),
    ]
    for tile_id, on_edge_use in control_in:
        tile = tp.tile_from_xy(*p_in, on_edge_use=on_edge_use)
        assert tile.id == tile_id
        assert Point(p_in[0], p_in[1]).within(tile.bbox())
    # point is on tile edge: the policy decides which adjacent tile wins
    p_edge = (0, 0, zoom)
    control_edge = [
        ((5, 16, 32), "rb"),
        ((5, 16, 31), "lb"),
        ((5, 15, 32), "rt"),
        ((5, 15, 31), "lt"),
    ]
    for tile_id, on_edge_use in control_edge:
        tile = tp.tile_from_xy(*p_edge, on_edge_use=on_edge_use)
        assert tile.id == tile_id
        assert Point(p_edge[0], p_edge[1]).touches(tile.bbox())
    # bottom-right corner of the grid: policies that point outside must fail
    with pytest.raises(ValueError):
        tp.tile_from_xy(180, -90, zoom, on_edge_use="rb")
    with pytest.raises(ValueError):
        tp.tile_from_xy(180, -90, zoom, on_edge_use="lb")
    tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="rt")
    assert tile.id == (5, 31, 0)
    tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="lt")
    assert tile.id == (5, 31, 63)
    # top-left corner: symmetric cases.
    # NOTE(review): these expect TypeError rather than ValueError — confirm
    # that asymmetry is the library's intended contract.
    with pytest.raises(TypeError):
        tp.tile_from_xy(-180, 90, zoom, on_edge_use="lt")
    with pytest.raises(TypeError):
        tp.tile_from_xy(-180, 90, zoom, on_edge_use="rt")
    tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="rb")
    assert tile.id == (5, 0, 0)
    tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="lb")
    assert tile.id == (5, 0, 63)
    # unknown policy name
    with pytest.raises(ValueError):
        tp.tile_from_xy(-180, 90, zoom, on_edge_use="invalid")
def test_tiles_from_bounds(grid_definition_proj):
    """tiles_from_bounds over a parent tile's bounds yields its children."""
    # global pyramid
    tp = TilePyramid("geodetic")
    parent = tp.tile(8, 5, 5)
    assert {t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)} == {
        t.id for t in parent.get_children()
    }
    # non-global pyramid
    tp = TilePyramid(grid_definition_proj)
    parent = tp.tile(8, 0, 0)
    assert {t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)} == {
        t.id for t in parent.get_children()
    }
def test_tiles_from_bounds_batch_by_row():
    """batch_by='row' yields one generator per row, tiles in column order."""
    tp = TilePyramid("geodetic")
    bounds = (0, 0, 90, 90)
    zoom = 8
    tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
    assert isinstance(tiles, GeneratorType)
    assert list(tiles)
    previous_row = None
    tiles = 0
    for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
        assert isinstance(tile_row, GeneratorType)
        previous_tile = None
        for tile in tile_row:
            tiles += 1
            if previous_row is None:
                # first row: only column adjacency can be checked
                if previous_tile is not None:
                    assert tile.col == previous_tile.col + 1
            else:
                if previous_tile is not None:
                    # within a row: consecutive columns, constant row
                    assert tile.col == previous_tile.col + 1
                    assert tile.row == previous_tile.row
                # each batch advances exactly one row
                assert tile.row == previous_row + 1
            previous_tile = tile
        # `tile` leaks from the inner loop: remember the row just finished
        previous_row = tile.row
    # batched iteration covers exactly the same tiles as the flat one
    assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_column():
    """batch_by='column' yields one generator per column, tiles in row order."""
    tp = TilePyramid("geodetic")
    bounds = (0, 0, 90, 90)
    zoom = 8
    tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="column")
    assert isinstance(tiles, GeneratorType)
    assert list(tiles)
    previous_column = None
    tiles = 0
    for tile_column in tp.tiles_from_bounds(bounds, zoom, batch_by="column"):
        assert isinstance(tile_column, GeneratorType)
        previous_tile = None
        for tile in tile_column:
            tiles += 1
            if previous_column is None:
                # first column: only row adjacency can be checked
                if previous_tile is not None:
                    assert tile.row == previous_tile.row + 1
            else:
                if previous_tile is not None:
                    # within a column: consecutive rows, constant column
                    assert tile.row == previous_tile.row + 1
                    assert tile.col == previous_tile.col
                # each batch advances exactly one column
                assert tile.col == previous_column + 1
            previous_tile = tile
        # `tile` leaks from the inner loop: remember the finished column
        previous_column = tile.col
    # batched iteration covers exactly the same tiles as the flat one
    assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_antimeridian_bounds():
    """batch_by='row' with bounds crossing the antimeridian on one side.

    Columns may jump (wrap-around), so only strictly-increasing order is
    asserted instead of exact +1 adjacency.
    """
    tp = TilePyramid("geodetic")
    bounds = (0, 0, 185, 95)
    zoom = 8
    tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
    assert isinstance(tiles, GeneratorType)
    assert list(tiles)
    previous_row = None
    tiles = 0
    for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
        assert isinstance(tile_row, GeneratorType)
        previous_tile = None
        for tile in tile_row:
            tiles += 1
            if previous_row is None:
                if previous_tile is not None:
                    # columns only need to increase, gaps are allowed
                    assert tile.col > previous_tile.col
            else:
                if previous_tile is not None:
                    assert tile.col > previous_tile.col
                    assert tile.row == previous_tile.row
                # rows only need to increase as well
                assert tile.row > previous_row
            previous_tile = tile
        # `tile` leaks from the inner loop: remember the row just finished
        previous_row = tile.row
    # batched iteration covers exactly the same tiles as the flat one
    assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_both_antimeridian_bounds():
    """batch_by='row' with bounds overflowing the antimeridian on both sides.

    Here the covered columns are contiguous again, so exact +1 adjacency
    holds within every row.
    """
    tp = TilePyramid("geodetic")
    bounds = (-185, 0, 185, 95)
    zoom = 8
    tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
    assert isinstance(tiles, GeneratorType)
    assert list(tiles)
    previous_row = None
    tiles = 0
    for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
        assert isinstance(tile_row, GeneratorType)
        previous_tile = None
        for tile in tile_row:
            tiles += 1
            if previous_row is None:
                if previous_tile is not None:
                    assert tile.col == previous_tile.col + 1
            else:
                if previous_tile is not None:
                    assert tile.col == previous_tile.col + 1
                    assert tile.row == previous_tile.row
                assert tile.row == previous_row + 1
            previous_tile = tile
        # `tile` leaks from the inner loop: remember the row just finished
        previous_row = tile.row
    # batched iteration covers exactly the same tiles as the flat one
    assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_snap_bounds():
    """snap_bounds equals the union of the covering tiles' bboxes."""
    bounds = (0, 1, 2, 3)
    tp = TilePyramid("geodetic")
    zoom = 8
    # without pixelbuffer
    expected = unary_union(
        [tile.bbox() for tile in tp.tiles_from_bounds(bounds, zoom)]
    ).bounds
    assert snap_bounds(bounds=bounds, tile_pyramid=tp, zoom=zoom) == expected
    # with pixelbuffer
    pixelbuffer = 10
    expected = unary_union(
        [tile.bbox(pixelbuffer) for tile in tp.tiles_from_bounds(bounds, zoom)]
    ).bounds
    assert snap_bounds(
        bounds=bounds, tile_pyramid=tp, zoom=zoom, pixelbuffer=pixelbuffer
    ) == expected
def test_deprecated():
    """Deprecated attributes and size accessors are still available."""
    tp = TilePyramid("geodetic")
    for attr in ("type", "srid"):
        assert getattr(tp, attr)
    for meth in ("tile_x_size", "tile_y_size", "tile_height", "tile_width"):
        assert getattr(tp, meth)(0)
| StarcoderdataPython |
166201 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import joblib
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
import tqdm
import itertools
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold, StratifiedKFold
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
# Interactive plotting + wide dataframe printing for exploratory use.
plt.interactive(True)
pd.options.display.max_columns = 15
pic_folder = 'pic/'
# version = 'hidden_size'
# version = 'test_11'
# version = 'window_size_0'
# version = 'time_step_0'
# Experiment sweeps to load; older sweeps are kept commented out.
versions_list = [
    # 'window_size_0',
    # 'window_size_1',
    # 'hidden_size_2',
    # 'hidden_size_3',
    # 'normalization_0',
    # 'time_step_0',
    # 'time_step_1',
    # 'time_step_2',
    # # 'time_step_1_60',
    # 'february_27',
    'march_1',
]
classic_versions_list = [
    # 'window_size_0',
    # 'time_step_1',
    # 'time_step_2',
    # 'time_step_1_60',
]
# Maps the 'attention' code in the result files to a plot legend label.
attention_names_dict = {
    0: 'RNN',
    1: 'RNN + Input Attention, Softmax',
    2: 'RNN + Input Attention, Sigmoid',
    4: 'RNN + Input Attention, [0, 1] clamp',
}
# Accumulate RNN sweep results; rows with score_val == -1 are unfinished runs.
df_results = pd.DataFrame()
for version in versions_list:
    df_results_new = pd.read_csv(f'data/df_results_{version}.csv')
    df_results_new = df_results_new.loc[df_results_new['score_val'] != -1, :]
    df_results_new.set_index(['time_step', 'window_size', 'batch_size', 'attention', 'hidden_size', 'normalization', 'n_repeat'], inplace=True)
    # df_results_new = df_results_new[['score_test', 'score_val']]
    df_results_new = df_results_new[['score_test', 'dumb_score_test', 'score_val', 'dumb_score_val']]
    df_results = pd.concat([df_results, df_results_new])
# Classic-ML baselines are reshaped to share the RNN index: missing
# hyperparameters get the placeholder -1 and alg_name plays the role of
# 'attention'; their validation score is treated as the test score.
for version in classic_versions_list:
    df_results_classic = pd.read_csv(f'data/df_results_classic_{version}.csv')
    df_results_classic = df_results_classic.loc[df_results_classic['alg_name'] != 'Random Forest 1']
    df_results_classic = df_results_classic.loc[df_results_classic['time_step'] != 60]
    df_results_classic = df_results_classic.loc[df_results_classic['window_size'] != 600]
    df_results_classic['batch_size'] = -1
    df_results_classic['hidden_size'] = -1
    df_results_classic['normalization'] = -1
    df_results_classic.rename(columns={'alg_name': 'attention'}, inplace=True)
    df_results_classic.set_index(['time_step', 'window_size', 'batch_size', 'attention', 'hidden_size', 'normalization', 'n_repeat'], inplace=True)
    df_results_classic.rename(columns={'score_val': 'score_test'}, inplace=True)
    df_results_classic.rename(index={'Random Forest 0': 'Random Forest'}, inplace=True)
    # df_results_classic.rename(index={'Logistic Regression': 'Random Forest'})
    df_results = pd.concat([df_results, df_results_classic])
# df_results = df_results.join(df_results_classic)
def plot_dependency(df_results, labels, colors, labels_col='attention', dependency_col='hidden_size', label2text_dict=None,
                    xlabel='', suffix='v0', plot_errors=True):
    """Plot mean test ROC AUC vs *dependency_col*, one line per *labels_col* value.

    Saves the figure to pic/<labels_col>_<dependency_col>_<suffix>.png.
    When plot_errors is True, a +/- one-std band is shaded around each line.
    label2text_dict optionally maps raw label values to legend text.
    """
    df_grouped = df_results.groupby([labels_col, dependency_col])['score_test']
    df_agg_mean = df_grouped.mean().reset_index()
    df_agg_std = df_grouped.std().reset_index()
    print(df_agg_mean)
    print(df_agg_std)
    # labels = np.unique(df_agg_mean[labels_col])
    plt.figure(figsize=(13.5, 9))
    # plt.figure(figsize=(20, 15))
    for label, color in zip(labels, colors):
        mask_label = df_agg_mean[labels_col] == label
        dependency_values = df_agg_mean.loc[mask_label, dependency_col]
        means4label = df_agg_mean.loc[mask_label, 'score_test'].values.ravel()
        stds4label = df_agg_std.loc[mask_label, 'score_test'].values.ravel()
        print(means4label)
        print(stds4label)
        # translate the raw label to a human-readable legend entry if mapped
        if label2text_dict is not None:
            if label in label2text_dict:
                label_text = label2text_dict[label]
            else:
                label_text = label
        else:
            label_text = label
        lower = means4label - stds4label
        upper = means4label + stds4label
        plt.plot(dependency_values, means4label, label=label_text, color=color, lw=5)
        plt.scatter(dependency_values, means4label, marker='o', s=140, color=color)
        if plot_errors:
            plt.fill_between(dependency_values, lower, upper, alpha=0.1, color=color)
    plt.tick_params(axis='both', which='major', labelsize=25, size=20)
    plt.xticks()
    # plt.xlabel('Window Size, s', fontsize=35)
    plt.xlabel(xlabel, fontsize=35)
    plt.ylabel('ROC AUC', fontsize=35)
    plt.legend(fontsize=20)  # 16 if not enough space # , loc='lower right')
    # plt.xlim(97, 610) # UPDATE IF 60 IS ADDED!!!
    plt.tight_layout()
    plt.savefig(f'pic/{labels_col}_{dependency_col}_{suffix}.png')
    plt.close()
def plot_dependency_time_step(df_results, labels, colors, labels_col='attention', dependency_col='hidden_size', label2text_dict=None,
                              xlabel='', suffix='v0', plot_errors=True):
    """Variant of plot_dependency for the time-step sweep.

    Identical aggregation/plot, but each line is drawn in two segments —
    solid below dependency value 40, dashed from 30 upward (the segments
    overlap at one point so the line looks continuous) — and fixed axis
    tick spacing is applied.  Saves to
    pic/<labels_col>_<dependency_col>_<suffix>.png.
    """
    df_grouped = df_results.groupby([labels_col, dependency_col])['score_test']
    df_agg_mean = df_grouped.mean().reset_index()
    df_agg_std = df_grouped.std().reset_index()
    print(df_agg_mean)
    print(df_agg_std)
    # labels = np.unique(df_agg_mean[labels_col])
    # plt.figure(figsize=(13.5, 9))
    fig, ax = plt.subplots(figsize=(13.5, 9))
    # plt.figure(figsize=(20, 15))
    for label, color in zip(labels, colors):
        mask_label = df_agg_mean[labels_col] == label
        dependency_values = df_agg_mean.loc[mask_label, dependency_col]
        means4label = df_agg_mean.loc[mask_label, 'score_test'].values.ravel()
        stds4label = df_agg_std.loc[mask_label, 'score_test'].values.ravel()
        print(means4label)
        print(stds4label)
        if label2text_dict is not None:
            if label in label2text_dict:
                label_text = label2text_dict[label]
            else:
                label_text = label
        else:
            label_text = label
        lower = means4label - stds4label
        upper = means4label + stds4label
        # split the line: solid segment for values < 40, dashed for >= 30
        mask_part_1 = dependency_values < 40
        mask_part_2 = dependency_values >= 30
        dependency_values_part_1 = dependency_values[mask_part_1]
        means4label_part_1 = means4label[mask_part_1]
        plt.plot(dependency_values_part_1, means4label_part_1, label=label_text, color=color, lw=5)
        dependency_values_part_2 = dependency_values[mask_part_2]
        means4label_part_2 = means4label[mask_part_2]
        plt.plot(dependency_values_part_2, means4label_part_2, color=color, lw=5, linestyle='--')
        plt.scatter(dependency_values, means4label, marker='o', s=140, color=color)
        if plot_errors:
            plt.fill_between(dependency_values, lower, upper, alpha=0.1, color=color)
    ax.yaxis.set_major_locator(MultipleLocator(0.05))
    ax.xaxis.set_major_locator(MultipleLocator(10))
    plt.tick_params(axis='both', which='major', labelsize=25, size=20)
    plt.xticks()
    # plt.xlabel('Window Size, s', fontsize=35)
    plt.xlabel(xlabel, fontsize=35)
    plt.ylabel('ROC AUC', fontsize=35)
    plt.legend(fontsize=15)  # 16 if not enough space # , loc='lower right')
    # plt.xlim(97, 610) # UPDATE IF 60 IS ADDED!!!
    plt.tight_layout()
    plt.savefig(f'pic/{labels_col}_{dependency_col}_{suffix}.png')
    plt.close()
# Map the sweep name to a human-readable x-axis label.
# NOTE(review): at this point `version` is the variable leaked from the
# earlier `for version in ...` loops (e.g. 'march_1'), so with the current
# version lists none of the named branches match and the fallback is taken —
# confirm this is intended.  The 'window_size' branch is also duplicated.
if version == 'time_step':
    xlabel = 'Time Step, s'
elif version == 'hidden_size':
    xlabel = 'Hidden Size'
elif version == 'window_size':
    xlabel = 'Window Size, s'
elif version == 'window_size':
    xlabel = 'Window Size, s'
else:
    xlabel = version
colors = ['blue', 'orange', 'green', 'red', 'cyan', 'gold', 'black']
labels = [0, 1, 2, 4] + ['Logistic Regression', 'Random Forest', 'SVM']
### hidden_size
plot_dependency(df_results, labels, colors, 'attention', dependency_col='hidden_size',
                label2text_dict=attention_names_dict, xlabel='Hidden Size', plot_errors=False)
### window_size
# plot_dependency(df_results, labels, colors, 'attention', dependency_col='window_size',
#                 label2text_dict=attention_names_dict, xlabel='Window Size, s', plot_errors=False)
### time_step
# drop the time_step == 5 runs before plotting the time-step dependency
mask_not_five = [df_results.index[i][0] != 5 for i in range(len(df_results))]
plot_dependency_time_step(df_results.iloc[mask_not_five], labels, colors, 'attention', dependency_col='time_step',
                          label2text_dict=attention_names_dict, xlabel='Time Step, s', plot_errors=False)
### normalization
plot_dependency(df_results, labels, colors, 'attention', dependency_col='normalization',
                label2text_dict=attention_names_dict, xlabel='normalization', plot_errors=False)
# df_att_norm_mean = df_results.groupby(['attention', 'normalization']).mean()
# df_att_norm_std = df_results.groupby(['attention', 'normalization']).std()
# df_att_norm_mean.rename(columns={'score_test': 'score_test_mean'}, inplace=True)
# df_att_norm_mean['score_test_std'] = df_att_norm_std['score_test']
#
# df_att_norm_mean.to_csv('data_best/df_att_norm.csv')
#
# NOTE(review): the loop below references `alg_names_list`, `results_no_index`,
# `results_mean`, `results_std` and `window_size_list`, which are only defined
# further down in this file — executed top to bottom this block raises
# NameError.  It looks like leftover exploratory code; confirm before running.
for alg_name, color in zip(alg_names_list, colors):
    mask_alg = results_no_index['alg_name'] == alg_name
    mean4alg = results_mean.iloc[mask_alg.nonzero()].values.ravel()
    std4alg = results_std.iloc[mask_alg.nonzero()].values.ravel()
    lower = mean4alg - std4alg
    upper = mean4alg + std4alg
    plt.plot(window_size_list, mean4alg, label=alg_name, linewidth=5, color=color)
    plt.scatter(window_size_list, mean4alg, marker='o', s=140, color=color)
    plt.fill_between(window_size_list, lower, upper, alpha=0.3, color=color)
plt.tick_params(axis='both', which='major', labelsize=30)
plt.xticks()
# plt.xlabel('Window Size, s', fontsize=35)
plt.xlabel('Window Size, s', fontsize=35)
plt.ylabel('ROC AUC', fontsize=35)
plt.legend(fontsize=32)
# plt.xlim(97, 610) # UPDATE IF 60 IS ADDED!!!
plt.tight_layout()
plt.savefig('pic/classical_ml_window_size_v0.png')
# Exploratory aggregations (printed in an interactive session).
df_results.groupby('time_step').mean()
df_results.groupby('window_size').mean()
df_results.groupby('batch_size').mean()
df_results.groupby('hidden_size').mean()
df_results.groupby('attention').mean()  # For RNN 4 is better than 0!!!
df_results.groupby('normalization').mean()  # For RNN 4 is better than 0!!!
df_results.groupby(['time_step', 'window_size']).mean()
df_results.groupby(['time_step', 'batch_size']).mean()
df_results.groupby(['time_step', 'hidden_size']).mean()
df_results.groupby(['window_size', 'batch_size']).mean()
df_results.groupby(['window_size', 'hidden_size']).mean()
df_results.groupby(['batch_size', 'hidden_size']).mean()
df_results.groupby(['attention', 'time_step']).mean()
df_results.groupby(['attention', 'window_size']).mean()
df_results.groupby(['attention', 'hidden_size']).mean()
df_results.groupby(['attention', 'hidden_size', 'window_size', 'time_step']).mean()
df_results.groupby(['time_step', 'window_size', 'hidden_size']).mean()
"""
Inference after v2:
1. time_step 20 is probably better
2. window_size 300 should be used. Larger values have better scores, but the system is not so flexible
3. batch_size can be any from 8 to 256. Let's set it to 64
4. hidden size 32 is the best. 64 is slightly worse, 16 is worse
"""
"""
Inference after v1 or v0:
1. Anyway, timestep 30 looks too much. Timestep 5 actually looks fine, but it requires a lot of training.
2. window_size 120 is too short and noisy. 600 has the best score, but the target become too trivial.
3. batch_size 2 isn't a good idea. 16 and 128 aren't very distinguishable, the optimal can be from 8 to inf.
4. The best hidden size is 32, the worst is 2, 8 is ok. More is better till some limit. Maybe to check 16, 64, 128(too much?), ...
5. time_step 10 and window_size 300 is ok.
6. Shorter time_step requires higher batch_size
7. Probably higher time_step require lesser hidden_size
"""
# Classic-ML baseline sweep configuration and results.
time_step_list = [10, 20]  # 10 is already tested
window_size_list = [120, 180, 300, 600]
alg_names_list = ['Logistic Regression', 'Random Forest', 'SVM']
version = 'v0'
df_results = pd.read_csv(f'data/df_results_classic_{version}.csv')
df_results.set_index(['window_size', 'alg_name', 'n_repeat'], inplace=True)
def mean_std(x):
    """Aggregate *x* column-wise into a frame with 'mean' and 'std' columns."""
    return pd.DataFrame({"mean": x.mean(), "std": x.std()})
# df_results.groupby(['window_size', 'alg_name']).apply(lambda x: mean_std(x))
# Per-(window_size, algorithm) mean/std of the baseline scores.
results_mean = df_results.groupby(['window_size', 'alg_name']).apply(lambda x: x.mean())
results_std = df_results.groupby(['window_size', 'alg_name']).apply(lambda x: x.std())
results_no_index = results_std.reset_index().drop(columns='score_val')
colors = ['blue', 'orange', 'green']
plt.interactive(True)
plt.close()
plt.figure(figsize=(12, 9))
# One line (+/- std band) per classic-ML algorithm across window sizes.
for alg_name, color in zip(alg_names_list, colors):
    mask_alg = results_no_index['alg_name'] == alg_name
    mean4alg = results_mean.iloc[mask_alg.nonzero()].values.ravel()
    std4alg = results_std.iloc[mask_alg.nonzero()].values.ravel()
    lower = mean4alg - std4alg
    upper = mean4alg + std4alg
    plt.plot(window_size_list, mean4alg, label=alg_name, linewidth=5, color=color)
    plt.scatter(window_size_list, mean4alg, marker='o', s=140, color=color)
    plt.fill_between(window_size_list, lower, upper, alpha=0.3, color=color)
plt.tick_params(axis='both', which='major', labelsize=30)
plt.xticks()
# plt.xlabel('Window Size, s', fontsize=35)
plt.xlabel('Window Size, s', fontsize=35)
plt.ylabel('ROC AUC', fontsize=35)
plt.legend(fontsize=32)
# plt.xlim(97, 610) # UPDATE IF 60 IS ADDED!!!
plt.tight_layout()
plt.savefig('pic/classical_ml_window_size_v0.png')
##### Loading from separate series files
# filenames = os.listdir('data')
# def check_relevance(filename):
#     cond_1 = filename[-4:] == '.csv'
#     cond_2 = filename[:7] == 'series_'
#     cond_3 = filename[-6:-4] == version
#     return cond_1 and cond_2 and cond_3
#
# relevant_series = [filename for filename in filenames if check_relevance(filename)]
#
# df_results = pd.DataFrame()
#
# for series_path in relevant_series:
#     series2append = pd.read_csv(f'data/{series_path}')
#     index_names = ['time_step', 'window_size', 'batch_size', 'hidden_size']
#     series2append.columns = index_names + list(series2append.columns[4:])
#     series2append.set_index(index_names, inplace=True)
#     df_results = df_results.append(series2append)
#
# df_agg_mean = df_results.groupby(['attention', 'hidden_size'])['score_test'].mean().reset_index()
# df_agg_std = df_results.groupby(['attention', 'hidden_size'])['score_test'].std().reset_index()
# attention_list = [0, 1, 2]
# hidden_size_list = [8, 32, 64]
#
# plt.close()
# for attention in attention_list:
#     mask = df_agg_mean['attention'] == attention
#     hidden_sizes = df_agg_mean.loc[mask, 'hidden_size']
#     means = df_agg_mean.loc[mask, 'score_test']
#     stds = df_agg_std.loc[mask, 'score_test']
#     plt.plot(hidden_sizes, means, label=attention_names_dict[attention])
#
#
# plt.legend()
# plt.tight_layout()
179293 | <gh_stars>0
""" Problem: sWAP cASE || Task:
You are given a string and your task is to swap cases. In other words, convert all lowercase letters to uppercase letters and vice versa.
Created on Wed Oct 10 10:50:38 2018
@author: nagiAI
"""
import string
def swap_case(s):
    """Return *s* with the case of every ASCII letter swapped.

    Non-letters and non-ASCII characters pass through unchanged (unlike
    str.swapcase, which also converts Unicode letters).
    """
    out = []
    for ch in s:
        if 97 <= ord(ch) <= 122:        # a-z
            out.append(ch.upper())
        elif 65 <= ord(ch) <= 90:       # A-Z
            out.append(ch.lower())
        else:
            out.append(ch)
    # perf fix: "".join is O(n); repeated += concatenation was quadratic
    return "".join(out)
# Another way
def swap_case_new(s):
    """Swap the case of ASCII letters in *s* (string-module variant)."""
    def flip(ch):
        if ch in string.ascii_lowercase:
            return ch.upper()
        if ch in string.ascii_uppercase:
            return ch.lower()
        return ch
    return "".join(flip(ch) for ch in s)
# Uncomment for testing
#print(swap_case("Pythonist 2 "))
| StarcoderdataPython |
3302242 | import math
from fractions import Fraction
# Hex-obfuscated assignment: sets __name__ = "JskMafs.py".
globals()["\x5f\x5f\x6e\x61\x6d\x65\x5f\x5f"] = "\x4a\x73\x6b\x4d\x61\x66\x73\x2e\x70\x79"
__description__ = "Jsk Troll's Python toolkit for easy maths ;) "
# Self-rename guard: if the file on disk is not named like __name__, rename it
# and abort.  NOTE(review): `rename` is undefined here (os is never imported),
# so this branch would raise NameError if ever taken — confirm intent.
if not __name__ in __file__:
    rename(__file__, __name__)
    message = "Try again."
    raise Exception(message)
def sign(n):
    """Return '+' for non-negative values and '-' for negative ones.

    Accepts int, float, or a numeric str; anything else raises TypeError.
    """
    if type(n) not in (int, float, str):
        raise TypeError("{} type not allowed.".format(type(n)))
    return '+' if float(n) >= 0 else '-'
def signed(n):
    """Return *n* as '<sign> <magnitude>', e.g. -3.0 -> '- 3', 2.5 -> '+ 2.5'.

    The magnitude is shown as an int when the value is integral.
    """
    if type(n) not in (float, int, str):
        raise TypeError("{} type not allowed.".format(type(n)))
    magnitude = abs(float(n))
    if isint(n):
        magnitude = int(magnitude)
    return "{} {}".format(sign(n), magnitude)
def isint(n):
    """Return True when *n* represents an integral value.

    Accepts int, float, integral str, or Fraction.  Note that a str with a
    fractional part (e.g. "2.5") raises ValueError via int(), matching the
    original behaviour.
    """
    if type(n) not in (int, float, str, Fraction):
        raise TypeError("{} type not allowed.".format(type(n)))
    as_float = float(n)
    rounded = math.ceil(as_float) if as_float >= 0 else math.floor(as_float)
    return rounded == int(n)
def intify(n):
    """Return *n* as an int when it is integral, otherwise as a float."""
    if type(n) not in (int, float, str, Fraction):
        raise TypeError("{} type not allowed.".format(type(n)))
    value = float(n)
    return int(value) if isint(value) else value
def ikhtizal(bast, maqam, form="default"):
    """Reduce the fraction bast/maqam and return it in the requested *form*.

    *form* may be the sentinel "default" (returns an int when the reduced
    fraction is integral, else a Fraction), or one of str, float, int,
    Fraction.

    Bug fixes: the original type-check tested ``type(form)`` against the
    allowed list, so every class-valued form (str/float/int/Fraction) raised
    TypeError and only "default" ever worked; the unused division result
    ('l7asil') is dropped.

    NOTE(review): bast and maqam are truncated with int() before reducing,
    so non-integral inputs silently lose their fractional part — confirm
    callers only pass integral values.
    """
    for value in (bast, maqam):
        if type(value) not in (int, float, str, Fraction):
            raise TypeError("{} type not allowed.".format(type(value)))
    if form != "default" and form not in (str, float, int, Fraction):
        raise TypeError("{} type not allowed.".format(type(form)))
    numerator = int(bast)
    denominator = int(maqam)
    common = math.gcd(numerator, denominator)
    numerator = int(numerator / common)
    denominator = int(denominator / common)
    reduced = Fraction(numerator, denominator)
    if form == "default":
        return intify(reduced)
    if form == str:
        return str(reduced)
    if form == float:
        return float(reduced)
    if form == int:
        return int(reduced)
    return reduced
def solve1():
    """Interactively solve a first-degree equation of the form ax = b.

    Prompts the user for a and b, prints the worked steps, and returns
    the reduced solution x = b/a (the caller prints it).
    """
    print("حل معادلة من الدرجة الأولى من الشكل ax=b")
    # Read the two coefficients from the user.
    coeff_a = input("a = ")
    coeff_b = input("b = ")
    # Echo the equation being solved.
    equation = f"{coeff_a}x = {coeff_b}"
    print(f"\n{equation}")
    print(f"x = {coeff_b}/{coeff_a}\n")
    # Reduce b/a to lowest terms.
    solution = ikhtizal(float(coeff_b), float(coeff_a))
    print(f"حل المعادلة {equation} هو :")
    print("x = ", end="")
    return solution
def solve2():
    """Interactively solve a quadratic ax² + bx + c = 0 via the discriminant.

    Prompts for a, b, c (a must be non-zero), prints the worked steps in
    Arabic, and returns: a 2-tuple of roots when ∆ > 0, a single doubled
    root when ∆ = 0, or None when ∆ < 0.
    """
    print("حل معادلة من الدرجة الثانية من الشكل ax²+bx+c=0")
    # Read the coefficients as raw strings; a == 0 would not be quadratic.
    a = input("a = ")
    if float(a) == 0:
        raise ValueError("a≠0")
    b = input("b = ")
    c = input("c = ")
    # Build the human-readable equation using signed() for "+ n" / "- n".
    equation = f"{a}x² {signed(b)}x {signed(c)} = 0"
    print("\n" + equation)
    # Collapse coefficients to int where possible for cleaner output.
    a = intify(a)
    b = intify(b)
    # dB is b as displayed inside "-b": negative values get parentheses.
    if b>=0:
        dB = b
    else:
        dB = "({0})".format(b)
    c = intify(c)
    # Compute and display the discriminant ∆ = b² - 4ac.
    print("\tحساب المميز ∆ : ")
    print("∆ = b² - 4ac\n")
    print(f"∆ = {b}² - 4({a})({c})")
    print(f"∆ = {b**2} {signed((-4) * a * c)}")
    delta = (b**2) - (4*a*c)
    delta = intify(delta)
    print(f"∆ = {delta}")
    if delta>0: # ∆ > 0: two distinct real roots
        print(f"بما أن 0<∆ فإن المعادلة {equation} تقبل حلين مختلفين وهما : \n")
        # NOTE(review): when ∆ is not a perfect square, rootDelta stays a
        # float and ikhtizal() truncates its numerator with int(), so
        # irrational roots come out wrong — confirm intended behavior.
        rootDelta = intify(math.sqrt(delta))
        lbast1 = intify(-b-rootDelta)
        lbast2 = intify(-b+rootDelta)
        x = ( ikhtizal(lbast1, 2*a), ikhtizal(lbast2, 2*a) ) # tuple of the two reduced roots
        print(f"x1 = (-b-√∆)/2a = -{dB}-√{delta}/2({a}) = {-b}-{rootDelta}/{2*a} = {lbast1}/{2*a} = {x[0]}")
        print(f"x2 = (-b+√∆)/2a = -{dB}+√{delta}/2({a}) = {-b}+{rootDelta}/{2*a} = {lbast2}/{2*a} = {x[1]}\n")
        print("S = ", end="")
        return x
    elif delta==0: # ∆ = 0: one doubled root
        print(f"بما أن 0=∆ فإن المعادلة {equation} تقبل حل مضاعف وهو : \n")
        x = ikhtizal(-b, 2*a)
        print(f"x1 = x2 = -b/2a = -{dB}/2({a}) = {-b}/{2*a} = {x}")
        print("x1 = x2 = ", end="")
        return x
    else: # ∆ < 0: no real solutions
        print(f"بما أن 0>∆ فإن المعادلة {equation} لا تقبل حلول .\n")
        print("S = ", end="")
        return None
def solve3():
    """Placeholder for solving third-degree (cubic) equations; not implemented."""
    pass
def solve():
    """Prompt for an equation degree (1-3) and dispatch to the matching solver.

    Degrees 1 and 2 print the computed solution; degree 3 is not
    implemented yet. Any other answer does nothing and returns None.
    """
    passage = "حل معادلة من الدرجة : (1-3) \n"
    degree = input(passage)
    if degree == '1':
        print(solve1())
    elif degree == '2':
        print(solve2())
    elif degree == '3':
        # Fix: the original had print(solve3()) *after* `return None`,
        # so it could never execute; the dead call was removed.
        print("unfinished project")
        return None
# NOTE(review): __name__ is reassigned to "JskMafs.py" at import time earlier
# in this file, so this guard can never equal "__main__" and solve() is never
# auto-run — apparently part of the module's deliberate "troll" behavior;
# confirm before relying on script execution.
if __name__=="__main__":
    solve()
# usage : simply click on this file to run it or import it to your script
| StarcoderdataPython |
133413 | # pylint: disable=no-member, missing-docstring
from unittest import TestCase
from pytest import mark
from celery import shared_task
from django.test.utils import override_settings
from edx_django_utils.cache import RequestCache
@mark.django_db
class TestClearRequestCache(TestCase):
    """
    Tests _clear_request_cache is called after celery task is run.

    The dummy task writes an entry into a RequestCache; with
    CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION enabled, the entry must be
    gone once the task has completed.
    """
    def _get_cache(self):
        # Dedicated cache namespace so this test cannot collide with others.
        return RequestCache("TestClearRequestCache")
    @shared_task
    def _dummy_task(self):
        """ A task that adds stuff to the request cache. """
        # NOTE(review): @shared_task is applied directly to a method, so the
        # test instance is passed explicitly via apply(args=(self,)) below.
        self._get_cache().set("cache_key", "blah blah")
    @override_settings(CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION=True)
    def test_clear_cache_celery(self):
        # Run the task synchronously (apply) and wait for its result.
        self._dummy_task.apply(args=(self,)).get()
        # The entry written by the task must have been cleared on completion.
        assert not self._get_cache().get_cached_response('cache_key').is_found
| StarcoderdataPython |
162674 | # Generated from JavaLexer.g4 by ANTLR 4.9.3
from antlr4 import *
from io import StringIO
import sys
# ANTLR-generated compatibility shim: Python 3.6+ exposes TextIO in `typing`,
# older interpreters in `typing.io`.
# NOTE(review): only the minor version is compared, which assumes a 3.x
# interpreter.
if sys.version_info[1] > 5:
    from typing import TextIO
else:
    from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u0082")
buf.write("\u045a\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4")
buf.write("\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t")
buf.write("\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3")
buf.write("\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21")
buf.write("\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36")
buf.write("\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3")
buf.write("\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$")
buf.write("\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3")
buf.write("\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3(\3(")
buf.write("\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3")
buf.write("+\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3-\3-\3-\3")
buf.write("-\3-\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3/\3")
buf.write("/\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\62\3")
buf.write("\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65")
buf.write("\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38")
buf.write("\38\38\38\38\38\39\39\39\3:\3:\3:\3:\3:\3;\3;\3;\3;\3")
buf.write(";\3;\3;\3;\3;\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3=\3=\3")
buf.write("=\3=\3=\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3")
buf.write("@\3@\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3B\3B\3B\3C\3")
buf.write("C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3D\3D\3D\5D\u02d7\nD\3D\6")
buf.write("D\u02da\nD\rD\16D\u02db\3D\5D\u02df\nD\5D\u02e1\nD\3D")
buf.write("\5D\u02e4\nD\3E\3E\3E\3E\7E\u02ea\nE\fE\16E\u02ed\13E")
buf.write("\3E\5E\u02f0\nE\3E\5E\u02f3\nE\3F\3F\7F\u02f7\nF\fF\16")
buf.write("F\u02fa\13F\3F\3F\7F\u02fe\nF\fF\16F\u0301\13F\3F\5F\u0304")
buf.write("\nF\3F\5F\u0307\nF\3G\3G\3G\3G\7G\u030d\nG\fG\16G\u0310")
buf.write("\13G\3G\5G\u0313\nG\3G\5G\u0316\nG\3H\3H\3H\5H\u031b\n")
buf.write("H\3H\3H\5H\u031f\nH\3H\5H\u0322\nH\3H\5H\u0325\nH\3H\3")
buf.write("H\3H\5H\u032a\nH\3H\5H\u032d\nH\5H\u032f\nH\3I\3I\3I\3")
buf.write("I\5I\u0335\nI\3I\5I\u0338\nI\3I\3I\5I\u033c\nI\3I\3I\5")
buf.write("I\u0340\nI\3I\3I\5I\u0344\nI\3J\3J\3J\3J\3J\3J\3J\3J\3")
buf.write("J\5J\u034f\nJ\3K\3K\3K\5K\u0354\nK\3K\3K\3L\3L\3L\7L\u035b")
buf.write("\nL\fL\16L\u035e\13L\3L\3L\3M\3M\3M\3M\3M\7M\u0367\nM")
buf.write("\fM\16M\u036a\13M\3M\3M\3M\7M\u036f\nM\fM\16M\u0372\13")
buf.write("M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3P\3P\3Q\3Q\3R\3R\3")
buf.write("S\3S\3T\3T\3U\3U\3V\3V\3W\3W\3X\3X\3Y\3Y\3Z\3Z\3[\3[\3")
buf.write("\\\3\\\3]\3]\3^\3^\3_\3_\3_\3`\3`\3`\3a\3a\3a\3b\3b\3")
buf.write("b\3c\3c\3c\3d\3d\3d\3e\3e\3e\3f\3f\3f\3g\3g\3h\3h\3i\3")
buf.write("i\3j\3j\3k\3k\3l\3l\3m\3m\3n\3n\3o\3o\3o\3p\3p\3p\3q\3")
buf.write("q\3q\3r\3r\3r\3s\3s\3s\3t\3t\3t\3u\3u\3u\3v\3v\3v\3w\3")
buf.write("w\3w\3w\3x\3x\3x\3x\3y\3y\3y\3y\3y\3z\3z\3z\3{\3{\3{\3")
buf.write("|\3|\3}\3}\3}\3}\3~\6~\u03f7\n~\r~\16~\u03f8\3~\3~\3\177")
buf.write("\3\177\3\177\3\177\7\177\u0401\n\177\f\177\16\177\u0404")
buf.write("\13\177\3\177\3\177\3\177\3\177\3\177\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0080\7\u0080\u040f\n\u0080\f\u0080\16\u0080")
buf.write("\u0412\13\u0080\3\u0080\3\u0080\3\u0081\3\u0081\7\u0081")
buf.write("\u0418\n\u0081\f\u0081\16\u0081\u041b\13\u0081\3\u0082")
buf.write("\3\u0082\5\u0082\u041f\n\u0082\3\u0082\3\u0082\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\5\u0083\u0427\n\u0083\3\u0083")
buf.write("\5\u0083\u042a\n\u0083\3\u0083\3\u0083\3\u0083\6\u0083")
buf.write("\u042f\n\u0083\r\u0083\16\u0083\u0430\3\u0083\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\5\u0083\u0438\n\u0083\3\u0084")
buf.write("\3\u0084\3\u0084\7\u0084\u043d\n\u0084\f\u0084\16\u0084")
buf.write("\u0440\13\u0084\3\u0084\5\u0084\u0443\n\u0084\3\u0085")
buf.write("\3\u0085\3\u0086\3\u0086\7\u0086\u0449\n\u0086\f\u0086")
buf.write("\16\u0086\u044c\13\u0086\3\u0086\5\u0086\u044f\n\u0086")
buf.write("\3\u0087\3\u0087\5\u0087\u0453\n\u0087\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\5\u0088\u0459\n\u0088\4\u0370\u0402\2")
buf.write("\u0089\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27")
buf.write("\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30")
buf.write("/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'")
buf.write("M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q")
buf.write(":s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F")
buf.write("\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099")
buf.write("N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9")
buf.write("V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9")
buf.write("^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9")
buf.write("f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9")
buf.write("n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9")
buf.write("v\u00ebw\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9")
buf.write("~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103")
buf.write("\2\u0105\2\u0107\2\u0109\2\u010b\2\u010d\2\u010f\2\3\2")
buf.write("\35\3\2\63;\4\2NNnn\4\2ZZzz\5\2\62;CHch\6\2\62;CHaach")
buf.write("\3\2\629\4\2\629aa\4\2DDdd\3\2\62\63\4\2\62\63aa\6\2F")
buf.write("FHHffhh\4\2RRrr\4\2--//\6\2\f\f\17\17))^^\6\2\f\f\17\17")
buf.write("$$^^\4\2\13\13\"\"\4\2\f\f\17\17\5\2\13\f\16\17\"\"\4")
buf.write("\2GGgg\n\2$$))^^ddhhppttvv\3\2\62\65\3\2\62;\4\2\62;a")
buf.write("a\6\2&&C\\aac|\4\2\2\u0081\ud802\udc01\3\2\ud802\udc01")
buf.write("\3\2\udc02\ue001\2\u0486\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3")
buf.write("\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2")
buf.write("\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2")
buf.write("\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2")
buf.write("!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2")
buf.write("\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3")
buf.write("\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2")
buf.write("\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2")
buf.write("\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2")
buf.write("\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3")
buf.write("\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c")
buf.write("\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2")
buf.write("m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2")
buf.write("\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2")
buf.write("\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2")
buf.write("\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d")
buf.write("\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2")
buf.write("\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b")
buf.write("\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2")
buf.write("\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9")
buf.write("\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2")
buf.write("\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7")
buf.write("\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2")
buf.write("\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5")
buf.write("\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2")
buf.write("\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3")
buf.write("\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2")
buf.write("\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1")
buf.write("\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2")
buf.write("\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef")
buf.write("\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2")
buf.write("\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd")
buf.write("\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\3\u0111\3\2\2")
buf.write("\2\5\u011a\3\2\2\2\7\u0121\3\2\2\2\t\u0129\3\2\2\2\13")
buf.write("\u012f\3\2\2\2\r\u0134\3\2\2\2\17\u0139\3\2\2\2\21\u013f")
buf.write("\3\2\2\2\23\u0144\3\2\2\2\25\u014a\3\2\2\2\27\u0150\3")
buf.write("\2\2\2\31\u0159\3\2\2\2\33\u0161\3\2\2\2\35\u0164\3\2")
buf.write("\2\2\37\u016b\3\2\2\2!\u0170\3\2\2\2#\u0175\3\2\2\2%\u017d")
buf.write("\3\2\2\2\'\u0183\3\2\2\2)\u018b\3\2\2\2+\u0191\3\2\2\2")
buf.write("-\u0195\3\2\2\2/\u0198\3\2\2\2\61\u019d\3\2\2\2\63\u01a8")
buf.write("\3\2\2\2\65\u01af\3\2\2\2\67\u01ba\3\2\2\29\u01be\3\2")
buf.write("\2\2;\u01c8\3\2\2\2=\u01cd\3\2\2\2?\u01d4\3\2\2\2A\u01d8")
buf.write("\3\2\2\2C\u01e0\3\2\2\2E\u01e8\3\2\2\2G\u01f2\3\2\2\2")
buf.write("I\u01f9\3\2\2\2K\u0200\3\2\2\2M\u0206\3\2\2\2O\u020d\3")
buf.write("\2\2\2Q\u0216\3\2\2\2S\u021c\3\2\2\2U\u0223\3\2\2\2W\u0230")
buf.write("\3\2\2\2Y\u0235\3\2\2\2[\u023b\3\2\2\2]\u0242\3\2\2\2")
buf.write("_\u024c\3\2\2\2a\u0250\3\2\2\2c\u0255\3\2\2\2e\u025e\3")
buf.write("\2\2\2g\u0264\3\2\2\2i\u026b\3\2\2\2k\u0270\3\2\2\2m\u0279")
buf.write("\3\2\2\2o\u0281\3\2\2\2q\u0287\3\2\2\2s\u028a\3\2\2\2")
buf.write("u\u028f\3\2\2\2w\u0298\3\2\2\2y\u029d\3\2\2\2{\u02a8\3")
buf.write("\2\2\2}\u02ac\3\2\2\2\177\u02b2\3\2\2\2\u0081\u02b9\3")
buf.write("\2\2\2\u0083\u02c0\3\2\2\2\u0085\u02c8\3\2\2\2\u0087\u02e0")
buf.write("\3\2\2\2\u0089\u02e5\3\2\2\2\u008b\u02f4\3\2\2\2\u008d")
buf.write("\u0308\3\2\2\2\u008f\u032e\3\2\2\2\u0091\u0330\3\2\2\2")
buf.write("\u0093\u034e\3\2\2\2\u0095\u0350\3\2\2\2\u0097\u0357\3")
buf.write("\2\2\2\u0099\u0361\3\2\2\2\u009b\u0377\3\2\2\2\u009d\u037c")
buf.write("\3\2\2\2\u009f\u037e\3\2\2\2\u00a1\u0380\3\2\2\2\u00a3")
buf.write("\u0382\3\2\2\2\u00a5\u0384\3\2\2\2\u00a7\u0386\3\2\2\2")
buf.write("\u00a9\u0388\3\2\2\2\u00ab\u038a\3\2\2\2\u00ad\u038c\3")
buf.write("\2\2\2\u00af\u038e\3\2\2\2\u00b1\u0390\3\2\2\2\u00b3\u0392")
buf.write("\3\2\2\2\u00b5\u0394\3\2\2\2\u00b7\u0396\3\2\2\2\u00b9")
buf.write("\u0398\3\2\2\2\u00bb\u039a\3\2\2\2\u00bd\u039c\3\2\2\2")
buf.write("\u00bf\u039f\3\2\2\2\u00c1\u03a2\3\2\2\2\u00c3\u03a5\3")
buf.write("\2\2\2\u00c5\u03a8\3\2\2\2\u00c7\u03ab\3\2\2\2\u00c9\u03ae")
buf.write("\3\2\2\2\u00cb\u03b1\3\2\2\2\u00cd\u03b4\3\2\2\2\u00cf")
buf.write("\u03b6\3\2\2\2\u00d1\u03b8\3\2\2\2\u00d3\u03ba\3\2\2\2")
buf.write("\u00d5\u03bc\3\2\2\2\u00d7\u03be\3\2\2\2\u00d9\u03c0\3")
buf.write("\2\2\2\u00db\u03c2\3\2\2\2\u00dd\u03c4\3\2\2\2\u00df\u03c7")
buf.write("\3\2\2\2\u00e1\u03ca\3\2\2\2\u00e3\u03cd\3\2\2\2\u00e5")
buf.write("\u03d0\3\2\2\2\u00e7\u03d3\3\2\2\2\u00e9\u03d6\3\2\2\2")
buf.write("\u00eb\u03d9\3\2\2\2\u00ed\u03dc\3\2\2\2\u00ef\u03e0\3")
buf.write("\2\2\2\u00f1\u03e4\3\2\2\2\u00f3\u03e9\3\2\2\2\u00f5\u03ec")
buf.write("\3\2\2\2\u00f7\u03ef\3\2\2\2\u00f9\u03f1\3\2\2\2\u00fb")
buf.write("\u03f6\3\2\2\2\u00fd\u03fc\3\2\2\2\u00ff\u040a\3\2\2\2")
buf.write("\u0101\u0415\3\2\2\2\u0103\u041c\3\2\2\2\u0105\u0437\3")
buf.write("\2\2\2\u0107\u0439\3\2\2\2\u0109\u0444\3\2\2\2\u010b\u0446")
buf.write("\3\2\2\2\u010d\u0452\3\2\2\2\u010f\u0458\3\2\2\2\u0111")
buf.write("\u0112\7c\2\2\u0112\u0113\7d\2\2\u0113\u0114\7u\2\2\u0114")
buf.write("\u0115\7v\2\2\u0115\u0116\7t\2\2\u0116\u0117\7c\2\2\u0117")
buf.write("\u0118\7e\2\2\u0118\u0119\7v\2\2\u0119\4\3\2\2\2\u011a")
buf.write("\u011b\7c\2\2\u011b\u011c\7u\2\2\u011c\u011d\7u\2\2\u011d")
buf.write("\u011e\7g\2\2\u011e\u011f\7t\2\2\u011f\u0120\7v\2\2\u0120")
buf.write("\6\3\2\2\2\u0121\u0122\7d\2\2\u0122\u0123\7q\2\2\u0123")
buf.write("\u0124\7q\2\2\u0124\u0125\7n\2\2\u0125\u0126\7g\2\2\u0126")
buf.write("\u0127\7c\2\2\u0127\u0128\7p\2\2\u0128\b\3\2\2\2\u0129")
buf.write("\u012a\7d\2\2\u012a\u012b\7t\2\2\u012b\u012c\7g\2\2\u012c")
buf.write("\u012d\7c\2\2\u012d\u012e\7m\2\2\u012e\n\3\2\2\2\u012f")
buf.write("\u0130\7d\2\2\u0130\u0131\7{\2\2\u0131\u0132\7v\2\2\u0132")
buf.write("\u0133\7g\2\2\u0133\f\3\2\2\2\u0134\u0135\7e\2\2\u0135")
buf.write("\u0136\7c\2\2\u0136\u0137\7u\2\2\u0137\u0138\7g\2\2\u0138")
buf.write("\16\3\2\2\2\u0139\u013a\7e\2\2\u013a\u013b\7c\2\2\u013b")
buf.write("\u013c\7v\2\2\u013c\u013d\7e\2\2\u013d\u013e\7j\2\2\u013e")
buf.write("\20\3\2\2\2\u013f\u0140\7e\2\2\u0140\u0141\7j\2\2\u0141")
buf.write("\u0142\7c\2\2\u0142\u0143\7t\2\2\u0143\22\3\2\2\2\u0144")
buf.write("\u0145\7e\2\2\u0145\u0146\7n\2\2\u0146\u0147\7c\2\2\u0147")
buf.write("\u0148\7u\2\2\u0148\u0149\7u\2\2\u0149\24\3\2\2\2\u014a")
buf.write("\u014b\7e\2\2\u014b\u014c\7q\2\2\u014c\u014d\7p\2\2\u014d")
buf.write("\u014e\7u\2\2\u014e\u014f\7v\2\2\u014f\26\3\2\2\2\u0150")
buf.write("\u0151\7e\2\2\u0151\u0152\7q\2\2\u0152\u0153\7p\2\2\u0153")
buf.write("\u0154\7v\2\2\u0154\u0155\7k\2\2\u0155\u0156\7p\2\2\u0156")
buf.write("\u0157\7w\2\2\u0157\u0158\7g\2\2\u0158\30\3\2\2\2\u0159")
buf.write("\u015a\7f\2\2\u015a\u015b\7g\2\2\u015b\u015c\7h\2\2\u015c")
buf.write("\u015d\7c\2\2\u015d\u015e\7w\2\2\u015e\u015f\7n\2\2\u015f")
buf.write("\u0160\7v\2\2\u0160\32\3\2\2\2\u0161\u0162\7f\2\2\u0162")
buf.write("\u0163\7q\2\2\u0163\34\3\2\2\2\u0164\u0165\7f\2\2\u0165")
buf.write("\u0166\7q\2\2\u0166\u0167\7w\2\2\u0167\u0168\7d\2\2\u0168")
buf.write("\u0169\7n\2\2\u0169\u016a\7g\2\2\u016a\36\3\2\2\2\u016b")
buf.write("\u016c\7g\2\2\u016c\u016d\7n\2\2\u016d\u016e\7u\2\2\u016e")
buf.write("\u016f\7g\2\2\u016f \3\2\2\2\u0170\u0171\7g\2\2\u0171")
buf.write("\u0172\7p\2\2\u0172\u0173\7w\2\2\u0173\u0174\7o\2\2\u0174")
buf.write("\"\3\2\2\2\u0175\u0176\7g\2\2\u0176\u0177\7z\2\2\u0177")
buf.write("\u0178\7v\2\2\u0178\u0179\7g\2\2\u0179\u017a\7p\2\2\u017a")
buf.write("\u017b\7f\2\2\u017b\u017c\7u\2\2\u017c$\3\2\2\2\u017d")
buf.write("\u017e\7h\2\2\u017e\u017f\7k\2\2\u017f\u0180\7p\2\2\u0180")
buf.write("\u0181\7c\2\2\u0181\u0182\7n\2\2\u0182&\3\2\2\2\u0183")
buf.write("\u0184\7h\2\2\u0184\u0185\7k\2\2\u0185\u0186\7p\2\2\u0186")
buf.write("\u0187\7c\2\2\u0187\u0188\7n\2\2\u0188\u0189\7n\2\2\u0189")
buf.write("\u018a\7{\2\2\u018a(\3\2\2\2\u018b\u018c\7h\2\2\u018c")
buf.write("\u018d\7n\2\2\u018d\u018e\7q\2\2\u018e\u018f\7c\2\2\u018f")
buf.write("\u0190\7v\2\2\u0190*\3\2\2\2\u0191\u0192\7h\2\2\u0192")
buf.write("\u0193\7q\2\2\u0193\u0194\7t\2\2\u0194,\3\2\2\2\u0195")
buf.write("\u0196\7k\2\2\u0196\u0197\7h\2\2\u0197.\3\2\2\2\u0198")
buf.write("\u0199\7i\2\2\u0199\u019a\7q\2\2\u019a\u019b\7v\2\2\u019b")
buf.write("\u019c\7q\2\2\u019c\60\3\2\2\2\u019d\u019e\7k\2\2\u019e")
buf.write("\u019f\7o\2\2\u019f\u01a0\7r\2\2\u01a0\u01a1\7n\2\2\u01a1")
buf.write("\u01a2\7g\2\2\u01a2\u01a3\7o\2\2\u01a3\u01a4\7g\2\2\u01a4")
buf.write("\u01a5\7p\2\2\u01a5\u01a6\7v\2\2\u01a6\u01a7\7u\2\2\u01a7")
buf.write("\62\3\2\2\2\u01a8\u01a9\7k\2\2\u01a9\u01aa\7o\2\2\u01aa")
buf.write("\u01ab\7r\2\2\u01ab\u01ac\7q\2\2\u01ac\u01ad\7t\2\2\u01ad")
buf.write("\u01ae\7v\2\2\u01ae\64\3\2\2\2\u01af\u01b0\7k\2\2\u01b0")
buf.write("\u01b1\7p\2\2\u01b1\u01b2\7u\2\2\u01b2\u01b3\7v\2\2\u01b3")
buf.write("\u01b4\7c\2\2\u01b4\u01b5\7p\2\2\u01b5\u01b6\7e\2\2\u01b6")
buf.write("\u01b7\7g\2\2\u01b7\u01b8\7q\2\2\u01b8\u01b9\7h\2\2\u01b9")
buf.write("\66\3\2\2\2\u01ba\u01bb\7k\2\2\u01bb\u01bc\7p\2\2\u01bc")
buf.write("\u01bd\7v\2\2\u01bd8\3\2\2\2\u01be\u01bf\7k\2\2\u01bf")
buf.write("\u01c0\7p\2\2\u01c0\u01c1\7v\2\2\u01c1\u01c2\7g\2\2\u01c2")
buf.write("\u01c3\7t\2\2\u01c3\u01c4\7h\2\2\u01c4\u01c5\7c\2\2\u01c5")
buf.write("\u01c6\7e\2\2\u01c6\u01c7\7g\2\2\u01c7:\3\2\2\2\u01c8")
buf.write("\u01c9\7n\2\2\u01c9\u01ca\7q\2\2\u01ca\u01cb\7p\2\2\u01cb")
buf.write("\u01cc\7i\2\2\u01cc<\3\2\2\2\u01cd\u01ce\7p\2\2\u01ce")
buf.write("\u01cf\7c\2\2\u01cf\u01d0\7v\2\2\u01d0\u01d1\7k\2\2\u01d1")
buf.write("\u01d2\7x\2\2\u01d2\u01d3\7g\2\2\u01d3>\3\2\2\2\u01d4")
buf.write("\u01d5\7p\2\2\u01d5\u01d6\7g\2\2\u01d6\u01d7\7y\2\2\u01d7")
buf.write("@\3\2\2\2\u01d8\u01d9\7r\2\2\u01d9\u01da\7c\2\2\u01da")
buf.write("\u01db\7e\2\2\u01db\u01dc\7m\2\2\u01dc\u01dd\7c\2\2\u01dd")
buf.write("\u01de\7i\2\2\u01de\u01df\7g\2\2\u01dfB\3\2\2\2\u01e0")
buf.write("\u01e1\7r\2\2\u01e1\u01e2\7t\2\2\u01e2\u01e3\7k\2\2\u01e3")
buf.write("\u01e4\7x\2\2\u01e4\u01e5\7c\2\2\u01e5\u01e6\7v\2\2\u01e6")
buf.write("\u01e7\7g\2\2\u01e7D\3\2\2\2\u01e8\u01e9\7r\2\2\u01e9")
buf.write("\u01ea\7t\2\2\u01ea\u01eb\7q\2\2\u01eb\u01ec\7v\2\2\u01ec")
buf.write("\u01ed\7g\2\2\u01ed\u01ee\7e\2\2\u01ee\u01ef\7v\2\2\u01ef")
buf.write("\u01f0\7g\2\2\u01f0\u01f1\7f\2\2\u01f1F\3\2\2\2\u01f2")
buf.write("\u01f3\7r\2\2\u01f3\u01f4\7w\2\2\u01f4\u01f5\7d\2\2\u01f5")
buf.write("\u01f6\7n\2\2\u01f6\u01f7\7k\2\2\u01f7\u01f8\7e\2\2\u01f8")
buf.write("H\3\2\2\2\u01f9\u01fa\7t\2\2\u01fa\u01fb\7g\2\2\u01fb")
buf.write("\u01fc\7v\2\2\u01fc\u01fd\7w\2\2\u01fd\u01fe\7t\2\2\u01fe")
buf.write("\u01ff\7p\2\2\u01ffJ\3\2\2\2\u0200\u0201\7u\2\2\u0201")
buf.write("\u0202\7j\2\2\u0202\u0203\7q\2\2\u0203\u0204\7t\2\2\u0204")
buf.write("\u0205\7v\2\2\u0205L\3\2\2\2\u0206\u0207\7u\2\2\u0207")
buf.write("\u0208\7v\2\2\u0208\u0209\7c\2\2\u0209\u020a\7v\2\2\u020a")
buf.write("\u020b\7k\2\2\u020b\u020c\7e\2\2\u020cN\3\2\2\2\u020d")
buf.write("\u020e\7u\2\2\u020e\u020f\7v\2\2\u020f\u0210\7t\2\2\u0210")
buf.write("\u0211\7k\2\2\u0211\u0212\7e\2\2\u0212\u0213\7v\2\2\u0213")
buf.write("\u0214\7h\2\2\u0214\u0215\7r\2\2\u0215P\3\2\2\2\u0216")
buf.write("\u0217\7u\2\2\u0217\u0218\7w\2\2\u0218\u0219\7r\2\2\u0219")
buf.write("\u021a\7g\2\2\u021a\u021b\7t\2\2\u021bR\3\2\2\2\u021c")
buf.write("\u021d\7u\2\2\u021d\u021e\7y\2\2\u021e\u021f\7k\2\2\u021f")
buf.write("\u0220\7v\2\2\u0220\u0221\7e\2\2\u0221\u0222\7j\2\2\u0222")
buf.write("T\3\2\2\2\u0223\u0224\7u\2\2\u0224\u0225\7{\2\2\u0225")
buf.write("\u0226\7p\2\2\u0226\u0227\7e\2\2\u0227\u0228\7j\2\2\u0228")
buf.write("\u0229\7t\2\2\u0229\u022a\7q\2\2\u022a\u022b\7p\2\2\u022b")
buf.write("\u022c\7k\2\2\u022c\u022d\7|\2\2\u022d\u022e\7g\2\2\u022e")
buf.write("\u022f\7f\2\2\u022fV\3\2\2\2\u0230\u0231\7v\2\2\u0231")
buf.write("\u0232\7j\2\2\u0232\u0233\7k\2\2\u0233\u0234\7u\2\2\u0234")
buf.write("X\3\2\2\2\u0235\u0236\7v\2\2\u0236\u0237\7j\2\2\u0237")
buf.write("\u0238\7t\2\2\u0238\u0239\7q\2\2\u0239\u023a\7y\2\2\u023a")
buf.write("Z\3\2\2\2\u023b\u023c\7v\2\2\u023c\u023d\7j\2\2\u023d")
buf.write("\u023e\7t\2\2\u023e\u023f\7q\2\2\u023f\u0240\7y\2\2\u0240")
buf.write("\u0241\7u\2\2\u0241\\\3\2\2\2\u0242\u0243\7v\2\2\u0243")
buf.write("\u0244\7t\2\2\u0244\u0245\7c\2\2\u0245\u0246\7p\2\2\u0246")
buf.write("\u0247\7u\2\2\u0247\u0248\7k\2\2\u0248\u0249\7g\2\2\u0249")
buf.write("\u024a\7p\2\2\u024a\u024b\7v\2\2\u024b^\3\2\2\2\u024c")
buf.write("\u024d\7v\2\2\u024d\u024e\7t\2\2\u024e\u024f\7{\2\2\u024f")
buf.write("`\3\2\2\2\u0250\u0251\7x\2\2\u0251\u0252\7q\2\2\u0252")
buf.write("\u0253\7k\2\2\u0253\u0254\7f\2\2\u0254b\3\2\2\2\u0255")
buf.write("\u0256\7x\2\2\u0256\u0257\7q\2\2\u0257\u0258\7n\2\2\u0258")
buf.write("\u0259\7c\2\2\u0259\u025a\7v\2\2\u025a\u025b\7k\2\2\u025b")
buf.write("\u025c\7n\2\2\u025c\u025d\7g\2\2\u025dd\3\2\2\2\u025e")
buf.write("\u025f\7y\2\2\u025f\u0260\7j\2\2\u0260\u0261\7k\2\2\u0261")
buf.write("\u0262\7n\2\2\u0262\u0263\7g\2\2\u0263f\3\2\2\2\u0264")
buf.write("\u0265\7o\2\2\u0265\u0266\7q\2\2\u0266\u0267\7f\2\2\u0267")
buf.write("\u0268\7w\2\2\u0268\u0269\7n\2\2\u0269\u026a\7g\2\2\u026a")
buf.write("h\3\2\2\2\u026b\u026c\7q\2\2\u026c\u026d\7r\2\2\u026d")
buf.write("\u026e\7g\2\2\u026e\u026f\7p\2\2\u026fj\3\2\2\2\u0270")
buf.write("\u0271\7t\2\2\u0271\u0272\7g\2\2\u0272\u0273\7s\2\2\u0273")
buf.write("\u0274\7w\2\2\u0274\u0275\7k\2\2\u0275\u0276\7t\2\2\u0276")
buf.write("\u0277\7g\2\2\u0277\u0278\7u\2\2\u0278l\3\2\2\2\u0279")
buf.write("\u027a\7g\2\2\u027a\u027b\7z\2\2\u027b\u027c\7r\2\2\u027c")
buf.write("\u027d\7q\2\2\u027d\u027e\7t\2\2\u027e\u027f\7v\2\2\u027f")
buf.write("\u0280\7u\2\2\u0280n\3\2\2\2\u0281\u0282\7q\2\2\u0282")
buf.write("\u0283\7r\2\2\u0283\u0284\7g\2\2\u0284\u0285\7p\2\2\u0285")
buf.write("\u0286\7u\2\2\u0286p\3\2\2\2\u0287\u0288\7v\2\2\u0288")
buf.write("\u0289\7q\2\2\u0289r\3\2\2\2\u028a\u028b\7w\2\2\u028b")
buf.write("\u028c\7u\2\2\u028c\u028d\7g\2\2\u028d\u028e\7u\2\2\u028e")
buf.write("t\3\2\2\2\u028f\u0290\7r\2\2\u0290\u0291\7t\2\2\u0291")
buf.write("\u0292\7q\2\2\u0292\u0293\7x\2\2\u0293\u0294\7k\2\2\u0294")
buf.write("\u0295\7f\2\2\u0295\u0296\7g\2\2\u0296\u0297\7u\2\2\u0297")
buf.write("v\3\2\2\2\u0298\u0299\7y\2\2\u0299\u029a\7k\2\2\u029a")
buf.write("\u029b\7v\2\2\u029b\u029c\7j\2\2\u029cx\3\2\2\2\u029d")
buf.write("\u029e\7v\2\2\u029e\u029f\7t\2\2\u029f\u02a0\7c\2\2\u02a0")
buf.write("\u02a1\7p\2\2\u02a1\u02a2\7u\2\2\u02a2\u02a3\7k\2\2\u02a3")
buf.write("\u02a4\7v\2\2\u02a4\u02a5\7k\2\2\u02a5\u02a6\7x\2\2\u02a6")
buf.write("\u02a7\7g\2\2\u02a7z\3\2\2\2\u02a8\u02a9\7x\2\2\u02a9")
buf.write("\u02aa\7c\2\2\u02aa\u02ab\7t\2\2\u02ab|\3\2\2\2\u02ac")
buf.write("\u02ad\7{\2\2\u02ad\u02ae\7k\2\2\u02ae\u02af\7g\2\2\u02af")
buf.write("\u02b0\7n\2\2\u02b0\u02b1\7f\2\2\u02b1~\3\2\2\2\u02b2")
buf.write("\u02b3\7t\2\2\u02b3\u02b4\7g\2\2\u02b4\u02b5\7e\2\2\u02b5")
buf.write("\u02b6\7q\2\2\u02b6\u02b7\7t\2\2\u02b7\u02b8\7f\2\2\u02b8")
buf.write("\u0080\3\2\2\2\u02b9\u02ba\7u\2\2\u02ba\u02bb\7g\2\2\u02bb")
buf.write("\u02bc\7c\2\2\u02bc\u02bd\7n\2\2\u02bd\u02be\7g\2\2\u02be")
buf.write("\u02bf\7f\2\2\u02bf\u0082\3\2\2\2\u02c0\u02c1\7r\2\2\u02c1")
buf.write("\u02c2\7g\2\2\u02c2\u02c3\7t\2\2\u02c3\u02c4\7o\2\2\u02c4")
buf.write("\u02c5\7k\2\2\u02c5\u02c6\7v\2\2\u02c6\u02c7\7u\2\2\u02c7")
buf.write("\u0084\3\2\2\2\u02c8\u02c9\7p\2\2\u02c9\u02ca\7q\2\2\u02ca")
buf.write("\u02cb\7p\2\2\u02cb\u02cc\7/\2\2\u02cc\u02cd\7u\2\2\u02cd")
buf.write("\u02ce\7g\2\2\u02ce\u02cf\7c\2\2\u02cf\u02d0\7n\2\2\u02d0")
buf.write("\u02d1\7g\2\2\u02d1\u02d2\7f\2\2\u02d2\u0086\3\2\2\2\u02d3")
buf.write("\u02e1\7\62\2\2\u02d4\u02de\t\2\2\2\u02d5\u02d7\5\u010b")
buf.write("\u0086\2\u02d6\u02d5\3\2\2\2\u02d6\u02d7\3\2\2\2\u02d7")
buf.write("\u02df\3\2\2\2\u02d8\u02da\7a\2\2\u02d9\u02d8\3\2\2\2")
buf.write("\u02da\u02db\3\2\2\2\u02db\u02d9\3\2\2\2\u02db\u02dc\3")
buf.write("\2\2\2\u02dc\u02dd\3\2\2\2\u02dd\u02df\5\u010b\u0086\2")
buf.write("\u02de\u02d6\3\2\2\2\u02de\u02d9\3\2\2\2\u02df\u02e1\3")
buf.write("\2\2\2\u02e0\u02d3\3\2\2\2\u02e0\u02d4\3\2\2\2\u02e1\u02e3")
buf.write("\3\2\2\2\u02e2\u02e4\t\3\2\2\u02e3\u02e2\3\2\2\2\u02e3")
buf.write("\u02e4\3\2\2\2\u02e4\u0088\3\2\2\2\u02e5\u02e6\7\62\2")
buf.write("\2\u02e6\u02e7\t\4\2\2\u02e7\u02ef\t\5\2\2\u02e8\u02ea")
buf.write("\t\6\2\2\u02e9\u02e8\3\2\2\2\u02ea\u02ed\3\2\2\2\u02eb")
buf.write("\u02e9\3\2\2\2\u02eb\u02ec\3\2\2\2\u02ec\u02ee\3\2\2\2")
buf.write("\u02ed\u02eb\3\2\2\2\u02ee\u02f0\t\5\2\2\u02ef\u02eb\3")
buf.write("\2\2\2\u02ef\u02f0\3\2\2\2\u02f0\u02f2\3\2\2\2\u02f1\u02f3")
buf.write("\t\3\2\2\u02f2\u02f1\3\2\2\2\u02f2\u02f3\3\2\2\2\u02f3")
buf.write("\u008a\3\2\2\2\u02f4\u02f8\7\62\2\2\u02f5\u02f7\7a\2\2")
buf.write("\u02f6\u02f5\3\2\2\2\u02f7\u02fa\3\2\2\2\u02f8\u02f6\3")
buf.write("\2\2\2\u02f8\u02f9\3\2\2\2\u02f9\u02fb\3\2\2\2\u02fa\u02f8")
buf.write("\3\2\2\2\u02fb\u0303\t\7\2\2\u02fc\u02fe\t\b\2\2\u02fd")
buf.write("\u02fc\3\2\2\2\u02fe\u0301\3\2\2\2\u02ff\u02fd\3\2\2\2")
buf.write("\u02ff\u0300\3\2\2\2\u0300\u0302\3\2\2\2\u0301\u02ff\3")
buf.write("\2\2\2\u0302\u0304\t\7\2\2\u0303\u02ff\3\2\2\2\u0303\u0304")
buf.write("\3\2\2\2\u0304\u0306\3\2\2\2\u0305\u0307\t\3\2\2\u0306")
buf.write("\u0305\3\2\2\2\u0306\u0307\3\2\2\2\u0307\u008c\3\2\2\2")
buf.write("\u0308\u0309\7\62\2\2\u0309\u030a\t\t\2\2\u030a\u0312")
buf.write("\t\n\2\2\u030b\u030d\t\13\2\2\u030c\u030b\3\2\2\2\u030d")
buf.write("\u0310\3\2\2\2\u030e\u030c\3\2\2\2\u030e\u030f\3\2\2\2")
buf.write("\u030f\u0311\3\2\2\2\u0310\u030e\3\2\2\2\u0311\u0313\t")
buf.write("\n\2\2\u0312\u030e\3\2\2\2\u0312\u0313\3\2\2\2\u0313\u0315")
buf.write("\3\2\2\2\u0314\u0316\t\3\2\2\u0315\u0314\3\2\2\2\u0315")
buf.write("\u0316\3\2\2\2\u0316\u008e\3\2\2\2\u0317\u0318\5\u010b")
buf.write("\u0086\2\u0318\u031a\7\60\2\2\u0319\u031b\5\u010b\u0086")
buf.write("\2\u031a\u0319\3\2\2\2\u031a\u031b\3\2\2\2\u031b\u031f")
buf.write("\3\2\2\2\u031c\u031d\7\60\2\2\u031d\u031f\5\u010b\u0086")
buf.write("\2\u031e\u0317\3\2\2\2\u031e\u031c\3\2\2\2\u031f\u0321")
buf.write("\3\2\2\2\u0320\u0322\5\u0103\u0082\2\u0321\u0320\3\2\2")
buf.write("\2\u0321\u0322\3\2\2\2\u0322\u0324\3\2\2\2\u0323\u0325")
buf.write("\t\f\2\2\u0324\u0323\3\2\2\2\u0324\u0325\3\2\2\2\u0325")
buf.write("\u032f\3\2\2\2\u0326\u032c\5\u010b\u0086\2\u0327\u0329")
buf.write("\5\u0103\u0082\2\u0328\u032a\t\f\2\2\u0329\u0328\3\2\2")
buf.write("\2\u0329\u032a\3\2\2\2\u032a\u032d\3\2\2\2\u032b\u032d")
buf.write("\t\f\2\2\u032c\u0327\3\2\2\2\u032c\u032b\3\2\2\2\u032d")
buf.write("\u032f\3\2\2\2\u032e\u031e\3\2\2\2\u032e\u0326\3\2\2\2")
buf.write("\u032f\u0090\3\2\2\2\u0330\u0331\7\62\2\2\u0331\u033b")
buf.write("\t\4\2\2\u0332\u0334\5\u0107\u0084\2\u0333\u0335\7\60")
buf.write("\2\2\u0334\u0333\3\2\2\2\u0334\u0335\3\2\2\2\u0335\u033c")
buf.write("\3\2\2\2\u0336\u0338\5\u0107\u0084\2\u0337\u0336\3\2\2")
buf.write("\2\u0337\u0338\3\2\2\2\u0338\u0339\3\2\2\2\u0339\u033a")
buf.write("\7\60\2\2\u033a\u033c\5\u0107\u0084\2\u033b\u0332\3\2")
buf.write("\2\2\u033b\u0337\3\2\2\2\u033c\u033d\3\2\2\2\u033d\u033f")
buf.write("\t\r\2\2\u033e\u0340\t\16\2\2\u033f\u033e\3\2\2\2\u033f")
buf.write("\u0340\3\2\2\2\u0340\u0341\3\2\2\2\u0341\u0343\5\u010b")
buf.write("\u0086\2\u0342\u0344\t\f\2\2\u0343\u0342\3\2\2\2\u0343")
buf.write("\u0344\3\2\2\2\u0344\u0092\3\2\2\2\u0345\u0346\7v\2\2")
buf.write("\u0346\u0347\7t\2\2\u0347\u0348\7w\2\2\u0348\u034f\7g")
buf.write("\2\2\u0349\u034a\7h\2\2\u034a\u034b\7c\2\2\u034b\u034c")
buf.write("\7n\2\2\u034c\u034d\7u\2\2\u034d\u034f\7g\2\2\u034e\u0345")
buf.write("\3\2\2\2\u034e\u0349\3\2\2\2\u034f\u0094\3\2\2\2\u0350")
buf.write("\u0353\7)\2\2\u0351\u0354\n\17\2\2\u0352\u0354\5\u0105")
buf.write("\u0083\2\u0353\u0351\3\2\2\2\u0353\u0352\3\2\2\2\u0354")
buf.write("\u0355\3\2\2\2\u0355\u0356\7)\2\2\u0356\u0096\3\2\2\2")
buf.write("\u0357\u035c\7$\2\2\u0358\u035b\n\20\2\2\u0359\u035b\5")
buf.write("\u0105\u0083\2\u035a\u0358\3\2\2\2\u035a\u0359\3\2\2\2")
buf.write("\u035b\u035e\3\2\2\2\u035c\u035a\3\2\2\2\u035c\u035d\3")
buf.write("\2\2\2\u035d\u035f\3\2\2\2\u035e\u035c\3\2\2\2\u035f\u0360")
buf.write("\7$\2\2\u0360\u0098\3\2\2\2\u0361\u0362\7$\2\2\u0362\u0363")
buf.write("\7$\2\2\u0363\u0364\7$\2\2\u0364\u0368\3\2\2\2\u0365\u0367")
buf.write("\t\21\2\2\u0366\u0365\3\2\2\2\u0367\u036a\3\2\2\2\u0368")
buf.write("\u0366\3\2\2\2\u0368\u0369\3\2\2\2\u0369\u036b\3\2\2\2")
buf.write("\u036a\u0368\3\2\2\2\u036b\u0370\t\22\2\2\u036c\u036f")
buf.write("\13\2\2\2\u036d\u036f\5\u0105\u0083\2\u036e\u036c\3\2")
buf.write("\2\2\u036e\u036d\3\2\2\2\u036f\u0372\3\2\2\2\u0370\u0371")
buf.write("\3\2\2\2\u0370\u036e\3\2\2\2\u0371\u0373\3\2\2\2\u0372")
buf.write("\u0370\3\2\2\2\u0373\u0374\7$\2\2\u0374\u0375\7$\2\2\u0375")
buf.write("\u0376\7$\2\2\u0376\u009a\3\2\2\2\u0377\u0378\7p\2\2\u0378")
buf.write("\u0379\7w\2\2\u0379\u037a\7n\2\2\u037a\u037b\7n\2\2\u037b")
buf.write("\u009c\3\2\2\2\u037c\u037d\7*\2\2\u037d\u009e\3\2\2\2")
buf.write("\u037e\u037f\7+\2\2\u037f\u00a0\3\2\2\2\u0380\u0381\7")
buf.write("}\2\2\u0381\u00a2\3\2\2\2\u0382\u0383\7\177\2\2\u0383")
buf.write("\u00a4\3\2\2\2\u0384\u0385\7]\2\2\u0385\u00a6\3\2\2\2")
buf.write("\u0386\u0387\7_\2\2\u0387\u00a8\3\2\2\2\u0388\u0389\7")
buf.write("=\2\2\u0389\u00aa\3\2\2\2\u038a\u038b\7.\2\2\u038b\u00ac")
buf.write("\3\2\2\2\u038c\u038d\7\60\2\2\u038d\u00ae\3\2\2\2\u038e")
buf.write("\u038f\7?\2\2\u038f\u00b0\3\2\2\2\u0390\u0391\7@\2\2\u0391")
buf.write("\u00b2\3\2\2\2\u0392\u0393\7>\2\2\u0393\u00b4\3\2\2\2")
buf.write("\u0394\u0395\7#\2\2\u0395\u00b6\3\2\2\2\u0396\u0397\7")
buf.write("\u0080\2\2\u0397\u00b8\3\2\2\2\u0398\u0399\7A\2\2\u0399")
buf.write("\u00ba\3\2\2\2\u039a\u039b\7<\2\2\u039b\u00bc\3\2\2\2")
buf.write("\u039c\u039d\7?\2\2\u039d\u039e\7?\2\2\u039e\u00be\3\2")
buf.write("\2\2\u039f\u03a0\7>\2\2\u03a0\u03a1\7?\2\2\u03a1\u00c0")
buf.write("\3\2\2\2\u03a2\u03a3\7@\2\2\u03a3\u03a4\7?\2\2\u03a4\u00c2")
buf.write("\3\2\2\2\u03a5\u03a6\7#\2\2\u03a6\u03a7\7?\2\2\u03a7\u00c4")
buf.write("\3\2\2\2\u03a8\u03a9\7(\2\2\u03a9\u03aa\7(\2\2\u03aa\u00c6")
buf.write("\3\2\2\2\u03ab\u03ac\7~\2\2\u03ac\u03ad\7~\2\2\u03ad\u00c8")
buf.write("\3\2\2\2\u03ae\u03af\7-\2\2\u03af\u03b0\7-\2\2\u03b0\u00ca")
buf.write("\3\2\2\2\u03b1\u03b2\7/\2\2\u03b2\u03b3\7/\2\2\u03b3\u00cc")
buf.write("\3\2\2\2\u03b4\u03b5\7-\2\2\u03b5\u00ce\3\2\2\2\u03b6")
buf.write("\u03b7\7/\2\2\u03b7\u00d0\3\2\2\2\u03b8\u03b9\7,\2\2\u03b9")
buf.write("\u00d2\3\2\2\2\u03ba\u03bb\7\61\2\2\u03bb\u00d4\3\2\2")
buf.write("\2\u03bc\u03bd\7(\2\2\u03bd\u00d6\3\2\2\2\u03be\u03bf")
buf.write("\7~\2\2\u03bf\u00d8\3\2\2\2\u03c0\u03c1\7`\2\2\u03c1\u00da")
buf.write("\3\2\2\2\u03c2\u03c3\7\'\2\2\u03c3\u00dc\3\2\2\2\u03c4")
buf.write("\u03c5\7-\2\2\u03c5\u03c6\7?\2\2\u03c6\u00de\3\2\2\2\u03c7")
buf.write("\u03c8\7/\2\2\u03c8\u03c9\7?\2\2\u03c9\u00e0\3\2\2\2\u03ca")
buf.write("\u03cb\7,\2\2\u03cb\u03cc\7?\2\2\u03cc\u00e2\3\2\2\2\u03cd")
buf.write("\u03ce\7\61\2\2\u03ce\u03cf\7?\2\2\u03cf\u00e4\3\2\2\2")
buf.write("\u03d0\u03d1\7(\2\2\u03d1\u03d2\7?\2\2\u03d2\u00e6\3\2")
buf.write("\2\2\u03d3\u03d4\7~\2\2\u03d4\u03d5\7?\2\2\u03d5\u00e8")
buf.write("\3\2\2\2\u03d6\u03d7\7`\2\2\u03d7\u03d8\7?\2\2\u03d8\u00ea")
buf.write("\3\2\2\2\u03d9\u03da\7\'\2\2\u03da\u03db\7?\2\2\u03db")
buf.write("\u00ec\3\2\2\2\u03dc\u03dd\7>\2\2\u03dd\u03de\7>\2\2\u03de")
buf.write("\u03df\7?\2\2\u03df\u00ee\3\2\2\2\u03e0\u03e1\7@\2\2\u03e1")
buf.write("\u03e2\7@\2\2\u03e2\u03e3\7?\2\2\u03e3\u00f0\3\2\2\2\u03e4")
buf.write("\u03e5\7@\2\2\u03e5\u03e6\7@\2\2\u03e6\u03e7\7@\2\2\u03e7")
buf.write("\u03e8\7?\2\2\u03e8\u00f2\3\2\2\2\u03e9\u03ea\7/\2\2\u03ea")
buf.write("\u03eb\7@\2\2\u03eb\u00f4\3\2\2\2\u03ec\u03ed\7<\2\2\u03ed")
buf.write("\u03ee\7<\2\2\u03ee\u00f6\3\2\2\2\u03ef\u03f0\7B\2\2\u03f0")
buf.write("\u00f8\3\2\2\2\u03f1\u03f2\7\60\2\2\u03f2\u03f3\7\60\2")
buf.write("\2\u03f3\u03f4\7\60\2\2\u03f4\u00fa\3\2\2\2\u03f5\u03f7")
buf.write("\t\23\2\2\u03f6\u03f5\3\2\2\2\u03f7\u03f8\3\2\2\2\u03f8")
buf.write("\u03f6\3\2\2\2\u03f8\u03f9\3\2\2\2\u03f9\u03fa\3\2\2\2")
buf.write("\u03fa\u03fb\b~\2\2\u03fb\u00fc\3\2\2\2\u03fc\u03fd\7")
buf.write("\61\2\2\u03fd\u03fe\7,\2\2\u03fe\u0402\3\2\2\2\u03ff\u0401")
buf.write("\13\2\2\2\u0400\u03ff\3\2\2\2\u0401\u0404\3\2\2\2\u0402")
buf.write("\u0403\3\2\2\2\u0402\u0400\3\2\2\2\u0403\u0405\3\2\2\2")
buf.write("\u0404\u0402\3\2\2\2\u0405\u0406\7,\2\2\u0406\u0407\7")
buf.write("\61\2\2\u0407\u0408\3\2\2\2\u0408\u0409\b\177\2\2\u0409")
buf.write("\u00fe\3\2\2\2\u040a\u040b\7\61\2\2\u040b\u040c\7\61\2")
buf.write("\2\u040c\u0410\3\2\2\2\u040d\u040f\n\22\2\2\u040e\u040d")
buf.write("\3\2\2\2\u040f\u0412\3\2\2\2\u0410\u040e\3\2\2\2\u0410")
buf.write("\u0411\3\2\2\2\u0411\u0413\3\2\2\2\u0412\u0410\3\2\2\2")
buf.write("\u0413\u0414\b\u0080\2\2\u0414\u0100\3\2\2\2\u0415\u0419")
buf.write("\5\u010f\u0088\2\u0416\u0418\5\u010d\u0087\2\u0417\u0416")
buf.write("\3\2\2\2\u0418\u041b\3\2\2\2\u0419\u0417\3\2\2\2\u0419")
buf.write("\u041a\3\2\2\2\u041a\u0102\3\2\2\2\u041b\u0419\3\2\2\2")
buf.write("\u041c\u041e\t\24\2\2\u041d\u041f\t\16\2\2\u041e\u041d")
buf.write("\3\2\2\2\u041e\u041f\3\2\2\2\u041f\u0420\3\2\2\2\u0420")
buf.write("\u0421\5\u010b\u0086\2\u0421\u0104\3\2\2\2\u0422\u0423")
buf.write("\7^\2\2\u0423\u0438\t\25\2\2\u0424\u0429\7^\2\2\u0425")
buf.write("\u0427\t\26\2\2\u0426\u0425\3\2\2\2\u0426\u0427\3\2\2")
buf.write("\2\u0427\u0428\3\2\2\2\u0428\u042a\t\7\2\2\u0429\u0426")
buf.write("\3\2\2\2\u0429\u042a\3\2\2\2\u042a\u042b\3\2\2\2\u042b")
buf.write("\u0438\t\7\2\2\u042c\u042e\7^\2\2\u042d\u042f\7w\2\2\u042e")
buf.write("\u042d\3\2\2\2\u042f\u0430\3\2\2\2\u0430\u042e\3\2\2\2")
buf.write("\u0430\u0431\3\2\2\2\u0431\u0432\3\2\2\2\u0432\u0433\5")
buf.write("\u0109\u0085\2\u0433\u0434\5\u0109\u0085\2\u0434\u0435")
buf.write("\5\u0109\u0085\2\u0435\u0436\5\u0109\u0085\2\u0436\u0438")
buf.write("\3\2\2\2\u0437\u0422\3\2\2\2\u0437\u0424\3\2\2\2\u0437")
buf.write("\u042c\3\2\2\2\u0438\u0106\3\2\2\2\u0439\u0442\5\u0109")
buf.write("\u0085\2\u043a\u043d\5\u0109\u0085\2\u043b\u043d\7a\2")
buf.write("\2\u043c\u043a\3\2\2\2\u043c\u043b\3\2\2\2\u043d\u0440")
buf.write("\3\2\2\2\u043e\u043c\3\2\2\2\u043e\u043f\3\2\2\2\u043f")
buf.write("\u0441\3\2\2\2\u0440\u043e\3\2\2\2\u0441\u0443\5\u0109")
buf.write("\u0085\2\u0442\u043e\3\2\2\2\u0442\u0443\3\2\2\2\u0443")
buf.write("\u0108\3\2\2\2\u0444\u0445\t\5\2\2\u0445\u010a\3\2\2\2")
buf.write("\u0446\u044e\t\27\2\2\u0447\u0449\t\30\2\2\u0448\u0447")
buf.write("\3\2\2\2\u0449\u044c\3\2\2\2\u044a\u0448\3\2\2\2\u044a")
buf.write("\u044b\3\2\2\2\u044b\u044d\3\2\2\2\u044c\u044a\3\2\2\2")
buf.write("\u044d\u044f\t\27\2\2\u044e\u044a\3\2\2\2\u044e\u044f")
buf.write("\3\2\2\2\u044f\u010c\3\2\2\2\u0450\u0453\5\u010f\u0088")
buf.write("\2\u0451\u0453\t\27\2\2\u0452\u0450\3\2\2\2\u0452\u0451")
buf.write("\3\2\2\2\u0453\u010e\3\2\2\2\u0454\u0459\t\31\2\2\u0455")
buf.write("\u0459\n\32\2\2\u0456\u0457\t\33\2\2\u0457\u0459\t\34")
buf.write("\2\2\u0458\u0454\3\2\2\2\u0458\u0455\3\2\2\2\u0458\u0456")
buf.write("\3\2\2\2\u0459\u0110\3\2\2\2\65\2\u02d6\u02db\u02de\u02e0")
buf.write("\u02e3\u02eb\u02ef\u02f2\u02f8\u02ff\u0303\u0306\u030e")
buf.write("\u0312\u0315\u031a\u031e\u0321\u0324\u0329\u032c\u032e")
buf.write("\u0334\u0337\u033b\u033f\u0343\u034e\u0353\u035a\u035c")
buf.write("\u0368\u036e\u0370\u03f8\u0402\u0410\u0419\u041e\u0426")
buf.write("\u0429\u0430\u0437\u043c\u043e\u0442\u044a\u044e\u0452")
buf.write("\u0458\3\2\3\2")
return buf.getvalue()
class JavaLexer(Lexer):
    """ANTLR-generated lexer for the Java language (grammar: JavaLexer.g4).

    This class was emitted by the ANTLR tool from the serialized ATN defined
    above in this module; do not edit it by hand -- regenerate it from the
    grammar instead.
    """

    # Deserialize the lexer's augmented transition network and build the
    # per-decision DFA caches shared by all lexer instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants, one per lexer rule (indices match symbolicNames).
    ABSTRACT = 1
    ASSERT = 2
    BOOLEAN = 3
    BREAK = 4
    BYTE = 5
    CASE = 6
    CATCH = 7
    CHAR = 8
    CLASS = 9
    CONST = 10
    CONTINUE = 11
    DEFAULT = 12
    DO = 13
    DOUBLE = 14
    ELSE = 15
    ENUM = 16
    EXTENDS = 17
    FINAL = 18
    FINALLY = 19
    FLOAT = 20
    FOR = 21
    IF = 22
    GOTO = 23
    IMPLEMENTS = 24
    IMPORT = 25
    INSTANCEOF = 26
    INT = 27
    INTERFACE = 28
    LONG = 29
    NATIVE = 30
    NEW = 31
    PACKAGE = 32
    PRIVATE = 33
    PROTECTED = 34
    PUBLIC = 35
    RETURN = 36
    SHORT = 37
    STATIC = 38
    STRICTFP = 39
    SUPER = 40
    SWITCH = 41
    SYNCHRONIZED = 42
    THIS = 43
    THROW = 44
    THROWS = 45
    TRANSIENT = 46
    TRY = 47
    VOID = 48
    VOLATILE = 49
    WHILE = 50
    MODULE = 51
    OPEN = 52
    REQUIRES = 53
    EXPORTS = 54
    OPENS = 55
    TO = 56
    USES = 57
    PROVIDES = 58
    WITH = 59
    TRANSITIVE = 60
    VAR = 61
    YIELD = 62
    RECORD = 63
    SEALED = 64
    PERMITS = 65
    NON_SEALED = 66
    DECIMAL_LITERAL = 67
    HEX_LITERAL = 68
    OCT_LITERAL = 69
    BINARY_LITERAL = 70
    FLOAT_LITERAL = 71
    HEX_FLOAT_LITERAL = 72
    BOOL_LITERAL = 73
    CHAR_LITERAL = 74
    STRING_LITERAL = 75
    TEXT_BLOCK = 76
    NULL_LITERAL = 77
    LPAREN = 78
    RPAREN = 79
    LBRACE = 80
    RBRACE = 81
    LBRACK = 82
    RBRACK = 83
    SEMI = 84
    COMMA = 85
    DOT = 86
    ASSIGN = 87
    GT = 88
    LT = 89
    BANG = 90
    TILDE = 91
    QUESTION = 92
    COLON = 93
    EQUAL = 94
    LE = 95
    GE = 96
    NOTEQUAL = 97
    AND = 98
    OR = 99
    INC = 100
    DEC = 101
    ADD = 102
    SUB = 103
    MUL = 104
    DIV = 105
    BITAND = 106
    BITOR = 107
    CARET = 108
    MOD = 109
    ADD_ASSIGN = 110
    SUB_ASSIGN = 111
    MUL_ASSIGN = 112
    DIV_ASSIGN = 113
    AND_ASSIGN = 114
    OR_ASSIGN = 115
    XOR_ASSIGN = 116
    MOD_ASSIGN = 117
    LSHIFT_ASSIGN = 118
    RSHIFT_ASSIGN = 119
    URSHIFT_ASSIGN = 120
    ARROW = 121
    COLONCOLON = 122
    AT = 123
    ELLIPSIS = 124
    WS = 125
    COMMENT = 126
    LINE_COMMENT = 127
    IDENTIFIER = 128

    # Token channels; WS and comments are routed to HIDDEN by the ATN actions.
    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Literal spelling of each token (index 0 is the <INVALID> placeholder).
    literalNames = [ "<INVALID>",
            "'abstract'", "'assert'", "'boolean'", "'break'", "'byte'",
            "'case'", "'catch'", "'char'", "'class'", "'const'", "'continue'",
            "'default'", "'do'", "'double'", "'else'", "'enum'", "'extends'",
            "'final'", "'finally'", "'float'", "'for'", "'if'", "'goto'",
            "'implements'", "'import'", "'instanceof'", "'int'", "'interface'",
            "'long'", "'native'", "'new'", "'package'", "'private'", "'protected'",
            "'public'", "'return'", "'short'", "'static'", "'strictfp'",
            "'super'", "'switch'", "'synchronized'", "'this'", "'throw'",
            "'throws'", "'transient'", "'try'", "'void'", "'volatile'",
            "'while'", "'module'", "'open'", "'requires'", "'exports'",
            "'opens'", "'to'", "'uses'", "'provides'", "'with'", "'transitive'",
            "'var'", "'yield'", "'record'", "'sealed'", "'permits'", "'non-sealed'",
            "'null'", "'('", "')'", "'{'", "'}'", "'['", "']'", "';'", "','",
            "'.'", "'='", "'>'", "'<'", "'!'", "'~'", "'?'", "':'", "'=='",
            "'<='", "'>='", "'!='", "'&&'", "'||'", "'++'", "'--'", "'+'",
            "'-'", "'*'", "'/'", "'&'", "'|'", "'^'", "'%'", "'+='", "'-='",
            "'*='", "'/='", "'&='", "'|='", "'^='", "'%='", "'<<='", "'>>='",
            "'>>>='", "'->'", "'::'", "'@'", "'...'" ]

    # Symbolic name of each token type (parallel to the constants above).
    symbolicNames = [ "<INVALID>",
            "ABSTRACT", "ASSERT", "BOOLEAN", "BREAK", "BYTE", "CASE", "CATCH",
            "CHAR", "CLASS", "CONST", "CONTINUE", "DEFAULT", "DO", "DOUBLE",
            "ELSE", "ENUM", "EXTENDS", "FINAL", "FINALLY", "FLOAT", "FOR",
            "IF", "GOTO", "IMPLEMENTS", "IMPORT", "INSTANCEOF", "INT", "INTERFACE",
            "LONG", "NATIVE", "NEW", "PACKAGE", "PRIVATE", "PROTECTED",
            "PUBLIC", "RETURN", "SHORT", "STATIC", "STRICTFP", "SUPER",
            "SWITCH", "SYNCHRONIZED", "THIS", "THROW", "THROWS", "TRANSIENT",
            "TRY", "VOID", "VOLATILE", "WHILE", "MODULE", "OPEN", "REQUIRES",
            "EXPORTS", "OPENS", "TO", "USES", "PROVIDES", "WITH", "TRANSITIVE",
            "VAR", "YIELD", "RECORD", "SEALED", "PERMITS", "NON_SEALED",
            "DECIMAL_LITERAL", "HEX_LITERAL", "OCT_LITERAL", "BINARY_LITERAL",
            "FLOAT_LITERAL", "HEX_FLOAT_LITERAL", "BOOL_LITERAL", "CHAR_LITERAL",
            "STRING_LITERAL", "TEXT_BLOCK", "NULL_LITERAL", "LPAREN", "RPAREN",
            "LBRACE", "RBRACE", "LBRACK", "RBRACK", "SEMI", "COMMA", "DOT",
            "ASSIGN", "GT", "LT", "BANG", "TILDE", "QUESTION", "COLON",
            "EQUAL", "LE", "GE", "NOTEQUAL", "AND", "OR", "INC", "DEC",
            "ADD", "SUB", "MUL", "DIV", "BITAND", "BITOR", "CARET", "MOD",
            "ADD_ASSIGN", "SUB_ASSIGN", "MUL_ASSIGN", "DIV_ASSIGN", "AND_ASSIGN",
            "OR_ASSIGN", "XOR_ASSIGN", "MOD_ASSIGN", "LSHIFT_ASSIGN", "RSHIFT_ASSIGN",
            "URSHIFT_ASSIGN", "ARROW", "COLONCOLON", "AT", "ELLIPSIS", "WS",
            "COMMENT", "LINE_COMMENT", "IDENTIFIER" ]

    # Grammar rule names; the trailing entries (ExponentPart .. Letter) are
    # fragment rules that do not produce tokens of their own.
    ruleNames = [ "ABSTRACT", "ASSERT", "BOOLEAN", "BREAK", "BYTE", "CASE",
                  "CATCH", "CHAR", "CLASS", "CONST", "CONTINUE", "DEFAULT",
                  "DO", "DOUBLE", "ELSE", "ENUM", "EXTENDS", "FINAL", "FINALLY",
                  "FLOAT", "FOR", "IF", "GOTO", "IMPLEMENTS", "IMPORT",
                  "INSTANCEOF", "INT", "INTERFACE", "LONG", "NATIVE", "NEW",
                  "PACKAGE", "PRIVATE", "PROTECTED", "PUBLIC", "RETURN",
                  "SHORT", "STATIC", "STRICTFP", "SUPER", "SWITCH", "SYNCHRONIZED",
                  "THIS", "THROW", "THROWS", "TRANSIENT", "TRY", "VOID",
                  "VOLATILE", "WHILE", "MODULE", "OPEN", "REQUIRES", "EXPORTS",
                  "OPENS", "TO", "USES", "PROVIDES", "WITH", "TRANSITIVE",
                  "VAR", "YIELD", "RECORD", "SEALED", "PERMITS", "NON_SEALED",
                  "DECIMAL_LITERAL", "HEX_LITERAL", "OCT_LITERAL", "BINARY_LITERAL",
                  "FLOAT_LITERAL", "HEX_FLOAT_LITERAL", "BOOL_LITERAL",
                  "CHAR_LITERAL", "STRING_LITERAL", "TEXT_BLOCK", "NULL_LITERAL",
                  "LPAREN", "RPAREN", "LBRACE", "RBRACE", "LBRACK", "RBRACK",
                  "SEMI", "COMMA", "DOT", "ASSIGN", "GT", "LT", "BANG",
                  "TILDE", "QUESTION", "COLON", "EQUAL", "LE", "GE", "NOTEQUAL",
                  "AND", "OR", "INC", "DEC", "ADD", "SUB", "MUL", "DIV",
                  "BITAND", "BITOR", "CARET", "MOD", "ADD_ASSIGN", "SUB_ASSIGN",
                  "MUL_ASSIGN", "DIV_ASSIGN", "AND_ASSIGN", "OR_ASSIGN",
                  "XOR_ASSIGN", "MOD_ASSIGN", "LSHIFT_ASSIGN", "RSHIFT_ASSIGN",
                  "URSHIFT_ASSIGN", "ARROW", "COLONCOLON", "AT", "ELLIPSIS",
                  "WS", "COMMENT", "LINE_COMMENT", "IDENTIFIER", "ExponentPart",
                  "EscapeSequence", "HexDigits", "HexDigit", "Digits", "LetterOrDigit",
                  "Letter" ]

    grammarFileName = "JavaLexer.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        # Standard ANTLR lexer bootstrap: version check plus an ATN simulator
        # bound to the shared DFA caches above.
        super().__init__(input, output)
        self.checkVersion("4.9.3")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| StarcoderdataPython |
132004 | <filename>tests/test_maze.py
#!usr/bin/python3
import sys, os
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
from classes.maze import Maze, Cell
class TestingMaze:
    """Pytest-style checks for the Maze class's basic behavior."""

    def test_width(self):
        """A 20x5 maze reports 20 columns."""
        m = Maze(20, 5)
        assert m.getCols() == 20

    def test_min_width(self):
        """Widths below the minimum are clamped up to 5 columns."""
        m = Maze(2, 2)
        assert m.getCols() == 5

    def test_height(self):
        """A 5x50 maze reports 50 rows."""
        m = Maze(5, 50)
        assert m.getRows() == 50

    def test_min_height(self):
        """Heights below the minimum are clamped up to 5 rows."""
        m = Maze(2, 2)
        assert m.getRows() == 5

    def test_maze_name(self):
        """The default name encodes the requested dimensions."""
        m = Maze(5, 5)
        assert m.getName() == "MAZE_5x5"

    def test_maze_get(self):
        """Indexing the maze yields Cell objects."""
        m = Maze(5, 5)
        cell = m[0][0]
        assert isinstance(cell, Cell)

    def test_maze_set_name(self):
        """setName overrides the generated name."""
        m = Maze(5, 5)
        m.setName("Testing")
        assert m.getName() == "Testing"

    def test_maze_fill_pound(self):
        """The top-left cell starts out filled with '#'."""
        m = Maze(5, 5)
        assert m[0][0].getElement() == '#'

    def test_maze_fill_space(self):
        """Interior cells are not left as spaces after fill."""
        m = Maze(5, 5)
        assert m[4][4].getElement() != ' '

    def test_maze_image_null(self):
        """No image exists before rendering."""
        m = Maze(5, 5)
        assert m.getMazeImage() == []

    def test_maze_eimage_null(self):
        """No enhanced image exists before rendering."""
        m = Maze(5, 5)
        assert m.getMazeEnhancedImage() == []

    def test_maze_len(self):
        """len() reflects the row count."""
        m = Maze(5, 10)
        assert len(m) == 10
class TestingMazeGenerated:
    # Placeholder test class: no cases for generated mazes implemented yet.
    pass
| StarcoderdataPython |
4833766 | <filename>test_script.py
import unittest
import script
import setupFolder #to set up folder structure for test cases
import os
import string
import shutil #to do force remove
import random
from random import randint
class TestReadfile(unittest.TestCase):
    """Tests for ``script.findTreasure`` against a temporary folder tree.

    Each test runs with a freshly created ``testFolder`` under the current
    working directory; ``tearDown`` restores the working directory and
    removes the folder again.
    """

    def setUp(self):
        """Create an empty testFolder, removing any leftover from a prior run."""
        print("SETUP")
        self.rootFolder = os.getcwd()
        self.testFolder = "testFolder"
        self.testPath = os.path.join(self.rootFolder, self.testFolder)
        # A previous run that crashed before tearDown may have left the
        # folder behind; clear it so each test starts from a known state.
        if os.path.isdir(self.testPath):
            shutil.rmtree(self.testPath)
        os.mkdir(self.testPath)

    def tearDown(self):
        """Restore the working directory and delete the test folder."""
        print("TEARDOWN")
        os.chdir(self.rootFolder)
        shutil.rmtree(self.testPath)

    def test_invalidDir(self):
        """findTreasure must raise for nonexistent or malformed paths."""
        print("TEST_INVALIDDIR")
        # BUG FIX: the original followed each assertRaises block with
        # `self.assertTrue(true)` -- lowercase `true` is an undefined name in
        # Python and would raise NameError.  assertRaises already performs
        # the check, so those bogus assertions were removed.
        with self.assertRaises(Exception):
            script.findTreasure("./jskghkjb")
        with self.assertRaises(Exception):
            script.findTreasure("=")
        with self.assertRaises(Exception):
            script.findTreasure("./,/k")

    def test_validDirWithNoRegex(self):
        """Generated folder content round-trips through findTreasure."""
        print("TEST_VALIDDIR_WITH_NO_REGEX")
        os.chdir(self.rootFolder)
        referenceFolder = setupFolder.generateFolderContent(self.testFolder)
        os.chdir(self.rootFolder)
        self.assertEqual(referenceFolder, script.findTreasure(self.testFolder))

    def test_validDirWithRegex(self, regex=r"[A-Z]\d[A-Z] \d[A-Z]\d"):
        """Same round-trip, but filtered with a postal-code-like regex."""
        print("TEST_VALIDDIR_WITH_REGEX")
        os.chdir(self.rootFolder)
        referenceFolder = setupFolder.generateFolderContent(self.testFolder, regex)
        os.chdir(self.rootFolder)
        self.assertEqual(referenceFolder, script.findTreasure(self.testFolder, regex))
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3307305 | # 4/29/2018
from __future__ import division
import calendar
import csv
from collections import Counter
import gensim
import matplotlib.pyplot as plt
from math import sqrt
import numpy as np
import pandas as pd
import platform
import os
import random
import re, ast
import scipy
import sklearn
from sklearn import linear_model
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation, datasets, linear_model
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import sys
import time
import xlsxwriter
import pickle
from matplotlib.backends.backend_pdf import PdfPages
import imblearn
from imblearn.pipeline import Pipeline as imbPipeline
from imblearn.over_sampling import SMOTE
from sklearn.metrics import make_scorer
from sklearn.metrics import confusion_matrix
| StarcoderdataPython |
3239972 | """
ASDF tags for geometry related models.
"""
from asdf import yamlutil
from ..gwcs_types import GWCSTransformType
from .. geometry import (ToDirectionCosines, FromDirectionCosines,
SphericalToCartesian, CartesianToSpherical)
__all__ = ['DirectionCosinesType', 'SphericalCartesianType']
class DirectionCosinesType(GWCSTransformType):
    """ASDF tag type that (de)serializes direction-cosine transforms."""

    name = "direction_cosines"
    types = [ToDirectionCosines, FromDirectionCosines]
    version = "1.1.0"

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Instantiate the model named by ``node['transform_type']``."""
        transform_type = node['transform_type']
        factories = {
            'to_direction_cosines': ToDirectionCosines,
            'from_direction_cosines': FromDirectionCosines,
        }
        model_cls = factories.get(transform_type)
        if model_cls is None:
            raise TypeError(f"Unknown model_type {transform_type}")
        return model_cls()

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize *model* into its tagged-tree node."""
        if isinstance(model, FromDirectionCosines):
            transform_type = 'from_direction_cosines'
        elif isinstance(model, ToDirectionCosines):
            transform_type = 'to_direction_cosines'
        else:
            raise TypeError(f"Model of type {model.__class__} is not supported.")
        node = {'transform_type': transform_type}
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)
class SphericalCartesianType(GWCSTransformType):
    """ASDF tag type that (de)serializes spherical <-> Cartesian transforms."""

    name = "spherical_cartesian"
    types = [SphericalToCartesian, CartesianToSpherical]
    version = "1.1.0"

    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Instantiate the model named by the node, forwarding wrap_lon_at."""
        transform_type = node['transform_type']
        wrap_lon_at = node['wrap_lon_at']
        factories = {
            'spherical_to_cartesian': SphericalToCartesian,
            'cartesian_to_spherical': CartesianToSpherical,
        }
        model_cls = factories.get(transform_type)
        if model_cls is None:
            raise TypeError(f"Unknown model_type {transform_type}")
        return model_cls(wrap_lon_at=wrap_lon_at)

    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize *model* (including its wrap_lon_at) into a tagged node."""
        if isinstance(model, SphericalToCartesian):
            transform_type = 'spherical_to_cartesian'
        elif isinstance(model, CartesianToSpherical):
            transform_type = 'cartesian_to_spherical'
        else:
            raise TypeError(f"Model of type {model.__class__} is not supported.")
        node = {
            'transform_type': transform_type,
            'wrap_lon_at': model.wrap_lon_at,
        }
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)
| StarcoderdataPython |
1643852 | # coding: utf-8
import logging
from enum import IntEnum, auto
from PyQt5 import uic
from PyQt5.QtCore import (QAbstractTableModel, QModelIndex, Qt)
from PyQt5.QtWidgets import (QHeaderView,
QDataWidgetMapper)
from mhw_armor_edit.assets import Assets
from mhw_armor_edit.editor.models import EditorPlugin
from mhw_armor_edit.ftypes.itm import Itm
from mhw_armor_edit.import_export import ImportExportManager
from mhw_armor_edit.utils import get_t9n_item, get_t9n
log = logging.getLogger()
ItmEditorWidget, ItmEditorWidgetBase = uic.loadUiType(Assets.load_asset_file("item_editor.ui"))
class FlagAttr:
    """Data descriptor exposing one bit of ``owner_instance.entry.flags``.

    Reading the attribute yields True when the bit is set; assigning a
    truthy/falsy value sets or clears that bit in ``entry.flags``.
    """

    def __init__(self, flag):
        # Bit mask (a single power of two) handled by this descriptor.
        self.flag = flag

    def __get__(self, that, owner):
        # Class-level access returns the descriptor itself, per convention.
        if that is None:
            return self
        return bool(that.entry.flags & self.flag)

    def __set__(self, that, toggle):
        current = that.entry.flags
        if toggle:
            that.entry.flags = current | self.flag
        else:
            that.entry.flags = current & ~self.flag
class Column(IntEnum):
    # Column indices for the item table model.  `name` and `description`
    # come from the t9n (translation) tables; the middle columns map 1:1
    # onto Itm entry fields; the flag_* columns each expose one bit of the
    # entry's `flags` field (see FlagAttr / ModelAdapter).
    name = 0
    description = 1
    id = 2
    sub_type = 3
    type = 4
    rarity = 5
    carry_limit = 6
    order = 7
    icon_id = 8
    icon_color = 9
    sell_price = 10
    buy_price = 11
    flag_is_default_item = 12
    flag_is_quest_only = 13
    flag_unknown1 = 14
    flag_is_consumable = 15
    flag_is_appraisal = 16
    flag_unknown2 = 17
    flag_is_mega = 18
    flag_is_level_one = 19
    flag_is_level_two = 20
    flag_is_level_three = 21
    flag_is_glitter = 22
    flag_is_deliverable = 23
    flag_is_not_shown = 24
class ModelAdapter:
    """Adapts one Itm entry (plus its translations) to Column-indexed access.

    Attributes defined on the adapter itself (the flag descriptors below and
    the name/description looked up in __init__) take precedence; every other
    Column resolves directly against the wrapped entry.
    """

    # One descriptor per flag bit; ordinals match the Column flag_* layout.
    flag_is_default_item = FlagAttr(2 ** 0)
    flag_is_quest_only = FlagAttr(2 ** 1)
    flag_unknown1 = FlagAttr(2 ** 2)
    flag_is_consumable = FlagAttr(2 ** 3)
    flag_is_appraisal = FlagAttr(2 ** 4)
    flag_unknown2 = FlagAttr(2 ** 5)
    flag_is_mega = FlagAttr(2 ** 6)
    flag_is_level_one = FlagAttr(2 ** 7)
    flag_is_level_two = FlagAttr(2 ** 8)
    flag_is_level_three = FlagAttr(2 ** 9)
    flag_is_glitter = FlagAttr(2 ** 10)
    flag_is_deliverable = FlagAttr(2 ** 11)
    flag_is_not_shown = FlagAttr(2 ** 12)

    def __init__(self, model, entry):
        self.model = model
        self.entry = entry
        # Translation lookups: item names use the entry id directly, while
        # descriptions live at the odd slot following the name pair.
        self.name = get_t9n_item(self.model, "t9n", self.entry.id)
        self.description = get_t9n(self.model, "t9n", self.entry.id * 2 + 1)

    def __getitem__(self, index):
        attr = Column(index).name
        source = self if hasattr(self, attr) else self.entry
        return getattr(source, attr)

    def __setitem__(self, index, value):
        attr = Column(index).name
        target = self if hasattr(self, attr) else self.entry
        setattr(target, attr, value)
class ItmTableModel(QAbstractTableModel):
    """Qt table model exposing Itm entries through ModelAdapter/Column."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Backing workspace model (set via update()) and its entry list.
        self.model = None
        self.entries = []

    def columnCount(self, parent: QModelIndex=None, *args, **kwargs):
        # One column per Column enum member.
        return len(Column)

    def rowCount(self, parent: QModelIndex=None, *args, **kwargs):
        return len(self.entries)

    def headerData(self, section, orient, role=None):
        # Horizontal headers show the Column member name; everything else
        # falls through to None (Qt's default).
        if role == Qt.DisplayRole:
            if orient == Qt.Horizontal:
                return Column(section).name

    def data(self, qindex: QModelIndex, role=None):
        if role == Qt.DisplayRole or role == Qt.EditRole:
            entry = self.entries[qindex.row()]
            column = qindex.column()
            # Adapter resolves translated names/descriptions and flag bits.
            adapt = ModelAdapter(self.model, entry)
            return adapt[column]
        elif role == Qt.UserRole:
            # UserRole hands back the raw entry (used by import/export).
            entry = self.entries[qindex.row()]
            return entry

    def setData(self, qindex: QModelIndex, value, role=None):
        if role == Qt.EditRole or role == Qt.DisplayRole:
            entry = self.entries[qindex.row()]
            try:
                # All editable columns are integer-valued.
                value = int(value)
                return self.set_entry_value(entry, value, qindex)
            except (TypeError, ValueError):
                return False
        return False

    def set_entry_value(self, entry, value, qindex):
        """Write *value* into *entry* at the column of *qindex*; True on success."""
        adapt = ModelAdapter(self.model, entry)
        try:
            adapt[qindex.column()] = value
            # Notify views of the single changed cell.
            self.dataChanged.emit(qindex, qindex)
            return True
        except (ValueError, TypeError):
            return False

    def update(self, model):
        """Swap in a new backing model, resetting the view state."""
        # begin/endResetModel must bracket the swap so attached views
        # invalidate their indexes.
        self.beginResetModel()
        self.model = model
        if self.model is None:
            self.entries = []
        else:
            self.entries = model.data.entries
        self.endResetModel()
class ItmEditor(ItmEditorWidgetBase, ItmEditorWidget):
    """Item editor widget: a browser list plus a detail form kept in sync
    via QDataWidgetMapper over ItmTableModel columns."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.model = None
        self.itm_model = ItmTableModel(self)
        # The mapper binds detail-form widgets to columns of the currently
        # selected row.
        self.mapper = QDataWidgetMapper(self)
        self.mapper.setModel(self.itm_model)
        self.item_browser.setModel(self.itm_model)
        self.item_browser.activated.connect(self.handle_item_browser_activated)
        # NOTE(review): ItmPlugin does not define `import_export` in this
        # file -- presumably inherited from EditorPlugin; verify.
        self.import_export_manager = ImportExportManager(
            self.item_browser, ItmPlugin.import_export.get("safe_attrs"))
        self.import_export_manager.connect_custom_context_menu()
        # Widget <-> column bindings; the third argument names the widget
        # property the mapper reads/writes (default property when omitted).
        self.mapper.addMapping(self.name_value, Column.name, b"text")
        self.mapper.addMapping(self.id_value, Column.id, b"text")
        self.mapper.addMapping(self.description_value, Column.description, b"text")
        self.mapper.addMapping(self.subtype_value, Column.sub_type, b"currentIndex")
        self.mapper.addMapping(self.type_value, Column.type, b"currentIndex")
        self.mapper.addMapping(self.rarity_value, Column.rarity)
        self.mapper.addMapping(self.carry_limit_value, Column.carry_limit)
        self.mapper.addMapping(self.sort_order_value, Column.order)
        self.mapper.addMapping(self.icon_id_value, Column.icon_id)
        self.mapper.addMapping(self.icon_color_value, Column.icon_color)
        self.mapper.addMapping(self.sell_price_value, Column.sell_price)
        self.mapper.addMapping(self.buy_price_value, Column.buy_price)
        # Flag checkboxes get an extra signal hookup; see add_flag_mapping.
        self.add_flag_mapping(self.flag_is_default_item, Column.flag_is_default_item)
        self.add_flag_mapping(self.flag_is_quest_only, Column.flag_is_quest_only)
        self.add_flag_mapping(self.flag_unknown1, Column.flag_unknown1)
        self.add_flag_mapping(self.flag_is_consumable, Column.flag_is_consumable)
        self.add_flag_mapping(self.flag_is_appraisal, Column.flag_is_appraisal)
        self.add_flag_mapping(self.flag_unknown2, Column.flag_unknown2)
        self.add_flag_mapping(self.flag_is_mega, Column.flag_is_mega)
        self.add_flag_mapping(self.flag_is_level_one, Column.flag_is_level_one)
        self.add_flag_mapping(self.flag_is_level_two, Column.flag_is_level_two)
        self.add_flag_mapping(self.flag_is_level_three, Column.flag_is_level_three)
        self.add_flag_mapping(self.flag_is_glitter, Column.flag_is_glitter)
        self.add_flag_mapping(self.flag_is_deliverable, Column.flag_is_deliverable)
        self.add_flag_mapping(self.flag_is_not_shown, Column.flag_is_not_shown)

    def handle_item_browser_activated(self, qindex):
        # The browser view may wrap the model in a proxy; map the clicked
        # index back to the source model before pointing the mapper at it.
        source_qindex = qindex.model().mapToSource(qindex)
        self.mapper.setCurrentModelIndex(source_qindex)

    def add_flag_mapping(self, widget, flag_column):
        """Bind a flag checkbox to its column and submit edits on release."""
        self.mapper.addMapping(widget, flag_column)
        widget.released.connect(self.mapper.submit)

    def set_model(self, model):
        """Attach a workspace model and configure the browser's header."""
        self.model = model
        self.itm_model.update(model)
        if model is not None:
            header = self.item_browser.header()
            # header = self.item_browser.horizontalHeader()
            header.hideSection(Column.description)
            header.setSectionResizeMode(Column.name, QHeaderView.Stretch)
            header.setSectionResizeMode(Column.id, QHeaderView.Fixed)
            header.resizeSection(Column.id, 50)
            header.setStretchLastSection(False)
            # Only name and id remain visible in the browser.
            for i in range(3, self.itm_model.columnCount(None)):
                header.hideSection(i)
            self.item_browser.sortByColumn(Column.id, Qt.AscendingOrder)
class ItmPlugin(EditorPlugin):
    """Editor plugin registration for *.itm item data files."""

    # File glob this plugin handles, plus the parser and widget to use.
    pattern = "*.itm"
    data_factory = Itm
    widget_factory = ItmEditor

    # Companion files loaded alongside the data file; "t9n" supplies the
    # translated item names/descriptions used by ModelAdapter.
    relations = {
        r"common\item\itemData.itm": {
            "t9n": r"common\text\steam\item_eng.gmd",
        }
    }
| StarcoderdataPython |
3355654 | <gh_stars>0
def fat(n):
    """Return n! computed recursively, printing each value it visits.

    Any n below 1 (zero or negative) yields 1 after a single print.
    """
    print(n)
    if n < 1:
        return 1
    return n * fat(n - 1)
# Demo: compute 5! (printing 5 down to 0 along the way) and show the result.
x = fat(5)
print(x)
| StarcoderdataPython |
161294 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['FirewallRuleArgs', 'FirewallRule']
@pulumi.input_type
class FirewallRuleArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); regenerate
    # rather than hand-editing.  Each @property/@setter pair proxies one
    # resource argument through pulumi's input-property storage
    # (pulumi.get/pulumi.set).
    def __init__(__self__, *,
                 firewall_group_id: pulumi.Input[str],
                 ip_type: pulumi.Input[str],
                 protocol: pulumi.Input[str],
                 subnet: pulumi.Input[str],
                 subnet_size: pulumi.Input[int],
                 notes: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a FirewallRule resource.
        :param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
        :param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        :param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        :param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
        :param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
        :param pulumi.Input[str] notes: A simple note for a given firewall rule
        :param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
        :param pulumi.Input[str] source: Possible values ("", cloudflare)
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "firewall_group_id", firewall_group_id)
        pulumi.set(__self__, "ip_type", ip_type)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "subnet", subnet)
        pulumi.set(__self__, "subnet_size", subnet_size)
        # Optional arguments are only recorded when supplied.
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if source is not None:
            pulumi.set(__self__, "source", source)

    @property
    @pulumi.getter(name="firewallGroupId")
    def firewall_group_id(self) -> pulumi.Input[str]:
        """
        The firewall group that the firewall rule will belong to.
        """
        return pulumi.get(self, "firewall_group_id")

    @firewall_group_id.setter
    def firewall_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "firewall_group_id", value)

    @property
    @pulumi.getter(name="ipType")
    def ip_type(self) -> pulumi.Input[str]:
        """
        The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        """
        return pulumi.get(self, "ip_type")

    @ip_type.setter
    def ip_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "ip_type", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter
    def subnet(self) -> pulumi.Input[str]:
        """
        IP address that you want to define for this firewall rule.
        """
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet", value)

    @property
    @pulumi.getter(name="subnetSize")
    def subnet_size(self) -> pulumi.Input[int]:
        """
        The number of bits for the subnet in CIDR notation. Example: 32.
        """
        return pulumi.get(self, "subnet_size")

    @subnet_size.setter
    def subnet_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "subnet_size", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        A simple note for a given firewall rule
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        TCP/UDP only. This field can be a specific port or a colon separated port range.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        """
        Possible values ("", cloudflare)
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)
# NOTE: auto-generated Pulumi codegen. This state class mirrors
# FirewallRuleArgs but with every field optional, because it is used to
# look up / filter existing resources rather than to create new ones.
@pulumi.input_type
class _FirewallRuleState:
    def __init__(__self__, *,
                 firewall_group_id: Optional[pulumi.Input[str]] = None,
                 ip_type: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 subnet: Optional[pulumi.Input[str]] = None,
                 subnet_size: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering FirewallRule resources.
        :param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
        :param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        :param pulumi.Input[str] notes: A simple note for a given firewall rule
        :param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
        :param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        :param pulumi.Input[str] source: Possible values ("", cloudflare)
        :param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
        :param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
        """
        # Only register values that were explicitly provided, so unset
        # fields stay absent from the recorded state.
        if firewall_group_id is not None:
            pulumi.set(__self__, "firewall_group_id", firewall_group_id)
        if ip_type is not None:
            pulumi.set(__self__, "ip_type", ip_type)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
        if subnet_size is not None:
            pulumi.set(__self__, "subnet_size", subnet_size)

    @property
    @pulumi.getter(name="firewallGroupId")
    def firewall_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The firewall group that the firewall rule will belong to.
        """
        return pulumi.get(self, "firewall_group_id")

    @firewall_group_id.setter
    def firewall_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "firewall_group_id", value)

    @property
    @pulumi.getter(name="ipType")
    def ip_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        """
        return pulumi.get(self, "ip_type")

    @ip_type.setter
    def ip_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_type", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        A simple note for a given firewall rule
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        TCP/UDP only. This field can be a specific port or a colon separated port range.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        """
        Possible values ("", cloudflare)
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input[str]]:
        """
        IP address that you want to define for this firewall rule.
        """
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet", value)

    @property
    @pulumi.getter(name="subnetSize")
    def subnet_size(self) -> Optional[pulumi.Input[int]]:
        """
        The number of bits for the subnet in CIDR notation. Example: 32.
        """
        return pulumi.get(self, "subnet_size")

    @subnet_size.setter
    def subnet_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "subnet_size", value)
# NOTE: auto-generated Pulumi resource wrapper for the Vultr firewall-rule
# API. Do not hand-edit the generated call sequences; regenerate instead.
class FirewallRule(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 firewall_group_id: Optional[pulumi.Input[str]] = None,
                 ip_type: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 subnet: Optional[pulumi.Input[str]] = None,
                 subnet_size: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Provides a Vultr Firewall Rule resource. This can be used to create, read, modify, and delete Firewall rules.

        ## Example Usage

        Create a Firewall Rule

        ```python
        import pulumi
        import pulumi_vultr as vultr

        my_firewallgroup = vultr.FirewallGroup("myFirewallgroup", description="base firewall")
        my_firewallrule = vultr.FirewallRule("myFirewallrule",
            firewall_group_id=my_firewallgroup.id,
            protocol="tcp",
            ip_type="v4",
            subnet="0.0.0.0",
            subnet_size=0,
            port="8090",
            notes="my firewall rule")
        ```

        ## Import

        Firewall Rules can be imported using the Firewall Group `ID` and Firewall Rule `ID`, e.g.

        ```sh
         $ pulumi import vultr:index/firewallRule:FirewallRule my_rule b6a859c5-b299-49dd-8888-b1abbc517d08,1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
        :param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        :param pulumi.Input[str] notes: A simple note for a given firewall rule
        :param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
        :param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        :param pulumi.Input[str] source: Possible values ("", cloudflare)
        :param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
        :param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: FirewallRuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Vultr Firewall Rule resource. This can be used to create, read, modify, and delete Firewall rules.

        ## Example Usage

        Create a Firewall Rule

        ```python
        import pulumi
        import pulumi_vultr as vultr

        my_firewallgroup = vultr.FirewallGroup("myFirewallgroup", description="base firewall")
        my_firewallrule = vultr.FirewallRule("myFirewallrule",
            firewall_group_id=my_firewallgroup.id,
            protocol="tcp",
            ip_type="v4",
            subnet="0.0.0.0",
            subnet_size=0,
            port="8090",
            notes="my firewall rule")
        ```

        ## Import

        Firewall Rules can be imported using the Firewall Group `ID` and Firewall Rule `ID`, e.g.

        ```sh
         $ pulumi import vultr:index/firewallRule:FirewallRule my_rule b6a859c5-b299-49dd-8888-b1abbc517d08,1
        ```

        :param str resource_name: The name of the resource.
        :param FirewallRuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: a single FirewallRuleArgs
        # bundle, or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(FirewallRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 firewall_group_id: Optional[pulumi.Input[str]] = None,
                 ip_type: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 subnet: Optional[pulumi.Input[str]] = None,
                 subnet_size: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = FirewallRuleArgs.__new__(FirewallRuleArgs)

            # Required properties must be present unless we are adopting an
            # already-existing resource (opts.urn is set).
            if firewall_group_id is None and not opts.urn:
                raise TypeError("Missing required property 'firewall_group_id'")
            __props__.__dict__["firewall_group_id"] = firewall_group_id
            if ip_type is None and not opts.urn:
                raise TypeError("Missing required property 'ip_type'")
            __props__.__dict__["ip_type"] = ip_type
            __props__.__dict__["notes"] = notes
            __props__.__dict__["port"] = port
            if protocol is None and not opts.urn:
                raise TypeError("Missing required property 'protocol'")
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["source"] = source
            if subnet is None and not opts.urn:
                raise TypeError("Missing required property 'subnet'")
            __props__.__dict__["subnet"] = subnet
            if subnet_size is None and not opts.urn:
                raise TypeError("Missing required property 'subnet_size'")
            __props__.__dict__["subnet_size"] = subnet_size
        super(FirewallRule, __self__).__init__(
            'vultr:index/firewallRule:FirewallRule',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            firewall_group_id: Optional[pulumi.Input[str]] = None,
            ip_type: Optional[pulumi.Input[str]] = None,
            notes: Optional[pulumi.Input[str]] = None,
            port: Optional[pulumi.Input[str]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            source: Optional[pulumi.Input[str]] = None,
            subnet: Optional[pulumi.Input[str]] = None,
            subnet_size: Optional[pulumi.Input[int]] = None) -> 'FirewallRule':
        """
        Get an existing FirewallRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
        :param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        :param pulumi.Input[str] notes: A simple note for a given firewall rule
        :param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
        :param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        :param pulumi.Input[str] source: Possible values ("", cloudflare)
        :param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
        :param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _FirewallRuleState.__new__(_FirewallRuleState)

        __props__.__dict__["firewall_group_id"] = firewall_group_id
        __props__.__dict__["ip_type"] = ip_type
        __props__.__dict__["notes"] = notes
        __props__.__dict__["port"] = port
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["source"] = source
        __props__.__dict__["subnet"] = subnet
        __props__.__dict__["subnet_size"] = subnet_size
        return FirewallRule(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="firewallGroupId")
    def firewall_group_id(self) -> pulumi.Output[str]:
        """
        The firewall group that the firewall rule will belong to.
        """
        return pulumi.get(self, "firewall_group_id")

    @property
    @pulumi.getter(name="ipType")
    def ip_type(self) -> pulumi.Output[str]:
        """
        The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
        """
        return pulumi.get(self, "ip_type")

    @property
    @pulumi.getter
    def notes(self) -> pulumi.Output[Optional[str]]:
        """
        A simple note for a given firewall rule
        """
        return pulumi.get(self, "notes")

    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[str]]:
        """
        TCP/UDP only. This field can be a specific port or a colon separated port range.
        """
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[str]:
        """
        The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
        """
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter
    def source(self) -> pulumi.Output[Optional[str]]:
        """
        Possible values ("", cloudflare)
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def subnet(self) -> pulumi.Output[str]:
        """
        IP address that you want to define for this firewall rule.
        """
        return pulumi.get(self, "subnet")

    @property
    @pulumi.getter(name="subnetSize")
    def subnet_size(self) -> pulumi.Output[int]:
        """
        The number of bits for the subnet in CIDR notation. Example: 32.
        """
        return pulumi.get(self, "subnet_size")
| StarcoderdataPython |
68810 | # ----------------------------------------------------------------------
# inv.ResourceGroup tests
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.inv.models.resourcegroup import ResourceGroup
def test_clean_leagacy_id():
    """Verify the migration removed every ``_legacy_id`` field from the collection."""
    query = {"_legacy_id": {"$exists": True}}
    leftover = ResourceGroup._get_collection().find_one(query)
    assert leftover is None, "_legacy_id field has been left by migration"
| StarcoderdataPython |
# CLI message templates, keyed by command-line option. For each key,
# index 0 (and sometimes 1) is the INFO template filled with str.format,
# followed by the ERROR text and a usage example shown on bad input.
logs = {
    # --img: input image path
    "img": [
        "[INFO] Loading input image: {}",
        "[ERROR] On '{}': you need to pass the image path!",
        "\te.g. --img='Pictures/notNord.jpg'"
    ],
    # --out: output image name
    "out": [
        "[INFO] Set output image name: {}",
        "[ERROR] On '{}': no output filename specify!",
        "\te.g. --out='Pictures/nord.jpg'"
    ],
    # --no-average: disable average-pixel optimization (takes no value)
    "navg": [
        "[INFO] No average pixels selected for algorithm optimization",
        "[ERROR] On '{}': the average pixels do not take any values!",
        "\te.g. --no-average"
    ],
    # --pixels-area: width (index 0) and height (index 1) templates
    "pxls": [
        "[INFO] Set up pixels width area: {}",
        "[INFO] Set up pixels height area: {}",
        "[ERROR] On '{}': no value specify within the area pixels!",
        "\te.g. --pixels-area=2 or -pa=-4,-3"
    ],
    # --blur: enable blur (takes no value)
    "blur": [
        "[INFO] Blur enabled",
        "[ERROR] On '{}': the blur argument do not take any values!",
        "\te.g. --blur"
    ],
    # palette selection feedback (check mark / cross / warnings)
    "pals": [
        "[INFO] Use all color set: {}",
        "[INFO] Use palette set: {}",
        "\t {} \u2713",
        "\t {} \u2718",
        "[WARNING] No theme specified, use default Nord theme",
        "[WARNING] No set found for: {} \u2753",
    ],
    # terminal error summary
    "err": [
        "[INFO] No image created, solve all ERROR and retry."
    ]
}
10027 | """
Pycovjson - Command line interface
Author: rileywilliams
Version: 0.1.0
"""
import argparse
from pycovjson.write import Writer
from pycovjson.read_netcdf import NetCDFReader as Reader
def main():
    """
    Command line interface for pycovjson - Converts Scientific Data Formats
    into CovJSON and saves to disk.

    :argument -i: Input file path.
    :argument -o: Output file name.
    :argument -t: Use Tiling.
    :argument -v: Which variable to populate coverage with.
    :argument -s: [tile shape]: Tile shape.
    :argument -n: Use interactive mode.
    :argument -u: MongoDB URL
    """
    parser = argparse.ArgumentParser(
        description='Convert Scientific Data Formats into CovJSON.')
    parser.add_argument('-i', '--input', dest='inputfile',
                        help='Name of input file', required=True)
    parser.add_argument('-o', '--output', dest='outputfile',
                        help='Name and location of output file',
                        default='coverage.covjson')
    parser.add_argument('-t', '--tiled', action='store_true', help='Apply tiling')
    parser.add_argument('-s', '--shape', nargs='+',
                        help='Tile shape, list', type=int)
    parser.add_argument('-v', dest='variable',
                        help='Variable to populate coverage with', required=True)
    parser.add_argument('-n', '--interactive', action='store_true',
                        help='Enter interactive mode')
    parser.add_argument('-u', '--endpoint_url', dest='endpoint_url', nargs=1,
                        help='MongoDB endpoint for CovJSON persistence')
    args = parser.parse_args()
    inputfile = args.inputfile
    # argparse guarantees the default here, so outputfile is never None;
    # the original dead `outputfile = outputfile.default` branch (which
    # would have raised AttributeError) has been removed.
    outputfile = args.outputfile
    variable = args.variable
    tiled = args.tiled
    tile_shape = args.shape
    interactive = args.interactive
    endpoint_url = args.endpoint_url

    if interactive:
        # BUG FIX: input() takes a single prompt string; the original
        # passed two arguments, which raises TypeError.
        # NOTE(review): get_axis looks like an instance method — confirm
        # against pycovjson.read_netcdf.NetCDFReader.
        axis = input('Which Axis? {}: '.format(Reader(inputfile).get_axis(variable)))

    # BUG FIX: args.shape is None (not []) when -s is omitted, so the
    # original `len(tile_shape) == 0` raised TypeError.
    if tiled and not tile_shape:
        reader = Reader(inputfile)
        shape_list = reader.get_shape(variable)
        dims = reader.get_dimensions(variable)
        print(list(zip(dims, shape_list)))
        tile_shape = input(
            'Enter the shape tile shape as a list of comma separated integers')
        tile_shape = list(map(int, tile_shape.split(',')))
        print(tile_shape)

    Writer(outputfile, inputfile, [variable],
           tiled=tiled, tile_shape=tile_shape, endpoint_url=endpoint_url).write()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3268451 | <gh_stars>0
import boto3
dynamoDB = boto3.resource('dynamodb')
table = dynamoDB.Table('users')
def lambda_handler(event, context):
    """Persist an API Gateway request's query-string parameters as a user item.

    Expects ``event['queryStringParameters']`` to contain at least an
    ``email`` key; the whole parameter dict is stored as one DynamoDB item
    in the module-level ``users`` table.

    :param event: API Gateway proxy event.
    :param context: Lambda context object (unused).
    :return: API Gateway proxy response with CORS headers.
    """
    print(event)  # kept for CloudWatch debugging
    params = event['queryStringParameters']
    print("This is email: " + params['email'])
    # Store the full query-string dict as a single item; removed the
    # unused `data` variable and the commented-out alternate return.
    table.put_item(Item=params)
    return {
        'statusCode': 200,
        'headers': {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*"
        },
        'body': 'Registration successful'
    }
182513 | <gh_stars>1-10
import nltk
from nltk.corpus import stopwords
import heapq
nltk.download('stopwords')
nltk.download('punkt')
def nltk_summarizer(raw_text):
    """Return an extractive summary (up to 7 sentences) of *raw_text*.

    Non-stopword tokens are counted, frequencies are normalized by the
    maximum, and each sentence shorter than 30 words is scored by the sum
    of its token frequencies; the 7 highest-scoring sentences are joined.

    :param raw_text: Text to summarize.
    :return: Summary string ('' when the text has no scorable words).
    """
    stop_words = set(stopwords.words("english"))
    word_frequencies = {}
    for word in nltk.word_tokenize(raw_text):
        if word not in stop_words:
            word_frequencies[word] = word_frequencies.get(word, 0) + 1

    # BUG FIX: empty or stopword-only input previously raised ValueError
    # from max() on an empty sequence.
    if not word_frequencies:
        return ''

    maximum_frequency = max(word_frequencies.values())
    for word in word_frequencies:
        word_frequencies[word] = word_frequencies[word] / maximum_frequency

    # NOTE(review): frequencies are keyed on original-case tokens while
    # scoring lowercases the sentence tokens, so capitalized words may not
    # match their own frequency entry — behavior kept as in the original.
    sentence_scores = {}
    for sent in nltk.sent_tokenize(raw_text):
        if len(sent.split(' ')) >= 30:
            continue  # skip long sentences, as the original did
        for word in nltk.word_tokenize(sent.lower()):
            if word in word_frequencies:
                sentence_scores[sent] = (
                    sentence_scores.get(sent, 0) + word_frequencies[word])

    summary_sentences = heapq.nlargest(7, sentence_scores, key=sentence_scores.get)
    return ' '.join(summary_sentences)
| StarcoderdataPython |
169627 | from src.Shared.Helpers.Transformer import Transformer
from src.User.Domain.Entities.User import User
class UserTransformer(Transformer):
    """Serializes a :class:`User` entity into a plain dict for API responses."""

    def __init__(self):
        # BUG FIX: the original evaluated bare ``super()`` (a no-op
        # expression), so Transformer.__init__ was never called.
        # NOTE(review): confirm Transformer.__init__ takes no arguments.
        super().__init__()

    def transform(self, user: User):
        """Return a JSON-serializable representation of *user*.

        :param user: The User entity to serialize.
        :return: dict with the user's public fields, roles and permissions.
        """
        return {
            "id": str(user.pk),
            "firstName": user.firstName,
            "lastName": user.lastName,
            "email": user.email,
            "birthday": user.birthday,
            "documentType": user.documentType,
            "documentNumber": user.documentNumber,
            "gender": user.gender,
            "phone": user.phone,
            "country": user.country,
            "address": user.address,
            "enable": user.enable,
            "roles": user.getRoles(),
            "permissions": user.permissions
        }
| StarcoderdataPython |
18707 | import os
import sys
import json
import argparse
import numpy as np
sys.path.append('Camera_Intrinsics_API/')
from get_camera_intrinsics import CameraIntrinsicsHelper
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        type=str,
        default='data/videos_sfm/',
        help="COLMAP output folder of videos",
    )
    parser.add_argument(
        "--input_dir_greedy",
        type=str,
        default='data/videos_sfm_greedy/',
        help="Folder for the COLMAP outputs - greedy.",
    )
    parser.add_argument(
        "--annotation_dir",
        type=str,
        default='data/v1/annotations/',
        help="annotation folder. Must contain the vq3d_<split>.json files.",
    )
    parser.add_argument(
        "--output_filename",
        type=str,
        default='data/v1/scan_to_intrinsics.json',
    )
    args = parser.parse_args()

    # Map each video uid to its scan uid using the train/val annotations.
    dataset = {}
    for split in ['train', 'val']:
        with open(os.path.join(args.annotation_dir,
                               f'vq3d_{split}.json'), 'r') as fh:
            annotations = json.load(fh)
        for video in annotations['videos']:
            dataset[video['video_uid']] = video['scan_uid']

    helper = CameraIntrinsicsHelper()
    datadir = args.input_dir
    datadir_2 = args.input_dir_greedy
    cpt = 0  # videos with no COLMAP camera file in either folder

    # Collect (f, cx, cy, k1, k2) tuples per scan and per resolution.
    # The two original branches duplicated the same accumulation code;
    # they are merged here (behavior unchanged).
    all_intrinsics = {}
    for video_uid in os.listdir(datadir):
        scan_uid = dataset[video_uid]
        # Prefer the standard SfM output, fall back to the greedy run.
        intrinsic_txt = os.path.join(datadir, video_uid,
                                     'sparse', '0', 'cameras.txt')
        if not os.path.isfile(intrinsic_txt):
            intrinsic_txt = os.path.join(datadir_2, video_uid,
                                         'sparse', '0', 'cameras.txt')
        if not os.path.isfile(intrinsic_txt):
            cpt += 1
            continue
        intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
        token = (intrinsics['width'], intrinsics['height'])
        all_intrinsics.setdefault(scan_uid, {}).setdefault(token, []).append(
            (
                intrinsics['f'],
                intrinsics['cx'],
                intrinsics['cy'],
                intrinsics['k1'],
                intrinsics['k2'],
            )
        )

    # Aggregate per scan/resolution using the median of each parameter.
    outputs = {}
    for scan_uid, per_resolution in all_intrinsics.items():
        print(' ')
        print('Scan uid: ', scan_uid)
        outputs[scan_uid] = {}
        for resolution, values in per_resolution.items():
            print(' -- resolution: ', resolution)
            resolution_str = str(resolution)
            outputs[scan_uid][resolution_str] = {
                'f': np.median([float(i[0]) for i in values]),
                'cx': np.median([float(i[1]) for i in values]),
                'cy': np.median([float(i[2]) for i in values]),
                'k1': np.median([float(i[3]) for i in values]),
                'k2': np.median([float(i[4]) for i in values]),
            }
            for i in values:
                print(' -- -- -- : ', i)
            print(' ')
            print(' -- -- -- : ',
                  outputs[scan_uid][resolution_str]['f'],
                  outputs[scan_uid][resolution_str]['cx'],
                  outputs[scan_uid][resolution_str]['cy'],
                  outputs[scan_uid][resolution_str]['k1'],
                  outputs[scan_uid][resolution_str]['k2'],
                  )

    # BUG FIX: the original referenced an undefined name `output_filename`
    # (NameError at the very end of the run) and leaked the file handle.
    with open(args.output_filename, 'w') as fh:
        json.dump(outputs, fh)
1659828 | <filename>src/genie/libs/parser/iosxe/tests/test_show_install.py<gh_stars>1-10
#!/bin/env python
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError,\
SchemaMissingKeyError
from genie.libs.parser.iosxe.show_install import (ShowInstallSummary)
class TestShowInstallSummary(unittest.TestCase):
    """Unit tests for the ShowInstallSummary parser (IOSXE `show install summary`)."""

    # Mocked device and canned CLI outputs used as parser fixtures.
    dev = Device(name='dev')
    empty_output = {'execute.return_value': ' '}

    # Expected parse of a single-RP output with an active abort timer.
    golden_parsed_output = {
        'location': {
            'R0': {
                'auto_abort_timer': 'active on install_activate',
                'pkg_state': {
                    1: {
                        'filename_version': 'bootflash:utah.bm.smu.may15.bin',
                        'state': 'U',
                        'type': 'SMU',
                    },
                    2: {
                        'filename_version': '10.69.1.0.66982',
                        'state': 'C',
                        'type': 'IMG',
                    },
                },
                'time_before_rollback': '01:49:42',
            },
        },
    }

    golden_output = {'execute.return_value': '''\
    Router#show install summary
    [ R0 ] Installed Package(s) Information:
    State (St): I - Inactive, U - Activated & Uncommitted,
                C - Activated & Committed, D - Deactivated & Uncommitted
    --------------------------------------------------------------------------------
    Type  St   Filename/Version
    --------------------------------------------------------------------------------
    SMU   U    bootflash:utah.bm.smu.may15.bin
    IMG   C    10.69.1.0.66982

    --------------------------------------------------------------------------------
    Auto abort timer: active on install_activate, time before rollback - 01:49:42
    --------------------------------------------------------------------------------
    '''}

    # Expected parse of a dual-RP output with an inactive abort timer.
    golden_parsed_output_2 = {
        'location': {
            'R0 R1': {
                'auto_abort_timer': 'inactive',
                'pkg_state': {
                    1: {
                        'filename_version': '10.106.1.0.277',
                        'state': 'I',
                        'type': 'IMG',
                    },
                    2: {
                        'filename_version': '10.106.1.0.277',
                        'state': 'C',
                        'type': 'IMG',
                    },
                },
            },
        },
    }

    golden_output_2 = {'execute.return_value': '''\
    Router#show install summary
    [ R0 R1 ] Installed Package(s) Information:
    State (St): I - Inactive, U - Activated & Uncommitted,
                C - Activated & Committed, D - Deactivated & Uncommitted
    --------------------------------------------------------------------------------
    Type  St   Filename/Version
    --------------------------------------------------------------------------------
    IMG   I    10.106.1.0.277
    IMG   C    10.106.1.0.277

    --------------------------------------------------------------------------------
    Auto abort timer: inactive
    --------------------------------------------------------------------------------
    '''}

    def test_empty(self):
        # An effectively-empty CLI output must raise SchemaEmptyParserError.
        self.dev = Mock(**self.empty_output)
        obj = ShowInstallSummary(device=self.dev)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden(self):
        # Single-RP fixture parses into golden_parsed_output.
        self.dev = Mock(**self.golden_output)
        obj = ShowInstallSummary(device=self.dev)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)

    def test_golden_2(self):
        # Dual-RP fixture parses into golden_parsed_output_2.
        self.dev = Mock(**self.golden_output_2)
        obj = ShowInstallSummary(device=self.dev)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output_2)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3257215 | <reponame>kevinnguyenhoang91/PodToBUILD
import json
import os
def render_podfile(pods):
    """Print a CocoaPods Podfile declaring the given pods for a build test."""
    # Chameleon is a macOS pod; this Podfile targets an iOS app, so it is
    # excluded (podspecs should be read instead).
    excluded = {"Chameleon"}
    print("project 'PodsHost/PodsHost.xcodeproj'")
    print("target 'ios-app' do")
    for name in pods:
        if name not in excluded:
            print(' pod "{}"'.format(name))
    print("end")
def render_buildfile(pods):
    """Print a Bazel objc_library target depending on the given vendored pods."""
    excluded = {
        # macOS pod; this build targets iOS.
        "Chameleon",
        # Malformed in the podfile deps.
        "lottie-ios",
        "R.swift",
        "R.swift.Library",
        "ReactiveCocoa",
        "SQLite.swift",
    }
    print("objc_library(name='all', deps=[")
    for name in pods:
        if name not in excluded:
            print('"//Vendor/{}",'.format(name))
    print("])")
# Notes:
# dump these urls into ~/Library/Caches/CocoaPods/
# prime the cocoapods search cache ( pod search x )?
# objc_url = 'https://api.github.com/search/repositories?q=language:objc&sort=stars&order=desc&per_page=100'
# swift_url = 'https://api.github.com/search/repositories?q=language:swift&sort=stars&order=desc&per_page=100'
def main():
    """Build a BUILD file target from the top starred pods.

    Reads two cached files from ~/Library/Caches/CocoaPods: a GitHub
    "top repositories" search result and the CocoaPods master search
    index, keeps repositories whose name appears in its own search
    terms (i.e. is a real pod), and renders up to ``max_results`` of
    them as a Bazel BUILD file.
    """
    # GitHub search response (see the commented-out URLs above).
    with open(os.environ["HOME"] + "/Library/Caches/CocoaPods/Top100SwiftPods.json") as top_pods:
        repo_res = json.load(top_pods)
    # This is a value of search terms keyed by pods
    with open(os.environ["HOME"] + "/Library/Caches/CocoaPods/search_index.json") as all_pods:
        pods_json = json.load(all_pods)
    pod_repo = pods_json["master"]
    top_pods = []
    # This returns the max results from a github search query
    max_results = 50
    for repo in repo_res["items"]:
        name = repo["name"]
        # Find a search term including the name
        search = pod_repo.get(name, None)
        if not search:
            continue
        # Find a pod of the name
        if not name in search:
            continue
        top_pods.append(name)
        if len(top_pods) == max_results:
            break
    #render_podfile(top_pods)
    render_buildfile(top_pods)

main()
| StarcoderdataPython |
3271600 | # Copyright (c) 2020, <NAME>.
# Distributed under the MIT License. See LICENSE for more info.
"""A module defining plots for PCA variance."""
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from psynlig.colors import generate_colors
def _create_figure_if_needed(axi, figsize=None):
    """Return ``(fig, axi)``, creating a new figure only when *axi* is None."""
    if axi is not None:
        return None, axi
    extra = {} if figsize is None else {'figsize': figsize}
    fig, axi = plt.subplots(nrows=1, ncols=1, constrained_layout=True, **extra)
    return fig, axi
def pca_explained_variance(pca, axi=None, figsize=None, **kwargs):
    """Plot cumulative explained variance versus number of PCA components.

    Parameters
    ----------
    pca : object like :class:`sklearn.decomposition._pca.PCA`
        The fitted PCA model providing the variance ratios.
    axi : object like :class:`matplotlib.axes.Axes`, optional
        Axis to draw into; when not given, a new figure/axis is created.
    figsize : tuple of ints, optional
        Size of the figure, when one is created here.
    kwargs : dict, optional
        Extra keyword arguments forwarded to the plot call.

    Returns
    -------
    fig : object like :class:`matplotlib.figure.Figure`
        The created figure, or None when an axis was supplied.
    axi : object like :class:`matplotlib.axes.Axes`
        The axis containing the plot.

    """
    fig, axi = _create_figure_if_needed(axi, figsize=figsize)
    cumulative = [0] + list(np.cumsum(pca.explained_variance_ratio_))
    axi.plot(range(len(cumulative)), cumulative, **kwargs)
    axi.axhline(y=1, color='black', ls=':')
    axi.set(xlabel='Number of components',
            ylabel='Explained variance (fraction)')
    axi.xaxis.set_major_locator(MaxNLocator(integer=True))
    return fig, axi
def pca_residual_variance(pca, axi=None, figsize=None, **kwargs):
    """Plot residual (unexplained) variance versus number of PCA components.

    Parameters
    ----------
    pca : object like :class:`sklearn.decomposition._pca.PCA`
        The fitted PCA model providing the variance ratios.
    axi : object like :class:`matplotlib.axes.Axes`, optional
        Axis to draw into; when not given, a new figure/axis is created.
    figsize : tuple of ints, optional
        Size of the figure, when one is created here.
    kwargs : dict, optional
        Extra keyword arguments forwarded to the plot call.

    Returns
    -------
    fig : object like :class:`matplotlib.figure.Figure`
        The created figure, or None when an axis was supplied.
    axi : object like :class:`matplotlib.axes.Axes`
        The axis containing the plot.

    """
    fig, axi = _create_figure_if_needed(axi, figsize=figsize)
    residual = 1 - np.array([0] + list(np.cumsum(pca.explained_variance_ratio_)))
    axi.axhline(y=0, color='black', ls=':')
    axi.plot(range(len(residual)), residual, **kwargs)
    axi.set(xlabel='Number of components',
            ylabel='Residual variance (fraction)')
    axi.xaxis.set_major_locator(MaxNLocator(integer=True))
    return fig, axi
def pca_scree(pca, axi=None, figsize=None, **kwargs):
    """Make a scree plot: eigenvalues versus principal-component number.

    Parameters
    ----------
    pca : object like :class:`sklearn.decomposition._pca.PCA`
        The fitted PCA model providing the eigenvalues.
    axi : object like :class:`matplotlib.axes.Axes`, optional
        Axis to draw into; when not given, a new figure/axis is created.
    figsize : tuple of ints, optional
        Size of the figure, when one is created here.
    kwargs : dict, optional
        Extra keyword arguments forwarded to the plot call.

    Returns
    -------
    fig : object like :class:`matplotlib.figure.Figure`
        The created figure, or None when an axis was supplied.
    axi : object like :class:`matplotlib.axes.Axes`
        The axis containing the plot.

    """
    fig, axi = _create_figure_if_needed(axi, figsize=figsize)
    eigvals = pca.explained_variance_
    components = range(1, len(eigvals) + 1)
    axi.plot(components, eigvals, **kwargs)
    axi.set(xlabel='Principal component',
            ylabel='Eigenvalue')
    axi.xaxis.set_major_locator(MaxNLocator(integer=True))
    axi.set_xlim(min(components) - 0.25, max(components) + 0.25)
    return fig, axi
def pca_explained_variance_bar(pca, axi=None, figsize=None, **kwargs):
    """Bar-plot the explained variance of each principal component.

    Parameters
    ----------
    pca : object like :class:`sklearn.decomposition._pca.PCA`
        The fitted PCA model providing the variance ratios.
    axi : object like :class:`matplotlib.axes.Axes`, optional
        Axis to draw into; when not given, a new figure/axis is created.
    figsize : tuple of ints, optional
        Size of the figure, when one is created here.
    kwargs : dict, optional
        Extra keyword arguments forwarded to the bar call.

    Returns
    -------
    fig : object like :class:`matplotlib.figure.Figure`
        The created figure, or None when an axis was supplied.
    axi : object like :class:`matplotlib.axes.Axes`
        The axis containing the plot.

    """
    fig, axi = _create_figure_if_needed(axi, figsize=figsize)
    fractions = pca.explained_variance_ratio_
    labels = ['PC{}'.format(i + 1) for i in range(len(fractions))]
    positions = range(len(fractions))
    axi.bar(positions, fractions, **kwargs)
    axi.set_xticks(positions)
    axi.set_xticklabels(labels, rotation='vertical')
    axi.set(
        xlabel='Principal component',
        ylabel='Explained variance (fraction) per component',
    )
    return fig, axi
def pca_explained_variance_pie(pca, axi=None, figsize=None,
                               cmap=None, tol=1.0e-3):
    """Show the explained variance of the PCA components in a donut chart.

    Parameters
    ----------
    pca : object like :class:`sklearn.decomposition._pca.PCA`
        The fitted PCA model providing the variance ratios.
    axi : object like :class:`matplotlib.axes.Axes`, optional
        Axis to draw into; when not given, a new figure/axis is created.
    figsize : tuple of ints, optional
        Size of the figure, when one is created here.
    cmap : string or object like :class:`matplotlib.colors.Colormap`, optional
        The color map used to generate wedge colors.
    tol : float, optional
        Unexplained variance below this tolerance is not drawn as its
        own "Not explained" wedge.

    Returns
    -------
    fig : object like :class:`matplotlib.figure.Figure`
        The created figure, or None when an axis was supplied.
    axi : object like :class:`matplotlib.axes.Axes`
        The axis containing the plot.

    """
    fig, axi = _create_figure_if_needed(axi, figsize=figsize)
    fractions = list(pca.explained_variance_ratio_)
    labels = ['PC{}'.format(i + 1) for i in range(len(fractions))]
    unexplained = 1 - sum(fractions)
    if unexplained > tol:
        labels.append('Not explained')
        fractions.append(unexplained)
    colors = generate_colors(len(labels), cmap=cmap)
    axi.pie(
        fractions,
        labels=labels,
        colors=colors[:len(labels)],
        wedgeprops=dict(width=0.5, edgecolor='w'),
        textprops={'fontsize': 'x-large'},
        normalize=False,
    )
    axi.set(aspect='equal')
    return fig, axi
| StarcoderdataPython |
8847 | <gh_stars>0
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
from datetime import datetime
import sys
import argparse
import torch
import os
from inspect import currentframe, getframeinfo
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
from shapes import get_bounds
def pretty_log(log_string):
    """Print *log_string* prefixed with a timestamp and the caller's file:line.

    The caller's location is taken from the parent stack frame; stdout is
    flushed immediately so interleaved subprocess logs stay in order.
    """
    caller = currentframe().f_back
    source = getframeinfo(caller).filename.split("/")[-1]
    stamp = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
    print("{} {}:{} {}".format(stamp, source, caller.f_lineno, log_string))
    sys.stdout.flush()
## Train Fxns ##
def get_base_train_parser():
    """Create the argument parser shared by all geoscorer training scripts.

    Covers device selection, data sizing, model dimensions and optimizer
    settings; callers extend it with task-specific flag groups.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--cuda", type=int, default=1, help="0 for cpu")
    p.add_argument("--batchsize", type=int, default=64, help="batchsize")
    p.add_argument("--dataset", default="shapes", help="shapes/segments/both")
    p.add_argument(
        "--epochsize", type=int, default=1000, help="number of examples in an epoch"
    )
    p.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
    p.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
    p.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
    p.add_argument("--num_layers", type=int, default=3, help="num layers")
    p.add_argument(
        "--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
    )
    p.add_argument(
        "--num_words", type=int, default=256, help="number of words for the blockid embeds"
    )
    p.add_argument("--lr", type=float, default=0.1, help="step size for net")
    p.add_argument(
        "--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
    )
    p.add_argument("--momentum", type=float, default=0.0, help="momentum")
    p.add_argument("--checkpoint", default="", help="where to save model")
    p.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
    return p
def add_dataset_flags(parser):
    """Add dataset-selection flags (ratios, block-id use, cube sizing) to *parser*.

    NOTE(review): argparse ``type=bool`` treats any non-empty string as True
    (``--useid False`` still yields True); confirm callers only toggle these
    programmatically or pass empty strings for False.
    """
    parser.add_argument(
        "--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
    )
    parser.add_argument("--useid", type=bool, default=False, help="use blockid")
    parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
    parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center")
    parser.add_argument(
        "--min_seg_size", type=int, default=6, help="min seg size for seg data type"
    )
    parser.add_argument(
        "--use_saved_data",
        type=bool,
        default=False,
        help="use preparsed data for this min_seg_size",
    )
def add_directional_flags(parser):
    """Add flags controlling the directional/viewer-aware model variants.

    NOTE(review): as in ``add_dataset_flags``, ``type=bool`` parses any
    non-empty string as True — these switches are not safe to set to False
    from the command line with a literal "False".
    """
    parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
    parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
    parser.add_argument(
        "--seg_direction_net", type=bool, default=False, help="use segdirnet module"
    )
    parser.add_argument(
        "--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg"
    )
    parser.add_argument(
        "--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg"
    )
    parser.add_argument(
        "--seg_use_direction", type=bool, default=False, help="use direction in seg"
    )
    parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
    parser.add_argument(
        "--cont_use_direction", type=bool, default=False, help="use direction in context"
    )
    parser.add_argument(
        "--cont_use_xyz_from_viewer_look",
        type=bool,
        default=False,
        help="use xyz position relative to viewer look in context emb",
    )
def get_dataloader(dataset, opts, collate_fxn):
    """Build a shuffling, batch-dropping DataLoader over *dataset*.

    ``opts`` must provide "batchsize" and "num_workers".  Each worker
    re-seeds numpy from the torch worker seed so workers do not share an
    identical numpy random stream.
    """

    def _seed_numpy(worker_id):
        np.random.seed(torch.initial_seed() % (2 ** 32))

    return torch.utils.data.DataLoader(
        dataset,
        batch_size=opts["batchsize"],
        shuffle=True,
        pin_memory=True,
        drop_last=True,
        num_workers=opts["num_workers"],
        worker_init_fn=_seed_numpy,
        collate_fn=collate_fxn,
    )
def to_cuda(list_modules):
    """Move every module in *list_modules* onto the default CUDA device, in place."""
    for module in list_modules:
        module.cuda()
def multitensor_collate_fxn(x):
    """Collate a batch of per-example tensor lists.

    *x* is a list of BATCHSIZE lists, each holding the same D tensors per
    slot; the result is a list of D tensors, each gaining a leading batch
    dimension of size BATCHSIZE.
    """
    # zip(*x) regroups the i-th tensor of every example; stacking adds the
    # batch axis (equivalent to unsqueeze(0) + cat).
    return [torch.stack(group) for group in zip(*x)]
## 3D Utils ##
def get_side_lengths(bounds):
    """Return the inclusive [x, y, z] side lengths of a bounding box.

    ``bounds`` is [min_x, max_x, min_y, max_y, min_z, max_z].
    """
    lows = bounds[0::2]
    highs = bounds[1::2]
    return [hi - lo + 1 for lo, hi in zip(lows, highs)]
def coord_to_index(coord, sl):
    """Flatten a 3D coordinate inside an sl-sided cube to a 1D index."""
    # Horner form of x*sl*sl + y*sl + z.
    return (coord[0] * sl + coord[1]) * sl + coord[2]
def index_to_coord(index, sl):
    """Invert ``coord_to_index``: map a flat index back to [x, y, z]."""
    x, remaining = divmod(index, sl * sl)
    y, z = divmod(remaining, sl)
    return [x, y, z]
def shift_subsegment_corner(S):
    """Translate segment *S* so its bounding-box corner sits at the origin.

    *S* is a list of ``((x, y, z), (block_id, ?))`` tuples.  Returns the
    shifted segment in the same format together with the shift vector that
    was applied.
    """
    bounds = get_bounds(S)
    shift = [-bounds[0], -bounds[2], -bounds[4]]
    shifted = [
        (tuple(c + d for c, d in zip(block[0], shift)), block[1])
        for block in S
    ]
    return shifted, shift
def subset_and_scale_3d(init_array, mins, maxs, scale=1):
    """Return the [mins, maxs) sub-cube of *init_array*, multiplied by *scale*."""
    region = init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]]
    return scale * region
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
    """Paste segment *seg* into a copy of *context* at offset *seg_shift*.

    The segment values (optionally scaled by *seg_mult*) are added onto the
    context region they land in; portions of the segment falling outside the
    context are clipped away.  Returns the combined tensor; *context* itself
    is not modified.

    NOTE(review): the constants below assume an 8x8x8 segment inside a
    32x32x32 context (torch tensors, given the ``.clone()``) — confirm
    against the callers before reusing elsewhere.
    """
    completed_context = context.clone()
    # Calculate the region to copy over, sometimes the segment
    # falls outside the range of the context bounding box
    c_mins = [int(i) for i in seg_shift]
    c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift]
    s_mins = [0 for i in range(3)]
    # If the edge of the segment goes past the edge of the context (ss + 8 > 32),
    # remove the extra from the segment.
    s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift]
    seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult)
    context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1)
    completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = (
        seg_to_add + context_subset
    )
    return completed_context
def get_vector(start, end):
    """Return the displacement from *start* to *end* (``end - start``)."""
    return end - start
def get_random_viewer_info(sl):
    """Sample random viewer position and look points inside an sl-sided cube.

    Both are integer torch tensors with coordinates in [0, sl - 1].  If the
    two points coincide, the look point's x coordinate is nudged by one so
    they always differ, while staying inside the valid range.
    """
    viewer_pos = torch.tensor(random_int_triple(0, sl - 1))
    viewer_look = torch.tensor(random_int_triple(0, sl - 1))
    if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
        # Bug fix: the previous check `viewer_look[0] < sl + 1` was always
        # true (coordinates are at most sl - 1), so the nudge could push x
        # to sl, outside the cube.  Only increment when there is room.
        if viewer_look[0] < sl - 1:
            viewer_look[0] += 1
        else:
            viewer_look[0] -= 1
    return viewer_pos, viewer_look
def b_greater_than_a(a, b):
    """Three-way compare: 1 if b > a, -1 if b < a, 0 if equal."""
    # bool arithmetic yields exactly 1, -1 or 0.
    return (b > a) - (b < a)
def shift_block(b, s):
    """Return block *b* translated by offset *s*; the id/meta payload is kept."""
    moved = (b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])
    return (moved, b[1])
def rotate_block(b, c, r):
    """Rotate block *b* by 90 * r degrees around point *c* in the xz plane.

    *r* should be 1 or -1; the y coordinate and the block payload are
    unchanged.
    """
    # TODO add a reflection
    center = np.array(c)
    rel = np.add(b[0], -center)
    dx, dz = rel[0], rel[2]
    if r == -1:
        rel[0], rel[2] = dz, -dx
    else:
        rel[0], rel[2] = -dz, dx
    return (tuple(rel + center), b[1])
def random_int_triple(minval, maxval):
    """Return a list of three uniform random ints in [minval, maxval]."""
    return [random.randint(minval, maxval) for _ in range(3)]
def check_inrange(x, minval, maxval):
    """Return True when every element of *x* lies in [minval, maxval] (inclusive)."""
    return all(minval <= v <= maxval for v in x)
def normalize(batched_vector):
    """L2-normalize each row of a batched vector, as double precision.

    Zero rows are left as zeros instead of producing NaNs.
    """
    vec = batched_vector.double()
    lengths = torch.norm(vec, dim=1)
    # Replace zero norms with 1 so the division is a no-op for zero rows.
    lengths = lengths + lengths.eq(0).double()
    return vec / lengths.unsqueeze(1)
def get_rotation_matrix(viewer_pos, viewer_look):
    """Build per-example 2x2 xy-rotation matrices aligning the look direction.

    For each of the N (viewer_pos, viewer_look) pairs, returns an N x 2 x 2
    matrix that rotates the xy plane so the normalized viewer-look vector
    maps onto (0, 1).  The NaN masking below handles the 0**0.5 gradient /
    numeric edge case when the look vector is axis-aligned.
    """
    # VP, VL: N x 3, VP_to_VL: N x 3
    vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
    nlook_vec = normalize(vp_to_vl)
    nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of acrcos
    nlx = nlook_vec[:, 0]
    # Collapse nlx to its sign: +1, -1, or -1 for exactly zero.
    nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
    # Take care of nans created by raising 0 to a power
    # and then masking the sin theta to 0 as intended
    base = 1 - nly * nly
    nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
    base = base + nan_mask
    sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
    nly = nly.unsqueeze(1)
    sin_theta = sin_theta.unsqueeze(1)
    # Rows of the rotation matrix: [cos, sin] and [-sin, cos] with cos = nly.
    rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
    rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
    rm = torch.cat([rm_pt1, rm_pt2], 1)
    return rm
def rotate_x_y(coord, rotation_matrix):
    """Apply a 2x2 rotation matrix to a single 2D coordinate (row-vector form)."""
    return (coord.unsqueeze(0) @ rotation_matrix).squeeze(0)
def float_equals(a, b, epsilon):
    """Return True when *a* and *b* differ by strictly less than *epsilon*."""
    # The comparison already yields a bool; the old
    # `True if ... else False` wrapper was redundant.
    return abs(a - b) < epsilon
def get_argmax_list(vals, epsilon, minlist=False, maxlen=None):
    """Collect (index, value) pairs tied (within *epsilon*) for the extreme value.

    With ``minlist`` True the minimum is tracked instead of the maximum;
    ``maxlen`` caps how many tied entries are kept (earliest first).
    """
    sign = -1 if minlist else 1
    best = []
    for idx, val in enumerate(vals):
        if not best or float_equals(best[0][1], val, epsilon):
            # A tie with the current extreme (or the very first element).
            if maxlen and len(best) == maxlen:
                continue
            best.append((idx, val))
        elif sign * (val - best[0][1]) > 0:
            # Strictly better: restart the tie list.
            best = [(idx, val)]
    return best
def get_firstmax(vals, epsilon, minlist=False):
    """Return the first (index, value) pair achieving the extreme of *vals*."""
    return get_argmax_list(vals, epsilon, minlist=minlist, maxlen=1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
    """Express target coordinates in a frame centered on the viewer-look point.

    The xy plane is rotated so the viewer's look direction becomes the +y
    axis, and the viewer-look point becomes the origin; z is a plain offset
    from the look point's height.  Inputs with missing batch dims are
    unsqueezed up to (N, 3) / (N, D, 3) first.
    """
    # First verify the sizing and unsqueeze if necessary
    btc_sizes = batched_target_coords.size()
    vp_sizes = viewer_pos.size()
    vl_sizes = viewer_look.size()
    if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
        raise Exception("One input has too many dimensions")
    if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
        raise Exception("The last dimension of all inputs should be size 3")
    if len(btc_sizes) < 3:
        for i in range(3 - len(btc_sizes)):
            batched_target_coords = batched_target_coords.unsqueeze(0)
    if len(vp_sizes) == 1:
        viewer_pos = viewer_pos.unsqueeze(0)
    if len(vl_sizes) == 1:
        viewer_look = viewer_look.unsqueeze(0)
    n = batched_target_coords.size()[0]
    d = batched_target_coords.size()[1]
    # Handle xy and z separately
    # XY = N X D x 2
    xy = batched_target_coords[:, :, 0:2].double()
    # Z = N x D x 1
    z = batched_target_coords[:, :, 2].unsqueeze(2).double()
    ## XY
    # Shift such that viewer pos is the origin
    # VPXY, VLXY: N x 2
    vpxy = viewer_pos.double()[:, 0:2]
    vlxy = viewer_look.double()[:, 0:2]
    vpxy_to_vlxy = vlxy - vpxy
    # VPXY to XY: N x D x 2
    vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
    # Rotate them around the viewer position such that a normalized
    # viewer look vector would be (0, 1)
    # Rotation_matrix: N x 2 x 2
    rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
    # N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
    r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
    # RM: N x 2 x 2 ==> N x D x 2 x 2
    expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
    # N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
    reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
    r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
    # N x D x 2
    # Get the xy position in this rotated coord system with rvl as the origin
    rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
    ## Z
    # VLZ = N x 1
    vlz = viewer_look.double()[:, 2]
    # Z = N x D x 1
    diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
    ## Combine
    # rvl_to_rxy: N x D x 2, diffz: N x D x 1
    new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
    return new_xyz
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
    """Split viewer-look-relative coordinates into per-axis sign and magnitude.

    Returns ``(direction, dist)``: the sign tensor holds -1, 0 or +1 per
    axis and ``dist`` the absolute offset, both in the rotated viewer-look
    frame.
    """
    if len(batched_target_coords.size()) == 1:
        batched_target_coords = batched_target_coords.unsqueeze(0)
    rel = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
    signs = rel.gt(0).double() - rel.lt(0).double()
    return signs, rel.abs()
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
    """Sample one axis (weighted by relative distance) and encode axis+sign.

    Returns a length-5 long tensor: the first three entries mark the chosen
    axis with 0 (others 1), the last two one-hot the sign — [1, 0] for a
    positive direction, [0, 1] for negative.
    """
    directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
    dists = dists.squeeze()
    directions = directions.squeeze()
    # Larger per-axis distance => higher chance of that axis being sampled.
    ndists = dists / sum(dists)
    dim = np.random.choice(3, p=ndists)
    direction = directions[dim].item()
    dim_l = [(0 if i == dim else 1) for i in range(3)]
    dir_l = [0, 1] if direction == -1 else [1, 0]
    return torch.tensor(dim_l + dir_l, dtype=torch.long)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
    """Deterministic variant of ``get_sampled_direction_vec``: pick the
    dominant axis (largest relative distance) instead of sampling.

    Encoding matches the sampled version: axis marked with 0, then a
    two-entry sign one-hot ([1, 0] positive, [0, 1] negative).
    """
    directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
    dists = dists.squeeze()
    directions = directions.squeeze()
    ndists = dists / sum(dists)
    dim = np.argmax(ndists)
    direction = directions[dim].item()
    dim_l = [(0 if i == dim else 1) for i in range(3)]
    dir_l = [0, 1] if direction == -1 else [1, 0]
    return torch.tensor(dim_l + dir_l, dtype=torch.long)
def densify(blocks, size, center=(0, 0, 0), useid=False):
    """Convert a sparse block list into a dense (H, W, D) int32 voxel grid.

    ``center`` is translated to the middle of the grid; blocks falling
    outside the grid are dropped.  With ``useid`` False every occupied voxel
    becomes 1, otherwise it holds the block id.  Returns ``(grid, offsets)``
    where ``offsets`` is the translation applied to every coordinate.
    """
    grid = np.zeros((size[0], size[1], size[2]), dtype="int32")
    offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
    for b in blocks:
        x = b[0][0] + offsets[0]
        y = b[0][1] + offsets[1]
        z = b[0][2] + offsets[2]
        if 0 <= x < size[0] and 0 <= y < size[1] and 0 <= z < size[2]:
            # The block payload is either a bare id or an (id, meta) pair.
            grid[x, y, z] = b[1] if type(b[1]) is int else b[1][0]
    if not useid:
        grid[grid > 0] = 1
    return grid, offsets
def center_of_mass(S, seg=None):
    """Return the rounded integer centroid of the (optionally masked) blocks.

    *S* holds either bare coordinates or ((x, y, z), payload) pairs; *seg*
    is an optional per-block boolean mask selecting which blocks count.
    """
    seg = seg or [True] * len(S)
    if len(S[0]) == 2:
        pts = [blk[0] for blk, keep in zip(S, seg) if keep]
    else:
        pts = [blk for blk, keep in zip(S, seg) if keep]
    return [int(v) for v in np.round(np.mean(pts, axis=0))]
def check_l1_dist(a, b, d):
    """Return True when *b* is within the per-axis distance *d* of *a* (3D)."""
    return all(abs(b[i] - a[i]) <= d[i] for i in range(3))
def sparsify_segment(seg, context):
    """Return the context blocks whose positions are flagged True in *seg*."""
    return [blk for blk, keep in zip(context, seg) if keep]
def get_dense_array_from_sl(sparse_shape, sl, useid):
    """Densify *sparse_shape* into an sl**3 numpy grid centered in the cube.

    Bug fix: the ``(array, offsets)`` tuple returned by ``densify`` was
    previously passed through ``np.asarray`` before unpacking; building an
    array from that ragged tuple raises a ValueError on modern NumPy and
    was never needed — ``densify`` already returns a numpy array.
    """
    center = [sl // 2, sl // 2, sl // 2]
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
    return shape_dense
def convert_sparse_context_seg_to_example(
    context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False
):
    """Build one training example from sparse context and segment blocks.

    Returns ``[context (c_sl**3), centered segment (s_sl**3), target index]``
    as torch tensors.  The segment is subtracted from (or, for
    visualization, added to) the dense context, and the target index encodes
    the segment's original corner position inside the context cube.
    """
    context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
    seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid)
    # For visualization
    if vis:
        context_dense = context_dense + seg_dense_uncentered
    else:
        context_dense = context_dense - seg_dense_uncentered
    shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse)
    seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
    # Undo the origin shift to recover the segment's corner in context coords.
    target_coord = [-x for x in shift_vec]
    target_index = coord_to_index(target_coord, c_sl)
    return [
        torch.from_numpy(context_dense),
        torch.from_numpy(seg_dense_centered),
        torch.tensor([target_index]),
    ]
############################################################################
# For these "S" is a list of blocks in ((x,y,z),(id, meta)) format
# the segment is a list of the same length as S with either True or False
# at each entry marking whether that block is in the segment
# each outputs a list of blocks in ((x,y,z),(id, meta)) format
def shift_negative_vec(S, segment, vec, args):
    """Shift all non-segment blocks of *S* by *vec*, leaving the segment put.

    When ``args`` carries a "seg_id", segment blocks additionally have their
    block id replaced by it (meta preserved).  Returns a new block list.
    """
    result = []
    for blk, in_seg in zip(S, segment):
        if not in_seg:
            result.append([tuple(np.add(blk[0], vec)), blk[1]])
        elif "seg_id" in args:
            result.append([blk[0], (args["seg_id"], blk[1][1])])
        else:
            result.append(blk)
    return result
def shift_negative(S, segment, args):
    """Build a negative example by randomly shifting the non-segment blocks.

    The offset is drawn uniformly per axis from [-shift_max, shift_max],
    where ``shift_max`` comes from ``args``.  (The docstring previously sat
    after the first statement as a stray string expression; it is now a real
    docstring.)
    """
    shift_max = args["shift_max"]
    shift_vec = random_int_triple(-shift_max, shift_max)
    return shift_negative_vec(S, segment, shift_vec, args)
def rotate_negative(S, segment, args):
    """Build a negative example by rotating the segment 90 degrees (random sign)
    around its own center of mass; non-segment blocks are untouched."""
    pivot = center_of_mass(S, seg=segment)
    turn = random.choice([1, -1])
    return [
        rotate_block(blk, pivot, turn) if keep else blk
        for blk, keep in zip(S, segment)
    ]
def replace_negative(S, segment, args):
    """Build a negative example by swapping in another example's segment.

    A fresh positive (mask, blocks) pair is drawn from ``args["data"]``
    (assumed to expose ``get_positive()`` — confirm against the dataloader),
    its segment is translated so the centers of mass line up, and it
    replaces this example's segment blocks.
    """
    data = args["data"]
    oseg, oS = data.get_positive()
    c_pos = center_of_mass(S, seg=segment)
    c_neg = center_of_mass(oS, seg=oseg)
    # Translation aligning the donor segment's center with ours.
    offset = np.add(c_pos, -np.array(c_neg))
    N = [S[i] for i in range(len(S)) if not segment[i]]
    return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]]
class NegativeSampler:
    """Draws corrupted ("negative") versions of a positive example.

    One of three corruptions is applied, chosen with ``ntype_probs``:
    shifting the non-segment blocks, rotating the segment, or replacing the
    segment with one drawn from another positive example via ``dataloader``.
    """

    def __init__(self, dataloader, shift_max=10, ntype_probs=None):
        self.dataloader = dataloader
        self.shift_max = shift_max
        # Avoid a shared mutable default argument for the probability list.
        self.ntype_probs = [0.6, 0.2, 0.2] if ntype_probs is None else ntype_probs
        self.negative_samplers = [shift_negative, rotate_negative, replace_negative]

    def build_negative(self, S, segment):
        """Return a randomly corrupted copy of blocks *S* with mask *segment*."""
        negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs)
        return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
| StarcoderdataPython |
46418 | """
Metrics for (multi-horizon) timeseries forecasting.
"""
from pytorch_forecasting.metrics.base_metrics import (
DistributionLoss,
Metric,
MultiHorizonMetric,
MultiLoss,
MultivariateDistributionLoss,
convert_torchmetric_to_pytorch_forecasting_metric,
)
from pytorch_forecasting.metrics.distributions import (
BetaDistributionLoss,
ImplicitQuantileNetworkDistributionLoss,
LogNormalDistributionLoss,
MQF2DistributionLoss,
MultivariateNormalDistributionLoss,
NegativeBinomialDistributionLoss,
NormalDistributionLoss,
)
from pytorch_forecasting.metrics.point import MAE, MAPE, MASE, RMSE, SMAPE, CrossEntropy, PoissonLoss, TweedieLoss
from pytorch_forecasting.metrics.quantile import QuantileLoss
# Public API of pytorch_forecasting.metrics: base metric machinery followed
# by the point, distribution and quantile losses re-exported above.
__all__ = [
    "MultiHorizonMetric",
    "DistributionLoss",
    "MultivariateDistributionLoss",
    "MultiLoss",
    "Metric",
    "convert_torchmetric_to_pytorch_forecasting_metric",
    "MAE",
    "MAPE",
    "MASE",
    "PoissonLoss",
    "TweedieLoss",
    "CrossEntropy",
    "SMAPE",
    "RMSE",
    "BetaDistributionLoss",
    "NegativeBinomialDistributionLoss",
    "NormalDistributionLoss",
    "LogNormalDistributionLoss",
    "MultivariateNormalDistributionLoss",
    "ImplicitQuantileNetworkDistributionLoss",
    "QuantileLoss",
    "MQF2DistributionLoss",
]
| StarcoderdataPython |
1734343 | import turtle
def polygon(sides, length):
    """Draw a regular polygon with the given number of sides and side length.

    A fresh lime-colored turtle is created at full speed and hidden when
    the shape is complete.
    """
    pen = turtle.Turtle()
    pen.color("lime")
    pen.speed(0)
    turn = 360 / sides
    for _ in range(sides):
        pen.forward(length)
        pen.right(turn)
    pen.hideturtle()
# Draw nested regular polygons from a triangle up to a 14-gon, all sharing
# the same start point and side length, then block on input() so the turtle
# window stays open until the user presses Enter.
for n in [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]:
    polygon(n, 35)
input()
| StarcoderdataPython |
80453 | <reponame>waweru12/The-news-highlighter
import unittest
from app.models import Source
from app.models import Article
class SourceTest(unittest.TestCase):
    '''
    Test Class to test the behaviour of the Source class
    '''
    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        # Positional args presumably map to id, name, description, url,
        # category, country — TODO confirm against app.models.Source.
        self.new_source = Source('KTN', 'KTN-NEWS', 'Home of News', 'https://ktn.co.ke', 'general', 'ke')
    def test_instance(self):
        '''
        Test to check if new_source instance exists
        '''
        self.assertTrue(isinstance(self.new_source,Source))
class ArticleTest(unittest.TestCase):
    '''
    Test Class to test the behaviour of the Article class
    '''
    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        # Positional args presumably map to author, title, description, url,
        # image url, publish date, content — TODO confirm against
        # app.models.Article.
        self.new_article = Article('Wekesa', 'Kenyan Cars', 'The variety and rich culture that exists in Kenyan motorsport', 'https://ktn.co.ke', 'https://ktn.co.ke/image1', '24/06/2012', 'kenyan motorshow is among the best')
    def test_instance(self):
        '''
        Test to check if new_Article instance exists
        '''
        self.assertTrue(isinstance(self.new_article,Article))
| StarcoderdataPython |
156798 | <filename>registrobrepp/contact/brcreatecontactcommand.py
from eppy.doc import EppCreateContactCommand
from registrobrepp.common.authinfo import AuthInfo
from registrobrepp.contact.disclose import Disclose
from registrobrepp.contact.phone import Phone
from registrobrepp.contact.postalinfo import PostalInfo
class BrEppCreateContactCommand(EppCreateContactCommand):
    """EPP <create> contact command with the Brazilian registry namespaces.

    Builds the ``contact:create`` payload dictionary and registers the
    lacnic/brorg XML namespaces needed by registro.br before delegating to
    the generic eppy command.
    """
    def __init__(self, id: str, postalinfo1: PostalInfo, email: str, authinfo: AuthInfo = None, postalinfo2: PostalInfo = None,
                 voice: Phone = None, fax: Phone = None, disclose: Disclose = None):
        # At least one postal info is mandatory; a second one is optional.
        postalInfo = [postalinfo1]
        if postalinfo2:
            postalInfo.append(postalinfo2)
        dct = {
            'epp': {
                'command': {
                    'create': {
                        'contact:create': {
                            'id': id,
                            'postalInfo': postalInfo,
                            'voice': voice,
                            'fax': fax,
                            'email': email,
                            'authInfo': authinfo,
                            'disclose': disclose
                        }
                    }
                }
            }
        }
        extra_nsmap = {
            'lacniccontact': 'urn:ietf:params:xml:ns:lacniccontact-1.0',
            'brorg': 'urn:ietf:params:xml:ns:brorg-1.0',
            'lacnicorg': 'urn:ietf:params:xml:ns:lacnicorg-1.0'
        }
        # annotate() is inherited from the eppy document machinery — it
        # normalizes the dict before serialization (confirm in eppy docs).
        super(BrEppCreateContactCommand, self).__init__(dct=self.annotate(dct), extra_nsmap=extra_nsmap)
| StarcoderdataPython |
3244989 | <reponame>kyapp69/GCodeViewer
import logging
import OpenGL
OpenGL.FORWARD_COMPATIBLE_ONLY = True
# ^ See http://pyopengl.sourceforge.net/documentation/deprecations.html
import OpenGL.GL as gl
class ShaderLoader(object):
    """Compile GLSL shader sources from disk and link them into a program."""

    @classmethod
    def _load_shader(cls, shader_type, file_path):
        """Compile one shader stage of *shader_type* from *file_path*.

        Raises an Exception carrying the driver's info log when compilation
        fails.  Shared by the vertex and fragment loaders, which previously
        duplicated this body.
        """
        shader_id = gl.glCreateShader(shader_type)
        # `with` guarantees the source file is closed (it was leaked before).
        with open(file_path, 'r') as file_handle:
            code = file_handle.read()
        logging.info("Compiling shader : %s" % file_path)
        gl.glShaderSource(shader_id, code)
        gl.glCompileShader(shader_id)
        result = gl.glGetShaderiv(shader_id, gl.GL_COMPILE_STATUS)
        info = gl.glGetShaderInfoLog(shader_id)
        logging.info("Result: %s" % result)
        logging.info("Info: %s" % info)
        # Fail on the driver-reported status, not on a non-empty log: per
        # the GL spec the info log may contain warnings on success, and a
        # failed compile can leave the log empty.
        if not result:
            raise Exception(info)
        return shader_id

    @classmethod
    def load_vertex_shader(cls, file_path):
        """Compile *file_path* as a vertex shader and return its GL id."""
        return cls._load_shader(gl.GL_VERTEX_SHADER, file_path)

    @classmethod
    def load_fragment_shader(cls, file_path):
        """Compile *file_path* as a fragment shader and return its GL id."""
        return cls._load_shader(gl.GL_FRAGMENT_SHADER, file_path)

    @classmethod
    def load_shaders(cls, vertex_shader_file, fragment_shader_file):
        """Compile both stages, link them, and return the program id.

        The individual shader objects are deleted after a successful link;
        they stay attached to the program and live on inside it.
        """
        shader_program = gl.glCreateProgram()
        vertex_shader_id = cls.load_vertex_shader(vertex_shader_file)
        fragment_shader_id = cls.load_fragment_shader(fragment_shader_file)
        logging.info("Creating shader program")
        gl.glAttachShader(shader_program, vertex_shader_id)
        gl.glAttachShader(shader_program, fragment_shader_id)
        gl.glLinkProgram(shader_program)
        result = gl.glGetProgramiv(shader_program, gl.GL_LINK_STATUS)
        info = gl.glGetProgramInfoLog(shader_program)
        logging.info("Result: %s" % result)
        logging.info("Info: %s" % info)
        if not result:
            raise Exception(info)
        gl.glDeleteShader(vertex_shader_id)
        gl.glDeleteShader(fragment_shader_id)
        return shader_program
| StarcoderdataPython |
136691 | # price.py
from .helper_functions import (build_url, load_data, timestamp_to_date,
date_to_timestamp)
def get_current_price(fsyms, tsyms, e='all', try_conversion=True, full=False,
                      format='raw'):
    """Fetch the latest prices (or full trading info) for FROM/TO pairs.

    Args:
        fsyms: Single FROM symbol or a list of them.
        tsyms: Single TO symbol or a list of them.
        e: Exchange name, or 'all' for the cross-exchange average.
        try_conversion: Allow conversion through BTC when a pair does not
            trade directly.
        full: False returns only latest prices; True returns the complete
            per-pair trading info (open/high/low, volumes, market cap, ...).
        format: For full requests, choose the 'raw' or 'display' variant of
            the API response.

    Returns:
        {fsym: {tsym: price_or_info, ...}, ...}
    """
    endpoint = 'pricemultifull' if full else 'pricemulti'
    # Allow scalar symbol arguments by promoting them to one-element lists.
    if not isinstance(fsyms, list):
        fsyms = [fsyms]
    if not isinstance(tsyms, list):
        tsyms = [tsyms]
    url = build_url(endpoint, fsyms=fsyms, tsyms=tsyms, e=e,
                    try_conversion=try_conversion)
    data = load_data(url)
    if full:
        if format == 'raw':
            data = data['RAW']
        elif format == 'display':
            data = data['DISPLAY']
    return data
def get_current_trading_info(fsym, tsym, markets='all', try_conversion=True,
                             format='raw'):
    """Fetch the latest volume-weighted average trading info for one pair.

    Args:
        fsym: FROM symbol.
        tsym: TO symbol.
        markets: List of market names, or 'all'.
        try_conversion: Allow conversion through BTC when the pair does not
            trade directly.
        format: Choose the 'raw' or 'display' variant of the API response.

    Returns:
        {fsym: {tsym: info}} where *info* carries PRICE, OPEN/HIGH/LOW,
        volumes, last-trade fields etc. as reported by the endpoint.
    """
    url = build_url('generateAvg', fsym=fsym, tsym=tsym, markets=markets,
                    try_conversion=try_conversion)
    data = load_data(url)
    if format == 'raw':
        data = data['RAW']
    elif format == 'display':
        data = data['DISPLAY']
    return {fsym: {tsym: data}}
def get_day_average_price(fsym, tsym, e='all', try_conversion=True,
                          avg_type='HourVWAP', utc_hour_diff=0, to_ts=False):
    """Get the day-average price of a currency pair.

    Args:
        fsym: FROM symbol.
        tsym: TO symbol.
        e: Default returns average price across all exchanges.
           Can be set to the name of a single exchange.
        try_conversion: If the crypto does not trade directly into the
            toSymbol requested, BTC will be used for conversion.  If set to
            false, it will try to get values without using any conversion.
        avg_type: 'HourVWAP' returns a volume weighted average of the hourly
            close price.  The other option 'MidHighLow' gives the average
            between the 24 hour high and low.
        utc_hour_diff: Pass hour difference to UTC for different time zone.
        to_ts: Optional timestamp/datetime selecting which day to average
            (implements the old "add 'toTs' parameter" TODO); the default
            False keeps the previous behaviour of querying the current day.

    Returns:
        {fsym: {tsym: price}} with the day average as float.
    """
    url = build_url('dayAvg', fsym=fsym, tsym=tsym, e=e,
                    try_conversion=try_conversion, avg_type=avg_type,
                    utc_hour_diff=utc_hour_diff, to_ts=to_ts)
    data = load_data(url)
    # The endpoint echoes the conversion mode; drop it so only prices remain.
    del data['ConversionType']
    return {fsym: data}
def get_historical_eod_price(fsym, tsyms, date, e='all', try_conversion=True):
    """Get the end-of-day price of *fsym* in each of *tsyms* for *date*.

    Args:
        fsym: FROM symbol.
        tsyms: Single TO symbol or a list of them.
        date: "Y-m-d H:M:S" formatted date string.
        e: Exchange name, or 'all' for the cross-exchange average.
        try_conversion: Allow conversion through BTC when a pair does not
            trade directly.

    Returns:
        {fsym: {tsym1: ..., tsym2: ..., ...}}
    """
    # Allow a scalar TO symbol by promoting it to a one-element list.
    if not isinstance(tsyms, list):
        tsyms = [tsyms]
    ts = date_to_timestamp(date)
    url = build_url("pricehistorical", fsym=fsym, tsyms=tsyms, ts=ts,
                    e=e, try_conversion=try_conversion)
    return load_data(url)
def get_historical_data(fsym, tsym, freq, info='full', e='all',
                        try_conversion=True, aggregate=1, limit=1440,
                        to_ts=False):
    """Get tick-by-tick historical price and volume information for
    the requested currency pair.

    Args:
        fsym: FROM symbol.
        tsym: TO symbol.
        freq: Frequency of the data. Can be set to 'minute', 'hour' or 'day'.
        info: Select price or volume information to return. Default of 'full'
            returns all of them. Can be set to 'high', 'low', 'open',
            'close', 'volumefrom', and 'volumeto' or a list containing
            several of those values.
        e: Default returns average price across all exchanges. Can be set to
            the name of a single exchange.
        try_conversion: If the crypto does not trade directly into the
            toSymbol requested, BTC will be used for conversion. If set to
            false, it will try to get values without using any conversion.
        aggregate: Aggregates the prices into bins of the specified size.
        limit: Number of ticks. Limits depend on the freq selected:
            minute: default = 1440, min = 1, max = 2000
            hour: default = 168, min = 1, max = 2000
            day: default = 30, min = 1, max = 2000
            Using aggregate reduces the maximum number of points that can be
            returned by a factor equal to the chosen bin size.
        to_ts: Get historical data at a specific date. Can be a
            datetime.datetime object, an int timestamp like 1558022508, or a
            string timestamp like "1558022508".

    Returns:
        List of dictionaries with the price/volume info for each tick:
        [{'time': ..., 'close': ..., 'high': ..., 'low': ..., 'open': ...,
          'volumefrom': ..., 'volumeto': ...}, ...]
    """
    # load data; note freq doubles as the endpoint name, hence being passed
    # both positionally and as a keyword to build_url.
    url = build_url(freq, fsym=fsym, tsym=tsym, freq=freq, e=e,
                    try_conversion=try_conversion, aggregate=aggregate,
                    limit=limit, to_ts=to_ts)
    data = load_data(url)
    data = data['Data']
    # convert timestamps to nice date format, in place
    for d in data:
        d['time'] = timestamp_to_date(d['time'])
    # convert single input info to single element list
    if not isinstance(info, list):
        info = [info]
    # select information to return
    if info[0] == 'full':
        return data
    else:
        # Drop every field the caller did not ask for, always keeping 'time';
        # list() snapshots the items so deletion during iteration is safe.
        for d in data:
            for k, v in list(d.items()):
                if k not in info and k != 'time':
                    del d[k]
        return data
| StarcoderdataPython |
4834733 | import distributedGrepTest
import wordCountTest
import URLFrequencyTest
import json
def _run_from_config(path, runner, *extra_args):
    """Load a JSON config file and invoke *runner* with its input/output paths.

    Replaces three copies of an open/read/parse sequence that also leaked
    the file handles; `with` now closes them deterministically.
    """
    with open(path, 'r') as handle:
        config = json.load(handle)
    runner(config["inputfile"], config["outputdir"], *extra_args)


_run_from_config("wordcount.json", wordCountTest.test)
_run_from_config("distributedgrep.json", distributedGrepTest.test, "distributed")
_run_from_config("urlfrequency.json", URLFrequencyTest.test)
1728519 | '''
area_curves.py
Find the area enclosed by two curves between two points
'''
from sympy import Integral, Symbol, SympifyError, sympify
def find_area(f, g, var, a, b):
    """Return the area enclosed between two curves over an interval.

    Computes the definite integral of ``f - g`` with respect to *var*
    from *a* to *b*, i.e. the signed area between the upper curve *f*
    and the lower curve *g*.

    :param f: upper function (a sympy expression).
    :param g: lower function (a sympy expression).
    :param var: the integration variable (a sympy Symbol).
    :param a: lower bound of the enclosed region.
    :param b: upper bound of the enclosed region.
    :return: the evaluated integral (a sympy expression or number).
    """
    # Use a dedicated name for the result; the original reused the
    # parameter `a` (the lower bound), which was confusing to read.
    area = Integral(f - g, (var, a, b)).doit()
    return area
if __name__ == '__main__':
    # Interactive driver: prompt for the two curves, the variable and the
    # interval, then print the enclosed area.
    f = input('Enter the upper function in one variable: ')
    # Fixed prompt typo: the original said "lower upper function".
    g = input('Enter the lower function in one variable: ')
    var = input('Enter the variable: ')
    l = float(input('Enter the lower bound of the enclosed region: '))
    u = float(input('Enter the upper bound of the enclosed region: '))
    try:
        f = sympify(f)
        g = sympify(g)
    except SympifyError:
        print('One of the functions entered is invalid')
    else:
        var = Symbol(var)
        print('Area enclosed by {0} and {1} is: {2} '.
              format(f, g, find_area(f, g, var, l, u)))
1701923 | # wallstop.py
import time
import brickpi3
import grovepi

# BrickPi3 motor controller and the Grove port carrying the ultrasonic sensor.
BP = brickpi3.BrickPi3()
ultrasonic_sensor_port = 4
try:
    # Drive forward (motors A and D at 30% power) until the ultrasonic
    # sensor reports an obstacle at 15 cm or closer, printing the sensor
    # distance and all four motor encoder readings on every pass.
    while grovepi.ultrasonicRead(ultrasonic_sensor_port) > 15:
        print("Sensor: %6d Motor A: %6d B: %6d C: %6d D: %6d" \
            % (grovepi.ultrasonicRead(ultrasonic_sensor_port), \
            BP.get_motor_encoder(BP.PORT_A), \
            BP.get_motor_encoder(BP.PORT_B), \
            BP.get_motor_encoder(BP.PORT_C), \
            BP.get_motor_encoder(BP.PORT_D)))
        BP.set_motor_power(BP.PORT_A+BP.PORT_D,30)
except IOError as error:
    # Sensor/controller communication failure: report and fall through to cleanup.
    print(error)
except TypeError as error:
    print(error)
except KeyboardInterrupt:
    print("You pressed ctrl+C...")
# Cleanup runs on both normal exit and the handled exceptions above:
# offset each motor encoder by its current reading before the final
# BP.reset_all() below.
BP.offset_motor_encoder(BP.PORT_A, BP.get_motor_encoder(BP.PORT_A))
BP.offset_motor_encoder(BP.PORT_B, BP.get_motor_encoder(BP.PORT_B))
BP.offset_motor_encoder(BP.PORT_C, BP.get_motor_encoder(BP.PORT_C))
BP.offset_motor_encoder(BP.PORT_D, BP.get_motor_encoder(BP.PORT_D))
BP.reset_all() | StarcoderdataPython |
3357903 |
class ModelManager(object):
    """Empty stub for the model manager; no behaviour is defined yet."""
class ModelSerializer(object):
    """Empty stub for the model serializer; no behaviour is defined yet."""
| StarcoderdataPython |
3292019 | import unittest
from datetime import timedelta
from datetimerange import DateTimeRange
from logreader.lineage import Lineage
from tests.character_factories import eve, female
class TestLineage(unittest.TestCase):
    """Behavioural tests for Lineage.duration()."""

    def test_duration_at_least_eve_fertility(self):
        # A lineage consisting of a single female spans exactly her
        # own fertility period.
        root = eve()
        lineage = Lineage(root)
        self.assertEqual(root.fertility_period(), lineage.duration())

    def test_duration_is_eve_birth_to_max_fertility_end_of_girls(self):
        # With female descendants, the lineage runs from Eve's birth to the
        # end of the latest descendant's fertility period.
        root = eve()
        child = female(birth=root.birth + timedelta(minutes=5))
        root.kids.append(child)
        grandchild = female(birth=child.birth + timedelta(minutes=20))
        child.kids.append(grandchild)
        lineage = Lineage(root)
        expected = DateTimeRange(root.birth, grandchild.fertility_period().end_datetime)
        self.assertEqual(expected, lineage.duration())
| StarcoderdataPython |
1618468 | import sys
import logging
from datetime import datetime
from pathlib import Path
from nltk.tokenize import sent_tokenize
from base import dataset, embedding_index, embedding_model, word_weight, sentence_splitter
from base.dataset import Dataset
from base.document import Document
from base.embedding_index import EmbeddingIndex
from base.embedding_model import EmbeddingModel
from base.word_weight import WordWeight
from base.sentence_splitter import SentenceSplitter, KGramSplitter
from base.text_index import TextIndex, KGramIndex
from colr import Colr as C
from evaluation.metrics import bleu_on_corpus
class Interactor:
    """Interactive REPL that answers free-text queries against a
    sentence-level text index built from a dataset and an embedding model.
    """

    # Directory where the per-run log file is written.
    DEFAULT_LOG_FOLDER = Path('logs')

    def __init__(self, _dataset: Dataset, _embedding_model: EmbeddingModel, _embedding_index: EmbeddingIndex,
                 _sentence_splitter: SentenceSplitter, _word_weights: WordWeight, _documents_limit: int,
                 _text_index_bin: str):
        """Store collaborators; heavy loading is deferred to _initialize().

        :param _dataset: corpus to index.
        :param _embedding_model: word-embedding provider.
        :param _embedding_index: nearest-neighbour index over embeddings.
        :param _sentence_splitter: splits sentences into sub-units for indexing.
        :param _word_weights: per-word weighting scheme (e.g. IDF).
        :param _documents_limit: maximum number of documents to load.
        :param _text_index_bin: path of the serialized text index binary.
        """
        self.dataset: Dataset = _dataset
        self.embedding_model: EmbeddingModel = _embedding_model
        self.embedding_index: EmbeddingIndex = _embedding_index
        self.sentence_splitter: SentenceSplitter = _sentence_splitter
        self.word_weights: WordWeight = _word_weights
        self.documents_limit: int = _documents_limit
        self.text_index_bin: str = _text_index_bin
        self.text_index: TextIndex = None
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.INFO)
        self._create_log_handler()

    def _create_log_handler(self):
        """Attach a timestamped INFO file handler under DEFAULT_LOG_FOLDER."""
        if not Interactor.DEFAULT_LOG_FOLDER.exists():
            Interactor.DEFAULT_LOG_FOLDER.mkdir()
        current_date = datetime.now().strftime('%Y.%m.%d %H.%M.%S')
        log_filename = f'interactor {current_date}'
        file_handler = logging.FileHandler(Interactor.DEFAULT_LOG_FOLDER / log_filename)
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def _initialize(self):
        """Load data/model, initialize word weights, and build the text index."""
        self.logger.info('Loading dataset')
        self.dataset.load(sentence_splitter=sent_tokenize, documents_limit=self.documents_limit)
        self.logger.info('Loading embedding model')
        self.embedding_model.load()
        self.logger.info('Initializing word weights')
        self.word_weights.initialize(self.dataset)
        self.logger.info('Creating text index')
        self.text_index = TextIndex(self.dataset, self.embedding_model, self.embedding_index,
                                    self.sentence_splitter, self.word_weights, self.logger)
        self.text_index.build(self.text_index_bin)
        self.logger.info('Initialization completed successfully')

    def _process_input(self, text: str):
        """Split *text* into sentences and print index matches for each one."""
        document = Document(text)
        sentences = document.split_to_sentences(sent_tokenize)
        for sentence in sentences:
            response = self.text_index.search(sentence, neighbours=5, splitter_neighbours=10)
            if response is None:
                # Sentence produced no searchable content; skip silently.
                continue
            for r in response:
                sys.stdout.write(f'{str(sentence)} -> {str(r)}\n')
            sys.stdout.flush()

    def interact(self):
        """Run the blocking read-eval-print loop on stdin/stdout."""
        self._initialize()
        # print(f"Bleu on corpus: {bleu_on_corpus(self.text_index.dataset.get_sentences(), self.text_index)}")
        while True:
            sys.stdout.write('> ')
            sys.stdout.flush()
            text = sys.stdin.readline().strip()
            if len(text) == 0:
                continue
            self._process_input(text)
class KGramInteractor:
    """Interactive REPL that scores each word of a query against a k-gram
    index and prints/colour-codes the nearest matches per word.
    """

    # Directory where the per-run log file is written.
    DEFAULT_LOG_FOLDER = Path('logs')

    def __init__(self, _dataset: Dataset, _embedding_model: EmbeddingModel, _k_gram_size: int,
                 _word_weights: WordWeight, _documents_limit: int, _text_index_bin: str):
        """Store collaborators; heavy loading is deferred to _initialize().

        :param _dataset: corpus to index.
        :param _embedding_model: word-embedding provider.
        :param _k_gram_size: size of the k-grams used by the index.
        :param _word_weights: per-word weighting scheme (e.g. IDF).
        :param _documents_limit: maximum number of documents to load.
        :param _text_index_bin: path of the serialized text index binary.
        """
        self.k_gram_size = _k_gram_size
        self.dataset: Dataset = _dataset
        self.embedding_model: EmbeddingModel = _embedding_model
        self.word_weights: WordWeight = _word_weights
        self.documents_limit: int = _documents_limit
        self.text_index_bin: str = _text_index_bin
        self.text_index: TextIndex = None
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.INFO)
        self._create_log_handler()

    def _create_log_handler(self):
        """Attach a timestamped INFO file handler under DEFAULT_LOG_FOLDER."""
        # Consistency fix: use this class's own folder constant instead of
        # Interactor's (both are Path('logs'), so behaviour is unchanged).
        if not KGramInteractor.DEFAULT_LOG_FOLDER.exists():
            KGramInteractor.DEFAULT_LOG_FOLDER.mkdir()
        current_date = datetime.now().strftime('%Y.%m.%d %H.%M.%S')
        log_filename = f'k_gram_interactor {current_date}'
        file_handler = logging.FileHandler(KGramInteractor.DEFAULT_LOG_FOLDER / log_filename)
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def _initialize(self):
        """Load data/model, initialize word weights, and build the k-gram index."""
        self.logger.info('Loading dataset')
        self.dataset.load(sentence_splitter=sent_tokenize, documents_limit=self.documents_limit)
        self.logger.info('Loading embedding model')
        self.embedding_model.load()
        self.logger.info('Initializing word weights')
        self.word_weights.initialize(self.dataset)
        self.logger.info('Creating text index')
        self.text_index = KGramIndex(self.dataset, self.embedding_model, self.k_gram_size,
                                     self.word_weights, self.logger)
        self.text_index.build(self.text_index_bin)
        self.logger.info('Initialization completed successfully')

    def _process_input(self, text: str):
        """Search each sentence of *text* and print per-word match details."""
        document = Document(text)
        sentences = document.split_to_sentences(sent_tokenize)
        for sentence in sentences:
            response = self.text_index.search(sentence, neighbours=2)
            if response is None:
                continue
            # Keep only alphabetic tokens for display/scoring.
            sentence = sentence.get_tokens_by_indices(sentence.get_alphabetic_tokens())
            sys.stdout.write(str(sentence) + '\n')
            sys.stdout.write(str(response) + '\n')
            sys.stdout.write('====================\n')
            for r, word in zip(response, sentence):
                # Colour each word by its best match score: higher score -> darker red background.
                c = max(ri[0] for ri in r)
                sys.stdout.write(str(C().b_rgb(255 * min(1, 1 - (c - 0.5) / 0.5), 0, 0).rgb(255, 255, 255, word)))
                sys.stdout.write(' ')
            sys.stdout.write('\n')
            for r, word in zip(response, sentence):
                # List each word's matches, best score first.
                r.sort(key=lambda ri: -ri[0])
                sys.stdout.write(word + ': ')
                for ri in r:
                    sys.stdout.write(f'({ri[1]}: {ri[0]:0.3f}) ')
                sys.stdout.write('\n')
            sys.stdout.write('====================\n')
            sys.stdout.flush()

    def interact(self):
        """Run the blocking read-eval-print loop on stdin/stdout."""
        self._initialize()
        while True:
            sys.stdout.write('> ')
            sys.stdout.flush()
            text = sys.stdin.readline().strip()
            if len(text) == 0:
                continue
            self._process_input(text)

    def process_text(self, src: Path, dst: Path):
        """Batch mode: read *src*, score every sentence, write results to *dst*."""
        self._initialize()
        with src.open('r') as inp, dst.open('w') as outp:
            document = Document(inp.read())
            sentences = document.split_to_sentences(sent_tokenize)
            for sentence in sentences:
                try:
                    response = self.text_index.search(sentence, neighbours=2)
                    if response is None:
                        continue
                    sentence = sentence.get_tokens_by_indices(sentence.get_alphabetic_tokens())
                    for word in sentence:
                        outp.write(f'{word} ')
                    outp.write('\n')
                    for r in response:
                        outp.write(f'{r:0.8f} ')
                    outp.write('\n')
                    outp.flush()
                except Exception:
                    # Best-effort per sentence, but log the failure instead of
                    # silently swallowing it.  Narrowed from a bare `except:`
                    # so KeyboardInterrupt/SystemExit are no longer trapped.
                    self.logger.exception('Failed to process sentence; skipping')
if __name__ == '__main__':
    # The sentence-level Interactor variant, kept (disabled) for reference:
    """
    Interactor(
        _dataset=dataset.nips_papers,
        _embedding_model=embedding_model.glove128,
        # _embedding_index=embedding_index.knn,
        # _embedding_index=embedding_index.faiss,
        _sentence_splitter=sentence_splitter.five_gram,
        _word_weights=word_weight.idf_word_weight,
        _documents_limit=100,
        _text_index_bin='nips_100doc_glove128_v1.bin'
    ).interact()
    """
    # Run the k-gram REPL over the first 100 NIPS papers with 50-dim GloVe
    # embeddings and IDF word weights.
    KGramInteractor(
        _dataset=dataset.nips_papers,
        _embedding_model=embedding_model.glove50,
        _k_gram_size=5,
        _word_weights=word_weight.idf_word_weight,
        _documents_limit=100,
        _text_index_bin='nips_100doc_glove50_5gram_v0.bin'
    ).interact()
| StarcoderdataPython |
3214929 | import json
import multiprocessing
import time
import requests
import snappi
from flask import Flask, Response, request
from otg_gnmi.common.utils import init_logging, get_current_time
from tests.utils.common import get_mockserver_status
from tests.utils.settings import MockConfig
app = Flask(__name__)
CONFIG = MockConfig()
logfile = 'flask'+'-'+str(get_current_time())+'.log'
flask_logger = init_logging(
'test',
'mockserver',
logfile
)
@app.route('/status', methods=['GET'])
def get_status():
    """Health-check endpoint: always reports the mock server as up."""
    body = json.dumps({'status': 'up'})
    return Response(
        response=body,
        status=200,
        headers={'Content-Type': 'application/json'})
@app.route('/config', methods=['POST'])
def set_config():
    """Mock POST /config: validate and store a snappi configuration.

    The response status is driven by the shared mock-server status (see
    get_mockserver_status): 200 / 200-with-warning stores the config,
    400/500 simulate errors, anything else yields 501.
    """
    global CONFIG
    config = snappi.api().config()
    config.deserialize(request.data.decode('utf-8'))
    # Minimal validation: location_preemption, when present, must be a bool.
    test = config.options.port_options.location_preemption
    if test is not None and isinstance(test, bool) is False:
        # 590 is a deliberate non-standard code used by the test suite.
        return Response(status=590,
                        response=json.dumps({'detail': 'invalid data type'}),
                        headers={'Content-Type': 'application/json'})
    else:
        status = get_mockserver_status()
        if status == "200":
            CONFIG = config
            return Response(status=200,
                            response=json.dumps({'warnings': []}),
                            headers={'Content-Type': 'application/json'})
        elif status == "200-warning":
            CONFIG = config
            return Response(status=200,
                            response=json.dumps(
                                {'warnings': ['mock 200 set_config warning']}),
                            headers={'Content-Type': 'application/json'})
        elif status == "400":
            return Response(status=400,
                            response=json.dumps(
                                {'errors': ['mock 400 set_config error']}),
                            headers={'Content-Type': 'application/json'})
        elif status == "500":
            return Response(status=500,
                            response=json.dumps(
                                {'errors': ['mock 500 set_config error']}),
                            headers={'Content-Type': 'application/json'})
        else:
            return Response(status=501,
                            response=json.dumps(
                                {'errors': ['set_config is not implemented']}),
                            headers={'Content-Type': 'application/json'})
@app.route('/config', methods=['GET'])
def get_config():
    """Mock GET /config: return the stored configuration (or '{}').

    Error statuses are simulated according to the shared mock-server status.
    """
    global CONFIG
    status = get_mockserver_status()
    if status in ["200", "200-warning"]:
        # Serialize the last stored config; '{}' if none was set yet.
        return Response(CONFIG.serialize() if CONFIG is not None else '{}',
                        mimetype='application/json',
                        status=200)
    elif status == "400":
        return Response(status=400,
                        response=json.dumps(
                            {'errors': ['mock 400 get_config error']}),
                        headers={'Content-Type': 'application/json'})
    elif status == "500":
        return Response(status=500,
                        response=json.dumps(
                            {'errors': ['mock 500 get_config error']}),
                        headers={'Content-Type': 'application/json'})
    else:
        return Response(status=501,
                        response=json.dumps(
                            {'errors': ['get_config is not implemented']}),
                        headers={'Content-Type': 'application/json'})
@app.route('/results/metrics', methods=['POST'])
def get_metrics():
    """Mock POST /results/metrics: fabricate metrics for the requested kind.

    Depending on the request's `choice` (port/flow/bgpv4/bgpv6/isis), one
    canned metric entry is emitted per matching item in the stored CONFIG.
    """
    status = get_mockserver_status()
    global CONFIG
    if status in ["200", "200-warning"]:
        api = snappi.api()
        metrics_request = api.metrics_request()
        metrics_request.deserialize(request.data.decode('utf-8'))
        metrics_response = api.metrics_response()
        if metrics_request.choice == 'port':
            for metric in CONFIG.port_metrics:
                # Fixed fake counters for every port.
                metrics_response.port_metrics.metric(
                    name=metric['name'],
                    frames_tx=10000,
                    frames_rx=10000
                )
        elif metrics_request.choice == 'flow':
            for metric in CONFIG.flow_metrics:
                metrics_response.flow_metrics.metric(
                    name=metric['name'],
                    port_tx="P1",
                    port_rx="P2",
                    frames_tx=10000,
                    frames_rx=10000
                )
        elif metrics_request.choice == 'bgpv4':
            for metric in CONFIG.bgpv4_metrics:
                # session_state is echoed back from the mock config.
                metrics_response.bgpv4_metrics.metric(
                    name=metric['name'],
                    session_state=metric["session_state"],
                    session_flap_count=0,
                    routes_advertised=1000,
                    routes_received=500
                )
        elif metrics_request.choice == 'bgpv6':
            for metric in CONFIG.bgpv6_metrics:
                metrics_response.bgpv6_metrics.metric(
                    name=metric['name'],
                    session_state=metric["session_state"],
                    session_flap_count=0,
                    routes_advertised=1000,
                    routes_received=500
                )
        elif metrics_request.choice == 'isis':
            for metric in CONFIG.isis_metrics:
                metrics_response.isis_metrics.metric(
                    name=metric['name'],
                    l1_sessions_up=metric["l1_sessions_up"],
                )
        return Response(metrics_response.serialize(),
                        mimetype='application/json',
                        status=200)
    elif status == "400":
        return Response(status=400,
                        response=json.dumps(
                            {'errors': ['mock 400 get_metrics error']}),
                        headers={'Content-Type': 'application/json'})
    elif status == "500":
        return Response(status=500,
                        response=json.dumps(
                            {'errors': ['mock 500 get_metrics error']}),
                        headers={'Content-Type': 'application/json'})
    else:
        return Response(status=501,
                        response=json.dumps(
                            {'errors': ['get_metrics is not implemented']}),
                        headers={'Content-Type': 'application/json'})
@app.route('/results/states', methods=['POST'])
def get_states():
    """Mock POST /results/states: fabricate neighbor states.

    For ipv4_neighbors / ipv6_neighbors requests, one state entry with a
    fixed fake link-layer address is returned per neighbor in CONFIG.
    """
    status = get_mockserver_status()
    global CONFIG
    if status in ["200", "200-warning"]:
        api = snappi.api()
        states_request = api.states_request()
        states_request.deserialize(request.data.decode('utf-8'))
        flask_logger.info('get_status Request : [%s]', states_request)
        states_response = api.states_response()
        if states_request.choice == 'ipv4_neighbors':
            states_response.choice = 'ipv4_neighbors'
            for state in CONFIG.ipv4_neighbors:
                states_response.ipv4_neighbors.state(
                    ethernet_name=state['ethernet_name'],
                    ipv4_address=state['ipv4_address'],
                    link_layer_address="aa:bb:cc:dd:ee:ff"
                )
        elif states_request.choice == 'ipv6_neighbors':
            states_response.choice = 'ipv6_neighbors'
            for state in CONFIG.ipv6_neighbors:
                states_response.ipv6_neighbors.state(
                    ethernet_name=state['ethernet_name'],
                    ipv6_address=state['ipv6_address'],
                    link_layer_address="aa:bb:cc:dd:ee:ff"
                )
        flask_logger.info('get_status Responese : [%s]', states_response)
        return Response(states_response.serialize(),
                        mimetype='application/json',
                        status=200)
    elif status == "400":
        return Response(status=400,
                        response=json.dumps(
                            {'errors': ['mock 400 get_states error']}),
                        headers={'Content-Type': 'application/json'})
    elif status == "500":
        return Response(status=500,
                        response=json.dumps(
                            {'errors': ['mock 500 get_states error']}),
                        headers={'Content-Type': 'application/json'})
    else:
        return Response(status=501,
                        response=json.dumps(
                            {'errors': ['get_states is not implemented']}),
                        headers={'Content-Type': 'application/json'})
@app.after_request
def after_request(resp):
    """Trace every request/response pair to stdout for test debugging."""
    print(request.method, request.url, ' -> ', resp.status)
    return resp
def web_server():
    """Run the Flask app on a fixed port.

    The reloader is disabled so this can run inside a child process
    (see SnappiServer.start).
    """
    app.run(port=11020, debug=True, use_reloader=False)
class SnappiServer(object):
    """Runs the mock Flask REST server in a separate process."""

    def __init__(self):
        # NOTE(review): _CONFIG looks unused here — the route handlers use the
        # module-level CONFIG instead; confirm before removing.
        self._CONFIG = None

    def start(self):
        """Start the web server in a child process; block until it responds.

        Returns self so calls can be chained (e.g. SnappiServer().start()).
        """
        self._web_server_thread = multiprocessing.Process(
            target=web_server, args=())
        self._web_server_thread.start()
        self._wait_until_ready()
        return self

    def stop(self):
        """Terminate the server process."""
        self._web_server_thread.terminate()

    def _wait_until_ready(self):
        """Poll the /status endpoint until it reports 'up'."""
        while True:
            try:
                r = requests.get(url='http://127.0.0.1:11020/status')
                res = r.json()
                if res['status'] != 'up':
                    raise Exception('waiting for SnappiServer to be up')
                break
            except Exception as e:
                # Best-effort busy-wait: connection errors are expected until
                # the child process has bound the port.
                print(e)
                pass
            time.sleep(.1)
| StarcoderdataPython |
3382679 | <reponame>subhacom/mbnet
# kc_ggn_feedback_dclamp.py ---
# Author: <NAME>
# Created: Tue Aug 20 10:58:08 2019 (-0400)
# Last-Updated: Wed Dec 11 17:32:49 2019 (-0500)
# By: <NAME>
# Version: $Id$
# Code:
"""This script for testing expansion of the dynamic range of a KC due to GGN inhibition.
Instead of running a whole simulation in the full network, we play the
GGN membrane potential back to the KC.
We use GGN Vm from two simulations, one with low PN activity and
another with high PN activity.
"""
from __future__ import print_function
import os
import sys
sys.path += ['D:/subhasis_ggn/model/mb', 'D:/subhasis_ggn/model/mb/network', 'D:/subhasis_ggn/model/nrn']
import argparse
import numpy as np
from collections import defaultdict
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
import h5py as h5
from config import Q_, h, logger, timestamp, mypid, myjobid, nrn_version
from timeit import default_timer as timer
import ephys
import nrnutils as nu
import neurograph as ng
import nsdf
GGN_KC_SYN_PARAMS = {
'vmid': Q_('-40mV').to('mV').m,
'vslope': Q_('5.0mV').to('mV').m,
'e': Q_('-80mV').to('mV').m,
'gbar': Q_('1e-3uS').to('uS').m,
'tau': Q_('4.0ms').to('ms').m
}
# keep global reference of created model components so that they are
# not garbage collected when out of scope
model_dict = {}
def make_kc_with_dynaclamp(kc_name, kc_file, inject, tstart, tend, ggn_vm=None):
    """Read KC model from `kc_file`, inject current `inject` nA, apply
    dynamic clamp `ggn_vm`, which should be a 2D array with time (ms)
    in column 0, and voltage (mV) in column 1.

    Returns a tuple (kc_vm_vec, ggn_g_vec): the recorded KC somatic Vm
    vector and the recorded synaptic conductance vector (None when no
    ggn_vm was given).  All created NEURON objects are stored in the
    module-level model_dict so they are not garbage collected.
    """
    global model_dict
    kc = nu.create_cell(kc_name, filename=kc_file)
    model_dict[kc] = None
    # Current clamp on the soma for the [tstart, tend] window.
    iclamp = ephys.setup_current_clamp(kc.soma, pos=0.5, delay=Q_(tstart, 'ms'),
                                       duration=Q_((tend - tstart), 'ms'),
                                       amplitude=Q_(inject, 'nA'))
    model_dict[iclamp] = None
    ggn_g_vec = None
    if ggn_vm is not None:
        # Graded synapse from a dummy "GGN" section onto the KC soma,
        # parameterized by GGN_KC_SYN_PARAMS.
        syn = h.GradedSyn(kc.soma(0.5))
        for attr, value in GGN_KC_SYN_PARAMS.items():
            setattr(syn, attr, value)
        model_dict[syn] = None
        ggn_comp = h.Section('ggn')
        model_dict[ggn_comp] = None
        h.setpointer(ggn_comp(0.5)._ref_v, 'vpre', syn)
        # Play the recorded GGN Vm trace into the dummy section's voltage.
        ggn_vm_vec = h.Vector(ggn_vm[:, 1])
        tvec = h.Vector(ggn_vm[:, 0])
        model_dict[tvec] = None
        # vec.play(var_reference, t, continuous) for interpolating
        ret = ggn_vm_vec.play(ggn_comp(0.5)._ref_v, tvec, 1)
        print('####', ret)
        model_dict[ggn_vm_vec] = None
        ggn_g_vec = h.Vector()
        ggn_g_vec.record(syn._ref_g)
        model_dict[ggn_g_vec] = None
    kc_vm_vec = h.Vector()
    kc_vm_vec.record(kc.soma(0.5)._ref_v)
    model_dict[kc_vm_vec] = None
    return (kc_vm_vec, ggn_g_vec)
def make_parser():
    """Build the command-line parser for the KC/GGN dynamic-clamp sweep."""
    parser = argparse.ArgumentParser(
        description='Simulate KC with GGN inhibition at multiple current injections')
    # Required model inputs; --ggn-vm-file may be repeated and accumulates.
    parser.add_argument('--kc-file', required=True,
                        help='KC cell template file (.hoc)')
    parser.add_argument('--kc', required=True,
                        help='KC cell template name in template file')
    parser.add_argument('--ggn-vm-file', action='append', required=True,
                        help='CSV file with column 0 time in ms, column 1 GGN Vm in mV')
    # Sweep parameters: strings carrying units, converted later with Q_().
    parser.add_argument('--istart', help='Starting amplitude of current (with unit)')
    parser.add_argument('--iend', help='Ending amplitude of current (with unit)')
    parser.add_argument('--di', help='Current increments (with unit)')
    parser.add_argument('--tstart', help='Current injection start time (with unit)')
    parser.add_argument('--tend', help='Current injection end time (with unit)')
    return parser
def main():
    """Sweep current injections over a KC with/without GGN feedback.

    For each injection amplitude, the KC is simulated once per GGN Vm
    trace (dynamic clamp) and once without inhibition; traces are saved
    to .npz files and plotted in a grid (rows: amplitudes, columns:
    GGN Vm inputs; column 0 is the uninhibited KC).
    """
    parser = make_parser()
    args = parser.parse_args()
    logger.info('Command line args: {}'.format(str(sys.argv)))
    print(args.ggn_vm_file)
    # KCs with GGN inhibition
    inhibited_vec = defaultdict(list)
    solo_vec_list = []
    # Convert unit-tagged CLI strings to plain magnitudes (ms / nA).
    tstart = Q_(args.tstart).to('ms').m
    tend = Q_(args.tend).to('ms').m
    istart = Q_(args.istart).to('nA').m
    iend = Q_(args.iend).to('nA').m
    di = Q_(args.di).to('nA').m
    # + di/2.0 makes the end point inclusive despite float rounding.
    irange = np.arange(istart, iend + di/2.0, di)
    logger.info('Starting current: {} nA'.format(istart))
    logger.info('End current: {} nA'.format(iend))
    logger.info('Increment: {} nA'.format(di))
    logger.info('current range: {}'.format(irange))
    ggn_vm = {}
    for input_file in args.ggn_vm_file:
        ggn_vm[input_file] = np.loadtxt(input_file)
    for inject in irange:
        for input_file, vm in ggn_vm.items():
            kc_vvec, ggn_gvec = make_kc_with_dynaclamp(args.kc, args.kc_file, inject, tstart, tend, vm)
            inhibited_vec[input_file].append((kc_vvec, ggn_gvec))
        # KC without any inhibition
        kc_vvec, ggn_gvec = make_kc_with_dynaclamp(args.kc, args.kc_file, inject, tstart, tend)
        solo_vec_list.append(kc_vvec)
    tvec = h.Vector()
    tvec.record(h._ref_t)
    h.tstop = tend
    print('Init')
    h.init()
    print('Run')
    h.run()
    print('Finished simulation')
    fig, ax = plt.subplots(nrows=len(irange)+1, ncols=len(ggn_vm)+1, sharex='all', sharey='all')
    t = np.array(tvec.x)
    solo_data = []
    # Column 0: uninhibited KC traces, one row per injection amplitude.
    for ii, vvec in enumerate(solo_vec_list):
        ax[ii+1, 0].plot(tvec, vvec, color='#e66101')
        solo_data.append(np.array(vvec.x))
    combined = np.vstack(solo_data)
    prefix = 'UTC' + timestamp.strftime('%Y%m%d_%H%M%S')
    fname = '{}_solo_kc.npz'.format(prefix)
    np.savez(fname,
             t=t,
             vm=combined,
             inject=irange)
    logger.info('Saved solo KC data in {}'.format(fname))
    # One column (and one .npz file) per GGN Vm input file.
    for jj, input_file in enumerate(args.ggn_vm_file):
        fname = '{}_{}.npz'.format(prefix, os.path.basename(input_file))
        data = []
        kc_vm_list = inhibited_vec[input_file]
        for ii, (vvec, gvec) in enumerate(kc_vm_list):
            data.append(np.array(vvec.x))
            ax[ii+1, jj+1].plot(tvec, vvec, color='#e66101')
            ax[ii+1, 0].set_ylabel('{} pA'.format(irange[ii]*1e3))
            # ax[ii+1, 0].set_ylabel('{} pA'.format(int(np.round(irange[ii]*1e3)))) # to avoid decimal point when integer values
            # ax[0, jj+1].plot(tvec, gvec)
            # ax[0, jj+1].plot(ggn_vm[input_file][:,0], ggn_vm[input_file][:,1])
        ax[0, jj+1].set_title(input_file)
        combined = np.vstack(data)
        np.savez(fname, combined=combined, irange=irange, ggn_vm=ggn_vm[input_file])
        logger.info('Saved data from dynamic clamp with input from {} in {}'.format(
            input_file, fname))
    for axis in ax.flat:
        axis.set_xlim(250, 1750)
    # A4 size in inches.
    fig.set_size_inches(210/25.4, 290/25.4)
    fig.tight_layout()
    fig.savefig('{}_KC_dynamic_range_with_ggn_vm.svg'.format(prefix))
    plt.show()
    print('End')
main()
#
# kc_ggn_feedback_dclamp.py ends here
| StarcoderdataPython |
3209124 | <reponame>HarduinLearnsCoding/Pattern-Recognition
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#optimizing fn generation
def gauss(x, K, B, x0, stddev):
    """Offset Gaussian: K + B * exp(-(x - x0)^2 / (2 * stddev^2))."""
    z = (x - x0) / stddev
    return K + B * np.exp(-0.5 * z ** 2)
def gaussian_curve_fit(x, y):
    """Fit the offset-Gaussian model `gauss` to (x, y).

    The initial guess uses min(y) as the offset, max(y) as the amplitude,
    and the y-weighted mean/stddev of x as center/width.
    Returns the optimized parameter array (K, B, x0, stddev).
    """
    total = sum(y)
    center = sum(x * y) / total
    spread = np.sqrt(sum(y * (x - center) ** 2) / total)
    initial_guess = [min(y), max(y), center, spread]
    fitted_params, _covariance = curve_fit(gauss, x, y, p0=initial_guess)
    return fitted_params
# data creation
np.random.seed(104)  # fixed seed so the noisy sample is reproducible
xdata = np.linspace(3, 10, 100)
# Perfect gaussian: offset 20, amplitude 5, centered at 6, stddev 1.
ydata_perfect = gauss(xdata, 20, 5, 6, 1)
# Add unit-stddev Gaussian noise around the ideal curve.
ydata = np.random.normal(ydata_perfect, 1, 100)
# NOTE(review): these fitted parameters are unused below — the plot call
# re-runs gaussian_curve_fit on the same data.
K, B, x0, stddev = gaussian_curve_fit(xdata, ydata)
plt.plot(xdata, ydata, 'c', label='Noisy Data')
plt.plot(xdata, ydata_perfect, '-.k', label='Perfect Data')
plt.plot(xdata, gauss(xdata, *gaussian_curve_fit(xdata, ydata)), ':r', label='fit')
plt.legend()
plt.title('Gaussian fit')
plt.xlabel('X Data')
plt.ylabel('Y Data')
plt.show() | StarcoderdataPython |
106723 | import os
import json
from pathlib import Path
import pem
from Crypto.PublicKey import RSA
from jupyterhub.handlers import BaseHandler
from illumidesk.authenticators.utils import LTIUtils
from illumidesk.lti13.auth import get_jwk
from tornado import web
from urllib.parse import urlencode
from urllib.parse import quote
class LTI13ConfigHandler(BaseHandler):
    """
    Handles JSON configuration file for LTI 1.3
    """
    async def get(self) -> None:
        """
        Gets the JSON config which is used by LTI platforms
        to install the external tool.

        - The extensions key contains settings for specific vendors, such as canvas,
        moodle, edx, among others.
        - The tool uses public settings by default. Users that wish to install the tool with
        private settings should either copy/paste the json or toggle the application to private
        after it is installed with the platform.
        - Usernames are obtained by first attempting to get and normalize values sent when
        tools are installed with public settings. If private, the username is set using the
        anonymized user data when requests are sent with private installation settings.
        """
        lti_utils = LTIUtils()
        self.set_header('Content-Type', 'application/json')
        # get the origin protocol
        protocol = lti_utils.get_client_protocol(self)
        self.log.debug('Origin protocol is: %s' % protocol)
        # build the full target link url value required for the jwks endpoint
        target_link_url = f'{protocol}://{self.request.host}/'
        self.log.debug('Target link url is: %s' % target_link_url)
        # Static installation document served to the LMS platform; the
        # scopes list covers the LTI Advantage services plus Canvas-specific
        # extensions.
        keys = {
            'title': 'IllumiDesk',
            'scopes': [
                'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
                'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
                'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
                'https://purl.imsglobal.org/spec/lti-ags/scope/score',
                'https://purl.imsglobal.org/spec/lti-nrps/scope/contextmembership.readonly',
                'https://canvas.instructure.com/lti/public_jwk/scope/update',
                'https://canvas.instructure.com/lti/data_services/scope/create',
                'https://canvas.instructure.com/lti/data_services/scope/show',
                'https://canvas.instructure.com/lti/data_services/scope/update',
                'https://canvas.instructure.com/lti/data_services/scope/list',
                'https://canvas.instructure.com/lti/data_services/scope/destroy',
                'https://canvas.instructure.com/lti/data_services/scope/list_event_types',
                'https://canvas.instructure.com/lti/feature_flags/scope/show',
                'https://canvas.instructure.com/lti/account_lookup/scope/show',
            ],
            'extensions': [
                {
                    'platform': 'canvas.instructure.com',
                    'settings': {
                        'platform': 'canvas.instructure.com',
                        'placements': [
                            {
                                'placement': 'course_navigation',
                                'message_type': 'LtiResourceLinkRequest',
                                'windowTarget': '_blank',
                                'target_link_uri': target_link_url,
                                'custom_fields': {
                                    'email': '$Person.email.primary',
                                    'lms_user_id': '$User.id',
                                },  # noqa: E231
                            },
                            {
                                'placement': 'assignment_selection',
                                'message_type': 'LtiResourceLinkRequest',
                                'target_link_uri': target_link_url,
                            },
                        ],
                    },
                    'privacy_level': 'public',
                }
            ],
            'description': 'IllumiDesk Learning Tools Interoperability (LTI) v1.3 tool.',
            'custom_fields': {
                'email': '$Person.email.primary',
                'lms_user_id': '$User.id',
            },  # noqa: E231
            'public_jwk_url': f'{target_link_url}hub/lti13/jwks',
            'target_link_uri': target_link_url,
            'oidc_initiation_url': f'{target_link_url}hub/oauth_login',
        }
        self.write(json.dumps(keys))
class LTI13JWKSHandler(BaseHandler):
    """
    Handler to serve our JWKS (JSON Web Key Set) derived from the tool's
    RSA private key.
    """
    def get(self) -> None:
        """
        Serve the public JWKS document.

        - This method requires that the LTI13_PRIVATE_KEY environment variable
        is set with the full path to the RSA private key in PEM format.

        Raises EnvironmentError when the variable is unset and
        PermissionError when the key file is not readable.
        """
        if not os.environ.get('LTI13_PRIVATE_KEY'):
            raise EnvironmentError('LTI13_PRIVATE_KEY environment variable not set')
        key_path = os.environ.get('LTI13_PRIVATE_KEY')
        # check the pem permission
        if not os.access(key_path, os.R_OK):
            self.log.error(f'The pem file {key_path} cannot be load')
            raise PermissionError()
        private_key = pem.parse_file(key_path)
        # Derive the public key from the first PEM object in the file.
        public_key = RSA.import_key(private_key[0].as_text()).publickey().exportKey()
        self.log.debug('public_key is %s' % public_key)
        jwk = get_jwk(public_key)
        self.log.debug('the jwks is %s' % jwk)
        keys_obj = {'keys': []}
        keys_obj['keys'].append(jwk)
        # we do not need to use json.dumps because tornado is converting our dict automatically and adding the content-type as json
        # https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write
        self.write(keys_obj)
class FileSelectHandler(BaseHandler):
    """Serves a notebook picker for LTI content-item selection."""

    @web.authenticated
    async def get(self):
        """Render a sorted list of notebooks recursively found in the course's
        shared folder, each wrapped as an LTI ``LtiLinkItem`` whose URL pulls
        the file into the user's workspace via git-pull.
        """
        user = self.current_user
        auth_state = await user.get_auth_state()
        self.log.debug('Current user for file select handler is %s' % user.name)
        self.course_id = auth_state['course_id']
        self.grader_name = f'grader-{self.course_id}'
        self.grader_root = Path(
            '/home',
            self.grader_name,
        )
        self.course_root = self.grader_root / self.course_id
        self.course_shared_folder = Path('/shared', self.course_id)
        link_item_files = []
        # Sort for a deterministic listing order.
        notebooks = list(self.course_shared_folder.glob('**/*.ipynb'))
        notebooks.sort()
        for f in notebooks:
            fpath = str(f.relative_to(self.course_shared_folder))
            self.log.debug('Getting files fpath %s' % fpath)
            # Skip hidden files and files under hidden directories.
            if fpath.startswith('.') or f.name.startswith('.'):
                self.log.debug('Ignoring file %s' % fpath)
                continue
            # generate the assignment link that uses gitpuller
            user_redirect_path = quote('/user-redirect/git-pull', safe='')
            assignment_link_path = f'?next={user_redirect_path}'
            urlpath_workspace = f'tree/{self.course_id}/{fpath}'
            self.log.debug(f'urlpath_workspace:{urlpath_workspace}')
            query_params_for_git = [
                ('repo', f'/home/jovyan/shared/{self.course_id}'),
                ('branch', 'master'),
                ('urlpath', urlpath_workspace),
            ]
            # Fully escape the git-pull query so it survives being nested
            # inside the outer ?next= parameter.
            encoded_query_params_without_safe_chars = quote(urlencode(query_params_for_git), safe='')
            url = f'https://{self.request.host}/{assignment_link_path}?{encoded_query_params_without_safe_chars}'
            self.log.debug('URL to fetch files is %s' % url)
            link_item_files.append(
                {
                    'path': fpath,
                    'content_items': json.dumps(
                        {
                            "@context": "http://purl.imsglobal.org/ctx/lti/v1/ContentItem",
                            "@graph": [
                                {
                                    "@type": "LtiLinkItem",
                                    "@id": url,
                                    "url": url,
                                    "title": f.name,
                                    "text": f.name,
                                    "mediaType": "application/vnd.ims.lti.v1.ltilink",
                                    "placementAdvice": {"presentationDocumentTarget": "frame"},
                                }
                            ],
                        }
                    ),
                }
            )
        self.log.debug('Rendering file-select.html template')
        html = self.render_template(
            'file_select.html',
            files=link_item_files,
            action_url=auth_state['launch_return_url'],
        )
        self.finish(html)
| StarcoderdataPython |
class NCBaseError(Exception):
    """Base class for the package's custom exceptions."""

    def __init__(self, message) -> None:
        super().__init__(message)
class DataTypeMismatchError(Exception):
    """Raised when a value's runtime type does not match the required type."""

    def __init__(self, provided_data, place: str = None, required_data_type: str = None) -> None:
        actual_type = type(provided_data).__name__
        message = (
            f"{provided_data} datatype isn't supported for {place}.\n"
            f"Required datatype is: {required_data_type}, got: {actual_type}"
        )
        super().__init__(message)
class InsufficientArgumentsError(Exception):
    """Raised when too few arguments were supplied for an operation."""

    def __init__(self, message):
        # Prefix the caller's detail message with a generic header.
        super().__init__(f"Insufficient arguments.\n{message}")
class InvalidArgumentsError(Exception):
    """Raised when arguments supplied to an operation are invalid."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
class DirectoryAlreadyExistsError(Exception):
    """Raised when the target project directory already exists."""

    def __init__(self, project_dir):
        super().__init__(f"{project_dir} already exists at the location.")
class ImportNameNotFoundError(Exception):
    """Raised when a sister app's configuration lacks an ``import_name``."""

    def __init__(self, location) -> None:
        # Fixed typo in the original message ("notm" -> "not").
        message = f"import_name not provided for the sister app at {location}"
        super(ImportNameNotFoundError, self).__init__(message)
class ConfigurationError(Exception):
    """Raised for invalid or inconsistent configuration."""

    def __init__(self, message) -> None:
        super().__init__(message)
1681211 | <reponame>nagylzs/python-venus-lib
"""PostgreSQL database adapter package.
Uses the psycopg2 extension."""
import copy
import getpass
import os
import re
import sys
import psycopg2
from venus.db.dbo import connection
# http://www.postgresql.org/docs/9.2/static/libpq-pgpass.html
_PGPASS_PAT = re.compile(r"([^:]+):([^:]+):([^:]+):([^:]+):([^:]+)")
_CONNSTRING_PATTERN = pat = re.compile(r"([^=]+)=([^\s]*)")
def _read_pgpass():
"""Read valid lines from the user's .pgpass file."""
if sys.platform == "win32":
fpath = os.path.join(
os.environ["APPDATA"], "postgresql",
"pgpass.conf")
elif "PGPASSFILE" in os.environ:
fpath = os.environ["PGPASSFILE"]
elif "HOME" in os.environ:
fpath = os.path.join(os.environ["HOME"], ".pgpass")
else:
fpath = None
if fpath and os.path.isfile(fpath):
for line in open(fpath, "r").readlines():
hit = _PGPASS_PAT.match(line.strip())
if hit:
yield hit.groups()
def _get_pgpass(params):
    """Find a password for the given connection parameters.

    The params parameter MUST be a dict!"""
    def matches(name, pattern, default=""):
        # A '*' entry in .pgpass matches any value for that field.
        return pattern == "*" or pattern == str(params.get(name, default))

    for host, port, database, user, password in _read_pgpass():
        if (matches("host", host)
                and matches("port", port, "5432")
                and matches("database", database)
                and matches("user", user)):
            return password
class Connection(connection.Connection):
    """Postgresql database adapter."""

    @classmethod
    def decode_connection_string(cls, connection_string):
        """Parse a db driver specific connection string into (args, kwargs).

        This version works with PostgreSQL, and it tries to use the .pgpass file (when available).
        The connection string parameters are parsed into keywords arguments.

        :param connection_string: Connection string to be parsed into constructor parameters.
        :return: A tuple of (args,kwargs) that can be passed directly to the DB API 2.0 compilant module's
            connect method.
        """
        # return ((connection_string,),{}) # Use this to disable .pgpass support and defaults.
        global _CONNSTRING_PATTERN
        res = {}
        # NOTE(review): the loop rebinds `connection_string` to each matched
        # value, shadowing the parameter; harmless here, but confusing to read.
        for item in connection_string.split():
            hit = _CONNSTRING_PATTERN.match(item.strip())
            if hit:
                name, connection_string = hit.groups()
                res[name] = connection_string
        return ((), res)

    @classmethod
    def create_factory(cls, *params, **kwparams):
        """Create and return a database connection factory object.

        When called, it returns a new Connection instance.
        When connection parameters given as keywords arguments and password
        is not given, then it tries to read the user's .pgpass file and
        find a password.
        When connection parameters are given as positional arguments (e.g. dsn) then they are used as is.

        Example:

            factory = venus.db.dbo.adapter.postgresql.create_factory(
                host='127.0.0.1',database='template1',
                user='my_user',password='<PASSWORD>')
            # Now, create two connection objects.
            conn1 = factory()
            conn2 = factory()
        """
        if kwparams:
            # Work on a copy so the caller's dict is never mutated.
            kwparams = copy.deepcopy(kwparams)
            if "user" not in kwparams:
                kwparams["user"] = getpass.getuser()
            if "database" not in kwparams:
                kwparams["database"] = kwparams["user"]
            if "password" not in kwparams:
                # Fall back to the user's .pgpass file for the password.
                password = _get_pgpass(kwparams)
                if password:
                    kwparams['password'] = password

        def factory():
            # Each call opens a fresh psycopg2 connection wrapped in this
            # adapter, defaulted to READ COMMITTED isolation.
            lowlevel = psycopg2.connect(*params, **kwparams)
            conn = cls(psycopg2, lowlevel)
            trans_id = conn.starttransaction()
            conn.execsql("set transaction isolation level read committed")
            conn.committransaction(trans_id)
            return conn

        return factory

    # Max length of identifiers in SQL.
    max_identifier_length = 63

    # Map that converts logical types to physical types.
    #
    # Possible fields:
    #
    #   nativename - preferred native name of the type
    #   names - other aliases understood by the database (optional)
    #   need_size - set True if the field needs a size (optional)
    #   need_precision - set True if the field needs a precision (optional)
    #
    # http://www.postgresql.org/docs/9.2/static/datatype.html
    typemap = [
        # Numeric types.
        # http://www.postgresql.org/docs/9.2/static/datatype-numeric.html
        {"nativename": "smallint", "names": {"smallint", "int2"}, },
        {"nativename": "integer", "names": {"integer", "int4"}, },
        {"nativename": "bigint",
         "names": {"bigint", "int8", "identifier"}, },
        {"nativename": "numeric", "names": {"numeric", "decimal"},
         "need_size": True, "need_precision": True, },
        {"nativename": "real", "names": {"real", "single", "float4"}, },
        {"nativename": "double precision",
         "names": {"double", "double precision", "float8"}, },
        # Character types
        # http://www.postgresql.org/docs/8.2/static/datatype-character.html
        {"nativename": "text", },
        {"nativename": "varchar",
         "names": {"varchar", "character varying"},
         "need_size": True, },
        # Binary (flat blob) types
        # http://www.postgresql.org/docs/9.2/static/datatype-binary.html
        {"nativename": "bytea", "names": {"blob", "bytea"}, },
        # Date/Time types
        # http://www.postgresql.org/docs/9.2/static/datatype-datetime.html
        {"nativename": "timestamp",
         "names": {"timestamp without time zone", "timestamp"}, },
        {"nativename": "timestamptz",
         "names": {"timestamp with time zone", "timestamptz"}, },
        {"nativename": "date", },
        {"nativename": "time",
         "names": {"time without time zone", "time"}},
        {"nativename": "timetz",
         "names": {"time with time zone", "timetz"}},
        {"nativename": "interval", },
        # Boolean types
        # http://www.postgresql.org/docs/9.2/static/datatype-boolean.html
        {"nativename": "boolean", },
        # Geometric types
        {"nativename": "point", },
        {"nativename": "line", },
        {"nativename": "lseg", },
        {"nativename": "box", },
        {"nativename": "path", },
        {"nativename": "polygon", },
        {"nativename": "circle", },
        # Network address types
        # http://www.postgresql.org/docs/9.2/static/datatype-net-types.html
        {"nativename": "cidr", },
        {"nativename": "inet", },
        {"nativename": "macaddr", },
        # TODO: add bitstring and text search types.
        # How is it supported by psycopg2?
        # UUID type
        # http://www.postgresql.org/docs/9.2/static/datatype-uuid.html
        {"nativename": "uuid", },
        # TODO: add XML type. How is it supported by psycopg2?
        # JSON type
        # http://www.postgresql.org/docs/9.2/static/datatype-json.html
        {"nativename": "json", },
        {"nativename": "jsonb", },
        {"nativename": "bigint[]", "names": {"int8[]", "bigint[]"}, },
    ]

    # Existence methods.
    # To explore use these:
    #
    #   select table_name from information_schema.tables
    #   where table_schema='information_schema' order by 1
    #
    # and
    #   select table_name from information_schema.tables where
    #   table_name ilike 'pg_%'
    #
    def schema_exists(self, schemaname):
        """Tells if the given schema exists."""
        return bool(self.getqueryvalue("""select oid from pg_namespace
            where nspname=%s""", [schemaname]))

    def table_exists(self, schemaname, tablename):
        """Tells if the given table exists."""
        # NOTE(review): schema/table names are lower()-ed on both sides, so
        # the check is case-insensitive by design.
        return bool(self.getqueryvalue("""
            select table_type
            from information_schema.tables
            where
                table_catalog=current_database()
                and table_schema=lower(%s)
                and table_name=lower(%s)
            """, [schemaname, tablename]))

    def column_exists(self, schemaname, tablename, columname):
        """Tells if the given column exists."""
        return bool(self.getqueryvalue("""
            select column_name from information_schema.columns
            where
                table_catalog=current_database()
                and table_schema=lower(%s)
                and table_name=lower(%s)
                and column_name=lower(%s)
            """, [schemaname, tablename, columname]))

    def index_exists(self, schemaname, tablename, indexname):
        """Tells if the given index exists."""
        # TODO: create a method to list index fields, e.g. a.attname as column_name
        return bool(self.getqueryvalue("""
            select t.oid
            from
                pg_catalog.pg_namespace s,
                pg_class t,
                pg_class i,
                pg_index ix,
                pg_attribute a
            where
                t.oid = ix.indrelid
                and i.oid = ix.indexrelid
                and a.attrelid = t.oid
                and a.attnum = ANY(ix.indkey)
                and t.relkind = 'r'
                and s.oid = t.relnamespace
                and lower(s.nspname)=%s
                and lower(t.relname)=%s
                and lower(i.relname)=%s
            """, [schemaname, tablename, indexname]))
# Register this adapter under the "postgresql" driver name.
connection.DATABASE_DRIVERS["postgresql"] = Connection

if __name__ == '__main__':
    # Manual smoke test: build a factory and open one connection.
    test_factory = Connection.create_factory(
        host='127.0.0.1',
        database='template1', user='postgres', password='<PASSWORD>')
    # <venus.db.dbo.connection.Connection object at 0x939a7ec>
    print(test_factory())
| StarcoderdataPython |
182372 | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from carla.settings import CarlaSettings
class Experiment(object):
    """One benchmark experiment: CARLA settings, start/end poses and repetitions."""

    def __init__(self):
        self.Id = ''
        self.Conditions = CarlaSettings()
        self.Poses = [[]]
        self.Repetitions = 1

    def set(self, **kwargs):
        """Override existing attributes; unknown keys raise ValueError."""
        for name, value in kwargs.items():
            # Only attributes created in __init__ may be overridden.
            if not hasattr(self, name):
                raise ValueError('Experiment: no key named %r' % name)
            setattr(self, name, value)

    @property
    def id(self):
        return self.Id

    @property
    def conditions(self):
        return self.Conditions

    @property
    def poses(self):
        return self.Poses

    @property
    def repetitions(self):
        return self.Repetitions
| StarcoderdataPython |
3260952 | <filename>testpy/testnumpy.py
import numpy as np

# Fixed: the original used Python 2 `print` statements, which are a syntax
# error under Python 3. `print(...)` works on both lines of the language.
x = np.int_([1, 2])
y = np.int_([[2, 4, 6],
             [3, 3, 3]])
# Matrix product of a length-2 vector with a 2x3 matrix -> length-3 vector.
print(np.dot(x, y))

z = np.zeros((2, 3))
print(z)
| StarcoderdataPython |
3284643 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import errno
import os
import shutil
import zipfile
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from textwrap import dedent
from pex import pex_warnings
from pex.common import chmod_plus_x, pluralize, safe_mkdir
from pex.environment import PEXEnvironment
from pex.pex import PEX
from pex.tools.command import Command, Error, Ok, Result
from pex.tools.commands.virtualenv import PipUnavailableError, Virtualenv
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING
from pex.venv_bin_path import BinPath
if TYPE_CHECKING:
from typing import Iterable, Iterator, Optional, Tuple
# N.B.: We can't use shutil.copytree since we copy from multiple source locations to the same site
# packages directory destination. Since we're forced to stray from the stdlib here, support for
# hardlinks is added to provide a measurable speed up and disk space savings when possible.
def _copytree(
    src,  # type: str
    dst,  # type: str
    exclude=(),  # type: Tuple[str, ...]
):
    # type: (...) -> Iterator[Tuple[str, str]]
    # Copy the tree rooted at `src` into `dst`, yielding a (src, dst) pair
    # for every file placed. `exclude` is applied only at the TOP level of
    # `src`. Hardlinks are attempted first (speed + disk savings); after the
    # first cross-device failure (EXDEV) we fall back to real copies for the
    # rest of the walk.
    safe_mkdir(dst)
    link = True
    for root, dirs, files in os.walk(src, topdown=True, followlinks=False):
        if src == root:
            # Prune excluded entries; mutating `dirs` in-place stops os.walk
            # from descending into them.
            dirs[:] = [d for d in dirs if d not in exclude]
            files[:] = [f for f in files if f not in exclude]
        for d in dirs:
            try:
                os.mkdir(os.path.join(dst, os.path.relpath(os.path.join(root, d), src)))
            except OSError as e:
                # Destination dir may already exist from a prior source tree.
                if e.errno != errno.EEXIST:
                    raise e
        for f in files:
            src_entry = os.path.join(root, f)
            dst_entry = os.path.join(dst, os.path.relpath(src_entry, src))
            yield src_entry, dst_entry
            try:
                # We only try to link regular files since linking a symlink on Linux can produce
                # another symlink, which leaves open the possibility the src_entry target could
                # later go missing leaving the dst_entry dangling.
                if link and not os.path.islink(src_entry):
                    try:
                        os.link(src_entry, dst_entry)
                        continue
                    except OSError as e:
                        if e.errno != errno.EXDEV:
                            raise e
                        link = False
                shutil.copy(src_entry, dst_entry)
            except OSError as e:
                # EEXIST collisions are tolerated here; callers detect true
                # collisions via the yielded provenance pairs.
                if e.errno != errno.EEXIST:
                    raise e
class CollisionError(Exception):
    """Raised when two or more distributions supply the same file while a PEX
    is being merged into a venv."""
def populate_venv_with_pex(
    venv,  # type: Virtualenv
    pex,  # type: PEX
    bin_path=BinPath.FALSE,  # type: BinPath.Value
    python=None,  # type: Optional[str]
    collisions_ok=True,  # type: bool
):
    # type: (...) -> str
    # Explode a PEX (zip or loose dir) into an existing venv, install a
    # __main__.py launcher, and re-point scripts at the venv python.
    # Returns the shebang line written into the launcher.
    venv_python = python or venv.interpreter.binary
    venv_bin_dir = os.path.dirname(python) if python else venv.bin_dir
    venv_dir = os.path.dirname(venv_bin_dir) if python else venv.venv_dir

    # 1. Populate the venv with the PEX contents.
    # `provenance` maps each destination file to every source that supplied
    # it, so multi-source collisions can be reported precisely.
    provenance = defaultdict(list)

    def record_provenance(src_to_dst):
        # type: (Iterable[Tuple[str, str]]) -> None
        for src, dst in src_to_dst:
            provenance[dst].append(src)

    pex_info = pex.pex_info()
    if zipfile.is_zipfile(pex.path()):
        record_provenance(
            PEXEnvironment(pex.path()).explode_code(
                venv.site_packages_dir, exclude=("__main__.py", pex_info.PATH)
            )
        )
    else:
        record_provenance(
            _copytree(
                src=pex.path(),
                dst=venv.site_packages_dir,
                exclude=(pex_info.internal_cache, pex_info.bootstrap, "__main__.py", pex_info.PATH),
            )
        )
    with open(os.path.join(venv.venv_dir, pex_info.PATH), "w") as fp:
        fp.write(pex_info.dump())
    for dist in pex.resolve():
        record_provenance(
            _copytree(src=dist.location, dst=venv.site_packages_dir, exclude=("bin",))
        )
        dist_bin_dir = os.path.join(dist.location, "bin")
        if os.path.isdir(dist_bin_dir):
            record_provenance(_copytree(dist_bin_dir, venv.bin_dir))

    # Any destination supplied by more than one source is a collision; raise
    # or warn depending on `collisions_ok`.
    collisions = {dst: srcs for dst, srcs in provenance.items() if len(srcs) > 1}
    if collisions:
        message_lines = [
            "Encountered {collision} building venv at {venv_dir} from {pex}:".format(
                collision=pluralize(collisions, "collision"), venv_dir=venv_dir, pex=pex.path()
            )
        ]
        for index, (dst, srcs) in enumerate(collisions.items(), start=1):
            message_lines.append(
                "{index}. {dst} was provided by:\n\t{srcs}".format(
                    index=index, dst=dst, srcs="\n\t".join(srcs)
                )
            )
        message = "\n".join(message_lines)
        if not collisions_ok:
            raise CollisionError(message)
        pex_warnings.warn(message)

    # 2. Add a __main__ to the root of the venv for running the venv dir like a loose PEX dir
    # and a main.py for running as a script.
    # The template below is rendered with str.format: `{{ }}` are literal
    # braces in the generated script, `\\n` renders as a literal \n escape.
    # The generated script re-execs itself under the venv python if launched
    # by any other interpreter.
    shebang = "#!{} -sE".format(venv_python)
    main_contents = dedent(
        """\
        {shebang}

        if __name__ == "__main__":
            import os
            import sys

            venv_dir = os.path.abspath(os.path.dirname(__file__))
            venv_bin_dir = os.path.join(venv_dir, "bin")
            shebang_python = {shebang_python!r}
            python = os.path.join(venv_bin_dir, os.path.basename(shebang_python))

            def iter_valid_venv_pythons():
                # Allow for both the known valid venv pythons and their fully resolved venv path
                # version in the case their parent directories contain symlinks.
                for python_binary in (python, shebang_python):
                    yield python_binary
                    yield os.path.join(
                        os.path.realpath(os.path.dirname(python_binary)),
                        os.path.basename(python_binary)
                    )

            current_interpreter_blessed_env_var = "_PEX_SHOULD_EXIT_VENV_REEXEC"
            if (
                not os.environ.pop(current_interpreter_blessed_env_var, None)
                and sys.executable not in tuple(iter_valid_venv_pythons())
            ):
                sys.stderr.write("Re-execing from {{}}\\n".format(sys.executable))
                os.environ[current_interpreter_blessed_env_var] = "1"
                os.execv(python, [python, "-sE"] + sys.argv)

            os.environ["VIRTUAL_ENV"] = venv_dir
            sys.path.extend(os.environ.get("PEX_EXTRA_SYS_PATH", "").split(os.pathsep))

            bin_path = os.environ.get("PEX_VENV_BIN_PATH", {bin_path!r})
            if bin_path != "false":
                PATH = os.environ.get("PATH", "").split(os.pathsep)
                if bin_path == "prepend":
                    PATH.insert(0, venv_bin_dir)
                elif bin_path == "append":
                    PATH.append(venv_bin_dir)
                else:
                    sys.stderr.write(
                        "PEX_VENV_BIN_PATH must be one of 'false', 'prepend' or 'append', given: "
                        "{{!r}}\\n".format(
                            bin_path
                        )
                    )
                    sys.exit(1)
                os.environ["PATH"] = os.pathsep.join(PATH)

            PEX_EXEC_OVERRIDE_KEYS = ("PEX_INTERPRETER", "PEX_SCRIPT", "PEX_MODULE")
            pex_overrides = {{
                key: os.environ.get(key) for key in PEX_EXEC_OVERRIDE_KEYS if key in os.environ
            }}
            if len(pex_overrides) > 1:
                sys.stderr.write(
                    "Can only specify one of {{overrides}}; found: {{found}}\\n".format(
                        overrides=", ".join(PEX_EXEC_OVERRIDE_KEYS),
                        found=" ".join("{{}}={{}}".format(k, v) for k, v in pex_overrides.items())
                    )
                )
                sys.exit(1)
            if {strip_pex_env!r}:
                for key in list(os.environ):
                    if key.startswith("PEX_"):
                        del os.environ[key]

            pex_script = pex_overrides.get("PEX_SCRIPT")
            if pex_script:
                script_path = os.path.join(venv_bin_dir, pex_script)
                os.execv(script_path, [script_path] + sys.argv[1:])

            pex_interpreter = pex_overrides.get("PEX_INTERPRETER", "").lower() in ("1", "true")
            PEX_INTERPRETER_ENTRYPOINT = "code:interact"
            entry_point = (
                PEX_INTERPRETER_ENTRYPOINT
                if pex_interpreter
                else pex_overrides.get("PEX_MODULE", {entry_point!r} or PEX_INTERPRETER_ENTRYPOINT)
            )
            if entry_point == PEX_INTERPRETER_ENTRYPOINT and len(sys.argv) > 1:
                args = sys.argv[1:]
                arg = args[0]
                if arg == "-m":
                    if len(args) < 2:
                        sys.stderr.write("Argument expected for the -m option\\n")
                        sys.exit(2)
                    entry_point = module = args[1]
                    sys.argv = args[1:]
                    # Fall through to entry_point handling below.
                else:
                    filename = arg
                    sys.argv = args
                    if arg == "-c":
                        if len(args) < 2:
                            sys.stderr.write("Argument expected for the -c option\\n")
                            sys.exit(2)
                        filename = "-c <cmd>"
                        content = args[1]
                        sys.argv = ["-c"] + args[2:]
                    elif arg == "-":
                        content = sys.stdin.read()
                    else:
                        with open(arg) as fp:
                            content = fp.read()

                    ast = compile(content, filename, "exec", flags=0, dont_inherit=1)
                    globals_map = globals().copy()
                    globals_map["__name__"] = "__main__"
                    globals_map["__file__"] = filename
                    locals_map = globals_map
                    {exec_ast}
                    sys.exit(0)

            module_name, _, function = entry_point.partition(":")
            if not function:
                import runpy
                runpy.run_module(module_name, run_name="__main__", alter_sys=True)
            else:
                import importlib
                module = importlib.import_module(module_name)
                # N.B.: Functions may be hung off top-level objects in the module namespace,
                # e.g.: Class.method; so we drill down through any attributes to the final function
                # object.
                namespace, func = module, None
                for attr in function.split("."):
                    func = namespace = getattr(namespace, attr)
                sys.exit(func())
        """.format(
            shebang=shebang,
            shebang_python=venv_python,
            bin_path=bin_path,
            strip_pex_env=pex_info.strip_pex_env,
            entry_point=pex_info.entry_point,
            exec_ast=(
                "exec ast in globals_map, locals_map"
                if venv.interpreter.version[0] == 2
                else "exec(ast, globals_map, locals_map)"
            ),
        )
    )
    with open(venv.join_path("__main__.py"), "w") as fp:
        fp.write(main_contents)
    chmod_plus_x(fp.name)
    # "pex" is a convenience symlink to the launcher.
    os.symlink(os.path.basename(fp.name), venv.join_path("pex"))

    # 3. Re-write any (console) scripts to use the venv Python.
    for script in venv.rewrite_scripts(python=venv_python, python_args="-sE"):
        TRACER.log("Re-writing {}".format(script))

    return shebang
class Venv(Command):
    """Creates a venv from the PEX file."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        # CLI surface for `pex-tools ... venv`.
        parser.add_argument(
            "venv",
            nargs=1,
            metavar="PATH",
            help="The directory to create the virtual environment in.",
        )
        parser.add_argument(
            "-b",
            "--bin-path",
            choices=[choice.value for choice in BinPath.values],
            default=BinPath.FALSE.value,
            help="Add the venv bin dir to the PATH in the __main__.py script.",
        )
        parser.add_argument(
            "-f",
            "--force",
            action="store_true",
            default=False,
            help="If the venv directory already exists, overwrite it.",
        )
        parser.add_argument(
            "--collisions-ok",
            action="store_true",
            default=False,
            help=(
                "Don't error if population of the venv encounters distributions in the PEX file "
                "with colliding files, just emit a warning."
            ),
        )
        parser.add_argument(
            "-p",
            "--pip",
            action="store_true",
            default=False,
            help="Add pip to the venv.",
        )
        parser.add_argument(
            "--copies",
            action="store_true",
            default=False,
            help="Create the venv using copies of system files instead of symlinks",
        )
        parser.add_argument(
            "--compile",
            action="store_true",
            default=False,
            help="Compile all `.py` files in the venv.",
        )

    def run(
        self,
        pex,  # type: PEX
        options,  # type: Namespace
    ):
        # type: (...) -> Result
        # Create the venv, pour the PEX into it, then apply optional extras
        # (pip install, bytecode compilation).
        venv_dir = options.venv[0]
        venv = Virtualenv.create(
            venv_dir, interpreter=pex.interpreter, force=options.force, copies=options.copies
        )
        populate_venv_with_pex(
            venv,
            pex,
            bin_path=BinPath.for_value(options.bin_path),
            collisions_ok=options.collisions_ok,
        )
        if options.pip:
            try:
                venv.install_pip()
            except PipUnavailableError as e:
                # The venv itself is usable; report the pip failure as an Error result.
                return Error(
                    "The virtual environment was successfully created, but Pip was not "
                    "installed:\n{}".format(e)
                )
        if options.compile:
            pex.interpreter.execute(["-m", "compileall", venv_dir])
        return Ok()
| StarcoderdataPython |
4826328 | <reponame>154544017/PetrarchChineseServer
# -*- coding: utf-8 -*-
from resource import db
class AnalycisEventResultSubThread(db.Model):
    # Event classification result produced by an analysis sub-thread.
    # NOTE(review): the class name misspells "Analysis"; renaming would break
    # existing imports, so it is left as-is.
    id = db.Column(db.Integer, primary_key=True)
    # Identifier of the analysed text.
    text_id = db.Column(db.String(255))
    recall_rate = db.Column(db.DECIMAL)
    accuracy_rate = db.Column(db.DECIMAL)
    # Number of events detected.
    event_num = db.Column(db.Integer)
    # Serialized event payload (format not visible here — presumably JSON; confirm).
    event_result = db.Column(db.Text)

    def as_dict(self):
        # Map each column name to the row's current attribute value.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

    def __repr__(self):
        return '<Analysis_Event_Result {}>'.format(self.id)
| StarcoderdataPython |
139376 | <filename>sp/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SPConfig(AppConfig):
    # Django app configuration for the SAML Service Provider ("sp") app.
    name = "sp"
    verbose_name = _("SAML SP")
| StarcoderdataPython |
3307989 | import scrapy
from rt.items import *
import re
class PersonSpider(scrapy.Spider):
    """Scrapes a Rotten Tomatoes celebrity page into a Person item.

    Fixes over the original: bare ``except:`` clauses (which also swallow
    KeyboardInterrupt/SystemExit) narrowed to ``except Exception``, and the
    duplicated role-flag loop over ``li``/``em`` credits factored into
    :meth:`_mark_roles`.
    """
    name = 'person'
    allowed_domains = ['rottentomatoes.com']
    start_urls = ['https://www.rottentomatoes.com/celebrity/ben_affleck']

    def parse(self, response):
        """Parse a celebrity page and yield a populated Person item."""
        person = Person()
        # All role flags default to "not credited".
        for flag in ('Screenwriter', 'Director', 'Actor', 'Producer', 'ExecutiveProducer'):
            person[flag] = 0
        person["url"] = re.findall(r'\/celebrity\S*', response.url)[0]
        person["name"] = response.css("div.celeb_name h1::text").extract_first()
        person["birthday"] = response.css("div.celeb_bio_row time::attr('datetime')").extract_first()
        try:
            person["birthplace"] = re.findall(r'\S+.*', response.css("div.celeb_bio div.celeb_bio_row ::text").extract()[-1])[0]
        except Exception:
            # Page-layout variants: best-effort, fall back to empty.
            person["birthplace"] = ""
        try:
            person['bio'] = response.css("div.celeb_bio div.celeb_summary_bio ::text").extract_first()
        except Exception:
            person['bio'] = ""
        person['photo_url'] = re.findall(r'(http\S*)\)', response.css('div.celebHeroImage::attr("style")').extract_first())[0]
        for tr in response.css("table#filmographyTbl").css('tr'):
            try:
                td = tr.css('td')[2]
                labels = td.css('li::text').extract() + td.css('em::text').extract()
                self._mark_roles(person, labels)
            except Exception:
                # Rows without a third cell (e.g. header rows) are skipped.
                pass
        # Normalize missing optional fields to empty strings.
        for key in ('photo_url', 'bio', 'birthplace', 'birthday'):
            if person[key] is None:
                person[key] = ""
        yield person

    @staticmethod
    def _mark_roles(person, labels):
        """Set role flags on *person* for each filmography credit label."""
        for label in labels:
            if "Screenwriter" in label:
                person["Screenwriter"] = 1
            elif "Director" in label:
                person["Director"] = 1
            elif "Executive Producer" in label:
                person["ExecutiveProducer"] = 1
            elif "Producer" in label:
                person["Producer"] = 1
            else:
                person["Actor"] = 1
1769918 | <filename>artifact_py/completion.py
# artifact_py: the design documentation tool made for everyone.
#
# Copyright (C) 2019 <NAME> <github.com/vitiral>
#
# The source code is Licensed under either of
#
# * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
#
# at your option.
#
# Unless you explicitly state otherwise, any contribution intentionally submitted
# for inclusion in the work by you, as defined in the Apache-2.0 license, shall
# be dual licensed as above, without any additional terms or conditions.
from __future__ import unicode_literals, division
"""
For types and methods associated with the completion ratio of artifacts.
"""
import re
from . import utils
from . import name
from . import code
class Completion(utils.KeyCmp):
    """The completion ratio of an artifact: spc (specified) and tst (tested)."""

    def __init__(self, spc, tst):
        # Comparison key is the (spc, tst) pair, via utils.KeyCmp.
        super(Completion, self).__init__(key=(spc, tst))
        self.spc = spc
        self.tst = tst

    def serialize(self, _settings):
        """Serialize to a plain dict; settings are unused."""
        return {"spc": self.spc, "tst": self.tst}
class ImplDone:
    """An implementation recorded via the free-form `done` field."""

    def __init__(self, raw):
        self.raw = raw

    def serialize(self, _settings):
        """Serialization is simply the raw `done` string; settings are unused."""
        return self.raw
def impl_to_statistics(impl, subparts):
    """Return the ``(count, value, secondary_count, secondary_value)`` that
    this impl should contribute to the "specified" and "tested" statistics.

    "secondary" is used because the Done field actually does contribute to
    both spc AND tst for REQ and SPC types.

    ``subparts`` should contain the subparts the artifact defines.
    """
    if impl is None:
        if subparts:
            # Unimplemented subparts still count against the artifact.
            return (1 + len(subparts), 0.0, 0, 0.0)
        return (0, 0.0, 0, 0.0)
    if isinstance(impl, ImplDone):
        # `done` counts as fully specified AND fully tested.
        return (1, 1.0, 1, 1.0)
    if isinstance(impl, code.ImplCode):
        return _implcode_to_statistics(impl, subparts)
    raise TypeError(impl)
def _implcode_to_statistics(impl, subparts):
count = 1
value = int(bool(impl.primary))
sec_count = 0
sec_value = 0.0
for sub in subparts:
count += 1
# track if the subname is implemented
contains_key = int(sub in impl.secondary)
value += contains_key
if sub.is_tst():
sec_count += 1
sec_value += contains_key
return (count, value, sec_count, sec_value)
| StarcoderdataPython |
66606 | from __future__ import unicode_literals
import requests
from .basemanager import BaseManager
from .constants import XERO_API_URL
class TrackingCategoryOptionsManager(BaseManager):
    """Manager for the nested Xero TrackingCategories/{id}/Options endpoint."""

    def __init__(self, credentials, user_agent=None):
        # Imported here (not at module level), matching the original —
        # presumably to avoid a circular import; confirm before hoisting.
        from xero import __version__ as VERSION
        self.credentials = credentials
        self.singular = 'Option'
        self.name = 'TrackingCategoryOptions'
        self.base_url = credentials.base_url + XERO_API_URL
        if user_agent is None:
            self.user_agent = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
        else:
            # Bug fix: a caller-supplied user_agent was previously ignored,
            # leaving self.user_agent unset entirely.
            self.user_agent = user_agent
        # Expose a public `put` wrapped by BaseManager's response handling.
        method = self._put
        setattr(self, 'put', self._get_data(method))

    def _put(self, tracking_category_id, data, summarize_errors=True, headers=None):
        """Build the request tuple for a PUT of an Option under a tracking category.

        :param tracking_category_id: Parent TrackingCategory identifier.
        :param data: Option payload, serialized to XML via the base manager.
        :param summarize_errors: When False, ask Xero for per-element errors.
        :param headers: Optional extra HTTP headers.
        :return: (uri, params, method, body, headers, singleobject) tuple.
        """
        uri = '/'.join([self.base_url, 'TrackingCategories', tracking_category_id, self.name])
        params = {}
        method = 'put'
        body = {'xml': self._prepare_data_for_save(data)}
        if not summarize_errors:
            params['summarizeErrors'] = 'false'
        return uri, params, method, body, headers, False
| StarcoderdataPython |
3362569 |
"""
Copyright 2015, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
SPDX-License-Identifier: BSD-3-Clause
Return codes
ref: http://www.cardwerk.com/smartcards/smartcard_standard_ISO7816-4_6_basic_interindustry_commands.aspx
ref: http://www.cardwerk.com/smartcards/smartcard_standard_ISO7816-4_7_transmission_interindustry_commands.aspx
"""
import logging
class AUTH_KEYS:
    # Default Gemalto test key set; each of the three 16-byte entries is the
    # ASCII of "GEMXPRESSOSAMPLE" (indexed by AUTH_KEY_IDX: AUTH, MAC, ENC).
    # http://www.cryptoshop.com/products/smartcards/gemalto-idcore-10-gemalto-top-im-gx4.html?___store=english&___from_store=default
    GEMALTO = [
        [0x47, 0x45, 0x4D, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x4F, 0x53, 0x41, 0x4D, 0x50, 0x4C, 0x45],
        [0x47, 0x45, 0x4D, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x4F, 0x53, 0x41, 0x4D, 0x50, 0x4C, 0x45],
        [0x47, 0x45, 0x4D, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x4F, 0x53, 0x41, 0x4D, 0x50, 0x4C, 0x45]
    ]
    GEMALTO_MODUS_VISA2 = [0xA0, 0x00, 0x00, 0x00, 0x18, 0x43, 0x4D, 0x00]
class AUTH_KEY_IDX:
    # Index of each key within an AUTH_KEYS key-set entry.
    AUTH = 0
    MAC = 1
    ENC = 2
class SECURE_CHANNEL:
    class DIVERSIFY:
        # Key diversification scheme selector.
        VISA2 = 0x00

    class MODE:
        # Secure-channel security level: none, C-MAC only, or C-MAC + encryption.
        NONE = 0x00
        MAC = 0X01
        MAC_ENC = 0x03
class SET_STATUS_PARAM:
    class TYPE:
        # P1 of SET STATUS: which entity the state change targets.
        SECURITY_DOMAIN = 0b10000000
        APPLICATION = 0b01000000

    class STATE_CARD:
        # Card State
        OP_READY = 0b00000001
        INITIALIZED = 0b00000111
        SECURED = 0b00001111
        LOCKED = 0b01111111
        TERMINATED = 0b11111111

    class STATE_SEC_DOM:
        # Security Domain
        INSTALLED = 0b000011
        SELECTABLE = 0b00000111
        PERSONALIZED = 0b00001111
        LOCKED = 0b10000011

    class STATE_APP:
        # Application
        INSTALLED = 0b00000011
        LOCKED = 0b10000000
        UNLOCKED = 0b00000000
class SEARCH_CRITERIA:
    # TLV tag 0x4F with zero length — used in GET STATUS to indicate AID
    # (see the GET_STATUS docstring below in this module).
    AID = [0x4F, 0x00]
# APDU Definitions
class APDU_CMD:
    """
    Lookup class for ADPU command values (INS bytes).

    Reference: http://www.informit.com/articles/article.aspx?p=29265&seqNum=6
    Reference: http://techmeonline.com/most-used-smart-card-commands-apdu/
    """
    # Administrative
    GET_RESPONSE = 0xC0
    MANAGE_CHANNEL = 0x70
    ENVELOPE = 0xC2
    GET_DATA = 0xCA
    PUT_DATA = 0xDA
    GET_STATUS = 0xF2
    SET_STATUS = 0xF0

    # Data
    SELECT = 0xA4
    READ_RECORD = 0xB2
    WRITE_RECORD = 0xD2
    APPEND_RECORD = 0xE2
    UPDATE_RECORD = 0xDC
    READ_BUFFER = 0x52
    GET_DATA_PIV = 0xCB
    READ_BINARY = 0xB0
    WRITE_BINARY = 0xD0
    UPDATE_BINARY = 0xD6
    ERASE_BINARY = 0x0E

    # Security
    INIT_UPDATE = 0x50
    VERIFY = 0x20
    RESET_RETRY = 0x2C
    CHANGE_REF_DATA = 0x24
    SIGN_DECRYPT = 0x42
    EXTERNAL_AUTH = 0x82
    INTERNAL_AUTH = 0x88
    GET_CHALLENGE = 0x84

    # NOTE(review): purpose not evident from this file — presumably CLA bytes
    # probed during testing; confirm with callers.
    TEST_CLASSES = [0x00, 0xC0, 0xF0, 0x80, 0xBC, 0x01]
class STATUS_WORDS:
    """
    Loockup class for common Status Words, as (SW1, SW2) pairs.
    """
    SUCCESS = (0x90, 0x00)

    # Secure Channel
    AUTH_FAIL = (0x63, 0x00)
    NOT_FOUND = (0x6a, 0x88)
    COND_NOT_SATISFIED = (0x69, 0x85)
# APDU Return Status Codes
class APDU_STATUS:
    """
    Lookup class for common APDU SW1 (first status byte) values.
    """
    MORE_DATA = 0x61
    WRONG_LENGTH = 0x6C
    SUCCESS = 0x90
class PIX_CAC:
    """
    Lookup class for PIX addresses on the CAC
    """
    PKI_APLT = [0x01, 0x00]
    # NOTE(review): PKI_APLT2 ([0x01,0x02]) and PKI_APLT3 ([0x01,0x01]) look
    # swapped relative to their numbering; confirm against CAC PIX tables.
    PKI_APLT2 = [0x01, 0x02]
    PKI_APLT3 = [0x01, 0x01]
    GC_APLT = [0x02, 0x00]
    GC_APLT2 = [0x02, 0x01]
    # NOTE(review): same value as PKI_APLT — verify this duplication is intended.
    AXS_CTL_APLT = [0x01, 0x00]
    CCC = [0xDB, 0x00]
    PIV_TRNS_APLT = [0x30, 0x00]
    PIV_END_PNT = [0x00, 0x00, 0x10, 0x00, 0x01, 0x00]
# Known Applet Identification Numbers
class APPLET:
    # Known applet AIDs (RID + PIX byte lists).
    # Credit Cards
    MASTERCARD = [0xA0, 0x00, 0x00, 0x00, 0x04, 0x10, 0x10]
    VISA = [0xA0, 0x00, 0x00, 0x00, 0x03, 0x10, 0x10]

    # CAC
    NIST_PIV = [0xA0, 0x00, 0x00, 0x03, 0x08, 0x00, 0x00] + [0x10, 0x00, 0x01, 0x00]
    DOD_PIV = [0xA0, 0x00, 0x00, 0x01, 0x16]  # , 0xDB, 0x00]
    DOD_CAC = [0xA0, 0x00, 0x00, 0x00, 0x79] + [0x01, 0x00]

    # Other
    HELLO = [0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0x01]

    # Security Domains
    SECURITY_GEMALTO = [0xA0, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00]
class OBJ_NIST_PIV:
    # Data object tags for the NIST PIV applet.
    # Ref: Cac End-Point Implementation Guide v1.22 / Page 33
    # Keys
    KEY_PIV_ATH = [0x5f, 0xc1, 0x05]
    KEY_DIG_SIG = [0x5f, 0xc1, 0x0A]
    KEY_MNG = [0x5f, 0xc1, 0x0B]
    KEY_CRD_ATH = [0x5f, 0xc1, 0x01]

    # Other
    CHUID = [0x5F, 0xC1, 0x02]
    CCC = [0x5F, 0xC1, 0x07]
    SEC_OBJ = [0x5f, 0xc1, 0x06]

    # Biometrics
    FNGR_P1 = [0x5F, 0xC1, 0x03]
    FNGR_P2 = [0x5F, 0xC1, 0x04]
    FACE = [0x5F, 0xC1, 0x08]
class OBJ_DOD_PIV:
    # Data object tags for the DoD PIV transitional applet.
    # Ref: Cac End-Point Implementation Guide v1.22 / Page 33
    # Keys
    # KEY_PIV_ATH = [0xA0, 0x01]
    # KEY_DIG_SIG = [0x01, 0x00]
    # KEY_MNG = [0x01, 0x02]
    # KEY_CRD_ATH = [0x05, 0x00]

    # Other
    CHUID = [0x30, 0x00]
    CCC = [0xDB, 0x00]
    SEC_OBJ = [0x90, 0x00]
    FACE = [0x60, 0x30]
    FNGR_PRNT = [0x60, 0x10]
class OBJ_DOD_CAC:
    # Data object tags for the legacy DoD CAC applet.
    # Ref: Cac End-Point Implementation Guide v1.22 / Page 33
    # Keys
    KEY_PKI_SIG = [0x01, 0x01]  # Mapped to PIV Key Mgmt Key & PIV Digital Sign Key
    KEY_PKI_ID = [0x01, 0x00]
    KEY_PKI_ENC = [0x01, 0x02]  # Mapped to PIV Key Mgmt Key & PIV Digital Sign Key

    # Other
    CAC_PERSON = [0x02, 0x00]
    CAC_PERSONEL = [0x02, 0x01]
    # NOTE(review): same tag as CAC_PERSONEL — verify this duplication is intended.
    ACCESS_CONTROL = [0x02, 0x01]
# APDU Construction functions
def SIGN_DECRYPT(data, CLA=0x80, P1=0x00, P2=0x00):
    """Build a SIGN/DECRYPT APDU: CLA INS P1 P2 Lc DATA Le.

    P1 - 0b1000000 (more blocks to follow), or 0
    P2 - 0x00
    Lc - length of data
    Le - expected length of returned data (0x00 here)
    """
    header = [CLA, APDU_CMD.SIGN_DECRYPT, P1, P2, len(data)]
    return header + data + [0x00]
def SELECT(data, CLA=0x00, P1=0x04, P2=0x00):
    """Build a SELECT APDU: CLA INS P1 P2 Lc DATA... Le.

    P1 and P2: http://www.cardwerk.com/smartcards/smartcard_standard_ISO7816-4_6_basic_interindustry_commands.aspx#table58
    """
    apdu = [CLA, APDU_CMD.SELECT, P1, P2, len(data)]
    apdu.extend(data)
    apdu.append(0x00)
    return apdu
def GET_DATA(P1, P2, CLA=0x80, Lc=0x00):
    """Build a GET DATA APDU: CLA INS P1 P2 Le.

    Set Le to 0x00 then update when we get the return code.

    :param P1: Most significant byte of address
    :param P2: Least significant byte of address
    :param CLA: Class
    :param Lc: Length to read
    """
    apdu = [CLA, APDU_CMD.GET_DATA, P1, P2]
    apdu.append(Lc)
    return apdu
def READ_BINARY(P1, P2, CLA=0x00, Lc=0x00):
    """Build a READ BINARY APDU: CLA INS P1 P2 Le.

    :param P1: If bit8=1 in P1, then bit7-6 are set to 0 and bit3-1 of P1
        are a short EF (Elementary File) identifier.
    :param P2: Offset of the first byte to read, in data units, from the
        beginning of the file.
    :param CLA: Class
    :param Lc: Length to read
    """
    apdu = [CLA, APDU_CMD.READ_BINARY, P1, P2]
    apdu.append(Lc)
    return apdu
def GET_DATA_PIV(address):
    """Build a PIV GET DATA APDU: CLA INS P1 P2 Lc DATA Le.

    Set Le to 0x00 then update when we get the return code.

    :param address: Address (tag bytes) of the PIV object to read.
    """
    P1, P2 = 0x3F, 0xFF
    # Tag list TLV: 0x5C, length, then the object address bytes.
    tag_list = [0x5c, len(address)] + address
    return [0x00, APDU_CMD.GET_DATA_PIV, P1, P2, len(tag_list)] + tag_list + [0x00]
def READ_RECORD(P1, P2, CLA=0x00, Le=0x00):
    """Build a READ RECORD APDU: CLA INS P1 P2 Le.

    Set Le to 0x00 then update when we get the return code.

    :param CLA: Class
    :param P1: Record Number
    :param P2: Reference Control (http://www.cardwerk.com/smartcards/smartcard_standard_ISO7816-4_6_basic_interindustry_commands.aspx#table36)
    :param Le: Bytes to read
    """
    return [CLA, APDU_CMD.READ_RECORD, P1, P2, Le]
def READ_BUFFER(P1, P2, buffer_type, read_length=64, Lc=0x02, CLA=0x80):
    """
    Build a READ BUFFER APDU: CLA INS P1 P2 Lc DATA_FIELD Le

    @param P1: MSB of offset
    @param P2: LSB of offset
    @param buffer_type: 0x01 (Type-Length buffer), 0x02 (Value buffer)
    @param read_length: Number of bytes to read
    @param Lc: Length of the data field (buffer_type + read_length bytes)
    @param CLA: Class
    @return: byte list with constructed APDU command
    """
    return [CLA, APDU_CMD.READ_BUFFER] + [P1, P2] + [Lc] + [buffer_type, read_length]
def INIT_UPDATE(P1, P2, challenge, CLA=0x80, Le=0x00):
    """
    Build an INITIALIZE UPDATE APDU: CLA INS P1 P2 Lc DATA_FIELD Le

    @param P1: Key version number (Default: 0)
    @param P2: Key identifier (Default: 0)
    @param challenge: List of 8 bytes to be sent as the host nonce
    @return: byte list with constructed APDU command
    """
    apdu = [CLA, APDU_CMD.INIT_UPDATE, P1, P2, len(challenge)]
    apdu += challenge
    apdu.append(Le)
    return apdu
def EXTERNAL_AUTHENTICATE(P1, cryptogram, mac, P2=0x00, CLA=0x84, Le=0x00):
    """
    Build an EXTERNAL AUTHENTICATE APDU: CLA INS P1 P2 Lc DATA_FIELD Le

    @param P1: Security Level: 0x00 - None, 0x01 - C-MAC, 0x03 - C-DECRYPTION and C-MAC
    @param P2: Always 0x00
    @param cryptogram: Host cryptogram to send to card
    @param mac: C-MAC for this APDU
    @return: byte list with constructed APDU command
    """
    # NOTE(review): Lc counts only the cryptogram, while the MAC bytes are
    # still appended to the data field -- confirm the card accepts this
    # encoding (GlobalPlatform normally includes the MAC in Lc).
    Lc = len(cryptogram)
    return [CLA, APDU_CMD.EXTERNAL_AUTH] + [P1, P2] + [Lc] + cryptogram + mac + [Le]
def GET_STATUS(P1, P2, search_criteria, Lc=None, CLA=0x80, Le=0x00):
    """
    Build a GET STATUS APDU: CLA INS P1 P2 Lc DATA_FIELD Le

    @param P1: 80 - Issuer Security Domain
               40 - Application Security Domain
               20 - Executable Load Files only
               10 - Executable Load Files and their Executable Modules only
    @param P2: 0bx0 - get all/first occurrence(s)
               0bx1 - get next
               0b0x - Response Structure 1
               0b1x - Response Structure 2
    @param search_criteria: 4f00 used to indicated AID
    Reference: GP 2.1.1/ page 114
    """
    # Default Lc to the length of the supplied search criteria.
    if Lc is None:
        Lc = len(search_criteria)
    apdu = [CLA, APDU_CMD.GET_STATUS, P1, P2, Lc]
    return apdu + search_criteria + [Le]
def SET_STATUS(P1, P2, data, CLA=0x80):
    """
    Build a SET STATUS APDU: CLA INS P1 P2 Lc DATA_FIELD Le

    @param P1: Status Type
               0x80 Security Domain
               0x40 Application
    @param P2: State Control
               0x80 Locked
               0x00 Unlocked
               (See Table 9-5)
    @param data: AID if setting application status
    @param CLA: 0x80 or 0x84
    Reference: GP 2.1.1/11.10 page 163
    """
    Le = 0
    return [CLA, APDU_CMD.SET_STATUS, P1, P2, len(data)] + data + [Le]
def VERIFY_PIN(P2, PIN, P1=0x00, CLA=0x00):
    """
    Build a VERIFY APDU: CLA INS P1 P2 Lc DATA

    @param PIN: list of bytes (length 4-8 bytes)
    @param P1: 0x00 is only valid
    @param P2: Key location
    @return: byte list with constructed APDU command
    """
    return [CLA, APDU_CMD.VERIFY, P1, P2, len(PIN)] + PIN
def RESET_RETRY_COUNT(P1, P2, puk, new_pin, CLA=0x00):
    """
    Build a RESET RETRY COUNTER APDU: CLA INS P1 P2 Lc DATA

    @param P1: 0x00, 0x01, or 0x02
    @param P2: Key location
    @param puk: list of bytes (length 4-8 bytes), or None to omit
    @param new_pin: list of bytes (length 4-8 bytes), or None to omit
    @return: byte list with constructed APDU command
    Reference: ISO 7816-4 8.5.9
    Reference: SP800-73-3 Part 2
    """
    data = []
    # PEP 8: compare to None with `is not None` (was `!= None`), matching
    # the new_pin check below.
    if puk is not None:
        data += puk
    if new_pin is not None:
        data += new_pin
    return [CLA, APDU_CMD.RESET_RETRY, P1, P2, len(data)] + data
def CHANGE_REFERENCE_DATA(P1, P2, old_pin, new_pin, CLA=0x00):
    """
    Build a CHANGE REFERENCE DATA APDU: CLA INS P1 P2 Lc DATA

    @param old_pin: list of bytes (length 4-8 bytes)
    @param new_pin: list of bytes (length 4-8 bytes)
    @param P1: 0x00, or 0x01 for the first time
    @param P2: Reference Data ID
               0x00 - Global PIN
               0x80 - Application PIN
               0x81 - Application PUK
    @return: byte list with constructed APDU command
    Reference: ISO 7816-4 8.5.6
    """
    # Data field is the old reference data immediately followed by the new.
    data = list(old_pin) + list(new_pin)
    return [CLA, APDU_CMD.CHANGE_REF_DATA, P1, P2, len(data)] + data
def GET_RESPONSE(Le):
    """
    Build a GET RESPONSE APDU: CLA INS P1 P2 Le

    @param Le: Number of response bytes still to be fetched from the card
    """
    return [0x00, APDU_CMD.GET_RESPONSE, 0x00, 0x00, Le]
# Supplementary Functions
def get_hex(input_list):
    """
    Convert a list of bytes into a space-separated hex string.

    Returns "" for None or an empty list.
    """
    if input_list is None:
        return ""
    # str.join is linear; the previous += loop rebuilt the string each
    # iteration and needed a trailing-space strip.
    return " ".join(hex(i) for i in input_list)
def get_str(input_list):
    """
    Convert a list of byte values into a string (one character per value).
    """
    # str.join avoids the quadratic += string-building loop.
    return "".join(chr(i) for i in input_list)
| StarcoderdataPython |
3371817 | <reponame>pgromano/sampy
import numpy as np
# Public helpers exported by this module.
__all__ = [
    'check_array',
    'set_random_state',
    'cache_property',
]
def check_array(X, ensure_1d=False, ensure_2d=False, squeeze=False,
                atleast_2d=False, feature_axis='col', reduce_args=False,
                dtype=None):
    """ Check Array

    Standardize arbitrary input into a numpy array, optionally enforcing
    or adjusting its dimensionality.

    Parameters
    ----------
    X : array-like or numeric
        The input data to standardize as a numpy array
    ensure_1d : bool, optional
        Raise ValueError when the result is not 1-dimensional.
    ensure_2d : bool, optional
        Raise ValueError when the result is not 2-dimensional.
    squeeze : bool, optional
        Whether or not the array should be "squeezed". This results in
        flattening all size 1 dimensions, by default False.
    atleast_2d : bool, optional
        Promote a 1D result to 2D, oriented by feature_axis.
    feature_axis : {'col', 1, 'row', 0}, optional
        Orientation used when promoting to 2D: 'col'/1 makes a column
        vector, 'row'/0 a row vector.
    reduce_args : bool, optional
        If X is a length-1 sequence, unwrap its single element first.
    dtype : optional
        Accepted for API compatibility; currently unused.

    Returns
    -------
    numpy.ndarray
        The cleaned data as a numpy.ndarray
    """
    # Reject contradictory shape requests up front.
    if ensure_1d and ensure_2d:
        raise ValueError("Cannot ensure 1D and 2D array")

    if ensure_1d and atleast_2d:
        raise ValueError("Ambiguous expectation: ensure_1d and atleast_2d")

    if squeeze and atleast_2d:
        raise ValueError("Ambiguous expectation: squeeze and atleast_2d")

    if reduce_args:
        if len(X) == 1:
            X = X[0]

    # Scalars and strings are wrapped as singleton arrays; everything else
    # is converted directly.  (The original elif condition was the exact
    # negation of the if, and a third conversion branch was unreachable;
    # both have been simplified away with identical behavior.)
    if not hasattr(X, '__iter__') or isinstance(X, str):
        X = np.array([X])
    else:
        X = np.array(X)

    if squeeze:
        X = np.squeeze(X)

    if atleast_2d and np.ndim(X) == 1:
        if feature_axis == 'row' or feature_axis == 0:
            X = np.atleast_2d(X)
        elif feature_axis == 'col' or feature_axis == 1:
            X = np.atleast_2d(X).T
        else:
            raise ValueError(
                f"Unable to interpret `feature_axis = '{feature_axis}'`")

    if ensure_1d and np.ndim(X) != 1:
        raise ValueError("Array must be 1D")

    if ensure_2d and np.ndim(X) != 2:
        raise ValueError("Array must be 2D")

    return X
def set_random_state(seed=None):
    """Return a numpy RandomState for *seed*.

    An existing RandomState passes through unchanged; a string is hashed
    down to a 32-bit integer seed; None/int are handed to RandomState.
    """
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, str):
        seed = hash(seed) & 0xFFFFFFFF
    return np.random.RandomState(seed)
class cache_property:
    """Descriptor that computes a method once and caches the result.

    The value is stored on the instance under ``_<method name>``; later
    accesses return the cached attribute without calling the method again.
    """

    def __init__(self, method):
        self.method = method
        self.cache_name = "_{}".format(method.__name__)

    def __get__(self, instance, *args, **kwargs):
        if not hasattr(instance, self.cache_name):
            setattr(instance, self.cache_name, self.method(instance))
        return getattr(instance, self.cache_name)
| StarcoderdataPython |
3353366 | import asyncio
import hmac
from json import loads
from urllib.parse import parse_qs, urlencode
import aiohttp
import aiohttp_jinja2
from aiohttp import web
from aiohttp_session import get_session
# Route table populated by the @routes.view decorators below.
routes = web.RouteTableDef()
def sign(key, msg):
    """Return the hex HMAC-SHA1 of *msg* keyed with *key* (ASCII strings)."""
    mac = hmac.new(key.encode("ascii"),
                   msg=msg.encode("ascii"),
                   digestmod="sha1")
    return mac.hexdigest()
@routes.view("/")
@aiohttp_jinja2.template("base.html")
async def index(request):
    """Main page: list the user's GitHub repos and install webhooks.

    Unauthenticated users get an OAuth login link.  Authenticated users
    see their repositories; a POST with a ``repo`` field creates a hook
    on that repository (only when they own it).
    """
    session = await get_session(request)
    access_token = session.get("access_token")
    if access_token:
        post = await request.post()
        # NOTE(review): this ClientSession rebinds ``session``, shadowing
        # the user session fetched above -- works, but rename for clarity.
        async with aiohttp.ClientSession() as session:
            async with session.get(
                    "https://api.github.com/user/repos",
                    headers=dict(Authorization="token " + access_token[0]),
            ) as response:
                repos = await response.json()
            repo = post.get("repo")
            if repo:
                # Only allow hook installation on repos the user owns.
                for r in repos:
                    if repo == r['full_name']:
                        # Per-repo webhook secret derived from the app key.
                        id = sign(request.app["config"]["SECRET_KEY"],
                                  msg=repo)
                        url = (request.scheme + "://" + request.host +
                               "/hook")
                        async with session.post(
                                "https://api.github.com/repos/{0}/hooks".
                                format(repo),
                                headers=dict(Authorization="token " +
                                             access_token[0]),
                                json=dict(config=dict(
                                    url=url,
                                    secret=id,
                                    content_type='json',
                                )),
                        ) as resp:
                            json = loads(await resp.text())
                        if json.get('active'):
                            # Hook created -- show polling instructions.
                            raise web.HTTPFound(
                                "/instructions?" +
                                urlencode(dict(repo=repo)))
                        # Hook creation failed: re-render with the error.
                        return dict(repos=repos, error=json)
                raise web.HTTPForbidden(
                    text='This repository does not belong to you.')
            return dict(repos=repos)
    return dict(auth="https://github.com/login/oauth/authorize?" + urlencode(
        dict(client_id=request.app['config']['GITHUB_CLIENT_ID'],
             scope='repo admin:repo_hook')))
@routes.view("/auth")
async def auth(request):
    """OAuth callback: exchange the GitHub ``code`` for an access token.

    The token is stored in the user's session and the user is redirected
    back to the index page.
    """
    code = request.query.get("code")
    if code:
        payload = dict(
            code=code,
            client_id=request.app['config']['GITHUB_CLIENT_ID'],
            client_secret=request.app['config']['GITHUB_CLIENT_SECRET'])
        async with aiohttp.ClientSession() as session:
            async with session.post(
                    "https://github.com/login/oauth/access_token",
                    data=payload) as resp:
                # NOTE(review): ``session`` is rebound here from the HTTP
                # client session to the user session -- confirm intended.
                session = await get_session(request)
                text = await resp.text()
                # GitHub answers with a querystring; parse_qs yields a list.
                session["access_token"] = parse_qs(text)["access_token"]
        return web.HTTPFound("/")
    return web.Response(text="Authentication code missing.")
@routes.view("/logout")
async def logout(request):
    """Log out: discard the stored GitHub access token and go home."""
    session = await get_session(request)
    del session["access_token"]
    return web.HTTPFound("/")
@routes.view("/instructions")
@aiohttp_jinja2.template("instructions.html")
async def instructions(request):
    """Show polling instructions for a repository the user owns.

    Verifies ownership against the GitHub API and renders the signed
    poll URL for the repo; anything else is forbidden.
    """
    # make sure user owns this repo
    repo = request.query.get("repo")
    session = await get_session(request)
    access_token = session.get("access_token")
    if access_token:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                    "https://api.github.com/user/repos",
                    headers=dict(Authorization="token " + access_token[0]),
            ) as response:
                repos = await response.json()
            for r in repos:
                if repo == r['full_name']:
                    # Poll URL embeds the HMAC of the repo name so only
                    # the hook installer can subscribe.
                    return dict(
                        repo=repo,
                        poll=request.url.with_path('/poll/%s/%s/' % (
                            sign(request.app["config"]["SECRET_KEY"],
                                 msg=repo),
                            repo,
                        )))
    raise web.HTTPForbidden(text='This repository does not belong to you.')
@routes.view("/hook")
async def hook(request):
    """GitHub webhook receiver: verify the signature, publish to Redis.

    The X-Hub-Signature header must be the HMAC-SHA1 of the raw body
    under the per-repo secret (see index()); valid payloads are pushed
    to the ``hook:<owner>/<repo>`` Redis channel for poll() subscribers.
    """
    redis = request.app["redis"]
    post = await request.read()
    json = loads(post)
    repo = json['repository']['full_name']
    secret = sign(request.app["config"]["SECRET_KEY"], msg=repo)
    signature = request.headers.get('X-Hub-Signature')
    if not signature:
        raise web.HTTPForbidden(text='Signature missing.')
    expected = 'sha1=' + hmac.new(
        secret.encode('ascii'), msg=post, digestmod="sha1").hexdigest()
    # Constant-time comparison (matches poll()); a plain != leaks timing
    # information about the expected signature.
    if not hmac.compare_digest(signature, expected):
        raise web.HTTPForbidden(text='Signature does not match')
    await redis.publish("hook:" + json['repository']['full_name'], post)
    return web.Response(text="OK")
@routes.view("/poll/{id}/{owner}/{repo}/")
async def poll(request):
    """Streaming endpoint: forward webhook payloads for one repository.

    The ``id`` path component must equal the HMAC of ``owner/repo`` under
    the app secret, so only someone who installed the hook can subscribe.
    """
    id = request.match_info.get('id')
    owner = request.match_info.get('owner')
    repo = request.match_info.get('repo')
    # Constant-time comparison prevents timing attacks on the channel id.
    if not hmac.compare_digest(
            id,
            sign(request.app["config"]["SECRET_KEY"], msg=owner + '/' + repo)):
        return web.HTTPNotFound()
    response = web.StreamResponse(headers={
        'Content-Type': 'application/json',
    })
    response.enable_chunked_encoding()
    await response.prepare(request)
    await response.write(b"")
    redis = request.app["redis"]
    channel, = await redis.subscribe("hook:%s/" % owner + repo)
    get = asyncio.ensure_future(channel.get())
    # has to write something at least every 55 seconds to avoid request timeout
    # https://devcenter.heroku.com/articles/request-timeout#long-polling-and-streaming-responses
    while True:
        # Race the pending Redis message against a 50s keep-alive timer.
        done, pending = await asyncio.wait({asyncio.sleep(50), get},
                                           return_when=asyncio.FIRST_COMPLETED)
        if get in done:
            await response.write(get.result())
            break
        # Timer fired first: emit a heartbeat byte and keep waiting.
        await response.write(b"\r")
    return response
| StarcoderdataPython |
3255590 | <filename>plugins/mixins/aws_service.py
from systems.plugins.index import ProviderMixin
from utility.data import ensure_list
import os
import boto3
import random
class AWSServiceMixin(ProviderMixin('aws_service')):
    """Mixin providing AWS credential handling and boto3 client helpers."""

    @classmethod
    def generate(cls, plugin, generator):
        """Attach credential add/remove hooks to the generated plugin."""
        super().generate(plugin, generator)

        def add_credentials(self, config):
            self.aws_credentials(config)

        def remove_credentials(self, config):
            self.clean_aws_credentials(config)

        plugin.add_credentials = add_credentials
        plugin.remove_credentials = remove_credentials

    def aws_credentials(self, config):
        """Load AWS keys into *config* and the process environment.

        Errors out (via command.error) when either key is missing.
        """
        try:
            config['access_key'] = self.command.get_config('aws_access_key', required = True).strip()
            os.environ['AWS_ACCESS_KEY_ID'] = config['access_key']

            config['secret_key'] = self.command.get_config('aws_secret_key', required = True).strip()
            os.environ['AWS_SECRET_ACCESS_KEY'] = config['secret_key']
        except Exception:
            self.command.error("To use AWS provider you must have 'aws_access_key' and 'aws_secret_key' environment configurations; see: config save")

        return config

    def clean_aws_credentials(self, config):
        """Remove AWS keys from *config* and the process environment."""
        config.pop('access_key', None)
        os.environ.pop('AWS_ACCESS_KEY_ID', None)

        config.pop('secret_key', None)
        os.environ.pop('AWS_SECRET_ACCESS_KEY', None)

    def _init_aws_session(self):
        """Lazily create and cache a boto3 Session on this instance."""
        if not getattr(self, 'session', None):
            config = self.aws_credentials({})
            self.session = boto3.Session(
                aws_access_key_id = config['access_key'],
                aws_secret_access_key = config['secret_key']
            )

    def ec2(self, network):
        """Return an EC2 client for the network's configured region."""
        self._init_aws_session()
        return self.session.client('ec2',
            region_name = network.config['region']
        )

    def efs(self, network):
        """Return an EFS client for the network's configured region."""
        self._init_aws_session()
        return self.session.client('efs',
            region_name = network.config['region']
        )

    def get_aws_ec2_keynames(self, network, ec2 = None):
        """List the names of all EC2 key pairs in the network's region."""
        if not ec2:
            ec2 = self.ec2(network)

        key_names = []
        keypairs = ec2.describe_key_pairs()
        for keypair in keypairs['KeyPairs']:
            key_names.append(keypair['KeyName'])

        return key_names

    def create_aws_ec2_keypair(self, network, ec2 = None):
        """Create a new key pair with an unused random zimagi_N name.

        Returns (key_name, private_key_material).
        """
        if not ec2:
            ec2 = self.ec2(network)

        key_names = self.get_aws_ec2_keynames(network, ec2)

        # Retry random names until one not already in use is found.
        while True:
            key_name = "zimagi_{}".format(random.randint(1, 1000001))
            if key_name not in key_names:
                break

        keypair = ec2.create_key_pair(KeyName = key_name)
        return (key_name, keypair['KeyMaterial'])

    def delete_aws_ec2_keypair(self, network, key_name, ec2 = None):
        """Delete the named EC2 key pair."""
        if not ec2:
            ec2 = self.ec2(network)
        return ec2.delete_key_pair(KeyName = key_name)
| StarcoderdataPython |
1616264 | #!/usr/bin/env python3
""" Produces list of map cell values, and cell offsets where they first appear """
import sys
import argparse
from itsybitser import hextream
def main():
    """ Program entry point """
    description = (
        "Takes Hextream-encoded map data (as produced by mapextract.py) and produces" +
        " a comma-delimited list of unique cell values, and the offsets of the map" +
        " cells where those values first appear"
    )
    parser = argparse.ArgumentParser(description=description)
    # Both files are optional positionals defaulting to stdin/stdout.
    parser.add_argument(
        'infile', nargs='?', type=argparse.FileType('r', encoding="UTF-8"),
        help="Name of Hextream file with map data to be indexed",
        default=sys.stdin)
    parser.add_argument(
        'outfile', nargs='?', type=argparse.FileType('w', encoding="UTF-8"),
        help="Name of file in which to write the map index",
        default=sys.stdout)
    args = parser.parse_args()
    build_map_index(args.infile, args.outfile)
def build_map_index(infile, outfile):
    """ Produces list of map cell values, and cell offsets where they first appear """
    binary_content = hextream.decode(infile.read())
    map_index = {}
    for position, value in enumerate(binary_content):
        # setdefault keeps only the first offset seen for each value.
        map_index.setdefault(value, position)
    rows = ["{},{}".format(value, position)
            for value, position in sorted(map_index.items())]
    outfile.write("value,position\n{}\n".format("\n".join(rows)))
# Allow direct execution as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3372231 | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Lightweight objects for storage during collection"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from datetime import datetime
from future.utils import viewitems, viewvalues
from . import content
class ObjectStore(object):
    """Temporary storage for LW objects.

    Objects live in a dict keyed by an auto-incremented integer id.
    ``del store[i]`` only marks the slot (sets it to None) so ids stay
    stable; :meth:`clear` compacts the marked slots away.
    """

    def __init__(self, cls):
        """Initialize Object Store


        Arguments:
        cls -- LW object class (called as ``cls(new_id, *args)``)
        """
        self.cls = cls
        self.store = {}
        self.id = 0                                                              # pylint: disable=invalid-name
        self.count = 0

    def __getitem__(self, index):
        return self.store[index]

    def __delitem__(self, index):
        # Mark as deleted instead of removing the key so ids of other
        # objects keep working; clear() removes the None slots.
        self.store[index] = None
        self.count -= 1

    def add(self, *args):
        """Add object using its __init__ arguments and return id"""
        self.id += 1
        self.count += 1
        self.store[self.id] = self.cls(self.id, *args)
        return self.id

    def add_object(self, *args):
        """Add object using its __init__ arguments and return object"""
        self.id += 1
        self.count += 1
        self.store[self.id] = self.cls(self.id, *args)
        return self.store[self.id]

    def dry_add(self, *args):
        """Return object that would be added by add_object

        Do not add it to storage
        """
        return self.cls(-1, *args)

    def remove(self, value):
        """Remove every object equal to *value* from storage"""
        # Collect matching keys first: deleting entries while iterating a
        # dict view raises RuntimeError ("dictionary changed size during
        # iteration"), which the previous implementation did.
        for key in [k for k, v in viewitems(self.store) if v == value]:
            del self.store[key]
            self.count -= 1

    def __iter__(self):
        """Iterate on objects, and not ids"""
        return viewvalues(self.store)

    def items(self):
        """Iterate on both ids and objects"""
        for key, value in viewitems(self.store):
            yield key, value

    # iteritems was a verbatim duplicate of items; keep it as an alias for
    # backward compatibility.
    iteritems = items

    def values(self):
        """Iterate on objects if they exist"""
        for value in viewvalues(self.store):
            if value is not None:
                yield value

    def clear(self):
        """Remove deleted (None) slots from storage"""
        self.store = {key: val for key, val in viewitems(self.store) if val}
        self.count = len(self.store)

    def generator(self, trial_id, partial=False):
        """Yield objects for database storage, tagging each with *trial_id*.

        With *partial*, completed objects are marked deleted as they are
        yielded and the store is compacted afterwards.
        """
        for obj in self.values():
            if partial and obj.is_complete():
                del self[obj.id]
            obj.trial_id = trial_id
            yield obj
        if partial:
            self.clear()

    def has_items(self):
        """Return true if it has items"""
        return bool(self.count)
def define_attrs(required, extra=()):
    """Create __slots__ by adding extra attributes to required ones.

    Returns (slots, attributes): slots is required + extra, attributes is
    just the required names.  Accepts any sequences (the previous
    ``tuple(required + extra)`` with a mutable [] default required both to
    be lists).
    """
    slots = tuple(required) + tuple(extra)
    attributes = tuple(required)
    return slots, attributes
class BaseLW:
    """Lightweight objects base class.

    Subclasses define ``attributes`` (names to persist) and ``special``
    (names whose -1 value means "not set" and is read back as None).
    """

    def keys(self):
        """Return attributes that should be saved"""
        return self.attributes

    def __iter__(self):
        return iter(self.attributes)

    def __getitem__(self, key):
        value = getattr(self, key)
        if key in self.special and value == -1:
            return None
        return value
# Deployment
class ModuleLW(BaseLW):
    """Module lightweight object

    Records an imported module (name, version, path, content hash).
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "name", "path", "version", "code_hash"],
        ["trial_id"]
    )
    special = set()

    def __init__(self, oid, name, version, path, code_hash):  # pylint: disable=too-many-arguments
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = oid  # pylint: disable=invalid-name
        self.name = name
        self.version = version
        self.path = path
        self.code_hash = code_hash

    def is_complete(self):  # pylint: disable=no-self-use
        """Module can always be removed from object store"""
        return True

    def __repr__(self):
        return ("Module(id={}, name={}, version={})").format(
            self.id, self.name, self.version)
class DependencyLW(BaseLW):
    """Dependency lightweight object

    Links a trial to one of its modules by module id.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["trial_id", "module_id"], ["id"]
    )
    special = set()

    def __init__(self, oid, module_id):
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = oid  # pylint: disable=invalid-name
        self.module_id = module_id

    def is_complete(self):  # pylint: disable=no-self-use
        """Dependency can always be removed from object store"""
        return True

    def __repr__(self):
        return ("Dependency(module_id={})").format(self.module_id)
class EnvironmentAttrLW(BaseLW):
    """EnvironmentAttr lightweight object

    A single name/value pair describing the execution environment.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["trial_id", "id", "name", "value"]
    )
    special = set()

    def __init__(self, oid, name, value):
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = oid  # pylint: disable=invalid-name
        self.name = name
        self.value = value

    def is_complete(self):  # pylint: disable=no-self-use
        """EnvironmentAttr can always be removed from object store"""
        return True

    def __repr__(self):
        return ("EnvironmentAttr(id={}, name={}, value={})").format(
            self.id, self.name, self.value)
# Definition
class DefinitionLW(BaseLW):  # pylint: disable=too-many-instance-attributes
    """Definition lightweight object

    May represent files, classes and function definitions
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "name", "code_hash", "trial_id", "first_line", "last_line",
         "docstring"],
        ["type", "code", "parent", "namespace"],
    )
    special = set()

    def __init__(self, aid, previous_namespace, name, code, dtype, parent,  # pylint: disable=too-many-arguments
                 first_line, last_line, docstring):
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = aid  # pylint: disable=invalid-name
        # Fully qualified name: "<outer>.<name>" (just "<name>" at top level).
        self.namespace = (
            previous_namespace +
            ("." if previous_namespace else "") +
            name
        )
        self.name = self.namespace
        # -1 encodes "no parent definition".
        self.parent = (parent if parent is not None else -1)
        self.type = dtype
        self.code = code
        # Store the source text in the content database; keep only its hash.
        self.code_hash = content.put(code.encode("utf-8"), self.name)
        self.first_line = first_line
        self.last_line = last_line
        self.docstring = docstring or ""

    def is_complete(self):  # pylint: disable=no-self-use
        """DefinitionLW can always be removed from object store"""
        return True

    def __repr__(self):
        return ("DefinitionLW(id={}, name={}, type={})").format(
            self.id, self.name, self.type)
class ObjectLW(BaseLW):
    """Object lightweight object

    Named object of a given type attached to a function definition.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["trial_id", "id", "name", "type", "function_def_id"]
    )
    special = set()

    def __init__(self, oid, name, otype, function_def_id):
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = oid  # pylint: disable=invalid-name
        self.name = name
        self.type = otype
        self.function_def_id = function_def_id

    def is_complete(self):  # pylint: disable=no-self-use
        """Object can always be removed from object store"""
        return True

    def __repr__(self):
        return (
            "Object(id={}, name={}, type={}, "
            "function_def={})"
        ).format(self.id, self.name, self.type, self.function_def_id)
# Profiler
class ActivationLW(BaseLW):  # pylint: disable=too-many-instance-attributes
    """Activation lightweight object

    Records one function activation (call): location, timing, caller,
    plus bookkeeping used by the profiler/slicer while it is live.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "name", "line", "return_value", "start", "finish", "caller_id",
         "trial_id"],
        ["file_accesses", "context", "slice_stack", "lasti", "definition_file",
         "args", "kwargs", "starargs", "with_definition", "filename",
         "is_main", "has_parameters",
         "loops", "conditions", "permanent_conditions",
         "temp_context", "temp_line"]
    )
    # caller_id == -1 is read back as None (no caller).
    special = {"caller_id"}

    def __init__(self, aid, definition_file, filename, name, line, lasti,  # pylint: disable=too-many-arguments
                 caller_id, with_definition):
        # Activation id doubles as its trial-local identity.
        self.trial_id = aid
        self.id = aid  # pylint: disable=invalid-name
        self.name = name
        self.line = line
        self.start = datetime.now()
        # finish stays None until the activation completes (see is_complete).
        self.finish = None
        self.caller_id = (caller_id if caller_id else -1)
        self.return_value = None

        # Name of the script with the call
        self.filename = filename
        # Name of the script with the function definition
        self.definition_file = definition_file
        # Activation has definition or not
        self.with_definition = with_definition
        # Activation is __main__
        self.is_main = aid == 1
        # Activation has parameters. Use only for slicing!
        self.has_parameters = True

        # File accesses. Used to get the content after the activation
        self.file_accesses = []
        # Variable context. Used in the slicing lookup
        self.context = {}
        # Temporary variables
        self.temp_context = set()
        self.temp_line = None
        # Line execution stack.
        # Used to evaluate function calls before execution line
        self.slice_stack = []
        self.lasti = lasti

        self.args = []
        self.kwargs = []
        self.starargs = []

        self.loops = []
        self.conditions = []
        self.permanent_conditions = []

    def is_complete(self):
        """Activation can be removed from object store after setting finish"""
        return self.finish is not None

    def is_comprehension(self):
        """Check if activation is comprehension"""
        return self.name in [
            "<setcomp>", "<dictcomp>", "<genexpr>", "<listcomp>"
        ]

    def __repr__(self):
        return (
            "Activation(id={}, line={}, lasti={}, filename={}, "
            " name={}, start={}, finish={}, return={}, caller_id={})"
        ).format(
            self.id, self.line, self.lasti, self.filename, self.name,
            self.start, self.finish, self.return_value, self.caller_id
        )
class ObjectValueLW(BaseLW):
    """ObjectValue lightweight object

    A named value of a given type observed inside a function activation.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["trial_id", "id", "name", "value", "type", "function_activation_id"]
    )
    special = set()

    def __init__(self, oid, name, value, otype, function_activation_id):  # pylint: disable=too-many-arguments
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = oid  # pylint: disable=invalid-name
        self.name = name
        self.value = value
        self.type = otype
        self.function_activation_id = function_activation_id

    def is_complete(self):  # pylint: disable=no-self-use
        """ObjectValue can always be removed"""
        return True

    def __repr__(self):
        return (
            "ObjectValue(id={}, name={}, value={}, type={}, "
            "activation={})"
        ).format(
            self.id, self.name,
            self.value, self.type, self.function_activation_id
        )
class FileAccessLW(BaseLW):  # pylint: disable=too-many-instance-attributes
    """FileAccess lightweight object

    Records a single file access: mode, buffering, content hashes before
    and after, and the owning activation.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "name", "mode", "buffering", "timestamp", "trial_id",
         "content_hash_before", "content_hash_after",
         "function_activation_id"],
        ["done"]
    )
    # function_activation_id == -1 is read back as None (no activation).
    special = {"function_activation_id"}

    def __init__(self, fid, name):
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.id = fid  # pylint: disable=invalid-name
        self.name = name
        # Defaults until the real open() arguments are captured via update().
        self.mode = "r"
        self.buffering = "default"
        self.content_hash_before = None
        self.content_hash_after = None
        self.timestamp = datetime.now()
        self.function_activation_id = -1
        self.done = False

    def update(self, variables):
        """Update file access attributes from a dict"""
        for key, value in viewitems(variables):
            setattr(self, key, value)

    def is_complete(self):
        """FileAccess can be removed once it is tagged as done"""
        return self.done

    def __repr__(self):
        # Fixed: the format string previously lacked the closing parenthesis.
        return ("FileAccess(id={}, name={})").format(self.id, self.name)
# Slicing
class VariableLW(BaseLW):
    """Variable lightweight object

    A variable occurrence (name, line, value, time) inside an activation.
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "activation_id", "name", "line", "value", "time", "trial_id",
         "type"]
    )
    special = set()

    def __init__(self, vid, activation_id, name, line, value, time, _type):  # pylint: disable=too-many-arguments
        self.id = vid  # pylint: disable=invalid-name
        self.activation_id = activation_id
        self.name = name
        self.line = line
        self.value = value
        self.time = time
        self.type = _type

    def is_complete(self):  # pylint: disable=no-self-use
        """Variable can never be removed"""
        # Variables must stay available for slicing lookups.
        return False

    def __repr__(self):
        return ("Variable(id={}, activation_id={}, name={}, line={}, type={},"
                "value={})").format(self.id, self.activation_id, self.name,
                                    self.line, self.type, self.value)
class VariableDependencyLW(BaseLW):
    """Variable Dependency lightweight object

    Directed edge between two variables (source depends on target),
    each identified by (activation id, variable id).
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "source_activation_id", "source_id",
         "target_activation_id", "target_id", "trial_id", "type"]
    )
    special = set()

    def __init__(self, vid, source_activation_id, source_id,  # pylint: disable=too-many-arguments
                 target_activation_id, target_id, _type):
        self.id = vid  # pylint: disable=invalid-name
        self.source_activation_id = source_activation_id
        self.source_id = source_id
        self.target_activation_id = target_activation_id
        self.target_id = target_id
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1
        self.type = _type

    def is_complete(self):  # pylint: disable=no-self-use
        """Variable Dependency can always be removed"""
        return True

    def __repr__(self):
        return (
            "Dependent(id={}, "
            "sact_id={}, source_id={}, "
            "tact_id={}, target_id={}, type={})"
        ).format(
            self.id,
            self.source_activation_id, self.source_id,
            self.target_activation_id, self.target_id, self.type
        )
class VariableUsageLW(BaseLW):
    """Variable Usage lightweight object

    One use of a variable at a given line with a given context (ctx).
    There are type definitions on lightweight.pxd
    """

    __slots__, attributes = define_attrs(
        ["id", "activation_id", "variable_id",
         "line", "ctx", "trial_id"]
    )
    special = set()

    def __init__(self, vid, activation_id, variable_id, line, ctx):  # pylint: disable=too-many-arguments
        self.id = vid  # pylint: disable=invalid-name
        self.activation_id = activation_id
        self.variable_id = variable_id
        self.line = line
        self.ctx = ctx
        # trial_id is filled in later by ObjectStore.generator.
        self.trial_id = -1

    def is_complete(self):  # pylint: disable=no-self-use
        """Variable Usage can always be removed"""
        return True

    def __repr__(self):
        return (
            "Usage(id={}, variable_id={}, line={}, ctx={})"
        ).format(self.id, self.variable_id, self.line, self.ctx)
| StarcoderdataPython |
1669677 | # -*- coding: utf-8 -*-
from . import settings, config
from . import install
from .oset import OrderedSet
from .adict import AttrDict
from .package import Package, PKG_STATUS_STR, PKG_STATUS_NAMES
from .utils import print_graph, print_array
from .output import ( info as _,
warn as _w,
error as _e )
import json
import os
import subprocess
import sys
import time

from math import log10, ceil
def print_instructions(packages):
    """Pretty-print the planned action for every package, grouped by status.

    'keep' packages are only listed when a build/show of kept packages was
    requested; finishes with per-status counts and a grand total.
    """
    print_keep = config.clopt('build_keep') or config.clopt('show_keep')
    for key in PKG_STATUS_NAMES:
        if key not in packages:
            continue
        if key == PKG_STATUS_STR.keep and not print_keep:
            continue
        string = []
        string.append("{c.white}Packages for action {c.bold}{0}{c.end}:")
        # Digit width used to right-align package numbers.
        # NOTE(review): ceil(log10(n)) under-counts by one digit for exact
        # powers of 10 (e.g. n == 10) -- only affects alignment.
        signs = int(ceil(log10(len(packages[key]))))
        for n, package in enumerate(packages[key], 1):
            number = '{0:>{1}}: '.format(n, signs) if config.clopt('numerate') else ''
            string.append("{c.white}" + number + "{c.end}{c." + key + "}" +
                          package.output(key))
        _('\n'.join(string), key.upper())
    total = 0
    totalstr = []
    for k, v in packages.items():
        total += len(v)
        totalstr.extend([k.capitalize(), ": {c.bold}", str(len(v)), "{c.end} "])
    _("Total: {c.version}{c.bold}{0}{c.end} " + ''.join(totalstr), total)
def get_build_instructions(package_list, origin_package_set):
    """Group packages by action and derive the build/missing lists.

    Returns a dict mapping action name -> list of packages, or None when
    an 'install' action is seen while rebuilds of installed packages are
    disabled (inconsistent state).
    """
    # Place packages according to it's action types
    packages = {}
    rebuild_installed = settings.opt('rebuild_installed')
    build_keep = config.clopt('build_keep')
    for package in package_list:
        action = package.action(origin_package_set)
        if action not in packages:
            packages[action] = []
        packages[action].append(package)
        if action == PKG_STATUS_STR.install and not rebuild_installed:
            # NOTE(review): the message contains a {0} placeholder but no
            # package name argument is passed to _e -- confirm formatting.
            _e("""{c.red}Internal error: package {c.yellow}{0}{c.red}"""\
               """ must not be installed in build phase. Exiting.""")
            return
        # Add package to build order if it must be rebuilt
        rebuild = ((action == PKG_STATUS_STR.install and rebuild_installed)
                   or (action == PKG_STATUS_STR.keep and build_keep))
        if rebuild:
            # Unbuildable rebuild candidates are reported as missing.
            if package.buildable:
                build_name = PKG_STATUS_STR.build
            else:
                build_name = PKG_STATUS_STR.missing
            if build_name not in packages:
                packages[build_name] = []
            packages[build_name].append(package)
    # Deduplicate keep/missing lists while preserving order.
    for key in (PKG_STATUS_STR.keep, PKG_STATUS_STR.missing):
        if key in packages:
            packages[key] = list(OrderedSet(packages[key]))
    return packages
def build_packages(build):
    """Build each package in *build* order with mkpkg, logging output.

    Honors ``start_from`` (skip the first N packages), ``no_install``
    (build without installing) and ``skip_failed`` (continue past
    failures).  Per-package logs go to a timestamped directory under
    settings.LOG_PATH; when ``output_file`` is set, a JSON build report
    is written there at the end.
    """
    total = len(build)
    _("{c.bold}{c.green}Build started.")
    skip = int(config.clopt('start_from', 0))
    skip_failed = settings.opt('skip_failed')
    mkpkg_opts = '' if settings.opt('no_install') else '-si'
    outfile = config.clopt("output_file")
    status = []
    logdir = os.path.join(settings.LOG_PATH, "build", "{0:.0f}".format(time.time()))
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    for counter, package in enumerate(build):
        if counter < skip:
            continue
        s = "[{0}/{1}] {2}: building...".format(counter+1, total, package)
        logfile = os.path.join(logdir, "{0}{1:.0f}.log".format(package.name, time.time()))
        # Either redirect build output straight to the log file, or tee it
        # to the console while preserving mkpkg's exit code via PIPESTATUS.
        output_method = ">" if outfile else "| tee"
        # FIXME: bash-only?
        output_add = "" if outfile else "; ( exit ${PIPESTATUS[0]} )"
        if sys.stdout.isatty():
            # Show build progress in the terminal title.
            sys.stdout.write("\x1b]2;{0}\x07".format(s))
            sys.stdout.flush()
        _("{c.green}" + s)
        if config.clopt('accurate'):
            _("{c.green} installing dependencies")
            install.from_list(package.deps)
            install.from_list(package.installdeps)
            # TODO: log installed packages to status
        path = package.abuild.location
        command = "cd {0} && mkpkg {1} {2} {3} 2>&1 {4} ".format(path, mkpkg_opts,
                output_method, logfile, output_add)
        ext_code = subprocess.call(command, shell=True)
        status.append({"code": ext_code, "output": logfile, "success": bool(not ext_code)})
        if ext_code:
            _w("{c.red}BUILD FAILED")
            if not skip_failed:
                _e("{c.red}Package {c.cyan}{0}{c.red} failed to build, stopping.", None, package.name)
                _("""{c.white}Successfully built: {c.bold}{c.yellow}{0}{c.white}{c.end}"""\
                  """ of {c.bold}{c.yellow}{1}{c.white}{c.end} packages.""", counter, total)
                break
        else:
            _("{c.green}BUILD OK")
    if outfile:
        # Fixed: previously dumped to the undefined name `o` (NameError);
        # write the JSON report to the opened file handle.
        with open(outfile, 'w') as ofile:
            json.dump(status, ofile)
def process_list(package_list, origin_package_set):
    """Drive the full build pipeline for *package_list*.

    Resolves build instructions, prints them, then — depending on the
    command-line options — either stops early (dependency listing, order
    listing, missing-package errors, graph generation) or installs the
    required dependencies and builds the packages in order.

    :param package_list: packages requested for building.
    :param origin_package_set: origin set of the packages, forwarded to
        instruction resolution and dependency installation.
    """
    packages = get_build_instructions(package_list, origin_package_set)
    if packages is None:
        return
    print_instructions(packages)
    no_deps = [p.name for p in package_list if not p.deps]
    if no_deps and config.clopt('with_deps', False):
        _w("{c.white}Packages without build_deps:{c.end} {c.yellow}{0}",
           ' '.join(no_deps))
        return
    # Only print the build order, nothing else.
    if config.clopt('list_order'):
        return
    # Check for missing packages.
    # Consistency fix: key with PKG_STATUS_STR.missing like everywhere else
    # in this module instead of the hard-coded 'missing' literal.
    if PKG_STATUS_STR.missing in packages and not settings.opt('ignore_missing'):
        missing = [p.name for p in packages[PKG_STATUS_STR.missing]]
        _e("{c.red}Errors detected: packages missing: {c.cyan}{0}",
           None, ' '.join(missing))
        return
    # Create a dependency graph if requested.
    graph = config.clopt('graph_path', None)
    if graph:
        highlight = config.clopt('highlight_graph', '')
        highlight = [Package(p) for p in highlight.split()]
        print_graph(package_list, graph, highlight)
        return
    skip = int(config.clopt('start_from', 0))
    build_order = packages.get(PKG_STATUS_STR.build, [])
    if skip and skip < len(build_order):
        _("{c.white}Build will be started from {c.bold}{c.yellow}{0}{c.end}",
          build_order[skip].name)
    if settings.opt('ask'):
        _("""{c.bold}{c.white}Are you {c.green}ready to build{c.white}"""\
          """ packages? [{c.green}Y{c.white}/{c.red}n{c.white}]{c.end}""")
        answer = ''.join(sys.stdin.read(1).splitlines())
        if answer not in ('', 'y', 'Y'):
            return
    if not config.clopt('accurate'):
        # Non-accurate mode: install all dependencies up front, once.
        install.from_list(package_list, origin_package_set)
    build_packages(build_order)
    if config.clopt('accurate'):
        _("{c.green} removing installed packages due to accurate mode")
        install.remove_installed()
# Public API: only process_list is exported via `from <module> import *`.
__all__ = ['process_list']
| StarcoderdataPython |
# grades/migrations/0009_auto_20200514_1432.py
# Generated by Django 3.0 on 2020-05-14 14:32
from django.db import migrations
def create_through_relations(apps, schema_editor):
    """Backfill CourseTag rows for every existing tag/course association.

    Uses the historical model classes supplied by the migration framework
    (``apps.get_model``) rather than importing the current models directly.
    """
    tag_model = apps.get_model("grades", "Tag")
    through_model = apps.get_model("grades", "CourseTag")
    for current_tag in tag_model.objects.all():
        for linked_course in current_tag.courses.all():
            through_model.objects.create(tag=current_tag, course=linked_course)
class Migration(migrations.Migration):
    """Data migration: copy existing Tag/Course links into the CourseTag
    through model introduced by 0008_coursetag; reversal is a no-op."""

    dependencies = [
        ("grades", "0008_coursetag"),
    ]

    operations = [
        migrations.RunPython(
            create_through_relations, reverse_code=migrations.RunPython.noop
        ),
    ]
| StarcoderdataPython |
131492 | # Copyright (c) 2018, <NAME>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A basic test that always passes, as long as everythin in `targets` builds.
This is usefull, for example, with a genrule.
"""
def build_test(name = None, targets = [], tags = []):
    """A test that depends on arbitrary targets.

    Succeeds as long as every entry in `targets` builds; the test script
    itself does nothing.

    Args:
        name: The target name.
        targets: Targets to check.
        tags: tags for the test.
    """
    # Use a genrule to ensure the targets are built: listing them as srcs
    # forces Bazel to build them before the test's data file can be produced.
    native.genrule(
        name = name + "_gen",
        srcs = targets,
        outs = [name + "_gen.out"],
        tags = tags,
        visibility = ["//visibility:private"],
        cmd = "echo > $@",
    )
    # The sh_test runs a blank script; depending on the genrule output via
    # `data` ties the test's success to the targets having built.
    native.sh_test(
        name = name,
        srcs = ["@bazel_rules//build_test:blank.sh"],
        data = [name + "_gen.out"],
        tags = tags,
        timeout = "short",
    )
| StarcoderdataPython |
class Persona:
    """A person identified by a name that announces its own destruction."""

    def __init__(self, nombre):
        # Remember the name this person was created with.
        self.nombre = nombre

    def __del__(self):
        # Emit a farewell message when the instance is garbage-collected.
        mensaje = "Ha muerto {}".format(self.nombre)
        print(mensaje)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.