id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6459110 | <filename>yandex-contest/training-backend/b.py
class Row:
    """One aircraft row: a left seat block (A-C) and a right block (D-F)."""

    def __init__(self, seats):
        # seats: {'left': {'A': ch, 'B': ch, 'C': ch}, 'right': {'D': ch, ...}}
        # where ch is '.' (free), 'X' (just taken) or '#' (occupied).
        self.seats = seats
        self.taken = None  # seat letters marked 'X' by the last successful take()
        self.side = None   # side ('left'/'right') of the last successful take()

    def take(self, number, side, place):
        """Try to seat a group of `number` on `side` at `place` ('window' or not).

        Marks the chosen seats 'X' and returns True, or returns False when
        any required seat is not free.
        """
        wanted = set('DEF') if side == 'right' else set('ABC')
        # Seats to give up, keyed by (group size, wants a window seat).
        drop = {
            (2, True): {'C', 'D'},
            (2, False): {'A', 'F'},
            (1, True): {'C', 'D', 'B', 'E'},
            (1, False): {'A', 'F', 'B', 'E'},
        }.get((number, place == 'window'), set())
        wanted -= drop
        if any(self.seats[side][seat] != '.' for seat in wanted):
            return False
        self.taken, self.side = wanted, side
        for seat in wanted:
            self.seats[side][seat] = 'X'
        return True

    def __str__(self):
        left = ''.join(self.seats['left'].values())
        right = ''.join(self.seats['right'].values())
        return left + '_' + right

    def recently_taken(self, index):
        """Describe the seats assigned by the last take(), prefixed by row `index`."""
        labels = ' '.join(str(index) + seat for seat in sorted(self.taken))
        return f"Passengers can take seats: {labels}"

    def remove_x(self):
        """Convert the freshly-marked 'X' seats to permanent '#' occupancy."""
        if self.taken and self.side:
            for seat in self.taken:
                self.seats[self.side][seat] = '#'
        self.taken = None
        self.side = None
# Read the cabin layout: n rows, each formatted as "ABC_DEF" seat strings.
n = int(input())
rows = []
for _ in range(n):
    left, right = input().split('_')
    rows.append(Row({
        'left': {'A': left[0], 'B': left[1], 'C': left[2]},
        'right': {'D': right[0], 'E': right[1], 'F': right[2]},
    }))

# Process m requests of the form "<group size> <side> <place>".
m = int(input())
for _ in range(m):
    size, side, place = input().split()
    for index, row in enumerate(rows, start=1):
        if row.take(int(size), side, place):
            print(row.recently_taken(index))
            for each in rows:
                print(each)
            row.remove_x()
            break
    else:
        # No row could accommodate the group.
        print('Cannot fulfill passengers requirements')
| StarcoderdataPython |
6685751 | from datetime import datetime
from enum import Enum
from typing import List, Optional
from pydantic import Field
from opennem.api.schema import ApiBase
from opennem.core.dispatch_type import DispatchType
from opennem.schema.network import NetworkNEM, NetworkSchema
from opennem.schema.opennem import OpennemBaseDataSchema
class FueltechSchema(ApiBase):
    """API schema for a generation fuel technology (e.g. a fuel-type code with label)."""

    code: str
    label: Optional[str]
    renewable: Optional[bool]
class FacilityStatusSchema(ApiBase):
    """API schema for a facility status code and its human-readable label."""

    code: str
    label: Optional[str]
class FacilitySchema(ApiBase):
    """API schema for a single generation facility (unit) attached to a station."""

    id: Optional[int]
    # Defaults to the NEM network schema when not supplied.
    network: NetworkSchema = NetworkNEM
    fueltech: Optional[FueltechSchema]
    status: Optional[FacilityStatusSchema]
    station_id: Optional[int]
    # @TODO no longer optional
    code: str = ""
    dispatch_type: DispatchType = DispatchType.GENERATOR
    active: bool = True
    capacity_registered: Optional[float]
    registered: Optional[datetime]
    deregistered: Optional[datetime]
    expected_closure_date: Optional[datetime]
    expected_closure_year: Optional[int]
    network_region: Optional[str]
    unit_id: Optional[int]
    unit_number: Optional[int]
    unit_alias: Optional[str]
    unit_capacity: Optional[float]
    emissions_factor_co2: Optional[float]
    # Moderation fields: approval state plus who/when.
    approved: bool = False
    approved_by: Optional[str]
    approved_at: Optional[datetime]
class LocationSchema(ApiBase):
    """API schema for a physical address plus optional lat/lng coordinates."""

    id: Optional[int]
    address1: Optional[str] = ""
    address2: Optional[str] = ""
    locality: Optional[str] = ""
    state: Optional[str] = ""
    postcode: Optional[str] = ""
    country: Optional[str] = "au"
    # Geo fields
    # place_id: Optional[str]
    # geocode_approved: bool = False
    # geocode_skip: bool = False
    # geocode_processed_at: Optional[datetime] = None
    # geocode_by: Optional[str]
    # geom: Optional[Any] = None
    # boundary: Optional[Any]
    lat: Optional[float]
    lng: Optional[float]
class StationRecord(ApiBase):
    """API schema for a station: identity, metadata and its child facilities."""

    id: int
    code: str
    name: Optional[str]
    # Original network fields
    network_name: Optional[str]
    # location: Optional[LocationSchema]
    location_id: int
    facilities: List[FacilitySchema]
    approved: bool = False
    # network: Optional[NetworkSchema] = None
    description: Optional[str]
    wikipedia_link: Optional[str]
    wikidata_id: Optional[str]
    created_by: Optional[str]
    created_at: Optional[datetime]
class StationResponse(OpennemBaseDataSchema):
    """Response envelope carrying a single station record."""

    record: StationRecord
class StationsResponse(OpennemBaseDataSchema):
    """Response envelope carrying a list of station records."""

    data: List[StationRecord]
class StationUpdateResponse(ApiBase):
    """Response returned after a station update: success flag plus the record."""

    success: bool = False
    record: StationRecord
class StationModificationTypes(str, Enum):
    """Allowed moderation actions on a station."""

    approve = "approve"
    reject = "reject"
class StationModification(ApiBase):
    """Request body for moderating a station: the action plus an optional comment."""

    comment: Optional[str] = Field(None)
    modification: StationModificationTypes
| StarcoderdataPython |
5033342 | import glob
import itertools
import multiprocessing
import os
import shutil
import subprocess
import sys
import time
from parsable import parsable
import pomagma.atlas
import pomagma.cartographer
import pomagma.surveyor
import pomagma.theorist
import pomagma.util
from pomagma.util import DB, suggest_region_sizes
# Replace the imported module object with a Parsable dispatcher so the
# @parsable decorator below registers command-line commands.
parsable = parsable.Parsable()
DEFAULT_SURVEY_SIZE = 16384 + 512 - 1
MIN_SLEEP_SEC = 1  # initial idle back-off, seconds
MAX_SLEEP_SEC = 600  # idle back-off ceiling, seconds
PYTHON = sys.executable  # interpreter used to spawn forked worker processes
class parsable_fork(object):
    """Run a registered parsable command in a child Python process.

    The command is dispatched as ``python -m pomagma.workers <name> args...``.
    """

    def __init__(self, fun, *args, **kwargs):
        self.args = [PYTHON, '-m', 'pomagma.workers', fun.__name__]
        self.args += [str(arg) for arg in args]
        # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; items() behaves identically on both.
        for key, val in kwargs.items():
            self.args.append('{}={}'.format(key, val))
        self.proc = subprocess.Popen(self.args)

    def wait(self):
        """Block until the child exits; assert a zero exit code."""
        self.proc.wait()
        code = self.proc.returncode
        assert code == 0, '\n'.join([
            'forked command failed with exit code {}'.format(code),
            ' '.join(self.args)])

    def terminate(self):
        """Terminate the child if it is still running."""
        if self.proc.poll() is None:
            self.proc.terminate()
class fork(object):
    """Run a function in a child process via multiprocessing."""

    def __init__(self, fun, *args, **kwargs):
        # Human-readable description of the call, used in failure messages.
        self.command = '{}({})'.format(fun.__name__, ', '.join([
            str(arg) for arg in args
        ] + [
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; items() behaves identically on both.
            '{}={}'.format(key, repr(val)) for key, val in kwargs.items()
        ]))
        self.proc = multiprocessing.Process(
            target=fun,
            args=args,
            kwargs=kwargs)
        self.proc.start()

    def wait(self):
        """Join the child and assert it exited cleanly."""
        self.proc.join()
        code = self.proc.exitcode
        assert code == 0, '\n'.join([
            'forked command failed with exit code {}'.format(code),
            self.command])

    def terminate(self):
        """Terminate the child if it is still alive."""
        if self.proc.is_alive():
            self.proc.terminate()
class Sleeper(object):
    """Exponential back-off sleeper for idle worker loops."""

    def __init__(self, name):
        self.name = name
        self.duration = MIN_SLEEP_SEC

    def reset(self):
        # Work was found: drop back to the shortest delay.
        self.duration = MIN_SLEEP_SEC

    def sleep(self):
        """Announce the pause on stderr, sleep, then double the next delay."""
        sys.stderr.write('# {} sleeping\n'.format(self.name))
        sys.stderr.flush()
        time.sleep(self.duration)
        # Geometric back-off, capped at MAX_SLEEP_SEC.
        self.duration = min(MAX_SLEEP_SEC, self.duration * 2)
class FileQueue(object):
    """Filesystem-backed queue of database files matching a numbered template."""

    def __init__(self, path, template='{}'):
        self.path = path
        self.template = template
        # Glob pattern matching only numbered entries (e.g. 0.pb, 17.pb).
        self.pattern = os.path.join(self.path, DB(template.format('[0-9]*')))

    def get(self):
        # specifically ignore temporary files like temp.1234.0.pb
        return glob.glob(self.pattern)

    def __iter__(self):
        return iter(self.get())

    def __len__(self):
        return len(self.get())

    def try_pop(self, destin):
        """Move one queued file to `destin`; return False when the queue is empty.

        NOTE(review): the rename is not guarded by the queue mutex, so two
        concurrent consumers could race on the same file — confirm callers
        pop onto uniquely named destinations.
        """
        for source in self:
            os.rename(source, destin)
            return True
        return False

    def push(self, source):
        """Move `source` into the queue under the first unused index."""
        if self.path and not os.path.exists(self.path):
            os.makedirs(self.path)
        with pomagma.util.mutex(self.path):
            for i in itertools.count():
                destin = os.path.join(self.path, DB(self.template.format(i)))
                if not os.path.exists(destin):
                    os.rename(source, destin)
                    return

    def clear(self):
        """Delete every queued file."""
        for item in self:
            os.remove(item)
class CartographerWorker(object):
    """Long-running worker that owns the atlas 'world' database.

    It keeps a queue of trimmed regions ready for surveyors, folds finished
    surveys back into the world, normalizes the world via inference, and
    hands conjectures to the theorist.

    infer_state encodes normalization progress: 0 = positive inference
    pending, 1 = negative inference pending, 2 = fully normalized.
    """

    def __init__(self, theory, region_size, region_queue_size, **options):
        self.options = options
        self.log_file = options['log_file']
        self.world = DB('world')
        self.normal_world = DB('world.normal')
        self.normal_region = DB('region.normal.{:d}')
        self.min_size = pomagma.util.MIN_SIZES[theory]
        self.region_size = region_size
        self.region_queue = FileQueue('region.queue')
        self.survey_queue = FileQueue('survey.queue')
        self.region_queue_size = region_queue_size
        self.diverge_conjectures = 'diverge_conjectures.facts'
        self.diverge_theorems = 'diverge_theorems.facts'
        self.equal_conjectures = 'equal_conjectures.facts'
        DEBUG = False
        if DEBUG:
            options = pomagma.util.use_memcheck(
                options,
                'cartographer.memcheck.out')
        self.server = pomagma.cartographer.serve(theory, self.world, **options)
        self.db = self.server.connect()
        self.infer_state = 0
        # If the dumped world already matches its normalized copy (same
        # content hash), skip re-normalizing on startup.
        if os.path.exists(self.normal_world):
            world_digest = pomagma.atlas.get_hash(self.world)
            normal_world_digest = pomagma.atlas.get_hash(self.normal_world)
            if world_digest == normal_world_digest:
                self.infer_state = 2

    def stop(self):
        self.db.stop()
        # self.server.stop()

    def log(self, message):
        # Prefix every message with the server's resident set size.
        rss = pomagma.util.get_rss(self.server.pid)
        message = 'Cartographer {}k {}'.format(rss, message)
        pomagma.util.log_print(message, self.log_file)

    def is_normal(self):
        assert self.infer_state in [0, 1, 2]
        return self.infer_state == 2

    def garbage_collect(self):
        # assume surveyor.dump takes < 1.0 days
        pomagma.atlas.garbage_collect(grace_period_days=1.0)

    def try_work(self):
        """Do at most one unit of work; return False when fully idle."""
        return (
            self.try_produce_regions() or
            self.try_normalize() or
            self.try_consume_surveys()
        )

    def try_produce_regions(self):
        """Top up the region queue if it is below its target size."""
        queue_size = len(self.region_queue)
        if queue_size >= self.region_queue_size:
            return False
        else:
            self.fill_region_queue(self.region_queue)
            return True

    def try_normalize(self):
        """Advance normalization by one inference pass; True if work was done."""
        if self.is_normal():
            return False
        else:
            self.log('Inferring {}'.format(['pos', 'neg'][self.infer_state]))
            if self.db.infer(self.infer_state):
                # New facts were inferred: persist them and refresh regions.
                self.db.validate()
                self.db.dump(self.world)
                self.garbage_collect()
                self.replace_region_queue()
            else:
                # This inference pass converged; move to the next stage.
                self.infer_state += 1
                if self.is_normal():
                    self.log('Normalized')
                    self.db.dump(self.normal_world)
                    self.trim_normal_regions()
                    self.garbage_collect()
                    self.theorize()
            return True

    def try_consume_surveys(self):
        """Aggregate every queued survey into the world; True if any existed."""
        surveys = self.survey_queue.get()
        if not surveys:
            return False
        else:
            self.log('Aggregating {} surveys'.format(len(surveys)))
            for survey in surveys:
                self.db.aggregate(survey)
                self.db.validate()
                self.db.dump(self.world)
                self.garbage_collect()
                # Aggregation adds facts, so normalization must restart.
                self.infer_state = 0
                world_size = self.db.info()['item_count']
                self.log('world_size = {}'.format(world_size))
                os.remove(survey)
            self.db.crop()
            self.replace_region_queue()
            return True

    def fill_region_queue(self, queue):
        """Fill `queue` up to region_queue_size freshly trimmed regions."""
        self.log('Filling region queue')
        if not os.path.exists(queue.path):
            os.makedirs(queue.path)
        queue_size = len(queue)
        trim_count = max(0, self.region_queue_size - queue_size)
        regions_out = []
        for i in itertools.count():
            region_out = os.path.join(queue.path, DB(i))
            if not os.path.exists(region_out):
                regions_out.append(region_out)
                if len(regions_out) == trim_count:
                    break
        # trim in parallel because these are small
        self.db.trim([
            {'size': self.region_size, 'filename': r}
            for r in regions_out
        ])

    def replace_region_queue(self):
        """Rebuild the region queue from the current world state.

        New regions are trimmed under a temp path; pomagma.util.temp_copy
        presumably commits them into place when the context exits — confirm.
        """
        self.log('Replacing region queue')
        with pomagma.util.temp_copy(self.region_queue.path) as temp_path:
            self.fill_region_queue(FileQueue(temp_path))
            self.region_queue.clear()
        self.garbage_collect()

    def trim_normal_regions(self):
        """Dump a ladder of region sizes trimmed from the normalized world."""
        self.log('Trimming normal regions')
        assert self.is_normal()
        max_size = self.db.info()['item_count']
        # trim sequentially because these are large
        for size in suggest_region_sizes(self.min_size, max_size):
            filename = self.normal_region.format(size)
            self.db.trim([{'size': size, 'filename': filename}])

    def theorize(self):
        """Generate conjectures, try to prove divergence theorems, assume them."""
        self.log('Theorizing')
        conjectures = self.diverge_conjectures
        theorems = self.diverge_theorems
        self.db.conjecture(conjectures, self.equal_conjectures)
        with pomagma.util.temp_copy(conjectures) as temp_conjectures:
            with pomagma.util.temp_copy(theorems) as temp_theorems:
                # Seed the temp theorems file with previously proved theorems.
                if os.path.exists(theorems):
                    shutil.copyfile(theorems, temp_theorems)
                theorem_count = pomagma.theorist.try_prove_diverge(
                    conjectures,
                    temp_conjectures,
                    temp_theorems,
                    **self.options)
        if theorem_count > 0:
            self.log('Proved {} theorems'.format(theorem_count))
            counts = self.db.assume(theorems)
            if counts['pos'] + counts['neg']:
                self.log('Assumed {} pos + {} neg facts'.format(
                    counts['pos'],
                    counts['neg']))
                self.db.validate()
                self.db.dump(self.world)
                self.garbage_collect()
            # New assumptions require re-normalizing; skip the pos pass when
            # nothing positive was assumed.
            self.infer_state = 0 if counts['pos'] else 1
            self.replace_region_queue()
@parsable
def cartographer_work(
        theory,
        region_size=(DEFAULT_SURVEY_SIZE - 512),
        region_queue_size=4,
        **options):
    """Start cartographer worker.

    Holds the mutex on the world database for the worker's whole lifetime,
    then loops forever: do one unit of work, or back off while idle.
    """
    min_size = pomagma.util.MIN_SIZES[theory]
    assert region_size >= min_size
    options.setdefault('log_file', 'cartographer.log')
    with pomagma.atlas.chdir(theory), pomagma.util.mutex(DB('world')):
        worker = CartographerWorker(
            theory,
            region_size,
            region_queue_size,
            **options)
        try:
            sleeper = Sleeper('cartographer')
            while True:
                # Exponential back-off while idle; reset after useful work.
                if not worker.try_work():
                    sleeper.sleep()
                else:
                    sleeper.reset()
        finally:
            worker.stop()
def cartographer(*args, **kwargs):
    """Fork a cartographer worker in a child process (see cartographer_work)."""
    return parsable_fork(cartographer_work, *args, **kwargs)
@parsable
def surveyor_work(theory, step_size=512, **options):
    """Start surveyor worker.

    Loops forever: pop a region from the region queue, survey it grown by
    `step_size`, and push the result onto the survey queue.
    """
    assert step_size > 0
    with pomagma.atlas.chdir(theory):
        region_queue = FileQueue('region.queue')
        survey_queue = FileQueue('survey.queue')
        # Work on temp-named files so in-progress results never match the
        # queues' numbered-file patterns.
        region = pomagma.util.temp_name(DB('region'))
        survey = pomagma.util.temp_name(DB('survey'))
        options.setdefault('log_file', 'survey.log')
        sleeper = Sleeper('surveyor')
        while True:
            if not region_queue.try_pop(region):
                sleeper.sleep()
            else:
                sleeper.reset()
                region_size = pomagma.atlas.get_item_count(region)
                survey_size = region_size + step_size
                pomagma.surveyor.survey(
                    theory,
                    region,
                    survey,
                    survey_size,
                    **options)
                os.remove(region)
                survey_queue.push(survey)
def surveyor(*args, **kwargs):
    """Fork a surveyor worker in a child process (see surveyor_work)."""
    return parsable_fork(surveyor_work, *args, **kwargs)
if __name__ == '__main__':
    # Dispatch the command-line verb registered via the @parsable decorators.
    parsable()
| StarcoderdataPython |
"""Exploration queries against the Northwind sample SQLite database."""
import sqlite3

conn = sqlite3.connect('northwind_small.sqlite3')
curs = conn.cursor()

# Question 1: the ten most expensive products by unit price.
q1 = curs.execute("""
SELECT ProductName, UnitPrice FROM Product
ORDER BY UnitPrice DESC
LIMIT 10;
""").fetchall()
print("The ten most expensive items (per unit price) in the database are: ", q1)

# Question 2: average employee age at hire time.
# NOTE(review): HireDate - BirthDate subtracts ISO-8601 date strings, which
# SQLite coerces to their leading year numbers — the result is whole years
# only. Confirm this precision is acceptable.
q2 = curs.execute("""
SELECT AVG(HireDate - BirthDate) from Employee
""").fetchall()
print("The average age of an employee at the time of their hiring is: ", q2)

# Part 3 Question 1: the ten most expensive products with their suppliers.
p3q1 = curs.execute("""
SELECT Supplier.CompanyName AS Supplier, Product.ProductName AS ProductName , Product.UnitPrice as UnitPrice
FROM Product JOIN Supplier
ON Product.SupplierID = Supplier.ID
ORDER BY UnitPrice DESC
LIMIT 10;
""").fetchall()
print("The ten most expensive items (per unit price) and their suppliers are: ", p3q1)

# Part 3 Question 2: the category with the most unique products.
# BUG FIX: the original aggregated without GROUP BY, so SQLite paired the
# global distinct-product count with an arbitrary category name. Group per
# category and keep only the top row.
p3q2 = curs.execute("""
SELECT Category.CategoryName, COUNT(DISTINCT Product.ProductName) as unique_prod
FROM Product JOIN Category
ON Product.CategoryID = Category.ID
GROUP BY Category.CategoryName
ORDER BY unique_prod DESC
LIMIT 1
""").fetchall()[0][0]
print("The largest category by number of unique products is: ", p3q2)

conn.close()
1936040 | <reponame>uleague/universityleague-match-stats
"""
This module is mainly for dataclasses.
"""
from dataclasses import dataclass
from typing import List, Sequence, TypedDict, Dict, Union
from .exceptions import TournamentError
@dataclass
class MatchPlayer(TypedDict):
    """One player's entry inside a match payload.

    NOTE(review): stacking @dataclass on a TypedDict subclass is unusual —
    confirm the decorator is intentional; a plain TypedDict would normally
    suffice for dict-shaped payloads.
    """

    account_id: int
    active_plus_subscription: bool
    hero_id: int
    leaver_status: int
    mmr_type: int
    player_name: str
    player_slot: int
@dataclass
class Match(TypedDict):
    """A single match payload: both teams' metadata, players and outcome.

    NOTE(review): stacking @dataclass on a TypedDict subclass is unusual —
    confirm the decorator is intentional.
    """

    dire_team_complete: int
    dire_team_id: int
    dire_team_logo: int
    dire_team_logo_url: str
    dire_team_name: str
    dire_team_tag: str
    game_mode: int
    leagueid: int
    lobby_type: int
    match_flags: int
    match_id: int
    match_outcome: int
    negative_votes: int
    players: List[Dict["str", Union[MatchPlayer, str, int, List]]]
    positive_votes: int
    radiant_team_complete: int
    radiant_team_id: int
    radiant_team_logo: int
    radiant_team_logo_url: str
    radiant_team_name: str
    radiant_team_tag: str
    series_id: int
    series_type: int
    # Unix timestamp; note the camelCase key is part of the wire format.
    startTime: int
@dataclass
class Series(TypedDict):
    """A series payload: its id, type, and the list of contained matches.

    NOTE(review): stacking @dataclass on a TypedDict subclass is unusual —
    confirm the decorator is intentional.
    """

    series_id: int
    matches: List[Dict["str", Union[Match, int, List]]]
    series_type: int
@dataclass
class Tournament:
    """Top-level tournament payload: one page of series with their matches."""

    request_id: int
    results_remaining: int
    series: List[Dict["str", Union[Series, int, List]]]
    total_results: int

    def get_match(self, start_time: int) -> Match:
        """Return the first match whose startTime equals `start_time`, else None.

        :param start_time: unix start time to search for
        :return: the matching :class:`Match` dict, or None when absent
        """
        return next(
            (
                candidate
                for block in self.series
                for candidate in block["matches"]
                if candidate["startTime"] == start_time
            ),
            None,
        )
| StarcoderdataPython |
399410 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class and functions to store reduced density matrices."""
from __future__ import absolute_import
import copy
import numpy
from fermilib.ops import (FermionOperator,
InteractionTensor,
InteractionOperator,
normal_ordered)
from projectq.ops import QubitOperator
class InteractionRDMError(Exception):
    """Raised by InteractionRDM for invalid operators or observables."""
    pass
class InteractionRDM(InteractionTensor):
    r"""Class for storing 1- and 2-body reduced density matrices.

    Attributes:
        one_body_tensor: The expectation values <a^\dagger_p a_q>.
        two_body_tensor: The expectation values
            <a^\dagger_p a^\dagger_q a_r a_s>.
        n_qubits: An int giving the number of qubits.
    """

    def __init__(self, one_body_tensor, two_body_tensor):
        r"""Initialize the InteractionRDM class.

        Args:
            one_body_tensor: Expectation values <a^\dagger_p a_q>.
            two_body_tensor: Expectation values
                <a^\dagger_p a^\dagger_q a_r a_s>.
        """
        # The parent InteractionTensor takes a constant term first; an RDM
        # has none, hence None.
        super(InteractionRDM, self).__init__(None, one_body_tensor,
                                             two_body_tensor)

    @classmethod
    def from_spatial_rdm(cls, one_rdm_a, one_rdm_b,
                         two_rdm_aa, two_rdm_ab, two_rdm_bb):
        """Build an InteractionRDM from spin-blocked spatial RDMs."""
        one_rdm, two_rdm = unpack_spatial_rdm(one_rdm_a, one_rdm_b,
                                              two_rdm_aa, two_rdm_ab,
                                              two_rdm_bb)
        # BUG FIX: the original passed an undefined name `constant` as a
        # first argument (NameError); __init__ takes only the two tensors.
        return cls(one_rdm, two_rdm)

    def expectation(self, operator):
        """Return expectation value of an InteractionRDM with an operator.

        Args:
            operator: A QubitOperator or InteractionOperator.

        Returns:
            float: Expectation value

        Raises:
            InteractionRDMError: Invalid operator provided.
        """
        if isinstance(operator, QubitOperator):
            # BUG FIX: the accumulator was initialized under the name
            # `expectation_value` but accumulated into `expectation`,
            # raising NameError on the first term.
            expectation = 0.
            for qubit_term in operator:
                expectation += qubit_term_expectation(self, qubit_term)
        elif isinstance(operator, InteractionOperator):
            expectation = operator.constant
            expectation += numpy.sum(self.one_body_tensor *
                                     operator.one_body_tensor)
            expectation += numpy.sum(self.two_body_tensor *
                                     operator.two_body_tensor)
        else:
            raise InteractionRDMError('Invalid operator type provided.')
        return expectation

    def get_qubit_expectations(self, qubit_operator):
        """Return expectations of QubitOperator in new QubitOperator.

        Args:
            qubit_operator: QubitOperator instance to be evaluated on
                this InteractionRDM.

        Returns:
            QubitOperator: QubitOperator with coefficients
            corresponding to expectation values of those operators.

        Raises:
            InteractionRDMError: Observable not contained in 1-RDM or 2-RDM.
        """
        from fermilib.transforms import reverse_jordan_wigner
        qubit_operator_expectations = copy.deepcopy(qubit_operator)
        # Drop the identity term; only non-trivial terms get expectations.
        del qubit_operator_expectations.terms[()]
        for qubit_term in qubit_operator_expectations.terms:
            expectation = 0.

            # Map qubits back to fermions.
            reversed_fermion_operators = reverse_jordan_wigner(
                QubitOperator(qubit_term), self.n_qubits)
            reversed_fermion_operators = normal_ordered(
                reversed_fermion_operators)

            # Loop through fermion terms.
            for fermion_term in reversed_fermion_operators.terms:
                coefficient = reversed_fermion_operators.terms[fermion_term]

                # Handle molecular term.
                if FermionOperator(fermion_term).is_molecular_term():
                    if not fermion_term:
                        expectation += coefficient
                    else:
                        indices = [operator[0] for operator in fermion_term]
                        rdm_element = self[indices]
                        expectation += rdm_element * coefficient

                # Handle non-molecular terms.
                elif len(fermion_term) > 4:
                    raise InteractionRDMError('Observable not contained '
                                              'in 1-RDM or 2-RDM.')
            qubit_operator_expectations.terms[qubit_term] = expectation
        return qubit_operator_expectations
| StarcoderdataPython |
4932313 | <reponame>likeand/ml<filename>samples/logistic_regression.py
# -*- coding: utf-8 -*-
# @Date : 2020/5/24
# @Author: Luokun
# @Email : <EMAIL>
import sys
from os.path import dirname, abspath
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(dirname(dirname(abspath(__file__))))
def test_logistic_regression():
    """End-to-end demo: fit logistic regression on two Gaussian blobs and plot."""
    from models.logistic_regression import LogisticRegression
    # Two classes of 500 2-D points each, shifted to opposite corners.
    x, y = np.random.randn(2, 500, 2), np.zeros([2, 500])
    x[0] += np.array([1, -1])  # shift class 0 toward the upper left
    x[1] += np.array([-1, 1])  # shift class 1 toward the lower right
    y[1] = 1
    plot_scatter(x[0], x[1], 'Real')
    x = x.reshape(-1, 2)
    y = y.flatten()
    logistic = LogisticRegression(2, lr=1e-3)
    train_logistic_regression(logistic, x, y, batch_size=32, epochs=100)
    pred = logistic.predict(x)
    plot_scatter_with_line(x[pred == 0], x[pred == 1], logistic.weights, 'Pred')
    acc = np.sum(pred == y) / len(pred)
    print(f'Acc = {100 * acc:.2f}%')
def train_logistic_regression(model, x, y, batch_size, epochs):
    """Fit `model` on shuffled mini-batches for `epochs` passes.

    Samples are reshuffled each epoch; a trailing partial batch is dropped.
    """
    order = np.arange(len(x))
    for _ in range(epochs):
        np.random.shuffle(order)
        xs, ys = x[order], y[order]
        # Only full batches: stop before an incomplete tail.
        for start in range(0, len(x) - batch_size + 1, batch_size):
            model.fit(xs[start:start + batch_size], ys[start:start + batch_size])
def plot_scatter(xy0, xy1, title):
    """Scatter-plot two point sets (red vs. green) in a fixed [-5, 5] square view."""
    plt.figure(figsize=[8, 8])
    plt.scatter(xy0[:, 0], xy0[:, 1], color='r', marker='.')
    plt.scatter(xy1[:, 0], xy1[:, 1], color='g', marker='.')
    plt.xlim(-5, 5)
    plt.ylim(-5, 5)
    plt.title(title)
    plt.show()
def plot_scatter_with_line(xy0, xy1, weights, title):
    """Scatter-plot two predicted classes plus the fitted decision boundary.

    `weights` is indexed as (w0, w1, w2); the line drawn is
    y = -(w0/w1) * x - (w2/w1), i.e. w0*x + w1*y + w2 = 0.
    """
    plt.figure(figsize=[8, 8])
    plt.scatter(xy0[:, 0], xy0[:, 1], color='r', marker='.')
    plt.scatter(xy1[:, 0], xy1[:, 1], color='g', marker='.')
    plt.xlim(-5, 5)
    plt.ylim(-5, 5)
    plt.title(title)
    # plot the dividing line
    ln_x = np.linspace(-5, 5, 100)
    ln_a = - weights[0] / weights[1]
    ln_b = - weights[2] / weights[1]
    ln_y = ln_a * ln_x + ln_b
    plt.plot(ln_x, ln_y, color='b', linewidth=1)
    plt.show()
if __name__ == '__main__':
    # Run the demo when executed as a script.
    test_logistic_regression()
| StarcoderdataPython |
6645473 | """
Greetings Chatbot
Author: <NAME>
Date: 09-14-20
"""
# This "bot" takes in the value of the users name using input(),
# Then responds with "Nice to meet you, {name}"
# Print function
# print("Hi, what's your name?")
# name = input()
# print("Nice to meet you",name)
# Concatenation
# print("Hi, what's your name?")
# nameconcat = input()
# print("Nice to meet you" + nameconcat)
# Greet the user by name, then ask about and echo their favourite book.
name = input("Hi, what is your name?: ")
print(f"Nice to meet you, {name}.")
book = input(f"What's your favourite book, {name}?: ")
print(f"Cool book {name}, I too like {book}.")
| StarcoderdataPython |
4938700 | <gh_stars>0
import logging
import os
import yaml
from galaxy.tools import GALAXY_LIB_TOOLS_UNVERSIONED
from galaxy.jobs import JobDestination
log = logging.getLogger(__name__)
CONTAINER_RULE_MAPPER_FILE = os.path.join(
os.path.dirname(__file__), 'container-mapper-rules.yml')
def _load_container_mappings():
    """Load container mapping rules from the YAML rules file, or {} if absent."""
    if os.path.exists(CONTAINER_RULE_MAPPER_FILE):
        with open(CONTAINER_RULE_MAPPER_FILE) as f:
            # safe_load: the rules file is plain data, and yaml.load without
            # an explicit Loader is deprecated and unsafe on untrusted input.
            return yaml.safe_load(f)
    else:
        return {}
CONTAINER_RULE_MAP = _load_container_mappings()
def _apply_rule_mappings(tool, params):
    """Apply the first container rule matching `tool.id` to `params`.

    Returns True when a rule matched (and its container settings were merged
    into `params`), else False.
    """
    if CONTAINER_RULE_MAP:
        for mapping in CONTAINER_RULE_MAP.get('mappings', {}):
            # Default to empty collections so a malformed rule without
            # `tool_ids` or `container` cannot raise TypeError.
            if tool.id in mapping.get('tool_ids', []):
                params.update(mapping.get('container', {}))
                return True
    return False
def k8s_container_mapper(tool, referrer, k8s_runner_id="k8s"):
    """Build a docker-enabled Kubernetes JobDestination for `tool`.

    Rule-file mappings take precedence; otherwise unversioned Galaxy library
    tools fall back to the destination's configured default container.
    """
    params = dict(referrer.params)
    params['docker_enabled'] = True
    if not _apply_rule_mappings(tool, params):
        if tool.id in GALAXY_LIB_TOOLS_UNVERSIONED:
            default_container = params.get('docker_default_container_id')
            if default_container:
                params['docker_container_id_override'] = default_container
    log.debug("[k8s_container_mapper] Dispatching to %s with params %s" % (k8s_runner_id, params))
    return JobDestination(runner=k8s_runner_id, params=params)
| StarcoderdataPython |
63496 | <filename>main.py
from model import unet
from keras.callbacks import ModelCheckpoint
from data import trainGenerator, geneTrainNpy,testGenerator,saveResult
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Data augmentation settings for the training generator.
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')
# Yields batches of 2 augmented (image, label) pairs from the training set.
myGene = trainGenerator(2, './data/membrane/train', 'image', 'label', data_gen_args, save_to_dir=None)
model = unet()
# Checkpoint the weights with the lowest training loss seen so far.
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=5, callbacks=[model_checkpoint])
# Predict on 30 test images and save the results.
testGene = testGenerator("./data/membrane/test/images")
results = model.predict_generator(testGene, 30, verbose=1)
saveResult("./data/membrane/test/predicts", results)
| StarcoderdataPython |
6429600 |
#Importe de bibliotecas
from tkinter import *
from tkinter import ttk
from typing import ValuesView
# Main window setup.
janela=Tk()
janela.title("Calculadora Tkinter - Matemática - IMC")
janela.geometry("510x500")
# Tab container for the two calculator pages.
aba=ttk.Notebook(janela)
aba.place(x=20,y=20, width=470, height=460)
# Shared calculator state: the digit entry buffer, the displayed value,
# the stored first operand, and the pending operator.
lista_comandos=[]
var=StringVar()
n1=StringVar()
numerador=0
op=""
#Funções associadas aos botões
def num1():
    # Append digit 1 to the entry buffer and refresh the display.
    lista_comandos.append(1)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num2():
    # Append digit 2 to the entry buffer and refresh the display.
    lista_comandos.append(2)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num3():
    # Append digit 3 to the entry buffer and refresh the display.
    lista_comandos.append(3)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num4():
    # Append digit 4 to the entry buffer and refresh the display.
    lista_comandos.append(4)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num5():
    # Append digit 5 to the entry buffer and refresh the display.
    lista_comandos.append(5)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num6():
    # Append digit 6 to the entry buffer and refresh the display.
    lista_comandos.append(6)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num7():
    # Append digit 7 to the entry buffer and refresh the display.
    lista_comandos.append(7)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num8():
    # Append digit 8 to the entry buffer and refresh the display.
    lista_comandos.append(8)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num9():
    # Append digit 9 to the entry buffer and refresh the display.
    lista_comandos.append(9)
    var.set(''.join(str(tok) for tok in lista_comandos))
def num0():
    # Append digit 0 to the entry buffer and refresh the display.
    lista_comandos.append(0)
    var.set(''.join(str(tok) for tok in lista_comandos))
def igual():
    """Apply the pending operator `op` to n1 (first operand) and var (second).

    Writes the result back into the display variable `var`.
    """
    retorno = 0
    x1 = float(n1.get())
    x2 = float(var.get())
    if op == "+":
        retorno = x1 + x2
    elif op == "-":
        retorno = x1 - x2
    elif op == "*":
        retorno = x1 * x2
    elif op == "/":
        retorno = x1 / x2
    elif op == "%":
        # x2 percent of... the original computes (x1/100)*x2.
        retorno = (x1 / 100) * x2
    elif op == "x2":
        retorno = x1 ** x2
    # BUG FIX: the original had a second, unreachable `elif op == "%"` branch
    # after the exponent case; it was dead code and has been removed.
    var.set(retorno)
def apagar_ultimo():
    # Backspace: drop the most recently typed token and redraw what remains.
    lista_comandos.pop()
    var.set(''.join(str(tok) for tok in lista_comandos))
def limpar():
    # Clear the display, the stored first operand, and the entry buffer.
    var.set("")
    n1.set("")
    lista_comandos.clear()
def percentual():
    # Save the current entry as the first operand and arm the '%' operator.
    global op
    n1.set(var.get())
    op = "%"
    var.set("%")
    lista_comandos.clear()
def quadrado():
    # Save the first operand and arm the exponent operator (shown as "X2").
    global op
    n1.set(var.get())
    op = "x2"
    var.set("X2")
    lista_comandos.clear()
def divisao():
    # Save the first operand and arm the division operator.
    global op
    n1.set(var.get())
    op = "/"
    var.set("/")
    lista_comandos.clear()
def multiplicacao():
    # Save the first operand and arm the multiplication operator.
    global op
    n1.set(var.get())
    op = "*"
    var.set("*")
    lista_comandos.clear()
def subtracao():
    # Save the first operand and arm the subtraction operator.
    global op
    n1.set(var.get())
    op = "-"
    var.set("-")
    lista_comandos.clear()
def soma():
    # Save the first operand and arm the addition operator.
    global op
    n1.set(var.get())
    op = "+"
    var.set("+")
    lista_comandos.clear()
def virgula():
    # Append a decimal separator (stored as '.') and refresh the display.
    lista_comandos.append(".")
    var.set(''.join(str(tok) for tok in lista_comandos))
# Math-calculator tab and its widgets.
LabelFrame_calculadora=LabelFrame(aba)
aba.add(LabelFrame_calculadora, text="MATEMÁTICA")
Frame_calculadora=Frame(LabelFrame_calculadora,borderwidth=1, relief="solid")
Frame_calculadora.place(x=20,y=20, width=430, height=100)
l=Label(Frame_calculadora, textvariable=n1).pack()
t=Entry(Frame_calculadora, textvariable=var, justify=RIGHT,font=("Arial",25)).place(x=20,y=20, width=390, height=60)
# Operator buttons. NOTE(review): the name `botao_percentual` is reused for
# several different buttons below; only the callbacks distinguish them.
botao_percentual=Button(LabelFrame_calculadora, text="%", background="#9b9695", font=("Arial",25),command=percentual)
botao_percentual.place(x=25,y=140, width=90, height=44)
botao_limpar=Button(LabelFrame_calculadora, text="C", background="#9b9695", font=("Arial",25),command=limpar)
botao_limpar.place(x=135,y=140, width=90, height=44)
botao_quadrado=Button(LabelFrame_calculadora, text="X2", background="#9b9695", font=("Arial",25),command=quadrado)
botao_quadrado.place(x=245,y=140, width=90, height=44)
botao_percentual=Button(LabelFrame_calculadora, text="/", background="#9b9695", font=("Arial",25),command=divisao)
botao_percentual.place(x=355,y=140, width=90, height=44)
botao_percentual=Button(LabelFrame_calculadora, text="X", background="#9b9695", font=("Arial",25),command=multiplicacao)
botao_percentual.place(x=355,y=199, width=90, height=44)
botao_percentual=Button(LabelFrame_calculadora, text="-", background="#9b9695", font=("Arial",25),command=subtracao)
botao_percentual.place(x=355,y=258, width=90, height=44)
botao_percentual=Button(LabelFrame_calculadora, text="+", background="#9b9695", font=("Arial",25),command=soma)
botao_percentual.place(x=355,y=317, width=90, height=44)
botao_percentual=Button(LabelFrame_calculadora, text="=", background="#9b9695", font=("Arial",25),command=igual)
botao_percentual.place(x=355,y=372, width=90, height=44)
# Digit keypad (3x3 grid plus the bottom row 0 / ',' / backspace).
Frame_numero=Frame(LabelFrame_calculadora,borderwidth=1)
Frame_numero.place(x=20,y=204, width=315, height=215)
botao_7=Button(Frame_numero, text="7", background="#9383f1", font=("Arial",25),command=num7)
botao_7.place(x=3,y=0, width=90, height=38)
botao_8=Button(Frame_numero, text="8", background="#9383f1", font=("Arial",25),command=num8)
botao_8.place(x=113,y=0, width=90, height=38)
botao_9=Button(Frame_numero, text="9", background="#9383f1", font=("Arial",25),command=num9)
botao_9.place(x=223,y=0, width=90, height=38)
botao_4=Button(Frame_numero, text="4", background="#9383f1", font=("Arial",25),command=num4)
botao_4.place(x=3,y=58, width=90, height=38)
botao_5=Button(Frame_numero, text="5", background="#9383f1", font=("Arial",25),command=num5)
botao_5.place(x=113,y=58, width=90, height=38)
botao_6=Button(Frame_numero, text="6", background="#9383f1", font=("Arial",25),command=num6)
botao_6.place(x=223,y=58, width=90, height=38)
botao_1=Button(Frame_numero, text="1", background="#9383f1", font=("Arial",25),command=num1)
botao_1.place(x=3,y=116, width=90, height=38)
botao_2=Button(Frame_numero, text="2", background="#9383f1", font=("Arial",25),command=num2)
botao_2.place(x=113,y=116, width=90, height=38)
botao_3=Button(Frame_numero, text="3", background="#9383f1", font=("Arial",25),command=num3)
botao_3.place(x=223,y=116, width=90, height=38)
botao_0=Button(Frame_numero, text="0", background="#9383f1", font=("Arial",25),command=num0)
botao_0.place(x=3,y=174, width=90, height=38)
botao_v=Button(Frame_numero, text=",", background="#9383f1", font=("Arial",25),command=virgula)
botao_v.place(x=113,y=174, width=90, height=38)
botao_a=Button(Frame_numero, text="<", background="#9383f1", font=("Arial",25),command=apagar_ultimo)
botao_a.place(x=223,y=174, width=90, height=38)
# IMC (BMI) tab state: weight/height entry variables, the computed index,
# and the status text shown to the user.
t1="Informe o peso e a altura!!!"
p = StringVar()
a = StringVar()
imc = StringVar()
indice=0
texto= StringVar()
texto.set(t1)
#Função associada ao botão
def calcular_imc():
    """Compute BMI from the `p` (weight) and `a` (height) entries.

    Publishes the rounded index into `imc` and a classification message
    into `texto`.
    """
    # Generalization: float() also accepts decimal weights ("70.5") while
    # still parsing the integers the original int() accepted.
    peso = float(p.get())
    altura = float(a.get())
    indice = peso / (altura ** 2)
    if indice <= 18.5:
        t1 = 'Você está abaixo do peso!'
    elif indice <= 25:
        t1 = 'Você está no peso ideal!'
    elif indice <= 30:
        t1 = 'Você está com sobrepeso!'
    elif indice <= 40:
        t1 = 'Você está obeso!'
    else:
        t1 = 'Você está com obesidade grave!'
    imc.set(round(indice, 2))
    texto.set(t1)
# BMI calculator tab and its widgets.
# NOTE(review): `.place()` returns None, so Frame_imc, peso, altura and
# Frame_imc_indice are all bound to None — harmless here because they are
# never used again, but don't rely on these names.
LabelFrame_imc=LabelFrame(aba)
aba.add(LabelFrame_imc, text="IMC")
Frame_imc=Frame(LabelFrame_imc,borderwidth=1, relief="solid").place(x=20,y=20, width=430, height=100)
Label(LabelFrame_imc,text="Peso", font= "20", anchor=W).place(x=80, y=150, width=50, height=50)
peso=Entry(LabelFrame_imc, textvariable=p, font=("Arial",25)).place(x=140, y=153, width=100, height=50)
Label(LabelFrame_imc,text="Altura", font= "20", anchor=W).place(x=80, y=230, width=50, height=80)
altura=Entry(LabelFrame_imc, textvariable=a,font=("Arial",25)).place(x=140, y=243, width=100, height=50)
Button(LabelFrame_imc,text="Calcular IMC", command=calcular_imc, background="#9b9695", font=("Arial",12)).place(x=140, y=330, width=100, height=50)
# Message label (texto) and the boxed result value (imc).
Label(LabelFrame_imc, textvariable=texto,font=("Arial",20)).place(x=40, y=40, width=380, height=60)
Frame_imc_indice=LabelFrame(LabelFrame_imc, borderwidth=1, relief="solid" ).place(x=290,y=313, width=120, height=80)
Label(LabelFrame_imc, textvariable=imc,font=("Arial",25)).place(x=300, y=330, width=100, height=50)
# Enter the Tk event loop.
janela.mainloop()
223291 | <reponame>ChoiZ/simple-python-scheduler
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from random import shuffle
# Configuration -----------------------------------------------------------
tracks_path = "/home/user/tracks_folder/"   # folder of "Artist - Title.ext" files
separation_artist = 10                      # min tracks between two plays of an artist
separation_title = 10                       # min tracks between two plays of a title
playlist_path = '/home/user/playlist_folder/playlist.m3u'
playlist_size = len(os.listdir(tracks_path))  # one playlist slot per source file
# Mutable module state ----------------------------------------------------
nb_pl = 0        # number of tracks placed so far
bac = []         # pool ("bac") of Track objects read from tracks_path
playlist = []    # ordered output playlist
artists = []     # all artist names seen (with duplicates)
titles = []      # all titles seen (with duplicates)
error = []       # accumulated configuration warnings
class Track:
    """Lightweight record describing one audio file in the pool."""

    def __init__(self, artist, title, filename, duration=0):
        """Store the track metadata; *duration* defaults to 0 (unknown)."""
        (self.artist, self.title,
         self.filename, self.duration) = artist, title, filename, duration
def read_folder(folder):
    """Register every file in *folder* named "Artist - Title.ext".

    Appends a Track to the module-level ``bac`` pool and records the
    artist and title in the ``artists``/``titles`` lists.
    NOTE(review): assumes each filename contains " - "; anything else
    raises IndexError — confirm the tracks folder is curated.
    """
    for entry in os.listdir(folder):
        stem = os.path.splitext(entry)[0]
        parts = stem.split(' - ')
        artist, title = parts[0], parts[1]
        bac.append(Track(artist, title, folder + entry))
        artists.append(artist)
        titles.append(title)
def get_track(i):
    """Append the *i*-th track of the shuffled pool to the playlist.

    Fix: the wrapped index ``j = i % len(bac)`` was computed (and even
    printed) but never used — the lookup was ``bac[i]``, which raises
    IndexError as soon as more tracks are requested than exist in the
    pool.  The lookup now uses ``j`` so the pool wraps around;
    behaviour is unchanged while ``i < len(bac)``.
    """
    j = i % len(bac)
    print("i : %d" % i)
    print("j : %d" % j)
    track = bac[j]
    print(track.artist)
    playlist.append(track)
# Scan the source folder, then validate the configuration.
read_folder(tracks_path)
# A separation larger than half the distinct artists/titles can never be
# honoured, hence the /2 ceilings below.
max_artist = len(list(set(artists)))/2
max_titles = len(list(set(titles)))/2
if separation_artist == 0:
    error.append("Warning: separation_artist must be set and greater than 0.")
if separation_title == 0:
    error.append("Warning: separation_title must be set and greater than 0.")
if playlist_size == 0:
    error.append("Warning: playlist_size must be set and greater than 0.")
if max_artist < separation_artist:
    error.append('Warning: separation_artist is too high: %d use a value between 1 and %d' % (separation_artist, max_artist))
if max_titles < separation_title:
    error.append('Warning: separation_title is too high: %d use a value between 1 and %d' % (separation_title, max_titles))
# Abort (exit code 0) after printing every collected warning.
if len(error) != 0:
    for err in error:
        print(err)
    sys.exit(0)
# Shuffle once, then fill the playlist slot by slot.
shuffle(bac)
while True:
    get_track(nb_pl)
    nb_pl+=1
    if playlist_size == nb_pl:
        break
# Write an extended-M3U playlist (duration -1 = unknown).
m3u = open(playlist_path, 'w')
m3u.write('#EXTM3U\n')
for song in playlist:
    m3u.write('#EXTINF:%s,%s - %s\n' % ('-1',song.artist,song.title))
    m3u.write(song.filename+'\n')
m3u.close()
5158137 | """Exercise doctests for all of our modules."""
from doctest import testmod
import pkgutil
import importlib
import zero_ex
def test_all_doctests():
    """Import every module under the zero_ex namespace and run its doctests."""
    for _, module_name, _ in pkgutil.walk_packages(
        path=zero_ex.__path__, prefix="zero_ex."
    ):
        module = importlib.import_module(module_name)
        print(module)
        failures, _ = testmod(module)
        assert failures == 0
| StarcoderdataPython |
11303279 | <reponame>waingram/code-embeddings<gh_stars>1-10
from setuptools import setup
# setuptools package metadata.
# NOTE(review): version, url and description are empty and the author
# fields are anonymised placeholders ("<NAME>", "<EMAIL>") — fill these in
# before publishing the package.
setup(
    name='code_embeddings',
    version='',
    packages=['code_embeddings'],
    url='',
    license='BSD-3-Clause',
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
    description=''
)
| StarcoderdataPython |
3589418 | <filename>test/__init__.py<gh_stars>0
import sys
import unittest
from .common import * # noqa: F401, F403
# Allow the test package to be executed directly; forwards the CLI args
# to unittest's own argument parser.
if __name__ == '__main__':
    unittest.main(argv=sys.argv)
| StarcoderdataPython |
134565 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.cluster import get_all_hdfs
from desktop import conf
from hadoop.fs import LocalSubFileSystem
from desktop.lib.apputil import has_hadoop
_filesystems = None
def _init_filesystems():
    """Initialize the module-scoped filesystem dictionary.

    Lazy, idempotent initialisation: the first call populates the global
    ``_filesystems`` mapping (name -> filesystem object); later calls
    return immediately.  HDFS entries are loaded only when Hadoop is
    available; local sub-filesystems come from the LOCAL_FILESYSTEMS
    configuration.
    """
    global _filesystems
    if _filesystems is not None:
        return  # already initialised

    _filesystems = {}

    if has_hadoop():
        # Load HDFSes
        _filesystems.update(get_all_hdfs())

    # Load local
    for identifier in conf.LOCAL_FILESYSTEMS.keys():
        local_fs = LocalSubFileSystem(
            conf.LOCAL_FILESYSTEMS[identifier].PATH.get())
        # A local FS must not reuse a name already taken by an HDFS (or
        # by another local FS).
        if identifier in _filesystems:
            raise Exception(("Filesystem '%s' configured twice. First is " +
                             "%s, second is local FS %s") % (identifier, _filesystems[identifier], local_fs))
        _filesystems[identifier] = local_fs
def get_filesystem(name):
    """Return the filesystem registered under *name*.

    Triggers lazy initialisation on first use.  Raises KeyError if no
    filesystem with that name is configured.
    """
    _init_filesystems()
    return _filesystems[name]
def reset():
    """
    reset() -- Forget all cached filesystems and go to a pristine state.

    The next get_filesystem() call will re-run initialisation, so this
    is useful after configuration changes (and in tests).
    """
    global _filesystems
    _filesystems = None
| StarcoderdataPython |
3584838 | <gh_stars>0
def quickSort(list):
    """Return a new ascending-sorted copy of *list* (recursive quicksort).

    The first element is the pivot; items equal to the pivot are grouped
    in the middle, so duplicates are handled correctly.
    NOTE(review): the parameter name shadows the builtin ``list`` — kept
    for interface compatibility.
    """
    if len(list) <= 1:
        return list
    pivot = list[0]
    smaller = [item for item in list if item < pivot]
    equal = [item for item in list if item == pivot]
    larger = [item for item in list if item > pivot]
    return quickSort(smaller) + equal + quickSort(larger)
| StarcoderdataPython |
11205262 | <reponame>drunkwater/leetcode
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#330. Patching Array
#Given a sorted positive integer array nums and an integer n, add/patch elements to the array such that any number in range [1, n] inclusive can be formed by the sum of some elements in the array. Return the minimum number of patches required.
#Example 1:
#nums = [1, 3], n = 6
#Return 1.
#Combinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3, 4.
#Now if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3], [2,3], [1,2,3].
#Possible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6].
#So we only need 1 patch.
#Example 2:
#nums = [1, 5, 10], n = 20
#Return 2.
#The two patches can be [2, 4].
#Example 3:
#nums = [1, 2, 2], n = 5
#Return 0.
#Credits:
#Special thanks to @dietpepsi for adding this problem and creating all test cases.
#class Solution:
# def minPatches(self, nums, n):
# """
# :type nums: List[int]
# :type n: int
# :rtype: int
# """
# Time Is Money | StarcoderdataPython |
12806883 | import pandas as pd
import numpy as np
import re
## Different than what was expected, creating a unique for for every DF column
## performed a slower execution than having different fors for each DF column
def cleanInvalidDFEntries(id_key,stamp,actor_column,verb_column,object_column):
    """Build a tidy xAPI statement DataFrame and purge invalid rows.

    Parameters are parallel columns from the raw CSV: statement ids,
    pandas timestamps, and the actor / verb / object cells already
    converted to dicts (or np.nan).  Any field that is missing or empty
    becomes NaN, and rows containing NaN are dropped at the end via
    dropInvalidValues().

    NOTE(review): the ``is np.nan`` checks only catch the np.nan
    singleton — values parsed as float('nan') would slip through.
    """
    day = [] # Check which day of the week the timestamp represent
    day_shift = [] # Check which shift of the day the time stamp represent: night, morning, afternoon
    actor = [] # List of respective actor's name
    lang = [] # List of respective language of the verb
    action = [] # List of verbs
    object_aim = [] # List of afected object
    for i in range(len(id_key)):
        # Test which part of the day is and add it to the list
        day.append(stamp[i].day_name()) # Check which day of the week is
        # Shifts: night = [20:00, 06:00), morning = [06:00, 12:00),
        # afternoon = everything else.
        if stamp[i].hour < 6 or stamp[i].hour >= 20:
            day_shift.append('night')
        elif stamp[i].hour >= 6 and stamp[i].hour < 12:
            day_shift.append('morning')
        else:
            day_shift.append('afternoon')
        # Grab the raw actor text in the CSV file as str, convert it to dict and collect the actor's name
        if not (actor_column[i] is np.nan): # Check if actor's name exists, if not, add a NaN which will be purged later
            if 'name' in actor_column[i].keys(): # Dict format: {'name':'person'}. Check if the key 'name' exists
                if actor_column[i]['name'] != "":
                    actor.append(actor_column[i]['name']) # This line appends the actor of the current iteration
                else:
                    actor.append(np.nan)
            else:
                actor.append(np.nan)
        else:
            actor.append(np.nan)
        # Grab the raw verb text in the CSV file as str, convert it to dict and collect the verb and the language
        if not (verb_column[i] is np.nan):
            # The verb id is a URI; its last path segment is the verb itself.
            if 'id' in verb_column[i].keys(): # dict format: {'display': {'language':'verb'}}. Check if 'display' exists
                if verb_column[i]['id'] != "":
                    action.append(re.split("/",verb_column[i]['id'])[-1]) # collects the verb in the current iteration and append it, NaN otherwise
                else:
                    action.append(np.nan)
            else:
                action.append(np.nan)
            # The language is the (single) key of the 'display' dict.
            if 'display' in verb_column[i].keys(): # dict format: {'display': {'language':'verb'}}. Check if 'display' exists
                if verb_column[i]['display'][list(verb_column[i]['display'].keys())[0]] != "":
                    lang.append(list(verb_column[i]['display'].keys())[0]) # this line appends the language value to the lang list, NaN otherwise
                else:
                    lang.append(np.nan)
            else:
                lang.append(np.nan)
        else:
            action.append(np.nan)
            lang.append(np.nan)
        # Grab the raw verb text in the CSV file as str, convert it to dict and collect the object
        # NOTE(review): after confirming 'name' is present, the code below
        # actually indexes the FIRST key of 'definition' (and the first key
        # of that sub-dict), not 'name' itself — equivalent only when
        # 'name' is the sole/first key.  Confirm against the data format.
        if not (object_column[i] is np.nan): # dict format: {'definition':{'name':{'es-US':'object'}}}
            if 'definition' in object_column[i].keys(): # check if the key 'definition' exists. Appends NaN otherwise
                if 'name' in object_column[i]['definition'].keys(): # check if the key 'name' exists. Appends NaN otherwise
                    if object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]] \
                       [list(object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]].keys())[0]] != "":
                        object_aim.append(object_column[i]['definition'] # This line appends the object of the current iteration
                                          [list(object_column[i]['definition'].keys())[0]]
                                          [list(object_column[i]['definition'][list(object_column[i]['definition'].keys())[0]].keys())[0]])
                    else:
                        object_aim.append(np.nan)
                else:
                    object_aim.append(np.nan)
            else:
                object_aim.append(np.nan)
        else:
            object_aim.append(np.nan)
    d = pd.DataFrame(data={'id':id_key,'timestamp':stamp,'weekday':day,'dayshift':day_shift,'actor':actor,'verb':action,'object':object_aim,'language':lang})
    return dropInvalidValues(d)
def dropInvalidValues(data):
    """Drop every row of *data* containing NaN/NaT, in place.

    The index is renumbered from 0 and the same (mutated) DataFrame is
    returned for call-chaining convenience.
    """
    data.dropna(inplace=True)
    data.reset_index(drop=True,inplace=True)
    return data
5128713 | <reponame>lhalb/cura_g-code_converter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def clear_data(path, start="M107", end='M82', cut=None):
    """
    :param path: path to the G-code file
    :param start: substring marking the start of the data (that line itself is skipped)
    :param end: substring marking the end of the data
    :param cut: iterable of strings; lines containing any of them are deleted
    :return: list of strings containing only G-code instructions
    """
    # read the file and strip the newlines
    with open(path) as file:
        raw_lines = [f.strip('\n') for f in file.readlines()]
    # locate the start of the G-code (first line containing *start*)
    start_idx = [raw_lines.index(i) for i in raw_lines if start in i][0]
    # locate the end of the G-code, searching from start_idx onward
    end_idx = [raw_lines.index(i, start_idx) for i in raw_lines if end in i][0]
    # trim to the payload between the two markers
    cut_lines = raw_lines[start_idx+1: end_idx]
    # drop the skirt section (everything between ;TYPE:SKIRT and the line
    # before ;TYPE:WALL-OUTER)
    skirts = [i for i, x in enumerate(cut_lines) if x == ';TYPE:SKIRT']
    outer_walls = [i for i, x in enumerate(cut_lines) if x == ';TYPE:WALL-OUTER']
    if skirts:
        # if there were several skirts, this routine would need adapting
        del cut_lines[skirts[0]:(outer_walls[0]-1)]
    # delete lines containing any of the *cut* markers
    if cut is None:
        uncommented_lines = cut_lines
    else:
        uncommented_lines = [i for i in cut_lines if all(c not in i for c in cut)]
    # finally drop empty lines
    cleared_lines = [l for l in uncommented_lines if l != '']
    return cleared_lines
def import_data_pandas(path):
    """Read a space-separated G-code dump at *path* into a DataFrame.

    Lines starting with ';' are treated as comments and skipped, and the
    file is assumed to have no header row.
    """
    return pd.read_csv(path, sep=" ", comment=';', header=None)
def lines_to_array(lines, z_to_zd=False, r=0.5, offset=0):
    """Parse G-code lines into a DataFrame of axis values and derived columns.

    For each line, every token starting with one of the axis letters
    G/X/Y/Z/E contributes its numeric value to that axis column (0 when
    absent).  Per-axis deltas (dG, dX, ...) are added, plus the raw line
    (CNC), the XY direction angle (PHI) and the SU/SV wire offsets
    computed from radius *r* and *offset*.

    NOTE(review): ``z_out`` (the z_to_zd switch) is computed but the dict
    VALUES of possible_axes are never read, so the flag has no effect —
    confirm intended behaviour.  Likewise the ``c != 'F'`` guard never
    triggers because no 'F' column is created.
    """
    if z_to_zd:
        z_out = 'ZD'
    else:
        z_out = 'Z'
    possible_axes = {
        'G': 'G',
        'X': 'X',
        'Y': 'Y',
        'Z': z_out,
        'E': 'E'
    }
    # One zero-filled column per axis, same length as the input.
    output_data = {k: [0]*len(lines) for k in possible_axes.keys()}
    for i, line in enumerate(lines):
        current_line = line.split(' ')
        for string in current_line:
            for key in possible_axes.keys():
                if key in string:
                    # Strip the axis letter, keep the numeric part as text.
                    value = string.strip(key)
                    output_data[key][i] = value
    df = pd.DataFrame(output_data, dtype='float64')
    # Per-axis first differences, anchored with a leading 0.
    for c in df.columns:
        if c != 'F':
            df[f'd{c}'] = np.ediff1d(df[c], to_begin=0)
    df['CNC'] = lines
    # Direction of travel in the XY plane and the resulting wire offsets.
    df['PHI'] = np.arctan(df['dY']/df['dX'])
    df['SV'] = np.round(r * np.sin(df['PHI']) + offset, 3)
    df['SU'] = np.round(r * np.cos(df['PHI']) + offset, 3)
    # dX == 0 produces NaN angles/offsets; zero them out.
    df = df.fillna(0)
    return df
def inspect_data(data, start=0, stop=-1):
    """Plot dX/dY (left axis) and dG (right axis) for rows start..stop.

    Debug helper: shows the parsed movement deltas next to the G command
    deltas so layer transitions can be eyeballed.
    """
    fig, ax = plt.subplots()
    x = data.index[start:stop]
    y1 = data['dX'][start:stop]
    y2 = data['dY'][start:stop]
    ax.plot(x, y1, label='X-Werte')
    ax.plot(x, y2, 'g-', label='Y-Werte')
    ax.set_ylabel('dX, dY')
    # Second y-axis for the (much smaller) dG values.
    secax = ax.twinx()
    y3 = data['dG'][start:stop]
    secax.scatter(x, y3, s=2, edgecolor='r', label='G-Werte')
    secax.set_ylabel('dG')
    secax.set_ylim([-3, 3])
    # Merge the legends of both axes into one box.
    h1, l1 = ax.get_legend_handles_labels()
    h2, l2 = secax.get_legend_handles_labels()
    ax.legend(h1+h2, l1+l2)
    plt.show()
def plot_points(df):
    """Plot the XY toolpath of *df* as a connected line with point markers."""
    fig, ax = plt.subplots()
    ax.plot(df['X'], df['Y'])
    ax.scatter(df['X'], df['Y'])
    plt.show()
def plot_arrows(df, points=None, plot_susv=False):
    """Quiver-plot the movement vectors (top) and the SU/SV offsets (bottom).

    *points* is an optional iterable of (x, y) pairs highlighted as
    scatter markers.
    NOTE(review): ``plot_susv`` is accepted but never used; the SU/SV
    subplot is always drawn.  The bare plt.scatter calls go to the
    current (last-created) axes.
    """
    fig, axs = plt.subplots(2)
    if points is None:
        pass
    else:
        for x, y in points:
            plt.scatter(x, y, s=50)
    axs[0].quiver(df['X'], df['Y'], df['dX'], df['dY'], angles='xy', scale_units='xy', scale=1, pivot='tip')
    axs[1].scatter(df['SU'], df['SV'], s=30)
    plt.show()
def write_header():
    """Return the static CNC program header (Sinumerik/EB G-code).

    The text contains the variable definitions, process parameters and
    positioning preamble emitted before the Cura-generated path code.

    Fix: the header string was built but never returned (the function
    implicitly returned None); added the missing ``return string``,
    matching the sibling codes_for_up_and_downslope().
    """
    string = \
'''
;EBA_Cura
;Version 1.02
;Stand: 17.08.2021
;Ersteller: Hengst
;-------------Programmhinweise-----------------
;Programmcode geht vom Modellmittelpunkt aus
;--> aktuelle Tischposition wird Modellmittelpunkt!
;Arbeitsfeldbegrenzung beachten! (sollte eigentlich vorausberechnet werden)
;Die Verfahrgeschwindigkeiten und die Layerhoehen sind im Cura festzulegen!
;Downslope ohne Z-Verfahrbefehl
;-------------Definitionsbereich---------------
DEF REAL _SLoff, _v, _UpS, _DoS, _vD ,_Hd, _XSTART, _YSTART
DEF REAL _ZSTART, _ZDSTART, _SQb, _vs, _Rueck, _DRueck, _tups, _tdos
DEF INT _KWH
DEF BOOL _POSITIONIEREN = 1 ;1 = Positionierung nutzen
;_Layer = 0 ;0 Neustart, 1,2,... Start bei Layer n
;ist noch nicht implementiert
;=================== Prozessparameter ======================
;EB-Parameter
_SQH = 45 ;Strahlstrom Behandlung [mA]
_SLH = 1650 ;Linsenstrom Behandlung [mA]
_SLoff = 0 ;Linsenoffset Behandlung [mA]
_vs = 15 ;wird nicht verwendet!!! Vorschubgeschwindigkeit [mm/s]
_SWXs= 1 ;Feldgroesse X
_SWYs= 1 ;Feldgroesse Y
_UpS = 0 ;Upslope [mm]
_DoS = 0 ;Downslope [mm]
_tups= 0.25 ;Ausgleichzeit Upslope [s]
_tdos= 0.25 ;Ausgleichzeit Downslope [s]
;Draht-Parameter
_Hd = 0.8 ;Hoehedifferenz von Lage zu Lage [mm]
_vD = 2.5 ;Vorschubges. Draht [m/min]
_DRueck = 4 ;Drahtrückzug in [mm] ueber _vD berechnet
;Beobachten-Parameter
_SQb = 1 ;Beobachten Strahlstrom [mA]
_SLb = 1675 ;Beobachten Linsenstrom [mA]
;Allgemeine-Parameter
_KWH = 3425 ;Kalibrierwert
_HV = 60 ;Hochspannung [kV]
_FP = 20 ;Positionierungsgeschwindigkeit [mm/s]
;=================== Hauptprogramm ======================
INITIAL
KALWERT(_KWH)
;-- Abarbeiten der Layer--
;FOR _LAYERZAHL = 1 TO _LAYER
;REPEAT Layer"<<_TEILE<<"
;ENDFOR
_Rueck = (_DRueck/1000)/(_vD*60)
;-------Positionierung des Modelmittelpunktes------------
IF _POSITIONIEREN == 1
MSG("Mittelpunkt des Modells positionieren und [Cycle Start] druecken!")
WRT (B_SWX,20,B_SL,_SLb,Auff,1)
SNS
ELO_EIN(22)
G4 F0.5
G0 SQ _SQb) SL _SLb)
HDWS_X
HDWS_Y
MSG()
SQ 0)
ELO_AUS
ENDIF
;Istwertuebergabe von der aktuellen Position!
_XSTART=$AA_IM[X] ;X-Istwertuebernahme
_YSTART=$AA_IM[Y] ;Y-Istwertuebernahme
_ZSTART=$AA_IM[Z] ;Z-Istwertuebernahme
_ZDSTART=$AA_IM[ZD] ;ZD-Istwertuebernahme
M96
;--------EB-Figur laden------------
;Parametrieren Hauptfigur
WRT(S_FIG,7,S_FRQ,3700,S_SWX,_SWXs,S_SWY,_SWYs)
SNS
VEKTOR_XY
;Nullpunktverschiebung
G17 G55
TRANS X=_XSTART Y=_YSTART
;---------------Hauptteil-------------------
M00
;Hier startet das Cura Programm
'''
    return string
def get_jumpmarkers(s):
    """Return the index labels of all non-zero entries of Series *s*.

    Used to locate the rows whose Z delta is non-zero, i.e. layer jumps.
    Improvement: the per-element Python loop with label lookups is
    replaced by a single vectorised comparison; this is faster and also
    well-defined when the index contains duplicate labels.
    """
    mask = (s != 0).to_numpy()
    return list(s.index[mask])
def write_gcode(outpath, df, scaling=1, slopes=True,
                up_name='REPEAT UPSLOPE ENDLABEL', down_name='REPEAT DOWNSLOPE ENDLABEL',
                marker_name='EB_PATH'):
    """Convert the parsed DataFrame to CNC path code and write it to *outpath*.

    One G1 line is emitted per row; slope subroutine calls and per-layer
    jump labels are spliced in at the matching row positions.  Returns
    the final list of output lines.
    """
    def get_string(a, b, c, d, e, sf=1):
        # Format one movement line; X/Y are scaled by *sf* and rounded.
        # NOTE(review): both branches are currently identical because the
        # SV/Z parts are commented out — d and e are effectively unused.
        if e != 0:
            return f'G1 X={round(sf*a, 3)} Y={round(sf*b, 3)} SU={c}'# SV={d}'# Z={e}'
        else:
            return f'G1 X={round(sf*a, 3)} Y={round(sf*b, 3)} SU={c}'# SV={d}'

    stringliste = [get_string(x, y, su, sv, z, scaling) for x, y, su, sv, z in zip(df['X'], df['Y'], df['SU'], df['SV'], df['Z'])]
    # Row labels where Z changes, i.e. where a new layer starts.
    jump_idx = get_jumpmarkers(df['Z'])
    if slopes:
        slope_pos, slope_label = find_slope_indices(df['E'].values)
        # corr_idx tracks how many lines have been inserted so far, so
        # later insert positions stay aligned with the original rows.
        corr_idx = 0
        for idx in df.index:
            for i, s in enumerate(slope_pos):
                if idx == s:
                    # UPSLOPE goes after the move, DOWNSLOPE before it.
                    if slope_label[i] == 'UP':
                        stringliste.insert(idx + 1 + corr_idx, up_name)
                    else:
                        stringliste.insert(idx + corr_idx, down_name)
                    corr_idx += 1
            for j, z in enumerate(jump_idx):
                if idx == z:
                    # Each layer gets a jump label; all but the first are
                    # preceded by a RET closing the previous layer.
                    if j == 0:
                        stringliste.insert(idx - 1 + corr_idx, f'{marker_name}_{j+1}:')
                    else:
                        stringliste.insert(idx - 1 + corr_idx, f'RET\n{marker_name}_{j+1}:')
                    corr_idx += 1
    # Subroutine prologue dispatching to the requested layer label.
    stringliste.insert(0, 'PROC PATHCODE (INT _N_LAB)\nDEF STRING[12] _DEST\n_DEST="EB_PATH_" << _N_LAB\nGOTOF _DEST')
    stringliste.extend(['\nRET'])
    stringliste.append(codes_for_up_and_downslope())
    with open(outpath, "w") as outfile:
        outfile.write("\n".join(stringliste))
    return stringliste
def codes_for_up_and_downslope():
    """Return the static UPSLOPE/DOWNSLOPE subroutine text appended to the
    generated CNC program (beam current ramp-up with wire feed on, and
    ramp-down with wire feed off)."""
    txt = """
;---------------Upslope-------------------
UPSLOPE:
MSG("Weiter mit [Cycle Start]")
M00
;Upslope und Draht foerdern
G0 G90 SQ _SQH) SL _SLH)
M61 ;Draht ein
G0 G90 VD2=(_vD)
G4 F0.25 ;Upslopefehler ausgleichen
RET
;---------------Downslope-------------------
DOWNSLOPE:
;Draht abstellen und Downslope ohne Z-Verfahrbefehl
G4 F0.25 ;Downslopefehler ausgleichen
M62 ;Draht aus
G0 G90 SQ 0) SL _SLb) ;Strahlstrom aus
RET
"""
    return txt
def find_slope_indices(s):
    """Locate slope rows: positions where *s* (the extrusion deltas) is
    zero but not strictly inside a run of zeros.

    Returns (positions, labels); labels alternates 'UP'/'DOWN' as
    produced by get_up_and_downslope_list().
    """
    zero_positions = list(np.where(s == 0)[0])

    def _keep(pos):
        # Drop a zero that has zero neighbours on BOTH sides — it sits in
        # the middle of a travel run, not at a slope boundary.
        return not (pos - 1 in zero_positions and pos + 1 in zero_positions)

    slope_positions = [pos for pos in zero_positions if _keep(pos)]
    slope_types = get_up_and_downslope_list(slope_positions)
    return slope_positions, slope_types
def get_up_and_downslope_list(slopelist):
    """Label the slope positions alternately: even slots 'UP', odd 'DOWN'.

    NOTE(review): an odd-length *slopelist* only prints a warning and
    falls through with a bare return — callers then receive None instead
    of a list.
    """
    if len(slopelist) % 2 != 0:
        print(f'Slopes bei: {slopelist}')
        print('Es wird nicht mit einem Downslope geendet!\nDaten prüfen')
        return
    return ['UP' if slot % 2 == 0 else 'DOWN' for slot in range(len(slopelist))]
| StarcoderdataPython |
5007467 | __author__ = '<NAME> <<EMAIL>>'
__version__ = '1.0'
| StarcoderdataPython |
9799895 | #Test code 2. Extra code.
import cv2
import numpy as np
from pyzbar.pyzbar import decode
import time
import RPi.GPIO as GPIO
#Servo X axis
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
pwm= GPIO.PWM(7, 50)
obj = False
faceCascade = cv2.CascadeClassifier("/home/pi/Resources12/haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)
cap.set(3, 640) # width
cap.set(4, 480) # height
cap.set(10, 100) # brightness
need = 0
#barcode reading
#img = cv2.imread('/home/pi/Resources12/frame (1).png')
with open('/home/pi/Resources12/myDataFile.text') as f:
myDataList = f.read().splitlines()
# Main loop: scan frames for QR codes; an authorized code switches to
# face-tracking mode, an unknown code is highlighted in red.
# NOTE(review): indentation reconstructed from context — verify nesting
# against the original file.
while True:
    success, img = cap.read()
    for barcode in decode(img):
        myData = barcode.data.decode('utf-8')
        print(myData)
        #The if statement will check if the the QR code is authorized. If it works, face will get detected.
        if myData in myDataList:
            myOutput = 'Authorized'
            myColor = (0, 255, 0)
            cv2.destroyWindow("Result")
            # Face-tracking loop: pan the servo so the face stays centred.
            while True:
                success, img = cap.read()
                imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    cv2.putText(img, myData + ' User Detected', (x - 50,y + h +50 ), cv2.FONT_HERSHEY_COMPLEX, 1,(0, 0, 0), 2)
                    cv2.rectangle(img, (x + 20 , y + 20), (x + 100, y + 10 ), (0, 0, 255), 2)
                    # Map the face's x position (0..480) to a PWM duty
                    # cycle (12..2) in coarse 60-pixel steps.
                    if x == 0:
                        pwm.start(12)
                    elif x in range(1,60):
                        pwm.start(10.8)
                    elif x in range(61,120):
                        pwm.start(9.6)
                    elif x in range(121,180):
                        pwm.start(8.4)
                    elif x in range(181,239):
                        pwm.start(7.2)
                    elif x == 240:
                        pwm.start(6)
                    elif x in range(241,300):
                        pwm.start(5.2)
                    elif x in range(301,360):
                        pwm.start(4.4)
                    elif x in range(361,420):
                        pwm.start(3.6)
                    elif x in range(421,479):
                        pwm.start(2.8)
                    elif x == 480:
                        pwm.start(2)
                    #print("A FACE IS DETECTED")
                    # Log the user and timestamp once per session.
                    while need <1:
                        r = open("/home/pi/Resources12/database", "a")
                        r.write("\n" + myData + " " + time.ctime())
                        r.close()
                        need = need + 1
                    obj = True
                    # break
                cv2.imshow("Video", img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        else:
            myOutput = 'Un-Authorized'
            myColor = (0, 0, 255)
        # Outline the QR code and annotate it with the decision.
        pts = np.array([barcode.polygon], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(img, [pts], True, myColor, 5)
        pts2 = barcode.rect
        cv2.putText(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX,
                    0.9, myColor, 2)
    cv2.imshow('Result', img)
    cv2.waitKey(1)
1963462 | <filename>src/gripit/edgelib/curv_disc.py
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import cv2 as cv2
import numpy as np
import gripit.edgelib.util as util
from scipy import stats
def grad_dir(img):
    """Return the per-pixel gradient direction of *img*.

    Sobel derivatives feed cv2.phase; the resulting angle is offset by
    180 and truncated toward zero, as in the original implementation.
    """
    # x / y derivatives — OpenCV's Sobel gives better results here than
    # numpy's gradient.
    dx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
    dy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
    # cv2.phase needs 64-bit input; np.fix truncates toward zero.
    return np.fix(180 + cv2.phase(dx, dy))
def curve_discont(depth_im, imageModel):
    """Detect curve discontinuities in a depth image via gradient direction.

    Pipeline: gradient direction -> 8-bit normalisation -> two bilateral
    blurs (edge-preserving denoise) -> median blur (salt & pepper) ->
    auto-Canny -> morphological skeleton.  Shows the result and returns
    the skeleton image.
    """
    ###NEEDS IMPROVEMENT, NOT THAT GREAT ATM########
    # Gradient of depth img
    graddir = grad_dir(depth_im)
    # Normalise into 0..255 so downstream filters can work on uint8.
    dimg1 = (((graddir - graddir.min()) / (graddir.max() - graddir.min())) * 255.9).astype(np.uint8)
    # Further remove noise while keeping edges sharp
    blur = cv2.bilateralFilter(dimg1, 9, 25, 25)
    blur2 = cv2.bilateralFilter(blur, 9, 25, 25)
    # Eliminate salt-and-pepper noise
    median = cv2.medianBlur(blur2, 7)
    # Canny threshold comes from the image model (percent -> fraction).
    dimg1 = util.auto_canny(median, imageModel.getAttribute("auto_canny_sigma_curve")/100.00)
    skel1 = util.morpho(dimg1)
    cv2.imshow("Curve Discontinuity", util.create_img(skel1))
    ######CAN'T FIND USE FOR CNT1, what is the point of finding contours here?########
    #cnt1 = util.find_contours(util.create_img(skel1), cv2.RETR_EXTERNAL)
    return skel1
4905664 | import time
from typing import Any, Dict
from uuid import uuid4
import boto3
class StepFunctionsRunner:
    """Create and execute an AWS Step Functions state machine, then poll
    for completion.  Intended for local/test endpoints (localstack-style),
    hence the dummy role ARN."""

    def __init__(self, endpoint_url: str):
        # Endpoint the boto3 stepfunctions client should talk to.
        self.endpoint_url: str = endpoint_url

    def run(self, definition: str, input_string: str) -> Dict[str, Any]:
        """Create a state machine from *definition*, start one execution
        with *input_string*, and block until it leaves RUNNING.

        Returns the final describe_execution response.  Machine and
        execution names are random UUIDs so repeated runs don't collide.
        """
        sfn_client = boto3.client("stepfunctions", endpoint_url=self.endpoint_url)
        state_machine_arn = sfn_client.create_state_machine(
            name=str(uuid4()), definition=definition, roleArn="arn:aws:iam::012345678901:role/DummyRole",
        )["stateMachineArn"]
        execution_arn: str = sfn_client.start_execution(
            stateMachineArn=state_machine_arn, name=str(uuid4()), input=input_string
        )["executionArn"]
        # Poll every half second until the execution settles.
        while True:
            response = sfn_client.describe_execution(executionArn=execution_arn)
            if response["status"] != "RUNNING":
                return response
            time.sleep(0.5)
| StarcoderdataPython |
6437369 |
import pygame
from pygame.sprite import Group, Sprite
from pygame.font import Font
from pygame.locals import *
import code
import os #for getting the path to current directory
import time #for getting the current time and date
import math #for doing some computation
#pygame
from pygame.locals import *
from pygame.font import Font
from pygame.mixer import Sound
from pygame.sprite import Sprite, Group
#code
from code.game import Game, Scene
from code.gui import TextButton, ButtonGroup
from code.loader import load_resources
| StarcoderdataPython |
4900012 | import cv2
# Detect faces in a still image with a Haar cascade and draw green boxes.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread("resize_new.jpg")
# The cascade works on grayscale input.
gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img,
                                      scaleFactor = 1.1,
                                      minNeighbors = 5)
# faces is an array of (x, y, w, h) rectangles.
for x, y, w, h in faces:
    img = cv2.rectangle(img, (x,y),(x+w,y+h),(0,255,0),3)
print(type(faces))
print(faces)
# Show the annotated image until a key is pressed.
cv2.imshow("bw",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
8115925 | <reponame>IshanManchanda/project-euler<gh_stars>0
def main():
    """Project Euler 10: print the sum of every prime p with p <= 2e6.

    Improvement: replaces the trial-division loop (which grew a full
    prime list and then discarded the final over-limit prime with
    primes[:-1]) by a sieve of Eratosthenes — same printed result,
    dramatically faster.
    """
    limit = 2 * 10 ** 6
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were struck by smaller primes.
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    print(sum(i for i, prime in enumerate(is_prime) if prime))

main()
| StarcoderdataPython |
75867 | <reponame>marinakolova/Python-Courses
# Read a line of space-separated integers and bubble-sort them ascending.
numbers = [int(n) for n in input().split(' ')]
n = len(numbers)
for i in range(n):
    # After pass i the largest i+1 values are already in place at the tail.
    for j in range(0, n - i - 1):
        if numbers[j] > numbers[j + 1]:
            numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
# NOTE(review): the comprehension variable `n` shadows the length above —
# harmless, since `n` is not used afterwards.
print(' '.join([str(n) for n in numbers]))
5094197 | <gh_stars>1-10
from horus import resources
from pyramid.decorator import reify
from pyramid.interfaces import ILocation
from zope.interface import implementer
from h import api, models
@implementer(ILocation)
class BaseResource(resources.BaseFactory):
    """Base Resource class from which all resources are derived"""

    # Traversal bookkeeping; set by the parent when the resource is
    # attached to the tree.
    __name__ = None
    __parent__ = None

    @reify
    def persona(self):
        """Identity dict for the signed-in user, or None when anonymous."""
        request = self.request
        # Transition code until multiple sign-in is implemented
        if request.user:
            return {
                'username': request.user.username,
                'provider': request.host,
            }
        return None

    @reify
    def personas(self):
        """All personas for this request (currently at most one)."""
        request = self.request
        # Transition code until multiple sign-in is implemented
        if request.user:
            return [self.persona]
        return []

    @reify
    def consumer(self):
        """The API consumer looked up via the configured `api.key`."""
        settings = self.request.registry.settings
        key = settings['api.key']
        consumer = models.Consumer.get_by_key(key)
        # NOTE(review): assert is stripped under `python -O`; consider an
        # explicit raise if a missing consumer must always be fatal.
        assert(consumer)
        return consumer

    @reify
    def token(self):
        """Signed auth token for the consumer (and current user, if any)."""
        message = {
            'consumerKey': str(self.consumer.key),
            'ttl': self.consumer.ttl,
        }
        if self.persona:
            message['userId'] = 'acct:%(username)s@%(provider)s' % self.persona
        return api.auth.encode_token(message, self.consumer.secret)
class InnerResource(BaseResource):
    """Helper Resource class for declarative, traversal-based routing

    Classes which inherit from this should contain attributes which are either
    class constructors for classes whose instances provide the
    :class:`pyramid.interfaces.ILocation` interface else attributes which are,
    themselves, instances of such a class. Such attributes are treated as
    valid traversal children of the Resource whose path component is the name
    of the attribute.
    """

    def __getitem__(self, name):
        """
        Any class attribute which is an instance providing
        :class:`pyramid.interfaces.ILocation` will be returned as is.

        Attributes which are constructors for implementing classes will
        be replaced with a constructed instance by reifying the newly
        constructed resource in place of the attribute.

        Assignment to the sub-resources `__name__` and `__parent__` properties
        is handled automatically.
        """
        factory_or_resource = getattr(self, name, None)

        if factory_or_resource:
            # Attribute is a factory class: build the child once, wire it
            # into the tree, and cache it on self for subsequent lookups.
            if ILocation.implementedBy(factory_or_resource):
                inst = factory_or_resource(self.request)
                inst.__name__ = name
                inst.__parent__ = self
                setattr(self, name, inst)
                return inst

            # Attribute is already a constructed resource: return as-is.
            if ILocation.providedBy(factory_or_resource):
                return factory_or_resource

        raise KeyError(name)
class RootFactory(InnerResource, resources.RootFactory):
    # Traversal root; its `api`/`app` children are attached in includeme().
    pass
class APIFactory(InnerResource):
    """Tree factory for the /api subtree.

    Guarantees every request carries an annotator auth token: if the
    client did not send the ``x-annotator-auth-token`` header, it is
    filled in from the ``access_token`` query parameter, falling back to
    a freshly minted token for the current consumer/user.
    """

    def __init__(self, request):
        super(APIFactory, self).__init__(request)

        # Idiom fix: `'x' not in y` replaces the awkward `not 'x' in y`.
        if 'x-annotator-auth-token' not in request.headers:
            token = request.params.get('access_token', self.token)
            request.headers['x-annotator-auth-token'] = token
class AppFactory(BaseResource):
    """Tree factory for the /app subtree."""

    def __init__(self, request):
        # NOTE(review): this override only delegates to the base class and
        # could be removed without changing behaviour.
        super(AppFactory, self).__init__(request)
def includeme(config):
    """Pyramid configuration hook: wire routes and the traversal tree."""
    config.include('horus.routes')

    # Attach the /api and /app children to the traversal root.
    RootFactory.api = APIFactory
    RootFactory.app = AppFactory

    config.add_route('embed', '/embed.js')
    config.add_route('index', '/', factory='h.resources.RootFactory')
3365995 | """
Contains the OSU4k class.
"""
import azcam
class OSU4k(object):
    """OSU4k-specific customized commands.

    An instance registers itself in the azcam database so the command
    server can dispatch remote calls with syntax such as
    ``osu4k.test 1.0 "some_text"``.
    """

    def __init__(self):
        """Register this tool as ``osu4k`` in azcam.db and the CLI tools."""
        azcam.db.osu4k = self
        azcam.db.cli_tools["osu4k"] = self
        return

    def initialize(self):
        """Hook for OSU4k-specific initialisation; currently a no-op."""
        return

    def test(self, foo: float = 1.0, bar: str = "") -> str:
        """Dummy command used to exercise the remote-call plumbing."""
        return "OK"
11213706 | '''
Excel_4 main function
-Getting Rows & Columns from Sheet info Part 2
'''
from excel_source import *
# Excel_4: iterating worksheet columns with openpyxl.
work_book = openpyxl.load_workbook('excel_docs/example.xlsx')
work_book_sheet = work_book.active
# `columns` is a generator of cell tuples; next() yields the first column.
print(next(work_book_sheet.columns))
# Updated solution aquired from:
# https://stackoverflow.com/questions/42603795/typeerror-generator-object-is-not-subscriptable
print('\n')
# Generators are not subscriptable, so materialise them before indexing.
for cell_Obj in list(work_book_sheet.columns)[1]:
    print(cell_Obj.value)
# this prints out food items
for cell_Obj in list(work_book_sheet.columns)[2]:
    print(cell_Obj.value)
# this prints out data #'s with the
for cell_Obj in next(work_book_sheet.columns):
    print(cell_Obj.value)
# this prints out row time stamps
230339 | <gh_stars>0
################################################################
# File: __main__.py
# Title: MANGAdownloader
# Author: ASL97/ASL <<EMAIL>>
# Version: 4
# Notes : DON'T EMAIL ME UNLESS YOU NEED TO
# TODO: *blank*
################################################################
import threading
import scrapers
import queue
import types
import json
import misc
import time
import os
class main:
def __init__(self):
    # basic setup
    # Map each scraper module's name to its id_supported table, found by
    # introspecting the `scrapers` package namespace.
    self.scrapers = {name:obj.id_supported
                     for name,obj in vars(scrapers).items()
                     if isinstance(obj, types.ModuleType)
                     }
    self.done = False     # set True by saver() when all chapters are written
    self.chapters = {}    # chapter -> {page -> {link, name?, image?, done?}}
    self.startup_msg()
    self.load_settings()
    self.check_setup()
def startup_msg(self):
    """Print the banner, including the list of supported scraper sites."""
    print("\n".join([
        "ASL97 Online Manga Ripper Version 4",
        "This is a very simple fast downloader for downloading",
        "manga from read manga online sites",
        "",
        "supported sites: %s" % ", ".join(self.scrapers),
        ""
    ]))
def load_settings(self):
if not os.path.exists("./asl97_manga_downloader.ini"):
self.settings = {}
else:
with open("./asl97_manga_downloader.ini","r") as f:
self.settings = json.load(f)
def setup(self):
    """Interactively collect the settings and persist them as JSON."""
    print("asl97_manga_downloader.ini not found, running setup")
    print()
    self.settings["zip"] = misc.get_bool_input("zip manga folder: ")
    print()
    self.settings["thread_number"] = misc.get_int_input(
        "number of threads: ")
    print()
    # Persist so the questions are only asked on first run.
    with open("./asl97_manga_downloader.ini","w") as f:
        json.dump(self.settings,f)
def check_setup(self):
# check if all setting is set/exists
if not set(self.settings) == {"zip","thread_number"}:
self.setup()
def predownload(self):
    """Prepare for downloading: create the output folder, the work queue
    and the lock, then enqueue one job dict per page in chapter order."""
    misc.make_folder(self.name)
    self.image_queue = queue.Queue()
    self.lock = threading.Lock()
    for chapter in sorted(self.chapters):
        for page in sorted(self.chapters[chapter]):
            tmp = self.chapters[chapter][page]
            self.image_queue.put({
                "chapter":chapter,
                "link":tmp["link"],
                "page":page,
                # A scraper-supplied name overrides the default layout.
                "name":tmp["name"] if "name" in tmp else False
            })
def worker(self):
while True:
tmp = self.image_queue.get()
if tmp is False:
self.image_queue.task_done()
break
link = tmp["link"]
page = tmp["page"]
chapter = tmp["chapter"]
ext = tmp["link"].split("/")[-1].split(".")[-1].split("?")[0]
if tmp["name"]:
file_name = "%s/%s" % (self.name,tmp["name"])
else:
file_name = "%s/chapter_%s/%03d.%s" % (self.name,
chapter,page,ext)
if os.path.exists(file_name):
print("%s already exists"%(file_name))
else:
f = misc.download_image(link)
print("downloaded page %d of chapter %s" % (page+1,chapter))
with self.lock:
self.chapters[chapter][page]["image"] = f
self.chapters[chapter][page]["name"] = file_name
self.chapters[chapter][page]["done"] = True
self.image_queue.task_done()
print("Thread Ended")
def saver(self):
while self.chapters: # check if there are still chapter to save
with self.lock:
# get the top chapter number
chapter_num = sorted(self.chapters)[0]
# get the top chapter (dict)
chapter = self.chapters[chapter_num]
# check if every single image is downloaded
if all("done" in chapter[page] for page in chapter):
print("Saving Chapter: %s"%(chapter_num))
# save the images/pages
for page in sorted(chapter):
tmp = chapter[page]
if "image" in tmp:
# if the directory name doesn't exists, make it
directory = os.path.dirname(tmp["name"])
if not os.path.exists(directory):
os.makedirs(directory)
with open(tmp["name"],"wb") as f:
f.write(tmp["image"])
# finally free up the ram taken by the image
del self.chapters[chapter_num]
time.sleep(0.2)
print("Finish Saving")
# i don't remember if any code depend on this variable
# so i am leaving it here as backward compatibility
self.done = True
def download_finish_msg(self):
self.image_queue.join()
print("Finish Downloading")
def download_thread(self):
# download threads
for _ in range(0,int(self.settings["thread_number"])):
t = threading.Thread(target=self.worker)
t.daemon = True
t.start()
self.image_queue.put(False)
t = threading.Thread(target=self.download_finish_msg)
t.daemon = True
t.start()
def run(self):
link, domain = misc.get_url_input()
if hasattr(scrapers,domain):
self.scraper = getattr(scrapers,domain)
if hasattr(self.scraper,"_type"):
print()
print("the scraper for %s is a %s scraper" % (domain,
misc.type_to_str(getattr(self.scraper,"_type"))))
print()
if hasattr(self.scraper,"note"):
getattr(self.scraper,"note")()
print()
self.name = self.scraper.scrap_manga(link, self.chapters)
self.predownload()
self.download_thread()
self.saver()
else:
print("%s is not supported!"%(domain))
if __name__ == "__main__":
    # entry point: construct the driver and hand control to its loop
    main().run()
# --- snippet boundary ---
from math import sqrt
def is_square(n):
    """Return True if *n* is a perfect square (0, 1, 4, 9, ...).

    Uses integer arithmetic (``math.isqrt``) instead of float ``sqrt``:
    float rounding makes ``sqrt(n).is_integer()`` unreliable for large
    integers (around 2**52 and beyond).  Also treats 0 as a perfect
    square (0 == 0**2), which the float version wrongly rejected.
    """
    from math import isqrt  # local import; the module header only imports sqrt

    if n < 0:
        return False
    root = isqrt(n)
    return root * root == n
# Smoke-test the helper on a few sample values (True, False, True).
for sample in (16, 15, 4):
    print(is_square(sample))
# --- snippet boundary ---
from typing import Optional
from pydantic import BaseModel
class TmpltBase(BaseModel):
    """Shared template fields; the target filename is optional."""

    filename: Optional[str] = None
class Tmplt(TmpltBase):
    """Full template schema: inherits ``filename`` and requires a body."""

    content: str
# --- snippet boundary ---
from flasgger import Swagger
from flask import Flask
from maestro_api.db.mongo import init_db
from maestro_api.api_routes import init_api_routes
from maestro_api.frontend_routes import init_frontend_routes
from maestro_api.auth_routes import init_auth_routes
from maestro_api.monitoring import init_monitoring_routes
def create_app(settings):
    """Build the Flask app that serves the Maestro REST API.

    Wires up optional Swagger docs, the Mongo connection and the API
    routes.  *settings* is a Flask config object/class.
    """
    flask_app = Flask(__name__)
    flask_app.config.from_object(settings)
    if settings.SWAGGER_ENABLED:
        # Swagger UI is opt-in; the spec skeleton lives in swagger/template.yml
        Swagger(
            flask_app,
            template_file="swagger/template.yml",
        )
    init_db(flask_app)
    init_api_routes(flask_app)
    return flask_app
def create_frontend_app(settings):
    """Build the Flask app serving frontend, auth and monitoring routes.

    Static assets are served from ``settings.FRONTEND_PUBLIC_DIR``.
    Note this app does not initialize the database connection.
    """
    flask_app = Flask(__name__, static_url_path=settings.FRONTEND_PUBLIC_DIR)
    flask_app.config.from_object(settings)
    init_frontend_routes(flask_app)
    init_auth_routes(flask_app)
    init_monitoring_routes(flask_app)
    return flask_app
def create_console_app(settings):
    """Build a minimal app (config + DB only) for console/CLI scripts."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(settings)
    init_db(flask_app)
    return flask_app
# --- snippet boundary ---
# -*- coding: utf-8 -*-
import re
import os
from django.conf import settings
from django.shortcuts import redirect
from django.db.models import Q
from datetime import datetime, timedelta
from djobberbase.conf import settings as djobberbase_settings
def normalize_query(query_string,
                    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                    normspace=re.compile(r'\s{2,}').sub):
    """Split *query_string* into individual keywords.

    Quoted phrases are kept together as a single term, surrounding
    whitespace is stripped and runs of internal whitespace collapsed.

    Example:
        >>> normalize_query(' some random words "with quotes " and spaces')
        ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
    """
    terms = []
    for quoted, unquoted in findterms(query_string):
        term = (quoted or unquoted).strip()
        terms.append(normspace(' ', term))
    return terms
def get_query(query_string, search_fields):
    """Build a django ``Q`` object matching every keyword in *query_string*.

    Each keyword must appear (icontains) in at least one of
    *search_fields*; the related fields category/jobtype/city are matched
    against their ``name`` attribute.  Slightly modified from the classic
    django search snippet to support related fields.
    """
    related_fields = ('category', 'jobtype', 'city')
    combined = None  # AND of the per-keyword queries
    for keyword in normalize_query(query_string):
        keyword_query = None  # OR of the per-field lookups for this keyword
        for field in search_fields:
            if field in related_fields:
                lookup = Q(**{"%s__name__icontains" % field: keyword})
            else:
                lookup = Q(**{"%s__icontains" % field: keyword})
            keyword_query = lookup if keyword_query is None else keyword_query | lookup
        combined = keyword_query if combined is None else combined & keyword_query
    return combined
def handle_uploaded_file(f, name):
    """Persist the uploaded file *f* as *name* in the upload directory.

    Writes in chunks so large uploads never have to fit in memory.
    """
    file_uploads = djobberbase_settings.DJOBBERBASE_FILE_UPLOADS
    # `with` guarantees the handle is closed even if a chunk write fails
    # (the original opened without a context manager and leaked the
    # handle on any exception before close()).
    with open(file_uploads + name, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
def delete_uploaded_file(name):
    """Remove a previously uploaded file.

    Raises OSError (e.g. FileNotFoundError) if the file does not exist.
    NOTE(review): unlike handle_uploaded_file, this expects *name* to be
    the full path, not a name relative to the upload dir -- confirm callers.
    """
    os.remove(name)
def minutes_between():
    """Return a ``(start, end)`` datetime pair covering the configured
    ``DJOBBERBASE_MINUTES_BETWEEN`` window ending now.

    ``now`` is captured once so the pair spans exactly the configured
    number of minutes; the original called ``datetime.now()`` twice,
    making the window slightly longer than configured.
    """
    minutes = djobberbase_settings.DJOBBERBASE_MINUTES_BETWEEN
    end = datetime.now()
    start = end - timedelta(minutes=minutes)
    return (start, end)
def last_hour():
    """Return a ``(start, end)`` datetime pair spanning exactly one hour.

    ``now`` is captured once so ``end - start`` is exactly one hour; the
    original called ``datetime.now()`` twice, so the pair spanned
    slightly more than an hour.
    """
    end = datetime.now()
    start = end - timedelta(hours=1)
    return (start, end)
def getIP(request):
    """Best-effort client IP for a django request.

    Falls back to the ``X-Forwarded-For`` header when ``REMOTE_ADDR`` is
    empty or the loopback address (i.e. the app sits behind a proxy).
    """
    ip = request.META['REMOTE_ADDR']
    # Bug fix: dict.has_key() only exists on Python 2 and raises
    # AttributeError on Python 3; use the `in` operator instead.
    if (not ip or ip == '127.0.0.1') and 'HTTP_X_FORWARDED_FOR' in request.META:
        ip = request.META['HTTP_X_FORWARDED_FOR']
    return ip
# --- snippet boundary ---
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
from functools import reduce
import numbers
import operator
import numpy as np
import scipy.sparse
try:  # Windows compatibility
    # On Python 2, rebind `int` to `long` so linear indices computed
    # below never overflow a fixed-width int; on Python 3 `long` no
    # longer exists and the NameError leaves the arbitrary-precision
    # builtin `int` in place.
    int = long
except NameError:
    pass
class COO(object):
    """ A Sparse Multidimensional Array

    This is stored in COO format.  It depends on NumPy and Scipy.sparse for
    computation, but supports arrays of arbitrary dimension.

    Parameters
    ----------
    coords: np.ndarray (ndim, nnz)
        An array holding the index locations of every value
        Should have shape (number of dimensions, number of non-zeros)
    data: np.array (nnz,)
        An array of Values
    shape: tuple (ndim,), optional
        The shape of the array

    Examples
    --------
    >>> x = np.eye(4)
    >>> x[2, 3] = 5
    >>> s = COO(x)
    >>> s
    <COO: shape=(4, 4), dtype=float64, nnz=5, sorted=True, duplicates=False>
    >>> s.data
    array([ 1.,  1.,  1.,  5.,  1.])
    >>> s.coords
    array([[0, 1, 2, 2, 3],
           [0, 1, 2, 3, 3]], dtype=uint8)
    >>> s.dot(s.T).sum(axis=0).todense()
    array([  1.,   1.,  31.,   6.])

    Make a sparse array by passing in an array of coordinates and an array of
    values.

    >>> coords = [[0, 0, 0, 1, 1],
    ...           [0, 1, 2, 0, 3],
    ...           [0, 3, 2, 0, 1]]
    >>> data = [1, 2, 3, 4, 5]
    >>> y = COO(coords, data, shape=(3, 4, 5))
    >>> y
    <COO: shape=(3, 4, 5), dtype=int64, nnz=5, sorted=False, duplicates=True>
    >>> tensordot(s, y, axes=(0, 1))
    <COO: shape=(4, 3, 5), dtype=float64, nnz=6, sorted=False, duplicates=False>

    Following scipy.sparse conventions you can also pass these as a tuple with
    rows and columns

    >>> rows = [0, 1, 2, 3, 4]
    >>> cols = [0, 0, 0, 1, 1]
    >>> data = [10, 20, 30, 40, 50]
    >>> z = COO((data, (rows, cols)))
    >>> z.todense()
    array([[10,  0],
           [20,  0],
           [30,  0],
           [ 0, 40],
           [ 0, 50]])

    You can also pass a dictionary or iterable of index/value pairs. Repeated
    indices imply summation:

    >>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3}
    >>> COO(d)
    <COO: shape=(2, 3, 4), dtype=int64, nnz=3, sorted=False, duplicates=False>

    >>> L = [((0, 0), 1),
    ...      ((1, 1), 2),
    ...      ((0, 0), 3)]
    >>> COO(L).todense()
    array([[4, 0],
           [0, 2]])

    See Also
    --------
    COO.from_numpy
    COO.from_scipy_sparse
    """
    # ensure numpy defers binary-op dispatch to COO's own operators
    __array_priority__ = 12

    def __init__(self, coords, data=None, shape=None, has_duplicates=True,
                 sorted=False, cache=False):
        self._cache = None
        if cache:
            self.enable_caching()
        if data is None:
            # {(i, j, k): x, (i, j, k): y, ...}
            if isinstance(coords, dict):
                coords = list(coords.items())
                has_duplicates = False

            if isinstance(coords, np.ndarray):
                # dense array input: delegate entirely to from_numpy
                result = COO.from_numpy(coords)
                self.coords = result.coords
                self.data = result.data
                self.has_duplicates = result.has_duplicates
                self.sorted = result.sorted
                self.shape = result.shape
                return

            # []
            if not coords:
                data = []
                coords = []

            # [((i, j, k), value), (i, j, k), value), ...]
            elif isinstance(coords[0][0], Iterable):
                if coords:
                    assert len(coords[0]) == 2
                data = [x[1] for x in coords]
                coords = [x[0] for x in coords]
                coords = np.asarray(coords).T

            # (data, (row, col, slab, ...))
            else:
                data = coords[0]
                coords = np.stack(coords[1], axis=0)

        self.data = np.asarray(data)
        self.coords = np.asarray(coords)
        if self.coords.ndim == 1:
            self.coords = self.coords[None, :]

        if shape and not np.prod(self.coords.shape):
            # explicit shape but no entries: normalize to an empty coords array
            self.coords = np.zeros((len(shape), 0), dtype=np.uint64)

        if shape is None:
            # infer shape from the largest coordinate on each axis
            if self.coords.nbytes:
                shape = tuple((self.coords.max(axis=1) + 1).tolist())
            else:
                shape = ()

        self.shape = tuple(shape)
        if self.shape:
            # smallest integer dtype that can index the largest dimension
            dtype = np.min_scalar_type(max(self.shape))
        else:
            dtype = np.int_
        self.coords = self.coords.astype(dtype)
        assert not self.shape or len(data) == self.coords.shape[1]
        self.has_duplicates = has_duplicates
        self.sorted = sorted

    def enable_caching(self):
        """ Enable caching of reshape, transpose, and tocsr/csc operations

        This enables efficient iterative workflows that make heavy use of
        csr/csc operations, such as tensordot.  This maintains a cache of
        recent results of reshape and transpose so that operations like
        tensordot (which uses both internally) store efficiently stored
        representations for repeated use.  This can significantly cut down on
        computational costs in common numeric algorithms.

        However, this also assumes that neither this object, nor the downstream
        objects will have their data mutated.

        Examples
        --------
        >>> x.enable_caching()  # doctest: +SKIP
        >>> csr1 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr()  # doctest: +SKIP
        >>> csr2 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr()  # doctest: +SKIP
        >>> csr1 is csr2  # doctest: +SKIP
        True
        """
        # keep only the three most recent results per operation
        self._cache = defaultdict(lambda: deque(maxlen=3))
        return self

    @classmethod
    def from_numpy(cls, x):
        """Convert a dense numpy array (or 0-d scalar) into a COO array."""
        if x.shape:
            coords = np.where(x)
            data = x[coords]
            coords = np.vstack(coords)
        else:
            coords = []
            data = x
        return cls(coords, data, shape=x.shape, has_duplicates=False,
                   sorted=True)

    def todense(self):
        """Materialize as a dense numpy array (duplicates are summed first)."""
        self = self.sum_duplicates()
        x = np.zeros(shape=self.shape, dtype=self.dtype)

        coords = tuple([self.coords[i, :] for i in range(self.ndim)])
        x[coords] = self.data
        return x

    @classmethod
    def from_scipy_sparse(cls, x):
        """Convert any scipy.sparse matrix into a 2-d COO array."""
        x = scipy.sparse.coo_matrix(x)
        coords = np.empty((2, x.nnz), dtype=x.row.dtype)
        coords[0, :] = x.row
        coords[1, :] = x.col
        return COO(coords, x.data, shape=x.shape,
                   has_duplicates=not x.has_canonical_format,
                   sorted=x.has_canonical_format)

    @property
    def dtype(self):
        return self.data.dtype

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def nnz(self):
        # number of stored entries (may still include duplicates)
        return self.coords.shape[1]

    @property
    def nbytes(self):
        return self.data.nbytes + self.coords.nbytes

    def __sizeof__(self):
        return self.nbytes

    def __getitem__(self, index):
        """Integer / slice (step 1) / list fancy indexing; None inserts
        a new length-1 axis.  Returns self unchanged for full slices."""
        if not isinstance(index, tuple):
            index = (index,)
        # normalize negative integer indices
        index = tuple(ind + self.shape[i] if isinstance(ind, numbers.Integral) and ind < 0 else ind
                      for i, ind in enumerate(index))
        if (all(ind == slice(None) or ind == slice(0, d)
                for ind, d in zip(index, self.shape))):
            return self
        # mask of stored entries that survive the selection
        mask = np.ones(self.nnz, dtype=bool)
        for i, ind in enumerate([i for i in index if i is not None]):
            if ind == slice(None, None):
                continue
            mask &= _mask(self.coords[i], ind)

        n = mask.sum()
        coords = []
        shape = []
        i = 0
        for ind in index:
            if isinstance(ind, numbers.Integral):
                # integer index drops the axis entirely
                i += 1
                continue
            elif isinstance(ind, slice):
                start = ind.start or 0
                stop = ind.stop if ind.stop is not None else self.shape[i]
                shape.append(min(stop, self.shape[i]) - start)
                coords.append(self.coords[i][mask] - start)
                i += 1
            elif isinstance(ind, list):
                # remap selected coordinate values onto 0..len(ind)-1
                old = self.coords[i][mask]
                new = np.empty(shape=old.shape, dtype=old.dtype)
                for j, item in enumerate(ind):
                    new[old == item] = j
                coords.append(new)
                shape.append(len(ind))
                i += 1
            elif ind is None:
                # np.newaxis: length-1 axis with all-zero coordinates
                coords.append(np.zeros(n))
                shape.append(1)

        # trailing axes not mentioned in the index pass through unchanged
        for j in range(i, self.ndim):
            coords.append(self.coords[j][mask])
            shape.append(self.shape[j])
        coords = np.stack(coords, axis=0)
        shape = tuple(shape)
        data = self.data[mask]

        return COO(coords, data, shape=shape,
                   has_duplicates=self.has_duplicates,
                   sorted=self.sorted)

    def __str__(self):
        return "<COO: shape=%s, dtype=%s, nnz=%d, sorted=%s, duplicates=%s>" % (
            self.shape, self.dtype, self.nnz, self.sorted,
            self.has_duplicates)

    __repr__ = __str__

    def reduction(self, method, axis=None, keepdims=False, dtype=None):
        """Apply a named reduction (e.g. 'sum', 'max') over *axis*.

        Reduces over all axes when axis is None; otherwise transposes the
        reduced axes to the front, flattens to 2-d, and reduces via the
        scipy.sparse matrix method of the same name.
        """
        if axis is None:
            axis = tuple(range(self.ndim))

        kwargs = {}
        if dtype:
            kwargs['dtype'] = dtype

        if isinstance(axis, numbers.Integral):
            axis = (axis,)

        if set(axis) == set(range(self.ndim)):
            # full reduction: operate directly on the data vector
            result = getattr(self.data, method)(**kwargs)
        else:
            axis = tuple(axis)

            neg_axis = list(range(self.ndim))
            for ax in axis:
                neg_axis.remove(ax)
            neg_axis = tuple(neg_axis)

            a = self.transpose(axis + neg_axis)
            a = a.reshape((np.prod([self.shape[d] for d in axis]),
                           np.prod([self.shape[d] for d in neg_axis])))

            a = a.to_scipy_sparse()
            a = getattr(a, method)(axis=0, **kwargs)
            if isinstance(a, scipy.sparse.spmatrix):
                a = COO.from_scipy_sparse(a)
                a.sorted = self.sorted
                a.has_duplicates = False
            elif isinstance(a, np.matrix):
                a = np.asarray(a)[0]
                a = COO.from_numpy(a)
            a = a.reshape([self.shape[d] for d in neg_axis])
            result = a

        if keepdims:
            result = _keepdims(self, result, axis)
        return result

    def sum(self, axis=None, keepdims=False, dtype=None, out=None):
        return self.reduction('sum', axis=axis, keepdims=keepdims, dtype=dtype)

    def max(self, axis=None, keepdims=False, out=None):
        x = self.reduction('max', axis=axis, keepdims=keepdims)
        # TODO: verify that there are some missing elements in each entry
        # (clamping negatives to 0 is only valid when every reduced slot
        # contains at least one implicit zero)
        if isinstance(x, COO):
            x.data[x.data < 0] = 0
            return x
        elif isinstance(x, np.ndarray):
            x[x < 0] = 0
            return x
        else:
            return np.max(x, 0)

    def transpose(self, axes=None):
        """Permute the axes; returns self when the permutation is identity."""
        if axes is None:
            axes = reversed(range(self.ndim))

        axes = tuple(axes)

        if axes == tuple(range(self.ndim)):
            return self

        if self._cache is not None:
            for ax, value in self._cache['transpose']:
                if ax == axes:
                    return value

        shape = tuple(self.shape[ax] for ax in axes)
        result = COO(self.coords[axes, :], self.data, shape,
                     has_duplicates=self.has_duplicates,
                     cache=self._cache is not None)

        if self._cache is not None:
            self._cache['transpose'].append((axes, result))
        return result

    @property
    def T(self):
        return self.transpose(list(range(self.ndim))[::-1])

    def dot(self, other):
        return dot(self, other)

    def __matmul__(self, other):
        try:
            return dot(self, other)
        except NotImplementedError:
            return NotImplemented

    def __rmatmul__(self, other):
        try:
            return dot(other, self)
        except NotImplementedError:
            return NotImplemented

    def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
        # legacy numpy hook: opting out makes numpy fall back to COO's
        # own operator methods for mixed ndarray/COO expressions
        return NotImplemented

    def linear_loc(self, signed=False):
        """ Index location of every piece of data in a flattened array

        This is used internally to check for duplicates, re-order, reshape,
        etc..
        """
        n = reduce(operator.mul, self.shape)
        if signed:
            n = -n
        dtype = np.min_scalar_type(n)
        out = np.zeros(self.nnz, dtype=dtype)
        tmp = np.zeros(self.nnz, dtype=dtype)
        strides = 1
        for i, d in enumerate(self.shape[::-1]):
            # out += self.coords[-(i + 1), :].astype(dtype) * strides
            np.multiply(self.coords[-(i + 1), :], strides, out=tmp, dtype=dtype)
            np.add(tmp, out, out=out)
            strides *= d
        return out

    def reshape(self, shape):
        """Return a view with the same data laid out in *shape*; a single
        -1 entry is inferred from the remaining dimensions."""
        if self.shape == shape:
            return self
        if any(d == -1 for d in shape):
            extra = int(np.prod(self.shape) /
                        np.prod([d for d in shape if d != -1]))
            shape = tuple([d if d != -1 else extra for d in shape])

        if self.shape == shape:
            return self

        if self._cache is not None:
            for sh, value in self._cache['reshape']:
                if sh == shape:
                    return value

        # TODO: this np.prod(self.shape) enforces a 2**64 limit to array size
        linear_loc = self.linear_loc()

        coords = np.empty((len(shape), self.nnz), dtype=np.min_scalar_type(max(shape)))
        strides = 1
        for i, d in enumerate(shape[::-1]):
            coords[-(i + 1), :] = (linear_loc // strides) % d
            strides *= d

        result = COO(coords, self.data, shape,
                     has_duplicates=self.has_duplicates,
                     sorted=self.sorted, cache=self._cache is not None)

        if self._cache is not None:
            self._cache['reshape'].append((shape, result))
        return result

    def to_scipy_sparse(self):
        """Convert a 2-d COO array into a scipy.sparse.coo_matrix."""
        assert self.ndim == 2
        result = scipy.sparse.coo_matrix((self.data,
                                          (self.coords[0],
                                           self.coords[1])),
                                         shape=self.shape)
        result.has_canonical_format = (not self.has_duplicates and self.sorted)
        return result

    def _tocsr(self):
        assert self.ndim == 2

        # Pass 1: sum duplicates
        self.sum_duplicates()

        # Pass 2: sort indices
        self.sort_indices()
        row, col = self.coords

        # Pass 3: count nonzeros in each row
        indptr = np.zeros(self.shape[0] + 1, dtype=np.int64)
        np.cumsum(np.bincount(row, minlength=self.shape[0]), out=indptr[1:])

        return scipy.sparse.csr_matrix((self.data, col, indptr), shape=self.shape)

    def tocsr(self):
        """CSR form; cached (and derived from a cached CSC when possible)."""
        if self._cache is not None:
            try:
                return self._csr
            except AttributeError:
                pass
            try:
                self._csr = self._csc.tocsr()
                return self._csr
            except AttributeError:
                pass

            self._csr = csr = self._tocsr()
        else:
            csr = self._tocsr()
        return csr

    def tocsc(self):
        """CSC form; cached (and derived from a cached CSR when possible)."""
        if self._cache is not None:
            try:
                return self._csc
            except AttributeError:
                pass
            try:
                self._csc = self._csr.tocsc()
                return self._csc
            except AttributeError:
                pass

            self._csc = csc = self.tocsr().tocsc()
        else:
            csc = self.tocsr().tocsc()
        return csc

    def sort_indices(self):
        """In-place sort of entries by their flattened index; no-op when
        already sorted."""
        if self.sorted:
            return

        linear = self.linear_loc(signed=True)

        if (np.diff(linear) > 0).all():  # already sorted
            self.sorted = True
            return self

        order = np.argsort(linear)
        self.coords = self.coords[:, order]
        self.data = self.data[order]
        self.sorted = True
        return self

    def sum_duplicates(self):
        """In-place canonicalization: sort entries and sum those sharing
        a coordinate; no-op when already duplicate-free."""
        # Inspired by scipy/sparse/coo.py::sum_duplicates
        # See https://github.com/scipy/scipy/blob/master/LICENSE.txt
        if not self.has_duplicates:
            return self
        if not np.prod(self.coords.shape):
            return self

        self.sort_indices()
        linear = self.linear_loc()
        unique_mask = np.diff(linear) != 0

        if unique_mask.sum() == len(unique_mask):  # already unique
            self.has_duplicates = False
            return self

        unique_mask = np.append(True, unique_mask)

        coords = self.coords[:, unique_mask]
        (unique_inds,) = np.nonzero(unique_mask)
        data = np.add.reduceat(self.data, unique_inds, dtype=self.data.dtype)

        self.data = data
        self.coords = coords
        self.has_duplicates = False

        return self

    def __add__(self, other):
        if isinstance(other, numbers.Number) and other == 0:
            return self
        if not isinstance(other, COO):
            # adding a dense/scalar operand generally densifies the result
            return self.maybe_densify() + other
        if self.shape == other.shape:
            return self.elemwise_binary(operator.add, other)
        else:
            raise NotImplementedError("Broadcasting not yet supported")

    def __radd__(self, other):
        return self + other

    def __neg__(self):
        return COO(self.coords, -self.data, self.shape, self.has_duplicates,
                   self.sorted)

    def __sub__(self, other):
        return self + (-other)

    def __rsub__(self, other):
        return -self + other

    def __mul__(self, other):
        if isinstance(other, COO):
            return self.elemwise_binary(operator.mul, other)
        else:
            return self.elemwise(operator.mul, other)

    __rmul__ = __mul__

    def __truediv__(self, other):
        return self.elemwise(operator.truediv, other)

    def __floordiv__(self, other):
        return self.elemwise(operator.floordiv, other)

    __div__ = __truediv__

    def __pow__(self, other):
        return self.elemwise(operator.pow, other)

    def elemwise(self, func, *args, **kwargs):
        """Apply *func* elementwise to the stored values.

        Rejects functions that map zero to non-zero (the result would be
        dense); pass check=False to skip that probe.
        """
        if kwargs.pop('check', True) and func(0, *args, **kwargs) != 0:
            raise ValueError("Performing this operation would produce "
                             "a dense result: %s" % str(func))
        return COO(self.coords, func(self.data, *args, **kwargs),
                   shape=self.shape,
                   has_duplicates=self.has_duplicates,
                   sorted=self.sorted)

    def elemwise_binary(self, func, other, *args, **kwargs):
        """Apply binary *func* over the union of both operands' non-zero
        coordinates (missing entries are treated as 0)."""
        assert isinstance(other, COO)
        if kwargs.pop('check', True) and func(0, 0, *args, **kwargs) != 0:
            raise ValueError("Performing this operation would produce "
                             "a dense result: %s" % str(func))
        if self.shape != other.shape:
            raise NotImplementedError("Broadcasting is not supported")
        self.sum_duplicates()  # TODO: document side-effect or make copy
        other.sum_duplicates()  # TODO: document side-effect or make copy

        # Sort self.coords in lexographical order using record arrays
        self_coords = np.rec.fromarrays(self.coords)
        i = np.argsort(self_coords)
        self_coords = self_coords[i]
        self_data = self.data[i]

        # Convert other.coords to a record array
        other_coords = np.rec.fromarrays(other.coords)
        other_data = other.data

        # Find matches between self.coords and other.coords
        j = np.searchsorted(self_coords, other_coords)
        if len(self_coords):
            matched_other = (other_coords == self_coords[j % len(self_coords)])
        else:
            matched_other = np.zeros(shape=(0,), dtype=bool)
        matched_self = j[matched_other]

        # Locate coordinates without a match
        unmatched_other = ~matched_other
        unmatched_self = np.ones(len(self_coords), dtype=bool)
        unmatched_self[matched_self] = 0

        # Concatenate matches and mismatches
        data = np.concatenate([func(self_data[matched_self],
                                    other_data[matched_other],
                                    *args, **kwargs),
                               func(self_data[unmatched_self], 0,
                                    *args, **kwargs),
                               func(0, other_data[unmatched_other],
                                    *args, **kwargs)])
        coords = np.concatenate([self_coords[matched_self],
                                 self_coords[unmatched_self],
                                 other_coords[unmatched_other]])

        # drop entries the operation turned into explicit zeros
        nonzero = data != 0
        data = data[nonzero]
        coords = coords[nonzero]

        # record array to ND array
        coords = np.asarray(coords.view(coords.dtype[0]).reshape(len(coords), self.ndim)).T

        return COO(coords, data, shape=self.shape, has_duplicates=False)

    def __abs__(self):
        return self.elemwise(abs)

    def exp(self, out=None):
        # exp(0) == 1, so the result is dense by construction
        assert out is None
        return np.exp(self.maybe_densify())

    def expm1(self, out=None):
        assert out is None
        return self.elemwise(np.expm1)

    def log1p(self, out=None):
        assert out is None
        return self.elemwise(np.log1p)

    def sin(self, out=None):
        assert out is None
        return self.elemwise(np.sin)

    def sinh(self, out=None):
        assert out is None
        return self.elemwise(np.sinh)

    def tan(self, out=None):
        assert out is None
        return self.elemwise(np.tan)

    def tanh(self, out=None):
        assert out is None
        return self.elemwise(np.tanh)

    def sqrt(self, out=None):
        assert out is None
        return self.elemwise(np.sqrt)

    def ceil(self, out=None):
        assert out is None
        return self.elemwise(np.ceil)

    def floor(self, out=None):
        assert out is None
        return self.elemwise(np.floor)

    def round(self, decimals=0, out=None):
        assert out is None
        return self.elemwise(np.round, decimals)

    def rint(self, out=None):
        assert out is None
        return self.elemwise(np.rint)

    def conj(self, out=None):
        assert out is None
        return self.elemwise(np.conj)

    def conjugate(self, out=None):
        assert out is None
        return self.elemwise(np.conjugate)

    def astype(self, dtype, out=None):
        assert out is None
        # check=False: casting never maps 0 to a non-zero value
        return self.elemwise(np.ndarray.astype, dtype, check=False)

    def __gt__(self, other):
        if not isinstance(other, numbers.Number):
            raise NotImplementedError("Only scalars supported")
        if other < 0:
            # 0 > negative is True everywhere, i.e. a dense result
            raise ValueError("Comparison with negative number would produce "
                             "dense result")
        return self.elemwise(operator.gt, other)

    def __ge__(self, other):
        if not isinstance(other, numbers.Number):
            raise NotImplementedError("Only scalars supported")
        if other <= 0:
            raise ValueError("Comparison with negative number would produce "
                             "dense result")
        return self.elemwise(operator.ge, other)

    def maybe_densify(self, allowed_nnz=1e3, allowed_fraction=0.25):
        """ Convert to a dense numpy array if not too costly.  Err otherwise """
        if reduce(operator.mul, self.shape) <= allowed_nnz or self.nnz >= np.prod(self.shape) * allowed_fraction:
            return self.todense()
        else:
            raise NotImplementedError("Operation would require converting "
                                      "large sparse array to dense")
def tensordot(a, b, axes=2):
    """Tensor contraction of *a* and *b* over the given *axes*.

    Works for any mix of COO / scipy.sparse / ndarray operands that the
    module-level ``_dot`` can multiply.  Much of this is adapted from
    numpy/core/numeric.py::tensordot; please see the license at
    https://github.com/numpy/numpy/blob/master/LICENSE.txt

    Parameters
    ----------
    a, b : array-like
    axes : int or (axes_a, axes_b) pair
        An integer N contracts the last N axes of *a* with the first N
        axes of *b*; otherwise explicit axis lists for each operand.
    """
    try:
        iter(axes)
    except TypeError:
        # Fixed: this was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit.  ``iter()`` raises TypeError
        # when *axes* is a plain integer.
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1

    # a, b = asarray(a), asarray(b)  # <--- modified
    as_ = a.shape
    nda = a.ndim
    bs = b.shape
    ndb = b.ndim
    equal = True
    if na != nb:
        equal = False
    else:
        # verify contracted dimensions match, normalizing negative axes
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")

    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]

    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]

    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)

    res = _dot(at, bt)
    if isinstance(res, scipy.sparse.spmatrix):
        if res.nnz > reduce(operator.mul, res.shape) / 2:
            # more than half full: a dense result is cheaper to carry
            res = res.todense()
        else:
            res = COO.from_scipy_sparse(res)  # <--- modified
            res.has_duplicates = False
    if isinstance(res, np.matrix):
        res = np.asarray(res)
    return res.reshape(olda + oldb)
def dot(a, b):
    """Matrix/tensor product: contracts the last axis of *a* with the
    second-to-last axis of *b* (numpy.dot convention)."""
    if hasattr(a, 'ndim') and hasattr(b, 'ndim'):
        return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
    raise NotImplementedError(
        "Cannot perform dot product on types %s, %s" %
        (type(a), type(b)))
def _dot(a, b):
    """2-d matrix multiply dispatch used by tensordot.

    Canonicalizes COO operands, then routes the product through scipy's
    csr @ csc multiplication.
    """
    if isinstance(a, COO):
        a.sum_duplicates()
    if isinstance(b, COO):
        b.sum_duplicates()
    if isinstance(b, COO) and not isinstance(a, COO):
        # only the right operand is sparse: transpose both so the COO
        # operand lands on the left, then transpose the result back
        return _dot(b.T, a.T).T
    aa = a.tocsr()

    if isinstance(b, (COO, scipy.sparse.spmatrix)):
        b = b.tocsc()
    return aa.dot(b)
def _keepdims(original, new, axis):
shape = list(original.shape)
for ax in axis:
shape[ax] = 1
return new.reshape(shape)
def _mask(coords, idx):
if isinstance(idx, numbers.Integral):
return coords == idx
elif isinstance(idx, slice):
if idx.step not in (1, None):
raise NotImplementedError("Steped slices not implemented")
start = idx.start if idx.start is not None else 0
stop = idx.stop if idx.stop is not None else np.inf
return (coords >= start) & (coords < stop)
elif isinstance(idx, list):
mask = np.zeros(len(coords), dtype=bool)
for item in idx:
mask |= coords == item
return mask
def concatenate(arrays, axis=0):
    """Concatenate COO (or COO-coercible) arrays along *axis*.

    All dimensions other than *axis* must match across the inputs.
    """
    arrays = [x if type(x) is COO else COO(x) for x in arrays]
    if axis < 0:
        axis = axis + arrays[0].ndim
    assert all(x.shape[ax] == arrays[0].shape[ax]
               for x in arrays
               for ax in set(range(arrays[0].ndim)) - {axis})
    data = np.concatenate([x.data for x in arrays])
    coords = np.concatenate([x.coords for x in arrays], axis=1)

    nnz = 0
    dim = 0
    for x in arrays:
        if dim:
            # shift this array's coordinates past the arrays before it
            coords[axis, nnz:x.nnz + nnz] += dim
        dim += x.shape[axis]
        nnz += x.nnz

    shape = list(arrays[0].shape)
    shape[axis] = dim
    has_duplicates = any(x.has_duplicates for x in arrays)

    return COO(coords, data, shape=shape, has_duplicates=has_duplicates,
               sorted=(axis == 0) and all(a.sorted for a in arrays))
def stack(arrays, axis=0):
    """Stack same-shaped COO (or COO-coercible) arrays along a new axis."""
    assert len(set(x.shape for x in arrays)) == 1
    arrays = [x if type(x) is COO else COO(x) for x in arrays]
    if axis < 0:
        # +1 because the result has one more dimension than the inputs
        axis = axis + arrays[0].ndim + 1
    data = np.concatenate([x.data for x in arrays])
    coords = np.concatenate([x.coords for x in arrays], axis=1)

    nnz = 0
    dim = 0
    # coordinate row for the freshly created axis: entry i of the k-th
    # input gets coordinate k along the new axis
    new = np.empty(shape=(coords.shape[1],), dtype=coords.dtype)
    for x in arrays:
        new[nnz:x.nnz + nnz] = dim
        dim += 1
        nnz += x.nnz

    shape = list(arrays[0].shape)
    shape.insert(axis, len(arrays))
    has_duplicates = any(x.has_duplicates for x in arrays)
    coords = [coords[i] for i in range(coords.shape[0])]
    coords.insert(axis, new)
    coords = np.stack(coords, axis=0)

    return COO(coords, data, shape=shape, has_duplicates=has_duplicates,
               sorted=(axis == 0) and all(a.sorted for a in arrays))
# --- snippet boundary ---
# bench/qensorbench/ordering_algo.py
import qtree
import networkx as nx
import sys
import glob
import numpy as np
from pathlib import Path
from qtensor import QtreeQAOAComposer
from qtensor.optimisation.Optimizer import OrderingOptimizer, TamakiOptimizer
from qtensor.optimisation.TensorNet import QtreeTensorNet
def print_row(*args):
    """Print *args* as a single comma-separated CSV row."""
    print(','.join(str(value) for value in args))
def get_test_problem(n=14, p=2, d=3):
    """Random *d*-regular benchmark instance for a depth-*p* QAOA run:
    returns the graph plus fixed per-layer gamma/beta angle lists."""
    graph = nx.random_regular_graph(d, n)
    gamma = [np.pi / 3] * p
    beta = [np.pi / 2] * p
    return graph, gamma, beta
def test_orderings():
    """Benchmark greedy vs. tamaki ordering on growing random graphs.

    For each problem size, builds a p=3 QAOA ansatz tensor network and
    prints one CSV row (n, p, seed, method, treewidth) per optimizer.
    """
    opt = OrderingOptimizer()
    tam = TamakiOptimizer(wait_time=5)
    seed = 43
    np.random.seed(seed)
    for n in range(14, 45, 2):
        p = 3
        G, gamma, beta = get_test_problem(n, p=p)

        composer = QtreeQAOAComposer(
            graph=G, gamma=gamma, beta=beta)
        composer.ansatz_state()
        tn = QtreeTensorNet.from_qtree_gates(composer.circuit)

        peo, tn = opt.optimize(tn)
        treewidth = opt.treewidth
        print_row(n, p, seed, 'greedy', treewidth)

        peo, tn = tam.optimize(tn)
        treewidth = tam.treewidth
        print_row(n, p, seed, 'tamaki', treewidth)
def test_orderings_bristlecone():
    """Benchmark greedy vs. tamaki ordering on Bristlecone circuits.

    Expects the circuits directory as argv[1]; reads every "*_0.txt"
    circuit file and prints one CSV row per optimizer and circuit.
    """
    opt = OrderingOptimizer()
    tam = TamakiOptimizer(wait_time=15)
    seed = 43
    np.random.seed(seed)
    brists = sys.argv[1]
    files = glob.glob(f'{brists}/*_0.txt')

    for filename in files:
        name = Path(filename).name
        n_qubits, circuit = qtree.operators.read_circuit_file(filename)
        # read_circuit_file returns a list of gate layers; flatten it
        circuit = sum(circuit, [])
        tn = QtreeTensorNet.from_qtree_gates(circuit)

        peo, tn = opt.optimize(tn)
        treewidth = opt.treewidth
        print_row(n_qubits, name, seed, 'greedy', treewidth)

        peo, tn = tam.optimize(tn)
        treewidth = tam.treewidth
        print_row(n_qubits, name, seed, 'tamaki', treewidth)
if __name__=='__main__':
    # test_orderings()  # random-regular-graph benchmark (disabled)
    test_orderings_bristlecone()
# --- snippet boundary ---
# repo: thornwishstalon/probable-liftoff-machine
from common.credentials import Config
from module.liftoff_module import LiftoffModule
from module.subscriber import SubscriberList
from common.event import EVENT_POST_TRIP_END, EventFactory, EVENT_PRE_TRIP_START, EVENT_POWER_UPDATE, \
EVENT_POWER_TRACK_DONE
import ubinascii
from machine import Timer, unique_id, Pin
import time
import urandom
import uasyncio
from Emonlib import Emonlib
### aka the NOSE
class PowerModule(LiftoffModule):
    """Power-metering module ("the nose").

    Tracks total power consumption per charging transaction and keeps
    the latest instantaneous reading; results are published over MQTT.
    """

    def __init__(self, config):
        super().__init__(config)
        # transaction id -> {'power': summed samples, 'time_ms': elapsed ms}
        self.register = {}
        # latest instantaneous power reading in watts
        self.power = 0.

    def state(self):
        """Snapshot broadcast with the periodic EVENT_POWER_UPDATE."""
        return {'watt': self.power}

    @property
    def subscriber(self):
        """Events this module reacts to, with handler priority 500."""
        subs = SubscriberList()
        subs.register(EVENT_PRE_TRIP_START, self.start_track, 500)
        subs.register(EVENT_POST_TRIP_END, self.stop_track, 500)
        return subs

    def start_track(self, message):
        """
        start recording consumption for the transaction in *message*
        :param message: event payload with the transaction 'id'
        :return:
        """
        # todo: check if we don't overwrite a current state!
        transaction_code = message['id']
        self.register[transaction_code] = {'power': 0., 'time_ms': 0}

    def stop_track(self, message):
        """
        transaction has ended: clear the track and report the total consumption to the broker!
        :param message: event payload with the transaction 'id'
        :return:
        """
        transaction_code = message['id']
        # NOTE(review): raises KeyError if no matching start event was
        # seen for this id -- confirm the broker guarantees ordering.
        record = self.register[transaction_code]
        # NOTE(review): publishes via the module-level `module`/`config`
        # globals rather than `self` -- confirm this aliasing is intended.
        module.mqtt.publish(
            EVENT_POWER_TRACK_DONE,
            EventFactory.create_event(config.mqtt_id, transaction_code, record)
        )
        # remove record
        del self.register[transaction_code]

    def update_register(self, power, interval_ms):
        """
        update all active transaction tracks with latest power measurement
        :param power: latest sample in watts
        :param interval_ms: sampling interval covered by the sample
        :return:
        """
        # for all active transactions, add the consumption, and the interval count respectively
        if len(self.register) > 0:
            for item in self.register.values():
                item['power'] += power
                item['time_ms'] += interval_ms
# Hardware timers: 0 = MQTT queue pump, 1 = sensor sampling, 2 = state publish.
fetch_timer = Timer(0)
measurement_timer = Timer(1)
publish_timer = Timer(2)
emon = Emonlib()
# Board identity and configuration, then the module itself.
client_id = ubinascii.hexlify(unique_id())
config = Config(client_id).load()
module = PowerModule(config)
module.start()
async def setup():
    """Calibrate the current sensor on ADC pin 36 (calibration factor 30)."""
    await emon.current(Pin(36, Pin.IN), 30)
async def measure(emon):
    """Return apparent power: RMS current over 1480 samples times mains voltage.

    NOTE(review): assumes a fixed 230 V mains supply - confirm for target region.
    """
    return await emon.calc_current_rms(1480) * 230
# Timer callback: broadcast the current power reading to all subscribers.
def publish_state(timer):
    global module
    module.mqtt.publish(
        EVENT_POWER_UPDATE,
        # transaction id is None, because it's a general (non-transaction) event
        EventFactory.create_event(config.mqtt_id, None, module.state())
    )
def measure_power(timer):
    """Timer callback: sample the sensor and update all transaction tracks."""
    current_power = uasyncio.run(measure(emon))
    global module
    module.power = current_power
    # 250 ms matches this timer's period below; keep the two in sync.
    module.update_register(current_power, 250)
# Calibrate the sensor once, then arm the periodic timers.
uasyncio.run(setup())
###### TIMERS
print('start mqtt queue')
fetch_timer.init(period=500, mode=Timer.PERIODIC, callback=module.run)
time.sleep_ms(500)
print('start update queue')
publish_timer.init(period=5000, mode=Timer.PERIODIC, callback=publish_state)
measurement_timer.init(period=250, mode=Timer.PERIODIC, callback=measure_power)
| StarcoderdataPython |
4868536 | # Generated by Django 3.0.6 on 2020-05-23 17:24
from django.db import migrations
class Migration(migrations.Migration):
    # Renames School.head -> School.headmaster; depends on the previous
    # auto-generated core migration.
    dependencies = [
        ('core', '0002_auto_20200523_2006'),
    ]
    operations = [
        migrations.RenameField(
            model_name='school',
            old_name='head',
            new_name='headmaster',
        ),
    ]
| StarcoderdataPython |
9691399 | import pygame
from pygame import mixer
import os
import random
import csv
# Initialise audio and the main pygame subsystems before any asset loading.
mixer.init()
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = int(SCREEN_WIDTH*0.8)
SCREEN = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
pygame.display.set_caption('HALLOWEEN GAME')
#SET FRAME RATE
clock = pygame.time.Clock()
FPS = 60
#GAME VARIABLES
GRAVITY = 1
SCROLL_THRESHOLD = 200  # distance from screen edge at which the camera scrolls
ROWS = 16
COLUMNS = 150
TILE_SIZE = SCREEN_HEIGHT // ROWS
TILE_TYPES = os.listdir('sprites/tiles')
MAX_LEVEL = 3
screen_scroll = 0
bg_scroll = 0
level = 1
start_game = False
start_intro = False
#PLAYER ACTION VARIABLES
run = True
moving_left = False
moving_right = False
shoot = False
throw = False
grenade_thrown = False
#DEFINE COLOUR
BG = (144,201,120)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLACK =(0, 0, 0)
PINK = (235, 65, 54)
#LOADING MUSIC
pygame.mixer.music.load('audio/audio_music2.mp3')
pygame.mixer.music.set_volume(0.6)
pygame.mixer.music.play(-1, 0.0, 5000)# LOOP, BREAK, FADE
jump_snd = pygame.mixer.Sound('audio/audio_jump.wav')
jump_snd.set_volume(0.5)
shot_snd = pygame.mixer.Sound('audio/audio_shot.wav')
shot_snd.set_volume(0.5)
grenade_snd = pygame.mixer.Sound('audio/audio_thunder.wav')
grenade_snd.set_volume(0.3)
water_snd = pygame.mixer.Sound('audio/audio_water.wav')
water_snd.set_volume(0.5)
#LOADING IMAGES
#BUTTON
start_img = pygame.image.load('sprites/button/start_btn.png').convert_alpha()
restart_img = pygame.image.load('sprites/button/restart_btn.png').convert_alpha()
exit_img = pygame.image.load('sprites/button/exit_btn.png').convert_alpha()
#BACKGROUND
pine1_img = pygame.image.load('sprites/background/pine1.png').convert_alpha()
pine2_img = pygame.image.load('sprites/background/pine2.png').convert_alpha()
mountain_img = pygame.image.load('sprites/background/mountain.png').convert_alpha()
sky_img = pygame.image.load('sprites/background/sky_cloud.png').convert_alpha()
#TILE LIST LOADING
# NOTE(review): assumes tile images are named 0.png .. N.png - confirm folder.
tile_list = []
for i in range(len(TILE_TYPES)):
    img = pygame.image.load(f'sprites/tiles/{i}.png')
    img = pygame.transform.scale(img, (TILE_SIZE, TILE_SIZE)).convert_alpha()
    tile_list.append(img)
bullet_img = pygame.image.load('sprites/icons/slash.png').convert_alpha()
grenade_img = pygame.image.load('sprites/icons/grenade.png').convert_alpha()
health_box_img = pygame.image.load('sprites/icons/health_box.png').convert_alpha()
ammo_box_img = pygame.image.load('sprites/icons/ammo_box.png').convert_alpha()
grenade_box_img = pygame.image.load('sprites/icons/grenade_box.png').convert_alpha()
font = pygame.font.SysFont('Futura', 30)
def draw_text(text, font, text_col, x, y):
    """Render *text* with *font* in *text_col* and blit it onto the global SCREEN."""
    rendered = font.render(text, True, text_col)
    SCREEN.blit(rendered, (x, y))
def background_colour(BG):
    """Fill the screen and draw the parallax background layers.

    Each layer scrolls at a different fraction of bg_scroll so distant
    layers (sky, mountains) appear to move slower than near ones (pines).
    """
    SCREEN.fill(BG)
    width = sky_img.get_width()
    #pygame.draw.line(SCREEN, RED, (0,300),(SCREEN_WIDTH,300))
    for x in range(5):
        SCREEN.blit(sky_img, ((x * width) - bg_scroll * 0.3, 0))
        SCREEN.blit(mountain_img, ((x * width) - bg_scroll * 0.4, SCREEN_HEIGHT - mountain_img.get_height() - 300))
        SCREEN.blit(pine1_img, ((x * width) - bg_scroll * 0.5, SCREEN_HEIGHT - pine1_img.get_height() - 150))
        SCREEN.blit(pine2_img, ((x * width) - bg_scroll * 0.7, SCREEN_HEIGHT - pine2_img.get_height()))
def reset_level():
    """Empty every sprite group and return a fresh ROWS x COLUMNS tile grid.

    Each cell is initialised to -1 (the "no tile" marker used by the level
    CSV files); the caller then overwrites cells from the next level's data.
    """
    for group in (enemy_group, bullet_group, grenade_group, item_box_group,
                  explosion_group, decoration_group, water_group, exit_group):
        group.empty()
    # Fresh grid; a comprehension avoids aliasing bugs a repeated list would cause.
    return [[-1] * COLUMNS for _ in range(ROWS)]
# Maps a pickup type name (as used by ItemRefill) to its icon surface.
item_boxes = {
    'Health' : health_box_img,
    'Ammo' : ammo_box_img,
    'Grenade' : grenade_box_img
}
class Button():
    """Clickable image button; draw() returns True on a fresh left-click."""
    def __init__(self, x, y, image, scale):
        w = image.get_width()
        h = image.get_height()
        self.image = pygame.transform.scale(image, (int(w * scale), int(h * scale)))
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
        self.clicked = False
    def draw(self, surface):
        """Blit the button and report whether it was just clicked."""
        action = False
        mouse_pos = pygame.mouse.get_pos()
        # Only react while the pointer hovers the button; fire once per press
        # and re-arm when the mouse button is released.
        if self.rect.collidepoint(mouse_pos):
            if pygame.mouse.get_pressed()[0] == 1 and not self.clicked:
                action = True
                self.clicked = True
            if pygame.mouse.get_pressed()[0] == 0:
                self.clicked = False
        surface.blit(self.image, (self.rect.x, self.rect.y))
        return action
class Character(pygame.sprite.Sprite):
    """Any animated actor in the level: the player or an AI enemy.

    char_type selects the sprite folder ('player', 'reaper', 'ghost',
    'wizard', ...). Enemies drive themselves via ai(); the player is driven
    by the main loop through move()/shoot().
    """
    def __init__(self, char_type, x, y, scale, speed, ammo, grenades):
        pygame.sprite.Sprite.__init__(self)
        self.alive = True
        self.char_type = char_type
        self.health = 100
        self.max_health = self.health
        self.speed = speed
        self.skill_cooldown = 0  # frames until the next shot is allowed
        self.ammo = ammo
        self.start_ammo = ammo
        self.grenades = grenades
        self.direction = 1  # 1 = facing right, -1 = facing left
        self.vel_y = 0
        self.jump = False
        self.in_air = True
        self.flip = False
        self.animation_list = []
        self.frame_index = 0
        self.action = 0  # animation index: 0 idle, 1 run, 2 jump, 3 death, 4 attack
        self.update_time = pygame.time.get_ticks()
        # AI-specific state
        self.vision = pygame.Rect(0, 0, 150, 20)  # detection box in front of enemies
        self.move_counter = 0
        self.idling = False
        self.idling_counter = 0
        self.score = 0
        if char_type == 'wizard':
            self.health = 200  # boss-type enemy is tougher
        # Load every animation strip from sprites/<char_type>/<animation>/
        animation_list = ['idle', 'run', 'jump', 'death', 'attack']
        for animation in animation_list:
            temp_list = []
            no_of_frames = len(os.listdir(f'sprites/{self.char_type}/{animation}'))
            for i in range(no_of_frames):
                player_char = pygame.image.load(f'sprites/{self.char_type}/{animation}/{i}.png').convert_alpha()
                char = pygame.transform.scale(player_char,(int(player_char.get_width() * scale), int(player_char.get_height() * scale)))
                temp_list.append(char)
            self.animation_list.append(temp_list)
        self.img = self.animation_list[self.action][self.frame_index]
        self.rect = self.img.get_rect()  # collision/positioning box
        self.rect.center = (x,y)
        self.width = self.img.get_width()
        self.height = self.img.get_height()
    def update(self):
        """Per-frame housekeeping: animation, death check, shot cooldown."""
        self.update_animation()
        self.death()
        if self.skill_cooldown > 0:
            self.skill_cooldown -= 1
    def move(self, move_left, move_right):
        """Apply input/AI movement, gravity and collisions.

        Returns (screen_scroll, level_complete). Only the player's call ever
        produces a non-zero screen_scroll.
        """
        screen_scroll = 0
        dx = 0
        dy = 0
        if move_left:
            dx = -self.speed
            self.flip = True
            self.direction = -1
        if move_right:
            dx = self.speed
            self.flip = False
            self.direction = 1
        # Jump only when standing on the ground.
        if self.jump == True and self.in_air == False:
            self.vel_y = -15
            self.jump = False
            self.in_air = True
        # Gravity with a terminal velocity cap.
        self.vel_y += GRAVITY
        if self.vel_y > 10:
            self.vel_y = 10  # BUGFIX: cap was a bare no-op expression before
        dy += self.vel_y
        # Tile collisions.
        for tile in world.obstacle_list:
            # Horizontal collision.
            if tile[1].colliderect(self.rect.x + dx, self.rect.y, self.width, self.height):
                dx = 0
                # Walking enemies turn around when they hit a wall.
                if self.char_type == 'reaper' or self.char_type == 'skeleton' or self.char_type == 'wizard':
                    self.direction *= -1
                    self.move_counter = 0
            # Vertical collision.
            if tile[1].colliderect(self.rect.x, self.rect.y + dy, self.width, self.height):
                if self.vel_y < 0:
                    # Moving up: bumped head on the tile above.
                    self.vel_y = 0
                    self.in_air = False
                    dy = tile[1].bottom - self.rect.top
                elif self.vel_y >= 0:
                    # Falling: landed on the tile below.
                    self.vel_y = 0
                    self.in_air = False
                    dy = tile[1].top - self.rect.bottom
        # Touching water is instantly lethal.
        if pygame.sprite.spritecollide(self, water_group, False):
            self.health = 0
        # Reaching the exit sign finishes the level.
        level_complete = False
        if pygame.sprite.spritecollide(self, exit_group, False):
            level_complete = True
        # Falling off the bottom of the map is lethal too.
        if self.rect.bottom > SCREEN_HEIGHT:
            self.health = 0
        # Keep the player inside the visible screen.
        if self.char_type == 'player':
            if self.rect.left + dx < 0 or self.rect.right + dx > SCREEN_WIDTH:
                dx = 0
        self.rect.x += dx
        self.rect.y += dy
        # Near the screen edges, scroll the world instead of moving the player.
        if self.char_type == 'player':
            # BUGFIX: the left-edge test previously compared against the local
            # screen_scroll (always 0 here), so the camera never scrolled left;
            # the accumulated global bg_scroll is the correct reference.
            if (self.rect.right > SCREEN_WIDTH - SCROLL_THRESHOLD and bg_scroll < (world.level_length * TILE_SIZE) - SCREEN_WIDTH) or (self.rect.left < SCROLL_THRESHOLD and bg_scroll > abs(dx)):
                self.rect.x -= dx
                screen_scroll = -dx
        return screen_scroll, level_complete
    def shoot(self):
        """Fire one bullet in the facing direction if off cooldown with ammo left."""
        if self.skill_cooldown == 0 and self.ammo > 0:
            self.skill_cooldown = 85
            if self.char_type == 'player':
                self.skill_cooldown = 45  # the player fires faster than enemies
            bullet = Bullet(self.rect.centerx + (0.75 * self.rect.size[0] * self.direction), self.rect.centery, self.direction, self.flip)
            bullet_group.add(bullet)
            self.ammo -= 1
            shot_snd.play()
    def ai(self):
        """Simple patrol/attack behaviour for enemy characters."""
        if self.alive and player.alive:
            # Occasionally stop and idle for a while.
            if self.idling == False and random.randint(1, 200) == 1:
                self.update_action(0)#0 : IDLE
                self.idling = True
                self.idling_counter = 50
            if self.vision.colliderect(player.rect):
                # Player spotted: stop and attack.
                self.update_action(4)#4 : attack
                self.shoot()
            else:
                if self.idling == False:
                    if self.direction == 1:
                        ai_moving_right = True
                    else:
                        ai_moving_right = False
                    ai_moving_left = not ai_moving_right
                    self.move(ai_moving_left, ai_moving_right)
                    self.update_action(1)#1 : RUN
                    self.move_counter += 1
                    # Keep the vision box in front of the enemy as it walks.
                    self.vision.center = (self.rect.centerx + 75 * self.direction, self.rect.centery)
                    # Patrol one tile back and forth.
                    if self.move_counter > TILE_SIZE:
                        self.direction *= -1
                        self.move_counter *= -1
                else:
                    self.idling_counter -= 1
                    if self.idling_counter <= 0:
                        self.idling = False
        # Enemies scroll with the world like every other sprite.
        self.rect.x += screen_scroll
    def update_animation(self):
        """Advance to the next animation frame on a fixed cooldown."""
        animation_cooldown = 100  # milliseconds between frames
        self.img = self.animation_list[self.action][self.frame_index]
        if pygame.time.get_ticks() - self.update_time > animation_cooldown:
            self.update_time = pygame.time.get_ticks()
            self.frame_index += 1
        # Loop the strip, except death (3), which freezes on its last frame.
        if self.frame_index >= len(self.animation_list[self.action]):
            if self.action == 3:
                self.frame_index = len(self.animation_list[self.action]) - 1
            else:
                self.frame_index = 0
    def update_action(self, new_action):
        """Switch animation strips, restarting timing only on a real change."""
        if new_action != self.action:
            self.action = new_action
            self.frame_index = 0
            self.update_time = pygame.time.get_ticks()
    def death(self):
        """When health runs out, freeze the character and play the death strip."""
        if self.health <= 0:
            self.health = 0
            self.speed = 0
            self.alive = False
            self.update_action(3)
    def draw(self):
        """Blit the current frame, mirrored horizontally when facing left."""
        SCREEN.blit(pygame.transform.flip(self.img, self.flip , False ), self.rect)
class World():
    """Holds the solid tiles and spawns all sprites from the level grid."""
    def __init__(self):
        self.obstacle_list = []  # list of (surface, rect) solid tiles
    def process_data(self, data):
        """Build the level from a grid of tile indices; returns (player, health_bar).

        NOTE(review): assumes exactly one player tile (15) exists in the data,
        otherwise `player` is unbound at the return - confirm level files.
        """
        self.level_length = len(data[0])
        #ITERATE THROUGH DATA FILE TO PROCESS DATA
        for y, row in enumerate(data):
            for x, tile in enumerate(row):
                if tile >= 0:
                    img = tile_list[tile]
                    img_rect = img.get_rect()
                    img_rect.x = x * TILE_SIZE
                    img_rect.y = y * TILE_SIZE
                    tile_data = (img, img_rect)
                    if tile >= 0 and tile <= 8:
                        self.obstacle_list.append(tile_data)
                    elif tile >= 9 and tile <= 10:
                        water = Water(img, x * TILE_SIZE, y * TILE_SIZE)
                        water_group.add(water)
                    elif tile >= 11 and tile <= 14:
                        decoration = Decoration(img, x * TILE_SIZE, y * TILE_SIZE)
                        decoration_group.add(decoration)
                    elif tile == 15: #CREATE PLAYER
                        player = Character('player', x * TILE_SIZE, y * TILE_SIZE, 1.25, 5, 20, 5)
                        health_bar = HealthBar(10,10, player.health, player.health)
                    elif tile == 16: #CREATE ENEMIES
                        enemy1 = Character('reaper', x * TILE_SIZE, y * TILE_SIZE, 1.25, 3, 50, 0)
                        enemy_group.add(enemy1)
                    elif tile == 17: #AMMO BOX
                        item_box = ItemRefill('Ammo', x * TILE_SIZE, y * TILE_SIZE)
                        item_box_group.add(item_box)
                    elif tile == 18: #GRENADE BOX
                        item_box = ItemRefill('Grenade', x * TILE_SIZE, y * TILE_SIZE)
                        item_box_group.add(item_box)
                    elif tile == 19: #HEALTHBOX
                        item_box = ItemRefill('Health', x * TILE_SIZE, y * TILE_SIZE)
                        item_box_group.add(item_box)
                    elif tile == 20: #CREATE EXIT
                        img = pygame.transform.scale(img, (130, 240)).convert_alpha()
                        exit = Exit(img, x * TILE_SIZE, y * TILE_SIZE)
                        exit_group.add(exit)
                    elif tile == 21: #CREATE ENEMIES
                        enemy1 = Character('ghost', x * TILE_SIZE, y * TILE_SIZE, 1, 3, 50, 0)
                        enemy_group.add(enemy1)
                    elif tile == 22: #CREATE ENEMIES
                        enemy2 = Character('wizard', x * TILE_SIZE, y * TILE_SIZE, 1.20, 3, 250, 0)
                        enemy_group.add(enemy2)
        return player, health_bar
    def draw(self):
        # Shift every solid tile by the camera scroll, then blit it.
        for tile in self.obstacle_list:
            tile[1][0] += screen_scroll
            SCREEN.blit(tile[0], tile[1])
class Decoration(pygame.sprite.Sprite):
    """Purely visual map tile (no collision) anchored to the tile grid."""
    def __init__(self, img, x, y):
        super(Decoration, self).__init__()
        self.image = img
        self.rect = self.image.get_rect()
        # Anchor so the sprite rests on the floor of its tile cell.
        anchor_x = x + TILE_SIZE // 2
        anchor_y = y + (TILE_SIZE - self.image.get_height())
        self.rect.midtop = (anchor_x, anchor_y)
    def update(self):
        # Follow the camera: world scroll shifts every static sprite.
        self.rect.x += screen_scroll
class Water(pygame.sprite.Sprite):
    """Hazard tile: contact kills a Character (checked in Character.move)."""
    def __init__(self, img, x, y):
        super(Water, self).__init__()
        self.image = img
        self.rect = self.image.get_rect()
        # Sit the water surface on the floor of its tile cell.
        anchor_x = x + TILE_SIZE // 2
        anchor_y = y + (TILE_SIZE - self.image.get_height())
        self.rect.midtop = (anchor_x, anchor_y)
    def update(self):
        # Follow the camera scroll.
        self.rect.x += screen_scroll
class Exit(pygame.sprite.Sprite):
    """Level-exit sign: touching it marks the level complete."""
    def __init__(self, img, x, y):
        super(Exit, self).__init__()
        self.image = img
        self.rect = self.image.get_rect()
        # Anchored like other tiles, nudged 20px down to line up visually.
        anchor_x = x + TILE_SIZE // 2
        anchor_y = y + (TILE_SIZE - self.image.get_height() + 20)
        self.rect.midtop = (anchor_x, anchor_y)
    def update(self):
        # Follow the camera scroll.
        self.rect.x += screen_scroll
class ItemRefill(pygame.sprite.Sprite):
    """Pickup box granting health, ammo or grenades on player contact."""
    def __init__(self, item_type, x, y):
        super(ItemRefill, self).__init__()
        self.item_type = item_type
        self.image = item_boxes[self.item_type]
        self.rect = self.image.get_rect()
        self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
    def update(self):
        # Follow the camera scroll.
        self.rect.x += screen_scroll
        # Apply the pickup once the player touches the box, then vanish.
        if pygame.sprite.collide_rect(self, player):
            if self.item_type == 'Health':
                # Heal 25, clamped to the player's maximum.
                player.health = min(player.health + 25, player.max_health)
            elif self.item_type == 'Ammo':
                player.ammo += 15
            elif self.item_type == 'Grenade':
                player.grenades += 7
            self.kill()
class HealthBar():
    """On-screen bar showing current versus maximum player health."""
    def __init__(self, x, y, health, max_health):
        self.x = x
        self.y = y
        self.health = health
        self.max_health = max_health
    def draw(self, health):
        """Record the latest health value and draw border, track and fill."""
        self.health = health
        fill_ratio = self.health / self.max_health
        pygame.draw.rect(SCREEN, BLACK, (self.x - 2, self.y - 2, 154, 24))
        pygame.draw.rect(SCREEN, RED, (self.x, self.y, 150, 20))
        pygame.draw.rect(SCREEN, GREEN, (self.x, self.y, 150 * fill_ratio, 20))
class Bullet(pygame.sprite.Sprite):
    """Projectile fired by the player or an enemy; flies horizontally."""
    def __init__(self, x, y, direction, flip):
        pygame.sprite.Sprite.__init__(self)
        self.speed = 10
        self.flip = flip
        self.direction = direction
        self.image = pygame.transform.flip(bullet_img, self.flip, False)
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
    def update(self):
        # Move by own speed plus the camera scroll.
        self.rect.x += (self.direction * self.speed) +screen_scroll
        #CHECK IF BULLET HAS LEFT THE SCREEN
        if self.rect.right < 0 or self.rect.left > SCREEN_WIDTH:
            self.kill()
        #CHECK FOR COLLISION WITH LEVEL
        for tile in world.obstacle_list:
            if tile[1].colliderect(self.rect):
                self.kill()
        # Hit tests against the player and every enemy.
        # NOTE(review): spritecollide checks the whole bullet_group inside each
        # bullet's update, so any overlapping bullet triggers the damage - confirm
        # this per-frame damage model is intended.
        if pygame.sprite.spritecollide(player, bullet_group, False):
            if player.alive:
                player.health -= 5
                self.kill()
        for enemy in enemy_group:
            if pygame.sprite.spritecollide(enemy, bullet_group, False):
                if enemy.alive:
                    enemy.health -= 20
                    #print(enemy.health)
                    self.kill()
class Grenade(pygame.sprite.Sprite):
    """Thrown grenade: bounces off walls, lands, then explodes on a timer."""
    def __init__(self, x, y, direction):
        pygame.sprite.Sprite.__init__(self)
        self.timer = 100  # frames until detonation
        self.vel_y = -11  # initial upward throw velocity
        self.speed = 8
        self.image = grenade_img
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.width = self.image.get_width()
        self.height = self.image.get_height()
        self.direction = direction
    def update(self):
        self.vel_y += GRAVITY
        dx = self.direction * self.speed
        dy = self.vel_y
        #CHECK FOR COLLISION WITH LEVEL
        for tile in world.obstacle_list:
            # Bounce off walls by reversing horizontal direction.
            if tile[1].colliderect(self.rect.x + dx, self.rect.y, self.width, self.height):
                self.direction *= -1
                dx = self.direction * self.speed
            # Vertical collision stops the horizontal roll.
            if tile[1].colliderect(self.rect.x, self.rect.y + dy, self.width, self.height):
                self.speed = 0
                # Thrown upward: bumped the tile above.
                if self.vel_y < 0:
                    self.vel_y = 0
                    dy = tile[1].bottom - self.rect.top
                # Falling: landed on the tile below.
                elif self.vel_y >= 0:
                    self.vel_y = 0
                    dy = tile[1].top - self.rect.bottom
        #UPDATE GRENADE POSITION
        self.rect.x += dx + screen_scroll
        self.rect.y += dy
        #COUNTDOWN TIMER
        self.timer -= 1
        if self.timer <= 0:
            self.kill()
            grenade_snd.play()
            explosion = Explosion(self.rect.x, self.rect.y, 0.7)
            explosion_group.add(explosion)
            # Splash damage within a 2-tile radius around the blast.
            if abs(self.rect.centerx - player.rect.centerx) < TILE_SIZE * 2 and abs(self.rect.centery - player.rect.centery) < TILE_SIZE * 2:
                player.health -= 50
            for enemy in enemy_group:
                if abs(self.rect.centerx - enemy.rect.centerx) < TILE_SIZE * 2 and abs(self.rect.centery - enemy.rect.centery) < TILE_SIZE * 2:
                    enemy.health -= 50
class Explosion(pygame.sprite.Sprite):
    """One-shot explosion animation played where a grenade detonated."""
    def __init__(self, x, y, scale):
        pygame.sprite.Sprite.__init__(self)
        self.images =[]
        path = 'sprites/explosion'
        # NOTE(review): frames start at 1.png and the last file is skipped by
        # range(1, len(...)) - confirm the folder's frame numbering.
        for num in range(1 , len(os.listdir(path))):
            img = pygame.image.load(f'{path}/{num}.png').convert_alpha()
            img = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))
            self.images.append(img)
        self.frame_index = 0
        self.image = self.images[self.frame_index]
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)
        self.counter = 0
    def update(self):
        #SCROLL
        self.rect.x += screen_scroll
        explosion_speed = 4  # game frames per animation frame
        #UPDATE EXPLOSION
        self.counter += 1
        if self.counter >= explosion_speed:
            self.counter = 0
            self.frame_index += 1
            # Remove the sprite once the animation has fully played.
            if self.frame_index >= len(self.images):
                self.kill()
            else:
                self.image = self.images[self.frame_index]
class ScreenFade():
    """Full-screen fade transition.

    direction 1: four rectangles close in from the screen edges (level intro).
    direction 2: a single rectangle wipes down from the top (death fade).
    """
    def __init__(self, direction, colour, speed):
        self.direction = direction
        self.colour = colour
        self.speed = speed
        self.fade_counter = 0  # grows each frame; caller resets it after completion
    def fade(self):
        """Advance and draw one fade step; returns True once complete."""
        fade_complete = False
        self.fade_counter += self.speed
        if self.direction == 1:#WHOLE SCREEN FADES
            pygame.draw.rect(SCREEN, self.colour, (0 - self.fade_counter, 0, SCREEN_WIDTH // 2, SCREEN_HEIGHT))
            pygame.draw.rect(SCREEN, self.colour, (SCREEN_WIDTH // 2 + self.fade_counter, 0, SCREEN_WIDTH, SCREEN_HEIGHT))
            pygame.draw.rect(SCREEN, self.colour, (0 , 0 - self.fade_counter, SCREEN_WIDTH, SCREEN_HEIGHT // 2))
            pygame.draw.rect(SCREEN, self.colour, (0 , SCREEN_HEIGHT // 2 + self.fade_counter, SCREEN_WIDTH, SCREEN_HEIGHT))
        if self.direction == 2:# VERTICAL SCREEN FADE
            pygame.draw.rect(SCREEN, self.colour, (0, 0, SCREEN_WIDTH, 0 + self.fade_counter))
        # NOTE(review): completion compares against SCREEN_WIDTH even for the
        # vertical (direction 2) fade; SCREEN_HEIGHT may be intended - confirm.
        if self.fade_counter >= SCREEN_WIDTH:
            fade_complete = True
        return fade_complete
#CREATE BG FADES
intro_fade = ScreenFade(1, BLACK, 4)
death_fade = ScreenFade(2, PINK, 4)
#CREATE BUTTONS
start_button = Button(SCREEN_WIDTH // 2 - 130, SCREEN_HEIGHT // 2 - 150, start_img, 1)
restart_button = Button(SCREEN_WIDTH // 2 - 100, SCREEN_HEIGHT // 2 - 50, restart_img, 2)
exit_button = Button(SCREEN_WIDTH // 2 - 110, SCREEN_HEIGHT // 2 + 50, exit_img, 1)
#CREATE SPRITE GROUPS
enemy_group = pygame.sprite.Group()
bullet_group = pygame.sprite.Group()
grenade_group = pygame.sprite.Group()
explosion_group = pygame.sprite.Group()
item_box_group = pygame.sprite.Group()
water_group = pygame.sprite.Group()
decoration_group = pygame.sprite.Group()
exit_group = pygame.sprite.Group()
#CREATE EMPTY TILE LIST
world_data = []
for row in range(ROWS):
    r = [-1] * COLUMNS
    world_data.append(r)
# Load the initial level layout: each CSV cell holds a tile index (-1 = empty).
with open(f'sprites/world_level/level{level}_data.csv', newline = '') as csvfile:
    df = csv.reader(csvfile, delimiter = ',')
    for x, row in enumerate(df):
        for y, tile in enumerate(row):
            world_data[x][y] = int(tile)
world = World()
player, health_bar = world.process_data(world_data)
# Main game loop: menu / play-state drawing, player actions, event handling.
while run:
    clock.tick(FPS)
    if start_game == False:
        #MAIN MENU
        SCREEN.fill(BG)
        #ADD BUTTON
        if start_button.draw(SCREEN):
            start_game = True
            start_intro = True
        if exit_button.draw(SCREEN):
            run = False
    else:
        #DRAW BACKGROUND
        background_colour(BG)
        #DRAW WORLD MAP
        world.draw()
        #SHOW HEALTH
        health_bar.draw(player.health)
        #SHOW AMMO
        draw_text(f'SHOT :', font, WHITE , 10, 40)
        for x in range(player.ammo):
            SCREEN.blit(bullet_img, (90 + x*10, 45))
        #SHOW GRENADES
        draw_text(f'GRENADE :', font, WHITE , 10, 65)
        for x in range(player.grenades):
            SCREEN.blit(grenade_img, (135 + x*16, 67))
        player.update()
        player.draw()
        for enemy in enemy_group:
            enemy.ai()
            enemy.update()
            enemy.draw()
        #UPDATE AND DRAW GROUPS
        bullet_group.update()
        grenade_group.update()
        explosion_group.update()
        item_box_group.update()
        water_group.update()
        decoration_group.update()
        exit_group.update()
        bullet_group.draw(SCREEN)
        grenade_group.draw(SCREEN)
        explosion_group.draw(SCREEN)
        item_box_group.draw(SCREEN)
        water_group.draw(SCREEN)
        decoration_group.draw(SCREEN)
        exit_group.draw(SCREEN)
        #SHOW INTRO
        if start_intro == True:
            if intro_fade.fade():
                start_intro = False
                intro_fade.fade_counter = 0
        #UPDATE PLAYER ACTIONS
        if player.alive:
            #SHOOT BULLETS
            if shoot:
                player.shoot()
            #THROW GRENADES
            # NOTE(review): 'throw' is rebound from a bool flag to the Grenade
            # object here; it is reset to False on the q KEYUP below - confirm
            # this reuse is intentional.
            elif throw and grenade_thrown == False and player.grenades > 0:
                throw = Grenade(player.rect.centerx + (0.5 * player.rect.size[0] * player.direction), player.rect.top, player.direction)
                grenade_group.add(throw)
                player.grenades -= 1
                grenade_thrown = True
            #PLAYER ACTIONS
            if player.in_air:
                player.update_action(2) #2 : jump
            elif shoot:
                player.update_action(4) #4 : attack
            elif moving_left or moving_right:
                player.update_action(1) #1 : run
            else:
                player.update_action(0) #0 : idle
            screen_scroll, level_complete = player.move(moving_left, moving_right)
            bg_scroll -= screen_scroll
            #CHECK IF PLAYER HAS COMPLETED THE LEVEL
            if level_complete:
                start_intro = True
                level += 1
                bg_scroll = 0
                world_data = reset_level()
                if level <= MAX_LEVEL:
                    with open(f'sprites/world_level/level{level}_data.csv', newline = '') as csvfile:
                        df = csv.reader(csvfile, delimiter = ',')
                        for x, row in enumerate(df):
                            for y, tile in enumerate(row):
                                world_data[x][y] = int(tile)
                    world = World()
                    player, health_bar = world.process_data(world_data)
        else:
            # Player dead: freeze scrolling, fade out, offer a restart.
            screen_scroll = 0
            if death_fade.fade():
                if restart_button.draw(SCREEN):
                    death_fade.fade_counter = 0
                    start_intro = True
                    bg_scroll = 0
                    world_data = reset_level()
                    with open(f'sprites/world_level/level{level}_data.csv', newline = '') as csvfile:
                        df = csv.reader(csvfile, delimiter = ',')
                        for x, row in enumerate(df):
                            for y, tile in enumerate(row):
                                world_data[x][y] = int(tile)
                    world = World()
                    player, health_bar = world.process_data(world_data)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        #MOVEMENT OF THE CHARACTER
        if event.type == pygame.KEYDOWN:
            if (event.key == pygame.K_a) or (event.key == pygame.K_LEFT):
                moving_left = True
            if (event.key == pygame.K_d) or (event.key == pygame.K_RIGHT):
                moving_right = True
            if event.key == pygame.K_SPACE:
                shoot = True
            if event.key == pygame.K_q:
                throw = True
            if ((event.key == pygame.K_w) or (event.key == pygame.K_UP)) and player.alive :
                player.jump = True
                jump_snd.play()
            if event.key == pygame.K_ESCAPE:
                run = False
        if event.type == pygame.KEYUP:
            if (event.key == pygame.K_a) or (event.key == pygame.K_LEFT):
                moving_left = False
            if (event.key == pygame.K_d) or (event.key == pygame.K_RIGHT):
                moving_right = False
            if event.key == pygame.K_SPACE:
                shoot = False
            if event.key == pygame.K_q:
                throw = False
                grenade_thrown = False
    pygame.display.update()
pygame.quit() | StarcoderdataPython |
6608025 | import setuptools
# Read the long description for PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="krinn",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="This module helps in getting the price or title of a product",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/BRAINIFII/krinn",
    packages={"krinn"},
    install_requires=[
        "bs4",
        "requests",
        "user_agents",
    ],
    # Optional dev dependencies: install with `pip install krinn[devlp]`.
    extras_require = {
        "devlp":[
            "pytest>=3.7",
        ]
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
) | StarcoderdataPython |
1825522 | # -*- coding: utf-8 -*-
"""Command-line output
Contains most functions, classes, etc. to handle the output of data.
"""
from __future__ import print_function, unicode_literals
from __future__ import absolute_import
from __future__ import division
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import OrderedDict
import sys
import os
# Available space for each label:
free_config = {
'timestamp': 12,
'second': 6,
'detection ID': 13,
'detection type': 24,
'confidence': 11,
'face_recognition_confidence': 28,
'label': 30, # (12, 24)
'labels': 12,
'Valossa concept ID': 19,
'GKG concept ID': 15,
'more information': 50,
'name': 28,
'screentime': 10,
'screentime_s': 10,
'of video length': 17,
'speech valence': 15,
'speech intensity': 17,
'face valence': 15,
'_default': 10,
}
name_config = {
'name': 'Name',
'screentime': 'Time (s)',
'screentime_s': 'Time (s)',
'confidence': 'Confidence',
'face_recognition_confidence': 'Face recognition confidence',
'label': 'Label',
'of video length': 'Of video length',
'visual.context': 'Visual context:',
'human.face': 'Human face:',
'audio.context': 'Audio context:',
'audio.keyword.name.person': 'Audio keyword, person name',
'audio.keyword.name.organization': 'Audio keyword, organization name',
'audio.keyword.name.location': 'Audio keyword, location name',
'audio.keyword.novelty_word': 'Audio keyword, novelty word',
'audio.keyword.name.general': 'Audio keyword, name (general)',
#u'speech valence': u'Speech valence',
}
class MetadataPrinter(object):
    """Base class for the output formatters.

    The constructor immediately prints the header and the first line it is
    given (an OrderedDict row, or a dict carrying a 'summary' key).
    NOTE(review): __metaclass__ is Python-2-only syntax; under Python 3 the
    abstract methods are not actually enforced - confirm intended behaviour.
    """
    __metaclass__ = ABCMeta
    def __init__(self, first_line, output=sys.stdout):
        if type(first_line) is OrderedDict:
            self.print_header(first_line)
            self.print_line(first_line)
        elif "summary" in first_line: # summary
            self.print_header(first_line)
            self.print_line(first_line)
        else:
            raise RuntimeError("Must be OrderedDict!")
    @abstractmethod
    def print_line(self, line_dict):
        pass
    @abstractmethod
    def print_header(self, line_dict):
        pass
    def finish(self):
        # Package-type printers do their final output here; no-op by default.
        pass
    @staticmethod
    def unicode_printer(func, line):
        """Replace characters the output encoding rejects with a question mark.

        NOTE(review): shadowed for most uses by the module-level
        unicode_printer decorator defined below - confirm which one is live.
        """
        try:
            func(line)
        except UnicodeEncodeError:
            # Terminal doesn't support unicode: retry character by character.
            for letter in line:
                try:
                    func(letter)
                except UnicodeEncodeError:
                    func('?')
def unicode_printer(func):
    """Wrap *func* so characters the output encoding rejects become '?'.

    If func(line) raises UnicodeEncodeError (e.g. a terminal without unicode
    support), the line is re-emitted one character at a time, substituting a
    question mark for every character that still cannot be encoded.
    """
    def wrapper(line):
        try:
            func(line)
            return
        except UnicodeEncodeError:
            pass
        # Fallback path: emit character by character, masking offenders.
        for char in line:
            try:
                func(char)
            except UnicodeEncodeError:
                func('?')
    return wrapper
class MetadataCSVPrinter(MetadataPrinter):
    """Writes metadata rows as CSV; works on both Python 2 and Python 3."""
    def __init__(self, header_line, output=sys.stdout):
        if sys.version_info[0] < 3:
            from .lib.utils import UnicodeWriter
            self.writer = UnicodeWriter(output, lineterminator='\n')
        else:
            import csv
            self.writer = csv.writer(output, lineterminator='\n')
        # Summary payloads are routed through the dedicated table printer.
        if type(header_line) == dict and "summary" in header_line:
            self.print_line = self.print_summary
        super(MetadataCSVPrinter, self).__init__(header_line)
    def print_header(self, line_dict):
        # Header row is the dict's keys; fall back to UTF-8 bytes when the
        # stream cannot encode unicode directly.
        try:
            self.writer.writerow(line_dict.keys())
        except UnicodeEncodeError:
            new_line_list = [cell.encode('utf-8') for cell in line_dict.keys()]
            self.writer.writerow(new_line_list)
            # Output is not anything sensible as used terminal doesn't support unicode !
    def print_line(self, line_dict, combine=None):
        try:
            self.writer.writerow(line_dict.values())
        except UnicodeEncodeError:
            new_line_list = [cell.encode('utf-8') for cell in line_dict.values()]
            self.writer.writerow(new_line_list)
            # Output is not anything sensible as used terminal doesn't support unicode !
    def print_summary(self, summary):
        """Write one CSV table (type marker, header, rows) per detection type."""
        for dtype in summary["summary"]:
            header_row = summary["summary"][dtype][0].keys()
            self.writer.writerow([dtype] + [''] * (len(header_row) - 1))
            self.writer.writerow(header_row)
            for item in summary["summary"][dtype]:
                # Normalise screentime to two decimals for readability.
                if "screentime_s" in item:
                    item["screentime_s"] = "{:.2f}".format(float(item["screentime_s"]))
                self.writer.writerow(item.values())
class MetadataJSONPrinter(MetadataPrinter):
    # Placeholder: JSON output is not implemented; the abstract print_line /
    # print_header methods are inherited unimplemented.
    pass
class MetadataFreePrinter(MetadataPrinter):
    """Prints metadata as padded, human-readable columns for a terminal."""

    def __init__(self, first_line, output=sys.stdout):
        self.write = self._writer(output)
        # Summary payloads get a dedicated printer; keep the plain row
        # printer reachable as print_row for the summary tables.
        if type(first_line) == dict and "summary" in first_line:
            self.print_row = self.print_line
            self.print_line = self.print_summary
        self.combine = None
        self.on_one_line = None
        super(MetadataFreePrinter, self).__init__(first_line)
        try:
            rows, columns = os.popen('stty size', 'r').read().split()
        except ValueError:
            # Default values for platforms that don't support 'stty size'.
            rows, columns = (40, 180)
        self.columns = int(columns)

    @staticmethod
    def _writer(output):
        """Build a line writer that degrades gracefully on non-unicode streams."""
        def wrapper(line):
            try:
                print(line, file=output)
            except UnicodeEncodeError:
                # Terminal cannot encode some characters: emit them one by
                # one, replacing offenders with '?'.
                for letter in line:
                    try:
                        output.write(letter)
                    except UnicodeEncodeError:
                        output.write(u'?')
                output.write(u'\n')
        return wrapper

    def print_summary(self, summary):
        """Print one padded table per detection type in the summary.

        BUGFIX: the previous version assigned into dict.keys() (a
        non-indexable view on Python 3) and then passed a plain list to
        print_row(), which rejects anything that is not an OrderedDict. The
        header is now built as an OrderedDict and rendered via _print_line().
        """
        for dtype in summary["summary"]:
            self.write(u"Detection type: " + dtype)
            keys = list(summary["summary"][dtype][0].keys())
            spaces = [free_config.get(key, free_config["_default"]) for key in keys]
            header_row = OrderedDict(
                (key, name_config.get(key, key.capitalize())) for key in keys)
            self._print_line(header_row)
            self.write('-' * (sum(spaces) + len(spaces) - 1))
            for item in summary["summary"][dtype]:
                # Render whichever confidence field is present as a percentage.
                c = None
                if "confidence" in item:
                    c = "confidence"
                elif "face_recognition_confidence" in item:
                    c = "face_recognition_confidence"
                if c and item[c] != "-":
                    item[c] = "{:.1f}%".format(item[c] * 100.0)
                self.print_row(item)
            self.write('\n')

    def print_header(self, line_dict):
        if type(line_dict) is not OrderedDict:
            raise RuntimeError("Must be ordered dict...")
        self._print_line(line_dict, is_header=True)

    def print_line(self, line_dict, combine=None):
        if type(line_dict) is not OrderedDict:
            raise RuntimeError("Must be ordered dict...")
        self._print_line(line_dict)

    def _print_line(self, line_dict, combine=None, is_header=False):
        """Render one OrderedDict as a single padded line.

        The first column is left-aligned, later ones right-aligned; the
        'more information' column is appended unpadded.
        """
        line = ""
        for header, cell in line_dict.items():
            space = free_config[header] if header in free_config else len(header) + 1
            text = header if is_header else cell
            if header == 'more information':
                line += "{}".format(text)
            elif line == "":
                line += "{:<{s}}".format(text, s=space)
            else:
                line += "{:>{s}} ".format(text, s=space)
        self.write(line)
class MetadataSubtitlePrinter(MetadataPrinter):
    """Prints metadata as SubRip (.srt) subtitle entries."""
    def __init__(self, first_line, output=sys.stdout):
        self.writer = output
        self.line_number = 1  # srt cue indices start at 1
        # Wrap the raw write so non-encodable characters degrade gracefully
        # (unicode_printer is defined elsewhere in this file — presumably a
        # '?'-substituting writer; confirm).
        self.writer = unicode_printer(output.write)
        super(MetadataSubtitlePrinter, self).__init__(first_line)
    def print_header(self, first_line):
        """srt does not have headers."""
        return
    def print_line(self, line_dict, **kwargs):
        """Emit one srt cue: index, time range, then labels on up to two lines."""
        self.writer(str(self.line_number)+'\n')
        self.line_number += 1
        self.writer("{} --> {}\n".format(self.srt_timestamp(line_dict["start_time"]),
                                         self.srt_timestamp(line_dict["end_time"])))
        # Split long label lists roughly in half across two subtitle lines.
        limit = len(line_dict["labels"]) // 2 if len(line_dict["labels"]) > 5 else None
        line = ", ".join(line_dict["labels"][:limit])
        if limit:
            line += "\n" + ", ".join(line_dict["labels"][limit:])
        self.writer(line + "\n\n")
    @staticmethod
    def srt_timestamp(seconds_par):
        """Transforms float into srt timestamp
        Format: hh:mm:ss,mmm
        """
        hours = int(seconds_par // 3600)
        minutes = int(seconds_par // 60) - 60 * hours
        sec = seconds_par - 3600 * hours - 60 * minutes
        seconds = int(sec)
        milliseconds = int(round(1000 * (sec - seconds)))
        # Float causes inaccuracy: rounding may carry over into the next unit.
        if milliseconds == 1000:
            seconds += 1
            milliseconds = 0
        if seconds == 60:
            minutes += 1
            seconds = 0
        if minutes == 60:
            hours += 1
            minutes = 0
        # Sanity check: everything should be in range after the carries above.
        if minutes >= 60 or seconds >= 60 or milliseconds >= 1000:
            e_msg = "srt_timestamp fail: {:02}:{:02}:{:02},{:03}".format(
                hours, minutes, seconds, milliseconds)
            e_msg += "Input: {}".format(seconds_par)
            raise RuntimeError(e_msg)
        return "{:02}:{:02}:{:02},{:03}".format(
            hours, minutes, seconds, milliseconds)
| StarcoderdataPython |
4876964 | from django.shortcuts import render
from rest_framework import status
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ModelViewSet
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.parsers import FileUploadParser
from .models import Post
from .serializers import PostSerializer
import STT_models.stt_engine as stt
from SpeechAce.speechace import SpeechAce
import os
import scipy.io.wavfile
import base64
# Create Views here
class PostViewSet(ModelViewSet):
    """CRUD API for Post objects; open to unauthenticated clients (AllowAny)."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = [AllowAny]
class FileUploadView(APIView):
    """Accept an uploaded audio file plus a reference script and return
    transcription/pronunciation scoring data as JSON."""

    # NOTE(review): DRF consults `parser_classes` (plural); this attribute is
    # never read by APIView, so the default parsers remain in effect (which is
    # what lets request.data carry both 'file' and 'script') — confirm intent.
    parser_class = (FileUploadParser,)

    def put(self, request, format=None):
        """Score request.data['file'] against request.data['script']."""
        print("REQUEST FILES: ")
        print(request.data)
        file_obj = request.data['file']
        script = request.data['script']
        stt_result, user_text, phonetic_transcription, correct_pronunciation, is_correct, speechace_score, tts_result, orig_audio, google_stt_result, syllable_count, correct_syllable_count, word_count, correct_word_count, ielts_estimate, pte_estimate \
            = handle_uploaded_file(file_obj, script)
        # base64.b64encode returns *bytes*; DRF's JSON encoder refuses raw
        # bytes, so decode the (pure-ASCII) base64 output to str first.
        return Response(data={"stt_result": stt_result,
                              "user_text": user_text,
                              "speechace_score": speechace_score,
                              "phonetic_transcription": phonetic_transcription,
                              "correct_pronunciation": correct_pronunciation,
                              "is_correct": is_correct,
                              "tts_result": base64.b64encode(tts_result).decode("ascii"),
                              "orig_audio": base64.b64encode(orig_audio).decode("ascii"),
                              "google_stt_result": google_stt_result,
                              "syllable_count": syllable_count,
                              "correct_syllable_count": correct_syllable_count,
                              "word_count": word_count,
                              "correct_word_count": correct_word_count,
                              "ielts_estimate": ielts_estimate,
                              "pte_estimate": pte_estimate,
                              }, status=status.HTTP_201_CREATED)
def handle_uploaded_file(raw_audio, script):
    """Persist the uploaded audio, run TTS/STT/SpeechAce pipelines, and return
    the combined scoring tuple consumed by FileUploadView.put.

    NOTE(review): the file is written under str(raw_audio) — presumably the
    uploaded file's name in the current working directory; confirm this is safe.
    """
    # f is class UploadedFile
    # https://docs.djangoproject.com/en/3.1/ref/files/uploads/#django.core.files.uploadedfile.UploadedFile
    # TODO: Transcribe
    # Make this function in a separate file if needed
    audio_string = (str(raw_audio))
    # print(raw_audio.read())
    # Write the raw upload to disk so the audio tools can read it by path.
    with open(audio_string, mode='bw') as f:
        orig_audio = raw_audio.read()
        f.write(orig_audio)
    new_audio_path = stt.mp3m4a_to_wav(audio_string)
    # stt_result = stt.MozillaSTT('myfile.wav')
    tts_path, response_audio = stt.google_tts(script, audio_string)
    response, google_stt_result = stt.google_transcribe(new_audio_path)
    # print (response, google_stt_result)
    # stt.play_audio_pydub(tts_path)
    # stt.play_audio_pydub('myfile.wav')
    # stt_result = stt.simple_word_scorer(stt.script_converter(script), response)
    speechace = SpeechAce(user_text=script, user_file=new_audio_path)
    user_text, phonetic_transcription, correct_pronunciation, is_correct = speechace.score_pronunciation()
    speechace_score, syllable_count, correct_syllable_count, word_count, correct_word_count, ielts_estimate, pte_estimate = speechace.get_score()
    return "unused_text", user_text, phonetic_transcription, correct_pronunciation, is_correct, speechace_score, response_audio, orig_audio, google_stt_result, syllable_count, correct_syllable_count, word_count, correct_word_count, ielts_estimate, pte_estimate | StarcoderdataPython
118293 | # Copyright 2018--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Code for scoring.
"""
import logging
import math
import time
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import torch as pt
from . import constants as C
from . import data_io
from . import inference
from . import vocab
from .beam_search import CandidateScorer
from .model import SockeyeModel
from .output_handler import OutputHandler
logger = logging.getLogger(__name__)
class BatchScorer(pt.nn.Module):
    """Computes per-sequence scores (sum of gold-token log-probabilities,
    length-adjusted by `scorer`) for a batch of model outputs.

    :param scorer: CandidateScorer applying the length penalty/normalization.
    :param score_type: C.SCORING_TYPE_DEFAULT or C.SCORING_TYPE_NEGLOGPROB.
    :param constant_length_ratio: If > 0, overrides the predicted length ratio.
    :param softmax_temperature: Not implemented; must be None.
    """

    def __init__(self,
                 scorer: CandidateScorer,
                 score_type: str = C.SCORING_TYPE_DEFAULT,
                 constant_length_ratio: Optional[float] = None,
                 softmax_temperature: Optional[float] = None) -> None:
        super().__init__()
        self.score_type = score_type
        self.scorer = scorer
        self.constant_length_ratio = constant_length_ratio
        assert softmax_temperature is None, 'not implemented'

    def forward(self,
                logits, labels,
                length_ratio, source_length, target_length,
                factor_logits_and_labels: Optional[List[Tuple[pt.Tensor, pt.Tensor]]] = None):
        """
        :param logits: Model logits for primary output words. Shape: (batch, length, vocab_size).
        :param labels: Gold targets. Shape: (batch, length).
        :param length_ratio: Length Ratios. Shape: (batch,).
        :param source_length: Source lengths. Shape: (batch,).
        :param target_length: Target lengths. Shape: (batch,).
        :param factor_logits_and_labels: List of target factor logits and corresponding labels.
               Shape: (batch, length, factor_vocab_size).
        :return: Sequence scores. Shape: (batch,).
        """
        logprobs = logits.log_softmax(dim=-1)
        # Select the label log probability at each position.
        # logprobs and scores: (batch_size, target_seq_len)
        # squeeze(-1), not squeeze(): a bare squeeze would also collapse a
        # batch or length dimension of size 1 and corrupt the shape.
        token_scores = logprobs.gather(dim=-1, index=labels.unsqueeze(-1)).squeeze(-1)
        if self.score_type == C.SCORING_TYPE_NEGLOGPROB:
            token_scores = -token_scores
        # Mask pad positions, sum, then apply length penalty. Shape: (batch_size, 1)
        scores = token_scores.masked_fill_(labels == C.PAD_ID, .0).sum(dim=-1, keepdims=True)
        if self.constant_length_ratio is not None and self.constant_length_ratio > 0.0:
            predicted_output_length = source_length * self.constant_length_ratio
        else:
            predicted_output_length = source_length * length_ratio
        scores = self.scorer(scores, target_length, predicted_output_length)
        if factor_logits_and_labels is not None:
            factor_scores = []  # type: List[pt.Tensor]
            for factor_logit, factor_label in factor_logits_and_labels:
                factor_logprobs = factor_logit.log_softmax(dim=-1)
                factor_token_scores = factor_logprobs.gather(dim=-1, index=factor_label.unsqueeze(-1)).squeeze(-1)
                if self.score_type == C.SCORING_TYPE_NEGLOGPROB:
                    factor_token_scores = -factor_token_scores
                fs = factor_token_scores.masked_fill_(factor_label == C.PAD_ID, .0).sum(dim=-1, keepdims=True)  # type: ignore
                # Note: factor_scores are not normalized by length
                factor_scores.append(fs)
            scores = pt.cat([scores] + factor_scores, dim=1)
        return scores
class Scorer:
    """
    Scorer class takes a ScoringModel and uses it to score a stream of parallel sentences.
    It also takes the vocabularies so that the original sentences can be printed out, if desired.
    :param model: The model to score with.
    :param batch_scorer: BatchScorer block to score each batch.
    :param source_vocabs: The source vocabularies.
    :param target_vocabs: The target vocabularies.
    :param device: Torch device to load batches to (should be set to model device).
    """
    def __init__(self,
                 model: SockeyeModel,
                 batch_scorer: BatchScorer,
                 source_vocabs: List[vocab.Vocab],
                 target_vocabs: List[vocab.Vocab],
                 device: pt.device) -> None:
        # Inverse vocabularies (id -> token) for printing original sentences.
        self.source_vocab_inv = vocab.reverse_vocab(source_vocabs[0])
        self.target_vocab_inv = vocab.reverse_vocab(target_vocabs[0])
        self.model = model
        self.traced_model = None  # type: Optional[pt.jit.ScriptModule]
        self.batch_scorer = batch_scorer
        self.traced_batch_scorer = None  # type: Optional[pt.jit.ScriptModule]
        self.device = device
        # Special tokens excluded when reconstructing sentences for output.
        self.exclude_list = {C.BOS_ID, C.EOS_ID, C.PAD_ID}
        self.num_target_factors = self.model.num_target_factors
    def score_batch(self, batch: data_io.Batch):
        """Score one batch; returns a (batch, num_target_factors) numpy array.

        The model and batch scorer are JIT-traced lazily on the first batch.
        """
        # TODO: scoring should support multiple devices
        batch = batch.load(self.device)
        model_inputs = (batch.source, batch.source_length, batch.target, batch.target_length)
        if self.traced_model is None:
            self.traced_model = pt.jit.trace(self.model, model_inputs, strict=False)
        outputs = self.traced_model(*model_inputs)  # type: Dict[str, pt.Tensor]
        scorer_inputs = [outputs[C.LOGITS_NAME],
                         batch.labels[C.TARGET_LABEL_NAME].long(),
                         outputs.get(C.LENRATIO_NAME, pt.zeros_like(batch.source_length)),
                         batch.source_length,
                         batch.target_length]  # type: List[Union[pt.Tensor, List[Tuple[pt.Tensor, pt.Tensor]]]]
        if self.num_target_factors > 1:
            # Pair each factor's logits with its gold labels for the scorer.
            factor_logits_and_labels = [(outputs[C.FACTOR_LOGITS_NAME % i],
                                         batch.labels[C.TARGET_FACTOR_LABEL_NAME % i].long())
                                        for i in range(1, self.num_target_factors)]
            scorer_inputs.append(factor_logits_and_labels)
        if self.traced_batch_scorer is None:
            logger.debug("Tracing batch_scorer")
            self.traced_batch_scorer = pt.jit.trace(self.batch_scorer, scorer_inputs, strict=False)
        scores = self.traced_batch_scorer(*scorer_inputs)  # (batch, num_target_factors)
        return scores.cpu().numpy()
    @pt.inference_mode(True)
    def score(self, score_iter: data_io.BaseParallelSampleIter, output_handler: OutputHandler):
        """Score every batch from *score_iter*, emitting one output per sentence."""
        total_time = 0.
        sentence_no = 0
        batch_no = 0
        for batch_no, batch in enumerate(score_iter, 1):
            batch_tic = time.time()
            batch_scores = self.score_batch(batch)
            batch_time = time.time() - batch_tic
            total_time += batch_time
            for sentno, (source, target, scores) in enumerate(zip(batch.source[:, :, 0],
                                                                  batch.target[:, :, 0],
                                                                  batch_scores), 1):
                sentence_no += 1
                # Transform arguments in preparation for printing
                source_ids = source.tolist()
                source_tokens = list(data_io.ids2tokens(source_ids, self.source_vocab_inv, self.exclude_list))
                target_ids = target.tolist()
                target_tokens = list(data_io.ids2tokens(target_ids, self.target_vocab_inv, self.exclude_list))
                target_string = C.TOKEN_SEPARATOR.join(target_tokens)
                # Report a score of -inf for invalid sentence pairs (empty source and/or target)
                if source[0] == C.PAD_ID or target[0] == C.PAD_ID:
                    scores = [-np.inf] * self.num_target_factors
                # Output handling routines require us to make use of inference classes.
                output_handler.handle(inference.TranslatorInput(sentence_no, source_tokens),
                                      inference.TranslatorOutput(sentence_no, target_string,
                                                                 target_tokens,
                                                                 score=scores[0],
                                                                 factor_scores=scores[1:]),
                                      batch_time)
        if sentence_no != 0:
            # NOTE(review): ceil(sentence_no / batch_no) is the average batch
            # size, yet it is printed as the batch count — verify the message.
            logger.info("Processed %d lines in %d batches. Total time: %.4f, sec/sent: %.4f, sent/sec: %.4f",
                        sentence_no, math.ceil(sentence_no / batch_no), total_time,
                        total_time / sentence_no, sentence_no / total_time)
        else:
            logger.info("Processed 0 lines.")
| StarcoderdataPython |
3402073 | <reponame>zcong1993/django<filename>start/apps/images/serializers.py
from rest_framework import serializers
from .models import Image
from .fields import ChoiceField
from .constants import Gender
class ImageSerializer(serializers.ModelSerializer):
    """DRF serializer for Image; `gender` uses the custom ChoiceField over the Gender enum."""
    id = serializers.IntegerField(read_only=True)
    gender = ChoiceField(Gender)
    class Meta:
        model = Image
        fields = ("id", "name", "url", "gender")
| StarcoderdataPython |
4915464 | """
embedeval
~~~~~~~~~
NLP Embedding Evaluation Tool
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
from typing import List, Tuple
from pathlib import Path
import numpy as np
from gensim.models import KeyedVectors
from embedeval.embedding import WordEmbedding
from embedeval.errors import EmbedevalError
class KeyedVectorsWordEmbedding(WordEmbedding):
    """Represents a word2vec KeyedVectors specific Word Embedding
    The word2vec file will be parsed by ``gensim``.
    The gensim ``KeyedVectors`` instance is made available
    in the ``self.keyed_vectors`` attribute.
    """
    def __init__(self, path, keyed_vectors):
        # Source path the embedding was loaded from (exposed via ``path``).
        self._path = path
        #: Holds the gensim KeyedVectors instance
        self.keyed_vectors = keyed_vectors
    @property
    def path(self) -> Path:
        """Path of the word2vec file this embedding was loaded from."""
        return self._path  # pragma: no cover
    @property
    def shape(self) -> Tuple[int, int]:
        """(vocabulary size, vector dimensionality) of the embedding matrix."""
        return (len(self.keyed_vectors.vectors), self.keyed_vectors.vector_size)
    def get_words(self) -> List[str]:
        """Return all words in the embedding's vocabulary."""
        return list(self.keyed_vectors.vocab.keys())
    def get_word_vector(self, word: str) -> np.array:
        """Return the embedding vector for *word* (raises if word is unknown)."""
        return self.keyed_vectors.word_vec(word)
def load_embedding(path: Path, binary=False) -> KeyedVectorsWordEmbedding:
    """Load the given Word2Vec Word Embedding using gensim

    The ``gensim.load_word2vec_format`` function is used to parse
    the word2vec Embdding file.
    The ``gensim.models.keyedvectors.KeyedVectors`` is wrapped in the
    embedeval specific ``WordEmbedding`` object.

    :param path: path of the word2vec file to parse.
    :param binary: whether the file is in word2vec binary format.
    :raises EmbedevalError: if gensim fails to parse the file.
    """
    try:
        keyed_vectors = KeyedVectors.load_word2vec_format(
            path, binary=binary, unicode_errors="ignore"
        )
    except Exception as exc:
        # Chain the original exception so the gensim traceback is preserved
        # for debugging instead of being swallowed.
        raise EmbedevalError(
            f"Failed to parse Embedding with gensim KeyedVectors: {exc}"
        ) from exc
    return KeyedVectorsWordEmbedding(path, keyed_vectors)
| StarcoderdataPython |
3373582 | """
Simple Calculator
=================
Write a program to take two integers as input and output their sum.
Sample Input:
2
8
Sample Output:
10
"""
# Read two integers, one per line, from stdin and print their sum.
x, y = int(input()), int(input())
print(x + y) | StarcoderdataPython
5073285 | <gh_stars>10-100
from collections import defaultdict
import numpy as np
import torch
from torchvision import datasets
from torchvision.datasets.vision import VisionDataset
from code.util.general import make_valid_from_train
# Reference: https://github.com/optimass/Maximally_Interfered_Retrieval/blob/master/data.py
# We use 1 dataloader rather than one per task
class mnist5k(VisionDataset):
    """Split-MNIST with 500 training samples per class (hence "5k").

    The 10 digit classes are partitioned into consecutive 2-class tasks.
    In non-stationary mode, indexing walks the tasks in order, looping each
    task's data ``num_iterations`` times; in stationary mode the samples are
    shuffled once and looped as a whole.
    """
    # Fraction of the training split kept for training (rest goes to validation).
    train_val_pc = 0.95
    def __init__(self, root, data_type=None, non_stat=False, num_iterations=None, classes_per_task=None):
        super(mnist5k, self).__init__(root, transform=None, target_transform=None)
        self.data_type = data_type
        self.non_stat = non_stat
        self.classes_per_task = classes_per_task
        assert(self.classes_per_task == 2)
        self.num_classes = 10
        self.orig_train_samples_per_class = 500
        self.num_iterations = num_iterations
        assert (num_iterations is not None)
        # Load data ------------------------------------------------------------------------------------
        # splits are deterministic
        # follows https://github.com/optimass/Maximally_Interfered_Retrieval/
        train = datasets.MNIST(root, train=True, download=False)
        test = datasets.MNIST(root, train=False, download=False)
        train_x, train_y = train.data, train.targets  # 60000, 28, 28; 60000
        test_x, test_y = test.data, test.targets
        # sort by label, accumulating classes_per_task classes into each task
        train_ds, test_ds = [], []  # doesn't really matter for test_ds because of batchnorm tracking
        # stats
        task_i = 0
        current_train, current_test = None, None
        self.task_dict_classes = defaultdict(list)
        for i in range(self.num_classes):
            self.task_dict_classes[task_i].append(i)
            train_i = train_y == i
            test_i = test_y == i
            if current_train is None:
                current_train, current_test = (train_x[train_i], train_y[train_i]), (
                    test_x[test_i], test_y[test_i])
            else:
                current_train = (torch.cat((current_train[0], train_x[train_i]), dim=0),
                                 torch.cat((current_train[1], train_y[train_i]), dim=0))
                current_test = (torch.cat((current_test[0], test_x[test_i]), dim=0),
                                torch.cat((current_test[1], test_y[test_i]), dim=0))
            # Close off a task once classes_per_task classes are accumulated.
            if i % self.classes_per_task == (self.classes_per_task - 1):
                train_ds += [current_train]
                test_ds += [current_test]
                current_train, current_test = None, None
                task_i += 1
        # separate validation set (randomised)
        train_ds, val_ds = make_valid_from_train(train_ds, cut=mnist5k.train_val_pc)
        # flatten into single list, and truncate training data into 500 per class
        data_summary = {"train": train_ds, "val": val_ds, "test": test_ds}[self.data_type]
        self.data = []  # list of tensors
        self.targets = []
        counts_per_class = torch.zeros(self.num_classes, dtype=torch.int)
        task_lengths = []
        for task_ds in data_summary:
            assert (len(task_ds[1]) == len(task_ds[0]))
            num_samples_task = 0
            for i in range(len(task_ds[1])):
                target = task_ds[1][i]
                # Cap the training split at orig_train_samples_per_class per class.
                if self.data_type == "train" and counts_per_class[
                        target] == self.orig_train_samples_per_class:
                    continue
                else:
                    self.data.append(task_ds[0][i])
                    self.targets.append(target)
                    counts_per_class[target] += 1
                    num_samples_task += 1
            task_lengths.append(num_samples_task)
        print(self.task_dict_classes)
        # if stationary, shuffle
        if not self.non_stat:
            perm = np.random.permutation(len(self.data))
            self.data, self.targets = [self.data[perm_i] for perm_i in perm], [self.targets[perm_i] for
                                                                               perm_i in perm]
        self.orig_len = len(self.data)
        self.actual_len = self.orig_len * self.num_iterations
        if self.non_stat:  # we need to care about looping over in task order
            assert (self.orig_len % self.num_classes == 0)
            self.orig_samples_per_task = int(
                self.orig_len / self.num_classes) * self.classes_per_task  # equally split among tasks
            self.actual_samples_per_task = self.orig_samples_per_task * self.num_iterations
            # sanity
            if self.data_type == "train": assert (self.orig_samples_per_task == 1000)
            print("orig samples per task: %d, actual samples per task: %d" % (
                self.orig_samples_per_task, self.actual_samples_per_task))
    def __len__(self):
        # NOTE: the original file defined __len__ twice with identical bodies;
        # the redundant duplicate has been removed.
        return self.actual_len
    def __getitem__(self, index):
        assert (index < self.actual_len)
        if not self.non_stat:
            index = index % self.orig_len  # looping over stationary data is arbitrary
        else:
            # Map the "virtual" index back to a real sample: which task we are
            # in, then the offset within that task's original data.
            task_i, actual_offset = divmod(index, self.actual_samples_per_task)
            _, orig_offset = divmod(actual_offset, self.orig_samples_per_task)
            index = task_i * self.orig_samples_per_task + orig_offset
        sample, target = self.data[index], self.targets[index]
        sample = sample.view(-1).float() / 255.  # flatten and turn from uint8 (255) -> [0., 1.]
        assert (self.transform is None)
        assert (self.target_transform is None)
        return sample, target
| StarcoderdataPython |
341829 | import factory
from factory import fuzzy
from factory.django import DjangoModelFactory
from . import models
class ProductFactory(DjangoModelFactory):
    """Factory producing Product instances with random name and unit price."""
    name = fuzzy.FuzzyText()
    unit_price = fuzzy.FuzzyDecimal(1, 200)
    class Meta:
        model = models.Product
class MyLineItemFactory(DjangoModelFactory):
    """Factory producing MyLineItem instances with a nested random Product."""
    product = factory.SubFactory(ProductFactory)
    quantity = fuzzy.FuzzyInteger(1, 10)
    class Meta:
        model = models.MyLineItem
class AustralianDeliveryAddressFactory(DjangoModelFactory):
    """Factory producing delivery addresses with Australian-locale fake data."""
    addressee = factory.Faker("name", locale="en_AU")
    address = factory.Faker("street_address", locale="en_AU")
    suburb = factory.Faker("city", locale="en_AU")
    state = factory.Faker("state_abbr", locale="en_AU")
    postcode = factory.Faker("postcode", locale="en_AU")
    class Meta:
        model = models.AustralianDeliveryAddress
class CartDiscountFactory(DjangoModelFactory):
    """Factory producing CartDiscount instances with a random 1-99 percentage."""
    percentage = fuzzy.FuzzyInteger(1, 99)
    class Meta:
        model = models.CartDiscount
| StarcoderdataPython |
4812749 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: Alentis
# OS: NetPing
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import json
import re
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
    """NOC profile for Alentis NetPing devices."""
    name = "Alentis.NetPing"
    rx_data = re.compile(r"^var data\s*=\s*(?P<var_data>{.+})", re.MULTILINE | re.DOTALL)
    def var_data(self, script, url):
        """Fetch *url* via the script's HTTP client and convert the embedded
        JavaScript ``var data = {...}`` object into a Python dict.

        Returns an empty dict when the page cannot be fetched or contains
        no ``var data`` assignment.
        """
        try:
            page = script.http.get(url)
        except Exception:
            page = ""
        match = self.rx_data.search(page)
        if not match:
            return {}
        # Massage the JS object literal towards JSON: double-quote strings,
        # then open a quote after every '{' and ',' so bare keys get quoted...
        raw = (
            match.group("var_data")
            .replace("'", '"')
            .replace("{", '{"')
            .replace(",", ',"')
        )
        # ...and close each key's quote just before its first ':'.
        chunks = [chunk.replace(":", '":', 1) for chunk in raw.split(",")]
        return json.loads(",".join(chunks))
| StarcoderdataPython |
5003934 | <reponame>joe28965/cuorabot<filename>cuorabot_bringup/launch/bringup_launch.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration, Command
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Build the ROS 2 launch description: robot_state_publisher fed by the
    cuorabot xacro, plus the cuorabot_control launch file."""
    # URDF is generated at launch time from this xacro file.
    xacro_path = os.path.join(get_package_share_directory('cuorabot_description'), 'urdf', 'cuorabot.urdf.xacro')
    # Launch configuration variables specific to simulation
    use_sim_time = LaunchConfiguration('use_sim_time')
    # Declare the launch arguments
    declare_use_sim_time_cmd = DeclareLaunchArgument(
        'use_sim_time',
        default_value='false',
        description='Use simulation (Gazebo) clock if true')
    # Specify the actions
    start_robot_state_publisher_cmd = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        name='robot_state_publisher',
        output='screen',
        parameters=[{
            'use_sim_time': use_sim_time,
            # Expand the xacro to a URDF string at launch time.
            'robot_description':Command(['xacro',' ', xacro_path])
        }]
    )
    control_launch_cmd = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(os.path.join(get_package_share_directory('cuorabot_control'), 'launch', 'control_launch.py'))
    )
    ld = LaunchDescription()
    # Declare the launch options
    ld.add_action(declare_use_sim_time_cmd)
    # Add any conditioned actions
    ld.add_action(start_robot_state_publisher_cmd)
    ld.add_action(control_launch_cmd)
    return ld | StarcoderdataPython
3564308 | <reponame>AntoineDao/specklepy
from specklepy.logging.exceptions import SpeckleException
from specklepy.api.models import User
import pytest
@pytest.mark.run(order=1)
class TestUser:
    """Integration tests for the user API; tests share state through the
    user_dict / second_user_dict fixtures (ids are filled in as they run)."""
    def test_user_get_self(self, client, user_dict):
        """get() with no id returns the authenticated user."""
        fetched_user = client.user.get()
        assert isinstance(fetched_user, User)
        assert fetched_user.name == user_dict["name"]
        assert fetched_user.email == user_dict["email"]
        # Stash the id for later tests.
        user_dict["id"] = fetched_user.id
    def test_user_search(self, client, second_user_dict):
        """Searching by a name prefix finds the second user."""
        search_results = client.user.search(search_query=second_user_dict["name"][:5])
        assert isinstance(search_results, list)
        assert isinstance(search_results[0], User)
        assert search_results[0].name == second_user_dict["name"]
        second_user_dict["id"] = search_results[0].id
    def test_user_get(self, client, second_user_dict):
        """get() with an explicit id returns that user."""
        fetched_user = client.user.get(id=second_user_dict["id"])
        assert isinstance(fetched_user, User)
        assert fetched_user.name == second_user_dict["name"]
        assert fetched_user.email == second_user_dict["email"]
        second_user_dict["id"] = fetched_user.id
    def test_user_update(self, client):
        """update() without arguments fails; with bio it persists the change."""
        bio = "i am a ghost in the machine"
        failed_update = client.user.update()
        updated = client.user.update(bio=bio)
        me = client.user.get()
        assert isinstance(failed_update, SpeckleException)
        assert updated is True
        assert me.bio == bio
| StarcoderdataPython |
9798635 | from polyaxon.config_manager import config
# Kubernetes-related settings, read from the Polyaxon config manager.
POLYAXON_K8S_APP_NAME = config.get_string('POLYAXON_K8S_APP_NAME')
POLYAXON_K8S_API_HOST = config.get_string('POLYAXON_K8S_API_HOST')
POLYAXON_K8S_API_PORT = config.get_int('POLYAXON_K8S_API_PORT')
POLYAXON_K8S_APP_CONFIG_NAME = config.get_string('POLYAXON_K8S_APP_CONFIG_NAME')
POLYAXON_K8S_APP_SECRET_NAME = config.get_string('POLYAXON_K8S_APP_SECRET_NAME')
POLYAXON_K8S_RABBITMQ_SECRET_NAME = config.get_string('POLYAXON_K8S_RABBITMQ_SECRET_NAME')
# Optional out-of-cluster credentials; when both token and host are present,
# an explicit kubernetes client Configuration is built below.
K8S_AUTHORISATION = config.get_string('POLYAXON_K8S_AUTHORISATION',
                                      is_optional=True,
                                      is_secret=True)
K8S_HOST = config.get_string('POLYAXON_K8S_HOST', is_optional=True)
SSL_CA_CERT = config.get_string('POLYAXON_K8S_SSL_CA_CERT', is_optional=True)
# None means "use in-cluster / default configuration".
K8S_CONFIG = None
if K8S_AUTHORISATION and K8S_HOST:
    import urllib3
    from kubernetes import client
    K8S_CONFIG = client.Configuration()
    K8S_CONFIG.api_key['authorization'] = K8S_AUTHORISATION
    K8S_CONFIG.api_key_prefix['authorization'] = 'Bearer'
    K8S_CONFIG.host = K8S_HOST
    if SSL_CA_CERT:
        K8S_CONFIG.verify_ssl = True
        K8S_CONFIG.ssl_ca_cert = SSL_CA_CERT
    else:
        # No CA cert supplied: skip TLS verification and silence the warnings.
        K8S_CONFIG.verify_ssl = False
        urllib3.disable_warnings()
| StarcoderdataPython |
3212700 | # type: ignore
import ast
import functools
import re
from copy import deepcopy
from inspect import stack
from pathlib import Path
import executing
import mat2py.config
from mat2py.common.backends import numpy as np
from mat2py.common.logger import logger
from mat2py.common.utils import Singleton
from .array import M, mp_detect_vector
@functools.lru_cache(maxsize=10)
def mp_last_arg_as_kwarg(key: str, value_map: (tuple, dict)):
    """Decorator factory turning a MATLAB-style trailing flag string into a
    keyword argument *key*.

    With a dict *value_map*, the flag is looked up; with a tuple, a single
    entry means the flag becomes ``True``, otherwise the flag string itself
    is passed through.

    NOTE(review): a dict *value_map* is unhashable and would defeat the
    ``lru_cache`` above — presumably only tuples are used in practice; confirm.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Only act when the trailing positional argument is a recognised flag.
            if not (args and isinstance(args[-1], str) and args[-1] in value_map):
                return func(*args, **kwargs)
            flag = args[-1]
            if isinstance(value_map, dict):
                mapped = value_map[flag]
            else:
                mapped = True if len(value_map) == 1 else flag
            return func(*args[:-1], **{**kwargs, key: mapped})
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_match_vector_direction(match_arg_position=0, target_arg_position: tuple = None):
    """Decorator factory: reshape vector results so their orientation
    (row vs column) matches the vector argument at *match_arg_position*.

    :param match_arg_position: index of the positional argument whose
        orientation is taken as the reference.
    :param target_arg_position: indices of the results to fix up; None means
        all results.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            res = func(*args, **kwargs)
            # Nothing to match against if the reference argument is absent.
            if len(args) <= match_arg_position:
                return res
            # mp_detect_vector: 0 = not a vector; non-zero encodes orientation
            # (presumably 1 = row, other = column — confirm in array module).
            vec_type = mp_detect_vector(args[match_arg_position])
            if vec_type == 0:
                return res
            # Normalise to a list so single results and tuples share one path.
            new_res = (
                list(res)
                if isinstance(res, tuple)
                else [
                    res,
                ]
            )
            for i in (
                range(len(new_res))
                if target_arg_position is None
                else target_arg_position
            ):
                res_vec_type = mp_detect_vector(new_res[i])
                # Reshape only results that are vectors of the other orientation.
                if res_vec_type != 0 and res_vec_type != vec_type:
                    new_res[i] = new_res[i].reshape(
                        (1, -1) if vec_type == 1 else (-1, 1)
                    )
            return tuple(new_res) if isinstance(res, tuple) else new_res[0]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_argout_wrapper_decorators(nargout: int = 1):
    """Decorator factory wrapping a function's output(s) in mat2py's ``M``
    array type: the single result for ``nargout == 1``, otherwise each
    element of the returned tuple."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if nargout == 1:
                return M[result]
            # Multiple outputs must come back as a tuple; wrap each one.
            assert isinstance(result, tuple)
            return tuple(M[item] for item in result)
        return wrapper
    return decorator
def mp_special_variables(value: float, name: str = ""):
    """Return *value* unchanged; *name* is accepted only so call sites can
    label special constants (e.g. pi, eps) for readability."""
    return value
@functools.lru_cache(maxsize=10)
def mp_pass_values_decorators(args_position: tuple = None):
    """Decorator factory emulating MATLAB's pass-by-value semantics by
    deep-copying positional arguments before the call.

    :param args_position: indices of the arguments to copy; None copies all.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # TODO: can we detect temporary right value and avoid deepcopy for throughput? e.g. sys.getrefcount()
            if args_position is None:
                return func(*deepcopy(list(args)), **kwargs)
            shielded = list(args)
            for pos in args_position:
                if pos < len(shielded):
                    shielded[pos] = deepcopy(shielded[pos])
            return func(*shielded, **kwargs)
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargout_decorators(caller_level: int = 2):
    """Decorator factory emulating MATLAB's ``nargout``: when the caller does
    not pass ``nargout`` explicitly, infer it from the caller's assignment
    statement via stack inspection (see mp_nargout_from_stack)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, nargout=None, **kwargs):
            if nargout is None:
                nargout = mp_nargout_from_stack(caller_level, func)
            res = func(*args, **kwargs, nargout=nargout)
            if not isinstance(res, tuple):
                # TODO: we should be smarter
                raise SyntaxWarning(
                    "mp_inference_nargout_decorators can only be used once"
                )
            # Single output is unwrapped; otherwise return the first nargout outputs.
            return res[0] if nargout == 1 else res[:nargout]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargin_decorators():
    """Decorator factory emulating MATLAB's ``nargin``: forward the number of
    positional arguments supplied at the call site as keyword ``nargin``."""
    def decorator(func):
        @functools.wraps(func)
        def with_nargin(*call_args):
            # nargin == count of positional arguments actually supplied.
            return func(*call_args, nargin=len(call_args))
        return with_nargin
    return decorator
class CodeContext(metaclass=Singleton):
    """Process-wide (Singleton) cache of the most recently seen source
    snippet and its lazily parsed AST."""

    def __init__(self):
        self.code = None      # last non-empty source string seen
        self.__ast__ = None   # memoised first statement of ``code``

    def reset(self):
        """Forget the cached source and its parsed AST."""
        self.code = None
        self.__ast__ = None

    @property
    def ast(self):
        """First statement of ``code``, parsed on demand and memoised."""
        if self.__ast__ is None:
            self.__ast__ = ast.parse(self.code).body[0]
        return self.__ast__

    def __call__(self, code: str):
        """Update the cached source (and invalidate the AST) when *code* is
        non-empty and differs from what is already cached; return self."""
        stripped = code.strip()
        if stripped != "" and stripped != self.code:
            self.code = stripped
            self.__ast__ = None
        return self
def mp_nargout_from_ast(ast_tree, func_name: str):
    """Infer MATLAB-style ``nargout`` from an assignment statement.

    Returns the number of tuple targets when *ast_tree* is an assignment of
    a direct call to *func_name* (``a, b = func(...)`` -> 2); otherwise 1.
    """
    if (
        isinstance(ast_tree, ast.Assign)
        and isinstance(ast_tree.value, ast.Call)
        # Guard: attribute calls (obj.m(...)) have no .id and previously
        # raised AttributeError here.
        and isinstance(ast_tree.value.func, ast.Name)
        and ast_tree.value.func.id == func_name
        and isinstance(
            ast_tree.targets[0], ast.Tuple
        )  # `a, = func()` not allowed in matlab
    ):
        return len(ast_tree.targets[0].elts)
    else:
        return 1
def mp_nargout_from_stack(
    caller_level: int = 2,
    func=None,
    default_if_exception: int = 1
    if mat2py.config.ignore_nargout_inference_exception
    else None,
):
    """Infer ``nargout`` by inspecting the caller's source via the stack.

    Uses the ``executing`` package to locate the call node in the caller's
    frame; falls back to the CodeContext singleton's cached statement. When
    everything fails, raises SyntaxWarning or returns *default_if_exception*
    (configured at import time from mat2py.config).
    """
    current, *_, caller = stack()[1 : (caller_level + 1)]
    # Prefer the explicit function's name; otherwise the current frame's name.
    function = func.__name__ if func is not None else current.function
    try:
        try:
            frame_meta = executing.Source.executing(caller.frame)
            if frame_meta.node is not None:
                call_node = frame_meta.node
                assert isinstance(call_node, ast.Call) and call_node.func.id == function
                # .parent is presumably attached by executing/asttokens — the
                # enclosing Assign statement; confirm.
                return mp_nargout_from_ast(call_node.parent, function)
            else:
                if len(frame_meta.statements) == 1:
                    (ast_tree,) = frame_meta.statements
                    # TODO: how to handle multiple call with same function?
                    return mp_nargout_from_ast(ast_tree, function)
                elif frame_meta.statements:
                    raise NotImplementedError(
                        "only one statement supported in one line for now"
                    )
                elif caller.filename in ("<stdin>", "<console>"):
                    raise ValueError("can not identify source code, seems to be IDLE")
            raise SystemError
        except ValueError:
            # Interactive session: fall back to the cached code context.
            return mp_nargout_from_ast(CodeContext().ast, function)
    except Exception as err:
        if default_if_exception is None:
            raise SyntaxWarning(
                "failed to inference nargout from call stack, pass the information explicitly"
            )
        else:
            logger.warning(
                f"failed to inference nargout from call stack, set it to be {default_if_exception}: {err}"
            )
            return default_if_exception
| StarcoderdataPython |
11360097 | <reponame>ckamtsikis/cmssw<filename>RecoLocalTracker/SiStripClusterizer/python/test/testClusterizer_cfg.py
import FWCore.ParameterSet.Config as cms
# CMSSW configuration running the SiStrip clusterizer unit tests over one
# empty event; the conditions records are provided by empty ES sources.
process = cms.Process("TEST")
process.add_(cms.Service( "MessageLogger"))
process.source = cms.Source( "EmptySource" )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
# Dummy (empty) event-setup sources for the conditions the clusterizer needs.
process.SiStripNoisesRcdSource = cms.ESSource( "EmptyESSource",
                                               recordName = cms.string( "SiStripNoisesRcd" ),
                                               iovIsRunNotTime = cms.bool( True ),
                                               firstValid = cms.vuint32( 1 )
                                               )
process.SiStripGainRcdSource = cms.ESSource( "EmptyESSource",
                                             recordName = cms.string( "SiStripGainRcd" ),
                                             iovIsRunNotTime = cms.bool( True ),
                                             firstValid = cms.vuint32( 1 )
                                             )
process.SiStripQualityRcdSource = cms.ESSource( "EmptyESSource",
                                                recordName = cms.string( "SiStripQualityRcd" ),
                                                iovIsRunNotTime = cms.bool( True ),
                                                firstValid = cms.vuint32( 1 )
                                                )
process.load("RecoLocalTracker.SiStripClusterizer.test.ClusterizerUnitTestFunctions_cff")
process.load("RecoLocalTracker.SiStripClusterizer.test.ClusterizerUnitTests_cff")
# Same test definitions feed both the ES producer and the analyzer.
testDefinition = cms.VPSet() + [ process.clusterizerTests ]
process.es = cms.ESProducer("ClusterizerUnitTesterESProducer", ClusterizerTestGroups = testDefinition )
process.runUnitTests = cms.EDAnalyzer("ClusterizerUnitTester", ClusterizerTestGroups = testDefinition )
process.path = cms.Path( process.runUnitTests )
| StarcoderdataPython |
1680089 | <gh_stars>1-10
"""Module to provide logic for the model evaluation"""
import gc
import json
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
from melidatachall19.base import Step
from melidatachall19.metrics import evaluate_metrics
from melidatachall19.utils import get_logger
from melidatachall19 import kb
class EvaluateStep(Step):
    """
    Step defined to perform the evaluation of models over the test dataset.
    In particular, the evaluation will be performed per model (and overall data),
    by applying some selected classification metrics.

    Parameters
    ----------
    profile : {dict}
        Configuration of execution profile, having at least the entries:
        - paths: dict with str / dicts of paths to resources
        - logger: dict with entry:
            - level: str, indicating logging level
        - seed: int, seed for random operations
    """
    def __init__(self, profile):
        self.profile = profile
        # Seed numpy and TensorFlow so the evaluation is reproducible.
        self.seed = profile["seed"]
        np.random.seed(self.seed)
        tf.random.set_seed(self.seed)
        # Create logger for execution
        self.logger = get_logger(__name__, level=profile["logger"]["level"])
        # Fixed copy-pasted message: this class is EvaluateStep, not ModelingStep.
        self.logger.debug("EvaluateStep initialized")
        # Per-language resources, filled by load() / evaluate().
        self.data = dict()
        self.models = dict()
        self.vectorizers = dict()
        self.label_encoders = dict()
        self.results = dict()

    def load(self):
        """Load resources (model, vectorizer, test data, label encoder) per language."""
        for lang in ["es", "pt"]:
            self.logger.info(f"Loading resources for language={lang}")
            # Load model from disk
            self.models[lang] = load_model(self.profile["paths"]["model"][lang], compile=True)
            # Load vectorizer from disk (config + weights)
            # From https://stackoverflow.com/a/65225240/5484690
            with open(self.profile["paths"]["vectorizer"][lang], "rb") as f:
                vec = pickle.load(f)
            self.vectorizers[lang] = TextVectorization.from_config(vec['config'])
            # You have to call `adapt` with some dummy data (BUG in Keras)
            # self.vectorizer_es.adapt(tf.data.Dataset.from_tensor_slices(["xyz"]))
            self.vectorizers[lang].set_weights(vec['weights'])
            # Load data from disk
            self.data[lang] = pd.read_parquet(self.profile["paths"]["test"][lang])
            # TODO: data already having column "language"
            self.data[lang]["language"] = kb.LANGUAGE_ES if lang == "es" else kb.LANGUAGE_PT
            # Load label encoder
            with open(self.profile["paths"]["label_encoder"][lang], "rb") as f:
                self.label_encoders[lang] = pickle.load(f)

    def evaluate(self):
        """Run the classification metrics on the test split of each language."""
        x_all, y_all = [], []
        for lang in ["es", "pt"]:
            x = self.data[lang]["title"]
            y = self.data[lang]["label"]
            res = evaluate_metrics(x=x, y=y,
                                   model=self.models[lang],
                                   vectorizer=self.vectorizers[lang],
                                   label_encoder=self.label_encoders[lang])
            self.logger.info(f"Results of test for model of language={lang}: ")
            self.logger.info(res)
            self.results[lang] = res
            # Save x y arrays for overall metrics
            x_all.append(x)
            y_all.append(y)
        # TODO: get predictions and evaluate, to then evaluate results overall

    def save(self):
        """Save all results for later analysis"""
        for lang, res in self.results.items():
            with open(self.profile["paths"]["results"]["test"][lang], "w") as f:
                json.dump(res, f)

    def flush(self):
        """Remove from memory heavy objects that are not longer needed after run()"""
        self.logger.info("Flushing objects")
        del self.data
        del self.models
        del self.vectorizers
        del self.label_encoders
        del self.results
        gc.collect()

    def run(self):
        """Entry point to run step: load -> evaluate -> save -> flush."""
        self.load()
        self.evaluate()
        self.save()
        self.flush()
| StarcoderdataPython |
1825388 | from django.contrib.gis.db import models
class FederativeUnit(models.Model):
    """A first-level administrative division (e.g. a Brazilian state)."""
    # Full name is optional; the two-letter abbreviation is the required id.
    name = models.CharField(max_length=40, blank=True)
    short = models.CharField(max_length=2)
    country = models.CharField(max_length=40, default='Brazil')

    def __str__(self):
        # Fall back to the abbreviation when the full name is blank.
        return '{} ({})'.format(self.name or self.short, self.country)
class Topography(models.Model):
    """Terrain polygons tagged with an altitude (units not stated -- TODO confirm)."""
    poly = models.MultiPolygonField()
    altitude = models.IntegerField()
class Population(models.Model):
    """Polygons tagged with a population head count."""
    poly = models.MultiPolygonField()
    count = models.PositiveIntegerField()
| StarcoderdataPython |
1677781 | <gh_stars>1-10
from optparse import OptionParser
from pronounLSTM import PronounLSTM
import pickle, utils, os, time, sys
import utils
# This code is partially based on:
# BIST parser by <NAME> and <NAME>: https://github.com/elikip/bist-parser
# uuparser by <NAME> et al.: https://github.com/UppsalaNLP/uuparser
if __name__ == '__main__':
    # Python 2 script (print statements, xrange below). CLI options mirror the
    # hyper-parameters of PronounLSTM plus data/sampling configuration.
    parser = OptionParser()
    parser.add_option("--train", dest="pron_train", help="Annotated PRON train file", metavar="FILE")
    parser.add_option("--dev", dest="pron_dev", help="Annotated PRON dev file", metavar="FILE")
    parser.add_option("--test", dest="pron_test", help="Annotated PRON test file", metavar="FILE")
    parser.add_option("--params", dest="params", help="Parameters file", metavar="FILE", default="params.pickle")
    parser.add_option("--model", dest="model", help="Load/Save model file", metavar="FILE", default="pronoun.model")
    parser.add_option("--test-out", dest="test_out", help="Name of output file at test time", metavar="FILE", default="test_pred.pron_out")
    parser.add_option("--wembedding", type="int", dest="wembedding_dims", default=100)
    parser.add_option("--pembedding", type="int", dest="pembedding_dims", default=10)
    parser.add_option("--dembedding", type="int", dest="dembedding_dims", default=15)
    parser.add_option("--lembedding", type="int", dest="lembedding_dims", default=100)
    parser.add_option("--tpembedding", type="int", dest="tpembedding_dims", default=10)
    parser.add_option("--lpembedding", type="int", dest="lpembedding_dims", default=100)
    parser.add_option("--cembedding", type="int", dest="cembedding_dims", default=12)
    parser.add_option("--chlstmdims", type="int", dest="chlstm_dims", default=50)
    parser.add_option("--epochs", type="int", dest="epochs", default=30)
    parser.add_option("--hidden", type="int", dest="hidden_units", default=100)
    parser.add_option("--hidden2", type="int", dest="hidden2_units", default=0)
    parser.add_option("--lr", type="float", dest="learning_rate", default=0.1)
    parser.add_option("--outdir", type="string", dest="output", default="results")
    parser.add_option("--activation", type="string", dest="activation", default="tanh")
    parser.add_option("--lstmdims", type="int", dest="lstm_dims", default=100)
    parser.add_option("--dynet-seed", type="int", dest="seed", default=7)
    parser.add_option("--disable-bibi-lstm", action="store_false", dest="bibiFlag", default=True)
    parser.add_option("--disableblstm", action="store_false", dest="blstmFlag", default=True)
    parser.add_option("--usehead", action="store_true", dest="headFlag", default=False)
    parser.add_option("--predict", action="store_true", dest="predictFlag", default=False)
    parser.add_option("--disablePredictEval", action="store_false", dest="predictEval", default=True)
    parser.add_option("--dynet-mem", type="int", dest="cnn_mem", default=512)
    parser.add_option("--continue", dest="continueTraining", action="store_true", default=False)
    parser.add_option("--continueModel", dest="continueModel", help="Load model file, when continuing to train a previously trained model", metavar="FILE", default=None)
    parser.add_option("--debug", action="store_true", dest="debug", default=False)
    parser.add_option("--langPair", type="string", dest="langPair", default="en-de")
    parser.add_option("--updateLimit", type="int", dest="updateLimit", default=25)
    parser.add_option("--use-pron-emb", action="store_true", dest="pronEmbedding", default=False)
    parser.add_option("--defaultDropRate", type="float", dest="defaultDropRate", help="default value for dropout, in LSTMS", default=0.33)
    parser.add_option("--offlineSampling", action="store_true", dest="offlineSampling", default=False)
    parser.add_option("--samples-path", type="string", dest="samplesPath", default="./")
    parser.add_option("--class-weighting", action="store_true", dest="classWeighting", default=False)
    parser.add_option("--filter-rare", type="int", dest="filter_rare", default=-1)
    parser.add_option("--online-sampling", action="store_true", dest="onlineSampling", default=False, help="the proportion of the data to use in each epoch")
    parser.add_option("--online-sample-prop", type="float", dest="sampleProp", default=0.1, help="sample with equal proportions of classes isntead of using dev distribution")
    parser.add_option("--online-sampling-equal", action="store_true", dest="sampleEqual", default=False)
    parser.add_option("--first-epoch", type="int", dest="first_epoch", default=0)
    (options, args) = parser.parse_args()
    # if characters are not used, set charlstm to 0 as well!!
    if options.cembedding_dims <= 0:
        options.chlstm_dims = 0
    noSampling = not (options.onlineSampling or options.offlineSampling)
    ############
    # TRAINING #
    ############
    if not options.predictFlag:
        # TODO: sanity checks (?)
        # READ DATA #
        print 'Reading dev data'
        devData = []
        with open(options.pron_dev, 'r') as pronFP:
            devData = list(utils.read_prons(pronFP))
        #write only pronouns, for evaluation purposes
        gold_path = os.path.join(options.output, 'devdata.gold')
        utils.write_gold(gold_path, devData)
        trainData = []
        # Three mutually exclusive data regimes: full in-memory data,
        # pre-generated offline samples, or per-epoch online sampling.
        if noSampling:
            print 'Reading training data'
            with open(options.pron_train, 'r') as pronFP:
                trainData = list(utils.read_prons(pronFP))
            print 'Preparing vocab'
            if not options.continueTraining:
                s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch = utils.vocab(trainData, options.filter_rare)
        elif options.offlineSampling:
            if not options.continueTraining:
                # save vocab by reading through file without storing examples!
                s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, pronFreqs = utils.vocabFromFile(options.pron_train, options.filter_rare)
            f_id = 0
            Samples = options.samplesPath + options.langPair + "/"
            dirs = os.listdir(Samples)
        else: # do onlineSampling
            # save vocab by reading through file without storing examples!
            # if not options.continueTraining: read this only for pronfreqs, regardless of continue.
            # Overwrite the other stuff later if necessary (could be cleaned up!)
            s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, pronFreqs = utils.vocabFromFile(options.pron_train, options.filter_rare)
            # store pronoun percentages in dev
            pronPercent = utils.getDistributionPercentage(devData)
            pronProbs = utils.getPronounProbabilities(pronFreqs, pronPercent, options.sampleProp, options.sampleEqual)
        # Fresh run: persist the vocab so prediction can rebuild the network.
        # Continue run: restore the vocab/options the model was trained with.
        if not options.continueTraining:
            with open(os.path.join(options.output, options.params), 'w') as paramsfp:
                pickle.dump((s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, options), paramsfp)
            print 'Finished collecting vocab'
            print 'Initializing pronoun lstm:'
            predictor = PronounLSTM(s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, options)
        else:
            with open(options.params, 'r') as paramsfp:
                s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, stored_opt = pickle.load(paramsfp)
            print 'Initializing pronoun lstm:'
            predictor = PronounLSTM(s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, stored_opt)
            # read in an already trained model, and continue to train it!
            print "continue model: ", options.continueModel
            predictor.Load(options.continueModel)
        #################
        # FOREACH EPOCH #
        #################
        for epoch in xrange(options.first_epoch, options.epochs):
            print 'Starting epoch', epoch
            data = trainData #default when no sampling is used
            if options.onlineSampling:
                data = utils.readSample(options.pron_train, pronProbs)
            elif options.offlineSampling:
                print 'sample file --->', dirs[f_id]
                with open(Samples + dirs[f_id], 'r') as pronFP:
                    data = list(utils.read_prons(pronFP))
                if f_id < len(dirs):
                    f_id += 1
                else:
                    # NOTE(review): '==' is a no-op comparison; this was almost
                    # certainly meant to be 'f_id = 0' (wrap around). As written,
                    # f_id can reach len(dirs) and the next epoch would raise
                    # IndexError on dirs[f_id].
                    f_id == 0
            predictor.Train(data)
            print "Training done", epoch
            # After each epoch: predict dev, score it, and checkpoint the model.
            devpath = os.path.join(options.output, 'dev_epoch_' + str(epoch+1) + '.pron_out')
            utils.write_prons(devpath, predictor.Predict(devData), predictor.classes)
            utils.evaluate_pronouns(gold_path, devpath, devpath + '.res', options.langPair)
            print 'Finished predicting dev'
            predictor.Save(os.path.join(options.output, options.model + str(epoch+1)))
    ##############
    # PREDICTION #
    ##############
    else:
        print 'Reading test data'
        testData = []
        with open(options.pron_test, 'r') as pronFP:
            testData = list(utils.read_prons(pronFP))
        if options.predictEval:
            #write only pronouns, for eval purposes
            gold_path = os.path.join(options.output, 'testdata.gold')
            utils.write_gold(gold_path, testData)
        # Rebuild the network from the stored vocab/options, then load weights.
        with open(options.params, 'r') as paramsfp:
            s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, stored_opt = pickle.load(paramsfp)
        predictor = PronounLSTM(s_words, w2i, s_pos, s_deps, t_lemmas, l2i, t_pos, ch, stored_opt)
        predictor.Load(options.model)
        testpath = os.path.join(options.output, options.test_out)
        ts = time.time()
        pred = list(predictor.Predict(testData))
        te = time.time()
        utils.write_prons(testpath, pred, predictor.classes)
        if options.predictEval:
            utils.evaluate_pronouns(gold_path, testpath, testpath + '.res', options.langPair)
        print 'Finished predicting test',te-ts
11337512 | <gh_stars>0
"""
Parsing XML in Python.

The two common XML programming interfaces are DOM and SAX. They process XML
documents in different ways and therefore suit different use cases.
Python offers three ways to parse XML: SAX, DOM, and ElementTree:
1. SAX (Simple API for XML)
   The Python standard library includes a SAX parser. SAX uses an event-driven
   model: events are fired one by one while the XML is being parsed, and
   user-defined callback functions are invoked to handle the XML document.
2. DOM (Document Object Model)
   Parses the XML data into an in-memory tree; the XML is then manipulated by
   operating on that tree.
3. ElementTree
   A lightweight, Pythonic tree API combining aspects of both.
""" | StarcoderdataPython |
1855055 | <reponame>gruunday/useradm
# --------------------------------------------------------------------------- #
# MODULE DESCRIPTION #
# --------------------------------------------------------------------------- #
"""RedBrick Error Module; contains RedBrick exception classes."""
# --------------------------------------------------------------------------- #
# DATA #
# --------------------------------------------------------------------------- #
__version__ = '$Revision: 1.2 $'
__author__ = '<NAME>'
# --------------------------------------------------------------------------- #
# CLASSES #
# --------------------------------------------------------------------------- #
class RBError(Exception):
    """Root of the RedBrick exception hierarchy."""

    def __init__(self, mesg):
        """Build the exception, keeping *mesg* available as ``self.mesg``."""
        super(RBError, self).__init__(mesg)
        self.mesg = mesg

    def __str__(self):
        """Format the stored message with an ``ERROR:`` prefix."""
        return "ERROR: {0!s}".format(self.mesg)
class RBFatalError(RBError):
    """RedBrick exception for unrecoverable (fatal) errors."""

    def __str__(self):
        """Format the stored message with a ``FATAL:`` prefix."""
        return "FATAL: {0!s}".format(self.mesg)
class RBWarningError(RBError):
    """RedBrick exception for conditions that callers may override."""

    def __str__(self):
        """Format the stored message with a ``WARNING:`` prefix."""
        return "WARNING: {0!s}".format(self.mesg)
| StarcoderdataPython |
1772004 | <filename>blog/models/maintext.py
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
class MainText(models.Model):
    """A block of site copy; ``text_type`` selects where it is rendered."""
    # (stored value, human-readable label) pairs for ``text_type``.
    TEXT_TYPE_CHOICES = (
        ('Draft', 'Draft'),
        ('Main Page', 'Main'),
        ('Footer', 'Footer'),
        ('Base Title', 'Base Title'),
        ('Default Title', 'Default Title'),
    )
    body = models.TextField()
    # Set once on creation, never updated afterwards (auto_now_add).
    pub_date = models.DateTimeField('date published', auto_now_add=True)
    text_type = models.CharField(max_length=254, default='Draft', choices=TEXT_TYPE_CHOICES)

    def __str__(self):
        return f'{self.text_type}={self.pk}'
| StarcoderdataPython |
4954439 | import datetime
import json
import Queue
import re
import redis
import socket
import threading
import time
# Listen on all interfaces for the sensor nodes' UDP datagrams.
UDP_IP = "0.0.0.0"
UDP_PORT = 8225

class StatsReceiver(object):
    """Receives sensor datagrams over UDP, normalises the readings and fans
    them out to redis pub/sub channels (influx batching, watchdog, per-node,
    light-control and tea-reader channels).

    NOTE(review): this module mixes Python 2 (`import Queue`) with Python 3
    features (f-strings); under Python 3 `recvfrom` also returns bytes, so
    `data.split(":")` would raise. Confirm which interpreter this targets.
    """

    # Key patterns that classify a reading. Examples: "...-t0" temperature,
    # "E12" power, "...pir" motion, "...-sw" switch, "...pm25" particulates.
    TEMPERATURE_RE = re.compile(r"^.*-t\d$")
    POWER_READING_RE = re.compile(r"^E\d+")
    PIR_RE = re.compile(r"^.*pir$")
    SWITCH_RE = re.compile(r"^.*-sw$")
    PM_RE = re.compile(r"^.*pm\d+$")

    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, # Internet
                                  socket.SOCK_DGRAM) # UDP
        self.sock.bind((UDP_IP, UDP_PORT))
        # De-duplication state for pir/switch events: last value + timestamp.
        self.last_values = {}
        # Readings queued here are flushed to influx in batches by send_timer.
        self.send_queue = Queue.Queue()
        self.timer = None
        self.redis = redis.StrictRedis()
        self.running = False
        # Latest value per key, grouped by node, for watchdog reporting.
        self.node_value_sets = {}

    def send_timer(self):
        """Background loop: every 3s drain the queue and publish one batch."""
        while True:
            if not self.running:
                return
            time.sleep(3)
            items = []
            # Drain everything currently queued without blocking.
            while True:
                try:
                    items.append(self.send_queue.get_nowait())
                except Queue.Empty:
                    break
            if len(items) > 0:
                print("Sending", items)
                self.redis.publish("influx-update-pubsub", json.dumps(items))

    def process(self, node_name, key, value):
        """Classify one reading by its key, convert its value, and publish it."""
        item_type = "generic"
        fields = {}
        if self.TEMPERATURE_RE.match(key):
            # Nodes report centi-degrees; convert to degrees C.
            value = float(value) / 100
            item_type = "temperature"
            # 127 is the sensor's error sentinel; very low values are noise.
            if value < -70 or value == 127:
                print(f"Invalid value for temperature: {value} degC, key: {key}")
                return
        elif self.PIR_RE.match(key):
            value = value == "1"
            item_type = "pir"
        elif self.POWER_READING_RE.match(key):
            # Reported value is amperes; x230 V gives watts (mains voltage).
            value = float(value) * 230
            item_type = "watts"
            # NOTE(review): unlike the temperature branch, an out-of-range
            # power reading is logged but still published -- confirm intended.
            if value < 0 or value > 5000:
                print(f"Invalid value for power consumption: {value} W, key {key}")
        elif node_name == "tea" and key == "nfc-id":
            item_type = "tea-reader"
        elif self.SWITCH_RE.match(key):
            value = value == "1"
            item_type = "switch"
        elif self.PM_RE.match(key):
            value = int(value)
            item_type = "dust_pm"
        elif key == "dust":
            value = int(value)
            item_type = "dust"
        fields[item_type] = value
        # Influx-style point; batched by send_timer.
        output = {
            "time": datetime.datetime.utcnow().isoformat() + "Z",
            "measurement": "node-" + node_name,
            "tags": {
                "key": key,
            },
            "fields": fields,
        }
        # Keep the latest value of every key for the node's watchdog report.
        if node_name not in self.node_value_sets:
            self.node_value_sets[node_name] = {}
        self.node_value_sets[node_name][key] = value
        self.redis.publish("watchdog-input", json.dumps({"name": f"node-{node_name}", "values": self.node_value_sets[node_name]}))
        self.redis.publish(f"node-{node_name}-pubsub", json.dumps({"name": f"node-{node_name}", "key": key, "value": value, "item_type": item_type}))
        if item_type == "pir" or item_type == "switch":
            if value:
                self.redis.publish("lightcontrol-triggers-pubsub", json.dumps({"key": node_name, "trigger": item_type}))
            # Rate-limit noisy pir/switch readings: drop repeats of the same
            # value seen within the last 120 seconds.
            if key not in self.last_values:
                self.last_values[key] = {"value": None, "seen": datetime.datetime.now()}
            if self.last_values[key]["value"] == value and datetime.datetime.now() - self.last_values[key]["seen"] < datetime.timedelta(seconds=120):
                return
            self.last_values[key]["value"] = value
            self.last_values[key]["seen"] = datetime.datetime.now()
        elif item_type == "tea-reader":
            self.redis.publish("tea-reader-pubsub", json.dumps({"id": value}))
        self.send_queue.put(output)

    def run(self):
        """Receive loop: parse "node.key: value" (or "|"-separated multi-value)
        datagrams and hand each reading to process(). Ctrl-C stops the timer."""
        try:
            self.running = True
            self.timer = threading.Thread(target=self.send_timer)
            self.timer.start()
            while True:
                data, addr = self.sock.recvfrom(1024) # buffer size is 1024 bytes
                splitted_data = data.split(":")
                if len(splitted_data) != 2:
                    print(f"Malformed data from {addr}: {data}")
                    continue
                key = splitted_data[0]
                splitted_key = key.split(".")
                node_name = splitted_key[0]
                if len(splitted_key) < 2:
                    print(f"Malformed key from {addr}: {data}")
                    continue
                value = splitted_data[1].strip()
                if "|" in value:
                    # Multi-reading payload: "k1=v1|k2=v2|...".
                    values = value.split("|")
                    for value in values:
                        item_key, item_value = value.split("=")
                        self.process(node_name, item_key, item_value)
                else:
                    key = ".".join(splitted_key[1:])
                    self.process(node_name, key, value)
        except KeyboardInterrupt as err:
            # Let send_timer exit on its next wake-up, then re-raise.
            self.running = False
            raise err

    def close(self):
        """Close the UDP socket if it was opened."""
        if self.sock:
            self.sock.close()
def main():
    """Construct a StatsReceiver and run its receive loop until interrupted."""
    receiver = StatsReceiver()
    receiver.run()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8050801 | """
Setup of Traffic Camera Streaming Pipeline
Author: <NAME>
"""
from setuptools import setup
# Package metadata for the traffic-camera streaming pipeline ("src layout":
# importable packages live under src/).
setup(name='tcp',
      version='0.1.dev0',
      description='Pipeline to extract trajectories from traffic camera streams',
      author='<NAME>',
      author_email='<EMAIL>',
      package_dir = {'': 'src'},
      packages=['tcp', 'tcp.object_detection', 'tcp.streaming', 'tcp.registration', 'tcp.configs', 'tcp.utils'],
      )
| StarcoderdataPython |
9606339 | <reponame>chzhan/reinforcement-learning-algorithms<gh_stars>1-10
from baselines.common.cmd_util import make_atari_env
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from arguments import get_args
from a2c_agent import a2c_agent
from baselines import logger
if __name__ == '__main__':
    args = get_args()
    # Route baselines' progress logging to the requested directory.
    logger.configure(dir=args.log_dir)
    # create environments
    # Vectorized Atari envs with a 4-frame stack (standard A2C input).
    envs = VecFrameStack(make_atari_env(args.env_name, args.num_processes, args.seed), 4)
    trainer = a2c_agent(envs, args)
    trainer.learn()
    envs.close()
| StarcoderdataPython |
1652640 | from .unknown import Unknown
| StarcoderdataPython |
class Solution(object):
    def find_diff(self, str1, str2):
        """Return the character by which the longer string differs from the shorter.

        Assumes one argument equals the other with exactly one extra character
        inserted somewhere. Raises TypeError if either argument is None.
        """
        if str1 is None or str2 is None:
            raise TypeError("str1 or str2 cannot be None")
        long_str = str1 if len(str1) > len(str2) else str2
        short_str = str2 if len(str1) > len(str2) else str1
        # The first position where the strings disagree holds the extra char.
        for i in range(len(short_str)):
            if long_str[i] != short_str[i]:
                return long_str[i]
        # No mismatch in the common prefix: the extra char is the one just past
        # the shorter string. (The original returned long_str[i + 1], which
        # raised NameError when short_str was empty because the loop never ran.)
        return long_str[len(short_str)]
if __name__ == "__main__":
    # Quick demo of find_diff on two sample pairs.
    solver = Solution()
    for pair in (("abcd", "acd"), ("abcd", "abcde")):
        print(solver.find_diff(*pair))
399562 | <filename>avalares/__init__.py
from .utils import to_numpy, to_pandas
from .parser import parse
| StarcoderdataPython |
1909543 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 15:53:34 2019
@author: leeh43
"""
import os
import nibabel.cmdline.ls
import nibabel as nib
#import cv2
# Compare voxel spacings of one representative NIfTI volume from each of four
# CT datasets (DeepLesion, zhoubing100, ImageVU, LiTS). get_zooms() returns
# the per-axis voxel size of the volume.
nifti_deeplesion = os.path.join('/nfs/masi/leeh43/DeepLesion/Images_nifti/'
                                + '000001_03_01_058-118.nii.gz')
n = nib.load(nifti_deeplesion)
data = n.get_data()
r_1 = n.header.get_zooms()

nifti_zb = os.path.join('/nfs/masi/tangy5/share2_tangy5/tangy5/DeepAbo3D/experiment/data/zhoubing100/img/img0001.nii.gz')
n_zb = nib.load(nifti_zb)
data_zb = n_zb.get_data()
r_2 = n_zb.header.get_zooms()
#data_zb_resize = cv2.resize(data_zb, (512,512))

nifti_imagevu = os.path.join('/share2/leeh43/First_Round_Images/scan_2/image_2/'
                             + 'landab_109_1.2.124.113532.192.168.127.12.20070726.40748.12940586_6324.nii.gz')
n_imagevu = nib.load(nifti_imagevu)
data_imagevu = n_imagevu.get_data()
r_3 = n_imagevu.header.get_zooms()

nifti_LITS = os.path.join('/nfs/masi/leeh43/LITS_datasets/input_volumes_train/'
                          + 'volume-0.nii')
n_LITS = nib.load(nifti_LITS)
data_LITS = n_LITS.get_data()
r_4 = n_LITS.header.get_zooms()
1919257 | import pandas as pd
import datetime
import subprocess
import localModuleForMinpou
#----
def atting_program(row):
    """Schedule one radio-programme recording via at(1).

    *row* is a namedtuple-like row from the programme table with at least
    station_id, station_name, title, image_url, start_time and air_time.
    The recording is launched MARGIN_SECOND before air time and its duration
    is padded by MARGIN_SECOND on both ends so it brackets the broadcast.
    """
    # Command that performs the actual recording.
    recorder_for_minpou_command_line = 'python '+localModuleForMinpou.RECORDER_FOR_MINPOU+' "{0}" "{1}" "{2}" "{3}" "{4}" "{5}" "{6}"'.format(
        row.station_id,
        int((row.air_time + datetime.timedelta(seconds=localModuleForMinpou.MARGIN_SECOND*2)).total_seconds()),
        row.start_time.strftime('%Y'),
        row.station_name,
        row.title,
        row.image_url,
        row.start_time
    )
    at_launch_time = row.start_time - datetime.timedelta(seconds=localModuleForMinpou.MARGIN_SECOND)
    # at(1) only has minute resolution, so the job sleeps the leftover seconds
    # before starting the recorder.
    # NOTE(review): the command is assembled by string interpolation and run
    # with shell=True; a title containing quotes would break (or inject into)
    # the shell command -- consider shlex.quote on the interpolated fields.
    command_line = "echo 'sleep {0}; {1}' | at -t {2}".format(
        at_launch_time.strftime('%S'),
        recorder_for_minpou_command_line,
        at_launch_time.strftime('%Y%m%d%H%M'),
    )
    res = subprocess.check_output(command_line, shell=True)
    # print(command_line)
#----
# Load the programme table and convert the time columns to real datetime /
# timedelta objects, then schedule an at(1) recording job for every row.
table = pd.read_csv(localModuleForMinpou.TABLE_FILE)
table['start_time'] = pd.to_datetime(table['start_time'])
table['end_time'] = pd.to_datetime(table['end_time'])
table['air_time'] = pd.to_timedelta(table['air_time'])
for row in table.itertuples():
    atting_program(row)
| StarcoderdataPython |
3224168 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
# TODO(nednguyen): Remove this when telemetry is moved to catapult
'tools/telemetry/third_party/',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
  """Return the stdout of a command run in *cwd*.

  The exit status and stderr are ignored. LANGUAGE is pinned so any parsed
  output is in English.
  """
  logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
  env = dict(os.environ, LANGUAGE='en_US.UTF-8')
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
  stdout, _ = proc.communicate()
  return stdout
def get_git_root(dir_path):
  """Return the root of the git checkout containing *dir_path*, or None."""
  top = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
  return top or None
def is_ignored(rel_path):
  """Return True if rel_path is whitelisted and should not be checked."""
  lowered = rel_path.lower()
  if os.path.basename(lowered) in IGNORED_FILENAMES:
    return True
  return lowered.startswith(IGNORED_PATHS)
def must_be_executable(rel_path):
  """Return True for file types that must carry the executable bit."""
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in EXECUTABLE_EXTENSIONS or
          rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
  """Return True for file types that must not carry the executable bit."""
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in NON_EXECUTABLE_EXTENSIONS or
          rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
  """Return True if any of the user/group/other execute bits is set."""
  mode = os.stat(full_path).st_mode
  return (mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)) != 0
def has_shebang_or_is_elf(full_path):
  """Returns if the file starts with #!/ or is an ELF binary.

  full_path is the absolute path to the file.
  Returns a (has_shebang, is_elf) pair of booleans.
  """
  # NOTE(review): the file is opened in binary mode but compared against str
  # literals; this behaves as intended only under Python 2 (where the bare
  # print statements elsewhere in this file place it anyway). Under Python 3
  # both comparisons would always be False.
  with open(full_path, 'rb') as f:
    data = f.read(4)
  return (data[:3] == '#!/' or data == '#! /', data == '\x7fELF')
def check_file(root_path, rel_path):
  """Checks the permissions of the file whose path is root_path + rel_path and
  returns an error if it is inconsistent. Returns None on success.

  It is assumed that the file is not ignored by is_ignored().

  If the file name is matched with must_be_executable() or
  must_not_be_executable(), only its executable bit is checked.
  Otherwise, the first few bytes of the file are read to verify if it has a
  shebang or ELF header and compares this with the executable bit on the file.
  """
  full_path = os.path.join(root_path, rel_path)

  def result_dict(error):
    # Uniform error record consumed by check()/check_files() callers.
    return {
      'error': error,
      'full_path': full_path,
      'rel_path': rel_path,
    }

  try:
    bit = has_executable_bit(full_path)
  except OSError:
    # It's faster to catch exception than call os.path.islink(). Chromium
    # tree happens to have invalid symlinks under
    # third_party/openssl/openssl/test/.
    return None
  if must_be_executable(rel_path):
    if not bit:
      return result_dict('Must have executable bit set')
    return
  if must_not_be_executable(rel_path):
    if bit:
      return result_dict('Must not have executable bit set')
    return
  # For the others, it depends on the file header.
  (shebang, elf) = has_shebang_or_is_elf(full_path)
  if bit != (shebang or elf):
    if bit:
      return result_dict('Has executable bit but not shebang or ELF header')
    if shebang:
      return result_dict('Has shebang but not executable bit')
    return result_dict('Has ELF header but not executable bit')
def check_files(root, files):
  """Check every non-ignored file under root and keep the non-None errors."""
  results = (check_file(root, path) for path in files if not is_ignored(path))
  return filter(None, results)
class ApiBase(object):
  """SCM-agnostic checker: walks the file system under root_dir."""

  def __init__(self, root_dir, bare_output):
    self.root_dir = root_dir
    self.bare_output = bare_output
    # Statistics: total files checked, and how many needed a header read.
    self.count = 0
    self.count_read_header = 0

  def check_file(self, rel_path):
    """Check one file, counting how many needed their header inspected."""
    logging.debug('check_file(%s)' % rel_path)
    self.count += 1

    if (not must_be_executable(rel_path) and
        not must_not_be_executable(rel_path)):
      # Neither list matched, so check_file() will open and read the file.
      self.count_read_header += 1

    return check_file(self.root_dir, rel_path)

  def check_dir(self, rel_path):
    return self.check(rel_path)

  def check(self, start_dir):
    """Check the files in start_dir, recursively check its subdirectories."""
    errors = []
    items = self.list_dir(start_dir)
    logging.info('check(%s) -> %d' % (start_dir, len(items)))
    for item in items:
      full_path = os.path.join(self.root_dir, start_dir, item)
      rel_path = full_path[len(self.root_dir) + 1:]
      if is_ignored(rel_path):
        continue
      if os.path.isdir(full_path):
        # Depth first.
        errors.extend(self.check_dir(rel_path))
      else:
        error = self.check_file(rel_path)
        if error:
          errors.append(error)
    return errors

  def list_dir(self, start_dir):
    """Lists all the files and directory inside start_dir."""
    # Hidden entries (dotfiles, .git, ...) are skipped.
    return sorted(
      x for x in os.listdir(os.path.join(self.root_dir, start_dir))
      if not x.startswith('.')
    )
class ApiAllFilesAtOnceBase(ApiBase):
    """SCM-aware checker that lazily fetches the complete file list once."""

    # Cached, sorted list of every file the SCM knows about; filled on first use.
    _files = None

    def list_dir(self, start_dir):
        """Lists all the files and directory inside start_dir."""
        if self._files is None:
            self._files = sorted(self._get_all_files())
            if not self.bare_output:
                # Was a Python 2 `print` statement; use the call form like the
                # rest of this file (single argument, so output is unchanged).
                print('Found %s files' % len(self._files))
        # NOTE(review): this assumes start_dir is an absolute path beneath
        # root_dir (as main() passes it); confirm before reusing elsewhere.
        start_dir = start_dir[len(self.root_dir) + 1:]
        return [
            x[len(start_dir):] for x in self._files if x.startswith(start_dir)
        ]

    def _get_all_files(self):
        """Lists all the files and directories inside self.root_dir."""
        raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
    """Checker backed by `git ls-files` for the file inventory."""

    def _get_all_files(self):
        listing = capture(['git', 'ls-files'], cwd=self.root_dir)
        return listing.splitlines()
def get_scm(dir_path, bare):
    """Returns a properly configured ApiBase instance."""
    base_dir = dir_path or os.getcwd()
    git_root = get_git_root(base_dir)
    if git_root:
        if not bare:
            print('Found git repository at %s' % git_root)
        return ApiGit(dir_path or git_root, bare)
    # No supported SCM detected: fall back to a non-scm aware checker.
    if not bare:
        print('Failed to determine the SCM for %s' % dir_path)
    return ApiBase(base_dir, bare)
def main():
    """Command-line entry point; returns a process exit code (0 = success)."""
    usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck  Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
    parser = optparse.OptionParser(usage=usage)
    parser.add_option(
        '--root',
        help='Specifies the repository root. This defaults '
             'to the checkout repository root')
    parser.add_option(
        '-v', '--verbose', action='count', default=0, help='Print debug logging')
    parser.add_option(
        '--bare',
        action='store_true',
        default=False,
        help='Prints the bare filename triggering the checks')
    parser.add_option(
        # Typo fixed: "Specifics" -> "Specifies" in the user-visible help.
        '--file', action='append', dest='files',
        help='Specifies a list of files to check the permissions of. Only these '
             'files will be checked')
    parser.add_option('--json', help='Path to JSON output file')
    options, args = parser.parse_args()

    # -v maps to INFO, -vv (or more) to DEBUG.
    levels = [logging.ERROR, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])

    if len(args) > 1:
        parser.error('Too many arguments used')
    if options.root:
        options.root = os.path.abspath(options.root)

    if options.files:
        # --file implies --bare (for PRESUBMIT.py).
        options.bare = True
        errors = check_files(options.root, options.files)
    else:
        api = get_scm(options.root, options.bare)
        start_dir = args[0] if args else api.root_dir
        errors = api.check(start_dir)
        if not options.bare:
            print('Processed %s files, %d files where tested for shebang/ELF '
                  'header' % (api.count, api.count_read_header))

    if options.json:
        with open(options.json, 'w') as f:
            json.dump(errors, f)

    if errors:
        # All remaining prints converted from Python 2 statements to the call
        # form used elsewhere in this file (single argument: output unchanged).
        if options.bare:
            print('\n'.join(e['full_path'] for e in errors))
        else:
            print('\nFAILED\n')
            print('\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors))
        return 1
    if not options.bare:
        print('\nSUCCESS\n')
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| StarcoderdataPython |
45297 | # Copyright (c) 2017 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Human-readable labels for ZFS on-disk enum values.  Every table below is
# indexed by the raw numeric value from the on-disk format, so entry order is
# significant and entries must never be reordered or removed.

# DMU object type names (appears to mirror ZFS's dmu_object_type_t --
# confirm against the ZFS source's dmu.h before extending).
DMU_TYPE_DESC = [
    "unallocated",  # 0
    "object directory",  # 1
    "object array",  # 2
    "packed nvlist",  # 3
    "packed nvlist size",  # 4
    "bpobj",  # 5
    "bpobj header",  # 6
    "SPA space map header",  # 7
    "SPA space map",  # 8
    "ZIL intent log",  # 9
    "DMU dnode",  # 10
    "DMU objset",  # 11
    "DSL directory",  # 12
    "DSL directory child map",  # 13
    "DSL dataset snap map",  # 14
    "DSL props",  # 15
    "DSL dataset",  # 16
    "ZFS znode",  # 17
    "ZFS V0 ACL",  # 18
    "ZFS plain file",  # 19
    "ZFS directory",  # 20
    "ZFS master node",  # 21
    "ZFS delete queue",  # 22
    "zvol object",  # 23
    "zvol prop",  # 24
    "other uint8[]",  # 25
    "other uint64[]",  # 26
    "other ZAP",  # 27
    "persistent error log",  # 28
    "SPA history",  # 29
    "SPA history offsets",  # 30
    "Pool properties",  # 31
    "DSL permissions",  # 32
    "ZFS ACL",  # 33
    "ZFS SYSACL",  # 34
    "FUID table",  # 35
    "FUID table size",  # 36
    "DSL dataset next clones",  # 37
    "scan work queue",  # 38
    "ZFS user/group used",  # 39
    "ZFS user/group quota",  # 40
    "snapshot refcount tags",  # 41
    "DDT ZAP algorithm",  # 42
    "DDT statistics",  # 43
    "System attributes",  # 44
    "SA master node",  # 45
    "SA attr registration",  # 46
    "SA attr layouts",  # 47
    "scan translations",  # 48
    "deduplicated block",  # 49
    "DSL deadlist map",  # 50
    "DSL deadlist map hdr",  # 51
    "DSL dir clones",  # 52
    "bpobj subobj"  # 53
]

# Compression algorithm names, indexed by the block pointer's compression
# field.  Repeated entries are deliberate: several enum values (e.g. the
# "on"/default setting) resolve to the same concrete algorithm.
COMP_DESC = [
    "invalid",
    "lzjb",
    "off",
    "lzjb",
    "empty",
    "gzip1",
    "gzip2",
    "gzip3",
    "gzip4",
    "gzip5",
    "gzip6",
    "gzip7",
    "gzip8",
    "gzip9",
    "zle",
    "lz4"
]

# Checksum algorithm names, indexed by the block pointer's checksum field.
# Duplicates are intentional: multiple enum values (on/label/gang/zilog...)
# map onto the same underlying algorithm.
CHKSUM_DESC = ["invalid", "fletcher2", "none", "SHA-256", "SHA-256", "fletcher2", "fletcher2", "fletcher4", "SHA-256"]

# Byte-order label, indexed by the block pointer endianness bit (0 = big).
ENDIAN_DESC = ["BE", "LE"]
| StarcoderdataPython |
3578573 | <filename>todo_project/todo/views.py
# Function based view
import json
from django.http import HttpResponse
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
import datetime
from todo import utils
from todo.models import TaskList
from todo.models import Event
from todo.models import Task
from todo.forms import TaskListForm
from todo.forms import TaskForm
from todo.forms import EventForm
def task_delete(request, id):
    """Confirm (GET) and perform (POST) deletion of a single task."""
    task = get_object_or_404(Task, pk=id)
    if request.method == 'POST':
        task.delete()
        return HttpResponseRedirect(reverse('todo.views.task_list'))
    # GET: show the confirmation page for this task.
    template_name = 'task_delete_confirm.html'
    context = RequestContext(request)
    return render_to_response(template_name, {'task': task},
                              context_instance=context)
def task_update(request, id):
    """Update an existing task from POSTed form data.

    Always redirects back to the task list; an invalid form leaves the task
    unchanged (preserving the original behaviour).
    """
    task = get_object_or_404(Task, pk=id)
    if request.method == 'POST':
        form = TaskForm(request.POST or None)
        if form.is_valid():
            start_date = form.data.get('start_date')
            if start_date:
                # Fix: %m is month-of-year; the original '%d/%M/%Y' parsed
                # minutes and disagreed with due_date's format below.
                start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
            else:
                start_date = None
            due_date = form.data.get('due_date')
            if due_date:
                due_date = datetime.datetime.strptime(due_date, '%d/%m/%Y')
            else:
                due_date = None
            completed = form.data.get('completed')
            if not completed:
                # Unchecked checkboxes are absent from POST data.
                completed = False
            task.title = form.data.get('title')
            task.start_date = start_date
            task.due_date = due_date
            task.completed = completed
            task.todo_list_id = form.data.get('todo_list')
            task.priority = form.data.get('priority')
            task.save()
            return HttpResponseRedirect(reverse('todo.views.task_list'))
    return HttpResponseRedirect(reverse('todo.views.task_list'))
def task_list(request):
    """List every task list with its tasks and completion statistics."""
    task_listing = []
    for todo_list in TaskList.objects.all():
        tasks = []
        for task in todo_list.task_set.all():
            task_map = task.__dict__
            # Fix: %m (month) to match the '%d/%m/%Y' format the forms parse;
            # the original '%d/%M/%Y' rendered minutes.  Also guard against
            # tasks saved without a start date (task_create allows None,
            # which previously raised AttributeError here).
            if task_map['start_date']:
                task_map['start_date'] = task_map['start_date'].strftime('%d/%m/%Y')
            task_map['form'] = TaskForm(initial=task_map)
            tasks.append(task_map)
        task_listing.append({
            'list_object': todo_list,
            'list_tasks': tasks,
            'item_count': todo_list.num_tasks(),
            'items_complete': todo_list.num_tasks_completed(),
            'percent_complete': todo_list.percent_completed(),
        })
    return render_to_response('tasks.html', {'task_listing': task_listing},
                              context_instance=RequestContext(request))
def create_event(request):
    """Create new event.

    NOTE(review): this view is an unfinished stub -- it never returns an
    HttpResponse, which Django treats as an error.  Either finish it or route
    event creation through events() below.  The leftover ipdb breakpoint and
    debug print have been removed.
    """
    if request.method == 'POST':
        # If the form has been submitted... (processing was never written)
        post_obj = request.POST
        # A form bound to the POST data
        # return render_to_response('events.html', {},
        #     context_instance=RequestContext(request))
def events(request):
    """Event page: show the event form; save a new Event on a valid POST."""
    if request.method == 'POST':
        form = EventForm(request.POST)
        if form.is_valid():
            created_date = form.data.get('created_date')
            if created_date:
                created_date = datetime.datetime.strptime(created_date, '%Y/%m/%d')
            else:
                created_date = None
            event = Event(title=form.data.get('title'), created_date=created_date,
                          description=form.data.get("description"),
                          location=form.data.get("location"))
            event.save()
            # Successful save: present a fresh, unbound form again.
            form = EventForm()
        # Invalid POST: fall through with the bound form so errors render.
    else:
        # Fix: the original left `form` unbound on GET requests, raising
        # UnboundLocalError at the render call below.
        form = EventForm()
    return render_to_response('events.html', {'form': form},
                              context_instance=RequestContext(request))
def event_detail(request, id):
    """Return a JSON payload describing the event with the given id."""
    event = get_object_or_404(Event, pk=id)
    payload = {
        "success": 1,
        "result": [{
            "id": event.id,
            "title": event.title,
            "description": event.description,
            "created_date": event.created_date.strftime('%Y/%m/%d'),
            "location": event.location
        }]
    }
    return HttpResponse(json.dumps(payload),
                        content_type="application/json")
def event_list(request, *args):
    """Return all events as JSON in the shape the calendar widget expects."""
    results = []
    for event in Event.objects.all():
        # The widget wants start/end as Unix epoch milliseconds; events are
        # instantaneous, so both bounds share the creation timestamp.
        time_mili = utils.unix_time_millis(event.created_date.replace(tzinfo=None))
        results.append({
            "id": event.id,
            "title": event.title,
            "url": "#",
            "class": "event-special",
            "start": time_mili,
            "end": time_mili
        })
    event_list_data = {
        "success": 1,
        "result": results}
    return HttpResponse(json.dumps(event_list_data), content_type="application/json")
def task_list_create(request):
    """Create a new task list (GET shows the form, valid POST saves it)."""
    if request.method == 'POST':
        # If the form has been submitted...
        form = TaskListForm(request.POST)
        if form.is_valid():
            t = TaskList(title=form.data.get('title'))
            t.save()
            return HttpResponseRedirect(reverse('todo.views.task_list'))
        # Invalid form: fall through and re-render it with its errors bound.
    else:
        # An unbound form for the initial GET.
        form = TaskListForm()
    return render_to_response('new_list.html', {'form': form},
                              context_instance=RequestContext(request))
def task_create(request):
    """Create a new task (GET shows the form, valid POST saves and redirects)."""
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            start_date = form.data.get('start_date')
            if start_date:
                # Fix: %m is month-of-year; the original '%d/%M/%Y' parsed
                # minutes and was inconsistent with due_date below.
                start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
            else:
                start_date = None
            due_date = form.data.get('due_date')
            if due_date:
                due_date = datetime.datetime.strptime(due_date, '%d/%m/%Y')
            else:
                due_date = None
            completed = form.data.get('completed')
            if not completed:
                # Unchecked checkboxes are absent from POST data.
                completed = False
            t = Task(title=form.data.get('title'), start_date=start_date,
                     due_date=due_date, completed=completed,
                     todo_list_id=form.data.get('todo_list'),
                     priority=form.data.get('priority'))
            t.save()
            return HttpResponseRedirect(reverse('todo.views.task_list'))
        # Invalid form: fall through and re-render it with its errors bound.
    else:
        # An unbound form for the initial GET.
        form = TaskForm()
    return render_to_response('new_task.html', {'form': form},
                              context_instance=RequestContext(request))
def contact_us(request):
    """Display the contact input form for sending a message."""
    context = RequestContext(request)
    return render_to_response('contact.html', {}, context_instance=context)
| StarcoderdataPython |
1774376 | # Code is generated: DO NOT EDIT
# Copyright 2019 Machine Zone, Inc. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from kubespec import context
from kubespec import types
from kubespec.k8s import base
from kubespec.k8s import resource
from kubespec.k8s import v1 as k8sv1
from kubespec.k8s.meta import v1 as metav1
from typeguard import check_type, typechecked
from typing import Any, Dict, List
class ContainerMetrics(types.Object):
    """
    ContainerMetrics sets resource usage metrics of a container.
    """

    # NOTE: this module is code-generated (see the "DO NOT EDIT" header);
    # change the generator rather than hand-editing logic here.

    @context.scoped
    @typechecked
    def __init__(
        self,
        name: str = "",
        usage: Dict[k8sv1.ResourceName, "resource.Quantity"] = None,
    ):
        super().__init__()
        self.__name = name
        # Default to a fresh empty mapping instead of a shared mutable default.
        self.__usage = usage if usage is not None else {}

    @typechecked
    def _root(self) -> Dict[str, Any]:
        # Serialize to the dict form used for emission; check_type enforces
        # the declared annotations at runtime.
        v = super()._root()
        name = self.name()
        check_type("name", name, str)
        v["name"] = name
        usage = self.usage()
        check_type("usage", usage, Dict[k8sv1.ResourceName, "resource.Quantity"])
        v["usage"] = usage
        return v

    def name(self) -> str:
        """
        Container name corresponding to the one from pod.spec.containers.
        """
        return self.__name

    def usage(self) -> Dict[k8sv1.ResourceName, "resource.Quantity"]:
        """
        The memory usage is the memory working set.
        """
        return self.__usage
class NodeMetrics(base.TypedObject, base.MetadataObject):
    """
    NodeMetrics sets resource usage metrics of a node.
    """

    # NOTE: this module is code-generated (see the "DO NOT EDIT" header);
    # change the generator rather than hand-editing logic here.

    @context.scoped
    @typechecked
    def __init__(
        self,
        name: str = None,
        labels: Dict[str, str] = None,
        annotations: Dict[str, str] = None,
        timestamp: "base.Time" = None,
        window: "base.Duration" = None,
        usage: Dict[k8sv1.ResourceName, "resource.Quantity"] = None,
    ):
        super().__init__(
            api_version="metrics.k8s.io/v1alpha1",
            kind="NodeMetrics",
            **({"name": name} if name is not None else {}),
            **({"labels": labels} if labels is not None else {}),
            **({"annotations": annotations} if annotations is not None else {}),
        )
        self.__timestamp = timestamp
        # Default to a zero Duration / empty dict rather than sharing mutable
        # defaults between instances.
        self.__window = window if window is not None else metav1.Duration()
        self.__usage = usage if usage is not None else {}

    @typechecked
    def _root(self) -> Dict[str, Any]:
        # Serialize to the dict form used for emission; check_type enforces
        # the declared annotations at runtime.
        v = super()._root()
        timestamp = self.timestamp()
        check_type("timestamp", timestamp, "base.Time")
        v["timestamp"] = timestamp
        window = self.window()
        check_type("window", window, "base.Duration")
        v["window"] = window
        usage = self.usage()
        check_type("usage", usage, Dict[k8sv1.ResourceName, "resource.Quantity"])
        v["usage"] = usage
        return v

    def timestamp(self) -> "base.Time":
        """
        The following fields define time interval from which metrics were
        collected from the interval [Timestamp-Window, Timestamp].
        """
        return self.__timestamp

    def window(self) -> "base.Duration":
        # Length of the collection window ending at timestamp().
        return self.__window

    def usage(self) -> Dict[k8sv1.ResourceName, "resource.Quantity"]:
        """
        The memory usage is the memory working set.
        """
        return self.__usage
class PodMetrics(base.TypedObject, base.NamespacedMetadataObject):
    """
    PodMetrics sets resource usage metrics of a pod.
    """

    # NOTE: this module is code-generated (see the "DO NOT EDIT" header);
    # change the generator rather than hand-editing logic here.

    @context.scoped
    @typechecked
    def __init__(
        self,
        namespace: str = None,
        name: str = None,
        labels: Dict[str, str] = None,
        annotations: Dict[str, str] = None,
        timestamp: "base.Time" = None,
        window: "base.Duration" = None,
        containers: List["ContainerMetrics"] = None,
    ):
        super().__init__(
            api_version="metrics.k8s.io/v1alpha1",
            kind="PodMetrics",
            **({"namespace": namespace} if namespace is not None else {}),
            **({"name": name} if name is not None else {}),
            **({"labels": labels} if labels is not None else {}),
            **({"annotations": annotations} if annotations is not None else {}),
        )
        self.__timestamp = timestamp
        # Default to a zero Duration / empty list rather than sharing mutable
        # defaults between instances.
        self.__window = window if window is not None else metav1.Duration()
        self.__containers = containers if containers is not None else []

    @typechecked
    def _root(self) -> Dict[str, Any]:
        # Serialize to the dict form used for emission; check_type enforces
        # the declared annotations at runtime.
        v = super()._root()
        timestamp = self.timestamp()
        check_type("timestamp", timestamp, "base.Time")
        v["timestamp"] = timestamp
        window = self.window()
        check_type("window", window, "base.Duration")
        v["window"] = window
        containers = self.containers()
        check_type("containers", containers, List["ContainerMetrics"])
        v["containers"] = containers
        return v

    def timestamp(self) -> "base.Time":
        """
        The following fields define time interval from which metrics were
        collected from the interval [Timestamp-Window, Timestamp].
        """
        return self.__timestamp

    def window(self) -> "base.Duration":
        # Length of the collection window ending at timestamp().
        return self.__window

    def containers(self) -> List["ContainerMetrics"]:
        """
        Metrics for all containers are collected within the same time window.
        """
        return self.__containers
| StarcoderdataPython |
6402897 | <reponame>fossabot/tujian_python<filename>setup.py
# -*- coding: utf-8 -*-
import setuptools
# Pull the PyPI long description straight from the project README.
with open("README.md", "r", encoding='UTF-8') as readme:
    long_description = readme.read()

setuptools.setup(
    name="PyTujian",
    version="0.1.25",
    author="gggxbbb",
    author_email="<EMAIL>",
    description="A simlpe tool for Tujian",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/gggxbbb/tujian_python",
    packages=setuptools.find_packages(),
    python_requires='>=3.5',
    install_requires=[
        'pytz>=2021.1',
        'tqdm>=4.61.2',
        'requests>=2.26.0',
        'rich>=11.0.0',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={
        'console_scripts': [
            # Installs a `PyTujian` console command pointing at the CLI main().
            'PyTujian = PyTujian.__main__:main'
        ]
    }
)
| StarcoderdataPython |
4975912 | import numpy as np
def make_obarray(klass, dtype):
    """Build an ndarray subclass that stores `klass` instances field-by-field.

    Each element of the array is one structured record; reading a single
    element reconstructs a `klass` instance from its fields, and assigning a
    `klass` instance decomposes it back into a record.

    Args:
        klass: class whose instances are stored; it must accept the dtype
            field names as keyword arguments and expose them as attributes.
        dtype: sequence of (name, type) pairs defining the structured dtype;
            each `type` is called to coerce the attribute on ingestion.

    Returns:
        The generated Obarray subclass of np.ndarray.
    """
    class Obarray(np.ndarray):
        def __new__(cls, obj):
            # Go through an object array first so arbitrarily nested
            # sequences of klass instances keep their shape.
            # Fix: `np.object` (a deprecated alias for the builtin `object`)
            # was removed in NumPy 1.24; use `object` directly.
            A = np.array(obj, dtype=object)
            N = np.empty(shape=A.shape, dtype=dtype)
            for idx in np.ndindex(A.shape):
                for name, type in dtype:
                    N[name][idx] = type(getattr(A[idx], name))
            return N.view(cls)

        def __getitem__(self, idx):
            V = np.ndarray.__getitem__(self, idx)
            if np.isscalar(V):
                # A single record: rebuild a klass instance from its fields.
                kwargs = {}
                for i, (name, type) in enumerate(dtype):
                    kwargs[name] = V[i]
                return klass(**kwargs)
            else:
                return V

        def __setitem__(self, idx, value):
            if isinstance(value, klass):
                # Decompose the instance into a field tuple for the record.
                value = tuple(getattr(value, name) for name, type in dtype)
            # FIXME: treat lists of lists and whatnot as arrays
            return np.ndarray.__setitem__(self, idx, value)

    return Obarray
| StarcoderdataPython |
12864047 | <filename>cpdb/popup/factories.py
import factory
from faker import Faker
from popup.models import Popup
fake = Faker()


class PopupFactory(factory.django.DjangoModelFactory):
    """Builds Popup instances populated with fake name/page/title/text."""

    class Meta:
        model = Popup

    # fake.word is itself a zero-argument callable, so it can be passed to
    # LazyFunction directly; text() needs its length bound, hence the lambda.
    name = factory.LazyFunction(fake.word)
    page = factory.LazyFunction(fake.word)
    title = factory.LazyFunction(fake.word)
    text = factory.LazyFunction(lambda: fake.text(512))
| StarcoderdataPython |
3568580 | # =============================================================================
# Author: <NAME> - https://github.com/mjenrungrot/
# FileName: 10500.py
# Description: UVa Online Judge - 10500
# =============================================================================
def f(boards, x, y, nn):
    """Flood-fill reveal starting at column x, row y (Minesweeper-style).

    Cell encoding (strings mutated in place):
      * a cell rewritten to "*0" has been visited by the fill;
      * each "*" appended to a neighbour marks it as revealed/"touched";
      * a cell whose string contains "0" is treated as having no adjacent
        mines, so the fill recurses through it and counts one movement in
        nn[0] (nn is a 1-element list used as a mutable counter).

    NOTE(review): only 4-directional neighbours are considered, and the start
    cell's original value is overwritten with "*0" -- presumably the problem
    guarantees the start cell is a "0"; confirm against the UVa 10500
    statement before changing.
    """
    boards[y][x] = "*0"
    # First pass: append a "*" reveal marker to every in-bounds neighbour.
    for (dy, dx) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
        newy = y + dy
        newx = x + dx
        if newy < 0 or newy >= len(boards):
            continue
        if newx < 0 or newx >= len(boards[y]):
            continue
        boards[newy][newx] = "{}*".format(boards[newy][newx])
    # Second pass: recurse into neighbours that are unvisited zeros.
    for (dy, dx) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
        newy = y + dy
        newx = x + dx
        if newy < 0 or newy >= len(boards):
            continue
        if newx < 0 or newx >= len(boards[y]):
            continue
        if boards[newy][newx][0] == "*":
            # A leading "*" only occurs on cells rewritten to "*0",
            # i.e. already-visited cells: skip them.
            continue
        if "0" in boards[newy][newx]:
            nn[0] += 1
            f(boards, newx, newy, nn)
    return
while True:
    # Each case starts with the grid dimensions; "0 0" terminates the input.
    rows, cols = list(map(int, input().split()))
    if rows == cols == 0:
        break
    # Start position is given 1-based as (row, column).
    start_row, start_col = list(map(int, input().split()))
    start_col -= 1
    start_row -= 1
    grid = []
    for _ in range(rows):
        grid.append(input().split())
    moves = [0]
    f(grid, start_col, start_row, moves)
    # Render the board: revealed cells show their value, hidden ones "?".
    border = "---".join("|" * (cols + 1))
    print("")
    print(border)
    for r in range(rows):
        line = "|"
        for c in range(cols):
            cell = grid[r][c]
            if "*" in cell:
                cell = cell.replace("*", "")
            else:
                cell = "?"
            line += " {} |".format(cell)
        print(line)
    print(border)
    print("")
    print("NUMBER OF MOVEMENTS: {}".format(moves[0]))
| StarcoderdataPython |
3458668 | <gh_stars>10-100
from build.management.commands.build_release_notes import Command as BuildReleaseNotes
class Command(BuildReleaseNotes):
    # Thin alias: re-exposes the shared build_release_notes management
    # command under this app's management namespace; no behaviour is added.
    pass
209961 | <reponame>jpkarlsberg/readux
from django.contrib import admin
from readux.collection.models import CollectionImage
class CollectionImageAdmin(admin.ModelAdmin):
    # Changelist columns: the owning collection's label plus the cover and
    # banner images with their thumbnails.
    list_display = ('collection_label', 'cover', 'cover_thumbnail',
                    'banner', 'banner_thumbnail')
# Register so CollectionImage is editable in the Django admin with this layout.
admin.site.register(CollectionImage, CollectionImageAdmin)
| StarcoderdataPython |
256395 | from django.apps import AppConfig
class SportsquadsConfig(AppConfig):
    # Use 64-bit auto-incrementing primary keys by default for this app's models.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted-path name of the app this configuration applies to.
    name = 'sportSquads'
| StarcoderdataPython |
91043 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from rango.models import Category, Page
from rango.forms import CategoryForm, PageForm
from rango.forms import UserForm, UserProfileForm
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from datetime import datetime
from rango.webhose_search import run_query
def index(request):
    """Render the homepage with the top five categories and pages.

    Also updates the session-based visit counter so the template can show it.
    """
    # Query the database for the 5 most-liked categories and most-viewed
    # pages (or all of them if there are fewer than 5).
    category_list = Category.objects.order_by('-likes')[:5]
    pages_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories': category_list, 'pages': pages_list}
    # Helper function handles the visit-count session cookies.
    visitor_cookie_handler(request)
    context_dict['visits'] = request.session['visits']
    # Render the response and send it back (debug prints removed).
    response = render(request, 'rango/index.html', context=context_dict)
    return response
def about(request):
    """Render the about page, updating the visit counter first."""
    context_dict = {'name': 'Roman'}
    # Keep the visit counter current so the template can display it.
    visitor_cookie_handler(request)
    context_dict['visits'] = request.session['visits']
    # Debug prints of method/user/response removed.
    response = render(request, 'rango/about.html', context=context_dict)
    return response
def show_category(request, category_name_slug):
    """Show one category and its pages, or a "no category" page if absent."""
    # Both keys default to None; the template shows the "no category"
    # message when they stay that way.
    context_dict = {'category': None, 'pages': None}
    try:
        # .get() returns one model instance or raises DoesNotExist.
        category = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        pass
    else:
        # filter() returns the (possibly empty) list of associated pages.
        context_dict['category'] = category
        context_dict['pages'] = Page.objects.filter(category=category)
    return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
    """Handle the add-category form: GET shows it, a valid POST saves it.

    On success the user is sent back to the index page (where the newest
    category appears); an invalid POST re-renders the bound form so its
    errors are displayed by the template.  Debug prints removed.
    """
    form = CategoryForm()
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return index(request)
        # Invalid form: fall through; the template renders form.errors.
    return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
    """Add a page to the category identified by slug.

    A valid POST saves the page and shows the category; otherwise the
    (possibly bound) form is re-rendered.  Debug print removed.
    """
    try:
        category = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        category = None
    form = PageForm()
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if category:
                page = form.save(commit=False)
                page.category = category
                page.views = 0
                page.save()
                return show_category(request, category_name_slug)
        # Invalid form (or no such category): fall through and re-render.
    context_dict = {'form': form, 'category': category}
    return render(request, 'rango/add_page.html', context_dict)
# def register(request):
# # A boolean value for telling the template
# # whether the registration was successful.
# # Set to False initially. Code changes value to # True when registration succeeds.
# registered = False
#
# # If it's a HTTP POST, we're interested in processing form data.
# if request.method == 'POST':
# # Attempt to grab information from the raw form information.
# # Note that we make use of both UserForm and UserProfileForm.
# user_form = UserForm(data=request.POST)
# profile_form = UserProfileForm(data=request.POST)
#
# # If the two forms are valid...
# if user_form.is_valid() and profile_form.is_valid():
# # Save the user's form data to the database.
# user = user_form.save()
#
# # Now we hash the password with the set_password method.
# # Once hashed, we can update the user object.
# user.set_password(<PASSWORD>)
# user.save()
#
# # Now sort out the UserProfile instance.
# # Since we need to set the user attribute ourselves,
# # we set commit=False. This delays saving the model
# # until we're ready to avoid integrity problems.
# profile = profile_form.save(commit=False)
# profile.user = user
# # Did the user provide a profile picture?
# # If so, we need to get it from the input form and #put it in the UserProfile model.
# if 'picture' in request.FILES:
# profile.picture = request.FILES['picture']
#
# # Now we save the UserProfile model instance.
# profile.save()
# # Update our variable to indicate that the template
# # registration was successful.
# registered = True
# else:
# # Invalid form or forms - mistakes or something else?
# # Print problems to the terminal.
# print(user_form.errors, profile_form.errors)
# else:
# # Not a HTTP POST, so we render our form using two ModelForm instances.
# # These forms will be blank, ready for user input.
# user_form = UserForm()
# profile_form = UserProfileForm()
#
# # Render the template depending on the context.
# return render(request, 'rango/register.html',
# {'user_form': user_form,
# 'profile_form': profile_form,
# 'registered': registered})
# def user_login(request):
# # If the request is a HTTP POST, try to pull out the relevant information.
# if request.method == 'POST':
# # Gather the username and password provided by the user.
# # This information is obtained from the login form.
# # We use request.POST.get('<variable>') as opposed
# # to request.POST['<variable>'], because the
# # request.POST.get('<variable>') returns None if the
# # value does not exist, while request.POST['<variable>']
# # will raise a KeyError exception.
# username = request.POST.get('username')
# password = request.POST.get('password')
# # Use Django's machinery to attempt to see if the username/password
# # combination is valid - a User object is returned if it is.
# user = authenticate(username=username, password=password)
#
# # If we have a User object, the details are correct.
# # If None (Python's way of representing the absence of a value), no user
# # with matching credentials was found.
# if user:
# # Is the account active? It could have been disabled.
# if user.is_active:
# # If the account is valid and active, we can log the user in.
# # We'll send the user back to the homepage.
# login(request, user)
# return HttpResponseRedirect(reverse('index'))
# else:
# # An inactive account was used - no logging in!
# return HttpResponse("Your Rango account is disabled.")
# else:
# # Does the username exist in DB?
# if User.objects.filter(username=username).exists():
# return HttpResponse("Your password is incorrect")
# else:
# return HttpResponse("{0} doesn't exist".format(username))
# # The request is not a HTTP POST, so display the login form.
# # This scenario would most likely be a HTTP GET.
# else:
# # No context variables to pass to the template system, hence the
# # blank dictionary object...
# return render(request, 'rango/login.html', {})
@login_required
def restricted(request):
    """Page visible only to logged-in users (enforced by @login_required)."""
    context_dict = {}
    return render(request, 'rango/restricted.html', context_dict)
# Use the login_required() decorator to ensure only those logged in can # access the view.
# @login_required
# def user_logout(request):
# # Since we know the user is logged in, we can now just log them out.
# logout(request)
# # Take the user back to the homepage.
# return HttpResponseRedirect(reverse('index'))
def get_server_side_cookie(request, cookie, default_val=None):
    """Return the session value stored under *cookie*.

    Falls back to *default_val* when the key is missing -- note that any
    falsy stored value ('', 0, None) is also replaced by the default.
    Debug print removed.
    """
    val = request.session.get(cookie)
    if not val:
        val = default_val
    return val
def visitor_cookie_handler(request):
    """Maintain the session-based visit counter.

    Increments 'visits' at most once per day: if more than a day has passed
    since 'last_visit', the count goes up and the timestamp is refreshed;
    otherwise both values are left as they were.
    """
    # Default to 1: this request counts as the first visit.
    visits = int(get_server_side_cookie(request, 'visits', '1'))
    last_visit_cookie = get_server_side_cookie(request,
                                               'last_visit',
                                               str(datetime.now()))
    # str(datetime) looks like 'YYYY-MM-DD HH:MM:SS.ffffff'; strip the
    # 7-character microseconds suffix before parsing.
    last_visit_time = datetime.strptime(last_visit_cookie[:-7],
                                        '%Y-%m-%d %H:%M:%S')
    # If it's been more than a day since the last visit...
    # (Fix: the original tested `.seconds > 0`, which fired on essentially
    # every request, contradicting the stated once-per-day intent.)
    if (datetime.now() - last_visit_time).days > 0:
        visits = visits + 1
        # Update the last visit cookie now that we have updated the count.
        request.session['last_visit'] = str(datetime.now())
    else:
        # Same day: keep the previous timestamp and count.
        # (Fix: the original also reset visits to 1 here, wiping the tally.)
        request.session['last_visit'] = last_visit_cookie
    # Update/set the visits cookie.
    request.session['visits'] = visits
def search(request):
    """Run a Webhose search for the POSTed query and render the results.

    An empty or missing query renders an empty result list.  Debug prints
    removed.
    """
    query = ''
    result_list = []
    if request.method == 'POST':
        query = request.POST['query'].strip()
        if query:
            # Run our Webhose search function to get the results list.
            result_list = run_query(query)
    context_dict = {'result_list': result_list, 'query': query}
    return render(request, 'rango/search.html', context_dict)
277611 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
class Tests:
    # Each attribute is a (pass_message, fail_message) tuple consumed by
    # Report.result / Report.critical_result in the test function below.
    look_modification_creation = (
        "Look Modification Entity successfully created",
        "P0: Look Modification Entity failed to be created")
    look_modification_component = (
        "Entity has a Look Modification component",
        "P0: Entity failed to find Look Modification component")
    look_modification_component_removal = (
        "Look Modification component successfully removed",
        "P1: Look Modification component failed to be removed")
    removal_undo = (
        "UNDO Look Modification component removal success",
        "P1: UNDO Look Modification component removal failed")
    look_modification_disabled = (
        "Look Modification component disabled",
        "P0: Look Modification component was not disabled")
    postfx_layer_component = (
        "Entity has a PostFX Layer component",
        "P0: Entity did not have an PostFX Layer component")
    look_modification_enabled = (
        "Look Modification component enabled",
        "P0: Look Modification component was not enabled")
    toggle_enable_parameter_on = (
        "Enable look modification parameter enabled",
        "P0: Enable look modification parameter was not enabled")
    toggle_enable_parameter_off = (
        "Enable look modification parameter disabled",
        "P1: Enable look modification parameter was not disabled")
    color_grading_lut_set = (
        "Entity has the Color Grading LUT set",
        "P0: Color Grading LUT failed to be set")
    lut_intensity_min_value = (
        "Lut Intensity set to minimum value",
        "P1: Lut Intensity failed to be set to minimum value")
    lut_intensity_max_value = (
        "Lut Intensity set to maximum value",
        "P1: Lut Intensity failed to be set to maximum value")
    lut_override_min_value = (
        "Lut Override set to minimum value",
        "P1: Lut Override failed to be set to minimum value")
    lut_override_max_value = (
        "Lut Override set to maximum value",
        "P1: Lut Override failed to be set to maximum value")
    enter_game_mode = (
        "Entered game mode",
        "P0: Failed to enter game mode")
    exit_game_mode = (
        "Exited game mode",
        "P0: Couldn't exit game mode")
    is_visible = (
        "Entity is visible",
        "P0: Entity was not visible")
    is_hidden = (
        "Entity is hidden",
        "P0: Entity was not hidden")
    entity_deleted = (
        "Entity deleted",
        "P0: Entity was not deleted")
    deletion_undo = (
        "UNDO deletion success",
        "P0: UNDO deletion failed")
    deletion_redo = (
        "REDO deletion success",
        "P0: REDO deletion failed")
def AtomEditorComponents_LookModification_AddedToEntity():
    """
    Summary:
    Tests the Look Modification component can be added to an entity and has the expected functionality.

    Test setup:
    - Wait for Editor idle loop.
    - Open the "Base" level.

    Expected Behavior:
    The component can be added, used in game mode, hidden/shown, deleted, and has accurate required components.
    Creation and deletion undo/redo should also work.

    Test Steps:
    1) Create an Look Modification entity with no components.
    2) Add Look Modification component to Look Modification entity.
    3) Remove the Look Modification component.
    4) Undo Look Modification component removal.
    5) Verify Look Modification component not enabled.
    6) Add PostFX Layer component since it is required by the Look Modification component.
    7) Verify Look Modification component is enabled.
    8) Toggle the "Enable look modification" parameter (default False).
    9) Add LUT asset to the Color Grading LUT parameter.
    10) Set the Shaper Type parameter (from atom_constants.py SHAPER_TYPE, default none)
    11) LUT Intensity (float range 0.0 to 1.0, default 1.0)
    12) LUT Override (float range 0.0 to 1.0, default 1.0)
    13) Enter/Exit game mode.
    14) Test IsHidden.
    15) Test IsVisible.
    16) Delete Look Modification entity.
    17) UNDO deletion.
    18) REDO deletion.
    19) Look for errors.

    :return: None
    """
    import os

    import azlmbr.legacy.general as general

    from editor_python_test_tools.asset_utils import Asset
    from editor_python_test_tools.editor_entity_utils import EditorEntity
    from editor_python_test_tools.utils import Report, Tracer, TestHelper
    from Atom.atom_utils.atom_constants import AtomComponentProperties, SHAPER_TYPE

    with Tracer() as error_tracer:
        # Test setup begins.
        # Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
        TestHelper.init_idle()
        TestHelper.open_level("Graphics", "base_empty")

        # Test steps begin.
        # 1. Create an Look Modification entity with no components.
        look_modification_entity = EditorEntity.create_editor_entity(AtomComponentProperties.look_modification())
        Report.critical_result(Tests.look_modification_creation, look_modification_entity.exists())

        # 2. Add Look Modification component to Look Modification entity.
        look_modification_component = look_modification_entity.add_component(
            AtomComponentProperties.look_modification())
        Report.critical_result(
            Tests.look_modification_component,
            look_modification_entity.has_component(AtomComponentProperties.look_modification()))

        # 3. Remove the Look Modification component.
        look_modification_component.remove()
        general.idle_wait_frames(1)
        Report.result(Tests.look_modification_component_removal,
                      not look_modification_entity.has_component(AtomComponentProperties.look_modification()))

        # 4. Undo Look Modification component removal.
        general.undo()
        general.idle_wait_frames(1)
        Report.result(Tests.removal_undo,
                      look_modification_entity.has_component(AtomComponentProperties.look_modification()))

        # 5. Verify Look Modification component not enabled.
        # (It requires a PostFX Layer component, which is not present yet.)
        Report.result(Tests.look_modification_disabled, not look_modification_component.is_enabled())

        # 6. Add PostFX Layer component since it is required by the Look Modification component.
        look_modification_entity.add_component(AtomComponentProperties.postfx_layer())
        Report.result(
            Tests.postfx_layer_component,
            look_modification_entity.has_component(AtomComponentProperties.postfx_layer()))

        # 7. Verify Look Modification component is enabled.
        Report.result(Tests.look_modification_enabled, look_modification_component.is_enabled())

        # 8. Toggle the "Enable look modification" parameter (default False).
        # Toggle the "Enable look modification" parameter on.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('Enable look modification'), True)
        Report.result(Tests.toggle_enable_parameter_on,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('Enable look modification')) is True)
        # Toggle the "Enable look modification" parameter off.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('Enable look modification'), False)
        Report.result(Tests.toggle_enable_parameter_off,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('Enable look modification')) is False)
        # Toggle the "Enable look modification" parameter back on for testing.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('Enable look modification'), True)

        # 9. Set the Color Grading LUT asset on the Look Modification entity.
        color_grading_lut_path = os.path.join("ColorGrading", "TestData", "Photoshop", "inv-Log2-48nits",
                                              "test_3dl_32_lut.azasset")
        color_grading_lut_asset = Asset.find_asset_by_path(color_grading_lut_path, False)
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('Color Grading LUT'), color_grading_lut_asset.id)
        Report.result(
            Tests.color_grading_lut_set,
            color_grading_lut_asset.id == look_modification_component.get_component_property_value(
                AtomComponentProperties.look_modification('Color Grading LUT')))

        # Cycle through options in the Shaper Type parameter.
        for shaper_type in SHAPER_TYPE.keys():
            # 10. Set the Shaper Type parameter (from atom_constants.py SHAPER_TYPE, default none)
            look_modification_component.set_component_property_value(
                AtomComponentProperties.look_modification('Shaper Type'), SHAPER_TYPE[shaper_type])
            # Per-option (pass, fail) message tuple, built on the fly.
            test_shaper_type = (
                f"Set Shaper Type to: {shaper_type}",
                f"P1: Shaper Type failed to be set to {shaper_type} ")
            Report.result(test_shaper_type, look_modification_component.get_component_property_value(
                AtomComponentProperties.look_modification('Shaper Type')) == SHAPER_TYPE[shaper_type])

        # 11. LUT Intensity (float range 0.0 to 1.0, default 1.0)
        # Set LUT Intensity to its minimum value.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('LUT Intensity'), 0.0)
        Report.result(Tests.lut_intensity_min_value,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('LUT Intensity')) == 0.0)
        # Set LUT Intensity to its maximum value.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('LUT Intensity'), 1.0)
        Report.result(Tests.lut_intensity_max_value,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('LUT Intensity')) == 1.0)

        # 12. LUT Override (float range 0.0 to 1.0, default 1.0)
        # Set LUT Override to its minimum value.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('LUT Override'), 0.0)
        Report.result(Tests.lut_override_min_value,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('LUT Override')) == 0.0)
        # Set LUT Override to its maximum value.
        look_modification_component.set_component_property_value(
            AtomComponentProperties.look_modification('LUT Override'), 1.0)
        Report.result(Tests.lut_override_max_value,
                      look_modification_component.get_component_property_value(
                          AtomComponentProperties.look_modification('LUT Override')) == 1.0)

        # 13. Enter/Exit game mode.
        TestHelper.enter_game_mode(Tests.enter_game_mode)
        general.idle_wait_frames(1)
        TestHelper.exit_game_mode(Tests.exit_game_mode)

        # 14. Test IsHidden.
        look_modification_entity.set_visibility_state(False)
        Report.result(Tests.is_hidden, look_modification_entity.is_hidden() is True)

        # 15. Test IsVisible.
        look_modification_entity.set_visibility_state(True)
        general.idle_wait_frames(1)
        Report.result(Tests.is_visible, look_modification_entity.is_visible() is True)

        # 16. Delete Look Modification entity.
        look_modification_entity.delete()
        Report.result(Tests.entity_deleted, not look_modification_entity.exists())

        # 17. UNDO deletion.
        general.undo()
        general.idle_wait_frames(1)
        Report.result(Tests.deletion_undo, look_modification_entity.exists())

        # 18. REDO deletion.
        general.redo()
        general.idle_wait_frames(1)
        Report.result(Tests.deletion_redo, not look_modification_entity.exists())

        # 19. Look for errors and asserts.
        TestHelper.wait_for_condition(lambda: error_tracer.has_errors or error_tracer.has_asserts, 1.0)
        for error_info in error_tracer.errors:
            Report.info(f"Error: {error_info.filename} {error_info.function} | {error_info.message}")
        for assert_info in error_tracer.asserts:
            Report.info(f"Assert: {assert_info.filename} {assert_info.function} | {assert_info.message}")
if __name__ == "__main__":
    # Run this test through the editor's Report harness when executed directly.
    from editor_python_test_tools.utils import Report
    Report.start_test(AtomEditorComponents_LookModification_AddedToEntity)
| StarcoderdataPython |
8133505 | <reponame>RomaLash/async_dns<gh_stars>0
import urllib.parse
from async_dns import types
from ..util import ConnectionHandle
class Response:
    """Plain container for a parsed HTTP response."""

    def __init__(self, status, message, headers, data, url):
        self.status, self.message = status, message
        self.headers, self.data, self.url = headers, data, url

    def __repr__(self):
        # Keep the exact legacy formatting of the debug representation.
        return f'<Response status={self.status} message="{self.message}" url="{self.url}" data={self.data}>'
async def read_data(reader):
    """Parse one HTTP/1.1 response from ``reader``.

    Returns ``(status, reason, headers, body)`` where ``headers`` is a list
    of (name, value) pairs. The body length comes from Content-Length when
    present; 204 responses are read as zero-length.
    """
    status_line = await reader.readline()
    _proto, status_text, reason = status_line.strip().decode().split(' ', 2)
    status = int(status_text)
    # None means "read to EOF" unless a Content-Length header overrides it.
    content_length = 0 if status == 204 else None
    header_list = []
    while True:
        raw = await reader.readline()
        text = raw.strip().decode()
        if not text:
            break  # blank line ends the header section
        name, _sep, val = text.partition(':')
        val = val.strip()
        header_list.append((name, val))
        if name.lower() == 'content-length':
            content_length = int(val)
    body = await reader.read(content_length)
    return status, reason, header_list, body
async def send_request(url,
                       method='GET',
                       params=None,
                       data=None,
                       headers=None,
                       resolver=None):
    """Perform a minimal HTTP/1.1 request and return a parsed ``Response``.

    :param url: target URL; 'http://' is assumed when no scheme is present
    :param method: HTTP verb placed on the request line
    :param params: optional mapping appended to the query string
    :param data: optional request body (bytes); sets Content-Length
    :param headers: optional extra headers merged over the defaults
    :param resolver: optional async DNS resolver used to resolve the host
    :return: Response holding status, message, headers, body and final url
    """
    if '://' not in url:
        url = 'http://' + url
    if params:
        url += '&' if '?' in url else '?'
        qs = urllib.parse.urlencode(params)
        url += qs
    res = urllib.parse.urlparse(url)
    kw = {}  # NOTE(review): built but never used after this point
    if res.port: kw['port'] = res.port
    path = res.path or '/'
    if res.query: path += '?' + res.query
    ssl = res.scheme == 'https'
    host = res.hostname
    if resolver is not None:
        # Resolve the hostname ourselves; fall back to an A/AAAA record.
        msg = await resolver.query(host)
        host = msg.get_record((types.A, types.AAAA))
        assert host, 'DNS lookup failed'
    async with ConnectionHandle(host, res.port, ssl, res.hostname) as conn:
        reader = conn.reader
        writer = conn.writer
        # Request line, then headers, then a blank line, then the body.
        writer.write(f'{method} {path} HTTP/1.1\r\n'.encode())
        merged_headers = {
            'Host': res.hostname,
        }
        if headers: merged_headers.update(headers)
        if data:
            merged_headers['Content-Length'] = len(data)
        for key, value in merged_headers.items():
            writer.write(f'{key}: {value}\r\n'.encode())
        writer.write(b'\r\n')
        if data:
            writer.write(data)
        await writer.drain()
        status, message, headers, data = await read_data(reader)
        resp = Response(status, message, headers, data, url)
        return resp
if __name__ == '__main__':
    # Manual smoke test: fetches a live site, so it requires network access.
    async def main():
        print(await send_request('https://www.baidu.com'))
    import asyncio
    asyncio.run(main())
| StarcoderdataPython |
9738727 | <reponame>pbloem/gated-rgcn<gh_stars>1-10
from time import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Wedge, Polygon, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
from matplotlib.axes import Axes
import os, errno, random, time, string, sys
import torch
from torch import nn
import torch.nn.functional as F
from torch import FloatTensor, LongTensor
from torch.autograd import Variable
from torch.utils.data import sampler, dataloader
import torchvision
from collections import OrderedDict
import subprocess
import numpy as np
import math
# Stack of start times backing the tic()/toc() timing helpers below.
tics = []
DEBUG = False
# Absolute directory containing this module.
DIR = os.path.dirname(os.path.realpath(__file__))
def kl_loss(zmean, zlsig):
    """Per-element KL divergence KL(N(zmean, exp(zlsig)) || N(0, I)).

    :param zmean: (b, l) means of the diagonal gaussians
    :param zlsig: (b, l) log-variances
    :return: (b,) tensor of KL values
    """
    b, l = zmean.size()
    per_dim = zlsig.exp() - zlsig + zmean.pow(2) - 1
    kl = 0.5 * per_dim.sum(dim=1)
    assert kl.size() == (b,)
    return kl
def kl_batch(batch):
    """
    Computes the KL loss between the standard normal MVN and a diagonal MVN
    fitted to the batch.

    fix: the original built the per-dimension variance with
    ``batch.view(d, 1, b)``, which *reshapes* the (b, d) data instead of
    transposing it, pairing up unrelated elements. The diagonal covariance
    is simply the column-wise mean of squared deviations.

    :param batch: (b, d) tensor of samples
    :return: scalar KL(N(mean, diag(var)) || N(0, I))
    """
    b, d = batch.size()
    mean = batch.mean(dim=0, keepdim=True)
    batch = batch - mean
    # unbiased per-dimension variance of the centered batch
    diacov = batch.pow(2).sum(dim=0) / (b - 1)
    logvar = torch.log(diacov)
    return -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
def vae_sample(zmean, zlsig, eps=None):
    """Reparameterized VAE sample: z = mu + eps * sigma.

    :param zmean: (b, l) means
    :param zlsig: (b, l) log-variances (sigma = exp(0.5 * zlsig))
    :param eps: optional (b, l) noise; drawn from N(0, I) when None
    """
    b, l = zmean.size()
    if eps is None:
        device = 'cuda' if zmean.is_cuda else 'cpu'
        eps = Variable(torch.randn(b, l, device=device))
    return zmean + eps * (zlsig * 0.5).exp()
def tic():
    # Push the current wall-clock time onto the module stack; pair with toc().
    tics.append(time())
def toc():
    # Pop the most recent tic() and return the elapsed seconds,
    # or None when tic() was never called.
    if len(tics)==0:
        return None
    else:
        return time()-tics.pop()
def clean(axes=None):
    """Hide all four spines of ``axes`` (the current axes when None)."""
    axes = plt.gca() if axes is None else axes
    for side in ("right", "top", "bottom", "left"):
        axes.spines[side].set_visible(False)
def basic(axes=None):
    """Show only the bottom/left spines of ``axes``, with matching ticks."""
    axes = plt.gca() if axes is None else axes
    for side, visible in (("right", False), ("top", False),
                          ("bottom", True), ("left", True)):
        axes.spines[side].set_visible(visible)
    axes.get_xaxis().set_tick_params(which='both', top='off', bottom='on', labelbottom='on')
    axes.get_yaxis().set_tick_params(which='both', left='on', right='off')
def plot(means, sigmas, values, shape=None, axes=None, flip_y=None, alpha_global=1.0):
    """
    Scatter-plot 2D gaussian "points" as ellipses colored by value.
    Only batch element 0 is drawn.

    :param means: (b, n, 2) centers -- index 0 is treated as y, index 1 as x
    :param sigmas: (b, n, 2) extents per point (assumed layout, TODO confirm)
    :param values: (b, n) values; tanh-squashed onto [-1, 1] for the colormap
    :param shape: optional grid shape; draws gray markers at integer tuples
    :param axes: matplotlib axes (defaults to the current axes)
    :param flip_y: If not None, interpreted as the max y value. y values in the
        scatterplot are flipped so that the max is equal to zero and vice versa.
    :param alpha_global: multiplier applied to all alpha values
    :return: None (draws onto ``axes``)
    """
    b, n, d = means.size()
    # drop to numpy, keeping only batch element 0
    means = means.data[0, :,:].cpu().numpy()
    sigmas = sigmas.data[0, :].cpu().numpy()
    values = nn.functional.tanh(values).data[0, :].cpu().numpy()
    if flip_y is not None:
        means[:, 0] = flip_y - means[:, 0]
    norm = mpl.colors.Normalize(vmin=-1.0, vmax=1.0)
    cmap = mpl.cm.RdYlBu
    map = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    if axes is None:
        axes = plt.gca()
    colors = []
    for i in range(n):
        color = map.to_rgba(values[i])
        # wider gaussians are drawn more transparently
        alpha = min(0.8, max(0.05, ((sigmas[i, 0] * sigmas[i, 0])+1.0)**-2)) * alpha_global
        axes.add_patch(Ellipse((means[i, 1], means[i, 0]), width=sigmas[i,1], height=sigmas[i,0], color=color, alpha=alpha, linewidth=0))
        colors.append(color)
    axes.scatter(means[:, 1], means[:, 0], s=5, c=colors, zorder=100, linewidth=0, edgecolor='k', alpha=alpha_global)
    if shape is not None:
        m = max(shape)
        step = 1 if m < 100 else m//25
        # gray points for the integer index tuples
        x, y = np.mgrid[0:shape[0]:step, 0:shape[1]:step]
        axes.scatter(x.ravel(), y.ravel(), c='k', s=5, marker='D', zorder=-100, linewidth=0, alpha=0.1* alpha_global)
    axes.spines['right'].set_visible(False)
    axes.spines['top'].set_visible(False)
    axes.spines['bottom'].set_visible(False)
    axes.spines['left'].set_visible(False)
def plot1d(means, sigmas, values, shape=None, axes=None):
    """
    Draw 1D gaussian points as horizontal bars colored by value.

    NOTE(review): this definition is immediately shadowed by a nearly
    identical second ``plot1d`` further down (only the marker size differs),
    so this version is never callable. Consider deleting one of the two.

    :param means: (n, 2) bar centers
    :param sigmas: (n, >=1) tensor; column 0 is used as the bar width
    :param values: (n,) values; tanh-squashed onto [-1, 1] for the colormap
    :param shape: optional grid shape; draws gray markers at integer tuples
    :param axes: matplotlib axes (defaults to the current axes)
    """
    h = 0.1
    n, d = means.size()
    means = means.cpu().numpy()
    sigmas = sigmas.cpu().numpy()
    values = nn.functional.tanh(values).data.cpu().numpy()
    norm = mpl.colors.Normalize(vmin=-1.0, vmax=1.0)
    cmap = mpl.cm.RdYlBu
    map = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    if axes is None:
        axes = plt.gca()
    colors = []
    for i in range(n):
        color = map.to_rgba(values[i])
        alpha = 0.7  # max(0.05, (sigmas[i, 0]+1.0)**-1)
        axes.add_patch(Rectangle(xy=(means[i, 1] - sigmas[i, 0]*0.5, means[i, 0] - h*0.5), width=sigmas[i,0] , height=h, color=color, alpha=alpha, linewidth=0))
        colors.append(color)
    axes.scatter(means[:, 1], means[:, 0], c=colors, zorder=100, linewidth=0, s=5)
    if shape is not None:
        m = max(shape)
        step = 1 if m < 100 else m//25
        # gray points for the integer index tuples
        x, y = np.mgrid[0:shape[0]:step, 0:shape[1]:step]
        axes.scatter(x.ravel(), y.ravel(), c='k', s=5, marker='D', zorder=-100, linewidth=0, alpha=0.1)
    axes.spines['right'].set_visible(False)
    axes.spines['top'].set_visible(False)
    axes.spines['bottom'].set_visible(False)
    axes.spines['left'].set_visible(False)
def plot1d(means, sigmas, values, shape=None, axes=None):
    """
    Draw 1D gaussian points as horizontal bars colored by value.

    NOTE(review): duplicate of the ``plot1d`` defined just above (this one
    uses marker size 3 instead of 5); being the later definition, it wins.

    :param means: (n, 2) bar centers
    :param sigmas: (n, >=1) tensor; column 0 is used as the bar width
    :param values: (n,) values; tanh-squashed onto [-1, 1] for the colormap
    :param shape: optional grid shape; draws gray markers at integer tuples
    :param axes: matplotlib axes (defaults to the current axes)
    """
    h = 0.1
    n, d = means.size()
    means = means.cpu().numpy()
    sigmas = sigmas.cpu().numpy()
    values = nn.functional.tanh(values).data.cpu().numpy()
    norm = mpl.colors.Normalize(vmin=-1.0, vmax=1.0)
    cmap = mpl.cm.RdYlBu
    map = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    if axes is None:
        axes = plt.gca()
    colors = []
    for i in range(n):
        color = map.to_rgba(values[i])
        alpha = 0.7  # max(0.05, (sigmas[i, 0]+1.0)**-1)
        axes.add_patch(Rectangle(xy=(means[i, 1] - sigmas[i, 0]*0.5, means[i, 0] - h*0.5), width=sigmas[i,0] , height=h, color=color, alpha=alpha, linewidth=0))
        colors.append(color)
    axes.scatter(means[:, 1], means[:, 0], c=colors, zorder=100, linewidth=0, s=3)
    if shape is not None:
        m = max(shape)
        step = 1 if m < 100 else m//25
        # gray points for the integer index tuples
        x, y = np.mgrid[0:shape[0]:step, 0:shape[1]:step]
        axes.scatter(x.ravel(), y.ravel(), c='k', s=5, marker='D', zorder=-100, linewidth=0, alpha=0.1)
    axes.spines['right'].set_visible(False)
    axes.spines['top'].set_visible(False)
    axes.spines['bottom'].set_visible(False)
    axes.spines['left'].set_visible(False)
def plot1dvert(means, sigmas, values, shape=None, axes=None):
    """
    Vertical variant of ``plot1d``: draws 1D gaussian points as vertical bars
    colored by value.

    :param means: (n, 2) bar centers
    :param sigmas: (n, >=1) tensor; column 0 is used as the bar height
    :param values: (n,) values; tanh-squashed onto [-1, 1] for the colormap
    :param shape: optional grid shape; draws gray markers at integer tuples
    :param axes: matplotlib axes (defaults to the current axes)
    """
    h = 0.1
    n, d = means.size()
    means = means.cpu().numpy()
    sigmas = sigmas.cpu().numpy()
    values = nn.functional.tanh(values).data.cpu().numpy()
    norm = mpl.colors.Normalize(vmin=-1.0, vmax=1.0)
    cmap = mpl.cm.RdYlBu
    map = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    if axes is None:
        axes = plt.gca()
    colors = []
    for i in range(n):
        color = map.to_rgba(values[i])
        alpha = 0.7  # max(0.05, (sigmas[i, 0]+1.0)**-1)
        axes.add_patch(Rectangle(xy=(means[i, 1] - h*0.5, means[i, 0] - sigmas[i, 0]*0.5), width=h , height=sigmas[i,0], color=color, alpha=alpha, linewidth=0))
        colors.append(color)
    axes.scatter(means[:, 1], means[:, 0], c=colors, zorder=100, linewidth=0, s=3)
    if shape is not None:
        m = max(shape)
        step = 1 if m < 100 else m//25
        # gray points for the integer index tuples
        x, y = np.mgrid[0:shape[0]:step, 0:shape[1]:step]
        axes.scatter(x.ravel(), y.ravel(), c='k', s=5, marker='D', zorder=-100, linewidth=0, alpha=0.1)
    axes.spines['right'].set_visible(False)
    axes.spines['top'].set_visible(False)
    axes.spines['bottom'].set_visible(False)
    axes.spines['left'].set_visible(False)
def norm(x):
    """
    Normalize a tensor to a tensor with unit norm (treating first dim as
    batch dim).

    fix: dropped a no-op ``n.expand_as(x)`` whose result was discarded;
    broadcasting handles the division.

    :param x: tensor with a leading batch dimension
    :return: x with each batch element scaled to unit L2 norm
    """
    b = x.size()[0]
    n = torch.norm(x.view(b, -1), p=2, dim=1)
    # restore singleton dims so the division broadcasts over trailing dims
    while len(n.size()) < len(x.size()):
        n = n.unsqueeze(1)
    return x / n
def makedirs(directory):
    """Create ``directory`` (and any missing parents), succeeding silently
    when it already exists.

    fix: replaced the manual EEXIST check with ``exist_ok=True``; unlike the
    original, this still raises when the path exists but is not a directory,
    instead of hiding that error.
    """
    os.makedirs(directory, exist_ok=True)
def sample(collection, k, required):
    """
    Sample, without replacement, k elements from 'collection', ensuring that
    'required' are always contained in the sample (but never twice).

    Only correct when collection and required contain unique elements.

    :param collection: pool to draw from
    :param k: total sample size
    :param required: elements that must appear in the result
    :return: list of k distinct elements containing all of ``required``
    """
    if k + len(required) > len(collection):
        # rejection sampling: start from everything and evict non-required
        # elements until only k remain
        picked = list(collection)
        while len(picked) > k:
            victim = random.choice(range(len(picked)))
            if picked[victim] not in required:
                del picked[victim]
        return picked
    required = set(required)
    # oversample, drop any required elements that happened to be drawn,
    # trim to size, then append the required elements exactly once
    picked = list(set(random.sample(collection, k + len(required))) - required)
    while len(picked) > k - len(required):
        del picked[random.choice(range(len(picked)))]
    picked.extend(required)
    return picked
#
# if __name__ == '__main__':
#
# print('.')
# print(sample(range(6), 5, [0, 1, 2]))
# print('.')
# print(sample(range(100), 6, [0, 1, 2]))
# print(sample(range(100), 6, [0, 1, 2]))
# print(sample(range(100), 6, [0, 1, 2]))
# print('.')
def sparsemult(use_cuda):
    """Return the sparse-matmul autograd function for the target device."""
    if use_cuda:
        return SparseMultGPU.apply
    return SparseMultCPU.apply
class SparseMultCPU(torch.autograd.Function):
    """
    Sparse matrix multiplication with gradients over the value-vector
    Does not work with batch dim.
    """
    @staticmethod
    def forward(ctx, indices, values, size, vector):
        # Build the sparse matrix from (2, k) indices and (k,) values,
        # then multiply by the dense vector (as a column).
        matrix = torch.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
        ctx.indices, ctx.matrix, ctx.vector = indices, matrix, vector
        return torch.mm(matrix, vector.unsqueeze(1))
    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        output_select = grad_output.view(-1)[i_ixs]
        vector_select = ctx.vector.view(-1)[j_ixs]
        # d loss / d values[k] = grad_out[i_k] * vector[j_k]
        grad_values = output_select * vector_select
        grad_vector = torch.mm(ctx.matrix.t(), grad_output).t()
        return None, Variable(grad_values), None, Variable(grad_vector)
class SparseMultGPU(torch.autograd.Function):
    """
    Sparse matrix multiplication with gradients over the value-vector
    Does not work with batch dim.
    (CUDA twin of SparseMultCPU.)
    """
    @staticmethod
    def forward(ctx, indices, values, size, vector):
        matrix = torch.cuda.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
        ctx.indices, ctx.matrix, ctx.vector = indices, matrix, vector
        return torch.mm(matrix, vector.unsqueeze(1))
    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        output_select = grad_output.view(-1)[i_ixs]
        vector_select = ctx.vector.view(-1)[j_ixs]
        # d loss / d values[k] = grad_out[i_k] * vector[j_k]
        grad_values = output_select * vector_select
        grad_vector = torch.mm(ctx.matrix.t(), grad_output).t()
        return None, Variable(grad_values), None, Variable(grad_vector)
def nvidia_smi():
    """Return the raw stdout (bytes) of the ``nvidia-smi`` command."""
    return subprocess.check_output('nvidia-smi', shell=True)
def orth_loss(batch_size, x_size, model, use_cuda):
    """
    Measures how well ``model`` preserves pairwise inner products of random
    unit inputs (zero when the model is inner-product preserving).

    fix: removed a debug ``print`` from the loss computation.

    :param batch_size: number of random probe pairs
    :param x_size: shape of a single input (without the batch dim)
    :param model: module mapping inputs to outputs
    :param use_cuda: move the probes to the GPU when True
    :return: (loss, x1o, x2o) -- the scalar loss and the two probe batches
    """
    x_size = (batch_size,) + x_size
    x1o, x2o = torch.randn(x_size), torch.randn(x_size)
    # normalize to unit tensors
    x1o, x2o = norm(x1o), norm(x2o)
    if use_cuda:
        x1o, x2o = x1o.cuda(), x2o.cuda()
    x1o, x2o = Variable(x1o), Variable(x2o)
    y1 = model(x1o)
    y2 = model(x2o)
    # flatten to (b, 1, -1) so bmm computes per-pair inner products
    x1 = x1o.view(batch_size, 1, -1)
    x2 = x2o.view(batch_size, 1, -1)
    y1 = y1.view(batch_size, 1, -1)
    y2 = y2.view(batch_size, 1, -1)
    xnorm = torch.bmm(x1, x2.transpose(1, 2))
    ynorm = torch.bmm(y1, y2.transpose(1, 2))
    loss = torch.sum(torch.pow((xnorm - ynorm), 2)) / batch_size
    return loss, x1o, x2o
def bmultinomial(mat, num_samples=1, replacement=False):
    """
    Take multinomial samples from a batch of matrices with multinomial
    parameters on the rows.

    :param mat: (batches, rows, columns) tensor of (unnormalized) weights
    :param num_samples: samples to draw per row
    :param replacement: sample with replacement if True
    :return: ((batches, rows, num_samples) samples, flat (batches*rows, num_samples) samples)
    """
    batches, rows, columns = mat.size()
    flat = mat.view(1, -1, columns).squeeze(0)
    drawn = torch.multinomial(flat, num_samples, replacement)
    return drawn.view(batches, rows, num_samples), drawn
def bsoftmax(input):
    """Softmax over the last dimension of a (b, r, c) tensor.

    fix: pass ``dim`` explicitly. The original flattened the batch and relied
    on the deprecated implicit-dimension behaviour of ``F.softmax`` (which
    resolved to the last dim here) and emitted a deprecation warning.
    """
    b, r, c = input.size()  # also asserts the 3-D contract
    return nn.functional.softmax(input, dim=2)
def contains_nan(tensor):
    """Return a scalar bool tensor: does ``tensor`` hold any NaN entries?"""
    return torch.isnan(tensor).any()
#
# if __name__ == '__main__':
#
#
# i = torch.LongTensor([[0, 16, 1],
# [2, 0, 2]])
# v = torch.FloatTensor([1, 1, 1])
#
# matrix = torch.sparse.FloatTensor(i, v, torch.Size((16, 16)))
def od(lst):
    """Wrap a sequence into an OrderedDict keyed by stringified position."""
    return OrderedDict((str(i), item) for i, item in enumerate(lst))
class Lambda(nn.Module):
    """Wrap an arbitrary callable as an nn.Module."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class Debug(nn.Module):
    """Identity module that invokes a side-effect callable on its input
    (e.g. for printing shapes inside an nn.Sequential)."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        self.lambd(x)  # side effect only; the value is passed through
        return x
class Flatten(nn.Module):
    """Collapse every dimension except the batch dimension."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def flatten(input):
    """Collapse every dimension except the batch dimension."""
    batch = input.size(0)
    return input.view(batch, -1)
class NoActivation(nn.Module):
    """Identity module: passes its input through unchanged."""

    def forward(self, input):
        return input
def prod(tuple):
    """Product of the elements of an iterable (1 for an empty iterable)."""
    out = 1
    for factor in tuple:
        out *= factor
    return out
def add_noise(input, std=0.1):
    """
    Return ``input`` plus Gaussian noise with the given standard deviation.

    Note: despite the original docstring's claim, this is NOT in-place --
    the input tensor is left untouched and a new tensor is returned.

    :param input: tensor to perturb
    :param std: standard deviation of the additive noise
    :return: input + noise
    """
    tensor_cls = torch.cuda.FloatTensor if input.is_cuda else FloatTensor
    noise = tensor_cls(input.size())
    noise.normal_(std=std)
    return input + noise
def corrupt_(input, prop=0.3):
    """
    Sets a random proportion of the input to zero, in place.

    fix: dropped an unused ``t0 = time.time()`` left over from profiling.

    :param input: tensor to corrupt (modified in place)
    :param prop: expected proportion of elements zeroed
    :return: None
    """
    FT = torch.cuda.FloatTensor if input.is_cuda else torch.FloatTensor
    mask = FT(input.size())
    mask.uniform_()
    # ceil(u - prop) is 1 with probability (1 - prop) and 0 otherwise
    mask.sub_(prop).ceil_()
    input.mul_(mask)
def rstring(n):
    """Random string of ``n`` uppercase ASCII letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=n))
def count_params(model):
    """Total number of scalar entries across all parameters of ``model``."""
    total = 0
    for p in model.parameters():
        # multiply out the shape of each parameter tensor
        n = 1
        for dim in p.size():
            n *= dim
        total += n
    return total
def logit(x):
    """Inverse sigmoid, log(x / (1 - x)), for a plain float or a tensor.

    fix: replaced the ``type(x) == float`` anti-pattern with ``isinstance``.
    """
    if isinstance(x, float):
        return math.log(x / (1 - x))
    return torch.log(x / (1 - x))
def inv(i):
    """Map ``i`` (roughly in [0, 27]) into logit space, first squeezing it
    into the open interval (0, 1) to keep the logit finite."""
    squeezed = (i / 27) * 0.9999 + 0.00005
    return logit(squeezed)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x) for a plain Python number."""
    return 1.0 / (1.0 + math.exp(-x))
class ChunkSampler(sampler.Sampler):
    """Samples ``num`` elements sequentially from offset ``start`` of a
    fixed, seeded permutation of ``range(total)``.

    initial source: https://github.com/pytorch/vision/issues/168

    Arguments:
        start: offset into the permutation where selection begins
        num: number of desired datapoints
        total: size of the underlying index range
        seed: seed for the private RNG that fixes the permutation
    """
    def __init__(self, start, num, total, seed=0):
        self.start = start
        self.num = num
        self.random = random.Random(seed)
        self.l = list(range(total))
        self.random.shuffle(self.l)

    def __iter__(self):
        window = self.l[self.start:self.start + self.num]
        return iter(window)

    def __len__(self):
        return self.num
def bmult(width, height, num_indices, batchsize, use_cuda):
    """
    Build a (batchsize, num_indices, 2) LongTensor whose entry (b, i) is
    (b * height, b * width) -- per-batch offsets for unrolled index tuples.

    :param width: matrix width (scales the second coordinate)
    :param height: matrix height (scales the first coordinate)
    :param num_indices: number of index tuples per batch element
    :param batchsize: number of batch elements
    :param use_cuda: allocate on the GPU when True
    """
    scale = torch.cuda.LongTensor([height, width]) if use_cuda else LongTensor([height, width])
    rows = torch.cuda.LongTensor(range(batchsize)) if use_cuda else LongTensor(range(batchsize))
    scale = scale[None, None, :].expand(batchsize, num_indices, 2)
    rows = rows[:, None, None].expand(batchsize, num_indices, 2)
    return rows * scale
def intlist(tensor):
"""
A slow and stupid way to turn a tensor into an iterable over ints
:param tensor:
:return:
"""
if type(tensor) is list:
return tensor
tensor = tensor.squeeze()
assert len(tensor.size()) == 1
s = tensor.size()[0]
l = [None] * s
for i in range(s):
l[i] = int(tensor[i])
return l
def totensor(dataset, batch_size=512, shuffle=True):
    """
    Takes a dataset and loads the whole thing into a tensor.

    NOTE(review): assumes dataset items are (tensor, ...) tuples (only index
    0 of each batch is kept) and that the dataset is non-empty -- ``result``
    is unbound when the loader yields no batches. Spawns two DataLoader
    worker processes.

    :param dataset: torch Dataset
    :param batch_size: loader batch size (affects speed only)
    :param shuffle: whether rows end up in shuffled order
    :return: single tensor stacking every item in the dataset
    """
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)
    index = 0
    for i, batch in enumerate(loader):
        batch = batch[0]
        if i == 0:
            # size the output on the first batch, replacing the batch dim
            # with the full dataset length
            size = list(batch.size())
            size[0] = len(dataset)
            result = torch.zeros(*size)
        result[index:index+batch.size(0)] = batch
        index += batch.size(0)
    return result
class Reshape(nn.Module):
    """Reshape each input to ``(batch,) + shape``, keeping the batch dim."""

    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, input):
        batch = input.size(0)
        return input.view((batch,) + self.shape)
def logsoftmax(indices, values, size, p=20, row=True, cuda=torch.cuda.is_available()):
    """
    Row or column log-softmaxes a sparse matrix (using logsumexp trick)

    NOTE(review): ``cuda`` defaults to the availability check evaluated at
    import time, not at call time. The "max" used for the logsumexp shift is
    actually a row p-norm (a smooth upper bound on the max) -- presumably
    intentional for differentiability; confirm.

    :param indices: (k, 2) index tuples of the nonzero entries
    :param values: (k,) values (relu'd before softmaxing)
    :param size: dimensions of the matrix
    :param p: order of the p-norm used as the smooth max
    :param row: softmax over rows if True, otherwise over columns
    :return: log-softmaxed values (same positions as ``values``)
    """
    epsilon = 0.00000001
    dv = 'cuda' if cuda else 'cpu'
    spm = sparsemult(cuda)
    relud = F.relu(values)
    # smooth per-row/column maximum, used to stabilize the exp below
    maxes = rowpnorm(indices, relud, size, p)
    mvalues = torch.exp(relud - maxes)
    if row:
        ones = torch.ones((size[1],), device=dv)
    else:
        ones = torch.ones((size[0],), device=dv)
    # transpose the matrix
    indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)
    sums = spm(indices.t(), mvalues, torch.tensor(size, device=dv), ones) # row/column sums
    # select the sums corresponding to each index
    sums = torch.index_select(sums, 0, indices[:, 0]).squeeze() + epsilon
    lse = torch.log(sums) + maxes
    return relud - lse
def rowpnorm(indices, values, size, p, row=True, cuda=torch.cuda.is_available()):
    """
    Row or column p-norms a sparse matrix.

    NOTE(review): ``cuda`` defaults to the availability check evaluated at
    import time, not at call time.

    :param indices: (k, 2) index tuples of the nonzero entries
    :param values: (k,) values
    :param size: dimensions of the matrix
    :param p: order of the norm
    :param row: norm over rows if True, otherwise over columns
    :return: per-entry p-norm of the row/column each entry belongs to
    """
    epsilon = 0.00000001
    dv = 'cuda' if cuda else 'cpu'
    spm = sparsemult(cuda)
    pvalues = torch.pow(values, p)
    if row:
        ones = torch.ones((size[1],), device=dv)
    else:
        ones = torch.ones((size[0],), device=dv)
    # transpose the matrix
    indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)
    sums = spm(indices.t(), pvalues, torch.tensor(size, device=dv), ones) # row/column sums
    # select the sums corresponding to each index
    sums = torch.index_select(sums, 0, indices[:, 0]).squeeze() + epsilon
    return torch.pow(sums, 1.0/p)
if __name__ == "__main__":
    # Ad-hoc smoke test for logsoftmax. Note it sits mid-module, so it runs
    # before the definitions further down when the file is executed directly.
    tind = torch.tensor([[0, 0],[0, 1], [4, 4], [4, 3]])
    tv = torch.tensor([0.0, 1.0, 0.0, 10.0])
    print(torch.exp(logsoftmax(tind, tv, (5, 5))))
    print(torch.exp(logsoftmax(tind, tv, (5, 5), row=False)))
def absmax(indices, values, size, row=True, cuda=torch.cuda.is_available()):
    """
    Row- or column-normalizes the absolute values of a sparse matrix.

    Accepts either batched ((b, k, 2)/(b, k)) or unbatched ((k, 2)/(k,))
    indices/values; the unbatched form is promoted to a singleton batch and
    squeezed again on the way out.

    NOTE: ``cuda`` defaults to the availability check at import time.
    """
    if len(indices.size()) == 2:
        # promote to a singleton batch, normalize, then drop the batch dim
        batched_indices = indices[None, :, :]
        batched_values = torch.abs(values[None, :])
        return normalize(batched_indices, batched_values, size, row=row, cuda=cuda).squeeze()
    return normalize(indices, torch.abs(values), size, row=row, cuda=cuda)
def normalize(indices, values, size, row=True, cuda=None, epsilon=0.00000001):
    """
    Row or column normalizes a sparse matrix, defined by the given indices and values. Expects a batch dimension.

    :param indices: (b, k, 2) LongTensor of index tuples
    :param values: (b, k) DoubleTensor of values
    :param size: dimensions of the matrix (no batch dim)
    :param row: If true, we normalize the rows, otherwise the columns
    :param cuda: force device; inferred from ``indices`` when None
    :param epsilon: added to every sum to avoid division by zero
    :return: The normalized values (the indices stay the same)
    """
    if cuda is None:
        cuda = indices.is_cuda
    dv = 'cuda' if cuda else 'cpu'
    spm = sparsemult(cuda)
    b, k, r = indices.size()
    assert r == 2
    # unroll the batch dimension
    # (think of this as putting all the matrices in the batch along the diagonal of one huge matrix)
    ran = torch.arange(b, device=dv).unsqueeze(1).expand(b, 2)
    ran = ran * torch.tensor(size, device=dv).unsqueeze(0).expand(b, 2)
    offset = ran.unsqueeze(1).expand(b, k, 2).contiguous().view(-1, 2)
    indices = indices.view(-1, 2)
    indices = indices + offset
    values = values.view(-1)
    if row:
        ones = torch.ones((b*size[1],), device=dv)
    else:
        ones = torch.ones((b*size[0],), device=dv)
    # transpose the matrix
    indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)
    sums = spm(indices.t(), values, torch.tensor(size, device=dv)*b, ones) # row/column sums
    # select the sums corresponding to each index
    div = torch.index_select(sums, 0, indices[:, 0]).squeeze() + epsilon
    return (values/div).view(b, k)
# if __name__ == "__main__":
# tind = torch.tensor([[[0, 0],[0, 1], [4, 4], [4, 3]], [[0, 1],[1, 0],[0, 2], [2, 0]]])
# tv = torch.tensor([[0.5, 0.5, 0.4, 0.6], [0.5, 1, 0.5, 1]])
#
# print(normalize(tind, tv, (5, 5)))
# print(normalize(tind, tv, (5, 5), row=False))
# First 25 primes, used as per-position exponents to build a (collision-free
# for small values) identifier for each integer tuple; limits tuple rank to 25.
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]


def duplicates(tuples):
    """
    Takes a batch of tuples and, for each tuple that occurs multiple times,
    marks all but one of the occurrences.

    :param tuples: A size (batch, k, rank) tensor of non-negative integer tuples
    :return: A size (batch, k) boolean mask indicating the duplicates

    NOTE: the prime-power encoding can overflow int64 for large entries or
    high rank; callers are expected to keep tuple values small.
    """
    b, k, r = tuples.size()

    primes = torch.tensor(PRIMES[:r])
    primes = primes.unsqueeze(0).unsqueeze(0).expand(b, k, r)
    # unique identifier for each tuple: prod_i (t_i + 1) ** prime_i
    # (renamed from ``unique`` so it no longer shadows the module-level helper)
    ids = ((tuples + 1) ** primes).prod(dim=2)

    # sort the ids so equal tuples become adjacent; remember how to unsort
    svals, sort_idx = torch.sort(ids, dim=1)
    _, unsort_idx = torch.sort(sort_idx, dim=1)

    # an element equal to its left neighbour (in sorted order) is a duplicate;
    # the first occurrence is never marked
    mask = svals[:, 1:] == svals[:, :-1]
    # prepend a False column; use mask.dtype so this works both on legacy
    # torch (uint8 comparisons) and modern torch (bool comparisons), where
    # concatenating uint8 zeros with a bool mask raises.
    mask = torch.cat([torch.zeros(b, 1, dtype=mask.dtype), mask], dim=1)

    # map the marks back to the original (unsorted) positions
    return torch.gather(mask, 1, unsort_idx)
#
# if __name__ == "__main__":
# # tuples = torch.tensor([
# # [[5, 5], [1, 1], [2, 3], [1, 1]],
# # [[3, 2], [3, 2], [5, 5], [5, 5]]
# # ])
# #
# # print(tuples)
# # dup = duplicates(tuples)
# # tuples[dup, :] = tuples[dup, :] * 0
# # print(tuples)
#
# tuples = torch.tensor([[
# [3, 1],
# [3, 2],
# [3, 1],
# [0, 3],
# [0, 2],
# [3, 0],
# [0, 3],
# [0, 0]]])
#
# print(duplicates(tuples))
def scatter_imgs(latents, images, size=None, ax=None, color=None, alpha=1.0):
    """
    Scatter-plot a batch of images at their 2D latent coordinates.

    Requires ``plt`` (matplotlib.pyplot) and ``np`` (numpy) to be imported at
    module level -- TODO confirm; they are not imported in this chunk.

    :param latents: (n, >=2) array; only the first two columns are used
    :param images: (n, h, w, c) array, presumably with values in [0, 1] -- verify
    :param size: width of each thumbnail in data units (auto-derived if None)
    :param ax: existing axes to draw on; a fresh 16x16 figure is made if None
    :param color: RGBA tint applied to the images and the scatter points
    :param alpha: thumbnail transparency
    :return: (ax, size) so follow-up calls can reuse the same layout
    """
    assert(latents.shape[0] == images.shape[0])
    if ax is None:
        fig = plt.figure(figsize=(16, 16))
        ax = fig.add_subplot(111)
        # start with a degenerate window; it is widened to fit the data below
        ax.set_xlim(0, 1e-7)
        ax.set_ylim(0, 1e-7)
    if color is None:
        color = np.asarray([0.0, 0.0, 0.0, 1.0])
    else:
        color = np.asarray(color)
    # print(color)
    xmn, ymn = np.min(latents, axis=0)
    xmx, ymx = np.max(latents, axis=0)
    oxmn, oxmx = ax.get_xlim()
    oymn, oymx = ax.get_ylim()
    # grow (never shrink) the axes limits to include all latent points
    ax.set_xlim(min(oxmn, xmn), max(oxmx, xmx))
    ax.set_ylim(min(oymn, ymn), max(oymx, ymx))
    # print(ax.get_xlim(), ax.get_ylim())
    if size is None:
        # heuristic thumbnail size: half the average horizontal spacing
        size = (xmx - xmn)/np.sqrt(latents.shape[0])
        size *= 0.5
    n, h, w, c = images.shape
    aspect = h/w
    # tint towards ``color`` and invert (white background, colored ink)
    images = images * (1.0 - color[:3])
    images = 1.0 - images
    for i in range(n):
        x, y = latents[i, 0:2]
        im = images[i, :]
        ax.imshow(im, extent=(x, x + size, y, y + size*aspect), alpha=alpha)
    ax.scatter(latents[:, 0], latents[:, 1], linewidth=0, s=2, color=color)
    return ax, size
def linmoid(x, inf_in, up):
    """
    Squash ``x`` into the range (0, up).

    Values between the two inflection points (on the input range) are mapped
    linearly; values below/above them are squeezed through sigmoid tails whose
    slope matches the linear segment at the inflection points.

    :param x: input tensor (floating point)
    :param inf_in: (low, high) inflection points in input space
    :param up: upper bound of the output range
    :return: tensor of the same shape as ``x``
    """
    lo, hi = inf_in
    below = x < lo
    above = x > hi

    # slope of the linear middle segment, mapping (lo, hi) -> (0.5, up - 0.5)
    slope = (up - 1) / (hi - lo)
    lin = x * slope + 0.5 - lo * slope

    # a sigmoid has derivative 1/4 at its center, so scale by 4*slope to
    # match the linear segment's slope at the inflection points
    sig_scale = slope * 4
    low_tail = torch.sigmoid((x - lo) * sig_scale)
    high_tail = torch.sigmoid((x - hi) * sig_scale) - 0.5 + (up - 0.5)

    y = torch.where(below, low_tail, lin)
    y = torch.where(above, high_tail, y)
    return y
# if __name__ == "__main__":
# x = torch.linspace(-0.5, 1.5, 1000)
# y = linmoid(x, inf_in=(0.25, 0.75), up=3)
#
# plt.scatter(x.numpy(), y.numpy(), s=2)
# plt.ylim([0, 3])
#
# clean()
# plt.savefig('test_linmoid.png')
def sparsemm(use_cuda):
    """Return the sparse-matmul autograd function for the requested device."""
    if use_cuda:
        return SparseMMGPU.apply
    return SparseMMCPU.apply
class SparseMMCPU(torch.autograd.Function):

    """
    Sparse matrix multiplication with gradients over the value-vector

    Does not work with batch dim.

    NOTE(review): uses the legacy ``torch.sparse.FloatTensor`` constructor and
    ``Variable``/``.data`` -- both deprecated in modern torch; confirm the
    required torch version before touching.
    """

    @staticmethod
    def forward(ctx, indices, values, size, xmatrix):
        # build the sparse COO matrix and stash what backward needs
        matrix = torch.sparse.FloatTensor(indices, values, tosize(size))
        ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
        return torch.mm(matrix, xmatrix)

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        # d(output)/d(values[k]) = <grad_output[i_k, :], xmatrix[j_k, :]>
        output_select = grad_output[i_ixs, :]
        xmatrix_select = ctx.xmatrix[j_ixs, :]
        grad_values = (output_select * xmatrix_select).sum(dim=1)
        # gradient w.r.t. the dense right-hand side: A^T @ grad_output
        grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
        return None, Variable(grad_values), None, Variable(grad_xmatrix)
def tosize(size):
    """Coerce ``size`` (a tuple, or anything ``intlist`` accepts, e.g. an
    integer tensor) into a ``torch.Size``."""
    return torch.Size(size if isinstance(size, tuple) else intlist(size))
class SparseMMGPU(torch.autograd.Function):

    """
    Sparse matrix multiplication with gradients over the value-vector

    Does not work with batch dim.

    CUDA twin of :class:`SparseMMCPU`; identical math, but builds the sparse
    matrix with the legacy ``torch.cuda.sparse.FloatTensor`` constructor.
    """

    @staticmethod
    def forward(ctx, indices, values, size, xmatrix):
        # print(type(size), size, list(size), intlist(size))
        matrix = torch.cuda.sparse.FloatTensor(indices, values, tosize(size))
        ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
        return torch.mm(matrix, xmatrix)

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        # d(output)/d(values[k]) = <grad_output[i_k], xmatrix[j_k]>
        output_select = grad_output[i_ixs]
        xmatrix_select = ctx.xmatrix[j_ixs]
        grad_values = (output_select * xmatrix_select).sum(dim=1)
        grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
        return None, Variable(grad_values), None, Variable(grad_xmatrix)
def batchmm(indices, values, size, xmatrix, cuda=None):
    """
    Multiply a batch of sparse matrices with a batch of dense matrices

    Implemented by unrolling the batch: all sparse matrices are placed along
    the diagonal of one big (b*height, b*width) matrix, then a single sparse
    matmul is performed against the stacked dense matrices.

    :param indices: (b, n, 2) LongTensor of per-matrix index tuples
    :param values: (b, n) tensor of matching values
    :param size: (height, width) of a single sparse matrix
    :param xmatrix: (b, width, z) batch of dense right-hand sides
    :param cuda: force device; inferred from ``indices`` when None
    :return: (b, height, z) batch of products
    """
    if cuda is None:
        cuda = indices.is_cuda
    b, n, r = indices.size()
    dv = 'cuda' if cuda else 'cpu'
    height, width = size
    size = torch.tensor(size, device=dv, dtype=torch.long)
    # per-batch offset so batch m lives in block-diagonal slot m
    bmult = size[None, None, :].expand(b, n, 2)
    m = torch.arange(b, device=dv, dtype=torch.long)[:, None, None].expand(b, n, 2)
    bindices = (m * bmult).view(b*n, r) + indices.view(b*n, r)
    bfsize = Variable(size * b)
    bvalues = values.view(-1)
    # stack the dense matrices vertically to match the block-diagonal layout
    b, w, z = xmatrix.size()
    bxmatrix = xmatrix.view(-1, z)
    sm = sparsemm(cuda)
    result = sm(bindices.t(), bvalues, bfsize, bxmatrix)
    return result.view(b, height, -1)
def split(offset, depth):
    """
    For each of the 2**depth equal buckets of the last dimension, route every
    position either to the lower half ("down", offset bit 0) or the upper half
    ("up", offset bit 1) of its bucket, preserving relative order within each
    half -- presumably one routing step of a sorting-network-style permutation
    (confirm against the caller).

    :param offset: (b, n, s) tensor of 0/1 routing bits
    :param depth: current depth; the input is treated as 2**depth buckets
    :return: (b, n, s) LongTensor of target indices
    """
    dv = 'cuda' if offset.is_cuda else 'cpu'
    b, n, s = offset.size()
    bn = b*n
    # fold batch dims together; everything below is per flattened row
    offset = offset.view(bn, s)
    numbuckets = 2 ** depth # number of buckets in the input
    bsize = s // numbuckets # size of the output buckets
    lo = torch.arange(numbuckets, device=dv, dtype=torch.long) * bsize # minimum index of each downbucket
    lo = lo[None, :, None].expand(bn, numbuckets, bsize).contiguous().view(bn, -1)
    hi = torch.arange(numbuckets, device=dv, dtype=torch.long) * bsize + bsize//2  # minimum index of each upbucket
    hi = hi[None, :, None].expand(bn, numbuckets, bsize).contiguous().view(bn, -1)
    upchoices = offset.long()
    downchoices = 1 - upchoices
    # running count of up/down choices within each bucket gives the rank of
    # each element inside its destination half
    numupchoices = upchoices.view(bn, numbuckets, bsize).cumsum(dim=2).view(bn, -1)
    numdownchoices = downchoices.view(bn, numbuckets, bsize).cumsum(dim=2).view(bn, -1)
    result = torch.zeros(bn, s, dtype=torch.long, device=dv)
    # print(result.dtype, upchoices.dtype, hi.dtype, numupchoices.dtype)
    result = result + upchoices * (hi + numupchoices - 1)
    result = result + downchoices * (lo + numdownchoices - 1)
    # If offset is not arranged correctly (equal numbers of ups and downs per bucket)
    # we get a non-permutation. This is fine, but we must clamp the result to make sure the
    # indices are still legal
    result = result.clamp(0, s-1)
    return result.view(b, n, s)
def sample_offsets(batch, num, size, depth, cuda=False):
    """
    Sample random 0/1 offset tensors for :func:`split`: every bucket gets an
    equal number of zeros and ones (so the resulting routing is a proper
    permutation), in a random order within the bucket.

    :param batch: batch size
    :param num: number of offset vectors per batch element
    :param size: length of each offset vector
    :param depth: input is treated as 2**depth buckets
    :return: (batch, num, size) uint8 tensor of offsets

    NOTE(review): uses ``dtype=torch.uint8`` as a legacy boolean; modern torch
    would use ``torch.bool``. Depends on :func:`shuffle_rows` below.
    """
    dv = 'cuda' if cuda else 'cpu'
    numbuckets = 2 ** depth # number of buckets in the input
    bsize = size // numbuckets # size of the input buckets
    # start from [0...0 1...1] per bucket, then shuffle each bucket
    ordered = torch.tensor([0,1], dtype=torch.uint8, device=dv)[None, None, None, :, None].expand(batch, num, numbuckets, 2, bsize // 2)
    ordered = ordered.contiguous().view(batch, num, numbuckets, bsize)
    # shuffle the buckets
    ordered = ordered.view(batch * num * numbuckets, bsize)
    ordered = shuffle_rows(ordered)
    ordered = ordered.view(batch, num, numbuckets, bsize)
    return ordered.contiguous().view(batch, num, -1)
# Cache of precomputed random permutations, keyed by row length. Filling
# 500k permutations per distinct length is expensive but happens only once.
shufflecache = {}
cache_size = 500_000

def shuffle_rows(x):
    """
    Shuffle every row of ``x`` independently, drawing the permutations (without
    replacement) from a lazily-built cache of ``cache_size`` random
    permutations per row length.

    Requires ``r <= cache_size`` and the module to import ``random`` -- the
    import is not visible in this chunk, confirm at file top.

    NOTE(review): the final ``out.cuda()`` looks redundant, since ``gather``
    already produces a tensor on ``x``'s device.
    """
    r, c = x.size()
    if c not in shufflecache:
        cached = torch.zeros(cache_size, c, dtype=torch.long, device='cpu')
        for i in range(cache_size):
            cached[i, :] = torch.randperm(c)
        shufflecache[c] = cached
    cache = shufflecache[c]
    # pick r distinct cached permutations, one per row
    rows = random.sample(range(cache_size), k=r)
    sample = cache[rows, :]
    if x.is_cuda:
        sample = sample.cuda()
    out = x.gather(dim=1, index=sample)
    if x.is_cuda:
        out = out.cuda()
    return out
def unique(tuples):
    """
    Assigns a single unique identifier to each tuple expressed in the row.

    Uses the Cantor pairing function, applied recursively (folding from the
    right) for tuples longer than two, so distinct non-negative integer tuples
    map to distinct identifiers.

    :param tuples: (b, s) integer tensor, one tuple per row, s >= 2
    :return: (b, 1) integer tensor of identifiers
    """
    b, s = tuples.size()

    if s == 2:
        k1, k2 = tuples[:, 0], tuples[:, 1]
        # Cantor pairing. (k1+k2)*(k1+k2+1) is always even, so floor division
        # is exact; ``//`` keeps the result an integer tensor, whereas true
        # division (``/``) would yield floats, lose precision for large keys,
        # and make the recursive torch.cat below fail on mixed dtypes.
        res = ((k1 + k2) * (k1 + k2 + 1)) // 2 + k2
        return res[:, None]

    sub = unique(tuples[:, 1:])
    res = torch.cat([tuples[:, 0:1], sub], dim=1)
    return unique(res)
def xent(out, tgt):
    """
    Elementwise binary cross-entropy, implemented by hand so gradients flow
    into *both* the predictions and the targets.

    Inputs are clamped into [0, 1] first, and a small constant keeps the logs
    finite at the endpoints.

    :param out: predicted probabilities
    :param tgt: target probabilities (same shape as ``out``)
    :return: tensor of per-element cross-entropies
    """
    assert out.size() == tgt.size()

    p = out.clamp(0, 1)
    q = tgt.clamp(0, 1)

    pos_term = q * torch.log(p + 1e-10)
    neg_term = (1.0 - q) * torch.log(1.0 - p + 1e-10)
    return -(pos_term + neg_term)
#
# if __name__ == '__main__':
# #
# # size = 8
#
# # offset = torch.tensor([1, 1, 0, 1, 1, 0, 0, 0]).byte()
# # offset = torch.tensor([[0, 0, 1, 0, 1, 1, 1, 0], [0, 1, 0, 1, 0, 1, 1, 0]]).byte()
#
# offset = torch.tensor([[0, 1, 1, 0]]).byte()
#
# indices = split(offset[:, None, :], 0)
#
# print(indices)
# #
# # print(sample_offsets(3, 4, 16, 3))
# #
# # print(unique(torch.tensor( [[1,2,3,4],[4,3,2,1],[1,2,3,4]] )))
# #
# #
# indices = torch.tensor([[[0, 0], [1, 1]], [[0, 1], [1, 0]]])
# values = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
# inputs = torch.tensor([[[1.0, 2.0, 2.0, 2.0, 2.0], [3.0, 4.0, 2.0, 2.0, 2.0]], [[1.0, 2.0, 2.0, 2.0, 2.0], [3.0, 4.0, 4.0, 4.0, 4.0]]])
#
# print(inputs.size())
#
# print(batchmm(indices, values, (2,2), inputs))
| StarcoderdataPython |
6602760 | <reponame>pallav1991/Optimus
from pyspark.sql import functions as F
from optimus.helpers.functions import parse_columns
from optimus.helpers.checkit import is_dataframe, is_int
class OutlierDetector:
    """
    Outlier detection for pyspark data frames.

    Every method validates its inputs, then returns a new dataframe with the
    outlier rows dropped; the input dataframe is never mutated in place.
    """

    @staticmethod
    def iqr(df, columns):
        """
        Delete outliers using the inter-quartile range (Tukey fences at 1.5*IQR).

        :param df: pyspark dataframe
        :param columns: column name(s) to inspect
        :return: dataframe without the outlier rows
        """
        if not is_dataframe(df):
            raise TypeError("Spark Dataframe expected")

        for col_name in parse_columns(df, columns):
            stats = df.cols.iqr(col_name, more=True)
            fence = stats["iqr"] * 1.5
            lower_bound = stats["q1"] - fence
            upper_bound = stats["q3"] + fence

            df = df.rows.drop((F.col(col_name) > upper_bound) | (F.col(col_name) < lower_bound))
        return df

    @staticmethod
    def z_score(df, columns, threshold=None):
        """
        Delete outliers whose z score exceeds ``threshold``.

        :param df: pyspark dataframe
        :param columns: column name(s) to inspect
        :param threshold: integer z-score cutoff (required)
        :return: dataframe without the outlier rows
        """
        if not is_dataframe(df):
            raise TypeError("Spark Dataframe expected")
        if not is_int(threshold):
            raise TypeError("Integer expected")

        for col_name in parse_columns(df, columns):
            # the column with the z_col value is always the string z_col plus the name of column
            score_col = "z_col_" + col_name
            df = (df.cols.z_score(col_name)
                  .rows.drop(F.col(score_col) > threshold)
                  .cols.drop(score_col))
        return df

    @staticmethod
    def mad(df, columns, threshold=None):
        """
        Delete outliers using the median absolute deviation.

        :param df: pyspark dataframe
        :param columns: column name(s) to inspect
        :param threshold: integer number of MADs away from the median (required)
        :return: dataframe without the outlier rows
        """
        if not is_dataframe(df):
            raise TypeError("Spark Dataframe expected")
        if not is_int(threshold):
            raise TypeError("Integer expected")

        for col_name in parse_columns(df, columns):
            stats = df.cols.mad(col_name, more=True)
            delta = threshold * stats["mad"]
            lower_bound = stats["median"] - delta
            upper_bound = stats["median"] + delta

            df = df.rows.drop((F.col(col_name) > upper_bound) | (F.col(col_name) < lower_bound))
        return df

    @staticmethod
    def modified_z_score(df, col_name, threshold):
        """
        Delete outliers from a DataFrame using the modified z score
        (0.6745 * |x - median| / MAD).
        Reference: http://colingorrie.github.io/outlier-detection.html#modified-z-score-method

        :param df: pyspark dataframe
        :param col_name: single column to inspect
        :param threshold: modified z-score cutoff
        :return: dataframe without the outlier rows
        """
        med = df.cols.median(col_name)
        median_absolute_deviation = df.select(F.abs(F.col(col_name) - med).alias(col_name)).cols.median(col_name)

        df = df.withColumn('m_z_score', F.abs(0.6745 * (F.col(col_name) - med) / median_absolute_deviation))
        return df.rows.drop(F.col("m_z_score") > threshold)
| StarcoderdataPython |
1978373 | <reponame>filhodomauro/bt_integration
#! /usr/bin/env python
import httplib2
import jamal_google_auth as gauth
from googleapiclient import discovery
def add_row(sheet_id, range, values):
    """Append ``values`` as new row(s) after ``range`` in the given Google
    spreadsheet, using raw (unparsed) input.

    NOTE(review): the ``range`` parameter shadows the builtin, but renaming it
    would break keyword-argument callers, so it is kept as-is.
    """
    credentials = gauth.get_credentials()
    service = discovery.build('sheets', 'v4', credentials=credentials)

    request_body = {
        "range": range,
        "values": values
    }
    # Fire the append request; the API response is currently unused.
    result = service.spreadsheets().values().append(
        spreadsheetId=sheet_id, range=range, valueInputOption='RAW', body=request_body).execute()
| StarcoderdataPython |
8109660 | <reponame>deepcoder42/python-lib
#!/usr/bin/python
# Classification (U)
"""Program: normalize.py
Description: Unit testing of normalize in gen_libs.py.
Usage:
test/unit/gen_libs/normalize.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import gen_libs
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):

    """Class: UnitTest

    Description: Class which is a representation of a unit testing.

    Methods:
        setUp
        test_rng4
        test_rng3
        test_rng2
        test_rng1

    """

    def setUp(self):

        """Function: setUp

        Description: Initialization for unit testing.

        Arguments:

        """

        self.rng = [(12, 15), (1, 10)]
        self.rng2 = [(1, 12), (12, 15)]
        self.rng3 = [(1, 22), (12, 15)]
        # NOTE(review): rng4 nests its tuple in an inner list, and result4 is
        # the tuple paired with itself -- confirm against gen_libs.normalize's
        # contract for nested input.
        self.rng4 = [[(1, 22)]]
        self.result = [(1, 10), (12, 15)]
        self.result2 = [(1, 15)]
        self.result3 = [(1, 22)]
        self.result4 = [((1, 22), (1, 22))]

    def test_rng4(self):

        """Function: test_rng4

        Description: Test with one range encompassing all ranges.

        Arguments:

        """

        self.assertEqual(gen_libs.normalize(self.rng4), self.result4)

    def test_rng3(self):

        """Function: test_rng3

        Description: Test with one range encompassing all ranges.

        Arguments:

        """

        self.assertEqual(gen_libs.normalize(self.rng3), self.result3)

    def test_rng2(self):

        """Function: test_rng2

        Description: Test with two consective sets of ranges.

        Arguments:

        """

        self.assertEqual(gen_libs.normalize(self.rng2), self.result2)

    def test_rng1(self):

        """Function: test_rng1

        Description: Test with two sets of ranges.

        Arguments:

        """

        self.assertEqual(gen_libs.normalize(self.rng), self.result)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
121854 | <gh_stars>0
"""Component for the Somfy MyLink device supporting the Synergy API."""
CONF_ENTITY_CONFIG = "entity_config"
CONF_SYSTEM_ID = "system_id"
CONF_REVERSE = "reverse"
CONF_DEFAULT_REVERSE = "default_reverse"
DEFAULT_CONF_DEFAULT_REVERSE = False
DATA_SOMFY_MYLINK = "somfy_mylink_data"
MYLINK_STATUS = "mylink_status"
MYLINK_ENTITY_IDS = "mylink_entity_ids"
DOMAIN = "somfy_mylink"
SOMFY_MYLINK_COMPONENTS = ["cover"]
MANUFACTURER = "Somfy"
DEFAULT_PORT = 44100
| StarcoderdataPython |
11316410 | <gh_stars>0
from turtle import Turtle
ALIGNMENT = "center"
FONT = ("Courier", 14, "normal")
class Score(Turtle):
def __init__(self):
super().__init__()
self.color("white")
self.speed("fastest")
self.penup()
self.ht()
self.goto(0, 280)
self.current_score = 0
self.show_score()
def show_score(self):
self.write(f"Score: {self.current_score}", align=ALIGNMENT, font=FONT)
def increase_score(self):
self.clear()
self.current_score += 1
self.show_score()
def game_over(self):
self.goto(0, 0)
self.write("Game Over.", align=ALIGNMENT, font=FONT)
| StarcoderdataPython |
3579148 | import pytest
from ray.util.ml_utils.checkpoint_manager import (
_CheckpointManager,
CheckpointStorage,
CheckpointStrategy,
_TrackedCheckpoint,
)
def test_unlimited_persistent_checkpoints():
    """With num_to_keep=None, every persistent checkpoint must be retained."""
    cpm = _CheckpointManager(checkpoint_strategy=CheckpointStrategy(num_to_keep=None))

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT)
        )

    assert len(cpm._top_persisted_checkpoints) == 10
def test_limited_persistent_checkpoints():
    """With num_to_keep=2, only the two top checkpoints may be retained."""
    cpm = _CheckpointManager(checkpoint_strategy=CheckpointStrategy(num_to_keep=2))

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT)
        )

    assert len(cpm._top_persisted_checkpoints) == 2
def test_no_persistent_checkpoints():
    """With num_to_keep=0, no persistent checkpoint may be retained."""
    cpm = _CheckpointManager(checkpoint_strategy=CheckpointStrategy(num_to_keep=0))

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT)
        )

    assert len(cpm._top_persisted_checkpoints) == 0
def test_dont_persist_memory_checkpoints():
    """Memory checkpoints are not persisted when _persist_memory_checkpoints
    is off, even with an unlimited num_to_keep."""
    cpm = _CheckpointManager(checkpoint_strategy=CheckpointStrategy(num_to_keep=None))
    cpm._persist_memory_checkpoints = False

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.MEMORY)
        )

    assert len(cpm._top_persisted_checkpoints) == 0
def test_persist_memory_checkpoints():
    """Memory checkpoints are all persisted when _persist_memory_checkpoints
    is on and num_to_keep is unlimited."""
    cpm = _CheckpointManager(checkpoint_strategy=CheckpointStrategy(num_to_keep=None))
    cpm._persist_memory_checkpoints = True

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.MEMORY)
        )

    assert len(cpm._top_persisted_checkpoints) == 10
def test_keep_best_checkpoints():
    """With a score attribute and order=min, the two lowest-metric
    checkpoints are kept, ranked worst-to-best."""
    cpm = _CheckpointManager(
        checkpoint_strategy=CheckpointStrategy(
            num_to_keep=2,
            checkpoint_score_attribute="metric",
            checkpoint_score_order="min",
        )
    )
    cpm._persist_memory_checkpoints = True

    for i in range(10):
        cpm.register_checkpoint(
            _TrackedCheckpoint(
                {"data": i},
                storage_mode=CheckpointStorage.MEMORY,
                metrics={"metric": i},
            )
        )

    # Sorted from worst (max) to best (min)
    assert [
        cp.tracked_checkpoint.metrics["metric"] for cp in cpm._top_persisted_checkpoints
    ] == [1, 0]
# Allow running this test module directly through pytest.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
11259227 | from selenium.webdriver.firefox.options import Options
from selenium import webdriver
import time
import os
import requests
from unipd_login import *
import argparse
def main(dep, course, index_video=0):
    """Log into the UniPD e-learning platform with Selenium, open the given
    course and download the ``index.m3u8`` playlist of one of its Kaltura
    video lectures into the current directory.

    :param dep: department of the course (upper-cased before matching)
    :param course: partial link text of the course page
    :param index_video: position of the video among the page's
        "Kaltura Video Resource" entries (defaults to the first one)
    """
    dep_name = dep.upper()
    course_name = course
    video_tag = "Kaltura Video Resource"
    download_dir = os.getcwd()
    usr, pwd = input_data()
    # argparse may hand this over as a string (or None when the flag is
    # omitted): coerce it so it can be used as a list index below.
    index_video = 0 if index_video is None else int(index_video)
    # set some useful options, like download in the same directory
    firefoxOptions = Options()
    firefoxOptions.set_preference("browser.download.folderList", 2)
    firefoxOptions.set_preference("browser.download.manager.showWhenStarting", False)
    firefoxOptions.set_preference("browser.download.dir", download_dir)
    # automatically download .m3u8 files, without asking
    firefoxOptions.set_preference(
        "browser.helperApps.neverAsk.saveToDisk", "application/vnd.apple.mpegurl"
    )
    # load firefox geckodriver with download options
    driver = webdriver.Firefox(options=firefoxOptions)
    # wait 10 seconds when doing a find_element before carrying on
    driver.implicitly_wait(10)
    # find the login button based on the department
    department(dep_name, driver)
    # wait some more seconds for the loading
    time.sleep(5)
    # get the input box of username and password
    username = driver.find_element_by_id("j_username_js")
    password = driver.find_element_by_id("password")
    # type the credentials returned by input_data() above
    # (this line previously contained a corrupted placeholder token)
    username.send_keys(usr)
    password.send_keys(pwd)
    # sign in
    driver.find_element_by_id("login_button_js").click()
    # find and click over the selected course
    driver.find_element_by_partial_link_text(course_name).click()
    # find and click over the selected video
    all_videos = driver.find_elements_by_xpath(
        "//*[span[contains(text(),'" + video_tag + "')]]"
    )
    all_videos[index_video].click()
    # find the src of the kaltura video only
    url_videobase = driver.find_element_by_id("contentframe").get_attribute("src")
    driver.get(url_videobase)
    # wait some more seconds for the loading
    time.sleep(5)
    # find the iframe dynamically generated tag and search inside it
    driver.switch_to.default_content()
    frame = driver.find_element_by_xpath("//iframe")
    driver.switch_to.frame(frame)
    # find the src for the a.m3u8
    url_video2 = driver.find_element_by_id("pid_kplayer").get_attribute("src")
    # get the a.m3u8 file
    r = requests.get(url_video2)
    # write the a.m3u8 as txt
    with open("./a.txt", "wb") as f:
        f.write(r.content)
    # read all the video links inside the a.txt
    video_links = open("./a.txt").readlines()
    # select one video (ie line 5, typically the HD, but check the a.txt and feel free to change it)
    choosen_index = video_links[4]
    # download the index.m3u8 file (which is the video stream of the lecture)
    driver.get(choosen_index)
    driver.close()
# Command-line entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Download the playlist .m3u8 of the video lecture selected"
    )
    parser.add_argument("--dep", required=True, help="department of the course")
    parser.add_argument("--course", required=True, help="name of the course")
    # type=int because main() uses this value to index the list of videos on
    # the page; default to the first video when the flag is omitted
    # (previously this passed None or a raw string, which crashed on
    # list indexing inside main()).
    parser.add_argument(
        "--index", required=False, type=int, default=0,
        help="index of the video to be downloaded"
    )
    args = parser.parse_args()

    main(dep=args.dep, course=args.course, index_video=args.index)
| StarcoderdataPython |
236317 | <filename>homeassistant/components/google_assistant/helpers.py
"""Helper classes for Google Assistant integration."""
from homeassistant.core import Context
class SmartHomeError(Exception):
    """Error raised while handling a Google Assistant Smart Home request.

    ``code`` identifies the Smart Home error category, see
    https://developers.google.com/actions/smarthome/create-app#error_responses
    """

    def __init__(self, code, msg):
        """Remember the Smart Home error code and initialize the base class."""
        self.code = code
        super().__init__(msg)
class Config:
    """Container for the Google Assistant integration's configuration."""

    def __init__(self, should_expose, allow_unlock, entity_config=None):
        """Initialize the configuration.

        should_expose: per-entity exposure setting (presumably a predicate
            callable -- confirm with callers).
        allow_unlock: whether unlock commands are permitted.
        entity_config: optional per-entity settings mapping; falls back to
            an empty dict.
        """
        self.allow_unlock = allow_unlock
        self.should_expose = should_expose
        self.entity_config = entity_config or {}
class RequestData:
    """Hold data associated with a particular request."""

    def __init__(self, config, user_id, request_id):
        """Initialize the request data.

        config: the google_assistant Config for this request.
        user_id: Home Assistant user to attribute the request to; wrapped in
            a Context so downstream calls carry that attribution.
        request_id: request identifier (presumably supplied by Google --
            confirm against the smart_home handler).
        """
        self.config = config
        self.request_id = request_id
        self.context = Context(user_id=user_id)
| StarcoderdataPython |
9720341 | <reponame>alijasim/frappe
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import getdate, formatdate, get_last_day
from frappe.utils.dateutils import get_period_ending, get_period
from frappe.desk.doctype.dashboard_chart.dashboard_chart import get
from datetime import datetime
from dateutil.relativedelta import relativedelta
class TestDashboardChart(unittest.TestCase):
    """Integration tests for Dashboard Chart timeseries aggregation.

    Each chart test creates a Dashboard Chart doc, fetches its data via
    ``get(...)`` and checks the generated period labels (and, where
    applicable, values), rolling back the database afterwards.
    """

    def test_period_ending(self):
        """get_period_ending snaps a date to the end of its interval."""
        self.assertEqual(get_period_ending('2019-04-10', 'Daily'),
            getdate('2019-04-10'))

        # week starts on monday
        self.assertEqual(get_period_ending('2019-04-10', 'Weekly'),
            getdate('2019-04-14'))

        self.assertEqual(get_period_ending('2019-04-10', 'Monthly'),
            getdate('2019-04-30'))
        self.assertEqual(get_period_ending('2019-04-30', 'Monthly'),
            getdate('2019-04-30'))
        self.assertEqual(get_period_ending('2019-03-31', 'Monthly'),
            getdate('2019-03-31'))

        self.assertEqual(get_period_ending('2019-04-10', 'Quarterly'),
            getdate('2019-06-30'))
        self.assertEqual(get_period_ending('2019-06-30', 'Quarterly'),
            getdate('2019-06-30'))
        self.assertEqual(get_period_ending('2019-10-01', 'Quarterly'),
            getdate('2019-12-31'))

    def test_dashboard_chart(self):
        """A monthly Count chart over the last year yields 13 month labels."""
        if frappe.db.exists('Dashboard Chart', 'Test Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Dashboard Chart')

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Dashboard Chart',
            chart_type = 'Count',
            document_type = 'DocType',
            based_on = 'creation',
            timespan = 'Last Year',
            time_interval = 'Monthly',
            filters_json = '{}',
            timeseries = 1
        )).insert()

        cur_date = datetime.now() - relativedelta(years=1)

        result = get(chart_name='Test Dashboard Chart', refresh=1)

        for idx in range(13):
            month = get_last_day(cur_date)
            month = formatdate(month.strftime('%Y-%m-%d'))
            self.assertEqual(result.get('labels')[idx], get_period(month))
            cur_date += relativedelta(months=1)

        frappe.db.rollback()

    def test_empty_dashboard_chart(self):
        """Labels are still produced when the source doctype has no rows."""
        if frappe.db.exists('Dashboard Chart', 'Test Empty Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Empty Dashboard Chart')

        frappe.db.sql('delete from `tabError Log`')

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Empty Dashboard Chart',
            chart_type = 'Count',
            document_type = 'Error Log',
            based_on = 'creation',
            timespan = 'Last Year',
            time_interval = 'Monthly',
            filters_json = '[]',
            timeseries = 1
        )).insert()

        cur_date = datetime.now() - relativedelta(years=1)

        result = get(chart_name ='Test Empty Dashboard Chart', refresh=1)

        for idx in range(13):
            month = get_last_day(cur_date)
            month = formatdate(month.strftime('%Y-%m-%d'))
            self.assertEqual(result.get('labels')[idx], get_period(month))
            cur_date += relativedelta(months=1)

        frappe.db.rollback()

    def test_chart_wih_one_value(self):
        """A single data point outside the recent window shows a zero value."""
        if frappe.db.exists('Dashboard Chart', 'Test Empty Dashboard Chart 2'):
            frappe.delete_doc('Dashboard Chart', 'Test Empty Dashboard Chart 2')

        frappe.db.sql('delete from `tabError Log`')

        # create one data point
        frappe.get_doc(dict(doctype = 'Error Log', creation = '2018-06-01 00:00:00')).insert()

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Empty Dashboard Chart 2',
            chart_type = 'Count',
            document_type = 'Error Log',
            based_on = 'creation',
            timespan = 'Last Year',
            time_interval = 'Monthly',
            filters_json = '[]',
            timeseries = 1
        )).insert()

        cur_date = datetime.now() - relativedelta(years=1)

        result = get(chart_name ='Test Empty Dashboard Chart 2', refresh = 1)

        for idx in range(13):
            month = get_last_day(cur_date)
            month = formatdate(month.strftime('%Y-%m-%d'))
            self.assertEqual(result.get('labels')[idx], get_period(month))
            cur_date += relativedelta(months=1)

        # only 1 data point with value
        self.assertEqual(result.get('datasets')[0].get('values')[2], 0)

        frappe.db.rollback()

    def test_group_by_chart_type(self):
        """A Group By chart counts docs per value of the grouped field."""
        if frappe.db.exists('Dashboard Chart', 'Test Group By Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Group By Dashboard Chart')

        frappe.get_doc({"doctype":"ToDo", "description": "test"}).insert()

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Group By Dashboard Chart',
            chart_type = 'Group By',
            document_type = 'ToDo',
            group_by_based_on = 'status',
            filters_json = '[]',
        )).insert()

        result = get(chart_name ='Test Group By Dashboard Chart', refresh = 1)
        todo_status_count = frappe.db.count('ToDo', {'status': result.get('labels')[0]})

        self.assertEqual(result.get('datasets')[0].get('values')[0], todo_status_count)

        frappe.db.rollback()

    def test_daily_dashboard_chart(self):
        """Daily Sum chart over a fixed date range: one value per day."""
        insert_test_records()

        if frappe.db.exists('Dashboard Chart', 'Test Daily Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Daily Dashboard Chart')

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Daily Dashboard Chart',
            chart_type = 'Sum',
            document_type = 'Communication',
            based_on = 'communication_date',
            value_based_on = 'rating',
            timespan = 'Select Date Range',
            time_interval = 'Daily',
            from_date = datetime(2019, 1, 6),
            to_date = datetime(2019, 1, 11),
            filters_json = '[]',
            timeseries = 1
        )).insert()

        result = get(chart_name = 'Test Daily Dashboard Chart', refresh = 1)

        self.assertEqual(result.get('datasets')[0].get('values'), [200.0, 400.0, 300.0, 0.0, 100.0, 0.0])
        self.assertEqual(
            result.get('labels'),
            ['06-01-19', '07-01-19', '08-01-19', '09-01-19', '10-01-19', '11-01-19']
        )

        frappe.db.rollback()

    def test_weekly_dashboard_chart(self):
        """Weekly Sum chart buckets the fixture ratings by week."""
        insert_test_records()

        if frappe.db.exists('Dashboard Chart', 'Test Weekly Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Weekly Dashboard Chart')

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Weekly Dashboard Chart',
            chart_type = 'Sum',
            document_type = 'Communication',
            based_on = 'communication_date',
            value_based_on = 'rating',
            timespan = 'Select Date Range',
            time_interval = 'Weekly',
            from_date = datetime(2018, 12, 30),
            to_date = datetime(2019, 1, 15),
            filters_json = '[]',
            timeseries = 1
        )).insert()

        result = get(chart_name ='Test Weekly Dashboard Chart', refresh = 1)

        self.assertEqual(result.get('datasets')[0].get('values'), [50.0, 300.0, 800.0, 0.0])
        self.assertEqual(
            result.get('labels'),
            ['30-12-18', '06-01-19', '13-01-19', '20-01-19']
        )

        frappe.db.rollback()

    def test_avg_dashboard_chart(self):
        """Weekly Average chart averages the fixture ratings per week."""
        insert_test_records()

        if frappe.db.exists('Dashboard Chart', 'Test Average Dashboard Chart'):
            frappe.delete_doc('Dashboard Chart', 'Test Average Dashboard Chart')

        frappe.get_doc(dict(
            doctype = 'Dashboard Chart',
            chart_name = 'Test Average Dashboard Chart',
            chart_type = 'Average',
            document_type = 'Communication',
            based_on = 'communication_date',
            value_based_on = 'rating',
            timespan = 'Select Date Range',
            time_interval = 'Weekly',
            from_date = datetime(2018, 12, 30),
            to_date = datetime(2019, 1, 15),
            filters_json = '[]',
            timeseries = 1
        )).insert()

        result = get(chart_name='Test Average Dashboard Chart', refresh = 1)

        self.assertEqual(result.get('datasets')[0].get('values'), [50.0, 150.0, 266.6666666666667, 0.0])
        self.assertEqual(
            result.get('labels'),
            ['30-12-18', '06-01-19', '13-01-19', '20-01-19']
        )

        frappe.db.rollback()
def insert_test_records():
    """Insert the Communication fixtures the dashboard chart tests
    aggregate over (idempotent: existing subjects are skipped)."""
    fixtures = (
        ('Communication 1', datetime(2018, 12, 30), 50),
        ('Communication 2', datetime(2019, 1, 4), 100),
        ('Communication 3', datetime(2019, 1, 6), 200),
        ('Communication 4', datetime(2019, 1, 7), 400),
        ('Communication 5', datetime(2019, 1, 8), 300),
        ('Communication 6', datetime(2019, 1, 10), 100),
    )
    for subject, date, rating in fixtures:
        create_new_communication(subject, date, rating)
def create_new_communication(subject, date, rating):
    """Insert a Communication doc with the given subject, date and rating,
    skipping the insert when one with the same subject already exists."""
    communication = {
        'doctype': 'Communication',
        'subject': subject,
        'rating': rating,
        'communication_date': date
    }
    comm = frappe.get_doc(communication)
    # keyed on subject only, so repeated test runs do not duplicate fixtures
    if not frappe.db.exists("Communication", {'subject' : comm.subject}):
        comm.insert()
| StarcoderdataPython |
163786 | """
Code that goes along with the Airflow located at:
http://airflow.readthedocs.org/en/latest/tutorial.html
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from taxi import get_taxi_data, transform_taxi_data, load_taxi_data, get_position_taxi
from datetime import datetime, timedelta
# Default arguments inherited by every task in this DAG.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2018, 5, 24),
    # NOTE(review): this end_date is *before* start_date, which leaves no
    # schedulable interval between them — confirm whether it is intentional.
    'end_date': datetime(2018, 4, 24),
    'email': ['<EMAIL>'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

# Taxi ETL pipeline: four extract tasks feed one transform, which feeds one load.
dag = DAG(
    'taxi', default_args=default_args,
    # cron expression: minute 0 of every 10th hour, Mondays only.
    schedule_interval='0 */10 * * 1',
    catchup=False
)

# Extract the 'taxis' collection from the 'taxi_mtl' source.
extract_data_taxis = PythonOperator(
    task_id='extract_data_taxis',
    python_callable=get_taxi_data,
    provide_context=True,
    op_args=[
        'taxi_mtl',
        'taxis'
    ],
    dag=dag
)

# Extract the 'ads' collection from the 'taxi_mtl' source.
extract_data_ads = PythonOperator(
    task_id='extract_data_ads',
    python_callable=get_taxi_data,
    provide_context=True,
    op_args=[
        'taxi_mtl',
        'ads'
    ],
    dag=dag
)

# Extract the 'vehicles' collection from the 'taxi_mtl' source.
extract_data_vehicles = PythonOperator(
    task_id='extract_data_vehicles',
    python_callable=get_taxi_data,
    provide_context=True,
    op_args=[
        'taxi_mtl',
        'vehicles'
    ],
    dag=dag
)

# Extract live taxi positions (separate helper, no collection argument).
extract_data_positions = PythonOperator(
    task_id='extract_data_positions',
    python_callable=get_position_taxi,
    provide_context=True,
    op_args=[
        'taxi_mtl'
    ],
    dag=dag
)

# Transform the combined extracted data.
transform_data = PythonOperator(
    task_id='transform_data_taxi',
    python_callable=transform_taxi_data,
    provide_context=True,
    dag=dag
)

# Load the transformed data into the 'home_taxi_mtl' destination.
load_data = PythonOperator(
    task_id='load_data_taxi',
    python_callable=load_taxi_data,
    provide_context=True,
    op_args=[
        'home_taxi_mtl'
    ],
    dag=dag
)

# Wiring: every extract must finish before transform; transform before load.
extract_data_ads >> transform_data >> load_data
extract_data_taxis >> transform_data
extract_data_vehicles >> transform_data
extract_data_positions >> transform_data
| StarcoderdataPython |
3267421 | <filename>scenes/__init__.py
from .game import GameScene
from .inventory import InventoryScene
from .main_menu import MainMenuScene
from .character_creation import CharacterCreationScene | StarcoderdataPython |
1660527 | <filename>mellplayer/mell_logger.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
MellPlayer logger
Created on 2017-03-06
@author: Mellcap
'''
import os
import logging
from mellplayer.directory import BASE_DIRECTORY
# Log file lives under the player's base data directory.
LOG_FILE = os.path.join(BASE_DIRECTORY, 'mell_logger.log')

# Module-wide logger: everything at DEBUG and above is recorded.
mell_logger = logging.getLogger('mell_logger')
mell_logger.setLevel(logging.DEBUG)

# File handler writes all records to LOG_FILE.
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.DEBUG)

# Record format: timestamp - logger name - level, then the message on a new line.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s \n- %(message)s')
fh.setFormatter(formatter)

# Attach the handler so records are actually emitted to the file.
mell_logger.addHandler(fh)
| StarcoderdataPython |
312127 | <gh_stars>1000+
class BlockTerminationNotice(Exception):
    """Project-specific exception; exact semantics are defined by its raisers."""
class IncorrectLocationException(Exception):
    """Project-specific exception; exact semantics are defined by its raisers."""
class SootMethodNotLoadedException(Exception):
    """Project-specific exception; exact semantics are defined by its raisers."""
class SootFieldNotLoadedException(Exception):
    """Project-specific exception; exact semantics are defined by its raisers."""
| StarcoderdataPython |
8052119 | <reponame>tl-hbk/boa<gh_stars>0
from glob import glob
import os
from pathlib import Path
from math import log
from mamba.mamba_api import transmute as mamba_transmute
from joblib import Parallel, delayed
from rich.console import Console
console = Console()
# (unit label, decimal places) per power-of-1024 exponent: bytes .. PB.
unit_list = list(zip(["bytes", "kB", "MB", "GB", "TB", "PB"], [0, 0, 1, 2, 2, 2]))

def sizeof_fmt(num):
    """Return *num* bytes as a human-friendly size string.

    ``num`` is expected to be a non-negative integer (an ``st_size``).
    0 and 1 get singular/plural special cases; larger values are scaled by
    powers of 1024 and formatted with the unit and decimal-place count from
    ``unit_list``. Any other input (negative, fractional < 1) returns
    ``None`` — previously this happened implicitly by falling off the end.
    """
    if num == 0:
        return "0 bytes"
    if num == 1:
        return "1 byte"
    if num > 1:
        # Clamp the exponent so sizes beyond PB still use the largest unit.
        exponent = min(int(log(num, 1024)), len(unit_list) - 1)
        quotient = num / 1024 ** exponent
        unit, num_decimals = unit_list[exponent]
        # Nested precision field replaces the old "%"-assembled format string.
        return "{:.{prec}f} {}".format(quotient, unit, prec=num_decimals)
    return None  # unsupported input: made explicit instead of implicit
def transmute_task(f, args):
    """Convert one package archive between .tar.bz2 and .conda formats.

    Parameters
    ----------
    f : str
        Path to the source package (must end in .tar.bz2 or .conda).
    args :
        Namespace providing ``output_directory`` and ``compression_level``.

    Returns
    -------
    tuple
        (filename, outfile, stat_before, stat_after, saved_percent, color)
        where ``saved_percent`` is the fractional size reduction and
        ``color`` is a rich markup tag used by the caller's report.

    Raises
    ------
    ValueError
        If ``f`` has an unsupported extension.
    """
    filename = os.path.basename(f)
    outpath = os.path.abspath(args.output_directory)
    if f.endswith(".tar.bz2"):
        filename = filename[:-8]  # strip ".tar.bz2"
        outfile = os.path.join(outpath, filename + ".conda")
    elif f.endswith(".conda"):
        filename = filename[:-6]  # strip ".conda"
        outfile = os.path.join(outpath, filename + ".tar.bz2")
    else:
        console.print("[bold red]Transmute can only handle .tar.bz2 and .conda formats")
        # BUG FIX: execution previously fell through here with `outfile`
        # unbound and died with a NameError; fail explicitly instead.
        raise ValueError(f"unsupported package format: {f}")
    console.print(f"Processing {filename}")
    mamba_transmute(f, outfile, args.compression_level)
    stat_before = Path(f).stat()
    stat_after = Path(outfile).stat()
    saved_percent = 1.0 - (stat_after.st_size / stat_before.st_size)
    # Green when the conversion shrank the package, red when it grew.
    color = "[bold green]" if saved_percent > 0 else "[bold red]"
    return filename, outfile, stat_before, stat_after, saved_percent, color
def main(args):
    """Transmute every package matched by ``args.files``.

    Expands each glob pattern in ``args.files``, ensures
    ``args.output_directory`` exists, converts the matches in parallel
    across ``args.num_jobs`` workers, then prints a per-package size report.
    """
    # from mamba.mamba_api import Context
    # api_ctx = Context()
    # api_ctx.set_verbosity(1)
    files = args.files
    final_files = []
    if not os.path.exists(args.output_directory):
        Path(args.output_directory).mkdir(parents=True, exist_ok=True)
    # Resolve every glob pattern to a list of absolute paths.
    for f in files:
        final_files += [os.path.abspath(fx) for fx in glob(f)]
    logs = Parallel(n_jobs=args.num_jobs)(
        delayed(transmute_task)(f, args) for f in final_files
    )
    for filename, outfile, stat_before, stat_after, saved_percent, color in logs:
        # NOTE(review): this f-string has no placeholders, so it prints the
        # literal "(unknown)" rather than the file name — confirm intended text.
        console.print(f"\nConverting [bold](unknown)")
        console.print(f"Done: [bold]{outfile}")
        console.print(f" Before : {sizeof_fmt(stat_before.st_size)}")
        console.print(f" After : {sizeof_fmt(stat_after.st_size)}")
        console.print(f" Difference: {color}{saved_percent * 100:.2f}%")
6661531 | <gh_stars>10-100
from ballclient.service.clientteam import ClientPlayer, ClientTeam
from ballclient.util import logger
import random
"""
TODO 追击敌人时,多只鲲一起协作
2. 鲲原地打转问题
3. 增加一个可视区域,Privilege 尽量保证可视区域最大
4. 己方不能重叠
- 计算权重时,不考虑方向了
- 权重的计算公式
weight funciton (x-1)^3-x+2 or y= 4.25-5.04 x + 1.75 x^2
"""
class Strategy():
    """Greedy movement strategy for a team of players on a grid.

    For each player, every candidate move (right/left/down/up) accumulates a
    distance-discounted score over nearby cells (powers, enemies, meteors,
    tunnels, wormholes, teammates); the highest-scoring move is chosen.
    """

    # Move names, indexed by the 4-slot weight vector: 0=right, 1=left, 2=down, 3=up.
    Direction = ['right', 'left', 'down', 'up']
    # Weight coefficients per cell type. "Peer" weights apply to the area
    # around a tunnel/wormhole exit; "Unprivilege" weights apply while our
    # team lacks privilege.
    WeightMeteor = 3
    WeightTunnel = 0.5
    WeightWormhole = 0.9
    WeightTunnelPeer = 1.2
    WeightWormholePeer = 2.0
    WeightTunnelUnprivilege = 15
    WeightWormholeUnprivilege = 18
    WeightEnemyUnprivilege = 200
    WeightPower = 160
    WeightEnemy = 120
    WeightEmpty = 0.5
    # Bit masks selecting which probes weightDirection evaluates.
    # First hex digit: axis-aligned probes; remaining bits: diagonal probes,
    # named <major axis><minor axis> (e.g. RightDown credits the 'right' slot).
    RightHint = 0x800
    LeftHint = 0x400
    DownHint = 0x200
    UpHint = 0x100
    RightDownHint = 0x80
    RightUpHint = 0x40
    LeftDownHint = 0x20
    LeftUpHint = 0x10
    DownRightHint = 0x8
    DownLeftHint = 0x4
    UpRightHint = 0x2
    UpLeftHint = 0x1

    def __init__(self, myteam, enemy):
        # Chosen move name for the current evaluation (None until computed).
        self._direction = None
        self.weight = 0
        self.myteam = myteam
        self.enemy = enemy
        # Accumulated scores for [right, left, down, up].
        self._weightDir = [0] * 4
        # Probe offset tables: each entry is an (vx, vy) distance pair.
        self.pos2 = [
            [1, 1], [2, 1], [1, 2], [2, 2]
        ]
        self.pos3 = [
            # first vx, second vy
            [1, 1], [2, 1], [1, 2], [2, 2],
            [3, 1], [1, 3], [3, 2], [2, 3], [3, 3]
        ]

    @property
    def direction(self):
        """The chosen move as a single-item list, or [] when none is set."""
        if self._direction is None:
            return []
        return [self._direction]

    def ableToGo(self, x, y):
        """Return True when cell (x, y) is enterable for this team.

        Meteors always block; a cell holding an enemy is enterable only
        while our team has privilege.
        """
        if self.myteam.isMeteor(x, y):
            # logger.debug(f"[R{self.round} - abletogo ({x}, {y})] Meteor False")
            return False
        if self.enemy.isPlayer(x, y) is not None:
            return self.myteam.privilege
        return True

    def _calcTunnelWeight(self, player, tunnel, direct, **kwargs):
        """Score a tunnel cell by probing the area around its exit.

        Tunnels get a strong bonus while we lack privilege; entering a tunnel
        whose exit is our current location is forbidden (very negative).
        """
        # TODO judge direction and next
        nx, ny = self.myteam.tunnelNext(tunnel)
        if nx == player.x and ny == player.y:
            # logger.debug(f"tunnel next is current loc")
            return - 100000
        # NOTE(review): tdis is computed but no longer used — it was the
        # distance input of the commented-out _calcWeight call below.
        tdis = abs(nx - player.x - 1) + abs(ny - player.y - 1)
        if tdis > ClientTeam.Vision:
            tdis = abs(tdis - ClientTeam.Vision) + 1
        elif tdis == 0:
            tdis += 1
        # tmp = self._calcWeight(player, nx, ny, dis=tdis, trans=False, direct=direct)
        tmpList = [0, 0, 0, 0]
        # if (direct & Strategy.RightHint) == Strategy.RightHint:  # right
        #     direct = 0x8c0
        # if (direct & Strategy.LeftHint) == Strategy.LeftHint:  # left
        #     direct = 0x430
        # if (direct & Strategy.DownHint) == Strategy.DownHint:  # down
        #     direct = 0x20c
        # if (direct & Strategy.UpHint) == Strategy.UpHint:  # up
        #     direct = 0x103
        # trans=False stops a tunnel near the exit from recursing further.
        self.weightDirection(player, {"x": nx, "y": ny, "vx": 1, "vy": 1}, weightDir=tmpList, direct=direct, trans = False)
        # logger.debug(tmpList)
        tmp = sum(tmpList)
        if self.myteam.privilege == False:
            res = (tmp * Strategy.WeightTunnelPeer + random.randint(6, 8) * Strategy.WeightTunnelUnprivilege)
        else:
            res = (tmp * Strategy.WeightTunnelPeer + random.randint(2, 3) * Strategy.WeightTunnel)
        return res

    def _calcWormholeWeight(self, player, wormhole, direct):
        """Score a wormhole cell by probing around its paired exit.

        Strongly boosted while unprivileged (presumably as an escape route —
        TODO confirm).
        """
        # TODO weight change
        pwh = self.myteam.peerWormhole(wormhole)
        nx = pwh["x"]
        ny = pwh["y"]
        tmpList = [0, 0, 0, 0]
        self.weightDirection(player, {"x": nx, "y": ny, "vx": 1, "vy": 1}, weightDir=tmpList, direct=0xfff)
        tmp = sum(tmpList)
        if self.myteam.privilege == False:
            return random.randint(10, 15) * Strategy.WeightWormholeUnprivilege + tmp * Strategy.WeightWormholePeer
        return random.randint(1, 2) * Strategy.WeightWormhole + tmp * Strategy.WeightWormholePeer

    def _calcWeightIgnoreDis(self, player, x, y, trans = True, direct = 0xfff):
        """Raw (undiscounted) weight of the candidate cell (x, y).

        @return weight
        Distance is not factored in here — the caller divides by it.
        ``trans`` False makes a tunnel count as an ordinary empty cell
        instead of recursing through its exit.
        """
        tunnel = self.myteam.isTunnel(x, y)
        if tunnel is not None:
            if not trans:
                return Strategy.WeightEmpty
            # logger.debug(f"[R{self.round}] Tunnel ({x}, {y})")
            return self._calcTunnelWeight(player, tunnel, direct)
        wormhole = self.myteam.isWormhole(x, y)
        if wormhole is not None:
            # logger.debug(f"[R{self.round}] Wormhole ({x}, {y})")
            return self._calcWormholeWeight(player, wormhole, direct)
        # dis = pow(dis, 2)
        if self.myteam.isMeteor(x, y):
            # logger.debug(f"[R{self.round}] Meteor ({x}, {y})")
            return -0.5 * Strategy.WeightMeteor
        power = self.myteam.isPower(x, y)
        if power is not None:
            # logger.debug(f"[R{self.round}] Pos({x}, {y}) Power {power}")
            return power * Strategy.WeightPower
        ene = self.enemy.isPlayer(x, y)
        if ene is not None:
            if self.myteam.privilege == False:
                # Flee: more negative for high-value enemies/ourselves, and
                # even more so when the enemy has nearby support.
                score = (- ene.score - player.score) * Strategy.WeightEnemyUnprivilege - 80 * self.enemy.nearmate(x, y, dis=3)
            else:
                # Chase: bonus scales with nearby teammates and remaining lives.
                nearMate = self.myteam.nearmate(x, y, dis=ClientTeam.Vision) * self.myteam.remain_life
                score = (ene.score + 10) * Strategy.WeightEnemy * (0.5 + nearMate)
            # logger.debug(f"Tri to catch enemy({x}, {y}), near: {nearMate}")
            # logger.debug(f"[R{self.round}] Enemy ({x}, {y}) score: {score}")
            return score
        teammate = self.myteam.isPlayer(x, y, player)
        if teammate is not None:
            # Penalize stacking onto a teammate's cell.
            # TODO A -> X <- B
            return - 100
        return Strategy.WeightEmpty * random.randint(0, 3)  # empty

    def _calcWeight(self, player, x, y, dis = 1, trans = True, direct = 0xfff):
        """Cell weight discounted by the pre-computed distance factor ``dis``."""
        return self._calcWeightIgnoreDis(player, x, y, trans, direct=direct) / dis

    def weightDirection(self, player, coord, weightDir, specdis = None, direct = 0xfff, **kwargs):
        """vx and vy are distances (always positive values).

        Starting from the point in ``coord``, score the cells at offsets
        (+-vx, +-vy) and accumulate the results into ``weightDir``.
        coord: {"x": x, "y": y, "vx": vx, "vy": vy}
        weightDir: list of 4 elements [right, left, down, up]; the values
        are accumulated in place.
        """
        x = coord["x"]
        y = coord["y"]
        vx = coord["vx"]
        vy = coord["vy"]
        txr = x + vx  # to the right
        txl = x - vx  # to the left
        tyd = y + vy  # downwards
        tyu = y - vy  # upwards
        # Straight-line distance discount: 4.25 - 5.04x + 1.75x^2
        otherDis = 1.75 * vx * vx - 5.04 * vx + 4.25
        if (direct & Strategy.RightHint) == Strategy.RightHint:  # right
            weightDir[0] += self._calcWeight(player, txr, y, dis=otherDis, direct=Strategy.RightHint, **kwargs)
        if (direct & Strategy.LeftHint) == Strategy.LeftHint:  # left
            weightDir[1] += self._calcWeight(player, txl, y, dis=otherDis, direct=Strategy.LeftHint, **kwargs)
        otherDis = 1.75 * vy * vy - 5.04 * vy + 4.25
        if (direct & Strategy.DownHint) == Strategy.DownHint:  # down
            weightDir[2] += self._calcWeight(player, x, tyd, dis=otherDis, direct=Strategy.DownHint, **kwargs)
        if (direct & Strategy.UpHint) == Strategy.UpHint:  # up
            weightDir[3] += self._calcWeight(player, x, tyu, dis=otherDis, direct=Strategy.UpHint, **kwargs)
        # otherDis = ((vx + vy - 0.5) * (vx + vy - 0.5) + abs(vx - vy) + 1)
        otherDis = 1.75 * (vx+vy) * (vx+vy) - 5.04 * (vx+vy) + 4.25
        if vy < vx or (vx < 2 and vy < 2):
            # Horizontal-dominant: diagonal probes credit the left/right slots.
            if (direct & Strategy.RightDownHint) == Strategy.RightDownHint:
                weightDir[0] += self._calcWeight(player, txr, tyd, otherDis, **kwargs)  # right-down
            if (direct & Strategy.RightUpHint) == Strategy.RightUpHint:
                weightDir[0] += self._calcWeight(player, txr, tyu, otherDis, **kwargs)  # right-up
            if (direct & Strategy.LeftDownHint) == Strategy.LeftDownHint:
                weightDir[1] += self._calcWeight(player, txl, tyd, otherDis, **kwargs)  # left-down
            if (direct & Strategy.LeftUpHint) == Strategy.LeftUpHint:
                weightDir[1] += self._calcWeight(player, txl, tyu, otherDis, **kwargs)  # left-up
        elif vy > vx or (vx < 2 and vy < 2):
            # Vertical-dominant: diagonal probes credit the down/up slots.
            if (direct & Strategy.DownRightHint) == Strategy.DownRightHint:
                weightDir[2] += self._calcWeight(player, txr, tyd, otherDis, **kwargs)  # down-right
            if (direct & Strategy.DownLeftHint) == Strategy.DownLeftHint:
                weightDir[2] += self._calcWeight(player, txl, tyd, otherDis, **kwargs)  # down-left
            if (direct & Strategy.UpRightHint) == Strategy.UpRightHint:
                weightDir[3] += self._calcWeight(player, txr, tyu, otherDis, **kwargs)  # up-right
            if (direct & Strategy.UpLeftHint) == Strategy.UpLeftHint:
                weightDir[3] += self._calcWeight(player, txl, tyu, otherDis, **kwargs)  # up-left
        else:
            pass

    def compare(self, player):
        """Pick the best-scoring direction for ``player``.

        A four-way tie is broken by nudging the player's previous move so it
        keeps heading the same way instead of oscillating.
        """
        mweight = -1000000
        d = -1
        if player.lastAction is not None and self._weightDir[0] == self._weightDir[1] and self._weightDir[1] == self._weightDir[2] \
            and self._weightDir[2] == self._weightDir[3]:
            self._weightDir[Strategy.Direction.index(player.lastAction)] += 30
        for i in range(4):
            if mweight < self._weightDir[i]:
                mweight = self._weightDir[i]
                d = i
        logger.debug("[R%d %s - %s] <===> %s"
            % (self.round, player, Strategy.Direction[d], self._weightDir))
        return Strategy.Direction[d]

    def makeAction(self, action, round):
        """Score the four moves for every teammate and append the chosen
        move of each of them to ``action`` for this ``round``."""
        self.round = round
        for player in self.myteam.players.values():
            # logger.debug(f"[R{self.round}] {player}")
            x = player.x
            y = player.y
            # iterate
            # Four combinations: right before left, down before up.
            for i in range(4):
                self._weightDir[i] = 0
            # Immediately blocked moves get a prohibitive score that the
            # additive probing below cannot recover from.
            if not self.ableToGo(x+1, y):
                self._weightDir[0] = -100000
            if not self.ableToGo(x-1, y):
                self._weightDir[1] = -100000
            if not self.ableToGo(x, y+1):
                self._weightDir[2] = -100000
            if not self.ableToGo(x, y-1):
                self._weightDir[3] = -100000
            for p in self.pos3:
                self.weightDirection(player,
                    coord = {"x": player.x, "y":player.y, "vx": p[0], "vy": p[1]},
                    weightDir=self._weightDir,
                    direct=0xfff)
            player.lastAction = self.compare(player)
            player.nextPos = self.playerNextPos(player)
            # if len(player.posStack) < 5:
        for player in self.myteam.players.values():
            # logger.debug(f"[R{self.round} {self.myteam.privilege}]{player}: {res}")
            action.append({"team": player.team, "player_id": player.id,
                "move": [player.lastAction]})

    def playerNextPos(self, player):
        """Return the (x, y) cell that the player's chosen lastAction leads to."""
        if player.lastAction == 'right':
            return (player.x + 1, player.y)
        elif player.lastAction == 'left':
            return (player.x - 1, player.y)
        elif player.lastAction == 'up':
            return (player.x, player.y - 1)
        elif player.lastAction == 'down':
            return (player.x, player.y + 1)
class Strategy2(Strategy):
    """Variant of Strategy that probes every signed (±vx, ±vy) offset
    explicitly — including offsets with a zero component — and attributes
    each probed cell to a move slot via judegDir."""

    def __init__(self, myteam, enemy):
        Strategy.__init__(self, myteam, enemy)
        # Probe offsets; unlike the base class, axis-aligned pairs with a
        # zero component are included.
        self.pos3 = [
            # first vx, second vy
            [1, 0], [0, 1], [1, 1],
            [2, 0], [0, 2], [2, 1], [1, 2], [2, 2],
            [3, 0], [0, 3], [3, 1], [1, 3], [3, 2], [2, 3], [3, 3]
        ]

    def makeAction(self, action, round):
        """Score the four moves for every teammate by probing all four sign
        combinations of every offset, then append the chosen moves."""
        self.round = round
        for player in self.myteam.players.values():
            # logger.debug(f"[R{self.round}] {player}")
            x = player.x
            y = player.y
            # iterate
            # Four combinations: right before left, down before up.
            for i in range(4):
                self._weightDir[i] = 0
            # Immediately blocked moves get a prohibitive score.
            if not self.ableToGo(x+1, y):
                self._weightDir[0] = -100000
            if not self.ableToGo(x-1, y):
                self._weightDir[1] = -100000
            if not self.ableToGo(x, y+1):
                self._weightDir[2] = -100000
            if not self.ableToGo(x, y-1):
                self._weightDir[3] = -100000
            for p in self.pos3:
                # The same coord dict is reused; only the target (tx, ty)
                # changes across the four sign combinations.
                coord = {"x": player.x, "y":player.y, "vx": p[0], "vy": p[1]}
                coord["tx"] = player.x + p[0]
                coord["ty"] = player.y + p[1]
                self.weightPos(player, coord = coord, weightDir=self._weightDir, direct=0xfff)
                coord["tx"] = player.x + p[0]
                coord["ty"] = player.y - p[1]
                self.weightPos(player, coord = coord, weightDir=self._weightDir, direct=0xfff)
                coord["tx"] = player.x - p[0]
                coord["ty"] = player.y + p[1]
                self.weightPos(player, coord = coord, weightDir=self._weightDir, direct=0xfff)
                coord["tx"] = player.x - p[0]
                coord["ty"] = player.y - p[1]
                self.weightPos(player, coord = coord, weightDir=self._weightDir, direct=0xfff)
            player.lastAction = self.compare(player)
            player.nextPos = self.playerNextPos(player)
            # if len(player.posStack) < 5:
        # TODO make a controller to control all actions
        for player in self.myteam.players.values():
            # logger.debug(f"[R{self.round} {self.myteam.privilege}]{player}: {res}")
            action.append({"team": player.team, "player_id": player.id,
                "move": [player.lastAction]})

    def _calcWeightPos(self, player, x, y, dis=1, dir=-1, trans=True):
        """Raw weight of cell (x, y), like the base class's
        _calcWeightIgnoreDis but without the teammate penalty.

        NOTE(review): this method appears to be unused — weightPos calls
        _calcWeightIgnoreDis instead; ``dis`` and ``dir`` are also unused.
        """
        tunnel = self.myteam.isTunnel(x, y)
        if tunnel is not None:
            if not trans:
                return Strategy.WeightEmpty
            # logger.debug(f"[R{self.round}] Tunnel ({x}, {y})")
            return self._calcTunnelWeight(player, tunnel, 0xfff)
        wormhole = self.myteam.isWormhole(x, y)
        if wormhole is not None:
            # logger.debug(f"[R{self.round}] Wormhole ({x}, {y})")
            return self._calcWormholeWeight(player, wormhole, 0xfff)
        # dis = pow(dis, 2)
        if self.myteam.isMeteor(x, y):
            # logger.debug(f"[R{self.round}] Meteor ({x}, {y})")
            return -0.5 * Strategy.WeightMeteor
        power = self.myteam.isPower(x, y)
        if power is not None:
            # logger.debug(f"[R{self.round}] Pos({x}, {y}) Power {power}")
            return power * Strategy.WeightPower
        ene = self.enemy.isPlayer(x, y)
        if ene is not None:
            if self.myteam.privilege == False:
                score = (- ene.score - player.score) * Strategy.WeightEnemyUnprivilege - 80 * self.enemy.nearmate(x, y, dis=3)
            else:
                nearMate = self.myteam.nearmate(x, y, dis=ClientTeam.Vision) * self.myteam.remain_life
                score = (ene.score + 10) * Strategy.WeightEnemy * (0.5 + nearMate)
            # logger.debug(f"Tri to catch enemy({x}, {y}), near: {nearMate}")
            # logger.debug(f"[R{self.round}] Enemy ({x}, {y}) score: {score}")
            return score
        return Strategy.WeightEmpty * random.randint(0, 3)  # empty

    def weightPos(self, player, coord, weightDir, **kwargs):
        """Score the single target cell and credit it to move slot(s).

        coord: x, y are the start coordinates; vx, vy are distances (may be
        0); tx, ty are the target coordinates.
        """
        x = coord["x"]
        y = coord["y"]
        vx = coord["vx"]
        vy = coord["vy"]
        tx = coord["tx"]
        ty = coord["ty"]
        # Same distance-discount polynomial as the base class.
        distance = 1.75 * (vx+vy) * (vx+vy) - 5.04 * (vx+vy) + 4.25
        dir = self.judegDir(x, y, vx, vy, tx, ty)
        if dir[0] > -1:
            weightDir[dir[0]] += self._calcWeightIgnoreDis(player, tx, ty) / distance
        if dir[1] > -1:
            weightDir[dir[1]] += self._calcWeightIgnoreDis(player, tx, ty) / distance

    def judegDir(self, x, y, vx, vy, tx, ty):
        """Attribute offset (tx - x, ty - y) to a horizontal and/or vertical
        move slot; -1 means no slot on that axis.

        NOTE(review): the name is misspelled ("judegDir" for "judgeDir");
        kept because callers use this spelling.
        """
        dirX = -1
        dirY = -1
        # warning: the vy == vx case matches both branches below, so a tied
        # offset is credited to both a horizontal and a vertical slot.
        if vy <= vx:
            if tx > x:
                # right
                dirX = 0
            elif tx < x:
                # left
                dirX = 1
            # return dirX
        if vx <= vy:
            if ty > y:
                # down
                dirY = 2
            elif ty < y:
                # up
                dirY = 3
            # return dirY
        return dirX, dirY
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.